# -*- coding: utf-8 -*-
"""Precursors_original_from_Stefano1.ipynb

Automatically generated by Colaboratory.

Original file is located at
    https://colab.research.google.com/drive/1L0uUX5vbtjCZpY1C1HcBOQS1qJXqIukk
"""

from google.colab import drive
drive.mount('/content/drive')

cd "/content/drive/MyDrive/NeNet/"

"""# Code to detect precursors in eartquake data
This code is part of mainscript.py written by Veda Ong.
"""

# Copy the data from Google Drive and load it
# This script must be run after preprocessing.py, once the saved data have been uploaded to Google Drive

#!cp -r ./drive/MyDrive/AI/*.p ./  

import pickle
#normnoise = pickle.load( open( "noise_nonorm.p", "rb" ) )
#normpre = pickle.load( open( "pre_nonorm.p", "rb" ) )
#normnoise = pickle.load( open( "normnoise_proper.p", "rb" ) )
#normpre = pickle.load( open( "normpre_proper.p", "rb" ) )


testlist = pickle.load( open( "testlist.p", "rb" ) )
trainlist = pickle.load( open( "trainlist.p", "rb" ) )


normnoise = pickle.load( open( "normnoise_strange.p", "rb" ) )
normpre = pickle.load( open( "normpre_strange.p", "rb" ) )
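
# Quick look at the loaded data (added). This assumes normpre/normnoise map
# event indices to arrays of windows, one row per window.
import numpy as np
print(len(normpre), len(normnoise))
print(np.shape(normpre[trainlist[0]]))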

testlist  # inspect the indices held out for testing

# Normalised data

x_train = [normpre[i] for i in trainlist]

x_test = [normpre[i] for i in testlist]


x_noise_train = [normnoise[i] for i in trainlist]

x_noise_test = [normnoise[i] for i in testlist]
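
# Sanity check (added): the index lists must be disjoint, otherwise test
# windows would leak into the training set
assert set(trainlist).isdisjoint(testlist), "train/test index lists overlap"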

import numpy as np

window_length = 16384  # samples per window

# Stack the windows from all events along the first axis so that the
# precursor and noise sets can be concatenated later
x_train2 = np.reshape(x_train, (len(x_train)*len(x_train[0]), window_length, 3))

x_test2 = np.reshape(x_test, (len(x_test)*len(x_test[0]), window_length, 3))


# The expand_dims calls are kept from the original script: they add a
# trailing axis of size 1 that the reshape below flattens away again, so
# they do not change the result
x_noise_train2 = np.expand_dims(x_noise_train, axis=-1)
x_noise_test2 = np.expand_dims(x_noise_test, axis=-1)

x_noise_train2 = np.reshape(x_noise_train2, (len(x_noise_train)*len(x_noise_train[0]), window_length, 3))
x_noise_test2 = np.reshape(x_noise_test2, (len(x_noise_test)*len(x_noise_test[0]), window_length, 3))
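
# Shape check (added): all four arrays should now be
# (n_windows, window_length, 3) so they can be concatenated below
for name, arr in [('x_train2', x_train2), ('x_test2', x_test2),
                  ('x_noise_train2', x_noise_train2), ('x_noise_test2', x_noise_test2)]:
    print(name, np.shape(arr))
    assert np.shape(arr)[1:] == (window_length, 3)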

# Generate ground-truth labels (1 = 'precursor' class, 0 = 'noise' class)

y_train2 = np.ones(len(x_train2))
y_test2 = np.ones(len(x_test2))

y_noise_train2 = np.zeros(len(x_noise_train2))
y_noise_test2 = np.zeros(len(x_noise_test2))

# The list conversion is kept from the original script; np.append below also
# accepts NumPy arrays directly, so it is not strictly required
y_train2 = y_train2.tolist()
y_test2 = y_test2.tolist()

y_noise_train2 = y_noise_train2.tolist()
y_noise_test2 = y_noise_test2.tolist()

# Concatenate the precursor and noise datasets (precursors first, then noise)
x_train = np.append(x_train2, x_noise_train2, axis=0)
x_test = np.append(x_test2, x_noise_test2, axis=0)

y_train = np.append(y_train2, y_noise_train2, axis=0)
y_test = np.append(y_test2, y_noise_test2, axis=0)
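
# Class-balance check (added). The concatenated arrays are ordered (all
# precursors first, then all noise), so per-batch mixing relies on
# shuffle=True in model.fit, which is the Keras default.
print('train: %d precursor / %d noise' % (np.sum(y_train), len(y_train) - np.sum(y_train)))
print('test:  %d precursor / %d noise' % (np.sum(y_test), len(y_test) - np.sum(y_test)))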

# This part is modified relative to mainscript.py so that it works with the newer version of TensorFlow

!pip install np_utils

#from keras.utils import to_categorical  # location in older Keras versions
from keras.utils.np_utils import to_categorical
y_train = to_categorical(y_train, num_classes=2)
y_test = to_categorical(y_test, num_classes=2)
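
# Example (added): to_categorical one-hot encodes the integer labels, so
# column 0 is the 'noise' class and column 1 the 'precursor' class, e.g.
# to_categorical([0, 1], num_classes=2) -> [[1., 0.], [0., 1.]]
print(y_train.shape, y_test.shape)  # (n_windows, 2) each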

import tensorflow as tf 
import numpy as np
import time
import os
from keras.models import Model
# CuDNNLSTM was removed from the original import list: it no longer exists in
# TF2-era Keras (LSTM uses the cuDNN kernel automatically when possible)
from keras.layers import Conv1D, BatchNormalization, Add, MaxPooling1D, Dropout, Dense, Flatten, ZeroPadding1D
from keras.layers import Activation, Input, concatenate, GaussianNoise, GlobalMaxPooling1D, GlobalAveragePooling1D, Softmax, Permute, Multiply, Masking
#from keras.optimizers import Adam, RMSprop, SGD  # old import path
from tensorflow.keras.optimizers import Adam, RMSprop, SGD
from keras.callbacks import ModelCheckpoint, LearningRateScheduler
from keras import backend as K  # renamed from 'keras' to avoid shadowing the package name
from keras.regularizers import *

def encoder(input_shape, nb_classes):
    """1-D convolutional encoder: a learned residual perturbation of the
    input, convolution blocks with increasing dilation, global average
    pooling, and a softmax classification head."""

    input_layer = Input(input_shape)

    # Add a small random-initialised convolution of the input back onto the
    # input itself (residual connection)
    random = Conv1D(3, 3, padding='same', kernel_initializer='random_normal')(input_layer)
    X = Add()([input_layer, random])

    # Two plain convolution blocks (conv + conv + max-pool + batch norm + ReLU)
    conv0 = Conv1D(filters=32, kernel_size=7, padding='same', strides=1, kernel_initializer='random_normal')(X)
    conv0 = Conv1D(filters=32, kernel_size=7, padding='same', strides=1, kernel_initializer='random_normal')(conv0)
    conv0 = MaxPooling1D(3, strides=1)(conv0)
    conv0 = BatchNormalization()(conv0)
    conv0 = Activation('relu')(conv0)

    conv1 = Conv1D(64, kernel_size=7, padding='same', strides=1, kernel_initializer='random_normal')(conv0)
    conv1 = Conv1D(64, kernel_size=7, padding='same', strides=1, kernel_initializer='random_normal')(conv1)
    conv1 = MaxPooling1D(3, strides=2)(conv1)
    conv1 = BatchNormalization()(conv1)
    conv1 = Activation('relu')(conv1)

    # Dilated convolution blocks: doubling the dilation rate (2, 4, 8, 16, 32)
    # grows the receptive field exponentially with depth
    conv2 = Conv1D(filters=64, kernel_size=5, padding='same', dilation_rate=2, kernel_initializer='random_normal')(conv1)
    conv2 = MaxPooling1D(3, strides=1)(conv2)
    conv2 = BatchNormalization()(conv2)
    conv2 = Activation('relu')(conv2)

    conv3 = Conv1D(filters=128, kernel_size=5, padding='same', dilation_rate=4, kernel_initializer='random_normal')(conv2)
    conv3 = MaxPooling1D(3, strides=1)(conv3)
    conv3 = BatchNormalization()(conv3)
    conv3 = Activation('relu')(conv3)

    conv4 = Conv1D(filters=128, kernel_size=5, padding='same', dilation_rate=8, kernel_initializer='random_normal')(conv3)
    conv4 = MaxPooling1D(3, strides=1)(conv4)
    conv4 = BatchNormalization()(conv4)
    conv4 = Activation('relu')(conv4)

    conv5 = Conv1D(256, kernel_size=3, padding='same', dilation_rate=16, kernel_initializer='random_normal')(conv4)
    conv5 = MaxPooling1D(3, strides=1)(conv5)
    conv5 = BatchNormalization()(conv5)
    conv5 = Activation('relu')(conv5)

    conv6 = Conv1D(filters=256, kernel_size=3, padding='same', dilation_rate=32, kernel_initializer='random_normal')(conv5)
    conv6 = MaxPooling1D(3, strides=1)(conv6)
    conv6 = BatchNormalization()(conv6)
    conv6 = Activation('relu')(conv6)
    conv6 = Dropout(0.02)(conv6)
 
    # Classification head: global average pooling, a dense ReLU layer, and a
    # softmax over the nb_classes outputs
    Y = GlobalAveragePooling1D()(conv6)
    Y = Dense(256, activation='relu', kernel_initializer='random_normal')(Y)
    Y = Dropout(0.02)(Y)

    output_layer = Dense(nb_classes, activation='softmax', kernel_initializer='random_normal')(Y)

    model = Model(inputs=input_layer, outputs=output_layer)

    return model

model = encoder((window_length, 3), 2)
# binary_crossentropy is applied to the two softmax outputs here; with one-hot
# labels, categorical_crossentropy would be the more conventional pairing
model.compile(loss='binary_crossentropy', optimizer=RMSprop(0.0001), metrics=['accuracy'])
model.summary()

tnum = 1
filepath = './model' + str(tnum) + '.h5'

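# Training sketch (added): checkpoint the best weights to `filepath` and fit
# on the concatenated data. The fit call is not part of this section, so the
# epoch count and batch size below are illustrative placeholders, not values
# from the original script.
#checkpoint = ModelCheckpoint(filepath, monitor='val_accuracy', save_best_only=True, verbose=1)
#history = model.fit(x_train, y_train, validation_data=(x_test, y_test),
#                    epochs=50, batch_size=16, shuffle=True, callbacks=[checkpoint])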

"""import tensorflow as tf 
import numpy as np
import time
import os
from keras.models import Model
from keras.layers import Conv1D, BatchNormalization, Add, MaxPooling1D, Dropout, Dense, Flatten, CuDNNLSTM, ZeroPadding1D
from keras.layers import Activation, Input, concatenate, GaussianNoise, GlobalMaxPooling1D, GlobalAveragePooling1D, Softmax, Permute, Multiply, Masking
from keras.optimizers import Adam, RMSprop, SGD
from keras.callbacks import ModelCheckpoint, LearningRateScheduler
from keras import backend as keras
from keras.regularizers import *  
def encoder(input_shape, nb_classes):
   
    input_layer= Input(input_shape)
    random = Conv1D(3, 3, padding='same', kernel_initializer='random_normal')(input_layer)

    X = Add()([input_layer, random])

    conv3 = Conv1D(filters=32, kernel_size=7,padding='same', strides=1, kernel_initializer='random_normal')(X)
    conv3 = Conv1D(filters=32, kernel_size=7,padding='same', strides=1, kernel_initializer='random_normal')(conv3)
    conv3 = MaxPooling1D(3, strides=1)(conv3)
    conv3 = BatchNormalization()(conv3)
    conv3 = Activation('relu')(conv3)

    conv4 = Conv1D(64, kernel_size=7,padding='same', strides=1, kernel_initializer='random_normal')(conv3)
    conv4 = Conv1D(64, kernel_size=7,padding='same', strides=1, kernel_initializer='random_normal')(conv4)        
    conv4 = MaxPooling1D(3, strides=2)(conv4)
    conv4 = BatchNormalization()(conv4)
    conv4 = Activation('relu')(conv4)

    conv5 = Conv1D(filters=64, kernel_size=5, padding='same', dilation_rate=2, kernel_initializer='random_normal')(conv4)
    conv5 = MaxPooling1D(3, strides=1)(conv5)
    conv5 = BatchNormalization()(conv5)
    conv5 = Activation('relu')(conv5)
        
    conv3 = Conv1D(filters=128, kernel_size=5,padding='same', dilation_rate=4 , kernel_initializer='random_normal')(conv5)
    conv3 = MaxPooling1D(3, strides=1)(conv3)
    conv3 = BatchNormalization()(conv3)
    conv3 = Activation('relu')(conv3)

    conv3 = Conv1D(filters=128, kernel_size=5,padding='same', dilation_rate=8 , kernel_initializer='random_normal')(conv3)
    conv3 = MaxPooling1D(3, strides=1)(conv3)
    conv3 = BatchNormalization()(conv3)
    conv3 = Activation('relu')(conv3)

    conv4 = Conv1D(256, kernel_size=3,padding='same',  dilation_rate=16 , kernel_initializer='random_normal')(conv3)
    conv4 = MaxPooling1D(3, strides=1)(conv4)
    conv4 = BatchNormalization()(conv4)
    conv4 = Activation('relu')(conv4)

    conv5 = Conv1D(filters=256, kernel_size=3, padding='same', dilation_rate=32, kernel_initializer='random_normal')(conv4)
    conv5 = MaxPooling1D(3, strides=1)(conv5)
    conv5 = BatchNormalization()(conv5)
    conv5 = Activation('relu')(conv5)
    conv5 = Dropout(0.02)(conv5)
 
    Y = GlobalAveragePooling1D()(conv5)
    Y = Dense(256, activation='relu', kernel_initializer='random_normal')(Y)
    Y = Dropout(0.02)(Y)

 
    output_layer = Dense(nb_classes, activation='softmax', kernel_initializer='random_normal')(Y)
   
    model = Model(inputs=input_layer, outputs=output_layer)

        
    return model 

model=encoder((window_length, 3), 2)
model.compile(loss='binary_crossentropy', optimizer=RMSprop(0.0001), metrics=['accuracy'])
model.summary()  
"""