Dataset schema (one row per source file):
- repo: string, length 2–99
- file: string, length 13–225
- code: string, length 0–18.3M
- file_length: int64, range 0–18.3M
- avg_line_length: float64, range 0–1.36M
- max_line_length: int64, range 0–4.26M
- extension_type: string, 1 distinct value
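For orientation, here is a minimal sketch of how a dump with this schema could be loaded and inspected. It assumes the rows live in a Hugging Face dataset; the dataset path "your-org/your-code-dump" is a hypothetical placeholder, while the column names come from the schema above.

from datasets import load_dataset

# "your-org/your-code-dump" is a placeholder, not the real dataset name.
ds = load_dataset("your-org/your-code-dump", split="train")
row = ds[0]
print(row["repo"], row["file"], row["extension_type"])
print("file_length:", row["file_length"], "max_line_length:", row["max_line_length"])
print(row["code"][:200])  # first 200 characters of the stored source file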
repo: Paddle
file: Paddle-master/python/paddle/fluid/tests/test_python_operator_overriding.py

# Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

import unittest

import numpy as np

import paddle.fluid.layers as layers
import paddle.fluid.framework as framework
import paddle.fluid as fluid


class TestPythonOperatorOverride(unittest.TestCase):
    def check_result(self, fn, place, dtype):
        shape = [9, 10]

        x_data = np.random.random(size=shape).astype(dtype)
        y_data = np.random.random(size=shape).astype(dtype)
        python_out = fn(x_data, y_data)

        x_var = layers.create_global_var(
            name='x', shape=shape, value=0.0, dtype=dtype, persistable=True)
        y_var = layers.create_global_var(
            name='y', shape=shape, value=0.0, dtype=dtype, persistable=True)
        out = fn(x_var, y_var)

        exe = fluid.Executor(place)

        exe.run(fluid.default_startup_program())
        fluid_out = exe.run(fluid.default_main_program(),
                            feed={'x': x_data, 'y': y_data},
                            fetch_list=[out])

        np.testing.assert_array_equal(python_out, fluid_out[0])

    def test_override(self):
        # compare func to check
        compare_fns = [
            lambda _a, _b: _a == _b,
            lambda _a, _b: _a != _b,
            lambda _a, _b: _a < _b,
            lambda _a, _b: _a <= _b,
            lambda _a, _b: _a > _b,
            lambda _a, _b: _a >= _b,
        ]

        # places to check
        places = [fluid.CPUPlace()]
        if fluid.core.is_compiled_with_cuda():
            places.append(fluid.CUDAPlace(0))

        # dtypes to check
        dtypes = ['int32', 'float32']

        for place in places:
            for dtype in dtypes:
                for compare_fn in compare_fns:
                    with framework.program_guard(framework.Program(),
                                                 framework.Program()):
                        self.check_result(compare_fn, place, dtype)


if __name__ == '__main__':
    unittest.main()

file_length: 2,568 | avg_line_length: 32.363636 | max_line_length: 76 | extension_type: py
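The three numeric columns above can be derived from the file text itself. Below is a minimal sketch of plausible definitions; the dump's exact counting rules (e.g. whether newlines count toward file_length) are not documented here, so treat these as assumptions.

def file_stats(code):
    # Assumed definitions; the dataset's exact recipe is not documented.
    lines = code.splitlines()
    file_length = len(code)  # total characters
    avg_line_length = (sum(len(l) for l in lines) / len(lines)) if lines else 0.0
    max_line_length = max((len(l) for l in lines), default=0)
    return file_length, avg_line_length, max_line_length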
repo: Paddle
file: Paddle-master/python/paddle/fluid/tests/test_error_clip.py

# Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

from __future__ import print_function

import numpy as np
import paddle
import paddle.fluid as fluid

BATCH_SIZE = 128
CLIP_MAX = 2e-6
CLIP_MIN = -1e-6

prog = fluid.framework.Program()

with fluid.program_guard(main_program=prog):
    image = fluid.layers.data(name='x', shape=[784], dtype='float32')

    hidden1 = fluid.layers.fc(input=image, size=128, act='relu')
    hidden2 = fluid.layers.fc(input=hidden1, size=64, act='relu')
    predict = fluid.layers.fc(input=hidden2, size=10, act='softmax')

    label = fluid.layers.data(name='y', shape=[1], dtype='int64')

    cost = fluid.layers.cross_entropy(input=predict, label=label)
    avg_cost = fluid.layers.mean(cost)

prog_clip = prog.clone()
prog_clip.block(0).var(hidden1.name).set_error_clip(
    fluid.clip.ErrorClipByValue(max=CLIP_MAX, min=CLIP_MIN))

avg_cost_clip = prog_clip.block(0).var(avg_cost.name)
fluid.backward.append_backward(loss=avg_cost)
fluid.backward.append_backward(
    loss=avg_cost_clip, callbacks=[fluid.clip.error_clip_callback])

hidden1_grad = prog.block(0).var(hidden1.name + "@GRAD")
hidden1_grad_clip = prog_clip.block(0).var(hidden1.name + "@GRAD")

hidden2_grad = prog.block(0).var(hidden2.name + "@GRAD")
hidden2_grad_clip = prog_clip.block(0).var(hidden2.name + "@GRAD")

train_reader = paddle.batch(
    paddle.reader.shuffle(paddle.dataset.mnist.train(), buf_size=8192),
    batch_size=BATCH_SIZE)

place = fluid.CPUPlace()
exe = fluid.Executor(place)
feeder = fluid.DataFeeder(feed_list=[image, label], place=place)
exe.run(fluid.default_startup_program())

count = 0
for data in train_reader():
    count += 1
    if count > 5:
        break
    out1, out2 = exe.run(prog,
                         feed=feeder.feed(data),
                         fetch_list=[hidden1_grad, hidden2_grad])
    out1_clip, out2_clip = exe.run(
        prog_clip,
        feed=feeder.feed(data),
        fetch_list=[hidden1_grad_clip, hidden2_grad_clip])
    if not ((out1.clip(min=CLIP_MIN, max=CLIP_MAX) == out1_clip).all() and
            (out2 == out2_clip).all()):
        exit(1)

exit(0)

file_length: 2,717 | avg_line_length: 32.146341 | max_line_length: 74 | extension_type: py
repo: Paddle
file: Paddle-master/python/paddle/fluid/tests/book/test_word2vec.py

# Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

import paddle
import paddle.fluid as fluid
import unittest
import os
import numpy as np
import math
import sys


def train(use_cuda, is_sparse, is_parallel, save_dirname, is_local=True):
    PASS_NUM = 100
    EMBED_SIZE = 32
    HIDDEN_SIZE = 256
    N = 5
    BATCH_SIZE = 32
    IS_SPARSE = is_sparse

    def __network__(words):
        embed_first = fluid.layers.embedding(
            input=words[0], size=[dict_size, EMBED_SIZE], dtype='float32',
            is_sparse=IS_SPARSE, param_attr='shared_w')
        embed_second = fluid.layers.embedding(
            input=words[1], size=[dict_size, EMBED_SIZE], dtype='float32',
            is_sparse=IS_SPARSE, param_attr='shared_w')
        embed_third = fluid.layers.embedding(
            input=words[2], size=[dict_size, EMBED_SIZE], dtype='float32',
            is_sparse=IS_SPARSE, param_attr='shared_w')
        embed_forth = fluid.layers.embedding(
            input=words[3], size=[dict_size, EMBED_SIZE], dtype='float32',
            is_sparse=IS_SPARSE, param_attr='shared_w')

        concat_embed = fluid.layers.concat(
            input=[embed_first, embed_second, embed_third, embed_forth],
            axis=1)
        hidden1 = fluid.layers.fc(input=concat_embed,
                                  size=HIDDEN_SIZE,
                                  act='sigmoid')
        predict_word = fluid.layers.fc(input=hidden1,
                                       size=dict_size,
                                       act='softmax')
        cost = fluid.layers.cross_entropy(input=predict_word, label=words[4])
        avg_cost = fluid.layers.mean(cost)
        return avg_cost, predict_word

    word_dict = paddle.dataset.imikolov.build_dict()
    dict_size = len(word_dict)

    first_word = fluid.layers.data(name='firstw', shape=[1], dtype='int64')
    second_word = fluid.layers.data(name='secondw', shape=[1], dtype='int64')
    third_word = fluid.layers.data(name='thirdw', shape=[1], dtype='int64')
    forth_word = fluid.layers.data(name='forthw', shape=[1], dtype='int64')
    next_word = fluid.layers.data(name='nextw', shape=[1], dtype='int64')

    if not is_parallel:
        avg_cost, predict_word = __network__(
            [first_word, second_word, third_word, forth_word, next_word])
    else:
        places = fluid.layers.get_places()
        pd = fluid.layers.ParallelDo(places)
        with pd.do():
            avg_cost, predict_word = __network__(
                map(pd.read_input, [
                    first_word, second_word, third_word, forth_word, next_word
                ]))
            pd.write_output(avg_cost)

        avg_cost = fluid.layers.mean(pd())

    sgd_optimizer = fluid.optimizer.SGD(learning_rate=0.001)
    sgd_optimizer.minimize(avg_cost)

    train_reader = paddle.batch(
        paddle.dataset.imikolov.train(word_dict, N), BATCH_SIZE)

    place = fluid.CUDAPlace(0) if use_cuda else fluid.CPUPlace()
    exe = fluid.Executor(place)
    feeder = fluid.DataFeeder(
        feed_list=[first_word, second_word, third_word, forth_word, next_word],
        place=place)

    def train_loop(main_program):
        exe.run(fluid.default_startup_program())

        for pass_id in range(PASS_NUM):
            for data in train_reader():
                avg_cost_np = exe.run(main_program,
                                      feed=feeder.feed(data),
                                      fetch_list=[avg_cost])
                if avg_cost_np[0] < 5.0:
                    if save_dirname is not None:
                        fluid.io.save_inference_model(
                            save_dirname,
                            ['firstw', 'secondw', 'thirdw', 'forthw'],
                            [predict_word], exe)
                    return
                if math.isnan(float(avg_cost_np[0])):
                    sys.exit("got NaN loss, training failed.")

        raise AssertionError(
            "Cost is too large {0:2.2}".format(avg_cost_np[0]))

    if is_local:
        train_loop(fluid.default_main_program())
    else:
        port = os.getenv("PADDLE_INIT_PORT", "6174")
        pserver_ips = os.getenv("PADDLE_INIT_PSERVERS")  # ip,ip...
        eplist = []
        for ip in pserver_ips.split(","):
            eplist.append(':'.join([ip, port]))
        pserver_endpoints = ",".join(eplist)  # ip:port,ip:port...
        trainers = int(os.getenv("TRAINERS"))
        current_endpoint = os.getenv("POD_IP") + ":" + port
        trainer_id = int(os.getenv("PADDLE_INIT_TRAINER_ID"))
        training_role = os.getenv("TRAINING_ROLE", "TRAINER")
        t = fluid.DistributeTranspiler()
        t.transpile(trainer_id, pservers=pserver_endpoints, trainers=trainers)
        if training_role == "PSERVER":
            pserver_prog = t.get_pserver_program(current_endpoint)
            pserver_startup = t.get_startup_program(current_endpoint,
                                                    pserver_prog)
            exe.run(pserver_startup)
            exe.run(pserver_prog)
        elif training_role == "TRAINER":
            train_loop(t.get_trainer_program())


def infer(use_cuda, save_dirname=None):
    if save_dirname is None:
        return

    place = fluid.CUDAPlace(0) if use_cuda else fluid.CPUPlace()
    exe = fluid.Executor(place)

    inference_scope = fluid.core.Scope()
    with fluid.scope_guard(inference_scope):
        # Use fluid.io.load_inference_model to obtain the inference program desc,
        # the feed_target_names (the names of variables that will be fed
        # data using feed operators), and the fetch_targets (variables that
        # we want to obtain data from using fetch operators).
        [inference_program, feed_target_names,
         fetch_targets] = fluid.io.load_inference_model(save_dirname, exe)

        word_dict = paddle.dataset.imikolov.build_dict()
        dict_size = len(word_dict)

        # Setup inputs by creating 4 LoDTensors representing 4 words. Here each word
        # is simply an index to look up for the corresponding word vector and hence
        # the shape of word (base_shape) should be [1]. The length-based level of
        # detail (lod) info of each LoDTensor should be [[1]] meaning there is only
        # one lod_level and there is only one sequence of one word on this level.
        # Note that lod info should be a list of lists.
        lod = [[1]]
        base_shape = [1]
        # The range of random integers is [low, high]
        first_word = fluid.create_random_int_lodtensor(
            lod, base_shape, place, low=0, high=dict_size - 1)
        second_word = fluid.create_random_int_lodtensor(
            lod, base_shape, place, low=0, high=dict_size - 1)
        third_word = fluid.create_random_int_lodtensor(
            lod, base_shape, place, low=0, high=dict_size - 1)
        fourth_word = fluid.create_random_int_lodtensor(
            lod, base_shape, place, low=0, high=dict_size - 1)

        assert feed_target_names[0] == 'firstw'
        assert feed_target_names[1] == 'secondw'
        assert feed_target_names[2] == 'thirdw'
        assert feed_target_names[3] == 'forthw'

        # Construct feed as a dictionary of {feed_target_name: feed_target_data}
        # and results will contain a list of data corresponding to fetch_targets.
        results = exe.run(inference_program,
                          feed={
                              feed_target_names[0]: first_word,
                              feed_target_names[1]: second_word,
                              feed_target_names[2]: third_word,
                              feed_target_names[3]: fourth_word
                          },
                          fetch_list=fetch_targets,
                          return_numpy=False)
        print(results[0].lod())
        np_data = np.array(results[0])
        print("Inference Shape: ", np_data.shape)


def main(use_cuda, is_sparse, is_parallel):
    if use_cuda and not fluid.core.is_compiled_with_cuda():
        return

    if not is_parallel:
        save_dirname = "word2vec.inference.model"
    else:
        save_dirname = None

    train(use_cuda, is_sparse, is_parallel, save_dirname)
    infer(use_cuda, save_dirname)


FULL_TEST = os.getenv('FULL_TEST',
                      '0').lower() in ['true', '1', 't', 'y', 'yes', 'on']
SKIP_REASON = "Only run minimum number of tests in CI server, to make CI faster"


class W2VTest(unittest.TestCase):
    pass


def inject_test_method(use_cuda, is_sparse, is_parallel):
    fn_name = "test_{0}_{1}_{2}".format("cuda" if use_cuda else "cpu",
                                        "sparse" if is_sparse else "dense",
                                        "parallel" if is_parallel else "normal")

    def __impl__(*args, **kwargs):
        prog = fluid.Program()
        startup_prog = fluid.Program()
        scope = fluid.core.Scope()
        with fluid.scope_guard(scope):
            with fluid.program_guard(prog, startup_prog):
                main(
                    use_cuda=use_cuda,
                    is_sparse=is_sparse,
                    is_parallel=is_parallel)

    if use_cuda and is_sparse:
        fn = __impl__
    else:
        # skip the other tests when on CI server
        fn = unittest.skipUnless(
            condition=FULL_TEST, reason=SKIP_REASON)(__impl__)

    setattr(W2VTest, fn_name, fn)


for use_cuda in (False, True):
    for is_sparse in (False, True):
        for is_parallel in (False, True):
            inject_test_method(use_cuda, is_sparse, is_parallel)

if __name__ == '__main__':
    unittest.main()

file_length: 10,281 | avg_line_length: 38.095057 | max_line_length: 85 | extension_type: py
repo: Paddle
file: Paddle-master/python/paddle/fluid/tests/book/test_rnn_encoder_decoder.py

# Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

import numpy as np
import paddle
import paddle.fluid as fluid
import paddle.fluid.core as core
import paddle.fluid.framework as framework
import paddle.fluid.layers as layers
import contextlib
import math
import sys
import unittest
from paddle.fluid.executor import Executor

dict_size = 30000
source_dict_dim = target_dict_dim = dict_size
src_dict, trg_dict = paddle.dataset.wmt14.get_dict(dict_size)
hidden_dim = 32
embedding_dim = 16
batch_size = 10
max_length = 50
topk_size = 50
encoder_size = decoder_size = hidden_dim

IS_SPARSE = True
USE_PEEPHOLES = False


def bi_lstm_encoder(input_seq, hidden_size):
    input_forward_proj = fluid.layers.fc(input=input_seq,
                                         size=hidden_size * 4,
                                         bias_attr=True)
    forward, _ = fluid.layers.dynamic_lstm(
        input=input_forward_proj,
        size=hidden_size * 4,
        use_peepholes=USE_PEEPHOLES)
    input_backward_proj = fluid.layers.fc(input=input_seq,
                                          size=hidden_size * 4,
                                          bias_attr=True)
    backward, _ = fluid.layers.dynamic_lstm(
        input=input_backward_proj,
        size=hidden_size * 4,
        is_reverse=True,
        use_peepholes=USE_PEEPHOLES)

    forward_last = fluid.layers.sequence_last_step(input=forward)
    backward_first = fluid.layers.sequence_first_step(input=backward)

    return forward_last, backward_first


# FIXME(peterzhang2029): Replace this function with the lstm_unit_op.
def lstm_step(x_t, hidden_t_prev, cell_t_prev, size):
    def linear(inputs):
        return fluid.layers.fc(input=inputs, size=size, bias_attr=True)

    forget_gate = fluid.layers.sigmoid(x=linear([hidden_t_prev, x_t]))
    input_gate = fluid.layers.sigmoid(x=linear([hidden_t_prev, x_t]))
    output_gate = fluid.layers.sigmoid(x=linear([hidden_t_prev, x_t]))
    cell_tilde = fluid.layers.tanh(x=linear([hidden_t_prev, x_t]))

    cell_t = fluid.layers.sums(input=[
        fluid.layers.elementwise_mul(x=forget_gate, y=cell_t_prev),
        fluid.layers.elementwise_mul(x=input_gate, y=cell_tilde)
    ])

    hidden_t = fluid.layers.elementwise_mul(
        x=output_gate, y=fluid.layers.tanh(x=cell_t))

    return hidden_t, cell_t


def lstm_decoder_without_attention(target_embedding, decoder_boot, context,
                                   decoder_size):
    rnn = fluid.layers.DynamicRNN()

    cell_init = fluid.layers.fill_constant_batch_size_like(
        input=decoder_boot,
        value=0.0,
        shape=[-1, decoder_size],
        dtype='float32')
    cell_init.stop_gradient = False

    with rnn.block():
        current_word = rnn.step_input(target_embedding)
        context = rnn.static_input(context)

        hidden_mem = rnn.memory(init=decoder_boot, need_reorder=True)
        cell_mem = rnn.memory(init=cell_init)
        decoder_inputs = fluid.layers.concat(
            input=[context, current_word], axis=1)
        h, c = lstm_step(decoder_inputs, hidden_mem, cell_mem, decoder_size)
        rnn.update_memory(hidden_mem, h)
        rnn.update_memory(cell_mem, c)
        out = fluid.layers.fc(input=h,
                              size=target_dict_dim,
                              bias_attr=True,
                              act='softmax')
        rnn.output(out)
    return rnn()


def seq_to_seq_net():
    """Construct a seq2seq network."""
    src_word_idx = fluid.layers.data(
        name='source_sequence', shape=[1], dtype='int64', lod_level=1)

    src_embedding = fluid.layers.embedding(
        input=src_word_idx,
        size=[source_dict_dim, embedding_dim],
        dtype='float32')

    src_forward_last, src_backward_first = bi_lstm_encoder(
        input_seq=src_embedding, hidden_size=encoder_size)

    encoded_vector = fluid.layers.concat(
        input=[src_forward_last, src_backward_first], axis=1)

    decoder_boot = fluid.layers.fc(input=src_backward_first,
                                   size=decoder_size,
                                   bias_attr=False,
                                   act='tanh')

    trg_word_idx = fluid.layers.data(
        name='target_sequence', shape=[1], dtype='int64', lod_level=1)

    trg_embedding = fluid.layers.embedding(
        input=trg_word_idx,
        size=[target_dict_dim, embedding_dim],
        dtype='float32')

    prediction = lstm_decoder_without_attention(trg_embedding, decoder_boot,
                                                encoded_vector, decoder_size)
    label = fluid.layers.data(
        name='label_sequence', shape=[1], dtype='int64', lod_level=1)
    cost = fluid.layers.cross_entropy(input=prediction, label=label)
    avg_cost = fluid.layers.mean(cost)

    return avg_cost, prediction


def train(use_cuda, save_dirname=None):
    [avg_cost, prediction] = seq_to_seq_net()

    optimizer = fluid.optimizer.Adagrad(learning_rate=1e-4)
    optimizer.minimize(avg_cost)

    train_data = paddle.batch(
        paddle.reader.shuffle(
            paddle.dataset.wmt14.train(dict_size), buf_size=1000),
        batch_size=batch_size)

    place = fluid.CUDAPlace(0) if use_cuda else fluid.CPUPlace()
    exe = Executor(place)
    exe.run(framework.default_startup_program())

    feed_order = ['source_sequence', 'target_sequence', 'label_sequence']
    feed_list = [
        framework.default_main_program().global_block().var(var_name)
        for var_name in feed_order
    ]
    feeder = fluid.DataFeeder(feed_list, place)

    batch_id = 0
    for pass_id in xrange(2):
        for data in train_data():
            outs = exe.run(framework.default_main_program(),
                           feed=feeder.feed(data),
                           fetch_list=[avg_cost])
            avg_cost_val = np.array(outs[0])
            print('pass_id=' + str(pass_id) + ' batch=' + str(batch_id) +
                  " avg_cost=" + str(avg_cost_val))
            if math.isnan(float(avg_cost_val[0])):
                sys.exit("got NaN loss, training failed.")
            if batch_id > 3:
                if save_dirname is not None:
                    fluid.io.save_inference_model(
                        save_dirname, ['source_sequence', 'target_sequence'],
                        [prediction], exe)
                return
            batch_id += 1


def infer(use_cuda, save_dirname=None):
    if save_dirname is None:
        return

    place = fluid.CUDAPlace(0) if use_cuda else fluid.CPUPlace()
    exe = fluid.Executor(place)

    inference_scope = fluid.core.Scope()
    with fluid.scope_guard(inference_scope):
        # Use fluid.io.load_inference_model to obtain the inference program desc,
        # the feed_target_names (the names of variables that will be fed
        # data using feed operators), and the fetch_targets (variables that
        # we want to obtain data from using fetch operators).
        [inference_program, feed_target_names,
         fetch_targets] = fluid.io.load_inference_model(save_dirname, exe)

        # Setup input by creating LoDTensor to represent sequence of words.
        # Here each word is the basic element of the LoDTensor and the shape of
        # each word (base_shape) should be [1] since it is simply an index to
        # look up for the corresponding word vector.
        # Suppose the length-based level of detail (lod) info is set to [[4, 6]],
        # which has only one lod level. Then the created LoDTensor will have only
        # one higher level structure (sequence of words, or sentence) than the basic
        # element (word). Hence the LoDTensor will hold data for two sentences of
        # length 4 and 6, respectively.
        # Note that lod info should be a list of lists.
        lod = [[4, 6]]
        base_shape = [1]
        # The range of random integers is [low, high]
        word_data = fluid.create_random_int_lodtensor(
            lod, base_shape, place, low=0, high=1)
        trg_word = fluid.create_random_int_lodtensor(
            lod, base_shape, place, low=0, high=1)

        # Construct feed as a dictionary of {feed_target_name: feed_target_data}
        # and results will contain a list of data corresponding to fetch_targets.
        assert feed_target_names[0] == 'source_sequence'
        assert feed_target_names[1] == 'target_sequence'
        results = exe.run(inference_program,
                          feed={
                              feed_target_names[0]: word_data,
                              feed_target_names[1]: trg_word,
                          },
                          fetch_list=fetch_targets,
                          return_numpy=False)
        print(results[0].lod())
        np_data = np.array(results[0])
        print("Inference shape: ", np_data.shape)
        print("Inference results: ", np_data)


def main(use_cuda):
    if use_cuda and not fluid.core.is_compiled_with_cuda():
        return

    # Directory for saving the trained model
    save_dirname = "rnn_encoder_decoder.inference.model"

    train(use_cuda, save_dirname)
    infer(use_cuda, save_dirname)


class TestRnnEncoderDecoder(unittest.TestCase):
    def test_cuda(self):
        with self.scope_prog_guard():
            main(use_cuda=True)

    def test_cpu(self):
        with self.scope_prog_guard():
            main(use_cuda=False)

    @contextlib.contextmanager
    def scope_prog_guard(self):
        prog = fluid.Program()
        startup_prog = fluid.Program()
        scope = fluid.core.Scope()
        with fluid.scope_guard(scope):
            with fluid.program_guard(prog, startup_prog):
                yield


if __name__ == '__main__':
    unittest.main()

file_length: 10,324 | avg_line_length: 35.613475 | max_line_length: 85 | extension_type: py
repo: Paddle
file: Paddle-master/python/paddle/fluid/tests/book/test_label_semantic_roles.py

# Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

import contextlib
import math
import numpy as np
import os
import time
import unittest

import paddle
import paddle.dataset.conll05 as conll05
import paddle.fluid as fluid

word_dict, verb_dict, label_dict = conll05.get_dict()
word_dict_len = len(word_dict)
label_dict_len = len(label_dict)
pred_dict_len = len(verb_dict)

mark_dict_len = 2
word_dim = 32
mark_dim = 5
hidden_dim = 512
depth = 8
mix_hidden_lr = 1e-3

IS_SPARSE = True
PASS_NUM = 10
BATCH_SIZE = 10

embedding_name = 'emb'


def load_parameter(file_name, h, w):
    with open(file_name, 'rb') as f:
        f.read(16)  # skip header.
        return np.fromfile(f, dtype=np.float32).reshape(h, w)


def db_lstm(word, predicate, ctx_n2, ctx_n1, ctx_0, ctx_p1, ctx_p2, mark,
            **ignored):
    # 8 features
    predicate_embedding = fluid.layers.embedding(
        input=predicate,
        size=[pred_dict_len, word_dim],
        dtype='float32',
        is_sparse=IS_SPARSE,
        param_attr='vemb')

    mark_embedding = fluid.layers.embedding(
        input=mark,
        size=[mark_dict_len, mark_dim],
        dtype='float32',
        is_sparse=IS_SPARSE)

    word_input = [word, ctx_n2, ctx_n1, ctx_0, ctx_p1, ctx_p2]
    emb_layers = [
        fluid.layers.embedding(
            size=[word_dict_len, word_dim],
            input=x,
            param_attr=fluid.ParamAttr(
                name=embedding_name, trainable=False)) for x in word_input
    ]
    emb_layers.append(predicate_embedding)
    emb_layers.append(mark_embedding)

    hidden_0_layers = [
        fluid.layers.fc(input=emb, size=hidden_dim, act='tanh')
        for emb in emb_layers
    ]

    hidden_0 = fluid.layers.sums(input=hidden_0_layers)

    lstm_0 = fluid.layers.dynamic_lstm(
        input=hidden_0,
        size=hidden_dim,
        candidate_activation='relu',
        gate_activation='sigmoid',
        cell_activation='sigmoid')

    # stack L-LSTM and R-LSTM with direct edges
    input_tmp = [hidden_0, lstm_0]

    for i in range(1, depth):
        mix_hidden = fluid.layers.sums(input=[
            fluid.layers.fc(input=input_tmp[0], size=hidden_dim, act='tanh'),
            fluid.layers.fc(input=input_tmp[1], size=hidden_dim, act='tanh')
        ])

        lstm = fluid.layers.dynamic_lstm(
            input=mix_hidden,
            size=hidden_dim,
            candidate_activation='relu',
            gate_activation='sigmoid',
            cell_activation='sigmoid',
            is_reverse=((i % 2) == 1))

        input_tmp = [mix_hidden, lstm]

    feature_out = fluid.layers.sums(input=[
        fluid.layers.fc(input=input_tmp[0], size=label_dict_len, act='tanh'),
        fluid.layers.fc(input=input_tmp[1], size=label_dict_len, act='tanh')
    ])

    return feature_out


def train(use_cuda, save_dirname=None, is_local=True):
    # define network topology
    word = fluid.layers.data(
        name='word_data', shape=[1], dtype='int64', lod_level=1)
    predicate = fluid.layers.data(
        name='verb_data', shape=[1], dtype='int64', lod_level=1)
    ctx_n2 = fluid.layers.data(
        name='ctx_n2_data', shape=[1], dtype='int64', lod_level=1)
    ctx_n1 = fluid.layers.data(
        name='ctx_n1_data', shape=[1], dtype='int64', lod_level=1)
    ctx_0 = fluid.layers.data(
        name='ctx_0_data', shape=[1], dtype='int64', lod_level=1)
    ctx_p1 = fluid.layers.data(
        name='ctx_p1_data', shape=[1], dtype='int64', lod_level=1)
    ctx_p2 = fluid.layers.data(
        name='ctx_p2_data', shape=[1], dtype='int64', lod_level=1)
    mark = fluid.layers.data(
        name='mark_data', shape=[1], dtype='int64', lod_level=1)
    feature_out = db_lstm(**locals())
    target = fluid.layers.data(
        name='target', shape=[1], dtype='int64', lod_level=1)
    crf_cost = fluid.layers.linear_chain_crf(
        input=feature_out,
        label=target,
        param_attr=fluid.ParamAttr(
            name='crfw', learning_rate=mix_hidden_lr))
    avg_cost = fluid.layers.mean(crf_cost)

    # TODO(qiao)
    # check other optimizers and check why out will be NAN
    sgd_optimizer = fluid.optimizer.SGD(
        learning_rate=fluid.layers.exponential_decay(
            learning_rate=0.01,
            decay_steps=100000,
            decay_rate=0.5,
            staircase=True))
    sgd_optimizer.minimize(avg_cost)

    # TODO(qiao)
    # add dependency track and move this config before optimizer
    crf_decode = fluid.layers.crf_decoding(
        input=feature_out, param_attr=fluid.ParamAttr(name='crfw'))

    train_data = paddle.batch(
        paddle.reader.shuffle(
            paddle.dataset.conll05.test(), buf_size=8192),
        batch_size=BATCH_SIZE)

    place = fluid.CUDAPlace(0) if use_cuda else fluid.CPUPlace()
    feeder = fluid.DataFeeder(
        feed_list=[
            word, ctx_n2, ctx_n1, ctx_0, ctx_p1, ctx_p2, predicate, mark,
            target
        ],
        place=place)
    exe = fluid.Executor(place)

    def train_loop(main_program):
        exe.run(fluid.default_startup_program())
        embedding_param = fluid.global_scope().find_var(
            embedding_name).get_tensor()
        embedding_param.set(
            load_parameter(conll05.get_embedding(), word_dict_len, word_dim),
            place)

        start_time = time.time()
        batch_id = 0
        for pass_id in xrange(PASS_NUM):
            for data in train_data():
                cost = exe.run(main_program,
                               feed=feeder.feed(data),
                               fetch_list=[avg_cost])
                cost = cost[0]

                if batch_id % 10 == 0:
                    print("avg_cost:" + str(cost))
                    if batch_id != 0:
                        print("second per batch: " + str(
                            (time.time() - start_time) / batch_id))
                    # Set the threshold low to speed up the CI test
                    if float(cost) < 60.0:
                        if save_dirname is not None:
                            # TODO(liuyiqun): Change the target to crf_decode
                            fluid.io.save_inference_model(save_dirname, [
                                'word_data', 'verb_data', 'ctx_n2_data',
                                'ctx_n1_data', 'ctx_0_data', 'ctx_p1_data',
                                'ctx_p2_data', 'mark_data'
                            ], [feature_out], exe)
                        return

                batch_id = batch_id + 1

    if is_local:
        train_loop(fluid.default_main_program())
    else:
        port = os.getenv("PADDLE_INIT_PORT", "6174")
        pserver_ips = os.getenv("PADDLE_INIT_PSERVERS")  # ip,ip...
        eplist = []
        for ip in pserver_ips.split(","):
            eplist.append(':'.join([ip, port]))
        pserver_endpoints = ",".join(eplist)  # ip:port,ip:port...
        trainers = int(os.getenv("TRAINERS"))
        current_endpoint = os.getenv("POD_IP") + ":" + port
        trainer_id = int(os.getenv("PADDLE_INIT_TRAINER_ID"))
        training_role = os.getenv("TRAINING_ROLE", "TRAINER")
        t = fluid.DistributeTranspiler()
        t.transpile(trainer_id, pservers=pserver_endpoints, trainers=trainers)
        if training_role == "PSERVER":
            pserver_prog = t.get_pserver_program(current_endpoint)
            pserver_startup = t.get_startup_program(current_endpoint,
                                                    pserver_prog)
            exe.run(pserver_startup)
            exe.run(pserver_prog)
        elif training_role == "TRAINER":
            train_loop(t.get_trainer_program())


def infer(use_cuda, save_dirname=None):
    if save_dirname is None:
        return

    place = fluid.CUDAPlace(0) if use_cuda else fluid.CPUPlace()
    exe = fluid.Executor(place)

    inference_scope = fluid.core.Scope()
    with fluid.scope_guard(inference_scope):
        # Use fluid.io.load_inference_model to obtain the inference program desc,
        # the feed_target_names (the names of variables that will be fed
        # data using feed operators), and the fetch_targets (variables that
        # we want to obtain data from using fetch operators).
        [inference_program, feed_target_names,
         fetch_targets] = fluid.io.load_inference_model(save_dirname, exe)

        # Setup inputs by creating LoDTensors to represent sequences of words.
        # Here each word is the basic element of these LoDTensors and the shape of
        # each word (base_shape) should be [1] since it is simply an index to
        # look up for the corresponding word vector.
        # Suppose the length-based level of detail (lod) info is set to [[3, 4, 2]],
        # which has only one lod level. Then the created LoDTensors will have only
        # one higher level structure (sequence of words, or sentence) than the basic
        # element (word). Hence the LoDTensor will hold data for three sentences of
        # length 3, 4 and 2, respectively.
        # Note that lod info should be a list of lists.
        lod = [[3, 4, 2]]
        base_shape = [1]
        # The range of random integers is [low, high]
        word = fluid.create_random_int_lodtensor(
            lod, base_shape, place, low=0, high=word_dict_len - 1)
        pred = fluid.create_random_int_lodtensor(
            lod, base_shape, place, low=0, high=pred_dict_len - 1)
        ctx_n2 = fluid.create_random_int_lodtensor(
            lod, base_shape, place, low=0, high=word_dict_len - 1)
        ctx_n1 = fluid.create_random_int_lodtensor(
            lod, base_shape, place, low=0, high=word_dict_len - 1)
        ctx_0 = fluid.create_random_int_lodtensor(
            lod, base_shape, place, low=0, high=word_dict_len - 1)
        ctx_p1 = fluid.create_random_int_lodtensor(
            lod, base_shape, place, low=0, high=word_dict_len - 1)
        ctx_p2 = fluid.create_random_int_lodtensor(
            lod, base_shape, place, low=0, high=word_dict_len - 1)
        mark = fluid.create_random_int_lodtensor(
            lod, base_shape, place, low=0, high=mark_dict_len - 1)

        # Construct feed as a dictionary of {feed_target_name: feed_target_data}
        # and results will contain a list of data corresponding to fetch_targets.
        assert feed_target_names[0] == 'word_data'
        assert feed_target_names[1] == 'verb_data'
        assert feed_target_names[2] == 'ctx_n2_data'
        assert feed_target_names[3] == 'ctx_n1_data'
        assert feed_target_names[4] == 'ctx_0_data'
        assert feed_target_names[5] == 'ctx_p1_data'
        assert feed_target_names[6] == 'ctx_p2_data'
        assert feed_target_names[7] == 'mark_data'

        results = exe.run(inference_program,
                          feed={
                              feed_target_names[0]: word,
                              feed_target_names[1]: pred,
                              feed_target_names[2]: ctx_n2,
                              feed_target_names[3]: ctx_n1,
                              feed_target_names[4]: ctx_0,
                              feed_target_names[5]: ctx_p1,
                              feed_target_names[6]: ctx_p2,
                              feed_target_names[7]: mark
                          },
                          fetch_list=fetch_targets,
                          return_numpy=False)
        print(results[0].lod())
        np_data = np.array(results[0])
        print("Inference Shape: ", np_data.shape)


def main(use_cuda, is_local=True):
    if use_cuda and not fluid.core.is_compiled_with_cuda():
        return

    # Directory for saving the trained model
    save_dirname = "label_semantic_roles.inference.model"

    train(use_cuda, save_dirname, is_local)
    infer(use_cuda, save_dirname)


class TestLabelSemanticRoles(unittest.TestCase):
    def test_cuda(self):
        with self.scope_prog_guard():
            main(use_cuda=True)

    def test_cpu(self):
        with self.scope_prog_guard():
            main(use_cuda=False)

    @contextlib.contextmanager
    def scope_prog_guard(self):
        prog = fluid.Program()
        startup_prog = fluid.Program()
        scope = fluid.core.Scope()
        with fluid.scope_guard(scope):
            with fluid.program_guard(prog, startup_prog):
                yield


if __name__ == '__main__':
    unittest.main()

file_length: 12,919 | avg_line_length: 36.777778 | max_line_length: 85 | extension_type: py
repo: Paddle
file: Paddle-master/python/paddle/fluid/tests/book/test_recommender_system.py

# Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

import math
import sys
import os
import numpy as np
import paddle
import paddle.fluid as fluid
import paddle.fluid.framework as framework
import paddle.fluid.layers as layers
import paddle.fluid.nets as nets
from paddle.fluid.executor import Executor
from paddle.fluid.optimizer import SGDOptimizer

IS_SPARSE = True
USE_GPU = False
BATCH_SIZE = 256


def get_usr_combined_features():
    # FIXME(dzh): the old API integer_value(10) may have had a range check;
    # currently we don't have a user-configured check.
    USR_DICT_SIZE = paddle.dataset.movielens.max_user_id() + 1
    uid = layers.data(name='user_id', shape=[1], dtype='int64')
    usr_emb = layers.embedding(
        input=uid,
        dtype='float32',
        size=[USR_DICT_SIZE, 32],
        param_attr='user_table',
        is_sparse=IS_SPARSE)
    usr_fc = layers.fc(input=usr_emb, size=32)

    USR_GENDER_DICT_SIZE = 2
    usr_gender_id = layers.data(name='gender_id', shape=[1], dtype='int64')
    usr_gender_emb = layers.embedding(
        input=usr_gender_id,
        size=[USR_GENDER_DICT_SIZE, 16],
        param_attr='gender_table',
        is_sparse=IS_SPARSE)
    usr_gender_fc = layers.fc(input=usr_gender_emb, size=16)

    USR_AGE_DICT_SIZE = len(paddle.dataset.movielens.age_table)
    usr_age_id = layers.data(name='age_id', shape=[1], dtype="int64")
    usr_age_emb = layers.embedding(
        input=usr_age_id,
        size=[USR_AGE_DICT_SIZE, 16],
        is_sparse=IS_SPARSE,
        param_attr='age_table')
    usr_age_fc = layers.fc(input=usr_age_emb, size=16)

    USR_JOB_DICT_SIZE = paddle.dataset.movielens.max_job_id() + 1
    usr_job_id = layers.data(name='job_id', shape=[1], dtype="int64")
    usr_job_emb = layers.embedding(
        input=usr_job_id,
        size=[USR_JOB_DICT_SIZE, 16],
        param_attr='job_table',
        is_sparse=IS_SPARSE)
    usr_job_fc = layers.fc(input=usr_job_emb, size=16)

    concat_embed = layers.concat(
        input=[usr_fc, usr_gender_fc, usr_age_fc, usr_job_fc], axis=1)

    usr_combined_features = layers.fc(input=concat_embed, size=200, act="tanh")

    return usr_combined_features


def get_mov_combined_features():
    MOV_DICT_SIZE = paddle.dataset.movielens.max_movie_id() + 1
    mov_id = layers.data(name='movie_id', shape=[1], dtype='int64')
    mov_emb = layers.embedding(
        input=mov_id,
        dtype='float32',
        size=[MOV_DICT_SIZE, 32],
        param_attr='movie_table',
        is_sparse=IS_SPARSE)
    mov_fc = layers.fc(input=mov_emb, size=32)

    CATEGORY_DICT_SIZE = len(paddle.dataset.movielens.movie_categories())
    category_id = layers.data(
        name='category_id', shape=[1], dtype='int64', lod_level=1)
    mov_categories_emb = layers.embedding(
        input=category_id, size=[CATEGORY_DICT_SIZE, 32], is_sparse=IS_SPARSE)
    mov_categories_hidden = layers.sequence_pool(
        input=mov_categories_emb, pool_type="sum")

    MOV_TITLE_DICT_SIZE = len(paddle.dataset.movielens.get_movie_title_dict())
    mov_title_id = layers.data(
        name='movie_title', shape=[1], dtype='int64', lod_level=1)
    mov_title_emb = layers.embedding(
        input=mov_title_id, size=[MOV_TITLE_DICT_SIZE, 32], is_sparse=IS_SPARSE)
    mov_title_conv = nets.sequence_conv_pool(
        input=mov_title_emb,
        num_filters=32,
        filter_size=3,
        act="tanh",
        pool_type="sum")

    concat_embed = layers.concat(
        input=[mov_fc, mov_categories_hidden, mov_title_conv], axis=1)

    # FIXME(dzh): need a tanh operator
    mov_combined_features = layers.fc(input=concat_embed, size=200, act="tanh")

    return mov_combined_features


def model():
    usr_combined_features = get_usr_combined_features()
    mov_combined_features = get_mov_combined_features()

    # need cos sim
    inference = layers.cos_sim(X=usr_combined_features, Y=mov_combined_features)
    scale_infer = layers.scale(x=inference, scale=5.0)

    label = layers.data(name='score', shape=[1], dtype='float32')
    square_cost = layers.square_error_cost(input=scale_infer, label=label)
    avg_cost = layers.mean(square_cost)

    return scale_infer, avg_cost


def train(use_cuda, save_dirname, is_local=True):
    scale_infer, avg_cost = model()

    # test program
    test_program = fluid.default_main_program().clone(for_test=True)

    sgd_optimizer = SGDOptimizer(learning_rate=0.2)
    sgd_optimizer.minimize(avg_cost)

    place = fluid.CUDAPlace(0) if use_cuda else fluid.CPUPlace()

    exe = Executor(place)

    train_reader = paddle.batch(
        paddle.reader.shuffle(
            paddle.dataset.movielens.train(), buf_size=8192),
        batch_size=BATCH_SIZE)
    test_reader = paddle.batch(
        paddle.dataset.movielens.test(), batch_size=BATCH_SIZE)

    feed_order = [
        'user_id', 'gender_id', 'age_id', 'job_id', 'movie_id', 'category_id',
        'movie_title', 'score'
    ]

    def train_loop(main_program):
        exe.run(framework.default_startup_program())

        feed_list = [
            main_program.global_block().var(var_name)
            for var_name in feed_order
        ]
        feeder = fluid.DataFeeder(feed_list, place)

        PASS_NUM = 100
        for pass_id in range(PASS_NUM):
            for batch_id, data in enumerate(train_reader()):
                # train a mini-batch
                outs = exe.run(program=main_program,
                               feed=feeder.feed(data),
                               fetch_list=[avg_cost])
                out = np.array(outs[0])
                if (batch_id + 1) % 10 == 0:
                    avg_cost_set = []
                    for test_data in test_reader():
                        avg_cost_np = exe.run(program=test_program,
                                              feed=feeder.feed(test_data),
                                              fetch_list=[avg_cost])
                        avg_cost_set.append(avg_cost_np[0])
                        break  # test only 1 segment for speeding up CI

                    # get test avg_cost
                    test_avg_cost = np.array(avg_cost_set).mean()
                    if test_avg_cost < 6.0:
                        # if avg_cost less than 6.0, we think our code is good.
                        if save_dirname is not None:
                            fluid.io.save_inference_model(save_dirname, [
                                "user_id", "gender_id", "age_id", "job_id",
                                "movie_id", "category_id", "movie_title"
                            ], [scale_infer], exe)
                        return

                if math.isnan(float(out[0])):
                    sys.exit("got NaN loss, training failed.")

    if is_local:
        train_loop(fluid.default_main_program())
    else:
        port = os.getenv("PADDLE_INIT_PORT", "6174")
        pserver_ips = os.getenv("PADDLE_INIT_PSERVERS")  # ip,ip...
        eplist = []
        for ip in pserver_ips.split(","):
            eplist.append(':'.join([ip, port]))
        pserver_endpoints = ",".join(eplist)  # ip:port,ip:port...
        trainers = int(os.getenv("TRAINERS"))
        current_endpoint = os.getenv("POD_IP") + ":" + port
        trainer_id = int(os.getenv("PADDLE_INIT_TRAINER_ID"))
        training_role = os.getenv("TRAINING_ROLE", "TRAINER")
        t = fluid.DistributeTranspiler()
        t.transpile(trainer_id, pservers=pserver_endpoints, trainers=trainers)
        if training_role == "PSERVER":
            pserver_prog = t.get_pserver_program(current_endpoint)
            pserver_startup = t.get_startup_program(current_endpoint,
                                                    pserver_prog)
            exe.run(pserver_startup)
            exe.run(pserver_prog)
        elif training_role == "TRAINER":
            train_loop(t.get_trainer_program())


def infer(use_cuda, save_dirname=None):
    if save_dirname is None:
        return

    place = fluid.CUDAPlace(0) if use_cuda else fluid.CPUPlace()
    exe = fluid.Executor(place)

    inference_scope = fluid.core.Scope()
    with fluid.scope_guard(inference_scope):
        # Use fluid.io.load_inference_model to obtain the inference program desc,
        # the feed_target_names (the names of variables that will be fed
        # data using feed operators), and the fetch_targets (variables that
        # we want to obtain data from using fetch operators).
        [inference_program, feed_target_names,
         fetch_targets] = fluid.io.load_inference_model(save_dirname, exe)

        # Use the first data from paddle.dataset.movielens.test() as input
        assert feed_target_names[0] == "user_id"
        # Use create_lod_tensor(data, lod, place) API to generate LoD Tensor
        # where `data` is a list of sequences of index numbers, `lod` is
        # the level of detail (lod) info associated with `data`.
        # For example, data = [[10, 2, 3], [2, 3]] means that it contains
        # two sequences of indexes, of length 3 and 2, respectively.
        # Correspondingly, lod = [[3, 2]] contains one level of detail info,
        # indicating that `data` consists of two sequences of length 3 and 2.
        user_id = fluid.create_lod_tensor([[1]], [[1]], place)

        assert feed_target_names[1] == "gender_id"
        gender_id = fluid.create_lod_tensor([[1]], [[1]], place)

        assert feed_target_names[2] == "age_id"
        age_id = fluid.create_lod_tensor([[0]], [[1]], place)

        assert feed_target_names[3] == "job_id"
        job_id = fluid.create_lod_tensor([[10]], [[1]], place)

        assert feed_target_names[4] == "movie_id"
        movie_id = fluid.create_lod_tensor([[783]], [[1]], place)

        assert feed_target_names[5] == "category_id"
        category_id = fluid.create_lod_tensor([[10, 8, 9]], [[3]], place)

        assert feed_target_names[6] == "movie_title"
        movie_title = fluid.create_lod_tensor([[1069, 4140, 2923, 710, 988]],
                                              [[5]], place)

        # Construct feed as a dictionary of {feed_target_name: feed_target_data}
        # and results will contain a list of data corresponding to fetch_targets.
        results = exe.run(inference_program,
                          feed={
                              feed_target_names[0]: user_id,
                              feed_target_names[1]: gender_id,
                              feed_target_names[2]: age_id,
                              feed_target_names[3]: job_id,
                              feed_target_names[4]: movie_id,
                              feed_target_names[5]: category_id,
                              feed_target_names[6]: movie_title
                          },
                          fetch_list=fetch_targets,
                          return_numpy=False)
        print("inferred score: ", np.array(results[0]))


def main(use_cuda):
    if use_cuda and not fluid.core.is_compiled_with_cuda():
        return

    # Directory for saving the inference model
    save_dirname = "recommender_system.inference.model"

    train(use_cuda, save_dirname)
    infer(use_cuda, save_dirname)


if __name__ == '__main__':
    main(USE_GPU)

file_length: 11,779 | avg_line_length: 35.697819 | max_line_length: 81 | extension_type: py
repo: Paddle
file: Paddle-master/python/paddle/fluid/tests/book/test_image_classification.py

# Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

from __future__ import print_function

import paddle
import paddle.fluid as fluid
import contextlib
import math
import sys
import numpy
import unittest
import os
import numpy as np


def resnet_cifar10(input, depth=32):
    def conv_bn_layer(input,
                      ch_out,
                      filter_size,
                      stride,
                      padding,
                      act='relu',
                      bias_attr=False):
        tmp = fluid.layers.conv2d(
            input=input,
            filter_size=filter_size,
            num_filters=ch_out,
            stride=stride,
            padding=padding,
            act=None,
            bias_attr=bias_attr)
        return fluid.layers.batch_norm(input=tmp, act=act)

    def shortcut(input, ch_in, ch_out, stride):
        if ch_in != ch_out:
            return conv_bn_layer(input, ch_out, 1, stride, 0, None)
        else:
            return input

    def basicblock(input, ch_in, ch_out, stride):
        tmp = conv_bn_layer(input, ch_out, 3, stride, 1)
        tmp = conv_bn_layer(tmp, ch_out, 3, 1, 1, act=None, bias_attr=True)
        short = shortcut(input, ch_in, ch_out, stride)
        return fluid.layers.elementwise_add(x=tmp, y=short, act='relu')

    def layer_warp(block_func, input, ch_in, ch_out, count, stride):
        tmp = block_func(input, ch_in, ch_out, stride)
        for i in range(1, count):
            tmp = block_func(tmp, ch_out, ch_out, 1)
        return tmp

    assert (depth - 2) % 6 == 0
    n = (depth - 2) / 6
    conv1 = conv_bn_layer(
        input=input, ch_out=16, filter_size=3, stride=1, padding=1)
    res1 = layer_warp(basicblock, conv1, 16, 16, n, 1)
    res2 = layer_warp(basicblock, res1, 16, 32, n, 2)
    res3 = layer_warp(basicblock, res2, 32, 64, n, 2)
    pool = fluid.layers.pool2d(
        input=res3, pool_size=8, pool_type='avg', pool_stride=1)
    return pool


def vgg16_bn_drop(input):
    def conv_block(input, num_filter, groups, dropouts):
        return fluid.nets.img_conv_group(
            input=input,
            pool_size=2,
            pool_stride=2,
            conv_num_filter=[num_filter] * groups,
            conv_filter_size=3,
            conv_act='relu',
            conv_with_batchnorm=True,
            conv_batchnorm_drop_rate=dropouts,
            pool_type='max')

    conv1 = conv_block(input, 64, 2, [0.3, 0])
    conv2 = conv_block(conv1, 128, 2, [0.4, 0])
    conv3 = conv_block(conv2, 256, 3, [0.4, 0.4, 0])
    conv4 = conv_block(conv3, 512, 3, [0.4, 0.4, 0])
    conv5 = conv_block(conv4, 512, 3, [0.4, 0.4, 0])

    drop = fluid.layers.dropout(x=conv5, dropout_prob=0.5)
    fc1 = fluid.layers.fc(input=drop, size=4096, act=None)
    bn = fluid.layers.batch_norm(input=fc1, act='relu')
    drop2 = fluid.layers.dropout(x=bn, dropout_prob=0.5)
    fc2 = fluid.layers.fc(input=drop2, size=4096, act=None)
    return fc2


def train(net_type, use_cuda, save_dirname, is_local):
    classdim = 10
    data_shape = [3, 32, 32]

    images = fluid.layers.data(name='pixel', shape=data_shape, dtype='float32')
    label = fluid.layers.data(name='label', shape=[1], dtype='int64')

    if net_type == "vgg":
        print("train vgg net")
        net = vgg16_bn_drop(images)
    elif net_type == "resnet":
        print("train resnet")
        net = resnet_cifar10(images, 32)
    else:
        raise ValueError("%s network is not supported" % net_type)

    predict = fluid.layers.fc(input=net, size=classdim, act='softmax')
    cost = fluid.layers.cross_entropy(input=predict, label=label)
    avg_cost = fluid.layers.mean(cost)
    acc = fluid.layers.accuracy(input=predict, label=label)

    # Test program
    test_program = fluid.default_main_program().clone(for_test=True)

    optimizer = fluid.optimizer.Adam(learning_rate=0.001)
    optimizer.minimize(avg_cost)

    BATCH_SIZE = 128
    PASS_NUM = 1

    train_reader = paddle.batch(
        paddle.reader.shuffle(
            paddle.dataset.cifar.train10(), buf_size=128 * 10),
        batch_size=BATCH_SIZE)

    test_reader = paddle.batch(
        paddle.dataset.cifar.test10(), batch_size=BATCH_SIZE)

    place = fluid.CUDAPlace(0) if use_cuda else fluid.CPUPlace()
    exe = fluid.Executor(place)
    feeder = fluid.DataFeeder(place=place, feed_list=[images, label])

    def train_loop(main_program):
        exe.run(fluid.default_startup_program())
        loss = 0.0
        for pass_id in range(PASS_NUM):
            for batch_id, data in enumerate(train_reader()):
                exe.run(main_program, feed=feeder.feed(data))

                if (batch_id % 10) == 0:
                    acc_list = []
                    avg_loss_list = []
                    for tid, test_data in enumerate(test_reader()):
                        loss_t, acc_t = exe.run(program=test_program,
                                                feed=feeder.feed(test_data),
                                                fetch_list=[avg_cost, acc])
                        if math.isnan(float(loss_t)):
                            sys.exit("got NaN loss, training failed.")
                        acc_list.append(float(acc_t))
                        avg_loss_list.append(float(loss_t))
                        break  # Use 1 segment for speeding up CI

                    acc_value = numpy.array(acc_list).mean()
                    avg_loss_value = numpy.array(avg_loss_list).mean()

                    print(
                        'PassID {0:1}, BatchID {1:04}, Test Loss {2:2.2}, Acc {3:2.2}'.
                        format(pass_id, batch_id + 1, float(avg_loss_value),
                               float(acc_value)))

                    if acc_value > 0.01:  # Low threshold for speeding up CI
                        fluid.io.save_inference_model(save_dirname, ["pixel"],
                                                      [predict], exe)
                        return

    if is_local:
        train_loop(fluid.default_main_program())
    else:
        port = os.getenv("PADDLE_INIT_PORT", "6174")
        pserver_ips = os.getenv("PADDLE_INIT_PSERVERS")  # ip,ip...
        eplist = []
        for ip in pserver_ips.split(","):
            eplist.append(':'.join([ip, port]))
        pserver_endpoints = ",".join(eplist)  # ip:port,ip:port...
        trainers = int(os.getenv("TRAINERS"))
        current_endpoint = os.getenv("POD_IP") + ":" + port
        trainer_id = int(os.getenv("PADDLE_INIT_TRAINER_ID"))
        training_role = os.getenv("TRAINING_ROLE", "TRAINER")
        t = fluid.DistributeTranspiler()
        t.transpile(trainer_id, pservers=pserver_endpoints, trainers=trainers)
        if training_role == "PSERVER":
            pserver_prog = t.get_pserver_program(current_endpoint)
            pserver_startup = t.get_startup_program(current_endpoint,
                                                    pserver_prog)
            exe.run(pserver_startup)
            exe.run(pserver_prog)
        elif training_role == "TRAINER":
            train_loop(t.get_trainer_program())


def infer(use_cuda, save_dirname=None):
    if save_dirname is None:
        return

    place = fluid.CUDAPlace(0) if use_cuda else fluid.CPUPlace()
    exe = fluid.Executor(place)

    inference_scope = fluid.core.Scope()
    with fluid.scope_guard(inference_scope):
        # Use fluid.io.load_inference_model to obtain the inference program desc,
        # the feed_target_names (the names of variables that will be fed
        # data using feed operators), and the fetch_targets (variables that
        # we want to obtain data from using fetch operators).
        [inference_program, feed_target_names,
         fetch_targets] = fluid.io.load_inference_model(save_dirname, exe)

        # The input's dimension of conv should be 4-D or 5-D.
        # Use normalized image pixels as input data, which should be in the
        # range [0, 1.0].
        batch_size = 1
        tensor_img = numpy.random.rand(batch_size, 3, 32, 32).astype("float32")

        # Use inference_transpiler to speed up
        inference_transpiler_program = inference_program.clone()
        t = fluid.InferenceTranspiler()
        t.transpile(inference_transpiler_program, place)

        # Construct feed as a dictionary of {feed_target_name: feed_target_data}
        # and results will contain a list of data corresponding to fetch_targets.
        results = exe.run(inference_program,
                          feed={feed_target_names[0]: tensor_img},
                          fetch_list=fetch_targets)
        transpiler_results = exe.run(inference_transpiler_program,
                                     feed={feed_target_names[0]: tensor_img},
                                     fetch_list=fetch_targets)

        assert len(results[0]) == len(transpiler_results[0])
        for i in range(len(results[0])):
            np.testing.assert_almost_equal(
                results[0][i], transpiler_results[0][i], decimal=5)

        print("infer results: ", results[0])

        fluid.io.save_inference_model(save_dirname, feed_target_names,
                                      fetch_targets, exe,
                                      inference_transpiler_program)


def main(net_type, use_cuda, is_local=True):
    if use_cuda and not fluid.core.is_compiled_with_cuda():
        return

    # Directory for saving the trained model
    save_dirname = "image_classification_" + net_type + ".inference.model"

    train(net_type, use_cuda, save_dirname, is_local)
    infer(use_cuda, save_dirname)


class TestImageClassification(unittest.TestCase):
    def test_vgg_cuda(self):
        with self.scope_prog_guard():
            main('vgg', use_cuda=True)

    def test_resnet_cuda(self):
        with self.scope_prog_guard():
            main('resnet', use_cuda=True)

    def test_vgg_cpu(self):
        with self.scope_prog_guard():
            main('vgg', use_cuda=False)

    def test_resnet_cpu(self):
        with self.scope_prog_guard():
            main('resnet', use_cuda=False)

    @contextlib.contextmanager
    def scope_prog_guard(self):
        prog = fluid.Program()
        startup_prog = fluid.Program()
        scope = fluid.core.Scope()
        with fluid.scope_guard(scope):
            with fluid.program_guard(prog, startup_prog):
                yield


if __name__ == '__main__':
    unittest.main()

file_length: 10,966 | avg_line_length: 36.687285 | max_line_length: 91 | extension_type: py
repo: Paddle
file: Paddle-master/python/paddle/fluid/tests/book/__init__.py

# Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

file_length: 612 | avg_line_length: 42.785714 | max_line_length: 74 | extension_type: py
Paddle
Paddle-master/python/paddle/fluid/tests/book/test_recognize_digits.py
# Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. from __future__ import print_function import argparse import paddle.fluid as fluid import paddle import sys import numpy import unittest import math import sys import os BATCH_SIZE = 64 def loss_net(hidden, label): prediction = fluid.layers.fc(input=hidden, size=10, act='softmax') loss = fluid.layers.cross_entropy(input=prediction, label=label) avg_loss = fluid.layers.mean(loss) acc = fluid.layers.accuracy(input=prediction, label=label) return prediction, avg_loss, acc def mlp(img, label): hidden = fluid.layers.fc(input=img, size=200, act='tanh') hidden = fluid.layers.fc(input=hidden, size=200, act='tanh') return loss_net(hidden, label) def conv_net(img, label): conv_pool_1 = fluid.nets.simple_img_conv_pool( input=img, filter_size=5, num_filters=20, pool_size=2, pool_stride=2, act="relu") conv_pool_1 = fluid.layers.batch_norm(conv_pool_1) conv_pool_2 = fluid.nets.simple_img_conv_pool( input=conv_pool_1, filter_size=5, num_filters=50, pool_size=2, pool_stride=2, act="relu") return loss_net(conv_pool_2, label) def train(nn_type, use_cuda, parallel, save_dirname=None, model_filename=None, params_filename=None, is_local=True): if use_cuda and not fluid.core.is_compiled_with_cuda(): return img = fluid.layers.data(name='img', shape=[1, 28, 28], dtype='float32') label = fluid.layers.data(name='label', shape=[1], dtype='int64') if nn_type == 'mlp': net_conf = mlp else: net_conf = conv_net if parallel: places = fluid.layers.get_places() pd = fluid.layers.ParallelDo(places) with pd.do(): img_ = pd.read_input(img) label_ = pd.read_input(label) prediction, avg_loss, acc = net_conf(img_, label_) for o in [avg_loss, acc]: pd.write_output(o) avg_loss, acc = pd() # get mean loss and acc through every devices. 
avg_loss = fluid.layers.mean(avg_loss) acc = fluid.layers.mean(acc) else: prediction, avg_loss, acc = net_conf(img, label) test_program = fluid.default_main_program().clone(for_test=True) optimizer = fluid.optimizer.Adam(learning_rate=0.001) optimizer.minimize(avg_loss) place = fluid.CUDAPlace(0) if use_cuda else fluid.CPUPlace() exe = fluid.Executor(place) train_reader = paddle.batch( paddle.reader.shuffle( paddle.dataset.mnist.train(), buf_size=500), batch_size=BATCH_SIZE) test_reader = paddle.batch( paddle.dataset.mnist.test(), batch_size=BATCH_SIZE) feeder = fluid.DataFeeder(feed_list=[img, label], place=place) def train_loop(main_program): exe.run(fluid.default_startup_program()) PASS_NUM = 100 for pass_id in range(PASS_NUM): for batch_id, data in enumerate(train_reader()): # train a mini-batch, fetch nothing exe.run(main_program, feed=feeder.feed(data)) if (batch_id + 1) % 10 == 0: acc_set = [] avg_loss_set = [] for test_data in test_reader(): acc_np, avg_loss_np = exe.run( program=test_program, feed=feeder.feed(test_data), fetch_list=[acc, avg_loss]) acc_set.append(float(acc_np)) avg_loss_set.append(float(avg_loss_np)) # get test acc and loss acc_val = numpy.array(acc_set).mean() avg_loss_val = numpy.array(avg_loss_set).mean() if float(acc_val ) > 0.2: # Smaller value to increase CI speed if save_dirname is not None: fluid.io.save_inference_model( save_dirname, ["img"], [prediction], exe, model_filename=model_filename, params_filename=params_filename) return else: print( 'PassID {0:1}, BatchID {1:04}, Test Loss {2:2.2}, Acc {3:2.2}'. format(pass_id, batch_id + 1, float(avg_loss_val), float(acc_val))) if math.isnan(float(avg_loss_val)): sys.exit("got NaN loss, training failed.") raise AssertionError("Loss of recognize digits is too large") if is_local: train_loop(fluid.default_main_program()) else: port = os.getenv("PADDLE_INIT_PORT", "6174") pserver_ips = os.getenv("PADDLE_INIT_PSERVERS") # ip,ip... eplist = [] for ip in pserver_ips.split(","): eplist.append(':'.join([ip, port])) pserver_endpoints = ",".join(eplist) # ip:port,ip:port... trainers = int(os.getenv("TRAINERS")) current_endpoint = os.getenv("POD_IP") + ":" + port trainer_id = int(os.getenv("PADDLE_INIT_TRAINER_ID")) training_role = os.getenv("TRAINING_ROLE", "TRAINER") t = fluid.DistributeTranspiler() t.transpile(trainer_id, pservers=pserver_endpoints, trainers=trainers) if training_role == "PSERVER": pserver_prog = t.get_pserver_program(current_endpoint) pserver_startup = t.get_startup_program(current_endpoint, pserver_prog) exe.run(pserver_startup) exe.run(pserver_prog) elif training_role == "TRAINER": train_loop(t.get_trainer_program()) def infer(use_cuda, save_dirname=None, model_filename=None, params_filename=None): if save_dirname is None: return place = fluid.CUDAPlace(0) if use_cuda else fluid.CPUPlace() exe = fluid.Executor(place) inference_scope = fluid.core.Scope() with fluid.scope_guard(inference_scope): # Use fluid.io.load_inference_model to obtain the inference program desc, # the feed_target_names (the names of variables that will be feeded # data using feed operators), and the fetch_targets (variables that # we want to obtain data from using fetch operators). [inference_program, feed_target_names, fetch_targets] = fluid.io.load_inference_model( save_dirname, exe, model_filename, params_filename) # The input's dimension of conv should be 4-D or 5-D. # Use normilized image pixels as input data, which should be in the range [-1.0, 1.0]. 
        batch_size = 1
        tensor_img = numpy.random.uniform(
            -1.0, 1.0, [batch_size, 1, 28, 28]).astype("float32")

        # Construct feed as a dictionary of {feed_target_name: feed_target_data}
        # and results will contain a list of data corresponding to fetch_targets.
        results = exe.run(inference_program,
                          feed={feed_target_names[0]: tensor_img},
                          fetch_list=fetch_targets)
        print("infer results: ", results[0])


def main(use_cuda, parallel, nn_type, combine):
    save_dirname = None
    model_filename = None
    params_filename = None
    if not use_cuda and not parallel:
        save_dirname = "recognize_digits_" + nn_type + ".inference.model"
        if combine:
            model_filename = "__model_combined__"
            params_filename = "__params_combined__"

    # call train() with is_local argument to run distributed train
    train(
        nn_type=nn_type,
        use_cuda=use_cuda,
        parallel=parallel,
        save_dirname=save_dirname,
        model_filename=model_filename,
        params_filename=params_filename)
    infer(
        use_cuda=use_cuda,
        save_dirname=save_dirname,
        model_filename=model_filename,
        params_filename=params_filename)


class TestRecognizeDigits(unittest.TestCase):
    pass


def inject_test_method(use_cuda, parallel, nn_type, combine):
    def __impl__(self):
        prog = fluid.Program()
        startup_prog = fluid.Program()
        scope = fluid.core.Scope()
        with fluid.scope_guard(scope):
            with fluid.program_guard(prog, startup_prog):
                main(use_cuda, parallel, nn_type, combine)

    fn = 'test_{0}_{1}_{2}_{3}'.format(nn_type, 'cuda' if use_cuda else 'cpu',
                                       'parallel' if parallel else 'normal',
                                       'combine' if combine else 'separate')

    setattr(TestRecognizeDigits, fn, __impl__)


def inject_all_tests():
    for use_cuda in (False, True):
        for parallel in (False, True):
            for nn_type in ('mlp', 'conv'):
                inject_test_method(use_cuda, parallel, nn_type, True)
    # Two unit tests for saving parameters as separate files
    inject_test_method(False, False, 'mlp', False)
    inject_test_method(False, False, 'conv', False)


inject_all_tests()

if __name__ == '__main__':
    unittest.main()
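
# A hedged sketch (not part of the original test): infer() above feeds random
# noise, so this helper shows querying the saved model with one real MNIST
# image instead. The directory name is an assumption; it matches what main()
# saves for the serial CPU 'mlp' configuration.
def _infer_real_mnist_sample(
        save_dirname="recognize_digits_mlp.inference.model"):
    place = fluid.CPUPlace()
    exe = fluid.Executor(place)
    with fluid.scope_guard(fluid.core.Scope()):
        [program, feed_names, fetch_targets] = fluid.io.load_inference_model(
            save_dirname, exe)
        # mnist.test() yields (784-dim float vector in [-1, 1], int label)
        image, label = next(paddle.dataset.mnist.test()())
        tensor = numpy.array(image, dtype='float32').reshape([1, 1, 28, 28])
        results = exe.run(program,
                          feed={feed_names[0]: tensor},
                          fetch_list=fetch_targets)
        print("predicted digit:", numpy.argmax(results[0]), "- label:", label)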
9,963
35.767528
94
py
Paddle
Paddle-master/python/paddle/fluid/tests/book/notest_understand_sentiment.py
# Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. from __future__ import print_function import unittest import paddle.fluid as fluid import paddle import contextlib import math import numpy as np import sys import os def convolution_net(data, label, input_dim, class_dim=2, emb_dim=32, hid_dim=32): emb = fluid.layers.embedding( input=data, size=[input_dim, emb_dim], is_sparse=True) conv_3 = fluid.nets.sequence_conv_pool( input=emb, num_filters=hid_dim, filter_size=3, act="tanh", pool_type="sqrt") conv_4 = fluid.nets.sequence_conv_pool( input=emb, num_filters=hid_dim, filter_size=4, act="tanh", pool_type="sqrt") prediction = fluid.layers.fc(input=[conv_3, conv_4], size=class_dim, act="softmax") cost = fluid.layers.cross_entropy(input=prediction, label=label) avg_cost = fluid.layers.mean(cost) accuracy = fluid.layers.accuracy(input=prediction, label=label) return avg_cost, accuracy, prediction def dyn_rnn_lstm(data, label, input_dim, class_dim=2, emb_dim=32, lstm_size=128): emb = fluid.layers.embedding( input=data, size=[input_dim, emb_dim], is_sparse=True) sentence = fluid.layers.fc(input=emb, size=lstm_size, act='tanh') rnn = fluid.layers.DynamicRNN() with rnn.block(): word = rnn.step_input(sentence) prev_hidden = rnn.memory(value=0.0, shape=[lstm_size]) prev_cell = rnn.memory(value=0.0, shape=[lstm_size]) def gate_common(ipt, hidden, size): gate0 = fluid.layers.fc(input=ipt, size=size, bias_attr=True) gate1 = fluid.layers.fc(input=hidden, size=size, bias_attr=False) return gate0 + gate1 forget_gate = fluid.layers.sigmoid(x=gate_common(word, prev_hidden, lstm_size)) input_gate = fluid.layers.sigmoid(x=gate_common(word, prev_hidden, lstm_size)) output_gate = fluid.layers.sigmoid(x=gate_common(word, prev_hidden, lstm_size)) cell_gate = fluid.layers.sigmoid(x=gate_common(word, prev_hidden, lstm_size)) cell = forget_gate * prev_cell + input_gate * cell_gate hidden = output_gate * fluid.layers.tanh(x=cell) rnn.update_memory(prev_cell, cell) rnn.update_memory(prev_hidden, hidden) rnn.output(hidden) last = fluid.layers.sequence_last_step(rnn()) prediction = fluid.layers.fc(input=last, size=class_dim, act="softmax") cost = fluid.layers.cross_entropy(input=prediction, label=label) avg_cost = fluid.layers.mean(cost) accuracy = fluid.layers.accuracy(input=prediction, label=label) return avg_cost, accuracy, prediction def stacked_lstm_net(data, label, input_dim, class_dim=2, emb_dim=128, hid_dim=512, stacked_num=3): assert stacked_num % 2 == 1 emb = fluid.layers.embedding( input=data, size=[input_dim, emb_dim], is_sparse=True) # add bias attr # TODO(qijun) linear act fc1 = fluid.layers.fc(input=emb, size=hid_dim) lstm1, cell1 = fluid.layers.dynamic_lstm(input=fc1, size=hid_dim) inputs = [fc1, lstm1] for i in range(2, stacked_num + 1): fc = fluid.layers.fc(input=inputs, size=hid_dim) lstm, cell = fluid.layers.dynamic_lstm( input=fc, size=hid_dim, is_reverse=(i % 2) == 0) inputs = [fc, lstm] fc_last = fluid.layers.sequence_pool(input=inputs[0], 
pool_type='max') lstm_last = fluid.layers.sequence_pool(input=inputs[1], pool_type='max') prediction = fluid.layers.fc(input=[fc_last, lstm_last], size=class_dim, act='softmax') cost = fluid.layers.cross_entropy(input=prediction, label=label) avg_cost = fluid.layers.mean(cost) accuracy = fluid.layers.accuracy(input=prediction, label=label) return avg_cost, accuracy, prediction def train(word_dict, net_method, use_cuda, parallel=False, save_dirname=None, is_local=True): BATCH_SIZE = 128 PASS_NUM = 5 dict_dim = len(word_dict) class_dim = 2 data = fluid.layers.data( name="words", shape=[1], dtype="int64", lod_level=1) label = fluid.layers.data(name="label", shape=[1], dtype="int64") if not parallel: cost, acc_out, prediction = net_method( data, label, input_dim=dict_dim, class_dim=class_dim) else: places = fluid.layers.get_places() pd = fluid.layers.ParallelDo(places) with pd.do(): cost, acc, _ = net_method( pd.read_input(data), pd.read_input(label), input_dim=dict_dim, class_dim=class_dim) pd.write_output(cost) pd.write_output(acc) cost, acc = pd() cost = fluid.layers.mean(cost) acc_out = fluid.layers.mean(acc) prediction = None assert save_dirname is None adagrad = fluid.optimizer.Adagrad(learning_rate=0.002) adagrad.minimize(cost) train_data = paddle.batch( paddle.reader.shuffle( paddle.dataset.imdb.train(word_dict), buf_size=1000), batch_size=BATCH_SIZE) place = fluid.CUDAPlace(0) if use_cuda else fluid.CPUPlace() exe = fluid.Executor(place) feeder = fluid.DataFeeder(feed_list=[data, label], place=place) def train_loop(main_program): exe.run(fluid.default_startup_program()) for pass_id in xrange(PASS_NUM): for data in train_data(): cost_val, acc_val = exe.run(main_program, feed=feeder.feed(data), fetch_list=[cost, acc_out]) print("cost=" + str(cost_val) + " acc=" + str(acc_val)) if cost_val < 0.4 and acc_val > 0.8: if save_dirname is not None: fluid.io.save_inference_model(save_dirname, ["words"], prediction, exe) return if math.isnan(float(cost_val)): sys.exit("got NaN loss, training failed.") raise AssertionError("Cost is too large for {0}".format( net_method.__name__)) if is_local: train_loop(fluid.default_main_program()) else: port = os.getenv("PADDLE_INIT_PORT", "6174") pserver_ips = os.getenv("PADDLE_INIT_PSERVERS") # ip,ip... eplist = [] for ip in pserver_ips.split(","): eplist.append(':'.join([ip, port])) pserver_endpoints = ",".join(eplist) # ip:port,ip:port... trainers = int(os.getenv("TRAINERS")) current_endpoint = os.getenv("POD_IP") + ":" + port trainer_id = int(os.getenv("PADDLE_INIT_TRAINER_ID")) training_role = os.getenv("TRAINING_ROLE", "TRAINER") t = fluid.DistributeTranspiler() t.transpile(trainer_id, pservers=pserver_endpoints, trainers=trainers) if training_role == "PSERVER": pserver_prog = t.get_pserver_program(current_endpoint) pserver_startup = t.get_startup_program(current_endpoint, pserver_prog) exe.run(pserver_startup) exe.run(pserver_prog) elif training_role == "TRAINER": train_loop(t.get_trainer_program()) def infer(word_dict, use_cuda, save_dirname=None): if save_dirname is None: return place = fluid.CUDAPlace(0) if use_cuda else fluid.CPUPlace() exe = fluid.Executor(place) inference_scope = fluid.core.Scope() with fluid.scope_guard(inference_scope): # Use fluid.io.load_inference_model to obtain the inference program desc, # the feed_target_names (the names of variables that will be feeded # data using feed operators), and the fetch_targets (variables that # we want to obtain data from using fetch operators). 
[inference_program, feed_target_names, fetch_targets] = fluid.io.load_inference_model(save_dirname, exe) word_dict_len = len(word_dict) # Setup input by creating LoDTensor to represent sequence of words. # Here each word is the basic element of the LoDTensor and the shape of # each word (base_shape) should be [1] since it is simply an index to # look up for the corresponding word vector. # Suppose the length_based level of detail (lod) info is set to [[3, 4, 2]], # which has only one lod level. Then the created LoDTensor will have only # one higher level structure (sequence of words, or sentence) than the basic # element (word). Hence the LoDTensor will hold data for three sentences of # length 3, 4 and 2, respectively. # Note that lod info should be a list of lists. lod = [[3, 4, 2]] base_shape = [1] # The range of random integers is [low, high] tensor_words = fluid.create_random_int_lodtensor( lod, base_shape, place, low=0, high=word_dict_len - 1) # Construct feed as a dictionary of {feed_target_name: feed_target_data} # and results will contain a list of data corresponding to fetch_targets. assert feed_target_names[0] == "words" results = exe.run(inference_program, feed={feed_target_names[0]: tensor_words}, fetch_list=fetch_targets, return_numpy=False) print(results[0].lod()) np_data = np.array(results[0]) print("Inference Shape: ", np_data.shape) print("Inference results: ", np_data) def main(word_dict, net_method, use_cuda, parallel=False, save_dirname=None): if use_cuda and not fluid.core.is_compiled_with_cuda(): return train( word_dict, net_method, use_cuda, parallel=parallel, save_dirname=save_dirname) infer(word_dict, use_cuda, save_dirname) class TestUnderstandSentiment(unittest.TestCase): @classmethod def setUpClass(cls): cls.word_dict = paddle.dataset.imdb.word_dict() @contextlib.contextmanager def new_program_scope(self): prog = fluid.Program() startup_prog = fluid.Program() scope = fluid.core.Scope() with fluid.scope_guard(scope): with fluid.program_guard(prog, startup_prog): yield def test_conv_cpu(self): with self.new_program_scope(): main( self.word_dict, net_method=convolution_net, use_cuda=False, save_dirname="understand_sentiment_conv.inference.model") def test_conv_cpu_parallel(self): with self.new_program_scope(): main( self.word_dict, net_method=convolution_net, use_cuda=False, parallel=True) @unittest.skip(reason="make CI faster") def test_stacked_lstm_cpu(self): with self.new_program_scope(): main( self.word_dict, net_method=stacked_lstm_net, use_cuda=False, save_dirname="understand_sentiment_stacked_lstm.inference.model") def test_stacked_lstm_cpu_parallel(self): with self.new_program_scope(): main( self.word_dict, net_method=stacked_lstm_net, use_cuda=False, parallel=True) def test_conv_gpu(self): with self.new_program_scope(): main( self.word_dict, net_method=convolution_net, use_cuda=True, save_dirname="understand_sentiment_conv.inference.model") def test_conv_gpu_parallel(self): with self.new_program_scope(): main( self.word_dict, net_method=convolution_net, use_cuda=True, parallel=True) @unittest.skip(reason="make CI faster") def test_stacked_lstm_gpu(self): with self.new_program_scope(): main( self.word_dict, net_method=stacked_lstm_net, use_cuda=True, save_dirname="understand_sentiment_stacked_lstm.inference.model") def test_stacked_lstm_gpu_parallel(self): with self.new_program_scope(): main( self.word_dict, net_method=stacked_lstm_net, use_cuda=True, parallel=True) @unittest.skip(reason='make CI faster') def test_dynrnn_lstm_gpu(self): with 
self.new_program_scope(): main( self.word_dict, net_method=dyn_rnn_lstm, use_cuda=True, parallel=False) def test_dynrnn_lstm_gpu_parallel(self): with self.new_program_scope(): main( self.word_dict, net_method=dyn_rnn_lstm, use_cuda=True, parallel=True) if __name__ == '__main__': unittest.main()
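
# A hedged sketch (not part of the original tests): infer() above feeds random
# word ids; this helper builds the same kind of length-based LoDTensor from
# real tokenized sentences. It assumes the imdb dictionary maps tokens to ids
# and uses '<unk>' for out-of-vocabulary words (id 0 is the fallback otherwise).
def _sentences_to_lodtensor(sentences, word_dict, place):
    unk = word_dict.get('<unk>', 0)
    seqs = [[word_dict.get(w, unk) for w in s.lower().split()]
            for s in sentences]
    flat = np.array([[i] for seq in seqs for i in seq], dtype='int64')
    # one length-based LoD level: the length of each sentence, e.g. [[3, 4, 2]]
    return fluid.create_lod_tensor(flat, [[len(seq) for seq in seqs]], place)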
14,180
36.416887
85
py
Paddle
Paddle-master/python/paddle/fluid/tests/book/test_fit_a_line.py
# Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import paddle import paddle.fluid as fluid import contextlib import numpy import unittest import math import sys import os def train(use_cuda, save_dirname, is_local): x = fluid.layers.data(name='x', shape=[13], dtype='float32') y_predict = fluid.layers.fc(input=x, size=1, act=None) y = fluid.layers.data(name='y', shape=[1], dtype='float32') cost = fluid.layers.square_error_cost(input=y_predict, label=y) avg_cost = fluid.layers.mean(cost) sgd_optimizer = fluid.optimizer.SGD(learning_rate=0.001) sgd_optimizer.minimize(avg_cost) BATCH_SIZE = 20 train_reader = paddle.batch( paddle.reader.shuffle( paddle.dataset.uci_housing.train(), buf_size=500), batch_size=BATCH_SIZE) place = fluid.CUDAPlace(0) if use_cuda else fluid.CPUPlace() exe = fluid.Executor(place) def train_loop(main_program): feeder = fluid.DataFeeder(place=place, feed_list=[x, y]) exe.run(fluid.default_startup_program()) PASS_NUM = 100 for pass_id in range(PASS_NUM): for data in train_reader(): avg_loss_value, = exe.run(main_program, feed=feeder.feed(data), fetch_list=[avg_cost]) print(avg_loss_value) if avg_loss_value[0] < 10.0: if save_dirname is not None: fluid.io.save_inference_model(save_dirname, ['x'], [y_predict], exe) return if math.isnan(float(avg_loss_value)): sys.exit("got NaN loss, training failed.") raise AssertionError("Fit a line cost is too large, {0:2.2}".format( avg_loss_value[0])) if is_local: train_loop(fluid.default_main_program()) else: port = os.getenv("PADDLE_INIT_PORT", "6174") pserver_ips = os.getenv("PADDLE_INIT_PSERVERS") # ip,ip... eplist = [] for ip in pserver_ips.split(","): eplist.append(':'.join([ip, port])) pserver_endpoints = ",".join(eplist) # ip:port,ip:port... trainers = int(os.getenv("TRAINERS")) current_endpoint = os.getenv("POD_IP") + ":" + port trainer_id = int(os.getenv("PADDLE_INIT_TRAINER_ID")) training_role = os.getenv("TRAINING_ROLE", "TRAINER") t = fluid.DistributeTranspiler() t.transpile(trainer_id, pservers=pserver_endpoints, trainers=trainers) if training_role == "PSERVER": pserver_prog = t.get_pserver_program(current_endpoint) pserver_startup = t.get_startup_program(current_endpoint, pserver_prog) exe.run(pserver_startup) exe.run(pserver_prog) elif training_role == "TRAINER": train_loop(t.get_trainer_program()) def infer(use_cuda, save_dirname=None): if save_dirname is None: return place = fluid.CUDAPlace(0) if use_cuda else fluid.CPUPlace() exe = fluid.Executor(place) inference_scope = fluid.core.Scope() with fluid.scope_guard(inference_scope): # Use fluid.io.load_inference_model to obtain the inference program desc, # the feed_target_names (the names of variables that will be feeded # data using feed operators), and the fetch_targets (variables that # we want to obtain data from using fetch operators). 
[inference_program, feed_target_names, fetch_targets] = fluid.io.load_inference_model(save_dirname, exe) # The input's dimension should be 2-D and the second dim is 13 # The input data should be >= 0 batch_size = 10 tensor_x = numpy.random.uniform(0, 10, [batch_size, 13]).astype("float32") assert feed_target_names[0] == 'x' results = exe.run(inference_program, feed={feed_target_names[0]: tensor_x}, fetch_list=fetch_targets) print("infer shape: ", results[0].shape) print("infer results: ", results[0]) def main(use_cuda, is_local=True): if use_cuda and not fluid.core.is_compiled_with_cuda(): return # Directory for saving the trained model save_dirname = "fit_a_line.inference.model" train(use_cuda, save_dirname, is_local) infer(use_cuda, save_dirname) class TestFitALine(unittest.TestCase): def test_cpu(self): with self.program_scope_guard(): main(use_cuda=False) def test_cuda(self): with self.program_scope_guard(): main(use_cuda=True) @contextlib.contextmanager def program_scope_guard(self): prog = fluid.Program() startup_prog = fluid.Program() scope = fluid.core.Scope() with fluid.scope_guard(scope): with fluid.program_guard(prog, startup_prog): yield if __name__ == '__main__': unittest.main()
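
# A hedged sketch (not part of the original test): infer() above feeds uniform
# random rows; this variant runs the saved model on one real UCI-housing
# sample so the prediction can be compared against the ground-truth price.
def _infer_real_housing_sample(save_dirname="fit_a_line.inference.model"):
    place = fluid.CPUPlace()
    exe = fluid.Executor(place)
    with fluid.scope_guard(fluid.core.Scope()):
        [program, feed_names, fetch_targets] = fluid.io.load_inference_model(
            save_dirname, exe)
        # uci_housing.test() yields (13 normalized features, price)
        features, price = next(paddle.dataset.uci_housing.test()())
        x = numpy.array(features, dtype='float32').reshape([1, 13])
        results = exe.run(program,
                          feed={feed_names[0]: x},
                          fetch_list=fetch_targets)
        print("predicted: %s, actual: %s" % (results[0][0][0], price))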
5,667
35.567742
81
py
Paddle
Paddle-master/python/paddle/fluid/tests/book/test_machine_translation.py
# Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import contextlib import numpy as np import paddle import paddle.fluid as fluid import paddle.fluid.framework as framework import paddle.fluid.layers as pd from paddle.fluid.executor import Executor import unittest import os dict_size = 30000 source_dict_dim = target_dict_dim = dict_size hidden_dim = 32 word_dim = 16 batch_size = 2 max_length = 8 topk_size = 50 trg_dic_size = 10000 beam_size = 2 decoder_size = hidden_dim def encoder(is_sparse): # encoder src_word_id = pd.data( name="src_word_id", shape=[1], dtype='int64', lod_level=1) src_embedding = pd.embedding( input=src_word_id, size=[dict_size, word_dim], dtype='float32', is_sparse=is_sparse, param_attr=fluid.ParamAttr(name='vemb')) fc1 = pd.fc(input=src_embedding, size=hidden_dim * 4, act='tanh') lstm_hidden0, lstm_0 = pd.dynamic_lstm(input=fc1, size=hidden_dim * 4) encoder_out = pd.sequence_last_step(input=lstm_hidden0) return encoder_out def decoder_train(context, is_sparse): # decoder trg_language_word = pd.data( name="target_language_word", shape=[1], dtype='int64', lod_level=1) trg_embedding = pd.embedding( input=trg_language_word, size=[dict_size, word_dim], dtype='float32', is_sparse=is_sparse, param_attr=fluid.ParamAttr(name='vemb')) rnn = pd.DynamicRNN() with rnn.block(): current_word = rnn.step_input(trg_embedding) pre_state = rnn.memory(init=context) current_state = pd.fc(input=[current_word, pre_state], size=decoder_size, act='tanh') current_score = pd.fc(input=current_state, size=target_dict_dim, act='softmax') rnn.update_memory(pre_state, current_state) rnn.output(current_score) return rnn() def decoder_decode(context, is_sparse): init_state = context array_len = pd.fill_constant(shape=[1], dtype='int64', value=max_length) counter = pd.zeros(shape=[1], dtype='int64', force_cpu=True) # fill the first element with init_state state_array = pd.create_array('float32') pd.array_write(init_state, array=state_array, i=counter) # ids, scores as memory ids_array = pd.create_array('int64') scores_array = pd.create_array('float32') init_ids = pd.data(name="init_ids", shape=[1], dtype="int64", lod_level=2) init_scores = pd.data( name="init_scores", shape=[1], dtype="float32", lod_level=2) pd.array_write(init_ids, array=ids_array, i=counter) pd.array_write(init_scores, array=scores_array, i=counter) cond = pd.less_than(x=counter, y=array_len) while_op = pd.While(cond=cond) with while_op.block(): pre_ids = pd.array_read(array=ids_array, i=counter) pre_state = pd.array_read(array=state_array, i=counter) pre_score = pd.array_read(array=scores_array, i=counter) # expand the lod of pre_state to be the same with pre_score pre_state_expanded = pd.sequence_expand(pre_state, pre_score) pre_ids_emb = pd.embedding( input=pre_ids, size=[dict_size, word_dim], dtype='float32', is_sparse=is_sparse) # use rnn unit to update rnn current_state = pd.fc(input=[pre_state_expanded, pre_ids_emb], size=decoder_size, act='tanh') current_state_with_lod = 
pd.lod_reset(x=current_state, y=pre_score) # use score to do beam search current_score = pd.fc(input=current_state_with_lod, size=target_dict_dim, act='softmax') topk_scores, topk_indices = pd.topk(current_score, k=50) selected_ids, selected_scores = pd.beam_search( pre_ids, topk_indices, topk_scores, beam_size, end_id=10, level=0) pd.increment(x=counter, value=1, in_place=True) # update the memories pd.array_write(current_state, array=state_array, i=counter) pd.array_write(selected_ids, array=ids_array, i=counter) pd.array_write(selected_scores, array=scores_array, i=counter) pd.less_than(x=counter, y=array_len, cond=cond) translation_ids, translation_scores = pd.beam_search_decode( ids=ids_array, scores=scores_array) # return init_ids, init_scores return translation_ids, translation_scores def train_main(use_cuda, is_sparse, is_local=True): if use_cuda and not fluid.core.is_compiled_with_cuda(): return place = fluid.CUDAPlace(0) if use_cuda else fluid.CPUPlace() context = encoder(is_sparse) rnn_out = decoder_train(context, is_sparse) label = pd.data( name="target_language_next_word", shape=[1], dtype='int64', lod_level=1) cost = pd.cross_entropy(input=rnn_out, label=label) avg_cost = pd.mean(cost) optimizer = fluid.optimizer.Adagrad( learning_rate=1e-4, regularization=fluid.regularizer.L2DecayRegularizer( regularization_coeff=0.1)) optimizer.minimize(avg_cost) train_data = paddle.batch( paddle.reader.shuffle( paddle.dataset.wmt14.train(dict_size), buf_size=1000), batch_size=batch_size) feed_order = [ 'src_word_id', 'target_language_word', 'target_language_next_word' ] exe = Executor(place) def train_loop(main_program): exe.run(framework.default_startup_program()) feed_list = [ main_program.global_block().var(var_name) for var_name in feed_order ] feeder = fluid.DataFeeder(feed_list, place) batch_id = 0 for pass_id in xrange(1): for data in train_data(): outs = exe.run(main_program, feed=feeder.feed(data), fetch_list=[avg_cost]) avg_cost_val = np.array(outs[0]) print('pass_id=' + str(pass_id) + ' batch=' + str(batch_id) + " avg_cost=" + str(avg_cost_val)) if batch_id > 3: break batch_id += 1 if is_local: train_loop(framework.default_main_program()) else: port = os.getenv("PADDLE_INIT_PORT", "6174") pserver_ips = os.getenv("PADDLE_INIT_PSERVERS") # ip,ip... eplist = [] for ip in pserver_ips.split(","): eplist.append(':'.join([ip, port])) pserver_endpoints = ",".join(eplist) # ip:port,ip:port... trainers = int(os.getenv("TRAINERS")) current_endpoint = os.getenv("POD_IP") + ":" + port trainer_id = int(os.getenv("PADDLE_INIT_TRAINER_ID")) training_role = os.getenv("TRAINING_ROLE", "TRAINER") t = fluid.DistributeTranspiler() t.transpile(trainer_id, pservers=pserver_endpoints, trainers=trainers) if training_role == "PSERVER": pserver_prog = t.get_pserver_program(current_endpoint) pserver_startup = t.get_startup_program(current_endpoint, pserver_prog) exe.run(pserver_startup) exe.run(pserver_prog) elif training_role == "TRAINER": train_loop(t.get_trainer_program()) def decode_main(use_cuda, is_sparse): if use_cuda and not fluid.core.is_compiled_with_cuda(): return place = fluid.CUDAPlace(0) if use_cuda else fluid.CPUPlace() context = encoder(is_sparse) translation_ids, translation_scores = decoder_decode(context, is_sparse) exe = Executor(place) exe.run(framework.default_startup_program()) init_ids_data = np.array([1 for _ in range(batch_size)], dtype='int64') init_scores_data = np.array( [1. 
for _ in range(batch_size)], dtype='float32') init_ids_data = init_ids_data.reshape((batch_size, 1)) init_scores_data = init_scores_data.reshape((batch_size, 1)) init_lod = [1] * batch_size init_lod = [init_lod, init_lod] init_ids = fluid.create_lod_tensor(init_ids_data, init_lod, place) init_scores = fluid.create_lod_tensor(init_scores_data, init_lod, place) train_data = paddle.batch( paddle.reader.shuffle( paddle.dataset.wmt14.train(dict_size), buf_size=1000), batch_size=batch_size) feed_order = ['src_word_id'] feed_list = [ framework.default_main_program().global_block().var(var_name) for var_name in feed_order ] feeder = fluid.DataFeeder(feed_list, place) for data in train_data(): feed_dict = feeder.feed(map(lambda x: [x[0]], data)) feed_dict['init_ids'] = init_ids feed_dict['init_scores'] = init_scores result_ids, result_scores = exe.run( framework.default_main_program(), feed=feed_dict, fetch_list=[translation_ids, translation_scores], return_numpy=False) print result_ids.lod() break class TestMachineTranslation(unittest.TestCase): pass @contextlib.contextmanager def scope_prog_guard(): prog = fluid.Program() startup_prog = fluid.Program() scope = fluid.core.Scope() with fluid.scope_guard(scope): with fluid.program_guard(prog, startup_prog): yield def inject_test_train(use_cuda, is_sparse): f_name = 'test_{0}_{1}_train'.format('cuda' if use_cuda else 'cpu', 'sparse' if is_sparse else 'dense') def f(*args): with scope_prog_guard(): train_main(use_cuda, is_sparse) setattr(TestMachineTranslation, f_name, f) def inject_test_decode(use_cuda, is_sparse, decorator=None): f_name = 'test_{0}_{1}_decode'.format('cuda' if use_cuda else 'cpu', 'sparse' if is_sparse else 'dense') def f(*args): with scope_prog_guard(): decode_main(use_cuda, is_sparse) if decorator is not None: f = decorator(f) setattr(TestMachineTranslation, f_name, f) for _use_cuda_ in (False, True): for _is_sparse_ in (False, True): inject_test_train(_use_cuda_, _is_sparse_) for _use_cuda_ in (False, True): for _is_sparse_ in (False, True): _decorator_ = None if _use_cuda_: _decorator_ = unittest.skip( reason='Beam Search does not support CUDA!') inject_test_decode( is_sparse=_is_sparse_, use_cuda=_use_cuda_, decorator=_decorator_) if __name__ == '__main__': unittest.main()
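
# A hedged sketch (not part of the original test): decode_main() above only
# prints the LoD of the beam-search output. This helper unpacks the fetched
# `result_ids` LoDTensor back into token-id strings, assuming the second LoD
# level holds the per-candidate word spans (offset-based, as returned by
# .lod()). `id_to_word` is an assumption: any {int: str} mapping for the
# target vocabulary will do.
def _unpack_translations(result_ids, id_to_word):
    word_offsets = result_ids.lod()[1]
    ids = np.array(result_ids).flatten()
    candidates = []
    for start, end in zip(word_offsets[:-1], word_offsets[1:]):
        candidates.append(' '.join(
            id_to_word.get(int(i), '<unk>') for i in ids[start:end]))
    return candidates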
11,282
33.190909
80
py
Paddle
Paddle-master/python/paddle/fluid/tests/book/high-level-api/word2vec/test_word2vec_new_api.py
# Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserve. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import paddle import paddle.fluid as fluid import numpy as np import math import sys from functools import partial PASS_NUM = 100 EMBED_SIZE = 32 HIDDEN_SIZE = 256 N = 5 BATCH_SIZE = 32 word_dict = paddle.dataset.imikolov.build_dict() dict_size = len(word_dict) def inference_program(is_sparse): first_word = fluid.layers.data(name='firstw', shape=[1], dtype='int64') second_word = fluid.layers.data(name='secondw', shape=[1], dtype='int64') third_word = fluid.layers.data(name='thirdw', shape=[1], dtype='int64') forth_word = fluid.layers.data(name='forthw', shape=[1], dtype='int64') embed_first = fluid.layers.embedding( input=first_word, size=[dict_size, EMBED_SIZE], dtype='float32', is_sparse=is_sparse, param_attr='shared_w') embed_second = fluid.layers.embedding( input=second_word, size=[dict_size, EMBED_SIZE], dtype='float32', is_sparse=is_sparse, param_attr='shared_w') embed_third = fluid.layers.embedding( input=third_word, size=[dict_size, EMBED_SIZE], dtype='float32', is_sparse=is_sparse, param_attr='shared_w') embed_forth = fluid.layers.embedding( input=forth_word, size=[dict_size, EMBED_SIZE], dtype='float32', is_sparse=is_sparse, param_attr='shared_w') concat_embed = fluid.layers.concat( input=[embed_first, embed_second, embed_third, embed_forth], axis=1) hidden1 = fluid.layers.fc(input=concat_embed, size=HIDDEN_SIZE, act='sigmoid') predict_word = fluid.layers.fc(input=hidden1, size=dict_size, act='softmax') return predict_word def train_program(is_sparse): # The declaration of 'next_word' must be after the invoking of inference_program, # or the data input order of train program would be [next_word, firstw, secondw, # thirdw, forthw], which is not correct. 
predict_word = inference_program(is_sparse) next_word = fluid.layers.data(name='nextw', shape=[1], dtype='int64') cost = fluid.layers.cross_entropy(input=predict_word, label=next_word) avg_cost = fluid.layers.mean(cost) return avg_cost def train(use_cuda, train_program, params_dirname): train_reader = paddle.batch( paddle.dataset.imikolov.train(word_dict, N), BATCH_SIZE) test_reader = paddle.batch( paddle.dataset.imikolov.test(word_dict, N), BATCH_SIZE) place = fluid.CUDAPlace(0) if use_cuda else fluid.CPUPlace() def event_handler(event): if isinstance(event, fluid.EndStepEvent): outs = trainer.test( reader=test_reader, feed_order=['firstw', 'secondw', 'thirdw', 'forthw', 'nextw']) avg_cost = outs[0] print("loss= ", avg_cost) if avg_cost < 10.0: trainer.save_params(params_dirname) trainer.stop() if math.isnan(avg_cost): sys.exit("got NaN loss, training failed.") trainer = fluid.Trainer( train_func=train_program, optimizer=fluid.optimizer.SGD(learning_rate=0.001), place=place) trainer.train( reader=train_reader, num_epochs=1, event_handler=event_handler, feed_order=['firstw', 'secondw', 'thirdw', 'forthw', 'nextw']) def infer(use_cuda, inference_program, params_dirname=None): place = fluid.CUDAPlace(0) if use_cuda else fluid.CPUPlace() inferencer = fluid.Inferencer( infer_func=inference_program, param_path=params_dirname, place=place) # Setup inputs by creating 4 LoDTensors representing 4 words. Here each word # is simply an index to look up for the corresponding word vector and hence # the shape of word (base_shape) should be [1]. The length-based level of # detail (lod) info of each LoDtensor should be [[1]] meaning there is only # one lod_level and there is only one sequence of one word on this level. # Note that lod info should be a list of lists. lod = [[1]] base_shape = [1] # The range of random integers is [low, high] first_word = fluid.create_random_int_lodtensor( lod, base_shape, place, low=0, high=dict_size - 1) second_word = fluid.create_random_int_lodtensor( lod, base_shape, place, low=0, high=dict_size - 1) third_word = fluid.create_random_int_lodtensor( lod, base_shape, place, low=0, high=dict_size - 1) fourth_word = fluid.create_random_int_lodtensor( lod, base_shape, place, low=0, high=dict_size - 1) result = inferencer.infer( { 'firstw': first_word, 'secondw': second_word, 'thirdw': third_word, 'forthw': fourth_word }, return_numpy=False) print(np.array(result[0])) def main(use_cuda, is_sparse): if use_cuda and not fluid.core.is_compiled_with_cuda(): return params_dirname = "word2vec.inference.model" train( use_cuda=use_cuda, train_program=partial(train_program, is_sparse), params_dirname=params_dirname) infer( use_cuda=use_cuda, inference_program=partial(inference_program, is_sparse), params_dirname=params_dirname) if __name__ == '__main__': for use_cuda in (False, True): for is_sparse in (False, True): main(use_cuda=use_cuda, is_sparse=is_sparse)
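
# A hedged sketch (not part of the original script): main() above queries the
# model with random ids; this example feeds a concrete 4-gram instead. It
# assumes the imikolov dictionary uses '<unk>' for out-of-vocabulary tokens
# (id 0 is the fallback otherwise).
def _infer_ngram(words, params_dirname="word2vec.inference.model",
                 is_sparse=True):
    place = fluid.CPUPlace()
    inferencer = fluid.Inferencer(
        infer_func=partial(inference_program, is_sparse),
        param_path=params_dirname,
        place=place)
    unk = word_dict.get('<unk>', 0)
    tensors = [
        fluid.create_lod_tensor(
            np.array([[word_dict.get(w, unk)]], dtype='int64'), [[1]], place)
        for w in words
    ]
    result = inferencer.infer(
        {'firstw': tensors[0], 'secondw': tensors[1],
         'thirdw': tensors[2], 'forthw': tensors[3]},
        return_numpy=False)
    probs = np.array(result[0])
    print("most likely next-word id: %d" % int(np.argmax(probs)))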
6,078
34.138728
85
py
Paddle
Paddle-master/python/paddle/fluid/tests/book/high-level-api/label_semantic_roles/test_label_semantic_roles_newapi.py
# Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

from __future__ import print_function

import math
import sys

import paddle
import paddle.fluid as fluid
import numpy as np

WORD_DICT, VERB_DICT, LABEL_DICT = paddle.dataset.conll05.get_dict()
WORD_DICT_LEN = len(WORD_DICT)
LABEL_DICT_LEN = len(LABEL_DICT)
PRED_DICT_LEN = len(VERB_DICT)
MARK_DICT_LEN = 2
IS_SPARSE = True
BATCH_SIZE = 10
EMBEDDING_NAME = 'emb'


def lstm_net():
    WORD_DIM = 32
    MARK_DIM = 5
    HIDDEN_DIM = 512
    DEPTH = 8

    # Data definitions
    word = fluid.layers.data(
        name='word_data', shape=[1], dtype='int64', lod_level=1)
    predicate = fluid.layers.data(
        name='verb_data', shape=[1], dtype='int64', lod_level=1)
    ctx_n2 = fluid.layers.data(
        name='ctx_n2_data', shape=[1], dtype='int64', lod_level=1)
    ctx_n1 = fluid.layers.data(
        name='ctx_n1_data', shape=[1], dtype='int64', lod_level=1)
    ctx_0 = fluid.layers.data(
        name='ctx_0_data', shape=[1], dtype='int64', lod_level=1)
    ctx_p1 = fluid.layers.data(
        name='ctx_p1_data', shape=[1], dtype='int64', lod_level=1)
    ctx_p2 = fluid.layers.data(
        name='ctx_p2_data', shape=[1], dtype='int64', lod_level=1)
    mark = fluid.layers.data(
        name='mark_data', shape=[1], dtype='int64', lod_level=1)

    # 8 features
    predicate_embedding = fluid.layers.embedding(
        input=predicate,
        size=[PRED_DICT_LEN, WORD_DIM],
        dtype='float32',
        is_sparse=IS_SPARSE,
        param_attr='vemb')
    mark_embedding = fluid.layers.embedding(
        input=mark,
        size=[MARK_DICT_LEN, MARK_DIM],
        dtype='float32',
        is_sparse=IS_SPARSE)

    word_input = [word, ctx_n2, ctx_n1, ctx_0, ctx_p1, ctx_p2]
    emb_layers = [
        fluid.layers.embedding(
            size=[WORD_DICT_LEN, WORD_DIM],
            input=x,
            param_attr=fluid.ParamAttr(name=EMBEDDING_NAME)) for x in word_input
        #name=EMBEDDING_NAME, trainable=False)) for x in word_input
    ]
    emb_layers.append(predicate_embedding)
    emb_layers.append(mark_embedding)

    hidden_0_layers = [
        fluid.layers.fc(input=emb, size=HIDDEN_DIM, act='tanh')
        for emb in emb_layers
    ]

    hidden_0 = fluid.layers.sums(input=hidden_0_layers)

    lstm_0 = fluid.layers.dynamic_lstm(
        input=hidden_0,
        size=HIDDEN_DIM,
        candidate_activation='relu',
        gate_activation='sigmoid',
        cell_activation='sigmoid')

    # stack L-LSTM and R-LSTM with direct edges
    input_tmp = [hidden_0, lstm_0]

    for i in range(1, DEPTH):
        mix_hidden = fluid.layers.sums(input=[
            fluid.layers.fc(input=input_tmp[0], size=HIDDEN_DIM, act='tanh'),
            fluid.layers.fc(input=input_tmp[1], size=HIDDEN_DIM, act='tanh')
        ])

        lstm = fluid.layers.dynamic_lstm(
            input=mix_hidden,
            size=HIDDEN_DIM,
            candidate_activation='relu',
            gate_activation='sigmoid',
            cell_activation='sigmoid',
            is_reverse=((i % 2) == 1))

        input_tmp = [mix_hidden, lstm]

    feature_out = fluid.layers.sums(input=[
        fluid.layers.fc(input=input_tmp[0], size=LABEL_DICT_LEN, act='tanh'),
        fluid.layers.fc(input=input_tmp[1], size=LABEL_DICT_LEN, act='tanh')
    ])

    return feature_out


def inference_program():
    predict = lstm_net()
    return predict


def train_program():
    MIX_HIDDEN_LR = 1e-3

    predict = lstm_net()
    target = fluid.layers.data(
        name='target', shape=[1], dtype='int64', lod_level=1)
    crf_cost = fluid.layers.linear_chain_crf(
        input=predict,
        label=target,
        param_attr=fluid.ParamAttr(
            name='crfw', learning_rate=MIX_HIDDEN_LR))
    avg_cost = fluid.layers.mean(crf_cost)

    return [avg_cost]


def train(use_cuda, train_program, params_dirname):
    place = fluid.CUDAPlace(0) if use_cuda else fluid.CPUPlace()
    optimizer = fluid.optimizer.SGD(learning_rate=0.01)

    trainer = fluid.Trainer(
        train_func=train_program, place=place, optimizer=optimizer)

    feed_order = [
        'word_data', 'ctx_n2_data', 'ctx_n1_data', 'ctx_0_data', 'ctx_p1_data',
        'ctx_p2_data', 'verb_data', 'mark_data', 'target'
    ]

    #embedding_param = fluid.global_scope().find_var(
    #    EMBEDDING_NAME).get_tensor()
    #embedding_param.set(
    #    load_parameter(conll05.get_embedding(), WORD_DICT_LEN, WORD_DIM),
    #    place)

    def event_handler(event):
        if isinstance(event, fluid.EndEpochEvent):
            test_reader = paddle.batch(
                paddle.dataset.conll05.test(), batch_size=BATCH_SIZE)
            avg_cost_set = trainer.test(
                reader=test_reader, feed_order=feed_order)

            # get avg cost
            avg_cost = np.array(avg_cost_set).mean()

            print("avg_cost: %s" % avg_cost)

            if float(avg_cost) < 100.0:  # Large value to increase CI speed
                trainer.save_params(params_dirname)
            else:
                print('BatchID {0}, Test Loss {1:0.2}'.format(event.epoch + 1,
                                                              float(avg_cost)))
                if math.isnan(float(avg_cost)):
                    sys.exit("got NaN loss, training failed.")
        elif isinstance(event, fluid.EndStepEvent):
            print("Step {0}, Epoch {1} Metrics {2}".format(
                event.step, event.epoch, map(np.array, event.metrics)))
            if event.step == 1:  # Run 2 iterations to speed CI
                trainer.save_params(params_dirname)
                trainer.stop()

    train_reader = paddle.batch(
        paddle.reader.shuffle(
            paddle.dataset.conll05.test(), buf_size=8192),
        batch_size=BATCH_SIZE)
    trainer.train(
        num_epochs=1,
        event_handler=event_handler,
        reader=train_reader,
        feed_order=feed_order)


def infer(use_cuda, inference_program, params_dirname):
    place = fluid.CUDAPlace(0) if use_cuda else fluid.CPUPlace()
    inferencer = fluid.Inferencer(
        inference_program, param_path=params_dirname, place=place)

    # Setup inputs by creating LoDTensors to represent sequences of words.
    # Here each word is the basic element of these LoDTensors and the shape of
    # each word (base_shape) should be [1] since it is simply an index to
    # look up for the corresponding word vector.
    # Suppose the length-based level of detail (lod) info is set to [[3, 4, 2]],
    # which has only one lod level. Then the created LoDTensors will have only
    # one higher level structure (sequence of words, or sentence) than the basic
    # element (word). Hence the LoDTensor will hold data for three sentences of
    # length 3, 4 and 2, respectively.
    # Note that lod info should be a list of lists.
    lod = [[3, 4, 2]]
    base_shape = [1]
    # The range of random integers is [low, high]
    word = fluid.create_random_int_lodtensor(
        lod, base_shape, place, low=0, high=WORD_DICT_LEN - 1)
    ctx_n2 = fluid.create_random_int_lodtensor(
        lod, base_shape, place, low=0, high=WORD_DICT_LEN - 1)
    ctx_n1 = fluid.create_random_int_lodtensor(
        lod, base_shape, place, low=0, high=WORD_DICT_LEN - 1)
    ctx_0 = fluid.create_random_int_lodtensor(
        lod, base_shape, place, low=0, high=WORD_DICT_LEN - 1)
    ctx_p1 = fluid.create_random_int_lodtensor(
        lod, base_shape, place, low=0, high=WORD_DICT_LEN - 1)
    ctx_p2 = fluid.create_random_int_lodtensor(
        lod, base_shape, place, low=0, high=WORD_DICT_LEN - 1)
    pred = fluid.create_random_int_lodtensor(
        lod, base_shape, place, low=0, high=PRED_DICT_LEN - 1)
    mark = fluid.create_random_int_lodtensor(
        lod, base_shape, place, low=0, high=MARK_DICT_LEN - 1)

    results = inferencer.infer(
        {
            'word_data': word,
            'ctx_n2_data': ctx_n2,
            'ctx_n1_data': ctx_n1,
            'ctx_0_data': ctx_0,
            'ctx_p1_data': ctx_p1,
            'ctx_p2_data': ctx_p2,
            'verb_data': pred,
            'mark_data': mark
        },
        return_numpy=False)

    print("infer results: ", np.array(results[0]))


def main(use_cuda):
    if use_cuda and not fluid.core.is_compiled_with_cuda():
        return
    params_dirname = "label_semantic_roles.inference.model"
    train(use_cuda, train_program, params_dirname)
    infer(use_cuda, inference_program, params_dirname)


if __name__ == '__main__':
    for use_cuda in (False, True):
        main(use_cuda=use_cuda)
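
# A hedged sketch (not part of the original script): inference_program()
# returns raw CRF emission scores; to obtain an actual label sequence one
# would normally decode against the transition weights learned under 'crfw'.
# A minimal decode network under that assumption, reusing lstm_net() above:
def decode_program():
    feature_out = lstm_net()
    # Viterbi decoding with the trained transition parameters
    return fluid.layers.crf_decoding(
        input=feature_out, param_attr=fluid.ParamAttr(name='crfw'))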
9,145
33.908397
81
py
Paddle
Paddle-master/python/paddle/fluid/tests/book/high-level-api/understand_sentiment/test_understand_sentiment_dynamic_rnn.py
# Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

from __future__ import print_function

import math
import sys

import paddle
import paddle.fluid as fluid
from functools import partial
import numpy as np

CLASS_DIM = 2
EMB_DIM = 128
BATCH_SIZE = 128
LSTM_SIZE = 128


def dynamic_rnn_lstm(data, input_dim, class_dim, emb_dim, lstm_size):
    emb = fluid.layers.embedding(
        input=data, size=[input_dim, emb_dim], is_sparse=True)
    sentence = fluid.layers.fc(input=emb, size=lstm_size, act='tanh')

    rnn = fluid.layers.DynamicRNN()
    with rnn.block():
        word = rnn.step_input(sentence)
        prev_hidden = rnn.memory(value=0.0, shape=[lstm_size])
        prev_cell = rnn.memory(value=0.0, shape=[lstm_size])

        def gate_common(ipt, hidden, size):
            gate0 = fluid.layers.fc(input=ipt, size=size, bias_attr=True)
            gate1 = fluid.layers.fc(input=hidden, size=size, bias_attr=False)
            return gate0 + gate1

        forget_gate = fluid.layers.sigmoid(x=gate_common(word, prev_hidden,
                                                         lstm_size))
        input_gate = fluid.layers.sigmoid(x=gate_common(word, prev_hidden,
                                                        lstm_size))
        output_gate = fluid.layers.sigmoid(x=gate_common(word, prev_hidden,
                                                         lstm_size))
        cell_gate = fluid.layers.sigmoid(x=gate_common(word, prev_hidden,
                                                       lstm_size))

        cell = forget_gate * prev_cell + input_gate * cell_gate
        hidden = output_gate * fluid.layers.tanh(x=cell)
        rnn.update_memory(prev_cell, cell)
        rnn.update_memory(prev_hidden, hidden)
        rnn.output(hidden)

    last = fluid.layers.sequence_last_step(rnn())
    prediction = fluid.layers.fc(input=last, size=class_dim, act="softmax")
    return prediction


def inference_program(word_dict):
    data = fluid.layers.data(
        name="words", shape=[1], dtype="int64", lod_level=1)

    dict_dim = len(word_dict)
    pred = dynamic_rnn_lstm(data, dict_dim, CLASS_DIM, EMB_DIM, LSTM_SIZE)
    return pred


def train_program(word_dict):
    prediction = inference_program(word_dict)
    label = fluid.layers.data(name="label", shape=[1], dtype="int64")
    cost = fluid.layers.cross_entropy(input=prediction, label=label)
    avg_cost = fluid.layers.mean(cost)
    accuracy = fluid.layers.accuracy(input=prediction, label=label)
    return [avg_cost, accuracy]


def train(use_cuda, train_program, params_dirname):
    place = fluid.CUDAPlace(0) if use_cuda else fluid.CPUPlace()
    optimizer = fluid.optimizer.Adagrad(learning_rate=0.002)

    word_dict = paddle.dataset.imdb.word_dict()
    trainer = fluid.Trainer(
        train_func=partial(train_program, word_dict),
        place=place,
        optimizer=optimizer)

    def event_handler(event):
        if isinstance(event, fluid.EndEpochEvent):
            test_reader = paddle.batch(
                paddle.dataset.imdb.test(word_dict), batch_size=BATCH_SIZE)
            avg_cost, acc = trainer.test(
                reader=test_reader, feed_order=['words', 'label'])

            print("avg_cost: %s" % avg_cost)
            print("acc     : %s" % acc)

            if acc > 0.2:  # Smaller value to increase CI speed
                trainer.save_params(params_dirname)
                trainer.stop()
            else:
                print('BatchID {0}, Test Loss {1:0.2}, Acc {2:0.2}'.format(
                    event.epoch + 1, avg_cost, acc))
                if math.isnan(avg_cost):
                    sys.exit("got NaN loss, training failed.")
        elif isinstance(event, fluid.EndStepEvent):
            print("Step {0}, Epoch {1} Metrics {2}".format(
                event.step, event.epoch, map(np.array, event.metrics)))
            if event.step == 1:  # Run 2 iterations to speed CI
                trainer.save_params(params_dirname)
                trainer.stop()

    train_reader = paddle.batch(
        paddle.reader.shuffle(
            paddle.dataset.imdb.train(word_dict), buf_size=25000),
        batch_size=BATCH_SIZE)

    trainer.train(
        num_epochs=1,
        event_handler=event_handler,
        reader=train_reader,
        feed_order=['words', 'label'])


def infer(use_cuda, inference_program, params_dirname=None):
    place = fluid.CUDAPlace(0) if use_cuda else fluid.CPUPlace()
    word_dict = paddle.dataset.imdb.word_dict()

    inferencer = fluid.Inferencer(
        infer_func=partial(inference_program, word_dict),
        param_path=params_dirname,
        place=place)

    # Setup input by creating LoDTensor to represent sequence of words.
    # Here each word is the basic element of the LoDTensor and the shape of
    # each word (base_shape) should be [1] since it is simply an index to
    # look up for the corresponding word vector.
    # Suppose the length-based level of detail (lod) info is set to [[3, 4, 2]],
    # which has only one lod level. Then the created LoDTensor will have only
    # one higher level structure (sequence of words, or sentence) than the basic
    # element (word). Hence the LoDTensor will hold data for three sentences of
    # length 3, 4 and 2, respectively.
    # Note that lod info should be a list of lists.
    lod = [[3, 4, 2]]
    base_shape = [1]
    # The range of random integers is [low, high]
    tensor_words = fluid.create_random_int_lodtensor(
        lod, base_shape, place, low=0, high=len(word_dict) - 1)
    results = inferencer.infer({'words': tensor_words})
    print("infer results: ", results)


def main(use_cuda):
    if use_cuda and not fluid.core.is_compiled_with_cuda():
        return
    params_dirname = "understand_sentiment_dynamic_rnn.inference.model"
    train(use_cuda, train_program, params_dirname)
    infer(use_cuda, inference_program, params_dirname)


if __name__ == '__main__':
    for use_cuda in (False, True):
        main(use_cuda=use_cuda)
6,544
37.727811
81
py
Paddle
Paddle-master/python/paddle/fluid/tests/book/high-level-api/understand_sentiment/test_understand_sentiment_stacked_lstm.py
# Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

from __future__ import print_function

import math
import sys

import paddle
import paddle.fluid as fluid
from functools import partial
import numpy as np

CLASS_DIM = 2
EMB_DIM = 128
HID_DIM = 512
STACKED_NUM = 3
BATCH_SIZE = 128


def stacked_lstm_net(data, input_dim, class_dim, emb_dim, hid_dim, stacked_num):
    assert stacked_num % 2 == 1

    emb = fluid.layers.embedding(
        input=data, size=[input_dim, emb_dim], is_sparse=True)

    fc1 = fluid.layers.fc(input=emb, size=hid_dim)
    lstm1, cell1 = fluid.layers.dynamic_lstm(input=fc1, size=hid_dim)

    inputs = [fc1, lstm1]

    for i in range(2, stacked_num + 1):
        fc = fluid.layers.fc(input=inputs, size=hid_dim)
        lstm, cell = fluid.layers.dynamic_lstm(
            input=fc, size=hid_dim, is_reverse=(i % 2) == 0)
        inputs = [fc, lstm]

    fc_last = fluid.layers.sequence_pool(input=inputs[0], pool_type='max')
    lstm_last = fluid.layers.sequence_pool(input=inputs[1], pool_type='max')

    prediction = fluid.layers.fc(input=[fc_last, lstm_last],
                                 size=class_dim,
                                 act='softmax')
    return prediction


def inference_program(word_dict):
    data = fluid.layers.data(
        name="words", shape=[1], dtype="int64", lod_level=1)

    dict_dim = len(word_dict)
    net = stacked_lstm_net(data, dict_dim, CLASS_DIM, EMB_DIM, HID_DIM,
                           STACKED_NUM)
    return net


def train_program(word_dict):
    prediction = inference_program(word_dict)
    label = fluid.layers.data(name="label", shape=[1], dtype="int64")
    cost = fluid.layers.cross_entropy(input=prediction, label=label)
    avg_cost = fluid.layers.mean(cost)
    accuracy = fluid.layers.accuracy(input=prediction, label=label)
    return [avg_cost, accuracy]


def train(use_cuda, train_program, params_dirname):
    place = fluid.CUDAPlace(0) if use_cuda else fluid.CPUPlace()
    optimizer = fluid.optimizer.Adagrad(learning_rate=0.002)

    word_dict = paddle.dataset.imdb.word_dict()
    trainer = fluid.Trainer(
        train_func=partial(train_program, word_dict),
        place=place,
        optimizer=optimizer)

    def event_handler(event):
        if isinstance(event, fluid.EndEpochEvent):
            test_reader = paddle.batch(
                paddle.dataset.imdb.test(word_dict), batch_size=BATCH_SIZE)
            avg_cost, acc = trainer.test(
                reader=test_reader, feed_order=['words', 'label'])

            print("avg_cost: %s" % avg_cost)
            print("acc     : %s" % acc)

            if acc > 0.2:  # Smaller value to increase CI speed
                trainer.save_params(params_dirname)
                trainer.stop()
            else:
                print('BatchID {0}, Test Loss {1:0.2}, Acc {2:0.2}'.format(
                    event.epoch + 1, avg_cost, acc))
                if math.isnan(avg_cost):
                    sys.exit("got NaN loss, training failed.")
        elif isinstance(event, fluid.EndStepEvent):
            print("Step {0}, Epoch {1} Metrics {2}".format(
                event.step, event.epoch, map(np.array, event.metrics)))
            if event.step == 1:  # Run 2 iterations to speed CI
                trainer.save_params(params_dirname)
                trainer.stop()

    train_reader = paddle.batch(
        paddle.reader.shuffle(
            paddle.dataset.imdb.train(word_dict), buf_size=25000),
        batch_size=BATCH_SIZE)

    trainer.train(
        num_epochs=1,
        event_handler=event_handler,
        reader=train_reader,
        feed_order=['words', 'label'])


def infer(use_cuda, inference_program, params_dirname=None):
    place = fluid.CUDAPlace(0) if use_cuda else fluid.CPUPlace()
    word_dict = paddle.dataset.imdb.word_dict()

    inferencer = fluid.Inferencer(
        infer_func=partial(inference_program, word_dict),
        param_path=params_dirname,
        place=place)

    # Setup input by creating LoDTensor to represent sequence of words.
    # Here each word is the basic element of the LoDTensor and the shape of
    # each word (base_shape) should be [1] since it is simply an index to
    # look up for the corresponding word vector.
    # Suppose the length-based level of detail (lod) info is set to [[3, 4, 2]],
    # which has only one lod level. Then the created LoDTensor will have only
    # one higher level structure (sequence of words, or sentence) than the basic
    # element (word). Hence the LoDTensor will hold data for three sentences of
    # length 3, 4 and 2, respectively.
    # Note that lod info should be a list of lists.
    lod = [[3, 4, 2]]
    base_shape = [1]
    # The range of random integers is [low, high]
    tensor_words = fluid.create_random_int_lodtensor(
        lod, base_shape, place, low=0, high=len(word_dict) - 1)
    results = inferencer.infer({'words': tensor_words})
    print("infer results: ", results)


def main(use_cuda):
    if use_cuda and not fluid.core.is_compiled_with_cuda():
        return
    params_dirname = "understand_sentiment_stacked_lstm.inference.model"
    train(use_cuda, train_program, params_dirname)
    infer(use_cuda, inference_program, params_dirname)


if __name__ == '__main__':
    for use_cuda in (False, True):
        main(use_cuda=use_cuda)
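
# A hedged note (not part of the original test): why stacked_lstm_net()
# asserts `stacked_num % 2 == 1`. Layers alternate direction through
# is_reverse=(i % 2) == 0 for i in 2..stacked_num, so an odd stacked_num
# guarantees the topmost LSTM runs in the forward direction again. A quick
# self-contained check of that pattern:
def _layer_directions(stacked_num=STACKED_NUM):
    # returns e.g. ['reverse', 'forward'] for the default STACKED_NUM = 3
    return ['reverse' if (i % 2) == 0 else 'forward'
            for i in range(2, stacked_num + 1)]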
5,878
35.515528
81
py
Paddle
Paddle-master/python/paddle/fluid/tests/book/high-level-api/understand_sentiment/test_understand_sentiment_conv.py
# Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

from __future__ import print_function

import math
import sys

import paddle
import paddle.fluid as fluid
from functools import partial
import numpy as np

CLASS_DIM = 2
EMB_DIM = 128
HID_DIM = 512
BATCH_SIZE = 128


def convolution_net(data, input_dim, class_dim, emb_dim, hid_dim):
    emb = fluid.layers.embedding(
        input=data, size=[input_dim, emb_dim], is_sparse=True)
    conv_3 = fluid.nets.sequence_conv_pool(
        input=emb,
        num_filters=hid_dim,
        filter_size=3,
        act="tanh",
        pool_type="sqrt")
    conv_4 = fluid.nets.sequence_conv_pool(
        input=emb,
        num_filters=hid_dim,
        filter_size=4,
        act="tanh",
        pool_type="sqrt")
    prediction = fluid.layers.fc(input=[conv_3, conv_4],
                                 size=class_dim,
                                 act="softmax")
    return prediction


def inference_program(word_dict):
    data = fluid.layers.data(
        name="words", shape=[1], dtype="int64", lod_level=1)

    dict_dim = len(word_dict)
    net = convolution_net(data, dict_dim, CLASS_DIM, EMB_DIM, HID_DIM)
    return net


def train_program(word_dict):
    prediction = inference_program(word_dict)
    label = fluid.layers.data(name="label", shape=[1], dtype="int64")
    cost = fluid.layers.cross_entropy(input=prediction, label=label)
    avg_cost = fluid.layers.mean(cost)
    accuracy = fluid.layers.accuracy(input=prediction, label=label)
    return [avg_cost, accuracy]


def train(use_cuda, train_program, params_dirname):
    place = fluid.CUDAPlace(0) if use_cuda else fluid.CPUPlace()
    optimizer = fluid.optimizer.Adagrad(learning_rate=0.002)

    word_dict = paddle.dataset.imdb.word_dict()
    trainer = fluid.Trainer(
        train_func=partial(train_program, word_dict),
        place=place,
        optimizer=optimizer)

    def event_handler(event):
        if isinstance(event, fluid.EndEpochEvent):
            test_reader = paddle.batch(
                paddle.dataset.imdb.test(word_dict), batch_size=BATCH_SIZE)
            avg_cost, acc = trainer.test(
                reader=test_reader, feed_order=['words', 'label'])

            print("avg_cost: %s" % avg_cost)
            print("acc     : %s" % acc)

            if acc > 0.2:  # Smaller value to increase CI speed
                trainer.save_params(params_dirname)
                trainer.stop()
            else:
                print('BatchID {0}, Test Loss {1:0.2}, Acc {2:0.2}'.format(
                    event.epoch + 1, avg_cost, acc))
                if math.isnan(avg_cost):
                    sys.exit("got NaN loss, training failed.")
        elif isinstance(event, fluid.EndStepEvent):
            print("Step {0}, Epoch {1} Metrics {2}".format(
                event.step, event.epoch, map(np.array, event.metrics)))
            if event.step == 1:  # Run 2 iterations to speed CI
                trainer.save_params(params_dirname)
                trainer.stop()

    train_reader = paddle.batch(
        paddle.reader.shuffle(
            paddle.dataset.imdb.train(word_dict), buf_size=25000),
        batch_size=BATCH_SIZE)

    trainer.train(
        num_epochs=1,
        event_handler=event_handler,
        reader=train_reader,
        feed_order=['words', 'label'])


def infer(use_cuda, inference_program, params_dirname=None):
    place = fluid.CUDAPlace(0) if use_cuda else fluid.CPUPlace()
    word_dict = paddle.dataset.imdb.word_dict()

    inferencer = fluid.Inferencer(
        infer_func=partial(inference_program, word_dict),
        param_path=params_dirname,
        place=place)

    # Setup input by creating LoDTensor to represent sequence of words.
    # Here each word is the basic element of the LoDTensor and the shape of
    # each word (base_shape) should be [1] since it is simply an index to
    # look up for the corresponding word vector.
    # Suppose the length-based level of detail (lod) info is set to [[3, 4, 2]],
    # which has only one lod level. Then the created LoDTensor will have only
    # one higher level structure (sequence of words, or sentence) than the basic
    # element (word). Hence the LoDTensor will hold data for three sentences of
    # length 3, 4 and 2, respectively.
    # Note that lod info should be a list of lists.
    lod = [[3, 4, 2]]
    base_shape = [1]
    # The range of random integers is [low, high]
    tensor_words = fluid.create_random_int_lodtensor(
        lod, base_shape, place, low=0, high=len(word_dict) - 1)
    results = inferencer.infer({'words': tensor_words})
    print("infer results: ", results)


def main(use_cuda):
    if use_cuda and not fluid.core.is_compiled_with_cuda():
        return
    params_dirname = "understand_sentiment_conv.inference.model"
    train(use_cuda, train_program, params_dirname)
    infer(use_cuda, inference_program, params_dirname)


if __name__ == '__main__':
    for use_cuda in (False, True):
        main(use_cuda=use_cuda)
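
# A hedged, concrete illustration (not part of the original test) of the
# [[3, 4, 2]] LoD convention described in infer(): three made-up sentences of
# word ids packed into a single tensor.
def _lod_demo(place):
    # s0 = [8, 2, 5] (len 3), s1 = [7, 1, 4, 9] (len 4), s2 = [3, 6] (len 2)
    data = np.array(
        [[8], [2], [5], [7], [1], [4], [9], [3], [6]], dtype='int64')
    # one length-based LoD level groups the 9 rows back into 3 sentences
    return fluid.create_lod_tensor(data, [[3, 4, 2]], place)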
5,546
35.019481
81
py
Paddle
Paddle-master/python/paddle/fluid/tests/book/high-level-api/fit_a_line/test_fit_a_line.py
# Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import paddle import paddle.fluid as fluid import contextlib import numpy import unittest # train reader BATCH_SIZE = 20 train_reader = paddle.batch( paddle.reader.shuffle( paddle.dataset.uci_housing.train(), buf_size=500), batch_size=BATCH_SIZE) test_reader = paddle.batch( paddle.reader.shuffle( paddle.dataset.uci_housing.test(), buf_size=500), batch_size=BATCH_SIZE) def inference_program(): x = fluid.layers.data(name='x', shape=[13], dtype='float32') y_predict = fluid.layers.fc(input=x, size=1, act=None) return y_predict def linear(): y = fluid.layers.data(name='y', shape=[1], dtype='float32') y_predict = inference_program() loss = fluid.layers.square_error_cost(input=y_predict, label=y) avg_loss = fluid.layers.mean(loss) return avg_loss def train(use_cuda, train_program, params_dirname): place = fluid.CUDAPlace(0) if use_cuda else fluid.CPUPlace() trainer = fluid.Trainer( train_func=train_program, place=place, optimizer=fluid.optimizer.SGD(learning_rate=0.001)) def event_handler(event): if isinstance(event, fluid.EndStepEvent): if event.step == 10: test_metrics = trainer.test( reader=test_reader, feed_order=['x', 'y']) print test_metrics ''' ... ['25.768919467926025'] ['15.343549569447836'] ... ''' if params_dirname is not None: trainer.save_params(params_dirname) trainer.stop() trainer.train( reader=train_reader, num_epochs=100, event_handler=event_handler, feed_order=['x', 'y']) # infer def infer(use_cuda, inference_program, params_dirname=None): if params_dirname is None: return place = fluid.CUDAPlace(0) if use_cuda else fluid.CPUPlace() inferencer = fluid.Inferencer( infer_func=inference_program, param_path=params_dirname, place=place) batch_size = 10 tensor_x = numpy.random.uniform(0, 10, [batch_size, 13]).astype("float32") results = inferencer.infer({'x': tensor_x}) print("infer results: ", results[0]) def main(use_cuda): if use_cuda and not fluid.core.is_compiled_with_cuda(): return # Directory for saving the trained model params_dirname = "fit_a_line.inference.model" train(use_cuda, linear, params_dirname) infer(use_cuda, inference_program, params_dirname) class TestFitALine(unittest.TestCase): def test_cpu(self): with self.program_scope_guard(): with fluid.unique_name.guard(): main(use_cuda=False) def test_cuda(self): with self.program_scope_guard(): with fluid.unique_name.guard(): main(use_cuda=True) @contextlib.contextmanager def program_scope_guard(self): prog = fluid.Program() startup_prog = fluid.Program() scope = fluid.core.Scope() with fluid.scope_guard(scope): with fluid.program_guard(prog, startup_prog): yield if __name__ == '__main__': unittest.main()
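
# A hedged sketch (not part of the original test): infer() above uses uniform
# random rows; this variant compares the model's output against the
# ground-truth price of one real UCI-housing row, through the same Inferencer
# API used elsewhere in this file.
def _infer_real_sample(params_dirname="fit_a_line.inference.model",
                       use_cuda=False):
    place = fluid.CUDAPlace(0) if use_cuda else fluid.CPUPlace()
    inferencer = fluid.Inferencer(
        infer_func=inference_program, param_path=params_dirname, place=place)
    features, price = next(paddle.dataset.uci_housing.test()())
    x = numpy.array(features, dtype='float32').reshape([1, 13])
    results = inferencer.infer({'x': x})
    print("predicted: %s, actual: %s" % (results[0][0][0], price))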
3,843
28.121212
78
py
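A small sketch of what the trainer consumes, reusing the train_reader defined in the record above (the shapes follow the uci_housing dataset):

batch = next(train_reader())   # a list of BATCH_SIZE (features, price) pairs
features, price = batch[0]
assert len(features) == 13     # matches the 'x' data layer
assert len(price) == 1         # matches the 'y' label layer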
Paddle
Paddle-master/python/paddle/fluid/tests/book/high-level-api/image_classification/test_image_classification_vgg.py
# Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

from __future__ import print_function

import paddle
import paddle.fluid as fluid
import numpy
import cifar10_small_test_set


def vgg16_bn_drop(input):
    def conv_block(input, num_filter, groups, dropouts):
        return fluid.nets.img_conv_group(
            input=input,
            pool_size=2,
            pool_stride=2,
            conv_num_filter=[num_filter] * groups,
            conv_filter_size=3,
            conv_act='relu',
            conv_with_batchnorm=True,
            conv_batchnorm_drop_rate=dropouts,
            pool_type='max')

    conv1 = conv_block(input, 64, 2, [0.3, 0])
    conv2 = conv_block(conv1, 128, 2, [0.4, 0])
    conv3 = conv_block(conv2, 256, 3, [0.4, 0.4, 0])
    conv4 = conv_block(conv3, 512, 3, [0.4, 0.4, 0])
    conv5 = conv_block(conv4, 512, 3, [0.4, 0.4, 0])

    drop = fluid.layers.dropout(x=conv5, dropout_prob=0.5)
    fc1 = fluid.layers.fc(input=drop, size=4096, act=None)
    bn = fluid.layers.batch_norm(input=fc1, act='relu')
    drop2 = fluid.layers.dropout(x=bn, dropout_prob=0.5)
    fc2 = fluid.layers.fc(input=drop2, size=4096, act=None)
    predict = fluid.layers.fc(input=fc2, size=10, act='softmax')
    return predict


def inference_network():
    data_shape = [3, 32, 32]
    images = fluid.layers.data(name='pixel', shape=data_shape, dtype='float32')
    predict = vgg16_bn_drop(images)
    return predict


def train_network():
    predict = inference_network()
    label = fluid.layers.data(name='label', shape=[1], dtype='int64')
    cost = fluid.layers.cross_entropy(input=predict, label=label)
    avg_cost = fluid.layers.mean(cost)
    accuracy = fluid.layers.accuracy(input=predict, label=label)
    return [avg_cost, accuracy]


def train(use_cuda, train_program, params_dirname):
    BATCH_SIZE = 128
    train_reader = paddle.batch(
        paddle.reader.shuffle(
            cifar10_small_test_set.train10(batch_size=10), buf_size=128 * 10),
        batch_size=BATCH_SIZE)

    test_reader = paddle.batch(
        paddle.dataset.cifar.test10(), batch_size=BATCH_SIZE)

    def event_handler(event):
        if isinstance(event, fluid.EndStepEvent):
            avg_cost, accuracy = trainer.test(
                reader=test_reader, feed_order=['pixel', 'label'])
            print('Loss {0:2.2}, Acc {1:2.2}'.format(avg_cost, accuracy))

            if accuracy > 0.01:  # Low threshold for speeding up CI
                if params_dirname is not None:
                    trainer.save_params(params_dirname)
                return

    place = fluid.CUDAPlace(0) if use_cuda else fluid.CPUPlace()
    trainer = fluid.Trainer(
        train_func=train_program,
        place=place,
        optimizer=fluid.optimizer.Adam(learning_rate=0.001))

    trainer.train(
        reader=train_reader,
        num_epochs=1,
        event_handler=event_handler,
        feed_order=['pixel', 'label'])


def infer(use_cuda, inference_program, params_dirname=None):
    place = fluid.CUDAPlace(0) if use_cuda else fluid.CPUPlace()
    inferencer = fluid.Inferencer(
        infer_func=inference_program, param_path=params_dirname, place=place)

    # The input to the conv layer should be a 4-D or 5-D tensor.
    # Use normalized image pixels as input data, which should be in the range
    # [0, 1.0].
tensor_img = numpy.random.rand(1, 3, 32, 32).astype("float32") results = inferencer.infer({'pixel': tensor_img}) print("infer results: ", results) def main(use_cuda): if use_cuda and not fluid.core.is_compiled_with_cuda(): return save_path = "image_classification_vgg.inference.model" train( use_cuda=use_cuda, train_program=train_network, params_dirname=save_path) infer( use_cuda=use_cuda, inference_program=inference_network, params_dirname=save_path) if __name__ == '__main__': for use_cuda in (False, True): main(use_cuda=use_cuda)
4,520
32.488889
79
py
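One detail worth noting from the record above: each conv_block ends in a 2x2 max-pool with stride 2, so the five blocks shrink CIFAR's 32x32 images to a 1x1 feature map before the fc layers. A quick check of that arithmetic:

size = 32
for _ in range(5):   # five VGG blocks, each with a stride-2 pool
    size //= 2
assert size == 1     # conv5 output is [512, 1, 1] per image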
Paddle
Paddle-master/python/paddle/fluid/tests/book/high-level-api/image_classification/test_image_classification_resnet.py
# Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. from __future__ import print_function import paddle import paddle.fluid as fluid import numpy import cifar10_small_test_set def resnet_cifar10(input, depth=32): def conv_bn_layer(input, ch_out, filter_size, stride, padding, act='relu', bias_attr=False): tmp = fluid.layers.conv2d( input=input, filter_size=filter_size, num_filters=ch_out, stride=stride, padding=padding, act=None, bias_attr=bias_attr) return fluid.layers.batch_norm(input=tmp, act=act) def shortcut(input, ch_in, ch_out, stride): if ch_in != ch_out: return conv_bn_layer(input, ch_out, 1, stride, 0, None) else: return input def basicblock(input, ch_in, ch_out, stride): tmp = conv_bn_layer(input, ch_out, 3, stride, 1) tmp = conv_bn_layer(tmp, ch_out, 3, 1, 1, act=None, bias_attr=True) short = shortcut(input, ch_in, ch_out, stride) return fluid.layers.elementwise_add(x=tmp, y=short, act='relu') def layer_warp(block_func, input, ch_in, ch_out, count, stride): tmp = block_func(input, ch_in, ch_out, stride) for i in range(1, count): tmp = block_func(tmp, ch_out, ch_out, 1) return tmp assert (depth - 2) % 6 == 0 n = (depth - 2) / 6 conv1 = conv_bn_layer( input=input, ch_out=16, filter_size=3, stride=1, padding=1) res1 = layer_warp(basicblock, conv1, 16, 16, n, 1) res2 = layer_warp(basicblock, res1, 16, 32, n, 2) res3 = layer_warp(basicblock, res2, 32, 64, n, 2) pool = fluid.layers.pool2d( input=res3, pool_size=8, pool_type='avg', pool_stride=1) predict = fluid.layers.fc(input=pool, size=10, act='softmax') return predict def inference_network(): data_shape = [3, 32, 32] images = fluid.layers.data(name='pixel', shape=data_shape, dtype='float32') predict = resnet_cifar10(images, 32) return predict def train_network(): predict = inference_network() label = fluid.layers.data(name='label', shape=[1], dtype='int64') cost = fluid.layers.cross_entropy(input=predict, label=label) avg_cost = fluid.layers.mean(cost) accuracy = fluid.layers.accuracy(input=predict, label=label) return [avg_cost, accuracy] def train(use_cuda, train_program, params_dirname): BATCH_SIZE = 128 EPOCH_NUM = 1 train_reader = paddle.batch( paddle.reader.shuffle( cifar10_small_test_set.train10(batch_size=10), buf_size=128 * 10), batch_size=BATCH_SIZE) test_reader = paddle.batch( paddle.dataset.cifar.test10(), batch_size=BATCH_SIZE) def event_handler(event): if isinstance(event, fluid.EndStepEvent): avg_cost, accuracy = trainer.test( reader=test_reader, feed_order=['pixel', 'label']) print('Loss {0:2.2}, Acc {1:2.2}'.format(avg_cost, accuracy)) if accuracy > 0.01: # Low threshold for speeding up CI if params_dirname is not None: trainer.save_params(params_dirname) return place = fluid.CUDAPlace(0) if use_cuda else fluid.CPUPlace() trainer = fluid.Trainer( train_func=train_program, optimizer=fluid.optimizer.Adam(learning_rate=0.001), place=place) trainer.train( reader=train_reader, num_epochs=EPOCH_NUM, event_handler=event_handler, feed_order=['pixel', 'label']) def infer(use_cuda, 
inference_program, params_dirname=None):
    place = fluid.CUDAPlace(0) if use_cuda else fluid.CPUPlace()
    inferencer = fluid.Inferencer(
        infer_func=inference_program, param_path=params_dirname, place=place)

    # The input to the conv layer should be a 4-D or 5-D tensor.
    # Use normalized image pixels as input data, which should be in the range
    # [0, 1.0].
    tensor_img = numpy.random.rand(1, 3, 32, 32).astype("float32")
    results = inferencer.infer({'pixel': tensor_img})

    print("infer results: ", results)


def main(use_cuda):
    if use_cuda and not fluid.core.is_compiled_with_cuda():
        return

    save_path = "image_classification_resnet.inference.model"

    train(
        use_cuda=use_cuda,
        train_program=train_network,
        params_dirname=save_path)

    infer(
        use_cuda=use_cuda,
        inference_program=inference_network,
        params_dirname=save_path)


if __name__ == '__main__':
    for use_cuda in (False, True):
        main(use_cuda=use_cuda)
5,284
32.449367
79
py
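The assert in resnet_cifar10 above encodes the usual CIFAR ResNet depth rule: three stages of n basic blocks, two convs per block, plus the stem conv and the final fc give depth = 6n + 2. A quick check for the depth of 32 used here:

depth = 32
assert (depth - 2) % 6 == 0
n = (depth - 2) // 6      # 5 basic blocks per stage
assert 6 * n + 2 == depth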
Paddle
Paddle-master/python/paddle/fluid/tests/book/high-level-api/image_classification/cifar10_small_test_set.py
# Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """ CIFAR dataset. This module will download dataset from https://www.cs.toronto.edu/~kriz/cifar.html and parse train/test set into paddle reader creators. The CIFAR-10 dataset consists of 60000 32x32 colour images in 10 classes, with 6000 images per class. There are 50000 training images and 10000 test images. The CIFAR-100 dataset is just like the CIFAR-10, except it has 100 classes containing 600 images each. There are 500 training images and 100 testing images per class. """ import cPickle import itertools import numpy import paddle.v2.dataset.common import tarfile __all__ = ['train10'] URL_PREFIX = 'https://www.cs.toronto.edu/~kriz/' CIFAR10_URL = URL_PREFIX + 'cifar-10-python.tar.gz' CIFAR10_MD5 = 'c58f30108f718f92721af3b95e74349a' def reader_creator(filename, sub_name, batch_size=None): def read_batch(batch): data = batch['data'] labels = batch.get('labels', batch.get('fine_labels', None)) assert labels is not None for sample, label in itertools.izip(data, labels): yield (sample / 255.0).astype(numpy.float32), int(label) def reader(): with tarfile.open(filename, mode='r') as f: names = (each_item.name for each_item in f if sub_name in each_item.name) batch_count = 0 for name in names: batch = cPickle.load(f.extractfile(name)) for item in read_batch(batch): if isinstance(batch_size, int) and batch_count > batch_size: break batch_count += 1 yield item return reader def train10(batch_size=None): """ CIFAR-10 training set creator. It returns a reader creator, each sample in the reader is image pixels in [0, 1] and label in [0, 9]. :return: Training reader creator :rtype: callable """ return reader_creator( paddle.v2.dataset.common.download(CIFAR10_URL, 'cifar', CIFAR10_MD5), 'data_batch', batch_size=batch_size)
2,662
31.084337
80
py
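A minimal sketch of consuming the capped reader from the record above; it assumes the CIFAR-10 archive is already cached (the download helper fetches it on first use):

reader = train10(batch_size=10)
sample, label = next(reader())
assert sample.shape == (3072,)   # 3 x 32 x 32 pixels scaled to [0, 1]
assert 0 <= int(label) <= 9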
Paddle
Paddle-master/python/paddle/fluid/tests/book/high-level-api/recognize_digits/test_recognize_digits_mlp.py
# Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. from __future__ import print_function import argparse import paddle.fluid as fluid import paddle import sys import numpy import unittest import math import sys import os BATCH_SIZE = 64 def inference_program(): img = fluid.layers.data(name='img', shape=[1, 28, 28], dtype='float32') hidden = fluid.layers.fc(input=img, size=200, act='tanh') hidden = fluid.layers.fc(input=hidden, size=200, act='tanh') prediction = fluid.layers.fc(input=hidden, size=10, act='softmax') return prediction def train_program(): label = fluid.layers.data(name='label', shape=[1], dtype='int64') predict = inference_program() cost = fluid.layers.cross_entropy(input=predict, label=label) avg_cost = fluid.layers.mean(cost) acc = fluid.layers.accuracy(input=predict, label=label) return [avg_cost, acc] def train(use_cuda, train_program, params_dirname): place = fluid.CUDAPlace(0) if use_cuda else fluid.CPUPlace() optimizer = fluid.optimizer.Adam(learning_rate=0.001) trainer = fluid.Trainer( train_func=train_program, place=place, optimizer=optimizer) def event_handler(event): if isinstance(event, fluid.EndEpochEvent): test_reader = paddle.batch( paddle.dataset.mnist.test(), batch_size=BATCH_SIZE) avg_cost, acc = trainer.test( reader=test_reader, feed_order=['img', 'label']) print("avg_cost: %s" % avg_cost) print("acc : %s" % acc) if acc > 0.2: # Smaller value to increase CI speed trainer.save_params(params_dirname) else: print('BatchID {0}, Test Loss {1:0.2}, Acc {2:0.2}'.format( event.epoch + 1, avg_cost, acc)) if math.isnan(avg_cost): sys.exit("got NaN loss, training failed.") train_reader = paddle.batch( paddle.reader.shuffle( paddle.dataset.mnist.train(), buf_size=500), batch_size=BATCH_SIZE) trainer.train( num_epochs=1, event_handler=event_handler, reader=train_reader, feed_order=['img', 'label']) def infer(use_cuda, inference_program, params_dirname=None): place = fluid.CUDAPlace(0) if use_cuda else fluid.CPUPlace() inferencer = fluid.Inferencer( infer_func=inference_program, param_path=params_dirname, place=place) batch_size = 1 tensor_img = numpy.random.uniform(-1.0, 1.0, [batch_size, 1, 28, 28]).astype("float32") results = inferencer.infer({'img': tensor_img}) print("infer results: ", results[0]) def main(use_cuda): params_dirname = "recognize_digits_mlp.inference.model" # call train() with is_local argument to run distributed train train( use_cuda=use_cuda, train_program=train_program, params_dirname=params_dirname) infer( use_cuda=use_cuda, inference_program=inference_program, params_dirname=params_dirname) if __name__ == '__main__': # for use_cuda in (False, True): main(use_cuda=False)
3,731
31.172414
80
py
Paddle
Paddle-master/python/paddle/fluid/tests/book/high-level-api/recognize_digits/test_recognize_digits_conv.py
# Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. from __future__ import print_function import argparse import paddle.fluid as fluid import paddle import sys import numpy import unittest import math import sys import os BATCH_SIZE = 64 def inference_program(): img = fluid.layers.data(name='img', shape=[1, 28, 28], dtype='float32') conv_pool_1 = fluid.nets.simple_img_conv_pool( input=img, filter_size=5, num_filters=20, pool_size=2, pool_stride=2, act="relu") conv_pool_1 = fluid.layers.batch_norm(conv_pool_1) conv_pool_2 = fluid.nets.simple_img_conv_pool( input=conv_pool_1, filter_size=5, num_filters=50, pool_size=2, pool_stride=2, act="relu") prediction = fluid.layers.fc(input=conv_pool_2, size=10, act='softmax') return prediction def train_program(): label = fluid.layers.data(name='label', shape=[1], dtype='int64') predict = inference_program() cost = fluid.layers.cross_entropy(input=predict, label=label) avg_cost = fluid.layers.mean(cost) acc = fluid.layers.accuracy(input=predict, label=label) return [avg_cost, acc] def train(use_cuda, train_program, params_dirname): place = fluid.CUDAPlace(0) if use_cuda else fluid.CPUPlace() optimizer = fluid.optimizer.Adam(learning_rate=0.001) trainer = fluid.Trainer( train_func=train_program, place=place, optimizer=optimizer, parallel=True) def event_handler(event): if isinstance(event, fluid.EndEpochEvent): test_reader = paddle.batch( paddle.dataset.mnist.test(), batch_size=BATCH_SIZE) avg_cost, acc = trainer.test( reader=test_reader, feed_order=['img', 'label']) print("avg_cost: %s" % avg_cost) print("acc : %s" % acc) if acc > 0.2: # Smaller value to increase CI speed trainer.save_params(params_dirname) else: print('BatchID {0}, Test Loss {1:0.2}, Acc {2:0.2}'.format( event.epoch + 1, avg_cost, acc)) if math.isnan(avg_cost): sys.exit("got NaN loss, training failed.") elif isinstance(event, fluid.EndStepEvent): print("Step {0}, Epoch {1} Metrics {2}".format( event.step, event.epoch, map(numpy.array, event.metrics))) train_reader = paddle.batch( paddle.reader.shuffle( paddle.dataset.mnist.train(), buf_size=500), batch_size=BATCH_SIZE) trainer.train( num_epochs=1, event_handler=event_handler, reader=train_reader, feed_order=['img', 'label']) def infer(use_cuda, inference_program, params_dirname=None): place = fluid.CUDAPlace(0) if use_cuda else fluid.CPUPlace() inferencer = fluid.Inferencer( infer_func=inference_program, param_path=params_dirname, place=place) batch_size = 1 tensor_img = numpy.random.uniform(-1.0, 1.0, [batch_size, 1, 28, 28]).astype("float32") results = inferencer.infer({'img': tensor_img}) print("infer results: ", results[0]) def main(use_cuda): params_dirname = "recognize_digits_conv.inference.model" # call train() with is_local argument to run distributed train train( use_cuda=use_cuda, train_program=train_program, params_dirname=params_dirname) infer( use_cuda=use_cuda, inference_program=inference_program, params_dirname=params_dirname) if __name__ == '__main__': # for use_cuda in 
(False, True): main(use_cuda=True)
4,260
30.562963
80
py
Paddle
Paddle-master/python/paddle/fluid/tests/book/high-level-api/recommender_system/test_recommender_system_newapi.py
# Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

import math
import sys
import numpy as np
import paddle
import paddle.fluid as fluid
import paddle.fluid.layers as layers
import paddle.fluid.nets as nets

IS_SPARSE = True
USE_GPU = False
BATCH_SIZE = 256


def get_usr_combined_features():
    # FIXME(dzh) : old API integer_value(10) may have range check.
    # currently we don't have a user-configured check.

    USR_DICT_SIZE = paddle.dataset.movielens.max_user_id() + 1

    uid = layers.data(name='user_id', shape=[1], dtype='int64')

    usr_emb = layers.embedding(
        input=uid,
        dtype='float32',
        size=[USR_DICT_SIZE, 32],
        param_attr='user_table',
        is_sparse=IS_SPARSE)

    usr_fc = layers.fc(input=usr_emb, size=32)

    USR_GENDER_DICT_SIZE = 2

    usr_gender_id = layers.data(name='gender_id', shape=[1], dtype='int64')

    usr_gender_emb = layers.embedding(
        input=usr_gender_id,
        size=[USR_GENDER_DICT_SIZE, 16],
        param_attr='gender_table',
        is_sparse=IS_SPARSE)

    usr_gender_fc = layers.fc(input=usr_gender_emb, size=16)

    USR_AGE_DICT_SIZE = len(paddle.dataset.movielens.age_table)
    usr_age_id = layers.data(name='age_id', shape=[1], dtype="int64")

    usr_age_emb = layers.embedding(
        input=usr_age_id,
        size=[USR_AGE_DICT_SIZE, 16],
        is_sparse=IS_SPARSE,
        param_attr='age_table')

    usr_age_fc = layers.fc(input=usr_age_emb, size=16)

    USR_JOB_DICT_SIZE = paddle.dataset.movielens.max_job_id() + 1
    usr_job_id = layers.data(name='job_id', shape=[1], dtype="int64")

    usr_job_emb = layers.embedding(
        input=usr_job_id,
        size=[USR_JOB_DICT_SIZE, 16],
        param_attr='job_table',
        is_sparse=IS_SPARSE)

    usr_job_fc = layers.fc(input=usr_job_emb, size=16)

    concat_embed = layers.concat(
        input=[usr_fc, usr_gender_fc, usr_age_fc, usr_job_fc], axis=1)

    usr_combined_features = layers.fc(input=concat_embed, size=200, act="tanh")

    return usr_combined_features


def get_mov_combined_features():

    MOV_DICT_SIZE = paddle.dataset.movielens.max_movie_id() + 1

    mov_id = layers.data(name='movie_id', shape=[1], dtype='int64')

    mov_emb = layers.embedding(
        input=mov_id,
        dtype='float32',
        size=[MOV_DICT_SIZE, 32],
        param_attr='movie_table',
        is_sparse=IS_SPARSE)

    mov_fc = layers.fc(input=mov_emb, size=32)

    CATEGORY_DICT_SIZE = len(paddle.dataset.movielens.movie_categories())

    category_id = layers.data(
        name='category_id', shape=[1], dtype='int64', lod_level=1)

    mov_categories_emb = layers.embedding(
        input=category_id, size=[CATEGORY_DICT_SIZE, 32], is_sparse=IS_SPARSE)

    mov_categories_hidden = layers.sequence_pool(
        input=mov_categories_emb, pool_type="sum")

    MOV_TITLE_DICT_SIZE = len(paddle.dataset.movielens.get_movie_title_dict())

    mov_title_id = layers.data(
        name='movie_title', shape=[1], dtype='int64', lod_level=1)

    mov_title_emb = layers.embedding(
        input=mov_title_id, size=[MOV_TITLE_DICT_SIZE, 32], is_sparse=IS_SPARSE)

    mov_title_conv = nets.sequence_conv_pool(
        input=mov_title_emb,
        num_filters=32,
        filter_size=3,
        act="tanh",
        pool_type="sum")

    concat_embed = layers.concat(
        input=[mov_fc, mov_categories_hidden, mov_title_conv], axis=1)

    # FIXME(dzh) : need tanh operator
mov_combined_features = layers.fc(input=concat_embed, size=200, act="tanh") return mov_combined_features def inference_program(): usr_combined_features = get_usr_combined_features() mov_combined_features = get_mov_combined_features() inference = layers.cos_sim(X=usr_combined_features, Y=mov_combined_features) scale_infer = layers.scale(x=inference, scale=5.0) return scale_infer def train_program(): scale_infer = inference_program() label = layers.data(name='score', shape=[1], dtype='float32') square_cost = layers.square_error_cost(input=scale_infer, label=label) avg_cost = layers.mean(square_cost) return [avg_cost, scale_infer] def train(use_cuda, train_program, params_dirname): place = fluid.CUDAPlace(0) if use_cuda else fluid.CPUPlace() optimizer = fluid.optimizer.SGD(learning_rate=0.2) trainer = fluid.Trainer( train_func=train_program, place=place, optimizer=optimizer) feed_order = [ 'user_id', 'gender_id', 'age_id', 'job_id', 'movie_id', 'category_id', 'movie_title', 'score' ] def event_handler(event): if isinstance(event, fluid.EndStepEvent): test_reader = paddle.batch( paddle.dataset.movielens.test(), batch_size=BATCH_SIZE) avg_cost_set = trainer.test( reader=test_reader, feed_order=feed_order) # get avg cost avg_cost = np.array(avg_cost_set).mean() print("avg_cost: %s" % avg_cost) if float(avg_cost) < 4: # Smaller value to increase CI speed trainer.save_params(params_dirname) trainer.stop() else: print('BatchID {0}, Test Loss {1:0.2}'.format(event.epoch + 1, float(avg_cost))) if math.isnan(float(avg_cost)): sys.exit("got NaN loss, training failed.") train_reader = paddle.batch( paddle.reader.shuffle( paddle.dataset.movielens.train(), buf_size=8192), batch_size=BATCH_SIZE) trainer.train( num_epochs=1, event_handler=event_handler, reader=train_reader, feed_order=feed_order) def infer(use_cuda, inference_program, params_dirname): place = fluid.CUDAPlace(0) if use_cuda else fluid.CPUPlace() inferencer = fluid.Inferencer( inference_program, param_path=params_dirname, place=place) # Use the first data from paddle.dataset.movielens.test() as input. # Use create_lod_tensor(data, lod, place) API to generate LoD Tensor, # where `data` is a list of sequences of index numbers, `lod` is # the level of detail (lod) info associated with `data`. # For example, data = [[10, 2, 3], [2, 3]] means that it contains # two sequences of indexes, of length 3 and 2, respectively. # Correspondingly, lod = [[3, 2]] contains one level of detail info, # indicating that `data` consists of two sequences of length 3 and 2. 
user_id = fluid.create_lod_tensor([[1]], [[1]], place) gender_id = fluid.create_lod_tensor([[1]], [[1]], place) age_id = fluid.create_lod_tensor([[0]], [[1]], place) job_id = fluid.create_lod_tensor([[10]], [[1]], place) movie_id = fluid.create_lod_tensor([[783]], [[1]], place) category_id = fluid.create_lod_tensor([[10, 8, 9]], [[3]], place) movie_title = fluid.create_lod_tensor([[1069, 4140, 2923, 710, 988]], [[5]], place) results = inferencer.infer( { 'user_id': user_id, 'gender_id': gender_id, 'age_id': age_id, 'job_id': job_id, 'movie_id': movie_id, 'category_id': category_id, 'movie_title': movie_title }, return_numpy=False) print("infer results: ", np.array(results[0])) def main(use_cuda): if use_cuda and not fluid.core.is_compiled_with_cuda(): return params_dirname = "recommender_system.inference.model" train( use_cuda=use_cuda, train_program=train_program, params_dirname=params_dirname) infer( use_cuda=use_cuda, inference_program=inference_program, params_dirname=params_dirname) if __name__ == '__main__': main(USE_GPU)
8,367
31.6875
80
py
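The create_lod_tensor comments in the record above generalize to batches: variable-length inputs such as category_id can carry several sequences in one tensor. A small sketch with made-up ids for two movies having 3 and 2 categories:

import paddle.fluid as fluid

place = fluid.CPUPlace()
category_id = fluid.create_lod_tensor(
    [[10, 8, 9], [4, 5]],  # two sequences of category indexes
    [[3, 2]],              # one lod level: lengths 3 and 2
    place)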
Paddle
Paddle-master/python/paddle/fluid/tests/book/high-level-api/machine_translation/test_machine_translation.py
# Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import contextlib import numpy as np import paddle import paddle.fluid as fluid import paddle.fluid.framework as framework import paddle.fluid.layers as pd from paddle.fluid.executor import Executor from functools import partial import unittest import os dict_size = 30000 source_dict_dim = target_dict_dim = dict_size hidden_dim = 32 word_dim = 16 batch_size = 2 max_length = 8 topk_size = 50 trg_dic_size = 10000 beam_size = 2 decoder_size = hidden_dim def encoder(is_sparse): # encoder src_word_id = pd.data( name="src_word_id", shape=[1], dtype='int64', lod_level=1) src_embedding = pd.embedding( input=src_word_id, size=[dict_size, word_dim], dtype='float32', is_sparse=is_sparse, param_attr=fluid.ParamAttr(name='vemb')) fc1 = pd.fc(input=src_embedding, size=hidden_dim * 4, act='tanh') lstm_hidden0, lstm_0 = pd.dynamic_lstm(input=fc1, size=hidden_dim * 4) encoder_out = pd.sequence_last_step(input=lstm_hidden0) return encoder_out def train_decoder(context, is_sparse): # decoder trg_language_word = pd.data( name="target_language_word", shape=[1], dtype='int64', lod_level=1) trg_embedding = pd.embedding( input=trg_language_word, size=[dict_size, word_dim], dtype='float32', is_sparse=is_sparse, param_attr=fluid.ParamAttr(name='vemb')) rnn = pd.DynamicRNN() with rnn.block(): current_word = rnn.step_input(trg_embedding) pre_state = rnn.memory(init=context) current_state = pd.fc(input=[current_word, pre_state], size=decoder_size, act='tanh') current_score = pd.fc(input=current_state, size=target_dict_dim, act='softmax') rnn.update_memory(pre_state, current_state) rnn.output(current_score) return rnn() def decode(context, is_sparse): init_state = context array_len = pd.fill_constant(shape=[1], dtype='int64', value=max_length) counter = pd.zeros(shape=[1], dtype='int64', force_cpu=True) # fill the first element with init_state state_array = pd.create_array('float32') pd.array_write(init_state, array=state_array, i=counter) # ids, scores as memory ids_array = pd.create_array('int64') scores_array = pd.create_array('float32') init_ids = pd.data(name="init_ids", shape=[1], dtype="int64", lod_level=2) init_scores = pd.data( name="init_scores", shape=[1], dtype="float32", lod_level=2) pd.array_write(init_ids, array=ids_array, i=counter) pd.array_write(init_scores, array=scores_array, i=counter) cond = pd.less_than(x=counter, y=array_len) while_op = pd.While(cond=cond) with while_op.block(): pre_ids = pd.array_read(array=ids_array, i=counter) pre_state = pd.array_read(array=state_array, i=counter) pre_score = pd.array_read(array=scores_array, i=counter) # expand the lod of pre_state to be the same with pre_score pre_state_expanded = pd.sequence_expand(pre_state, pre_score) pre_ids_emb = pd.embedding( input=pre_ids, size=[dict_size, word_dim], dtype='float32', is_sparse=is_sparse) # use rnn unit to update rnn current_state = pd.fc(input=[pre_state_expanded, pre_ids_emb], size=decoder_size, act='tanh') 
current_state_with_lod = pd.lod_reset(x=current_state, y=pre_score) # use score to do beam search current_score = pd.fc(input=current_state_with_lod, size=target_dict_dim, act='softmax') topk_scores, topk_indices = pd.topk(current_score, k=topk_size) selected_ids, selected_scores = pd.beam_search( pre_ids, topk_indices, topk_scores, beam_size, end_id=10, level=0) pd.increment(x=counter, value=1, in_place=True) # update the memories pd.array_write(current_state, array=state_array, i=counter) pd.array_write(selected_ids, array=ids_array, i=counter) pd.array_write(selected_scores, array=scores_array, i=counter) pd.less_than(x=counter, y=array_len, cond=cond) translation_ids, translation_scores = pd.beam_search_decode( ids=ids_array, scores=scores_array) # return init_ids, init_scores return translation_ids, translation_scores def train_program(is_sparse): context = encoder(is_sparse) rnn_out = train_decoder(context, is_sparse) label = pd.data( name="target_language_next_word", shape=[1], dtype='int64', lod_level=1) cost = pd.cross_entropy(input=rnn_out, label=label) avg_cost = pd.mean(cost) return avg_cost def train(use_cuda, is_sparse, is_local=True): EPOCH_NUM = 1 if use_cuda and not fluid.core.is_compiled_with_cuda(): return place = fluid.CUDAPlace(0) if use_cuda else fluid.CPUPlace() train_reader = paddle.batch( paddle.reader.shuffle( paddle.dataset.wmt14.train(dict_size), buf_size=1000), batch_size=batch_size) feed_order = [ 'src_word_id', 'target_language_word', 'target_language_next_word' ] def event_handler(event): if isinstance(event, fluid.EndStepEvent): print('pass_id=' + str(event.epoch) + ' batch=' + str(event.step)) if event.step == 10: trainer.stop() trainer = fluid.Trainer( train_func=partial(train_program, is_sparse), optimizer=fluid.optimizer.Adagrad( learning_rate=1e-4, regularization=fluid.regularizer.L2DecayRegularizer( regularization_coeff=0.1)), place=place) trainer.train( reader=train_reader, num_epochs=EPOCH_NUM, event_handler=event_handler, feed_order=feed_order) def decode_main(use_cuda, is_sparse): if use_cuda and not fluid.core.is_compiled_with_cuda(): return place = fluid.CUDAPlace(0) if use_cuda else fluid.CPUPlace() context = encoder(is_sparse) translation_ids, translation_scores = decode(context, is_sparse) exe = Executor(place) exe.run(framework.default_startup_program()) init_ids_data = np.array([1 for _ in range(batch_size)], dtype='int64') init_scores_data = np.array( [1. 
for _ in range(batch_size)], dtype='float32') init_ids_data = init_ids_data.reshape((batch_size, 1)) init_scores_data = init_scores_data.reshape((batch_size, 1)) init_lod = [1] * batch_size init_lod = [init_lod, init_lod] init_ids = fluid.create_lod_tensor(init_ids_data, init_lod, place) init_scores = fluid.create_lod_tensor(init_scores_data, init_lod, place) train_data = paddle.batch( paddle.reader.shuffle( paddle.dataset.wmt14.train(dict_size), buf_size=1000), batch_size=batch_size) feed_order = ['src_word_id'] feed_list = [ framework.default_main_program().global_block().var(var_name) for var_name in feed_order ] feeder = fluid.DataFeeder(feed_list, place) for data in train_data(): feed_dict = feeder.feed(map(lambda x: [x[0]], data)) feed_dict['init_ids'] = init_ids feed_dict['init_scores'] = init_scores result_ids, result_scores = exe.run( framework.default_main_program(), feed=feed_dict, fetch_list=[translation_ids, translation_scores], return_numpy=False) print result_ids.lod() break class TestMachineTranslation(unittest.TestCase): pass @contextlib.contextmanager def scope_prog_guard(): prog = fluid.Program() startup_prog = fluid.Program() scope = fluid.core.Scope() with fluid.scope_guard(scope): with fluid.program_guard(prog, startup_prog): yield def inject_test_train(use_cuda, is_sparse): f_name = 'test_{0}_{1}_train'.format('cuda' if use_cuda else 'cpu', 'sparse' if is_sparse else 'dense') def f(*args): with scope_prog_guard(): train(use_cuda, is_sparse) setattr(TestMachineTranslation, f_name, f) def inject_test_decode(use_cuda, is_sparse, decorator=None): f_name = 'test_{0}_{1}_decode'.format('cuda' if use_cuda else 'cpu', 'sparse' if is_sparse else 'dense') def f(*args): with scope_prog_guard(): decode_main(use_cuda, is_sparse) if decorator is not None: f = decorator(f) setattr(TestMachineTranslation, f_name, f) for _use_cuda_ in (False, True): for _is_sparse_ in (False, True): inject_test_train(_use_cuda_, _is_sparse_) for _use_cuda_ in (False, True): for _is_sparse_ in (False, True): _decorator_ = None if _use_cuda_: _decorator_ = unittest.skip( reason='Beam Search does not support CUDA!') inject_test_decode( is_sparse=_is_sparse_, use_cuda=_use_cuda_, decorator=_decorator_) if __name__ == '__main__': unittest.main()
9,890
31.643564
80
py
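The decode path in the record above seeds beam search with a 2-level LoD: candidates per source sentence, then tokens per candidate. A sketch of that initial structure for batch_size = 2, mirroring decode_main:

import numpy as np
import paddle.fluid as fluid

batch_size = 2
init_ids_data = np.ones((batch_size, 1), dtype='int64')  # one start token per sample
init_lod = [[1] * batch_size, [1] * batch_size]          # 1 candidate holding 1 token each
init_ids = fluid.create_lod_tensor(init_ids_data, init_lod, fluid.CPUPlace())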
Paddle
Paddle-master/python/paddle/fluid/tests/book_memory_optimization/test_memopt_machine_translation.py
# Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import numpy as np import paddle import paddle.fluid as fluid import paddle.fluid.core as core import paddle.fluid.framework as framework import paddle.fluid.layers as layers from paddle.fluid.executor import Executor import math import sys dict_size = 30000 source_dict_dim = target_dict_dim = dict_size src_dict, trg_dict = paddle.dataset.wmt14.get_dict(dict_size) hidden_dim = 32 word_dim = 16 IS_SPARSE = True batch_size = 10 max_length = 50 topk_size = 50 trg_dic_size = 10000 decoder_size = hidden_dim # need to fix random seed and training data to compare the loss # value accurately calculated by the default and the memory optimization # version. fluid.default_startup_program().random_seed = 111 def encoder_decoder(): # encoder src_word_id = layers.data( name="src_word_id", shape=[1], dtype='int64', lod_level=1) src_embedding = layers.embedding( input=src_word_id, size=[dict_size, word_dim], dtype='float32', is_sparse=IS_SPARSE, param_attr=fluid.ParamAttr(name='vemb')) fc1 = fluid.layers.fc(input=src_embedding, size=hidden_dim * 4, act='tanh') lstm_hidden0, lstm_0 = layers.dynamic_lstm(input=fc1, size=hidden_dim * 4) encoder_out = layers.sequence_last_step(input=lstm_hidden0) # decoder trg_language_word = layers.data( name="target_language_word", shape=[1], dtype='int64', lod_level=1) trg_embedding = layers.embedding( input=trg_language_word, size=[dict_size, word_dim], dtype='float32', is_sparse=IS_SPARSE, param_attr=fluid.ParamAttr(name='vemb')) rnn = fluid.layers.DynamicRNN() with rnn.block(): current_word = rnn.step_input(trg_embedding) mem = rnn.memory(init=encoder_out) fc1 = fluid.layers.fc(input=[current_word, mem], size=decoder_size, act='tanh') out = fluid.layers.fc(input=fc1, size=target_dict_dim, act='softmax') rnn.update_memory(mem, fc1) rnn.output(out) return rnn() def to_lodtensor(data, place): seq_lens = [len(seq) for seq in data] cur_len = 0 lod = [cur_len] for l in seq_lens: cur_len += l lod.append(cur_len) flattened_data = np.concatenate(data, axis=0).astype("int64") flattened_data = flattened_data.reshape([len(flattened_data), 1]) res = core.LoDTensor() res.set(flattened_data, place) res.set_lod([lod]) return res def main(): rnn_out = encoder_decoder() label = layers.data( name="target_language_next_word", shape=[1], dtype='int64', lod_level=1) cost = layers.cross_entropy(input=rnn_out, label=label) avg_cost = fluid.layers.mean(cost) optimizer = fluid.optimizer.Adagrad(learning_rate=1e-4) optimizer.minimize(avg_cost) # fluid.memory_optimize(fluid.default_main_program()) fluid.release_memory(fluid.default_main_program()) # fix the order of training data train_data = paddle.batch( paddle.dataset.wmt14.train(dict_size), batch_size=batch_size) # train_data = paddle.batch( # paddle.reader.shuffle( # paddle.dataset.wmt14.train(dict_size), buf_size=1000), # batch_size=batch_size) place = core.CPUPlace() exe = Executor(place) exe.run(framework.default_startup_program()) batch_id = 0 
for pass_id in xrange(10): for data in train_data(): word_data = to_lodtensor(map(lambda x: x[0], data), place) trg_word = to_lodtensor(map(lambda x: x[1], data), place) trg_word_next = to_lodtensor(map(lambda x: x[2], data), place) outs = exe.run(fluid.default_main_program(), feed={ 'src_word_id': word_data, 'target_language_word': trg_word, 'target_language_next_word': trg_word_next }, fetch_list=[avg_cost]) avg_cost_val = np.array(outs[0]) print('pass_id=' + str(pass_id) + ' batch=' + str(batch_id) + " avg_cost=" + str(avg_cost_val)) if batch_id > 2: exit(0) if math.isnan(float(avg_cost_val)): sys.exit("got NaN loss, training failed.") batch_id += 1 if __name__ == '__main__': main()
5,038
32.593333
80
py
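Note that to_lodtensor in the record above builds the offset-based LoD form (cumulative boundaries), unlike the length-based [[3, 4, 2]] style passed to create_lod_tensor elsewhere in this collection. A quick illustration of the relationship:

seq_lens = [3, 2]     # lengths of two sequences
lod = [0]
for l in seq_lens:
    lod.append(lod[-1] + l)
assert lod == [0, 3, 5]   # sequence i spans rows [lod[i], lod[i+1])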
Paddle
Paddle-master/python/paddle/fluid/tests/book_memory_optimization/test_memopt_image_classification_train.py
# Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. from __future__ import print_function import sys import paddle import paddle.fluid as fluid import math import sys # need to fix random seed and training data to compare the loss # value accurately calculated by the default and the memory optimization # version. fluid.default_startup_program().random_seed = 111 def resnet_cifar10(input, depth=32): def conv_bn_layer(input, ch_out, filter_size, stride, padding, act='relu'): tmp = fluid.layers.conv2d( input=input, filter_size=filter_size, num_filters=ch_out, stride=stride, padding=padding, act=None, bias_attr=False) return fluid.layers.batch_norm(input=tmp, act=act) def shortcut(input, ch_in, ch_out, stride): if ch_in != ch_out: return conv_bn_layer(input, ch_out, 1, stride, 0, None) else: return input def basicblock(input, ch_in, ch_out, stride): tmp = conv_bn_layer(input, ch_out, 3, stride, 1) tmp = conv_bn_layer(tmp, ch_out, 3, 1, 1, act=None) short = shortcut(input, ch_in, ch_out, stride) return fluid.layers.elementwise_add(x=tmp, y=short, act='relu') def layer_warp(block_func, input, ch_in, ch_out, count, stride): tmp = block_func(input, ch_in, ch_out, stride) for i in range(1, count): tmp = block_func(tmp, ch_out, ch_out, 1) return tmp assert (depth - 2) % 6 == 0 n = (depth - 2) / 6 conv1 = conv_bn_layer( input=input, ch_out=16, filter_size=3, stride=1, padding=1) res1 = layer_warp(basicblock, conv1, 16, 16, n, 1) res2 = layer_warp(basicblock, res1, 16, 32, n, 2) res3 = layer_warp(basicblock, res2, 32, 64, n, 2) pool = fluid.layers.pool2d( input=res3, pool_size=8, pool_type='avg', pool_stride=1) return pool def vgg16_bn_drop(input): def conv_block(input, num_filter, groups, dropouts): return fluid.nets.img_conv_group( input=input, pool_size=2, pool_stride=2, conv_num_filter=[num_filter] * groups, conv_filter_size=3, conv_act='relu', conv_with_batchnorm=True, conv_batchnorm_drop_rate=dropouts, pool_type='max') conv1 = conv_block(input, 64, 2, [0.3, 0]) conv2 = conv_block(conv1, 128, 2, [0.4, 0]) conv3 = conv_block(conv2, 256, 3, [0.4, 0.4, 0]) conv4 = conv_block(conv3, 512, 3, [0.4, 0.4, 0]) conv5 = conv_block(conv4, 512, 3, [0.4, 0.4, 0]) drop = fluid.layers.dropout(x=conv5, dropout_prob=0.5) fc1 = fluid.layers.fc(input=drop, size=4096, act=None) bn = fluid.layers.batch_norm(input=fc1, act='relu') drop2 = fluid.layers.dropout(x=bn, dropout_prob=0.5) fc2 = fluid.layers.fc(input=drop2, size=4096, act=None) return fc2 classdim = 10 data_shape = [3, 32, 32] images = fluid.layers.data(name='pixel', shape=data_shape, dtype='float32') label = fluid.layers.data(name='label', shape=[1], dtype='int64') net_type = "vgg" if len(sys.argv) >= 2: net_type = sys.argv[1] if net_type == "vgg": print("train vgg net") net = vgg16_bn_drop(images) elif net_type == "resnet": print("train resnet") net = resnet_cifar10(images, 32) else: raise ValueError("%s network is not supported" % net_type) predict = fluid.layers.fc(input=net, size=classdim, act='softmax') cost = 
fluid.layers.cross_entropy(input=predict, label=label) avg_cost = fluid.layers.mean(cost) optimizer = fluid.optimizer.Adam(learning_rate=0.001) opts = optimizer.minimize(avg_cost) batch_size = fluid.layers.create_tensor(dtype='int64') batch_acc = fluid.layers.accuracy(input=predict, label=label, total=batch_size) # fluid.memory_optimize(fluid.default_main_program(), level=0) fluid.release_memory(fluid.default_main_program()) BATCH_SIZE = 16 PASS_NUM = 1 # fix the order of training data train_reader = paddle.batch( paddle.dataset.cifar.train10(), batch_size=BATCH_SIZE) # train_reader = paddle.batch( # paddle.reader.shuffle( # paddle.dataset.cifar.train10(), buf_size=128 * 10), # batch_size=BATCH_SIZE) place = fluid.CPUPlace() exe = fluid.Executor(place) feeder = fluid.DataFeeder(place=place, feed_list=[images, label]) exe.run(fluid.default_startup_program()) i = 0 accuracy = fluid.average.WeightedAverage() for pass_id in range(PASS_NUM): accuracy.reset() for data in train_reader(): loss, acc, weight = exe.run( fluid.default_main_program(), feed=feeder.feed(data), fetch_list=[avg_cost, batch_acc, batch_size]) accuracy.add(value=acc, weight=weight) pass_acc = accuracy.eval() print("loss:" + str(loss) + " acc:" + str(acc) + " pass_acc:" + str( pass_acc)) # this model is slow, so if we can train two mini batch, we think it works properly. if i > 0: exit(0) if math.isnan(float(loss)): sys.exit("got NaN loss, training failed.") i += 1 exit(1)
5,674
32.579882
92
py
Paddle
Paddle-master/python/paddle/fluid/tests/book_memory_optimization/test_memopt_fit_a_line.py
# Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import numpy as np import paddle import paddle.fluid as fluid import math import sys # need to fix random seed and training data to compare the loss # value accurately calculated by the default and the memory optimization # version. fluid.default_startup_program().random_seed = 111 x = fluid.layers.data(name='x', shape=[13], dtype='float32') y = fluid.layers.data(name='y', shape=[1], dtype='float32') device_type = 'CPU' use_nccl = False place = fluid.CPUPlace() if fluid.core.is_compiled_with_cuda(): device_type = 'CUDA' use_nccl = False place = fluid.CUDAPlace(0) places = fluid.layers.get_places(device_count=0, device_type=device_type) pd = fluid.layers.ParallelDo(places, use_nccl=use_nccl) with pd.do(): x_ = pd.read_input(x) y_ = pd.read_input(y) y_predict = fluid.layers.fc(input=x_, size=1, act=None) cost = fluid.layers.square_error_cost(input=y_predict, label=y_) avg_cost = fluid.layers.mean(x=cost) pd.write_output(avg_cost) cost = pd() avg_cost = fluid.layers.mean(x=cost) sgd_optimizer = fluid.optimizer.SGD(learning_rate=0.01) sgd_optimizer.minimize(avg_cost) fluid.memory_optimize(fluid.default_main_program(), print_log=True) # fluid.release_memory(fluid.default_main_program()) BATCH_SIZE = 200 # fix the order of training data train_reader = paddle.batch( paddle.dataset.uci_housing.train(), batch_size=BATCH_SIZE) # train_reader = paddle.batch( # paddle.reader.shuffle( # paddle.dataset.uci_housing.train(), buf_size=500), # batch_size=BATCH_SIZE) feeder = fluid.DataFeeder(place=place, feed_list=[x, y]) exe = fluid.Executor(place) exe.run(fluid.default_startup_program()) PASS_NUM = 100 for pass_id in range(PASS_NUM): for data in train_reader(): avg_loss_value, = exe.run(fluid.default_main_program(), feed=feeder.feed(data), fetch_list=[avg_cost]) if avg_loss_value[0] < 10.0: exit(0) # if avg cost less than 10.0, we think our code is good. print avg_loss_value[0] if math.isnan(float(avg_loss_value)): sys.exit("got NaN loss, training failed.") exit(1)
2,795
32.285714
77
py
Paddle
Paddle-master/python/paddle/fluid/tests/demo/fc_gan.py
# Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import errno import math import os import matplotlib import numpy import paddle import paddle.fluid as fluid matplotlib.use('Agg') import matplotlib.pyplot as plt import matplotlib.gridspec as gridspec NOISE_SIZE = 100 NUM_PASS = 1000 NUM_REAL_IMGS_IN_BATCH = 121 NUM_TRAIN_TIMES_OF_DG = 3 LEARNING_RATE = 2e-5 def D(x): hidden = fluid.layers.fc(input=x, size=200, act='relu', param_attr='D.w1', bias_attr='D.b1') logits = fluid.layers.fc(input=hidden, size=1, act=None, param_attr='D.w2', bias_attr='D.b2') return logits def G(x): hidden = fluid.layers.fc(input=x, size=200, act='relu', param_attr='G.w1', bias_attr='G.b1') img = fluid.layers.fc(input=hidden, size=28 * 28, act='tanh', param_attr='G.w2', bias_attr='G.b2') return img def plot(gen_data): gen_data.resize(gen_data.shape[0], 28, 28) n = int(math.ceil(math.sqrt(gen_data.shape[0]))) fig = plt.figure(figsize=(n, n)) gs = gridspec.GridSpec(n, n) gs.update(wspace=0.05, hspace=0.05) for i, sample in enumerate(gen_data): ax = plt.subplot(gs[i]) plt.axis('off') ax.set_xticklabels([]) ax.set_yticklabels([]) ax.set_aspect('equal') plt.imshow(sample.reshape(28, 28), cmap='Greys_r') return fig def main(): try: os.makedirs("./out") except OSError as e: if e.errno != errno.EEXIST: raise startup_program = fluid.Program() d_program = fluid.Program() dg_program = fluid.Program() with fluid.program_guard(d_program, startup_program): img = fluid.layers.data(name='img', shape=[784], dtype='float32') d_loss = fluid.layers.sigmoid_cross_entropy_with_logits( x=D(img), label=fluid.layers.data( name='label', shape=[1], dtype='float32')) d_loss = fluid.layers.mean(d_loss) with fluid.program_guard(dg_program, startup_program): noise = fluid.layers.data( name='noise', shape=[NOISE_SIZE], dtype='float32') g_img = G(x=noise) g_program = dg_program.clone() dg_loss = fluid.layers.sigmoid_cross_entropy_with_logits( x=D(g_img), label=fluid.layers.fill_constant_batch_size_like( input=noise, dtype='float32', shape=[-1, 1], value=1.0)) dg_loss = fluid.layers.mean(dg_loss) opt = fluid.optimizer.Adam(learning_rate=LEARNING_RATE) opt.minimize(loss=d_loss, startup_program=startup_program) opt.minimize( loss=dg_loss, startup_program=startup_program, parameter_list=[ p.name for p in g_program.global_block().all_parameters() ]) exe = fluid.Executor(fluid.CPUPlace()) exe.run(startup_program) num_true = NUM_REAL_IMGS_IN_BATCH train_reader = paddle.batch( paddle.reader.shuffle( paddle.dataset.mnist.train(), buf_size=60000), batch_size=num_true) for pass_id in range(NUM_PASS): for batch_id, data in enumerate(train_reader()): num_true = len(data) n = numpy.random.uniform( low=-1.0, high=1.0, size=[num_true * NOISE_SIZE]).astype('float32').reshape( [num_true, NOISE_SIZE]) generated_img = exe.run(g_program, feed={'noise': n}, fetch_list={g_img})[0] real_data = numpy.array(map(lambda x: x[0], data)).astype('float32') real_data = real_data.reshape(num_true, 784) total_data = 
numpy.concatenate([real_data, generated_img]) total_label = numpy.concatenate([ numpy.ones( shape=[real_data.shape[0], 1], dtype='float32'), numpy.zeros( shape=[real_data.shape[0], 1], dtype='float32') ]) d_loss_np = exe.run(d_program, feed={'img': total_data, 'label': total_label}, fetch_list={d_loss})[0] for _ in xrange(NUM_TRAIN_TIMES_OF_DG): n = numpy.random.uniform( low=-1.0, high=1.0, size=[2 * num_true * NOISE_SIZE]).astype('float32').reshape( [2 * num_true, NOISE_SIZE, 1, 1]) dg_loss_np = exe.run(dg_program, feed={'noise': n}, fetch_list={dg_loss})[0] print("Pass ID={0}, Batch ID={1}, D-Loss={2}, DG-Loss={3}".format( pass_id, batch_id, d_loss_np, dg_loss_np)) # generate image each batch fig = plot(generated_img) plt.savefig( 'out/{0}.png'.format(str(pass_id).zfill(3)), bbox_inches='tight') plt.close(fig) if __name__ == '__main__': main()
5,964
33.680233
80
py
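Both losses in the record above go through sigmoid_cross_entropy_with_logits. As a sanity check, here is the numerically stable identity that loss computes, written in plain numpy (a standard formula, not code from the file):

import numpy as np

x = np.array([0.5, -1.0])  # logits from D
z = np.array([1.0, 0.0])   # labels: real = 1, fake = 0
loss = np.maximum(x, 0) - x * z + np.log1p(np.exp(-np.abs(x)))
# equals -z*log(sigmoid(x)) - (1-z)*log(1-sigmoid(x)) elementwise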
Paddle
Paddle-master/python/paddle/fluid/tests/demo/text_classification/convert_data_to_recordio.py
# Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import sys import paddle.fluid as fluid import paddle.v2 as paddle def load_vocab(filename): """ load vocabulary """ vocab = {} with open(filename) as f: wid = 0 for line in f: vocab[line.strip()] = wid wid += 1 return vocab # load word dict with paddle inner function word_dict = load_vocab(sys.argv[1]) word_dict["<unk>"] = len(word_dict) print "Dict dim = ", len(word_dict) # input text data data = fluid.layers.data(name="words", shape=[1], dtype="int64", lod_level=1) # label data label = fluid.layers.data(name="label", shape=[1], dtype="int64") # like placeholder feeder = fluid.DataFeeder(feed_list=[data, label], place=fluid.CPUPlace()) # train data set BATCH_SIZE = 128 train_reader = paddle.batch( paddle.reader.shuffle( paddle.dataset.imdb.train(word_dict), buf_size=10000), batch_size=BATCH_SIZE) test_reader = paddle.batch( paddle.dataset.imdb.test(word_dict), batch_size=BATCH_SIZE) fluid.recordio_writer.convert_reader_to_recordio_file( "train.recordio", feeder=feeder, reader_creator=train_reader) fluid.recordio_writer.convert_reader_to_recordio_file( "test.recordio", feeder=feeder, reader_creator=test_reader)
1,844
29.75
77
py
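load_vocab in the record above expects one token per line and assigns ids in line order, with "<unk>" appended afterwards. A tiny self-contained sketch of the same mapping (the tokens are hypothetical):

tokens = ["the", "movie", "is"]
vocab = {}
for wid, w in enumerate(tokens):
    vocab[w.strip()] = wid
vocab["<unk>"] = len(vocab)
assert vocab == {'the': 0, 'movie': 1, 'is': 2, '<unk>': 3}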
Paddle
Paddle-master/python/paddle/fluid/tests/demo/text_classification/train.py
# Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import paddle.fluid as fluid import numpy import sys TRAIN_FILES = ['train.recordio'] TEST_FILES = ['test.recordio'] DICT_DIM = 89528 # embedding dim emb_dim = 128 # hidden dim hid_dim = 128 # hidden dim2 hid_dim2 = 96 # class num class_dim = 2 def network_cfg(is_train, pass_num=100): with fluid.unique_name.guard(): train_file_obj = fluid.layers.open_files( filenames=TRAIN_FILES, pass_num=pass_num, shapes=[[-1, 1], [-1, 1]], lod_levels=[1, 0], dtypes=['int64', 'int64'], thread_num=1) test_file_obj = fluid.layers.open_files( filenames=TEST_FILES, pass_num=1, shapes=[[-1, 1], [-1, 1]], lod_levels=[1, 0], dtypes=['int64', 'int64'], thread_num=1) if is_train: file_obj = fluid.layers.shuffle(train_file_obj, buffer_size=1000) else: file_obj = test_file_obj file_obj = fluid.layers.double_buffer( file_obj, name="train_double_buffer" if is_train else 'test_double_buffer') data, label = fluid.layers.read_file(file_obj) emb = fluid.layers.embedding(input=data, size=[DICT_DIM, emb_dim]) # sequence conv with window size = 3 win_size = 3 conv_3 = fluid.nets.sequence_conv_pool( input=emb, num_filters=hid_dim, filter_size=win_size, act="tanh", pool_type="max") # fc layer after conv fc_1 = fluid.layers.fc(input=[conv_3], size=hid_dim2) # probability of each class prediction = fluid.layers.fc(input=[fc_1], size=class_dim, act="softmax") # cross entropy loss cost = fluid.layers.cross_entropy(input=prediction, label=label) # mean loss avg_cost = fluid.layers.mean(x=cost) acc = fluid.layers.accuracy(input=prediction, label=label) if is_train: # SGD optimizer sgd_optimizer = fluid.optimizer.Adagrad(learning_rate=0.01) sgd_optimizer.minimize(avg_cost) return { 'loss': avg_cost, 'log': [avg_cost, acc], 'file': train_file_obj if is_train else test_file_obj } def main(): train = fluid.Program() startup = fluid.Program() with fluid.program_guard(train, startup): train_args = network_cfg(is_train=True) test = fluid.Program() with fluid.program_guard(test, fluid.Program()): test_args = network_cfg(is_train=False) # startup place = fluid.CUDAPlace(0) exe = fluid.Executor(place=place) exe.run(startup) train_exe = fluid.ParallelExecutor( use_cuda=True, loss_name=train_args['loss'].name, main_program=train) fetch_var_list = [var.name for var in train_args['log']] for i in xrange(sys.maxint): result = map(numpy.array, train_exe.run(fetch_list=fetch_var_list if i % 1000 == 0 else [])) if len(result) != 0: print 'Train: ', result if i % 1000 == 0: test_exe = fluid.ParallelExecutor( use_cuda=True, main_program=test, share_vars_from=train_exe) loss = [] acc = [] try: while True: loss_np, acc_np = map( numpy.array, test_exe.run(fetch_list=fetch_var_list)) loss.append(loss_np[0]) acc.append(acc_np[0]) except: test_args['file'].reset() print 'TEST: ', numpy.mean(loss), numpy.mean(acc) if __name__ == '__main__': main()
4,426
28.711409
77
py
Paddle
Paddle-master/python/paddle/fluid/tests/unittests/test_gather_op.py
# Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import unittest import numpy as np from op_test import OpTest class TestGatherOp(OpTest): def setUp(self): self.op_type = "gather" xnp = np.random.random((10, 20)).astype("float32") self.inputs = {'X': xnp, 'Index': np.array([1, 3, 5]).astype("int32")} self.outputs = {'Out': self.inputs["X"][self.inputs["Index"]]} def test_check_output(self): self.check_output() def test_check_grad(self): self.check_grad(['X'], 'Out') if __name__ == "__main__": unittest.main()
1,148
30.916667
78
py
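In NumPy terms, the forward pass checked above is plain integer-array indexing; the usual gather gradient (a sketch, not the operator's own code) scatter-adds rows of dOut back into the indexed rows of dX:

import numpy as np

x = np.random.random((10, 20)).astype("float32")
index = np.array([1, 3, 5]).astype("int32")

out = x[index]  # forward: gather the rows of X named by Index

d_out = np.ones_like(out)
d_x = np.zeros_like(x)
np.add.at(d_x, index, d_out)  # backward: accumulate, since indices may repeat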
Paddle
Paddle-master/python/paddle/fluid/tests/unittests/test_elementwise_div_op.py
# Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import unittest import numpy as np from op_test import OpTest class ElementwiseDivOp(OpTest): def setUp(self): self.op_type = "elementwise_div" """ Warning CPU gradient check error! 'X': np.random.random((32,84)).astype("float32"), 'Y': np.random.random((32,84)).astype("float32") """ self.inputs = { 'X': np.random.uniform(0.1, 1, [13, 17]).astype("float32"), 'Y': np.random.uniform(0.1, 1, [13, 17]).astype("float32") } self.outputs = {'Out': np.divide(self.inputs['X'], self.inputs['Y'])} def test_check_output(self): self.check_output() def test_check_grad_normal(self): self.check_grad(['X', 'Y'], 'Out', max_relative_error=0.05) def test_check_grad_ingore_x(self): self.check_grad( ['Y'], 'Out', max_relative_error=0.05, no_grad_set=set("X")) def test_check_grad_ingore_y(self): self.check_grad( ['X'], 'Out', max_relative_error=0.05, no_grad_set=set('Y')) class TestElementwiseDivOp_scalar(ElementwiseDivOp): def setUp(self): self.op_type = "elementwise_div" self.inputs = { 'X': np.random.uniform(0.1, 1, [2, 3, 4]).astype(np.float32), 'Y': np.random.uniform(0.1, 1, [1]).astype(np.float32) } self.outputs = {'Out': self.inputs['X'] / self.inputs['Y']} class TestElementwiseDivOp_Vector(ElementwiseDivOp): def setUp(self): self.op_type = "elementwise_div" self.inputs = { 'X': np.random.uniform(0.1, 1, [32]).astype("float32"), 'Y': np.random.uniform(0.1, 1, [32]).astype("float32") } self.outputs = {'Out': np.divide(self.inputs['X'], self.inputs['Y'])} class TestElementwiseDivOp_broadcast_0(ElementwiseDivOp): def setUp(self): self.op_type = "elementwise_div" self.inputs = { 'X': np.random.uniform(0.1, 1, [2, 3, 4]).astype("float32"), 'Y': np.random.uniform(0.1, 1, [2]).astype("float32") } self.attrs = {'axis': 0} self.outputs = { 'Out': np.divide(self.inputs['X'], self.inputs['Y'].reshape(2, 1, 1)) } class TestElementwiseDivOp_broadcast_1(ElementwiseDivOp): def setUp(self): self.op_type = "elementwise_div" self.inputs = { 'X': np.random.uniform(0.1, 1, [2, 3, 4]).astype("float32"), 'Y': np.random.uniform(0.1, 1, [3]).astype("float32") } self.attrs = {'axis': 1} self.outputs = { 'Out': np.divide(self.inputs['X'], self.inputs['Y'].reshape(1, 3, 1)) } class TestElementwiseDivOp_broadcast_2(ElementwiseDivOp): def setUp(self): self.op_type = "elementwise_div" self.inputs = { 'X': np.random.uniform(0.1, 1, [2, 3, 4]).astype("float32"), 'Y': np.random.uniform(0.1, 1, [4]).astype("float32") } self.outputs = { 'Out': np.divide(self.inputs['X'], self.inputs['Y'].reshape(1, 1, 4)) } class TestElementwiseDivOp_broadcast_3(ElementwiseDivOp): def setUp(self): self.op_type = "elementwise_div" self.inputs = { 'X': np.random.uniform(0.1, 1, [2, 3, 4, 5]).astype("float32"), 'Y': np.random.uniform(0.1, 1, [3, 4]).astype("float32") } self.attrs = {'axis': 1} self.outputs = { 'Out': np.divide(self.inputs['X'], self.inputs['Y'].reshape(1, 3, 4, 1)) } if __name__ == '__main__': unittest.main()
4,232
31.813953
77
py
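The reshape(2, 1, 1), reshape(1, 3, 1) and reshape(1, 1, 4) calls in the broadcast cases above all follow one rule: Y's shape is aligned to X's dimensions starting at 'axis' (or at the trailing dimensions when 'axis' is omitted), and the remaining dimensions are padded with 1 so that ordinary NumPy broadcasting applies. A small sketch of that rule:

import numpy as np


def broadcast_y(x, y, axis):
    # place Y's shape into X's rank starting at `axis`, pad the rest with 1s
    shape = [1] * x.ndim
    shape[axis:axis + y.ndim] = y.shape
    return y.reshape(shape)


x = np.random.uniform(0.1, 1, (2, 3, 4))
y = np.random.uniform(0.1, 1, (3, ))
out = x / broadcast_y(x, y, axis=1)  # same as X / Y.reshape(1, 3, 1)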
Paddle
Paddle-master/python/paddle/fluid/tests/unittests/test_elementwise_sub_op.py
# Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import unittest import numpy as np from op_test import OpTest class TestElementwiseOp(OpTest): def setUp(self): self.op_type = "elementwise_sub" self.inputs = { 'X': np.random.uniform(0.1, 1, [13, 17]).astype("float32"), 'Y': np.random.uniform(0.1, 1, [13, 17]).astype("float32") } self.outputs = {'Out': self.inputs['X'] - self.inputs['Y']} def test_check_output(self): self.check_output() def test_check_grad_normal(self): self.check_grad(['X', 'Y'], 'Out', max_relative_error=0.005) def test_check_grad_ingore_x(self): self.check_grad( ['Y'], 'Out', max_relative_error=0.005, no_grad_set=set("X")) def test_check_grad_ingore_y(self): self.check_grad( ['X'], 'Out', max_relative_error=0.005, no_grad_set=set('Y')) class TestElementwiseSubOp_scalar(TestElementwiseOp): def setUp(self): self.op_type = "elementwise_sub" self.inputs = { 'X': np.random.rand(2, 3, 4).astype(np.float32), 'Y': np.random.rand(1).astype(np.float32) } self.outputs = {'Out': self.inputs['X'] - self.inputs['Y']} class TestElementwiseSubOp_Vector(TestElementwiseOp): def setUp(self): self.op_type = "elementwise_sub" self.inputs = { 'X': np.random.random((32, )).astype("float32"), 'Y': np.random.random((32, )).astype("float32") } self.outputs = {'Out': self.inputs['X'] - self.inputs['Y']} class TestElementwiseSubOp_broadcast_0(TestElementwiseOp): def setUp(self): self.op_type = "elementwise_sub" self.inputs = { 'X': np.random.rand(2, 3, 4).astype(np.float32), 'Y': np.random.rand(2).astype(np.float32) } self.attrs = {'axis': 0} self.outputs = { 'Out': self.inputs['X'] - self.inputs['Y'].reshape(2, 1, 1) } class TestElementwiseSubOp_broadcast_1(TestElementwiseOp): def setUp(self): self.op_type = "elementwise_sub" self.inputs = { 'X': np.random.rand(2, 3, 4).astype(np.float32), 'Y': np.random.rand(3).astype(np.float32) } self.attrs = {'axis': 1} self.outputs = { 'Out': self.inputs['X'] - self.inputs['Y'].reshape(1, 3, 1) } class TestElementwiseSubOp_broadcast_2(TestElementwiseOp): def setUp(self): self.op_type = "elementwise_sub" self.inputs = { 'X': np.random.rand(2, 3, 4).astype(np.float32), 'Y': np.random.rand(4).astype(np.float32) } self.outputs = { 'Out': self.inputs['X'] - self.inputs['Y'].reshape(1, 1, 4) } class TestElementwiseSubOp_broadcast_3(TestElementwiseOp): def setUp(self): self.op_type = "elementwise_sub" self.inputs = { 'X': np.random.rand(2, 3, 4, 5).astype(np.float32), 'Y': np.random.rand(3, 4).astype(np.float32) } self.attrs = {'axis': 1} self.outputs = { 'Out': self.inputs['X'] - self.inputs['Y'].reshape(1, 3, 4, 1) } if __name__ == '__main__': unittest.main()
3,817
30.816667
74
py
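Subtraction makes the gradients easy to state: dX is dOut unchanged, and dY is -dOut reduced over every axis along which Y was broadcast. A sketch for the broadcast_1 case above:

import numpy as np

x = np.random.rand(2, 3, 4)
y = np.random.rand(3)
d_out = np.ones((2, 3, 4))  # upstream gradient of Out = X - Y.reshape(1, 3, 1)

d_x = d_out                    # d(x - y)/dx = 1, shape follows X
d_y = -d_out.sum(axis=(0, 2))  # reduce the axes Y was broadcast over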
Paddle
Paddle-master/python/paddle/fluid/tests/unittests/test_layers.py
# Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. from __future__ import print_function import unittest import paddle.fluid.layers as layers import paddle.fluid.nets as nets from paddle.fluid.framework import Program, program_guard, default_main_program from paddle.fluid.param_attr import ParamAttr import decorators class TestBook(unittest.TestCase): def test_fit_a_line(self): program = Program() with program_guard(program, startup_program=Program()): x = layers.data(name='x', shape=[13], dtype='float32') y_predict = layers.fc(input=x, size=1, act=None) y = layers.data(name='y', shape=[1], dtype='float32') cost = layers.square_error_cost(input=y_predict, label=y) avg_cost = layers.mean(cost) self.assertIsNotNone(avg_cost) print(str(program)) def test_recognize_digits_mlp(self): program = Program() with program_guard(program, startup_program=Program()): # Change g_program, so the rest layers use `g_program` images = layers.data(name='pixel', shape=[784], dtype='float32') label = layers.data(name='label', shape=[1], dtype='int32') hidden1 = layers.fc(input=images, size=128, act='relu') hidden2 = layers.fc(input=hidden1, size=64, act='relu') predict = layers.fc(input=[hidden2, hidden1], size=10, act='softmax', param_attr=["sftmax.w1", "sftmax.w2"]) cost = layers.cross_entropy(input=predict, label=label) avg_cost = layers.mean(cost) self.assertIsNotNone(avg_cost) print(str(program)) def test_simple_conv2d(self): program = Program() with program_guard(program, startup_program=Program()): images = layers.data(name='pixel', shape=[3, 48, 48], dtype='int32') layers.conv2d(input=images, num_filters=3, filter_size=[4, 4]) print(str(program)) def test_conv2d_transpose(self): program = Program() with program_guard(program): img = layers.data(name='pixel', shape=[3, 2, 2], dtype='float32') layers.conv2d_transpose(input=img, num_filters=10, output_size=28) print(str(program)) def test_recognize_digits_conv(self): program = Program() with program_guard(program, startup_program=Program()): images = layers.data( name='pixel', shape=[1, 28, 28], dtype='float32') label = layers.data(name='label', shape=[1], dtype='int32') conv_pool_1 = nets.simple_img_conv_pool( input=images, filter_size=5, num_filters=2, pool_size=2, pool_stride=2, act="relu") conv_pool_2 = nets.simple_img_conv_pool( input=conv_pool_1, filter_size=5, num_filters=4, pool_size=2, pool_stride=2, act="relu") predict = layers.fc(input=conv_pool_2, size=10, act="softmax") cost = layers.cross_entropy(input=predict, label=label) avg_cost = layers.mean(cost) print(str(program)) def test_word_embedding(self): program = Program() with program_guard(program, startup_program=Program()): dict_size = 10000 embed_size = 32 first_word = layers.data(name='firstw', shape=[1], dtype='int64') second_word = layers.data(name='secondw', shape=[1], dtype='int64') third_word = layers.data(name='thirdw', shape=[1], dtype='int64') forth_word = layers.data(name='forthw', shape=[1], dtype='int64') next_word = 
layers.data(name='nextw', shape=[1], dtype='int64') embed_first = layers.embedding( input=first_word, size=[dict_size, embed_size], dtype='float32', param_attr='shared_w') embed_second = layers.embedding( input=second_word, size=[dict_size, embed_size], dtype='float32', param_attr='shared_w') embed_third = layers.embedding( input=third_word, size=[dict_size, embed_size], dtype='float32', param_attr='shared_w') embed_forth = layers.embedding( input=forth_word, size=[dict_size, embed_size], dtype='float32', param_attr='shared_w') concat_embed = layers.concat( input=[embed_first, embed_second, embed_third, embed_forth], axis=1) hidden1 = layers.fc(input=concat_embed, size=256, act='sigmoid') predict_word = layers.fc(input=hidden1, size=dict_size, act='softmax') cost = layers.cross_entropy(input=predict_word, label=next_word) avg_cost = layers.mean(cost) self.assertIsNotNone(avg_cost) print(str(program)) def test_linear_chain_crf(self): program = Program() with program_guard(program, startup_program=Program()): label_dict_len = 10 images = layers.data(name='pixel', shape=[784], dtype='float32') label = layers.data(name='label', shape=[1], dtype='int32') hidden = layers.fc(input=images, size=128) crf = layers.linear_chain_crf( input=hidden, label=label, param_attr=ParamAttr(name="crfw")) crf_decode = layers.crf_decoding( input=hidden, param_attr=ParamAttr(name="crfw")) layers.chunk_eval( input=crf_decode, label=label, chunk_scheme="IOB", num_chunk_types=(label_dict_len - 1) / 2) self.assertFalse(crf is None) self.assertFalse(crf_decode is None) print(str(program)) def test_sigmoid_cross_entropy(self): program = Program() with program_guard(program): dat = layers.data(name='data', shape=[10], dtype='float32') lbl = layers.data(name='label', shape=[10], dtype='float32') self.assertIsNotNone( layers.sigmoid_cross_entropy_with_logits( x=dat, label=lbl)) print(str(program)) def test_sequence_expand(self): program = Program() with program_guard(program): x = layers.data(name='x', shape=[10], dtype='float32') y = layers.data( name='y', shape=[10, 20], dtype='float32', lod_level=2) self.assertIsNotNone(layers.sequence_expand(x=x, y=y, ref_level=1)) print(str(program)) def test_lstm_unit(self): program = Program() with program_guard(program): x_t_data = layers.data( name='x_t_data', shape=[10, 10], dtype='float32') x_t = layers.fc(input=x_t_data, size=10) prev_hidden_data = layers.data( name='prev_hidden_data', shape=[10, 30], dtype='float32') prev_hidden = layers.fc(input=prev_hidden_data, size=30) prev_cell_data = layers.data( name='prev_cell', shape=[10, 30], dtype='float32') prev_cell = layers.fc(input=prev_cell_data, size=30) self.assertIsNotNone( layers.lstm_unit( x_t=x_t, hidden_t_prev=prev_hidden, cell_t_prev=prev_cell)) print(str(program)) def test_dynamic_lstmp(self): program = Program() with program_guard(program): hidden_dim, proj_dim = 16, 8 seq_data = layers.data( name='seq_data', shape=[10, 10], dtype='float32', lod_level=1) fc_out = layers.fc(input=seq_data, size=4 * hidden_dim) self.assertIsNotNone( layers.dynamic_lstmp( input=fc_out, size=4 * hidden_dim, proj_size=proj_dim)) print(str(program)) def test_sequence_softmax(self): program = Program() with program_guard(program): seq_data = layers.data( name='seq_data', shape=[10, 10], dtype='float32', lod_level=1) seq = layers.fc(input=seq_data, size=20) self.assertIsNotNone(layers.sequence_softmax(seq)) print(str(program)) def test_softmax(self): program = Program() with program_guard(program): data = layers.data(name='data', shape=[10], 
dtype='float32') hid = layers.fc(input=data, size=20) self.assertIsNotNone(layers.softmax(hid)) print(str(program)) def test_lrn(self): program = Program() with program_guard(program): data = layers.data(name='data', shape=[6, 2, 2], dtype='float32') self.assertIsNotNone(layers.lrn(data)) print(str(program)) def test_get_places(self): program = Program() with program_guard(program): x = layers.get_places(device_count=4) self.assertIsNotNone(x) print(str(program)) def test_sequence_reshape(self): program = Program() with program_guard(program): x = layers.data(name='x', shape=[8], dtype='float32', lod_level=1) out = layers.sequence_reshape(input=x, new_dim=16) self.assertIsNotNone(out) print(str(program)) def test_im2sequence(self): print("test_im2sequence") program = Program() with program_guard(program): x = layers.data(name='x', shape=[3, 128, 128], dtype='float32') output = layers.im2sequence( input=x, stride=[1, 1], filter_size=[2, 2]) self.assertIsNotNone(output) print(str(program)) @decorators.prog_scope() def test_nce(self): window_size = 5 words = [] for i in xrange(window_size): words.append( layers.data( name='word_{0}'.format(i), shape=[1], dtype='int64')) dict_size = 10000 label_word = int(window_size / 2) + 1 embs = [] for i in xrange(window_size): if i == label_word: continue emb = layers.embedding( input=words[i], size=[dict_size, 32], param_attr='emb.w', is_sparse=True) embs.append(emb) embs = layers.concat(input=embs, axis=1) loss = layers.nce(input=embs, label=words[label_word], num_total_classes=dict_size, param_attr='nce.w', bias_attr='nce.b') avg_loss = layers.mean(loss) self.assertIsNotNone(avg_loss) print(str(default_main_program())) def test_row_conv(self): program = Program() with program_guard(program): x = layers.data(name='x', shape=[16], dtype='float32', lod_level=1) out = layers.row_conv(input=x, future_context_size=2) self.assertIsNotNone(out) print(str(program)) def test_multiplex(self): program = Program() with program_guard(program): x1 = layers.data(name='x1', shape=[4], dtype='float32') x2 = layers.data(name='x2', shape=[4], dtype='float32') index = layers.data(name='index', shape=[1], dtype='int32') out = layers.multiplex(inputs=[x1, x2], index=index) self.assertIsNotNone(out) print(str(program)) def test_softmax_with_cross_entropy(self): program = Program() with program_guard(program): x = layers.data(name='x', shape=[16], dtype='float32') y = layers.data(name='label', shape=[1], dtype='int64') loss = layers.softmax_with_cross_entropy(x, y) self.assertIsNotNone(loss) print(str(program)) def test_smooth_l1(self): program = Program() with program_guard(program): x = layers.data(name='x', shape=[4], dtype='float32') y = layers.data(name='label', shape=[4], dtype='float32') loss = layers.smooth_l1(x, y) self.assertIsNotNone(loss) print(str(program)) def test_lod_reset(self): program = Program() with program_guard(program): x = layers.data(name='x', shape=[10], dtype='float32') y = layers.data( name='y', shape=[10, 20], dtype='float32', lod_level=2) print(layers.lod_reset(x=x, y=y)) print(str(program)) def test_label_smooth(self): program = Program() with program_guard(program): label = layers.data(name="label", shape=[1], dtype="float32") one_hot_label = layers.one_hot(input=label, depth=10) smooth_label = layers.label_smooth( label=one_hot_label, epsilon=0.1, dtype="float32") self.assertIsNotNone(smooth_label) print(str(program)) def test_topk(self): program = Program() with program_guard(program): data = layers.data(name="label", shape=[200], 
dtype="float32") values, indices = layers.topk(data, k=5) self.assertIsNotNone(values) self.assertIsNotNone(indices) print(str(program)) def test_roi_pool(self): program = Program() with program_guard(program): x = layers.data(name="x", shape=[256, 30, 30], dtype="float32") rois = layers.data( name="rois", shape=[4], dtype="float32", lod_level=1) output = layers.roi_pool(x, rois, 7, 7, 0.6) self.assertIsNotNone(output) print(str(program)) def test_upsampling_bilinear2d(self): program = Program() with program_guard(program): x = layers.data(name='x', shape=[3, 9, 6], dtype="float32") output = layers.upsampling_bilinear2d(x, out_shape=[12, 12]) self.assertIsNotNone(output) output = layers.upsampling_bilinear2d(x, scale=3) self.assertIsNotNone(output) print(str(program)) if __name__ == '__main__': unittest.main()
15,106
38.238961
80
py
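The cases above only build programs and print them; none is executed. A minimal sketch of actually running one of the built graphs end to end with an Executor, reusing the fit-a-line layers from the first test:

import numpy as np
import paddle.fluid as fluid
import paddle.fluid.layers as layers

main = fluid.Program()
startup = fluid.Program()
with fluid.program_guard(main, startup):
    x = layers.data(name='x', shape=[13], dtype='float32')
    y_predict = layers.fc(input=x, size=1, act=None)

exe = fluid.Executor(fluid.CPUPlace())
exe.run(startup)  # initialize the fc parameters
out, = exe.run(main,
               feed={'x': np.random.random((4, 13)).astype('float32')},
               fetch_list=[y_predict])
print(out.shape)  # (4, 1)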
Paddle
Paddle-master/python/paddle/fluid/tests/unittests/test_clip_op.py
# Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import unittest import numpy as np from op_test import OpTest class TestClipOp(OpTest): def setUp(self): self.max_relative_error = 0.006 self.initTestCase() input = np.random.random(self.shape).astype("float32") input[np.abs(input - self.min) < self.max_relative_error] = 0.5 input[np.abs(input - self.max) < self.max_relative_error] = 0.5 self.op_type = "clip" self.inputs = {'X': input, } self.attrs = {} self.attrs['min'] = self.min self.attrs['max'] = self.max self.outputs = { 'Out': np.clip(self.inputs['X'], self.attrs['min'], self.attrs['max']) } def test_check_output(self): self.check_output() def test_check_grad_normal(self): self.check_grad( ['X'], 'Out', max_relative_error=self.max_relative_error) def initTestCase(self): self.shape = (4, 4) self.max = 0.7 self.min = 0.1 class TestCase1(TestClipOp): def initTestCase(self): self.shape = (8, 16, 8) self.max = 0.7 self.min = 0.0 class TestCase2(TestClipOp): def initTestCase(self): self.shape = (8, 16) self.max = 1.0 self.min = 0.0 class TestCase3(TestClipOp): def initTestCase(self): self.shape = (4, 8, 16) self.max = 0.7 self.min = 0.2 if __name__ == '__main__': unittest.main()
2,065
27.30137
74
py
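The setUp above nudges any input that lies within max_relative_error of min or max over to 0.5 because clip is non-differentiable exactly at the boundaries, so a finite-difference gradient check would straddle the kink. A sketch of the subgradient the check expects away from the boundaries:

import numpy as np

x = np.random.random((4, 4)).astype("float32")
lo, hi = 0.1, 0.7

out = np.clip(x, lo, hi)
d_out = np.ones_like(out)
d_x = d_out * ((x > lo) & (x < hi))  # zero gradient where the input was clipped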
Paddle
Paddle-master/python/paddle/fluid/tests/unittests/test_variable.py
# Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import unittest from paddle.fluid.framework import default_main_program, Program, convert_np_dtype_to_dtype_ import paddle.fluid.core as core import numpy as np class TestVariable(unittest.TestCase): def test_np_dtype_convert(self): DT = core.VarDesc.VarType convert = convert_np_dtype_to_dtype_ self.assertEqual(DT.FP32, convert(np.float32)) self.assertEqual(DT.FP16, convert("float16")) self.assertEqual(DT.FP64, convert("float64")) self.assertEqual(DT.INT32, convert("int32")) self.assertEqual(DT.INT16, convert("int16")) self.assertEqual(DT.INT64, convert("int64")) self.assertEqual(DT.BOOL, convert("bool")) self.assertRaises(ValueError, lambda: convert("int8")) def test_var(self): b = default_main_program().current_block() w = b.create_var( dtype="float64", shape=[784, 100], lod_level=0, name="fc.w") self.assertNotEqual(str(w), "") self.assertEqual(core.VarDesc.VarType.FP64, w.dtype) self.assertEqual((784, 100), w.shape) self.assertEqual("fc.w", w.name) self.assertEqual(0, w.lod_level) w = b.create_var(name='fc.w') self.assertEqual(core.VarDesc.VarType.FP64, w.dtype) self.assertEqual((784, 100), w.shape) self.assertEqual("fc.w", w.name) self.assertEqual(0, w.lod_level) self.assertRaises(ValueError, lambda: b.create_var(name="fc.w", shape=(24, 100))) def test_step_scopes(self): prog = Program() b = prog.current_block() var = b.create_var( name='step_scopes', type=core.VarDesc.VarType.STEP_SCOPES) self.assertEqual(core.VarDesc.VarType.STEP_SCOPES, var.type) if __name__ == '__main__': unittest.main()
2,424
37.492063
92
py
Paddle
Paddle-master/python/paddle/fluid/tests/unittests/test_inference_model_io.py
# Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import unittest import numpy as np import paddle.fluid.core as core import paddle.fluid.executor as executor import paddle.fluid.layers as layers import paddle.fluid.optimizer as optimizer from paddle.fluid.framework import Program, program_guard from paddle.fluid.io import save_inference_model, load_inference_model class TestBook(unittest.TestCase): def test_fit_line_inference_model(self): MODEL_DIR = "./tmp/inference_model" init_program = Program() program = Program() with program_guard(program, init_program): x = layers.data(name='x', shape=[2], dtype='float32') y = layers.data(name='y', shape=[1], dtype='float32') y_predict = layers.fc(input=x, size=1, act=None) cost = layers.square_error_cost(input=y_predict, label=y) avg_cost = layers.mean(cost) sgd_optimizer = optimizer.SGDOptimizer(learning_rate=0.001) sgd_optimizer.minimize(avg_cost, init_program) place = core.CPUPlace() exe = executor.Executor(place) exe.run(init_program, feed={}, fetch_list=[]) for i in xrange(100): tensor_x = np.array( [[1, 1], [1, 2], [3, 4], [5, 2]]).astype("float32") tensor_y = np.array([[-2], [-3], [-7], [-7]]).astype("float32") exe.run(program, feed={'x': tensor_x, 'y': tensor_y}, fetch_list=[avg_cost]) save_inference_model(MODEL_DIR, ["x", "y"], [avg_cost], exe, program) expected = exe.run(program, feed={'x': tensor_x, 'y': tensor_y}, fetch_list=[avg_cost])[0] reload(executor) # reload to build a new scope exe = executor.Executor(place) [infer_prog, feed_var_names, fetch_vars] = load_inference_model( MODEL_DIR, exe) outs = exe.run( infer_prog, feed={feed_var_names[0]: tensor_x, feed_var_names[1]: tensor_y}, fetch_list=fetch_vars) actual = outs[0] self.assertEqual(feed_var_names, ["x", "y"]) self.assertEqual(len(fetch_vars), 1) self.assertEqual(str(fetch_vars[0]), str(avg_cost)) self.assertEqual(expected, actual) if __name__ == '__main__': unittest.main()
3,020
33.329545
77
py
Paddle
Paddle-master/python/paddle/fluid/tests/unittests/test_executor_and_mul.py
# Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import unittest import numpy import paddle.fluid.core as core from paddle.fluid.executor import Executor from paddle.fluid.layers import mul, data class TestExecutor(unittest.TestCase): def test_mul(self): a = data(name='a', shape=[784], dtype='float32') b = data( name='b', shape=[784, 100], dtype='float32', append_batch_size=False) out = mul(x=a, y=b) place = core.CPUPlace() a_np = numpy.random.random((100, 784)).astype('float32') b_np = numpy.random.random((784, 100)).astype('float32') exe = Executor(place) outs = exe.run(feed={'a': a_np, 'b': b_np}, fetch_list=[out]) out = outs[0] self.assertEqual((100, 100), out.shape) self.assertTrue(numpy.allclose(out, numpy.dot(a_np, b_np))) if __name__ == '__main__': unittest.main()
1,496
33.022727
74
py
Paddle
Paddle-master/python/paddle/fluid/tests/unittests/test_adamax_op.py
# Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import unittest import numpy as np from op_test import OpTest class TestAdamaxOp1(OpTest): def setUp(self): '''Test Adamax Operator with supplied attributes ''' self.op_type = "adamax" param = np.random.uniform(-1, 1, (102, 105)).astype("float32") grad = np.random.uniform(-1, 1, (102, 105)).astype("float32") moment = np.random.uniform(-1, 1, (102, 105)).astype("float32") # The infinity norm is positive inf_norm = np.random.random((102, 105)).astype("float32") learning_rate = 0.002 beta1 = 0.78 beta2 = 0.899 epsilon = 1e-5 beta1_pow = beta1**10 self.inputs = { 'Param': param, 'Grad': grad, 'Moment': moment, 'InfNorm': inf_norm, 'LearningRate': np.array([learning_rate]).astype("float32"), 'Beta1Pow': np.array([beta1_pow]).astype("float32") } self.attrs = {'beta1': beta1, 'beta2': beta2, 'epsilon': epsilon} param_out, moment_out, inf_norm_out = adamax_step(self.inputs, self.attrs) self.outputs = { 'ParamOut': param_out, 'MomentOut': moment_out, 'InfNormOut': inf_norm_out } def test_check_output(self): self.check_output() class TestAdamaxOp2(OpTest): '''Test Adamax Operator with default attributes ''' def setUp(self): self.op_type = "adamax" param = np.random.uniform(-1, 1, (102, 105)).astype("float32") grad = np.random.uniform(-1, 1, (102, 105)).astype("float32") moment = np.random.uniform(-1, 1, (102, 105)).astype("float32") # The infinity norm is positive inf_norm = np.random.random((102, 105)).astype("float32") learning_rate = 0.002 beta1 = 0.9 beta2 = 0.999 epsilon = 1e-8 beta1_pow = beta1**8 self.inputs = { 'Param': param, 'Grad': grad, 'Moment': moment, 'InfNorm': inf_norm, 'LearningRate': np.array([learning_rate]).astype("float32"), 'Beta1Pow': np.array([beta1_pow]).astype("float32") } attrs = {'beta1': beta1, 'beta2': beta2, 'epsilon': epsilon} param_out, moment_out, inf_norm_out = adamax_step(self.inputs, attrs) self.outputs = { 'ParamOut': param_out, 'MomentOut': moment_out, 'InfNormOut': inf_norm_out } def test_check_output(self): self.check_output() class TestAdamaxOpMultipleSteps(OpTest): def setUp(self): '''Test Adamax Operator with supplied attributes ''' self.op_type = "adamax" self.num_steps = 10 param = np.random.uniform(-1, 1, (102, 105)).astype("float32") grad = np.random.uniform(-1, 1, (102, 105)).astype("float32") moment = np.random.uniform(-1, 1, (102, 105)).astype("float32") # The infinity norm is positive inf_norm = np.random.random((102, 105)).astype("float32") learning_rate = 0.002 beta1 = 0.8 beta2 = 0.99 epsilon = 1e-5 beta1_pow = 1 self.inputs = { 'Param': param, 'Grad': grad, 'Moment': moment, 'InfNorm': inf_norm, 'LearningRate': np.array([learning_rate]).astype("float32"), 'Beta1Pow': np.array([beta1_pow]).astype("float32") } self.attrs = {'beta1': beta1, 'beta2': beta2, 'epsilon': epsilon} def test_check_output(self): for _ in range(self.num_steps): param_out, moment_out, inf_norm_out = adamax_step(self.inputs, self.attrs) self.outputs = { 'ParamOut': param_out, 
'MomentOut': moment_out,
                'InfNormOut': inf_norm_out
            }

            # Verify output for this step
            self.check_output()

            # Output of this step becomes input for next step
            self.inputs['Param'] = param_out
            self.inputs['Moment'] = moment_out
            self.inputs['InfNorm'] = inf_norm_out

            # Update Beta1 Power accumulator for next step
            self.inputs['Beta1Pow'] *= self.attrs['beta1']

            # Randomize gradient for next step
            self.inputs['Grad'] = np.random.uniform(
                -1, 1, (102, 105)).astype("float32")


def adamax_step(inputs, attributes):
    '''
    Simulate one step of the adamax optimizer
    :param inputs: dict of inputs
    :param attributes: dict of attributes
    :return tuple: tuple of output param, moment and inf_norm
    '''
    param = inputs['Param']
    grad = inputs['Grad']
    moment = inputs['Moment']
    inf_norm = inputs['InfNorm']
    lr = inputs['LearningRate']
    beta1_pow = inputs['Beta1Pow']
    beta1 = attributes['beta1']
    beta2 = attributes['beta2']
    epsilon = attributes['epsilon']

    moment_out = beta1 * moment + (1 - beta1) * grad
    inf_norm_out = np.maximum(beta2 * inf_norm + epsilon, np.abs(grad))
    lr_t = (lr / (1 - beta1_pow))
    param_out = param - lr_t * np.divide(moment_out, inf_norm_out)

    return param_out, moment_out, inf_norm_out


if __name__ == "__main__":
    unittest.main()
6,038
31.294118
77
py
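Written out, adamax_step above implements the following update, where Beta1Pow carries beta1^t across steps. Note that epsilon is added inside the max, which is how this reference keeps the denominator positive (the original Adamax paper places it differently):

m_t = \beta_1 m_{t-1} + (1 - \beta_1) g_t
u_t = \max(\beta_2 u_{t-1} + \epsilon, \, |g_t|)
\theta_t = \theta_{t-1} - \frac{\eta}{1 - \beta_1^t} \cdot \frac{m_t}{u_t}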
Paddle
Paddle-master/python/paddle/fluid/tests/unittests/test_multi_pass_reader.py
# Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import unittest import paddle.fluid as fluid import paddle import paddle.dataset.mnist as mnist class TestMultipleReader(unittest.TestCase): def setUp(self): self.batch_size = 64 self.pass_num = 3 # Convert mnist to recordio file with fluid.program_guard(fluid.Program(), fluid.Program()): data_file = paddle.batch(mnist.train(), batch_size=self.batch_size) feeder = fluid.DataFeeder( feed_list=[ fluid.layers.data( name='image', shape=[784]), fluid.layers.data( name='label', shape=[1], dtype='int64'), ], place=fluid.CPUPlace()) self.num_batch = fluid.recordio_writer.convert_reader_to_recordio_file( './mnist.recordio', data_file, feeder) def test_main(self): with fluid.program_guard(fluid.Program(), fluid.Program()): data_file = fluid.layers.open_recordio_file( filename='./mnist.recordio', shapes=[(-1, 784), (-1, 1)], lod_levels=[0, 0], dtypes=['float32', 'int64'], pass_num=self.pass_num) img, label = fluid.layers.read_file(data_file) if fluid.core.is_compiled_with_cuda(): place = fluid.CUDAPlace(0) else: place = fluid.CPUPlace() exe = fluid.Executor(place) exe.run(fluid.default_startup_program()) batch_count = 0 while True: try: img_val, = exe.run(fetch_list=[img]) except fluid.core.EnforceNotMet as ex: self.assertIn("There is no next data.", ex.message) break batch_count += 1 self.assertLessEqual(img_val.shape[0], self.batch_size) self.assertEqual(batch_count, self.num_batch * self.pass_num)
2,613
37.441176
83
py
Paddle
Paddle-master/python/paddle/fluid/tests/unittests/test_lstmp_op.py
# Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved. # #Licensed under the Apache License, Version 2.0 (the "License"); #you may not use this file except in compliance with the License. #You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # #Unless required by applicable law or agreed to in writing, software #distributed under the License is distributed on an "AS IS" BASIS, #WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. #See the License for the specific language governing permissions and #limitations under the License. import unittest import numpy as np import test_lstm_op as LstmTest ACTIVATION = { 'identity': LstmTest.identity, 'sigmoid': LstmTest.sigmoid, 'tanh': LstmTest.tanh, 'relu': LstmTest.relu } # LSTM with recurrent projection Layer def lstmp( input, # T x 4D lod, # 1 x N h0=None, # N x D c0=None, # N x D w_r=None, # P x 4D w_rh=None, # D x P w_b=None, # 1 x 4D w_c=None, # 1 x 3D is_reverse=False, act_gate=None, act_cell=None, act_cand=None, act_proj=None): def _step(x, w_r, w_rh, w_c, r_pre, c_pre, act_gate, act_cell, act_cand, act_proj): g = np.dot(r_pre, w_r) # 1 x 4D g = g + x g = np.reshape(g, (1, g.size)) c, g_i, g_f, g_o = np.split(g, 4, axis=1) if w_c is None: g_i = act_gate(g_i) # 1 x D g_f = act_gate(g_f) # 1 x D else: w_ic, w_fc, _ = np.split(w_c, 3, axis=1) g_i = act_gate(g_i + w_ic * c_pre) # 1 x D g_f = act_gate(g_f + w_fc * c_pre) # 1 x D c = g_f * c_pre + g_i * act_cand(c) # 1 x D if w_c is None: g_o = act_gate(g_o) # 1 x D else: _, _, w_oc = np.split(w_c, 3, axis=1) g_o = act_gate(g_o + w_oc * c) # 1 x D h = g_o * act_cell(c) # projection r = np.dot(h, w_rh) r = act_proj(r) return r, c def _reverse(x, lod): y = np.zeros_like(x) for i in range(len(lod) - 1): b, e = lod[i], lod[i + 1] y[b:e, :] = np.flip(x[b:e, :], 0) return y offset = lod[0] batch_size = len(offset) - 1 # recurrent projection state projection = [] cell = [] input = _reverse(input, offset) if is_reverse else input if w_b is not None: input = input + np.tile(w_b, (offset[-1], 1)) for i in range(batch_size): # compute one sequence seq_len = offset[i + 1] - offset[i] x = input[offset[i]:offset[i + 1], :] r_pre = np.dot(h0[i], w_rh) # 1 x P r_pre = act_proj(r_pre) c_pre = c0[i] # 1 x D for j in range(seq_len): # compute one step r_pre, c_pre = _step(x[j], w_r, w_rh, w_c, r_pre, c_pre, act_gate, act_cell, act_cand, act_proj) projection.append(r_pre.flatten()) cell.append(c_pre.flatten()) projection = np.array(projection).astype('float64') cell = np.array(cell).astype('float64') projection = _reverse(projection, offset) if is_reverse else projection cell = _reverse(cell, offset) if is_reverse else cell assert projection.shape == (input.shape[0], w_r.shape[0]) # T x P assert cell.shape == (input.shape[0], input.shape[1] / 4) # T x D return projection, cell class TestLstmpOp(LstmTest.TestLstmOp): def reset_argument(self): pass def setUp(self): self.set_argument() # projection size self.P = 10 self.act_proj = self.act_cell self.reset_argument() self.op_type = 'lstmp' T = self.lod[0][-1] N = len(self.lod[0]) - 1 x = np.random.normal(size=(T, 4 * self.D)).astype('float64') if self.has_initial_state: h0 = np.random.normal(size=(N, self.D)).astype('float64') c0 = np.random.normal(size=(N, self.D)).astype('float64') else: h0 = np.zeros((N, self.D)).astype('float64') c0 = np.zeros((N, self.D)).astype('float64') w = np.random.normal(size=(self.P, 4 * self.D)).astype('float64') if self.use_peepholes: b = np.random.normal(size=(1, 7 * 
self.D)).astype('float64') else: b = np.random.normal(size=(1, 4 * self.D)).astype('float64') w_b = b[:, 0:4 * self.D] w_c = b[:, 4 * self.D:] if self.use_peepholes else None w_rh = np.random.normal(size=(self.D, self.P)).astype('float64') r, c = lstmp(x, self.lod, h0, c0, w, w_rh, w_b, w_c, self.is_reverse, ACTIVATION[self.act_gate], ACTIVATION[self.act_cell], ACTIVATION[self.act_cand], ACTIVATION[self.act_proj]) self.inputs = {'Input': (x, self.lod), 'Weight': w, 'ProjWeight': w_rh} self.inputs['Bias'] = b if self.has_initial_state: self.inputs['H0'] = h0 self.inputs['C0'] = c0 self.outputs = { 'Projection': (r, self.lod), 'Cell': (c, self.lod), } self.attrs = { 'use_peepholes': self.use_peepholes, 'is_reverse': self.is_reverse, 'gate_activation': self.act_gate, 'cell_activation': self.act_cell, 'candidate_activation': self.act_cand, 'proj_activation': self.act_proj } def test_check_output(self): self.check_output(atol=1e-8) def test_check_grad(self): # TODO(qingqing) remove folowing lines after the check_grad is refined. N = len(self.lod[0]) - 1 self.outputs['OrderedP0'] = np.zeros((N, self.P)).astype('float64') self.outputs['BatchGate'] = np.zeros((N, 4 * self.D)).astype('float64') self.outputs['BatchHidden'] = np.zeros((N, self.D)).astype('float64') self.outputs['BatchCellPreAct'] = np.zeros( (N, self.D)).astype('float64') self.check_grad( ['Input', 'Weight', 'ProjWeight', 'Bias'], ['Projection'], max_relative_error=1e-2) class TestLstmpOpHasInitial(TestLstmpOp): def reset_argument(self): self.has_initial_state = True def test_check_grad(self): # TODO(qingqing) remove folowing lines after the check_grad is refined. N = len(self.lod[0]) - 1 self.outputs['OrderedP0'] = np.zeros((N, self.P)).astype('float64') self.outputs['BatchGate'] = np.zeros((N, 4 * self.D)).astype('float64') self.outputs['BatchHidden'] = np.zeros((N, self.D)).astype('float64') self.outputs['BatchCellPreAct'] = np.zeros( (N, self.D)).astype('float64') self.check_grad( ['Input', 'Weight', 'ProjWeight', 'Bias', 'H0', 'C0'], ['Projection'], max_relative_error=1e-2) def test_check_grad_ingore_bias(self): N = len(self.lod[0]) - 1 self.outputs['OrderedP0'] = np.zeros((N, self.P)).astype('float64') self.outputs['BatchGate'] = np.zeros((N, 4 * self.D)).astype('float64') self.outputs['BatchHidden'] = np.zeros((N, self.D)).astype('float64') self.outputs['BatchCellPreAct'] = np.zeros( (N, self.D)).astype('float64') self.check_grad( ['Input', 'ProjWeight', 'Weight'], ['Projection'], max_relative_error=1e-2, no_grad_set=set('Bias')) def test_check_grad_ingore_weight(self): N = len(self.lod[0]) - 1 self.outputs['OrderedP0'] = np.zeros((N, self.P)).astype('float64') self.outputs['BatchGate'] = np.zeros((N, 4 * self.D)).astype('float64') self.outputs['BatchHidden'] = np.zeros((N, self.D)).astype('float64') self.outputs['BatchCellPreAct'] = np.zeros( (N, self.D)).astype('float64') self.check_grad( ['Input', 'ProjWeight', 'Bias'], ['Projection'], max_relative_error=1e-2, no_grad_set=set('Weight')) def test_check_grad_ingore_proj_weight(self): N = len(self.lod[0]) - 1 self.outputs['OrderedP0'] = np.zeros((N, self.P)).astype('float64') self.outputs['BatchGate'] = np.zeros((N, 4 * self.D)).astype('float64') self.outputs['BatchHidden'] = np.zeros((N, self.D)).astype('float64') self.outputs['BatchCellPreAct'] = np.zeros( (N, self.D)).astype('float64') self.check_grad( ['Input', 'Weight', 'Bias'], ['Projection'], max_relative_error=1e-2, no_grad_set=set('ProjWeight')) def test_check_grad_ingore_input(self): N = len(self.lod[0]) - 1 
self.outputs['OrderedP0'] = np.zeros((N, self.P)).astype('float64') self.outputs['BatchGate'] = np.zeros((N, 4 * self.D)).astype('float64') self.outputs['BatchHidden'] = np.zeros((N, self.D)).astype('float64') self.outputs['BatchCellPreAct'] = np.zeros( (N, self.D)).astype('float64') self.check_grad( ['Weight', 'ProjWeight', 'Bias'], ['Projection'], max_relative_error=1e-2, no_grad_set=set('Input')) def test_check_grad_ingore_h0(self): N = len(self.lod[0]) - 1 self.outputs['OrderedP0'] = np.zeros((N, self.P)).astype('float64') self.outputs['BatchGate'] = np.zeros((N, 4 * self.D)).astype('float64') self.outputs['BatchHidden'] = np.zeros((N, self.D)).astype('float64') self.outputs['BatchCellPreAct'] = np.zeros( (N, self.D)).astype('float64') self.check_grad( ['Input', 'Weight', 'ProjWeight', 'Bias', 'C0'], ['Projection'], max_relative_error=1e-2, no_grad_set=set('H0')) def test_check_grad_ingore_c0(self): N = len(self.lod[0]) - 1 self.outputs['OrderedP0'] = np.zeros((N, self.P)).astype('float64') self.outputs['BatchGate'] = np.zeros((N, 4 * self.D)).astype('float64') self.outputs['BatchHidden'] = np.zeros((N, self.D)).astype('float64') self.outputs['BatchCellPreAct'] = np.zeros( (N, self.D)).astype('float64') self.check_grad( ['Input', 'Weight', 'ProjWeight', 'Bias', 'H0'], ['Projection'], max_relative_error=1e-2, no_grad_set=set('C0')) class TestLstmpOpRerverse(TestLstmpOp): def reset_argument(self): self.is_reverse = True class TestLstmpOpNotUsePeepholes(TestLstmpOp): def reset_argument(self): self.use_peepholes = False class TestLstmpOpLinearProjection(TestLstmpOp): def reset_argument(self): self.act_proj = 'identity' if __name__ == '__main__': unittest.main()
10,765
36.512195
79
py
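The _step helper above computes one projected-LSTM step. With the default peephole connections it is, per time step (sigma and tanh stand in for the configurable act_gate / act_cell / act_cand, and act_proj for the projection activation):

g = x_t + r_{t-1} W_r, \quad (\tilde{c}, g_i, g_f, g_o) = \mathrm{split}(g)
i_t = \sigma(g_i + w_{ic} \odot c_{t-1}), \quad f_t = \sigma(g_f + w_{fc} \odot c_{t-1})
c_t = f_t \odot c_{t-1} + i_t \odot \tanh(\tilde{c})
o_t = \sigma(g_o + w_{oc} \odot c_t)
h_t = o_t \odot \tanh(c_t), \quad r_t = \mathrm{act\_proj}(h_t W_{rh})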
Paddle
Paddle-master/python/paddle/fluid/tests/unittests/test_fill_constant_op.py
# Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import unittest import numpy as np from op_test import OpTest class TestFillConstantOp1(OpTest): def setUp(self): '''Test fill_constant op with specified value ''' self.op_type = "fill_constant" self.inputs = {} self.attrs = {'shape': [123, 92], 'value': 3.8} self.outputs = {'Out': np.full((123, 92), 3.8)} def test_check_output(self): self.check_output() class TestFillConstantOp2(OpTest): def setUp(self): '''Test fill_constant op with default value ''' self.op_type = "fill_constant" self.inputs = {} self.attrs = {'shape': [123, 92]} self.outputs = {'Out': np.full((123, 92), 0.0)} def test_check_output(self): self.check_output() if __name__ == "__main__": unittest.main()
1,434
27.7
74
py
Paddle
Paddle-master/python/paddle/fluid/tests/unittests/test_normalization_wrapper.py
# Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

import unittest
import paddle.fluid as fluid
import paddle.fluid.core as core
import numpy as np


class TestNormalization(unittest.TestCase):
    data_desc = {"name": "input", "shape": (2, 3, 7)}

    def gen_random_input(self):
        """Generate random input data.
        """
        self.data = np.random.random(
            size=self.data_desc["shape"]).astype("float32")

    def set_program(self, axis, epsilon):
        """Build the test program.
        """
        data = fluid.layers.data(
            name=self.data_desc["name"],
            shape=self.data_desc["shape"],
            dtype="float32",
            append_batch_size=False)
        data.stop_gradient = False
        l2_norm = fluid.layers.l2_normalize(x=data, axis=axis, epsilon=epsilon)
        out = fluid.layers.reduce_sum(l2_norm, dim=None)

        fluid.backward.append_backward(loss=out)
        self.fetch_list = [l2_norm]

    def run_program(self):
        """Run the test program.
        """
        places = [core.CPUPlace()]
        if core.is_compiled_with_cuda():
            places.append(core.CUDAPlace(0))

        for place in places:
            self.set_inputs(place)
            exe = fluid.Executor(place)

            output = exe.run(fluid.default_main_program(),
                             feed=self.inputs,
                             fetch_list=self.fetch_list,
                             return_numpy=True)
            self.op_output = output

    def set_inputs(self, place):
        """Set the randomly generated data to the test program.
        """
        self.inputs = {}
        tensor = fluid.Tensor()
        tensor.set(self.data, place)
        self.inputs[self.data_desc["name"]] = tensor

    def l2_normalize(self, data, axis, epsilon):
        """Compute the groundtruth: divide each vector by its L2 norm,
        i.e. the square root of the sum of squares along `axis`.
        """
        output = data * np.reciprocal(
            np.sqrt(np.sum(np.square(data), axis=axis, keepdims=True)))
        return output

    def test_l2_normalize(self):
        """Test the python wrapper for l2_normalize.
        """
        axis = 1
        # TODO(caoying) epsilon is not supported due to lack of a maximum_op.
        epsilon = 1e-6

        self.gen_random_input()

        self.set_program(axis, epsilon)
        self.run_program()

        expect_output = self.l2_normalize(self.data, axis, epsilon)

        # check output
        self.assertTrue(np.allclose(self.op_output, expect_output, atol=0.001))


if __name__ == '__main__':
    unittest.main()
3,091
30.876289
79
py
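For reference, l2_normalize divides each vector by its L2 norm along 'axis'. A standalone sketch with the epsilon guard that the TODO above says is not yet exercised; the exact epsilon placement here is an assumption, since the test never checks it:

import numpy as np


def l2_normalize_ref(x, axis, epsilon=1e-6):
    # norm = sqrt(max(sum(x^2), epsilon)), applied per vector along `axis`
    sq_sum = np.sum(np.square(x), axis=axis, keepdims=True)
    return x / np.sqrt(np.maximum(sq_sum, epsilon))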
Paddle
Paddle-master/python/paddle/fluid/tests/unittests/test_split_var.py
# Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import math import unittest from paddle.fluid.transpiler.distribute_transpiler import split_variable import paddle.fluid as fluid import paddle.fluid.core as core import random class TestSplitVar(unittest.TestCase): def check_split_output(self, shapes, expected_sizes, min_size): var_list = [] program = fluid.Program() for shape in shapes: var = program.global_block().create_var( name=str(random.randint(10000, 99999)), persistable=True, # dtype=core.VarDesc.VarType.LOD_TENSOR, shape=shape) var_list.append(var) blocks = split_variable(var_list, 10, min_size) all_sizes = [] for s in expected_sizes: for s2 in s: all_sizes.append(s2) for i, block_str in enumerate(blocks): varname, block_id, size = block_str.split(":") self.assertEqual(int(size), all_sizes[i]) def test_1k(self): shapes = [[3, 5], [1024], [28, 784], [8, 1020], [800, 10]] expected_sizes = [ [15], [1024], [2352, 2352, 2352, 2352, 2352, 2352, 2352, 2352, 2352, 784], [2040, 2040, 2040, 2040], [1150, 1150, 1150, 1150, 1150, 1150, 1100] ] self.check_split_output(shapes, expected_sizes, 1024) def test_check_output_8k(self): shapes = [[3, 5], [1024], [28, 784], [8, 1020], [800, 10], [6, 33, 33, 33]] expected_sizes = [[15], [1024], [10976, 10976], [8160], [8000], [35937, 35937, 35937, 35937, 35937, 35937]] self.check_split_output(shapes, expected_sizes, 8192) if __name__ == '__main__': unittest.main()
2,356
35.261538
74
py
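The expected block sizes above follow a rule that can be reverse-engineered from the numbers (a sketch inferred from these expectations, not the transpiler's actual code): choose min(pservers, total_elements // min_size) blocks, then round each block up to a whole number of dim-0 rows, since variables are split along their first dimension:

import math


def block_sizes(shape, pservers=10, min_size=1024):
    total = 1
    for d in shape:
        total *= d
    row = total // shape[0]  # elements in one dim-0 slice
    splits = min(pservers, max(1, total // min_size))
    block = int(math.ceil(total / float(splits)))
    block = int(math.ceil(block / float(row))) * row  # align to whole rows
    sizes = []
    while total > 0:
        sizes.append(min(block, total))
        total -= block
    return sizes


print(block_sizes([28, 784]))  # [2352] * 9 + [784], matching test_1k above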
Paddle
Paddle-master/python/paddle/fluid/tests/unittests/test_decayed_adagrad_op.py
# Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import unittest import numpy as np from op_test import OpTest class TestDecayedAdagradOp1(OpTest): ''' Test DecayedAdagrad operator with explicit attributes ''' def setUp(self): self.op_type = "decayed_adagrad" param = np.random.random((123, 321)).astype("float32") grad = np.random.random((123, 321)).astype("float32") moment = np.zeros((123, 321)).astype("float32") lr = 0.01 decay = 0.80 epsilon = 1e-8 self.inputs = { 'Param': param, 'Grad': grad, 'Moment': moment, 'LearningRate': np.array([lr]).astype("float32") } self.attrs = {'decay': decay, 'epsilon': epsilon} moment_out = decay * moment + (1 - decay) * grad * grad param_out = param - lr * grad / (np.sqrt(moment_out) + epsilon) self.outputs = {'ParamOut': param_out, 'MomentOut': moment_out} def test_check_output(self): self.check_output() class TestDecayedAdagradOp2(OpTest): ''' Test DecayedAdagrad operator with default attributes ''' def setUp(self): self.op_type = "decayed_adagrad" param = np.random.random((123, 321)).astype("float32") grad = np.random.random((123, 321)).astype("float32") moment = np.zeros((123, 321)).astype("float32") lr = 0.01 decay = 0.95 epsilon = 1e-6 self.inputs = { 'Param': param, 'Grad': grad, 'Moment': moment, 'LearningRate': np.array([lr]).astype("float32") } self.attrs = {'decay': decay, 'epsilon': epsilon} moment_out = decay * moment + (1 - decay) * grad * grad param_out = param - lr * grad / (np.sqrt(moment_out) + epsilon) self.outputs = {'ParamOut': param_out, 'MomentOut': moment_out} def test_check_output(self): self.check_output() if __name__ == "__main__": unittest.main()
2,577
28.976744
74
py
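Both cases above check the same closed-form update; written as equations:

m_t = \mathrm{decay} \cdot m_{t-1} + (1 - \mathrm{decay}) \cdot g_t^2
\theta_t = \theta_{t-1} - \frac{\eta \, g_t}{\sqrt{m_t} + \epsilon}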
Paddle
Paddle-master/python/paddle/fluid/tests/unittests/test_lrn_mkldnn_op.py
# Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import unittest from test_lrn_op import TestLRNOp class TestLRNMKLDNNOp(TestLRNOp): def get_attrs(self): attrs = TestLRNOp.get_attrs(self) attrs['use_mkldnn'] = True return attrs def test_check_output(self): self.check_output(atol=0.002) class TestLRNMKLDNNOpWithIsTest(TestLRNMKLDNNOp): def get_attrs(self): attrs = TestLRNMKLDNNOp.get_attrs(self) attrs['is_test'] = True return attrs def test_check_grad_normal(self): def check_raise_is_test(): try: self.check_grad(['X'], 'Out', max_relative_error=0.01) except Exception as e: t = \ "is_test attribute should be set to False in training phase." if t in str(e): raise AttributeError self.assertRaises(AttributeError, check_raise_is_test) if __name__ == "__main__": unittest.main()
1,555
30.12
77
py
Paddle
Paddle-master/python/paddle/fluid/tests/unittests/test_conv2d_transpose_op.py
# Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

import unittest
import numpy as np

import paddle.fluid.core as core
from op_test import OpTest


def conv2dtranspose_forward_naive(input_, filter_, attrs):
    in_n, in_c, in_h, in_w = input_.shape
    f_c, f_out_c, f_h, f_w = filter_.shape
    groups = attrs['groups']
    assert in_c == f_c
    out_c = f_out_c * groups
    sub_in_c = in_c / groups

    stride, pad, dilations = attrs['strides'], attrs['paddings'], attrs[
        'dilations']

    # effective (dilated) kernel extents
    d_block_h = dilations[0] * (f_h - 1) + 1
    d_block_w = dilations[1] * (f_w - 1) + 1
    out_h = (in_h - 1) * stride[0] + d_block_h
    out_w = (in_w - 1) * stride[1] + d_block_w
    out = np.zeros((in_n, out_c, out_h, out_w))

    for n in range(in_n):
        for i in range(in_h):
            for j in range(in_w):
                for g in range(groups):
                    input_masked = input_[n, g * sub_in_c:(g + 1) * sub_in_c,
                                          i, j]  # shape: (sub_in_c,)
                    input_masked = np.reshape(input_masked, (sub_in_c, 1, 1))
                    input_masked = np.tile(input_masked, (1, f_h, f_w))

                    for k in range(f_out_c):
                        tmp_out = np.sum(
                            input_masked *
                            filter_[g * sub_in_c:(g + 1) * sub_in_c, k, :, :],
                            axis=0)
                        i1, i2 = i * stride[0], i * stride[0] + d_block_h
                        # width indices use stride[1] and the dilated width
                        j1, j2 = j * stride[1], j * stride[1] + d_block_w
                        out[n, g * f_out_c + k, i1:i2:dilations[0], j1:j2:
                            dilations[1]] += tmp_out

    out = out[:, :, pad[0]:out_h - pad[0], pad[1]:out_w - pad[1]]
    return out


class TestConv2dTransposeOp(OpTest):
    def setUp(self):
        # init as conv transpose
        self.use_cudnn = False
        self.init_op_type()
        self.init_test_case()

        input_ = np.random.random(self.input_size).astype("float32")
        filter_ = np.random.random(self.filter_size).astype("float32")

        self.inputs = {'Input': input_, 'Filter': filter_}
        self.attrs = {
            'strides': self.stride,
            'paddings': self.pad,
            'groups': self.groups,
            'dilations': self.dilations,
            'use_cudnn': self.use_cudnn,
            'data_format': 'AnyLayout'  # TODO(dzhwinter): should be fixed later
        }

        output = conv2dtranspose_forward_naive(input_, filter_,
                                               self.attrs).astype('float32')

        self.outputs = {'Output': output}

    def test_check_output(self):
        if self.use_cudnn:
            place = core.CUDAPlace(0)
            self.check_output_with_place(place, atol=1e-5)
        else:
            self.check_output()

    def test_check_grad_no_input(self):
        if self.use_cudnn:
            place = core.CUDAPlace(0)
            self.check_grad_with_place(
                place, ['Filter'],
                'Output',
                max_relative_error=0.02,
                no_grad_set=set(['Input']))
        else:
            self.check_grad(
                ['Filter'],
                'Output',
                max_relative_error=0.02,
                no_grad_set=set(['Input']))

    def test_check_grad_no_filter(self):
        if self.use_cudnn:
            place = core.CUDAPlace(0)
            self.check_grad_with_place(
                place, ['Input'],
                'Output',
                max_relative_error=0.02,
                no_grad_set=set(['Filter']))
        else:
            self.check_grad(
                ['Input'],
                'Output',
                max_relative_error=0.02,
                no_grad_set=set(['Filter']))

    def test_check_grad(self):
        if self.use_cudnn:
            place = core.CUDAPlace(0)
            self.check_grad_with_place(
                place,
                set(['Input', 'Filter']),
                'Output',
                max_relative_error=0.02)
        else:
            self.check_grad(
                set(['Input', 'Filter']), 'Output', max_relative_error=0.02)

    def init_test_case(self):
        self.pad = [0, 0]
        self.stride = [1, 1]
self.dilations = [1, 1] self.groups = 1 self.input_size = [2, 3, 5, 5] # NCHW f_c = self.input_size[1] self.filter_size = [f_c, 6, 3, 3] def init_op_type(self): self.op_type = "conv2d_transpose" class TestWithPad(TestConv2dTransposeOp): def init_test_case(self): self.pad = [1, 1] self.stride = [1, 1] self.dilations = [1, 1] self.groups = 1 self.input_size = [2, 3, 5, 5] # NCHW f_c = self.input_size[1] self.filter_size = [f_c, 6, 3, 3] class TestWithGroups(TestConv2dTransposeOp): def init_test_case(self): self.pad = [1, 1] self.stride = [1, 1] self.dilations = [1, 1] self.groups = 2 self.input_size = [2, 4, 5, 5] # NCHW f_c = self.input_size[1] self.filter_size = [f_c, 3, 3, 3] class TestWithStride(TestConv2dTransposeOp): def init_test_case(self): self.pad = [1, 1] self.stride = [2, 2] self.dilations = [1, 1] self.groups = 1 self.input_size = [2, 3, 5, 5] # NCHW f_c = self.input_size[1] self.filter_size = [f_c, 6, 3, 3] class TestWithDilation(TestConv2dTransposeOp): def init_test_case(self): self.pad = [1, 1] self.stride = [1, 1] self.groups = 1 self.dilations = [2, 2] self.input_size = [2, 3, 5, 5] # NCHW f_c = self.input_size[1] self.filter_size = [f_c, 6, 3, 3] # ------------ test_cudnn ------------ class TestCUDNN(TestConv2dTransposeOp): def init_op_type(self): self.use_cudnn = True self.op_type = "conv2d_transpose" class TestCUDNNWithPad(TestWithPad): def init_test_case(self): self.pad = [1, 1] self.stride = [1, 1] self.groups = 1 self.dilations = [1, 1] self.input_size = [2, 3, 5, 5] # NCHW f_c = self.input_size[1] self.filter_size = [f_c, 6, 3, 3] def init_op_type(self): self.use_cudnn = True self.op_type = "conv2d_transpose" class TestCUDNNWithStride(TestWithStride): def init_test_case(self): self.pad = [1, 1] self.stride = [2, 2] self.groups = 1 self.dilations = [1, 1] self.input_size = [2, 3, 5, 5] # NCHW f_c = self.input_size[1] self.filter_size = [f_c, 6, 3, 3] def init_op_type(self): self.use_cudnn = True self.op_type = "conv2d_transpose" class TestCUDNNWithGroups(TestWithGroups): def init_test_case(self): self.pad = [1, 1] self.stride = [1, 1] self.dilations = [1, 1] self.groups = 2 self.input_size = [2, 4, 5, 5] # NCHW f_c = self.input_size[1] self.filter_size = [f_c, 3, 3, 3] def init_op_type(self): self.use_cudnn = True self.op_type = "conv2d_transpose" # Please Don't remove the following code. # Currently, CI use cudnn V5.0 which not support dilation conv. # class TestCUDNNWithDilation(TestWithDilation): # def init_test_case(self): # self.pad = [1, 1] # self.stride = [2, 2] # self.dilations = [2, 2] # self.input_size = [2, 3, 5, 5] # NCHW # f_c = self.input_size[1] # self.filter_size = [f_c, 6, 3, 3] # # def init_op_type(self): # self.op_type = "conv2d_transpose" if __name__ == '__main__': unittest.main()
8,232
30.544061
80
py
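The core of the naive reference above is its output-size arithmetic. A minimal sketch of the same formula as a standalone helper (the function name is ours, not part of the test file):

# Sketch of the output-size arithmetic used by conv2dtranspose_forward_naive;
# the helper name is hypothetical.
def conv_transpose_out_size(in_size, k, stride, pad, dilation):
    d_block = dilation * (k - 1) + 1      # extent of the dilated filter
    return (in_size - 1) * stride + d_block - 2 * pad

# e.g. TestWithStride: 5x5 input, 3x3 filter, stride 2, pad 1 -> 9x9 output
assert conv_transpose_out_size(5, 3, 2, 1, 1) == 9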
Paddle
Paddle-master/python/paddle/fluid/tests/unittests/test_regularizer.py
# Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import unittest import paddle.fluid.framework as framework import paddle.fluid.optimizer as optimizer import paddle.fluid.regularizer as regularizer from paddle.fluid.backward import append_backward class TestL2DecayRegularizer(unittest.TestCase): def test_l2decay_regularizer(self): program = framework.Program() block = program.global_block() mul_x = block.create_parameter( dtype="float32", shape=[5, 10], lod_level=0, name="mul.x", regularizer=regularizer.L2DecayRegularizer(0.5)) self.assertTrue(mul_x.regularizer is not None) self.assertTrue( isinstance(mul_x.regularizer, regularizer.L2DecayRegularizer)) mul_y = block.create_var( dtype="float32", shape=[10, 8], lod_level=0, name="mul.y") mul_out = block.create_var( dtype="float32", shape=[5, 8], lod_level=0, name="mul.out") block.append_op( type="mul", inputs={"X": mul_x, "Y": mul_y}, outputs={"Out": mul_out}, attrs={"x_num_col_dims": 1}) mean_out = block.create_var( dtype="float32", shape=[1], lod_level=0, name="mean.out") block.append_op( type="mean", inputs={"X": mul_out}, outputs={"Out": mean_out}) params_grads = append_backward(mean_out) self.assertEqual(len(params_grads), 1) count_ops = len(block.ops) params_grads = optimizer.append_regularization_ops(params_grads) self.assertEqual(len(params_grads), 1) self.assertEqual(len(block.ops), count_ops + 2) self.assertEqual(block.ops[-1].type, 'elementwise_add') self.assertEqual(block.ops[-2].type, 'scale') class TestL1DecayRegularizer(unittest.TestCase): def test_l2decay_regularizer(self): program = framework.Program() block = program.global_block() mul_x = block.create_parameter( dtype="float32", shape=[5, 10], lod_level=0, name="mul.x", regularizer=regularizer.L1DecayRegularizer(0.5)) self.assertTrue(mul_x.regularizer is not None) self.assertTrue( isinstance(mul_x.regularizer, regularizer.L1DecayRegularizer)) mul_y = block.create_var( dtype="float32", shape=[10, 8], lod_level=0, name="mul.y") mul_out = block.create_var( dtype="float32", shape=[5, 8], lod_level=0, name="mul.out") block.append_op( type="mul", inputs={"X": mul_x, "Y": mul_y}, outputs={"Out": mul_out}, attrs={"x_num_col_dims": 1}) mean_out = block.create_var( dtype="float32", shape=[1], lod_level=0, name="mean.out") block.append_op( type="mean", inputs={"X": mul_out}, outputs={"Out": mean_out}) params_grads = append_backward(mean_out) self.assertEqual(len(params_grads), 1) count_ops = len(block.ops) params_grads = optimizer.append_regularization_ops(params_grads) self.assertEqual(len(params_grads), 1) self.assertEqual(len(block.ops), count_ops + 3) self.assertEqual(block.ops[-1].type, 'elementwise_add') self.assertEqual(block.ops[-2].type, 'scale') self.assertEqual(block.ops[-3].type, 'sign') if __name__ == '__main__': unittest.main()
4,053
39.54
74
py
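The op-count assertions in both tests follow from how each regularization term decomposes: L2 decay appends a scale and an elementwise_add (two ops), while L1 decay also needs a sign op (three ops). A small numpy sketch of the gradient math being appended, assuming the decay coefficient 0.5 used in the tests (not Paddle API, just the arithmetic):

import numpy as np

# L2: grad += coeff * param       -> scale, elementwise_add (2 ops)
# L1: grad += coeff * sign(param) -> sign, scale, elementwise_add (3 ops)
coeff = 0.5
param = np.array([0.5, -1.0, 2.0])
grad = np.zeros_like(param)
l2_grad = grad + coeff * param            # [0.25, -0.5, 1.0]
l1_grad = grad + coeff * np.sign(param)   # [0.5, -0.5, 0.5]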
Paddle
Paddle-master/python/paddle/fluid/tests/unittests/test_create_op_doc_string.py
# Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

import unittest
import paddle.fluid.layers as layers


class TestDocString(unittest.TestCase):
    def test_layer_doc_string(self):
        print layers.dropout.__doc__


if __name__ == '__main__':
    unittest.main()
831
31
74
py
Paddle
Paddle-master/python/paddle/fluid/tests/unittests/test_modified_huber_loss_op.py
# Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

import unittest
import numpy as np
from op_test import OpTest


def modified_huber_loss_forward(val):
    if val < -1:
        return -4. * val
    elif val < 1:
        return (1. - val) * (1. - val)
    else:
        return 0.


class TestModifiedHuberLossOp(OpTest):
    def setUp(self):
        self.op_type = 'modified_huber_loss'
        samples_num = 32

        x_np = np.random.uniform(-2., 2., (samples_num, 1)).astype('float32')
        y_np = np.random.choice([0, 1], samples_num).reshape(
            (samples_num, 1)).astype('float32')
        product_res = x_np * (2. * y_np - 1.)
        # keep away from the junction of piecewise function
        for pos, val in np.ndenumerate(product_res):
            while abs(val - 1.) < 0.05:
                x_np[pos] = np.random.uniform(-2., 2.)
                y_np[pos] = np.random.choice([0, 1])
                product_res[pos] = x_np[pos] * (2 * y_np[pos] - 1)
                val = product_res[pos]

        self.inputs = {'X': x_np, 'Y': y_np}
        loss = np.vectorize(modified_huber_loss_forward)(product_res)

        self.outputs = {
            'IntermediateVal': product_res.astype('float32'),
            'Out': loss.reshape((samples_num, 1)).astype('float32')
        }

    def test_check_output(self):
        self.check_output()

    def test_check_grad(self):
        self.check_grad(['X'], 'Out', max_relative_error=0.01)


if __name__ == '__main__':
    unittest.main()
2,064
31.777778
77
py
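The reference above is piecewise in the margin val = x * (2y - 1): linear below -1, quadratic on [-1, 1), and zero at or above 1, which is why setUp resamples any point near the val = 1 junction before checking numeric gradients. Hand-checked values for the three branches, reusing modified_huber_loss_forward from the file above:

# Hand-checked branch values for modified_huber_loss_forward:
assert modified_huber_loss_forward(-2.0) == 8.0   # val < -1: -4 * val
assert modified_huber_loss_forward(0.5) == 0.25   # -1 <= val < 1: (1 - val)^2
assert modified_huber_loss_forward(1.5) == 0.0    # val >= 1: flat region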
Paddle
Paddle-master/python/paddle/fluid/tests/unittests/test_squared_l2_distance_op.py
# Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import unittest import numpy as np from op_test import OpTest class TestSquaredL2DistanceOp_f0(OpTest): def setUp(self): self.op_type = "squared_l2_distance" self.inputs = { 'X': np.random.uniform(0.1, 0.6, (2, 3)).astype("float32"), 'Y': np.random.uniform(0.1, 0.6, (2, 3)).astype("float32") } sub_res = self.inputs['X'] - self.inputs['Y'] output = sub_res * sub_res self.outputs = { 'sub_result': sub_res, 'Out': np.expand_dims(output.sum(1), 1) } def test_check_output(self): self.check_output() def test_check_grad(self): self.check_grad(['X', 'Y'], 'Out') class TestSquaredL2DistanceOp_f1(OpTest): def setUp(self): self.op_type = "squared_l2_distance" self.inputs = { 'X': np.random.uniform(0.1, 0.6, (2, 3)).astype("float32"), 'Y': np.random.uniform(0.1, 0.6, (1, 3)).astype("float32") } sub_res = self.inputs['X'] - self.inputs['Y'] output = sub_res * sub_res self.outputs = { 'sub_result': sub_res, 'Out': np.expand_dims(output.sum(1), 1) } def test_check_output(self): self.check_output() def test_check_grad(self): self.check_grad(['X', 'Y'], 'Out') class TestSquaredL2DistanceOp_f2(OpTest): def setUp(self): self.op_type = "squared_l2_distance" self.inputs = { 'X': np.random.uniform(0.1, 0.6, (2, 3, 4)).astype("float32"), 'Y': np.random.uniform(0.1, 0.6, (1, 3, 4)).astype("float32") } sub_res = self.inputs['X'] - self.inputs['Y'] sub_res = sub_res.reshape((2, 3 * 4)) output = sub_res * sub_res self.outputs = { 'sub_result': sub_res, 'Out': np.expand_dims(output.sum(1), 1) } def test_check_output(self): self.check_output() def test_check_grad(self): self.check_grad(['X', 'Y'], 'Out') if __name__ == "__main__": unittest.main()
2,681
30.186047
74
py
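All three cases above reduce to one reference: flatten each sample to a row, subtract with Y broadcast along the batch axis when it has a single row, and sum the squared difference per row. A hedged numpy sketch (the helper name is ours):

import numpy as np

def squared_l2_distance(x, y):
    # broadcasting handles the 1-row Y of the _f1/_f2 cases
    sub = (x - y).reshape(x.shape[0], -1)
    return sub, np.expand_dims((sub * sub).sum(1), 1)

x = np.random.uniform(0.1, 0.6, (2, 3, 4)).astype('float32')
y = np.random.uniform(0.1, 0.6, (1, 3, 4)).astype('float32')
sub, out = squared_l2_distance(x, y)   # sub: (2, 12), out: (2, 1)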
Paddle
Paddle-master/python/paddle/fluid/tests/unittests/test_image_classification_layer.py
# Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import unittest import paddle.fluid as fluid import paddle.fluid.nets as nets from paddle.fluid.framework import Program def conv_block(input, num_filter, groups, dropouts): return nets.img_conv_group( input=input, pool_size=2, pool_stride=2, conv_num_filter=[num_filter] * groups, conv_filter_size=3, conv_act='relu', conv_with_batchnorm=True, conv_batchnorm_drop_rate=dropouts, pool_type='max') class TestLayer(unittest.TestCase): def test_batch_norm_layer(self): main_program = Program() startup_program = Program() with fluid.program_guard(main_program, startup_program): images = fluid.layers.data( name='pixel', shape=[3, 48, 48], dtype='float32') hidden1 = fluid.layers.batch_norm(input=images) hidden2 = fluid.layers.fc(input=hidden1, size=128, act='relu') fluid.layers.batch_norm(input=hidden2) print str(main_program) def test_dropout_layer(self): main_program = Program() startup_program = Program() with fluid.program_guard(main_program, startup_program): images = fluid.layers.data( name='pixel', shape=[3, 48, 48], dtype='float32') fluid.layers.dropout(x=images, dropout_prob=0.5) print str(main_program) def test_img_conv_group(self): main_program = Program() startup_program = Program() with fluid.program_guard(main_program, startup_program): images = fluid.layers.data( name='pixel', shape=[3, 48, 48], dtype='float32') conv1 = conv_block(images, 64, 2, [0.3, 0]) conv_block(conv1, 256, 3, [0.4, 0.4, 0]) print str(main_program) def test_elementwise_add_with_act(self): main_program = Program() startup_program = Program() with fluid.program_guard(main_program, startup_program): image1 = fluid.layers.data( name='pixel1', shape=[3, 48, 48], dtype='float32') image2 = fluid.layers.data( name='pixel2', shape=[3, 48, 48], dtype='float32') fluid.layers.elementwise_add(x=image1, y=image2, act='relu') print(main_program) if __name__ == '__main__': unittest.main()
2,955
34.190476
74
py
Paddle
Paddle-master/python/paddle/fluid/tests/unittests/test_assign_value_op.py
# Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import paddle.fluid as fluid import paddle.fluid.layers as layers import op_test import numpy import unittest import paddle.fluid.framework as framework class TestAssignValueOp(op_test.OpTest): def setUp(self): self.op_type = "assign_value" x = numpy.random.random(size=(2, 5)).astype(numpy.float32) self.inputs = {} self.outputs = {'Out': x} self.attrs = { 'shape': x.shape, 'dtype': framework.convert_np_dtype_to_dtype_(x.dtype), 'fp32_values': [float(v) for v in x.flat] } def test_forward(self): self.check_output() def test_assign(self): val = ( -100 + 200 * numpy.random.random(size=(2, 5))).astype(numpy.int32) x = layers.create_tensor(dtype="float32") layers.assign(input=val, output=x) exe = fluid.Executor(fluid.CPUPlace()) fetched_x = exe.run(fluid.default_main_program(), feed={}, fetch_list=[x])[0] self.assertTrue( numpy.array_equal(fetched_x, val), "fetch_x=%s val=%s" % (fetched_x, val)) self.assertEqual(fetched_x.dtype, val.dtype) if __name__ == '__main__': unittest.main()
1,867
32.963636
78
py
Paddle
Paddle-master/python/paddle/fluid/tests/unittests/test_scope.py
# Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import paddle.fluid.core import unittest class TestScope(unittest.TestCase): def test_create_destroy(self): paddle_c = paddle.fluid.core scope = paddle_c.Scope() self.assertIsNotNone(scope) scope_with_parent = scope.new_scope() self.assertIsNotNone(scope_with_parent) def test_none_variable(self): paddle_c = paddle.fluid.core scope = paddle_c.Scope() self.assertIsNone(scope.find_var("test")) def test_create_var_get_var(self): paddle_c = paddle.fluid.core scope = paddle_c.Scope() var_a = scope.var("var_a") self.assertIsNotNone(var_a) self.assertIsNotNone(scope.find_var('var_a')) scope2 = scope.new_scope() self.assertIsNotNone(scope2.find_var('var_a')) def test_var_get_int(self): paddle_c = paddle.fluid.core scope = paddle_c.Scope() var = scope.var("test_int") var.set_int(10) self.assertTrue(var.is_int()) self.assertEqual(10, var.get_int()) if __name__ == '__main__': unittest.main()
1,701
31.730769
74
py
Paddle
Paddle-master/python/paddle/fluid/tests/unittests/test_activation_op.py
# Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import unittest import numpy as np import paddle.fluid.core as core from op_test import OpTest from scipy.special import expit class TestExp(OpTest): def setUp(self): self.op_type = "exp" self.dtype = np.float32 self.init_dtype() x = np.random.uniform(0.1, 1, [11, 17]).astype(self.dtype) out = np.exp(x) self.inputs = {'X': OpTest.np_dtype_to_fluid_dtype(x)} self.outputs = {'Out': out} def test_check_output(self): self.check_output() def test_check_grad(self): if self.dtype == np.float16: return self.check_grad(['X'], 'Out', max_relative_error=0.007) def init_dtype(self): pass class TestFP16Exp(TestExp): def init_dtype(self): self.dtype = np.float16 def test_check_output(self): if core.is_compiled_with_cuda(): place = core.CUDAPlace(0) if core.is_float16_supported(place): self.check_output_with_place(place, atol=1e-3) class TestSigmoid(OpTest): def setUp(self): self.op_type = "sigmoid" self.dtype = np.float32 self.init_dtype() x = np.random.uniform(-1, 1, [11, 17]).astype(self.dtype) out = 1 / (1 + np.exp(-x)) self.inputs = {'X': OpTest.np_dtype_to_fluid_dtype(x)} self.outputs = {'Out': out} def test_check_output(self): self.check_output() def test_check_grad(self): if self.dtype == np.float16: return self.check_grad(['X'], 'Out', max_relative_error=0.01) def init_dtype(self): pass class TestFP16Sigmoid(TestSigmoid): def init_dtype(self): self.dtype = np.float16 def test_check_output(self): if core.is_compiled_with_cuda(): place = core.CUDAPlace(0) if core.is_float16_supported(place): self.check_output_with_place(place, atol=1e-3) class TestLogSigmoid(OpTest): def setUp(self): self.op_type = "logsigmoid" self.dtype = np.float32 self.init_dtype() x = np.random.uniform(-1, 1, [11, 17]).astype(self.dtype) out = np.log(1 / (1 + np.exp(-x))) self.inputs = {'X': OpTest.np_dtype_to_fluid_dtype(x)} self.outputs = {'Out': out} def test_check_output(self): self.check_output() def test_check_grad(self): if self.dtype == np.float16: return self.check_grad(['X'], 'Out', max_relative_error=0.008) def init_dtype(self): pass class TestFP16LogSigmoid(TestLogSigmoid): def init_dtype(self): self.dtype = np.float16 def test_check_output(self): if core.is_compiled_with_cuda(): place = core.CUDAPlace(0) if core.is_float16_supported(place): self.check_output_with_place(place, atol=1e-3) class TestTanh(OpTest): def setUp(self): self.op_type = "tanh" self.dtype = np.float32 self.init_dtype() x = np.random.uniform(0.1, 1, [11, 17]).astype(self.dtype) out = np.tanh(x) self.inputs = {'X': OpTest.np_dtype_to_fluid_dtype(x)} self.outputs = {'Out': out} def test_check_output(self): self.check_output() def test_check_grad(self): if self.dtype == np.float16: return self.check_grad(['X'], 'Out', max_relative_error=0.007) def init_dtype(self): pass class TestFP16Tanh(TestTanh): def init_dtype(self): self.dtype = np.float16 def test_check_output(self): if core.is_compiled_with_cuda(): place = core.CUDAPlace(0) if 
core.is_float16_supported(place): self.check_output_with_place(place, atol=1e-3) class TestTanhShrink(OpTest): def setUp(self): self.op_type = "tanh_shrink" self.dtype = np.float32 self.init_dtype() x = np.random.uniform(0.1, 1, [10, 17]).astype(self.dtype) out = x - np.tanh(x) self.inputs = {'X': OpTest.np_dtype_to_fluid_dtype(x)} self.outputs = {'Out': out} def test_check_output(self): self.check_output() def test_check_grad(self): if self.dtype == np.float16: return self.check_grad(['X'], 'Out', max_relative_error=0.008) def init_dtype(self): pass class TestFP16TanhShrink(TestTanhShrink): def init_dtype(self): self.dtype = np.float16 def test_check_output(self): if core.is_compiled_with_cuda(): place = core.CUDAPlace(0) if core.is_float16_supported(place): self.check_output_with_place(place, atol=1e-3) class TestHardShrink(OpTest): def setUp(self): self.op_type = "hard_shrink" self.dtype = np.float32 self.init_dtype() threshold = 0.5 x = np.random.uniform(-1, 1, [4, 4]).astype(self.dtype) out = np.copy(x) out[(out >= -threshold) & (out <= threshold)] = 0 self.attrs = {'lambda': threshold} self.inputs = {'X': OpTest.np_dtype_to_fluid_dtype(x)} self.outputs = {'Out': out} def test_check_output(self): self.check_output() def test_check_grad(self): if self.dtype == np.float16: return self.check_grad(['X'], 'Out', max_relative_error=0.005) def init_dtype(self): pass class TestFP16HardShrink(TestHardShrink): def init_dtype(self): self.dtype = np.float16 def test_check_output(self): if core.is_compiled_with_cuda(): place = core.CUDAPlace(0) if core.is_float16_supported(place): self.check_output_with_place(place, atol=1e-3) class TestSoftShrink(OpTest): def setUp(self): self.op_type = "softshrink" self.dtype = np.float32 self.init_dtype() lambda_val = 0.1 x = np.random.uniform(0.25, 10, [4, 4]).astype(self.dtype) out = np.copy(x) out = (out < -lambda_val) * (out + lambda_val) + (out > lambda_val) * ( out - lambda_val) self.attrs = {'lambda': lambda_val} self.inputs = {'X': OpTest.np_dtype_to_fluid_dtype(x)} self.outputs = {'Out': out} def test_check_output(self): self.check_output() def test_check_grad(self): if self.dtype == np.float16: return self.check_grad(['X'], 'Out', max_relative_error=0.007) def init_dtype(self): pass class TestFP16SoftShrink(TestSoftShrink): def init_dtype(self): self.dtype = np.float16 def test_check_output(self): if core.is_compiled_with_cuda(): place = core.CUDAPlace(0) if core.is_float16_supported(place): self.check_output_with_place(place, atol=1e-3) class TestSqrt(OpTest): def setUp(self): self.op_type = "sqrt" self.dtype = np.float32 self.init_dtype() x = np.random.uniform(0.1, 1, [11, 17]).astype(self.dtype) out = np.sqrt(x) self.inputs = {'X': OpTest.np_dtype_to_fluid_dtype(x)} self.outputs = {'Out': out} def test_check_output(self): self.check_output() def test_check_grad(self): if self.dtype == np.float16: return self.check_grad(['X'], 'Out', max_relative_error=0.007) def init_dtype(self): pass class TestFP16Sqrt(TestSqrt): def init_dtype(self): self.dtype = np.float16 def test_check_output(self): if core.is_compiled_with_cuda(): place = core.CUDAPlace(0) if core.is_float16_supported(place): self.check_output_with_place(place, atol=1e-3) class TestAbs(OpTest): def setUp(self): self.op_type = "abs" self.dtype = np.float32 self.init_dtype() x = np.random.uniform(-1, 1, [4, 4]).astype(self.dtype) # Because we set delta = 0.005 in caculating numeric gradient, # if x is too small, such as 0.002, x_neg will be -0.003 # x_pos will be 0.007, so the numeric gradient 
is unaccurate. # we should avoid this x[np.abs(x) < 0.005] = 0.02 out = np.abs(x) self.inputs = {'X': OpTest.np_dtype_to_fluid_dtype(x)} self.outputs = {'Out': out} def test_check_output(self): self.check_output() def test_check_grad(self): if self.dtype == np.float16: return self.check_grad(['X'], 'Out', max_relative_error=0.007) def init_dtype(self): pass class TestFP16Abs(TestAbs): def init_dtype(self): self.dtype = np.float16 def test_check_output(self): if core.is_compiled_with_cuda(): place = core.CUDAPlace(0) if core.is_float16_supported(place): self.check_output_with_place(place, atol=1e-3) class TestCeil(OpTest): def setUp(self): self.op_type = "ceil" self.dtype = np.float32 self.init_dtype() x = np.random.uniform(-1, 1, [4, 4]).astype(self.dtype) out = np.ceil(x) self.inputs = {'X': OpTest.np_dtype_to_fluid_dtype(x)} self.outputs = {'Out': out} def test_check_output(self): self.check_output() # The same reason with TestFloor def init_dtype(self): pass class TestFP16Ceil(TestCeil): def init_dtype(self): self.dtype = np.float16 def test_check_output(self): if core.is_compiled_with_cuda(): place = core.CUDAPlace(0) if core.is_float16_supported(place): self.check_output_with_place(place, atol=1e-3) class TestFloor(OpTest): def setUp(self): self.op_type = "floor" self.dtype = np.float32 self.init_dtype() x = np.random.uniform(-1, 1, [4, 4]).astype(self.dtype) out = np.floor(x) self.inputs = {'X': OpTest.np_dtype_to_fluid_dtype(x)} self.outputs = {'Out': out} def test_check_output(self): self.check_output() # the gradient on floor, ceil, round is undefined. # we return zero as gradient, but the numpy return nan def init_dtype(self): pass class TestFP16Floor(TestFloor): def init_dtype(self): self.dtype = np.float16 def test_check_output(self): if core.is_compiled_with_cuda(): place = core.CUDAPlace(0) if core.is_float16_supported(place): self.check_output_with_place(place, atol=1e-3) class TestCos(OpTest): def setUp(self): self.op_type = "cos" self.dtype = np.float32 self.init_dtype() x = np.random.uniform(-1, 1, [4, 4]).astype(self.dtype) out = np.cos(x) self.inputs = {'X': OpTest.np_dtype_to_fluid_dtype(x)} self.outputs = {'Out': out} def test_check_output(self): self.check_output() def test_check_grad(self): if self.dtype == np.float16: return self.check_grad(['X'], 'Out', max_relative_error=0.007) def init_dtype(self): pass class TestFP16Cos(TestCos): def init_dtype(self): self.dtype = np.float16 def test_check_output(self): if core.is_compiled_with_cuda(): place = core.CUDAPlace(0) if core.is_float16_supported(place): self.check_output_with_place(place, atol=1e-3) class TestSin(OpTest): def setUp(self): self.op_type = "sin" self.dtype = np.float32 self.init_dtype() x = np.random.uniform(-1, 1, [4, 4]).astype(self.dtype) out = np.sin(x) self.inputs = {'X': OpTest.np_dtype_to_fluid_dtype(x)} self.outputs = {'Out': out} def test_check_output(self): self.check_output() def test_check_grad(self): if self.dtype == np.float16: return self.check_grad(['X'], 'Out', max_relative_error=0.007) def init_dtype(self): pass class TestFP16Sin(TestSin): def init_dtype(self): self.dtype = np.float16 def test_check_output(self): if core.is_compiled_with_cuda(): place = core.CUDAPlace(0) if core.is_float16_supported(place): self.check_output_with_place(place, atol=1e-3) class TestRound(OpTest): def setUp(self): self.op_type = "round" self.dtype = np.float32 self.init_dtype() x = np.random.uniform(-1, 1, [4, 4]).astype(self.dtype) out = np.round(x) self.inputs = {'X': OpTest.np_dtype_to_fluid_dtype(x)} 
self.outputs = {'Out': out} def test_check_output(self): self.check_output() def init_dtype(self): pass class TestFP16Round(TestRound): def init_dtype(self): self.dtype = np.float16 def test_check_output(self): if core.is_compiled_with_cuda(): place = core.CUDAPlace(0) if core.is_float16_supported(place): self.check_output_with_place(place, atol=1e-3) class TestRelu(OpTest): def setUp(self): self.op_type = "relu" self.dtype = np.float32 self.init_dtype() x = np.random.uniform(-1, 1, [11, 17]).astype(self.dtype) # The same reason with TestAbs x[np.abs(x) < 0.005] = 0.02 out = np.maximum(x, 0) self.inputs = {'X': OpTest.np_dtype_to_fluid_dtype(x)} self.outputs = {'Out': out} def test_check_output(self): self.check_output() def test_check_grad(self): if self.dtype == np.float16: return self.check_grad(['X'], 'Out', max_relative_error=0.007) def init_dtype(self): pass class TestFP16Relu(TestRelu): def init_dtype(self): self.dtype = np.float16 def test_check_output(self): if core.is_compiled_with_cuda(): place = core.CUDAPlace(0) if core.is_float16_supported(place): self.check_output_with_place(place, atol=1e-3) class TestBRelu(OpTest): def setUp(self): self.op_type = "brelu" self.dtype = np.float32 self.init_dtype() x = np.random.uniform(-1, 1, [4, 4]).astype(self.dtype) t_min = 1.0 t_max = 4.0 # The same with TestAbs x[np.abs(x - t_min) < 0.005] = t_min + 0.02 x[np.abs(x - t_max) < 0.005] = t_max + 0.02 t = np.copy(x) t[t < t_min] = t_min t[t > t_max] = t_max self.inputs = {'X': OpTest.np_dtype_to_fluid_dtype(x)} self.attrs = {'t_min': t_min, 't_max': t_max} self.outputs = {'Out': t} def test_check_output(self): self.check_output() def test_check_grad(self): if self.dtype == np.float16: return self.check_grad(['X'], 'Out', max_relative_error=0.02) def init_dtype(self): pass class TestFP16BRelu(TestBRelu): def init_dtype(self): self.dtype = np.float16 def test_check_output(self): if core.is_compiled_with_cuda(): place = core.CUDAPlace(0) if core.is_float16_supported(place): self.check_output_with_place(place, atol=1e-3) class TestRelu6(OpTest): def setUp(self): self.op_type = "relu6" self.dtype = np.float32 self.init_dtype() x = np.random.uniform(-1, 1, [4, 10]).astype(self.dtype) threshold = 6.0 # The same with TestAbs x[np.abs(x) < 0.005] = 0.02 x[np.abs(x - threshold) < 0.005] = threshold + 0.02 out = np.minimum(np.maximum(x, 0), threshold) self.inputs = {'X': OpTest.np_dtype_to_fluid_dtype(x)} self.attrs = {'threshold': threshold} self.outputs = {'Out': out} def test_check_output(self): self.check_output() def test_check_grad(self): if self.dtype == np.float16: return self.check_grad(['X'], 'Out', max_relative_error=0.02) def init_dtype(self): pass class TestFP16Relu6(TestRelu6): def init_dtype(self): self.dtype = np.float16 def test_check_output(self): if core.is_compiled_with_cuda(): place = core.CUDAPlace(0) if core.is_float16_supported(place): self.check_output_with_place(place, atol=1e-3) class TestSoftRelu(OpTest): def setUp(self): self.op_type = "soft_relu" self.dtype = np.float32 self.init_dtype() x = np.random.uniform(-3, 3, [4, 4]).astype(self.dtype) threshold = 2.0 # The same reason with TestAbs x[np.abs(x - threshold) < 0.005] = threshold + 0.02 x[np.abs(x + threshold) < 0.005] = -threshold + 0.02 t = np.copy(x) t[t < -threshold] = -threshold t[t > threshold] = threshold out = np.log((np.exp(t) + 1)) self.inputs = {'X': OpTest.np_dtype_to_fluid_dtype(x)} self.attrs = {'threshold': threshold} self.outputs = {'Out': out} def test_check_output(self): self.check_output() def 
test_check_grad(self): if self.dtype == np.float16: return self.check_grad(['X'], 'Out', max_relative_error=0.02) def init_dtype(self): pass class TestFP16SoftRelu(TestSoftRelu): def init_dtype(self): self.dtype = np.float16 def test_check_output(self): if core.is_compiled_with_cuda(): place = core.CUDAPlace(0) if core.is_float16_supported(place): self.check_output_with_place(place, atol=1e-3) class TestELU(OpTest): def setUp(self): self.op_type = "elu" self.dtype = np.float32 self.init_dtype() x = np.random.uniform(-3, 3, [4, 4]).astype(self.dtype) alpha = 1. out = np.maximum(0, x) + np.minimum(0, alpha * (np.exp(x) - 1)) # Note: unlike other Relu extensions, point 0 on standard ELU function (i.e. alpha = 1) # is differentiable, so we can skip modifications like x[np.abs(x) < 0.005] = 0.02 here self.inputs = {'X': x} self.attrs = {'alpha': alpha} self.outputs = {'Out': out} def test_check_output(self): self.check_output() def test_check_grad(self): if self.dtype == np.float16: return self.check_grad(['X'], 'Out', max_relative_error=0.02) def init_dtype(self): pass class TestFP16ELU(TestELU): def init_dtype(self): self.dtype = np.float16 def test_check_output(self): if core.is_compiled_with_cuda(): place = core.CUDAPlace(0) if core.is_float16_supported(place): self.check_output_with_place(place, atol=1e-3) class TestReciprocal(OpTest): def setUp(self): self.op_type = "reciprocal" self.dtype = np.float32 self.init_dtype() x = np.random.uniform(1, 2, [11, 17]).astype(self.dtype) out = np.reciprocal(x) self.inputs = {'X': OpTest.np_dtype_to_fluid_dtype(x)} self.outputs = {'Out': out} def test_check_output(self): self.check_output() def test_check_grad(self): if self.dtype == np.float16: return self.check_grad(['X'], 'Out', max_relative_error=0.01) def init_dtype(self): pass class TestFP16Reciprocal(TestReciprocal): def init_dtype(self): self.dtype = np.float16 def test_check_output(self): if core.is_compiled_with_cuda(): place = core.CUDAPlace(0) if core.is_float16_supported(place): self.check_output_with_place(place, atol=1e-3) class TestLog(OpTest): def setUp(self): self.op_type = "log" self.dtype = np.float32 self.init_dtype() x = np.random.uniform(0.1, 1, [11, 17]).astype(self.dtype) out = np.log(x) self.inputs = {'X': OpTest.np_dtype_to_fluid_dtype(x)} self.outputs = {'Out': out} def test_check_output(self): self.check_output() def test_check_grad(self): if self.dtype == np.float16: return self.check_grad(['X'], 'Out', max_relative_error=0.007) def init_dtype(self): pass class TestFP16Log(TestLog): def init_dtype(self): self.dtype = np.float16 def test_check_output(self): if core.is_compiled_with_cuda(): place = core.CUDAPlace(0) if core.is_float16_supported(place): self.check_output_with_place(place, atol=1e-3) class TestSquare(OpTest): def setUp(self): self.op_type = "square" self.dtype = np.float32 self.init_dtype() x = np.random.uniform(0.1, 1, [11, 17]).astype(self.dtype) out = np.square(x) self.inputs = {'X': OpTest.np_dtype_to_fluid_dtype(x)} self.outputs = {'Out': out} def test_check_output(self): self.check_output() def test_check_grad(self): if self.dtype == np.float16: return self.check_grad(['X'], 'Out', max_relative_error=0.007) def init_dtype(self): pass class TestFP16Square(TestSquare): def init_dtype(self): self.dtype = np.float16 def test_check_output(self): if core.is_compiled_with_cuda(): place = core.CUDAPlace(0) if core.is_float16_supported(place): self.check_output_with_place(place, atol=1e-3) class TestPow(OpTest): def setUp(self): self.op_type = "pow" self.dtype = 
np.float32 self.init_dtype() x = np.random.uniform(1, 2, [11, 17]).astype(self.dtype) out = np.power(x, 3) self.inputs = {'X': OpTest.np_dtype_to_fluid_dtype(x)} self.attrs = {'factor': 3.0} self.outputs = {'Out': out} def test_check_output(self): self.check_output() def test_check_grad(self): if self.dtype == np.float16: return self.check_grad(['X'], 'Out', max_relative_error=0.02) def init_dtype(self): pass class TestFP16Pow(TestPow): def init_dtype(self): self.dtype = np.float16 def test_check_output(self): if core.is_compiled_with_cuda(): place = core.CUDAPlace(0) if core.is_float16_supported(place): self.check_output_with_place(place, atol=5e-2) class TestSTanh(OpTest): def setUp(self): self.op_type = "stanh" self.dtype = np.float32 self.init_dtype() x = np.random.uniform(0.1, 1, [11, 17]).astype(self.dtype) scale_a = 2.0 / 3.0 scale_b = 1.7159 out = scale_b * np.tanh(x * scale_a) self.inputs = {'X': OpTest.np_dtype_to_fluid_dtype(x)} self.attrs = {'scale_a': scale_a, 'scale_b': scale_b} self.outputs = {'Out': out} def test_check_output(self): self.check_output() def test_check_grad(self): if self.dtype == np.float16: return self.check_grad(['X'], 'Out', max_relative_error=0.007) def init_dtype(self): pass class TestFP16STanh(TestSTanh): def init_dtype(self): self.dtype = np.float16 def test_check_output(self): if core.is_compiled_with_cuda(): place = core.CUDAPlace(0) if core.is_float16_supported(place): self.check_output_with_place(place, atol=1e-3) class TestSoftplus(OpTest): def setUp(self): self.op_type = "softplus" self.dtype = np.float64 self.init_dtype() x = np.random.uniform(-1, 1, [11, 17]).astype(self.dtype) out = np.log(1 + np.exp(x)) self.inputs = {'X': OpTest.np_dtype_to_fluid_dtype(x)} self.outputs = {'Out': out} def test_check_output(self): self.check_output() def test_check_grad(self): if self.dtype == np.float16: return self.check_grad(['X'], 'Out', max_relative_error=0.007) def init_dtype(self): pass class TestFP16Softplus(TestSoftplus): def init_dtype(self): self.dtype = np.float16 def test_check_output(self): if core.is_compiled_with_cuda(): place = core.CUDAPlace(0) if core.is_float16_supported(place): self.check_output_with_place(place, atol=1e-3) class TestSoftsign(OpTest): def setUp(self): self.op_type = "softsign" self.dtype = np.float32 self.init_dtype() x = np.random.uniform(-1, 1, [11, 17]).astype(self.dtype) out = np.divide(x, 1 + np.abs(x)) self.inputs = {'X': OpTest.np_dtype_to_fluid_dtype(x)} self.outputs = {'Out': out} def test_check_output(self): self.check_output() def test_check_grad(self): if self.dtype == np.float16: return self.check_grad(['X'], 'Out', max_relative_error=0.007) def init_dtype(self): pass class TestFP16Softsign(TestSoftsign): def init_dtype(self): self.dtype = np.float16 def test_check_output(self): if core.is_compiled_with_cuda(): place = core.CUDAPlace(0) if core.is_float16_supported(place): self.check_output_with_place(place, atol=1e-3) class TestThresholdedRelu(OpTest): def setUp(self): self.op_type = "thresholded_relu" self.dtype = np.float32 self.init_dtype() threshold = 0.25 self.relative_error = 0.005 X = np.random.uniform(-1, 1, [11, 17]).astype(self.dtype) # Same reason as TestAbs X[np.abs(X - threshold) < self.relative_error] = threshold + 0.2 out = (X > threshold) * X self.inputs = {'X': OpTest.np_dtype_to_fluid_dtype(X)} self.attrs = {'threshold': threshold} self.outputs = {'Out': out} def test_check_output(self): self.check_output() def test_check_grad(self): if self.dtype == np.float16: return 
self.check_grad(['X'], 'Out', max_relative_error=self.relative_error) def init_dtype(self): pass class TestFP16ThresholdedRelu(TestThresholdedRelu): def init_dtype(self): self.dtype = np.float16 def test_check_output(self): if core.is_compiled_with_cuda(): place = core.CUDAPlace(0) if core.is_float16_supported(place): self.check_output_with_place(place, atol=1e-3) class TestHardSigmoid(OpTest): def setUp(self): self.op_type = "hard_sigmoid" self.dtype = np.float32 self.init_dtype() self.relative_error = 0.002 X = np.random.uniform(-5, 5, [2, 2]).astype("float32") slope = 0.2 offset = 0.5 lower_threshold = -offset / slope upper_threshold = (1 - offset) / slope # Same reason as TestAbs X[np.abs(X - lower_threshold) < self.relative_error] = \ lower_threshold + 0.2 X[np.abs(X - upper_threshold) < self.relative_error] = \ upper_threshold - 0.2 temp = X * slope + offset out = np.maximum(0.0, np.minimum(1.0, temp)) self.inputs = {'X': OpTest.np_dtype_to_fluid_dtype(X)} self.outputs = {'Out': out} def test_check_output(self): self.check_output() def test_check_grad(self): if self.dtype == np.float16: return self.check_grad(['X'], 'Out', max_relative_error=0.002) def init_dtype(self): pass class TestFP16HardSigmoid(TestHardSigmoid): def init_dtype(self): self.dtype = np.float16 def test_check_output(self): if core.is_compiled_with_cuda(): place = core.CUDAPlace(0) if core.is_float16_supported(place): self.check_output_with_place(place, atol=1e-3) class TestSwish(OpTest): def setUp(self): self.op_type = "swish" self.dtype = np.float32 self.init_dtype() X = np.random.uniform(0.1, 1, [11, 17]).astype(self.dtype) beta = 2.3 out = X * expit(beta * X) self.inputs = {'X': OpTest.np_dtype_to_fluid_dtype(X)} self.attrs = {'beta': beta} self.outputs = {'Out': out} def test_check_output(self): self.check_output() def test_check_grad(self): if self.dtype == np.float16: return self.check_grad(['X'], 'Out', max_relative_error=0.008) def init_dtype(self): pass class TestFP16Swish(TestSwish): def init_dtype(self): self.dtype = np.float16 def test_check_output(self): if core.is_compiled_with_cuda(): place = core.CUDAPlace(0) if core.is_float16_supported(place): self.check_output_with_place(place, atol=1e-3) if __name__ == "__main__": unittest.main()
29,486
25.978042
95
py
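Because every reference output in the file above is a plain numpy/scipy expression, the formulas can be spot-checked outside the op-test framework. For example, the swish and softsign references:

import numpy as np
from scipy.special import expit

x = np.linspace(-3, 3, 7)
beta = 2.3
swish_ref = x * expit(beta * x)              # matches the TestSwish reference
softsign_ref = np.divide(x, 1 + np.abs(x))   # matches TestSoftsign
assert swish_ref[3] == 0.0                   # x = 0 -> swish(0) = 0
assert np.all(np.abs(softsign_ref) < 1)      # softsign is bounded by 1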
Paddle
Paddle-master/python/paddle/fluid/tests/unittests/test_listen_and_serv_op.py
# Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import paddle import paddle.fluid as fluid import os import signal import subprocess import time import unittest from multiprocessing import Process from op_test import OpTest def run_pserver(use_cuda, sync_mode, ip, port, trainer_count, trainer_id): x = fluid.layers.data(name='x', shape=[1], dtype='float32') y_predict = fluid.layers.fc(input=x, size=1, act=None) y = fluid.layers.data(name='y', shape=[1], dtype='float32') # loss function cost = fluid.layers.square_error_cost(input=y_predict, label=y) avg_cost = fluid.layers.mean(cost) # optimizer sgd_optimizer = fluid.optimizer.SGD(learning_rate=0.001) sgd_optimizer.minimize(avg_cost) place = fluid.CUDAPlace(0) if use_cuda else fluid.CPUPlace() exe = fluid.Executor(place) port = os.getenv("PADDLE_INIT_PORT", port) pserver_ips = os.getenv("PADDLE_INIT_PSERVERS", ip) # ip,ip... eplist = [] for ip in pserver_ips.split(","): eplist.append(':'.join([ip, port])) pserver_endpoints = ",".join(eplist) # ip:port,ip:port... trainers = int(os.getenv("TRAINERS", trainer_count)) current_endpoint = os.getenv("POD_IP", ip) + ":" + port trainer_id = int(os.getenv("PADDLE_INIT_TRAINER_ID", trainer_id)) t = fluid.DistributeTranspiler() t.transpile( trainer_id, pservers=pserver_endpoints, trainers=trainers, sync_mode=sync_mode) pserver_prog = t.get_pserver_program(current_endpoint) pserver_startup = t.get_startup_program(current_endpoint, pserver_prog) exe.run(pserver_startup) exe.run(pserver_prog) class TestListenAndServOp(OpTest): def setUp(self): self.sleep_time = 5 self.ip = "127.0.0.1" self.port = "6173" self.trainer_count = 1 self.trainer_id = 1 def _raise_signal(self, parent_pid, raised_signal): time.sleep(self.sleep_time) ps_command = subprocess.Popen( "ps -o pid --ppid %d --noheaders" % parent_pid, shell=True, stdout=subprocess.PIPE) ps_output = ps_command.stdout.read() retcode = ps_command.wait() assert retcode == 0, "ps command returned %d" % retcode for pid_str in ps_output.split("\n")[:-1]: try: os.kill(int(pid_str), raised_signal) except Exception: continue def _start_pserver(self, use_cuda, sync_mode): p = Process( target=run_pserver, args=(use_cuda, sync_mode, self.ip, self.port, self.trainer_count, self.trainer_id)) p.start() def test_handle_signal_in_serv_op(self): # run pserver on CPU in sync mode self._start_pserver(False, True) # raise SIGINT to pserver self._raise_signal(os.getpid(), signal.SIGINT) # run pserver on CPU in async mode self._start_pserver(False, False) # raise SIGTERM to pserver self._raise_signal(os.getpid(), signal.SIGTERM) if __name__ == '__main__': unittest.main()
3,674
32.409091
78
py
Paddle
Paddle-master/python/paddle/fluid/tests/unittests/test_hinge_loss_op.py
# Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

import unittest
import numpy as np
from op_test import OpTest


class TestHingeLossOp(OpTest):
    def setUp(self):
        self.op_type = 'hinge_loss'
        samples_num = 64
        logits = np.random.uniform(-10, 10, (samples_num, 1)).astype('float32')
        labels = np.random.randint(0, 2, (samples_num, 1)).astype('float32')

        self.inputs = {
            'Logits': logits,
            'Labels': labels,
        }
        loss = np.maximum(1.0 - (2 * labels - 1) * logits, 0)
        self.outputs = {'Loss': loss}

    def test_check_output(self):
        self.check_output()

    def test_check_grad(self):
        self.check_grad(['Logits'], 'Loss', max_relative_error=0.008)


if __name__ == '__main__':
    unittest.main()
1,355
30.534884
79
py
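The reference first maps the {0, 1} labels onto {-1, +1} via 2 * labels - 1 and then applies the usual hinge, loss = max(0, 1 - y * logit). A hand-checked sketch:

import numpy as np

logits = np.array([[2.0], [0.5], [-1.0]])
labels = np.array([[1.0], [0.0], [1.0]])
loss = np.maximum(1.0 - (2 * labels - 1) * logits, 0)
# correct with margin, misclassified 0-label, misclassified 1-label:
# loss == [[0.0], [1.5], [2.0]]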
Paddle
Paddle-master/python/paddle/fluid/tests/unittests/test_prelu_op.py
# Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

import unittest
import numpy as np
from op_test import OpTest


class PReluTest(OpTest):
    def setUp(self):
        self.op_type = "prelu"
        x_np = np.random.normal(size=(10, 10)).astype("float32")

        for pos, val in np.ndenumerate(x_np):
            # Since the zero point of prelu is not differentiable, keep the
            # randomized inputs away from zero.
            while abs(val) < 1e-3:
                x_np[pos] = np.random.normal()
                val = x_np[pos]

        x_np_sign = np.sign(x_np)
        x_np = x_np_sign * np.maximum(np.abs(x_np), .005)
        alpha_np = np.array([.1], dtype="float32")
        self.inputs = {'X': x_np, 'Alpha': alpha_np}
        out_np = np.maximum(self.inputs['X'], 0.)
        out_np = out_np + np.minimum(self.inputs['X'],
                                     0.) * self.inputs['Alpha']
        assert out_np is not self.inputs['X']
        self.outputs = {'Out': out_np}

    def test_check_output(self):
        self.check_output()

    def test_check_grad(self):
        self.check_grad(['X'], 'Out')


if __name__ == "__main__":
    unittest.main()
1,703
32.411765
78
py
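The two-step reference above is the standard PReLU identity f(x) = max(0, x) + alpha * min(0, x). An equivalent, hand-checked numpy form:

import numpy as np

x = np.array([-2.0, -0.5, 0.5, 2.0])
alpha = 0.1
out = np.where(x > 0, x, alpha * x)   # [-0.2, -0.05, 0.5, 2.0]
assert np.allclose(out, np.maximum(x, 0.) + np.minimum(x, 0.) * alpha)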
Paddle
Paddle-master/python/paddle/fluid/tests/unittests/test_parallel_executor_crf.py
# Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import paddle.dataset.conll05 as conll05 import paddle.fluid as fluid import unittest import paddle import numpy as np word_dict, verb_dict, label_dict = conll05.get_dict() word_dict_len = len(word_dict) label_dict_len = len(label_dict) pred_dict_len = len(verb_dict) mark_dict_len = 2 word_dim = 32 mark_dim = 5 hidden_dim = 512 depth = 8 mix_hidden_lr = 1e-3 embedding_name = 'emb' def db_lstm(word, predicate, ctx_n2, ctx_n1, ctx_0, ctx_p1, ctx_p2, mark, is_sparse, **ignored): # 8 features predicate_embedding = fluid.layers.embedding( input=predicate, is_sparse=is_sparse, size=[pred_dict_len, word_dim], dtype='float32', param_attr='vemb') mark_embedding = fluid.layers.embedding( input=mark, is_sparse=is_sparse, size=[mark_dict_len, mark_dim], dtype='float32') word_input = [word, ctx_n2, ctx_n1, ctx_0, ctx_p1, ctx_p2] emb_layers = [ fluid.layers.embedding( size=[word_dict_len, word_dim], is_sparse=is_sparse, input=x, param_attr=fluid.ParamAttr( name=embedding_name, trainable=False)) for x in word_input ] emb_layers.append(predicate_embedding) emb_layers.append(mark_embedding) hidden_0_layers = [ fluid.layers.fc(input=emb, size=hidden_dim, act='tanh') for emb in emb_layers ] hidden_0 = fluid.layers.sums(input=hidden_0_layers) lstm_0 = fluid.layers.dynamic_lstm( input=hidden_0, size=hidden_dim, candidate_activation='relu', gate_activation='sigmoid', cell_activation='sigmoid') # stack L-LSTM and R-LSTM with direct edges input_tmp = [hidden_0, lstm_0] for i in range(1, depth): mix_hidden = fluid.layers.sums(input=[ fluid.layers.fc(input=input_tmp[0], size=hidden_dim, act='tanh'), fluid.layers.fc(input=input_tmp[1], size=hidden_dim, act='tanh') ]) lstm = fluid.layers.dynamic_lstm( input=mix_hidden, size=hidden_dim, candidate_activation='relu', gate_activation='sigmoid', cell_activation='sigmoid', is_reverse=((i % 2) == 1)) input_tmp = [mix_hidden, lstm] feature_out = fluid.layers.sums(input=[ fluid.layers.fc(input=input_tmp[0], size=label_dict_len, act='tanh'), fluid.layers.fc(input=input_tmp[1], size=label_dict_len, act='tanh') ]) return feature_out class TestCRFModel(unittest.TestCase): def check_network_convergence(self, is_sparse, build_strategy=None): main = fluid.Program() startup = fluid.Program() with fluid.program_guard(main, startup): word = fluid.layers.data( name='word_data', shape=[1], dtype='int64', lod_level=1) predicate = fluid.layers.data( name='verb_data', shape=[1], dtype='int64', lod_level=1) ctx_n2 = fluid.layers.data( name='ctx_n2_data', shape=[1], dtype='int64', lod_level=1) ctx_n1 = fluid.layers.data( name='ctx_n1_data', shape=[1], dtype='int64', lod_level=1) ctx_0 = fluid.layers.data( name='ctx_0_data', shape=[1], dtype='int64', lod_level=1) ctx_p1 = fluid.layers.data( name='ctx_p1_data', shape=[1], dtype='int64', lod_level=1) ctx_p2 = fluid.layers.data( name='ctx_p2_data', shape=[1], dtype='int64', lod_level=1) mark = fluid.layers.data( name='mark_data', shape=[1], 
dtype='int64', lod_level=1) feature_out = db_lstm(**locals()) target = fluid.layers.data( name='target', shape=[1], dtype='int64', lod_level=1) crf_cost = fluid.layers.linear_chain_crf( input=feature_out, label=target, param_attr=fluid.ParamAttr( name='crfw', learning_rate=1e-1)) avg_cost = fluid.layers.mean(crf_cost) sgd_optimizer = fluid.optimizer.SGD( learning_rate=fluid.layers.exponential_decay( learning_rate=0.01, decay_steps=100000, decay_rate=0.5, staircase=True)) sgd_optimizer.minimize(avg_cost) train_data = paddle.batch( paddle.reader.shuffle( paddle.dataset.conll05.test(), buf_size=8192), batch_size=16) place = fluid.CUDAPlace(0) exe = fluid.Executor(place) exe.run(startup) pe = fluid.ParallelExecutor( use_cuda=True, loss_name=avg_cost.name, build_strategy=build_strategy) feeder = fluid.DataFeeder( feed_list=[ word, ctx_n2, ctx_n1, ctx_0, ctx_p1, ctx_p2, predicate, mark, target ], place=fluid.CPUPlace()) data = train_data() for i in xrange(10): cur_batch = next(data) print map(np.array, pe.run(feed=feeder.feed(cur_batch), fetch_list=[avg_cost.name]))[0] def test_update_sparse_parameter_all_reduce(self): build_strategy = fluid.BuildStrategy() build_strategy.reduce_strategy = fluid.BuildStrategy.ReduceStrategy.AllReduce self.check_network_convergence( is_sparse=True, build_strategy=build_strategy) def test_update_dense_parameter_all_reduce(self): build_strategy = fluid.BuildStrategy() build_strategy.reduce_strategy = fluid.BuildStrategy.ReduceStrategy.AllReduce self.check_network_convergence( is_sparse=False, build_strategy=build_strategy) def test_update_sparse_parameter_reduce(self): build_strategy = fluid.BuildStrategy() build_strategy.reduce_strategy = fluid.BuildStrategy.ReduceStrategy.Reduce self.check_network_convergence( is_sparse=True, build_strategy=build_strategy) def test_update_dense_parameter_reduce(self): build_strategy = fluid.BuildStrategy() build_strategy.reduce_strategy = fluid.BuildStrategy.ReduceStrategy.Reduce self.check_network_convergence( is_sparse=False, build_strategy=build_strategy) if __name__ == '__main__': unittest.main()
7,145
35.090909
85
py
Paddle
Paddle-master/python/paddle/fluid/tests/unittests/test_crf_decoding_op.py
# Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

import unittest
import random
import numpy as np
from op_test import OpTest


class CRFDecoding(object):
    def __init__(self, emission_weights, transition_weights,
                 seq_start_positions):
        assert (emission_weights.shape[0] == seq_start_positions[-1])
        self.tag_num = emission_weights.shape[1]
        self.seq_num = len(seq_start_positions) - 1

        self.seq_start_positions = seq_start_positions
        self.x = emission_weights

        self.a = transition_weights[0, :]
        self.b = transition_weights[1, :]
        self.w = transition_weights[2:, :]

        self.track = np.zeros(
            (seq_start_positions[-1], self.tag_num), dtype="int64")
        self.decoded_path = np.zeros(
            (seq_start_positions[-1], 1), dtype="int64")

    def _decode_one_sequence(self, decoded_path, x):
        seq_len, tag_num = x.shape
        alpha = np.zeros((seq_len, tag_num), dtype="float64")
        track = np.zeros((seq_len, tag_num), dtype="int64")

        for i in range(tag_num):
            alpha[0, i] = self.a[i] + x[0, i]

        for k in range(1, seq_len):
            for i in range(tag_num):
                max_score = -np.finfo("float64").max
                max_idx = 0
                for j in range(tag_num):
                    score = alpha[k - 1, j] + self.w[j, i]
                    if score > max_score:
                        max_score = score
                        max_idx = j
                alpha[k, i] = max_score + x[k, i]
                track[k, i] = max_idx

        max_score = -np.finfo("float64").max
        max_idx = 0
        for i in range(tag_num):
            score = alpha[seq_len - 1, i] + self.b[i]
            if score > max_score:
                max_score = score
                max_idx = i

        decoded_path[-1] = max_idx
        for i in range(seq_len - 1, 0, -1):
            decoded_path[i - 1] = max_idx = track[i, max_idx]

    def decode(self):
        for i in range(self.seq_num):
            start = self.seq_start_positions[i]
            end = self.seq_start_positions[i + 1]
            self._decode_one_sequence(self.decoded_path[start:end, :],
                                      self.x[start:end, :])
        return self.decoded_path


class TestCRFDecodingOp1(OpTest):
    """
    Compare the dynamic program with randomly generated parameters and inputs
    with ground truth not being given.
    """

    def set_test_data(self):
        SEQ_NUM = 3
        TAG_NUM = 17
        MAX_SEQ_LEN = 10

        lod = [[0]]
        for i in range(SEQ_NUM):
            lod[-1].append(lod[-1][-1] + random.randint(1, MAX_SEQ_LEN))
        emission = np.random.uniform(
            -1, 1, [lod[-1][-1], TAG_NUM]).astype("float64")
        transition = np.random.uniform(
            -0.5, 0.5, [TAG_NUM + 2, TAG_NUM]).astype("float64")

        self.inputs = {
            "Emission": (emission, lod),
            "Transition": transition,
        }

        decoder = CRFDecoding(emission, transition, lod[0])
        decoded_path = decoder.decode()

        self.outputs = {"ViterbiPath": decoded_path}

    def setUp(self):
        self.op_type = "crf_decoding"
        self.set_test_data()

    def test_check_output(self):
        self.check_output()


class TestCRFDecodingOp2(OpTest):
    """
    Compare the dynamic program with brute force computation with
    ground truth being given.
    """

    def setUp(self):
        self.op_type = "crf_decoding"
        TAG_NUM = 5

        lod = [[0, 1, 3, 6, 10]]
        transition = np.repeat(
            np.arange(
                TAG_NUM, dtype="float64").reshape(1, TAG_NUM),
            TAG_NUM + 2,
            axis=0)
        emission = np.repeat(
            np.arange(
                TAG_NUM, dtype="float64").reshape(1, TAG_NUM),
            lod[-1][-1],
            axis=0)

        labels = np.random.randint(
            low=0, high=TAG_NUM, size=(lod[-1][-1], 1), dtype="int64")
        predicted_labels = np.ones(
            (lod[-1][-1], 1), dtype="int64") * (TAG_NUM - 1)
        expected_output = (labels == predicted_labels).astype("int64")

        self.inputs = {
            "Emission": (emission, lod),
            "Transition": transition,
            "Label": (labels, lod)
        }

        self.outputs = {"ViterbiPath": expected_output}

    def test_check_output(self):
        self.check_output()


if __name__ == "__main__":
    unittest.main()
5,114
30.770186
80
py
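The triple loop in _decode_one_sequence above is a textbook Viterbi pass. The same recurrence can be written with vectorized numpy, which is convenient for cross-checking small cases (a sketch; the function name is ours):

import numpy as np

def viterbi(x, a, b, w):
    # x: (seq_len, tag_num) emissions; a/b: start/stop weights; w: transitions
    seq_len, tag_num = x.shape
    alpha = np.zeros((seq_len, tag_num))
    track = np.zeros((seq_len, tag_num), dtype="int64")
    alpha[0] = a + x[0]
    for k in range(1, seq_len):
        scores = alpha[k - 1][:, None] + w   # scores[j, i]: tag j -> tag i
        track[k] = scores.argmax(axis=0)
        alpha[k] = scores.max(axis=0) + x[k]
    path = [int(np.argmax(alpha[-1] + b))]
    for k in range(seq_len - 1, 0, -1):
        path.append(int(track[k, path[-1]]))
    return path[::-1]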
Paddle
Paddle-master/python/paddle/fluid/tests/unittests/test_minus_op.py
# Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

import unittest
import numpy as np
from op_test import OpTest


class TestMinusOp(OpTest):
    def setUp(self):
        self.op_type = "minus"
        self.inputs = {
            'X': np.random.random((32, 84)).astype("float32"),
            'Y': np.random.random((32, 84)).astype("float32")
        }
        self.outputs = {'Out': (self.inputs['X'] - self.inputs['Y'])}

    def test_check_output(self):
        self.check_output()

    def test_check_grad(self):
        self.check_grad(['X', 'Y'], 'Out')


if __name__ == "__main__":
    unittest.main()
1,171
29.842105
74
py
Paddle
Paddle-master/python/paddle/fluid/tests/unittests/test_optimizer.py
# Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import unittest import paddle.fluid.framework as framework import paddle.fluid.optimizer as optimizer from paddle.fluid.backward import append_backward class TestOptimizer(unittest.TestCase): def test_sgd_optimizer(self): def check_sgd_optimizer(optimizer_attr): init_program = framework.Program() program = framework.Program() block = program.global_block() mul_x = block.create_parameter( dtype="float32", shape=[5, 10], lod_level=0, name="mul.x", optimize_attr=optimizer_attr) mul_y = block.create_var( dtype="float32", shape=[10, 8], lod_level=0, name="mul.y") mul_out = block.create_var( dtype="float32", shape=[5, 8], lod_level=0, name="mul.out") mean_out = block.create_var( dtype="float32", shape=[1], lod_level=0, name="mean.out") block.append_op( type="mul", inputs={"X": mul_x, "Y": mul_y}, outputs={"Out": mul_out}, attrs={"x_num_col_dims": 1}) block.append_op( type="mean", inputs={"X": mul_out}, outputs={"Out": mean_out}) sgd_optimizer = optimizer.SGDOptimizer(learning_rate=0.01) opts, _ = sgd_optimizer.minimize(mean_out, init_program) return opts opts = check_sgd_optimizer({'learning_rate': 1.1}) self.assertEqual(len(opts), 3) self.assertEqual([op.type for op in opts], ["fill_constant", "elementwise_mul", "sgd"]) opts = check_sgd_optimizer({'learning_rate': 1.0}) self.assertEqual(len(opts), 1) self.assertEqual([op.type for op in opts], ["sgd"]) class TestMomentumOptimizer(unittest.TestCase): class MockMomentum(optimizer.MomentumOptimizer): def get_accumulators(self): return self._accumulators def get_velocity_str(self): return self._velocity_acc_str def test_vanilla_momentum_optimizer(self): init_program = framework.Program() program = framework.Program() block = program.global_block() mul_x = block.create_parameter( dtype="float32", shape=[5, 10], lod_level=0, name="mul.x", optimize_attr={'learning_rate': 1.1}) mul_y = block.create_var( dtype="float32", shape=[10, 8], lod_level=0, name="mul.y") mul_out = block.create_var( dtype="float32", shape=[5, 8], lod_level=0, name="mul.out") block.append_op( type="mul", inputs={"X": mul_x, "Y": mul_y}, outputs={"Out": mul_out}, attrs={"x_num_col_dims": 1}) learning_rate = 0.01 momentum_optimizer = self.MockMomentum( learning_rate=learning_rate, momentum=0.2) mean_out = block.create_var( dtype="float32", shape=[1], lod_level=0, name="mean.out") block.append_op( type="mean", inputs={"X": mul_out}, outputs={"Out": mean_out}) params_grads = append_backward(mean_out) self.assertEqual(len(params_grads), 1) self.assertEqual(len(momentum_optimizer.get_accumulators()), 0) opts = momentum_optimizer.create_optimization_pass( params_grads, mul_out, init_program) self.assertEqual(len(opts), 3) sgd_op = opts[-1] self.assertEqual([op.type for op in opts], ["fill_constant", "elementwise_mul", "momentum"]) self.assertFalse(sgd_op.attr('use_nesterov')) # Check accumulators accumulators = momentum_optimizer.get_accumulators() self.assertEqual(len(accumulators), 1) 
self.assertTrue(momentum_optimizer.get_velocity_str() in accumulators) velocity_acc = accumulators[momentum_optimizer.get_velocity_str()] self.assertEqual(len(velocity_acc), 1) self.assertTrue(mul_x.name in velocity_acc) # Check init_program init_ops = init_program.global_block().ops self.assertEqual(len(init_ops), 2) self.assertEqual(init_ops[0].type, "fill_constant") self.assertAlmostEqual(init_ops[0].attr('value'), learning_rate) self.assertEqual(init_ops[1].type, "fill_constant") self.assertAlmostEqual(init_ops[1].attr('value'), 0.0) def test_nesterov_momentum_optimizer(self): init_program = framework.Program() program = framework.Program() block = program.global_block() mul_x = block.create_parameter( dtype="float32", shape=[5, 10], lod_level=0, name="mul.x", optimize_attr={'learning_rate': 1.1}) mul_y = block.create_var( dtype="float32", shape=[10, 8], lod_level=0, name="mul.y") mul_out = block.create_var( dtype="float32", shape=[5, 8], lod_level=0, name="mul.out") block.append_op( type="mul", inputs={"X": mul_x, "Y": mul_y}, outputs={"Out": mul_out}, attrs={"x_num_col_dims": 1}) mean_out = block.create_var( dtype="float32", shape=[1], lod_level=0, name="mean.out") block.append_op( type="mean", inputs={"X": mul_out}, outputs={"Out": mean_out}) learning_rate = 0.01 momentum_optimizer = self.MockMomentum( learning_rate=learning_rate, momentum=0.2, use_nesterov=True) params_grads = append_backward(mean_out) self.assertEqual(len(params_grads), 1) self.assertEqual(len(momentum_optimizer.get_accumulators()), 0) opts = momentum_optimizer.create_optimization_pass( params_grads, mul_out, init_program) self.assertEqual(len(opts), 3) sgd_op = opts[-1] self.assertEqual([op.type for op in opts], ["fill_constant", "elementwise_mul", "momentum"]) self.assertTrue(sgd_op.attr('use_nesterov')) # Check accumulators accumulators = momentum_optimizer.get_accumulators() self.assertEqual(len(accumulators), 1) self.assertTrue(momentum_optimizer.get_velocity_str() in accumulators) velocity_acc = accumulators[momentum_optimizer.get_velocity_str()] self.assertEqual(len(velocity_acc), 1) self.assertTrue(mul_x.name in velocity_acc) # Check init_program init_ops = init_program.global_block().ops self.assertEqual(len(init_ops), 2) self.assertEqual(init_ops[0].type, "fill_constant") self.assertAlmostEqual(init_ops[0].attr('value'), learning_rate) self.assertEqual(init_ops[1].type, "fill_constant") self.assertAlmostEqual(init_ops[1].attr('value'), 0.0) class TestAdagradOptimizer(unittest.TestCase): class MockAdagrad(optimizer.AdagradOptimizer): def get_accumulators(self): return self._accumulators def get_moment_str(self): return self._moment_acc_str def test_adagrad_optimizer(self): init_program = framework.Program() program = framework.Program() block = program.global_block() mul_x = block.create_parameter( dtype="float32", shape=[5, 10], lod_level=0, name="mul.x", optimize_attr={'learning_rate': 1.1}) mul_y = block.create_var( dtype="float32", shape=[10, 8], lod_level=0, name="mul.y") mul_out = block.create_var( dtype="float32", shape=[5, 8], lod_level=0, name="mul.out") block.append_op( type="mul", inputs={"X": mul_x, "Y": mul_y}, outputs={"Out": mul_out}, attrs={"x_num_col_dims": 1}) mean_out = block.create_var( dtype="float32", shape=[1], lod_level=0, name="mean.out") block.append_op( type="mean", inputs={"X": mul_out}, outputs={"Out": mean_out}) learning_rate = 0.01 adagrad_optimizer = self.MockAdagrad( learning_rate=learning_rate, epsilon=1.0e-6) params_grads = append_backward(mean_out) 
self.assertEqual(len(params_grads), 1) self.assertEqual(len(adagrad_optimizer.get_accumulators()), 0) opts = adagrad_optimizer.create_optimization_pass(params_grads, mul_out, init_program) self.assertEqual(len(opts), 3) self.assertEqual([op.type for op in opts], ["fill_constant", "elementwise_mul", "adagrad"]) # Check accumulators accumulators = adagrad_optimizer.get_accumulators() self.assertEqual(len(accumulators), 1) self.assertTrue(adagrad_optimizer.get_moment_str() in accumulators) moment_acc = accumulators[adagrad_optimizer.get_moment_str()] self.assertEqual(len(moment_acc), 1) self.assertTrue(mul_x.name in moment_acc) # Check init_program init_ops = init_program.global_block().ops self.assertEqual(len(init_ops), 2) self.assertEqual(init_ops[0].type, "fill_constant") self.assertAlmostEqual(init_ops[0].attr('value'), learning_rate) self.assertEqual(init_ops[1].type, "fill_constant") self.assertAlmostEqual(init_ops[1].attr('value'), 0.0) class TestAdamOptimizer(unittest.TestCase): class MockAdam(optimizer.AdamOptimizer): def get_accumulators(self): return self._accumulators def get_moment1_str(self): return self._moment1_acc_str def get_moment2_str(self): return self._moment2_acc_str def test_adam_optimizer(self): init_program = framework.Program() program = framework.Program() block = program.global_block() mul_x = block.create_parameter( dtype="float32", shape=[5, 10], lod_level=0, name="mul.x", optimize_attr={'learning_rate': 1.1}) mul_y = block.create_var( dtype="float32", shape=[10, 8], lod_level=0, name="mul.y") mul_out = block.create_var( dtype="float32", shape=[5, 8], lod_level=0, name="mul.out") block.append_op( type="mul", inputs={"X": mul_x, "Y": mul_y}, outputs={"Out": mul_out}, attrs={"x_num_col_dims": 1}) mean_out = block.create_var( dtype="float32", shape=[1], lod_level=0, name="mean.out") block.append_op( type="mean", inputs={"X": mul_out}, outputs={"Out": mean_out}) learning_rate = 0.01 adam_optimizer = self.MockAdam( learning_rate=learning_rate, beta1=0.9, beta2=0.999) params_grads = append_backward(mean_out) self.assertEqual(len(params_grads), 1) self.assertEqual(len(adam_optimizer.get_accumulators()), 0) opts = adam_optimizer.create_optimization_pass(params_grads, mul_out, init_program) self.assertEqual(len(opts), 5) self.assertEqual( [op.type for op in opts], ["fill_constant", "elementwise_mul", "adam", "scale", "scale"]) # Check accumulators accumulators = adam_optimizer.get_accumulators() self.assertEqual(len(accumulators), 2) self.assertTrue(adam_optimizer.get_moment1_str() in accumulators) self.assertTrue(adam_optimizer.get_moment2_str() in accumulators) moment1_acc = accumulators[adam_optimizer.get_moment1_str()] moment2_acc = accumulators[adam_optimizer.get_moment2_str()] self.assertEqual(len(moment1_acc), 1) self.assertEqual(len(moment2_acc), 1) self.assertTrue(mul_x.name in moment1_acc) self.assertTrue(mul_x.name in moment2_acc) # Check init_program init_ops = init_program.global_block().ops self.assertEqual(len(init_ops), 5) self.assertEqual(init_ops[0].type, "fill_constant") self.assertAlmostEqual(init_ops[0].attr('value'), learning_rate) class TestAdamaxOptimizer(unittest.TestCase): class MockAdamax(optimizer.AdamaxOptimizer): def get_accumulators(self): return self._accumulators def get_moment_str(self): return self._moment_acc_str def get_inf_norm_str(self): return self._inf_norm_acc_str def test_adamax_optimizer(self): init_program = framework.Program() program = framework.Program() block = program.global_block() mul_x = block.create_parameter( 
dtype="float32", shape=[5, 10], lod_level=0, name="mul.x", optimize_attr={'learning_rate': 1.1}) mul_y = block.create_var( dtype="float32", shape=[10, 8], lod_level=0, name="mul.y") mul_out = block.create_var( dtype="float32", shape=[5, 8], lod_level=0, name="mul.out") block.append_op( type="mul", inputs={"X": mul_x, "Y": mul_y}, outputs={"Out": mul_out}, attrs={"x_num_col_dims": 1}) mean_out = block.create_var( dtype="float32", shape=[1], lod_level=0, name="mean.out") block.append_op( type="mean", inputs={"X": mul_out}, outputs={"Out": mean_out}) learning_rate = 0.01 adamax_optimizer = self.MockAdamax( learning_rate=learning_rate, beta1=0.9, beta2=0.999) params_grads = append_backward(mean_out) self.assertEqual(len(params_grads), 1) self.assertEqual(len(adamax_optimizer.get_accumulators()), 0) opts = adamax_optimizer.create_optimization_pass(params_grads, mul_out, init_program) self.assertEqual(len(opts), 4) self.assertEqual( [op.type for op in opts], ["fill_constant", "elementwise_mul", "adamax", "scale"]) # Check accumulators accumulators = adamax_optimizer.get_accumulators() self.assertEqual(len(accumulators), 2) self.assertTrue(adamax_optimizer.get_moment_str() in accumulators) self.assertTrue(adamax_optimizer.get_inf_norm_str() in accumulators) moment_acc = accumulators[adamax_optimizer.get_moment_str()] inf_norm_acc = accumulators[adamax_optimizer.get_inf_norm_str()] self.assertEqual(len(moment_acc), 1) self.assertEqual(len(inf_norm_acc), 1) self.assertTrue(mul_x.name in moment_acc) self.assertTrue(mul_x.name in inf_norm_acc) # Check init_program init_ops = init_program.global_block().ops self.assertEqual(len(init_ops), 4) self.assertEqual(init_ops[0].type, "fill_constant") self.assertAlmostEqual(init_ops[0].attr('value'), learning_rate) class TestDecayedAdagradOptimizer(unittest.TestCase): class MockDecayedAdagrad(optimizer.DecayedAdagradOptimizer): def get_accumulators(self): return self._accumulators def get_moment_str(self): return self._moment_acc_str def test_decayed_adagrad_optimizer(self): init_program = framework.Program() program = framework.Program() block = program.global_block() mul_x = block.create_parameter( dtype="float32", shape=[5, 10], lod_level=0, name="mul.x", optimize_attr={'learning_rate': 1.1}) mul_y = block.create_var( dtype="float32", shape=[10, 8], lod_level=0, name="mul.y") mul_out = block.create_var( dtype="float32", shape=[5, 8], lod_level=0, name="mul.out") block.append_op( type="mul", inputs={"X": mul_x, "Y": mul_y}, outputs={"Out": mul_out}, attrs={"x_num_col_dims": 1}) mean_out = block.create_var( dtype="float32", shape=[1], lod_level=0, name="mean.out") block.append_op( type="mean", inputs={"X": mul_out}, outputs={"Out": mean_out}) learning_rate = 0.01 decayed_adagrad_optimizer = self.MockDecayedAdagrad( learning_rate=learning_rate, decay=0.95, epsilon=1.0e-6) params_grads = append_backward(mean_out) self.assertEqual(len(params_grads), 1) self.assertEqual(len(decayed_adagrad_optimizer.get_accumulators()), 0) opts = decayed_adagrad_optimizer.create_optimization_pass( params_grads, mul_out, init_program) self.assertEqual(len(opts), 3) self.assertEqual( [op.type for op in opts], ["fill_constant", "elementwise_mul", "decayed_adagrad"]) # Check accumulators accumulators = decayed_adagrad_optimizer.get_accumulators() self.assertEqual(len(accumulators), 1) self.assertTrue( decayed_adagrad_optimizer.get_moment_str() in accumulators) moment_acc = accumulators[decayed_adagrad_optimizer.get_moment_str()] self.assertEqual(len(moment_acc), 1) 
self.assertTrue(mul_x.name in moment_acc) # Check init_program init_ops = init_program.global_block().ops self.assertEqual(len(init_ops), 2) self.assertEqual(init_ops[0].type, "fill_constant") self.assertAlmostEqual(init_ops[0].attr('value'), learning_rate) self.assertEqual(init_ops[1].type, "fill_constant") self.assertAlmostEqual(init_ops[1].attr('value'), 0.0) if __name__ == '__main__': unittest.main()
18415
40.949886
80
py
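For orientation, a minimal NumPy sketch of the momentum updates the momentum tests above verify; the plain and Nesterov formulations are assumed to be the standard ones, and all names, shapes, and values below are illustrative rather than taken from the operator implementation.

import numpy as np

lr, mu = 0.01, 0.2
param = np.random.rand(5, 10).astype('float32')
grad = np.random.rand(5, 10).astype('float32')
velocity = np.zeros_like(param)

velocity = mu * velocity + grad                   # accumulator update
vanilla = param - lr * velocity                   # plain momentum step
nesterov = param - lr * (grad + mu * velocity)    # Nesterov lookahead step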
Paddle
Paddle-master/python/paddle/fluid/tests/unittests/test_expand_op.py
# Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import unittest import numpy as np from op_test import OpTest class TestExpandOpRank1(OpTest): def setUp(self): self.op_type = "expand" self.inputs = {'X': np.random.random(12).astype("float32")} self.attrs = {'expand_times': [2]} output = np.tile(self.inputs['X'], 2) self.outputs = {'Out': output} def test_check_output(self): self.check_output() def test_check_grad(self): self.check_grad(['X'], 'Out') class TestExpandOpRank2_Corner(OpTest): def setUp(self): self.op_type = "expand" self.inputs = {'X': np.random.random((12, 14)).astype("float32")} self.attrs = {'expand_times': [1, 1]} output = np.tile(self.inputs['X'], (1, 1)) self.outputs = {'Out': output} def test_check_output(self): self.check_output() def test_check_grad(self): self.check_grad(['X'], 'Out') class TestExpandOpRank2(OpTest): def setUp(self): self.op_type = "expand" self.inputs = {'X': np.random.random((12, 14)).astype("float32")} self.attrs = {'expand_times': [2, 3]} output = np.tile(self.inputs['X'], (2, 3)) self.outputs = {'Out': output} def test_check_output(self): self.check_output() def test_check_grad(self): self.check_grad(['X'], 'Out') class TestExpandOpRank3_Corner(OpTest): def setUp(self): self.op_type = "expand" self.inputs = {'X': np.random.random((2, 4, 5)).astype("float32")} self.attrs = {'expand_times': [1, 1, 1]} output = np.tile(self.inputs['X'], (1, 1, 1)) self.outputs = {'Out': output} def test_check_output(self): self.check_output() def test_check_grad(self): self.check_grad(['X'], 'Out') class TestExpandOpRank3(OpTest): def setUp(self): self.op_type = "expand" self.inputs = {'X': np.random.random((2, 4, 5)).astype("float32")} self.attrs = {'expand_times': [2, 1, 4]} output = np.tile(self.inputs['X'], (2, 1, 4)) self.outputs = {'Out': output} def test_check_output(self): self.check_output() def test_check_grad(self): self.check_grad(['X'], 'Out') class TestExpandOpRank4(OpTest): def setUp(self): self.op_type = "expand" self.inputs = {'X': np.random.random((2, 4, 5, 7)).astype("float32")} self.attrs = {'expand_times': [3, 2, 1, 2]} output = np.tile(self.inputs['X'], (3, 2, 1, 2)) self.outputs = {'Out': output} def test_check_output(self): self.check_output() def test_check_grad(self): self.check_grad(['X'], 'Out') if __name__ == "__main__": unittest.main()
3334
28.776786
77
py
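The expand operator's reference semantics are np.tile, exactly as the tests above compute them; a tiny worked example makes the expand_times rule concrete (values are illustrative).

import numpy as np

x = np.arange(6, dtype='float32').reshape(2, 3)
out = np.tile(x, (2, 3))        # expand_times = [2, 3]: each axis is repeated
assert out.shape == (4, 9)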
Paddle
Paddle-master/python/paddle/fluid/tests/unittests/test_sequence_slice_op.py
# Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

import unittest
import numpy as np
from op_test import OpTest


class TestSequenceSliceOp(OpTest):
    def set_data(self):
        self.init_test_case()
        # only supports one-level LoD
        x = np.random.random(self.x_dim).astype('float32')
        lod = self.x_lod
        offset = np.array(self.offset).astype("int64")
        length = np.array(self.length).astype("int64")
        self.inputs = {'X': (x, lod), 'Offset': offset, 'Length': length}
        outs = []
        out_lod = [[0]]
        out_lod_offset = 0
        for i in range(len(offset)):
            sub_x = x[lod[0][i] + offset[i, 0]:lod[0][i] + offset[i, 0] +
                      length[i, 0], :]
            out_lod_offset = out_lod_offset + len(sub_x)
            outs.append(sub_x)
            out_lod[0].append(out_lod_offset)
        outs = np.concatenate(outs, axis=0)
        self.outputs = {'Out': (outs, out_lod)}

    def init_test_case(self):
        self.x_dim = (100, 3, 2)
        self.x_lod = [[0, 20, 40, 60, 80, 100]]
        self.offset = [[1], [2], [3], [4], [5]]
        self.length = [[10], [8], [6], [4], [2]]

    def setUp(self):
        self.op_type = "sequence_slice"
        self.set_data()

    def test_check_output(self):
        self.check_output()

    def test_check_grad(self):
        self.check_grad(['X'], 'Out')


if __name__ == '__main__':
    unittest.main()
2046
32.016129
74
py
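A worked example of the slicing rule in set_data above, assuming a one-level LoD: the rows selected for sequence i start at lod[0][i] + offset[i] and run for length[i] rows.

import numpy as np

x = np.arange(10).reshape(10, 1)
lod = [[0, 4, 10]]            # two sequences: rows 0..3 and rows 4..9
offset = [[1], [2]]           # per-sequence start offsets
length = [[2], [3]]           # per-sequence slice lengths
outs = [x[lod[0][i] + offset[i][0]:lod[0][i] + offset[i][0] + length[i][0]]
        for i in range(2)]
# sequence 0 yields rows 1..2, sequence 1 yields rows 6..8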
Paddle
Paddle-master/python/paddle/fluid/tests/unittests/test_row_conv_op.py
# Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import unittest import numpy as np from op_test import OpTest def row_conv_forward(x, lod, wt): out = np.zeros_like(x) seq_info = lod[0] num_sequences = len(seq_info) - 1 context_length = wt.shape[0] for i in range(num_sequences): # loop over number of sequences start = seq_info[i] end = seq_info[i + 1] curinput = x[start:end, :] curoutput = out[start:end, :] cur_timesteps = end - start for j in range(cur_timesteps): # loop over different timesteps for k in range(context_length): if j + k >= cur_timesteps: continue curoutput[j, :] += curinput[j + k, :] * wt[k, :] return out class TestRowConvOp1(OpTest): def setUp(self): self.op_type = "row_conv" lod = [[0, 2, 5, 7]] T = lod[0][-1] D = 16 context_length = 2 x = np.random.random((T, D)).astype("float32") wt = np.random.random((context_length, D)).astype("float32") self.inputs = {'X': (x, lod), 'Filter': wt} out = row_conv_forward(x, lod, wt) self.outputs = {'Out': (out, lod)} def test_check_output(self): self.check_output() def test_check_grad_normal(self): self.check_grad(['X', 'Filter'], 'Out', max_relative_error=0.05) def test_check_grad_ignore_x(self): self.check_grad( ['Filter'], 'Out', max_relative_error=0.05, no_grad_set=set('X')) def test_check_grad_ignore_wt(self): self.check_grad( ['X'], 'Out', max_relative_error=0.05, no_grad_set=set('Filter')) class TestRowConvOp2(OpTest): def setUp(self): self.op_type = "row_conv" lod = [[0, 20, 50, 100]] T = lod[0][-1] D = 35 context_length = 35 x = np.random.random((T, D)).astype("float32") wt = np.random.random((context_length, D)).astype("float32") self.inputs = {'X': (x, lod), 'Filter': wt} out = row_conv_forward(x, lod, wt) self.outputs = {'Out': (out, lod)} def test_check_output(self): self.check_output() #max_relative_error is increased from 0.05 to 0.06 as for higher #dimensional input, the dX on CPU for some values has max_rel_error #slightly more than 0.05 def test_check_grad_normal(self): self.check_grad(['X', 'Filter'], 'Out', max_relative_error=0.06) def test_check_grad_ignore_x(self): self.check_grad( ['Filter'], 'Out', max_relative_error=0.06, no_grad_set=set('X')) def test_check_grad_ignore_wt(self): self.check_grad( ['X'], 'Out', max_relative_error=0.06, no_grad_set=set('Filter')) if __name__ == '__main__': unittest.main()
3381
29.745455
77
py
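row_conv_forward above is a lookahead convolution over each sequence; a single-sequence sketch of the same recurrence, out[j] += x[j + k] * w[k] for every k that stays inside the sequence (shapes are illustrative).

import numpy as np

T, D, context = 5, 3, 2
x = np.random.rand(T, D)
w = np.random.rand(context, D)
out = np.zeros_like(x)
for j in range(T):                  # each output timestep
    for k in range(context):        # looks ahead up to `context` steps
        if j + k < T:
            out[j] += x[j + k] * w[k]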
Paddle
Paddle-master/python/paddle/fluid/tests/unittests/test_profiler.py
# Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import unittest import os import numpy as np import paddle.fluid as fluid import paddle.fluid.profiler as profiler import paddle.fluid.layers as layers import paddle.fluid.core as core class TestProfiler(unittest.TestCase): def net_profiler(self, state, profile_path='/tmp/profile'): enable_if_gpu = state == 'GPU' or state == "All" if enable_if_gpu and not core.is_compiled_with_cuda(): return startup_program = fluid.Program() main_program = fluid.Program() with fluid.program_guard(main_program, startup_program): image = fluid.layers.data(name='x', shape=[784], dtype='float32') hidden1 = fluid.layers.fc(input=image, size=64, act='relu') i = layers.zeros(shape=[1], dtype='int64') counter = fluid.layers.zeros( shape=[1], dtype='int64', force_cpu=True) until = layers.fill_constant([1], dtype='int64', value=10) data_arr = layers.array_write(hidden1, i) cond = fluid.layers.less_than(x=counter, y=until) while_op = fluid.layers.While(cond=cond) with while_op.block(): hidden_n = fluid.layers.fc(input=hidden1, size=64, act='relu') layers.array_write(hidden_n, i, data_arr) fluid.layers.increment(x=counter, value=1, in_place=True) layers.less_than(x=counter, y=until, cond=cond) hidden_n = layers.array_read(data_arr, i) hidden2 = fluid.layers.fc(input=hidden_n, size=64, act='relu') predict = fluid.layers.fc(input=hidden2, size=10, act='softmax') label = fluid.layers.data(name='y', shape=[1], dtype='int64') cost = fluid.layers.cross_entropy(input=predict, label=label) avg_cost = fluid.layers.mean(cost) batch_size = fluid.layers.create_tensor(dtype='int64') batch_acc = fluid.layers.accuracy( input=predict, label=label, total=batch_size) optimizer = fluid.optimizer.Momentum(learning_rate=0.001, momentum=0.9) opts = optimizer.minimize(avg_cost, startup_program=startup_program) place = fluid.CPUPlace() if state == 'CPU' else fluid.CUDAPlace(0) exe = fluid.Executor(place) exe.run(startup_program) pass_acc_calculator = fluid.average.WeightedAverage() with profiler.profiler(state, 'total', profile_path) as prof: for iter in range(10): if iter == 2: profiler.reset_profiler() x = np.random.random((32, 784)).astype("float32") y = np.random.randint(0, 10, (32, 1)).astype("int64") outs = exe.run(main_program, feed={'x': x, 'y': y}, fetch_list=[avg_cost, batch_acc, batch_size]) acc = np.array(outs[1]) b_size = np.array(outs[2]) pass_acc_calculator.add(value=acc, weight=b_size) pass_acc = pass_acc_calculator.eval() def test_cpu_profiler(self): self.net_profiler('CPU') def test_cuda_profiler(self): self.net_profiler('GPU') def test_all_profiler(self): self.net_profiler('All', '/tmp/profile_out') with open('/tmp/profile_out', 'r') as f: self.assertGreater(len(f.read()), 0) if __name__ == '__main__': unittest.main()
4091
41.625
79
py
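The profiling pattern used by the test above, reduced to its core; this is a sketch that assumes an executor and a built main_program already exist, and only the profiler calls are taken from the test itself.

import paddle.fluid.profiler as profiler

with profiler.profiler('CPU', 'total', '/tmp/profile'):
    for it in range(10):
        if it == 2:
            profiler.reset_profiler()   # drop warm-up iterations from the stats
        # exe.run(main_program, feed=..., fetch_list=...) would go here
        pass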
Paddle
Paddle-master/python/paddle/fluid/tests/unittests/test_lstm_op.py
# Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import unittest import numpy as np from op_test import OpTest SIGMOID_THRESHOLD_MIN = -40.0 SIGMOID_THRESHOLD_MAX = 13.0 EXP_MAX_INPUT = 40.0 def identity(x): return x def sigmoid(x): y = np.copy(x) y[x < SIGMOID_THRESHOLD_MIN] = SIGMOID_THRESHOLD_MIN y[x > SIGMOID_THRESHOLD_MAX] = SIGMOID_THRESHOLD_MAX return 1. / (1. + np.exp(-y)) def tanh(x): y = -2. * x y[y > EXP_MAX_INPUT] = EXP_MAX_INPUT return (2. / (1. + np.exp(y))) - 1. def relu(x): return np.maximum(x, 0) ACTIVATION = { 'identity': identity, 'sigmoid': sigmoid, 'tanh': tanh, 'relu': relu } def lstm( input, # T x 4D lod, # 1 x N h0=None, # N x D c0=None, # N x D w_h=None, # D x 4D w_b=None, # 1 x 4D w_c=None, # 1 x 3D is_reverse=False, act_gate=None, act_cell=None, act_cand=None): def _step(x, w_h, w_c, h_pre, c_pre, act_gate, act_cell, act_cand): g = np.dot(h_pre, w_h) # 1 x 4D g = g + x g = np.reshape(g, (1, g.size)) c, g_i, g_f, g_o = np.split(g, 4, axis=1) if w_c is None: g_i = act_gate(g_i) # 1 x D g_f = act_gate(g_f) # 1 x D else: w_ic, w_fc, w_oc = np.split(w_c, 3, axis=1) g_i = act_gate(g_i + w_ic * c_pre) # 1 x D g_f = act_gate(g_f + w_fc * c_pre) # 1 x D c = g_f * c_pre + g_i * act_cand(c) # 1 x D if w_c is None: g_o = act_gate(g_o) # 1 x D else: _, _, w_oc = np.split(w_c, 3, axis=1) g_o = act_gate(g_o + w_oc * c) # 1 x D h = g_o * act_cell(c) return h, c def _reverse(x, lod): y = np.zeros_like(x) for i in range(len(lod) - 1): b, e = lod[i], lod[i + 1] y[b:e, :] = np.flip(x[b:e, :], 0) return y offset = lod[0] batch_size = len(offset) - 1 hidden = [] cell = [] input = _reverse(input, offset) if is_reverse else input if w_b is not None: input = input + np.tile(w_b, (offset[-1], 1)) for i in range(batch_size): # compute one sequence seq_len = offset[i + 1] - offset[i] x = input[offset[i]:offset[i + 1], :] h_pre = h0[i] # 1 x D c_pre = c0[i] # 1 x D for j in range(seq_len): # compute one step h_pre, c_pre = _step(x[j], w_h, w_c, h_pre, c_pre, act_gate, act_cell, act_cand) hidden.append(h_pre.flatten()) cell.append(c_pre.flatten()) hidden = np.array(hidden).astype('float64') cell = np.array(cell).astype('float64') hidden = _reverse(hidden, offset) if is_reverse else hidden cell = _reverse(cell, offset) if is_reverse else cell assert hidden.shape == (input.shape[0], input.shape[1] / 4) assert cell.shape == (input.shape[0], input.shape[1] / 4) return hidden, cell class TestLstmOp(OpTest): def set_argument(self): self.lod = [[0, 2, 5, 7]] self.D = 16 self.act_gate = 'sigmoid' self.act_cell = 'tanh' self.act_cand = 'tanh' self.has_initial_state = False self.is_reverse = False self.use_peepholes = True def setUp(self): self.set_argument() self.op_type = 'lstm' T = self.lod[0][-1] N = len(self.lod[0]) - 1 x = np.random.normal(size=(T, 4 * self.D)).astype('float64') if self.has_initial_state: h0 = np.random.normal(size=(N, self.D)).astype('float64') c0 = np.random.normal(size=(N, self.D)).astype('float64') else: h0 = np.zeros((N, 
self.D)).astype('float64')
            c0 = np.zeros((N, self.D)).astype('float64')
        w = np.random.normal(size=(self.D, 4 * self.D)).astype('float64')
        if self.use_peepholes:
            b = np.random.normal(size=(1, 7 * self.D)).astype('float64')
        else:
            b = np.random.normal(size=(1, 4 * self.D)).astype('float64')

        w_b = b[:, 0:4 * self.D]
        w_c = b[:, 4 * self.D:] if self.use_peepholes else None
        h, c = lstm(x, self.lod, h0, c0, w, w_b, w_c, self.is_reverse,
                    ACTIVATION[self.act_gate], ACTIVATION[self.act_cell],
                    ACTIVATION[self.act_cand])

        self.inputs = {'Input': (x, self.lod), 'Weight': w}
        self.inputs['Bias'] = b

        if self.has_initial_state:
            self.inputs['H0'] = h0
            self.inputs['C0'] = c0

        self.outputs = {
            'Hidden': (h, self.lod),
            'Cell': (c, self.lod),
        }
        self.attrs = {
            'use_peepholes': self.use_peepholes,
            'is_reverse': self.is_reverse,
            'gate_activation': self.act_gate,
            'cell_activation': self.act_cell,
            'candidate_activation': self.act_cand
        }

    def test_check_output(self):
        self.check_output(atol=1e-8)

    def test_check_grad(self):
        # TODO(qingqing) remove the following lines after check_grad is refined.
        N = len(self.lod[0]) - 1
        self.outputs['BatchGate'] = np.zeros((N, 4 * self.D)).astype('float64')
        self.outputs['BatchCellPreAct'] = np.zeros(
            (N, self.D)).astype('float64')
        self.check_grad(
            ['Input', 'Weight', 'Bias'], ['Hidden'], max_relative_error=5e-4)


class TestLstmOpHasInitial(TestLstmOp):
    def set_argument(self):
        self.lod = [[0, 2, 5, 7]]
        self.D = 16

        self.act_gate = 'sigmoid'
        self.act_cell = 'tanh'
        self.act_cand = 'tanh'

        self.has_initial_state = True
        self.is_reverse = True
        self.use_peepholes = True

    def test_check_grad(self):
        # TODO(qingqing) remove the following lines after check_grad is refined.
        N = len(self.lod[0]) - 1
        self.outputs['BatchGate'] = np.zeros((N, 4 * self.D)).astype('float64')
        self.outputs['BatchCellPreAct'] = np.zeros(
            (N, self.D)).astype('float64')
        self.check_grad(
            ['Input', 'Weight', 'Bias', 'H0', 'C0'], ['Hidden'],
            max_relative_error=5e-4)

    def test_check_grad_ignore_bias(self):
        N = len(self.lod[0]) - 1
        self.outputs['BatchGate'] = np.zeros((N, 4 * self.D)).astype('float64')
        self.outputs['BatchCellPreAct'] = np.zeros(
            (N, self.D)).astype('float64')
        self.check_grad(
            ['Input', 'Weight'], ['Hidden'],
            max_relative_error=5e-4,
            no_grad_set=set('Bias'))

    def test_check_grad_ignore_weight(self):
        N = len(self.lod[0]) - 1
        self.outputs['BatchGate'] = np.zeros((N, 4 * self.D)).astype('float64')
        self.outputs['BatchCellPreAct'] = np.zeros(
            (N, self.D)).astype('float64')
        self.check_grad(
            ['Input', 'Bias'], ['Hidden'],
            max_relative_error=5e-4,
            no_grad_set=set('Weight'))

    def test_check_grad_ignore_input(self):
        N = len(self.lod[0]) - 1
        self.outputs['BatchGate'] = np.zeros((N, 4 * self.D)).astype('float64')
        self.outputs['BatchCellPreAct'] = np.zeros(
            (N, self.D)).astype('float64')
        self.check_grad(
            ['Weight', 'Bias'], ['Hidden'],
            max_relative_error=5e-4,
            no_grad_set=set('Input'))

    def test_check_grad_ignore_h0(self):
        N = len(self.lod[0]) - 1
        self.outputs['BatchGate'] = np.zeros((N, 4 * self.D)).astype('float64')
        self.outputs['BatchCellPreAct'] = np.zeros(
            (N, self.D)).astype('float64')
        self.check_grad(
            ['Input', 'Weight', 'Bias', 'C0'], ['Hidden'],
            max_relative_error=5e-4,
            no_grad_set=set('H0'))

    def test_check_grad_ignore_c0(self):
        N = len(self.lod[0]) - 1
        self.outputs['BatchGate'] = np.zeros((N, 4 * self.D)).astype('float64')
        self.outputs['BatchCellPreAct'] = np.zeros(
            (N, self.D)).astype('float64')
        self.check_grad(
            ['Input', 'Weight', 'Bias', 'H0'], ['Hidden'],
            max_relative_error=5e-4,
            no_grad_set=set('C0'))
class TestLstmOpReverse(TestLstmOp):
    def set_argument(self):
        self.lod = [[0, 2, 5, 7]]
        self.D = 16

        self.act_gate = 'sigmoid'
        self.act_cell = 'tanh'
        self.act_cand = 'tanh'

        self.has_initial_state = False
        self.is_reverse = True
        self.use_peepholes = True


class TestLstmOpNotUsePeepholes(TestLstmOp):
    def set_argument(self):
        self.lod = [[0, 2, 5, 7]]
        self.D = 16

        self.act_gate = 'sigmoid'
        self.act_cell = 'tanh'
        self.act_cand = 'tanh'

        self.has_initial_state = False
        self.is_reverse = True
        self.use_peepholes = False


if __name__ == '__main__':
    unittest.main()
9522
30.637874
79
py
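One step of the reference lstm() above, isolated as a sketch; the gate layout (candidate, input, forget, output packed along the 4*D axis) follows _step, with peepholes omitted and all values random.

import numpy as np


def sigmoid(z):
    return 1.0 / (1.0 + np.exp(-z))


D = 4
x = np.random.rand(1, 4 * D)        # one timestep of precomputed input projections
h_pre = np.zeros((1, D))
c_pre = np.zeros((1, D))
w_h = np.random.rand(D, 4 * D)

g = x + h_pre.dot(w_h)
c_hat, g_i, g_f, g_o = np.split(g, 4, axis=1)   # candidate, input, forget, output
c = sigmoid(g_f) * c_pre + sigmoid(g_i) * np.tanh(c_hat)
h = sigmoid(g_o) * np.tanh(c)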
Paddle
Paddle-master/python/paddle/fluid/tests/unittests/test_top_k_op.py
# Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import unittest import numpy as np from op_test import OpTest class TestTopkOp(OpTest): def setUp(self): self.op_type = "top_k" k = 1 input = np.random.random((32, 84)).astype("float32") output = np.ndarray((32, k)) indices = np.ndarray((32, k)).astype("int64") self.inputs = {'X': input} self.attrs = {'k': k} for rowid in xrange(32): row = input[rowid] output[rowid] = np.sort(row)[-k:] indices[rowid] = row.argsort()[-k:] self.outputs = {'Out': output, 'Indices': indices} def test_check_output(self): self.check_output() class TestTopkOp3d(OpTest): def setUp(self): self.op_type = "top_k" k = 1 input = np.random.random((32, 2, 84)).astype("float32") input_flat_2d = input.reshape(64, 84) output = np.ndarray((64, k)) indices = np.ndarray((64, k)).astype("int64") # FIXME: should use 'X': input for a 3d input self.inputs = {'X': input_flat_2d} self.attrs = {'k': k} for rowid in xrange(64): row = input_flat_2d[rowid] output[rowid] = np.sort(row)[-k:] indices[rowid] = row.argsort()[-k:] self.outputs = {'Out': output, 'Indices': indices} def test_check_output(self): self.check_output() if __name__ == "__main__": unittest.main()
2031
28.882353
74
py
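The per-row loop in these tests is equivalent to a vectorized sort-and-take; a compact NumPy reference with illustrative shapes (like the tests, the k values come out in ascending order).

import numpy as np

x = np.random.rand(8, 20).astype('float32')
k = 3
indices = np.argsort(x, axis=1)[:, -k:]   # ascending; the last k index the largest values
values = np.sort(x, axis=1)[:, -k:]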
Paddle
Paddle-master/python/paddle/fluid/tests/unittests/test_lookup_sparse_table_op.py
# Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

import unittest
import numpy as np
from op_test import OpTest
import paddle.fluid.core as core
from paddle.fluid.op import Operator


def output_hist(out):
    hist, _ = np.histogram(out, range=(-5, 10))
    hist = hist.astype("float32")
    hist /= float(out.size)
    prob = 0.1 * np.ones((10))
    return hist, prob


class TestLookupSparseTable(OpTest):
    def check_with_place(self, place):
        scope = core.Scope()

        # create and initialize Id Variable
        ids = scope.var("Ids").get_tensor()
        ids_array = np.array([0, 2, 3, 5, 100]).astype("int64")
        ids.set(ids_array, place)

        # create and initialize W Variable
        rows = [0, 1, 2, 3, 4, 5, 6]
        row_numel = 10000

        w_selected_rows = scope.var('W').get_selected_rows()
        w_selected_rows.set_height(len(rows))
        w_selected_rows.set_rows(rows)
        w_array = np.ones((len(rows), row_numel)).astype("float32")
        for i in range(len(rows)):
            w_array[i] *= i
        w_tensor = w_selected_rows.get_tensor()
        w_tensor.set(w_array, place)

        # create Out Variable
        out_tensor = scope.var('Out').get_tensor()

        # create and run lookup_table operator
        lookup_table = Operator(
            "lookup_sparse_table",
            W='W',
            Ids='Ids',
            Out='Out',
            min=-5.0,
            max=10.0,
            seed=10)
        lookup_table.run(scope, place)

        # get result from Out
        result_array = np.array(out_tensor)
        # all(): return True if all elements of the iterable are true
        # (or if the iterable is empty)
        for idx, row in enumerate(ids_array[:-2]):
            assert (row == result_array[idx]).all()

        # check the random value
        hist, prob = output_hist(result_array[-1])
        self.assertTrue(
            np.allclose(
                hist, prob, rtol=0, atol=0.01), "hist: " + str(hist))

    def test_w_is_selected_rows(self):
        places = [core.CPUPlace()]  # currently only supports CPU
        for place in places:
            self.check_with_place(place)


if __name__ == "__main__":
    unittest.main()
2777
30.931034
99
py
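A sketch of the lookup semantics checked above: ids already present in the table's rows gather the stored row, while a missing id (100 in the test) is filled by the operator with uniform random values in [min, max]. Only the gather half is reproduced here, with illustrative sizes.

import numpy as np

rows = [0, 1, 2, 3, 4, 5, 6]     # ids currently stored in the sparse table
table = np.arange(7, dtype='float32')[:, None] * np.ones((7, 4), dtype='float32')
ids = [0, 2, 3, 5]               # all present, so this reduces to a plain gather
out = np.vstack([table[rows.index(i)] for i in ids])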
Paddle
Paddle-master/python/paddle/fluid/tests/unittests/test_reshape_op.py
# Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import unittest import numpy as np from op_test import OpTest class TestReshapeOp(OpTest): def setUp(self): ori_shape = (2, 25) new_shape = (5, 10) self.op_type = "reshape" self.inputs = {"X": np.random.random(ori_shape).astype("float32")} self.attrs = {"shape": new_shape, "inplace": False} self.outputs = {"Out": self.inputs["X"].reshape(new_shape)} def test_check_output(self): self.check_output() def test_check_grad(self): self.check_grad(["X"], "Out") class TestReshapeOpDimInfer1(OpTest): def setUp(self): ori_shape = (5, 10) new_shape = (5, -1, 5) self.op_type = "reshape" self.inputs = {"X": np.random.random(ori_shape).astype("float32")} self.attrs = {"shape": new_shape, "inplace": False} self.outputs = {"Out": self.inputs["X"].reshape(self.attrs["shape"])} def test_check_output(self): self.check_output() def test_check_grad(self): self.check_grad(["X"], "Out") class TestReshapeOpDimInfer2(OpTest): def setUp(self): ori_shape = (2, 2, 6) new_shape = (2, 0, 3, -1) infered_shape = (2, 2, 3, -1) self.op_type = "reshape" self.inputs = {"X": np.random.random(ori_shape).astype("float32")} self.attrs = {"shape": new_shape, "inplace": False} self.outputs = {"Out": self.inputs["X"].reshape(infered_shape)} def test_check_output(self): self.check_output() def test_check_grad(self): self.check_grad(["X"], "Out") class TestReshapeOpInplace(OpTest): def setUp(self): ori_shape = (2, 25) new_shape = (5, 10) self.op_type = "reshape" self.inputs = {"X": np.random.random(ori_shape).astype("float32")} self.attrs = {"shape": new_shape} self.outputs = {"Out": self.inputs["X"].reshape(new_shape)} def test_check_output(self): self.check_output() def test_check_grad(self): self.check_grad(["X"], "Out") class TestReshapeOpDimInferInplace1(OpTest): def setUp(self): ori_shape = (5, 10) new_shape = (5, -1, 5) self.op_type = "reshape" self.inputs = {"X": np.random.random(ori_shape).astype("float32")} self.attrs = {"shape": new_shape} self.outputs = {"Out": self.inputs["X"].reshape(new_shape)} def test_check_output(self): self.check_output() def test_check_grad(self): self.check_grad(["X"], "Out") class TestReshapeOpDimInferInplace2(OpTest): def setUp(self): ori_shape = (2, 2, 6) new_shape = (2, 0, 3, -1) infered_shape = (2, 2, 3, -1) self.op_type = "reshape" self.inputs = {"X": np.random.random(ori_shape).astype("float32")} self.attrs = {"shape": new_shape} self.outputs = {"Out": self.inputs["X"].reshape(infered_shape)} def test_check_output(self): self.check_output() def test_check_grad(self): self.check_grad(["X"], "Out") class TestReshapeOpWithInputShape(OpTest): def setUp(self): ori_shape = (6, 5) new_shape = (0, -1, 5) actual_shape = (2, 3, 5) self.op_type = "reshape" self.inputs = { "X": np.random.random(ori_shape).astype("float32"), "Shape": np.array( actual_shape, dtype="int32") } self.attrs = {"shape": new_shape} self.outputs = {"Out": self.inputs["X"].reshape(actual_shape)} def test_check_output(self): 
self.check_output() def test_check_grad(self): self.check_grad(["X"], "Out") if __name__ == "__main__": unittest.main()
4316
27.973154
77
py
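The shape-attr rules these reshape tests rely on, written out as a hypothetical helper (infer_shape is not part of the operator's API): 0 copies the corresponding input dimension and -1 is solved from the remaining element count.

import numpy as np


def infer_shape(in_shape, shape_attr):
    # 0 copies the input dim at that position; -1 is inferred from the remainder.
    out = [in_shape[i] if d == 0 else d for i, d in enumerate(shape_attr)]
    if -1 in out:
        known = -int(np.prod(out))                    # product of the known dims
        out[out.index(-1)] = int(np.prod(in_shape)) // known
    return out


assert infer_shape((2, 2, 6), (2, 0, 3, -1)) == [2, 2, 3, 2]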
Paddle
Paddle-master/python/paddle/fluid/tests/unittests/test_maxout_op.py
# Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import unittest import numpy as np from op_test import OpTest def maxout_forward_naive(input, groups): s0, s1, s2, s3 = input.shape return np.ndarray([s0, s1 / groups, groups, s2, s3], \ buffer = input, dtype=input.dtype).max(axis=(2)) class TestMaxOutOp(OpTest): def setUp(self): self.op_type = "maxout" self.init_test_case() input = np.random.random(self.shape).astype("float32") output = self.MaxOut_forward_naive(input, self.groups).astype("float32") self.inputs = {'X': input} self.attrs = {'groups': self.groups} self.outputs = {'Out': output.astype('float32')} def test_check_output(self): self.check_output() def test_check_grad(self): self.check_grad(['X'], 'Out') def init_test_case(self): self.MaxOut_forward_naive = maxout_forward_naive self.shape = [100, 6, 2, 2] self.groups = 2 if __name__ == '__main__': unittest.main()
1592
29.634615
80
py
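maxout_forward_naive above is a max over channel groups; the same computation via an explicit reshape, with illustrative sizes.

import numpy as np

N, C, H, W, groups = 2, 6, 2, 2, 2
x = np.random.rand(N, C, H, W).astype('float32')
# split the channel axis into C // groups blocks of `groups` and reduce by max
out = x.reshape(N, C // groups, groups, H, W).max(axis=2)
assert out.shape == (N, C // groups, H, W)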
Paddle
Paddle-master/python/paddle/fluid/tests/unittests/test_adadelta_op.py
# Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import unittest import numpy as np from op_test import OpTest class TestAdadeltaOp1(OpTest): def setUp(self): self.op_type = "adadelta" param = np.random.uniform(-1, 1, (102, 105)).astype("float32") grad = np.random.uniform(-1, 1, (102, 105)).astype("float32") # The squared gradient is positive avg_squared_grad = np.random.random((102, 105)).astype("float32") # The squared update is positive avg_squared_update = np.random.random((102, 105)).astype("float32") rho = 0.95 epsilon = 1e-6 self.inputs = { 'Param': param, 'Grad': grad, 'AvgSquaredGrad': avg_squared_grad, 'AvgSquaredUpdate': avg_squared_update } self.attrs = {'rho': rho, 'epsilon': epsilon} avg_squared_grad_out = rho * avg_squared_grad + \ (1 - rho) * np.square(grad) update = -np.multiply( np.sqrt( np.divide(avg_squared_update + epsilon, avg_squared_grad_out + epsilon)), grad) avg_squared_update_out = rho * avg_squared_update + \ (1 - rho) * np.square(update) param_out = param + update self.outputs = { 'ParamOut': param_out, 'AvgSquaredGradOut': avg_squared_grad_out, 'AvgSquaredUpdateOut': avg_squared_update_out } def test_check_output(self): self.check_output() class TestAdadeltaOp2(OpTest): '''Test Adadelta op with default attribute values ''' def setUp(self): self.op_type = "adadelta" param = np.random.uniform(-1, 1, (102, 105)).astype("float32") grad = np.random.uniform(-1, 1, (102, 105)).astype("float32") # The squared gradient is positive avg_squared_grad = np.random.random((102, 105)).astype("float32") # The squared update is positive avg_squared_update = np.random.random((102, 105)).astype("float32") rho = 0.95 epsilon = 1e-6 self.inputs = { 'Param': param, 'Grad': grad, 'AvgSquaredGrad': avg_squared_grad, 'AvgSquaredUpdate': avg_squared_update } avg_squared_grad_out = rho * avg_squared_grad + \ (1 - rho) * np.square(grad) update = -np.multiply( np.sqrt( np.divide(avg_squared_update + epsilon, avg_squared_grad_out + epsilon)), grad) avg_squared_update_out = rho * avg_squared_update + \ (1 - rho) * np.square(update) param_out = param + update self.outputs = { 'ParamOut': param_out, 'AvgSquaredGradOut': avg_squared_grad_out, 'AvgSquaredUpdateOut': avg_squared_update_out } def test_check_output(self): self.check_output() if __name__ == "__main__": unittest.main()
3550
30.990991
78
py
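The Adadelta recurrences the tests verify (Zeiler, 2012), written out with toy values.

import numpy as np

rho, eps = 0.95, 1e-6
g = np.random.rand(3, 3)
param = np.random.rand(3, 3)
E_g2 = np.zeros_like(g)       # running average of squared gradients
E_dx2 = np.zeros_like(g)      # running average of squared updates

E_g2 = rho * E_g2 + (1 - rho) * g ** 2
update = -np.sqrt((E_dx2 + eps) / (E_g2 + eps)) * g
E_dx2 = rho * E_dx2 + (1 - rho) * update ** 2
param = param + update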
Paddle
Paddle-master/python/paddle/fluid/tests/unittests/test_split_selected_rows_op.py
# Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import unittest import paddle.fluid.core as core import numpy as np from paddle.fluid.op import Operator class TestSpliteSelectedRows(unittest.TestCase): def get_places(self): places = [core.CPUPlace()] if core.is_compiled_with_cuda(): places.append(core.CUDAPlace(0)) return places def test_check_output(self): for place in self.get_places(): self.check_with_place(place) def test_check_grad(self): for place in self.get_places(): self.check_grad_with_place(place) def check_with_place(self, place): scope = core.Scope() rows = [0, 5, 7, 4, 20] height = 20 row_numel = 2 # initialize input variable X x = scope.var('X').get_selected_rows() x.set_rows(rows) x.set_height(height) np_array = np.ones((len(rows), row_numel)).astype("float32") np_array[0, 0] = 2.0 np_array[2, 1] = 4.0 np_array[4, 1] = 8.0 x_tensor = x.get_tensor() x_tensor.set(np_array, place) height_sections = [5, 5, 5, 5, 3] # initialize output variables [out0, out1] outs_name = ["out%d" % i for i in xrange(len(height_sections))] outs = [ scope.var(var_name).get_selected_rows() for var_name in outs_name ] # expected output selected rows expected_out0_rows = [0, 4] expected_out1_rows = [0, 2] expected_out4_rows = [0] op = Operator( "split_selected_rows", X="X", Out=outs_name, height_sections=height_sections) op.run(scope, place) self.assertEqual(outs[0].rows(), expected_out0_rows) self.assertEqual(outs[1].rows(), expected_out1_rows) self.assertEqual(outs[4].rows(), expected_out4_rows) self.assertEqual(outs[0].height(), height_sections[0]) self.assertEqual(outs[4].height(), height_sections[4]) self.assertAlmostEqual(2.0, np.array(outs[0].get_tensor())[0, 0]) self.assertAlmostEqual(4.0, np.array(outs[1].get_tensor())[1, 1]) self.assertAlmostEqual(8.0, np.array(outs[4].get_tensor())[0, 1]) def check_grad_with_place(self, place): scope = core.Scope() height = 10 row_numel = 2 # attr height_sections = [5, 5] # initialize input variable X out0_grad = scope.var("out0@GRAD").get_selected_rows() rows0 = [0, 5] out0_grad.set_rows(rows0) out0_grad.set_height(height) out0_grad_tensor = out0_grad.get_tensor() np_array = np.ones((len(rows0), row_numel)).astype("float32") np_array[0, 0] = 2.0 out0_grad_tensor.set(np_array, place) out1_grad = scope.var("out1@GRAD").get_selected_rows() rows1 = [2, 0] out1_grad.set_rows(rows1) out1_grad.set_height(height) out1_grad_tensor = out1_grad.get_tensor() np_array = np.ones((len(rows1), row_numel)).astype("float32") np_array[0, 1] = 4.0 out1_grad_tensor.set(np_array, place) x_grad = scope.var("X@GRAD").get_selected_rows() grad_op = Operator( "sum", X=["out0@GRAD", "out1@GRAD"], Out="X@GRAD", height_sections=height_sections) grad_op.run(scope, place) self.assertEqual(x_grad.rows(), rows0 + rows1) self.assertEqual(x_grad.height(), height) self.assertAlmostEqual(2.0, np.array(x_grad.get_tensor())[0, 0]) self.assertAlmostEqual(4.0, np.array(x_grad.get_tensor())[2, 1]) if __name__ == "__main__": unittest.main()
4279
31.671756
77
py
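How split_selected_rows routes rows, matching the expectations above: each global row id falls into the section whose height range contains it and is re-indexed from that section's start (a sketch of the rule, not the operator itself).

rows = [0, 5, 7, 4, 20]
height_sections = [5, 5, 5, 5, 3]
starts = [sum(height_sections[:i]) for i in range(len(height_sections))]
out_rows = [[] for _ in height_sections]
for r in rows:
    for i in reversed(range(len(starts))):   # largest section start <= r
        if r >= starts[i]:
            out_rows[i].append(r - starts[i])
            break
# out_rows == [[0, 4], [0, 2], [], [], [0]]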
Paddle
Paddle-master/python/paddle/fluid/tests/unittests/test_multiplex_op.py
# Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import unittest import numpy as np from op_test import OpTest class TestMultiplexOp(OpTest): def setUp(self): self.op_type = "multiplex" rows = 4 index = np.arange(0, rows).astype('int32') np.random.shuffle(index) index = np.reshape(index, (rows, 1)) ins1 = np.random.random((rows, 10)).astype("float32") ins2 = np.random.random((rows, 10)).astype("float32") ins3 = np.random.random((rows, 10)).astype("float32") ins4 = np.random.random((rows, 10)).astype("float32") self.inputs = { 'Ids': index, 'X': [('x1', ins1), ('x2', ins2), ('x3', ins3), ('x4', ins4)] } # multiplex output output = np.zeros_like(ins1) for i in range(0, rows): k = index[i][0] output[i] = self.inputs['X'][k][1][i] self.outputs = {'Out': output} def test_check_output(self): self.check_output() def test_check_grad(self): self.check_grad(['x1', 'x2', 'x3', 'x4'], 'Out') def test_check_grad_ignore_x1(self): self.check_grad(['x2', 'x3', 'x4'], 'Out', no_grad_set=set('x1')) def test_check_grad_ignore_x1_x2(self): self.check_grad(['x3', 'x4'], 'Out', no_grad_set=set(['x1', 'x2'])) def test_check_grad_ignore_x3(self): self.check_grad(['x1', 'x2', 'x4'], 'Out', no_grad_set=set('x3')) if __name__ == '__main__': unittest.main()
2059
33.333333
75
py
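Multiplex semantics from the test above: output row i is row i of the candidate tensor selected by ids[i]. A compact sketch with made-up values.

import numpy as np

ids = np.array([2, 0, 1])
xs = [np.full((3, 4), v, dtype='float32') for v in (0.0, 1.0, 2.0)]
out = np.stack([xs[ids[i]][i] for i in range(len(ids))])
# row 0 comes from xs[2], row 1 from xs[0], row 2 from xs[1]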
Paddle
Paddle-master/python/paddle/fluid/tests/unittests/test_gaussian_random_batch_size_like_op.py
# Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

import unittest
import numpy as np
from op_test import OpTest


class TestGaussianRandomBatchSizeLike(OpTest):
    def setUp(self):
        self.op_type = "gaussian_random_batch_size_like"
        self.inputs = {'Input': np.zeros((500, 2000), dtype="float32")}
        self.attrs = {'mean': 1., 'std': 2., 'shape': [-1, 2000]}
        self.outputs = {'Out': np.zeros((500, 2000), dtype='float32')}

    def test_check_output(self):
        self.check_output_customized(self.verify_output)

    def verify_output(self, outs):
        self.assertEqual(outs[0].shape, (500, 2000))
        hist, _ = np.histogram(outs[0], range=(-3, 5))
        hist = hist.astype("float32")
        hist /= float(outs[0].size)
        data = np.random.normal(size=(500, 2000), loc=1, scale=2)
        hist2, _ = np.histogram(data, range=(-3, 5))
        hist2 = hist2.astype("float32")
        hist2 /= float(outs[0].size)
        self.assertTrue(
            np.allclose(
                hist, hist2, rtol=0, atol=0.01),
            "hist: " + str(hist) + " hist2: " + str(hist2))


if __name__ == "__main__":
    unittest.main()
1723
35.680851
74
py
Paddle
Paddle-master/python/paddle/fluid/tests/unittests/test_cumsum_op.py
# Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import unittest import numpy as np from op_test import OpTest class TestSumOp1(OpTest): def setUp(self): self.op_type = "cumsum" self.attrs = {'axis': 2} self.inputs = {'X': np.random.random((5, 6, 10)).astype("float64")} self.outputs = {'Out': self.inputs['X'].cumsum(axis=2)} def test_check_output(self): self.check_output() def test_check_grad(self): self.check_grad(['X'], 'Out') class TestSumOp2(OpTest): def setUp(self): self.op_type = "cumsum" self.attrs = {'axis': -1, 'reverse': True} self.inputs = {'X': np.random.random((5, 6, 10)).astype("float64")} self.outputs = { 'Out': np.flip( np.flip( self.inputs['X'], axis=2).cumsum(axis=2), axis=2) } def test_check_output(self): self.check_output() def test_check_grad(self): self.check_grad(['X'], 'Out') class TestSumOp3(OpTest): def setUp(self): self.op_type = "cumsum" self.attrs = {'axis': 1} self.inputs = {'X': np.random.random((5, 6, 10)).astype("float64")} self.outputs = {'Out': self.inputs['X'].cumsum(axis=1)} def test_check_output(self): self.check_output() def test_check_grad(self): self.check_grad(['X'], 'Out') class TestSumOp4(OpTest): def setUp(self): self.op_type = "cumsum" self.attrs = {'axis': 0} self.inputs = {'X': np.random.random((5, 6, 10)).astype("float64")} self.outputs = {'Out': self.inputs['X'].cumsum(axis=0)} def test_check_output(self): self.check_output() def test_check_grad(self): self.check_grad(['X'], 'Out') class TestSumOp5(OpTest): def setUp(self): self.op_type = "cumsum" self.inputs = {'X': np.random.random((5, 6)).astype("float64")} self.outputs = {'Out': self.inputs['X'].cumsum(axis=1)} def test_check_output(self): self.check_output() def test_check_grad(self): self.check_grad(['X'], 'Out') class TestSumOp7(OpTest): def setUp(self): self.op_type = "cumsum" self.inputs = {'X': np.random.random((6)).astype("float64")} self.outputs = {'Out': self.inputs['X'].cumsum(axis=0)} def test_check_output(self): self.check_output() def test_check_grad(self): self.check_grad(['X'], 'Out') class TestSumOp8(OpTest): def setUp(self): self.op_type = "cumsum" self.attrs = {'axis': 2, "exclusive": True} a = np.random.random((5, 6, 3)).astype("float64") self.inputs = {'X': a} self.outputs = { 'Out': np.concatenate( (np.zeros( (5, 6, 1), dtype=np.float64), a[:, :, :-1].cumsum(axis=2)), axis=2) } def test_check_output(self): self.check_output() def test_check_grad(self): self.check_grad(['X'], 'Out') if __name__ == '__main__': unittest.main()
3630
27.367188
79
py
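The exclusive and reverse cumsum variants exercised above, written out directly on a small vector.

import numpy as np

a = np.arange(1, 5, dtype='float64')                    # [1, 2, 3, 4]
inclusive = a.cumsum()                                  # [1, 3, 6, 10]
exclusive = np.concatenate(([0.0], a[:-1].cumsum()))    # [0, 1, 3, 6]
reverse = np.flip(np.flip(a, 0).cumsum(), 0)            # [10, 9, 7, 4]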
Paddle
Paddle-master/python/paddle/fluid/tests/unittests/test_smooth_l1_loss_op.py
# Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import unittest import numpy as np from op_test import OpTest def smooth_l1_loss_forward(val, sigma2): abs_val = abs(val) if abs_val < 1.0 / sigma2: return 0.5 * val * val * sigma2 else: return abs_val - 0.5 / sigma2 class TestSmoothL1LossOp1(OpTest): def setUp(self): self.op_type = "smooth_l1_loss" dims = (5, 10) self.inputs = { 'X': np.random.random(dims).astype("float32"), 'Y': np.random.random(dims).astype("float32") } sigma = 3.0 self.attrs = {'sigma': sigma} sigma2 = sigma * sigma diff = self.inputs['X'] - self.inputs['Y'] loss = np.vectorize(smooth_l1_loss_forward)(diff, sigma2).sum(1) loss = loss.reshape((dims[0], 1)) self.outputs = { 'Diff': diff.astype('float32'), 'Out': loss.astype('float32') } def test_check_output(self): self.check_output() def test_check_grad_normal(self): self.check_grad(['X', 'Y'], 'Out', max_relative_error=0.02) def test_check_grad_ingore_x(self): self.check_grad( ['Y'], 'Out', max_relative_error=0.03, no_grad_set=set("X")) def test_check_grad_ingore_y(self): self.check_grad( ['X'], 'Out', max_relative_error=0.03, no_grad_set=set('Y')) class TestSmoothL1LossOp2(OpTest): def setUp(self): self.op_type = "smooth_l1_loss" dims = (5, 10) self.inputs = { 'X': np.random.random(dims).astype("float32"), 'Y': np.random.random(dims).astype("float32"), 'InsideWeight': np.random.random(dims).astype("float32"), 'OutsideWeight': np.random.random(dims).astype("float32") } sigma = 3.0 self.attrs = {'sigma': sigma} sigma2 = sigma * sigma diff = self.inputs['X'] - self.inputs['Y'] diff = diff * self.inputs['InsideWeight'] loss = np.vectorize(smooth_l1_loss_forward)(diff, sigma2) loss = loss * self.inputs['OutsideWeight'] loss = loss.sum(1).reshape((dims[0], 1)) self.outputs = { 'Diff': diff.astype('float32'), 'Out': loss.astype('float32') } def test_check_output(self): self.check_output() def test_check_grad_normal(self): self.check_grad(['X', 'Y'], 'Out', max_relative_error=0.03) def test_check_grad_ingore_x(self): self.check_grad( ['Y'], 'Out', max_relative_error=0.03, no_grad_set=set(['X', 'InsideWeight', 'OutsideWeight'])) def test_check_grad_ingore_y(self): self.check_grad( ['X'], 'Out', max_relative_error=0.03, no_grad_set=set(['Y', 'InsideWeight', 'OutsideWeight'])) if __name__ == '__main__': unittest.main()
3483
31.259259
74
py
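The piecewise function behind smooth_l1_loss_forward, evaluated on both branches with the tests' sigma = 3.

import numpy as np

# 0.5 * sigma^2 * d^2     if |d| <  1 / sigma^2
# |d| - 0.5 / sigma^2     otherwise
sigma2 = 9.0
d = np.array([0.05, 0.5])
loss = np.where(np.abs(d) < 1.0 / sigma2,
                0.5 * sigma2 * d ** 2,
                np.abs(d) - 0.5 / sigma2)
# -> [0.01125, 0.44444...]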
Paddle
Paddle-master/python/paddle/fluid/tests/unittests/test_mul_op.py
# Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import unittest import numpy as np import paddle.fluid.core as core from op_test import OpTest class TestMulOp(OpTest): def setUp(self): self.op_type = "mul" self.inputs = { 'X': np.random.random((32, 84)).astype("float32"), 'Y': np.random.random((84, 100)).astype("float32") } self.outputs = {'Out': np.dot(self.inputs['X'], self.inputs['Y'])} def test_check_output(self): self.check_output() def test_check_grad_normal(self): self.check_grad(['X', 'Y'], 'Out', max_relative_error=0.5) def test_check_grad_ingore_x(self): self.check_grad( ['Y'], 'Out', max_relative_error=0.5, no_grad_set=set("X")) def test_check_grad_ingore_y(self): self.check_grad( ['X'], 'Out', max_relative_error=0.5, no_grad_set=set('Y')) class TestMulOp2(OpTest): def setUp(self): self.op_type = "mul" self.inputs = { 'X': np.random.random((15, 4, 12, 10)).astype("float32"), 'Y': np.random.random((4, 30, 8, 2, 9)).astype("float32") } self.attrs = {'x_num_col_dims': 2, 'y_num_col_dims': 2} result = np.dot(self.inputs['X'].reshape(15 * 4, 12 * 10), self.inputs['Y'].reshape(4 * 30, 8 * 2 * 9)) result = result.reshape(15, 4, 8, 2, 9) self.outputs = {'Out': result} def test_check_output(self): self.check_output() def test_check_grad_normal(self): self.check_grad(['X', 'Y'], 'Out', max_relative_error=0.5) def test_check_grad_ingore_x(self): self.check_grad( ['Y'], 'Out', max_relative_error=0.5, no_grad_set=set('X')) def test_check_grad_ignore_y(self): self.check_grad( ['X'], 'Out', max_relative_error=0.5, no_grad_set=set('Y')) class TestFP16MulOp1(OpTest): def setUp(self): self.op_type = "mul" x = np.random.random((32, 84)).astype("float16") y = np.random.random((84, 100)).astype("float16") self.inputs = {'X': x.view(np.uint16), 'Y': y.view(np.uint16)} self.outputs = {'Out': np.dot(x, y)} def test_check_output(self): if core.is_compiled_with_cuda(): place = core.CUDAPlace(0) if core.is_float16_supported(place): self.check_output_with_place(place, atol=1e-1) class TestFP16MulOp2(OpTest): def setUp(self): self.op_type = "mul" x = np.random.random((15, 4, 12, 10)).astype("float16") y = np.random.random((4, 30, 8, 2, 9)).astype("float16") self.inputs = {'X': x.view(np.uint16), 'Y': y.view(np.uint16)} self.attrs = {'x_num_col_dims': 2, 'y_num_col_dims': 2} result = np.dot( x.reshape(15 * 4, 12 * 10), y.reshape(4 * 30, 8 * 2 * 9)) result = result.reshape(15, 4, 8, 2, 9) self.outputs = {'Out': result} def test_check_output(self): if core.is_compiled_with_cuda(): place = core.CUDAPlace(0) if core.is_float16_supported(place): self.check_output_with_place(place, atol=2e-1) if __name__ == "__main__": unittest.main()
3,787
33.752294
74
py
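TestMulOp2 in the record above relies on mul's num_col_dims semantics: the first x_num_col_dims axes of X are flattened into matrix rows and the remaining axes into columns (symmetrically for Y), and the output keeps X's leading axes and Y's trailing axes. A minimal numpy sketch of that flattening, reusing the test's shapes:

import numpy as np

x = np.random.random((15, 4, 12, 10)).astype("float32")
y = np.random.random((4, 30, 8, 2, 9)).astype("float32")

# x_num_col_dims=2: X becomes (15*4, 12*10); y_num_col_dims=2: Y becomes (4*30, 8*2*9)
out = np.dot(x.reshape(15 * 4, 12 * 10), y.reshape(4 * 30, 8 * 2 * 9))
out = out.reshape(15, 4, 8, 2, 9)  # leading dims of X, trailing dims of Y
print(out.shape)  # (15, 4, 8, 2, 9)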
Paddle
Paddle-master/python/paddle/fluid/tests/unittests/test_pool2d_op.py
# Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

import unittest
import numpy as np

import paddle.fluid.core as core
from op_test import OpTest


def max_pool2D_forward_naive(x,
                             ksize,
                             strides,
                             paddings,
                             global_pool=0,
                             ceil_mode=False):
    N, C, H, W = x.shape
    if global_pool == 1:
        ksize = [H, W]
    H_out = (H - ksize[0] + 2 * paddings[0] + strides[0] - 1
             ) / strides[0] + 1 if ceil_mode else (
                 H - ksize[0] + 2 * paddings[0]) / strides[0] + 1
    W_out = (W - ksize[1] + 2 * paddings[1] + strides[1] - 1
             ) / strides[1] + 1 if ceil_mode else (
                 W - ksize[1] + 2 * paddings[1]) / strides[1] + 1
    out = np.zeros((N, C, H_out, W_out))
    for i in xrange(H_out):
        for j in xrange(W_out):
            r_start = np.max((i * strides[0] - paddings[0], 0))
            r_end = np.min((i * strides[0] + ksize[0] - paddings[0], H))
            c_start = np.max((j * strides[1] - paddings[1], 0))
            c_end = np.min((j * strides[1] + ksize[1] - paddings[1], W))
            x_masked = x[:, :, r_start:r_end, c_start:c_end]

            out[:, :, i, j] = np.max(x_masked, axis=(2, 3))
    return out


def avg_pool2D_forward_naive(x,
                             ksize,
                             strides,
                             paddings,
                             global_pool=0,
                             ceil_mode=False):
    N, C, H, W = x.shape
    if global_pool == 1:
        ksize = [H, W]
    H_out = (H - ksize[0] + 2 * paddings[0] + strides[0] - 1
             ) / strides[0] + 1 if ceil_mode else (
                 H - ksize[0] + 2 * paddings[0]) / strides[0] + 1
    W_out = (W - ksize[1] + 2 * paddings[1] + strides[1] - 1
             ) / strides[1] + 1 if ceil_mode else (
                 W - ksize[1] + 2 * paddings[1]) / strides[1] + 1
    out = np.zeros((N, C, H_out, W_out))
    for i in xrange(H_out):
        for j in xrange(W_out):
            r_start = np.max((i * strides[0] - paddings[0], 0))
            r_end = np.min((i * strides[0] + ksize[0] - paddings[0], H))
            c_start = np.max((j * strides[1] - paddings[1], 0))
            c_end = np.min((j * strides[1] + ksize[1] - paddings[1], W))
            x_masked = x[:, :, r_start:r_end, c_start:c_end]

            out[:, :, i, j] = np.sum(x_masked, axis=(2, 3)) / (
                (r_end - r_start) * (c_end - c_start))
    return out


class TestPool2d_Op(OpTest):
    def setUp(self):
        self.op_type = "pool2d"
        self.use_cudnn = False
        self.use_mkldnn = False
        self.dtype = np.float32
        self.init_test_case()
        self.init_global_pool()
        self.init_kernel_type()
        self.init_pool_type()
        self.init_ceil_mode()
        if self.global_pool:
            self.paddings = [0 for _ in range(len(self.paddings))]
        input = np.random.random(self.shape).astype(self.dtype)
        output = self.pool2D_forward_naive(input, self.ksize, self.strides,
                                           self.paddings, self.global_pool,
                                           self.ceil_mode).astype(self.dtype)
        self.inputs = {'X': OpTest.np_dtype_to_fluid_dtype(input)}

        self.attrs = {
            'strides': self.strides,
            'paddings': self.paddings,
            'ksize': self.ksize,
            'pooling_type': self.pool_type,
            'global_pooling': self.global_pool,
            'use_cudnn': self.use_cudnn,
            'use_mkldnn': self.use_mkldnn,
            'ceil_mode': self.ceil_mode,
            'data_format': 'AnyLayout'  # TODO(dzhwinter): should be fixed later
        }

        self.outputs = {'Out': output}

    def testcudnn(self):
        return core.is_compiled_with_cuda() and self.use_cudnn

    def test_check_output(self):
        if self.testcudnn():
            place = core.CUDAPlace(0)
            self.check_output_with_place(place, atol=1e-5)
        else:
            self.check_output()

    def test_check_grad(self):
        if self.dtype == np.float16:
            return
        if self.testcudnn() and self.pool_type != "max":
            place = core.CUDAPlace(0)
            self.check_grad_with_place(
                place, set(['X']), 'Out', max_relative_error=0.07)
        elif self.pool_type != "max":
            self.check_grad(set(['X']), 'Out', max_relative_error=0.07)

    def init_test_case(self):
        self.shape = [2, 3, 5, 5]
        self.ksize = [3, 3]
        self.strides = [1, 1]
        self.paddings = [0, 0]

    def init_kernel_type(self):
        pass

    def init_pool_type(self):
        self.pool_type = "avg"
        self.pool2D_forward_naive = avg_pool2D_forward_naive

    def init_global_pool(self):
        self.global_pool = True

    def init_ceil_mode(self):
        self.ceil_mode = False


class TestCase1(TestPool2d_Op):
    def init_test_case(self):
        self.shape = [2, 3, 7, 7]
        self.ksize = [3, 3]
        self.strides = [1, 1]
        self.paddings = [0, 0]

    def init_pool_type(self):
        self.pool_type = "avg"
        self.pool2D_forward_naive = avg_pool2D_forward_naive

    def init_global_pool(self):
        self.global_pool = False


class TestCase2(TestPool2d_Op):
    def init_test_case(self):
        self.shape = [2, 3, 7, 7]
        self.ksize = [3, 3]
        self.strides = [1, 1]
        self.paddings = [1, 1]

    def init_pool_type(self):
        self.pool_type = "avg"
        self.pool2D_forward_naive = avg_pool2D_forward_naive

    def init_global_pool(self):
        self.global_pool = False


class TestCase3(TestPool2d_Op):
    def init_pool_type(self):
        self.pool_type = "max"
        self.pool2D_forward_naive = max_pool2D_forward_naive


class TestCase4(TestCase1):
    def init_pool_type(self):
        self.pool_type = "max"
        self.pool2D_forward_naive = max_pool2D_forward_naive


class TestCase5(TestCase2):
    def init_pool_type(self):
        self.pool_type = "max"
        self.pool2D_forward_naive = max_pool2D_forward_naive


#--------------------test pool2d--------------------
class TestCUDNNCase1(TestPool2d_Op):
    def init_kernel_type(self):
        self.use_cudnn = True


class TestFP16CUDNNCase1(TestPool2d_Op):
    def init_kernel_type(self):
        self.use_cudnn = True
        self.dtype = np.float16

    def test_check_output(self):
        if core.is_compiled_with_cuda():
            place = core.CUDAPlace(0)
            if core.is_float16_supported(place):
                self.check_output_with_place(place, atol=1e-3)


class TestCUDNNCase2(TestCase1):
    def init_kernel_type(self):
        self.use_cudnn = True


class TestFP16CUDNNCase2(TestCase1):
    def init_kernel_type(self):
        self.use_cudnn = True
        self.dtype = np.float16

    def test_check_output(self):
        if core.is_compiled_with_cuda():
            place = core.CUDAPlace(0)
            if core.is_float16_supported(place):
                self.check_output_with_place(place, atol=1e-3)


class TestCUDNNCase3(TestCase2):
    def init_kernel_type(self):
        self.use_cudnn = True


class TestFP16CUDNNCase3(TestCase2):
    def init_kernel_type(self):
        self.use_cudnn = True
        self.dtype = np.float16

    def test_check_output(self):
        if core.is_compiled_with_cuda():
            place = core.CUDAPlace(0)
            if core.is_float16_supported(place):
                self.check_output_with_place(place, atol=1e-3)


class TestCUDNNCase4(TestCase3):
    def init_kernel_type(self):
        self.use_cudnn = True


class TestFP16CUDNNCase4(TestCase3):
    def init_kernel_type(self):
        self.use_cudnn = True
        self.dtype = np.float16

    def test_check_output(self):
        if core.is_compiled_with_cuda():
            place = core.CUDAPlace(0)
            if core.is_float16_supported(place):
                self.check_output_with_place(place, atol=1e-3)


class TestCUDNNCase5(TestCase4):
    def init_kernel_type(self):
        self.use_cudnn = True


class TestFP16CUDNNCase5(TestCase4):
    def init_kernel_type(self):
        self.use_cudnn = True
        self.dtype = np.float16

    def test_check_output(self):
        if core.is_compiled_with_cuda():
            place = core.CUDAPlace(0)
            if core.is_float16_supported(place):
                self.check_output_with_place(place, atol=1e-3)


class TestCUDNNCase6(TestCase5):
    def init_kernel_type(self):
        self.use_cudnn = True


class TestFP16CUDNNCase6(TestCase5):
    def init_kernel_type(self):
        self.use_cudnn = True
        self.dtype = np.float16

    def test_check_output(self):
        if core.is_compiled_with_cuda():
            place = core.CUDAPlace(0)
            if core.is_float16_supported(place):
                self.check_output_with_place(place, atol=1e-3)


class TestCeilModeCase1(TestCUDNNCase1):
    def init_ceil_mode(self):
        self.ceil_mode = True


class TestCeilModeCase2(TestCUDNNCase2):
    def init_ceil_mode(self):
        self.ceil_mode = True


class TestCeilModeCase3(TestCase1):
    def init_ceil_mode(self):
        self.ceil_mode = True


class TestCeilModeCase4(TestCase2):
    def init_ceil_mode(self):
        self.ceil_mode = True


if __name__ == '__main__':
    unittest.main()
10,132
30.178462
80
py
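Both naive pooling helpers in the record above size their output with floor rounding by default and ceil rounding when ceil_mode is set: out = floor((H - k + 2p) / s) + 1, with (s - 1) added to the numerator in ceil mode. A quick numeric check with assumed values where the two modes actually differ (H=6, k=3, p=0, s=2):

H, k, p, s = 6, 3, 0, 2
floor_out = (H - k + 2 * p) // s + 1            # floor((6 - 3) / 2) + 1 = 2
ceil_out = (H - k + 2 * p + s - 1) // s + 1     # ceil((6 - 3) / 2) + 1 = 3
print(floor_out, ceil_out)  # 2 3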
Paddle
Paddle-master/python/paddle/fluid/tests/unittests/test_sgd_op.py
# Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

import unittest
import numpy as np
import paddle.fluid.core as core
from paddle.fluid.op import Operator
from op_test import OpTest


class TestSGDOp(OpTest):
    def setUp(self):
        self.op_type = "sgd"
        w = np.random.random((102, 105)).astype("float32")
        g = np.random.random((102, 105)).astype("float32")
        lr = np.array([0.1]).astype("float32")

        self.inputs = {'Param': w, 'Grad': g, 'LearningRate': lr}
        self.outputs = {'ParamOut': w - lr * g}

    def test_check_output(self):
        self.check_output()


class TestSparseSGDOp(unittest.TestCase):
    def check_with_place(self, place):
        scope = core.Scope()

        # create and initialize Grad Variable
        height = 10
        rows = [0, 4, 7]
        row_numel = 12

        grad_selected_rows = scope.var('Grad').get_selected_rows()
        grad_selected_rows.set_height(height)
        grad_selected_rows.set_rows(rows)
        np_array = np.ones((len(rows), row_numel)).astype("float32")
        np_array[0, 0] = 2.0
        np_array[2, 8] = 4.0

        grad_tensor = grad_selected_rows.get_tensor()
        grad_tensor.set(np_array, place)

        # create and initialize Param Variable
        param = scope.var('Param').get_tensor()
        param_array = np.full((height, row_numel), 5.0).astype("float32")
        param.set(param_array, place)

        # create and initialize LearningRate Variable
        lr = scope.var('LearningRate').get_tensor()
        lr_array = np.full((1), 2.0).astype("float32")
        lr.set(lr_array, place)

        # create and run sgd operator
        sgd_op = Operator(
            "sgd",
            Param='Param',
            Grad='Grad',
            ParamOut='Param',
            LearningRate='LearningRate')
        sgd_op.run(scope, place)

        # get and compare result
        result_array = np.array(param)

        # rows[0] = 0, 5.0 - 2.0 * 2.0
        self.assertAlmostEqual(1.0, result_array[rows[0], 0])
        # rows[0] = 0, 5.0 - 2.0 * 1.0
        self.assertAlmostEqual(3.0, result_array[rows[0], 2])
        # 5.0 - 2.0 * 0.0
        self.assertAlmostEqual(5.0, result_array[1, 0])
        # rows[1] = 4, 5.0 - 2.0 * 1.0
        self.assertAlmostEqual(3.0, result_array[rows[1], 10])
        # 5.0 - 2.0 * 0.0
        self.assertAlmostEqual(5.0, result_array[5, 8])
        # rows[2] = 7, 5.0 - 2.0 * 1.0
        self.assertAlmostEqual(3.0, result_array[rows[2], 1])
        # rows[2] = 7, 5.0 - 2.0 * 4.0
        self.assertAlmostEqual(-3.0, result_array[rows[2], 8])

    def test_sparse_sgd(self):
        places = [core.CPUPlace()]
        if core.is_compiled_with_cuda():
            places.append(core.CUDAPlace(0))
        for place in places:
            self.check_with_place(place)


class TestSGDOpOptimizeSelectedRows(unittest.TestCase):
    def check_with_place(self, place):
        scope = core.Scope()

        row_width = 12
        # create and initialize Grad Variable
        grad_height = 10
        grad_rows = [0, 4, 7]

        grad_selected_rows = scope.var('Grad').get_selected_rows()
        grad_selected_rows.set_height(grad_height)
        grad_selected_rows.set_rows(grad_rows)
        grad_array = np.ones((len(grad_rows), row_width)).astype("float32")
        grad_array[0, 0] = 2.0
        grad_array[2, 8] = 4.0

        grad_tensor = grad_selected_rows.get_tensor()
        grad_tensor.set(grad_array, place)

        # create and initialize Param (W) Variable
        param_rows = [0, 1, 2, 3, 4, 5, 6, 7]

        # init Param
        w_selected_rows = scope.var('Param').get_selected_rows()
        w_selected_rows.set_height(len(param_rows))
        w_selected_rows.set_rows(param_rows)
        w_array = np.ones((len(param_rows), row_width)).astype("float32")
        for i in range(len(param_rows)):
            w_array[i] *= i
        w_tensor = w_selected_rows.get_tensor()
        w_tensor.set(w_array, place)

        w_before_optimize = np.array(w_tensor)

        # create and initialize LearningRate Variable
        lr_value = 0.1
        lr = scope.var('LearningRate').get_tensor()
        lr_array = np.full((1), lr_value).astype("float32")
        lr.set(lr_array, place)

        # optimize with Python
        w_after_optimize = np.copy(w_before_optimize)
        for index, id in enumerate(grad_rows):
            w_after_optimize[id] = w_before_optimize[
                id] - lr_value * grad_array[index]

        # create and run sgd operator
        sgd_op = Operator(
            "sgd",
            Param='Param',
            Grad='Grad',
            ParamOut='Param',
            LearningRate='LearningRate')
        sgd_op.run(scope, place)

        # get and compare result
        result_array = np.array(w_tensor)
        assert (result_array == w_after_optimize).all()

    def test_sparse_parameter_sgd(self):
        places = [core.CPUPlace()]
        # do not support GPU kernel currently
        for place in places:
            self.check_with_place(place)


if __name__ == "__main__":
    unittest.main()
5,709
32.786982
75
py
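The sparse path tested in the record above only touches the rows named by the selected-rows gradient; every other parameter row is left untouched. A small numpy sketch of that update rule (shapes, learning rate, and values chosen here to mirror the test, not taken from the operator itself):

import numpy as np

param = np.full((10, 12), 5.0, dtype=np.float32)
rows = [0, 4, 7]                       # rows present in the sparse gradient
grad = np.ones((len(rows), 12), dtype=np.float32)
grad[0, 0] = 2.0
lr = 2.0

for i, r in enumerate(rows):
    param[r] -= lr * grad[i]           # only the selected rows are updated

print(param[0, 0], param[1, 0])        # 1.0 (updated), 5.0 (untouched)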
Paddle
Paddle-master/python/paddle/fluid/tests/unittests/test_conv2d_op.py
# Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

import unittest
import numpy as np

import paddle.fluid.core as core
from op_test import OpTest


def conv2d_forward_naive(input, filter, group, conv_param):
    in_n, in_c, in_h, in_w = input.shape
    out_c, f_c, f_h, f_w = filter.shape
    assert f_c * group == in_c
    assert np.mod(out_c, group) == 0
    sub_out_c = out_c / group

    stride, pad, dilation = conv_param['stride'], conv_param['pad'], conv_param[
        'dilation']
    out_h = 1 + (in_h + 2 * pad[0] - (dilation[0] * (f_h - 1) + 1)) / stride[0]
    out_w = 1 + (in_w + 2 * pad[1] - (dilation[1] * (f_w - 1) + 1)) / stride[1]
    out = np.zeros((in_n, out_c, out_h, out_w))

    d_bolck_h = (dilation[0] * (f_h - 1) + 1)
    d_bolck_w = (dilation[1] * (f_w - 1) + 1)

    input_pad = np.pad(input, ((0, ), (0, ), (pad[0], ), (pad[1], )),
                       mode='constant',
                       constant_values=0)

    filter_dilation = np.zeros((out_c, f_c, d_bolck_h, d_bolck_w))
    filter_dilation[:, :, 0:d_bolck_h:dilation[0], 0:d_bolck_w:dilation[
        1]] = filter

    for i in range(out_h):
        for j in range(out_w):
            for g in range(group):
                input_pad_masked = \
                    input_pad[:, g * f_c:(g + 1) * f_c,
                              i * stride[0]:i * stride[0] + d_bolck_h,
                              j * stride[1]:j * stride[1] + d_bolck_w]

                f_sub = filter_dilation[g * sub_out_c:(g + 1) *
                                        sub_out_c, :, :, :]
                for k in range(sub_out_c):
                    out[:, g * sub_out_c + k, i, j] = \
                        np.sum(input_pad_masked * f_sub[k, :, :, :],
                               axis=(1, 2, 3))

    return out


class TestConv2dOp(OpTest):
    def setUp(self):
        self.op_type = "conv2d"
        self.use_cudnn = False
        self.use_mkldnn = False
        self.dtype = np.float32
        self.init_kernel_type()
        self.init_group()
        self.init_dilation()
        self.init_test_case()

        conv2d_param = {
            'stride': self.stride,
            'pad': self.pad,
            'dilation': self.dilations
        }

        input = np.random.random(self.input_size).astype(self.dtype)
        filter = np.random.random(self.filter_size).astype(self.dtype)
        output = conv2d_forward_naive(input, filter, self.groups,
                                      conv2d_param).astype(self.dtype)

        self.inputs = {
            'Input': OpTest.np_dtype_to_fluid_dtype(input),
            'Filter': OpTest.np_dtype_to_fluid_dtype(filter)
        }
        self.attrs = {
            'strides': self.stride,
            'paddings': self.pad,
            'groups': self.groups,
            'dilations': self.dilations,
            'use_cudnn': self.use_cudnn,
            'use_mkldnn': self.use_mkldnn
        }
        self.outputs = {'Output': output}

    def testcudnn(self):
        return core.is_compiled_with_cuda() and self.use_cudnn

    def test_check_output(self):
        if self.testcudnn():
            place = core.CUDAPlace(0)
            self.check_output_with_place(place, atol=1e-5)
        else:
            self.check_output()

    def test_check_grad(self):
        if self.dtype == np.float16:
            return
        if self.testcudnn():
            place = core.CUDAPlace(0)
            self.check_grad_with_place(
                place,
                set(['Input', 'Filter']),
                'Output',
                max_relative_error=0.02)
        else:
            self.check_grad(
                set(['Input', 'Filter']), 'Output', max_relative_error=0.02)

    def test_check_grad_no_filter(self):
        if self.dtype == np.float16:
            return
        if self.testcudnn():
            place = core.CUDAPlace(0)
            self.check_grad_with_place(
                place, ['Input'],
                'Output',
                max_relative_error=0.02,
                no_grad_set=set(['Filter']))
        else:
            self.check_grad(
                ['Input'],
                'Output',
                max_relative_error=0.02,
                no_grad_set=set(['Filter']))

    def test_check_grad_no_input(self):
        if self.dtype == np.float16:
            return
        if self.testcudnn():
            place = core.CUDAPlace(0)
            self.check_grad_with_place(
                place, ['Filter'],
                'Output',
                max_relative_error=0.02,
                no_grad_set=set(['Input']))
        else:
            self.check_grad(
                ['Filter'],
                'Output',
                max_relative_error=0.02,
                no_grad_set=set(['Input']))

    def init_test_case(self):
        self.pad = [0, 0]
        self.stride = [1, 1]
        self.input_size = [2, 3, 5, 5]  # NCHW
        assert np.mod(self.input_size[1], self.groups) == 0
        f_c = self.input_size[1] / self.groups
        self.filter_size = [6, f_c, 3, 3]

    def init_dilation(self):
        self.dilations = [1, 1]

    def init_group(self):
        self.groups = 1

    def init_kernel_type(self):
        pass


class TestWithPad(TestConv2dOp):
    def init_test_case(self):
        self.pad = [1, 1]
        self.stride = [1, 1]
        self.input_size = [2, 3, 5, 5]  # NCHW
        assert np.mod(self.input_size[1], self.groups) == 0
        f_c = self.input_size[1] / self.groups
        self.filter_size = [6, f_c, 3, 3]


class TestWithStride(TestConv2dOp):
    def init_test_case(self):
        self.pad = [1, 1]
        self.stride = [2, 2]
        self.input_size = [2, 3, 6, 6]  # NCHW
        assert np.mod(self.input_size[1], self.groups) == 0
        f_c = self.input_size[1] / self.groups
        self.filter_size = [6, f_c, 3, 3]


class TestWithGroup(TestConv2dOp):
    def init_group(self):
        self.groups = 3


class TestWith1x1(TestConv2dOp):
    def init_test_case(self):
        self.pad = [0, 0]
        self.stride = [1, 1]
        self.input_size = [2, 3, 5, 5]  # NCHW
        assert np.mod(self.input_size[1], self.groups) == 0
        f_c = self.input_size[1] / self.groups
        self.filter_size = [6, f_c, 1, 1]

    def init_group(self):
        self.groups = 3


class TestWithDilation(TestConv2dOp):
    def init_test_case(self):
        self.pad = [0, 0]
        self.stride = [1, 1]
        self.input_size = [2, 3, 10, 10]  # NCHW
        assert np.mod(self.input_size[1], self.groups) == 0
        f_c = self.input_size[1] / self.groups
        self.filter_size = [6, f_c, 3, 3]

    def init_dilation(self):
        self.dilations = [2, 2]

    def init_group(self):
        self.groups = 3


class TestWithInput1x1Filter1x1(TestConv2dOp):
    def init_test_case(self):
        self.pad = [0, 0]
        self.stride = [1, 1]
        self.input_size = [2, 3, 1, 1]  # NCHW
        assert np.mod(self.input_size[1], self.groups) == 0
        f_c = self.input_size[1] / self.groups
        self.filter_size = [6, f_c, 1, 1]

    def init_group(self):
        self.groups = 3


#----------------Conv2dCUDNN----------------
class TestCUDNN(TestConv2dOp):
    def init_kernel_type(self):
        self.use_cudnn = True


class TestFP16CUDNN(TestConv2dOp):
    def init_kernel_type(self):
        self.use_cudnn = True
        self.dtype = np.float16

    def test_check_output(self):
        if core.is_compiled_with_cuda():
            place = core.CUDAPlace(0)
            if core.is_float16_supported(place):
                self.check_output_with_place(place, atol=2e-2)


class TestCUDNNWithPad(TestWithPad):
    def init_kernel_type(self):
        self.use_cudnn = True


class TestFP16CUDNNWithPad(TestWithPad):
    def init_kernel_type(self):
        self.use_cudnn = True
        self.dtype = np.float16

    def test_check_output(self):
        if core.is_compiled_with_cuda():
            place = core.CUDAPlace(0)
            if core.is_float16_supported(place):
                self.check_output_with_place(place, atol=2e-2)


class TestCUDNNWithStride(TestWithStride):
    def init_kernel_type(self):
        self.use_cudnn = True


class TestFP16CUDNNWithStride(TestWithStride):
    def init_kernel_type(self):
        self.use_cudnn = True
        self.dtype = np.float16

    def test_check_output(self):
        if core.is_compiled_with_cuda():
            place = core.CUDAPlace(0)
            if core.is_float16_supported(place):
                self.check_output_with_place(place, atol=2e-2)


class TestCUDNNWithGroup(TestWithGroup):
    def init_kernel_type(self):
        self.use_cudnn = True


class TestFP16CUDNNWithGroup(TestWithGroup):
    def init_kernel_type(self):
        self.use_cudnn = True
        self.dtype = np.float16

    def test_check_output(self):
        if core.is_compiled_with_cuda():
            place = core.CUDAPlace(0)
            if core.is_float16_supported(place):
                self.check_output_with_place(place, atol=2e-2)


class TestCUDNNWith1x1(TestWith1x1):
    def init_kernel_type(self):
        self.use_cudnn = True


class TestFP16CUDNNWith1x1(TestWith1x1):
    def init_kernel_type(self):
        self.use_cudnn = True
        self.dtype = np.float16

    def test_check_output(self):
        if core.is_compiled_with_cuda():
            place = core.CUDAPlace(0)
            if core.is_float16_supported(place):
                self.check_output_with_place(place, atol=2e-2)


class TestCUDNNWithInput1x1Filter1x1(TestWithInput1x1Filter1x1):
    def init_kernel_type(self):
        self.use_cudnn = True


class TestFP16CUDNNWithInput1x1Filter1x1(TestWithInput1x1Filter1x1):
    def init_kernel_type(self):
        self.use_cudnn = True
        self.dtype = np.float16

    def test_check_output(self):
        if core.is_compiled_with_cuda():
            place = core.CUDAPlace(0)
            if core.is_float16_supported(place):
                self.check_output_with_place(place, atol=2e-2)


class TestDepthwiseConv(TestConv2dOp):
    def init_test_case(self):
        self.pad = [1, 1]
        self.stride = [2, 2]
        self.input_size = [2, 3, 5, 5]  # NCHW
        self.groups = 3
        assert np.mod(self.input_size[1], self.groups) == 0
        f_c = self.input_size[1] / self.groups
        self.filter_size = [6, f_c, 3, 3]
        self.op_type = "depthwise_conv2d"


class TestDepthwiseConv2(TestConv2dOp):
    def init_test_case(self):
        self.pad = [1, 1]
        self.stride = [1, 1]
        self.input_size = [2, 3, 5, 5]  # NCHW
        self.groups = 3
        assert np.mod(self.input_size[1], self.groups) == 0
        f_c = self.input_size[1] / self.groups
        self.filter_size = [6, f_c, 3, 3]
        self.op_type = "depthwise_conv2d"


# Please don't remove the following code.
# Currently, CI uses cuDNN V5.0, which does not support dilation conv.
# class TestCUDNNWithDilation(TestWithDilation):
#     def init_op_type(self):
#         self.op_type = "conv_cudnn"

if __name__ == '__main__':
    unittest.main()
11,596
29.679894
80
py
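conv2d_forward_naive in the record above sizes its output from the dilated kernel extent d = dilation * (f - 1) + 1, i.e. out = (in + 2*pad - d) / stride + 1. A quick check using the dilated test case's numbers (in=10, f=3, dilation=2, pad=0, stride=1):

in_h, f_h, dilation, pad, stride = 10, 3, 2, 0, 1
d = dilation * (f_h - 1) + 1                 # effective (dilated) kernel height: 5
out_h = (in_h + 2 * pad - d) // stride + 1
print(d, out_h)                              # 5 6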
Paddle
Paddle-master/python/paddle/fluid/tests/unittests/test_unique_name.py
# Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

import unittest
import paddle.fluid as fluid


class TestUniqueName(unittest.TestCase):
    def test_guard(self):
        with fluid.unique_name.guard():
            name_1 = fluid.unique_name.generate('')

        with fluid.unique_name.guard():
            name_2 = fluid.unique_name.generate('')

        self.assertEqual(name_1, name_2)

        with fluid.unique_name.guard("A"):
            name_1 = fluid.unique_name.generate('')

        with fluid.unique_name.guard('B'):
            name_2 = fluid.unique_name.generate('')

        self.assertNotEqual(name_1, name_2)

    def test_generate(self):
        with fluid.unique_name.guard():
            name1 = fluid.unique_name.generate('fc')
            name2 = fluid.unique_name.generate('fc')
            name3 = fluid.unique_name.generate('tmp')
        self.assertNotEqual(name1, name2)
        self.assertEqual(name1[-2:], name3[-2:])
1,520
33.568182
74
py
Paddle
Paddle-master/python/paddle/fluid/tests/unittests/test_box_coder_op.py
# Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

import unittest
import numpy as np
import sys
import math
from op_test import OpTest


def box_coder(target_box, prior_box, prior_box_var, output_box, code_type):
    prior_box_x = (
        (prior_box[:, 2] + prior_box[:, 0]) / 2).reshape(1, prior_box.shape[0])
    prior_box_y = (
        (prior_box[:, 3] + prior_box[:, 1]) / 2).reshape(1, prior_box.shape[0])
    prior_box_width = (
        (prior_box[:, 2] - prior_box[:, 0])).reshape(1, prior_box.shape[0])
    prior_box_height = (
        (prior_box[:, 3] - prior_box[:, 1])).reshape(1, prior_box.shape[0])
    prior_box_var = prior_box_var.reshape(1, prior_box_var.shape[0],
                                          prior_box_var.shape[1])

    if (code_type == "EncodeCenterSize"):
        target_box_x = ((target_box[:, 2] + target_box[:, 0]) / 2).reshape(
            target_box.shape[0], 1)
        target_box_y = ((target_box[:, 3] + target_box[:, 1]) / 2).reshape(
            target_box.shape[0], 1)
        target_box_width = ((target_box[:, 2] - target_box[:, 0])).reshape(
            target_box.shape[0], 1)
        target_box_height = ((target_box[:, 3] - target_box[:, 1])).reshape(
            target_box.shape[0], 1)

        output_box[:, :, 0] = (target_box_x - prior_box_x) / \
            prior_box_width / prior_box_var[:, :, 0]
        output_box[:, :, 1] = (target_box_y - prior_box_y) / \
            prior_box_height / prior_box_var[:, :, 1]
        output_box[:, :, 2] = np.log(
            np.fabs(target_box_width / prior_box_width)) / \
            prior_box_var[:, :, 2]
        output_box[:, :, 3] = np.log(
            np.fabs(target_box_height / prior_box_height)) / \
            prior_box_var[:, :, 3]

    elif (code_type == "DecodeCenterSize"):
        target_box_x = prior_box_var[:, :, 0] * target_box[:, :, 0] * \
            prior_box_width + prior_box_x
        target_box_y = prior_box_var[:, :, 1] * target_box[:, :, 1] * \
            prior_box_height + prior_box_y
        target_box_width = np.exp(
            prior_box_var[:, :, 2] * target_box[:, :, 2]) * prior_box_width
        target_box_height = np.exp(
            prior_box_var[:, :, 3] * target_box[:, :, 3]) * prior_box_height

        output_box[:, :, 0] = target_box_x - target_box_width / 2
        output_box[:, :, 1] = target_box_y - target_box_height / 2
        output_box[:, :, 2] = target_box_x + target_box_width / 2
        output_box[:, :, 3] = target_box_y + target_box_height / 2


def batch_box_coder(prior_box, prior_box_var, target_box, lod, code_type):
    n = target_box.shape[0]
    m = prior_box.shape[0]
    output_box = np.zeros((n, m, 4), dtype=np.float32)
    for i in range(len(lod) - 1):
        if (code_type == "EncodeCenterSize"):
            box_coder(target_box[lod[i]:lod[i + 1], :], prior_box,
                      prior_box_var, output_box[lod[i]:lod[i + 1], :, :],
                      code_type)
        elif (code_type == "DecodeCenterSize"):
            box_coder(target_box[lod[i]:lod[i + 1], :, :], prior_box,
                      prior_box_var, output_box[lod[i]:lod[i + 1], :, :],
                      code_type)
    return output_box


class TestBoxCoderOp(OpTest):
    def test_check_output(self):
        self.check_output()

    def setUp(self):
        self.op_type = "box_coder"
        lod = [[0, 1, 2, 3, 4, 5]]
        prior_box = np.random.random((10, 4)).astype('float32')
        prior_box_var = np.random.random((10, 4)).astype('float32')
        target_box = np.random.random((5, 10, 4)).astype('float32')
        code_type = "DecodeCenterSize"
        output_box = batch_box_coder(prior_box, prior_box_var, target_box,
                                     lod[0], code_type)

        self.inputs = {
            'PriorBox': prior_box,
            'PriorBoxVar': prior_box_var,
            'TargetBox': target_box,
        }
        self.attrs = {'code_type': 'decode_center_size'}
        self.outputs = {'OutputBox': output_box}


class TestBoxCoderOpWithLoD(OpTest):
    def test_check_output(self):
        self.check_output()

    def setUp(self):
        self.op_type = "box_coder"
        lod = [[0, 4, 12, 20]]
        prior_box = np.random.random((10, 4)).astype('float32')
        prior_box_var = np.random.random((10, 4)).astype('float32')
        target_box = np.random.random((20, 4)).astype('float32')
        code_type = "EncodeCenterSize"
        output_box = batch_box_coder(prior_box, prior_box_var, target_box,
                                     lod[0], code_type)

        self.inputs = {
            'PriorBox': prior_box,
            'PriorBoxVar': prior_box_var,
            'TargetBox': (target_box, lod),
        }
        self.attrs = {'code_type': 'encode_center_size'}
        self.outputs = {'OutputBox': output_box}


if __name__ == '__main__':
    unittest.main()
5,450
39.984962
85
py
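The encoder and decoder in the record above are inverses of each other up to the prior-box variance scaling: encoding produces variance-normalized center offsets and log size ratios, and decoding reverses both steps. A compact sketch for a single prior/target pair (the box values below are made up for illustration):

import numpy as np

prior = np.array([0.1, 0.1, 0.5, 0.5])    # xmin, ymin, xmax, ymax
var = np.array([0.1, 0.1, 0.2, 0.2])
target = np.array([0.2, 0.2, 0.6, 0.7])

pw, ph = prior[2] - prior[0], prior[3] - prior[1]
px, py = (prior[2] + prior[0]) / 2, (prior[3] + prior[1]) / 2
tw, th = target[2] - target[0], target[3] - target[1]
tx, ty = (target[2] + target[0]) / 2, (target[3] + target[1]) / 2

# encode: variance-normalized center offsets and log size ratios
code = np.array([(tx - px) / pw / var[0], (ty - py) / ph / var[1],
                 np.log(tw / pw) / var[2], np.log(th / ph) / var[3]])

# decode reverses it exactly
dx, dy = code[0] * var[0] * pw + px, code[1] * var[1] * ph + py
dw, dh = np.exp(code[2] * var[2]) * pw, np.exp(code[3] * var[3]) * ph
print(np.allclose([dx - dw / 2, dy - dh / 2, dx + dw / 2, dy + dh / 2],
                  target))  # True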
Paddle
Paddle-master/python/paddle/fluid/tests/unittests/test_dist_train.py
# Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

import os
import time
import unittest
from multiprocessing import Process

import numpy

import paddle.fluid as fluid
import paddle.fluid.layers as layers


class TestSendOp(unittest.TestCase):
    @unittest.skip(
        "This test is buggy. We cannot use time.sleep to sync processes, the connection may fail in unittest."
    )
    def test_send(self):
        # Run init_serv in a subprocess
        place = fluid.CPUPlace()
        # NOTE: python thread will not work here due to GIL.
        p = Process(target=self.init_serv, args=(place, ))
        p.daemon = True
        p.start()

        time.sleep(10)
        with open("/tmp/paddle.%d.port" % p.pid, "r") as fn:
            selected_port = int(fn.readlines()[0])
        self.init_client(place, selected_port)

        self.run_local(place)
        self.assertTrue(numpy.allclose(self.local_out, self.dist_out))

        # FIXME(typhoonzero): find a way to gracefully shutdown the server.
        os.system("kill -9 %d" % p.pid)
        p.join()

    def init_serv(self, place):
        main = fluid.Program()

        with fluid.program_guard(main):
            serv = layers.ListenAndServ(
                "127.0.0.1:0", ["X"], optimizer_mode=False)
            with serv.do():
                out_var = main.global_block().create_var(
                    name="scale_0.tmp_0",
                    persistable=True,
                    dtype="float32",
                    shape=[32, 32])
                x = layers.data(
                    shape=[32, 32],
                    dtype='float32',
                    name="X",
                    append_batch_size=False)
                fluid.initializer.Constant(value=1.0)(x, main.global_block())
                layers.scale(x=x, scale=10.0, out=out_var)

        self.server_exe = fluid.Executor(place)
        self.server_exe.run(main)

    def init_client(self, place, port):
        main = fluid.Program()
        with fluid.program_guard(main):
            x = layers.data(
                shape=[32, 32],
                dtype='float32',
                name='X',
                append_batch_size=False)
            fluid.initializer.Constant(value=2.3)(x, main.global_block())
            get_var = main.global_block().create_var(
                name="scale_0.tmp_0",  # server side var
                dtype="float32",
                persistable=False,
                shape=[32, 32])
            o = layers.Send("127.0.0.1:%d" % port, [x], [get_var])
        exe = fluid.Executor(place)
        self.dist_out = exe.run(main, fetch_list=o)  # o is a list

    def run_local(self, place):
        main = fluid.Program()
        with fluid.program_guard(main):
            x = layers.data(
                shape=[32, 32],
                dtype='float32',
                name='X',
                append_batch_size=False)
            fluid.initializer.Constant(value=2.3)(x, main.global_block())
            o = layers.scale(x=x, scale=10.0)
        exe = fluid.Executor(place)
        self.local_out = exe.run(main, fetch_list=[o])


if __name__ == "__main__":
    unittest.main()
3,719
33.766355
110
py
Paddle
Paddle-master/python/paddle/fluid/tests/unittests/test_sum_op.py
# Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

import unittest
import numpy as np
from op_test import OpTest


class TestSumOp(OpTest):
    def setUp(self):
        self.op_type = "sum"
        x0 = np.random.random((3, 4)).astype('float32')
        x1 = np.random.random((3, 4)).astype('float32')
        x2 = np.random.random((3, 4)).astype('float32')
        self.inputs = {"X": [("x0", x0), ("x1", x1), ("x2", x2)]}
        y = x0 + x1 + x2
        self.outputs = {'Out': y}

    def test_check_output(self):
        self.check_output()

    def test_check_grad(self):
        self.check_grad(['x0'], 'Out')


if __name__ == "__main__":
    unittest.main()
1,227
30.487179
74
py
Paddle
Paddle-master/python/paddle/fluid/tests/unittests/test_program.py
# Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

from __future__ import print_function
import unittest

from paddle.fluid.framework import Program, default_main_program, program_guard, grad_var_name
import paddle.fluid.layers as layers

main_program = default_main_program()


class TestProgram(unittest.TestCase):
    def test_program(self):
        b = main_program.current_block()
        self.assertEqual(-1, b.parent_idx)
        self.assertEqual(0, b.idx)

        b = main_program.create_block()
        self.assertEqual(1, b.idx)
        self.assertEqual(0, b.parent_idx)

        b = main_program.create_block()
        self.assertEqual(2, b.idx)
        self.assertEqual(1, b.parent_idx)

        main_program.rollback()

        b = main_program.current_block()
        self.assertEqual(1, b.idx)
        self.assertEqual(0, b.parent_idx)

        b = main_program.create_block()
        self.assertEqual(3, b.idx)
        self.assertEqual(1, b.parent_idx)

        main_program.rollback()

        b = main_program.current_block()
        self.assertEqual(1, b.idx)
        self.assertEqual(0, b.parent_idx)

    def test_program_clone(self):
        prog = Program()

        x = prog.global_block().create_var(
            name='X', shape=[1000, 784], dtype='float32')

        y = prog.global_block().create_var(
            name='Y', shape=[784, 100], dtype='float32')
        out = prog.global_block().create_var(name='Out', dtype='float32')
        prog.global_block().append_op(
            type="mul", inputs={'X': [x],
                                'Y': [y]}, outputs={'Out': [out]})

        # FIXME(yuyang18): We manually compare the output strings, since the
        # order of variables could be changed.
        print(prog)
        print(prog.clone())

    def test_parse_program_from_string(self):
        prog = Program()

        x = prog.global_block().create_var(
            name='X', shape=[1000, 784], dtype='float32')

        y = prog.global_block().create_var(
            name='Y', shape=[784, 100], dtype='float32')
        out = prog.global_block().create_var(name='Out', dtype='float32')
        prog.global_block().append_op(
            type="mul", inputs={'X': [x],
                                'Y': [y]}, outputs={'Out': [out]})

        binary_str = prog.desc.serialize_to_string()
        prog_restored = Program.parse_from_string(binary_str)

        print(prog)
        print(prog_restored)

    def test_program_clone_with_parameter(self):
        main_program = Program()
        startup_program = Program()
        with program_guard(main_program, startup_program):
            d = layers.data(name='x', shape=[784], dtype='float32')
            hidden = layers.fc(input=d, size=100)
            layers.fc(input=hidden, size=100)

        new_program = main_program.clone()
        self.assertNotEqual(0, len(new_program.blocks[0].all_parameters()))


if __name__ == '__main__':
    unittest.main()
3,515
32.807692
94
py
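test_program in the record above exercises Program's block bookkeeping: create_block pushes a child of the current block and rollback pops back to the parent, so idx values grow monotonically while parent_idx tracks the stack. The expected idx/parent_idx pairs can be reproduced with a plain list-based sketch (a toy model for illustration, not the fluid API):

class BlockStackSketch(object):
    def __init__(self):
        self.parents = [-1]   # block 0 has no parent
        self.current = 0

    def create_block(self):
        self.parents.append(self.current)
        self.current = len(self.parents) - 1
        return self.current

    def rollback(self):
        self.current = self.parents[self.current]

p = BlockStackSketch()
p.create_block()                         # idx 1, parent 0
p.create_block()                         # idx 2, parent 1
p.rollback()                             # back to block 1
print(p.create_block(), p.parents[-1])   # 3 1, matching the test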
Paddle
Paddle-master/python/paddle/fluid/tests/unittests/test_split_ids_op.py
# Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

import unittest
import numpy as np
from op_test import OpTest


class TestSplitIdsOp(OpTest):
    def setUp(self):
        self.op_type = "split_ids"
        ids = np.array([[0], [2], [2], [3], [5], [5], [6]]).astype('int64')
        out0 = np.array([[0], [3], [6]]).astype('int64')
        out1 = np.array([[]]).astype('int64')
        out2 = np.array([[2], [2], [5], [5]]).astype('int64')
        self.inputs = {'Ids': ids}
        self.outputs = {'Out': [('out0', out0), ('out1', out1),
                                ('out2', out2)]}

    def test_check_output(self):
        self.check_output()


if __name__ == '__main__':
    unittest.main()
1,231
33.222222
80
py
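The expected outputs in the record above follow a plain modulo sharding rule: id % number_of_outputs selects the shard, which is how a parameter-server setup routes embedding rows. A one-liner sketch reproducing the test's fixtures:

ids = [0, 2, 2, 3, 5, 5, 6]
shards = [[i for i in ids if i % 3 == k] for k in range(3)]
print(shards)  # [[0, 3, 6], [], [2, 2, 5, 5]]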
Paddle
Paddle-master/python/paddle/fluid/tests/unittests/test_im2sequence_op.py
# Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved.
#
#Licensed under the Apache License, Version 2.0 (the "License");
#you may not use this file except in compliance with the License.
#You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
#Unless required by applicable law or agreed to in writing, software
#distributed under the License is distributed on an "AS IS" BASIS,
#WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
#See the License for the specific language governing permissions and
#limitations under the License.

import unittest
import numpy as np
from op_test import OpTest


def get_output_shape(attrs, in_shape):
    img_height = in_shape[2]
    img_width = in_shape[3]

    paddings = attrs['paddings']
    kernels = attrs['kernels']
    strides = attrs['strides']

    output_height = \
        1 + \
        (img_height + paddings[0] + paddings[2] - kernels[0] + strides[0] - 1) / \
        strides[0]

    output_width = \
        1 + \
        (img_width + paddings[1] + paddings[3] - kernels[1] + strides[1] - 1) / \
        strides[1]

    return output_height, output_width


def im2col(attrs, im, col):
    """
    im: {CHW}
    col:
        {outputHeight, outputWidth, inputChannels, filterHeight, filterWidth}
    """
    input_channels, input_height, input_width = im.shape
    output_height, output_width, _, filter_height, filter_width = col.shape

    stride_height, stride_width = attrs['strides']
    padding_height, padding_width = attrs['paddings'][0:2]

    for col_row_idx in range(0, output_height):
        for col_col_idx in range(0, output_width):
            for channel in range(0, input_channels):
                for filter_row_idx in range(0, filter_height):
                    for filter_col_idx in range(0, filter_width):
                        im_row_offset = col_row_idx * stride_height \
                            + filter_row_idx - padding_height
                        im_col_offset = col_col_idx * stride_width \
                            + filter_col_idx - padding_width
                        if (im_row_offset < 0 or
                                im_row_offset >= input_height or
                                im_col_offset < 0 or
                                im_col_offset >= input_width):
                            col[col_row_idx][col_col_idx][channel][
                                filter_row_idx][filter_col_idx] = 0.0
                        else:
                            im_offset = (channel * input_height + im_row_offset
                                         ) * input_width + im_col_offset
                            col[col_row_idx][col_col_idx][channel][
                                filter_row_idx][filter_col_idx] = im[channel][
                                    im_row_offset][im_col_offset]


def Im2Sequence(inputs, attrs):
    output_height, output_width = get_output_shape(attrs, inputs.shape)
    img_channels = inputs.shape[1]
    batch_size = inputs.shape[0]
    out = np.zeros([
        batch_size, output_height, output_width, img_channels,
        attrs['kernels'][0], attrs['kernels'][1]
    ]).astype("float32")

    for i in range(len(inputs)):
        im2col(attrs, inputs[i], out[i])

    out = out.reshape([
        batch_size * output_height * output_width,
        img_channels * attrs['kernels'][0] * attrs['kernels'][1]
    ])
    return out


class TestBlockExpandOp(OpTest):
    def config(self):
        self.batch_size = 1
        self.img_channels = 3
        self.img_height = 4
        self.img_width = 4
        self.attrs = {
            'kernels': [2, 2],
            'strides': [1, 1],
            'paddings': [1, 1, 1, 1]
        }

    def setUp(self):
        self.config()
        self.op_type = "im2sequence"
        x = np.random.uniform(0.1, 1, [
            self.batch_size, self.img_channels, self.img_height,
            self.img_width
        ]).astype("float32")
        out = Im2Sequence(x, self.attrs)
        self.inputs = {'X': x}
        self.outputs = {'Out': out}

    def test_check_output(self):
        self.check_output()

    def test_check_grad_normal(self):
        self.check_grad(['X'], 'Out')


class TestBlockExpandOpCase2(TestBlockExpandOp):
    def config(self):
        self.batch_size = 2
        self.img_channels = 3
        self.img_height = 4
        self.img_width = 5
        self.attrs = {
            'kernels': [2, 1],
            'strides': [2, 1],
            'paddings': [2, 1, 2, 1]
        }


class TestBlockExpandOpCase3(TestBlockExpandOp):
    def config(self):
        self.batch_size = 3
        self.img_channels = 1
        self.img_height = 4
        self.img_width = 5
        self.attrs = {
            'kernels': [2, 1],
            'strides': [2, 1],
            'paddings': [2, 0, 2, 0]
        }


class TestBlockExpandOpCase4(TestBlockExpandOp):
    def config(self):
        self.batch_size = 2
        self.img_channels = 2
        self.img_height = 3
        self.img_width = 3
        self.attrs = {
            'kernels': [2, 2],
            'strides': [1, 1],
            'paddings': [0, 0, 0, 0]
        }


if __name__ == '__main__':
    unittest.main()
5,232
30.14881
81
py
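Im2Sequence in the record above turns each image into out_h * out_w sequence steps of length C * kh * kw, with out_h and out_w computed like a strided convolution over the padded image. Using the first test case's numbers (4x4 input, 2x2 kernel, stride 1, padding 1 on every side):

img_h, kh, pad_top, pad_bottom, stride = 4, 2, 1, 1, 1
out_h = (img_h + pad_top + pad_bottom - kh + stride - 1) // stride + 1
print(out_h)  # 5 -> each image yields 5 * 5 = 25 steps of length 3 * 2 * 2 = 12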
Paddle
Paddle-master/python/paddle/fluid/tests/unittests/test_network_with_dtype.py
# Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

import unittest

import numpy as np
import paddle
import paddle.fluid as fluid
import paddle.fluid.core as core
from paddle.fluid.executor import Executor

BATCH_SIZE = 20


class TestNetWithDtype(unittest.TestCase):
    def setUp(self):
        self.dtype = "float64"
        self.init_dtype()

    def run_net_on_place(self, place):
        main = fluid.Program()
        startup = fluid.Program()
        with fluid.program_guard(main, startup):
            x = fluid.layers.data(name='x', shape=[13], dtype=self.dtype)
            y = fluid.layers.data(name='y', shape=[1], dtype=self.dtype)
            y_predict = fluid.layers.fc(input=x, size=1, act=None)
            cost = fluid.layers.square_error_cost(input=y_predict, label=y)
            avg_cost = fluid.layers.mean(cost)
            sgd_optimizer = fluid.optimizer.SGD(learning_rate=0.001)
            sgd_optimizer.minimize(avg_cost)

        fetch_list = [avg_cost]
        train_reader = paddle.batch(
            paddle.dataset.uci_housing.train(), batch_size=BATCH_SIZE)
        feeder = fluid.DataFeeder(place=place, feed_list=[x, y])
        exe = fluid.Executor(place)
        exe.run(startup)
        for data in train_reader():
            exe.run(main, feed=feeder.feed(data), fetch_list=fetch_list)
            # the main program is runnable, the datatype is fully supported
            break

    def init_dtype(self):
        pass

    def test_cpu(self):
        place = fluid.CPUPlace()
        self.run_net_on_place(place)

    def test_gpu(self):
        if not core.is_compiled_with_cuda():
            return
        place = fluid.CUDAPlace(0)
        self.run_net_on_place(place)


# TODO(dzhwinter): make sure the fp16 is runnable
# class TestFloat16(TestNetWithDtype):
#     def init_dtype(self):
#         self.dtype = "float16"

if __name__ == '__main__':
    unittest.main()
2,477
32.04
75
py
Paddle
Paddle-master/python/paddle/fluid/tests/unittests/test_reduce_op.py
# Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

import unittest
import numpy as np
from op_test import OpTest


class TestSumOp(OpTest):
    def setUp(self):
        self.op_type = "reduce_sum"
        self.inputs = {'X': np.random.random((5, 6, 10)).astype("float64")}
        self.outputs = {'Out': self.inputs['X'].sum(axis=0)}

    def test_check_output(self):
        self.check_output()

    def test_check_grad(self):
        self.check_grad(['X'], 'Out')


class TestMeanOp(OpTest):
    def setUp(self):
        self.op_type = "reduce_mean"
        self.inputs = {'X': np.random.random((5, 6, 2, 10)).astype("float64")}
        self.attrs = {'dim': [1]}
        self.outputs = {
            'Out': self.inputs['X'].mean(axis=tuple(self.attrs['dim']))
        }

    def test_check_output(self):
        self.check_output()

    def test_check_grad(self):
        self.check_grad(['X'], 'Out')


class TestMaxOp(OpTest):
    """Remove Max with subgradient from gradient check to confirm the success of CI."""

    def setUp(self):
        self.op_type = "reduce_max"
        self.inputs = {'X': np.random.random((5, 6, 10)).astype("float64")}
        self.attrs = {'dim': [-1]}
        self.outputs = {
            'Out': self.inputs['X'].max(axis=tuple(self.attrs['dim']))
        }

    def test_check_output(self):
        self.check_output()


class TestMinOp(OpTest):
    """Remove Min with subgradient from gradient check to confirm the success of CI."""

    def setUp(self):
        self.op_type = "reduce_min"
        self.inputs = {'X': np.random.random((5, 6, 10)).astype("float64")}
        self.attrs = {'dim': [2]}
        self.outputs = {
            'Out': self.inputs['X'].min(axis=tuple(self.attrs['dim']))
        }

    def test_check_output(self):
        self.check_output()


class TestProdOp(OpTest):
    def setUp(self):
        self.op_type = "reduce_prod"
        self.inputs = {'X': np.random.random((5, 6, 10)).astype("float64")}
        self.outputs = {'Out': self.inputs['X'].prod(axis=0)}

    def test_check_output(self):
        self.check_output()

    def test_check_grad(self):
        self.check_grad(['X'], 'Out')


class TestKeepDimReduce(OpTest):
    def setUp(self):
        self.op_type = "reduce_sum"
        self.inputs = {'X': np.random.random((5, 6, 10)).astype("float64")}
        self.attrs = {'dim': [-2], 'keep_dim': True}
        self.outputs = {
            'Out': self.inputs['X'].sum(axis=tuple(self.attrs['dim']),
                                        keepdims=True)
        }

    def test_check_output(self):
        self.check_output()

    def test_check_grad(self):
        self.check_grad(['X'], 'Out')


class Test1DReduce(OpTest):
    def setUp(self):
        self.op_type = "reduce_sum"
        self.inputs = {'X': np.random.random(20).astype("float64")}
        self.outputs = {'Out': self.inputs['X'].sum(axis=0)}

    def test_check_output(self):
        self.check_output()

    def test_check_grad(self):
        self.check_grad(['X'], 'Out')


class TestReduceAll(OpTest):
    def setUp(self):
        self.op_type = "reduce_sum"
        self.inputs = {'X': np.random.random((5, 6, 2, 10)).astype("float64")}
        self.attrs = {'reduce_all': True}
        self.outputs = {'Out': self.inputs['X'].sum()}

    def test_check_output(self):
        self.check_output()

    def test_check_grad(self):
        self.check_grad(['X'], 'Out')


## reduction in multiple dims
class TestReduceMeanOpMultiAxises(OpTest):
    def setUp(self):
        self.op_type = "reduce_mean"
        self.inputs = {'X': np.random.random((5, 6, 2, 10)).astype("float64")}
        self.attrs = {'dim': [1, 2]}
        self.outputs = {'Out': self.inputs['X'].mean(axis=(1, 2))}

    def test_check_output(self):
        self.check_output()

    def test_check_grad(self):
        self.check_grad(['X'], 'Out')


class TestReduceMaxOpMultiAxises(OpTest):
    """Remove Max with subgradient from gradient check to confirm the success of CI."""

    def setUp(self):
        self.op_type = "reduce_max"
        self.inputs = {'X': np.random.random((5, 6, 10)).astype("float64")}
        self.attrs = {'dim': [-2, -1]}
        self.outputs = {
            'Out': self.inputs['X'].max(axis=tuple(self.attrs['dim']))
        }

    def test_check_output(self):
        self.check_output()


class TestReduceMinOpMultiAxises(OpTest):
    """Remove Min with subgradient from gradient check to confirm the success of CI."""

    def setUp(self):
        self.op_type = "reduce_min"
        self.inputs = {'X': np.random.random((5, 6, 10)).astype("float64")}
        self.attrs = {'dim': [1, 2]}
        self.outputs = {
            'Out': self.inputs['X'].min(axis=tuple(self.attrs['dim']))
        }

    def test_check_output(self):
        self.check_output()


class TestKeepDimReduceSumMultiAxises(OpTest):
    def setUp(self):
        self.op_type = "reduce_sum"
        self.inputs = {'X': np.random.random((5, 6, 10)).astype("float64")}
        self.attrs = {'dim': [-2, -1], 'keep_dim': True}
        self.outputs = {
            'Out': self.inputs['X'].sum(axis=tuple(self.attrs['dim']),
                                        keepdims=True)
        }

    def test_check_output(self):
        self.check_output()

    def test_check_grad(self):
        self.check_grad(['X'], 'Out')


if __name__ == '__main__':
    unittest.main()
5,897
28.49
87
py
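The keep_dim cases in the record above map directly onto numpy's keepdims flag, and negative dims count from the end exactly as in the attrs. A tiny sketch:

import numpy as np

x = np.random.random((5, 6, 10))
print(x.sum(axis=(-2, -1), keepdims=True).shape)  # (5, 1, 1)
print(x.sum(axis=(-2, -1)).shape)                 # (5,)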
Paddle
Paddle-master/python/paddle/fluid/tests/unittests/test_feed_fetch_method.py
# Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

import paddle.fluid.core as core
import unittest
import numpy as np


class TestFeedFetch(unittest.TestCase):
    def test_feed_fetch(self):
        scope = core.Scope()
        place = core.CPUPlace()
        input_array = np.ones((4, 4, 6)).astype("float32")
        input_array[0, 0, 0] = 3
        input_array[3, 3, 5] = 10
        input_tensor = core.LoDTensor([[0, 2, 4]])
        input_tensor.set(input_array, place)

        core.set_feed_variable(scope, input_tensor, "feed", 0)

        output_tensor = core.get_fetch_variable(scope, "feed", 0)

        output_lod = output_tensor.lod()
        self.assertEqual(0, output_lod[0][0])
        self.assertEqual(2, output_lod[0][1])
        self.assertEqual(4, output_lod[0][2])

        output_array = np.array(output_tensor)
        self.assertEqual(3, output_array[0, 0, 0])
        self.assertEqual(10, output_array[3, 3, 5])


if __name__ == "__main__":
    unittest.main()
1,548
32.673913
74
py
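The LoD [[0, 2, 4]] attached to the tensor in the record above is an offset-style level of detail: consecutive offsets delimit sequences, so this batch holds two sequences of two rows each. A one-line sketch of that decoding:

lod = [0, 2, 4]
lengths = [lod[i + 1] - lod[i] for i in range(len(lod) - 1)]
print(lengths)  # [2, 2]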
Paddle
Paddle-master/python/paddle/fluid/tests/unittests/test_parallel_executor_seresnext.py
# Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

import paddle.fluid as fluid
from parallel_executor_test_base import TestParallelExecutorBase
import unittest


def squeeze_excitation(input, num_channels, reduction_ratio):
    # pool = fluid.layers.pool2d(
    #     input=input, pool_size=0, pool_type='avg', global_pooling=True)
    conv = input
    shape = conv.shape
    reshape = fluid.layers.reshape(
        x=conv, shape=[-1, shape[1], shape[2] * shape[3]])
    pool = fluid.layers.reduce_mean(input=reshape, dim=2)

    squeeze = fluid.layers.fc(input=pool,
                              size=num_channels / reduction_ratio,
                              act='relu')
    excitation = fluid.layers.fc(input=squeeze,
                                 size=num_channels,
                                 act='sigmoid')
    scale = fluid.layers.elementwise_mul(x=input, y=excitation, axis=0)
    return scale


def conv_bn_layer(input, num_filters, filter_size, stride=1, groups=1,
                  act=None):
    conv = fluid.layers.conv2d(
        input=input,
        num_filters=num_filters,
        filter_size=filter_size,
        stride=stride,
        padding=(filter_size - 1) / 2,
        groups=groups,
        act=None,
        bias_attr=False)
    return fluid.layers.batch_norm(input=conv, act=act, momentum=0.1)


def shortcut(input, ch_out, stride):
    ch_in = input.shape[1]
    if ch_in != ch_out:
        if stride == 1:
            filter_size = 1
        else:
            filter_size = 3
        return conv_bn_layer(input, ch_out, filter_size, stride)
    else:
        return input


def bottleneck_block(input, num_filters, stride, cardinality, reduction_ratio):
    # The number of first 1x1 convolutional channels for each bottleneck build
    # block was halved to reduce the computation cost.
    conv0 = conv_bn_layer(
        input=input, num_filters=num_filters, filter_size=1, act='relu')
    conv1 = conv_bn_layer(
        input=conv0,
        num_filters=num_filters * 2,
        filter_size=3,
        stride=stride,
        groups=cardinality,
        act='relu')
    conv2 = conv_bn_layer(
        input=conv1, num_filters=num_filters * 2, filter_size=1, act=None)
    scale = squeeze_excitation(
        input=conv2,
        num_channels=num_filters * 2,
        reduction_ratio=reduction_ratio)

    short = shortcut(input, num_filters * 2, stride)

    return fluid.layers.elementwise_add(x=short, y=scale, act='relu')


def SE_ResNeXt50Small(batch_size=2, use_feed=False):
    assert not use_feed, "SE_ResNeXt doesn't support feed yet"

    img = fluid.layers.fill_constant(
        shape=[batch_size, 3, 224, 224], dtype='float32', value=0.0)
    label = fluid.layers.fill_constant(
        shape=[batch_size, 1], dtype='int64', value=0.0)

    conv = conv_bn_layer(
        input=img, num_filters=16, filter_size=3, stride=2, act='relu')
    conv = conv_bn_layer(
        input=conv, num_filters=16, filter_size=3, stride=1, act='relu')
    conv = conv_bn_layer(
        input=conv, num_filters=16, filter_size=3, stride=1, act='relu')
    conv = fluid.layers.pool2d(
        input=conv,
        pool_size=3,
        pool_stride=2,
        pool_padding=1,
        pool_type='max')

    cardinality = 32
    reduction_ratio = 16
    depth = [3, 4, 6, 3]
    num_filters = [128, 256, 512, 1024]

    for block in range(len(depth)):
        for i in range(depth[block]):
            conv = bottleneck_block(
                input=conv,
                num_filters=num_filters[block],
                stride=2 if i == 0 and block != 0 else 1,
                cardinality=cardinality,
                reduction_ratio=reduction_ratio)

    shape = conv.shape
    reshape = fluid.layers.reshape(
        x=conv, shape=[-1, shape[1], shape[2] * shape[3]])
    pool = fluid.layers.reduce_mean(input=reshape, dim=2)
    dropout = fluid.layers.dropout(x=pool, dropout_prob=0.2)
    # Classifier layer:
    prediction = fluid.layers.fc(input=dropout, size=1000, act='softmax')
    loss = fluid.layers.cross_entropy(input=prediction, label=label)
    loss = fluid.layers.mean(loss)
    return loss


class TestResnet(TestParallelExecutorBase):
    def check_resnet_convergence(self, balance_parameter_opt_between_cards):
        import functools
        batch_size = 2
        self.check_network_convergence(
            functools.partial(
                SE_ResNeXt50Small, batch_size=batch_size),
            iter=20,
            batch_size=batch_size,
            balance_parameter_opt_between_cards=balance_parameter_opt_between_cards
        )

    def test_resnet(self):
        self.check_resnet_convergence(False)

    def test_resnet_with_new_strategy(self):
        self.check_resnet_convergence(True)


if __name__ == '__main__':
    unittest.main()
5,325
33.810458
84
py
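The squeeze_excitation block in the record above reduces each channel map to a scalar via a global average pool, passes it through a bottleneck of width C / reduction_ratio, and rescales the feature map channel-wise with the resulting sigmoid gate. A minimal numpy sketch of that flow (random matrices stand in for the two fc layers; all shapes here are illustrative):

import numpy as np

def sigmoid(z):
    return 1.0 / (1.0 + np.exp(-z))

n, c, h, w, r = 2, 16, 8, 8, 4
x = np.random.random((n, c, h, w)).astype("float32")
w1 = np.random.random((c, c // r)).astype("float32")   # squeeze fc
w2 = np.random.random((c // r, c)).astype("float32")   # excitation fc

pool = x.reshape(n, c, h * w).mean(axis=2)             # global average pool
gate = sigmoid(np.maximum(pool.dot(w1), 0).dot(w2))    # relu, then sigmoid
out = x * gate[:, :, None, None]                       # channel-wise rescale
print(out.shape)  # (2, 16, 8, 8)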
Paddle
Paddle-master/python/paddle/fluid/tests/unittests/test_elementwise_gradient_op.py
# Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

import unittest
import numpy as np

import paddle.fluid.core as core
import paddle.fluid as fluid


class TestElementWiseAddOp(unittest.TestCase):
    def __assert_close(self, tensor, np_array, msg, atol=1e-4):
        self.assertTrue(
            np.allclose(
                np.array(tensor), np_array, atol=atol), msg)

    def check_forward_backward(self):
        def test_with_place(place):
            out_grad = np.random.random_sample(self.x.shape).astype(np.float32)
            x_grad = out_grad
            sum_axis = range(0, len(self.x.shape))
            del sum_axis[self.axis]
            y_grad = np.sum(out_grad, axis=tuple(sum_axis))

            var_dict = locals()
            var_dict['y'] = self.y
            var_dict['x'] = self.x
            var_dict['out'] = self.out
            var_dict['y@GRAD'] = y_grad
            var_dict['x@GRAD'] = x_grad
            var_dict['out@GRAD'] = out_grad

            var_names = ['x', 'y', 'out', 'y@GRAD', 'x@GRAD', 'out@GRAD']
            ground_truth = {name: var_dict[name] for name in var_names}

            program = fluid.Program()
            with fluid.program_guard(program):
                block = program.global_block()
                for name in ground_truth:
                    block.create_var(
                        name=name,
                        dtype='float32',
                        shape=ground_truth[name].shape)

                elementwise_add_op = block.append_op(
                    type="elementwise_add",
                    inputs={
                        "X": block.var('x'),
                        "Y": block.var('y'),
                    },
                    outputs={"Out": block.var('out'), },
                    attrs={"axis": self.axis, })

                # generate backward op_desc
                grad_op_desc_list, op_grad_to_var = core.get_grad_op_desc(
                    elementwise_add_op.desc, set(), [])
                grad_op_desc = grad_op_desc_list[0]
                new_op_desc = block.desc.append_op()
                new_op_desc.copy_from(grad_op_desc)
                for var_name in grad_op_desc.output_arg_names():
                    block.desc.var(var_name.encode("ascii"))

                grad_op_desc.infer_var_type(block.desc)
                grad_op_desc.infer_shape(block.desc)
                for arg in grad_op_desc.output_arg_names():
                    grad_var = block.desc.find_var(arg.encode("ascii"))
                    grad_var.set_dtype(core.VarDesc.VarType.FP32)

                exe = fluid.Executor(place)
                out = exe.run(program,
                              feed={
                                  name: var_dict[name]
                                  for name in ['x', 'y', 'out@GRAD']
                              },
                              fetch_list=['x@GRAD', 'y@GRAD'])
                self.__assert_close(x_grad, out[0], "x@GRAD")
                self.__assert_close(y_grad, out[1], "y@GRAD", atol=1.4)

        places = [core.CPUPlace()]
        if core.is_compiled_with_cuda() and core.op_support_gpu(
                "elementwise_add"):
            places.append(core.CUDAPlace(0))

        for place in places:
            test_with_place(place)

    def test_check_forward_backward_with_scale_and_bias(self):
        np.random.seed(123)
        self.x = np.random.random((4, 32, 220, 220)).astype(np.float32)
        self.y = np.random.random((32)).astype(np.float32)
        self.out = self.x + self.y.reshape(1, 32, 1, 1)
        self.axis = 1
        self.check_forward_backward()


if __name__ == '__main__':
    unittest.main()
4,216
39.548077
80
py
Paddle
Paddle-master/python/paddle/fluid/tests/unittests/test_weight_normalization.py
# Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import unittest import numpy import collections import paddle.fluid as fluid import paddle.fluid.core as core from paddle.fluid.initializer import ConstantInitializer from paddle.fluid.param_attr import WeightNormParamAttr class TestWeightNormalization(unittest.TestCase): batch_size = 3 hidden_size = 5 data_desc = (['x', [10], 0], ) @classmethod def setUpClass(cls): cls.set_program() @classmethod def set_program(cls): data = fluid.layers.data( name=cls.data_desc[0][0], shape=cls.data_desc[0][1]) out = fluid.layers.fc(input=data, size=cls.hidden_size, param_attr=WeightNormParamAttr( dim=None, name='weight_norm_param', initializer=ConstantInitializer(1.0)), bias_attr=False, act=None) loss = fluid.layers.reduce_sum(out) fluid.backward.append_backward(loss=loss) cls.fetch_list = [ 'weight_norm_param_g', 'weight_norm_param_v', 'weight_norm_param_g@GRAD' ] def run_program(self): outputs = [] places = [core.CPUPlace()] if core.is_compiled_with_cuda(): places.append(core.CUDAPlace(0)) for place in places: self.set_inputs(place) exe = fluid.Executor(place) exe.run(fluid.default_startup_program()) output = exe.run(fluid.default_main_program(), feed=self.inputs, fetch_list=self.fetch_list, return_numpy=False) outputs.append(output) self.actual_outputs = outputs def set_data(self): self.data = collections.OrderedDict() for desc in self.data_desc: data_name = desc[0] data_shape = desc[1] data_lod_level = desc[2] data_lod = [] for i in range(data_lod_level): lod_level_i = numpy.random.randint( low=1, high=5, size=self.batch_size if i == 0 else lod_level_i[-1]) lod_level_i = [0] + numpy.cumsum(lod_level_i).tolist() data_lod.append(lod_level_i) data_value = numpy.random.random( size=[data_lod[-1][-1] if data_lod else self.batch_size ] + data_shape).astype('float32') self.data[data_name] = (data_value, data_lod) def set_inputs(self, place): self.inputs = {} for desc in self.data_desc: tensor = fluid.Tensor() tensor.set(self.data[desc[0]][0], place) if self.data[desc[0]][1]: tensor.set_lod(self.data[desc[0]][1]) self.inputs[desc[0]] = tensor def weight_normalize(self): v = numpy.ones((self.data[self.data_desc[0][0]][0].shape[-1], self.hidden_size)) g = numpy.linalg.norm(v, axis=None, keepdims=True) w = g * v / numpy.linalg.norm(v, axis=None, keepdims=True) x = self.data[self.data_desc[0][0]][0] out = numpy.dot(x, w) g_grad = (numpy.dot(x.T, numpy.ones_like(out)) * (v / numpy.linalg.norm( v, axis=None, keepdims=True))).sum(axis=None, keepdims=True) return g, v, g_grad def test_weight_normalization(self): self.set_data() self.run_program() expect_output = self.weight_normalize() for actual_output in self.actual_outputs: [ self.assertTrue( numpy.allclose( numpy.array(actual), expect, atol=0.001)) for expect, actual in zip(expect_output, actual_output) ] if __name__ == '__main__': unittest.main()
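

# Hedged sketch of the reparameterisation verified above: weight norm stores
# w as w = g * v / ||v||, so the magnitude g and direction v are learned
# separately. `_weight_norm_demo` is illustrative only, not part of the
# original test:
def _weight_norm_demo():
    v = numpy.ones((10, 5))
    # When g is initialised to ||v||, the effective weight w equals v.
    g = numpy.linalg.norm(v, axis=None, keepdims=True)
    w = g * v / numpy.linalg.norm(v, axis=None, keepdims=True)
    return w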
4,565
36.42623
80
py
Paddle
Paddle-master/python/paddle/fluid/tests/unittests/test_softmax_with_cross_entropy_op.py
# Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

import unittest
import numpy as np

from op_test import OpTest
from test_softmax_op import stable_softmax


class TestSoftmaxWithCrossEntropyOp(OpTest):
    """
    Test softmax with cross entropy operator with discrete one-hot labels.
    """

    def setUp(self):
        self.op_type = "softmax_with_cross_entropy"
        batch_size = 41
        class_num = 37

        logits = np.random.uniform(0.1, 1.0,
                                   [batch_size, class_num]).astype("float64")
        softmax = np.apply_along_axis(stable_softmax, 1, logits)
        labels = np.random.randint(0, class_num, [batch_size, 1], dtype="int64")

        cross_entropy = np.asmatrix(
            [[-np.log(softmax[i][labels[i][0]])]
             for i in range(softmax.shape[0])],
            dtype="float64")

        self.inputs = {"Logits": logits, "Label": labels}
        self.outputs = {
            "Softmax": softmax.astype("float64"),
            "Loss": cross_entropy.astype("float64")
        }

    def test_check_output(self):
        self.check_output()

    def test_check_grad(self):
        self.check_grad(["Logits"], "Loss")


class TestSoftmaxWithCrossEntropyOp2(OpTest):
    """
    Test softmax with cross entropy operator with soft labels.
    """

    def setUp(self):
        self.op_type = "softmax_with_cross_entropy"
        batch_size = 41
        class_num = 37

        logits = np.random.uniform(0.1, 1.0,
                                   [batch_size, class_num]).astype("float64")
        softmax = np.apply_along_axis(stable_softmax, 1, logits)
        labels = np.random.uniform(0.1, 1.0,
                                   [batch_size, class_num]).astype("float64")
        labels /= np.sum(labels, axis=1, keepdims=True)

        cross_entropy = (-labels * np.log(softmax)).sum(
            axis=1, keepdims=True).astype("float64")

        self.inputs = {"Logits": logits, "Label": labels}
        self.outputs = {
            "Softmax": softmax.astype("float64"),
            "Loss": cross_entropy.astype("float64")
        }
        self.attrs = {"soft_label": True}

    def test_check_output(self):
        self.check_output()

    def test_check_grad(self):
        self.check_grad(["Logits"], "Loss")


if __name__ == "__main__":
    unittest.main()
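

# Hedged note on why softmax and cross-entropy are fused into one op: for hard
# labels the combined gradient w.r.t. the logits collapses to the simple form
# (softmax - one_hot(label)), avoiding an explicit log(softmax) step.
# `_fused_grad_demo` is illustrative only, not part of the original test:
def _fused_grad_demo():
    logits = np.random.uniform(0.1, 1.0, [4, 3]).astype("float64")
    softmax = np.apply_along_axis(stable_softmax, 1, logits)
    labels = np.array([0, 2, 1, 1])
    one_hot = np.eye(3)[labels]
    # dLoss/dLogits for hard labels, one row per sample.
    return softmax - one_hot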
2,899
30.868132
80
py
Paddle
Paddle-master/python/paddle/fluid/tests/unittests/test_scale_op.py
# Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import unittest import numpy as np from op_test import OpTest class TestScaleOp(OpTest): def setUp(self): self.op_type = "scale" self.inputs = {'X': np.random.random((10, 10)).astype("float32")} self.attrs = {'scale': -2.3} self.outputs = {'Out': self.inputs['X'] * self.attrs['scale']} def test_check_output(self): self.check_output() def test_check_grad(self): self.check_grad(['X'], 'Out') if __name__ == "__main__": unittest.main()
1,119
30.111111
74
py
Paddle
Paddle-master/python/paddle/fluid/tests/unittests/test_auc_op.py
# Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

import unittest
import numpy as np
from op_test import OpTest


class TestAucOp(OpTest):
    def setUp(self):
        self.op_type = "auc"
        pred = np.random.random((128, 2)).astype("float32")
        indices = np.random.randint(0, 2, (128, 2))
        labels = np.random.randint(0, 2, (128, 1))
        num_thresholds = 200
        self.inputs = {'Out': pred, 'Indices': indices, 'Label': labels}
        self.attrs = {'curve': 'ROC', 'num_thresholds': num_thresholds}
        # NOTE: sklearn uses a different way to generate thresholds,
        #       which will cause the result to differ slightly:
        # from sklearn.metrics import roc_curve, auc
        # fpr, tpr, thresholds = roc_curve(labels, pred)
        # auc_value = auc(fpr, tpr)
        # we calculate AUC again using numpy for testing
        kepsilon = 1e-7  # to account for floating point imprecisions
        thresholds = [(i + 1) * 1.0 / (num_thresholds - 1)
                      for i in range(num_thresholds - 2)]
        thresholds = [0.0 - kepsilon] + thresholds + [1.0 + kepsilon]

        # calculate TP, FN, TN, FP count
        tp_list = np.ndarray((num_thresholds, ))
        fn_list = np.ndarray((num_thresholds, ))
        tn_list = np.ndarray((num_thresholds, ))
        fp_list = np.ndarray((num_thresholds, ))
        for idx_thresh, thresh in enumerate(thresholds):
            tp, fn, tn, fp = 0, 0, 0, 0
            for i, lbl in enumerate(labels):
                if lbl:
                    if pred[i, 0] >= thresh:
                        tp += 1
                    else:
                        fn += 1
                else:
                    if pred[i, 0] >= thresh:
                        fp += 1
                    else:
                        tn += 1
            tp_list[idx_thresh] = tp
            fn_list[idx_thresh] = fn
            tn_list[idx_thresh] = tn
            fp_list[idx_thresh] = fp

        epsilon = 1e-6
        tpr = (tp_list.astype("float32") + epsilon) / (
            tp_list + fn_list + epsilon)
        fpr = fp_list.astype("float32") / (fp_list + tn_list + epsilon)
        rec = (tp_list.astype("float32") + epsilon) / (
            tp_list + fp_list + epsilon)

        x = fpr[:num_thresholds - 1] - fpr[1:]
        y = (tpr[:num_thresholds - 1] + tpr[1:]) / 2.0
        auc_value = np.sum(x * y)

        self.outputs = {'AUC': auc_value}

    def test_check_output(self):
        self.check_output()


if __name__ == "__main__":
    unittest.main()
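

# Hedged restatement of the AUC estimate used above: np.sum(x * y) is the
# trapezoidal rule applied to the ROC curve, where x is the FPR step width
# between adjacent thresholds and y is the mean TPR of those two thresholds.
# `_trapezoid_demo` is illustrative only, not part of the original test:
def _trapezoid_demo(fpr, tpr):
    # fpr and tpr are 1-D arrays ordered from the lowest threshold (FPR ~ 1)
    # to the highest (FPR ~ 0), so the widths below are non-negative.
    widths = fpr[:-1] - fpr[1:]
    heights = (tpr[:-1] + tpr[1:]) / 2.0
    return np.sum(widths * heights)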
3,101
36.829268
74
py
Paddle
Paddle-master/python/paddle/fluid/tests/unittests/test_pool_max_op.py
# Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

import unittest
import numpy as np
from op_test import OpTest


def max_pool3D_forward_naive(x, ksize, strides, paddings, global_pool=False):
    N, C, D, H, W = x.shape
    if global_pool:
        ksize = [D, H, W]
        paddings = [0, 0, 0]
    D_out = (D - ksize[0] + 2 * paddings[0]) / strides[0] + 1
    H_out = (H - ksize[1] + 2 * paddings[1]) / strides[1] + 1
    W_out = (W - ksize[2] + 2 * paddings[2]) / strides[2] + 1
    out = np.zeros((N, C, D_out, H_out, W_out))
    mask = np.zeros((N, C, D_out, H_out, W_out))
    for k in xrange(D_out):
        d_start = np.max((k * strides[0] - paddings[0], 0))
        d_end = np.min((k * strides[0] + ksize[0] - paddings[0], D))
        for i in xrange(H_out):
            h_start = np.max((i * strides[1] - paddings[1], 0))
            h_end = np.min((i * strides[1] + ksize[1] - paddings[1], H))
            for j in xrange(W_out):
                w_start = np.max((j * strides[2] - paddings[2], 0))
                w_end = np.min((j * strides[2] + ksize[2] - paddings[2], W))
                x_masked = x[:, :, d_start:d_end, h_start:h_end, w_start:w_end]

                out[:, :, k, i, j] = np.max(x_masked, axis=(2, 3, 4))

                for n in xrange(N):
                    for c in xrange(C):
                        arr = x_masked[n, c, :, :, :]
                        index = np.where(arr == np.max(arr))
                        sub_deep = index[0][0]
                        sub_row = index[1][0]
                        sub_col = index[2][0]
                        index = ((d_start + sub_deep) * H +
                                 (h_start + sub_row)) * W + w_start + sub_col
                        mask[n, c, k, i, j] = index

    return out, mask


def max_pool2D_forward_naive(x, ksize, strides, paddings, global_pool=False):
    N, C, H, W = x.shape
    if global_pool:
        ksize = [H, W]
        paddings = [0, 0]
    H_out = (H - ksize[0] + 2 * paddings[0]) / strides[0] + 1
    W_out = (W - ksize[1] + 2 * paddings[1]) / strides[1] + 1
    out = np.zeros((N, C, H_out, W_out))
    mask = np.zeros((N, C, H_out, W_out))
    for i in xrange(H_out):
        for j in xrange(W_out):
            r_start = np.max((i * strides[0] - paddings[0], 0))
            r_end = np.min((i * strides[0] + ksize[0] - paddings[0], H))
            c_start = np.max((j * strides[1] - paddings[1], 0))
            c_end = np.min((j * strides[1] + ksize[1] - paddings[1], W))
            x_masked = x[:, :, r_start:r_end, c_start:c_end]

            out[:, :, i, j] = np.max(x_masked, axis=(2, 3))

            for n in xrange(N):
                for c in xrange(C):
                    arr = x_masked[n, c, :, :]
                    index = np.where(arr == np.max(arr))
                    sub_row = index[0][0]
                    sub_col = index[1][0]
                    index = (r_start + sub_row) * W + c_start + sub_col
                    mask[n, c, i, j] = index

    return out, mask


class TestMaxPoolWithIndex_Op(OpTest):
    def setUp(self):
        self.init_test_case()
        self.init_global()

        input = np.random.random(self.shape).astype("float32")
        output, mask = self.pool_forward_naive(input, self.ksize, self.strides,
                                               self.paddings, self.global_pool)
        output = output.astype("float32")
        mask = mask.astype("int32")

        self.attrs = {
            'strides': self.strides,
            'paddings': self.paddings,
            'ksize': self.ksize,
            'global_pooling': self.global_pool,
        }

        self.inputs = {'X': input}
        self.outputs = {'Out': output, "Mask": mask}

    def test_check_output(self):
        self.check_output()

    # def test_check_grad(self):
    #     self.check_grad(set(['X']), ['Out'], max_relative_error=0.07)

    def init_test_case(self):
        self.op_type = "max_pool3d_with_index"
        self.pool_forward_naive = max_pool3D_forward_naive
        self.shape = [2, 3, 5, 5, 5]
        self.ksize = [3, 3, 3]
        self.strides = [1, 1, 1]
        self.paddings = [1, 1, 1]

    def init_global(self):
        self.global_pool = False


class TestCase1(TestMaxPoolWithIndex_Op):
    def init_global(self):
        self.global_pool = True


class TestCase2(TestMaxPoolWithIndex_Op):
    def init_test_case(self):
        self.op_type = "max_pool3d_with_index"
        self.pool_forward_naive = max_pool3D_forward_naive
        self.shape = [2, 3, 7, 7, 7]
        self.ksize = [3, 3, 3]
        self.strides = [2, 2, 2]
        self.paddings = [0, 0, 0]

    def init_global(self):
        self.global_pool = True


class TestCase3(TestCase2):
    def init_global(self):
        self.global_pool = False


#----------------max_pool2d_with_index----------------
class TestCase4(TestMaxPoolWithIndex_Op):
    def init_test_case(self):
        self.op_type = "max_pool2d_with_index"
        self.pool_forward_naive = max_pool2D_forward_naive
        self.shape = [2, 3, 7, 7]
        self.ksize = [3, 3]
        self.strides = [1, 1]
        self.paddings = [1, 1]

    def init_global(self):
        self.global_pool = True


class TestCase5(TestCase4):
    def init_global(self):
        self.global_pool = False


class TestCase6(TestMaxPoolWithIndex_Op):
    def init_test_case(self):
        self.op_type = "max_pool2d_with_index"
        self.pool_forward_naive = max_pool2D_forward_naive
        self.shape = [2, 3, 7, 7]
        self.ksize = [3, 3]
        self.strides = [2, 2]
        self.paddings = [0, 0]

    def init_global(self):
        self.global_pool = True


class TestCase7(TestCase6):
    def init_global(self):
        self.global_pool = False


if __name__ == '__main__':
    unittest.main()
6,359
31.953368
79
py
Paddle
Paddle-master/python/paddle/fluid/tests/unittests/test_elementwise_max_op.py
# Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import unittest import numpy as np from op_test import OpTest class TestElementwiseOp(OpTest): def setUp(self): self.op_type = "elementwise_max" # If x and y have the same value, the max() is not differentiable. # So we generate test data by the following method # to avoid them being too close to each other. x = np.random.uniform(0.1, 1, [13, 17]).astype("float32") sgn = np.random.choice([-1, 1], [13, 17]).astype("float32") y = x + sgn * np.random.uniform(0.1, 1, [13, 17]).astype("float32") self.inputs = {'X': x, 'Y': y} self.outputs = {'Out': np.maximum(self.inputs['X'], self.inputs['Y'])} def test_check_output(self): self.check_output() def test_check_grad_normal(self): self.check_grad(['X', 'Y'], 'Out', max_relative_error=0.005) def test_check_grad_ingore_x(self): self.check_grad( ['Y'], 'Out', max_relative_error=0.005, no_grad_set=set("X")) def test_check_grad_ingore_y(self): self.check_grad( ['X'], 'Out', max_relative_error=0.005, no_grad_set=set('Y')) class TestElementwiseMaxOp_scalar(TestElementwiseOp): def setUp(self): self.op_type = "elementwise_max" x = np.random.random_integers(-5, 5, [2, 3, 4]).astype("float32") y = np.array([0.5]).astype("float32") self.inputs = {'X': x, 'Y': y} self.outputs = {'Out': np.maximum(self.inputs['X'], self.inputs['Y'])} class TestElementwiseMaxOp_Vector(TestElementwiseOp): def setUp(self): self.op_type = "elementwise_max" x = np.random.random((32, )).astype("float32") sgn = np.random.choice([-1, 1], (32, )).astype("float32") y = x + sgn * np.random.uniform(0.1, 1, (32, )).astype("float32") self.inputs = {'X': x, 'Y': y} self.outputs = {'Out': np.maximum(self.inputs['X'], self.inputs['Y'])} class TestElementwiseMaxOp_broadcast_0(TestElementwiseOp): def setUp(self): self.op_type = "elementwise_max" x = np.random.uniform(0.5, 1, (2, 3, 4)).astype(np.float32) sgn = np.random.choice([-1, 1], (2, )).astype(np.float32) y = x[:, 0, 0] + sgn * \ np.random.uniform(1, 2, (2, )).astype(np.float32) self.inputs = {'X': x, 'Y': y} self.attrs = {'axis': 0} self.outputs = { 'Out': np.maximum(self.inputs['X'], self.inputs['Y'].reshape(2, 1, 1)) } class TestElementwiseMaxOp_broadcast_1(TestElementwiseOp): def setUp(self): self.op_type = "elementwise_max" x = np.random.uniform(0.5, 1, (2, 3, 4)).astype(np.float32) sgn = np.random.choice([-1, 1], (3, )).astype(np.float32) y = x[0, :, 0] + sgn * \ np.random.uniform(1, 2, (3, )).astype(np.float32) self.inputs = {'X': x, 'Y': y} self.attrs = {'axis': 1} self.outputs = { 'Out': np.maximum(self.inputs['X'], self.inputs['Y'].reshape(1, 3, 1)) } class TestElementwiseMaxOp_broadcast_2(TestElementwiseOp): def setUp(self): self.op_type = "elementwise_max" x = np.random.uniform(0.5, 1, (2, 3, 4)).astype(np.float32) sgn = np.random.choice([-1, 1], (4, )).astype(np.float32) y = x[0, 0, :] + sgn * \ np.random.uniform(1, 2, (4, )).astype(np.float32) self.inputs = {'X': x, 'Y': y} self.outputs = { 'Out': np.maximum(self.inputs['X'], 
                              self.inputs['Y'].reshape(1, 1, 4))
        }


class TestElementwiseMaxOp_broadcast_3(TestElementwiseOp):
    def setUp(self):
        self.op_type = "elementwise_max"
        x = np.random.uniform(0.5, 1, (2, 3, 4, 5)).astype(np.float32)
        sgn = np.random.choice([-1, 1], (3, 4)).astype(np.float32)
        y = x[0, :, :, 0] + sgn * \
            np.random.uniform(1, 2, (3, 4)).astype(np.float32)
        self.inputs = {'X': x, 'Y': y}

        self.attrs = {'axis': 1}
        self.outputs = {
            'Out':
            np.maximum(self.inputs['X'], self.inputs['Y'].reshape(1, 3, 4, 1))
        }


if __name__ == '__main__':
    unittest.main()
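

# Hedged note on the tie-avoidance in the setups above: d max(x, y)/dx is 1
# where x > y and 0 where x < y, but undefined at x == y, so the numeric
# gradient check would be unstable on ties; hence the sgn * uniform offset.
# `_max_grad_demo` is illustrative only, not part of the original test:
def _max_grad_demo(x, y, out_grad):
    # Ties are assigned to y here; any consistent split is a valid
    # subgradient for elementwise max.
    return out_grad * (x > y), out_grad * (y >= x)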
4,773
35.442748
78
py