Dataset schema (one record per function; ranges are as reported by the source viewer):

  body                    string, length 26 to 98.2k
  body_hash               int64, -9,222,864,604,528,158,000 to 9,221,803,474B (upper bound abbreviated as in the source)
  docstring               string, length 1 to 16.8k
  path                    string, length 5 to 230
  name                    string, length 1 to 96
  repository_name         string, length 7 to 89
  lang                    string, 1 class (all records: python)
  body_without_docstring  string, length 20 to 98.2k
def get_min_max(ints): '\n Return a tuple(min, max) out of list of unsorted integers.\n\n Args:\n ints(list): list of integers containing one or more integers\n ' if (not isinstance(ints, list)): return (None, None) min_value = None max_value = None for (index, value) in enumerate(ints): if (index == 0): min_value = value max_value = value if (value < min_value): min_value = value elif (value > max_value): max_value = value return (min_value, max_value)
4,778,962,711,514,575,000
Return a tuple(min, max) out of list of unsorted integers. Args: ints(list): list of integers containing one or more integers
problem_6.py
get_min_max
johangenis/problems_vs_algorithms
python
def get_min_max(ints): '\n Return a tuple(min, max) out of list of unsorted integers.\n\n Args:\n ints(list): list of integers containing one or more integers\n ' if (not isinstance(ints, list)): return (None, None) min_value = None max_value = None for (index, value) in enumerate(ints): if (index == 0): min_value = value max_value = value if (value < min_value): min_value = value elif (value > max_value): max_value = value return (min_value, max_value)
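A minimal usage sketch for the get_min_max record above; the inputs are hypothetical, and the function is assumed to be defined exactly as recorded.

    print(get_min_max([4, 9, 1, 7]))   # -> (1, 9)
    print(get_min_max([]))             # -> (None, None): the loop never runs
    print(get_min_max('4, 9, 1'))      # -> (None, None): input is not a list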
def dijkstra_iterator(nodes: list[Node], src_id: int, hf=(lambda x: 0.0)): '\n Internal loop of the Dijkstra algorithm\n as a step by step iterator\n hf is an optional heuristic\n ' visited_nodes = set() h: list[tuple[(float, Node)]] = [] previous = dict() distances = defaultdict((lambda : sys.maxsize)) distances[src_id] = hf(nodes[src_id]) ctx: Context = Context(previous=previous, distances=distances, node=None, visited_nodes=visited_nodes) heappush(h, (0.0, nodes[src_id])) while h: (_, node) = heappop(h) if (node.id in visited_nodes): continue dist = distances[node.id] for (n, d) in ((nodes[k], v) for (k, v) in node.neighbours.items() if (k not in visited_nodes)): new_dist = (dist + d) cost = ((new_dist + hf(n)) - hf(node)) if (cost <= distances[n.id]): distances[n.id] = cost previous[n.id] = node.id heappush(h, (cost, n)) visited_nodes.add(node.id) ctx.node = node (yield ctx) ctx.node = None (yield ctx)
2,886,912,168,696,677,400
Internal loop of the Dijkstra algorithm as a step by step iterator hf is an optional heuristic
bidir_dijkstra.py
dijkstra_iterator
colon3ltocard/pythonalgorithms
python
def dijkstra_iterator(nodes: list[Node], src_id: int, hf=(lambda x: 0.0)): '\n Internal loop of the Dijkstra algorithm\n as a step by step iterator\n hf is an optional heuristic\n ' visited_nodes = set() h: list[tuple[(float, Node)]] = [] previous = dict() distances = defaultdict((lambda : sys.maxsize)) distances[src_id] = hf(nodes[src_id]) ctx: Context = Context(previous=previous, distances=distances, node=None, visited_nodes=visited_nodes) heappush(h, (0.0, nodes[src_id])) while h: (_, node) = heappop(h) if (node.id in visited_nodes): continue dist = distances[node.id] for (n, d) in ((nodes[k], v) for (k, v) in node.neighbours.items() if (k not in visited_nodes)): new_dist = (dist + d) cost = ((new_dist + hf(n)) - hf(node)) if (cost <= distances[n.id]): distances[n.id] = cost previous[n.id] = node.id heappush(h, (cost, n)) visited_nodes.add(node.id) ctx.node = node (yield ctx) ctx.node = None (yield ctx)
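A self-contained sketch exercising dijkstra_iterator on a toy graph. The Node and Context classes below are minimal stand-ins inferred from how the iterator uses them (assumptions, not the repository's own classes), and the imports cover what the recorded function expects to find in scope.

    import sys
    from collections import defaultdict
    from dataclasses import dataclass
    from heapq import heappush, heappop

    @dataclass
    class Node:
        id: int
        neighbours: dict          # neighbour id -> edge weight

        def __lt__(self, other):  # heap tie-breaker when two costs are equal
            return self.id < other.id

    @dataclass
    class Context:
        previous: dict
        distances: dict
        node: object
        visited_nodes: set

    # Toy triangle graph: 0 -1- 1 -1- 2, plus a direct 0 -4- 2 edge.
    nodes = [Node(0, {1: 1.0, 2: 4.0}),
             Node(1, {0: 1.0, 2: 1.0}),
             Node(2, {0: 4.0, 1: 1.0})]

    for ctx in dijkstra_iterator(nodes, src_id=0):
        if ctx.node is None:      # final sentinel yield: the search is done
            break
        print('settled', ctx.node.id, 'distances', dict(ctx.distances))
    # Expected shortest distance to node 2 is 2.0 (via node 1).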
def dijkstra_forward(nodes: list[Node], src_id: int, dst_id: int, hf=(lambda x: 0.0)) -> tuple: "\n 'classical' forward Dijkstra but based on our iterator.\n " coro = dijkstra_iterator(nodes, src_id, hf=hf) for ctx in coro: if (ctx.node is None): return ([], []) elif (ctx.node.id == dst_id): return (ctx.distances[dst_id], list(build_shortest_path(ctx.previous, dst_id, src_id)))
8,656,843,063,080,979,000
'classical' forward Dijkstra but based on our iterator.
bidir_dijkstra.py
dijkstra_forward
colon3ltocard/pythonalgorithms
python
def dijkstra_forward(nodes: list[Node], src_id: int, dst_id: int, hf=(lambda x: 0.0)) -> tuple: "\n \n " coro = dijkstra_iterator(nodes, src_id, hf=hf) for ctx in coro: if (ctx.node is None): return ([], []) elif (ctx.node.id == dst_id): return (ctx.distances[dst_id], list(build_shortest_path(ctx.previous, dst_id, src_id)))
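Continuing the toy graph from the iterator sketch above, a hedged usage note for dijkstra_forward; build_shortest_path is a repository helper that is not redefined here, so the call is shown as a comment only.

    # cost, path = dijkstra_forward(nodes, src_id=0, dst_id=2)
    # expected cost is 2.0, with the path running 0 -> 1 -> 2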
def bidir_dijkstra(nodes: list[Node], src_id: int, dst_id: int, hff=(lambda _: 0.0), hfb=(lambda _: 0.0), consistent: bool=True) -> list[int]: '\n bidirectionnal dijkstra, we search from both start => end\n and end => start using two iterators.\n hff and hfb are optional heuristics\n for respectively the forward and backward iterators\n (for later bidir A*)\n ' forward = dijkstra_iterator(nodes, src_id, hf=hff) backward = dijkstra_iterator(nodes, dst_id, hf=hfb) shortest = sys.maxsize forward_node = backward_node = None f = [] b = [] for (idx, (ctx_forward, ctx_backward)) in enumerate(zip(forward, backward)): if any(((x.node is None) for x in (ctx_forward, ctx_backward))): return ([], [], (f, b)) f.append(ctx_forward.node) b.append(ctx_backward.node) if (forward_node and ((not consistent) or (sum(((x.distances[x.node.id] - hf(x.node)) for (x, hf) in ((ctx_forward, hff), (ctx_backward, hfb)))) >= shortest))): forward_path = build_shortest_path(ctx_forward.previous, forward_node.id, src_id) backward_path = build_shortest_path(ctx_backward.previous, backward_node.id, dst_id)[::(- 1)] path = (forward_path + backward_path) return (shortest, path, (f, b)) else: for ((ctx, hf), (ctx2, hf2)) in permutations(((ctx_forward, hff), (ctx_backward, hfb)), 2): for (n, d) in ctx.node.neighbours.items(): if (n in ctx2.visited_nodes): distance = ((((ctx.distances[ctx.node.id] + ctx2.distances[n]) + d) - hf(ctx.node)) - hf2(nodes[n])) if (distance < shortest): shortest = distance forward_node = (ctx.node if (ctx is ctx_forward) else nodes[n]) backward_node = (ctx.node if (ctx is ctx_backward) else nodes[n]) print(f'Iter_{idx}: contact between {forward_node}->{backward_node} with d={shortest}')
-3,832,702,865,269,007,000
bidirectional dijkstra, we search from both start => end and end => start using two iterators. hff and hfb are optional heuristics for the forward and backward iterators respectively (for later bidir A*)
bidir_dijkstra.py
bidir_dijkstra
colon3ltocard/pythonalgorithms
python
def bidir_dijkstra(nodes: list[Node], src_id: int, dst_id: int, hff=(lambda _: 0.0), hfb=(lambda _: 0.0), consistent: bool=True) -> list[int]: '\n bidirectionnal dijkstra, we search from both start => end\n and end => start using two iterators.\n hff and hfb are optional heuristics\n for respectively the forward and backward iterators\n (for later bidir A*)\n ' forward = dijkstra_iterator(nodes, src_id, hf=hff) backward = dijkstra_iterator(nodes, dst_id, hf=hfb) shortest = sys.maxsize forward_node = backward_node = None f = [] b = [] for (idx, (ctx_forward, ctx_backward)) in enumerate(zip(forward, backward)): if any(((x.node is None) for x in (ctx_forward, ctx_backward))): return ([], [], (f, b)) f.append(ctx_forward.node) b.append(ctx_backward.node) if (forward_node and ((not consistent) or (sum(((x.distances[x.node.id] - hf(x.node)) for (x, hf) in ((ctx_forward, hff), (ctx_backward, hfb)))) >= shortest))): forward_path = build_shortest_path(ctx_forward.previous, forward_node.id, src_id) backward_path = build_shortest_path(ctx_backward.previous, backward_node.id, dst_id)[::(- 1)] path = (forward_path + backward_path) return (shortest, path, (f, b)) else: for ((ctx, hf), (ctx2, hf2)) in permutations(((ctx_forward, hff), (ctx_backward, hfb)), 2): for (n, d) in ctx.node.neighbours.items(): if (n in ctx2.visited_nodes): distance = ((((ctx.distances[ctx.node.id] + ctx2.distances[n]) + d) - hf(ctx.node)) - hf2(nodes[n])) if (distance < shortest): shortest = distance forward_node = (ctx.node if (ctx is ctx_forward) else nodes[n]) backward_node = (ctx.node if (ctx is ctx_backward) else nodes[n]) print(f'Iter_{idx}: contact between {forward_node}->{backward_node} with d={shortest}')
def make_animated_gif(title: str, g: list[Node], dst_file: str, fs: list[Node], bs: list[Node], shortest: list[Node], draw_edges: bool=True, writer: str='ffmpeg', interval: int=250, blinking_ratio=0.5): '\n Makes an animated gif out of two sequences of forward (fs) and backward (bs)\n path-finding algorithm. The final shortest path will be blinked.\n ' anim = Animator(g, title=title, draw_edges=draw_edges) def node_gen(): for (fn, bn) in zip(fs, bs): (yield (fn, bn, [])) res = [g[i] for i in shortest] for _ in range(int((len(fs) * blinking_ratio))): (yield (_, _, res)) ani = animation.FuncAnimation(anim.fig, anim.update, node_gen(), interval=interval, blit=True, repeat_delay=500, save_count=(len(fs) * 2)) ani.save(f'imgs/{dst_file}', writer=writer)
-5,620,099,599,551,044,000
Makes an animated gif out of two sequences of forward (fs) and backward (bs) path-finding algorithm. The final shortest path will be blinked.
bidir_dijkstra.py
make_animated_gif
colon3ltocard/pythonalgorithms
python
def make_animated_gif(title: str, g: list[Node], dst_file: str, fs: list[Node], bs: list[Node], shortest: list[Node], draw_edges: bool=True, writer: str='ffmpeg', interval: int=250, blinking_ratio=0.5): '\n Makes an animated gif out of two sequences of forward (fs) and backward (bs)\n path-finding algorithm. The final shortest path will be blinked.\n ' anim = Animator(g, title=title, draw_edges=draw_edges) def node_gen(): for (fn, bn) in zip(fs, bs): (yield (fn, bn, [])) res = [g[i] for i in shortest] for _ in range(int((len(fs) * blinking_ratio))): (yield (_, _, res)) ani = animation.FuncAnimation(anim.fig, anim.update, node_gen(), interval=interval, blit=True, repeat_delay=500, save_count=(len(fs) * 2)) ani.save(f'imgs/{dst_file}', writer=writer)
def update(self, nodes: tuple[(Node, Node, list[Node])]): '\n Updates the plot with a tuple of nodes (forward, backward, shortest_path)\n ' (f, b, s) = nodes if (not s): self._colors[f.id] = 1 self._colors[b.id] = 2 self.ax.scatter.set_array(self._colors) return (self.ax.scatter,) else: x = [n.x for n in s] y = [n.y for n in s] if self.i: c = 'green' else: c = 'orange' ap = self.ax.plot(x, y, color=c, linewidth=2) self.i = (not self.i) return ap
5,186,616,441,863,305,000
Updates the plot with a tuple of nodes (forward, backward, shortest_path)
bidir_dijkstra.py
update
colon3ltocard/pythonalgorithms
python
def update(self, nodes: tuple[(Node, Node, list[Node])]): '\n \n ' (f, b, s) = nodes if (not s): self._colors[f.id] = 1 self._colors[b.id] = 2 self.ax.scatter.set_array(self._colors) return (self.ax.scatter,) else: x = [n.x for n in s] y = [n.y for n in s] if self.i: c = 'green' else: c = 'orange' ap = self.ax.plot(x, y, color=c, linewidth=2) self.i = (not self.i) return ap
def build(self): '\n Construct the main structure of the network\n ' print('DNN input shape', self.input_shape) if (K.image_dim_ordering() == 'tf'): (batch_sz, bands, frames, channels) = self.input_shape assert (channels >= 1) channel_axis = 3 freq_axis = 1 nn_shape = (bands, frames, channels) else: raise NotImplementedError('[ERROR] Only for TensorFlow background.') nb_filters = self.config['feature_maps'] dropout_rate = self.config['dropout'] pool_sz = [5, 2, 2] gru_nb = [32] fc_nb = [32] feat_input = Input(shape=nn_shape, name='input') x = BatchNormalization(axis=freq_axis, name='bn_0_freq')(feat_input) for sz in pool_sz: x = Conv2D(filters=nb_filters, kernel_size=(3, 3), padding='same')(x) x = BatchNormalization(axis=channel_axis)(x) x = Activation(self.config['activation'])(x) x = MaxPooling2D(pool_size=(sz, 1))(x) x = Dropout(dropout_rate)(x) x = Permute((2, 1, 3))(x) x = Reshape((frames, (- 1)))(x) for n in gru_nb: x = Bidirectional(GRU(n, activation='tanh', dropout=dropout_rate, recurrent_dropout=dropout_rate, return_sequences=True), merge_mode='mul')(x) for n in fc_nb: x = TimeDistributed(Dense(n))(x) x = Dropout(dropout_rate)(x) x = TimeDistributed(Dense(self.nclass))(x) y_pred = Activation(activation=self.config['out_score'], name='output')(x) self._compile_model(input=feat_input, output=y_pred, params=self.config)
-1,732,019,826,705,050,000
Construct the main structure of the network
src/model/sed_ogits.py
build
Vanova/mfom_attribute_detection
python
def build(self): '\n \n ' print('DNN input shape', self.input_shape) if (K.image_dim_ordering() == 'tf'): (batch_sz, bands, frames, channels) = self.input_shape assert (channels >= 1) channel_axis = 3 freq_axis = 1 nn_shape = (bands, frames, channels) else: raise NotImplementedError('[ERROR] Only for TensorFlow background.') nb_filters = self.config['feature_maps'] dropout_rate = self.config['dropout'] pool_sz = [5, 2, 2] gru_nb = [32] fc_nb = [32] feat_input = Input(shape=nn_shape, name='input') x = BatchNormalization(axis=freq_axis, name='bn_0_freq')(feat_input) for sz in pool_sz: x = Conv2D(filters=nb_filters, kernel_size=(3, 3), padding='same')(x) x = BatchNormalization(axis=channel_axis)(x) x = Activation(self.config['activation'])(x) x = MaxPooling2D(pool_size=(sz, 1))(x) x = Dropout(dropout_rate)(x) x = Permute((2, 1, 3))(x) x = Reshape((frames, (- 1)))(x) for n in gru_nb: x = Bidirectional(GRU(n, activation='tanh', dropout=dropout_rate, recurrent_dropout=dropout_rate, return_sequences=True), merge_mode='mul')(x) for n in fc_nb: x = TimeDistributed(Dense(n))(x) x = Dropout(dropout_rate)(x) x = TimeDistributed(Dense(self.nclass))(x) y_pred = Activation(activation=self.config['out_score'], name='output')(x) self._compile_model(input=feat_input, output=y_pred, params=self.config)
def rebuild(self, new_config): "\n Recompile the model with the new hyper parameters.\n NOTE: network topology is changing according to the 'new_config'\n " self.config.update(new_config) (batch_sz, bands, frames, channels) = self.input_shape self.input_shape = (self.config['batch'], bands, self.config['context_wnd'], channels) self.build()
7,933,325,750,823,798,000
Recompile the model with the new hyperparameters. NOTE: the network topology changes according to the 'new_config'
src/model/sed_ogits.py
rebuild
Vanova/mfom_attribute_detection
python
def rebuild(self, new_config): "\n Recompile the model with the new hyper parameters.\n NOTE: network topology is changing according to the 'new_config'\n " self.config.update(new_config) (batch_sz, bands, frames, channels) = self.input_shape self.input_shape = (self.config['batch'], bands, self.config['context_wnd'], channels) self.build()
def chage_optimizer(self, new_config, change_out_unit=False): '\n Recompile the model with the new loss and optimizer.\n NOTE: network topology is not changing.\n ' if new_config['freeze_wt']: for layer in self.model.layers[:(- 4)]: layer.trainable = False input = self.model.get_layer(name='input').output output = self.model.get_layer(name='output').output if change_out_unit: la = self.model.layers[(- 2)].output output = Activation(activation=new_config['out_score'], name='output')(la) print(('[INFO] output scores has been changed: %s to %s' % (self.config['out_score'], new_config['out_score']))) self._compile_model(input=input, output=output, params=new_config)
1,734,875,810,475,922,400
Recompile the model with the new loss and optimizer. NOTE: network topology is not changing.
src/model/sed_ogits.py
chage_optimizer
Vanova/mfom_attribute_detection
python
def chage_optimizer(self, new_config, change_out_unit=False): '\n Recompile the model with the new loss and optimizer.\n NOTE: network topology is not changing.\n ' if new_config['freeze_wt']: for layer in self.model.layers[:(- 4)]: layer.trainable = False input = self.model.get_layer(name='input').output output = self.model.get_layer(name='output').output if change_out_unit: la = self.model.layers[(- 2)].output output = Activation(activation=new_config['out_score'], name='output')(la) print(('[INFO] output scores has been changed: %s to %s' % (self.config['out_score'], new_config['out_score']))) self._compile_model(input=input, output=output, params=new_config)
def _compile_model(self, input, output, params): '\n Compile network structure with particular loss and optimizer\n ' if (params['loss'] in obj.MFOM_OBJECTIVES): (_, _, frames, _) = self.input_shape y_true = Input(shape=(frames, self.nclass), name='y_true') psi = mfom.UvZMisclassification(name='uvz_misclass')([y_true, output]) y_pred = mfom.SmoothErrorCounter(name='smooth_error_counter')(psi) input = [y_true, input] output = y_pred loss = obj.MFOM_OBJECTIVES[params['loss']] elif (params['loss'] == obj.mfom_eer_embed.__name__): loss = obj.mfom_eer_embed else: loss = params['loss'] if (params['optimizer'] == 'adam'): optimizer = Adam(lr=params['learn_rate'], beta_1=0.9, beta_2=0.999, epsilon=1e-08) elif (params['optimizer'] == 'sgd'): optimizer = SGD(lr=params['learn_rate'], decay=1e-06, momentum=0.9, nesterov=True) elif (params['optimizer'] == 'adadelta'): optimizer = Adadelta(lr=params['learn_rate']) elif (params['optimizer'] == 'rmsprop'): optimizer = RMSprop(lr=params['learn_rate']) else: optimizer = params['optimizer'] self.model = Model(input=input, output=output) self.model.compile(loss=loss, optimizer=optimizer) self.model.summary()
6,074,246,226,617,041,000
Compile network structure with particular loss and optimizer
src/model/sed_ogits.py
_compile_model
Vanova/mfom_attribute_detection
python
def _compile_model(self, input, output, params): '\n \n ' if (params['loss'] in obj.MFOM_OBJECTIVES): (_, _, frames, _) = self.input_shape y_true = Input(shape=(frames, self.nclass), name='y_true') psi = mfom.UvZMisclassification(name='uvz_misclass')([y_true, output]) y_pred = mfom.SmoothErrorCounter(name='smooth_error_counter')(psi) input = [y_true, input] output = y_pred loss = obj.MFOM_OBJECTIVES[params['loss']] elif (params['loss'] == obj.mfom_eer_embed.__name__): loss = obj.mfom_eer_embed else: loss = params['loss'] if (params['optimizer'] == 'adam'): optimizer = Adam(lr=params['learn_rate'], beta_1=0.9, beta_2=0.999, epsilon=1e-08) elif (params['optimizer'] == 'sgd'): optimizer = SGD(lr=params['learn_rate'], decay=1e-06, momentum=0.9, nesterov=True) elif (params['optimizer'] == 'adadelta'): optimizer = Adadelta(lr=params['learn_rate']) elif (params['optimizer'] == 'rmsprop'): optimizer = RMSprop(lr=params['learn_rate']) else: optimizer = params['optimizer'] self.model = Model(input=input, output=output) self.model.compile(loss=loss, optimizer=optimizer) self.model.summary()
def test_recompute_optimizer_backward(self): ' test recompute optimizer backward ' (train_prog, startup_prog) = (fluid.Program(), fluid.Program()) (avg_cost, strategy) = self.net(train_prog, startup_prog) self.set_strategy(strategy, 'recompute') opt = fluid.optimizer.MomentumOptimizer(learning_rate=0.001, momentum=0.9) opt = RecomputeOptimizer(opt) opt.user_defined_strategy = strategy params_grads = opt.backward(avg_cost, startup_prog) outs = [op.output('Out')[0] for op in avg_cost.block.ops if (op.type == 'mul')] self.assertIn('subprog', ''.join(outs))
5,152,517,799,279,989,000
test recompute optimizer backward
python/paddle/fluid/tests/unittests/test_fleet_recompute_meta_optimizer.py
test_recompute_optimizer_backward
0x45f/Paddle
python
def test_recompute_optimizer_backward(self): ' ' (train_prog, startup_prog) = (fluid.Program(), fluid.Program()) (avg_cost, strategy) = self.net(train_prog, startup_prog) self.set_strategy(strategy, 'recompute') opt = fluid.optimizer.MomentumOptimizer(learning_rate=0.001, momentum=0.9) opt = RecomputeOptimizer(opt) opt.user_defined_strategy = strategy params_grads = opt.backward(avg_cost, startup_prog) outs = [op.output('Out')[0] for op in avg_cost.block.ops if (op.type == 'mul')] self.assertIn('subprog', ''.join(outs))
def test_recompute_optimizer_backward_gradients(self): ' test recompute optimizer backward + gradients ' (train_prog, startup_prog) = (fluid.Program(), fluid.Program()) (avg_cost, strategy) = self.net(train_prog, startup_prog) self.set_strategy(strategy, 'recompute') opt = fluid.optimizer.MomentumOptimizer(learning_rate=0.001, momentum=0.9) opt = RecomputeOptimizer(opt) opt.user_defined_strategy = strategy params_grads = opt.backward(avg_cost, startup_prog) with fluid.program_guard(train_prog, startup_prog): opt.apply_gradients(params_grads) outs = [op.output('Out')[0] for op in avg_cost.block.ops if (op.type == 'mul')] self.assertIn('subprog', ''.join(outs))
-2,802,897,495,768,435,700
test recompute optimizer backward + gradients
python/paddle/fluid/tests/unittests/test_fleet_recompute_meta_optimizer.py
test_recompute_optimizer_backward_gradients
0x45f/Paddle
python
def test_recompute_optimizer_backward_gradients(self): ' ' (train_prog, startup_prog) = (fluid.Program(), fluid.Program()) (avg_cost, strategy) = self.net(train_prog, startup_prog) self.set_strategy(strategy, 'recompute') opt = fluid.optimizer.MomentumOptimizer(learning_rate=0.001, momentum=0.9) opt = RecomputeOptimizer(opt) opt.user_defined_strategy = strategy params_grads = opt.backward(avg_cost, startup_prog) with fluid.program_guard(train_prog, startup_prog): opt.apply_gradients(params_grads) outs = [op.output('Out')[0] for op in avg_cost.block.ops if (op.type == 'mul')] self.assertIn('subprog', ''.join(outs))
def test_recompute_optimizer_backward_optimize(self): ' test recompute optimizer backward + optimize ' (train_prog, startup_prog) = (fluid.Program(), fluid.Program()) (avg_cost, strategy) = self.net(train_prog, startup_prog) self.set_strategy(strategy, 'recompute') opt = fluid.optimizer.MomentumOptimizer(learning_rate=0.001, momentum=0.9) opt = RecomputeOptimizer(opt) opt.user_defined_strategy = strategy params_grads = opt.backward(avg_cost, startup_prog) opt.apply_optimize(avg_cost, startup_prog, params_grads) outs = [op.output('Out')[0] for op in avg_cost.block.ops if (op.type == 'mul')] self.assertIn('subprog', ''.join(outs))
7,818,845,906,098,357,000
test recompute optimizer backward + optimize
python/paddle/fluid/tests/unittests/test_fleet_recompute_meta_optimizer.py
test_recompute_optimizer_backward_optimize
0x45f/Paddle
python
def test_recompute_optimizer_backward_optimize(self): ' ' (train_prog, startup_prog) = (fluid.Program(), fluid.Program()) (avg_cost, strategy) = self.net(train_prog, startup_prog) self.set_strategy(strategy, 'recompute') opt = fluid.optimizer.MomentumOptimizer(learning_rate=0.001, momentum=0.9) opt = RecomputeOptimizer(opt) opt.user_defined_strategy = strategy params_grads = opt.backward(avg_cost, startup_prog) opt.apply_optimize(avg_cost, startup_prog, params_grads) outs = [op.output('Out')[0] for op in avg_cost.block.ops if (op.type == 'mul')] self.assertIn('subprog', ''.join(outs))
def __init__(self, value: AutoMapperNumberInputType) -> None: '\n Converts the value to a timestamp type in Spark\n\n\n :param value: value\n :param formats: (Optional) formats to use for trying to parse the value otherwise uses Spark defaults\n ' super().__init__() self.value: AutoMapperDataTypeBase = (value if isinstance(value, AutoMapperDataTypeBase) else AutoMapperValueParser.parse_value(value))
370,410,302,628,395,260
Converts the value to a timestamp type in Spark :param value: value :param formats: (Optional) formats to use for trying to parse the value otherwise uses Spark defaults
spark_auto_mapper/data_types/unix_timestamp.py
__init__
icanbwell/SparkAutoMapper
python
def __init__(self, value: AutoMapperNumberInputType) -> None: '\n Converts the value to a timestamp type in Spark\n\n\n :param value: value\n :param formats: (Optional) formats to use for trying to parse the value otherwise uses Spark defaults\n ' super().__init__() self.value: AutoMapperDataTypeBase = (value if isinstance(value, AutoMapperDataTypeBase) else AutoMapperValueParser.parse_value(value))
def set_login_url(self): ' Sets the LOGIN_URL variable in the suite scope which will\n automatically log into the target Salesforce org.\n\n Typically, this is run during Suite Setup\n ' BuiltIn().set_suite_variable('${LOGIN_URL}', self.org.start_url)
-1,357,690,966,029,153,500
Sets the LOGIN_URL variable in the suite scope which will automatically log into the target Salesforce org. Typically, this is run during Suite Setup
cumulusci/robotframework/CumulusCI.py
set_login_url
jdominiczak/CumulusCI
python
def set_login_url(self): ' Sets the LOGIN_URL variable in the suite scope which will\n automatically log into the target Salesforce org.\n\n Typically, this is run during Suite Setup\n ' BuiltIn().set_suite_variable('${LOGIN_URL}', self.org.start_url)
def get_org_info(self): ' Returns a dictionary of the org information for the current target\n Salesforce org\n ' return self.org.config
2,211,106,333,232,215,000
Returns a dictionary of the org information for the current target Salesforce org
cumulusci/robotframework/CumulusCI.py
get_org_info
jdominiczak/CumulusCI
python
def get_org_info(self): ' Returns a dictionary of the org information for the current target\n Salesforce org\n ' return self.org.config
def login_url(self, org=None): ' Returns the login url which will automatically log into the target\n Salesforce org. By default, the org_name passed to the library\n constructor is used but this can be overridden with the org option\n to log into a different org.\n ' if (org is None): org = self.org else: org = self.keychain.get_org(org) return org.start_url
2,138,725,497,225,529,600
Returns the login url which will automatically log into the target Salesforce org. By default, the org_name passed to the library constructor is used but this can be overridden with the org option to log into a different org.
cumulusci/robotframework/CumulusCI.py
login_url
jdominiczak/CumulusCI
python
def login_url(self, org=None): ' Returns the login url which will automatically log into the target\n Salesforce org. By default, the org_name passed to the library\n constructor is used but this can be overridden with the org option\n to log into a different org.\n ' if (org is None): org = self.org else: org = self.keychain.get_org(org) return org.start_url
def get_namespace_prefix(self, package=None): ' Returns the namespace prefix (including __) for the specified package name.\n (Defaults to project__package__name_managed from the current project config.)\n\n Returns an empty string if the package is not installed as a managed package.\n ' result = '' if (package is None): package = self.project_config.project__package__name_managed packages = self.tooling.query('SELECT SubscriberPackage.NamespacePrefix, SubscriberPackage.Name FROM InstalledSubscriberPackage') match = [p for p in packages['records'] if (p['SubscriberPackage']['Name'] == package)] if match: result = (match[0]['SubscriberPackage']['NamespacePrefix'] + '__') return result
-2,532,857,310,747,178,500
Returns the namespace prefix (including __) for the specified package name. (Defaults to project__package__name_managed from the current project config.) Returns an empty string if the package is not installed as a managed package.
cumulusci/robotframework/CumulusCI.py
get_namespace_prefix
jdominiczak/CumulusCI
python
def get_namespace_prefix(self, package=None): ' Returns the namespace prefix (including __) for the specified package name.\n (Defaults to project__package__name_managed from the current project config.)\n\n Returns an empty string if the package is not installed as a managed package.\n ' result = '' if (package is None): package = self.project_config.project__package__name_managed packages = self.tooling.query('SELECT SubscriberPackage.NamespacePrefix, SubscriberPackage.Name FROM InstalledSubscriberPackage') match = [p for p in packages['records'] if (p['SubscriberPackage']['Name'] == package)] if match: result = (match[0]['SubscriberPackage']['NamespacePrefix'] + '__') return result
def run_task(self, task_name, **options): ' Runs a named CumulusCI task for the current project with optional\n support for overriding task options via kwargs.\n\n Examples:\n | =Keyword= | =task_name= | =task_options= | =comment= |\n | Run Task | deploy | | Run deploy with standard options |\n | Run Task | deploy | path=path/to/some/metadata | Run deploy with custom path |\n ' task_config = self.project_config.get_task(task_name) class_path = task_config.class_path logger.console('\n') (task_class, task_config) = self._init_task(class_path, options, task_config) return self._run_task(task_class, task_config)
2,809,326,719,546,375,000
Runs a named CumulusCI task for the current project with optional support for overriding task options via kwargs. Examples: | =Keyword= | =task_name= | =task_options= | =comment= | | Run Task | deploy | | Run deploy with standard options | | Run Task | deploy | path=path/to/some/metadata | Run deploy with custom path |
cumulusci/robotframework/CumulusCI.py
run_task
jdominiczak/CumulusCI
python
def run_task(self, task_name, **options): ' Runs a named CumulusCI task for the current project with optional\n support for overriding task options via kwargs.\n\n Examples:\n | =Keyword= | =task_name= | =task_options= | =comment= |\n | Run Task | deploy | | Run deploy with standard options |\n | Run Task | deploy | path=path/to/some/metadata | Run deploy with custom path |\n ' task_config = self.project_config.get_task(task_name) class_path = task_config.class_path logger.console('\n') (task_class, task_config) = self._init_task(class_path, options, task_config) return self._run_task(task_class, task_config)
def run_task_class(self, class_path, **options): " Runs a CumulusCI task class with task options via kwargs.\n\n Use this keyword to run logic from CumulusCI tasks which have not\n been configured in the project's cumulusci.yml file. This is\n most useful in cases where a test needs to use task logic for\n logic unique to the test and thus not worth making into a named\n task for the project\n\n Examples:\n | =Keyword= | =task_class= | =task_options= |\n | Run Task Class | cumulusci.task.utils.DownloadZip | url=http://test.com/test.zip dir=test_zip |\n " logger.console('\n') (task_class, task_config) = self._init_task(class_path, options, TaskConfig()) return self._run_task(task_class, task_config)
6,214,255,676,449,666,000
Runs a CumulusCI task class with task options via kwargs. Use this keyword to run logic from CumulusCI tasks which have not been configured in the project's cumulusci.yml file. This is most useful in cases where a test needs to use task logic for logic unique to the test and thus not worth making into a named task for the project Examples: | =Keyword= | =task_class= | =task_options= | | Run Task Class | cumulusci.task.utils.DownloadZip | url=http://test.com/test.zip dir=test_zip |
cumulusci/robotframework/CumulusCI.py
run_task_class
jdominiczak/CumulusCI
python
def run_task_class(self, class_path, **options): " Runs a CumulusCI task class with task options via kwargs.\n\n Use this keyword to run logic from CumulusCI tasks which have not\n been configured in the project's cumulusci.yml file. This is\n most useful in cases where a test needs to use task logic for\n logic unique to the test and thus not worth making into a named\n task for the project\n\n Examples:\n | =Keyword= | =task_class= | =task_options= |\n | Run Task Class | cumulusci.task.utils.DownloadZip | url=http://test.com/test.zip dir=test_zip |\n " logger.console('\n') (task_class, task_config) = self._init_task(class_path, options, TaskConfig()) return self._run_task(task_class, task_config)
def debug(self): 'Pauses execution and enters the Python debugger.' set_pdb_trace()
-8,269,812,006,413,925,000
Pauses execution and enters the Python debugger.
cumulusci/robotframework/CumulusCI.py
debug
jdominiczak/CumulusCI
python
def debug(self): set_pdb_trace()
def load_image_from_folder(folder_path, new_size, HE=False, Truc=False, Aug=False): 'loads images in the folder_path and returns a ndarray and threshold the label image' image_list = [] label_list = [] for image_name in os.listdir(folder_path): image_original = np.load((folder_path + image_name)) image_original = image_original['a'] image_ct = image_original[:, 0:len(image_original)] image_spect = image_original[:, len(image_original):(len(image_original) * 2)] label = image_original[:, (len(image_original) * 2):(len(image_original) * 3)] if (HE == True): image_ct = cv2.equalizeHist(image_ct) image_spect = cv2.equalizeHist(image_spect) elif (Truc == True): clahe = cv2.createCLAHE(clipLimit=0.1, tileGridSize=(8, 8)) image_spect = clahe.apply(image_spect) image_ct = clahe.apply(image_ct) else: image_spect = image_spect image_ct = image_ct if (Aug == True): 'SPECT' imageSPECT_aug_1 = ndimage.rotate(image_spect, (- 90)) imageSPECT_aug_2 = np.flipud(imageSPECT_aug_1) imageSPECT_aug_3 = ndimage.rotate(image_spect, (- 180)) imageSPECT_aug_4 = np.flipud(imageSPECT_aug_3) imageSPECT_aug_5 = ndimage.rotate(image_spect, (- 270)) imageSPECT_aug_6 = np.flipud(imageSPECT_aug_5) imageSPECT_aug_7 = np.flipud(image_spect) 'CT' imageCT_aug_1 = ndimage.rotate(image_ct, (- 90)) imageCT_aug_2 = np.flipud(imageCT_aug_1) imageCT_aug_3 = ndimage.rotate(image_ct, (- 180)) imageCT_aug_4 = np.flipud(imageCT_aug_3) imageCT_aug_5 = ndimage.rotate(image_ct, (- 270)) imageCT_aug_6 = np.flipud(imageCT_aug_5) imageCT_aug_7 = np.flipud(image_ct) 'label' label_aug_1 = ndimage.rotate(label, (- 90)) label_aug_1 = label_aug_1.astype(int) label_aug_2 = np.flipud(label_aug_1) label_aug_2 = label_aug_2.astype(int) label_aug_3 = ndimage.rotate(label, (- 180)) label_aug_3 = label_aug_3.astype(int) label_aug_4 = np.flipud(label_aug_3) label_aug_4 = label_aug_4.astype(int) label_aug_5 = ndimage.rotate(label, (- 270)) label_aug_5 = label_aug_5.astype(int) label_aug_6 = np.flipud(label_aug_5) label_aug_6 = label_aug_6.astype(int) label_aug_7 = np.flipud(label) label_aug_7 = label_aug_7.astype(int) image_all_0 = np.concatenate((image_ct, image_spect), axis=1) image_all_1 = np.concatenate((imageCT_aug_1, imageSPECT_aug_1), axis=1) image_all_2 = np.concatenate((imageCT_aug_2, imageSPECT_aug_2), axis=1) image_all_3 = np.concatenate((imageCT_aug_3, imageSPECT_aug_3), axis=1) image_all_4 = np.concatenate((imageCT_aug_4, imageSPECT_aug_4), axis=1) image_all_5 = np.concatenate((imageCT_aug_5, imageSPECT_aug_5), axis=1) image_all_6 = np.concatenate((imageCT_aug_6, imageSPECT_aug_6), axis=1) image_all_7 = np.concatenate((imageCT_aug_7, imageSPECT_aug_7), axis=1) image_list.append(image_all_0) image_list.append(image_all_1) image_list.append(image_all_2) image_list.append(image_all_3) image_list.append(image_all_4) image_list.append(image_all_5) image_list.append(image_all_6) image_list.append(image_all_7) label_list.append(label) label_list.append(label_aug_1) label_list.append(label_aug_2) label_list.append(label_aug_3) label_list.append(label_aug_4) label_list.append(label_aug_5) label_list.append(label_aug_6) label_list.append(label_aug_7) else: image_all = np.concatenate((image_ct, image_spect), axis=1) image_list.append(image_all) label_list.append(label) image_array = np.asarray(image_list) label_array = np.asarray(label_list) return (image_array, label_array)
-2,177,866,537,373,515,800
loads images in the folder_path and returns an ndarray, thresholding the label image
DIPDenoising/image_reading.py
load_image_from_folder
junyuchen245/NM-Img-Denoising-DIP-Keras
python
def load_image_from_folder(folder_path, new_size, HE=False, Truc=False, Aug=False): image_list = [] label_list = [] for image_name in os.listdir(folder_path): image_original = np.load((folder_path + image_name)) image_original = image_original['a'] image_ct = image_original[:, 0:len(image_original)] image_spect = image_original[:, len(image_original):(len(image_original) * 2)] label = image_original[:, (len(image_original) * 2):(len(image_original) * 3)] if (HE == True): image_ct = cv2.equalizeHist(image_ct) image_spect = cv2.equalizeHist(image_spect) elif (Truc == True): clahe = cv2.createCLAHE(clipLimit=0.1, tileGridSize=(8, 8)) image_spect = clahe.apply(image_spect) image_ct = clahe.apply(image_ct) else: image_spect = image_spect image_ct = image_ct if (Aug == True): 'SPECT' imageSPECT_aug_1 = ndimage.rotate(image_spect, (- 90)) imageSPECT_aug_2 = np.flipud(imageSPECT_aug_1) imageSPECT_aug_3 = ndimage.rotate(image_spect, (- 180)) imageSPECT_aug_4 = np.flipud(imageSPECT_aug_3) imageSPECT_aug_5 = ndimage.rotate(image_spect, (- 270)) imageSPECT_aug_6 = np.flipud(imageSPECT_aug_5) imageSPECT_aug_7 = np.flipud(image_spect) 'CT' imageCT_aug_1 = ndimage.rotate(image_ct, (- 90)) imageCT_aug_2 = np.flipud(imageCT_aug_1) imageCT_aug_3 = ndimage.rotate(image_ct, (- 180)) imageCT_aug_4 = np.flipud(imageCT_aug_3) imageCT_aug_5 = ndimage.rotate(image_ct, (- 270)) imageCT_aug_6 = np.flipud(imageCT_aug_5) imageCT_aug_7 = np.flipud(image_ct) 'label' label_aug_1 = ndimage.rotate(label, (- 90)) label_aug_1 = label_aug_1.astype(int) label_aug_2 = np.flipud(label_aug_1) label_aug_2 = label_aug_2.astype(int) label_aug_3 = ndimage.rotate(label, (- 180)) label_aug_3 = label_aug_3.astype(int) label_aug_4 = np.flipud(label_aug_3) label_aug_4 = label_aug_4.astype(int) label_aug_5 = ndimage.rotate(label, (- 270)) label_aug_5 = label_aug_5.astype(int) label_aug_6 = np.flipud(label_aug_5) label_aug_6 = label_aug_6.astype(int) label_aug_7 = np.flipud(label) label_aug_7 = label_aug_7.astype(int) image_all_0 = np.concatenate((image_ct, image_spect), axis=1) image_all_1 = np.concatenate((imageCT_aug_1, imageSPECT_aug_1), axis=1) image_all_2 = np.concatenate((imageCT_aug_2, imageSPECT_aug_2), axis=1) image_all_3 = np.concatenate((imageCT_aug_3, imageSPECT_aug_3), axis=1) image_all_4 = np.concatenate((imageCT_aug_4, imageSPECT_aug_4), axis=1) image_all_5 = np.concatenate((imageCT_aug_5, imageSPECT_aug_5), axis=1) image_all_6 = np.concatenate((imageCT_aug_6, imageSPECT_aug_6), axis=1) image_all_7 = np.concatenate((imageCT_aug_7, imageSPECT_aug_7), axis=1) image_list.append(image_all_0) image_list.append(image_all_1) image_list.append(image_all_2) image_list.append(image_all_3) image_list.append(image_all_4) image_list.append(image_all_5) image_list.append(image_all_6) image_list.append(image_all_7) label_list.append(label) label_list.append(label_aug_1) label_list.append(label_aug_2) label_list.append(label_aug_3) label_list.append(label_aug_4) label_list.append(label_aug_5) label_list.append(label_aug_6) label_list.append(label_aug_7) else: image_all = np.concatenate((image_ct, image_spect), axis=1) image_list.append(image_all) label_list.append(label) image_array = np.asarray(image_list) label_array = np.asarray(label_list) return (image_array, label_array)
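A hypothetical call to the loader above; the folder path, and the assumption that it contains .npz files saved under key 'a', are placeholders. Note that new_size is accepted but unused by the function as recorded.

    images, labels = load_image_from_folder('data/train/', new_size=None, Aug=True)
    print(images.shape, labels.shape)  # with Aug=True, 8 variants per input file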
def load_test_from_folder(folder_path, new_size, HE=False, Truc=False, Aug=False): 'loads images in the folder_path and returns a ndarray and threshold the label image' image_list = [] for image_name in os.listdir(folder_path): image_original = np.load((folder_path + image_name)) image_original = image_original['a'] image_ct = image_original[:, 0:len(image_original)] image_spect = image_original[:, len(image_original):(len(image_original) * 2)] image_all = np.concatenate((image_ct, image_spect), axis=1) image_list.append(image_all) image_array = np.asarray(image_list) return image_array
-3,532,767,422,098,932,700
loads images in the folder_path and returns an ndarray, thresholding the label image
DIPDenoising/image_reading.py
load_test_from_folder
junyuchen245/NM-Img-Denoising-DIP-Keras
python
def load_test_from_folder(folder_path, new_size, HE=False, Truc=False, Aug=False): image_list = [] for image_name in os.listdir(folder_path): image_original = np.load((folder_path + image_name)) image_original = image_original['a'] image_ct = image_original[:, 0:len(image_original)] image_spect = image_original[:, len(image_original):(len(image_original) * 2)] image_all = np.concatenate((image_ct, image_spect), axis=1) image_list.append(image_all) image_array = np.asarray(image_list) return image_array
def __init__(__self__, *, s3: pulumi.Input['SigningJobDestinationS3Args']): "\n :param pulumi.Input['SigningJobDestinationS3Args'] s3: A configuration block describing the S3 Destination object: See S3 Destination below for details.\n " pulumi.set(__self__, 's3', s3)
6,355,375,750,964,583,000
:param pulumi.Input['SigningJobDestinationS3Args'] s3: A configuration block describing the S3 Destination object: See S3 Destination below for details.
sdk/python/pulumi_aws/signer/_inputs.py
__init__
chivandikwa/pulumi-aws
python
def __init__(__self__, *, s3: pulumi.Input['SigningJobDestinationS3Args']): "\n \n " pulumi.set(__self__, 's3', s3)
@property @pulumi.getter def s3(self) -> pulumi.Input['SigningJobDestinationS3Args']: '\n A configuration block describing the S3 Destination object: See S3 Destination below for details.\n ' return pulumi.get(self, 's3')
6,717,553,919,461,040,000
A configuration block describing the S3 Destination object: See S3 Destination below for details.
sdk/python/pulumi_aws/signer/_inputs.py
s3
chivandikwa/pulumi-aws
python
@property @pulumi.getter def s3(self) -> pulumi.Input['SigningJobDestinationS3Args']: '\n \n ' return pulumi.get(self, 's3')
def __init__(__self__, *, bucket: pulumi.Input[str], prefix: Optional[pulumi.Input[str]]=None): '\n :param pulumi.Input[str] bucket: Name of the S3 bucket.\n :param pulumi.Input[str] prefix: An Amazon S3 object key prefix that you can use to limit signed objects keys to begin with the specified prefix.\n ' pulumi.set(__self__, 'bucket', bucket) if (prefix is not None): pulumi.set(__self__, 'prefix', prefix)
-8,556,780,313,228,384,000
:param pulumi.Input[str] bucket: Name of the S3 bucket. :param pulumi.Input[str] prefix: An Amazon S3 object key prefix that you can use to limit signed objects keys to begin with the specified prefix.
sdk/python/pulumi_aws/signer/_inputs.py
__init__
chivandikwa/pulumi-aws
python
def __init__(__self__, *, bucket: pulumi.Input[str], prefix: Optional[pulumi.Input[str]]=None): '\n :param pulumi.Input[str] bucket: Name of the S3 bucket.\n :param pulumi.Input[str] prefix: An Amazon S3 object key prefix that you can use to limit signed objects keys to begin with the specified prefix.\n ' pulumi.set(__self__, 'bucket', bucket) if (prefix is not None): pulumi.set(__self__, 'prefix', prefix)
@property @pulumi.getter def bucket(self) -> pulumi.Input[str]: '\n Name of the S3 bucket.\n ' return pulumi.get(self, 'bucket')
-1,710,090,463,754,171,000
Name of the S3 bucket.
sdk/python/pulumi_aws/signer/_inputs.py
bucket
chivandikwa/pulumi-aws
python
@property @pulumi.getter def bucket(self) -> pulumi.Input[str]: '\n \n ' return pulumi.get(self, 'bucket')
@property @pulumi.getter def prefix(self) -> Optional[pulumi.Input[str]]: '\n An Amazon S3 object key prefix that you can use to limit signed objects keys to begin with the specified prefix.\n ' return pulumi.get(self, 'prefix')
4,363,513,374,241,139,000
An Amazon S3 object key prefix that you can use to limit signed objects keys to begin with the specified prefix.
sdk/python/pulumi_aws/signer/_inputs.py
prefix
chivandikwa/pulumi-aws
python
@property @pulumi.getter def prefix(self) -> Optional[pulumi.Input[str]]: '\n \n ' return pulumi.get(self, 'prefix')
def __init__(__self__, *, s3s: Optional[pulumi.Input[Sequence[pulumi.Input['SigningJobSignedObjectS3Args']]]]=None): "\n :param pulumi.Input[Sequence[pulumi.Input['SigningJobSignedObjectS3Args']]] s3s: A configuration block describing the S3 Destination object: See S3 Destination below for details.\n " if (s3s is not None): pulumi.set(__self__, 's3s', s3s)
8,890,170,183,575,213,000
:param pulumi.Input[Sequence[pulumi.Input['SigningJobSignedObjectS3Args']]] s3s: A configuration block describing the S3 Destination object: See S3 Destination below for details.
sdk/python/pulumi_aws/signer/_inputs.py
__init__
chivandikwa/pulumi-aws
python
def __init__(__self__, *, s3s: Optional[pulumi.Input[Sequence[pulumi.Input['SigningJobSignedObjectS3Args']]]]=None): "\n \n " if (s3s is not None): pulumi.set(__self__, 's3s', s3s)
@property @pulumi.getter def s3s(self) -> Optional[pulumi.Input[Sequence[pulumi.Input['SigningJobSignedObjectS3Args']]]]: '\n A configuration block describing the S3 Destination object: See S3 Destination below for details.\n ' return pulumi.get(self, 's3s')
8,498,705,302,498,874,000
A configuration block describing the S3 Destination object: See S3 Destination below for details.
sdk/python/pulumi_aws/signer/_inputs.py
s3s
chivandikwa/pulumi-aws
python
@property @pulumi.getter def s3s(self) -> Optional[pulumi.Input[Sequence[pulumi.Input['SigningJobSignedObjectS3Args']]]]: '\n \n ' return pulumi.get(self, 's3s')
def __init__(__self__, *, bucket: Optional[pulumi.Input[str]]=None, key: Optional[pulumi.Input[str]]=None): '\n :param pulumi.Input[str] bucket: Name of the S3 bucket.\n :param pulumi.Input[str] key: Key name of the object that contains your unsigned code.\n ' if (bucket is not None): pulumi.set(__self__, 'bucket', bucket) if (key is not None): pulumi.set(__self__, 'key', key)
7,574,837,572,411,065,000
:param pulumi.Input[str] bucket: Name of the S3 bucket. :param pulumi.Input[str] key: Key name of the object that contains your unsigned code.
sdk/python/pulumi_aws/signer/_inputs.py
__init__
chivandikwa/pulumi-aws
python
def __init__(__self__, *, bucket: Optional[pulumi.Input[str]]=None, key: Optional[pulumi.Input[str]]=None): '\n :param pulumi.Input[str] bucket: Name of the S3 bucket.\n :param pulumi.Input[str] key: Key name of the object that contains your unsigned code.\n ' if (bucket is not None): pulumi.set(__self__, 'bucket', bucket) if (key is not None): pulumi.set(__self__, 'key', key)
@property @pulumi.getter def bucket(self) -> Optional[pulumi.Input[str]]: '\n Name of the S3 bucket.\n ' return pulumi.get(self, 'bucket')
7,665,507,557,751,155,000
Name of the S3 bucket.
sdk/python/pulumi_aws/signer/_inputs.py
bucket
chivandikwa/pulumi-aws
python
@property @pulumi.getter def bucket(self) -> Optional[pulumi.Input[str]]: '\n \n ' return pulumi.get(self, 'bucket')
@property @pulumi.getter def key(self) -> Optional[pulumi.Input[str]]: '\n Key name of the object that contains your unsigned code.\n ' return pulumi.get(self, 'key')
-8,948,908,011,397,422,000
Key name of the object that contains your unsigned code.
sdk/python/pulumi_aws/signer/_inputs.py
key
chivandikwa/pulumi-aws
python
@property @pulumi.getter def key(self) -> Optional[pulumi.Input[str]]: '\n \n ' return pulumi.get(self, 'key')
def __init__(__self__, *, s3: pulumi.Input['SigningJobSourceS3Args']): "\n :param pulumi.Input['SigningJobSourceS3Args'] s3: A configuration block describing the S3 Destination object: See S3 Destination below for details.\n " pulumi.set(__self__, 's3', s3)
-6,220,106,824,449,930,000
:param pulumi.Input['SigningJobSourceS3Args'] s3: A configuration block describing the S3 Destination object: See S3 Destination below for details.
sdk/python/pulumi_aws/signer/_inputs.py
__init__
chivandikwa/pulumi-aws
python
def __init__(__self__, *, s3: pulumi.Input['SigningJobSourceS3Args']): "\n \n " pulumi.set(__self__, 's3', s3)
@property @pulumi.getter def s3(self) -> pulumi.Input['SigningJobSourceS3Args']: '\n A configuration block describing the S3 Destination object: See S3 Destination below for details.\n ' return pulumi.get(self, 's3')
3,627,296,741,726,690,300
A configuration block describing the S3 Destination object: See S3 Destination below for details.
sdk/python/pulumi_aws/signer/_inputs.py
s3
chivandikwa/pulumi-aws
python
@property @pulumi.getter def s3(self) -> pulumi.Input['SigningJobSourceS3Args']: '\n \n ' return pulumi.get(self, 's3')
def __init__(__self__, *, bucket: pulumi.Input[str], key: pulumi.Input[str], version: pulumi.Input[str]): '\n :param pulumi.Input[str] bucket: Name of the S3 bucket.\n :param pulumi.Input[str] key: Key name of the object that contains your unsigned code.\n :param pulumi.Input[str] version: Version of your source image in your version enabled S3 bucket.\n ' pulumi.set(__self__, 'bucket', bucket) pulumi.set(__self__, 'key', key) pulumi.set(__self__, 'version', version)
1,291,970,849,343,384,000
:param pulumi.Input[str] bucket: Name of the S3 bucket. :param pulumi.Input[str] key: Key name of the object that contains your unsigned code. :param pulumi.Input[str] version: Version of your source image in your version enabled S3 bucket.
sdk/python/pulumi_aws/signer/_inputs.py
__init__
chivandikwa/pulumi-aws
python
def __init__(__self__, *, bucket: pulumi.Input[str], key: pulumi.Input[str], version: pulumi.Input[str]): '\n :param pulumi.Input[str] bucket: Name of the S3 bucket.\n :param pulumi.Input[str] key: Key name of the object that contains your unsigned code.\n :param pulumi.Input[str] version: Version of your source image in your version enabled S3 bucket.\n ' pulumi.set(__self__, 'bucket', bucket) pulumi.set(__self__, 'key', key) pulumi.set(__self__, 'version', version)
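A hypothetical construction of the args class above; all values are placeholders, and the import path assumes the published pulumi_aws package layout.

    import pulumi_aws as aws

    source = aws.signer.SigningJobSourceS3Args(
        bucket='my-bucket',
        key='unsigned/app.zip',
        version='example-object-version',
    )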
@property @pulumi.getter def bucket(self) -> pulumi.Input[str]: '\n Name of the S3 bucket.\n ' return pulumi.get(self, 'bucket')
-1,710,090,463,754,171,000
Name of the S3 bucket.
sdk/python/pulumi_aws/signer/_inputs.py
bucket
chivandikwa/pulumi-aws
python
@property @pulumi.getter def bucket(self) -> pulumi.Input[str]: '\n \n ' return pulumi.get(self, 'bucket')
@property @pulumi.getter def key(self) -> pulumi.Input[str]: '\n Key name of the object that contains your unsigned code.\n ' return pulumi.get(self, 'key')
836,810,978,507,954,400
Key name of the object that contains your unsigned code.
sdk/python/pulumi_aws/signer/_inputs.py
key
chivandikwa/pulumi-aws
python
@property @pulumi.getter def key(self) -> pulumi.Input[str]: '\n \n ' return pulumi.get(self, 'key')
@property @pulumi.getter def version(self) -> pulumi.Input[str]: '\n Version of your source image in your version enabled S3 bucket.\n ' return pulumi.get(self, 'version')
1,773,487,770,454,480,000
Version of your source image in your version enabled S3 bucket.
sdk/python/pulumi_aws/signer/_inputs.py
version
chivandikwa/pulumi-aws
python
@property @pulumi.getter def version(self) -> pulumi.Input[str]: '\n \n ' return pulumi.get(self, 'version')
def stations_level_over_threshold(stations: list, tol: float) -> list: 'function takes in stations and returns a list of tuples containing station and\n relative water level where the relative water level is greater than tol ' stations = consistant_typical_range_stations(stations) res_list = [] for station in stations: rel_level = station.relative_water_level() if (rel_level is not None): if (rel_level > tol): res_list.append((station, rel_level)) return res_list
3,081,818,777,980,905,500
function takes in stations and returns a list of tuples containing station and relative water level where the relative water level is greater than tol
floodsystem/flood.py
stations_level_over_threshold
LakeeSiv/Flood
python
def stations_level_over_threshold(stations: list, tol: float) -> list: 'function takes in stations and returns a list of tuples containing station and\n relative water level where the relative water level is greater than tol ' stations = consistant_typical_range_stations(stations) res_list = [] for station in stations: rel_level = station.relative_water_level() if (rel_level is not None): if (rel_level > tol): res_list.append((station, rel_level)) return res_list
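A hedged usage sketch for stations_level_over_threshold; build_station_list and update_water_levels are assumed to come from the same floodsystem package as this module.

    from floodsystem.stationdata import build_station_list, update_water_levels

    stations = build_station_list()
    update_water_levels(stations)
    for station, level in stations_level_over_threshold(stations, tol=0.8):
        print(station.name, level)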
def stations_highest_rel_level(stations, N): 'Returns a list of N MonitoringStation objects ordered from highest to lowest risk' stations = consistant_typical_range_stations(stations) def key(x): if (x.relative_water_level() is not None): return x.relative_water_level() else: return float(0) stationByHighestLevel = sorted(stations, key=key, reverse=True) NstationByLevel = stationByHighestLevel[:N] return NstationByLevel
-7,452,110,879,722,056,000
Returns a list of N MonitoringStation objects ordered from highest to lowest risk
floodsystem/flood.py
stations_highest_rel_level
LakeeSiv/Flood
python
def stations_highest_rel_level(stations, N): stations = consistant_typical_range_stations(stations) def key(x): if (x.relative_water_level() is not None): return x.relative_water_level() else: return float(0) stationByHighestLevel = sorted(stations, key=key, reverse=True) NstationByLevel = stationByHighestLevel[:N] return NstationByLevel
def get_network_interface_tap_configuration(network_interface_name: Optional[str]=None, resource_group_name: Optional[str]=None, tap_configuration_name: Optional[str]=None, opts: Optional[pulumi.InvokeOptions]=None) -> AwaitableGetNetworkInterfaceTapConfigurationResult: '\n Use this data source to access information about an existing resource.\n\n :param str network_interface_name: The name of the network interface.\n :param str resource_group_name: The name of the resource group.\n :param str tap_configuration_name: The name of the tap configuration.\n ' __args__ = dict() __args__['networkInterfaceName'] = network_interface_name __args__['resourceGroupName'] = resource_group_name __args__['tapConfigurationName'] = tap_configuration_name if (opts is None): opts = pulumi.InvokeOptions() if (opts.version is None): opts.version = _utilities.get_version() __ret__ = pulumi.runtime.invoke('azure-nextgen:network/v20200301:getNetworkInterfaceTapConfiguration', __args__, opts=opts, typ=GetNetworkInterfaceTapConfigurationResult).value return AwaitableGetNetworkInterfaceTapConfigurationResult(etag=__ret__.etag, name=__ret__.name, provisioning_state=__ret__.provisioning_state, type=__ret__.type, virtual_network_tap=__ret__.virtual_network_tap)
-1,626,526,267,666,726,700
Use this data source to access information about an existing resource. :param str network_interface_name: The name of the network interface. :param str resource_group_name: The name of the resource group. :param str tap_configuration_name: The name of the tap configuration.
sdk/python/pulumi_azure_nextgen/network/v20200301/get_network_interface_tap_configuration.py
get_network_interface_tap_configuration
test-wiz-sec/pulumi-azure-nextgen
python
def get_network_interface_tap_configuration(network_interface_name: Optional[str]=None, resource_group_name: Optional[str]=None, tap_configuration_name: Optional[str]=None, opts: Optional[pulumi.InvokeOptions]=None) -> AwaitableGetNetworkInterfaceTapConfigurationResult: '\n Use this data source to access information about an existing resource.\n\n :param str network_interface_name: The name of the network interface.\n :param str resource_group_name: The name of the resource group.\n :param str tap_configuration_name: The name of the tap configuration.\n ' __args__ = dict() __args__['networkInterfaceName'] = network_interface_name __args__['resourceGroupName'] = resource_group_name __args__['tapConfigurationName'] = tap_configuration_name if (opts is None): opts = pulumi.InvokeOptions() if (opts.version is None): opts.version = _utilities.get_version() __ret__ = pulumi.runtime.invoke('azure-nextgen:network/v20200301:getNetworkInterfaceTapConfiguration', __args__, opts=opts, typ=GetNetworkInterfaceTapConfigurationResult).value return AwaitableGetNetworkInterfaceTapConfigurationResult(etag=__ret__.etag, name=__ret__.name, provisioning_state=__ret__.provisioning_state, type=__ret__.type, virtual_network_tap=__ret__.virtual_network_tap)
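A hypothetical invocation of the data source above; the resource names are placeholders.

    result = get_network_interface_tap_configuration(
        network_interface_name='example-nic',
        resource_group_name='example-rg',
        tap_configuration_name='example-tap')
    print(result.provisioning_state)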
@property @pulumi.getter def etag(self) -> str: '\n A unique read-only string that changes whenever the resource is updated.\n ' return pulumi.get(self, 'etag')
-4,757,010,955,465,940,000
A unique read-only string that changes whenever the resource is updated.
sdk/python/pulumi_azure_nextgen/network/v20200301/get_network_interface_tap_configuration.py
etag
test-wiz-sec/pulumi-azure-nextgen
python
@property @pulumi.getter def etag(self) -> str: '\n \n ' return pulumi.get(self, 'etag')
@property @pulumi.getter def name(self) -> Optional[str]: '\n The name of the resource that is unique within a resource group. This name can be used to access the resource.\n ' return pulumi.get(self, 'name')
7,871,952,733,487,996,000
The name of the resource that is unique within a resource group. This name can be used to access the resource.
sdk/python/pulumi_azure_nextgen/network/v20200301/get_network_interface_tap_configuration.py
name
test-wiz-sec/pulumi-azure-nextgen
python
@property @pulumi.getter def name(self) -> Optional[str]: '\n \n ' return pulumi.get(self, 'name')
@property @pulumi.getter(name='provisioningState') def provisioning_state(self) -> str: '\n The provisioning state of the network interface tap configuration resource.\n ' return pulumi.get(self, 'provisioning_state')
2,762,668,277,548,461,600
The provisioning state of the network interface tap configuration resource.
sdk/python/pulumi_azure_nextgen/network/v20200301/get_network_interface_tap_configuration.py
provisioning_state
test-wiz-sec/pulumi-azure-nextgen
python
@property @pulumi.getter(name='provisioningState') def provisioning_state(self) -> str: '\n \n ' return pulumi.get(self, 'provisioning_state')
@property @pulumi.getter def type(self) -> str: '\n Sub Resource type.\n ' return pulumi.get(self, 'type')
-1,978,523,207,940,031,500
Sub Resource type.
sdk/python/pulumi_azure_nextgen/network/v20200301/get_network_interface_tap_configuration.py
type
test-wiz-sec/pulumi-azure-nextgen
python
@property @pulumi.getter def type(self) -> str: '\n \n ' return pulumi.get(self, 'type')
@property @pulumi.getter(name='virtualNetworkTap') def virtual_network_tap(self) -> Optional['outputs.VirtualNetworkTapResponse']: '\n The reference to the Virtual Network Tap resource.\n ' return pulumi.get(self, 'virtual_network_tap')
8,563,566,634,166,654,000
The reference to the Virtual Network Tap resource.
sdk/python/pulumi_azure_nextgen/network/v20200301/get_network_interface_tap_configuration.py
virtual_network_tap
test-wiz-sec/pulumi-azure-nextgen
python
@property @pulumi.getter(name='virtualNetworkTap') def virtual_network_tap(self) -> Optional['outputs.VirtualNetworkTapResponse']: '\n \n ' return pulumi.get(self, 'virtual_network_tap')
def wrap_deepmind(env, episode_life=True, clip_rewards=True, frame_stack=False, scale=False): 'Configure environment for DeepMind-style Atari.\n ' if episode_life: env = EpisodicLifeEnv(env) if ('FIRE' in env.unwrapped.get_action_meanings()): env = FireResetEnv(env) env = WarpFrame(env) if scale: env = ScaledFloatFrame(env) if clip_rewards: env = ClipRewardEnv(env) env = ImageToPyTorch(env) if frame_stack: env = FrameStack(env, 4) return env
8,419,616,476,582,256,000
Configure environment for DeepMind-style Atari.
cleanrl/experiments/dqn2_atari_visual.py
wrap_deepmind
HelgeS/cleanrl
python
def wrap_deepmind(env, episode_life=True, clip_rewards=True, frame_stack=False, scale=False): '\n ' if episode_life: env = EpisodicLifeEnv(env) if ('FIRE' in env.unwrapped.get_action_meanings()): env = FireResetEnv(env) env = WarpFrame(env) if scale: env = ScaledFloatFrame(env) if clip_rewards: env = ClipRewardEnv(env) env = ImageToPyTorch(env) if frame_stack: env = FrameStack(env, 4) return env
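A sketch of a typical pipeline ending in wrap_deepmind, assuming the standard Breakout env id; NoopResetEnv and MaxAndSkipEnv (defined in this same file) are applied first, following the baselines convention:

import gym
import numpy as np

env = gym.make('BreakoutNoFrameskip-v4')   # assumed Atari env id
env = NoopResetEnv(env, noop_max=30)
env = MaxAndSkipEnv(env, skip=4)
env = wrap_deepmind(env, episode_life=True, clip_rewards=True, frame_stack=True)
obs = env.reset()
print(np.array(obs).shape)  # channel-first stacked frames, e.g. (4, 84, 84)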
def __init__(self, env, noop_max=30): 'Sample initial states by taking random number of no-ops on reset.\n No-op is assumed to be action 0.\n ' gym.Wrapper.__init__(self, env) self.noop_max = noop_max self.override_num_noops = None self.noop_action = 0 assert (env.unwrapped.get_action_meanings()[0] == 'NOOP')
-3,116,499,769,739,602,000
Sample initial states by taking a random number of no-ops on reset. No-op is assumed to be action 0.
cleanrl/experiments/dqn2_atari_visual.py
__init__
HelgeS/cleanrl
python
def __init__(self, env, noop_max=30): 'Sample initial states by taking random number of no-ops on reset.\n No-op is assumed to be action 0.\n ' gym.Wrapper.__init__(self, env) self.noop_max = noop_max self.override_num_noops = None self.noop_action = 0 assert (env.unwrapped.get_action_meanings()[0] == 'NOOP')
def reset(self, **kwargs): ' Do no-op action for a number of steps in [1, noop_max].' self.env.reset(**kwargs) if (self.override_num_noops is not None): noops = self.override_num_noops else: noops = self.unwrapped.np_random.randint(1, (self.noop_max + 1)) assert (noops > 0) obs = None for _ in range(noops): (obs, _, done, _) = self.env.step(self.noop_action) if done: obs = self.env.reset(**kwargs) return obs
-3,653,542,374,860,504,600
Do no-op action for a number of steps in [1, noop_max].
cleanrl/experiments/dqn2_atari_visual.py
reset
HelgeS/cleanrl
python
def reset(self, **kwargs): ' ' self.env.reset(**kwargs) if (self.override_num_noops is not None): noops = self.override_num_noops else: noops = self.unwrapped.np_random.randint(1, (self.noop_max + 1)) assert (noops > 0) obs = None for _ in range(noops): (obs, _, done, _) = self.env.step(self.noop_action) if done: obs = self.env.reset(**kwargs) return obs
def __init__(self, env): 'Take action on reset for environments that are fixed until firing.' gym.Wrapper.__init__(self, env) assert (env.unwrapped.get_action_meanings()[1] == 'FIRE') assert (len(env.unwrapped.get_action_meanings()) >= 3)
-4,566,798,254,225,915,400
Take action on reset for environments that are fixed until firing.
cleanrl/experiments/dqn2_atari_visual.py
__init__
HelgeS/cleanrl
python
def __init__(self, env): gym.Wrapper.__init__(self, env) assert (env.unwrapped.get_action_meanings()[1] == 'FIRE') assert (len(env.unwrapped.get_action_meanings()) >= 3)
def __init__(self, env): 'Make end-of-life == end-of-episode, but only reset on true game over.\n Done by DeepMind for the DQN and co. since it helps value estimation.\n ' gym.Wrapper.__init__(self, env) self.lives = 0 self.was_real_done = True
8,971,507,481,187,434,000
Make end-of-life == end-of-episode, but only reset on true game over. Done by DeepMind for the DQN and co. since it helps value estimation.
cleanrl/experiments/dqn2_atari_visual.py
__init__
HelgeS/cleanrl
python
def __init__(self, env): 'Make end-of-life == end-of-episode, but only reset on true game over.\n Done by DeepMind for the DQN and co. since it helps value estimation.\n ' gym.Wrapper.__init__(self, env) self.lives = 0 self.was_real_done = True
def reset(self, **kwargs): 'Reset only when lives are exhausted.\n This way all states are still reachable even though lives are episodic,\n and the learner need not know about any of this behind-the-scenes.\n ' if self.was_real_done: obs = self.env.reset(**kwargs) else: (obs, _, _, _) = self.env.step(0) self.lives = self.env.unwrapped.ale.lives() return obs
6,497,171,709,227,915,000
Reset only when lives are exhausted. This way all states are still reachable even though lives are episodic, and the learner need not know about any of this behind-the-scenes.
cleanrl/experiments/dqn2_atari_visual.py
reset
HelgeS/cleanrl
python
def reset(self, **kwargs): 'Reset only when lives are exhausted.\n This way all states are still reachable even though lives are episodic,\n and the learner need not know about any of this behind-the-scenes.\n ' if self.was_real_done: obs = self.env.reset(**kwargs) else: (obs, _, _, _) = self.env.step(0) self.lives = self.env.unwrapped.ale.lives() return obs
def __init__(self, env, skip=4): 'Return only every `skip`-th frame' gym.Wrapper.__init__(self, env) self._obs_buffer = np.zeros(((2,) + env.observation_space.shape), dtype=np.uint8) self._skip = skip
-1,973,326,398,823,476,000
Return only every `skip`-th frame
cleanrl/experiments/dqn2_atari_visual.py
__init__
HelgeS/cleanrl
python
def __init__(self, env, skip=4): gym.Wrapper.__init__(self, env) self._obs_buffer = np.zeros(((2,) + env.observation_space.shape), dtype=np.uint8) self._skip = skip
def step(self, action): 'Repeat action, sum reward, and max over last observations.' total_reward = 0.0 done = None for i in range(self._skip): (obs, reward, done, info) = self.env.step(action) if (i == (self._skip - 2)): self._obs_buffer[0] = obs if (i == (self._skip - 1)): self._obs_buffer[1] = obs total_reward += reward if done: break max_frame = self._obs_buffer.max(axis=0) return (max_frame, total_reward, done, info)
-8,302,944,132,917,912,000
Repeat action, sum reward, and max over last observations.
cleanrl/experiments/dqn2_atari_visual.py
step
HelgeS/cleanrl
python
def step(self, action): total_reward = 0.0 done = None for i in range(self._skip): (obs, reward, done, info) = self.env.step(action) if (i == (self._skip - 2)): self._obs_buffer[0] = obs if (i == (self._skip - 1)): self._obs_buffer[1] = obs total_reward += reward if done: break max_frame = self._obs_buffer.max(axis=0) return (max_frame, total_reward, done, info)
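The element-wise max over the two buffered frames removes Atari sprite flicker. A self-contained numpy illustration of that step (not the wrapper itself):

import numpy as np

buf = np.zeros((2, 2, 2), dtype=np.uint8)
buf[0] = [[0, 255], [10, 0]]   # frame captured at skip-2
buf[1] = [[5, 0], [10, 20]]    # frame captured at skip-1
print(buf.max(axis=0))          # element-wise max: [[5 255] [10 20]]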
def reward(self, reward): 'Bin reward to {+1, 0, -1} by its sign.' return np.sign(reward)
-3,165,823,258,035,945,500
Bin reward to {+1, 0, -1} by its sign.
cleanrl/experiments/dqn2_atari_visual.py
reward
HelgeS/cleanrl
python
def reward(self, reward): return np.sign(reward)
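A quick numeric check of the sign-clipping rule:

import numpy as np

# Only the direction of each reward survives, as in the DQN paper.
for r in (-3.7, 0.0, 12.0):
    print(r, '->', np.sign(r))  # -3.7 -> -1.0, 0.0 -> 0.0, 12.0 -> 1.0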
def __init__(self, env, width=84, height=84, grayscale=True, dict_space_key=None): '\n Warp frames to 84x84 as done in the Nature paper and later work.\n If the environment uses dictionary observations, `dict_space_key` can be specified which indicates which\n observation should be warped.\n ' super().__init__(env) self._width = width self._height = height self._grayscale = grayscale self._key = dict_space_key if self._grayscale: num_colors = 1 else: num_colors = 3 new_space = gym.spaces.Box(low=0, high=255, shape=(self._height, self._width, num_colors), dtype=np.uint8) if (self._key is None): original_space = self.observation_space self.observation_space = new_space else: original_space = self.observation_space.spaces[self._key] self.observation_space.spaces[self._key] = new_space assert ((original_space.dtype == np.uint8) and (len(original_space.shape) == 3))
-1,792,364,871,353,848,800
Warp frames to 84x84 as done in the Nature paper and later work. If the environment uses dictionary observations, `dict_space_key` can be specified which indicates which observation should be warped.
cleanrl/experiments/dqn2_atari_visual.py
__init__
HelgeS/cleanrl
python
def __init__(self, env, width=84, height=84, grayscale=True, dict_space_key=None): '\n Warp frames to 84x84 as done in the Nature paper and later work.\n If the environment uses dictionary observations, `dict_space_key` can be specified which indicates which\n observation should be warped.\n ' super().__init__(env) self._width = width self._height = height self._grayscale = grayscale self._key = dict_space_key if self._grayscale: num_colors = 1 else: num_colors = 3 new_space = gym.spaces.Box(low=0, high=255, shape=(self._height, self._width, num_colors), dtype=np.uint8) if (self._key is None): original_space = self.observation_space self.observation_space = new_space else: original_space = self.observation_space.spaces[self._key] self.observation_space.spaces[self._key] = new_space assert ((original_space.dtype == np.uint8) and (len(original_space.shape) == 3))
def __init__(self, env, k): 'Stack k last frames.\n Returns lazy array, which is much more memory efficient.\n See Also\n --------\n baselines.common.atari_wrappers.LazyFrames\n ' gym.Wrapper.__init__(self, env) self.k = k self.frames = deque([], maxlen=k) shp = env.observation_space.shape self.observation_space = spaces.Box(low=0, high=255, shape=(((shp[0] * k),) + shp[1:]), dtype=env.observation_space.dtype)
3,172,038,940,569,455,600
Stack k last frames. Returns a lazy array, which is much more memory efficient. See Also -------- baselines.common.atari_wrappers.LazyFrames
cleanrl/experiments/dqn2_atari_visual.py
__init__
HelgeS/cleanrl
python
def __init__(self, env, k): 'Stack k last frames.\n Returns lazy array, which is much more memory efficient.\n See Also\n --------\n baselines.common.atari_wrappers.LazyFrames\n ' gym.Wrapper.__init__(self, env) self.k = k self.frames = deque([], maxlen=k) shp = env.observation_space.shape self.observation_space = spaces.Box(low=0, high=255, shape=(((shp[0] * k),) + shp[1:]), dtype=env.observation_space.dtype)
def __init__(self, frames): "This object ensures that common frames between the observations are only stored once.\n It exists purely to optimize memory usage which can be huge for DQN's 1M frames replay\n buffers.\n This object should only be converted to numpy array before being passed to the model.\n You'd not believe how complex the previous solution was." self._frames = frames self._out = None
-6,724,169,720,603,208,000
This object ensures that common frames between the observations are only stored once. It exists purely to optimize memory usage which can be huge for DQN's 1M frames replay buffers. This object should only be converted to numpy array before being passed to the model. You'd not believe how complex the previous solution was.
cleanrl/experiments/dqn2_atari_visual.py
__init__
HelgeS/cleanrl
python
def __init__(self, frames): "This object ensures that common frames between the observations are only stored once.\n It exists purely to optimize memory usage which can be huge for DQN's 1M frames replay\n buffers.\n This object should only be converted to numpy array before being passed to the model.\n You'd not believe how complex the previous solution was." self._frames = frames self._out = None
def run_job_and_check_output(self, expected_output, sort=False, literal_eval=False): 'Helper function to run job and compare output.' job_id = self.job_class.create_new() self.assertEqual(self.count_jobs_in_taskqueue(taskqueue_services.QUEUE_NAME_ONE_OFF_JOBS), 0) self.job_class.enqueue(job_id) self.assertEqual(self.count_jobs_in_taskqueue(taskqueue_services.QUEUE_NAME_ONE_OFF_JOBS), 1) self.process_and_flush_pending_tasks() actual_output = self.job_class.get_output(job_id) if literal_eval: actual_output_dict = {} expected_output_dict = {} for item in [ast.literal_eval(value) for value in actual_output]: value = item[1] if isinstance(value, list): value = sorted(value) actual_output_dict[item[0]] = value for item in [ast.literal_eval(value) for value in expected_output]: value = item[1] if isinstance(value, list): value = sorted(value) expected_output_dict[item[0]] = value self.assertEqual(sorted(actual_output_dict.keys()), sorted(expected_output_dict.keys())) for key in actual_output_dict: self.assertEqual(actual_output_dict[key], expected_output_dict[key]) elif sort: self.assertEqual(sorted(actual_output), sorted(expected_output)) else: self.assertEqual(actual_output, expected_output)
-1,385,014,196,414,906,400
Helper function to run job and compare output.
core/domain/prod_validation_jobs_one_off_test.py
run_job_and_check_output
cclauss/oppia
python
def run_job_and_check_output(self, expected_output, sort=False, literal_eval=False): job_id = self.job_class.create_new() self.assertEqual(self.count_jobs_in_taskqueue(taskqueue_services.QUEUE_NAME_ONE_OFF_JOBS), 0) self.job_class.enqueue(job_id) self.assertEqual(self.count_jobs_in_taskqueue(taskqueue_services.QUEUE_NAME_ONE_OFF_JOBS), 1) self.process_and_flush_pending_tasks() actual_output = self.job_class.get_output(job_id) if literal_eval: actual_output_dict = {} expected_output_dict = {} for item in [ast.literal_eval(value) for value in actual_output]: value = item[1] if isinstance(value, list): value = sorted(value) actual_output_dict[item[0]] = value for item in [ast.literal_eval(value) for value in expected_output]: value = item[1] if isinstance(value, list): value = sorted(value) expected_output_dict[item[0]] = value self.assertEqual(sorted(actual_output_dict.keys()), sorted(expected_output_dict.keys())) for key in actual_output_dict: self.assertEqual(actual_output_dict[key], expected_output_dict[key]) elif sort: self.assertEqual(sorted(actual_output), sorted(expected_output)) else: self.assertEqual(actual_output, expected_output)
def update_datastore_types_for_mock_datetime(): 'Updates datastore types for MockDatetime13Hours to ensure that validation\n of ndb datetime properties does not fail.\n ' datastore_types._VALIDATE_PROPERTY_VALUES[MockDatetime13Hours] = datastore_types.ValidatePropertyNothing datastore_types._PACK_PROPERTY_VALUES[MockDatetime13Hours] = datastore_types.PackDatetime datastore_types._PROPERTY_MEANINGS[MockDatetime13Hours] = datastore_types.entity_pb.Property.GD_WHEN
5,530,078,662,511,757,000
Updates datastore types for MockDatetime13Hours to ensure that validation of ndb datetime properties does not fail.
core/domain/prod_validation_jobs_one_off_test.py
update_datastore_types_for_mock_datetime
cclauss/oppia
python
def update_datastore_types_for_mock_datetime(): 'Updates datastore types for MockDatetime13Hours to ensure that validation\n of ndb datetime properties does not fail.\n ' datastore_types._VALIDATE_PROPERTY_VALUES[MockDatetime13Hours] = datastore_types.ValidatePropertyNothing datastore_types._PACK_PROPERTY_VALUES[MockDatetime13Hours] = datastore_types.PackDatetime datastore_types._PROPERTY_MEANINGS[MockDatetime13Hours] = datastore_types.entity_pb.Property.GD_WHEN
def __instancecheck__(cls, other): 'Validates whether the given instance is a datetime\n instance.\n ' return isinstance(other, OriginalDatetimeType)
629,265,886,878,505,000
Validates whether the given instance is a datetime instance.
core/domain/prod_validation_jobs_one_off_test.py
__instancecheck__
cclauss/oppia
python
def __instancecheck__(cls, other): 'Validates whether the given instance is a datetime\n instance.\n ' return isinstance(other, OriginalDatetimeType)
@classmethod def utcnow(cls): 'Returns the current date and time 13 hours behind UTC.' return (CURRENT_DATETIME - datetime.timedelta(hours=13))
-6,697,928,566,259,835,000
Returns the current date and time 13 hours behind UTC.
core/domain/prod_validation_jobs_one_off_test.py
utcnow
cclauss/oppia
python
@classmethod def utcnow(cls): return (CURRENT_DATETIME - datetime.timedelta(hours=13))
def backward(self, top, propagate_down, bottom): 'This layer does not propagate gradients.' pass
8,785,054,024,326,067,000
This layer does not propagate gradients.
lib/model/rpn/proposal_layer.py
backward
busyboxs/pytorch-faster-rcnn
python
def backward(self, top, propagate_down, bottom): pass
def reshape(self, bottom, top): 'Reshaping happens during the call to forward.' pass
-1,223,691,358,648,909,800
Reshaping happens during the call to forward.
lib/model/rpn/proposal_layer.py
reshape
busyboxs/pytorch-faster-rcnn
python
def reshape(self, bottom, top): pass
def _filter_boxes(self, boxes, min_size): 'Remove all boxes with any side smaller than min_size.' ws = ((boxes[:, :, 2] - boxes[:, :, 0]) + 1) hs = ((boxes[:, :, 3] - boxes[:, :, 1]) + 1) keep = ((ws >= min_size.view((- 1), 1).expand_as(ws)) & (hs >= min_size.view((- 1), 1).expand_as(hs))) return keep
1,782,366,848,494,310,100
Remove all boxes with any side smaller than min_size.
lib/model/rpn/proposal_layer.py
_filter_boxes
busyboxs/pytorch-faster-rcnn
python
def _filter_boxes(self, boxes, min_size): ws = ((boxes[:, :, 2] - boxes[:, :, 0]) + 1) hs = ((boxes[:, :, 3] - boxes[:, :, 1]) + 1) keep = ((ws >= min_size.view((- 1), 1).expand_as(ws)) & (hs >= min_size.view((- 1), 1).expand_as(hs))) return keep
@skipUnlessDarwin @add_test_categories(['pyapi']) def test_objc_properties(self): 'Test that expr uses the correct property getters and setters' if (self.getArchitecture() == 'i386'): self.skipTest('requires modern objc runtime') self.build() exe = os.path.join(os.getcwd(), 'a.out') target = self.dbg.CreateTarget(exe) self.assertTrue(target, VALID_TARGET) main_bkpt = target.BreakpointCreateBySourceRegex('Set a breakpoint here.', lldb.SBFileSpec(self.source_name)) self.assertTrue((main_bkpt and (main_bkpt.GetNumLocations() == 1)), VALID_BREAKPOINT) process = target.LaunchSimple(None, None, self.get_process_working_directory()) self.assertTrue((process.GetState() == lldb.eStateStopped), PROCESS_STOPPED) threads = lldbutil.get_threads_stopped_at_breakpoint(process, main_bkpt) self.assertTrue((len(threads) == 1)) thread = threads[0] frame = thread.GetFrameAtIndex(0) mine = frame.FindVariable('mine') self.assertTrue(mine.IsValid()) access_count = mine.GetChildMemberWithName('_access_count') self.assertTrue(access_count.IsValid()) start_access_count = access_count.GetValueAsUnsigned(123456) self.assertTrue((start_access_count != 123456)) nonexistant_value = frame.EvaluateExpression('mine.nonexistantInt', False) nonexistant_error = nonexistant_value.GetError() self.assertTrue(nonexistant_error.Success()) nonexistant_int = nonexistant_value.GetValueAsUnsigned(123456) self.assertTrue((nonexistant_int == 6)) new_access_count = access_count.GetValueAsUnsigned(123456) self.assertTrue(((new_access_count - start_access_count) == 1)) start_access_count = new_access_count nonexistant_change = frame.EvaluateExpression('mine.nonexistantInt = 10', False) nonexistant_error = nonexistant_change.GetError() self.assertTrue(nonexistant_error.Success()) new_access_count = access_count.GetValueAsUnsigned(123456) self.assertTrue(((new_access_count - start_access_count) == 1)) start_access_count = new_access_count backed_value = frame.EvaluateExpression('mine.backedInt', False) backed_error = backed_value.GetError() self.assertTrue(backed_error.Success()) backing_value = mine.GetChildMemberWithName('_backedInt') self.assertTrue(backing_value.IsValid()) self.assertTrue((backed_value.GetValueAsUnsigned(12345) == backing_value.GetValueAsUnsigned(23456))) unbacked_value = frame.EvaluateExpression('mine.unbackedInt', False) unbacked_error = unbacked_value.GetError() self.assertTrue(unbacked_error.Success()) idWithProtocol_value = frame.EvaluateExpression('mine.idWithProtocol', False) idWithProtocol_error = idWithProtocol_value.GetError() self.assertTrue(idWithProtocol_error.Success()) self.assertTrue((idWithProtocol_value.GetTypeName() == 'id')) value = frame.EvaluateExpression('BaseClass.classInt', False) self.assertTrue(value.GetError().Success()) self.assertTrue((value.GetValueAsUnsigned(11111) == 123)) value = frame.EvaluateExpression('BaseClass.classInt = 234', False) self.assertTrue(value.GetError().Success()) value = frame.EvaluateExpression('BaseClass.classInt', False) self.assertTrue(value.GetError().Success()) self.assertTrue((value.GetValueAsUnsigned(11111) == 234))
8,712,816,977,307,540,000
Test that expr uses the correct property getters and setters
SymbolExtractorAndRenamer/lldb/packages/Python/lldbsuite/test/lang/objc/objc-property/TestObjCProperty.py
test_objc_properties
Polidea/SiriusObfuscator
python
@skipUnlessDarwin @add_test_categories(['pyapi']) def test_objc_properties(self): if (self.getArchitecture() == 'i386'): self.skipTest('requires modern objc runtime') self.build() exe = os.path.join(os.getcwd(), 'a.out') target = self.dbg.CreateTarget(exe) self.assertTrue(target, VALID_TARGET) main_bkpt = target.BreakpointCreateBySourceRegex('Set a breakpoint here.', lldb.SBFileSpec(self.source_name)) self.assertTrue((main_bkpt and (main_bkpt.GetNumLocations() == 1)), VALID_BREAKPOINT) process = target.LaunchSimple(None, None, self.get_process_working_directory()) self.assertTrue((process.GetState() == lldb.eStateStopped), PROCESS_STOPPED) threads = lldbutil.get_threads_stopped_at_breakpoint(process, main_bkpt) self.assertTrue((len(threads) == 1)) thread = threads[0] frame = thread.GetFrameAtIndex(0) mine = frame.FindVariable('mine') self.assertTrue(mine.IsValid()) access_count = mine.GetChildMemberWithName('_access_count') self.assertTrue(access_count.IsValid()) start_access_count = access_count.GetValueAsUnsigned(123456) self.assertTrue((start_access_count != 123456)) nonexistant_value = frame.EvaluateExpression('mine.nonexistantInt', False) nonexistant_error = nonexistant_value.GetError() self.assertTrue(nonexistant_error.Success()) nonexistant_int = nonexistant_value.GetValueAsUnsigned(123456) self.assertTrue((nonexistant_int == 6)) new_access_count = access_count.GetValueAsUnsigned(123456) self.assertTrue(((new_access_count - start_access_count) == 1)) start_access_count = new_access_count nonexistant_change = frame.EvaluateExpression('mine.nonexistantInt = 10', False) nonexistant_error = nonexistant_change.GetError() self.assertTrue(nonexistant_error.Success()) new_access_count = access_count.GetValueAsUnsigned(123456) self.assertTrue(((new_access_count - start_access_count) == 1)) start_access_count = new_access_count backed_value = frame.EvaluateExpression('mine.backedInt', False) backed_error = backed_value.GetError() self.assertTrue(backed_error.Success()) backing_value = mine.GetChildMemberWithName('_backedInt') self.assertTrue(backing_value.IsValid()) self.assertTrue((backed_value.GetValueAsUnsigned(12345) == backing_value.GetValueAsUnsigned(23456))) unbacked_value = frame.EvaluateExpression('mine.unbackedInt', False) unbacked_error = unbacked_value.GetError() self.assertTrue(unbacked_error.Success()) idWithProtocol_value = frame.EvaluateExpression('mine.idWithProtocol', False) idWithProtocol_error = idWithProtocol_value.GetError() self.assertTrue(idWithProtocol_error.Success()) self.assertTrue((idWithProtocol_value.GetTypeName() == 'id')) value = frame.EvaluateExpression('BaseClass.classInt', False) self.assertTrue(value.GetError().Success()) self.assertTrue((value.GetValueAsUnsigned(11111) == 123)) value = frame.EvaluateExpression('BaseClass.classInt = 234', False) self.assertTrue(value.GetError().Success()) value = frame.EvaluateExpression('BaseClass.classInt', False) self.assertTrue(value.GetError().Success()) self.assertTrue((value.GetValueAsUnsigned(11111) == 234))
def get_commit_distance_version(repo_dir: str, version: Version, latest_tag: str) -> Optional[Version]: '\n This function creates a string which describes the version of the\n monorepo or package that includes the commit distance and SHA revision\n number.\n\n For a mono repository, the full commit distance is used. The same is true\n for a single package. For a package inside a mono repository that does not\n apply mono versioning, the packages\' local commit distance is used.\n\n This is close to what `git describe --tags` does. An example version number\n generated by this function is: `0.1.0+24.gd9ade3f`. If the working state is\n dirty, `.dirty` will be appended to the local version.\n\n Notes:\n\n - If there is no commit distance from the *latest_tag* to the current\n state of the repository, this function returns None.\n - The version returned by this function is a PEP440 local version that\n cannot be used for packages when submitting them to PyPI.\n - If the tag for the version of *subject* does not exist on the repository,\n it will fall back to 0.0.0 as the version number which is treated as\n "the beginning of the repository", even if no tag for this version exists.\n\n Todo: We could try to find the previous tag for this subject and use that.\n ' git = Git(repo_dir) dirty = git.has_diff() if git.rev_parse(latest_tag): distance = len(git.rev_list((latest_tag + '..HEAD'))) else: logger.warning('tag "%s" does not exist', latest_tag) version = Version('0.0.0') distance = len(git.rev_list('HEAD')) if (distance == 0): if dirty: return parse_version((str(version) + '+dirty')) return None rev = git.rev_parse('HEAD') assert rev, git local = '+{}.g{}{}'.format(distance, rev[:7], ('.dirty' if dirty else '')) return parse_version((str(version) + local))
7,152,067,297,977,686,000
This function creates a string which describes the version of the monorepo or package that includes the commit distance and SHA revision number. For a mono repository, the full commit distance is used. The same is true for a single package. For a package inside a mono repository that does not apply mono versioning, the packages' local commit distance is used. This is close to what `git describe --tags` does. An example version number generated by this function is: `0.1.0+24.gd9ade3f`. If the working state is dirty, `.dirty` will be appended to the local version. Notes: - If there is no commit distance from the *latest_tag* to the current state of the repository, this function returns None. - The version returned by this function is a PEP440 local version that cannot be used for packages when submitting them to PyPI. - If the tag for the version of *subject* does not exist on the repository, it will fall back to 0.0.0 as the version number which is treated as "the beginning of the repository", even if no tag for this version exists. Todo: We could try to find the previous tag for this subject and use that.
src/shut/model/version.py
get_commit_distance_version
NiklasRosenstein/shut
python
def get_commit_distance_version(repo_dir: str, version: Version, latest_tag: str) -> Optional[Version]: '\n This function creates a string which describes the version of the\n monorepo or package that includes the commit distance and SHA revision\n number.\n\n For a mono repository, the full commit distance is used. The same is true\n for a single package. For a package inside a mono repository that does not\n apply mono versioning, the packages\' local commit distance is used.\n\n This is close to what `git describe --tags` does. An example version number\n generated by this function is: `0.1.0+24.gd9ade3f`. If the working state is\n dirty, `.dirty` will be appended to the local version.\n\n Notes:\n\n - If there is no commit distance from the *latest_tag* to the current\n state of the repository, this function returns None.\n - The version returned by this function is a PEP440 local version that\n cannot be used for packages when submitting them to PyPI.\n - If the tag for the version of *subject* does not exist on the repository,\n it will fall back to 0.0.0 as the version number which is treated as\n "the beginning of the repository", even if no tag for this version exists.\n\n Todo: We could try to find the previous tag for this subject and use that.\n ' git = Git(repo_dir) dirty = git.has_diff() if git.rev_parse(latest_tag): distance = len(git.rev_list((latest_tag + '..HEAD'))) else: logger.warning('tag "%s" does not exist', latest_tag) version = Version('0.0.0') distance = len(git.rev_list('HEAD')) if (distance == 0): if dirty: return parse_version((str(version) + '+dirty')) return None rev = git.rev_parse('HEAD') assert rev, git local = '+{}.g{}{}'.format(distance, rev[:7], ('.dirty' if dirty else '')) return parse_version((str(version) + local))
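A standalone sketch of the local-version string this function assembles; it assumes Version/parse come from the packaging library, and the revision value is made up:

from packaging.version import Version, parse as parse_version

base, distance, rev, dirty = Version('0.1.0'), 24, 'd9ade3f0c0ffee', True  # hypothetical repo state
local = '+{}.g{}{}'.format(distance, rev[:7], '.dirty' if dirty else '')
print(parse_version(str(base) + local))  # 0.1.0+24.gd9ade3f.dirty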
def extend_with_language_model(BaseModel): '添加下三角的Attention Mask(语言模型用)\n ' class LanguageModel(LM_Mask, BaseModel): '带下三角Attention Mask的派生模型\n ' def __init__(self, *args, **kwargs): super(LanguageModel, self).__init__(*args, **kwargs) self.with_mlm = (self.with_mlm or True) return LanguageModel
-374,363,184,057,374,100
Adds the lower-triangular Attention Mask (for language models)
bert4keras/models.py
extend_with_language_model
CurisZhou/bert4keras
python
def extend_with_language_model(BaseModel): '\n ' class LanguageModel(LM_Mask, BaseModel): '带下三角Attention Mask的派生模型\n ' def __init__(self, *args, **kwargs): super(LanguageModel, self).__init__(*args, **kwargs) self.with_mlm = (self.with_mlm or True) return LanguageModel
def extend_with_unified_language_model(BaseModel): '添加UniLM的Attention Mask(Seq2Seq模型用)\n ' class UnifiedLanguageModel(UniLM_Mask, BaseModel): '带UniLM的Attention Mask的派生模型\n UniLM: https://arxiv.org/abs/1905.03197\n ' def __init__(self, *args, **kwargs): super(UnifiedLanguageModel, self).__init__(*args, **kwargs) self.with_mlm = (self.with_mlm or True) return UnifiedLanguageModel
-1,459,502,272,783,444,500
Adds the UniLM Attention Mask (for Seq2Seq models)
bert4keras/models.py
extend_with_unified_language_model
CurisZhou/bert4keras
python
def extend_with_unified_language_model(BaseModel): '\n ' class UnifiedLanguageModel(UniLM_Mask, BaseModel): '带UniLM的Attention Mask的派生模型\n UniLM: https://arxiv.org/abs/1905.03197\n ' def __init__(self, *args, **kwargs): super(UnifiedLanguageModel, self).__init__(*args, **kwargs) self.with_mlm = (self.with_mlm or True) return UnifiedLanguageModel
def build_transformer_model(config_path=None, checkpoint_path=None, model='bert', application='encoder', return_keras_model=True, **kwargs): '根据配置文件构建模型,可选加载checkpoint权重\n ' configs = {} if (config_path is not None): configs.update(json.load(open(config_path))) configs.update(kwargs) if ('max_position' not in configs): configs['max_position'] = configs.get('max_position_embeddings', 512) if ('dropout_rate' not in configs): configs['dropout_rate'] = configs.get('hidden_dropout_prob') if ('segment_vocab_size' not in configs): configs['segment_vocab_size'] = configs.get('type_vocab_size', 2) models = {'bert': BERT, 'albert': ALBERT, 'albert_unshared': ALBERT_Unshared, 'roberta': BERT, 'nezha': NEZHA, 'electra': ELECTRA, 'gpt': GPT, 'gpt2': GPT2, 'gpt2_ml': GPT2_ML, 't5': T5, 't5_encoder': T5_Encoder, 't5_decoder': T5_Decoder, 't5.1.0': T5, 't5.1.0_encoder': T5_Encoder, 't5.1.0_decoder': T5_Decoder, 't5.1.1': T5, 't5.1.1_encoder': T5_Encoder, 't5.1.1_decoder': T5_Decoder} if is_string(model): model = model.lower() MODEL = models[model] else: MODEL = model application = application.lower() if ((application in ['lm', 'unilm']) and (model in ['electra', 't5'])): raise ValueError(('"%s" model can not be used as "%s" application.\n' % (model, application))) if (application == 'lm'): MODEL = extend_with_language_model(MODEL) elif (application == 'unilm'): MODEL = extend_with_unified_language_model(MODEL) if model.startswith('t5.1.1'): configs['version'] = 't5.1.1' transformer = MODEL(**configs) transformer.build(**configs) if (checkpoint_path is not None): transformer.load_weights_from_checkpoint(checkpoint_path) if return_keras_model: return transformer.model else: return transformer
-4,409,276,050,895,325,000
Builds the model from a config file, optionally loading checkpoint weights
bert4keras/models.py
build_transformer_model
CurisZhou/bert4keras
python
def build_transformer_model(config_path=None, checkpoint_path=None, model='bert', application='encoder', return_keras_model=True, **kwargs): '\n ' configs = {} if (config_path is not None): configs.update(json.load(open(config_path))) configs.update(kwargs) if ('max_position' not in configs): configs['max_position'] = configs.get('max_position_embeddings', 512) if ('dropout_rate' not in configs): configs['dropout_rate'] = configs.get('hidden_dropout_prob') if ('segment_vocab_size' not in configs): configs['segment_vocab_size'] = configs.get('type_vocab_size', 2) models = {'bert': BERT, 'albert': ALBERT, 'albert_unshared': ALBERT_Unshared, 'roberta': BERT, 'nezha': NEZHA, 'electra': ELECTRA, 'gpt': GPT, 'gpt2': GPT2, 'gpt2_ml': GPT2_ML, 't5': T5, 't5_encoder': T5_Encoder, 't5_decoder': T5_Decoder, 't5.1.0': T5, 't5.1.0_encoder': T5_Encoder, 't5.1.0_decoder': T5_Decoder, 't5.1.1': T5, 't5.1.1_encoder': T5_Encoder, 't5.1.1_decoder': T5_Decoder} if is_string(model): model = model.lower() MODEL = models[model] else: MODEL = model application = application.lower() if ((application in ['lm', 'unilm']) and (model in ['electra', 't5'])): raise ValueError(('"%s" model can not be used as "%s" application.\n' % (model, application))) if (application == 'lm'): MODEL = extend_with_language_model(MODEL) elif (application == 'unilm'): MODEL = extend_with_unified_language_model(MODEL) if model.startswith('t5.1.1'): configs['version'] = 't5.1.1' transformer = MODEL(**configs) transformer.build(**configs) if (checkpoint_path is not None): transformer.load_weights_from_checkpoint(checkpoint_path) if return_keras_model: return transformer.model else: return transformer
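A hedged usage sketch; the config and checkpoint paths are placeholders for a downloaded BERT release:

# Placeholders -- point these at a real pretrained checkpoint.
config_path = 'bert/bert_config.json'
checkpoint_path = 'bert/bert_model.ckpt'

model = build_transformer_model(
    config_path=config_path,
    checkpoint_path=checkpoint_path,
    model='bert',
    application='unilm',      # adds the UniLM attention mask for seq2seq
    return_keras_model=True)  # a plain keras.Model, ready for predict/fit
model.summary()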
def build(self, attention_caches=None, layer_norm_cond=None, layer_norm_cond_hidden_size=None, layer_norm_cond_hidden_act=None, additional_input_layers=None, **kwargs): '模型构建函数\n attention_caches:为Attention的K,V的缓存序列字典,格式为\n {Attention层名: [K缓存, V缓存]};\n layer_norm_*系列参数:实现Conditional Layer Normalization时使用,\n 用来实现以“固定长度向量”为条件的条件Bert。\n ' if self.built: return None inputs = self.get_inputs() self.set_inputs(inputs, additional_input_layers) self.attention_caches = (attention_caches or {}) self.layer_norm_conds = [layer_norm_cond, layer_norm_cond_hidden_size, (layer_norm_cond_hidden_act or 'linear')] outputs = self.call(inputs) self.set_outputs(outputs) self.model = Model(self.inputs, self.outputs, name=self.name) self.built = True
3,740,199,043,613,940,000
Model build function. attention_caches: dict of cached K/V sequences for Attention, in the format {attention layer name: [K cache, V cache]}; the layer_norm_* parameters are used when implementing Conditional Layer Normalization, i.e. a conditional BERT conditioned on a fixed-length vector.
bert4keras/models.py
build
CurisZhou/bert4keras
python
def build(self, attention_caches=None, layer_norm_cond=None, layer_norm_cond_hidden_size=None, layer_norm_cond_hidden_act=None, additional_input_layers=None, **kwargs): '模型构建函数\n attention_caches:为Attention的K,V的缓存序列字典,格式为\n {Attention层名: [K缓存, V缓存]};\n layer_norm_*系列参数:实现Conditional Layer Normalization时使用,\n 用来实现以“固定长度向量”为条件的条件Bert。\n ' if self.built: return None inputs = self.get_inputs() self.set_inputs(inputs, additional_input_layers) self.attention_caches = (attention_caches or {}) self.layer_norm_conds = [layer_norm_cond, layer_norm_cond_hidden_size, (layer_norm_cond_hidden_act or 'linear')] outputs = self.call(inputs) self.set_outputs(outputs) self.model = Model(self.inputs, self.outputs, name=self.name) self.built = True
def call(self, inputs): '定义模型的执行流程\n ' outputs = self.apply_embeddings(inputs) for i in range(self.num_hidden_layers): outputs = self.apply_main_layers(outputs, i) outputs = self.apply_final_layers(outputs) return outputs
-6,229,499,654,623,413,000
Defines the model's execution flow
bert4keras/models.py
call
CurisZhou/bert4keras
python
def call(self, inputs): '\n ' outputs = self.apply_embeddings(inputs) for i in range(self.num_hidden_layers): outputs = self.apply_main_layers(outputs, i) outputs = self.apply_final_layers(outputs) return outputs
def prefixed(self, name): '给名字加前缀\n ' if (name is not None): return (self.prefix + name)
-4,764,178,677,985,925,000
Adds the prefix to a name
bert4keras/models.py
prefixed
CurisZhou/bert4keras
python
def prefixed(self, name): '\n ' if (name is not None): return (self.prefix + name)
def apply(self, inputs=None, layer=None, arguments=None, **kwargs): '通过apply调用层会自动重用同名层\n inputs: 上一层的输出;\n layer: 要调用的层类名;\n arguments: 传递给layer.call的参数;\n kwargs: 传递给层初始化的参数。\n ' if ((layer is Dropout) and (self.dropout_rate == 0)): return inputs if ((layer is MultiHeadAttention) and self.residual_attention_scores): kwargs['return_attention_scores'] = True arguments = (arguments or {}) name = self.prefixed(kwargs.get('name')) kwargs['name'] = name if (name not in self.layers): layer = layer(**kwargs) name = layer.name self.layers[name] = layer if (inputs is None): return self.layers[name] else: if isinstance(self.layers[name], MultiHeadAttention): if (name in self.attention_caches): (k_cache, v_cache) = self.attention_caches[name] (k_name, v_name) = ((name + '-Cached-Key'), (name + '-Cached-Value')) k = Concatenate1D(name=k_name)([k_cache, inputs[1]]) v = Concatenate1D(name=v_name)([v_cache, inputs[2]]) inputs = ((inputs[:1] + [k, v]) + inputs[3:]) if self.residual_attention_scores: if (self.attention_scores is not None): if arguments.get('a_bias'): a_bias = Add(name=(name + '-Attention-Bias'))([inputs[3], self.attention_scores]) else: a_bias = self.attention_scores inputs = ((inputs[:3] + [a_bias]) + inputs[4:]) arguments['a_bias'] = True (o, a) = self.layers[name](inputs, **arguments) self.attention_scores = a return o return self.layers[name](inputs, **arguments)
-4,756,943,625,493,727,000
Calling a layer through apply automatically reuses layers with the same name. inputs: output of the previous layer; layer: the layer class to call; arguments: parameters passed to layer.call; kwargs: parameters passed to the layer's initializer.
bert4keras/models.py
apply
CurisZhou/bert4keras
python
def apply(self, inputs=None, layer=None, arguments=None, **kwargs): '通过apply调用层会自动重用同名层\n inputs: 上一层的输出;\n layer: 要调用的层类名;\n arguments: 传递给layer.call的参数;\n kwargs: 传递给层初始化的参数。\n ' if ((layer is Dropout) and (self.dropout_rate == 0)): return inputs if ((layer is MultiHeadAttention) and self.residual_attention_scores): kwargs['return_attention_scores'] = True arguments = (arguments or {}) name = self.prefixed(kwargs.get('name')) kwargs['name'] = name if (name not in self.layers): layer = layer(**kwargs) name = layer.name self.layers[name] = layer if (inputs is None): return self.layers[name] else: if isinstance(self.layers[name], MultiHeadAttention): if (name in self.attention_caches): (k_cache, v_cache) = self.attention_caches[name] (k_name, v_name) = ((name + '-Cached-Key'), (name + '-Cached-Value')) k = Concatenate1D(name=k_name)([k_cache, inputs[1]]) v = Concatenate1D(name=v_name)([v_cache, inputs[2]]) inputs = ((inputs[:1] + [k, v]) + inputs[3:]) if self.residual_attention_scores: if (self.attention_scores is not None): if arguments.get('a_bias'): a_bias = Add(name=(name + '-Attention-Bias'))([inputs[3], self.attention_scores]) else: a_bias = self.attention_scores inputs = ((inputs[:3] + [a_bias]) + inputs[4:]) arguments['a_bias'] = True (o, a) = self.layers[name](inputs, **arguments) self.attention_scores = a return o return self.layers[name](inputs, **arguments)
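Name-based reuse is the core trick here: a second apply() with the same name returns the cached layer, so weights are shared across calls. A standalone sketch of that mechanism (not the library's apply itself), assuming tensorflow.keras:

from tensorflow import keras

layers = {}  # name -> constructed layer, mirroring self.layers

def apply(inputs, layer_cls, **kwargs):
    name = kwargs['name']
    if name not in layers:          # construct once per name...
        layers[name] = layer_cls(**kwargs)
    return layers[name](inputs)     # ...then reuse on every later call

x = keras.Input(shape=(8,))
h1 = apply(x, keras.layers.Dense, units=8, name='Shared')
h2 = apply(h1, keras.layers.Dense, units=8, name='Shared')  # same weights
print(len(layers))  # 1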
def compute_attention_bias(self, inputs=None): '定义每一层的Attention Bias\n ' return self.attention_bias
-1,749,003,794,285,315,800
Defines the Attention Bias for each layer
bert4keras/models.py
compute_attention_bias
CurisZhou/bert4keras
python
def compute_attention_bias(self, inputs=None): '\n ' return self.attention_bias
def compute_position_bias(self, inputs=None): '定义每一层的Position Bias(一般相对位置编码用)\n ' return self.position_bias
-8,838,572,378,965,173,000
Defines the Position Bias for each layer (generally used for relative position encoding)
bert4keras/models.py
compute_position_bias
CurisZhou/bert4keras
python
def compute_position_bias(self, inputs=None): '\n ' return self.position_bias
def set_inputs(self, inputs, additional_input_layers=None): '设置input和inputs属性\n ' if (inputs is None): inputs = [] elif (not isinstance(inputs, list)): inputs = [inputs] inputs = inputs[:] if (additional_input_layers is not None): if (not isinstance(additional_input_layers, list)): additional_input_layers = [additional_input_layers] inputs.extend(additional_input_layers) self.inputs = inputs if (len(inputs) > 1): self.input = inputs else: self.input = inputs[0]
-2,619,954,512,554,763,300
Sets the input and inputs attributes
bert4keras/models.py
set_inputs
CurisZhou/bert4keras
python
def set_inputs(self, inputs, additional_input_layers=None): '\n ' if (inputs is None): inputs = [] elif (not isinstance(inputs, list)): inputs = [inputs] inputs = inputs[:] if (additional_input_layers is not None): if (not isinstance(additional_input_layers, list)): additional_input_layers = [additional_input_layers] inputs.extend(additional_input_layers) self.inputs = inputs if (len(inputs) > 1): self.input = inputs else: self.input = inputs[0]
def set_outputs(self, outputs): '设置output和oututs属性\n ' if (not isinstance(outputs, list)): outputs = [outputs] outputs = outputs[:] self.outputs = outputs if (len(outputs) > 1): self.output = outputs else: self.output = outputs[0]
-4,584,545,269,230,479,000
Sets the output and outputs attributes
bert4keras/models.py
set_outputs
CurisZhou/bert4keras
python
def set_outputs(self, outputs): '\n ' if (not isinstance(outputs, list)): outputs = [outputs] outputs = outputs[:] self.outputs = outputs if (len(outputs) > 1): self.output = outputs else: self.output = outputs[0]
@property def initializer(self): '默认使用截断正态分布初始化\n ' return keras.initializers.TruncatedNormal(stddev=0.02)
7,058,008,652,299,686,000
Uses truncated normal initialization by default
bert4keras/models.py
initializer
CurisZhou/bert4keras
python
@property def initializer(self): '\n ' return keras.initializers.TruncatedNormal(stddev=0.02)
def simplify(self, inputs): '将list中的None过滤掉\n ' inputs = [i for i in inputs if (i is not None)] if (len(inputs) == 1): inputs = inputs[0] return inputs
887,025,803,912,393,000
Filters out None entries from a list
bert4keras/models.py
simplify
CurisZhou/bert4keras
python
def simplify(self, inputs): '\n ' inputs = [i for i in inputs if (i is not None)] if (len(inputs) == 1): inputs = inputs[0] return inputs
def load_embeddings(self, embeddings): '处理Embedding层权重\n ' if (self.keep_tokens is not None): embeddings = embeddings[self.keep_tokens] if (self.compound_tokens is not None): ext_embeddings = [] for item in self.compound_tokens: if isinstance(item, list): item = (item, ([1] * len(item))) ext_embeddings.append(np.average(embeddings[item[0]], 0, item[1])) embeddings = np.concatenate([embeddings, ext_embeddings], 0) return embeddings
-4,447,431,261,012,996,600
Processes the Embedding layer weights
bert4keras/models.py
load_embeddings
CurisZhou/bert4keras
python
def load_embeddings(self, embeddings): '\n ' if (self.keep_tokens is not None): embeddings = embeddings[self.keep_tokens] if (self.compound_tokens is not None): ext_embeddings = [] for item in self.compound_tokens: if isinstance(item, list): item = (item, ([1] * len(item))) ext_embeddings.append(np.average(embeddings[item[0]], 0, item[1])) embeddings = np.concatenate([embeddings, ext_embeddings], 0) return embeddings
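A small numpy sketch of the compound-token branch: each synthetic token's embedding is a (possibly weighted) average of existing rows:

import numpy as np

embeddings = np.arange(12, dtype=float).reshape(4, 3)  # 4 tokens, dim 3
compound_tokens = [[1, 2], ([0, 3], [1, 3])]           # plain and weighted forms

ext = []
for item in compound_tokens:
    if isinstance(item, list):
        item = (item, [1] * len(item))                 # uniform weights
    ext.append(np.average(embeddings[item[0]], 0, item[1]))
print(np.concatenate([embeddings, ext], 0).shape)      # (6, 3)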
def load_variable(self, checkpoint, name): '加载单个变量的函数\n ' if isinstance(checkpoint, dict): return checkpoint[name] else: return tf.train.load_variable(checkpoint, name)
2,776,577,819,828,255,000
Function for loading a single variable
bert4keras/models.py
load_variable
CurisZhou/bert4keras
python
def load_variable(self, checkpoint, name): '\n ' if isinstance(checkpoint, dict): return checkpoint[name] else: return tf.train.load_variable(checkpoint, name)
def create_variable(self, name, value, dtype=None): '创建一个变量\n ' dtype = (dtype or K.floatx()) return (K.variable(self.initializer(value.shape, dtype), dtype, name=name), value)
-6,833,356,355,497,773,000
Creates a variable
bert4keras/models.py
create_variable
CurisZhou/bert4keras
python
def create_variable(self, name, value, dtype=None): '\n ' dtype = (dtype or K.floatx()) return (K.variable(self.initializer(value.shape, dtype), dtype, name=name), value)
def variable_mapping(self): '构建keras层与checkpoint的变量名之间的映射表\n ' return {}
-7,573,230,003,583,619,000
Builds the mapping between Keras layers and checkpoint variable names
bert4keras/models.py
variable_mapping
CurisZhou/bert4keras
python
def variable_mapping(self): '\n ' return {}
def load_weights_from_checkpoint(self, checkpoint, mapping=None): '根据mapping从checkpoint加载权重\n ' mapping = (mapping or self.variable_mapping()) mapping = {self.prefixed(k): v for (k, v) in mapping.items()} mapping = {k: v for (k, v) in mapping.items() if (k in self.layers)} weight_value_pairs = [] for (layer, variables) in mapping.items(): layer = self.layers[layer] weights = layer.trainable_weights values = [self.load_variable(checkpoint, v) for v in variables] if isinstance(layer, MultiHeadAttention): '如果key_size不等于head_size,则可以通过\n 正交矩阵将相应的权重投影到合适的shape。\n ' count = 2 if layer.use_bias: count += 2 heads = self.num_attention_heads head_size = self.attention_head_size key_size = self.attention_key_size W = np.linalg.qr(np.random.randn(key_size, head_size))[0].T if layer.attention_scale: W = ((W * (key_size ** 0.25)) / (head_size ** 0.25)) for i in range(count): (w, v) = (weights[i], values[i]) (w_shape, v_shape) = (K.int_shape(w), v.shape) if (w_shape[(- 1)] != v_shape[(- 1)]): pre_shape = w_shape[:(- 1)] v = v.reshape((pre_shape + (heads, head_size))) v = np.dot(v, W) v = v.reshape((pre_shape + ((heads * key_size),))) values[i] = v weight_value_pairs.extend(zip(weights, values)) K.batch_set_value(weight_value_pairs)
-816,479,244,274,387,300
Loads weights from a checkpoint according to the mapping
bert4keras/models.py
load_weights_from_checkpoint
CurisZhou/bert4keras
python
def load_weights_from_checkpoint(self, checkpoint, mapping=None): '\n ' mapping = (mapping or self.variable_mapping()) mapping = {self.prefixed(k): v for (k, v) in mapping.items()} mapping = {k: v for (k, v) in mapping.items() if (k in self.layers)} weight_value_pairs = [] for (layer, variables) in mapping.items(): layer = self.layers[layer] weights = layer.trainable_weights values = [self.load_variable(checkpoint, v) for v in variables] if isinstance(layer, MultiHeadAttention): '如果key_size不等于head_size,则可以通过\n 正交矩阵将相应的权重投影到合适的shape。\n ' count = 2 if layer.use_bias: count += 2 heads = self.num_attention_heads head_size = self.attention_head_size key_size = self.attention_key_size W = np.linalg.qr(np.random.randn(key_size, head_size))[0].T if layer.attention_scale: W = ((W * (key_size ** 0.25)) / (head_size ** 0.25)) for i in range(count): (w, v) = (weights[i], values[i]) (w_shape, v_shape) = (K.int_shape(w), v.shape) if (w_shape[(- 1)] != v_shape[(- 1)]): pre_shape = w_shape[:(- 1)] v = v.reshape((pre_shape + (heads, head_size))) v = np.dot(v, W) v = v.reshape((pre_shape + ((heads * key_size),))) values[i] = v weight_value_pairs.extend(zip(weights, values)) K.batch_set_value(weight_value_pairs)
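The key_size != head_size branch projects checkpoint weights through a random orthonormal matrix. A shape-level numpy sketch of that projection:

import numpy as np

head_size, key_size, heads = 4, 6, 2
W = np.linalg.qr(np.random.randn(key_size, head_size))[0].T  # (head_size, key_size), orthonormal rows
v = np.random.randn(8, heads, head_size)                     # a checkpoint weight slice
print(np.dot(v, W).reshape(8, heads * key_size).shape)       # (8, 12)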
def save_weights_as_checkpoint(self, filename, mapping=None, dtype=None): '根据mapping将权重保存为checkpoint格式\n ' mapping = (mapping or self.variable_mapping()) mapping = {self.prefixed(k): v for (k, v) in mapping.items()} mapping = {k: v for (k, v) in mapping.items() if (k in self.layers)} with tf.Graph().as_default(): (all_variables, all_values) = ([], []) for (layer, variables) in mapping.items(): layer = self.layers[layer] values = K.batch_get_value(layer.trainable_weights) for (name, value) in zip(variables, values): (variable, value) = self.create_variable(name, value, dtype) all_variables.append(variable) all_values.append(value) with tf.Session() as sess: K.batch_set_value(zip(all_variables, all_values)) saver = tf.train.Saver() saver.save(sess, filename)
6,964,417,971,275,611,000
Saves weights in checkpoint format according to the mapping
bert4keras/models.py
save_weights_as_checkpoint
CurisZhou/bert4keras
python
def save_weights_as_checkpoint(self, filename, mapping=None, dtype=None): '\n ' mapping = (mapping or self.variable_mapping()) mapping = {self.prefixed(k): v for (k, v) in mapping.items()} mapping = {k: v for (k, v) in mapping.items() if (k in self.layers)} with tf.Graph().as_default(): (all_variables, all_values) = ([], []) for (layer, variables) in mapping.items(): layer = self.layers[layer] values = K.batch_get_value(layer.trainable_weights) for (name, value) in zip(variables, values): (variable, value) = self.create_variable(name, value, dtype) all_variables.append(variable) all_values.append(value) with tf.Session() as sess: K.batch_set_value(zip(all_variables, all_values)) saver = tf.train.Saver() saver.save(sess, filename)
def compute_attention_bias(self, inputs=None): '通过idxs序列的比较来得到对应的mask\n ' if (self.attention_bias is None): def lm_mask(s): seq_len = K.shape(s)[1] idxs = K.arange(0, seq_len) mask = (idxs[None, :] <= idxs[:, None]) mask = K.cast(mask, K.floatx()) return ((- (1 - mask[(None, None)])) * 1000000000000.0) self.attention_bias = self.apply(inputs=self.inputs[0], layer=Lambda, function=lm_mask, name='Attention-LM-Mask') return self.attention_bias
85,604,987,621,142,820
Obtains the corresponding mask by comparing idxs sequences
bert4keras/models.py
compute_attention_bias
CurisZhou/bert4keras
python
def compute_attention_bias(self, inputs=None): '\n ' if (self.attention_bias is None): def lm_mask(s): seq_len = K.shape(s)[1] idxs = K.arange(0, seq_len) mask = (idxs[None, :] <= idxs[:, None]) mask = K.cast(mask, K.floatx()) return ((- (1 - mask[(None, None)])) * 1000000000000.0) self.attention_bias = self.apply(inputs=self.inputs[0], layer=Lambda, function=lm_mask, name='Attention-LM-Mask') return self.attention_bias
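The same mask in plain numpy, to make the lower-triangular structure concrete:

import numpy as np

seq_len = 4
idxs = np.arange(seq_len)
mask = (idxs[None, :] <= idxs[:, None]).astype(float)
print(mask)   # ones on and below the diagonal:
# [[1. 0. 0. 0.]
#  [1. 1. 0. 0.]
#  [1. 1. 1. 0.]
#  [1. 1. 1. 1.]]
# The attention bias added to the logits is -(1 - mask) * 1e12.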
def compute_attention_bias(self, inputs=None): '通过idxs序列的比较来得到对应的mask\n ' if (self.attention_bias is None): def unilm_mask(s): idxs = K.cumsum(s, axis=1) mask = (idxs[:, None, :] <= idxs[:, :, None]) mask = K.cast(mask, K.floatx()) return ((- (1 - mask[:, None])) * 1000000000000.0) self.attention_bias = self.apply(inputs=self.inputs[1], layer=Lambda, function=unilm_mask, name='Attention-UniLM-Mask') return self.attention_bias
5,273,371,874,282,328,000
Obtains the corresponding mask by comparing idxs sequences
bert4keras/models.py
compute_attention_bias
CurisZhou/bert4keras
python
def compute_attention_bias(self, inputs=None): '\n ' if (self.attention_bias is None): def unilm_mask(s): idxs = K.cumsum(s, axis=1) mask = (idxs[:, None, :] <= idxs[:, :, None]) mask = K.cast(mask, K.floatx()) return ((- (1 - mask[:, None])) * 1000000000000.0) self.attention_bias = self.apply(inputs=self.inputs[1], layer=Lambda, function=unilm_mask, name='Attention-UniLM-Mask') return self.attention_bias
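And the UniLM variant in plain numpy: segment-0 tokens attend bidirectionally among themselves, while segment-1 tokens attend causally over everything before them:

import numpy as np

s = np.array([[0, 0, 1, 1]])    # segment ids for one sequence
idxs = np.cumsum(s, axis=1)     # [[0, 0, 1, 2]]
mask = (idxs[:, None, :] <= idxs[:, :, None]).astype(float)
print(mask[0])
# [[1. 1. 0. 0.]
#  [1. 1. 0. 0.]
#  [1. 1. 1. 0.]
#  [1. 1. 1. 1.]]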
def get_inputs(self): 'BERT的输入是token_ids和segment_ids\n (但允许自行传入位置id,以实现一些特殊需求)\n ' x_in = self.apply(layer=Input, shape=(self.sequence_length,), name='Input-Token') inputs = [x_in] if (self.segment_vocab_size > 0): s_in = self.apply(layer=Input, shape=(self.sequence_length,), name='Input-Segment') inputs.append(s_in) if self.custom_position_ids: p_in = self.apply(layer=Input, shape=(self.sequence_length,), name='Input-Position') inputs.append(p_in) return inputs
7,269,396,549,803,590,000
BERT's inputs are token_ids and segment_ids (position ids may also be passed in manually, for special needs)
bert4keras/models.py
get_inputs
CurisZhou/bert4keras
python
def get_inputs(self): 'BERT的输入是token_ids和segment_ids\n (但允许自行传入位置id,以实现一些特殊需求)\n ' x_in = self.apply(layer=Input, shape=(self.sequence_length,), name='Input-Token') inputs = [x_in] if (self.segment_vocab_size > 0): s_in = self.apply(layer=Input, shape=(self.sequence_length,), name='Input-Segment') inputs.append(s_in) if self.custom_position_ids: p_in = self.apply(layer=Input, shape=(self.sequence_length,), name='Input-Position') inputs.append(p_in) return inputs
def apply_embeddings(self, inputs): 'BERT的embedding是token、position、segment三者embedding之和\n ' inputs = inputs[:] x = inputs.pop(0) if (self.segment_vocab_size > 0): s = inputs.pop(0) if self.custom_position_ids: p = inputs.pop(0) else: p = None z = self.layer_norm_conds[0] x = self.apply(inputs=x, layer=Embedding, input_dim=self.vocab_size, output_dim=self.embedding_size, embeddings_initializer=self.initializer, mask_zero=True, name='Embedding-Token') if (self.segment_vocab_size > 0): if self.shared_segment_embeddings: name = 'Embedding-Token' else: name = 'Embedding-Segment' s = self.apply(inputs=s, layer=Embedding, input_dim=self.segment_vocab_size, output_dim=self.embedding_size, embeddings_initializer=self.initializer, name=name) x = self.apply(inputs=[x, s], layer=Add, name='Embedding-Token-Segment') x = self.apply(inputs=self.simplify([x, p]), layer=PositionEmbedding, input_dim=self.max_position, output_dim=self.embedding_size, merge_mode='add', hierarchical=self.hierarchical_position, embeddings_initializer=self.initializer, custom_position_ids=self.custom_position_ids, name='Embedding-Position') x = self.apply(inputs=self.simplify([x, z]), layer=LayerNormalization, conditional=(z is not None), hidden_units=self.layer_norm_conds[1], hidden_activation=self.layer_norm_conds[2], hidden_initializer=self.initializer, name='Embedding-Norm') x = self.apply(inputs=x, layer=Dropout, rate=self.dropout_rate, name='Embedding-Dropout') if (self.embedding_size != self.hidden_size): x = self.apply(inputs=x, layer=Dense, units=self.hidden_size, kernel_initializer=self.initializer, name='Embedding-Mapping') return x
1,239,644,505,264,093,200
BERT's embedding is the sum of the token, position and segment embeddings
bert4keras/models.py
apply_embeddings
CurisZhou/bert4keras
python
def apply_embeddings(self, inputs): '\n ' inputs = inputs[:] x = inputs.pop(0) if (self.segment_vocab_size > 0): s = inputs.pop(0) if self.custom_position_ids: p = inputs.pop(0) else: p = None z = self.layer_norm_conds[0] x = self.apply(inputs=x, layer=Embedding, input_dim=self.vocab_size, output_dim=self.embedding_size, embeddings_initializer=self.initializer, mask_zero=True, name='Embedding-Token') if (self.segment_vocab_size > 0): if self.shared_segment_embeddings: name = 'Embedding-Token' else: name = 'Embedding-Segment' s = self.apply(inputs=s, layer=Embedding, input_dim=self.segment_vocab_size, output_dim=self.embedding_size, embeddings_initializer=self.initializer, name=name) x = self.apply(inputs=[x, s], layer=Add, name='Embedding-Token-Segment') x = self.apply(inputs=self.simplify([x, p]), layer=PositionEmbedding, input_dim=self.max_position, output_dim=self.embedding_size, merge_mode='add', hierarchical=self.hierarchical_position, embeddings_initializer=self.initializer, custom_position_ids=self.custom_position_ids, name='Embedding-Position') x = self.apply(inputs=self.simplify([x, z]), layer=LayerNormalization, conditional=(z is not None), hidden_units=self.layer_norm_conds[1], hidden_activation=self.layer_norm_conds[2], hidden_initializer=self.initializer, name='Embedding-Norm') x = self.apply(inputs=x, layer=Dropout, rate=self.dropout_rate, name='Embedding-Dropout') if (self.embedding_size != self.hidden_size): x = self.apply(inputs=x, layer=Dense, units=self.hidden_size, kernel_initializer=self.initializer, name='Embedding-Mapping') return x
def apply_main_layers(self, inputs, index): 'The main body of BERT is a module based on Self-Attention\n Order: Att --> Add --> LN --> FFN --> Add --> LN\n ' x = inputs z = self.layer_norm_conds[0] attention_name = ('Transformer-%d-MultiHeadSelfAttention' % index) feed_forward_name = ('Transformer-%d-FeedForward' % index) attention_mask = self.compute_attention_bias(index) (xi, x, arguments) = (x, [x, x, x], {'a_bias': None}) if (attention_mask is not None): arguments['a_bias'] = True x.append(attention_mask) x = self.apply(inputs=x, layer=MultiHeadAttention, arguments=arguments, heads=self.num_attention_heads, head_size=self.attention_head_size, out_dim=self.hidden_size, key_size=self.attention_key_size, kernel_initializer=self.initializer, name=attention_name) x = self.apply(inputs=x, layer=Dropout, rate=self.dropout_rate, name=('%s-Dropout' % attention_name)) x = self.apply(inputs=[xi, x], layer=Add, name=('%s-Add' % attention_name)) x = self.apply(inputs=self.simplify([x, z]), layer=LayerNormalization, conditional=(z is not None), hidden_units=self.layer_norm_conds[1], hidden_activation=self.layer_norm_conds[2], hidden_initializer=self.initializer, name=('%s-Norm' % attention_name)) xi = x x = self.apply(inputs=x, layer=FeedForward, units=self.intermediate_size, activation=self.hidden_act, kernel_initializer=self.initializer, name=feed_forward_name) x = self.apply(inputs=x, layer=Dropout, rate=self.dropout_rate, name=('%s-Dropout' % feed_forward_name)) x = self.apply(inputs=[xi, x], layer=Add, name=('%s-Add' % feed_forward_name)) x = self.apply(inputs=self.simplify([x, z]), layer=LayerNormalization, conditional=(z is not None), hidden_units=self.layer_norm_conds[1], hidden_activation=self.layer_norm_conds[2], hidden_initializer=self.initializer, name=('%s-Norm' % feed_forward_name)) return x
2,721,950,488,057,795,000
The main body of BERT is a module based on Self-Attention. Order: Att --> Add --> LN --> FFN --> Add --> LN
bert4keras/models.py
apply_main_layers
CurisZhou/bert4keras
python
def apply_main_layers(self, inputs, index): '\n ' x = inputs z = self.layer_norm_conds[0] attention_name = ('Transformer-%d-MultiHeadSelfAttention' % index) feed_forward_name = ('Transformer-%d-FeedForward' % index) attention_mask = self.compute_attention_bias(index) (xi, x, arguments) = (x, [x, x, x], {'a_bias': None}) if (attention_mask is not None): arguments['a_bias'] = True x.append(attention_mask) x = self.apply(inputs=x, layer=MultiHeadAttention, arguments=arguments, heads=self.num_attention_heads, head_size=self.attention_head_size, out_dim=self.hidden_size, key_size=self.attention_key_size, kernel_initializer=self.initializer, name=attention_name) x = self.apply(inputs=x, layer=Dropout, rate=self.dropout_rate, name=('%s-Dropout' % attention_name)) x = self.apply(inputs=[xi, x], layer=Add, name=('%s-Add' % attention_name)) x = self.apply(inputs=self.simplify([x, z]), layer=LayerNormalization, conditional=(z is not None), hidden_units=self.layer_norm_conds[1], hidden_activation=self.layer_norm_conds[2], hidden_initializer=self.initializer, name=('%s-Norm' % attention_name)) xi = x x = self.apply(inputs=x, layer=FeedForward, units=self.intermediate_size, activation=self.hidden_act, kernel_initializer=self.initializer, name=feed_forward_name) x = self.apply(inputs=x, layer=Dropout, rate=self.dropout_rate, name=('%s-Dropout' % feed_forward_name)) x = self.apply(inputs=[xi, x], layer=Add, name=('%s-Add' % feed_forward_name)) x = self.apply(inputs=self.simplify([x, z]), layer=LayerNormalization, conditional=(z is not None), hidden_units=self.layer_norm_conds[1], hidden_activation=self.layer_norm_conds[2], hidden_initializer=self.initializer, name=('%s-Norm' % feed_forward_name)) return x
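apply_main_layers wires each encoder block in post-LN order: attention, residual add, LayerNorm, then feed-forward, residual add, LayerNorm. A rough functional sketch of that ordering using tf.keras's built-in MultiHeadAttention (not bert4keras's own MultiHeadAttention layer; all sizes are arbitrary):

import tensorflow as tf
from tensorflow.keras import layers

def post_ln_block(x, heads=4, key_dim=16, ffn_units=64, rate=0.1):
    # Att --> Add --> LN
    a = layers.MultiHeadAttention(num_heads=heads, key_dim=key_dim)(x, x)
    a = layers.Dropout(rate)(a)
    x = layers.LayerNormalization()(x + a)
    # FFN --> Add --> LN
    f = layers.Dense(ffn_units, activation='gelu')(x)
    f = layers.Dense(x.shape[-1])(f)
    f = layers.Dropout(rate)(f)
    return layers.LayerNormalization()(x + f)

print(post_ln_block(tf.random.normal((2, 8, 32))).shape)  # (2, 8, 32)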
def apply_final_layers(self, inputs): 'Decide the outputs according to the remaining arguments\n ' x = inputs z = self.layer_norm_conds[0] outputs = [x] if self.with_pool: x = outputs[0] x = self.apply(inputs=x, layer=Lambda, function=(lambda x: x[:, 0]), name='Pooler') pool_activation = ('tanh' if (self.with_pool is True) else self.with_pool) x = self.apply(inputs=x, layer=Dense, units=self.hidden_size, activation=pool_activation, kernel_initializer=self.initializer, name='Pooler-Dense') if self.with_nsp: x = self.apply(inputs=x, layer=Dense, units=2, activation='softmax', kernel_initializer=self.initializer, name='NSP-Proba') outputs.append(x) if self.with_mlm: x = outputs[0] x = self.apply(inputs=x, layer=Dense, units=self.embedding_size, activation=self.hidden_act, kernel_initializer=self.initializer, name='MLM-Dense') x = self.apply(inputs=self.simplify([x, z]), layer=LayerNormalization, conditional=(z is not None), hidden_units=self.layer_norm_conds[1], hidden_activation=self.layer_norm_conds[2], hidden_initializer=self.initializer, name='MLM-Norm') x = self.apply(inputs=x, layer=Embedding, arguments={'mode': 'dense'}, name='Embedding-Token') x = self.apply(inputs=x, layer=BiasAdd, name='MLM-Bias') mlm_activation = ('softmax' if (self.with_mlm is True) else self.with_mlm) x = self.apply(inputs=x, layer=Activation, activation=mlm_activation, name='MLM-Activation') outputs.append(x) if (len(outputs) == 1): outputs = outputs[0] elif (len(outputs) == 2): outputs = outputs[1] else: outputs = outputs[1:] return outputs
-7,915,576,084,156,456,000
Decide the outputs according to the remaining arguments
bert4keras/models.py
apply_final_layers
CurisZhou/bert4keras
python
def apply_final_layers(self, inputs): '\n ' x = inputs z = self.layer_norm_conds[0] outputs = [x] if self.with_pool: x = outputs[0] x = self.apply(inputs=x, layer=Lambda, function=(lambda x: x[:, 0]), name='Pooler') pool_activation = ('tanh' if (self.with_pool is True) else self.with_pool) x = self.apply(inputs=x, layer=Dense, units=self.hidden_size, activation=pool_activation, kernel_initializer=self.initializer, name='Pooler-Dense') if self.with_nsp: x = self.apply(inputs=x, layer=Dense, units=2, activation='softmax', kernel_initializer=self.initializer, name='NSP-Proba') outputs.append(x) if self.with_mlm: x = outputs[0] x = self.apply(inputs=x, layer=Dense, units=self.embedding_size, activation=self.hidden_act, kernel_initializer=self.initializer, name='MLM-Dense') x = self.apply(inputs=self.simplify([x, z]), layer=LayerNormalization, conditional=(z is not None), hidden_units=self.layer_norm_conds[1], hidden_activation=self.layer_norm_conds[2], hidden_initializer=self.initializer, name='MLM-Norm') x = self.apply(inputs=x, layer=Embedding, arguments={'mode': 'dense'}, name='Embedding-Token') x = self.apply(inputs=x, layer=BiasAdd, name='MLM-Bias') mlm_activation = ('softmax' if (self.with_mlm is True) else self.with_mlm) x = self.apply(inputs=x, layer=Activation, activation=mlm_activation, name='MLM-Activation') outputs.append(x) if (len(outputs) == 1): outputs = outputs[0] elif (len(outputs) == 2): outputs = outputs[1] else: outputs = outputs[1:] return outputs
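In apply_final_layers, with_pool slices out the first ([CLS]) token and passes it through a Dense layer; with_pool=True means tanh, while any other truthy value is used directly as the activation string. A small standalone illustration of that selection logic (shapes are invented):

import tensorflow as tf

hidden = tf.random.normal((2, 8, 32))  # (batch, seq_len, hidden) encoder output
cls = hidden[:, 0]                     # the 'Pooler' Lambda: first-token slice

with_pool = 'linear'                   # could also be True (-> 'tanh')
pool_activation = 'tanh' if with_pool is True else with_pool
pooled = tf.keras.layers.Dense(32, activation=pool_activation)(cls)
print(pooled.shape)  # (2, 32)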
def load_variable(self, checkpoint, name): 'Function for loading a single variable\n ' variable = super(BERT, self).load_variable(checkpoint, name) if (name in ['bert/embeddings/word_embeddings', 'cls/predictions/output_bias']): return self.load_embeddings(variable) elif (name == 'cls/seq_relationship/output_weights'): return variable.T else: return variable
-4,583,431,683,787,596,300
Function for loading a single variable
bert4keras/models.py
load_variable
CurisZhou/bert4keras
python
def load_variable(self, checkpoint, name): '\n ' variable = super(BERT, self).load_variable(checkpoint, name) if (name in ['bert/embeddings/word_embeddings', 'cls/predictions/output_bias']): return self.load_embeddings(variable) elif (name == 'cls/seq_relationship/output_weights'): return variable.T else: return variable
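load_variable transposes 'cls/seq_relationship/output_weights' because the TF checkpoint stores the NSP classifier as (num_labels, hidden) while a Keras Dense kernel is (hidden, num_labels). A toy illustration of the shape flip (768 is just an example hidden size):

import numpy as np

ckpt_value = np.random.randn(2, 768)  # layout in the TF checkpoint
dense_kernel = ckpt_value.T           # layout a Keras Dense kernel expects
assert dense_kernel.shape == (768, 2)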
def create_variable(self, name, value, dtype=None): 'Create a variable in tensorflow\n ' if (name == 'cls/seq_relationship/output_weights'): value = value.T return super(BERT, self).create_variable(name, value, dtype)
9,075,869,065,190,741,000
Create a variable in tensorflow
bert4keras/models.py
create_variable
CurisZhou/bert4keras
python
def create_variable(self, name, value, dtype=None): '\n ' if (name == 'cls/seq_relationship/output_weights'): value = value.T return super(BERT, self).create_variable(name, value, dtype)
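create_variable is the mirror image of load_variable: before writing back to a checkpoint, the Keras-side kernel is transposed again, so a load followed by a save round-trips exactly:

import numpy as np

w = np.random.randn(2, 768)       # checkpoint layout
assert np.array_equal(w.T.T, w)   # load (.T) then save (.T) cancel out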
def variable_mapping(self): 'Map to the official BERT weight format\n ' mapping = {'Embedding-Token': ['bert/embeddings/word_embeddings'], 'Embedding-Segment': ['bert/embeddings/token_type_embeddings'], 'Embedding-Position': ['bert/embeddings/position_embeddings'], 'Embedding-Norm': ['bert/embeddings/LayerNorm/beta', 'bert/embeddings/LayerNorm/gamma'], 'Embedding-Mapping': ['bert/encoder/embedding_hidden_mapping_in/kernel', 'bert/encoder/embedding_hidden_mapping_in/bias'], 'Pooler-Dense': ['bert/pooler/dense/kernel', 'bert/pooler/dense/bias'], 'NSP-Proba': ['cls/seq_relationship/output_weights', 'cls/seq_relationship/output_bias'], 'MLM-Dense': ['cls/predictions/transform/dense/kernel', 'cls/predictions/transform/dense/bias'], 'MLM-Norm': ['cls/predictions/transform/LayerNorm/beta', 'cls/predictions/transform/LayerNorm/gamma'], 'MLM-Bias': ['cls/predictions/output_bias']} for i in range(self.num_hidden_layers): prefix = ('bert/encoder/layer_%d/' % i) mapping.update({('Transformer-%d-MultiHeadSelfAttention' % i): [(prefix + 'attention/self/query/kernel'), (prefix + 'attention/self/query/bias'), (prefix + 'attention/self/key/kernel'), (prefix + 'attention/self/key/bias'), (prefix + 'attention/self/value/kernel'), (prefix + 'attention/self/value/bias'), (prefix + 'attention/output/dense/kernel'), (prefix + 'attention/output/dense/bias')], ('Transformer-%d-MultiHeadSelfAttention-Norm' % i): [(prefix + 'attention/output/LayerNorm/beta'), (prefix + 'attention/output/LayerNorm/gamma')], ('Transformer-%d-FeedForward' % i): [(prefix + 'intermediate/dense/kernel'), (prefix + 'intermediate/dense/bias'), (prefix + 'output/dense/kernel'), (prefix + 'output/dense/bias')], ('Transformer-%d-FeedForward-Norm' % i): [(prefix + 'output/LayerNorm/beta'), (prefix + 'output/LayerNorm/gamma')]}) return mapping
-1,916,485,818,333,458,000
Map to the official BERT weight format
bert4keras/models.py
variable_mapping
CurisZhou/bert4keras
python
def variable_mapping(self): '\n ' mapping = {'Embedding-Token': ['bert/embeddings/word_embeddings'], 'Embedding-Segment': ['bert/embeddings/token_type_embeddings'], 'Embedding-Position': ['bert/embeddings/position_embeddings'], 'Embedding-Norm': ['bert/embeddings/LayerNorm/beta', 'bert/embeddings/LayerNorm/gamma'], 'Embedding-Mapping': ['bert/encoder/embedding_hidden_mapping_in/kernel', 'bert/encoder/embedding_hidden_mapping_in/bias'], 'Pooler-Dense': ['bert/pooler/dense/kernel', 'bert/pooler/dense/bias'], 'NSP-Proba': ['cls/seq_relationship/output_weights', 'cls/seq_relationship/output_bias'], 'MLM-Dense': ['cls/predictions/transform/dense/kernel', 'cls/predictions/transform/dense/bias'], 'MLM-Norm': ['cls/predictions/transform/LayerNorm/beta', 'cls/predictions/transform/LayerNorm/gamma'], 'MLM-Bias': ['cls/predictions/output_bias']} for i in range(self.num_hidden_layers): prefix = ('bert/encoder/layer_%d/' % i) mapping.update({('Transformer-%d-MultiHeadSelfAttention' % i): [(prefix + 'attention/self/query/kernel'), (prefix + 'attention/self/query/bias'), (prefix + 'attention/self/key/kernel'), (prefix + 'attention/self/key/bias'), (prefix + 'attention/self/value/kernel'), (prefix + 'attention/self/value/bias'), (prefix + 'attention/output/dense/kernel'), (prefix + 'attention/output/dense/bias')], ('Transformer-%d-MultiHeadSelfAttention-Norm' % i): [(prefix + 'attention/output/LayerNorm/beta'), (prefix + 'attention/output/LayerNorm/gamma')], ('Transformer-%d-FeedForward' % i): [(prefix + 'intermediate/dense/kernel'), (prefix + 'intermediate/dense/bias'), (prefix + 'output/dense/kernel'), (prefix + 'output/dense/bias')], ('Transformer-%d-FeedForward-Norm' % i): [(prefix + 'output/LayerNorm/beta'), (prefix + 'output/LayerNorm/gamma')]}) return mapping
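variable_mapping only declares which checkpoint tensors feed which Keras layer; the loader elsewhere consumes the dict roughly as in the hypothetical sketch below. The checkpoint path and the commented set_weights call are placeholders, not the actual bert4keras loading code:

import tensorflow as tf

checkpoint = '/path/to/bert_model.ckpt'  # hypothetical checkpoint prefix
mapping = {'Embedding-Token': ['bert/embeddings/word_embeddings']}
for layer_name, variable_names in mapping.items():
    # tf.train.load_variable reads a single named tensor from a checkpoint.
    values = [tf.train.load_variable(checkpoint, n) for n in variable_names]
    # model.get_layer(layer_name).set_weights(values)  # roughly what happens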
def apply_main_layers(self, inputs, index): 'The main body of ALBERT is a module based on Self-Attention\n Order: Att --> Add --> LN --> FFN --> Add --> LN\n ' x = inputs z = self.layer_norm_conds[0] attention_name = 'Transformer-MultiHeadSelfAttention' feed_forward_name = 'Transformer-FeedForward' attention_mask = self.compute_attention_bias(index) (xi, x, arguments) = (x, [x, x, x], {'a_bias': None}) if (attention_mask is not None): arguments['a_bias'] = True x.append(attention_mask) x = self.apply(inputs=x, layer=MultiHeadAttention, arguments=arguments, heads=self.num_attention_heads, head_size=self.attention_head_size, out_dim=self.hidden_size, key_size=self.attention_key_size, kernel_initializer=self.initializer, name=attention_name) x = self.apply(inputs=x, layer=Dropout, rate=self.dropout_rate, name=('%s-Dropout' % attention_name)) x = self.apply(inputs=[xi, x], layer=Add, name=('%s-Add' % attention_name)) x = self.apply(inputs=self.simplify([x, z]), layer=LayerNormalization, conditional=(z is not None), hidden_units=self.layer_norm_conds[1], hidden_activation=self.layer_norm_conds[2], hidden_initializer=self.initializer, name=('%s-Norm' % attention_name)) xi = x x = self.apply(inputs=x, layer=FeedForward, units=self.intermediate_size, activation=self.hidden_act, kernel_initializer=self.initializer, name=feed_forward_name) x = self.apply(inputs=x, layer=Dropout, rate=self.dropout_rate, name=('%s-Dropout' % feed_forward_name)) x = self.apply(inputs=[xi, x], layer=Add, name=('%s-Add' % feed_forward_name)) x = self.apply(inputs=self.simplify([x, z]), layer=LayerNormalization, conditional=(z is not None), hidden_units=self.layer_norm_conds[1], hidden_activation=self.layer_norm_conds[2], hidden_initializer=self.initializer, name=('%s-Norm' % feed_forward_name)) return x
3,918,039,020,763,709,000
The main body of ALBERT is a module based on Self-Attention. Order: Att --> Add --> LN --> FFN --> Add --> LN
bert4keras/models.py
apply_main_layers
CurisZhou/bert4keras
python
def apply_main_layers(self, inputs, index): '\n ' x = inputs z = self.layer_norm_conds[0] attention_name = 'Transformer-MultiHeadSelfAttention' feed_forward_name = 'Transformer-FeedForward' attention_mask = self.compute_attention_bias(index) (xi, x, arguments) = (x, [x, x, x], {'a_bias': None}) if (attention_mask is not None): arguments['a_bias'] = True x.append(attention_mask) x = self.apply(inputs=x, layer=MultiHeadAttention, arguments=arguments, heads=self.num_attention_heads, head_size=self.attention_head_size, out_dim=self.hidden_size, key_size=self.attention_key_size, kernel_initializer=self.initializer, name=attention_name) x = self.apply(inputs=x, layer=Dropout, rate=self.dropout_rate, name=('%s-Dropout' % attention_name)) x = self.apply(inputs=[xi, x], layer=Add, name=('%s-Add' % attention_name)) x = self.apply(inputs=self.simplify([x, z]), layer=LayerNormalization, conditional=(z is not None), hidden_units=self.layer_norm_conds[1], hidden_activation=self.layer_norm_conds[2], hidden_initializer=self.initializer, name=('%s-Norm' % attention_name)) xi = x x = self.apply(inputs=x, layer=FeedForward, units=self.intermediate_size, activation=self.hidden_act, kernel_initializer=self.initializer, name=feed_forward_name) x = self.apply(inputs=x, layer=Dropout, rate=self.dropout_rate, name=('%s-Dropout' % feed_forward_name)) x = self.apply(inputs=[xi, x], layer=Add, name=('%s-Add' % feed_forward_name)) x = self.apply(inputs=self.simplify([x, z]), layer=LayerNormalization, conditional=(z is not None), hidden_units=self.layer_norm_conds[1], hidden_activation=self.layer_norm_conds[2], hidden_initializer=self.initializer, name=('%s-Norm' % feed_forward_name)) return x
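The only change from the BERT version above is that the layer names drop the per-layer '%d' index ('Transformer-MultiHeadSelfAttention' instead of 'Transformer-0-MultiHeadSelfAttention'), so every block resolves to the same layer objects. A minimal sketch of how name-keyed reuse yields ALBERT's cross-layer parameter sharing (the apply helper and cache here are illustrative, not the bert4keras internals):

import tensorflow as tf

_layers = {}

def apply(layer_cls, name, **kwargs):
    # Re-use the layer when the same name comes back -- dropping the
    # '%d' index therefore shares one set of weights across all blocks.
    if name not in _layers:
        _layers[name] = layer_cls(name=name, **kwargs)
    return _layers[name]

d1 = apply(tf.keras.layers.Dense, 'Transformer-FeedForward', units=8)
d2 = apply(tf.keras.layers.Dense, 'Transformer-FeedForward', units=8)
assert d1 is d2  # the "two layers" are one object with shared weights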
def variable_mapping(self): 'Map to the official ALBERT weight format\n ' mapping = super(ALBERT, self).variable_mapping() prefix = 'bert/encoder/transformer/group_0/inner_group_0/' mapping.update({'Transformer-MultiHeadSelfAttention': [(prefix + 'attention_1/self/query/kernel'), (prefix + 'attention_1/self/query/bias'), (prefix + 'attention_1/self/key/kernel'), (prefix + 'attention_1/self/key/bias'), (prefix + 'attention_1/self/value/kernel'), (prefix + 'attention_1/self/value/bias'), (prefix + 'attention_1/output/dense/kernel'), (prefix + 'attention_1/output/dense/bias')], 'Transformer-MultiHeadSelfAttention-Norm': [(prefix + 'LayerNorm/beta'), (prefix + 'LayerNorm/gamma')], 'Transformer-FeedForward': [(prefix + 'ffn_1/intermediate/dense/kernel'), (prefix + 'ffn_1/intermediate/dense/bias'), (prefix + 'ffn_1/intermediate/output/dense/kernel'), (prefix + 'ffn_1/intermediate/output/dense/bias')], 'Transformer-FeedForward-Norm': [(prefix + 'LayerNorm_1/beta'), (prefix + 'LayerNorm_1/gamma')]}) return mapping
336,771,692,772,204,350
Map to the official ALBERT weight format
bert4keras/models.py
variable_mapping
CurisZhou/bert4keras
python
def variable_mapping(self): '\n ' mapping = super(ALBERT, self).variable_mapping() prefix = 'bert/encoder/transformer/group_0/inner_group_0/' mapping.update({'Transformer-MultiHeadSelfAttention': [(prefix + 'attention_1/self/query/kernel'), (prefix + 'attention_1/self/query/bias'), (prefix + 'attention_1/self/key/kernel'), (prefix + 'attention_1/self/key/bias'), (prefix + 'attention_1/self/value/kernel'), (prefix + 'attention_1/self/value/bias'), (prefix + 'attention_1/output/dense/kernel'), (prefix + 'attention_1/output/dense/bias')], 'Transformer-MultiHeadSelfAttention-Norm': [(prefix + 'LayerNorm/beta'), (prefix + 'LayerNorm/gamma')], 'Transformer-FeedForward': [(prefix + 'ffn_1/intermediate/dense/kernel'), (prefix + 'ffn_1/intermediate/dense/bias'), (prefix + 'ffn_1/intermediate/output/dense/kernel'), (prefix + 'ffn_1/intermediate/output/dense/bias')], 'Transformer-FeedForward-Norm': [(prefix + 'LayerNorm_1/beta'), (prefix + 'LayerNorm_1/gamma')]}) return mapping
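Consistently with the shared layers, ALBERT's mapping has no per-layer loop: every encoder block reads the single group_0/inner_group_0 prefix, so the checkpoint holds one copy of the transformer weights, versus one copy per layer for BERT. A short comparison of the two naming schemes:

# BERT: one checkpoint prefix per layer -> independent weights per block.
bert_prefixes = [('bert/encoder/layer_%d/' % i) for i in range(3)]
# ALBERT: a single shared prefix for all blocks.
albert_prefix = 'bert/encoder/transformer/group_0/inner_group_0/'
print(bert_prefixes[0], '...', bert_prefixes[-1])
print(albert_prefix)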