Columns: content (string, lengths 22 to 815k), id (int64, 0 to 4.91M)
def test_start_sep_graph() -> nx.Graph:
    """test graph with known clique partition that needs start_separate"""
    G = nx.Graph()
    G.add_nodes_from(range(6))
    G.add_edges_from([(0, 1, {'weight': 1.0}), (0, 2, {'weight': -10}),
                      (0, 3, {'weight': 1}), (0, 4, {'weight': -10}),
                      (0, 5, {'weight': -10}), (1, 2, {'weight': 1.2}),
                      (1, 3, {'weight': -10}), (1, 4, {'weight': -10}),
                      (1, 5, {'weight': -10}), (2, 3, {'weight': 1}),
                      (2, 4, {'weight': -1}), (2, 5, {'weight': 0.5}),
                      (3, 4, {'weight': 0.5}), (3, 5, {'weight': -1})])
    return G
24,600
def structure(table_toplevels):
    """
    Accepts an ordered sequence of TopLevel instances and returns a navigable object structure
    representation of the TOML file.
    """
    table_toplevels = tuple(table_toplevels)
    obj = NamedDict()

    last_array_of_tables = None  # The name of the last array-of-tables header

    for toplevel in table_toplevels:

        if isinstance(toplevel, toplevels.AnonymousTable):
            obj[''] = toplevel.table_element

        elif isinstance(toplevel, toplevels.Table):
            if last_array_of_tables and toplevel.name.is_prefixed_with(last_array_of_tables):
                seq = obj[last_array_of_tables]
                unprefixed_name = toplevel.name.without_prefix(last_array_of_tables)
                seq[-1] = CascadeDict(seq[-1], NamedDict({unprefixed_name: toplevel.table_element}))
            else:
                obj[toplevel.name] = toplevel.table_element

        else:  # It's an ArrayOfTables

            if last_array_of_tables and toplevel.name != last_array_of_tables and \
                    toplevel.name.is_prefixed_with(last_array_of_tables):
                seq = obj[last_array_of_tables]
                unprefixed_name = toplevel.name.without_prefix(last_array_of_tables)

                if unprefixed_name in seq[-1]:
                    seq[-1][unprefixed_name].append(toplevel.table_element)
                else:
                    cascaded_with = NamedDict({unprefixed_name: [toplevel.table_element]})
                    seq[-1] = CascadeDict(seq[-1], cascaded_with)
            else:
                obj.append(toplevel.name, toplevel.table_element)
                last_array_of_tables = toplevel.name

    return obj
24,601
def evaluate(exe, metric, loss, correct, dev_program, data_loader, phase="eval"):
    """
    The evaluate process: calculate the eval loss and metric.
    """
    metric.reset()
    returns = [loss]
    if isinstance(correct, (list, tuple)):
        returns.extend(list(correct))
    else:
        returns.append(correct)

    for batch in data_loader:
        # run the program once per batch and fetch loss plus metric tensors
        return_numpys = exe.run(dev_program, feed=batch, fetch_list=returns)
        metric_numpy = return_numpys[1] if len(return_numpys[1:]) == 1 else return_numpys[1:]
        metric.update(metric_numpy)

    res = metric.accumulate()
    if isinstance(metric, Mcc):
        print("%s loss: %f, mcc: %s" % (phase, return_numpys[0], res[0]))
    elif isinstance(metric, PearsonAndSpearman):
        print("%s loss: %f, pearson: %s, spearman: %s, pearson and spearman: %s"
              % (phase, return_numpys[0], res[0], res[1], res[2]))
    else:
        print("%s loss: %f, acc: %s, " % (phase, return_numpys[0], res))
24,602
def h(q):
    """Binary entropy func"""
    if q in {0, 1}:
        return 0
    return (q * math.log(1 / q, 2)) + ((1 - q) * math.log(1 / (1 - q), 2))
24,603
def job_scheduler_sequential(dict_of_jobs):
    """
    Choose a GPU, then launch jobs sequentially on that GPU in tmux sessions.
    :param dict_of_jobs:
    """
    keys = list(dict_of_jobs.keys())
    while get_first_available_gpu() < 0:
        time.sleep(15)  # Sleep for 15 sec
    gpu_id = get_first_available_gpu()

    while len(keys) > 0:
        has = os.system(f"tmux has-session -t GPU{gpu_id} 2>/dev/null")
        if not int(has) == 0:
            job_key = keys.pop()
            job = dict_of_jobs[job_key]
            name_tmux = f"GPU{gpu_id}"
            cmd = f"conda activate python3; {job} 2>&1 | tee log_terminals/{gpu_id}_{job_key}.txt; tmux kill-session -t {name_tmux}"
            CMD = f'tmux new-session -d -s {name_tmux} \; send-keys "{cmd}" Enter'
            print(CMD)
            os.system(CMD)
        time.sleep(60)
24,604
def has_next_page(page_info: dict) -> bool:
    """
    Extracts value from a dict with hasNextPage key, raises an error if the key is not available
    :param page_info: pagination info
    :return: a bool indicating if the response has a next page
    """
    has_next_page = page_info.get('hasNextPage')
    if has_next_page is None:
        raise KeyNotFoundException('hasNextPage key not available')
    else:
        return has_next_page
24,605
def _stuw_code(current_name=None):
    """Search TYPESTUW for the name of the weir type; return the attribute value used in DAMO."""
    if current_name not in TYPESTUW.values():
        return 99

    for i, name in TYPESTUW.items():
        if name == current_name:
            return i
24,606
def test_edge_width():
    """Test setting edge width."""
    np.random.seed(0)
    data = np.random.random((10, 2, 2))
    data[:, 0, :] = 20 * data[:, 0, :]
    layer = Vectors(data)
    assert layer.edge_width == 1

    layer.edge_width = 2
    assert layer.edge_width == 2

    layer = Vectors(data, edge_width=3)
    assert layer.edge_width == 3
24,607
def test_three_column_image_text_section_under_homepage():
    """
    Three column image text section subpage should provide expected values.
    """
    home_page = HomePageFactory.create()
    assert home_page.three_column_image_text_section is None
    assert models.ThreeColumnImageTextSection.can_create_at(home_page)

    three_column_image_text_section = ThreeColumnImageTextSectionFactory.create(
        column_image_text_section=json.dumps(
            [
                {
                    "type": "column_image_text_section",
                    "value": {
                        "heading": "heading of block",
                        "sub_heading": "subheading of block",
                        "body": "<p>body of the block</p>",
                        "image__title": "title of the image",
                    },
                }
            ]
        ),
        parent=home_page,
    )

    assert home_page.three_column_image_text_section
    assert home_page.three_column_image_text_section == three_column_image_text_section
    for (
        item
    ) in (
        three_column_image_text_section.column_image_text_section
    ):  # pylint: disable=not-an-iterable
        assert item.value.get("heading") == "heading of block"
        assert item.value.get("sub_heading") == "subheading of block"
        assert item.value.get("body").source == "<p>body of the block</p>"
24,608
def _build_init_nodes(context, device):
    """ Build initial inputs for beam search algo """
    decoder_input = _prepare_init_inputs(context, device)
    root_node = BeamSearchNode(None, None, decoder_input, 0, len(context))
    return [root_node]
24,609
def fetch_preset(output_filename=None, nproc=8, add_structure=True):
    """
    Fetches a preset list of docs determined via trial and error.
    An initial query via the frontend on 06/28/2019 showed 12870 docs, and
    subsequent sampling of ids from 8000-25000 yielded all 12820. Successful
    query ids were stored in indices.json, from which this function should be
    able to extract all of the relevant data.

    Args:
        output_filename (str): output filename for all collected docs
        nproc (int): number of processes to use

    Returns:
        (List): list of isotherm documents
    """
    # Load indices from json doc
    iso_ids = loadfn(os.path.join(MOF_TDA_PATH, "ingest", "indices.json"))
    # Fetch all docs from ids
    isotherms = fetch_many_docs(iso_ids, nproc=nproc)
    # Dump to json if output specified
    if output_filename is not None:
        dumpfn(isotherms, output_filename)
    return isotherms
24,610
def parse_args():
    """Function to read CCB-ID command line arguments

    Args:
        None - reads from sys.argv

    Returns:
        an argparse object
    """
    # create the argument parser
    parser = args.create_parser(description='Apply a CCB-ID species classification model to csv or image data.')

    # set up the arguments for dealing with file i/o
    args.input(parser)
    args.mask(parser)
    args.output(parser)
    args.ecodse(parser)
    args.models(parser, help='path to the ccbid model to apply', default=None, required=True)

    # arguments to turn on certain flags or set specific parameters
    args.remove_outliers(parser)
    args.aggregate(parser)
    args.labels(parser)
    args.cpus(parser)  # maybe add function to model object to update the n_cpus in each model
    args.verbose(parser)

    # parse the inputs from sys.argv
    return parser.parse_args(sys.argv[1:])
24,611
def compute_accuracy(data):
    """Return [wpm, accuracy]."""
    prompted_text = data["promptedText"][0]
    typed_text = data.get("typedText", [""])[0]
    start_time = float(data["startTime"][0])
    end_time = float(data["endTime"][0])
    return [typing.wpm(typed_text, end_time - start_time),
            typing.accuracy(typed_text, prompted_text)]
24,612
def drop_talbes():
    """ Drop all model tables """
    models = (
        m for m in globals().values()
        if isinstance(m, type) and issubclass(m, db.Model)
    )
    drop_model_tables(models, fail_silently=True)
24,613
def write_file(filename: str, content: str, mode: str = "w") -> IO:
    """Save content to a file, overwriting it by default."""
    with open(filename, mode) as file:
        file.write(content)
        return file
24,614
def get_minimum_integer_attribute_value(node, attribute_name):
    """
    Returns the minimum value that a specific integer attribute has set
    :param node: str
    :param attribute_name: str
    :return: float
    """
    return maya.cmds.attributeQuery(attribute_name, min=True, node=node)[0]
24,615
def get_star_locs(plotfile):
    """Given a plotfile, return the location of the primary and the secondary."""

    import numpy as np
    import yt

    ds = yt.load(plotfile)

    # Get a numpy array corresponding to the density.

    problo = ds.domain_left_edge.v
    probhi = ds.domain_right_edge.v
    dim    = ds.domain_dimensions

    dx = (probhi - problo) / dim

    dens = (ds.covering_grid(level=0, left_edge=[0.0, 0.0, 0.0],
                             dims=ds.domain_dimensions)['density']).v

    # Read the rotational period from the job_info file (needed below).

    rot_period = None

    f = open(plotfile + '/job_info', 'r')

    for line in f:
        if line.find("rotational_period") > 0:
            rot_period = float(line.split("= ")[1])
            break

    f.close()

    # Calculate the orbital parameters

    M_solar = 1.99e33
    Gconst  = 6.67e-8

    M_P = 0.90
    M_S = 0.60

    M_P = M_P * M_solar
    M_S = M_S * M_solar

    a = (Gconst * (M_P + M_S) * rot_period**2 / (4.0 * np.pi**2))**(1.0/3.0)

    a_2 = a / (1 + M_S / M_P)
    a_1 = (M_S / M_P) * a_2

    # Guess the locations of the stars based on perfect circular rotation

    t = (ds.current_time).v

    center = (probhi + problo) / 2.0

    loc_P = [-a_1 * np.cos(2 * np.pi * t / rot_period) + center[0],
             -a_1 * np.sin(2 * np.pi * t / rot_period) + center[1],
             0.0 + center[2]]

    loc_S = [ a_2 * np.cos(2 * np.pi * t / rot_period) + center[0],
              a_2 * np.sin(2 * np.pi * t / rot_period) + center[1],
              0.0 + center[2]]

    loc_P = np.array(loc_P)
    loc_S = np.array(loc_S)

    # Create an array of the zone positions

    x = problo[0] + dx[0] * (np.arange(dim[0]) + 0.5e0)
    y = problo[1] + dx[1] * (np.arange(dim[1]) + 0.5e0)
    z = problo[2] + dx[2] * (np.arange(dim[2]) + 0.5e0)

    xx, yy, zz = np.meshgrid(x, y, z, indexing="ij")

    rr = (xx**2 + yy**2 + zz**2)**0.5

    # Now what we'll do is to split up the grid into two parts:
    # zones that are closer to the primary's expected location and
    # zones that are closer to the secondary's expected location.

    rr_P = ((xx - loc_P[0])**2 + (yy - loc_P[1])**2 + (zz - loc_P[2])**2)**0.5
    rr_S = ((xx - loc_S[0])**2 + (yy - loc_S[1])**2 + (zz - loc_S[2])**2)**0.5

    P_idx = np.where(rr_P < rr_S)
    S_idx = np.where(rr_S < rr_P)

    # Now, do a center of mass sum on each star.

    xx_P_com = np.sum(dens[P_idx] * xx[P_idx]) / np.sum(dens[P_idx])
    yy_P_com = np.sum(dens[P_idx] * yy[P_idx]) / np.sum(dens[P_idx])
    zz_P_com = np.sum(dens[P_idx] * zz[P_idx]) / np.sum(dens[P_idx])

    xx_S_com = np.sum(dens[S_idx] * xx[S_idx]) / np.sum(dens[S_idx])
    yy_S_com = np.sum(dens[S_idx] * yy[S_idx]) / np.sum(dens[S_idx])
    zz_S_com = np.sum(dens[S_idx] * zz[S_idx]) / np.sum(dens[S_idx])

    return [xx_P_com, yy_P_com, zz_P_com, xx_S_com, yy_S_com, zz_S_com]
24,616
def get_list(_list, persistent_attributes):
    """
    Check if the user supplied a list and if it's a custom list; also check for any saved lists
    :param _list: User supplied list
    :param persistent_attributes: The persistent attribs from the app
    :return: The list name, and whether the list is custom or not
    """
    if _list is not None and (_list.lower() != 'watchlist' and _list.lower() != 'watch list'):
        return _list, True
    else:
        # if default isn't set use watchlist
        if "list" in persistent_attributes:
            if persistent_attributes["list"] != 'watchlist' and persistent_attributes["list"] != 'watch list':
                _list = persistent_attributes["list"]
                _usecustomlist = True
            else:
                _list = 'watchlist'
                _usecustomlist = False
        else:
            _list = 'watchlist'
            _usecustomlist = False

        return _list, _usecustomlist
24,617
def num_active_mesos_tasks():
    """
    An example metric used by the relay.mesos demo to query mesos master
    for the number of currently running tasks.
    """
    while True:
        data = json.load(urllib2.urlopen(
            os.environ['RELAY_MESOS_MASTER_STATE_FOR_DEMO']))
        yield data['started_tasks'] + data['staged_tasks'] - (
            data['failed_tasks'] + data['killed_tasks'] +
            data['lost_tasks'] + data['finished_tasks'])
24,618
def inode_for_pid_sock(pid, addr, port):
    """
    Given a pid that is inside a network namespace, and the address/port of a LISTEN socket,
    find the inode of the socket regardless of which pid in the ns it's attached to.
    """
    expected_laddr = '%02X%02X%02X%02X:%04X' % (addr[3], addr[2], addr[1], addr[0],
                                                socket.htons(port))
    for line in open('/proc/{}/net/tcp'.format(pid), 'r').readlines():
        parts = re.split(r'\s+', line.strip())
        local_addr = parts[1]
        remote_addr = parts[2]
        if remote_addr != '00000000:0000':
            continue  # not a listen socket
        if local_addr == expected_laddr:
            return int(parts[9])
24,619
def print_mem_usage(df):
    """ print memory footprint of a pandas dataframe"""
    mb_usage = df.memory_usage(deep=True).sum() / 1e6
    print(f"Memory usage:{mb_usage:.2f} MB")
24,620
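A small usage sketch of `print_mem_usage` above, assuming pandas is installed; the reported number depends on the pandas version and platform:

    import pandas as pd

    df = pd.DataFrame({"a": range(1000), "b": ["x"] * 1000})
    print_mem_usage(df)  # e.g. "Memory usage:0.07 MB"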
def get_edges_from_route_matrix(route_matrix: Matrix) -> List[Tuple]:
    """Returns a list of the edges used in a route according to the route matrix

    :param route_matrix: A matrix indicating which edges contain the optimal route
    :type route_matrix: Matrix
    :return: The row and column for the edge in the matrix
    :rtype: Tuple
    :yield: List of tuples for each edge connecting two nodes
    :rtype: List[Tuple]
    """
    def get_first_row(route_matrix):
        for row in range(len(route_matrix)):
            nodes_in_row = sum(route_matrix[row])
            if nodes_in_row == 1:
                return row
            elif nodes_in_row == 0:
                continue
            else:
                raise ValueError(f'Invalid number of nodes in row: {nodes_in_row}')

    def get_next_node_from_row(i, route_matrix):
        for j in range(len(route_matrix)):
            if route_matrix[i][j] == 1:
                return (i, j)
        raise ValueError(f"Node {i} is not connected to another node.")

    edges = []
    route_length = np.sum(route_matrix)
    row = get_first_row(route_matrix)
    while len(edges) < route_length:
        try:
            to_node = get_next_node_from_row(row, route_matrix)
            row = to_node[1]
            edges.append(to_node)
        except ValueError:
            logging.info('End of open route found.')
            # transpose the matrix
            route_matrix = [[route_matrix[j][i] for j in range(len(route_matrix))]
                            for i in range(len(route_matrix))]
            # reverse the edges
            edges = [(edges[-1][1], edges[-1][0])]
            row = edges[0][1]
    return edges
24,621
def nicer(string):
    """
    >>> nicer("qjhvhtzxzqqjkmpb")
    True
    >>> nicer("xxyxx")
    True
    >>> nicer("uurcxstgmygtbstg")
    False
    >>> nicer("ieodomkazucvgmuy")
    False
    """
    pair = False
    for i in range(0, len(string) - 3):
        for j in range(i + 2, len(string) - 1):
            if string[i:i + 2] == string[j:j + 2]:
                pair = True
                break
    if not pair:
        return False

    for i in range(0, len(string) - 2):
        if string[i] == string[i + 2]:
            return True
    return False
24,622
def cloud():
    """
    cloud drawing
    """
    cloud = GOval(100, 30, x=70, y=150)
    cloud.filled = True
    cloud.color = 'white'
    cloud.fill_color = 'white'
    window.add(cloud)
    cloud = GOval(100, 30, x=170, y=100)
    cloud.filled = True
    cloud.color = 'white'
    cloud.fill_color = 'white'
    window.add(cloud)
    cloud = GOval(100, 30, x=270, y=10)
    cloud.filled = True
    cloud.color = 'white'
    cloud.fill_color = 'white'
    window.add(cloud)
    cloud = GOval(100, 30, x=700, y=160)
    cloud.filled = True
    cloud.color = 'white'
    cloud.fill_color = 'white'
    window.add(cloud)
    cloud = GOval(100, 30, x=500, y=70)
    cloud.filled = True
    cloud.color = 'white'
    cloud.fill_color = 'white'
    window.add(cloud)
24,623
def multiple_choice(value: Union[list, str]):
    """
    Handle a single string or list of strings
    """
    if isinstance(value, list):
        # account for this odd [None] value for empty multi-select fields
        if value == [None]:
            return None
        # we use string formatting to handle the possibility that the list contains ints
        return ", ".join([f"{val}" for val in value])
    return value
24,624
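A short usage sketch of `multiple_choice` above, assuming its definition is in scope:

    assert multiple_choice("red") == "red"                        # plain strings pass through
    assert multiple_choice([None]) is None                        # empty multi-select sentinel
    assert multiple_choice(["red", 2, "blue"]) == "red, 2, blue"  # ints are formatted as strings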
def Jnu_vD82(wav):
    """Estimate of ISRF at optical wavelengths by van Dishoeck & Black (1982)
    see Fig 1 in Heays et al. (2017)

    Parameters
    ----------
    wav : array of float
        wavelength in angstrom

    Returns
    -------
    Jnu : array of float
        Mean intensity Jnu in cgs units
    """
    if wav is not None and not isinstance(wav, au.quantity.Quantity):
        wav = (wav*au.angstrom).to(au.angstrom)
    else:
        wav = wav.to(au.angstrom)

    w = wav.value
    return 2.44e-16*w**2.7/au.cm**2/au.s/au.Hz
24,625
def _coexp_ufunc(m0, exp0, m1, exp1):
    """ Returns a co-exp couple of couples """
    # Implementation for real
    if (m0 in numba_float_types) and (m1 in numba_float_types):
        def impl(m0, exp0, m1, exp1):
            co_m0, co_m1 = m0, m1
            d_exp = exp0 - exp1
            if m0 == 0.:
                exp = exp1
            elif m1 == 0.:
                exp = exp0
            elif (exp1 > exp0):
                co_m0 = _exp2_shift(co_m0, d_exp)
                exp = exp1
            elif (exp0 > exp1):
                co_m1 = _exp2_shift(co_m1, -d_exp)
                exp = exp0
            else:  # exp0 == exp1
                exp = exp0
            return (co_m0, co_m1, exp)

    # Implementation for complex
    elif (m0 in numba_complex_types) or (m1 in numba_complex_types):
        def impl(m0, exp0, m1, exp1):
            co_m0, co_m1 = m0, m1
            d_exp = exp0 - exp1
            if m0 == 0.:
                exp = exp1
            elif m1 == 0.:
                exp = exp0
            elif (exp1 > exp0):
                co_m0 = (_exp2_shift(co_m0.real, d_exp)
                         + 1j * _exp2_shift(co_m0.imag, d_exp))
                exp = exp1
            elif (exp0 > exp1):
                co_m1 = (_exp2_shift(co_m1.real, -d_exp)
                         + 1j * _exp2_shift(co_m1.imag, -d_exp))
                exp = exp0
            else:  # exp0 == exp1
                exp = exp0
            return (co_m0, co_m1, exp)

    else:
        raise TypingError("datatype not accepted {}{}".format(m0, m1))

    return impl
24,626
def get_lorem(length=None, **kwargs):
    """ Get a text (based on lorem ipsum).

    :return str:

    ::

        print get_lorem()

        # -> atque rerum et aut reiciendis...

    """
    lorem = ' '.join(g.get_choices(LOREM_CHOICES))

    if length:
        lorem = lorem[:length]

    return lorem
24,627
def try_get_graphql_scalar_type(property_name, property_type_id):
    """Return the matching GraphQLScalarType for the property type id or None if none exists."""
    maybe_graphql_type = ORIENTDB_TO_GRAPHQL_SCALARS.get(property_type_id, None)
    if not maybe_graphql_type:
        warnings.warn(
            'Ignoring property "{}" with unsupported property type: '
            "{}".format(property_name, PROPERTY_TYPE_ID_TO_NAME[property_type_id])
        )
    return maybe_graphql_type
24,628
def get(path):
    """Get GCE metadata value."""
    attribute_url = (
        'http://{}/computeMetadata/v1/'.format(_METADATA_SERVER) + path)
    headers = {'Metadata-Flavor': 'Google'}
    operations_timeout = environment.get_value('URL_BLOCKING_OPERATIONS_TIMEOUT')

    response = requests.get(
        attribute_url, headers=headers, timeout=operations_timeout)
    response.raise_for_status()
    return response.text
24,629
def greedy_helper(hyper_list, node_dict, fib_heap, total_weight, weight=None):
    """ Greedy peeling algorithm. Peel nodes iteratively based on their current degree.

    Parameters
    ----------
    hyper_list: list of hyperedges, each a collection of node ids.
    node_dict: dict, node id as key, tuple (neighbor list, heap node) as value. Here heap node is
        a pointer to the corresponding node in fib_heap.
    fib_heap: FibonacciHeap, support fast extraction of min degree node and value change.
    total_weight: edge weight sum.
    weight: str that specifies the edge attribute name of edge weight; None if the graph is unweighted.

    Returns
    ----------
    H: list, subset of nodes corresponding to densest subgraph.
    max_avg: float, density of H induced subgraph.
    new_loads: dict, new loads for nodes, only used for the flowless algorithm when T>1.
    """
    n = len(node_dict.keys())
    avg_degree = total_weight / n
    H = list(node_dict.keys())
    max_avg = avg_degree
    new_loads = dict()

    for i in range(n - 1):
        # find min node from graph (remove from heap)
        to_remove = fib_heap.extract_min()
        node_to_remove = to_remove.value
        degree_to_remove = to_remove.key
        new_loads[node_to_remove] = degree_to_remove

        for e_index in node_dict[node_to_remove][0]:
            e = hyper_list[e_index]
            for neighbor in e:
                if neighbor != node_to_remove:
                    fib_heap.decrease_key(node_dict[neighbor][1],
                                          node_dict[neighbor][1].key - 1)
                    node_dict[neighbor][0].remove(e_index)
            total_weight -= 1

        del node_dict[node_to_remove]
        avg_degree = total_weight / (n - i - 1)
        if max_avg < avg_degree:
            max_avg = avg_degree
            H = list(node_dict.keys())

    return H, max_avg, new_loads
24,630
def test_medicationrequest_1(base_settings):
    """No. 1 tests collection for MedicationRequest.
    Test File: medicationrequest0302.json
    """
    filename = base_settings["unittest_data_dir"] / "medicationrequest0302.json"
    inst = medicationrequest.MedicationRequest.parse_file(
        filename, content_type="application/json", encoding="utf-8"
    )
    assert "MedicationRequest" == inst.resource_type

    impl_medicationrequest_1(inst)

    # testing reverse by generating data from itself and create again.
    data = inst.dict()
    assert "MedicationRequest" == data["resourceType"]

    inst2 = medicationrequest.MedicationRequest(**data)
    impl_medicationrequest_1(inst2)
24,631
def CleanFloat(number, locale='en'):
    """\
    Return number without decimal points if .0, otherwise with .x
    """
    try:
        if number % 1 == 0:
            return str(int(number))
        else:
            return str(float(number))
    except:
        return number
24,632
def ssd_bboxes_encode(boxes):
    """
    Labels anchors with ground truth inputs.

    Args:
        boxes: ground truth with shape [N, 5], for each row, it stores [y, x, h, w, cls].

    Returns:
        gt_loc: location ground truth with shape [num_anchors, 4].
        gt_label: class ground truth with shape [num_anchors, 1].
        num_matched_boxes: number of positives in an image.
    """

    def jaccard_with_anchors(bbox):
        """Compute jaccard score between a box and the anchors."""
        # Intersection bbox and volume.
        ymin = np.maximum(y1, bbox[0])
        xmin = np.maximum(x1, bbox[1])
        ymax = np.minimum(y2, bbox[2])
        xmax = np.minimum(x2, bbox[3])
        w = np.maximum(xmax - xmin, 0.)
        h = np.maximum(ymax - ymin, 0.)

        # Volumes.
        inter_vol = h * w
        union_vol = vol_anchors + (bbox[2] - bbox[0]) * (bbox[3] - bbox[1]) - inter_vol
        jaccard = inter_vol / union_vol
        return np.squeeze(jaccard)

    pre_scores = np.zeros((config.num_ssd_boxes), dtype=np.float32)
    t_boxes = np.zeros((config.num_ssd_boxes, 4), dtype=np.float32)
    t_label = np.zeros((config.num_ssd_boxes), dtype=np.int64)
    for bbox in boxes:
        label = int(bbox[4])
        scores = jaccard_with_anchors(bbox)
        idx = np.argmax(scores)
        scores[idx] = 2.0
        mask = (scores > matching_threshold)
        mask = mask & (scores > pre_scores)
        pre_scores = np.maximum(pre_scores, scores * mask)
        t_label = mask * label + (1 - mask) * t_label
        for i in range(4):
            t_boxes[:, i] = mask * bbox[i] + (1 - mask) * t_boxes[:, i]

    index = np.nonzero(t_label)

    # Transform to tlbr.
    bboxes = np.zeros((config.num_ssd_boxes, 4), dtype=np.float32)
    bboxes[:, [0, 1]] = (t_boxes[:, [0, 1]] + t_boxes[:, [2, 3]]) / 2
    bboxes[:, [2, 3]] = t_boxes[:, [2, 3]] - t_boxes[:, [0, 1]]

    # Encode features.
    bboxes_t = bboxes[index]
    default_boxes_t = default_boxes[index]
    bboxes_t[:, :2] = (bboxes_t[:, :2] - default_boxes_t[:, :2]) / \
        (default_boxes_t[:, 2:] * config.prior_scaling[0])
    tmp = np.maximum(bboxes_t[:, 2:4] / default_boxes_t[:, 2:4], 0.000001)
    bboxes_t[:, 2:4] = np.log(tmp) / config.prior_scaling[1]
    bboxes[index] = bboxes_t

    num_match = np.array([len(np.nonzero(t_label)[0])], dtype=np.int32)
    return bboxes, t_label.astype(np.int32), num_match
24,633
def _get_partial_prediction(input_data: dt.BatchedTrainTocopoData,
                            target_data_token_ids: dt.NDArrayIntBO,
                            target_data_is_target_copy: dt.NDArrayBoolBOV,
                            target_data_is_target_pointer: dt.NDArrayBoolBOV
                            ) -> dt.BatchedTrainTocopoData:
    """Create BatchedTrainTocopoData that contains the latest predictions.

    This function creates BatchedTrainTocopoData for the autoregressive
    prediction. The returned batched_partial_prediction contains the prediction
    made so far by the autoregressive prediction, notably
    BatchedTrainTocopoTargetData.token_ids,
    BatchedTrainTocopoTargetData.is_target_copy and
    BatchedTrainTocopoTargetData.is_target_pointer.
    batched_partial_prediction should be used by the autoregressive prediction
    to generate the next prediction.

    Args:
        input_data: The input data from which we generate the autoregressive
            prediction. We use it to copy the BatchedTrainGraphNodeData and
            BatchedTrainGraphEdgeData. But BatchedTrainTocopoTargetData should
            not be copied from the input data since it contains the ground truth.
        target_data_token_ids: Token ids that the autoregressive prediction
            predicted so far.
        target_data_is_target_copy: is_target_copy matrix that the
            autoregressive prediction predicted so far.
        target_data_is_target_pointer: is_target_pointer that the
            autoregressive prediction predicted so far.

    Returns:
        An instance of BatchedTrainTocopoData, where the BatchedTrainGraphNodeData
        and BatchedTrainGraphEdgeData is the same as input_data. But
        BatchedTrainTocopoTargetData holds the prediction made so far.
    """
    # BatchedTrainTocopoTargetData contains the latest prediction.
    # We must not copy from input_data, but rather use the target_data_token_ids,
    # target_data_is_target_copy and target_data_is_target_pointer that are
    # predicted by the autoregressive prediction.
    batched_partial_prediction_tocopo_target_data = (
        dt.BatchedTrainTocopoTargetData(
            token_ids=target_data_token_ids,
            is_target_copy=target_data_is_target_copy,
            is_target_pointer=target_data_is_target_pointer))
    # BatchedTrainGraphNodeData and BatchedTrainGraphEdgeData is the same as the
    # input_data.
    batched_partial_prediction_graph_node_data = dt.BatchedTrainGraphNodeData(
        token_ids=input_data.node_data.token_ids,
        type_ids=input_data.node_data.type_ids,
        token_positions=input_data.node_data.token_positions,
        pointer_candidates=input_data.node_data.pointer_candidates
    )
    batched_partial_prediction_graph_edge_data = dt.BatchedTrainGraphEdgeData(
        edges=input_data.edge_data.edges,
        time_edges=input_data.edge_data.time_edges)
    batched_partial_prediction = dt.BatchedTrainTocopoData(
        node_data=batched_partial_prediction_graph_node_data,
        edge_data=batched_partial_prediction_graph_edge_data,
        target_data=batched_partial_prediction_tocopo_target_data
    )
    return batched_partial_prediction
24,634
def get_energy_spectrum_old(udata, x0=0, x1=None, y0=0, y1=None, z0=0, z1=None,
                            dx=None, dy=None, dz=None, nkout=None,
                            window=None, correct_signal_loss=True,
                            remove_undersampled_region=True,
                            cc=1.75, notebook=True):
    """
    DEPRECATED: TM cleaned up the code, and improved the literacy and transparency of the algorithm - TM (Sep 2020)

    Returns 1D energy spectrum from velocity field data
    ... The algorithm implemented in this function is VERY QUICK because it does not use the two-point autocorrelation tensor.
    ... Instead, it converts u(kx, ky, kz)u*(kx, ky, kz) into u(kr)u*(kr). (here * denotes the complex conjugate)
    ... CAUTION: Must provide udata with aspect ratio ~ 1
    ...... The conversion process induces unnecessary error IF the dimension of u(kx, ky, kz) is skewed.
    ...... i.e. Make udata.shape like (800, 800), (1024, 1024), (512, 512) for accurate results.
    ... KNOWN ISSUES:
    ...... This function returns a bad result for udata with shape like (800, 800, 2)

    Parameters
    ----------
    udata: nd array
    epsilon: nd array or float, default: None
        dissipation rate used for scaling energy spectrum
        If not given, it uses the values estimated using the rate-of-strain tensor
    nu: float, viscosity
    x0: int
        index to specify a portion of data in which autocorrelation function is computed. Use data u[y0:y1, x0:x1, t0:t1].
    x1: int
        index to specify a portion of data in which autocorrelation function is computed. Use data u[y0:y1, x0:x1, t0:t1].
    y0: int
        index to specify a portion of data in which autocorrelation function is computed. Use data u[y0:y1, x0:x1, t0:t1].
    y1: int
        index to specify a portion of data in which autocorrelation function is computed. Use data u[y0:y1, x0:x1, t0:t1].
    t0: int
        index to specify a portion of data in which autocorrelation function is computed. Use data u[y0:y1, x0:x1, t0:t1].
    t1: int
        index to specify a portion of data in which autocorrelation function is computed. Use data u[y0:y1, x0:x1, t0:t1].
    dx: float
        spacing in x
    dy: float
        spacing in y
    dz: float
        spacing in z
    nkout: int, default: None
        number of bins to compute energy/dissipation spectrum
    notebook: bool, default: True
        Use tqdm.tqdm_notebook if True. Use tqdm.tqdm otherwise
    window: str
        Windowing reduces undesirable effects due to the discreteness of the data.
        A wideband window such as 'flattop' is recommended for turbulent energy spectra.
        For the type of applying window function, choose from below:
        boxcar, triang, blackman, hamming, hann, bartlett, flattop, parzen, bohman, blackmanharris,
        nuttall, barthann, kaiser (needs beta), gaussian (needs standard deviation),
        general_gaussian (needs power, width), slepian (needs width), chebwin (needs attenuation),
        exponential (needs decay scale), tukey (needs taper fraction)
    correct_signal_loss: bool, default: True
        If True, it would compensate for the loss of the signals due to windowing.
        Always recommended to obtain accurate spectral densities.
    remove_undersampled_region: bool, default: True
        If True, it will not sample the region with less statistics.
    cc: float, default: 1.75
        A numerical factor to compensate for the signal loss due to approximations.
        ... cc=1.75 was obtained from the JHTD data.

    Returns
    -------
    e_k: numpy array
        Energy spectrum with shape (number of data points, duration)
    e_k_err: numpy array
        Energy spectrum error with shape (number of data points, duration)
    kk: numpy array
        Wavenumber with shape (number of data points, duration)
    """
    print('get_energy_spectrum_old(): is DEPRECATED since 09/01/20')
    print('... Still works perfectly. Yet, TM highly recommends to use the updated function: get_energy_spectrum()')

    if notebook:
        from tqdm import tqdm_notebook as tqdm
        print('Using tqdm_notebook. If this is a mistake, set notebook=False')
    else:
        from tqdm import tqdm

    def delete_masked_elements(data, mask):
        """
        Deletes elements of data using mask, and returns a 1d array

        Parameters
        ----------
        data: N-d array
        mask: N-d array, bool

        Returns
        -------
        compressed_data
        """
        data_masked = ma.array(data, mask=mask)
        compressed_data = data_masked.compressed()
        '...Reduced data using a given mask'
        return compressed_data

    def convert_nd_spec_to_1d(e_ks, ks, nkout=None, cc=1.75):
        """
        Convert the results of get_energy_spectrum_nd() into a 1D spectrum
        ... This is actually a tricky problem.
        Importantly, this will output the SPECTRAL DENSITY,
        not power which is integrated spectral density (i.e. spectral density * delta_kx * delta_ky * delta_kz.)
        ... Ask Takumi for derivation. The derivation goes like this.
        ...... 1. Start with the Parseval's theorem.
        ...... 2. Write the discretized equation about the TKE: Average TKE = sum deltak * E(k)
        ...... 3. Using 1, write down the avg TKE
        ...... 4. Equate 2 and 3. You get e_k1d * jacobian / (n_samples * deltak)
        ...... IF deltak = deltakr where deltakr = np.sqrt(deltakx**2 + deltaky**2) for 2D
        ...... where e_k1d is just a histogram value obtained from the DFT result
               (i.e. POWER - spectral density integrated over a px)
        ...... 5. Finally, convert this into the SPECTRAL DENSITY. This is two-fold.
        ...... 5.1.
        ...... e_k1d * jacobian / (n_samples * deltak) is not necessarily the correct density
        ...... if deltak is not equal to deltakr.
        ...... This is because e_k1d comes from the histogram of the input velocity field.
        ...... One can show that the correction is just (deltak / deltakr) ** dim
        ...... 5.2
        ...... After 5.1, this is finally the integrated power between k and k + deltak
        ...... Now divide this by deltak to get the spectral density.

        Parameters
        ----------
        e_ks
        ks
        nkout
        d: int/float, DIMENSION OF THE FLOW (NOT DIMENSION OF AVAILABLE VELOCITY FIELD)
            ... For 3D turbulence, d = 3
            ... d is equal to 3 even if udata is a 2D field embedded in an actual 3D field,
            ... For 2D turbulence, d = 2

        Returns
        -------
        """
        dim = ks.shape[0]
        duration = e_ks.shape[-1]
        if dim == 2:
            deltakx, deltaky = ks[0, 0, 1] - ks[0, 0, 0], \
                               ks[1, 1, 0] - ks[1, 0, 0]
            e_ks *= deltakx * deltaky  # use the raw DFT outputs (power = integrated density over a px)
            deltakr = np.sqrt(deltakx ** 2 + deltaky ** 2)  # radial k spacing of the velocity field
            dx, dy = 2. * np.pi / ks[0, 0, 0] * -0.5, 2. * np.pi / ks[1, 0, 0] * -0.5
        if dim == 3:
            deltakx, deltaky, deltakz = ks[0, 0, 1, 0] - ks[0, 0, 0, 0], \
                                        ks[1, 1, 0, 0] - ks[1, 0, 0, 0], \
                                        ks[2, 0, 0, 1] - ks[2, 0, 0, 0]
            e_ks *= deltakx * deltaky * deltakz  # use the raw DFT outputs (power = integrated density over a px)
            deltakr = np.sqrt(deltakx ** 2 + deltaky ** 2 + deltakz ** 2)  # radial k spacing of the velocity field
            dx, dy, dz = 2. * np.pi / ks[0, 0, 0] * -0.5, 2. * np.pi / ks[1, 0, 0] * -0.5, 2. * np.pi / ks[2, 0, 0] * -0.5

        kk = np.zeros((ks.shape[1:]))
        for i in range(dim):
            kk += ks[i, ...] ** 2
        kk = np.sqrt(kk)  # radial k

        if nkout is None:
            nkout = int(np.max(ks.shape[1:]) * 0.8)
        shape = (nkout, duration)
        e_k1ds = np.empty(shape)
        e_k1d_errs = np.empty(shape)
        k1ds = np.empty(shape)

        if remove_undersampled_region:
            kx_max, ky_max = np.nanmax(ks[0, ...]), np.nanmax(ks[1, ...])
            k_max = np.nanmin([kx_max, ky_max])
            if dim == 3:
                kz_max = np.nanmax(ks[2, ...])
                k_max = np.nanmin([k_max, kz_max])

        for t in range(duration):
            # flatten arrays to feed to binned_statistic
            kk_flatten, e_knd_flatten = kk.flatten(), e_ks[..., t].flatten()

            if remove_undersampled_region:
                mask = np.abs(kk_flatten) > k_max
                kk_flatten = delete_masked_elements(kk_flatten, mask)
                e_knd_flatten = delete_masked_elements(e_knd_flatten, mask)

            # get a histogram
            k_means, k_edges, binnumber = binned_statistic(kk_flatten, kk_flatten, statistic='mean', bins=nkout)
            k_binwidth = (k_edges[1] - k_edges[0])
            k1d = k_edges[1:] - k_binwidth / 2
            e_k1d, _, _ = binned_statistic(kk_flatten, e_knd_flatten, statistic='mean', bins=nkout)
            e_k1d_err, _, _ = binned_statistic(kk_flatten, e_knd_flatten, statistic='std', bins=nkout)

            # # WEIGHTED AVERAGE
            # ke_k1d, _, _ = binned_statistic(kk_flatten, kk_flatten * e_knd_flatten, statistic='mean', bins=nkout)
            # e_k1d = ke_k1d / k1d
            # ke_k1d_err, _, _ = binned_statistic(kk_flatten, kk_flatten * e_knd_flatten, statistic='std', bins=nkout)
            # e_k1d_err = ke_k1d_err / k1d

            # One must fix the power by some numerical factor due to the DFT and the definition of E(k)
            n_samples = len(kk_flatten)
            deltak = k1d[1] - k1d[0]

            if dim == 2:
                jacobian = 2 * np.pi * k1d
            elif dim == 3:
                jacobian = 4 * np.pi * k1d ** 2

            # Insert to a big array
            # ... A quick derivation of this math is given in the docstring.
            k1ds[..., t] = k1d
            # OLD stuff
            # e_k1ds[..., t] = e_k1d * jacobian / (n_samples * deltak)
            # e_k1d_errs[..., t] = e_k1d_err * jacobian / (n_samples * deltak)
            # print deltak
            # Old stuff 2: scaling that works?
            # e_k1ds[..., t] = e_k1d * jacobian / (n_samples * deltak) * (deltak / deltakr) ** dim / deltak
            # e_k1d_errs[..., t] = e_k1d_err * jacobian / (n_samples * deltak) * (deltak / deltakr) ** dim / deltak

            # print(dx, dy, deltakr, deltakx * dx * ks.shape[2])
            print(deltakr, deltak)
            # 2019-2020 August
            # e_k1ds[..., t] = e_k1d * jacobian / (n_samples * deltakr ** 2) * cc
            # e_k1d_errs[..., t] = e_k1d_err * jacobian / (n_samples * deltakr ** 2) * cc
            #
            # Update in Aug, 2020 - TM
            e_k1ds[..., t] = e_k1d * jacobian / (n_samples * deltakr ** 2) * cc
            e_k1d_errs[..., t] = e_k1d_err * jacobian / (n_samples * deltakr ** 2) * cc

        return e_k1ds, e_k1d_errs, k1ds

    dim, duration = len(udata), udata.shape[-1]

    e_ks, ks = get_energy_spectrum_nd_old(udata, x0=x0, x1=x1, y0=y0, y1=y1, z0=z0, z1=z1,
                                          dx=dx, dy=dy, dz=dz, window=window,
                                          correct_signal_loss=correct_signal_loss)
    e_k, e_k_err, kk = convert_nd_spec_to_1d(e_ks, ks, nkout=nkout, cc=cc)

    # #### NORMALIZATION IS NO LONGER NEEDED #### - Takumi, Apr 2019
    # # normalization
    # energy_avg, energy_avg_err = get_spatial_avg_energy(udata, x0=x0, x1=x1, y0=y0, y1=y1, z0=z0, z1=z1)
    #
    # for t in range(duration):
    #     I = np.trapz(e_k[0:, t], kk[0:, t])
    #     print I
    #     N = I / energy_avg[t]  # normalizing factor
    #     e_k[:, t] /= N
    #     e_k_err[:, t] /= N

    if notebook:
        from tqdm import tqdm as tqdm

    return e_k, e_k_err, kk
24,635
def createDefaultClasses(datasetTXT):
    """
    :param datasetTXT: dict with text from txt files indexed by filename
    :return: Dict with key:filename, value:list of lists with classes per sentence in the document
    """
    classesDict = {}
    for fileName in datasetTXT:
        classesDict[fileName] = []
        sentences = nltkSentenceSplit(datasetTXT[fileName], verbose=False)
        for sentence in sentences:
            sentence = nltkTokenize(sentence)
            classesDict[fileName].append([int(0) for _ in sentence])
    return classesDict
24,636
def getGlobals():
    """
    :return: (dict)
    """
    return globals()
24,637
def split_text_to_words(words: Iterable[str]) -> List[Word]:
    """Transform split text into list of Word."""
    return [Word(word, len(word)) for word in words]
24,638
def init_module_operation():
    """This function imports the primary modules for the package and returns ``True`` when successful."""
    import khorosjx
    khorosjx.init_module('admin', 'content', 'groups', 'spaces', 'users')
    return True
24,639
def connect_to_rds(aws, region):
    """
    Return boto connection to the RDS in the specified environment's region.
    """
    set_progress('Connecting to AWS RDS in region {0}.'.format(region))
    wrapper = aws.get_api_wrapper()
    client = wrapper.get_boto3_client(
        'rds',
        aws.serviceaccount,
        aws.servicepasswd,
        region
    )
    return client
24,640
def export_graphviz(DecisionTreeClassificationModel, featureNames=None, categoryNames=None, classNames=None,
                    filled=True, roundedCorners=True, roundLeaves=True):
    """
    Generates a DOT string out of a Spark's fitted DecisionTreeClassificationModel, which can be drawn with any
    library capable of handling the DOT format.

    If you want to plot in a single step, please use the function plot_tree().

    Arguments:
    DecisionTreeClassificationModel -- a pyspark.ml.classification.DecisionTreeClassificationModel instance

    featureNames -- a list with the feature names. This is probably the same list you usually pass
                    to your VectorAssembler constructor

    categoryNames -- a dictionary with the featureNames that are categorical as the keys, and the different
                     categories as the values. This is probably the featureNames as key,
                     StringIndexerModel.labels attribute as value for each categorical feature

    classNames -- a list with the class names for your target column. This is probably the
                  StringIndexerModel.labels for your target column

    filled -- boolean which indicates whether to fill nodes with colour or not. Color gamma will be the
              prediction class for each node, and color intensity the impurity at such node

    roundedCorners -- boolean which indicates whether to round rectangle corners for the nodes

    roundLeaves -- boolean which indicates whether to represent leaf nodes as ellipses rather than rectangles

    Returns:
    a DOT string ready to be processed by any DOT handling library
    """
    tree_dict = loads(generate_tree_json(DecisionTreeClassificationModel, withNodeIDs=False))
    num_classes = get_num_classes(tree_dict)
    color_brew = generate_color_brew(num_classes)
    node_list = []
    tree_dict_with_id = add_node_ids(tree_dict)

    graph = relations_to_str(tree_dict_with_id,
                             featureNames=featureNames,
                             categoryNames=categoryNames,
                             classNames=classNames,
                             numClasses=num_classes,
                             nodeList=node_list,
                             filled=filled,
                             roundLeaves=roundLeaves,
                             color_brew=color_brew)
    node_properties = "\n".join(node_list)

    filled_and_rounded = []
    if filled:
        filled_and_rounded.append("filled")
    if roundedCorners:
        filled_and_rounded.append("rounded")

    dot_string = """digraph Tree {
node [shape=box style="%s"]
subgraph body {
%s
%s}
}""" % (",".join(filled_and_rounded), "".join(graph), node_properties)

    return dot_string
24,641
def _get_next_sequence_values(session, base_mapper, num_values):
    """Fetches the next `num_values` ids from the `id` sequence on the `base_mapper` table.

    For example, if the next id in the `model_id_seq` sequence is 12, then
    `_get_next_sequence_values(session, Model.__mapper__, 5)` will return [12, 13, 14, 15, 16].
    """
    assert _has_normal_id_primary_key(
        base_mapper
    ), "_get_next_sequence_values assumes that the sequence produces integer values"

    id_seq_name = _get_id_sequence_name(base_mapper)
    # Table.schema is the canonical place to get the name of the schema.
    # See https://docs.sqlalchemy.org/en/13/core/metadata.html#sqlalchemy.schema.Table.params.schema
    schema = base_mapper.entity.__table__.schema
    sequence = sqlalchemy.Sequence(id_seq_name, schema=schema)

    # Select the next num_values from `sequence`
    raw_ids = tuples_to_scalar_list(
        session.connection().execute(
            sqlalchemy.select([sequence.next_value()]).select_from(
                sqlalchemy.text("generate_series(1, :num_values)")
            ),
            {"num_values": num_values},
        )
    )
    assert len(raw_ids) == num_values, u"Expected to get {} new ids, instead got {}".format(
        num_values, len(raw_ids)
    )

    # session.execute returns `long`s since Postgres sequences use `bigint` by default.
    # However, we need ints since the column type for our primary key is `integer`.
    return [int(id_) for id_ in raw_ids]
24,642
def overview(request):
    """ Dashboard: Process overview page. """
    responses_dict = get_data_for_user(request.user)
    responses_dict_by_step = get_step_responses(responses_dict)

    # Add step status dictionary
    step_status = get_step_completeness(responses_dict_by_step)
    responses_dict_by_step['step_status'] = step_status
    responses_dict_by_step['active_page'] = 'overview'
    responses_dict_by_step['derived'] = get_derived_data(responses_dict)

    # Dashnav needs filing option to determine which steps to show
    for question in responses_dict_by_step['signing_filing']:
        responses_dict_by_step[question['question_id']] = question['value']

    response = render(request, 'overview.html', context=responses_dict_by_step)

    # set this session variable after the page is already rendered
    request.session['viewed_dashboard_during_session'] = True

    return response
24,643
def _normalize(y, data, width, height, depth, dim):
    """
    Purpose: normalize image data (from 0~255 down to 0~1)

    Parameters:
        y: the normalized image data (output)
        data: image data
        width: image width
        height: image height
        depth: image depth
        dim: dimensionality of the image data (2-D or 3-D array)

    Returns: None
    """
    # 2-D array
    if ArrayDim.TWO == dim:
        for i in range(0, width):
            for j in range(0, height):
                y[i, j] = min(data[i, j] / 255, 1)
    # 3-D array
    else:
        for k in range(0, depth):
            for i in range(0, width):
                for j in range(0, height):
                    y[i, j, k] = min(data[i, j, k] / 255, 1)
24,644
def _guess_os():
    """Try to guess the current OS"""
    try:
        abi_name = ida_typeinf.get_abi_name()
    except:
        abi_name = ida_nalt.get_abi_name()
    if "OSX" == abi_name:
        return "macos"

    inf = ida_idaapi.get_inf_structure()
    file_type = inf.filetype
    if file_type in (ida_ida.f_ELF, ida_ida.f_AOUT, ida_ida.f_COFF):
        return "linux"
    elif file_type == ida_ida.f_MACHO:
        return "macos"
    elif file_type in (
        ida_ida.f_PE,
        ida_ida.f_EXE,
        ida_ida.f_EXE_old,
        ida_ida.f_COM,
        ida_ida.f_COM_old,
    ):
        return "windows"
    else:
        # Default
        return "linux"
        # raise UnhandledOSException("Unrecognized OS type")
24,645
def create_conf(name, address, *services):
    """Create an Apple TV configuration."""
    atv = conf.AppleTV(name, address)
    for service in services:
        atv.add_service(service)
    return atv
24,646
def log_transform(x):
    """ Log transformation from total precipitation in mm/day"""
    tp_max = 23.40308390557766
    y = np.log(x*(np.e-1)/tp_max + 1)
    return y
24,647
def test_bigquery_run_sql():
    """Test run_sql against bigquery database"""
    statement = "SELECT 1 + 1;"
    database = BigqueryDatabase(conn_id=DEFAULT_CONN_ID)
    response = database.run_sql(statement)
    assert response.first()[0] == 2
24,648
def get_flight(arguments):
    """
    connects to the skypicker service and gets the most optimal flight based on search criteria
    :param arguments: input arguments from parse_arg
    :return dict: flight
    """
    api_url = 'https://api.skypicker.com/flights?v=3&'
    adults = '1'
    # convert time format 2018-04-13 -> 13/04/2018
    date = datetime.datetime.strptime(arguments.date, "%Y-%m-%d").strftime("%d/%m/%Y")
    fly_from = arguments.origin
    fly_to = arguments.to
    sort = arguments.sort

    if arguments.days_in_destination == 'oneway':
        # constructing search query for ONEWAY flight
        type_flight = 'oneway'
        query_string = '&flyFrom=' + fly_from + \
                       '&to=' + fly_to + \
                       '&dateFrom=' + date + \
                       '&dateTo=' + date + \
                       '&typeFlight=' + type_flight + \
                       '&adults=' + adults + \
                       '&sort=' + sort + \
                       '&asc=1'
    else:
        # constructing search query for RETURN flight
        days_in_destination = arguments.days_in_destination
        type_flight = 'round'
        query_string = 'daysInDestinationFrom=' + days_in_destination + \
                       '&daysInDestinationTo=' + days_in_destination + \
                       '&flyFrom=' + fly_from + \
                       '&to=' + fly_to + \
                       '&dateFrom=' + date + \
                       '&dateTo=' + date + \
                       '&typeFlight=' + type_flight + \
                       '&adults=' + adults + \
                       '&sort=' + sort + \
                       '&asc=1'

    if arguments.verbose:
        print(query_string)

    get_data = requests.get(api_url + query_string)
    json_data = json.loads(get_data.content)
    flights = json_data['data']

    # return first flight in the sorted list
    if arguments.verbose:
        print(flights[0])

    return flights[0]
24,649
def use_ip_alt(request):
    """
    Fixture that gives back 2 instances of UseIpAddrWrapper
    1) use ip4, dont use ip6
    2) dont use ip4, use ip6
    """
    use_ipv4, use_ipv6 = request.param
    return UseIPAddrWrapper(use_ipv4, use_ipv6)
24,650
def radius_gaussian(sq_r, sig, eps=1e-9):
    """Compute a radius gaussian (gaussian of distance)

    Args:
        sq_r: input radiuses [dn, ..., d1, d0]
        sig: extents of gaussians [d1, d0] or [d0] or float

    Returns:
        gaussian of sq_r [dn, ..., d1, d0]
    """
    return torch.exp(-sq_r / (2 * sig**2 + eps))
24,651
def index_papers_to_geodata(papers: List[Paper]) -> Dict[str, Any]:
    """
    :param papers: list of Paper
    :return: object
    """
    geodata = {}
    for paper in papers:
        for file in paper.all_files():
            for location in file.locations.all():
                if location.id not in geodata:
                    geodata[location.id] = {
                        "id": location.id,
                        "name": location.description,
                        "coordinates": location.geometry,
                        "papers": {},
                    }
                if paper.id not in geodata[location.id]["papers"]:
                    if paper.paper_type:
                        paper_type = paper.paper_type.paper_type
                    else:
                        paper_type = _("Paper")
                    geodata[location.id]["papers"][paper.id] = {
                        "id": paper.id,
                        "name": paper.name,
                        "type": paper_type,
                        "url": reverse("paper", args=[paper.id]),
                        "files": [],
                    }
                geodata[location.id]["papers"][paper.id]["files"].append(
                    {
                        "id": file.id,
                        "name": file.name,
                        "url": reverse("file", args=[file.id]),
                    }
                )

    return geodata
24,652
def _get_all_prefixed_mtds(
        prefix: str,
        groups: t.Tuple[str, ...],
        update_groups_by: t.Optional[t.Union[t.FrozenSet[str], t.Set[str]]] = None,
        prefix_removal: bool = False,
        custom_class_: t.Any = None,
) -> t.Dict[str, t.Tuple]:
    """Get all methods prefixed with ``prefix`` in predefined feature ``groups``.

    The predefined metafeature groups are inside ``VALID_GROUPS`` attribute.

    Args:
        prefix (:obj:`str`): gather methods prefixed with this value.

        groups (:obj:`Tuple` of :obj:`str`): a tuple of feature group names. It
            can assume value :obj:`NoneType`, which is interpreted as ``no
            filter`` (i.e. all features of all groups will be returned).

        return_groups (:obj:`bool`, optional): if True, then the returned value
            will be a :obj:`dict` (instead of a :obj:`tuple`) which maps each
            group (as keys) with its correspondent values (as :obj:`tuple`s).

        update_groups_by (:obj:`set` of :obj:`str`, optional): values to filter
            ``groups``. This function also returns a new version of ``groups``
            with all its elements that do not contribute with any new method
            for the final output removed. In other words, any group which does
            not contribute to the output of this function is removed. This is
            particularly useful for precomputations, as it helps avoiding
            unnecessary precomputation methods from feature groups not related
            with user selected features.

        prefix_removal (:obj:`bool`, optional): if True, then the returned
            method names will not have the ``prefix``.

        custom_class_ (Class, optional): used for inner testing purposes. If
            not None, the given class will be used as reference to extract the
            prefixed methods.

    Returns:
        If ``filter_groups_by`` argument is :obj:`NoneType` or empty:
            tuple: with all filtered methods by ``group``.

        Else:
            tuple(tuple, tuple): the first field is the output described above,
                the second field is a new version of ``groups``, with all
                elements that do not contribute with any element listed in the
                set ``update_groups_by`` removed.
    """
    groups = tuple(set(VALID_GROUPS).intersection(groups))

    if not groups and custom_class_ is None:
        return {"methods": tuple(), "groups": tuple()}

    if custom_class_ is None:
        verify_groups = tuple(VALID_GROUPS)
        verify_classes = tuple(VALID_MFECLASSES)

    else:
        verify_groups = ("test_methods", )
        verify_classes = (custom_class_, )

    methods_by_group = {
        ft_type_id: get_prefixed_mtds_from_class(
            class_obj=mfe_class,
            prefix=prefix,
            prefix_removal=prefix_removal)
        for ft_type_id, mfe_class in zip(verify_groups, verify_classes)
        if ft_type_id in groups or custom_class_ is not None
    }

    gathered_methods = []  # type: t.List[t.Union[str, TypeMtdTuple]]
    new_groups = []  # type: t.List[str]

    for group_name in methods_by_group:
        group_mtds = methods_by_group[group_name]
        gathered_methods += group_mtds

        if update_groups_by:
            group_mtds_names = {
                remove_prefix(mtd_pack[0], prefix=MTF_PREFIX)
                if not prefix_removal
                else mtd_pack[0]
                for mtd_pack in group_mtds
            }

            if not update_groups_by.isdisjoint(group_mtds_names):
                new_groups.append(group_name)

    ret_val = {
        "methods": tuple(gathered_methods),
    }  # type: t.Dict[str, t.Tuple]

    if update_groups_by:
        ret_val["groups"] = tuple(new_groups)

    return ret_val
24,653
def _extract_values_from_certificate(cert):
    """
    Gets Serial Number, DN and Public Key Hashes. Currently SHA1 is used to
    generate hashes for DN and Public Key.
    """
    logger = getLogger(__name__)

    # cert and serial number
    data = {
        u'cert': cert,
        u'issuer': cert.get_issuer().der(),
        u'serial_number': cert.get_serial_number(),
        u'algorithm': rfc2437.id_sha1,
        u'algorithm_parameter': univ.Any(hexValue='0500')  # magic number
    }

    # DN Hash
    data[u'name'] = cert.get_subject()
    cert_der = data[u'name'].der()
    sha1_hash = hashlib.sha1()
    sha1_hash.update(cert_der)
    data[u'name_hash'] = sha1_hash.hexdigest()

    # public key Hash
    data['key_hash'] = _get_pubickey_sha1_hash(cert).hexdigest()

    # CRL and OCSP
    data['crl'] = None
    ocsp_uris0 = []
    for idx in range(cert.get_extension_count()):
        e = cert.get_extension(idx)
        if e.get_short_name() == b'authorityInfoAccess':
            for line in str(e).split(u"\n"):
                m = OCSP_RE.match(line)
                if m:
                    logger.debug(u'OCSP URL: %s', m.group(1))
                    ocsp_uris0.append(m.group(1))
        elif e.get_short_name() == b'crlDistributionPoints':
            for line in str(e).split(u"\n"):
                m = CRL_RE.match(line)
                if m:
                    logger.debug(u"CRL: %s", m.group(1))
                    data['crl'] = m.group(1)

    if len(ocsp_uris0) == 1:
        data['ocsp_uri'] = ocsp_uris0[0]
    elif len(ocsp_uris0) == 0:
        data['ocsp_uri'] = u''
    else:
        raise OperationalError(
            msg=u'More than one OCSP URI entries are specified in '
                u'the certificate',
            errno=ER_FAILED_TO_GET_OCSP_URI,
        )

    data[u'is_root_ca'] = cert.get_subject() == cert.get_issuer()
    return data
24,654
def cartesian_product(arrays):
    """Create a cartesian product array from a list of arrays.

    It is used to create x-y coordinates array from x and y arrays.
    Stolen from stackoverflow
    http://stackoverflow.com/a/11146645
    """
    broadcastable = np.ix_(*arrays)
    broadcasted = np.broadcast_arrays(*broadcastable)
    rows, cols = reduce(np.multiply, broadcasted[0].shape), len(broadcasted)
    out = np.empty(rows * cols, dtype=broadcasted[0].dtype)
    start, end = 0, rows
    for a in broadcasted:
        out[start:end] = a.reshape(-1)
        start, end = end, end + rows
    return out.reshape(cols, rows).T
24,655
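A small usage sketch of `cartesian_product` above, assuming numpy is imported as `np` and `functools.reduce` is in scope, as the snippet requires:

    import numpy as np
    from functools import reduce

    x = np.array([0, 1, 2])
    y = np.array([10, 20])
    print(cartesian_product([x, y]))
    # [[0 10] [0 20] [1 10] [1 20] [2 10] [2 20]]  -> shape (6, 2)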
def advanced_split(string, *symbols, contain=False, linked='right'):
    """
    Split a string by symbols
    If contain is True, the result will contain symbols
    The choice of linked decides which adjacent part of the result the symbols link to
    """
    if not isinstance(string, str):
        raise Exception('String must be str!')
    for each in symbols:
        if not isinstance(each, str):
            raise Exception('Symbol must be str!')
    linked = linked.lower()
    if linked not in ['left', 'right']:
        raise Exception('Linked must be left or right!')
    if not len(symbols):
        return [string]

    result = []
    symbols_len = tuple([len(each) for each in symbols])
    if contain:
        tail = ''
    while 1:
        index = len(string)
        num = -1
        for _num, each in enumerate(symbols):
            _index = string.find(each)
            if _index < index and _index + 1:
                index = _index
                num = _num
        if num == -1:
            temp = tail + string if contain and linked == 'right' and tail else string
            if temp:
                result.append(temp)
            break
        temp = string[:index]
        if contain and linked == 'left':
            tail = symbols[num]
        if contain:
            if tail:
                if linked == 'left':
                    temp = temp + tail
                if linked == 'right':
                    temp = tail + temp
        if contain and linked == 'right':
            tail = symbols[num]
        string = string[index+symbols_len[num]:]
        if temp:
            result.append(temp)
    return result
24,656
def _get_resource_info(
    resource_type="pod",
    labels={},
    json_path=".items[0].metadata.name",
    errors_to_ignore=("array index out of bounds: index 0",),
    verbose=False,
):
    """Runs 'kubectl get <resource_type>' command to retrieve info about this resource.

    Args:
        resource_type (string): "pod", "service", etc.
        labels (dict): (eg. {'name': 'phenotips'})
        json_path (string): a json path query string (eg. ".items[0].metadata.name")
        errors_to_ignore (list):
        verbose (bool):

    Returns:
        (string) resource value (eg. "postgres-410765475-1vtkn")
    """

    l_arg = ""
    if labels:
        l_arg = "-l" + ",".join(["%s=%s" % (key, value) for key, value in labels.items()])

    output = run(
        "kubectl get %(resource_type)s %(l_arg)s -o jsonpath={%(json_path)s}" % locals(),
        errors_to_ignore=errors_to_ignore,
        print_command=False,
        verbose=verbose,
    )

    return output.strip('\n') if output is not None else None
24,657
def rotate_line_about_point(line, point, degrees):
    """
    added 161205
    This takes a line and rotates it about a point a certain number of degrees.
    For use with clustering veins.
    :param line: tuple containing two pairs of x, y values
    :param point: tuple of x, y
    :param degrees: number of degrees to rotate by
    :return: line (now rotated)
    """
    # point will serve as axis
    axis = point
    # unpack line
    p0, p1 = line
    # and get the line's degrees and length (** is exponentiation; ^ would be bitwise XOR)
    line_deg = line_to_angle(line)
    d = (abs(p0[0] - p1[0]), abs(p0[1] - p1[1]))
    line_length = sqrt(d[0] ** 2 + d[1] ** 2)

    # calculate radius between points and axis
    d = (abs(p0[0] - axis[0]), abs(p0[1] - axis[1]))
    r0 = sqrt(d[0] ** 2 + d[1] ** 2)
    # r1 = float((p1[0] - axis[0]) ** 2 + (p1[1] - axis[1]) ** 2) ** 0.5

    # find degrees that first line is above x-axis
    p0_deg = line_to_angle((axis, p0))

    # now rotate line one to be level to degrees
    p0_cos = cos(degrees * (pi / 180.0))
    p0_sin = sin(degrees * (pi / 180.0))
    p0_n = (r0 * p0_cos, r0 * p0_sin)

    # and move p1 to be in respect to p0
    new_deg = line_deg - p0_deg

    # normalize degrees
    while new_deg > 360:
        new_deg -= 360
    while new_deg < 0:
        new_deg += 360

    # get second point of line now since all variables are known
    p1_cos = cos(new_deg * (pi / 180.0))
    p1_sin = sin(new_deg * (pi / 180.0))

    # get new p1
    p1_n = (p1_cos * line_length + p0_n[0], p1_sin * line_length + p0_n[1])

    # return new line
    return p0_n, p1_n
24,658
def arith_relop(a, t, b):
    """
    arith_relop(a, t, b)

    This is (arguably) a hack.
    Represents each function as an integer 0..5.
    """
    return [(t == 0).implies(a < b),
            (t == 1).implies(a <= b),
            (t == 2).implies(a == b),
            (t == 3).implies(a >= b),
            (t == 4).implies(a > b),
            (t == 5).implies(a != b)
            ]
24,659
def test_normal_errors():
    """
    Test the reconstruction of normal vector errors from PCA
    and conversion back to hyperbolic errors
    """
    o = random_pca()
    hyp_axes = sampling_axes(o)
    v = to_normal_errors(hyp_axes)
    axes_reconstructed = from_normal_errors(v)
    assert N.allclose(hyp_axes, axes_reconstructed)
24,660
def initialise_framework(options):
    """This function initializes the entire framework

    :param options: Additional arguments for the component initializer
    :type options: `dict`
    :return: True if all commands do not fail
    :rtype: `bool`
    """
    logging.info("Loading framework please wait..")

    # No processing required, just list available modules.
    if options["list_plugins"]:
        show_plugin_list(db, options["list_plugins"])
        finish()

    target_urls = load_targets(session=db, options=options)
    load_works(session=db, target_urls=target_urls, options=options)
    start_proxy()
    start_transaction_logger()
    return True
24,661
def docmd(cmd):
    """Execute a command."""
    if flag_echo:
        sys.stderr.write("executing: " + cmd + "\n")
    if flag_dryrun:
        return
    u.docmd(cmd)
24,662
def get_registration_form() -> ConvertedDocument:
    """
    Return the registration form parameters

    :return: Profile form data, plus login and password
    """
    form = [
        gen_field_row('Логин', 'login', 'text', validate_rule='string'),
        gen_field_row('Пароль', 'password', 'password'),
        gen_field_row('Токен', 'token', 'text', validate_rule='token')
    ] + convert_mongo_model(Profile)

    return form
24,663
def get_docker_stats(dut):
    """
    Get docker stats
    :param dut:
    :return:
    """
    command = 'docker stats -a --no-stream'
    output = st.show(dut, command)
    return output
24,664
def parse_commandline_arguments():
    """Parses command line arguments and adjusts internal data structures."""

    # Define script command line arguments
    parser = argparse.ArgumentParser(description='Run object detection inference on input image.')
    parser.add_argument('-w', '--workspace_dir',
                        help='sample workspace directory')
    parser.add_argument('-d', '--data',
                        help="Specify the data directory where it is saved in. $TRT_DATA_DIR will be overwritten by this argument.")
    args, _ = parser.parse_known_args()

    data_dir = os.environ.get('TRT_DATA_DIR', None) if args.data is None else args.data
    if data_dir is None:
        raise ValueError("Data directory must be specified by either `-d $DATA` or environment variable $TRT_DATA_DIR.")
    PATHS.set_data_dir_path(data_dir)

    # Set workspace dir path if passed by user
    if args.workspace_dir:
        PATHS.set_workspace_dir_path(args.workspace_dir)

    try:
        os.makedirs(PATHS.get_workspace_dir_path())
    except:
        pass

    # Verify Paths after adjustments. This also exits script if verification fails
    PATHS.verify_all_paths()

    return args
24,665
def fetch_latency(d: str, csim: bool = False): """Fetch the simulated latency, measured in cycles.""" tb_sim_report_dir = os.path.join( d, "tb" if not csim else "tb.csim", "solution1", "sim", "report" ) if not os.path.isdir(tb_sim_report_dir): return None tb_sim_report = get_single_file_with_ext(tb_sim_report_dir, "rpt") if not tb_sim_report: return None tb_sim_report = os.path.join(tb_sim_report_dir, tb_sim_report) if not os.path.isfile(tb_sim_report): return None latency = None with open(tb_sim_report, "r") as f: for line in reversed(f.readlines()): if latency: break comps = [x.strip() for x in line.strip().split("|")] # there are 9 columns, +2 before & after | # the 2nd column should give PASS. if len(comps) == 11 and comps[2].upper() == "PASS": latency = comps[-2] # from the last column. # The report is malformed. if not latency: return None try: # Will raise error if latency is not an integer. return int(latency) except: return None
24,666
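To illustrate the report parsing inside fetch_latency above, a small stand-alone sketch of how a co-simulation result row is split on "|" and the latency taken from the second-to-last field; the sample row is invented for illustration.

sample = "| tb_top | PASS | 0 | 0 | ap_clk | 10.00 ns | 4 | 2 | 1234 |"
comps = [x.strip() for x in sample.strip().split("|")]
# splitting leaves an empty field before the first and after the last "|",
# so 9 visible columns become 11 components
if len(comps) == 11 and comps[2].upper() == "PASS":
    print(int(comps[-2]))   # -> 1234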
def enthalpyvap(temp=None,pres=None,dvap=None,chkvals=False, chktol=_CHKTOL,temp0=None,pres0=None,dvap0=None,chkbnd=False, mathargs=None): """Calculate ice-vapour vapour enthalpy. Calculate the specific enthalpy of water vapour for ice and water vapour in equilibrium. :arg temp: Temperature in K. :type temp: float or None :arg pres: Pressure in Pa. :type pres: float or None :arg dvap: Water vapour density in kg/m3. If unknown, pass None (default) and it will be calculated. :type dvap: float or None :arg bool chkvals: If True (default False) and all values are given, this function will calculate the disequilibrium and raise a warning if the results are not within a given tolerance. :arg float chktol: Tolerance to use when checking values (default _CHKTOL). :arg temp0: Initial guess for the temperature in K. If None (default) then `_approx_p` is used. :type temp0: float or None :arg pres0: Initial guess for the pressure in Pa. If None (default) then `_approx_t` is used. :type pres0: float or None :arg dvap0: Initial guess for the water vapour density in kg/m3. If None (default) then `_approx_t` or `_approx_p` is used. :type dvap0: float or None :arg bool chkbnd: If True then warnings are raised when the given values are valid but outside the recommended bounds (default False). :arg mathargs: Keyword arguments to the root-finder :func:`_newton <maths3.newton>` (e.g. maxiter, rtol). If None (default) then no arguments are passed and default parameters will be used. :returns: Enthalpy in J/kg. :raises ValueError: If neither of temp or pres is provided. :raises RuntimeWarning: If the relative disequilibrium is more than chktol, if chkvals is True and all values are given. :Examples: >>> enthalpyvap(temp=270.) 2495132.21977 >>> enthalpyvap(pres=100.) 2463525.19629 """ temp, pres, dvap = eq_tp(temp=temp,pres=pres,dvap=dvap,chkvals=chkvals, chktol=chktol,temp0=temp0,pres0=pres0,dvap0=dvap0,chkbnd=chkbnd, mathargs=mathargs) hv = flu2.enthalpy(temp,dvap) return hv
24,667
async def async_setup_entry(hass, entry, async_add_entities): """Setup binary_sensor platform.""" coordinator = hass.data[DOMAIN] entities = [] for binary_sensor in BINARY_SENSOR_TYPES: _LOGGER.debug("Adding %s binary sensor", binary_sensor) entity = GrocyBinarySensor(coordinator, entry, binary_sensor) coordinator.entities.append(entity) entities.append(entity) async_add_entities(entities, True)
24,668
async def get_eng_hw(module: tuple[str, ...], task: str) -> Message:
    """
    Standard request for English homework
    """
    return await _get_eng_content('zadanie-{}-m-{}-z'.format(*module), task)
24,669
def _choose_split_axis(v: Variable) -> Axis: """ For too-large texture `v`, choose one axis which is the best one to reduce texture size by splitting `v` in that axis. Args: v: Variable, whose size is too large (= this variable has :code:`SplitTarget` attribute) Returns: axis """ ops = list(v.input_to) if v.output_from is not None: ops += [v.output_from] splittable_axes = list(v.order.axes) for op in ops: _op_splittable_axes = _listup_splittable_axis(v, op) + [attr.axis for attr in op.get_attribute(Tensorwise)] for a in list(splittable_axes): if a not in _op_splittable_axes: splittable_axes.remove(a) if len(splittable_axes) == 0: raise ValueError("No axis is splittable") # Calculate the size of a side of texture which will be changed when each axis is split # # ex) OrderNC, N=512, C=2048, texture(width=2048, height=512) # => If axis `N` is split, then height will be changed => N: 512 (=height) # If axis `C` is split, then width will be changed => C: 2048 (=width) # # ex) OrderNCHW, N=1, C=512, H=13, W=13, texture(width=2048, height=43) # => TexW == W*H*(partial of C) texture width consists of axis W, H and C. # TexH == (partial of C)*N texture height consists of axis C and N. # => N cannot be split => N: -1 # C is related both width and height. In this case, use large one. => C: 2048 # H is included in width => H: 2048 # W is also included in width => W: 2048 axis_corresponding_texture_size = AxisKeyDict() element_per_pixel = ChannelMode.elements_per_pixel(v) tex_h, tex_w = TextureShape.get(v) tex_w = (tex_w + element_per_pixel - 1) // element_per_pixel for a in v.order.axes: if v.shape_dict[a] == 1: # This axis cannot be split axis_corresponding_texture_size[a] = -1 elif v.stride_dict[a] >= tex_w * element_per_pixel: axis_corresponding_texture_size[a] = tex_h elif v.stride_dict[a] * v.shape_dict[a] >= tex_w * element_per_pixel: axis_corresponding_texture_size[a] = max(tex_h, tex_w) else: axis_corresponding_texture_size[a] = tex_w splittable_axes.sort(key=lambda a: axis_corresponding_texture_size[a], reverse=True) target_axis = splittable_axes[0] console.debug(f"===========================================================================") console.debug(f"{v}") console.debug(f" original order: {v.order}") console.debug(f" original shape: {v.shape}") console.debug(f" texture shape: {TextureShape.get(v)}") console.debug(f"") console.debug(f" splittable axis: {splittable_axes}") console.debug(f" split axis: {target_axis}") console.debug(f"") console.debug(f" related operators:") for related_op in ops: console.debug(f"---------------------------------------------------------------------------") traverse.dump_op(related_op) console.debug(f"") if axis_corresponding_texture_size[target_axis] <= 0: raise NotImplementedError(f"Variable is too large to handle in WebGL backend: {v}") return target_axis
24,670
def import_ratings(LIKED_RATING):
    """ Load the ratings CSV file into the Ratings and User classes """
    id_list = []
    import csv
    with open('lessRatings.csv', encoding='utf-8') as csv_file:
        csv_reader = csv.reader(csv_file, delimiter=',')
        line_count = 0
        # Count the rows and discount the header
        for row in csv_reader:
            if line_count == 0:
                # Header row
                line_count += 1
            else:
                line_count += 1
                # Add the imported data to the Rating class
                Ratings(row[0], row[1], row[2])
                # Add the imported data to the User class
                if row[0] in id_list:
                    # Add liked and disliked movies to the user instance
                    if float(row[2]) >= LIKED_RATING:
                        # If the rating is at or above the liked rating, add it to the user's liked movies set
                        users[int(row[0]) - 1].add_liked(row[1])
                    else:
                        # Otherwise add it to the disliked movies set
                        users[int(row[0]) - 1].add_disliked(row[1])
                # If the user ID changes, create a new user
                else:
                    User(row[0])
                    id_list.append(row[0])
                    if float(row[2]) >= LIKED_RATING:
                        users[int(row[0]) - 1].add_liked(row[1])
                    else:
                        users[int(row[0]) - 1].add_disliked(row[1])
24,671
def get_image_blobs(pb): """ Get an image from the sensor connected to the MicroPython board, find blobs and return the image, a list of blobs, and the time it took to find the blobs (in [ms]) """ raw = json.loads(run_on_board(pb, script_get_image, no_print=True)) img = np.flip(np.transpose(np.reshape(raw, (8, 8)))) time_str = run_on_board(pb, script_get_blob_list, no_print=True) t_ms = float(time_str.split("= ")[1].split("m")[0]) blobs_str = run_on_board(pb, script_print_blob_list, no_print=True) blobs_str = blobs_str.replace("nan", "0") blobs = json.loads(blobs_str.replace('(', '[').replace(')', ']')) return img, blobs, t_ms
24,672
def update_alliances_from_esi(): """ Updates the alliances properties from the ESI. """ for alliance in Alliance.objects: scheduler.add_job(update_alliance_from_esi, args=(alliance,))
24,673
def classification_report(y_true, y_pred, digits=2, suffix=False):
    """Build a text report showing the main classification metrics.

    Args:
        y_true : 2d array. Ground truth (correct) target values.
        y_pred : 2d array. Estimated targets as returned by a classifier.
        digits : int. Number of digits for formatting output floating point values.

    Returns:
        report : string. Text summary of the precision, recall, F1 score for each class.

    Examples:
        >>> from seqeval.metrics import classification_report
        >>> y_true = [['O', 'O', 'O', 'B-MISC', 'I-MISC', 'I-MISC', 'O'], ['B-PER', 'I-PER', 'O']]
        >>> y_pred = [['O', 'O', 'B-MISC', 'I-MISC', 'I-MISC', 'I-MISC', 'O'], ['B-PER', 'I-PER', 'O']]
        >>> print(classification_report(y_true, y_pred))
                     precision    recall  f1-score   support
        <BLANKLINE>
               MISC       0.00      0.00      0.00         1
                PER       1.00      1.00      1.00         1
        <BLANKLINE>
        avg / total       0.50      0.50      0.50         2
        <BLANKLINE>
    """
    true_entities = set(get_entities(y_true, suffix))
    pred_entities = set(get_entities(y_pred, suffix))

    name_width = 0
    d1 = defaultdict(set)
    d2 = defaultdict(set)
    for e in true_entities:
        d1[e[0]].add((e[1], e[2]))
        name_width = max(name_width, len(e[0]))
    for e in pred_entities:
        d2[e[0]].add((e[1], e[2]))

    last_line_heading = 'avg / total'
    width = max(name_width, len(last_line_heading), digits)

    headers = ["precision", "recall", "f1-score", "support"]
    head_fmt = u'{:>{width}s} ' + u' {:>9}' * len(headers)
    report = head_fmt.format(u'', *headers, width=width)
    report += u'\n\n'

    row_fmt = u'{:>{width}s} ' + u' {:>9.{digits}f}' * 3 + u' {:>9}\n'

    ps, rs, f1s, s = [], [], [], []
    for type_name, true_entities in d1.items():
        pred_entities = d2[type_name]
        nb_correct = len(true_entities & pred_entities)
        nb_pred = len(pred_entities)
        nb_true = len(true_entities)

        # report fractions in [0, 1] so the output matches the docstring example
        p = nb_correct / nb_pred if nb_pred > 0 else 0
        r = nb_correct / nb_true if nb_true > 0 else 0
        f1 = 2 * p * r / (p + r) if p + r > 0 else 0

        report += row_fmt.format(*[type_name, p, r, f1, nb_true], width=width, digits=digits)

        ps.append(p)
        rs.append(r)
        f1s.append(f1)
        s.append(nb_true)

    report += u'\n'

    # compute averages
    report += row_fmt.format(last_line_heading,
                             np.average(ps, weights=s),
                             np.average(rs, weights=s),
                             np.average(f1s, weights=s),
                             np.sum(s),
                             width=width, digits=digits)

    return report
24,674
def test_open_and_close(get_camera): """ Tests the uvccamera's open, open_count, and close methods. """ camera = get_camera connected = camera.is_device_connected() assert connected is True orig_count = camera.open_count() assert isinstance(orig_count, int) count = camera.close() assert isinstance(count, int) assert camera.open_count() == (orig_count - 1) assert camera.open_count() == count # TODO(EB/SR) Update this when the uvccamera device has other methods # with pytest.raises(PySproutError) as execinfo: # # Any call should fail # camera.white_balance() # assert execinfo.value.message == 'Device is not open' new_count = camera.open() assert isinstance(new_count, int) assert new_count == orig_count assert camera.open_count() == (count + 1) # TODO(EB/SR) Update this when the uvccamera device has other methods # # Any call should now work # camera.white_balance()
24,675
def tidy_conifer(ddf: DataFrame) -> DataFrame: """Tidy up the raw conifer output.""" result = ddf.drop(columns=["marker", "identifier", "read_lengths", "kraken"]) result[["name", "taxonomy_id"]] = result["taxa"].str.extract( r"^(?P<name>[\w ]+) \(taxid (?P<taxonomy_id>\d+)\)$", expand=True ) return result.drop(columns=["taxa"]).categorize( columns=["name", "taxonomy_id"], index=False )
24,676
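A small pandas-only sketch (not the original dask pipeline) showing what the regular expression in tidy_conifer above pulls out of a "taxa" string; the sample value is made up for illustration.

import pandas as pd

taxa = pd.Series(["Homo sapiens (taxid 9606)"])
extracted = taxa.str.extract(
    r"^(?P<name>[\w ]+) \(taxid (?P<taxonomy_id>\d+)\)$", expand=True
)
print(extracted)   # columns 'name' and 'taxonomy_id' -> "Homo sapiens", "9606"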
def test_set_illegal_backing_type(): """The backing type for a set MUST be one of S/N/B, not BOOL""" for typedef in [Boolean, Set(Integer)]: with pytest.raises(TypeError): Set(typedef)
24,677
def load(name=None): """ Loads or initialises a convolutional neural network. """ if name is not None: path = os.path.join(AmfConfig.get_appdir(), 'trained_networks', name) else: path = AmfConfig.get('model') if path is not None and os.path.isfile(path): print(f'* Trained model: {path}') model = keras.models.load_model(path) if model.name == CNN1_NAME: AmfConfig.set('level', 1) print('* Classes: colonised (M+), non-colonised (M−), background (Other).') else: # elif model.name == CNN2_NAME AmfConfig.set('level', 2) print('* Classes: arbuscules (A), vesicles (V), ' 'hyphopodia (H), intraradical hyphae (IH).') return model else: if AmfConfig.get('run_mode') == 'train': print('* Initializes a new network.') if AmfConfig.get('level') == 1: return create_cnn1() else: return create_cnn2() else: # missing pre-trained model in prediction mode. AmfLog.error('A pre-trained model is required in prediction mode', exit_code=AmfLog.ERR_NO_PRETRAINED_MODEL)
24,678
def get_duplicates(lst): """Return a list of the duplicate items in the input list.""" return [item for item, count in collections.Counter(lst).items() if count > 1]
24,679
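A quick usage note for get_duplicates above (it relies on collections.Counter, so the collections module must be imported in the enclosing file):

import collections

print(get_duplicates([1, 2, 2, 3, 3, 3, 4]))   # -> [2, 3]
print(get_duplicates(["a", "b", "a"]))         # -> ['a']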
def relu(x, alpha=0):
    """
    Rectified Linear Unit. If alpha is between 0 and 1, the function performs
    leaky relu. alpha values are commonly between 0.1 and 0.3 for leaky relu.

    Parameters
    ----------
    x : numpy array
        Values to be activated.
    alpha : float, optional
        The scale factor applied to negative inputs. Typical leaky-relu values
        are between 0.1 and 0.3. The default is 0 (plain relu).

    Returns
    -------
    z : numpy array
        The activated values.
    """
    z = x.copy()
    z[x < 0] = z[x < 0] * alpha
    return z
24,680
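A short usage sketch for relu above, showing plain and leaky behaviour on a NumPy array:

import numpy as np

x = np.array([-2.0, -0.5, 0.0, 1.5])
print(relu(x))              # -> [ 0.    0.    0.    1.5 ]
print(relu(x, alpha=0.1))   # -> [-0.2  -0.05  0.    1.5 ]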
def build_unique_dict(controls): """Build the disambiguated list of controls Separated out to a different function so that we can get the control identifiers for printing. """ name_control_map = UniqueDict() # collect all the possible names for all controls # and build a list of them for ctrl in controls: ctrl_names = get_control_names(ctrl, controls) # for each of the names for name in ctrl_names: name_control_map[name] = ctrl return name_control_map
24,681
def serialize_thrift_object(thrift_obj, proto_factory=Consts.PROTO_FACTORY): """Serialize thrift data to binary blob :param thrift_obj: the thrift object :param proto_factory: protocol factory, set default as Compact Protocol :return: string the serialized thrift payload """ return Serializer.serialize(proto_factory(), thrift_obj)
24,682
def signal( fn: Optional[WorkflowSignalFunc] = None, *, name: Optional[str] = None, dynamic: Optional[bool] = False, ): """Decorator for a workflow signal method. This is set on any async or non-async method that you wish to be called upon receiving a signal. If a function overrides one with this decorator, it too must be decorated. Signal methods can only have positional parameters. Best practice for non-dynamic signal methods is to only take a single object/dataclass argument that can accept more fields later if needed. Return values from signal methods are ignored. Args: fn: The function to decorate. name: Signal name. Defaults to method ``__name__``. Cannot be present when ``dynamic`` is present. dynamic: If true, this handles all signals not otherwise handled. The parameters of the method must be self, a string name, and a ``*args`` positional varargs. Cannot be present when ``name`` is present. """ def with_name(name: Optional[str], fn: WorkflowSignalFunc) -> WorkflowSignalFunc: if not name: _assert_dynamic_signature(fn) # TODO(cretz): Validate type attributes? setattr(fn, "__temporal_signal_definition", _SignalDefinition(name=name, fn=fn)) return fn if name is not None or dynamic: if name is not None and dynamic: raise RuntimeError("Cannot provide name and dynamic boolean") return partial(with_name, name) if fn is None: raise RuntimeError("Cannot create signal without function or name or dynamic") return with_name(fn.__name__, fn)
24,683
def cmap_RdBu(values, vmin=None, vmax=None):
    """Generates a blue/red colorscale with the white value centered on the value 0

    Parameters
    ----------
    values : pandas Series, numpy array, list or tuple
        List of values to be used for creating the color map
    vmin : float, optional
        Minimum value in the color map; if None then min(values) is used
    vmax : float, optional
        Maximum value in the color map; if None then max(values) is used

    Returns
    -------
    matplotlib.colors.LinearSegmentedColormap
        Diverging blue-white-red colormap with white positioned at the value 0
        (assumes min(values) < 0 < max(values)).
    """
    if vmin is not None:
        scoremin = vmin
    else:
        scoremin = min(values)
    if vmax is not None:
        scoremax = vmax
    else:
        scoremax = max(values)
    from matplotlib.colors import LinearSegmentedColormap
    cmap2 = LinearSegmentedColormap.from_list('mycmap', [(0, 'blue'),
                                                         (-scoremin / (scoremax - scoremin), 'white'),
                                                         (1, 'red')])
    return cmap2
24,684
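A hedged matplotlib usage sketch for cmap_RdBu above; to keep white anchored at 0 when drawing, the same vmin/vmax range used to build the colormap must also be passed to the plotting call. The data here are random and purely illustrative.

import numpy as np
import matplotlib.pyplot as plt

values = np.random.uniform(-1.0, 3.0, size=50)
cmap = cmap_RdBu(values)
plt.scatter(np.arange(values.size), values, c=values,
            cmap=cmap, vmin=values.min(), vmax=values.max())
plt.colorbar()
plt.show()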
def _add_noise(audio, snr):
    """
    Add Gaussian noise to a signal at a given SNR.
    :param audio(np.array):
    :param snr(float): signal-to-noise ratio in dB
    :return: audio with added noise
    """
    audio_mean = np.mean(audio**2)
    audio_mean_db = 10 * np.log10(audio_mean)
    # noise power (dB) = signal power (dB) - SNR (dB)
    noise_mean_db = audio_mean_db - snr
    noise_mean = 10 ** (noise_mean_db / 10)
    return audio + np.random.normal(0, np.sqrt(noise_mean), len(audio))
24,685
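A quick self-check sketch for _add_noise above (using the corrected noise-power formula): generate a sine tone, add noise at a requested SNR and measure the realised SNR, which should land close to the target up to sampling error.

import numpy as np

tone = np.sin(2 * np.pi * 440 * np.arange(16000) / 16000.0)
target_snr = 20.0
noisy = _add_noise(tone, target_snr)
noise = noisy - tone
measured_snr = 10 * np.log10(np.mean(tone ** 2) / np.mean(noise ** 2))
print(round(measured_snr, 1))   # approximately 20.0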
def mlp_net(): """The MLP test from Relay. """ from tvm.relay.testing import mlp return mlp.get_net(1)
24,686
def build_ind_val_dsets(dimensions, is_spectral=True, verbose=False, base_name=None): """ Creates VirtualDatasets for the position or spectroscopic indices and values of the data. Remember that the contents of the dataset can be changed if need be after the creation of the datasets. For example if one of the spectroscopic dimensions (e.g. - Bias) was sinusoidal and not linear, The specific dimension in the Spectroscopic_Values dataset can be manually overwritten. Parameters ---------- dimensions : Dimension or array-like of Dimension objects Sequence of Dimension objects that provides all necessary instructions for constructing the indices and values datasets is_spectral : bool, optional. default = True Spectroscopic (True) or Position (False) verbose : Boolean, optional Whether or not to print statements for debugging purposes base_name : str / unicode, optional Prefix for the datasets. Default: 'Position_' when is_spectral is False, 'Spectroscopic_' otherwise Returns ------- ds_inds : VirtualDataset Reduced Spectroscopic indices dataset ds_vals : VirtualDataset Reduces Spectroscopic values dataset Notes ----- `steps`, `initial_values`, `labels`, and 'units' must be the same length as `dimensions` when they are specified. Dimensions should be in the order from fastest varying to slowest. """ warn('build_ind_val_dsets is available only for legacy purposes and will be REMOVED in a future release.\n' 'Please consider using write_ind_val_dsets in hdf_utils instead', DeprecationWarning) if isinstance(dimensions, Dimension): dimensions = [dimensions] if not isinstance(dimensions, (list, np.ndarray, tuple)): raise TypeError('dimensions should be array-like ') if not np.all([isinstance(x, Dimension) for x in dimensions]): raise TypeError('dimensions should be a sequence of Dimension objects') if base_name is not None: if not isinstance(base_name, (str, unicode)): raise TypeError('base_name should be a string') if not base_name.endswith('_'): base_name += '_' else: base_name = 'Position_' if is_spectral: base_name = 'Spectroscopic_' unit_values = [x.values for x in dimensions] indices, values = build_ind_val_matrices(unit_values, is_spectral=is_spectral) if verbose: print('Indices:') print(indices) print('Values:') print(values) # Create the slices that will define the labels region_slices = get_aux_dset_slicing([x.name for x in dimensions], is_spectroscopic=is_spectral) # Create the VirtualDataset for both Indices and Values ds_indices = VirtualDataset(base_name + 'Indices', indices, dtype=INDICES_DTYPE) ds_values = VirtualDataset(base_name + 'Values', VALUES_DTYPE(values), dtype=VALUES_DTYPE) for dset in [ds_indices, ds_values]: dset.attrs['labels'] = region_slices dset.attrs['units'] = [x.units for x in dimensions] return ds_indices, ds_values
24,687
def chunks(lst, n): """Chunks. Parameters ---------- lst : list or dict The list or dictionary to return chunks from. n : int The number of records in each chunk. """ if isinstance(lst, list): for i in tqdm(range(0, len(lst), n)): yield lst[i:i + n] elif isinstance(lst, dict): keys = list(lst.keys()) for i in tqdm(range(0, len(keys), n)): yield {k: lst[k] for k in keys[i:i + n]}
24,688
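A brief usage sketch for chunks above (tqdm only wraps the progress bar and must be importable in the enclosing module):

data = list(range(7))
for part in chunks(data, 3):
    print(part)        # [0, 1, 2] / [3, 4, 5] / [6]

lookup = {"a": 1, "b": 2, "c": 3}
for part in chunks(lookup, 2):
    print(part)        # {'a': 1, 'b': 2} / {'c': 3}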
def groupByX(grp_fn, messages): """ Returns a dictionary keyed by the requested group. """ m_grp = {} for msg in getIterable(messages): # Ignore messages that we don't have all the timing for. if msg.isComplete() or not ignore_incomplete: m_type = grp_fn(msg) if m_type in m_grp: m_grp[m_type].append(msg) else: m_grp[m_type] = [msg] return m_grp
24,689
def regular_poly_circ_rad_to_side_length(n_sides, rad): """Find side length that gives regular polygon with `n_sides` sides an equivalent area to a circle with radius `rad`.""" p_n = math.pi / n_sides return 2 * rad * math.sqrt(p_n * math.tan(p_n))
24,690
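A quick numeric check of the area-matching claim in regular_poly_circ_rad_to_side_length above: a regular n-gon with side s has area n*s^2 / (4*tan(pi/n)), which should equal the circle area pi*r^2 for the returned side length.

import math

n_sides, rad = 6, 2.0
s = regular_poly_circ_rad_to_side_length(n_sides, rad)
poly_area = n_sides * s ** 2 / (4 * math.tan(math.pi / n_sides))
print(round(poly_area, 6), round(math.pi * rad ** 2, 6))   # both ~12.566371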
def dbl_colour(days): """ Return a colour corresponding to the number of days to double :param days: int :return: str """ if days >= 28: return "orange" elif 0 < days < 28: return "red" elif days < -28: return "green" else: return "yellow"
24,691
def error(s, buffer=''):
    """Error msg"""
    prnt(buffer, '%s%s %s' % (weechat.prefix('error'), script_nick, s))
    if weechat.config_get_plugin('debug'):
        import traceback
        # sys.exc_type is Python 2 only; use exc_info() so this also works on Python 3
        if traceback.sys.exc_info()[0]:
            trace = traceback.format_exc()
            prnt('', trace)
24,692
def _infraslow_pac_model( alg, kernel, permute=False, seed=None, output_dir=None, rois=None): """Predict DCCS using infraslow PAC.""" if not output_dir: output_dir = os.path.abspath(os.path.dirname(__file__)) if not rois: rois = glasser_rois infraslow_pac = utils.load_phase_amp_coupling(rois=rois) latent_vars = ml_tools.plsc(infraslow_pac, card_sort_task_data) feature_selection_grid = { 'C': (.01, 1, 10, 100), "gamma": np.logspace(-2, 2, 5) } regression_grid = None if alg == 'SVM': regression_grid = { 'C': (.01, 1, 10, 100, 1000), "gamma": (1e-8, 1e-7, 1e-6, 1e-5, 1e-4, 1e-3, 1e-2, 1e-1), # 'degree': (2, 3, 4, 5) } ML_pipe = ml_tools.ML_pipeline( # predictors=infraslow_pac, predictors=latent_vars, targets=card_sort_task_data, run_PLSC=False, feature_selection_gridsearch=feature_selection_grid, model_gridsearch=regression_grid, feature_names=glasser_rois, session_names=meg_sessions, random_state=seed, debug=True) ML_pipe.run_predictions(model=alg, model_kernel=kernel) if not permute: ml_tools.save_outputs(ML_pipe, output_dir) else: ML_pipe.debug = False perm_dict = ml_tools.perm_tests(ML_pipe, n_iters=permute) utils.save_xls( perm_dict, os.path.join(output_dir, 'permutation_tests.xlsx'))
24,693
def create_model(data_format): """Model to recognize digits in the MNIST data set. Network structure is equivalent to: https://github.com/tensorflow/tensorflow/blob/r1.5/tensorflow/examples/tutorials/mnist/mnist_deep.py and https://github.com/tensorflow/models/blob/master/tutorials/image/mnist/convolutional.py But uses the tf.keras API. Args: data_format: Either 'channels_first' or 'channels_last'. 'channels_first' is typically faster on GPUs while 'channels_last' is typically faster on CPUs. See https://www.tensorflow.org/performance/performance_guide#data_formats Returns: A tf.keras.Model. """ # pylint: disable=no-member if data_format == 'channels_first': input_shape = [1, 28, 28] else: assert data_format == 'channels_last' input_shape = [28, 28, 1] return Sequential( [ Reshape(target_shape=input_shape, input_shape=(28 * 28,)), Conv2D(32, 5, padding='same', data_format=data_format, activation=tf.nn.relu, kernel_initializer='random_uniform'), MaxPool2D((2, 2), (2, 2), padding='same', data_format=data_format), Conv2D(64, 5, padding='same', data_format=data_format, activation=tf.nn.relu, kernel_initializer='random_uniform'), MaxPool2D((2, 2), (2, 2), padding='same', data_format=data_format), Flatten(), Dense(1024, activation=tf.nn.relu, kernel_initializer='random_uniform'), Dropout(0.4), Dense(10, kernel_initializer='random_uniform') ])
24,694
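A hedged usage sketch for create_model above, compiling it for sparse-label MNIST training; the optimizer and metrics are illustrative choices, not taken from the original script. from_logits=True matches the final Dense layer having no activation.

import tensorflow as tf

model = create_model('channels_last')
model.compile(optimizer='adam',
              loss=tf.keras.losses.SparseCategoricalCrossentropy(from_logits=True),
              metrics=['accuracy'])
model.summary()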
def run_noncentered_hmc(model_config, num_samples=2000, burnin=1000, num_leapfrog_steps=4, num_adaptation_steps=500, num_optimization_steps=2000): """Given a (centred) model, this function transforms it to a fully non-centred one, and runs HMC on the reparametrised model. """ tf.reset_default_graph() return run_parametrised_hmc( model_config=model_config, interceptor=ed_transforms.ncp, num_samples=num_samples, burnin=burnin, num_leapfrog_steps=num_leapfrog_steps, num_adaptation_steps=num_adaptation_steps, num_optimization_steps=num_optimization_steps)
24,695
def get_project_apps(in_app_list): """ Application definitions for app name. Args: in_app_list: (list) - names of applications Returns: tuple (list, dictionary) - list of dictionaries with apps definitions dictionary of warnings """ apps = [] warnings = collections.defaultdict(list) if not in_app_list: return apps, warnings missing_app_msg = "Missing definition of application" application_manager = ApplicationManager() for app_name in in_app_list: if application_manager.applications.get(app_name): apps.append({"name": app_name}) else: warnings[missing_app_msg].append(app_name) return apps, warnings
24,696
def hang_me(timeout_secs=10000): """Used for debugging tests.""" print("Sleeping. Press Ctrl-C to continue...") try: time.sleep(timeout_secs) except KeyboardInterrupt: print("Done sleeping")
24,697
def read(G): """ Wrap a NetworkX graph class by an ILPGraph class The wrapper class is used store the graph and the related variables of an optimisation problem in a single entity. :param G: a `NetworkX graph <https://networkx.org/documentation/stable/reference/introduction.html#graphs>`__ :return: an :py:class:`~graphilp.imports.ilpgraph.ILPGraph` """ result = ILPGraph() result.set_nx_graph(G) return result
24,698
def slog_det(obs, **kwargs):
    """Computes the determinant of a matrix of Obs via np.linalg.slogdet."""
    def _mat(x):
        dim = int(np.sqrt(len(x)))
        if np.sqrt(len(x)) != dim:
            raise Exception('Input has to have dim**2 entries')

        mat = []
        for i in range(dim):
            row = []
            for j in range(dim):
                row.append(x[j + dim * i])
            mat.append(row)

        (sign, logdet) = anp.linalg.slogdet(np.array(mat))
        return sign * anp.exp(logdet)

    if isinstance(obs, np.ndarray):
        return derived_observable(_mat, (1 * (obs.ravel())).tolist(), **kwargs)
    elif isinstance(obs, list):
        return derived_observable(_mat, obs, **kwargs)
    else:
        raise TypeError('Improper type of input.')
24,699