content
stringlengths
22
815k
id
int64
0
4.91M
def test_thresholds(numba_conditional): """Test automatic threshold calculations.""" # within subjects rng = np.random.RandomState(0) X = rng.randn(10, 1, 1) + 0.08 want_thresh = -stats.t.ppf(0.025, len(X) - 1) assert 0.03 < stats.ttest_1samp(X[:, 0, 0], 0)[1] < 0.05 my_fun = partial(ttest_1samp_no_p) with catch_logging() as log: with pytest.warns(RuntimeWarning, match='threshold is only valid'): out = permutation_cluster_1samp_test( X, stat_fun=my_fun, seed=0, verbose=True, out_type='mask') log = log.getvalue() assert str(want_thresh)[:6] in log assert len(out[1]) == 1 # 1 cluster assert_allclose(out[2], 0.033203, atol=1e-6) # between subjects Y = rng.randn(10, 1, 1) Z = rng.randn(10, 1, 1) - 0.7 X = [X, Y, Z] want_thresh = stats.f.ppf(1. - 0.05, 2, sum(len(a) for a in X) - len(X)) p = stats.f_oneway(*X)[1] assert 0.03 < p < 0.05 my_fun = partial(f_oneway) # just to make the check fail with catch_logging() as log: with pytest.warns(RuntimeWarning, match='threshold is only valid'): out = permutation_cluster_test(X, tail=1, stat_fun=my_fun, seed=0, verbose=True, out_type='mask') log = log.getvalue() assert str(want_thresh)[:6] in log assert len(out[1]) == 1 # 1 cluster assert_allclose(out[2], 0.041992, atol=1e-6) with pytest.warns(RuntimeWarning, match='Ignoring argument "tail"'): permutation_cluster_test(X, tail=0, out_type='mask') # nan handling in TFCE X = np.repeat(X[0], 2, axis=1) X[:, 1] = 0 with pytest.warns(RuntimeWarning, match='invalid value'): # NumPy out = permutation_cluster_1samp_test( X, seed=0, threshold=dict(start=0, step=0.1), out_type='mask') assert (out[2] < 0.05).any() assert not (out[2] < 0.05).all() X[:, 0] = 0 with pytest.raises(RuntimeError, match='finite'): with np.errstate(invalid='ignore'): permutation_cluster_1samp_test( X, seed=0, threshold=dict(start=0, step=0.1), buffer_size=None, out_type='mask')
5,327,700
def upper_bounds_max_ppr_target(adj, alpha, fragile, local_budget, target):
    """
    Computes the upper bound for x_target for any teleport vector.

    Parameters
    ----------
    adj : sp.spmatrix, shape [n, n]
        Sparse adjacency matrix.
    alpha : float
        (1-alpha) teleport[v] is the probability to teleport to node v.
    fragile : np.ndarray, shape [?, 2]
        Fragile edges that are under our control.
    local_budget : np.ndarray, shape [n]
        Maximum number of local flips per node.
    target : int
        Target node.

    Returns
    -------
    upper_bounds: np.ndarray, shape [n]
        Computed upper bounds.
    """
    num_nodes = adj.shape[0]

    # Indicator vector of the target node; serves as both reward and teleport.
    indicator = np.zeros(num_nodes)
    indicator[target] = 1

    # Worst-case (adversarial) edge configuration for this target.
    opt_fragile, _ = policy_iteration(adj=adj, alpha=alpha, fragile=fragile,
                                      local_budget=local_budget,
                                      reward=indicator, teleport=indicator)
    adj_flipped = flip_edges(adj, opt_fragile)

    # Solve (I - alpha * D^-1 A) x = e_target; scaled by (1 - alpha) this is
    # one column of the PPR matrix, i.e. the target's score for ANY teleport
    # vector (any row).
    transition = sp.diags(1 / adj_flipped.sum(1).A1) @ adj_flipped
    system = sp.eye(num_nodes) - alpha * transition
    ppr_column = (1 - alpha) * gmres(system, indicator)[0]

    # Normalize by the correction term for the flipped fragile edges.
    return ppr_column / correction_term(adj, opt_fragile, fragile)
5,327,701
def import_layer_data(node, path):
    """Import ngLayerData from JSON file.

    Args:
        node (str): Name of the mesh. Used to find the JSON file.
        path (str): The parent folder where the file is saved.

    Returns:
        str: The raw ngLayer data (somehow, this is a string!)
    """
    # Strip any DAG path and namespace from the node name.
    base_name = node.rsplit("|")[-1].rsplit(":")[-1]
    weight_file = os.path.join(path, "{0}.json".format(base_name))

    if os.path.exists(weight_file):
        with open(weight_file, "r") as stream:
            return stream.read()

    LOG.info("%r not found.", weight_file)
    return None
5,327,702
def cramers_corrected_stat(contingency_table):
    """Compute the bias-corrected Cramer's V statistic.

    Measures categorical-categorical association (Bergsma-Wicher
    correction).

    :param contingency_table: 2-D table of observed counts (DataFrame or
        ndarray); ``.sum().sum()`` must yield the total count.
    :return: corrected Cramer's V in [0, 1], or ``np.nan`` when the chi2
        test is undefined (e.g. a zero row/column) or when the corrected
        denominator degenerates to zero.
    """
    try:
        chi2 = chi2_contingency(contingency_table)[0]
    except ValueError:
        # chi2_contingency rejects tables with zero expected frequencies.
        return np.nan  # np.NaN was removed in NumPy 2.0
    n = contingency_table.sum().sum()
    phi2 = chi2 / n
    r, k = contingency_table.shape
    # Bias-corrected dimensions and effect size.
    r_corrected = r - (((r - 1) ** 2) / (n - 1))
    k_corrected = k - (((k - 1) ** 2) / (n - 1))
    phi2_corrected = max(0, phi2 - ((k - 1) * (r - 1)) / (n - 1))
    denominator = min(k_corrected - 1, r_corrected - 1)
    if denominator <= 0:
        # Degenerate (e.g. single-row/column) table: V is undefined.
        return np.nan
    return (phi2_corrected / denominator) ** 0.5
5,327,703
def p_domain_def(p):
    """domain_def : LPAREN DOMAIN_KEY NAME RPAREN"""
    # PLY/yacc production rule: the docstring above IS the grammar and must
    # not be edited.  The rule's semantic value is the domain NAME token
    # (p[3]); the parentheses and keyword tokens are discarded.
    p[0] = p[3]
5,327,704
def test_utils_return_htmldir_pathname_htmldir_name_not_given(tmp_path):
    """Raises exception if HTML directory name is deliberately omitted."""
    root = tmp_path
    # Work from a subdirectory of the root so the helper must rely on the
    # explicit htmldir argument rather than the cwd.
    workdir = Path(tmp_path) / "a"
    workdir.mkdir()
    os.chdir(workdir)
    with pytest.raises(SystemExit):
        get_htmldir_path(rootdir=root, htmldir=None)
5,327,705
def frame_comps_from_set(frame_set):
    """
    A `set` of all component names every defined within any frame class in
    this `TransformGraph`.

    Broken out of the class so this can be called on a temporary frame set
    to validate new additions to the transform graph before actually adding
    them.
    """
    # Collect every representation-mapping frame name across all classes.
    return {
        rep_map.framename
        for frame_cls in frame_set
        for mappings in frame_cls._frame_specific_representation_info.values()
        for rep_map in mappings
    }
5,327,706
def generate_keyframe_chunks(animated_rotations, animated_locations, animated_scales, num_frames, chunksize):
    """
    This function has a very high bug potential...

    Splits per-bone keyframe dicts into fixed-size chunks suitable for the
    DSCS animation format and wraps them in ChunkHolder instances.

    NOTE(review): animated_* are assumed to be {bone_id: {frame: value}}
    mappings -- confirm against the callers of populate_frames.
    """
    # These lines create lists of length num_frames with None for frames with no data
    rotations = populate_frames(num_frames, animated_rotations)
    locations = populate_frames(num_frames, animated_locations)
    scales = populate_frames(num_frames, animated_scales)

    # The above is done so that the frames can be easily chunked by the following three lines:
    rotations = chunk_frames(rotations, chunksize)
    locations = chunk_frames(locations, chunksize)
    scales = chunk_frames(scales, chunksize)

    # And now we can iterate through the chunks and strip out the None values, and save the results
    # We also might need to perform some interpolation inside these functions in order to satisfy the requirements of
    # the DSCS animation format
    # Also need to isolate the final frame in here for the same reasons
    # Rotations interpolate with slerp; locations/scales with lerp.
    rotation_keyframe_chunks_data, rotation_bitvector_data = strip_and_validate_all_bones(rotations, chunksize, slerp)
    location_keyframe_chunks_data, location_bitvector_data = strip_and_validate_all_bones(locations, chunksize, lerp)
    scale_keyframe_chunks_data, scale_bitvector_data = strip_and_validate_all_bones(scales, chunksize, lerp)

    # Now we can bundle all the chunks into a sequential list, ready for turning into KeyframeChunks instances
    # chunk_data[i] = [rotations-by-bone, locations-by-bone, scales-by-bone] for chunk i.
    chunk_data = [[{}, {}, {}] for _ in range((num_frames // chunksize) + 1)]
    for bone_idx, rotation_chunks in rotation_keyframe_chunks_data.items():
        for i, rotation_data in enumerate(rotation_chunks):
            chunk_data[i][0][bone_idx] = rotation_data
    for bone_idx, location_chunks in location_keyframe_chunks_data.items():
        for i, location_data in enumerate(location_chunks):
            chunk_data[i][1][bone_idx] = location_data
    for bone_idx, scale_chunks in scale_keyframe_chunks_data.items():
        for i, scale_data in enumerate(scale_chunks):
            chunk_data[i][2][bone_idx] = scale_data

    # We also need the final elements of each animation
    # (last keyframe value per bone, wrapped in a single-element list).
    final_rotations = {bone_id: [list(data.values())[-1]] for bone_id, data in animated_rotations.items()}
    final_locations = {bone_id: [list(data.values())[-1]] for bone_id, data in animated_locations.items()}
    final_scales = {bone_id: [list(data.values())[-1]] for bone_id, data in animated_scales.items()}

    chunks = []
    # All chunks except the last get a regular ChunkHolder.
    for chunk_idx, chunk_datum in enumerate(chunk_data[:-1]):
        r_bitvecs = [rotation_bitvector_data[bone_id][chunk_idx] for bone_id in rotation_bitvector_data]
        l_bitvecs = [location_bitvector_data[bone_id][chunk_idx] for bone_id in location_bitvector_data]
        s_bitvecs = [scale_bitvector_data[bone_id][chunk_idx] for bone_id in scale_bitvector_data]
        chunks.append(ChunkHolder(*chunk_datum, r_bitvecs, l_bitvecs, s_bitvecs, chunksize))

    # The penultimate chunk may be shorter than chunksize, hence the
    # dedicated constructor and the explicit length argument.
    pen_r_bitvecs = [rotation_bitvector_data[bone_id][-1] for bone_id in rotation_bitvector_data]
    pen_l_bitvecs = [location_bitvector_data[bone_id][-1] for bone_id in location_bitvector_data]
    pen_s_bitvecs = [scale_bitvector_data[bone_id][-1] for bone_id in scale_bitvector_data]
    chunks.append(ChunkHolder.init_penultimate_chunk(*chunk_data[-1], pen_r_bitvecs, pen_l_bitvecs, pen_s_bitvecs, len(pen_r_bitvecs[0])))

    # Final single-frame chunk: every bone is marked present ('1').
    chunks.append(ChunkHolder(final_rotations, final_locations, final_scales,
                              ['1' for _ in final_rotations], ['1' for _ in final_locations], ['1' for _ in final_scales],
                              1))

    return chunks
5,327,707
def collection_tail(path_string): """Walk the path, return the tail collection""" # pylint: disable=consider-using-enumerate coll = None parts = extract_path(path_string) if parts: try: last_i = len(parts) - 1 coll = bpy.data.collections[parts[0]] for i in range(1, len(parts)): if i != last_i or \ is_path_terminated(path_string) or \ coll.children.get(parts[i]): coll = coll.children[parts[i]] # Collection else: break # Blender Object except KeyError: return None return coll
5,327,708
def create_raw(df_dev, most_likely_values=None):
    """Pivot a device event log into a wide time-indexed frame.

    return df:
    | time | dev_1 | .... | dev_n |
    --------------------------------
    | ts1  | 1     | .... | 0     |

    Parameters
    ----------
    df_dev : pd.DataFrame
        Long-format device events with TIME, DEVICE and VAL columns.
    most_likely_values : pd.DataFrame, optional
        Per-device most likely state (DEVICE -> 'ml_state'); computed from
        df_dev when omitted.
    """
    df_dev = df_dev.copy()
    # One column per device, one row per timestamp; cells are NaN where a
    # device reported nothing at that time.
    df = df_dev.pivot(index=TIME, columns=DEVICE, values=VAL)
    df = df.reset_index()
    dev_dtypes = _infer_types(df)
    dev_cat = dev_dtypes['categorical']
    dev_bool = dev_dtypes['boolean']
    dev_num = dev_dtypes['numerical']

    # set the first element for each boolean device to the opposite value of the
    # first occurrence (so the forward-fill below has a defined starting state)
    for dev in dev_bool:
        fvi = df[dev].first_valid_index()
        if fvi != 0:
            value = df[dev].iloc[fvi]
            df.loc[0, dev] = not value

    # set the first element of each categorical device to the most likely value
    if len(dev_cat) != 0:
        if most_likely_values is None:
            from pyadlml.dataset.devices import most_prominent_categorical_values
            tmp = df_dev[df_dev[DEVICE].isin(dev_cat)]
            most_likely_values = most_prominent_categorical_values(tmp)
        mlv = most_likely_values.set_index(DEVICE)
        for dev in dev_cat:
            new_val = mlv.loc[dev]['ml_state']
            df.loc[0,dev] = new_val

    df_num = df[dev_num]
    df_cat_bool = df[dev_bool + dev_cat]

    # fill from start to end NaNs with the preceeding correct value
    # (numerical devices are deliberately NOT forward-filled)
    df_cat_bool = df_cat_bool.ffill()

    df = pd.concat([df[TIME], df_num, df_cat_bool], axis=1)
    return df
5,327,709
def p1_marker_loc(p1_input, board_list, player1):
    """Take the location of the marker for Player 1.

    Re-prompts until the chosen position is a number in 1-9 AND the board
    spot is free.

    :param p1_input: initial candidate position (may be invalid)
    :param board_list: current board; indexes 1-9 hold " " when free
    :param player1: Player 1's marker symbol (used in the status message)
    :return: a validated, free board position in 1-9
    """
    # verify if the input is not in range or in range but in a already taken spot
    # (short-circuit keeps board_list[p1_input] from being evaluated when
    # p1_input is out of range)
    while p1_input not in range(1, 10) or board_list[p1_input] != " ":
        try:
            p1_input = int(
                input("Player 1: Where would you like to place the marker (1 - 9)? ")
            )
            # BUGFIX: guard the range BEFORE indexing the board -- inputs
            # like 0 or 42 previously raised an uncaught IndexError.
            if p1_input not in range(1, 10):
                print("That is not a position between 1 and 9, please try again!")
                print()
                continue
            # if a marker is already placed on that board location, display a message
            # warning player 1 and ask for their input again
            if board_list[p1_input] != " ":
                print(
                    "There is already a marker there, please choose another location."
                )
                input("Press Enter to continue. ")
                print()
                # input the player for another location for the marker
                continue
        except ValueError:
            print("This is not a number, please try again!")
            print()
    print(f"Player 1 is placing {player1} in position {p1_input}.")
    # return the variable to reassign it locally on the game_logic() function
    return p1_input
5,327,710
def send_welcome_ephemeral_message_to_participant( participant_email: str, incident_id: int, db_session: SessionLocal ): """Sends an ephemeral message to the participant.""" # we load the incident instance incident = incident_service.get(db_session=db_session, incident_id=incident_id) # we send the ephemeral message convo_plugin = plugins.get(INCIDENT_PLUGIN_CONVERSATION_SLUG) message_kwargs = { "name": incident.name, "title": incident.title, "status": incident.status, "type": incident.incident_type.name, "type_description": incident.incident_type.description, "priority": incident.incident_priority.name, "priority_description": incident.incident_priority.description, "commander_fullname": incident.commander.name, "commander_weblink": incident.commander.weblink, "document_weblink": incident.incident_document.weblink, "storage_weblink": incident.storage.weblink, "ticket_weblink": incident.ticket.weblink, "conference_weblink": incident.conference.weblink, "conference_challenge": incident.conference.conference_challenge, } faq_doc = document_service.get_incident_faq_document(db_session=db_session) if faq_doc: message_kwargs.update({"faq_weblink": faq_doc.weblink}) conversation_reference = document_service.get_conversation_reference_document( db_session=db_session ) if conversation_reference: message_kwargs.update( {"conversation_commands_reference_document_weblink": conversation_reference.weblink} ) convo_plugin.send_ephemeral( incident.conversation.channel_id, participant_email, "Incident Welcome Message", INCIDENT_PARTICIPANT_WELCOME_MESSAGE, MessageType.incident_participant_welcome, **message_kwargs, ) log.debug(f"Welcome ephemeral message sent to {participant_email}.")
5,327,711
def validateEpirr(jsonObj):
    """Ensure that IHEC Data Hub metadata matches with EpiRR record.

    For every dataset carrying a reference_registry_id, fetches the EpiRR
    record over HTTP, validates each referenced sample's metadata, and
    checks the experiment type is registered in the record.

    NOTE(review): uses urllib2 and e.message, i.e. Python 2 APIs -- this
    function will not run unmodified on Python 3; confirm target runtime.
    """
    print()
    datasets = jsonObj['datasets']
    samples = jsonObj['samples']
    for dataset_name in datasets:
        dataset = datasets[dataset_name]
        exp_attr = dataset['experiment_attributes']
        exp_name = exp_attr['experiment_type']
        # sample_id may be a single id or a list of ids; normalize to list.
        if isinstance(dataset['sample_id'], list):
            ds_names = dataset['sample_id']
        else:
            ds_names = [dataset['sample_id']]

        #If dataset has an EpiRR id, validate that metadata matches
        if 'reference_registry_id' in exp_attr:
            epirr_id = exp_attr['reference_registry_id']
            logging.getLogger().info('Validating dataset "%s" against EpiRR record "%s"...' % (dataset_name, epirr_id))
            try:
                request = urllib2.Request('http://www.ebi.ac.uk/vg/epirr/view/' + epirr_id, headers={"Accept": "application/json"})
                response = urllib2.urlopen(request).read()
            except urllib2.HTTPError as e:
                # Fetch failed (bad id, server error): skip this dataset.
                print('Unexpected error: %s' % (e.message))
                continue
            epirr_json = json.loads(response)
            epirr_sample_metadata = epirr_json['meta_data']

            #Validate that each sample that this dataset refers to holds the correct metadata
            for ds_name in ds_names:
                hub_sample_metadata = samples[ds_name]
                logging.getLogger().debug('Validating sample "%s" properties...' % (ds_name))
                validateSample(epirr_sample_metadata, hub_sample_metadata, dataset_name)

            #Case-insensitive check that each experiment that has an EpiRR id is actually registered at EpiRR.
            raw_data_per_exp = {rd['experiment_type'].lower(): rd for rd in epirr_json['raw_data']}
            if exp_name.lower() not in raw_data_per_exp:
                logging.getLogger().error('-Experiment "%s" could not be found in EpiRR record %s.' % (exp_name, epirr_id))
    print()
5,327,712
def chord(tones, dur, phrasing="", articulation="", ornamentation="", dynamics="", markup="", markdown="", prefix="", suffix=""):
    """Build a one-element list holding a Point that prints as a chord
    with the specified tones and duration."""
    chord_tones = flatten([tonify(tones)])
    point = Point(chord_tones, dur, phrasing, articulation, ornamentation,
                  dynamics, markup, markdown, prefix, suffix)
    return [point]
5,327,713
def build_gem_graph():
    """Builds a gem graph, F4,1. Ref: http://mathworld.wolfram.com/GemGraph.html"""
    # Start from C5 and fan node 1 out to the two non-adjacent cycle nodes.
    gem = build_5_cycle_graph()
    for far_node in (3, 4):
        gem.new_edge(1, far_node)
    return gem
5,327,714
async def get_forecasts_by_user_year_epic(
    user_id, epic_id, year, month, session: Session = Depends(get_session)
):
    """Get forecast by user, epic, year, month"""
    # Multiple criteria in a single where() are combined with AND, exactly
    # like the chained .where() calls.
    query = select(
        Forecast.id, Forecast.month, Forecast.year, Forecast.days
    ).where(
        Forecast.user_id == user_id,
        Forecast.epic_id == epic_id,
        Forecast.year == year,
        Forecast.month == month,
    )
    return session.exec(query).all()
5,327,715
def face_xyz_to_uv(face, p):
    """(face, XYZ) to UV

    see :cpp:func:`S2::FaceXYZtoUV`
    """
    # Faces 0-2 require the matching coordinate to be strictly positive,
    # faces 3-5 require the mirrored coordinate to be strictly negative;
    # otherwise the point is not on this face.
    if face < 3:
        if p[face] <= 0:
            return False, 0, 0
    elif p[face - 3] >= 0:
        return False, 0, 0
    u, v = valid_face_xyz_to_uv(face, p)
    return True, u, v
5,327,716
def cleanupString(string, replacewith="_", regex="([^A-Za-z0-9])"):
    """Remove all non-numeric or alphanumeric characters"""
    # Please don't use the logging system here. The logging system
    # needs this method, using the logging system here would
    # introduce a circular dependency. Be careful not to call other
    # functions that use the logging system.
    pattern = re.compile(regex)
    return pattern.sub(replacewith, string)
5,327,717
def send_message(data, header_size=8):
    """Send data over the communicator Unix socket.

    The payload is JSON-serialized and length-prefixed with a big-endian
    integer of ``header_size`` bytes.

    :param data: JSON-serializable payload
    :param header_size: width in bytes of the length prefix
    :raises RuntimeError: when no response or a non-OK response arrives
    :raises ValueError: when the response lacks the 'type_data' key
    """

    @_retry()
    def _connect(socket_path):
        """Connect socket."""
        sock = socket.socket(socket.AF_UNIX, socket.SOCK_STREAM)
        sock.connect(socket_path)
        sock.settimeout(SOCKET_TIMEOUT)
        return sock

    def _check_response(response):
        """Return True when the peer acknowledged with type_data == 'OK'."""
        if response is None:
            raise RuntimeError(
                "No response received when sending message: {}.".format(data)
            )
        if "type_data" not in response:
            raise ValueError(
                "Response {} does not contain key 'type_data'.".format(response)
            )
        return response["type_data"] == "OK"

    # BUGFIX: connect OUTSIDE the try/finally. Previously, a failing
    # _connect left `sock` unbound and the finally clause raised a
    # NameError that masked the real connection error.
    sock = _connect(str(COMMUNICATOR_SOCKET))
    try:
        message = json.dumps(data).encode()
        message_length = len(message).to_bytes(header_size, byteorder="big")
        sock.sendall(message_length)
        sock.sendall(message)
        response = _receive_data(sock)
        if not _check_response(response):
            # BUGFIX: typo "respone" -> "response" in the log message.
            logger.error("Error in response to %s: %s.", data, response)
            raise RuntimeError("Wrong response received, terminating processing.")
    finally:
        sock.close()
5,327,718
def _get_mapping_keys_in_condition(
    condition: Expression, column_name: str
) -> Optional[Set[str]]:
    """
    Finds the top level conditions that include filter based on the arrayJoin.
    This is meant to be used to find the keys the query is filtering the arrayJoin
    on.
    We can only apply the arrayFilter optimization to arrayJoin conditions
    that are not in OR with other columns. To simplify the problem, we only
    consider those conditions that are included in the first level of the query:
    [['tagskey' '=' 'a'],['col' '=' 'b'],['col2' '=' 'c']]  works
    [[['tagskey' '=' 'a'], ['col2' '=' 'b']], ['tagskey' '=' 'c']] does not

    If we encounter an OR condition we return None, which means we cannot
    safely apply the optimization. Empty set means we did not find any
    suitable arrayJoin for optimization in this condition but that does not
    disqualify the whole query in the way the OR condition does.
    """
    keys_found = set()

    # Split the top-level AND into its individual clauses.
    conditions = get_first_level_and_conditions(condition)
    for c in conditions:
        # Any OR at the top level disqualifies the whole query.
        if is_binary_condition(c, BooleanFunctions.OR):
            return None

        # Match `arrayJoin(column) = 'literal'` and capture the literal.
        match = FunctionCall(
            String(ConditionFunctions.EQ),
            (array_join_pattern(column_name), Literal(Param("key", Any(str)))),
        ).match(c)
        if match is not None:
            keys_found.add(match.string("key"))

        # Match `arrayJoin(column) IN (...)` and capture the string literals
        # from the tuple.
        match = is_in_condition_pattern(array_join_pattern(column_name)).match(c)
        if match is not None:
            function = match.expression("tuple")
            assert isinstance(function, FunctionCallExpr)
            keys_found |= {
                lit.value
                for lit in function.parameters
                if isinstance(lit, LiteralExpr) and isinstance(lit.value, str)
            }

    return keys_found
5,327,719
def make_request(method, url, **kwargs):
    """Make HTTP request, raising an exception if it fails.
    """
    # Resolve e.g. "get"/"post" to the matching requests function.
    request_func = getattr(requests, method)
    response = request_func(url, **kwargs)
    # raise an exception if request is not successful
    if response.status_code != requests.codes.ok:
        response.raise_for_status()
    return BeautifulSoup(response.text)
5,327,720
def hold(source):
    """Place the active call on the source phone on hold"""
    phone_name = source.Name
    print("Holding call on {0}".format(phone_name))
    return operation(source, 'Hold')
5,327,721
def make_grid(batch, grid_height=None, zoom=1, old_buffer=None, border_size=1):
    """Tile an image batch into one grid image.

    Args:
        batch: numpy array of shape [batch_size, height, width, n_channels],
            either float in [0, 1] or int in [0, 255]. Single-channel data
            is broadcast to 3 grey channels.
        grid_height: optional int number of rows; None tiles into a square,
            -1 tiles purely vertically.
        zoom: optional integer magnification factor (1 = no zoom).
        old_buffer: previous output buffer to reuse when its shape matches.
        border_size: white spacing in pixels between the images.

    Returns:
        A uint8 numpy array holding the full grid, 3 channels, values in
        [0, 255].

    Raises:
        ValueError: if n_channels is not one of [1, 3].
    """
    batch_size, height, width, n_channels = batch.shape

    # Work out the grid geometry.
    if grid_height is None:
        side = int(math.ceil(math.sqrt(batch_size)))
        grid_height, grid_width = side, side
    elif grid_height == -1:
        grid_height, grid_width = batch_size, 1
    else:
        grid_width = int(math.ceil(batch_size / grid_height))

    if n_channels == 1:
        batch = np.tile(batch, (1, 1, 1, 3))
        n_channels = 3
    if n_channels != 3:
        raise ValueError('Image batch must have either 1 or 3 channels, but '
                         'was {}'.format(n_channels))

    shape = (height * grid_height + border_size * (grid_height - 1),
             width * grid_width + border_size * (grid_width - 1),
             n_channels)
    # Reuse the old buffer only when its shape still matches; otherwise
    # start from a fresh white canvas.
    if old_buffer is not None and old_buffer.shape == shape:
        canvas = old_buffer
    else:
        canvas = np.full(shape, 255, dtype=np.uint8)

    # Integer batches are assumed to already be in [0, 255]; floats in [0, 1].
    scale = 1 if np.issubdtype(batch.dtype, np.integer) else 255
    for index in range(batch_size):
        row, col = divmod(index, grid_width)
        top = row * (height + border_size)
        left = col * (width + border_size)
        canvas[top:top + height, left:left + width, :] = np.clip(
            scale * batch[index], 0, 255).astype(np.uint8)

    if zoom > 1:
        canvas = canvas.repeat(zoom, axis=0).repeat(zoom, axis=1)
    return canvas
5,327,722
def _get_patterns_map(resolver, default_args=None):
    """
    Cribbed from http://www.djangosnippets.org/snippets/1153/

    Recursively generates a map of (pattern name or path to view function)
    -> (view function, default args)

    NOTE(review): relies on the pre-Django-2.0 RegexURLResolver /
    RegexURLPattern API, including the private _callback_str attribute.
    """
    patterns_map = {}
    if default_args is None:
        default_args = {}
    for pattern in resolver.url_patterns:
        # Each child inherits (a copy of) the accumulated default args.
        pattern_args = default_args.copy()
        if isinstance(pattern, RegexURLResolver):
            # Included URLconf: merge its kwargs and recurse.
            pattern_args.update(pattern.default_kwargs)
            patterns_map.update(_get_patterns_map(pattern, pattern_args))
        else:
            pattern_args.update(pattern.default_args)
            if pattern.name is not None:
                patterns_map[pattern.name] = (pattern.callback, pattern_args)
            # HACK: Accessing private attribute of RegexURLPattern
            callback_str = getattr(pattern, '_callback_str', None)
            if callback_str is not None:
                patterns_map[pattern._callback_str] = (pattern.callback, pattern_args)
    return patterns_map
5,327,723
def encoder_package_to_options(encoder_package, post_url=None, extra_numerics=None,
                               extra_categoricals=None, omitted_fields=None):
    """
    :param encoder_package: one hot encoder package
    :param post_url: url to send form data to on submission
        default is '' for testing purposes, you may use PUBLIC and it will use
        "http://httpbin.org/post" which prints the result
        this is not secure so don't do that with sensitive data
    :return: form options dict (attributes, buttons, helper text and fields)
    """
    extra_numerics, extra_categoricals, omitted_fields = process_extras(
        extra_numerics, extra_categoricals, omitted_fields)

    if post_url is None:
        post_url = ''
    if post_url == 'PUBLIC':
        post_url = "http://httpbin.org/post"

    fields = {}

    # Numeric inputs render as plain text boxes.
    numeric_cols = encoder_package['numeric_cols'] + list(extra_numerics.keys())
    for field in numeric_cols:
        if field not in omitted_fields:
            fields[field] = {"size": 20}

    # One-hot encoded categoricals become drop-downs when small enough,
    # otherwise they fall back to a text box.
    for field, value_dicts in encoder_package['one_hot_encoder_dicts'].items():
        if field in omitted_fields:
            continue
        ordered = sorted(value_dicts.items(), key=lambda item: item[1])
        levels = [label for label, _ in ordered]
        n_levels = len(levels)
        levels = levels + [unknown_level_value]
        if n_levels < LEVELS_MAX_FOR_DROP_DOWN:
            fields[field] = {
                "type": "select",
                "optionLabels": levels,
                "sort": False}
        else:
            fields[field] = {"size": 20}

    # Extra categoricals always render as drop-downs.
    for field, levels in extra_categoricals.items():
        fields[field] = {
            "type": "select",
            "optionLabels": levels,
            "sort": False
        }

    return {
        "form": {
            "attributes": {
                "action": post_url,
                "method": "post"
            },
            "buttons": {
                "submit": {}
            }
        },
        "helper": "Hit submit to update the prediction",
        "fields": fields}
5,327,724
def parse_files(fnames):
    """Parses all given files for seiscomp xml"""
    batch_index = 0
    catalog = Catalog()
    for file_index, fname in enumerate(fnames):
        print('read ' + fname)
        catalog += readSeisComPEventXML0_6(fname)
        # Flush the accumulated catalog every 100 files and at the end.
        at_flush_point = (file_index + 1) % 100 == 0
        at_end = file_index == len(fnames) - 1
        if at_flush_point or at_end:
            out_fname = str(batch_index) + '.xml'
            print('write %d events to %s\n' % (len(catalog), out_fname))
            catalog.write(out_fname, 'QUAKEML')
            catalog = Catalog()
            batch_index += 1
5,327,725
def train(model, network_input, network_output):
    """ Train the Neural Network

    Resumes from the best existing checkpoint in weights_<name>/ when one
    exists, then fits with a loss-checkpointing callback.

    NOTE(review): depends on module-level globals `name` and `epoch` --
    confirm they are defined where this module is used.
    """
    # Checkpoint files embed epoch and loss in the filename.
    filepath = "weights_"+name+"/weights-{epoch:02d}-{loss:.4f}.hdf5"
    checkpoint = ModelCheckpoint(
        filepath,
        monitor='loss',
        verbose=0,
        save_best_only=True,
        mode='min'
    )
    callbacks_list = [checkpoint]
    if os.path.exists('weights_'+name+'/') and len(os.listdir('weights_'+name+'/') ) != 0:
        files = []
        for each in os.listdir('weights_'+name+'/'):
            files.append(os.path.join('weights_'+name+'/',each))
        # Sort by the loss embedded in the filename (chars -11..-5, i.e. the
        # "{loss:.4f}" part) and load the lowest-loss weights.
        # NOTE(review): this slice assumes the filename pattern above
        # exactly; any stray file in the directory breaks float().
        files.sort(key=lambda x:float(x[-11:-5]))
        model.load_weights(files[0])
        print("Weight ", files[0]," Loaded...............")
    elif not os.path.exists('weights_'+name+'/'):
        os.mkdir('weights_'+name+'/')
    model.fit(network_input, network_output, epochs=epoch, batch_size=128, callbacks=callbacks_list)
5,327,726
def get_subset_values(request, pk):
    """Return the numerical values of a subset as a formatted list.

    The queryset is ordered by qualifier so that, when SECONDARY (x)
    values exist, the first half of the list are y-values and the second
    half their matching x-values.

    :param request: HTTP request (unused beyond the view signature)
    :param pk: primary key of the subset
    :return: JsonResponse with a list of {'y': ..., 'x': ...} dicts
    """
    values = models.NumericalValue.objects.filter(
        datapoint__subset__pk=pk).select_related(
        'error').select_related('upperbound').order_by(
        'qualifier', 'datapoint__pk')
    total_len = len(values)
    # BUGFIX: an empty subset used to crash on values.last().qualifier
    # (AttributeError, since .last() returns None).
    if total_len == 0:
        return JsonResponse([], safe=False)
    y_len = total_len
    # With both x- and y-values, the y-values make up half the list.
    if values.last().qualifier == models.NumericalValue.SECONDARY:
        y_len = int(y_len / 2)
    response = []
    for i in range(y_len):
        response.append({'y': values[i].formatted()})
    # Attach each x-value to the dict of its corresponding y-value.
    for i in range(y_len, total_len):
        response[i - y_len]['x'] = values[i].formatted()
    return JsonResponse(response, safe=False)
5,327,727
def timestamp(format_key: str) -> str:
    """Return the current local time formatted by a named pattern.

    :Args:
     - format_key: one of 'format_day', 'format_now', 'unix_now'.

    :Usage:
        timestamp('format_day')
    """
    patterns = {
        'format_day': '%Y-%m-%d',
        'format_now': '%Y-%m-%d-%H_%M_%S',
        'unix_now': '%Y-%m-%d %H:%M:%S',
    }
    # Unknown keys raise KeyError, matching the original lookup behavior.
    return time.strftime(patterns[format_key], time.localtime(time.time()))
5,327,728
def _save_conn_form( request: HttpRequest, form: SQLConnectionForm, template_name: str, ) -> JsonResponse: """Save the connection provided in the form. :param request: HTTP request :param form: form object with the collected information :param template_name: To render the response :return: AJAX response """ # Type of event to record if form.instance.id: event_type = Log.SQL_CONNECTION_EDIT is_add = False else: event_type = Log.SQL_CONNECTION_CREATE is_add = True # If it is a POST and it is correct if request.method == 'POST' and form.is_valid(): if not form.has_changed(): return JsonResponse({'html_redirect': None}) conn = form.save() # Log the event Log.objects.register( request.user, event_type, None, { 'name': conn.name, 'description': conn.description_text, 'conn_type': conn.conn_type, 'conn_driver': conn.conn_driver, 'db_user': conn.db_user, 'db_passwd': _('<PROTECTED>') if conn.db_password else '', 'db_host': conn.db_host, 'db_port': conn.db_port, 'db_name': conn.db_name, 'db_table': conn.db_table, }, ) return JsonResponse({'html_redirect': ''}) # Request is a GET return JsonResponse({ 'html_form': render_to_string( template_name, { 'form': form, 'id': form.instance.id, 'add': is_add}, request=request, ), })
5,327,729
def client() -> GivEnergyClient: """Supply a client with a mocked modbus client.""" # side_effects = [{1: 2, 3: 4}, {5: 6, 7: 8}, {9: 10, 11: 12}, {13: 14, 15: 16}, {17: 18, 19: 20}] return GivEnergyClient(host='foo')
5,327,730
def test_bus(test_system):
    """Create the test system.

    Runs the load flow so downstream assertions see solved quantities,
    then returns bus3 from the system as the fixture value.
    """
    test_system.run_load_flow()
    return test_system.buses["bus3"]
5,327,731
def add_exptime(inlist, exptime, exptkey='exptime', hext=0, verbose=True):
    """
    Given a list of fits files, adds to each one an EXPTIME header keyword
     with a value based on the passed exptime parameter.

    ** NOTE ** The exptime parameter can either be a numerical value, in
     which case it is interpreted as the value for EXPTIME, or it can be a
     string in which case it is interpreted as the name of a reference file
     that contains a valid exposure time keyword (designated by the passed
     exptkey parameter) that will be copied into the input files.

    Inputs:
     inlist   - list of fits files to which the keyword will be added
     exptime  - can take one of two forms:
                 (1) the value of the exposure time to add to each file
                 (2) the name of a reference file that contains a valid
                     exposure time in its header.  This exposure time
                     (designated by the exptkey keyword in the header) will
                     be copied into the input list.
     exptkey  - if the exptime parameter is the name of a reference file,
                 then this designates the keyword in the reference file
                 header that contains the exposure time value.
                 Default is 'exptime'
     hext     - HDU to modify (default = 0)
     verbose  - Set to True (the default) for some status
    """

    """ Check format of the exptime parameter """
    if isinstance(exptime, str):
        # Exposure time comes from a reference file's header.
        try:
            hdr = pf.getheader(exptime)
        except Exception:
            # Narrowed from a bare except; file missing/unreadable/not FITS.
            print('')
            print('ERROR: Unable to open fits file %s' % exptime)
            print('')
            return
        try:
            texp = hdr[exptkey]
        except KeyError:
            # Narrowed from a bare except; the keyword is absent.
            print('ERROR: Unable to read %s keyword in %s fits file'
                  % (exptkey, exptime))
            del hdr
            print('')
            return
    elif isinstance(exptime, float):
        texp = exptime
    elif isinstance(exptime, int) and not isinstance(exptime, bool):
        # bool is excluded: True/False are not valid exposure times.
        texp = float(exptime)
    else:
        print('')
        print('ERROR: exptime needs to be a number or name of a reference '
              'file')
        print('')
        return

    """ Put the desired exposure time into the input files """
    for i in inlist:
        hdu = pf.open(i, mode='update')
        hdr = hdu[hext].header
        # BUGFIX: hdr.update('key', value) with positional args is the old
        # pyfits API and was removed in astropy; item assignment works in
        # both libraries.
        hdr['exptime'] = texp
        hdu.flush()
        if verbose:
            print('Updated %s with EXPTIME=%.2f' % (i, texp))
5,327,732
def mask_target(y_true, bbox_true, mask_true, mask_regress, proposal, assign = cls_assign, sampling_count = 256, positive_ratio = 0.25, mean = [0., 0., 0., 0.], std = [0.1, 0.1, 0.2, 0.2], method = "bilinear"):
    """Build the (mask_true, mask_pred) training pair for mask heads.

    y_true = label #(padded_num_true, 1 or num_class)
    bbox_true = [[x1, y1, x2, y2], ...] #(padded_num_true, bbox)
    mask_true = mask #(padded_num_true, h, w)
    mask_regress = mask regress #(num_proposals, h, w, num_class)
    proposal = [[x1, y1, x2, y2], ...] #(num_proposals, bbox)

    mask_true = targeted mask true #(sampling_count, h, w)
    mask_pred = targeted mask regress #(sampling_count, h, w)

    NOTE(review): mean/std are accepted but never used in this body --
    confirm whether they belong to a sibling bbox-target function.
    """
    # Ensure mask_true has a trailing channel axis.
    if tf.keras.backend.ndim(mask_true) == 3:
        mask_true = tf.expand_dims(mask_true, axis = -1)
    pred_count = tf.shape(proposal)[0]
    # Drop zero-padded ground-truth rows (all-zero boxes) and proposals.
    valid_true_indices = tf.where(tf.reduce_max(tf.cast(0 < bbox_true, tf.int32), axis = -1))
    y_true = tf.gather_nd(y_true, valid_true_indices)
    bbox_true = tf.gather_nd(bbox_true, valid_true_indices)
    valid_pred_indices = tf.where(tf.reduce_max(tf.cast(0 < proposal, tf.int32), axis = -1))
    proposal = tf.gather_nd(proposal, valid_pred_indices)
    mask_true = tf.gather_nd(mask_true, valid_true_indices)
    mask_regress = tf.gather_nd(mask_regress, valid_pred_indices)
    # Match proposals to ground truth; yields matched-gt / positive / negative indices.
    true_indices, positive_indices, negative_indices = assign(bbox_true, proposal)
    if isinstance(sampling_count, int) and 0 < sampling_count:
        # Subsample positives to sampling_count * positive_ratio, then take
        # negatives so positives:negatives ~= positive_ratio overall.
        positive_count = tf.cast(sampling_count * positive_ratio, tf.int32)
        indices = tf.range(tf.shape(positive_indices)[0])
        indices = tf.random.shuffle(indices)[:positive_count]
        positive_indices = tf.gather(positive_indices, indices)
        true_indices = tf.gather(true_indices, indices)
        positive_count = tf.cast(tf.shape(positive_indices)[0], tf.float32)
        negative_count = tf.cast(1 / positive_ratio * positive_count - positive_count, tf.int32)
        negative_indices = tf.random.shuffle(negative_indices)[:negative_count]
    else:
        sampling_count = pred_count
    pred_indices = tf.concat([positive_indices, negative_indices], axis = 0)
    y_true = tf.gather(y_true, true_indices)
    proposal = tf.gather(proposal, positive_indices)
    mask_true = tf.gather(mask_true, true_indices)
    mask_pred = tf.gather(mask_regress, positive_indices)
    n_class = tf.shape(y_true)[-1]
    if tf.keras.backend.int_shape(true_indices)[0] != 0:
        # Per-sample class label: either the raw (single-column) label or
        # the argmax over one-hot columns.
        label = tf.cond(tf.equal(n_class, 1), true_fn = lambda: y_true, false_fn = lambda: tf.expand_dims(tf.cast(tf.argmax(y_true, axis = -1), y_true.dtype), axis = -1))
        indices = tf.stack([tf.range(tf.shape(label)[0]), tf.cast(label[:, 0], tf.int32)], axis = -1)
        if mask_true is not None and mask_regress is not None:
            # crop_and_resize expects [y1, x1, y2, x2] boxes.
            x1, y1, x2, y2 = tf.split(proposal, 4, axis = -1)
            mask_bbox = tf.concat([y1, x1, y2, x2], axis = -1)
            mask_shape = tf.shape(mask_pred)
            # Crop each gt mask to its positive proposal and resize to the
            # predicted mask resolution, then binarize.
            mask_true = tf.image.crop_and_resize(image = tf.cast(mask_true, mask_pred.dtype), boxes = mask_bbox, box_indices = tf.range(0, tf.cast(positive_count, tf.int32)), crop_size = mask_shape[1:3], method = method)
            mask_true = mask_true[..., 0]
            mask_true = tf.clip_by_value(tf.round(mask_true), 0., 1.)
            # Select the predicted mask channel of each sample's class.
            mask_pred = tf.transpose(mask_pred, [0, 3, 1, 2])
            mask_pred = tf.gather_nd(mask_pred, indices)
    else:
        # No positives: emit empty 3-D tensors so the padding below is valid.
        mask_pred = mask_pred[..., 0]
        mask_true = tf.zeros_like(mask_pred, dtype = mask_pred.dtype)
    # Pad with zeros for negatives and up to sampling_count samples.
    negative_count = tf.shape(negative_indices)[0]
    pad_count = tf.maximum(sampling_count - tf.shape(pred_indices)[0], 0)
    mask_true = tf.pad(mask_true, [[0, negative_count + pad_count], [0, 0], [0, 0]])
    mask_pred = tf.pad(mask_pred, [[0, negative_count + pad_count], [0, 0], [0, 0]])
    return mask_true, mask_pred
5,327,733
def display_context(doc):
    """Create a Jinja context for display.

    Flattens a metatab-style `doc` into a plain dict of sections, loads the
    inline README markdown, normalizes documentation/contacts/resources
    sections, and collects schema/distribution info for templates.
    """
    from rowgenerators.exceptions import DownloadError

    # Make a naive dictionary conversion; the 'schema' section is handled
    # separately at the bottom.
    context = {s.name.lower(): s.as_dict() for s in doc if s.name.lower() != 'schema'}

    mandatory_sections = ['documentation', 'contacts']

    # Remove section names
    deletes = []
    for k, v in context.items():
        try:
            del v['@value']
        except KeyError:
            pass  # Doesn't have the value
        except TypeError:
            # Is actually completely empty, and has a scalar value. Delete and re-create
            deletes.append(k)

        if isinstance(v, str):  # Shouldn't ever happen, but who knows ?
            deletes.append(k)

    for d in deletes:
        try:
            del context[d]
        except KeyError:
            # Fails in TravisCI, no idea why.
            pass

    # Templates expect these sections to exist, even if empty.
    for ms in mandatory_sections:
        if ms not in context:
            context[ms] = {}

    # Load inline documentation
    inline = ''

    for d in context.get('documentation', {}).get('documentation', []):
        try:
            u = parse_app_url(d['url'])
        except TypeError:
            continue

        if u.target_format == 'md':  # The README.md file

            inline = ''

            if u.proto == 'file':
                # File really ought to be relative
                t = doc.package_url.join_target(u).get_resource().get_target()
            else:
                try:
                    t = u.get_resource().get_target()
                except DownloadError as e:
                    raise e

            try:
                with open(t.fspath) as f:
                    inline += f.read()
            except FileNotFoundError:
                pass

            del d['title']  # Will cause it to be ignored in next section

    # Strip off the leading title, if it exists, because it will be re-applied
    # by the templates
    lines = inline.strip().splitlines()
    if lines and lines[0].startswith('# '):
        lines = lines[1:]

    context['inline_doc'] = '\n'.join(lines)

    # Convert doc section: split terms into link entries and image entries.
    doc_links = {}
    images = {}
    for term_name, terms in context['documentation'].items():
        if term_name == 'note':
            context['notes'] = terms
        elif terms:
            for i, term in enumerate(terms):
                try:
                    if term_name == 'image':
                        images[term['title']] = term
                    else:
                        doc_links[term['title']] = term
                except AttributeError:  # A scalar
                    pass  # There should not be any scalars in the documentation section
                except KeyError:
                    pass  # ignore entries without titles
                except TypeError:
                    pass  # Also probably a scalar

    context['doc_links'] = doc_links
    context['images'] = images

    del context['documentation']

    #
    # Update contacts

    origin = None
    for term_name, terms in context['contacts'].items():
        if isinstance(terms, dict):
            origin = terms  # Origin is a scalar in root, must be converted to sequence here
        else:
            for t in terms:
                try:
                    t.update(process_contacts_html(t))
                except AttributeError:
                    pass  # Probably got a scalar

    if origin:
        origin.update(process_contacts_html(origin))
        context['contacts']['origin'] = [origin]

    # For resources and references, convert scalars into lists of dicts, which are the
    # default for Datafiles and References.
    for section in ('references', 'resources'):
        if section not in context:
            context[section] = {}

        for term_key, term_vals in context[section].items():
            if isinstance(term_vals, dict):
                if '@value' in term_vals:
                    term_vals['url'] = term_vals['@value']
                    del term_vals['@value']
                new_term_vals = [term_vals]
            elif isinstance(term_vals, list):
                new_term_vals = None  # already in list form; leave untouched
            else:
                new_term_vals = [{'url': term_vals, 'name': term_vals}]

            if new_term_vals:
                context[section][term_key] = new_term_vals

    # Add in other properties to the resources
    for term in context.get('resources', {}).get('datafile', []):
        r = doc.resource(term['name'])
        if r is not None:
            term['isgeo'] = r.isgeo

    context['distributions'] = {}
    for dist in doc.find('Root.Distribution'):
        context['distributions'][dist.type] = dist.value

    if doc.find('Root.Giturl'):
        context['distributions']['source'] = doc.get_value('Root.Giturl')

    # Collect schema tables as {table_name: [column dicts]}.
    context['schema'] = {}
    if 'Schema' in doc:
        for t in doc['Schema'].find('Root.Table'):
            context['schema'][t.name] = []
            for c in t.find('Table.Column'):
                context['schema'][t.name].append(c.as_dict())

    return context
5,327,734
def _get_triplet_mask(labels: torch.Tensor) -> torch.BoolTensor: """Return a 3D mask where mask[a, p, n] is True if the triplet (a, p, n) is valid. A triplet (i, j, k) is valid if: - i, j, k are distinct - labels[i] == labels[j] and labels[i] != labels[k] Args: labels (torch.Tensor): `Tensor` with shape [batch_size] Returns: torch.BoolTensor: `Tensor` with shape [batch_size] """ # Check that i, j and k are distinct indices = torch.logical_not(torch.eye(labels.size(0)).bool()).to(labels.device) i_not_equal_j = indices.unsqueeze(2) i_not_equal_k = indices.unsqueeze(1) j_not_equal_k = indices.unsqueeze(0) distinct_indices = (i_not_equal_j & i_not_equal_k) & j_not_equal_k label_equal = labels.unsqueeze(0) == labels.unsqueeze(1) i_equal_j = label_equal.unsqueeze(2) i_equal_k = label_equal.unsqueeze(1) valid_labels = ~i_equal_k & i_equal_j return valid_labels & distinct_indices
5,327,735
def test_function_words_removal(): """Test function words are properly deleted from title strings""" key_formatter = KeyFormatter({}) # a list of function words as defined by JabRef # (cf. https://docs.jabref.org/setup/bibtexkeypatterns) function_words_list = [ "a", "an", "the", "above", "about", "across", "against", "along", "among", "around", "at", "before", "behind", "below", "beneath", "beside", "between", "beyond", "by", "down", "during", "except", "for", "from", "in", "inside", "into", "like", "near", "of", "off", "on", "onto", "since", "to", "toward", "through", "under", "until", "up", "upon", "with", "within", "without", "and", "but", "for", "nor", "or", "so", "yet" ] title_string = " ".join(function_words_list) title = key_formatter.remove_function_words(title_string) assert title == "" # check that the match is really case insensitive function_words_upper = [f.upper() for f in function_words_list] title_string = " ".join(function_words_list) title = key_formatter.remove_function_words(title_string) assert title == "" # test for a real title title_string = (r"A climbing image nudged elastic band method for " "finding saddle points and minimum energy paths") title = key_formatter.remove_function_words(title_string) expected_string = (r"climbing image nudged elastic band method " "finding saddle points minimum energy paths") assert title == expected_string
5,327,736
def analyse_registration_output(output_string): """Parse the registration command output and return appropriate error""" parse_error="ERROR:Unable to parse error message:" + output_string success=0 fail=1 status_regex = re.compile("Status\s*:\s*(?P<status>[A-Z]+).*") try: status = status_regex.search(output_string).groupdict()['status'] except: return fail, parse_error if status == "FAILED": return_exit = fail code_regex = re.compile("Result Code\s*:\s*CLI_(?P<code>[0-9]).*") try: code = code_regex.search(output_string).groupdict()['code'] except: return fail, parse_error if code == '0': message = "CLI_0: Authentication error" elif code == '1': message = "CLI_1: Error reading file references from the properties file" elif code == '2': message = "CLI_2: Invalid user input" elif code == '3': message = "CLI_3: No input files to process" elif code == '4': message = "CLI_4: Failed to process collection" elif code == '5': message = "CLI_5: Failed to process data file" else: message = "Unknown error" elif status == "COMPLETED": return_exit = success message = "Successful registration" else: return_exit = fail message = parse_error return return_exit, message
5,327,737
def randomrandrange(x, y=None): """Method randomRandrange. return a randomly selected element from range(start, stop). This is equivalent to choice(range(start, stop)), but doesnt actually build a range object. """ if isinstance(y, NoneType): return random.randrange(x) # nosec else: return random.randrange(x, y)
5,327,738
def difficulties(prefix="data"): """ Helper function that returns a list of template files. """ print("Loading difficulties ...") difficulties = [ ] os.path.walk(os.path.join(prefix, "templ_difficulties/"), processor, difficulties) if (len(difficulties) == 0): die("FATAL: No difficulties to use!") return difficulties
5,327,739
def int_domains(ecoords: np.ndarray, qpos: np.ndarray, qweight: np.ndarray, dshpfnc: Callable): """ Returns the measure (length, area or volume in 1d, 2d and 3d) of several domains. """ nE = ecoords.shape[0] res = np.zeros(nE, dtype=ecoords.dtype) nG = len(qweight) for iG in prange(nG): dshp = dshpfnc(qpos[iG]) for i in prange(nE): jac = ecoords[i].T @ dshp djac = np.linalg.det(jac) res[i] += qweight[iG] * djac return res
5,327,740
def pg_conn(postgresql): """Runs the sqitch plan and loads seed data before returning db connection. """ with postgresql: # Loads data from blogdb fixture data with postgresql.cursor() as cur: cur.execute( """ create table users ( userid serial not null primary key, username varchar(32) not null, firstname varchar(255) not null, lastname varchar(255) not null );""" ) cur.execute( """ create table blogs ( blogid serial not null primary key, userid integer not null references users(userid), title varchar(255) not null, content text not null, published date not null default CURRENT_DATE );""" ) with postgresql.cursor() as cur: with USERS_DATA_PATH.open() as fp: cur.copy_from(fp, "users", sep=",", columns=["username", "firstname", "lastname"]) with BLOGS_DATA_PATH.open() as fp: cur.copy_from( fp, "blogs", sep=",", columns=["userid", "title", "content", "published"] ) return postgresql
5,327,741
def test_list_unsigned_long_min_length_2_nistxml_sv_iv_list_unsigned_long_min_length_3_5(mode, save_output, output_format): """ Type list/unsignedLong is restricted by facet minLength with value 7. """ assert_bindings( schema="nistData/list/unsignedLong/Schema+Instance/NISTSchema-SV-IV-list-unsignedLong-minLength-3.xsd", instance="nistData/list/unsignedLong/Schema+Instance/NISTXML-SV-IV-list-unsignedLong-minLength-3-5.xml", class_name="NistschemaSvIvListUnsignedLongMinLength3", version="1.1", mode=mode, save_output=save_output, output_format=output_format, structure_style="filenames", )
5,327,742
def check_ocsp_response_status(single_response_map, ocsp_response): """ Checks the OCSP response status """ ret = [] for hkey, data in single_response_map.items(): if data['status'] == 'good': ret.append(_process_good_status( hkey, data, ocsp_response)) elif data['status'] == 'revoked': # revoked _process_revoked_status(hkey, data) else: # unknown _process_unknown_status(hkey) if len(ret) != len(single_response_map): raise OperationalError( msg=u"Not all OCSP Response was returned", errno=ER_INVALID_OCSP_RESPONSE, )
5,327,743
def checkParameter(): """ function: check parameter for different action input: NA output: NA """ # check mpprc file path g_opts.mpprcFile = DefaultValue.getMpprcFile() # the value of "-t" can not be "" if g_opts.action == "": GaussLog.exitWithError(ErrorCode.GAUSS_500["GAUSS_50001"] % "t" + ".") # check the value of "-t" if g_opts.action in [const.ACTION_SWITCH_PROCESS, const.ACTION_COPY_CERTS] and \ (not g_opts.newClusterAppPath or not g_opts.oldClusterAppPath): GaussLog.exitWithError( ErrorCode.GAUSS_500["GAUSS_50001"] % "-new_cluster_app_path and --old_cluster_app_path") elif g_opts.action in \ [const.ACTION_SYNC_CONFIG, const.ACTION_RESTORE_CONFIG] and not g_opts.newClusterAppPath: GaussLog.exitWithError( ErrorCode.GAUSS_500["GAUSS_50001"] % "-new_cluster_app_path") elif g_opts.action in \ [const.ACTION_SWITCH_BIN, const.ACTION_CLEAN_INSTALL_PATH] and not g_opts.appPath: GaussLog.exitWithError(ErrorCode.GAUSS_500["GAUSS_50001"] % "R") # Check the incoming parameter -U if g_opts.user == "": g_opts.user = pwd.getpwuid(os.getuid()).pw_name # Check the incoming parameter -l if g_opts.logFile == "": g_opts.logFile = DefaultValue.getOMLogPath(DefaultValue.LOCAL_LOG_FILE, g_opts.user, "") global g_gausshome g_gausshome = DefaultValue.getInstallDir(g_opts.user) if g_gausshome == "": GaussLog.exitWithError( ErrorCode.GAUSS_518["GAUSS_51800"] % "$GAUSSHOME") g_gausshome = os.path.normpath(g_gausshome)
5,327,744
def partial_with_hound_context(hound, func, *args, **kwargs): """ Retuns a partially bound function Propagates the currently active hound reason (if any) Useful for capturing the current contextual hound reason when queueing a background action """ if hound is not None: reason = hound.get_current_reason() return partial( call_with_context, partial(hound.with_reason, reason), func, *args, _context_callable=True, **kwargs ) return partial( func, *args, **kwargs )
5,327,745
def rssfeed_edit(request, feed, ret_path): """ Eigenschaften des RSS-Feeds aendern """ def save_values(feed, old, new): """ geaenderte Werte des RSS-Feeds speichern """ has_changed = False key = 'title' if old[key] != new[key]: feed.title = encode_html(new[key]) has_changed = True key = 'text' if old[key] != new[key]: feed.description = encode_html(new[key]) has_changed = True key = 'url_more' if old[key] != new[key]: feed.link = new[key] has_changed = True key = 'section' if old[key] != new[key]: feed.general_mode = new[key] has_changed = True if has_changed: feed.last_modified = get_last_modified() feed.save() class DmsItemForm(forms.Form): """ Elemente des Eingabeformulars """ title = forms.CharField(max_length=240, widget=forms.TextInput(attrs={'size':60}) ) text = forms.CharField(max_length=180, widget=forms.TextInput(attrs={'size':60}) ) url_more = forms.CharField(required=False, max_length=200, widget=forms.TextInput(attrs={'size':60}) ) section = forms.ChoiceField(choices=get_global_choices(), widget=forms.RadioSelect() ) data_init = { 'title' : decode_html(feed.title), 'text' : remove_link_icons(feed.description), 'url_more' : feed.link, 'section' : feed.general_mode, } app_name = 'rssfeed' if request.method == 'POST' : data = request.POST.copy() else : data = data_init f = DmsItemForm(data) my_title = _(u'RSS-Feed ändern') tabs = [ ('tab_base', [ 'title', 'text', 'url_more', 'section', ]), ] content = get_tabbed_form(tabs, help_form, app_name, f) if request.method == 'POST' and not f.errors : save_values(feed, data_init, f.data) return HttpResponseRedirect(ret_path) else: path = request.path n_pos = path[:-1].rfind('/') path = path[:n_pos] n_pos = path.rfind('/') path = path[:n_pos+1] item_container = get_item_container(path, '') vars = get_item_vars_edit(request, item_container, app_name, my_title, content, f) return render_to_response ( 'app/base_edit.html', vars )
5,327,746
def disassemble_pretty(self, addr=None, insns=1, arch=None, mode=None): """ Wrapper around disassemble to return disassembled instructions as string. """ ret = "" disas = self.disassemble(addr, insns, arch, mode) for i in disas: ret += "0x%x:\t%s\t%s\n" % (i.address, i.mnemonic, i.op_str) return ret
5,327,747
def detect(net, meta, image, thresh=.5, hier_thresh=.5, nms=.45, debug=False): """ Performs the detection """ custom_image = cv2.cvtColor(image, cv2.COLOR_BGR2RGB) custom_image = cv2.resize(custom_image, (lib.network_width( net), lib.network_height(net)), interpolation=cv2.INTER_LINEAR) im, arr = array_to_image(custom_image) num = c_int(0) pnum = pointer(num) predict_image(net, im) dets = get_network_boxes( net, image.shape[1], image.shape[0], thresh, hier_thresh, None, 0, pnum, 0) num = pnum[0] if nms: do_nms_sort(dets, num, meta.classes, nms) res = [] if debug: print("about to range") for j in range(num): for i in range(meta.classes): if dets[j].prob[i] > 0: b = dets[j].bbox if altNames is None: nameTag = meta.names[i] else: nameTag = altNames[i] res.append((nameTag, dets[j].prob[i], (b.x, b.y, b.w, b.h), i)) res = sorted(res, key=lambda x: -x[1]) free_detections(dets, num) return res
5,327,748
def overlay_boxes(image, predictions): """ Adds the predicted boxes on top of the image Arguments: image (np.ndarray): an image as returned by OpenCV predictions (BoxList): the result of the computation by the model. It should contain the field `labels`. """ labels = predictions.get_field("labels") boxes = predictions.bbox colors = compute_colors_for_labels(labels).tolist() for box, color in zip(boxes, colors): box = box.to(torch.int64) top_left, bottom_right = box[:2].tolist(), box[2:].tolist() image = cv2.rectangle( image, tuple(top_left), tuple(bottom_right), tuple(color), 2 ) return image
5,327,749
async def mux_data(dut): """It should multiplex between two data channels""" # Reset dut.reset.value = 1 dut.io_in1_req.value = 0 dut.io_in2_req.value = 0 dut.io_sel_req.value = 0 dut.io_in1_data.value = 42 dut.io_in2_data.value = 84 dut.io_sel_data.value = 0 dut.io_out_ack.value = 0 await Timer(1, "ns") dut.reset.value = 0 await Timer(1, "ns") # Attempt to forward the data on first channel # Must bring both in1.req and sel.req high to signal new data dut.io_in1_req.value = 1 dut.io_sel_req.value = 1 await Edge(dut.io_out_req) assert dut.io_out_data.value == 42 await Timer(1, "ns") dut.io_out_ack.value = 1 await Timer(1, "ns") # Attempt to forward data on the second channel # Must toggle both in2.req and sel.req dut.io_in2_req.value = 1 dut.io_sel_req.value = 0 dut.io_sel_data.value = 1 await Edge(dut.io_out_req) assert dut.io_out_data.value == 84 dut.io_out_ack.value = 0 await Timer(2, "ns") # Drive on the first channel # It shouldn't forward the output-request until both in.req and sel.req have triggered dut.io_in1_req.value = 0 dut.io_in1_data.value = 13 dut.io_sel_data.value = 0 await Timer(5, "ns") assert dut.io_out_req.value == 0 dut.io_sel_req.value = 1 await Edge(dut.io_out_req) assert dut.io_out_data.value == 13 dut.io_out_ack.value = 1 await Timer(2, "ns") # Drive some more on the second channel dut.io_in2_data.value = 10 dut.io_sel_req.value = 0 dut.io_sel_data = 1 await Timer(5, "ns") assert dut.io_out_req.value == 1 dut.io_in2_req.value = 0 await Edge(dut.io_out_req) assert dut.io_out_data == 10
5,327,750
def convert_tilt_convention(iconfig, old_convention, new_convention): """ convert the tilt angles from an old convention to a new convention This should work for both configs with statuses and without """ if new_convention == old_convention: return def _get_tilt_array(data): # This works for both a config with statuses, and without if isinstance(data, dict): return data.get('value') return data def _set_tilt_array(data, val): # This works for both a config with statuses, and without if isinstance(data, dict): data['value'] = val else: data.clear() data.extend(val) old_axes, old_extrinsic = old_convention new_axes, new_extrinsic = new_convention det_keys = iconfig['detectors'].keys() if old_axes is not None and old_extrinsic is not None: # First, convert these to the matrix invariants rme = RotMatEuler(np.zeros(3), old_axes, old_extrinsic) for key in det_keys: tilts = iconfig['detectors'][key]['transform']['tilt'] rme.angles = np.array(_get_tilt_array(tilts)) phi, n = angleAxisOfRotMat(rme.rmat) _set_tilt_array(tilts, (phi * n.flatten()).tolist()) if new_axes is None or new_extrinsic is None: # We are done return # Update to the new mapping rme = RotMatEuler(np.zeros(3), new_axes, new_extrinsic) for key in det_keys: tilts = iconfig['detectors'][key]['transform']['tilt'] tilt = np.array(_get_tilt_array(tilts)) rme.rmat = makeRotMatOfExpMap(tilt) # Use np.ndarray.tolist() to convert back to native python types _set_tilt_array(tilts, np.array(rme.angles).tolist())
5,327,751
def setAllPorts(descriptor, delayed): """ setAllPorts(descriptor: ModuleDescriptor) -> None Traverse descriptor and all of its children/grand-children to add all ports """ addPorts(descriptor.module, delayed) for child in descriptor.children: setAllPorts(child, delayed)
5,327,752
def _type_of_plot(orientation, n_var, i, j): """internal helper function for determining plot type in a corner plot Parameters ---------- orientation : str the orientation options: 'lower left', 'lower right', 'upper left', 'upper right' i, j : int the row, column index Returns ------- plot type : str 'remove' : do not show this plot 'same' : the axes are the same 'compare' : compare the two different axes """ if orientation == "lower left": if j > i: return i, j, "remove" elif j == i: return i, j, "same" else: # j < i return i, j, "compare" elif orientation == "lower right": raise ValueError("not yet supported orientation") # if i + j < n_var - 1: # return i, j, 'remove' # elif i + j == n_var - 1: # return i, j, 'same' # else: # j < i # return i, j, 'compare' elif orientation == "upper left": raise ValueError("not yet supported orientation") # if i + j < n_var - 1: # return i, j, 'compare' # elif i + j == n_var - 1: # return i, j, 'same' # else: # j < i # return i, j, 'remove' elif orientation == "upper right": raise ValueError("not yet supported orientation") # if j < i: # return i, j, 'remove' # elif j == i: # return i, j, 'same' # else: # j < i # return i, j, 'compare' else: raise ValueError("not supported orientation")
5,327,753
def main(gene_files, pos_files, neg_files): """Analize repeat data for genes. Main method that runs the repeat analysis. Parameters ---------- gene_files : List List of file names containing gene information for a given chromosome in JSON format. pos_files : List List of repeats found in the sense strand (+). neg_files : List List of repeats found in the antisense strand (-). Returns ------- None. """ genes_with_repeats = [] for gene_file, pos_file, neg_file in zip(gene_files, pos_files, neg_files): # open gene file genes_dictionary = open_genes(gene_file) print('Gene file {} opened \n'.format(gene_file)) # open repeats files pos_repeats = open_repeat_analysis(pos_file, '+') neg_repeats = open_repeat_analysis(neg_file, '-') print('Repeat files opened: {} {}\n'.format(pos_file, neg_file)) # Analysis of repeats on sense strand pos_gene_list= repeat_analysis(genes_dictionary, pos_repeats) # update counters genes_with_repeats += pos_gene_list # Analysis of repeats on anti-sense strand neg_gene_list = repeat_analysis(genes_dictionary, neg_repeats) # update counters genes_with_repeats += neg_gene_list # Print detailed results for repeats in intron and exons print_info_exon_intron(genes_with_repeats, '_Intron-Exon_results') # print analysis for mRNA print_info_mRNA(genes_with_repeats, '_mRNA_result.txt') # print distance analysis for intron distance_analysis(genes_with_repeats, 'intron', '_Distance_analysis_intron.txt')
5,327,754
def t2_function(t, M_0, T2, p): """Calculate stretched or un-stretched (p=1) exponential T2 curve .. math:: f(t) = M_{0} e^{(-2(t/T_{2})^{p}} Args: t (array): time series M_{0} (float): see equation T_{2} (float): T2 value p (float): see equation Returns: array: T2 curve """ return M_0 * _np.exp(-2.0 * (t / T2) ** p)
5,327,755
def ry(phi): """Returns the rotational matrix for an angle phi around the y-axis """ if type(phi) == np.ndarray: m11 = np.cos(phi) m12 = np.full(len(phi), 0) m13 = np.sin(phi) m22 = np.full(len(phi), 1) m1 = np.stack((m11, m12, m13), axis=0) m2 = np.stack((m12, m22, m12), axis=0) m3 = np.stack((-m13, m12, m11), axis=0) y_rot_mat = np.stack((m1, m2, m3), axis=0) else: y_rot_mat = np.array(([np.cos(phi), 0, np.sin(phi)], [0, 1, 0], [-np.sin(phi), 0, np.cos(phi)])) return y_rot_mat
5,327,756
def hashed_embedding_lookup_sparse(params,
                                   sparse_values,
                                   dimension,
                                   combiner="mean",
                                   default_value=None,
                                   name=None):
  """Looks up embeddings of a sparse feature using parameter hashing.

  See `tf.contrib.layers.hashed_embedding_lookup` for embedding with hashing.

  Args:
    params: A `Tensor` or `list` of `Tensors`.
      Each tensor must be of rank 1 with fully-defined shape.
    sparse_values: A 2-D `SparseTensor` containing the values to be embedded.
      Some rows may be empty.
    dimension: Embedding dimension
    combiner: A string specifying how to combine embedding results for each
        entry. Currently "mean", "sqrtn" and "sum" are supported, with "mean"
        the default.
    default_value: The value to use for an entry with no features.
    name: An optional name for this op.

  Returns:
    Dense tensor with shape [N, dimension] with N the number of rows in
      sparse_values.

  Raises:
    TypeError: If sparse_values is not a SparseTensor.
    ValueError: If combiner is not one of {"mean", "sqrtn", "sum"}.
  """

  if not isinstance(params, list):
    params = [params]
  if not isinstance(sparse_values, ops.SparseTensor):
    raise TypeError("sparse_values must be SparseTensor")

  with ops.op_scope(params + [sparse_values], name,
                    "hashed_sparse_embedding_lookup") as scope:
    # Fill in the empty rows.
    if default_value is None:
      # Random default values to reduce the risk of collision.
      if sparse_values.dtype == dtypes.string:
        default_value = "6ZxWzWOHxZ"
      else:
        default_value = 1288896567
    sparse_values, _ = sparse_ops.sparse_fill_empty_rows(
        sparse_values, default_value)

    # Row index of each value becomes its segment id for the combiner ops.
    segment_ids = sparse_values.indices[:, 0]
    if segment_ids.dtype != dtypes.int32:
      segment_ids = math_ops.cast(segment_ids, dtypes.int32)

    # Deduplicate values so each distinct feature is embedded once; `idx`
    # maps every original value back to its unique embedding.
    values = sparse_values.values
    values, idx = array_ops.unique(values)

    embeddings = hashed_embedding_lookup(params, values, dimension)

    # Combine per-row embeddings according to the requested reduction.
    if combiner == "sum":
      embeddings = math_ops.sparse_segment_sum(embeddings, idx, segment_ids,
                                               name=scope)
    elif combiner == "mean":
      embeddings = math_ops.sparse_segment_mean(embeddings, idx, segment_ids,
                                                name=scope)
    elif combiner == "sqrtn":
      embeddings = math_ops.sparse_segment_sqrt_n(embeddings, idx, segment_ids,
                                                  name=scope)
    else:
      raise ValueError("Combiner must be one of 'mean', 'sqrtn' or 'sum'.")

    return embeddings
5,327,757
def DelfFeaturePostProcessing(boxes, descriptors, use_pca, pca_parameters=None): """Extract DELF features from input image. Args: boxes: [N, 4] float array which denotes the selected receptive box. N is the number of final feature points which pass through keypoint selection and NMS steps. descriptors: [N, input_dim] float array. use_pca: Whether to use PCA. pca_parameters: Only used if `use_pca` is True. Dict containing PCA parameter tensors, with keys 'mean', 'matrix', 'dim', 'use_whitening', 'variances'. Returns: locations: [N, 2] float array which denotes the selected keypoint locations. final_descriptors: [N, output_dim] float array with DELF descriptors after normalization and (possibly) PCA/whitening. """ # Get center of descriptor boxes, corresponding to feature locations. locations = CalculateKeypointCenters(boxes) final_descriptors = PostProcessDescriptors(descriptors, use_pca, pca_parameters) return locations, final_descriptors
5,327,758
def render_field(field, **kwargs): """Render a field to a Bootstrap layout.""" renderer_cls = get_field_renderer(**kwargs) return renderer_cls(field, **kwargs).render()
5,327,759
def bisection(a, b, poly, tolerance): """ Assume that poly(a) <= 0 and poly(b) >= 0. Modify a and b so that abs(b-a) < tolerance and poly(b) >= 0 and poly(a) <= 0. Return (a+b)/2 :param a: poly(a) <= 0 :param b: poly(b) >= 0 :param poly: polynomial coefficients, low order first :param tolerance: greater than 0 :return: an approximate root of the polynomial """ if evaluate(a, poly) > 0: raise Exception("poly(a) must be <= 0") if evaluate(b,poly) < 0: raise Exception("poly(b) must be >= 0") mid = (a+b) / 2 if abs(b-a) <= tolerance: return mid else: val = evaluate(mid,poly) if val <= 0: return bisection(mid, b, poly, tolerance) else: return bisection(a, mid, poly, tolerance)
5,327,760
def decrypt_message(key, message): """ returns the decrypted message """ return translate_message(key, message, 'decrypt')
5,327,761
def IOU(a_wh, b_wh): """ Intersection over Union Args: a_wh: (width, height) of box A b_wh: (width, height) of box B Returns float. """ aw, ah = a_wh bw, bh = b_wh I = min(aw, bw) * min(ah, bh) area_a = aw * ah area_b = bw * bh U = area_a + area_b - I return I / U
5,327,762
def decode_image(img_b64): """Decode image from base64. https://jdhao.github.io/2020/03/17/base64_opencv_pil_image_conversion/ """ img_bytes = base64.b64decode(img_b64) im_arr = np.frombuffer(img_bytes, dtype=np.uint8) img = cv2.imdecode(im_arr, flags=cv2.IMREAD_COLOR) img = cv2.cvtColor(img,cv2.COLOR_BGR2RGB) return img
5,327,763
def test_to_yaml_bugfix_419(): """Ensure that GH#419 is fixed""" # pylint: disable=no-self-use class CheckedSchemaModel(pandera.SchemaModel): """Schema with a global check""" a: pat.Series[pat.Int64] b: pat.Series[pat.Int64] @pandera.dataframe_check() def unregistered_check(self, _): """sample unregistered check""" ... with pytest.warns(UserWarning, match=".*registered checks.*"): CheckedSchemaModel.to_yaml()
5,327,764
def date_from_string(date_str, format_str): """ returns a date object by a string """ return datetime.strptime(date_str, format_str).date()
5,327,765
def fix_sensor_name(name): """Cleanup sensor name, returns str.""" name = re.sub(r'^(\w+)-(\w+)-(\w+)', r'\1 (\2 \3)', name, re.IGNORECASE) name = name.title() name = name.replace('Acpi', 'ACPI') name = name.replace('ACPItz', 'ACPI TZ') name = name.replace('Coretemp', 'CoreTemp') name = name.replace('Cpu', 'CPU') name = name.replace('Id ', 'ID ') name = name.replace('Isa ', 'ISA ') name = name.replace('Pci ', 'PCI ') name = name.replace('Smc', 'SMC') name = re.sub(r'(\D+)(\d+)', r'\1 \2', name, re.IGNORECASE) name = re.sub(r'^K (\d+)Temp', r'AMD K\1 Temps', name, re.IGNORECASE) name = re.sub(r'T(ccd\s+\d+|ctl|die)', r'CPU (T\1)', name, re.IGNORECASE) name = re.sub(r'\s+', ' ', name) return name
5,327,766
def handle_request_text(message): """ Obtenha a segunda mensagem, altere o estado do usuário para 2. Retorne a música completa do usuário. """ if get_user_state(message.chat.id) == 1: get_user_state(message.chat.id) bot.send_message( message.chat.id, "Escreva o nome da música!" ) update_user_state(message.chat.id, 2) author_song = message.text update_author_song(author_song, message.chat.id) else: title_song = message.text update_title_song(title_song, message.chat.id) author = get_author_song(message.chat.id) title = get_title_song(message.chat.id) update_user_state(message.chat.id, 0) try: bot.send_message( message.chat.id, get_song_text_from_genius(author, title) ) # Message was too long. Current maximum length is 4096 UTF8 characters except ApiException: bot.send_message( message.chat.id, "A música não está disponível, desculpe." ) render_initial_keyboard(message)
5,327,767
def get_relevant_coordinates(): """Returns a numpy ndarray specifying the pixel a lidar ray hits when shot through the near plane.""" coords_and_angles = np.genfromtxt('coords_and_angles.csv', delimiter=',') return np.hsplit(coords_and_angles,2)
5,327,768
def smart_split(text): """ Generator that splits a string by spaces, leaving quoted phrases together. Supports both single and double quotes, and supports escaping quotes with backslashes. In the output, strings will keep their initial and trailing quote marks and escaped quotes will remain escaped (the results can then be further processed with unescape_string_literal()). >> list(smart_split(r'This is "a person\'s" test.')) ['This', 'is', '"a person\\\'s"', 'test.'] >> list(smart_split(r"Another 'person\'s' test.")) ['Another', "'person\\'s'", 'test.'] >> list(smart_split(r'A "\"funky\" style" test.')) ['A', '"\\"funky\\" style"', 'test.'] """ for bit in split_re.finditer(text): yield bit.group(0)
5,327,769
def load_helpers(): """Try to import ``helpers.py`` from each app in INSTALLED_APPS.""" # We want to wait as long as possible to load helpers so there aren't any # weird circular imports with jingo. global _helpers_loaded if _helpers_loaded: return _helpers_loaded = True from jingo import helpers # noqa for app in settings.INSTALLED_APPS: try: app_path = import_module(app).__path__ except AttributeError: continue try: imp.find_module('helpers', app_path) except ImportError: continue import_module('%s.helpers' % app)
5,327,770
def _expand_host_port_user(lst): """ Input: list containing hostnames, (host, port)-tuples or (host, port, user)-tuples. Output: list of (host, port, user)-tuples. """ def expand(v): if isinstance(v, basestring): return (v, None, None) elif len(v) == 1: return (v[0], None, None) elif len(v) == 2: return (v[0], v[1], None) return v return [expand(x) for x in lst]
5,327,771
def noise(line, wl=11): """ Return the noise after smoothing. """ signal = smooth_and_trim(line, window_len=wl) noise = np.sqrt((line - signal) ** 2) return noise
5,327,772
def _evaluate(
    limit_batches: Optional[int],
    train_pipeline: TrainPipelineSparseDist,
    iterator: Iterator[Batch],
    next_iterator: Iterator[Batch],
    stage: str,
) -> Tuple[float, float]:
    """
    Evaluates model. Computes and prints metrics including AUROC and Accuracy. Helper
    function for train_val_test.

    Args:
        limit_batches (Optional[int]): number of batches.
        train_pipeline (TrainPipelineSparseDist): pipelined model.
        iterator (Iterator[Batch]): Iterator used for val/test batches.
        next_iterator (Iterator[Batch]): Iterator used for the next phase
            (either train if there are more epochs to train on or test if all
            epochs are complete). Used to queue up the next TRAIN_PIPELINE_STAGES - 1
            batches before train_val_test switches to the next phase. This is done
            so that when the next phase starts, the first output train_pipeline
            generates an output for is the 1st batch for that phase.
        stage (str): "val" or "test".

    Returns:
        Tuple[float, float]: auroc and accuracy result
    """
    model = train_pipeline._model
    # Disable training-only behavior (dropout, batch-norm updates) for eval.
    model.eval()
    device = train_pipeline._device
    if limit_batches is not None:
        # Compensate for the TRAIN_PIPELINE_STAGES - 1 batches appended from
        # next_iterator below, so the total number of batches processed stays
        # at limit_batches.
        limit_batches -= TRAIN_PIPELINE_STAGES - 1

    # Because TrainPipelineSparseDist buffer batches internally, we load in
    # TRAIN_PIPELINE_STAGES - 1 batches from the next_iterator into the buffers so that
    # when train_val_test switches to the next phase, train_pipeline will start
    # producing results for the TRAIN_PIPELINE_STAGES - 1 buffered batches (as opposed
    # to the last TRAIN_PIPELINE_STAGES - 1 batches from iterator).
    combined_iterator = itertools.chain(
        iterator
        if limit_batches is None
        else itertools.islice(iterator, limit_batches),
        itertools.islice(next_iterator, TRAIN_PIPELINE_STAGES - 1),
    )
    auroc = metrics.AUROC(compute_on_step=False).to(device)
    accuracy = metrics.Accuracy(compute_on_step=False).to(device)

    # Infinite iterator instead of while-loop to leverage tqdm progress bar.
    # iter(int, 1) never terminates on its own (int() == 0 != 1); the loop
    # exits via the StopIteration raised when combined_iterator is exhausted.
    for _ in tqdm(iter(int, 1), desc=f"Evaluating {stage} set"):
        try:
            _loss, logits, labels = train_pipeline.progress(combined_iterator)
            preds = torch.sigmoid(logits)
            auroc(preds, labels)
            accuracy(preds, labels)
        except StopIteration:
            break
    auroc_result = auroc.compute().item()
    accuracy_result = accuracy.compute().item()
    # Print only on rank 0 so distributed runs don't duplicate output.
    if dist.get_rank() == 0:
        print(f"AUROC over {stage} set: {auroc_result}.")
        print(f"Accuracy over {stage} set: {accuracy_result}.")
    return auroc_result, accuracy_result
5,327,773
def superimpose(fname, params):
    """Add the wavefront stored in ``fname`` onto the training map in place."""
    ### TODO: FIX HARDCODING OF FILENAMES ETC.
    target_map = readMap(params['global'] + params['train'],
                         shape = params['train_shape'])
    wavefront = Wavefront()
    wavefront.load_hdf5(params['indir'] + fname)
    # In-place add writes through to the underlying memory-mapped array.
    target_map += wavefront.data.arrEhor
5,327,774
def check_pre_release(tag_name):
    """
    Check the given tag to determine if it is a release tag, that is, whether
    it is of the form rX.Y.Z. Tags that do not match exactly (e.g., because
    they are suffixed with something like -beta# or -rc#) are considered
    pre-release tags.

    :param tag_name: the tag string to classify.
    :return: True if the tag is a pre-release tag, False for a release tag.
    """
    # Fix: the original pattern had no end anchor, so suffixed tags such as
    # r1.2.3-rc1 matched the prefix and were wrongly classified as releases.
    release_re = re.compile(r'^r[0-9]+\.[0-9]+\.[0-9]+$')
    return release_re.match(tag_name) is None
5,327,775
def _get_target_connection_details(target_connection_string): """ Returns a tuple with the raw connection details for the target machine extracted from the connection string provided in the application arguments. It is a specialized parser of that string. :param target_connection_string: the connection string provided in the arguments for the application. :return: A tuple in the form of (user, password, host, port) if a password is present in the connection string or (user, host, port) if a password is not present """ password = None connection_string_format_error = 'Invalid connection string provided. Expected: user[/password]@host[:port]' if '@' not in target_connection_string: raise TypeError(connection_string_format_error) connection_string_parts = target_connection_string.split('@') if len(connection_string_parts) != 2: raise TypeError(connection_string_parts) authentication_part = connection_string_parts[0] target_part = connection_string_parts[1] if '/' in authentication_part: auth_parts = authentication_part.split('/') if len(auth_parts) != 2: raise TypeError(connection_string_format_error) user, password = auth_parts else: user = authentication_part if ':' in target_part: conn_parts = target_part.split(':') if len(conn_parts) != 2: raise TypeError(connection_string_format_error) host, port = conn_parts try: port = int(port) except ValueError: raise TypeError(connection_string_format_error) else: host = target_part port = 22 if not len(user) or not len(host): raise TypeError(connection_string_format_error) if password: return user, password, host, int(port) else: return user, host, int(port)
5,327,776
def download_audio(cache_audio_dir, cache_text_file):
    """
    This method takes the sentences from the text file generated using
    generate_cache_text() and performs TTS inference on mimic2-api.
    The wav files and phonemes are stored in 'cache_audio_dir'

    Args:
        cache_audio_dir (path): path to store .wav files
        cache_text_file (file): file containing the sentences
    """
    if os.path.isfile(cache_text_file) and \
            os.path.exists(cache_audio_dir):
        # Only populate the cache when the audio directory is still empty;
        # an existing cache is left untouched.
        if not os.listdir(cache_audio_dir):
            session = FuturesSession()
            with open(cache_text_file, 'r') as fp:
                all_dialogs = fp.readlines()
            for each_dialog in all_dialogs:
                each_dialog = each_dialog.strip()
                # The sentence's MD5 doubles as the cache file name, so the
                # same sentence always maps to the same .wav/.pho pair.
                key = str(hashlib.md5(
                    each_dialog.encode('utf-8', 'ignore')).hexdigest())
                wav_file = os.path.join(cache_audio_dir, key + '.wav')
                # URL-encode the sentence before building the request URL.
                each_dialog = parse.quote(each_dialog)
                mimic2_url = MIMIC2_URL + each_dialog + '&visimes=True'
                try:
                    req = session.get(mimic2_url)
                    results = req.result().json()
                    # Response carries base64-encoded audio plus viseme data.
                    audio = base64.b64decode(results['audio_base64'])
                    vis = results['visimes']
                    if audio:
                        with open(wav_file, 'wb') as audiofile:
                            audiofile.write(audio)
                    if vis:
                        pho_file = os.path.join(cache_audio_dir,
                                                key + ".pho")
                        with open(pho_file, "w") as cachefile:
                            cachefile.write(json.dumps(vis))
                            # Mimic2
                            # cachefile.write(str(vis))
                            # Mimic
                except Exception as e:
                    # Skip this dialog and continue: a single failed request
                    # must not abort caching of the remaining sentences.
                    LOG.error("Unable to get pre-loaded cache "
                              "due to ({})".format(repr(e)))
            LOG.debug("Completed getting cache for {}".format(TTS))
        else:
            LOG.debug("Pre-loaded cache for {} already exists".
                      format(TTS))
    else:
        missing_path = cache_text_file if not \
            os.path.isfile(cache_text_file)\
            else cache_audio_dir
        LOG.error("Path ({}) does not exist for getting the cache"
                  .format(missing_path))
5,327,777
def async_setup_platform(hass, config, async_add_devices, discovery_info=None):
    """Set up an Arlo IP sensor."""
    arlo = hass.data.get(DATA_ARLO)
    if not arlo:
        return False

    entities = []
    for sensor_type in config.get(CONF_MONITORED_CONDITIONS):
        label = SENSOR_TYPES[sensor_type][0]
        if sensor_type == 'total_cameras':
            # Hub-wide sensor: one entity for the whole Arlo base station.
            entities.append(ArloSensor(hass, label, arlo, sensor_type))
            continue
        # Per-camera sensors: one entity per camera.
        for camera in arlo.cameras:
            entities.append(
                ArloSensor(hass, '{0} {1}'.format(label, camera.name),
                           camera, sensor_type))

    async_add_devices(entities, True)
    return True
5,327,778
def tfr_array_multitaper(epoch_data, sfreq, freqs, n_cycles=7.0,
                         zero_mean=True, time_bandwidth=None, use_fft=True,
                         decim=1, output='complex', n_jobs=1,
                         verbose=None):
    """Compute Time-Frequency Representation (TFR) using DPSS tapers.

    Same computation as `~mne.time_frequency.tfr_multitaper`, but operates
    on :class:`NumPy arrays <numpy.ndarray>` instead of `~mne.Epochs` objects.

    Parameters
    ----------
    epoch_data : array of shape (n_epochs, n_channels, n_times)
        The epochs.
    sfreq : float | int
        Sampling frequency of the data.
    freqs : array-like of float, shape (n_freqs,)
        The frequencies.
    n_cycles : float | array of float
        Number of cycles in the wavelet. Fixed number or one per frequency.
        Defaults to 7.0.
    zero_mean : bool
        If True, make sure the wavelets have a mean of zero. Defaults to True.
    time_bandwidth : float
        If None, will be set to 4.0 (3 tapers). Time x (Full) Bandwidth
        product. The number of good tapers (low-bias) is chosen automatically
        based on this to equal floor(time_bandwidth - 1). Defaults to None.
    use_fft : bool
        Use the FFT for convolutions or not. Defaults to True.
    decim : int | slice
        To reduce memory usage, decimation factor after time-frequency
        decomposition. Defaults to 1.
        If `int`, returns tfr[..., ::decim].
        If `slice`, returns tfr[..., decim].

        .. note::
            Decimation may create aliasing artifacts, yet decimation
            is done after the convolutions.

    output : str, default 'complex'

        * 'complex' : single trial complex.
        * 'power' : single trial power.
        * 'phase' : single trial phase.
        * 'avg_power' : average of single trial power.
        * 'itc' : inter-trial coherence.
        * 'avg_power_itc' : average of single trial power and inter-trial
          coherence across trials.
    %(n_jobs)s
        The number of epochs to process at the same time. The parallelization
        is implemented across channels. Defaults to 1.
    %(verbose)s

    Returns
    -------
    out : array
        Time frequency transform of epoch_data.

        If output is in ['complex', 'phase', 'power'], then shape of out
        is (n_epochs, n_chans, n_freqs, n_times), else it is
        (n_chans, n_freqs, n_times).

        If output is 'avg_power_itc', the real values code for 'avg_power'
        and the imaginary values code for the 'itc':
        out = avg_power + i * itc.

    See Also
    --------
    mne.time_frequency.tfr_multitaper
    mne.time_frequency.tfr_morlet
    mne.time_frequency.tfr_array_morlet
    mne.time_frequency.tfr_stockwell
    mne.time_frequency.tfr_array_stockwell

    Notes
    -----
    .. versionadded:: 0.14.0
    """
    # Function-level import; NOTE(review): presumably deferred to avoid a
    # circular import between the time-frequency submodules — confirm.
    from .tfr import _compute_tfr
    # Thin wrapper: all work is delegated to the generic TFR routine with
    # method='multitaper'; every keyword is forwarded unchanged.
    return _compute_tfr(epoch_data, freqs, sfreq=sfreq, method='multitaper',
                        n_cycles=n_cycles, zero_mean=zero_mean,
                        time_bandwidth=time_bandwidth, use_fft=use_fft,
                        decim=decim, output=output, n_jobs=n_jobs,
                        verbose=verbose)
5,327,779
def card(id: int):
    """Return the card dict with the given id, or a not-found message."""
    hit = next((c for c in cards["cards"] if c["id"] == id), None)
    logging.info("card")
    if hit is not None:
        return hit
    return "Card not found."
5,327,780
def build_norm_layer(cfg, num_features, postfix=""):
    """ Instantiate a normalization layer from a config dict.

    Args:
        cfg (dict): must contain ``type`` (one of ``BN``, ``BN1d``, ``GN``);
            an optional ``requires_grad`` (bool) freezes/unfreezes the
            layer's parameters; remaining keys are forwarded to the layer
            constructor.
        num_features (int): number of channels from input.
        postfix (int, str): appended into norm abbreviation to
            create named layer.

    Returns:
        name (str): abbreviation + postfix
        layer (nn.Module): created norm layer
    """
    registry = {
        # format: layer_type: (abbreviation, module)
        "BN": ("bn", nn.BatchNorm2d),
        "BN1d": ("bn1d", nn.BatchNorm1d),
        "GN": ("gn", nn.GroupNorm),
    }

    assert isinstance(cfg, dict) and "type" in cfg
    options = cfg.copy()
    layer_type = options.pop("type")
    if layer_type not in registry:
        raise KeyError("Unrecognized norm type {}".format(layer_type))
    abbr, layer_cls = registry[layer_type]
    if layer_cls is None:
        raise NotImplementedError

    assert isinstance(postfix, (int, str))
    name = abbr + str(postfix)

    requires_grad = options.pop("requires_grad", True)
    options.setdefault("eps", 1e-5)
    if layer_type == "GN":
        # GroupNorm takes the channel count as a keyword and additionally
        # needs num_groups from the config.
        assert "num_groups" in options
        layer = layer_cls(num_channels=num_features, **options)
    else:
        layer = layer_cls(num_features, **options)

    for param in layer.parameters():
        param.requires_grad = requires_grad
    return name, layer
5,327,781
def _add_response_tree ( tree , *args ) :
    """Add a TMVA response to a ROOT.TTree/ROOT.TChain and write it back.

    Returns a ``(status_code, tree)`` pair.  When the owning file is
    writable, the tree is re-fetched from its directory after the update
    has been flushed to disk; otherwise the (modified, unflushed) input
    tree is returned as-is.
    """
    import ostap.trees.trees
    from ostap.core.core import Ostap, ROOTCWD
    from ostap.io.root_file import REOPEN
    tdir = tree.GetDirectory()
    # Preserve the global ROOT directory and reopen the owning file so the
    # updated tree can be written back in place.
    with ROOTCWD () , REOPEN ( tdir ) as tfile :
        tdir.cd()
        # Delegate the actual work to the C++ helper.
        sc = Ostap.TMVA.addResponse ( tree , *args )
        if sc.isFailure() :
            logger.error ( 'Error from Ostap::TMVA::addResponse %s' % sc )
        if tfile.IsWritable() :
            # Flush the updated tree, overwriting the previous cycle, and
            # return a fresh handle read back from the directory.
            tfile.Write( "" , ROOT.TFile.kOverwrite )
            return sc , tdir.Get ( tree.GetName() )
        else :
            logger.warning ( "Can't write TTree back to the file" )
            return sc , tree
5,327,782
def data_collection(dataframe):
    """Attach statistical columns to ``dataframe`` in place.

    For every sentence in ``question_text``, collects punctuation, POS and
    entity counts (one column each), then adds ``sentiment`` and
    ``polarity`` columns computed by the sentence-processing step.
    """
    texts = dataframe['question_text'].values

    # One list of per-sentence values for every punctuation mark,
    # POS tag and entity label.
    accumulators = [
        {key: [] for key in PUNCT_DICT.keys()},
        {tag: [] for tag in POS_LIST},
        {label: [] for label in ENT_LIST},
    ]

    progress = Bar("Collecting data over sentences", max=len(texts))
    for sentence in texts:
        count_dicts = build_count_dict(sentence)
        for accumulator, counts in zip(accumulators, count_dicts):
            for key, value in counts.items():
                accumulator[key].append(value)
        progress.next()
    progress.finish()

    for accumulator in accumulators:
        for key, value in accumulator.items():
            dataframe[key] = pd.Series(value, index=dataframe.index)

    sentiment_df = se.sentence_processing(dataframe)[2]
    dataframe['sentiment'] = sentiment_df['sentiment']
    dataframe['polarity'] = sentiment_df['polarity']
5,327,783
def scalarProd(v, w):
    """Return the scalar (dot) product of two vectors in n-space.

    Params:
        v: sequence of numbers (vector components).
        w: sequence of numbers, same length as v.
    returns:
        The dot product v . w.
    """
    # Fix: the original body referenced undefined names ``x``/``y`` (NameError
    # at runtime) and computed (x0+x1)*(y0+y1), which is not a scalar product.
    return np.dot(v, w)
5,327,784
def rename_genbank(file: str, out: str, new_locus_tag_prefix: str,
                   old_locus_tag_prefix: str = None, validate: bool = False):
    """
    Rewrite the locus tags of a GenBank file.

    :param file: input file
    :param out: output file
    :param new_locus_tag_prefix: desired locus tag
    :param old_locus_tag_prefix: locus tag to replace
    :param validate: if true, perform sanity check
    """
    genbank = GenBankFile(file=file)
    genbank.rename(
        out=out,
        new_locus_tag_prefix=new_locus_tag_prefix,
        old_locus_tag_prefix=old_locus_tag_prefix,
        validate=validate,
    )
5,327,785
def sample_summary(df, extra_values=None, params=SummaryParams()):
    """
    Summarise posterior samples: mean, std, mode and HPD intervals.

    Parameters
    ------------
    df : Panda's dataframe
        Contains parameter sample values: each column is a parameter.
    extra_values : Panda's dataframe
        Additional values to be shown for parameters. Indexes are parameter
        names, and columns contain additional values to be shown in summary.

    Returns
    -------
    Panda's dataframe
        Panda's dataframe containing the summary for all parameters.
    str
        text of the summary table
    """
    rows = []
    for name in df:
        samples = df[name].to_numpy()
        mode = get_mode(df[name])
        row = [name, df[name].mean(), df[name].std(), mode]
        for idx, probability in enumerate(params.hpdis):
            interval = hpdi(samples, probability=probability)
            if idx == 0:
                # The first (narrowest) interval also provides the
                # asymmetric uncertainties around the mode.
                row += [interval[1] - mode, mode - interval[0]]
            row += [interval[0], interval[1]]
        if extra_values is not None:
            row += extra_values.loc[name].values.tolist()
        rows.append(row)

    headers = ['Name', 'Mean', 'Std', 'Mode', '+', '-']
    for hpdi_percent in params.hpdi_percent():
        headers += [f'{hpdi_percent}CI-', f'{hpdi_percent}CI+']
    if extra_values is not None:
        headers += extra_values.columns.values.tolist()

    formats = [".2f"] * len(headers)
    if 'N_Eff' in headers:
        # Effective sample sizes are shown without decimals.
        formats[headers.index('N_Eff')] = ".0f"
    table = tabulate(rows, headers=headers, floatfmt=formats,
                     tablefmt="pipe")

    df_summary = pd.DataFrame(rows, columns=headers,
                              index=df.columns.values)
    df_summary.drop('Name', axis=1, inplace=True)
    return df_summary, table
5,327,786
def classFactory(iface):  # pylint: disable=invalid-name
    """Load GNAT class from file GNAT.

    :param iface: A QGIS interface instance.
    :type iface: QgsInterface
    """
    # Fix: the import was commented out, leaving GNAT undefined (NameError).
    # The deferred import keeps plugin load cost out of module import time.
    from .gnat import GNAT
    return GNAT(iface)
5,327,787
def get_quicksetup_password(ctx, param, value):  # pylint: disable=unused-argument
    """Determine the password to be used as default for the Postgres connection in `verdi quicksetup`

    If a value is explicitly passed, that value is returned. Otherwise the
    database username from the context is looked up in the existing profiles:
    the first profile with a matching username supplies its password; when no
    profile matches, a random password is generated.

    :param ctx: click context which should contain the contextual parameters
    :return: the password
    """
    from aiida.common.hashing import get_random_string

    if value is not None:
        return value

    username = ctx.params['db_username']
    config = get_config()

    for available_profile in config.profiles:
        storage = available_profile.storage_config
        if storage['database_username'] == username:
            return storage['database_password']
    return get_random_string(16)
5,327,788
def convert(day_input: List[str]) -> List[List[str]]:
    """Break each input line into a list of hex-grid directions.

    Each line is a run of ``e``/``w``/``ne``/``nw``/``se``/``sw`` tokens
    with no separators; ``e`` and ``w`` terminate a token, while ``n`` and
    ``s`` are prefixes carried into the next character.
    """
    def tokenize(line: str) -> List[str]:
        tokens: List[str] = []
        pending = ''
        for ch in line:
            if ch in ('e', 'w'):
                tokens.append(pending + ch)
                pending = ''
            else:
                pending = ch
        return tokens

    return [tokenize(line) for line in day_input]
5,327,789
# NOTE(review): appears to be an auto-generated API-usage stub recording one
# observed call signature of numpy.testing.assert_allclose (seen once in
# matplotlib); the body is intentionally empty — confirm before editing.
def assert_allclose(
    actual: Tuple[numpy.uint8, numpy.uint8, numpy.uint8, numpy.uint8, numpy.uint8],
    desired: List[numpy.uint8],
):
    """
    usage.matplotlib: 1
    """
    ...
5,327,790
def read(filepath):
    """Return the entire UTF-8 decoded content of the file at ``filepath``."""
    with codecs.open(filepath, encoding='utf-8') as handle:
        content = handle.read()
    return content
5,327,791
def trend_indicator(trend, style):
    """Return an arrow glyph and an RGBA color describing ``trend``."""
    # The sentinel 0.00042 and NaN both mean "trend unknown".
    if trend == 0.00042 or np.isnan(trend):
        return '?', (0, 0, 0, 0)
    symbols = ('→', '↗', '↑', '↓', '↘')
    clipped = min(max(trend, -1), 1)  # limit the trend
    alpha = clipped * clipped
    # Green when direction agrees with what the style expects, red otherwise.
    rising_expected = "_up" in style
    if (clipped > 0) == rising_expected:
        color = (0, 1, 0, alpha)
    else:
        color = (1, 0, 0, alpha)
    # round(clipped * 2) ranges over -2..2; negative values index the
    # symbols tuple from the end, selecting the downward arrows.
    return symbols[round(clipped * 2)], color
5,327,792
async def wait_all_tasks_blocked() -> None:
    """Block until every other running task is waiting for something."""
    backend = _get_asynclib()
    await backend.wait_all_tasks_blocked()
5,327,793
def then_cfb_t(result: dict, note: str):
    """Assert that the computed CFB_t value is within the acceptable
    margin of error of the expected value."""
    computed = result['python'].cfb_t
    expected = result['expected']['cfb']
    check_metric('CFB_t',
                 result['fuel_type'],
                 computed,
                 expected,
                 acceptable_margin_of_error,
                 note)
5,327,794
def space(fn: Callable[[State], T], verbose: bool=False) -> Iterable[T]:
    """
    Iterate over every value ``fn`` can produce across the full state space.

    During iteration, ``fn`` is invoked repeatedly, each time receiving a
    fresh :class:`~exhaust.State` instance as its only argument.

    :param fn: The function to generate values from.
    :param verbose: If True, print the state of the generator.
    """
    iterable = SpaceIterable(fn, verbose=verbose)
    return iterable
5,327,795
def get_menu_as_json(menu):
    """Serialize the top menu to a JSON string.

    Covers three fixed levels: top items, their children and their
    grandchildren.
    """

    def serialize(item, depth):
        # Only the top two levels carry a "child_items" list; grandchildren
        # are plain dicts, mirroring the fixed three-level menu depth.
        node = get_menu_item_as_dict(item)
        if depth < 2:
            node["child_items"] = [
                serialize(child, depth + 1) for child in item.children.all()
            ]
        return node

    roots = menu.items.filter(parent=None)
    return json.dumps([serialize(item, 0) for item in roots])
5,327,796
def vortex_contribution_normal(panels):
    """
    Builds the vortex contribution matrix for the normal velocity.

    Parameters
    ----------
    panels: 1D array of Panel objects
        List of panels.

    Returns
    -------
    A: 2D Numpy array of floats
        Vortex contribution matrix.
    """
    A = numpy.empty((panels.size, panels.size), dtype=float)
    # vortex contribution on a panel from itself
    numpy.fill_diagonal(A, 0.0)
    # vortex contribution on a panel from others
    for i, panel_i in enumerate(panels):
        for j, panel_j in enumerate(panels):
            if i != j:
                A[i, j] = -0.5 / numpy.pi * integral(panel_i.xc, panel_i.yc,
                                                     panel_j,
                                                     numpy.sin(panel_i.beta),
                                                     -numpy.cos(panel_i.beta))
    # Fix: removed leftover debug `print(A)` that dumped the full matrix to
    # stdout on every call.
    return A
5,327,797
def find_template(raw, name):
    """Return the Template node with the given name, or None if the parsed
    wikitext contains no such template."""
    expander = Expander('', wikidb=DictDB())
    # Depth-first walk over the parse tree using an explicit stack.
    stack = [parse(raw, replace_tags=expander.replace_tags)]
    while stack:
        node = stack.pop()
        if isinstance(node, basestring):
            continue
        if (isinstance(node, Template) and isinstance(node[0], basestring)
                and node[0] == name):
            return node
        stack.extend(node)
5,327,798
def Doxygenate(text):
    """Change commenting style to the one recognized by Doxygen.

    Rewrites are applied in order: ``/*`` -> ``/**``, ``//`` -> ``///``,
    and ``// Author: X`` -> ``/** \\author X */``.  The converted text is
    printed (preserving the original behaviour) and also returned.
    """
    # /* -> /**
    text = re.sub(r'(/\*)([ \t\n]) ?', r'/**\2', text)
    # // -> ///
    text = re.sub(r'(//)([ \t\n]) ?', r'///\2', text)
    # // Author:-> /** \author */
    text = re.sub(r'(//[ ]+Author:?[ ]*)([^\n]+)', r'/** \\author \2 */', text)
    # Fix: `print text` is a SyntaxError on Python 3; the parenthesized form
    # is valid on both Python 2 and 3.
    print(text)
    return text
5,327,799