content
stringlengths
22
815k
id
int64
0
4.91M
def process_docstrings(docstrings):
    """Process raw docstring chunks into a nested docs structure.

    Args:
        docstrings: iterable of chunk tuples indexed by the CHUNK_*
            constants (signature, file, line, ...).

    Returns:
        dict mapping module name -> {"header": chunk,
                                     "items": {itemname: chunk}}.
    """
    docs = {}
    # First pass: find all module definitions (signature starts with "===")
    # and prepare the docs structure.  Use CHUNK_SIGN consistently instead
    # of the magic index 2 the original mixed in.
    for chunk in docstrings:
        if chunk[CHUNK_SIGN].startswith("==="):
            # This is a module definition
            modulename = chunk[CHUNK_SIGN].strip("= ")
            dbg("process_docstrings: Module: %s at %s:%s" % (
                modulename,
                chunk[CHUNK_FILE],
                chunk[CHUNK_LINE]))
            docs[modulename] = {}
            docs[modulename]["header"] = chunk
            docs[modulename]["items"] = {}
    # Second pass: assign every non-module (item) definition to its module.
    for chunk in docstrings:
        if not chunk[CHUNK_SIGN].startswith("==="):
            # This is an item definition
            itemname = find_itemname_from_signature(chunk[CHUNK_SIGN])
            dbg("process_docstrings: Found item: %s at %s:%s" % (
                itemname,
                chunk[CHUNK_FILE],
                chunk[CHUNK_LINE]))
            modulename = find_module_for_item(list(docs.keys()), itemname)
            dbg("process_docstrings: Assigning item to module: %s" % modulename)
            # Items whose module cannot be determined are dropped.
            if modulename is not None:
                docs[modulename]["items"][itemname] = chunk
    return docs
5,333,100
def create_deck(request: HttpRequest):
    """Handle the deck-code form and render/save the decoded deck.

    POST with a "deckstring" field decodes the submitted deck code into a
    Deck; POST with a "deck_name" field saves a previously decoded deck
    under the given name for the requesting user.  GET renders an empty
    DeckstringForm.
    """
    deck, deckstring_form, deck_save_form = None, None, None
    title = _('Hearthstone | Decoding the deck code')
    if request.method == 'POST':
        if 'deckstring' in request.POST:
            # Deck code was submitted from the DeckstringForm
            deckstring_form = DeckstringForm(request.POST)
            if deckstring_form.is_valid():
                try:
                    deckstring = deckstring_form.cleaned_data['deckstring']
                    deckstring = get_clean_deckstring(deckstring)
                    # Atomic so a partially-created deck is rolled back on error.
                    with transaction.atomic():
                        deck = Deck.create_from_deckstring(deckstring)
                    deck_name_init = f'{deck.deck_class}-{deck.pk}'
                    deck_save_form = DeckSaveForm(
                        initial={'string_to_save': deckstring,
                                 'deck_name': deck_name_init})
                    title = deck
                except DecodeError as de:
                    msg = _('Error: %(error)s') % {'error': de}
                    deckstring_form.add_error(None, msg)
                except UnsupportedCards as u:
                    msg = _('%(error)s. The database will be updated shortly.') % {'error': u}
                    deckstring_form.add_error(None, msg)
        if 'deck_name' in request.POST:
            # Deck name was submitted from the DeckSaveForm
            deck = Deck.create_from_deckstring(request.POST['string_to_save'],
                                               named=True)
            deck.author = request.user.author
            deck.name = request.POST['deck_name']
            deck.save()
            return redirect(deck)
    else:
        deckstring_form = DeckstringForm()
    context = {'title': title,
               'deckstring_form': deckstring_form,
               'deck_save_form': deck_save_form,
               'deck': deck,
               'similar': find_similar_decks(deck)}
    return render(request, template_name='decks/deck_detail.html',
                  context=context)
5,333,101
def read_completed_flag(uarm, flag_type):
    """
    Read Complete Flag from EEPROM
    :param uarm: uArm instance
    :param flag_type: protocol.CALIBRATION_FLAG, protocol.CALIBRATION_LINEAR_FLAG,
        protocol.CALIBRATION_SERVO_FLAG
    :return: True when the stored ROM value equals CONFIRM_FLAG, False when it
        does not, None for an unrecognized flag_type.
    """
    # All three flag types use the same lookup; unrecognized types fall
    # through and return None, exactly like the original if/elif chain.
    if flag_type in (CALIBRATION_FLAG,
                     CALIBRATION_LINEAR_FLAG,
                     CALIBRATION_SERVO_FLAG):
        return uarm.get_rom_data(flag_type) == CONFIRM_FLAG
5,333,102
def update_collections(parameters, session=None):
    """
    update collections.

    :param parameters: list of dictionary of parameters.
    :param session: The database session in use.

    :raises NoObject: If no content is founded.
    :raises DatabaseException: If there is a database error.
    """
    try:
        # Hoisted out of the loop: one timestamp per batch is both cheaper
        # and gives every updated row a consistent updated_at value.
        now = datetime.datetime.utcnow()
        for parameter in parameters:
            parameter['updated_at'] = now
        session.bulk_update_mappings(models.Collection, parameters)
    except sqlalchemy.orm.exc.NoResultFound as error:
        raise exceptions.NoObject('Collection cannot be found: %s' % (error))
5,333,103
def zpves(output_string): """ Reads the zero-point energies for each of the hindered rotors from MESS output file string. :param output_string: string of lines of MESS output file :type output_string: str :return zpves: zero-point energy for each of the rotors :rtype: list(float) """ # Patterns for the ZPVE of a rotor num_patterns = (app.EXPONENTIAL_FLOAT, app.FLOAT) pattern1 = (app.escape('minimum energy[kcal/mol]') + app.one_or_more(app.SPACE) + '=' + app.one_or_more(app.SPACE) + app.capturing(app.one_of_these(num_patterns))) pattern2 = (app.escape('ground energy [kcal/mol]') + app.one_or_more(app.SPACE) + '=' + app.one_or_more(app.SPACE) + app.capturing(app.one_of_these(num_patterns))) # Obtain each ZPVE from the output string tmp1 = [-float(val) for val in apf.all_captures(pattern1, output_string)] tmp2 = [float(val) for val in apf.all_captures(pattern2, output_string)] tors_zpes = [sum(tmp) for tmp in zip(tmp1, tmp2)] # print('tors_zpes calc test:', tmp1, tmp2, tors_zpes) return tors_zpes
5,333,104
def _get_cast_type(field_type: type, value: Any) -> Optional[Callable]: """Get a casting callable for a field type/value.""" if type(value) is dict: return _get_cast_type_for_dict(field_type) if type(value) is str: return _get_cast_type_for_str(field_type) return None
5,333,105
def read_data(filepath, strict_lang='en'):
    """Read twitter data in csv format in order to preprocess it.

    Args:
        filepath (str): a filepath to a csv file with twitter data.
        strict_lang (str, optional): if not None, keep only tweets whose
            language metadata equals this code. Defaults to 'en'.

    Returns:
        pandas.DataFrame: deduplicated tweets with mentions and URLs
        anonymized in the ``text`` column.
    """
    data = pd.read_csv(filepath,
                       names=["id", "user", "language", "text", "date", "favs"])
    # Apply language selection if specified.
    if strict_lang is not None:  # fixed: `!= None` -> identity check
        data = data.loc[data['language'] == strict_lang]
    # Drop duplicate tweets.
    data.drop_duplicates(subset=['text'], inplace=True)
    # Anonymize mentions in tweets.  Raw string fixes the invalid "\w"
    # escape-sequence warning in the original pattern.
    mention = re.compile(r"@\w+")
    data.text = data.text.str.replace(mention, '@USER')
    # Anonymize urls in tweets (URL is a module-level pattern/constant).
    data.text = data.text.str.replace(URL, 'URL')
    return data
5,333,106
async def absent(hub, ctx, name, resource_group, connection_auth=None, **kwargs):
    """
    .. versionadded:: 4.0.0

    Ensure the specified disk does not exist in a resource group.

    :param name: The name of the disk.

    :param resource_group: The name of the resource group containing the disk.

    :param connection_auth: A dict with subscription and authentication
        parameters to be used in connecting to the Azure Resource Manager API.

    Example usage:

    .. code-block:: yaml

        Ensure disk absent:
            azurerm.compute.disk.absent:
                - name: test_disk
                - resource_group: test_group

    """
    ret = {"name": name, "result": False, "comment": "", "changes": {}}

    if not isinstance(connection_auth, dict):
        if ctx["acct"]:
            connection_auth = ctx["acct"]
        else:
            ret[
                "comment"
            ] = "Connection information must be specified via acct or connection_auth dictionary!"
            return ret

    disk = await hub.exec.azurerm.compute.disk.get(
        ctx, name, resource_group, azurerm_log_level="info", **connection_auth
    )

    # A lookup error means the disk is already absent -- success, no changes.
    if "error" in disk:
        ret["result"] = True
        ret["comment"] = "Disk {0} was not found.".format(name)
        return ret

    # Test mode: report what would happen without actually deleting.
    if ctx["test"]:
        ret["comment"] = "Disk {0} would be deleted.".format(name)
        ret["result"] = None
        ret["changes"] = {
            "old": disk,
            "new": {},
        }
        return ret

    deleted = await hub.exec.azurerm.compute.disk.delete(
        ctx, name, resource_group, **connection_auth
    )

    if deleted:
        ret["result"] = True
        ret["comment"] = "Disk {0} has been deleted.".format(name)
        ret["changes"] = {"old": disk, "new": {}}
        return ret

    ret["comment"] = "Failed to delete disk {0}!".format(name)
    return ret
5,333,107
def MakeCommandName(name):
    """Return *name* with '.exe' appended on Windows, unchanged elsewhere."""
    suffix = '.exe' if os.name == 'nt' else ''
    return name + suffix
5,333,108
def read_meta(path=path):
    """Read a .tsv file into a numpy array, discarding the header row."""
    with open(path) as fd:
        reader = csv.reader(fd, delimiter="\t", quotechar='"')
        # Consume the header row (no-op on an empty file, matching the
        # original index-0 skip).
        next(reader, None)
        return np.array(list(reader))
5,333,109
def _update_dicts(name_scope,
                  model_layer,
                  input_to_in_layer,
                  model_name_to_output,
                  prev_node_name):
    """Updates input_to_in_layer, model_name_to_output, and prev_node_name
    based on the model_layer.

    Args:
      name_scope: a string representing a scope name, similar to that of
        tf.name_scope.
      model_layer: a dict representing a Keras model configuration.
      input_to_in_layer: a dict mapping Keras.layers.Input to inbound layer.
      model_name_to_output: a dict mapping Keras Model name to output layer
        of the model.
      prev_node_name: a string representing a previous, in sequential model
        layout, node name.

    Returns:
      A tuple of (input_to_in_layer, model_name_to_output, prev_node_name).

    Raises:
      ValueError: if model_layer does not contain a nested model config.
    """
    layer_config = model_layer.get('config')
    if not layer_config.get('layers'):
        raise ValueError('layer is not a model.')

    node_name = _scoped_name(name_scope, layer_config.get('name'))
    input_layers = layer_config.get('input_layers')
    output_layers = layer_config.get('output_layers')
    inbound_nodes = model_layer.get('inbound_nodes')

    # A model is Functional iff it declares explicit input and output layers.
    is_functional_model = bool(input_layers and output_layers)
    # When the parent model is functional, the current layer carries the
    # 'inbound_nodes' property.
    is_parent_functional_model = bool(inbound_nodes)

    if is_parent_functional_model and is_functional_model:
        # Link each of this model's inputs to the parent's inbound node.
        for (input_layer, inbound_node) in zip(input_layers, inbound_nodes):
            input_layer_name = _scoped_name(node_name, input_layer)
            inbound_node_name = _scoped_name(name_scope, inbound_node[0])
            input_to_in_layer[input_layer_name] = inbound_node_name
    elif is_parent_functional_model and not is_functional_model:
        # Sequential model can take only one input. Make sure inbound to the
        # model is linked to the first layer in the Sequential model.
        prev_node_name = _scoped_name(name_scope, inbound_nodes[0][0])
    elif not is_parent_functional_model and prev_node_name and is_functional_model:
        # Fixed: the failure message previously read `len(input_layer)`, an
        # undefined name at this point, so a violation raised NameError
        # instead of the intended AssertionError message.
        assert len(input_layers) == 1, (
            'Cannot have multi-input Functional model when parent model '
            'is not Functional. Number of input layers: %d' % len(input_layers))
        input_layer = input_layers[0]
        input_layer_name = _scoped_name(node_name, input_layer)
        input_to_in_layer[input_layer_name] = prev_node_name

    if is_functional_model and output_layers:
        layers = _norm_to_list_of_layers(output_layers)
        layer_names = [_scoped_name(node_name, layer[0]) for layer in layers]
        model_name_to_output[node_name] = layer_names
    else:
        # Sequential models have no declared outputs: fall back to the last
        # layer in the stack as the output node.
        last_layer = layer_config.get('layers')[-1]
        last_layer_name = last_layer.get('config').get('name')
        output_node = _scoped_name(node_name, last_layer_name)
        model_name_to_output[node_name] = [output_node]
    return (input_to_in_layer, model_name_to_output, prev_node_name)
5,333,110
def test_disabling_topology_loadbalancing():
    """
    Title: Balances traffic correctly when loadbalancer topology elements
    are disabled.

    New connections to the VIP should fail when any of the elements are
    disabled. In the case of pool members, connections should fail when *all*
    pool members are disabled. In all cases, connections should succeed when
    the device is re-enabled.

    Scenario:
    When: A VM sends TCP packets to a VIP's IP address / port.
    And: We have 3 backends of equal weight, different devices are disabled.
    Then: The loadbalancer sends traffic to a backend when the topology is
    fully enabled (admin state up) and connections fail when elements are
    disabled.
    """
    sender = BM.get_iface_for_port('bridge-000-003', 1)

    # For each device in the L4LB topology:
    # - Disable the device, test hitting VIP fails
    # - Re-enable the device, test hitting VIP succeeds
    for toggle_action in (action_pool_members,
                          action_pool,
                          action_vips,
                          action_loadbalancer):
        disable_and_assert_traffic_fails(sender, toggle_action)
        enable_and_assert_traffic_succeeds(sender, toggle_action)
5,333,111
def geomance_results(session_key):
    """Poll the Redis queue for the worker result tied to *session_key*.

    Returns a JSON payload with ready=False while the delayed job is still
    running; once finished, deletes the queue entry and returns the result
    and its status.
    """
    rv = DelayedResult(session_key)
    if rv.return_value is None:
        # Worker has not finished yet.
        return jsonify(ready=False)

    # Clean up the Redis key now that the result is being consumed.
    redis.delete(session_key)
    result = rv.return_value
    return jsonify(ready=True, result=result['result'],
                   status=result['status'])
5,333,112
def _return_dataframe_type(dataframe, dataframe_type):
    """
    Helper method for returning the dataframe in spark/pandas/numpy/python,
    depending on user preferences

    Args:
        :dataframe: the spark dataframe to convert
        :dataframe_type: the type to convert to (spark,pandas,numpy,python)

    Returns:
        The dataframe converted to either spark, pandas, numpy or python;
        implicitly None when dataframe_type matches none of the constants.
    """
    if dataframe_type == constants.FEATURE_STORE.DATAFRAME_TYPE_SPARK:
        return dataframe
    if dataframe_type == constants.FEATURE_STORE.DATAFRAME_TYPE_PANDAS:
        return dataframe.toPandas()
    if dataframe_type == constants.FEATURE_STORE.DATAFRAME_TYPE_NUMPY:
        # collect() pulls all rows to the driver before conversion.
        return np.array(dataframe.collect())
    if dataframe_type == constants.FEATURE_STORE.DATAFRAME_TYPE_PYTHON:
        return dataframe.collect()
5,333,113
def split_dataset(
        raw: pd.DataFrame,
        train_ratio: float,
        val_ratio: float,
        lags: int,
        verbose: bool = False
) -> Tuple[np.ndarray]:
    """
    Generate and split the prepared dataset for RNN training into training,
    testing and validation sets.

    Args:
        raw: A dataframe containing the prepared dataset returned from
            prepare_dataset method.
        train_ratio: A float representing the ratio of the whole dataset to
            be used as training set.
        val_ratio: A float denoting the ratio of the whole dataset to be
            used as validation set.
            ** Note the sum of train_ratio and val_ratio should between 0 and 1.
        lags: An integer indicating the number of lagged observations fed to
            gen_supervised_sequence per sample (assumed -- TODO confirm
            against gen_supervised_sequence).

    Returns:
        return a 6-tuple.
        Univariate case format:
            X: (num_samples, lags, 1)
            y: (num_samples, 1)
        Tuple format (note: y_val before y_test, matching the actual
        return statement below):
            (X_train, X_val, X_test, y_train, y_val, y_test)
    """
    # ======== Args Check ========
    assert isinstance(
        raw, pd.DataFrame), "Raw dataset should be a pandas dataframe."

    assert type(train_ratio) in [float, np.float_]\
        and 0 < train_ratio <= 1,\
        f"train_ratio should be a float within range (0,1], received: {train_ratio}"

    assert type(val_ratio) in [float, np.float_]\
        and 0 < val_ratio <= 1,\
        f"val_ratio should be a float within range (0,1], received: {val_ratio}"

    assert type(lags) in [int, np.int_]\
        and lags >= 1,\
        f"lags should be an integer at least 1, received: {lags}"

    # ======== Core ========
    # Whatever is left over after train and validation becomes the test set.
    test_ratio = 1 - train_ratio - val_ratio
    df = normalize(
        raw,
        train_ratio
    )
    X_raw, y_raw = gen_supervised_sequence(
        df, lags, df.columns[0], sequential_label=False)
    # shuffle=False keeps chronological order for time-series data.
    (X_train, X_test, y_train, y_test) = train_test_split(
        X_raw, y_raw, test_size=test_ratio, shuffle=False)
    # Carve the validation set out of the training portion.
    (X_train, X_val, y_train, y_val) = train_test_split(
        X_train, y_train,
        test_size=val_ratio / (val_ratio + train_ratio),
        shuffle=False
    )

    def trans(x):
        return x.reshape(-1, 1)

    y_train = trans(y_train)
    y_test = trans(y_test)
    y_val = trans(y_val)

    if verbose:
        print(
            f"Training and testing set generated,\
            \nX_train shape: {X_train.shape}\
            \ny_train shape: {y_train.shape}\
            \nX_test shape: {X_test.shape}\
            \ny_test shape: {y_test.shape}\
            \nX_validation shape: {X_val.shape}\
            \ny_validation shape: {y_val.shape}")
    return (
        X_train, X_val, X_test,
        y_train, y_val, y_test
    )
5,333,114
def main():
    """Main: parse command-line arguments, open the serial connection to the
    board, and run the requested mpytool commands."""
    parser = _argparse.ArgumentParser(
        formatter_class=_argparse.RawTextHelpFormatter,
        epilog=_COMMANDS_HELP_STR)
    parser.add_argument(
        "-V", "--version", action='version', version=_VERSION_STR)
    parser.add_argument('-p', '--port', required=True, help="serial port")
    parser.add_argument(
        '-d', '--debug', default=0, action='count', help='set debug level')
    parser.add_argument(
        '-v', '--verbose', default=0, action='count', help='verbose output')
    parser.add_argument(
        "-e", "--exclude-dir", type=str, action='append',
        help='exclude dir, '
        'by default are excluded directories: __pycache__, .git, .svn')
    parser.add_argument('commands', nargs='*', help='commands')
    args = parser.parse_args()
    # Each -d raises the logger level; base level is 1.
    log = SimpleColorLogger(args.debug + 1)
    try:
        conn = _mpytool.ConnSerial(
            port=args.port, baudrate=115200, log=log)
    except _mpytool.ConnError as err:
        # Port could not be opened: report and bail out without a traceback.
        log.error(err)
        return
    mpy_tool = MpyTool(
        conn, log=log, verbose=args.verbose, exclude_dirs=args.exclude_dir)
    mpy_tool.process_commands(args.commands)
5,333,115
def test_n_ary_crossover_bad_crossover_points():
    """Test that n_ary_crossover raises RuntimeError when asked for more
    crossover points than the genome has positions."""
    pop = [Individual([0, 0]),
           Individual([1, 1])]

    i = ops.naive_cyclic_selection(pop)

    with pytest.raises(RuntimeError):
        # list() forces evaluation of the lazy operator pipeline; the
        # result itself is unused (the dead `new_pop` binding is removed).
        list(itertools.islice(ops.n_ary_crossover(i, num_points=3), 2))
5,333,116
def test_contributor_affiliations_invalid(
    running_app, minimal_record_with_contributor
):
    """Should fail on invalid id's and invalid structure."""
    minimal_record = minimal_record_with_contributor

    # The id "invalid" does not exist, so the relation lookup must fail
    # at commit time.
    minimal_record["metadata"]["contributors"][0]["affiliations"] = (
        [{"id": "invalid"}]
    )
    pytest.raises(InvalidRelationValue, RDMDraft.create(minimal_record).commit)

    # Not a list of objects
    minimal_record["metadata"]["contributors"][0]["affiliations"] = (
        {"id": "cern"}
    )
    pytest.raises(ValidationError, RDMDraft.create, minimal_record)

    # no additional keys are allowed
    minimal_record["metadata"]["contributors"][0]["affiliations"] = (
        [{"test": "cern"}]
    )
    pytest.raises(ValidationError, RDMDraft.create, minimal_record)

    # non-string types are not allowed as id values
    minimal_record["metadata"]["contributors"][0]["affiliations"] = [{"id": 1}]
    pytest.raises(ValidationError, RDMDraft.create, minimal_record)

    # No duplicates
    minimal_record["metadata"]["contributors"][0]["affiliations"] = (
        [{"id": "cern"}, {"id": "cern"}]
    )
    pytest.raises(ValidationError, RDMDraft.create, minimal_record)
5,333,117
def test_platform():
    """Run windows only tests: build psutil, prepare the test environment,
    then execute the Windows-specific test module."""
    build()
    test_setup()
    sh("%s psutil\\tests\\test_windows.py" % PYTHON)
5,333,118
def CompileRes(target, src, opts):
    """Compiles a Windows .rc file into a .res file.

    Args:
        target: output .res file path.
        src: input .rc file path.
        opts: build options; consulted for extra include dirs ("DIR:") and
            for which INCDIRECTORIES / DEFSYMBOLS entries apply.
    """
    ipath = GetListOption(opts, "DIR:")
    if (COMPILER == "MSVC"):
        # MSVC resource compiler: rc /Fo<out> /I<dirs> /D<defs> <src>
        cmd = "rc"
        cmd += " /Fo" + BracketNameWithQuotes(target)
        for x in ipath:
            cmd += " /I" + x
        for (opt, dir) in INCDIRECTORIES:
            if (opt == "ALWAYS") or (opt in opts):
                cmd += " /I" + BracketNameWithQuotes(dir)
        for (opt, var, val) in DEFSYMBOLS:
            if (opt == "ALWAYS") or (opt in opts):
                cmd += " /D" + var + "=" + val
        cmd += " " + BracketNameWithQuotes(src)
    else:
        # MinGW toolchain: windres -I<dirs> -D<defs> -i <src> -o <out>
        cmd = "windres"
        for x in ipath:
            cmd += " -I" + x
        for (opt, dir) in INCDIRECTORIES:
            if (opt == "ALWAYS") or (opt in opts):
                cmd += " -I" + BracketNameWithQuotes(dir)
        for (opt, var, val) in DEFSYMBOLS:
            if (opt == "ALWAYS") or (opt in opts):
                cmd += " -D" + var + "=" + val
        cmd += " -i " + BracketNameWithQuotes(src)
        cmd += " -o " + BracketNameWithQuotes(target)
    oscmd(cmd)
5,333,119
def create_ranking_model() -> tf.keras.Model:
    """Create ranking model using Functional API.

    Flattens per-list context/example features to per-example rows, scores
    each row with a shared feed-forward tower, and restores list shape for
    the output logits.
    """
    context_keras_inputs, example_keras_inputs, mask = create_keras_inputs()
    context_features, example_features = preprocess_keras_inputs(
        context_keras_inputs, example_keras_inputs, mask)

    # Collapse (batch, list_size, ...) features into per-example rows.
    (flattened_context_features,
     flattened_example_features) = tfr.keras.layers.FlattenList()(
         inputs=(context_features, example_features, mask))

    # Concatenate flattened context and example features along `list_size` dim.
    # sorted() keeps the feature ordering deterministic between runs.
    context_input = [
        tf.keras.layers.Flatten()(flattened_context_features[name])
        for name in sorted(flattened_context_features)
    ]
    example_input = [
        tf.keras.layers.Flatten()(flattened_example_features[name])
        for name in sorted(flattened_example_features)
    ]
    input_layer = tf.concat(context_input + example_input, 1)

    # User can create a custom scoring logic as a sequence of layers.
    dnn = tf.keras.Sequential()
    # Input batch normalization.
    if FLAGS.use_batch_norm:
        dnn.add(
            tf.keras.layers.BatchNormalization(
                momentum=FLAGS.batch_normalization_moment))
    for layer_size in FLAGS.hidden_layer_dims:
        dnn.add(tf.keras.layers.Dense(units=layer_size))
        if FLAGS.use_batch_norm:
            dnn.add(
                tf.keras.layers.BatchNormalization(
                    momentum=FLAGS.batch_normalization_moment))
        dnn.add(tf.keras.layers.Activation(activation=tf.nn.relu))
        dnn.add(tf.keras.layers.Dropout(rate=FLAGS.dropout_rate))
    # Final scalar score per example.
    dnn.add(tf.keras.layers.Dense(units=1))

    # Map per-example scores back into (batch, list_size) logits.
    logits = tfr.keras.layers.RestoreList()(inputs=(dnn(input_layer), mask))

    return tf.keras.Model(
        inputs=dict(
            list(context_keras_inputs.items()) +
            list(example_keras_inputs.items()) + [(_MASK, mask)]),
        outputs=logits,
        name="din_ranking_model")
5,333,120
def to_count_matrix(pair_counts, vocab_size):
    """Transform (row, col) -> count pairs into a sparse CSR count matrix.

    Args:
        pair_counts: mapping of (row_index, col_index) tuples to counts.
        vocab_size: size of the vocabulary; the matrix is
            (vocab_size + 1) square, the extra slot reserved for UNK.

    Returns:
        scipy.sparse.csr_matrix of dtype float32.
    """
    # Iterating keys and values separately is safe: a dict yields both in
    # the same (insertion) order.
    row_idx = [pair[0] for pair in pair_counts]
    col_idx = [pair[1] for pair in pair_counts]
    counts = list(pair_counts.values())
    # Setting to float is important; COO is the fastest format to construct
    # from ready-made (data, (rows, cols)) triplets.
    count_matrix = coo_matrix(
        (counts, (row_idx, col_idx)),
        shape=(vocab_size + 1, vocab_size + 1),
        dtype=np.float32
    )
    # CSR matrices support more arithmetic operations and are more efficient.
    return count_matrix.tocsr()
5,333,121
def create_dataset(genres_types: List[str],
                   sp: spotipy.Spotify,
                   genius: Genius,
                   limit: Optional[int] = 50,
                   how_many_in_genre: Optional[int] = 2_000,
                   sleep_time: Optional[int] = 30,
                   path_to_save: Optional[str] = None,
                   verbose: Optional[bool] = True,
                   save_progess: Optional[bool] = True) -> pd.DataFrame:
    """
    Function scrapes fixed number of songs by genre from Spotify API and
    their lyrics from Genius API.

    Parameters
    ----------
    genres_types (list): list of selected genres to scrape.
    sp (spotipy.Spotify): connector to Spotify API. It should be defined
        with credentials specified.
    genius (Genius): connector to Genius API. It should be defined with
        credentials specified.
    limit (int): how many observations to get with a single request (max 50).
        Must be smaller than how_many_in_genre, and
        how_many_in_genre % limit == 0.
    how_many_in_genre (int): how many observations should be in a single
        genre class; the whole dataset then has
        len(genres_types) * how_many_in_genre observations.
    sleep_time (int): seconds to sleep after each spotipy request.
        Set this up to avoid a too-many-requests error.
    path_to_save (str): path to save the resulting data frame.
    verbose (bool): if true, print progress.
    save_progess (bool): if true, save the current result after each
        spotipy request.

    Returns
    -------
    df (pd.DataFrame): scraped dataframe with author, title, popularity,
        genre and lyrics columns.
    """
    # Progress counters used when verbose is true.
    i_sample = 0
    max_sample = len(genres_types) * how_many_in_genre

    # Column accumulators for the scraped data.
    artist_names = []
    track_names = []
    popularities = []
    genres = []
    lyrics = []

    if verbose:
        print(f"Number of scraped samples: {i_sample}/{max_sample}")

    for genre in genres_types:
        # offset is the start index of API results; since a single request
        # returns at most `limit` items, walk the offset across the genre.
        for offset in range(0, how_many_in_genre, limit):
            t1 = time()
            # Scrape one page of tracks for this genre from Spotify.
            track_results = sp.search(q=f'genre:"{genre}"', type='track',
                                      limit=limit, offset=offset)
            for i, t in enumerate(track_results['tracks']['items']):
                artist_name = t['artists'][0]['name']
                track_name = t['name']
                artist_names.append(artist_name)
                track_names.append(track_name)
                popularities.append(t['popularity'])
                genres.append(genre)
                # Genius enforces a per-request timeout and raises when it is
                # exceeded; search_song may also return None (-> AttributeError
                # on .lyrics).  Record an error marker so the lyric can be
                # re-scraped later.
                try:
                    text = genius.search_song(track_name, artist_name).lyrics
                except Exception:
                    # Fixed: was a bare `except:`, which also swallowed
                    # KeyboardInterrupt/SystemExit and made the scraper
                    # impossible to interrupt cleanly.
                    text = f"Error in {artist_name} - {track_name}"
                lyrics.append(text)
            # Sleep after each request to avoid a too-many-requests error.
            sleep(sleep_time)
            # Monitor progress.
            i_sample += limit
            if verbose:
                t2 = time()
                print(f"Number of scraped samples: {i_sample}/{max_sample}. "
                      f"Time: {(t2 - t1) / 60:.2f} min")
            # Optionally checkpoint the result after each request.
            if save_progess:
                df = pd.DataFrame({'artist_name': artist_names,
                                   'track_name': track_names,
                                   'popularity': popularities,
                                   "genre": genres,
                                   "lyrics": lyrics})
                if path_to_save:
                    df.to_csv(path_to_save)

    # Create the final data frame.
    df = pd.DataFrame({'artist_name': artist_names,
                       'track_name': track_names,
                       'popularity': popularities,
                       "genre": genres,
                       "lyrics": lyrics})
    # Shuffle rows, reset the index and drop duplicates.
    df = df.sample(frac=1, random_state=7).reset_index(drop=True).drop_duplicates()
    # Save to the specified path.
    if path_to_save:
        df.to_csv(path_to_save)
    return df
5,333,122
def extract(config):
    """Extract zip files located at the configured path.

    Prints a standard "no configuration" message and returns early when
    *config* carries no ``config`` attribute (i.e. no config was loaded).
    """
    try:
        cfg = config.config
    except AttributeError:
        click.echo(MSGNOCFG)
        return
    pth = os.path.abspath(cfg['path'])
    click.echo(f'Extracting files at {pth}')
    # Forward the whole config mapping as keyword arguments.
    extract_files(**cfg)
5,333,123
def get_enum_type_definition(ua_graph: UAGraph, data_type_id: int):
    """Given a UAGraph object and a internal id of an enum type, the
    definition of the enumeration will be produced. The form of the
    definition may vary based on the enumeration.

    Args:
        ua_graph (UAGraph): UAGraph where the enum defintion is found
        data_type_id (int): The internal id for data type enum

    Return:
        The content of the 'Values' column in the node definition
    """
    enum_type_row = ua_graph.nodes[ua_graph.nodes["id"] == data_type_id]
    enum_type_id = enum_type_row["id"].values[0]

    # The enum type points to the enum definition via HasProperty ReferenceType.
    # The enum type does not contain the enum definition itself, it points to
    # an EnumStrings or EnumValues
    enum_neighbors = ua_graph._get_neighboring_nodes_by_id(enum_type_id,
                                                           "outgoing")

    # Ensuring we are getting the HasProperty reference
    enum_neighbors_has_property = enum_neighbors[
        enum_neighbors["ReferenceType"] == "HasProperty"
    ]

    # Getting the node which actually defines the enum; only the first
    # HasProperty target is considered.
    enum_definition_id = enum_neighbors_has_property["Trg"].values[0]
    enum_definition_node = ua_graph.nodes[ua_graph.nodes["id"] == enum_definition_id]
    enum_ua_list_of = enum_definition_node["Value"].values[0]

    return enum_ua_list_of
5,333,124
def create_classification_of_diseases():
    """Create and return one ClassificationOfDiseases instance with a random
    code, faked descriptions, and no parent.

    (The previous docstring documented a ``qtty`` parameter that this
    function does not take.)
    """
    fake = Factory.create()

    return ClassificationOfDiseases.objects.create(
        code=randint(1, 1000),
        description=fake.text(),
        abbreviated_description=fake.text(max_nb_chars=100),
        parent=None
    )
5,333,125
def is_path_parent(possible_parent, *paths):
    """
    Return True if a path is the parent of another, False otherwise.
    Multiple paths to test can be specified in which case all specified test
    paths must be under the parent in order to return True.  With no test
    paths the answer is False.
    """
    def _with_trailing_sep(pth):
        # Normalize to an absolute path ending in the separator so that a
        # prefix comparison cannot match partial component names
        # (e.g. "/a" must not be treated as parent of "/ab").
        normalized = os.path.abspath(pth)
        return normalized if normalized.endswith(os.sep) else normalized + os.sep

    if not paths:
        return False
    parent = _with_trailing_sep(possible_parent)
    return all(_with_trailing_sep(p).startswith(parent) for p in paths)
5,333,126
def create_measurement(coordinate, timestamp, velocity, altitude, user_id):
    # TODO: Rename to create_location_measurement
    """
    Creates location entry.

    :param coordinate: (latitude, longitude) pair
    :param timestamp: epoch timestamp of the fix
    :param velocity: sensed speed value
    :param altitude: altitude of the fix
    :param user_id: id of the user the entry belongs to
    :return: an ecwe.Entry wrapping a background/filtered_location Location
    """
    new_loc = ecwl.Location(
        ts=timestamp,
        latitude=coordinate[0],
        longitude=coordinate[1],
        sensed_speed=velocity,
        accuracy=0,
        bearing=0,
        filter='distance',
        # This should not be necessary. TODO: Figure out how we can avoid this.
        fmt_time=arrow.get(timestamp).to('UTC').format(),
        # GeoJSON points use (longitude, latitude) order.
        loc=gj.Point((coordinate[1], coordinate[0])),
        local_dt=ecwld.LocalDate.get_local_date(timestamp, 'UTC'),
        altitude=altitude
    )
    entry = ecwe.Entry.create_entry(user_id, "background/filtered_location",
                                    new_loc, create_id=True)
    # This field ('type') is required by the server when we push the entry to
    # the user cache, so we add it here. Also we just chose an arbitrary
    # formatter; in the future we might want to create a formatter group
    # called fake user.
    entry['metadata']['type'] = 'sensor-data'
    entry['metadata']['platform'] = 'android'
    #entry['data']['bearing'] = 0
    return entry
5,333,127
def cmpversion(a, b):
    """Compare versions the way chrome does.

    Versions are split into major/minor on the first '.' (or '_');
    comparison recurses into the minor components when the majors match.

    Returns:
        True when *a* is a strictly greater version than *b*, else False.
    """
    def split_version(v):
        """Get major/minor of version."""
        if '.' in v:
            return v.split('.', 1)
        if '_' in v:
            return v.split('_', 1)
        return (v, '0')

    a_maj, a_min = split_version(a)
    b_maj, b_min = split_version(b)
    if a_maj == b_maj:
        # Base case fix: equal remainders are not "greater".  Without this
        # guard, equal versions recursed forever (('0','0') -> ('0','0') -> ...)
        # and raised RecursionError.
        if a_min == b_min:
            return False
        return cmpversion(a_min, b_min)
    return int(a_maj) > int(b_maj)
5,333,128
def migrate_work_dir_structure_v2(server_id: str) -> None:
    """Function to reorganize working directory data to be sorted by
    server_id. This assumes no migration has previously run.

    Args:
        server_id: server id to move user dirs into
    """
    # Each helper migrates one category of on-disk data into the
    # server-scoped layout.
    __migrate_repository_data(server_id)
    __migrate_sensitive_file_data(server_id)
    __migrate_dataset_file_cache_data(server_id)
5,333,129
def get_dict_json(attr_name: str, possible_dict: dict) -> dict:
    """
    returns a {key : item}. If the item is a np.ndarray then it is converted
    to a list

    NOTE(review): despite the ``dict`` annotation, ``possible_dict[0]``
    indexes element 0, which suggests the argument is actually an indexable
    sequence (or a dict keyed by 0) -- confirm against callers.

    Parameters
    ----------
    attr_name : key to use in the returned mapping
    possible_dict : value to wrap; when its first element is an ndarray the
        whole value is converted via dict_ndarray_to_list first

    Returns
    -------
    dict with the single entry {attr_name: value}
    """
    if type(possible_dict[0]) == np.ndarray:
        output_tail = dict_ndarray_to_list(possible_dict)
        return {attr_name: output_tail}
    else:
        return {attr_name: possible_dict}
5,333,130
def eulers_totient_phi(num):
    """
    Euler's totient (a.k.a. phi) function, φ(n).

    Count the number of positive integers less than or equal to "n" that are
    relatively prime (coprimes) to "n".

    Coprimes: if the only positive integer that evenly divides two numbers
    is 1. This is the same thing as their greatest common divisor is 1.

    Uses the product formula phi(n) = n * prod(1 - 1/p) over the distinct
    prime divisors p, evaluated in exact integer arithmetic — the previous
    float-based product could lose precision for large n and returned a
    float rather than an integer count.

    https://secure.wikimedia.org/wikipedia/en/wiki/Totient_function
    """
    phi = num
    for p in set(prime_divisors(num)):  # distinct prime divisors
        # phi *= (1 - 1/p), kept exact: each prime p divides the running
        # product, so the floor division is an exact division.
        phi = phi // p * (p - 1)
    return phi
5,333,131
def create_nav_btn(soup, date, text):
    """
    Helper functions for month_calendar, generates a navigation button for
    calendar

    :param soup: BeautifulSoup parser of document
    :param date: Date to create nav button (compared against
        datetime.today(), so presumably a datetime -- TODO confirm)
    :param text: Text for button
    :return: a <th> element wrapping the navigation <a> button
    """
    nav_th = soup.new_tag('th', attrs=[('colspan', '2')])
    nav_th['class'] = 'month'
    nav_a = soup.new_tag('a',
                         href='/apps/orders/%s/%s' % (date.year, date.month))
    nav_a.string = text
    # Future months get a disabled button with a dead link.
    if date > datetime.today():
        nav_a['class'] = "btn btn-mini btn-info disabled"
        nav_a['href'] = '#'
    else:
        nav_a['class'] = "btn btn-mini btn-info"
    nav_th.insert(0, nav_a)
    return nav_th
5,333,132
def parse_history_event(directive_result):
    """Deserialize the JSON-serializable payload of a history event according
    to its event type; unknown types yield None."""
    event_type = directive_result.event_type
    if event_type is None:
        raise ValueError("EventType is not found in task object")

    # Custom objects are supported via _deserialize_custom_object because the
    # output is handed straight to the orchestrator as an activity result.
    if event_type in (HistoryEventType.SUB_ORCHESTRATION_INSTANCE_COMPLETED,
                      HistoryEventType.TASK_COMPLETED):
        return json.loads(directive_result.Result,
                          object_hook=_deserialize_custom_object)
    if event_type == HistoryEventType.EVENT_RAISED:
        # TODO: Investigate why the payload is in "Input" instead of "Result"
        return json.loads(directive_result.Input,
                          object_hook=_deserialize_custom_object)
    return None
5,333,133
def _remove_irrelevant_subelement(base, tag):
    """Remove the first child of *base* with the given tag if that child has
    no children of its own; otherwise leave the tree unchanged.

    Args:
        base: The base node element. (lxml node)
        tag: Tag of the child element to inspect. (string)
    """
    candidate = _get_first_child(base, tag)
    # Nothing to do when the tag is absent; only a childless element is
    # considered irrelevant and removed.
    if candidate is not None and len(candidate) == 0:
        base.remove(candidate)
5,333,134
def runQuery(cred, structuredQuery):
    """
    Run the structured query defined according to the Firestore rest API docs, see NetFlowPingApp getItems() function as an example
    @param structuredQuery: query the firestore database according to the structure defined by...
    https://firebase.google.com/docs/firestore/reference/rest/v1/StructuredQuery
    """
    # Build the documents:runQuery endpoint from the credential's base URL.
    url = cred.base_url + "documents:runQuery"
    # NOTE(review): the return value of makeRequest is discarded here, so
    # callers never see the query results -- confirm whether this should
    # `return makeRequest(...)` instead.
    makeRequest(cred, url, 'POST', structuredQuery)
5,333,135
def pad_with_dots(msg, length=PAD_TEXT):
    """Pad text with dots up to given length.

    A single space separates the message from the dots; messages already at
    or beyond `length` are returned unchanged.

    >>> pad_with_dots("Hello world", 20)
    'Hello world ........'
    >>> pad_with_dots("Exceeding length", 10)
    'Exceeding length'
    """
    # NOTE(review): the default `PAD_TEXT` reads like a text constant but is
    # used as a length here -- confirm the constant's name/value upstream.
    if len(msg) >= length:
        return msg
    # One separator space, then dots out to exactly `length` characters
    # (replaces the original manual append loop).
    return msg + " " + "." * (length - len(msg) - 1)
5,333,136
def get_top_10_features(target_params, results, importance_type="weight"):
    """Gets the top 10 features of each XGBoost regressor.

    Parameters
    ----------
    target_params: dictionary
        Should contain a dict with with params for each target label.
    results : dictionary
        Should contain a dict with the regression results.
    importance_type: string
        The score type that should be retrieved by. Either weight, gain or cover.
        Default weight.
    """
    top_features = {}
    for target in target_params:
        label = target[0]
        regressor = results[label][2]
        scores = regressor.get_booster().get_score(importance_type=importance_type)
        # Rank features by descending importance and keep the first ten.
        ranked = sorted(scores.items(), key=lambda item: item[1], reverse=True)
        top_features[label] = dict(ranked[:10])
    return top_features, pd.DataFrame.from_dict(top_features, orient='index')
5,333,137
def nlj2csv(infile, outfile, header, skip_failures, quoting, json_lib):
    """
    Convert newline JSON dictionaries to a CSV.  Defaults to reading from
    `stdin` and writing to `stdout`.  CSV fields are derived from the first
    record and `null` values are converted to empty CSV fields.
    """
    with nlj.open(infile, json_lib=json_lib) as reader:
        # Field names come from the first record.
        first_record = next(reader)
        writer = csv.DictWriter(
            outfile, first_record.keys(), quoting=quoting, escapechar='\\')
        if header:
            writer.writerow({field: field for field in writer.fieldnames})
        # Re-chain the consumed first record in front of the remainder.
        for record in chain([first_record], reader):
            try:
                writer.writerow(
                    {k: _nlj_rec_to_csv_rec(v) for k, v in six.iteritems(record)})
            except Exception:
                if not skip_failures:
                    raise
5,333,138
def index():
    """Render the class check list view."""
    context = {
        'new_class_check_form': NewClassCheckForm(),
        'upload_csv_form': UploadCSVForm(),
        'classes': ClassCheck.query.all(),
    }
    return render_template('index.html', **context)
5,333,139
def test_use_proactoreventloop(monkeypatch):
    """Test that ProActorEventLoop is created"""
    # Sentinel exception so we can stop init() right after the event loop
    # is installed, without running the rest of the command.
    class StopTest(Exception):
        pass

    # Fake out task-file discovery/loading so no real files are needed.
    monkeypatch.setattr(loadlimit.importhook, 'lstaskfiles', fake_lstaskfiles)
    monkeypatch.setattr(loadlimit.importhook, 'SourceFileLoader', FakeSourceFileLoader)

    def fake_set_event_loop(l):
        # Capture whatever loop init() tries to install, then bail out.
        nonlocal loopval
        loopval = l
        raise StopTest

    def fake_processoptions(c, s):
        # Should never be reached; fail loudly if it is.
        raise RuntimeError

    monkeypatch.setattr(asyncio, 'set_event_loop', fake_set_event_loop)
    monkeypatch.setattr(cli, 'process_options', fake_processoptions)

    taskfile = 'a_0.py'
    config = defaultdict(dict)
    state = Namespace()
    args = [cli.PROGNAME, '-d', '1s', '--no-progressbar', taskfile]
    loopval = None
    runloop = cli.RunLoop()

    # init() must hit our fake set_event_loop, which raises StopTest.
    with pytest.raises(StopTest):
        runloop.init(config, args, state)

    # The fake ProactorEventLoop machinery is expected to yield 42 here.
    # NOTE(review): relies on test fixtures defined elsewhere -- confirm.
    assert loopval == 42
5,333,140
def autoupdate(request):
    """
    Execute auto-update if the request contains the correct
    `X-Hub-Signature`, depending on the shared secret key.
    See https://developer.github.com/webhooks/creating/.
    """
    api_token, webhook_secret = _read_secrets()
    _check_signature(webhook_secret, request)

    payload = request.get_json()
    try:
        repo_name = payload["repository"]["name"]
    except KeyError:
        # Best effort only: log and carry on with the update anyway.
        print(f"Failed to get source repository name from request: {payload}",
              file=sys.stderr)
    else:
        print(f"Update triggered by repository '{repo_name}'")

    try:
        update_git_submodules(token=api_token)
    except ConnectionError as err:
        print(f"{err}", file=sys.stderr)
        abort(HTTPStatus.SERVICE_UNAVAILABLE)
    except (PermissionError, RuntimeError) as err:
        print(f"{err}", file=sys.stderr)
        abort(HTTPStatus.INTERNAL_SERVER_ERROR)
    return "ok"
5,333,141
def sync_directories(
    sourcedir: str,
    targetdir: str,
    action: str = 'sync',
    *,
    twoway: Optional[bool] = False,
    purge: Optional[bool] = False,
    verbose: Optional[bool] = True
) -> None:
    """
    Synchronize two directories by delegating to `sync`.

    Parameters
    ----------
    sourcedir : str
        The source directory for syncing.
    targetdir : str
        The target directory for syncing.
    action : str = 'sync'
        The syncing action. Options: diff, sync, update.
    twoway : bool = False
        Update files from sourcedir to targetdir (False) or both (True).
    purge : bool = False
        Delete files from targetdir.
    verbose : bool = True
        Provide verbose output.

    Example
    -------
    >>> local_docs = 'string_to_directory'
    >>> sharepoint_docs = 'string_to_mapped_drive_of_sharepoint'
    >>> ds.sync_directories(
    >>>     sourcedir=local_docs,
    >>>     targetdir=sharepoint_docs,
    >>>     action='sync',
    >>>     twoway=False,
    >>>     purge=False,
    >>>     verbose=True
    >>> )
    """
    options = {
        'sourcedir': sourcedir,
        'targetdir': targetdir,
        'action': action,
        'twoway': twoway,
        'purge': purge,
        'verbose': verbose,
    }
    sync(**options)
5,333,142
async def test_hmip_water_detector(hass, default_mock_hap):
    """Test HomematicipWaterDetector."""
    entity_id = "binary_sensor.wassersensor"
    entity_name = "Wassersensor"
    device_model = "HmIP-SWD"
    ha_state, hmip_device = get_and_check_entity_basics(
        hass, default_mock_hap, entity_id, entity_name, device_model
    )

    assert ha_state.state == STATE_OFF

    # Any combination with at least one positive reading must report ON;
    # only both-clear reports OFF.  Order matches the original sequence.
    scenarios = [
        (True, False, STATE_ON),
        (True, True, STATE_ON),
        (False, True, STATE_ON),
        (False, False, STATE_OFF),
    ]
    for waterlevel, moisture, expected in scenarios:
        await async_manipulate_test_data(
            hass, hmip_device, "waterlevelDetected", waterlevel)
        await async_manipulate_test_data(
            hass, hmip_device, "moistureDetected", moisture)
        assert hass.states.get(entity_id).state == expected
5,333,143
def fuse_gelu_tanh_approximation(prog):
    """
    Identify the pattern that corresponds to the tanh approximate version of gelu,
    and replace it with a single gelu layer with mode=TANH_APPROXIMATION

    y = ( tanh((.0447)x^3 + x ) * (sqrt(2/pi)) + 1 ) * 0.5 * x

    [...] -----> pow (3) ----> mul (.044715) ---> add -----> mul (sqrt(2/pi)) ---> tanh ----> add (1) ----> mul (0.5) -----> mul ---> [...]
      |                                            ^                                                                          ^
      |                                            |                                                                          |
      |---------------------------------------------------------------------------------------------------------------------------
    """
    # Only the function bodies are needed, so iterate values() directly
    # (the previous items() loop never used the function name).
    for f in prog.functions.values():
        # Keep fusing until a full pass over the function makes no change.
        block_changed = True
        while block_changed:
            block_changed = fuse_gelu_tanh_block(f)
5,333,144
def main(args):
    """Main function called when run from command line or as part of pipeline.

    :param args: command-line argument list (see ``usage`` below)
    :return: tuple of (taxon_a_zip, taxon_b_zip) destination archive paths
    """
    usage = """
Usage: split_by_taxa.py
--genomes-a=FILE        file with genome GenBank Project ID and Organism name on each line for taxon A
--genomes-b=FILE        file with genome GenBank Project ID and Organism name on each line for taxon B
--orthologs-zip=FILE    archive of aligned & trimmed single copy orthologous (SICO) genes
--taxon-a-zip=FILE      destination file path for archive of SICO genes belonging to taxon A
--taxon-b-zip=FILE      destination file path for archive of SICO genes belonging to taxon B
"""
    options = ['genomes-a', 'genomes-b', 'orthologs-zip', 'taxon-a-zip', 'taxon-b-zip']
    genome_a_ids_file, genome_b_ids_file, orthologs_zip, taxon_a_zip, taxon_b_zip = \
        parse_options(usage, options, args)

    # Parse file containing RefSeq project IDs to extract RefSeq project IDs
    # Each line is tab-separated: column 0 is the project ID, column 1 the
    # organism name (used to derive a common name prefix per taxon).
    with open(genome_a_ids_file) as read_handle:
        lines = [line.split('\t') for line in read_handle]
        genome_ids_a = [line[0] for line in lines]
        common_prefix_a = _common_prefix([line[1] for line in lines], 'taxon_a')
    with open(genome_b_ids_file) as read_handle:
        lines = [line.split('\t') for line in read_handle]
        genome_ids_b = [line[0] for line in lines]
        common_prefix_b = _common_prefix([line[1] for line in lines], 'taxon_b')

    # Create run_dir to hold files related to this run
    run_dir = tempfile.mkdtemp(prefix='split_by_taxa_')

    # Extract files from zip archive
    ortholog_files = extract_archive_of_files(orthologs_zip, create_directory('alignments', inside_dir=run_dir))

    # Actually split alignments per taxon
    taxon_a_files, taxon_b_files = split_alignment_by_taxa(run_dir, ortholog_files,
                                                           (genome_ids_a, common_prefix_a),
                                                           (genome_ids_b, common_prefix_b))

    # Write the produced files to command line argument filenames
    create_archive_of_files(taxon_a_zip, taxon_a_files)
    create_archive_of_files(taxon_b_zip, taxon_b_files)

    # Remove unused files to free disk space
    shutil.rmtree(run_dir)

    # Exit after a comforting log message
    log.info("Produced: \n%s\n%s", taxon_a_zip, taxon_b_zip)

    return taxon_a_zip, taxon_b_zip
5,333,145
def countSkipped(line, specs, skipCount):
    """
    Keep track of the number of skipped ligands, and the criteria that got
    them rejected.

    :param line: one log line; only lines containing "Skipping" are counted
    :param specs: dict mapping criterion name -> [min_value, max_value, repeat_count]
    :param skipCount: running total of skipped ligands
    :return: the updated (specs, skipCount) pair
    """
    if "Skipping" in line:
        skipCount += 1
        # The rejection reason follows the first comma, e.g.
        # "Skipping lig1, LogP 6.2 too high".
        endLine = line.split(",")[1].strip()
        endll = endLine.split()
        if endll[0] == "LogP":
            # Special case for LogP: "LogP <float> ..." -- value is a float.
            currSpec = endll[0]
            currVal = float(endll[1])
        else:
            # Otherwise the criterion is the first two words joined, and the
            # value is the third-from-last token, e.g. "Rotatable bonds 12. ...".
            currSpec = ''.join(endll[0:2])
            currVal = int(endll[-3].rstrip("."))
        if currSpec not in specs:
            # First occurrence: min == max == value, repeat count starts at 0.
            specs[currSpec] = [currVal, currVal, 0]
        else:
            spec = specs[currSpec]
            spec[2] += 1
            spec[0] = min(spec[0], currVal)
            spec[1] = max(spec[1], currVal)
    return specs, skipCount
5,333,146
def nrmse(y_true, y_pred, MEAN_OF_DATA):
    """
    Calculates the normalized root mean square error of y_true and y_pred
    where MEAN_OF_DATA is the mean of y_pred.
    """
    truth = y_true.squeeze()
    predicted = y_pred.squeeze()
    # Total squared deviation from the supplied mean (normalization term).
    total_var = np.sum(np.square(truth - MEAN_OF_DATA))
    # Total squared prediction error.
    resid = np.sum(np.square(truth - predicted))
    # Small epsilon guards against division by zero.
    return np.sqrt(resid / (total_var + 1e-8))
5,333,147
def field_references(
    model_tuple,
    field,
    reference_model_tuple,
    reference_field_name=None,
    reference_field=None,
):
    """
    Return either False or a FieldReference if `field` references provided
    context.

    False positives can be returned if `reference_field_name` is provided
    without `reference_field` because of the introspection limitation it
    incurs. This should not be an issue when this function is used to
    determine whether or not an optimization can take place.
    """
    remote_field = field.remote_field
    if not remote_field:
        # Non-relational fields cannot reference anything.
        return False
    references_to = None
    references_through = None
    # Direct reference: the related model is the one we're asked about.
    if resolve_relation(remote_field.model, *model_tuple) == reference_model_tuple:
        to_fields = getattr(field, 'to_fields', None)
        if (
            reference_field_name is None or
            # Unspecified to_field(s).
            to_fields is None or
            # Reference to primary key.
            (None in to_fields and (reference_field is None or reference_field.primary_key)) or
            # Reference to field.
            reference_field_name in to_fields
        ):
            references_to = (remote_field, to_fields)
    # Indirect reference via an M2M "through" model.
    through = getattr(remote_field, 'through', None)
    if through and resolve_relation(through, *model_tuple) == reference_model_tuple:
        through_fields = remote_field.through_fields
        if (
            reference_field_name is None or
            # Unspecified through_fields.
            through_fields is None or
            # Reference to field.
            reference_field_name in through_fields
        ):
            references_through = (remote_field, through_fields)
    if not (references_to or references_through):
        return False
    return FieldReference(references_to, references_through)
5,333,148
def spsa(u, J, c, a, conv_u, conv_J, max_steps=np.inf, alpha=0.602, A=1.,
         gamma=0.101, m=0., verbose=False):
    """
    Uses simultaneous perturbation stochastic approximation [1]_ [2]_ to minimize
    a function `J` with respect to the vector `u`. Notation follows the
    Wikipedia_ page on the topic at the time of writing.

    At each step we perturb our loss function, :math:`J`, in a random direction
    :math:`c_n \\Delta_n`, and look at the first-order approximation to the
    gradient in that direction. :math:`c_n` is then allowed to shrink each step.
    It's rather like finite difference, but we probe all dimensions
    simultaneously, so we need only *two* evaluations of :math:`J` instead of
    the number of dimensions evaluations. Pretty nifty.

    Parameter defaults are taken from Spall's 1998 IEEE paper_ [3]_.

    .. note:: The proof of convergence sketched on Wikipedia_ requires :math:`J`
              to be thrice differentiable, so this might not converge for your
              problem.

    .. [1] Spall, IEEE Transactions on Automatic Control 37 (1992)
    .. [2] Maryak, Chin, IEEE Transactions on Automatic Control 53 (2008)
    .. [3] Spall, IEEE Transactions on Aerospace and Electronic Systems 24 (1998)
    .. _Wikipedia: https://en.wikipedia.org/wiki/Simultaneous_perturbation_stochastic_approximation
    .. _paper: http://www.jhuapl.edu/SPSA/PDF-SPSA/Spall_Implementation_of_the_Simultaneous.PDF

    Args:
        u (np.ndarray): Initial guess for the vector that minimizes the
            function J.  The caller's array is NOT modified (a float copy is
            made internally).
        J (function): The function to minimize. Must take `u` as an argument
            and return a scalar.
        c (float): Initial magnitude of the probing step in `u`-space.
        a (float): Initial magnitude of the gradient scalar. Should be > `c` > 0.
        conv_u (float): Maximum distance of consecutive best-`J` `u`'s to be
            considered converged.
        conv_J (float): Maximum `J` difference between best-`J`'s to be
            considered converged.
        max_steps (int): Maximum number of gradient steps to take.
            (Default is unlimited.)
        alpha (float): Power at which `a` decays with iteration, i.e.
            :math:`a_n = a_0 / (A + n)^\\alpha.` (Default is 0.602)
        A (float): Offset for `a`-decay. (Default is 1.)
        gamma (float): Power at which c decays with iteration, i.e.
            :math:`c_n = c_0 / n^\\gamma.` (Default is 0.101.)
        m (float): Maximum momentum factor, i.e. fraction of last change to
            add to new change. Ramps on. (Default is 0.)
        verbose (bool): Whether to print the fit.

    Returns:
        2-element tuple containing

        - (*np.ndarray*) -- Best found solution in `u`-space.
        - (*int*) -- Number of iterations.
    """
    # Work on a float copy: the previous implementation updated `u` with
    # `+=`, which both mutated the caller's array and (because `u_best = u`
    # aliased the same object) silently dragged `u_best` along with it.
    u = np.array(u, dtype=float)
    dims = len(u)
    n = 1
    a0 = a
    # TODO: a0 in a similar manner as optimal step size for gradient descent? (Sam's idea)
    c0 = c
    last_du = 0.
    if verbose:
        print("SPSA step 0 u =" + str(u))
    u_best = u.copy()  # snapshot, not an alias
    J_best = J(u)
    u_best_change = np.inf
    J_best_change = np.inf
    if A > 0:
        a = a0 / A ** alpha
    # Reference gain for the momentum ramp; must be defined even when A <= 0.
    a1 = a
    while ((u_best_change > conv_u) or (J_best_change > conv_J)) and (n < max_steps):
        delta = rademacher(dims, size=c)  # random +-c probe direction
        Jp = J(u + delta)
        Jm = J(u - delta)
        dJ = Jp - Jm
        gu = dJ / (2 * delta)  # per-component SPSA gradient estimate
        force = np.linalg.norm(gu)
        # Ramp momentum on as we progress and a/a1 shrinks.
        du = -a * gu + m * (1 - (a / a1)) * last_du
        u += du
        a = a0 / (A + n) ** alpha
        c = c0 / float(n) ** gamma
        # Track the better of the two probed points as a candidate best.
        losses = [Jp, Jm]
        loss_vecs = [u + delta, u - delta]
        lower_id = np.argmin(losses)
        new_loss = losses[lower_id]
        new_vec = loss_vecs[lower_id]
        if new_loss < J_best:
            u_best_change = np.linalg.norm(new_vec - u_best)
            J_best_change = abs(J_best - new_loss)
            J_best = new_loss
            u_best = new_vec
        if verbose:
            print("Step " + str(n) + ": best J, u, J, F, dub, dJb = " +
                  ", ".join([str(J_best), str(new_vec), str(new_loss),
                             str(force), str(u_best_change), str(J_best_change)]))
        last_du = du
        n += 1
    if verbose:
        print("Best loss" + str(J_best) + " at " + str(u_best))
    return u_best, n
5,333,149
def friendlist_embed(friendlist, guild):
    """Build an Embed listing a guild's friends.

    :param friendlist: The friendlist of the source guild
    :param guild: Source guild
    :return: Embedded friendlist
    """
    lines = [f'**{name}: {snowflake}**'
             for name, snowflake in friendlist.items()]
    embed = Embed()
    embed.set_author(name=guild.name, icon_url=guild.icon_url)
    embed.add_field(name='Friends:', value='\n'.join(lines), inline=False)
    return embed
5,333,150
def helicsFederateSetFlagOption(fed: HelicsFederate, flag: int, value: bool):
    """
    Set a flag for the federate.

    **Parameters**

    - **`fed`** - The federate to alter a flag for.
    - **`flag`** - The flag to change.
    - **`value`** - The new value of the flag. 0 for false, !=0 for true.
    """
    fn = loadSym("helicsFederateSetFlagOption")
    err = helicsErrorInitialize()
    fn(fed.handle, flag, value, err)
    # A non-zero error code means the C call failed; surface it as an exception.
    if err.error_code != 0:
        message = ffi.string(err.message).decode()
        raise HelicsException("[" + str(err.error_code) + "] " + message)
5,333,151
def find_contours(img):
    """
    Find all contours in the image.

    :param img: single-channel image to threshold and trace
    :return: list of detected contours
    """
    _, thresh = cv2.threshold(img, 127, 255, 0)
    # cv2.findContours returns (image, contours, hierarchy) in OpenCV 3.x but
    # only (contours, hierarchy) in OpenCV 2.x/4.x; indexing [-2] picks the
    # contours list regardless of the installed version.
    contours = cv2.findContours(thresh, cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE)[-2]
    return contours
5,333,152
def _split_binline(binline):
    """Internal function to read the line of bin specs

    Returns bin width and array of bin lower edges
    """
    bin_strings = tabmatch.split(binline.strip())
    # Lower edge = first float found in each bin spec; commas inside
    # numbers (thousands separators) must be stripped first.
    try:
        edges = numpy.fromiter(
            (floatmatch.findall(spec)[0].replace(",", "")
             for spec in bin_strings),
            dtype=numpy.float64,
        )
    except IndexError:
        # A spec contained no float at all: fall back to the raw strings.
        edges = numpy.asarray(bin_strings)
    return edges
5,333,153
def parse_name(content):
    """
    Finds the name of the man page.

    The name is the leading run of word characters, dots and hyphens at the
    very start of the content; an empty string if there is none.
    """
    match = re.match(r"[\w\.-]*", content)
    return match.group(0) if match else ""
5,333,154
def albanian_input_normal(field, text):
    """
    Prepare a string from one of the query fields for subsequent processing:
    replace common shortcuts with valid Albanian characters.
    """
    # Only word-form / lexeme / Russian-translation fields are normalized.
    if field in ('wf', 'lex', 'lex2', 'trans_ru', 'trans_ru2'):
        text = text.replace('ё', 'ë').replace('e:', 'ë')
    return text
5,333,155
def test_download_message_positive(mocker, request, requests_mock, client):
    """
    Given:
    - GUID of existing message to download

    When:
    - Running download message command

    Then:
    - Ensure file name
    - Ensure file content
    """
    # Pin the generated file name so the on-disk artifact is predictable.
    mocker.patch.object(demisto, 'uniqueFile', return_value="test_file_result")
    mocker.patch.object(demisto, 'investigation', return_value={'id': '1'})
    file_name = "1_test_file_result"

    def cleanup():
        # Remove the artifact written by download_message; ignore if absent.
        try:
            os.remove(file_name)
        except OSError:
            pass

    request.addfinalizer(cleanup)

    guid = 'guid'
    args = {
        'guid': guid
    }
    # Canned API payload served by requests_mock for the quarantine endpoint.
    api_response = open('./test_data/download_message_response').read().encode('utf8')
    requests_mock.get(SERVER_URL + '/quarantine?' + urlencode(args), content=api_response)
    result = download_message(client=client, args=args)
    assert result['File'] == guid + '.eml'
    # The raw bytes written to disk must match the mocked API response.
    with open(file_name, 'rb') as f:
        assert f.read() == api_response
5,333,156
def extract_share_id_from_url(public_base_url: str) -> str:
    """
    Extracts the Airtable share id from the provided URL.

    :param public_base_url: The URL where the share id must be extracted from.
    :raises ValueError: If the provided URL doesn't match the publicly shared
        Airtable URL.
    :return: The extracted share id.
    """
    match = re.search(r"https:\/\/airtable.com\/shr(.*)$", public_base_url)
    if match:
        return f"shr{match.group(1)}"
    raise ValueError(
        f"Please provide a valid shared Airtable URL (e.g. "
        f"https://airtable.com/shrxxxxxxxxxxxxxx)"
    )
5,333,157
def load_data(file_path):
    """Read the place-name file and build character/index mappings.

    Collects the de-duplicated characters of the source-language and Chinese
    columns, prepends the special PADDING/START/END tokens to each vocabulary,
    and wraps every place name with the START and END markers.

    :param file_path: path of the tab-separated data file
    :return: ((source names, chinese names),
              (source char->idx map, chinese char->idx map),
              (source idx->char map, chinese idx->char map))
    """
    df = pd.read_table(file_path)
    df.columns = ['source', 'chinese']

    # Distinct characters per language: concatenate all names, then dedupe.
    chars_source = sorted(set(df.source.unique().sum()))
    chars_chinese = sorted(set(df.chinese.unique().sum()))

    # Special tokens occupy the first vocabulary slots.
    specials = [PADDING_TOKEN, START_TOKEN, END_TOKEN]
    vocab_source = specials + chars_source
    vocab_chinese = specials + chars_chinese

    token_to_idx_source = {ch: i for i, ch in enumerate(vocab_source)}
    token_to_idx_zh = {ch: i for i, ch in enumerate(vocab_chinese)}
    idx_to_token_source = {i: ch for i, ch in enumerate(vocab_source)}
    idx_to_token_zh = {i: ch for i, ch in enumerate(vocab_chinese)}

    # Surround every (preprocessed) name with the start/end markers.
    df['source'] = df['source'].apply(
        lambda x: START_TOKEN + preprocess_dm_source(x) + END_TOKEN)
    df['chinese'] = df['chinese'].apply(
        lambda x: START_TOKEN + preprocess_dm_chinese(x) + END_TOKEN)

    dm_text_source = df.source.values.tolist()
    dm_text_chinese = df.chinese.values.tolist()

    return ((dm_text_source, dm_text_chinese),
            (token_to_idx_source, token_to_idx_zh),
            (idx_to_token_source, idx_to_token_zh))
5,333,158
def train_step(opt_g, opt_h, g, h, X, Y, u, train_loss_tracker, rec_loss_tracker, lin_loss_tracker, pred_loss_tracker): """Takes one training step with the Koopman loss. Args: opt_g (tf.keras.optimizers.Optimizer): Optimizer for encoder network. opt_h (tf.keras.optimizers.Optimizer): Optimizer for decoder network. g (Net): Encoder network. h (Net): Decoder network. X (tf.Tensor): First data matrix. Y (tf.Tensor): Second data matrix. u (tf.Tensor): Input signal. train_loss_tracker (tf.keras.metrics.Mean): Tracks the total training loss over time. rec_loss_tracker (tf.keras.metrics.Mean): Tracks the reconstruction loss over time. lin_loss_tracker (tf.keras.metrics.Mean): Tracks the linearity loss over time. pred_loss_tracker (tf.keras.metrics.Mean): Tracks the prediction loss over time. """ # Compute grad w.r.t. `g` with tf.GradientTape(persistent=True, watch_accessed_variables=False) as tape: tape.watch(g.variables) tape.watch(h.variables) loss, loss_rec, loss_lin, loss_pred = koopman_loss(g, h, X, Y, u) grads_g = tape.gradient(loss, g.trainable_variables) grads_h = tape.gradient(loss, h.trainable_variables) del tape # Update weights opt_g.apply_gradients(zip(grads_g, g.trainable_variables)) opt_h.apply_gradients(zip(grads_h, h.trainable_variables)) train_loss_tracker(loss) rec_loss_tracker(loss_rec) lin_loss_tracker(loss_lin) pred_loss_tracker(loss_pred)
5,333,159
def verify_portchannel_member(dut, portchannel, members, flag='add', cli_type=""):
    """
    This API is used to verify the members of portchannel
    Author: Chaitanya Vella (chaitanya-vella.kumar@broadcom.com)
    :param dut:
    :param portchannel:
    :param members:
    :param flag: 'add' checks that all members are present,
                 'del' checks that all members are absent
    :return: True/False (None for an unknown flag)
    """
    st.log("Verifying port channel members ...", dut=dut)
    current_members = get_portchannel_members(dut, portchannel, cli_type=cli_type)
    if flag == 'add':
        if not current_members:
            st.error("ERROR in port channel members")
            return False
        return all(member in current_members
                   for member in utils.make_list(members))
    elif flag == 'del':
        return all(member not in current_members
                   for member in utils.make_list(members))
5,333,160
def test_progress_ready(request, app):
    """Test progress API when spawner is already started

    e.g. a race between requesting progress and progress already being complete
    """
    db = app.db
    name = 'saga'
    app_user = add_user(db, app=app, name=name)
    # Start the user's server first, so progress is already complete when asked.
    r = yield api_request(app, 'users', name, 'server', method='post')
    r.raise_for_status()
    # Now request the progress stream for the already-running server.
    r = yield api_request(app, 'users', name, 'server/progress', stream=True)
    r.raise_for_status()
    request.addfinalizer(r.close)
    assert r.headers['content-type'] == 'text/event-stream'
    # Read the first server-sent event off the stream in the request executor.
    ex = async_requests.executor
    line_iter = iter(r.iter_lines(decode_unicode=True))
    evt = yield ex.submit(next_event, line_iter)
    # With the spawner already started, the very first event must report done.
    assert evt['progress'] == 100
    assert evt['ready']
    assert evt['url'] == app_user.url
5,333,161
def test_compute_volumes():
    """Check _compute_volumes for several masses."""
    estimators = [AverageKLPE(k=3, novelty=True), MaxKLPE(k=3, novelty=True),
                  OCSVM(sigma=1.), IsolationForest(n_estimators=5, random_state=2),
                  KernelSmoothing()]
    # Five random mass levels in (0, 1), sorted ascending.
    alphas = rng.randint(1, 100, size=5) / 100
    alphas = np.sort(alphas)
    for clf in estimators:
        clf = clf.fit(X_train)
        clf_test = clf.score_samples(X_test)
        min_test = np.min(clf_test)
        max_test = np.max(clf_test)
        score_function = clf.score_samples
        vols, offsets = _compute_volumes(score_function, alphas, X_test, U,
                                         vol_tot_cube)
        # check increasing order of volumes and decreasing order of offsets
        assert_array_equal(vols, np.sort(vols))
        assert_array_equal(offsets, -np.sort(-offsets))
        # check volumes in [0, vol_tot_cube]
        assert np.all(0 <= vols) and np.all(vols <= vol_tot_cube)
        # check offset values lie within the observed score range
        assert np.all(min_test <= offsets) and np.all(offsets <= max_test)
        # Fraction of test scores above each offset should recover alphas.
        proba_offsets_pos = (clf_test >= offsets[:, np.newaxis])
        # this test requires to have a large number of samples because
        # np.percentile is an empirical quantile which uses interpolation.
        # this is also why we ask the values to be equal only up to the
        # second decimal.
        assert_array_almost_equal(np.mean(proba_offsets_pos, axis=1), alphas,
                                  decimal=2)
5,333,162
def adjust_learning_rate(optimizer, epoch, args):
    """Sets the learning rate to the initial LR decayed by 10 every 30 epochs"""
    policy = args.lr_policy
    if policy == 'decay':
        new_lr = args.lr * (args.lr_decay ** epoch)
    elif policy == 'poly':
        # Which custom-step interval the current epoch falls into.
        interval = len([x for x in args.lr_custom_step if epoch >= x])
        local_epoch = epoch if interval == 0 else epoch - args.lr_custom_step[interval - 1]
        if interval == 0:
            span = args.lr_custom_step[0]
        elif interval >= len(args.lr_custom_step):
            span = args.epochs - args.lr_custom_step[interval - 1]
        else:
            span = args.lr_custom_step[interval] - args.lr_custom_step[interval - 1]
        new_lr = args.eta_min + (args.lr - args.eta_min) * \
            (1 - local_epoch * 1.0 / span) ** args.lr_decay
    elif policy == 'fix':
        new_lr = args.lr
    elif policy == 'fix_step':
        new_lr = args.lr * (args.lr_decay ** (epoch // args.lr_fix_step))
    elif policy in ('custom_step', 'sgdr_step'):
        interval = len([x for x in args.lr_custom_step if epoch >= x])
        new_lr = args.lr * (args.lr_decay ** interval)
    else:
        # Unknown policy: leave the optimizer untouched.
        return None
    # sgdr_step manages the optimizer elsewhere; param groups flagged with
    # 'lr_constant' keep their rate.
    if optimizer is not None and policy != 'sgdr_step':
        for group in optimizer.param_groups:
            if group.get('lr_constant', None) is None:
                group['lr'] = new_lr
    return new_lr
5,333,163
def refresh_well_known_oidc(realm):
    """
    Refresh Open ID Connect .well-known

    :param django_keycloak.models.Realm realm:
    :rtype django_keycloak.models.Realm
    """
    base_url = realm.server.internal_url or realm.server.url

    # While fetching the well_known we should not use the prepared URL.
    oidc_client = KeycloakRealm(
        server_url=base_url,
        realm_name=realm.name
    ).open_id_connect(client_id='', client_secret='')

    realm.well_known_oidc = oidc_client.well_known.contents
    realm.save(update_fields=['_well_known_oidc'])
    return realm
5,333,164
def get_access_token(jwt_token: str) -> str:
    """
    Gets an access token, used for fully-authenticated app actions
    """
    # First installation of the app provides the token-minting endpoint.
    listing = requests.get(
        "https://api.github.com/app/installations",
        headers=GH_JWT_HEADER(jwt_token),
    )
    tokens_url = listing.json()[0]["access_tokens_url"]
    token_response = requests.post(tokens_url, headers=GH_JWT_HEADER(jwt_token))
    return token_response.json()["token"]
5,333,165
def m_create_identities():
    """Create identities in the Trust Framework hierarchy.

    Retrieves the Alastria account and then registers one DID per organization
    under the parent node "ala", pretty-printing each resulting DID document.
    """
    # Retrieve the Alastria account for node "ala", using the password from
    # deployment (not for production).  The previously stored (and unused)
    # `alakey` local has been removed.
    print(f"\n==> Retrieve Alastria account")
    Alastria_account = wallet.account("Alastria", "ThePassword")
    print(f"Done")

    # (banner label, DID, node name, website, commercial name, pprint indent)
    identities = [
        ("IGP Lechazo", "did:elsi:VATES-Q0901252G", "igplechazodecastillayleon",
         "https://igplechazodecastillayleon.es",
         "IGP Lechazo - C.R. Lechazo de Castilla y Leon", 3),
        ("Valdecuevas", "did:elsi:VATES-B47027982", "valdecuevas",
         "https://www.valdecuevas.es", "GRUPO VALDECUEVAS AGRO, SLU", 1),
        ("Entrepinares", "did:elsi:VATES-A47037296", "entrepinares",
         "https://www.entrepinares.es", "QUESERIAS ENTREPINARES S.A.U.", 1),
        ("Don Saturnino", "did:elsi:VATES-B37469509", "donsaturnino",
         "http://donsaturnino.es", "INCAHER-DON SATURNINO S.L", 1),
        ("DENOMINACIÓN DE ORIGEN LEÓN", "did:elsi:VATES-Q2400564G", "doleon",
         "https://www.doleon.es/", "DENOMINACION DE ORIGEN LEON", 1),
    ]
    for label, did, node, website, name, indent in identities:
        _create_identity(label, did, node, website, name,
                         Alastria_account, indent)


def _create_identity(label, did, this_node, website, commercial_name,
                     account, indent):
    """Create a single DID under parent node "ala" and pretty-print the DID doc."""
    print(f"\n==> Creating the {label} identity")
    didDoc = tf.create_DID(did, "ala", this_node, website, commercial_name,
                           account)
    if didDoc is not None:
        pprint(didDoc, indent=indent)
5,333,166
def donation_process_subscription_deleted(event):
    """Handle a subscription-deleted webhook event.

    :param event: the webhook event payload
    :return: None
    """
    donation_manager = DonationManager()
    subscription = event['data']['object']
    ended_at = subscription['ended_at']
    canceled_at = subscription['canceled_at']

    # At this time we are only supporting the UI for canceling subscriptions
    if canceled_at is not None or ended_at is not None:
        donation_manager.mark_subscription_canceled_or_ended(
            subscription['id'], subscription['customer'],
            ended_at, canceled_at)
    return None
5,333,167
def get_most_probable_strand(filenames, tolerance, sample_name):
    """Return the most probable strand given 3 feature-count files.

    The three files correspond to strand options 0, 1 and 2 of featureCounts
    (one sample per file).  The per-strand total counts, the strandness ratio
    ``counts[1] / (counts[1] + counts[2])`` and the decided strand are
    returned as a one-row DataFrame indexed by ``sample_name``.

    Decision, with ``tolerance`` < 0.5:

    * ratio < tolerance            -> 2 (reversely stranded)
    * ratio > 1 - tolerance        -> 1 (stranded)
    * ratio within 0.5 +- tolerance -> 0 (unstranded)
    * otherwise                    -> None (undecided)
    """
    counts = {}
    for path in (Path(name) for name in filenames):
        # The strand option (0/1/2) is the last character of the parent dir.
        strand_key = str(path.parent)[-1]
        # Feature counts may have extra columns; the count is always the
        # last column of the matrix.
        counts_df = FeatureCount(path).df
        counts[strand_key] = int(counts_df[counts_df.columns[-1]].sum())

    ratio = counts["1"] / (counts["1"] + counts["2"])
    counts["strandness"] = ratio

    if ratio < tolerance:
        counts["strand"] = 2
    elif ratio > 1 - tolerance:
        counts["strand"] = 1
    elif 0.5 - tolerance < ratio < 0.5 + tolerance:
        counts["strand"] = 0
    else:
        counts["strand"] = None

    return pd.DataFrame(counts, index=[sample_name])
5,333,168
def main(input_file: str, output_file: str, max_words: int = 10000000, max_len: int = 20) -> None:
    """
    Calculate ngram probabilities at different positions.

    :param input_file: List of words, one per line
    :param output_file: JSON file where ngram probabilities are stored
    :param max_words: Max. no. of words to analyse
    :param max_len: Max. ngram length
    :return: None
    """
    # Dicts for counting the ngrams by position.
    end_ngrams = defaultdict(int)
    start_ngrams = defaultdict(int)
    in_ngrams = defaultdict(int)
    all_ngrams = defaultdict(int)

    # Gather counts
    print('Words analyzed of max.', str(max_words))
    c = 0  # Line counter
    # Context manager guarantees the input handle is closed (the original
    # iterated an unclosed open() call).
    with open(input_file, 'r') as in_handle:
        for line in in_handle:
            line = line.strip().lower()
            if '-' in line:
                line = re.sub('.*-', '', line)  # Hyphen: take part following last hyphen
            line_middle = line[1:-1]
            # NOTE: words shorter than n are deliberately "overcounted" —
            # line[:n] / line[-n:] repeat the whole word for every n >= len(line).
            for n in range(3, max_len + 1):
                ngram = line[:n]
                start_ngrams[ngram] += 1
                all_ngrams[ngram] += 1
                ngram = line[-n:]
                end_ngrams[ngram] += 1
                all_ngrams[ngram] += 1
                for m in range(len(line_middle) - n + 1):
                    ngram = line_middle[m:m + n]
                    if not ngram == '':
                        in_ngrams[ngram] += 1
                        all_ngrams[ngram] += 1
            if c % 10000 == 0:
                sys.stderr.write('\r' + str(c))
                sys.stderr.flush()
            c += 1
            if c == max_words:
                break
    sys.stderr.write('\n')

    print('Calculating ngrams probabilities')
    # Drop singletons (v > 1) and normalize by total occurrences anywhere.
    start_ngrams = {k: v / all_ngrams[k] for k, v in start_ngrams.items() if v > 1}
    end_ngrams = {k: v / all_ngrams[k] for k, v in end_ngrams.items() if v > 1}
    in_ngrams = {k: v / all_ngrams[k] for k, v in in_ngrams.items() if v > 1}

    # Write dicts to file
    with open(output_file, "w") as f:
        json.dump({
            "prefix": start_ngrams,
            "infix": in_ngrams,
            "suffix": end_ngrams
        }, f)
5,333,169
def listBlogs(username, password, serverURL=None):
    """Get a list of your blogs

    Returns: list of dictionaries
        [{"blogid": ID_of_this_blog,
          "blogName": "name_of_this_blog",
          "url": "URL_of_this_blog"}, ...]

    Arguments:
        - username: your weblog username
        - password: your weblog password
        - serverURL: URL of remote server
            (optional, defaults to constants.xmlrpcServer)

    Example:
        >>> blogList = blogger.listBlogs("my_blogger_username", "my_secret_password")
        >>> for blog in blogList:
        ...     print "ID:", blog["blogid"]
        ...     print "Name:", blog["blogName"]
        ...     print "URL:", blog["url"]
        ...     print

    Manila notes:
        - Manila does not support this method, because it does not keep a
          centralized database of a user's blogs.

    NOTE(review): uses the Python-2-only ``xmlrpclib`` module (``xmlrpc.client``
    in Python 3); the whole file appears to target Python 2.
    """
    # Delegates to the Blogger API's getUsersBlogs over XML-RPC.
    server = xmlrpclib.Server(serverURL or constants.xmlrpcServer, constants.transport)
    response = server.blogger.getUsersBlogs(constants.applicationKey, username, password)
    return response
5,333,170
def find_lineup_no_optimization(set1, set2):
    """
    Find the approximate offset between two GPS data sets without range
    start optimization.

    The set that starts later is treated as primary; the offset is applied
    to the other (secondary) set. After mapping rounded timestamps to
    points for both sets, the optimal offset is searched starting from the
    later start time. Compared to the optimized version, more candidate
    values must be checked to obtain an accurate offset.

    Args:
        set1: GpsDataSet object
        set2: GpsDataSet object

    Returns:
        Tuple of two datetimes: the start time for set 1 and for set 2
        given the calculated offset, or (None, None) when no lineup is
        found.
    """
    start1 = set1.gps_data_list[0].time
    start2 = set2.gps_data_list[0].time

    # Primary = the set that starts later (ties go to set2, matching the
    # original if/else ordering).
    set1_is_primary = start1 > start2
    primary_set, secondary_set = (set1, set2) if set1_is_primary else (set2, set1)
    later_start_time = utils.round_time(start1 if set1_is_primary else start2)

    # Dicts mapping rounded times to points for each set.
    primary_mapping = create_time_to_points_mapping(primary_set)
    secondary_mapping = create_time_to_points_mapping(secondary_set)

    offset_range_length = 200  # how many offsets to check: (-100, 100)
    point_checking_range_length = 200  # points checked around each offset: (-100, 100)

    # find best offset
    optimal_offset = find_optimal_offset(primary_mapping,
                                         secondary_mapping,
                                         later_start_time,
                                         offset_range_length,
                                         point_checking_range_length)
    print(optimal_offset)

    if optimal_offset is None:
        print("no optimal line-up for these two data sets; check if correct files are being used")
        return (None, None)

    if primary_set == set1:
        print("Optimal offset: set 2 is %s seconds from set 1" % optimal_offset)
        return (later_start_time,
                later_start_time + timedelta(seconds=optimal_offset))
    else:
        print("Optimal offset: set 1 is %s seconds from set 2" % optimal_offset)
        return (later_start_time + timedelta(seconds=optimal_offset),
                later_start_time)
5,333,171
def strongly_connected_components(G):
    """Generate nodes in strongly connected components of graph.

    Parameters
    ----------
    G : NetworkX Graph
       A directed graph.

    Returns
    -------
    comp : generator of lists
       A list of nodes for each strongly connected component of G.

    Raises
    ------
    NetworkXNotImplemented: If G is undirected.

    See Also
    --------
    connected_components, weakly_connected_components

    Notes
    -----
    Uses Tarjan's algorithm with Nuutila's modifications.
    Nonrecursive version of algorithm.

    References
    ----------
    .. [1] Depth-first search and linear graph algorithms, R. Tarjan
       SIAM Journal of Computing 1(2):146-160, (1972).

    .. [2] On finding the strongly connected components in a directed graph.
       E. Nuutila and E. Soisalon-Soinen
       Information Processing Letters 49(1): 9-14, (1994)..
    """
    # preorder[v]: DFS discovery index; lowlink[v]: smallest preorder
    # reachable from v's subtree; scc_found marks nodes already emitted.
    preorder={}
    lowlink={}
    scc_found={}
    # scc_queue plays the role of Tarjan's component stack.
    scc_queue = []
    i=0     # Preorder counter
    for source in G:
        if source not in scc_found:
            # Explicit DFS stack instead of recursion.
            queue=[source]
            while queue:
                v=queue[-1]
                if v not in preorder:
                    i=i+1
                    preorder[v]=i
                done=1
                v_nbrs=G[v]
                # Descend into the first unvisited neighbor, if any.
                for w in v_nbrs:
                    if w not in preorder:
                        queue.append(w)
                        done=0
                        break
                if done==1:
                    # All neighbors visited: compute lowlink from neighbors
                    # not yet assigned to a component.
                    lowlink[v]=preorder[v]
                    for w in v_nbrs:
                        if w not in scc_found:
                            if preorder[w]>preorder[v]:
                                lowlink[v]=min([lowlink[v],lowlink[w]])
                            else:
                                lowlink[v]=min([lowlink[v],preorder[w]])
                    queue.pop()
                    if lowlink[v]==preorder[v]:
                        # v is a component root: pop every node discovered
                        # after v off the component stack.
                        scc_found[v]=True
                        scc=[v]
                        while scc_queue and preorder[scc_queue[-1]]>preorder[v]:
                            k=scc_queue.pop()
                            scc_found[k]=True
                            scc.append(k)
                        yield scc
                    else:
                        scc_queue.append(v)
5,333,172
def matching_plots_nn(plots_0, plots_1, K):
    """
    Match points of ``plots_0`` to their nearest points in ``plots_1``,
    keeping only the K globally closest (i, j) pairs.

    :param plots_0: array-like of shape (M, D) — query points
    :param plots_1: array-like of shape (N, D) — candidate points
    :param K: number of smallest-distance pairs to keep (K < M*N,
        same constraint as the original np.argpartition call)
    :return: dict mapping row index i in plots_0 -> column index j in
        plots_1. If several of the K best pairs share the same i, later
        pairs overwrite earlier ones (same behavior as before).
    """
    a = np.asarray(plots_0, dtype=np.float32)
    b = np.asarray(plots_1, dtype=np.float32)
    N = b.shape[0]

    # Pairwise L2 distance matrix computed in one vectorized broadcast
    # pass — replaces the original O(M*N) Python double loop with the
    # same float32 cost matrix.
    cost_mat = np.linalg.norm(a[:, None, :] - b[None, :, :], axis=-1).astype(np.float32)

    # Indices of the K smallest costs (unordered within the K best).
    inds = np.argpartition(cost_mat.ravel(), K)[:K]
    inds_i = inds // N
    inds_j = inds % N

    mapping = {}
    for i, j in zip(inds_i, inds_j):
        mapping[int(i)] = int(j)
    return mapping
5,333,173
def find_file_start(chunks, pos):
    """Find a chunk before the one specified which is not a file block.

    File blocks carry chunk ids 0x100 and 0x102; scanning backwards from
    ``pos - 1`` down to index 1, return the first index holding anything
    else.  Falls back to 0 when every earlier chunk is a file block.
    """
    for candidate in range(pos - 1, 0, -1):
        if chunks[candidate][0] not in (0x100, 0x102):
            return candidate
    return 0
5,333,174
def navigation_children(parser, token):
    """Template tag: render the navigation children of the given object.

    Usage: ``{% navigation_children object [tree=tree_name] %}``
    """
    bits = token.contents.split()
    kwargs = extract_kwargs(bits)
    # The tag name itself is bits[0]; the target object must follow it.
    if len(bits) < 2:
        raise template.TemplateSyntaxError(
            _("navigation_children requires object as argument and optionally tree={{tree_name}}")
        )
    return NavigationChildrenNode(bits[1], **kwargs)
5,333,175
def _update_manifest(manifest_file, name, version):
    """Record a tool's name and version in the toolplus manifest file.

    Loads the existing YAML manifest when present, overwrites the entry
    for ``name`` and writes the whole manifest back out.
    """
    manifest = {}
    if os.path.exists(manifest_file):
        with open(manifest_file) as fh:
            manifest = yaml.safe_load(fh)
    manifest[name] = {"name": name, "version": version}
    with open(manifest_file, "w") as fh:
        yaml.safe_dump(manifest, fh,
                       default_flow_style=False, allow_unicode=False)
5,333,176
def get_all_ann_index(self):
    """ Return the ids of every stored annotation. """
    # Iterating a dict yields its keys; materialize them as a list.
    return [ann_id for ann_id in self.ann_infos]
5,333,177
def file_to_list(file_path):
    """Read a text file into a list of lines with newlines removed.

    :param file_path: path of the file to read
    :returns: list of the file's lines, each without its trailing newline
    :raises IOError: if the file cannot be opened

    @author: jhuang
    @time:1/22/2018
    """
    lists = []
    # The original used the Python-2-only ``file()`` builtin and never
    # closed the handle; ``open`` + context manager fixes both.
    with open(file_path, "r") as fd:
        for line in fd:
            lists.append(str(line).replace("\n", ""))
    return lists
5,333,178
def reverse_words(str):
    """Reverses the letters in each word of a string.

    Runs of whitespace are collapsed to single spaces (``str.split``
    semantics), as in the original.  Unlike the original, an empty or
    all-whitespace input returns '' instead of raising IndexError on
    ``words[0]``.

    NOTE: the parameter shadows the builtin ``str``; the name is kept for
    interface compatibility with existing callers.
    """
    return ' '.join(reverse(word) for word in str.split())
5,333,179
def download_file(
    bucket_name, source_file_name, destination_file_name, bucket_client=None
):
    """Downloads a blob from the bucket.

    :param bucket_name: ID of the GCS bucket.
    :param source_file_name: ID of the GCS object to download.
    :param destination_file_name: local path to download to.
    :param bucket_client: optional pre-built bucket client; one is
        created from a fresh ``storage.Client`` when omitted.
    """
    # Lazily build a bucket client when the caller did not supply one.
    if bucket_client is None:
        bucket_client = storage.Client().bucket(bucket_name)

    # `Bucket.blob` (unlike `Bucket.get_blob`) builds a client-side
    # representation without fetching anything from Google Cloud Storage,
    # which is all we need before downloading.
    bucket_client.blob(source_file_name).download_to_filename(destination_file_name)

    log.info(
        f"Downloaded storage object {source_file_name} "
        f"from bucket {bucket_name} "
        f"to local file {destination_file_name}"
    )
5,333,180
def review_request_closed_cb(sender, user, review_request, type, **kwargs):
    """Send e-mail when a review request is closed.

    Listens to the
    :py:data:`~reviewboard.reviews.signals.review_request_closed` signal
    and sends an e-mail if this type of notification is enabled (through
    the ``mail_send_review_close_mail`` site configuration setting).
    """
    siteconfig = SiteConfiguration.objects.get_current()
    # Guard clause: bail out early when close-mail notifications are off.
    if not siteconfig.get('mail_send_review_close_mail'):
        return
    mail_review_request(review_request, user, close_type=type)
5,333,181
def parse_www_authenticate_header(value, on_update=None):
    """Parse an HTTP WWW-Authenticate header into a
    :class:`WWWAuthenticate` object.

    :param value: a WWW-Authenticate header to parse.
    :param on_update: an optional callable that is called every time a value
                      on the :class:`WWWAuthenticate` object is changed.
    :return: a :class:`WWWAuthenticate` object.
    """
    # Missing/empty header: return an empty object.
    if not value:
        return WWWAuthenticate(on_update=on_update)
    try:
        auth_type, auth_info = value.split(None, 1)
    except (ValueError, AttributeError):
        # No auth-info part after the scheme (or a non-splittable value):
        # treat the whole value as the auth type.
        return WWWAuthenticate(value.lower(), on_update=on_update)
    return WWWAuthenticate(auth_type.lower(), parse_dict_header(auth_info), on_update)
5,333,182
def get_image_feature_column(X: pd.DataFrame) -> str:
    """
    Get only the image feature column name from X
    """
    obj_cols = X.select_dtypes(object)
    # Base64-encoded JPEG payloads start with "/9j/"; a column qualifies
    # when any of its (non-null) values carries that prefix.
    is_img_col = [
        obj_cols[col].str.startswith("/9j/", na=False).any() for col in obj_cols
    ]
    # should have just one image feature
    assert sum(is_img_col) == 1, "expecting just one image feature column"
    return obj_cols.columns[np.argmax(is_img_col)]
5,333,183
def pbr_specular_glossiness(mh): """Creates node tree for pbrSpecularGlossiness materials.""" # This does option #1 from # https://github.com/KhronosGroup/glTF-Blender-IO/issues/303 # Sum a Glossy and Diffuse Shader glossy_node = mh.node_tree.nodes.new('ShaderNodeBsdfGlossy') diffuse_node = mh.node_tree.nodes.new('ShaderNodeBsdfDiffuse') add_node = mh.node_tree.nodes.new('ShaderNodeAddShader') glossy_node.location = 10, 220 diffuse_node.location = 10, 0 add_node.location = 230, 100 mh.node_tree.links.new(add_node.inputs[0], glossy_node.outputs[0]) mh.node_tree.links.new(add_node.inputs[1], diffuse_node.outputs[0]) emission_socket, alpha_socket = make_output_nodes( mh, location=(370, 250), shader_socket=add_node.outputs[0], make_emission_socket=mh.needs_emissive(), make_alpha_socket=not mh.is_opaque(), ) emission( mh, location=(-200, 860), color_socket=emission_socket, ) base_color( mh, is_diffuse=True, location=(-200, 380), color_socket=diffuse_node.inputs['Color'], alpha_socket=alpha_socket, ) specular_glossiness( mh, location=(-200, -100), specular_socket=glossy_node.inputs['Color'], roughness_socket=glossy_node.inputs['Roughness'], ) copy_socket( mh, copy_from=glossy_node.inputs['Roughness'], copy_to=diffuse_node.inputs['Roughness'], ) normal( mh, location=(-200, -580), normal_socket=glossy_node.inputs['Normal'], ) copy_socket( mh, copy_from=glossy_node.inputs['Normal'], copy_to=diffuse_node.inputs['Normal'], ) if mh.pymat.occlusion_texture is not None: node = make_settings_node(mh) node.location = (610, -1060) occlusion( mh, location=(510, -970), occlusion_socket=node.inputs['Occlusion'], )
5,333,184
def stop():
    """
    Stops session, clean all tables associated with this session.

    Delegates to the process-wide session singleton held in
    RuntimeInstance.

    Examples
    --------
    >>> from common.python import session
    >>> session.stop()
    """
    RuntimeInstance.SESSION.stop()
5,333,185
def _PlusMinusString(added_items, removed_items):
    """Return a concatenation of the items, with a minus on removed items.

    Args:
      added_items: list of string items added.
      removed_items: list of string items removed.

    Returns:
      A unicode string with all the removed items first (preceeded by minus
      signs) and then the added items.
    """
    assert all(isinstance(item, string_types)
               for item in added_items + removed_items)
    # TODO(jrobbins): this is not good when values can be negative ints.
    parts = ['-%s' % item.strip() for item in removed_items if item]
    parts.extend('%s' % item for item in added_items if item)
    return ' '.join(parts)
5,333,186
def write_to_log(msg, room_name):
    """
    Writes chat events to log.

    :param msg: the message to write to the log.
    :type msg: str
    :param room_name: the room name.
    :type room_name: str
    """
    # One log file per day, named YYYY-MM-DD.log under the room's logs dir.
    log_name = time.strftime('%Y-%m-%d') + '.log'
    log_dir = config.CONFIG_PATH + room_name + '/logs/'
    encoded_msg = msg.encode(encoding='UTF-8', errors='ignore')
    file_handler.file_writer(log_dir, log_name, encoded_msg)
5,333,187
def merge_sort(sez):
    """Sort a list with merge sort.

    The list is split in half, each half is sorted recursively, and the two
    sorted halves are merged by repeatedly taking the smaller head element.
    A new sorted list is returned for inputs longer than one element; the
    input list is never modified.

    (Docstring translated from Slovenian; the merge step, previously the
    external ``zdruzi`` helper, is now the local ``_merge`` below.)

    :param sez: list of mutually comparable items
    :returns: sorted list
    """
    n = len(sez)
    if n <= 1:
        return sez
    levo = merge_sort(sez[:n // 2])
    desno = merge_sort(sez[n // 2:])
    return _merge(levo, desno)


def _merge(left, right):
    """Merge two sorted lists into one sorted list (stable)."""
    merged = []
    i = j = 0
    while i < len(left) and j < len(right):
        # <= keeps equal elements from the left half first => stable.
        if left[i] <= right[j]:
            merged.append(left[i])
            i += 1
        else:
            merged.append(right[j])
            j += 1
    # At most one of these extends with anything.
    merged.extend(left[i:])
    merged.extend(right[j:])
    return merged
5,333,188
def forall_funct(pred, l):
    """Recursive, purely functional version of ``forall``.

    Returns True when ``pred`` holds for every element of ``l``, using
    ONLY ``empty``, ``head`` and ``tail`` — no ``for`` loop and no
    assignment. An empty list vacuously satisfies the predicate.
    (Docstring translated from the original French exercise statement.)
    """
    if empty(l):
        return True
    # ``and`` short-circuits: recursion stops at the first failing element.
    return pred(head(l)) and forall_funct(pred, tail(l))
5,333,189
def get_pairs(l, k):
    """
    Given a list L of N unique positive integers, returns the count of the
    total pairs of numbers whose difference is K.

    Every integer is stored in a set, then for each integer I in the input
    list the presence of I + K in the set is checked, giving O(N) time.
    The approach may be generalized to the case of non-unique positive
    integers.

    :param k: the given difference
    :type k: int
    :param l: the list of input integers
    :type l: list
    :return: the count of the total pairs of numbers whose difference is k
    :rtype: int
    """
    # A set gives the same O(1) membership test as the original value->1
    # dict without the dummy values; sum() over a generator avoids building
    # a throwaway list just to take its length.
    members = set(l)
    return sum(1 for i in l if i + k in members)
5,333,190
def numba_to_jax(name: str, numba_fn, abstract_eval_fn, batching_fn=None):
    """Create a jittable JAX function for the given Numba function.

    Args:
      name: The name under which the primitive will be registered.
      numba_fn: The function that can be compiled with Numba.
      abstract_eval_fn: The abstract evaluation function.
      batching_fn: If set, this function will be used when vmap-ing the returned
        function.

    Returns:
      A jitable JAX function.
    """
    primitive = jax.core.Primitive(name)
    # The primitive is always registered as multi-output; single-output
    # functions are normalized to a one-element list below.
    primitive.multiple_results = True

    def abstract_eval_fn_always(*args, **kwargs):
        # Special-casing when only a single tensor is returned.
        shapes = abstract_eval_fn(*args, **kwargs)
        if not isinstance(shapes, collections.abc.Collection):
            return [shapes]
        else:
            return shapes

    # Abstract eval + eager (non-jitted) evaluation rule.
    primitive.def_abstract_eval(abstract_eval_fn_always)
    primitive.def_impl(partial(_np_evaluation_rule, numba_fn, abstract_eval_fn_always))

    def _primitive_bind(*args):
        # Wrapper that unwraps the single-output case again so callers see
        # a tensor, not a one-element list.
        result = primitive.bind(*args)
        output_shapes = abstract_eval_fn(*args)
        # Special-casing when only a single tensor is returned.
        if not isinstance(output_shapes, collections.abc.Collection):
            assert len(result) == 1
            return result[0]
        else:
            return result

    # vmap support: use the caller's batching rule when given, otherwise
    # fall back to naive per-example looping over the bind wrapper.
    if batching_fn is not None:
        batching.primitive_batchers[primitive] = batching_fn
    else:
        batching.primitive_batchers[primitive] = partial(
            _naive_batching, _primitive_bind
        )

    # XLA lowering rules for jit on CPU and GPU backends.
    xla.backend_specific_translations["cpu"][primitive] = partial(
        _xla_translation_cpu, numba_fn, abstract_eval_fn_always
    )
    xla.backend_specific_translations["gpu"][primitive] = partial(
        _xla_translation_gpu, numba_fn, abstract_eval_fn_always
    )

    return _primitive_bind
5,333,191
def zip_metadata(iterable: Iterable[T], keys: Iterable[str], values: Iterable[Any]) -> Iterator[T]:
    """
    Adds meta-data to each object in an iterator.

    :param iterable: The object iterable.
    :param keys: The meta-data key iterable.
    :param values: The meta-data iterable.
    :return: An iterator over the objects with added meta-data.
    """
    # Local import mirrors the original (avoids a module-level cycle).
    from ..meta import with_metadata
    triples = zip(iterable, keys, values)
    return (with_metadata(item, meta_key, meta_value)
            for item, meta_key, meta_value in triples)
5,333,192
def get_projects_query_flags(project_ids):
    """\
    1. Fetch `needs_final` for each Project
    2. Fetch groups to exclude for each Project
    3. Trim groups to exclude ZSET for each Project

    Returns (needs_final, group_ids_to_exclude)
    """
    project_ids = set(project_ids)
    now = time.time()
    # All commands go through one pipeline: a single redis round-trip.
    p = redis_client.pipeline()

    # One GET per project; any truthy value means queries must use FINAL.
    needs_final_keys = [get_project_needs_final_key(project_id) for project_id in project_ids]
    for needs_final_key in needs_final_keys:
        p.get(needs_final_key)

    # Per project: trim expired entries (score = insertion time) from the
    # exclusion ZSET, then read the still-valid group ids.
    exclude_groups_keys = [get_project_exclude_groups_key(project_id) for project_id in project_ids]
    for exclude_groups_key in exclude_groups_keys:
        p.zremrangebyscore(exclude_groups_key, float('-inf'), now - settings.REPLACER_KEY_TTL)
        p.zrevrangebyscore(exclude_groups_key, float('inf'), now - settings.REPLACER_KEY_TTL)

    results = p.execute()

    # Pipeline reply layout:
    #   [GET x N] + [ZREMRANGEBYSCORE, ZREVRANGEBYSCORE] x N
    needs_final = any(results[:len(project_ids)])
    # Every second reply starting after the first ZREMRANGEBYSCORE is a
    # ZREVRANGEBYSCORE result; sum(..., []) flattens those lists.
    exclude_groups = sorted({
        int(group_id) for group_id
        in sum(results[(len(project_ids) + 1)::2], [])
    })

    return (needs_final, exclude_groups)
5,333,193
def openTypeNameVersionFallback(info):
    """
    Fallback to *versionMajor.versionMinor* in the form 0.000.
    """
    major = getAttrWithFallback(info, "versionMajor")
    # Minor is zero-padded to three digits (e.g. 1.002).
    minor = str(getAttrWithFallback(info, "versionMinor")).zfill(3)
    return "%d.%s" % (major, minor)
5,333,194
def do_class_schema(mc, args):
    """Display class schema"""
    # Fetch the schema for the requested class/methods and pretty-print
    # it as JSON.
    schema = mc.schemas.get(
        args.class_name,
        args.method_names,
        class_version=args.class_version,
        package_name=args.package_name,
    )
    print(utils.json_formatter(schema.data))
5,333,195
def get_quoted_text(text):
    """Method used to get quoted text.

    Splits the text on blank lines and returns the first chunk that
    starts with '>' (with the '>' stripped).

    :param text: The replyable text
    :return: The first quote in the text. If no quotes are found, then the
             entire text is returned
    """
    first_quote = next(
        (chunk[1:] for chunk in text.split('\n\n') if chunk.startswith('>')),
        None,
    )
    return text if first_quote is None else first_quote
5,333,196
def pad_sequences(sequences, pad_tok):
    """
    Pad every sequence to the length of the longest one.

    Args:
        sequences: a generator of list or tuple
        pad_tok: the char to pad with

    Returns:
        a list of list where each sublist has same length
    """
    # Materialize first: the docstring allows a generator, but the
    # original's max(map(len, ...)) exhausted it before _pad_sequences
    # could iterate it, producing an empty result.
    sequences = list(sequences)
    max_length = max(len(seq) for seq in sequences)
    sequence_padded, sequence_length = _pad_sequences(sequences, pad_tok, max_length)
    return sequence_padded, sequence_length
5,333,197
def get_username_for_os(os):
    """Return username for a given os.

    Unknown OS names yield None. (The parameter shadows the builtin
    ``os`` module name; kept for interface compatibility.)
    """
    return {
        "alinux2": "ec2-user",
        "centos7": "centos",
        "ubuntu1804": "ubuntu",
        "ubuntu2004": "ubuntu",
    }.get(os)
5,333,198
def stats_getter(context, core_plugin, ignore_list=None):
    """Update Octavia statistics for each listener (virtual server)

    Collects NSX load-balancer-service statistics, maps each virtual
    server back to its Octavia listener via the neutron DB bindings, and
    returns a list of per-listener stats dicts shaped like
    lb_const.LB_EMPTY_STATS (plus an 'id' key).

    :param context: neutron request context (provides the DB session)
    :param core_plugin: core plugin exposing ``nsxlib``
    :param ignore_list: optional iterable of loadbalancer ids to skip
    :return: list of stats dicts, one per resolvable virtual server
    """
    stat_list = []
    lb_service_client = core_plugin.nsxlib.load_balancer.service
    # Go over all the loadbalancers & services
    lb_bindings = nsx_db.get_nsx_lbaas_loadbalancer_bindings(
        context.session)
    for lb_binding in lb_bindings:
        if ignore_list and lb_binding['loadbalancer_id'] in ignore_list:
            continue
        lb_service_id = lb_binding.get('lb_service_id')
        try:
            # get the NSX statistics for this LB service
            # Since this is called periodically, silencing it at the logs
            rsp = lb_service_client.get_stats(lb_service_id, silent=True)
            if rsp and 'virtual_servers' in rsp:
                # Go over each virtual server in the response
                for vs in rsp['virtual_servers']:
                    # look up the virtual server in the DB
                    vs_bind = nsx_db.get_nsx_lbaas_listener_binding_by_vs_id(
                        context.session, vs['virtual_server_id'])
                    if vs_bind and 'statistics' in vs:
                        vs_stats = vs['statistics']
                        # Start from a zeroed template and accumulate the
                        # NSX counters mapped by LB_STATS_MAP.
                        stats = copy.copy(lb_const.LB_EMPTY_STATS)
                        stats['id'] = vs_bind.listener_id
                        stats['request_errors'] = 0  # currently unsupported
                        for stat, stat_value in lb_const.LB_STATS_MAP.items():
                            lb_stat = stat_value
                            stats[stat] += vs_stats[lb_stat]
                        stat_list.append(stats)
        except nsxlib_exc.ManagerError:
            # Best-effort collection: a failing backend call for one
            # service must not abort stats for the others.
            pass
    return stat_list
5,333,199