content: string (lengths 22 to 815k) | id: int64 (0 to 4.91M)
def init():
    """Init template functionality"""
    global LOADER, ENVIRONMENT
    LOADER = FileSystemLoader(env["datastore"]["path"])
    ENVIRONMENT = Environment(loader=LOADER,
                              line_statement_prefix='#%',
                              line_comment_prefix='##')
    env['template']['filters'].update(ENVIRONMENT.filters)
    ENVIRONMENT.filters = env['template']['filters']
    env["template"].update({
        "loader": LOADER,
        "env": ENVIRONMENT,
    })
5,344,500
def printWorldMap(location: List[int]):
    """Print the worldmap with the player at the intended location."""
    x: int = location[0]
    y: int = location[1]
    # Place the player icon 'P' in the correct spot.
    map: List[str] = worldmap.copy()
    row: List[str] = list(map[y])
    row[x] = 'P'
    map[y] = "".join(row)
    print("\n".join(map))
5,344,501
def load_data(database_filepath):
    """
    Input:
        1. database_filepath: the path of cleaned datasets
    Output:
        1. X: all messages
        2. y: category columns generated by cleaning process
        3. category_names: category columns' names
    Process:
        1. Read in the dataframe
        2. Select required datasets
        3. Generate category columns' names
    """
    # 1. Read in dataframe
    engine = create_engine('sqlite:///{}'.format(database_filepath))
    df = pd.read_sql_table(database_filepath, engine)

    # 2. Select required datasets
    X = df['message']
    y = df.iloc[:, 4:]

    # 3. Generate category columns' names
    category_names = y.columns

    return X, y, category_names
5,344,502
def click():
    """Save and load cache."""
    cache['a'] = 3
    assert cache[b'a'] == 3
    assert cache[u'a'] == 3
    cache[b'b'] = True
    assert cache[b'b']
    assert cache[u'b']
5,344,503
def lh_fus(temp):
    """latent heat of fusion

    Args:
        temp (float or array): temperature [K]

    Returns:
        float or array: latent heat of fusion
    """
    return 3.336e5 + 1.6667e2 * (FREEZE - temp)
5,344,504
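A minimal usage sketch for lh_fus above. FREEZE is assumed here to be a module-level constant holding the freezing point of water in kelvin; it is not defined in the snippet.

import numpy as np

FREEZE = 273.16  # assumed value for the module constant used by lh_fus

print(lh_fus(273.16))                      # 333600.0 J/kg at the freezing point
print(lh_fus(np.array([263.16, 268.16])))  # ~[335266.7, 334433.4], larger for colder ice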
def eval_edge_enemies(enemy_ships, star_set):
    """
    Evaluates enemies at the top and bottom edge and takes actions appropriately

    Args:
        enemy_ships (list): The enemy ships
        star_set (set): The set of stars

    Returns:
        boolean: True if all checks pass and False if an enemy reached the bottom edge
    """
    to_remove = list()
    for enemy_ship in enemy_ships:
        if enemy_ship.y > WIN_HEIGHT - 50:
            lose_screen(WINDOW, star_set)
        elif enemy_ship.y < 10:
            to_remove.append(enemy_ship)
    for ship in to_remove:
        enemy_ships.remove(ship)
5,344,505
def to_eaf(file_path, eaf_obj, pretty=True): """ modified function from https://github.com/dopefishh/pympi/blob/master/pympi/Elan.py Write an Eaf object to file. :param str file_path: Filepath to write to, - for stdout. :param pympi.Elan.Eaf eaf_obj: Object to write. :param bool pretty: Flag to set pretty printing. """ def rm_none(x): try: # Ugly hack to test if s is a string in py3 and py2 basestring def isstr(s): return isinstance(s, basestring) except NameError: def isstr(s): return isinstance(s, str) return {k: v if isstr(v) else str(v) for k, v in x.items() if v is not None} # Annotation Document ADOCUMENT = etree.Element('ANNOTATION_DOCUMENT', eaf_obj.adocument) # Licence for m in eaf_obj.licenses: n = etree.SubElement(ADOCUMENT, 'LICENSE', {'LICENSE_URL': m[1]}) n.text = m[0] # Header HEADER = etree.SubElement(ADOCUMENT, 'HEADER', eaf_obj.header) # Media descriptiors for m in eaf_obj.media_descriptors: etree.SubElement(HEADER, 'MEDIA_DESCRIPTOR', rm_none(m)) # Linked file descriptors for m in eaf_obj.linked_file_descriptors: etree.SubElement(HEADER, 'LINKED_FILE_DESCRIPTOR', rm_none(m)) # Properties for k, v in eaf_obj.properties: etree.SubElement(HEADER, 'PROPERTY', {'NAME': k}).text = str(v) # Time order TIME_ORDER = etree.SubElement(ADOCUMENT, 'TIME_ORDER') for t in sorted(eaf_obj.timeslots.items(), key=lambda x: int(x[0][2:])): etree.SubElement(TIME_ORDER, 'TIME_SLOT', rm_none( {'TIME_SLOT_ID': t[0], 'TIME_VALUE': t[1]})) # Tiers for t in sorted(eaf_obj.tiers.items(), key=lambda x: x[1][3]): tier = etree.SubElement(ADOCUMENT, 'TIER', rm_none(t[1][2])) for a in t[1][0].items(): ann = etree.SubElement(tier, 'ANNOTATION') alan = etree.SubElement(ann, 'ALIGNABLE_ANNOTATION', rm_none( {'ANNOTATION_ID': a[0], 'TIME_SLOT_REF1': a[1][0], 'TIME_SLOT_REF2': a[1][1], 'SVG_REF': a[1][3]})) etree.SubElement(alan, 'ANNOTATION_VALUE').text = a[1][2] for a in t[1][1].items(): ann = etree.SubElement(tier, 'ANNOTATION') rean = etree.SubElement(ann, 'REF_ANNOTATION', rm_none( {'ANNOTATION_ID': a[0], 'ANNOTATION_REF': a[1][0], 'PREVIOUS_ANNOTATION': a[1][2], 'SVG_REF': a[1][3]})) etree.SubElement(rean, 'ANNOTATION_VALUE').text = a[1][1] # Linguistic types for l in eaf_obj.linguistic_types.values(): etree.SubElement(ADOCUMENT, 'LINGUISTIC_TYPE', rm_none(l)) # Locales for lc, (cc, vr) in eaf_obj.locales.items(): etree.SubElement(ADOCUMENT, 'LOCALE', rm_none( {'LANGUAGE_CODE': lc, 'COUNTRY_CODE': cc, 'VARIANT': vr})) # Languages for lid, (ldef, label) in eaf_obj.languages.items(): etree.SubElement(ADOCUMENT, 'LANGUAGE', rm_none( {'LANG_ID': lid, 'LANG_DEF': ldef, 'LANG_LABEL': label})) # Constraints for l in eaf_obj.constraints.items(): etree.SubElement(ADOCUMENT, 'CONSTRAINT', rm_none( {'STEREOTYPE': l[0], 'DESCRIPTION': l[1]})) # Controlled vocabularies for cvid, (descriptions, cv_entries, ext_ref) in\ eaf_obj.controlled_vocabularies.items(): cv = etree.SubElement(ADOCUMENT, 'CONTROLLED_VOCABULARY', rm_none({'CV_ID': cvid, 'EXT_REF': ext_ref})) for lang_ref, description in descriptions: des = etree.SubElement(cv, 'DESCRIPTION', {'LANG_REF': lang_ref}) if description: des.text = description for cveid, (values, ext_ref) in cv_entries.items(): cem = etree.SubElement(cv, 'CV_ENTRY_ML', rm_none({ 'CVE_ID': cveid, 'EXT_REF': ext_ref})) for value, lang_ref, description in values: val = etree.SubElement(cem, 'CVE_VALUE', rm_none({ 'LANG_REF': lang_ref, 'DESCRIPTION': description})) val.text = value # Lexicon refs for l in eaf_obj.lexicon_refs.values(): etree.SubElement(ADOCUMENT, 'LEXICON_REF', rm_none(l)) 
# Exteral refs for eid, (etype, value) in eaf_obj.external_refs.items(): etree.SubElement(ADOCUMENT, 'EXTERNAL_REF', rm_none( {'EXT_REF_ID': eid, 'TYPE': etype, 'VALUE': value})) # https://github.com/dopefishh/pympi/blob/master/pympi/Elan.py return '<?xml version="1.0" encoding="UTF-8"?>'+etree.tostring(ADOCUMENT, encoding='utf-8').decode("utf-8")
5,344,506
def delete_station(station_id):
    """Delete station from stations

    :param station_id:
    :return: dict with operation status
    """
    logger.debug(f"Call delete_stations: {station_id}")
    # Load old data into structure
    stations = load_stations()
    # Find index in list of stations
    target_index = find_index_in_list_of_dict(
        lst=stations,
        key='StationID',
        value=station_id
    )
    # remove from list by index
    stations.remove(stations[target_index])
    # save changes
    save_stations(stations)
    return {"status": "success"}
5,344,507
def main(data_path: str, saved_model_path: str) -> None: """The main training function""" if saved_model_path: global embedding_dim, char_embedding_dim, hidden_dim, char_hidden_dim, use_bert_cased, \ use_bert_uncased, use_bert_large embedding_dim, char_embedding_dim, hidden_dim, char_hidden_dim, use_bert_cased, use_bert_uncased, \ use_bert_large = load_hyper_params(saved_model_path) if use_bert_uncased or use_bert_cased: use_bert = True else: use_bert = False if use_bert: train_iter, \ val_iter, \ word_to_ix, \ ix_to_word, \ tag_vocab, \ char_to_ix = create_bert_datasets( data_path=data_path, mode=TRAIN, use_bert_cased=use_bert_cased, use_bert_uncased=use_bert_uncased, use_bert_large=use_bert_large ) vocab_size = None word_vocab = None else: train_iter, \ val_iter, \ word_vocab, \ tag_vocab, \ char_to_ix = create_datasets(data_path=data_path, mode=TRAIN) #char_to_ix gets added to automatically with any characters (e.g. < >) encountered during evaluation, but we want to #save the original copy so that the char embeddings para can be computed, hence we create a copy here. word_to_ix, ix_to_word = word_vocab.stoi, word_vocab.itos vocab_size = len(word_to_ix) tag_to_ix, ix_to_tag = tag_vocab.stoi, tag_vocab.itos char_to_ix_original = copy.deepcopy(char_to_ix) word_vocab_original = copy.deepcopy(word_vocab) word_to_ix_original = copy.deepcopy(word_to_ix) ix_to_word_original = copy.deepcopy(ix_to_word) tag_vocab_original = copy.deepcopy(tag_vocab) model = LSTMTagger( embedding_dim=embedding_dim, hidden_dim=hidden_dim, vocab_size=vocab_size, tagset_size=len(tag_to_ix), char_embedding_dim=char_embedding_dim, char_hidden_dim=char_hidden_dim, char_vocab_size=len(char_to_ix), use_bert_cased=use_bert_cased, use_bert_uncased=use_bert_uncased, use_bert_large=use_bert_large ) loss_function = CrossEntropyLoss(ignore_index=tag_to_ix['<pad>']) model.to(device) optimizer = optim.Adam(model.parameters(), lr=learning_rate, weight_decay=weight_decay) if models_folder not in os.listdir(".."): os.mkdir(os.path.join("..", models_folder)) if saved_model_path: av_train_losses, \ av_eval_losses, \ checkpoint_epoch, \ best_accuracy, \ lowest_av_eval_loss, \ best_micro_precision, \ best_micro_recall, \ best_micro_f1, \ best_weighted_macro_precision, \ best_weighted_macro_recall, \ best_weighted_macro_f1 = load_model(model=model, saved_model_path=saved_model_path, optimizer=optimizer) model_file_name = os.path.split(saved_model_path)[1] else: checkpoint_epoch = 0 av_train_losses = [] av_eval_losses = [] lowest_av_eval_loss = 999999 model_file_name = strftime("%Y_%m_%d_%H_%M_%S.pt") #torch.autograd.set_detect_anomaly(True) print("training..\n") model.train() start_epoch = checkpoint_epoch+1 end_epoch = checkpoint_epoch+num_epochs for epoch in range(start_epoch, end_epoch+1): # again, normally you would NOT do 300 epochs, it is toy data model.train() print('===============================') print('\n======== Epoch {} / {} ========'.format(epoch, end_epoch)) batch_num = 0 train_losses = [] for batch in train_iter: batch_num += 1 if batch_num % 20 == 0 or batch_num == 1: if batch_num != 1: print("\nAverage Training loss for epoch {} at end of batch {}: {}".format(epoch, str(batch_num-1),sum(train_losses)/len(train_losses))) print('\n======== at batch {} / {} ========'.format(batch_num, len(train_iter))) model.zero_grad() if use_bert: sentences_in, attention_masks, token_start_idx, targets, original_sentences = batch sentences_in = sentences_in.to(device) attention_masks = attention_masks.to(device) targets = 
targets.to(device) max_length = (attention_masks != 0).max(0)[0].nonzero()[-1].item()+1 if max_length < sentences_in.shape[1]: sentences_in = sentences_in[:, :max_length] attention_masks = attention_masks[:, :max_length] sent_batch_size = sentences_in.shape[0] original_sentences_split = [sent.split() for sent in original_sentences] word_batch_size = max([len(sent) for sent in original_sentences_split]) sent_lengths = [item for item in map(len, token_start_idx)] else: word_batch_size = batch.sentence.shape[0] sent_batch_size = batch.sentence.shape[1] sentences_in = batch.sentence.permute(1, 0).to(device) targets = batch.tags.permute(1, 0).reshape(sent_batch_size * word_batch_size).to(device) attention_masks = None token_start_idx = None original_sentences_split = None sent_lengths = train_iter.sent_lengths[batch_num - 1] words_in = get_words_in( sentences_in=sentences_in, char_to_ix=char_to_ix, ix_to_word=ix_to_word, device=device, original_sentences_split=original_sentences_split ) model.init_hidden(sent_batch_size=sent_batch_size, device=device) tag_logits = model( sentences=sentences_in, words=words_in, char_hidden_dim=char_hidden_dim, sent_lengths=sent_lengths, word_batch_size=word_batch_size, device=device, attention_masks=attention_masks, token_start_idx=token_start_idx ) mask = targets != 1 loss = loss_function(tag_logits, targets) loss /= mask.float().sum() train_losses.append(loss.item()) loss.backward() optimizer.step() av_train_losses.append(sum(train_losses) / len(train_losses)) accuracy, av_eval_loss, micro_precision, micro_recall, micro_f1, weighted_macro_precision, \ weighted_macro_recall, weighted_macro_f1 = eval_model( model=model, loss_function=loss_function, val_iter=val_iter, char_to_ix=char_to_ix, ix_to_word=ix_to_word, ix_to_tag=ix_to_tag, av_eval_losses=av_eval_losses, use_bert=use_bert ) print_results(epoch, accuracy, av_eval_loss, micro_precision, micro_recall, micro_f1, weighted_macro_precision, weighted_macro_recall, weighted_macro_f1) if av_eval_losses[-1] < lowest_av_eval_loss: lowest_av_eval_loss = av_eval_losses[-1] best_accuracy, \ best_micro_precision, \ best_micro_recall, \ best_micro_f1, \ best_weighted_macro_precision, \ best_weighted_macro_recall, \ best_weighted_macro_f1 = accuracy, \ micro_precision, \ micro_recall, \ micro_f1, \ weighted_macro_precision, \ weighted_macro_recall, \ weighted_macro_f1 checkpoint_epoch = epoch save_model( epoch=checkpoint_epoch, model=model, optimizer=optimizer, av_train_losses=av_train_losses, av_eval_losses=av_eval_losses, model_file_name=model_file_name, word_to_ix=word_to_ix_original, ix_to_word=ix_to_word_original, word_vocab=word_vocab_original, tag_vocab=tag_vocab_original, char_to_ix=char_to_ix_original, models_folder=models_folder, embedding_dim=embedding_dim, char_embedding_dim=char_embedding_dim, hidden_dim=hidden_dim, char_hidden_dim=char_hidden_dim, accuracy=best_accuracy, av_eval_loss=lowest_av_eval_loss, micro_precision=best_micro_precision, micro_recall=best_micro_recall, micro_f1=best_micro_f1, weighted_macro_precision=best_weighted_macro_precision, weighted_macro_recall=best_weighted_macro_recall, weighted_macro_f1=best_weighted_macro_f1, use_bert_cased=use_bert_cased, use_bert_uncased=use_bert_uncased, use_bert_large=use_bert_large ) print_results( epoch=checkpoint_epoch, accuracy=best_accuracy, av_eval_loss=lowest_av_eval_loss, micro_precision=best_micro_precision, micro_recall=best_micro_recall, micro_f1=best_micro_f1, weighted_macro_precision=best_weighted_macro_precision, 
weighted_macro_recall=best_weighted_macro_recall, weighted_macro_f1=best_weighted_macro_f1, final=True ) plot_train_eval_loss(av_train_losses, av_eval_losses)
5,344,508
def query_for_build_status(service, branch, target, starting_build_id):
    """Query Android Build Service for the status of the 4 builds in the target
    branch whose build IDs are >= to the provided build ID"""
    try:
        print ('Querying Android Build APIs for builds of {} on {} starting at'
               ' buildID {}').format(target, branch, starting_build_id)
        return service.build().list(buildType='submitted', branch=branch,
                                    target=target, maxResults='4',
                                    startBuildId=starting_build_id).execute()
    except errors.HttpError as error:
        print 'HTTP Error while attempting to query the build status.'
        print error
        return None
5,344,509
def create_pool( dsn=None, *, min_size=10, max_size=10, max_queries=50000, max_inactive_connection_lifetime=300.0, setup=None, init=None, loop=None, authenticator=None, **connect_kwargs, ): """Create an Asyncpg connection pool through Approzium authentication. Takes same arguments as ``asyncpg.create_pool`` in addition to the `authenticator` argument :return: An instance of :class:`~approzium.asyncpg.pool._ApproziumPool`. Example: .. code-block:: python >>> import approzium >>> from approzium.asyncpg import create_pool >>> auth = approzium.AuthClient("myauthenticator.com:6001", disable_tls=True) >>> pool = await create_pool(user='postgres', authenticator=auth) >>> con = await pool.acquire() >>> try: ... await con.fetch('SELECT 1') ... finally: ... await pool.release(con) """ return _ApproziumPool( dsn, connection_class=Connection, min_size=min_size, max_size=max_size, max_queries=max_queries, loop=loop, setup=setup, init=init, max_inactive_connection_lifetime=max_inactive_connection_lifetime, authenticator=authenticator, **connect_kwargs, )
5,344,510
def _get_all_schedule_profile_entries_v1(profile_name, **kwargs): """ Perform a GET call to get all entries of a QoS schedule profile :param profile_name: Alphanumeric name of the schedule profile :param kwargs: keyword s: requests.session object with loaded cookie jar keyword url: URL in main() function :return: Dictionary containing schedule profile entry URIs """ target_url = kwargs["url"] + "system/qos/%s/queues" % profile_name response = kwargs["s"].get(target_url, verify=False) if not common_ops._response_ok(response, "GET"): logging.warning("FAIL: Getting dictionary of URIs of entries in QoS schedule profile '%s' failed with status code %d: %s" % (profile_name, response.status_code, response.text)) else: logging.info("SUCCESS: Getting dictionary of URIs of entries in QoS schedule profile '%s' succeeded" % profile_name) schedule_profile_entries = response.json() # for some reason, this API returns a list when empty, and a dictionary when there is data # make this function always return a dictionary if not schedule_profile_entries: return {} else: return schedule_profile_entries
5,344,511
def loadSource(path):
    """Loads a list of transportReactions.

    Format: R("Macgamb_Transp") R("Madnb_Transp") R("MalaDb_Transp")..."""
    file = open(path, 'r')
    sources = [line.strip() for line in file]
    file.close()
    return sources
5,344,512
def frombin( __data: Bitcode, __dtype: SupportedDataType | bytes, num: int = 1, *, encoding: Optional[str] = None, signed: bool = True, ) -> ValidDataset: """converts a string of 0 and 1 back into the original data Args: data (BinaryCode): a string of 0 and 1 dtype (Union[int, float, str]): the desired data type to convert to Raises: TypeError: if the desired datatype is not of the integer, floats or strings data type Returns: Union[int, float, str]: converted data """ if __dtype is int: stop = len(__data) step = stop // num if signed: decoded_data = [None] * num for index, i in enumerate(range(0, stop, step)): bindata = __data[i : i + step] decoded_data[index] = int("-%s" % (bindata) if bindata[0] == "1" else bindata, 2) else: decoded_data = [int(__data[i : i + step], 2) for i in range(0, stop, step)] return decoded_data if num != 1 else decoded_data[0] bytedata = int(__data, 2).to_bytes((len(__data) + 7) // 8, config.ENDIAN) if __dtype in ("s", str): return "".join(bytes.decode(bytedata, encoding or config.DEFAULT_STR_FORMAT)) else: try: decoded_data = list( struct.unpack("%s%s%s" % (">" if config.ENDIAN == "big" else "<", num, __dtype), bytedata) ) return decoded_data if num != 1 else decoded_data[0] except struct.error: raise TypeError(f"cannot convert byte data to '{__dtype}'")
5,344,513
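A small sketch of calling frombin above, exercising only its integer branch (which does not touch the module's config or struct helpers). The bit patterns are illustrative.

bits = "00001010" + "11111111"                  # two unsigned 8-bit values packed back to back
print(frombin(bits, int, num=2, signed=False))  # [10, 255]
print(frombin("00101010", int, signed=False))   # 42 -- a single value is returned as a scalar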
def check_size():
    """Assumes the problem size has been set by set_size before some operation.
    This checks if the size was changed.
    Size is defined as (PIs, POs, ANDS, FF, max_bmc).
    Returns True if the size is the same."""
    global npi, npo, nands, nff, nmd
    #print n_pis(),n_pos(),n_ands(),n_latches()
    result = ((npi == n_pis()) and
              (npo == n_pos()) and
              (nands == n_ands()) and
              (nff == n_latches()))
    return result
5,344,514
def vrms2dbm(vp):
    """
    Converts a scalar or a numpy array from volts RMS to dBm,
    assuming an impedance of 50 Ohm.

    Arguments:
        - vp: scalar or numpy array containing values in volts RMS to be converted to dBm

    Returns:
        - scalar or numpy array containing the result
    """
    return 10. * np.log10(20. * (vp) ** 2.)
5,344,515
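A worked example for vrms2dbm above: 1 V RMS across 50 Ohm dissipates 20 mW, i.e. 10*log10(20) which is roughly 13.01 dBm.

import numpy as np

print(vrms2dbm(1.0))                        # ~13.01 dBm
print(vrms2dbm(np.array([0.1, 0.5, 1.0])))  # ~[-6.99, 6.99, 13.01] dBm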
def git_commit(message, transaction_id, author, timestamp): """Add changes to index and commit them in Git :param message: git commit message :param transaction_id: AccuRev transaction ID :param author: AccuRev transaction author :param timestamp: timestamp at which the original AccuRev transaction was performed """ # add all changes (modified, new, deleted) to Git index print '[Git] add changes to index...' exec_cmd(['git', 'add', '--all']) # temporary file used to format the commit message with tempfile.NamedTemporaryFile(delete=False) as f: f.write('{} \n\n[AccuRev transaction: {}]'.format(message, transaction_id)) print '[Git] commit changes...' output = exec_cmd(['git', 'commit', '--file={}'.format(f.name), '--author="AccuRev user {} <>"'.format(author), '--date="{}"'.format(timestamp)], fail=False) # in case of error check if commit failed with 'nothing to commit' otherwise exit if 'ERROR:' in output: if 'nothing to commit' not in output: sys.exit(output) # remove temporary file os.remove(f.name)
5,344,516
def aa_i2c_read(aardvark, slave_addr, flags, data_in):
    """usage: (int return, u08[] data_in) = aa_i2c_read(Aardvark aardvark, u16 slave_addr, AardvarkI2cFlags flags, u08[] data_in)

    All arrays can be passed into the API as an ArrayType object or as
    a tuple (array, length), where array is an ArrayType object and
    length is an integer.  The user-specified length would then serve
    as the length argument to the API function (please refer to the
    product datasheet).  If only the array is provided, the array's
    intrinsic length is used as the argument to the underlying API
    function.

    Additionally, for arrays that are filled by the API function, an
    integer can be passed in place of the array argument and the API
    will automatically create an array of that length.  All output
    arrays, whether passed in or generated, are passed back in the
    returned tuple."""
    if not AA_LIBRARY_LOADED:
        return AA_INCOMPATIBLE_LIBRARY
    # data_in pre-processing
    __data_in = isinstance(data_in, int)
    if __data_in:
        (data_in, num_bytes) = (array_u08(data_in), data_in)
    else:
        (data_in, num_bytes) = isinstance(data_in, ArrayType) and (data_in, len(data_in)) \
            or (data_in[0], min(len(data_in[0]), int(data_in[1])))
        if data_in.typecode != 'B':
            raise TypeError("type for 'data_in' must be array('B')")
    # Call API function
    (_ret_) = api.py_aa_i2c_read(aardvark, slave_addr, flags, num_bytes, data_in)
    # data_in post-processing
    if __data_in:
        del data_in[max(0, min(_ret_, len(data_in))):]
    return (_ret_, data_in)
5,344,517
def get_log_dir(env=None):
    """
    Get directory to use for writing log files. There are multiple
    possible locations for this. The ROS_LOG_DIR environment variable
    has priority. If that is not set, then ROS_HOME/log is used. If
    ROS_HOME is not set, $HOME/.ros/log is used.

    @param env: override os.environ dictionary
    @type  env: dict
    @return: path to use for log file directory
    @rtype: str
    """
    if env is None:
        env = os.environ
    if ROS_LOG_DIR in env:
        return env[ROS_LOG_DIR]
    else:
        return os.path.join(get_ros_home(env), 'log')
5,344,518
def urbandictionary_search(search):
    """
    Searches urbandictionary's API for a given search term.

    :param search: The search term str to search for.
    :return: definition str or None on no match or error.
    """
    if str(search).strip():
        urban_api_url = 'http://api.urbandictionary.com/v0/define?term=%s' % search
        response = util.web.http_get(url=urban_api_url, json=True)
        if response['json'] is not None:
            try:
                definition = response['json']['list'][0]['definition']
                return definition.encode('ascii', 'ignore')
            except (KeyError, IndexError):
                return None
    else:
        return None
5,344,519
def Performance(ALGORITHM_CONFIG, CELLULAR_MODEL_CONFIG, alog_name): """ Performance testing """ # Server profile: num_ues=200, APs=16, Scale=200.0, explore_radius=1 loadbalanceRL = interface.Rainman2(SETTINGS) loadbalanceRL.algorithm_config = ALGORITHM_CONFIG loadbalanceRL.environment_config = CELLULAR_MODEL_CONFIG if alog_name=='linear': result_linear = loadbalanceRL.run_experiment( 'Cellular', 'Qlearning', 'LinearRegression') return result_linear if alog_name=='Naive': result_Naive = loadbalanceRL.run_experiment( 'Cellular', 'Qlearning', 'Naive') return result_Naive if alog_name=='NN': result_NN = loadbalanceRL.run_experiment( 'Cellular', 'Qlearning', 'NN') return result_NN if alog_name=='DQN': result_DQN = loadbalanceRL.run_experiment( 'Cellular', 'Qlearning', 'DQN') return result_DQN
5,344,520
def launch(sid):
    """
    Launch a scan

    Launch the scan specified by the sid.
    """
    data = connect('POST', '/scans/{0}/launch'.format(sid))
    return data['scan_uuid']
5,344,521
def transpose_dict(data, data_key): """Function: transpose_dict Description: Transpose specified keys in a list of dictionaries to specified data types or None. Arguments: (input) data -> Initial list of dictionaries. (input) data_key -> Dictionary of keys and data types. (output) mod_data -> Modified list of dictionaries. """ data = list(data) data_key = dict(data_key) mod_data = list() literal_list = ["bool", "list"] for list_item in data: list_item = dict(list_item) for item in set(list_item.keys()) & set(data_key.keys()): if not list_item[item] or list_item[item] == "None": list_item[item] = None elif data_key[item] == "int": list_item[item] = int(list_item[item]) elif data_key[item] in literal_list: list_item[item] = ast.literal_eval(list_item[item]) mod_data.append(list_item) return mod_data
5,344,522
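A usage sketch for transpose_dict above; it assumes the ast module is imported where the function is defined, as the snippet implies. The row and key names are made up for illustration.

rows = [{"count": "3", "active": "True", "tags": "['a', 'b']", "note": "None"}]
keys = {"count": "int", "active": "bool", "tags": "list", "note": "int"}
print(transpose_dict(rows, keys))
# [{'count': 3, 'active': True, 'tags': ['a', 'b'], 'note': None}]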
def assert_almost_equal(
    actual: numpy.float64,
    desired: float,
    decimal: int,
    err_msg: Literal["test #7"],
):
    """
    usage.scipy: 1
    """
    ...
5,344,523
def aligner_to_symbol(calls):
    """
    Assign symbols to different aligners in the input file
    Set the attribute of the class instances
    return a list of indices for which each aligner is found uniquely
    and all aligners sorted by aligners
    """
    symbols = ['o', '+', 'x', 'v', '*', 'D', 's', 'p', '8', 'X']
    aligners = sorted(set([c.aligner for c in calls]), reverse=True)
    aligner_to_symbol_dict = {a: s for a, s in zip(aligners, symbols)}
    for c in calls:
        c.shape = aligner_to_symbol_dict[c.aligner]
    index_and_aligners = zip([[c.aligner for c in calls].index(i) for i in aligners], aligners)
    return zip(*sorted(index_and_aligners, key=lambda x: x[1]))
5,344,524
def timestamped_filename(line):
    """Given a line like '.... filename <timestamp>', return filename."""
    m = re_timestamped_line.search(line)
    if m:
        return m.group("filename")
    else:
        print >> sys.stderr, "Error: could not find filename in:", line
        return None
5,344,525
def nfvi_create_subnet(network_uuid, subnet_name, ip_version, subnet_ip,
                       subnet_prefix, gateway_ip, dhcp_enabled, callback):
    """
    Create a subnet
    """
    cmd_id = _network_plugin.invoke_plugin('create_subnet', network_uuid,
                                           subnet_name, ip_version,
                                           subnet_ip, subnet_prefix,
                                           gateway_ip, dhcp_enabled,
                                           callback=callback)
    return cmd_id
5,344,526
def download_mnistf(path):
    """Download fashion MNIST data in gzip format

    Arguments
        path: path where to cache the dataset locally
    """
    ## TODO: os.path does not work on GCS storage files.
    if not os.path.exists(path):
        os.makedirs(path)
    for file in MNISTF_FILES:
        source_path = os.path.join(MNISTF_REPO, file + '.gz')
        target_path = os.path.join(path, file + '.gz')
        if not os.path.exists(target_path):
            print('{}: downloading'.format(file))
            r = requests.get(source_path)
            open(target_path, 'wb').write(r.content)
5,344,527
def convertCRS(powerplants, substations, towers, crs, grid):
    """
    Reproject the substation and tower GeoDataFrames to the given CRS.

    :param powerplants:
    :param substations:
    :param towers:
    :param crs:
    :param grid:
    :return:
    """
    substations = substations.to_crs(crs)
    # powerplants = powerplants.set_crs(crs)
    # powerplants = powerplants.to_crs(crs)
    # print(powerplants.crs)
    towers = towers.to_crs(crs)
    return (substations, powerplants, towers, grid)
5,344,528
def cleanup_old_versions(src, keep_last_versions): """Deletes old deployed versions of the function in AWS Lambda. Won't delete $Latest and any aliased version :param str src: The path to your Lambda ready project (folder must contain a valid config.yaml and handler module (e.g.: service.py). :param int keep_last_versions: The number of recent versions to keep and not delete """ if keep_last_versions <= 0: print("Won't delete all versions. Please do this manually") else: path_to_config_file = os.path.join(src, 'config.yaml') cfg = read(path_to_config_file, loader=yaml.load) aws_access_key_id = cfg.get('aws_access_key_id') aws_secret_access_key = cfg.get('aws_secret_access_key') client = get_client('lambda', aws_access_key_id, aws_secret_access_key, cfg.get('region')) response = client.list_versions_by_function( FunctionName=cfg.get("function_name") ) versions = response.get("Versions") if len(response.get("Versions")) < keep_last_versions: print("Nothing to delete. (Too few versions published)") else: version_numbers = [elem.get("Version") for elem in versions[1:-keep_last_versions]] for version_number in version_numbers: try: client.delete_function( FunctionName=cfg.get("function_name"), Qualifier=version_number ) except botocore.exceptions.ClientError as e: print("Skipping Version {}: {}" .format(version_number, e.message))
5,344,529
def get_pathway(page_name, end_pg, max_len, trail, paths):
    """
    Finds a list of all paths from a starting wikipedia page to an end page

    Assumes page_name is a valid wikipedia article title and end_pg is a valid
    Wikipedia Page Object

    Args:
        page_name: (Str) The name of the current article
        end_pg: (Wikipedia Page) The page the path should end at
        max_len: (Int) The number of maximum steps between the start page and
            the end page
        trail: (List) The current path being searched
        paths: (List) A set of all the paths between the starting page and the
            end page

    Returns nothing but appends to the given list of paths
    """
    trail.append(page_name)  # add the current page to the current trail

    # Check if the page has the end page as a link and
    # add it to the list of paths
    if h.has_end(page_name, end_pg):
        # if the page contains a link to the end page
        # add the end page to the trail, and add the trail to the paths list
        trail.append(end_pg.title)
        paths.append(trail)
        print(f"**Pathway {len(paths)}**: {h.plot_path(trail)}")
        return None
    # if the trail is above the maximum length return none
    elif max_len <= 1:
        print(f"Not a path: {trail}")
        return None
    else:
        # Check each of the links in the page
        # Continue branching looking for the end
        for link in h.get_links(page_name):
            if link not in trail:
                if h.is_page(link):
                    get_pathway(link, end_pg, max_len - 1, trail[:], paths)
5,344,530
def message(level, sender, text):
    """Print a message using NEST's message system.

    Parameters
    ----------
    level :
        Level
    sender :
        Message sender
    text : str
        Text to be sent in the message
    """
    sps(level)
    sps(sender)
    sps(text)
    sr('message')
5,344,531
def clip_to_ndc(point_clip_space, name="clip_to_ndc"): """Transforms points from clip to normalized device coordinates (ndc). Note: In the following, A1 to An are optional batch dimensions. Args: point_clip_space: A tensor of shape `[A1, ..., An, 4]`, where the last dimension represents points in clip space. name: A name for this op. Defaults to "clip_to_ndc". Raises: ValueError: If `point_clip_space` is not of size 4 in its last dimension. Returns: A tensor of shape `[A1, ..., An, 3]`, containing `point_clip_space` in normalized device coordinates. """ with tf.name_scope(name): point_clip_space = tf.convert_to_tensor(value=point_clip_space) shape.check_static( tensor=point_clip_space, tensor_name="point_clip_space", has_dim_equals=(-1, 4)) w = point_clip_space[..., -1:] return point_clip_space[..., :3] / w
5,344,532
def augment_docs_with_tracking_info(docs, user):
    """Add attribute to each document with whether the document is tracked by the user or not."""
    tracked = set()
    if user and user.is_authenticated:
        clist = CommunityList.objects.filter(user=user).first()
        if clist:
            tracked.update(docs_tracked_by_community_list(clist)
                           .filter(pk__in=docs).values_list("pk", flat=True))
    for d in docs:
        d.tracked_in_personal_community_list = d.pk in tracked
5,344,533
def korrektur(wordfile, datei):
    """Patch built from corrected entries"""
    if not datei:
        datei = 'korrektur.todo'
    teste_datei(datei)
    korrekturen = {}
    for line in open(datei, 'r'):
        if line.startswith('#'):
            continue
        # decode and strip the line ending
        line = line.decode('utf8').strip()
        if not line:
            continue
        # complete the entry if necessary
        if u';' not in line:
            line = u'%s;%s' % (join_word(line), line)
        entry = WordEntry(line)
        key = entry[0]
        entry.regelaenderungen()  # test for things like s-t/-st
        korrekturen[key] = entry
    wortliste = list(wordfile)
    wortliste_neu = []  # corrected list
    for entry in wortliste:
        key = entry[0]
        if key in korrekturen:
            entry = korrekturen.pop(key)
        wortliste_neu.append(entry)
    if korrekturen:
        print korrekturen  # remaining entries
    return (wortliste, wortliste_neu)
5,344,534
def read_number(dtype, prompt='', floor=None, ceil=None, repeat=False):
    """
    Reads a number within specified bounds.
    """
    while True:
        try:
            result = dtype(input(prompt))
            if floor is not None and result < floor:
                raise ValueError(f'Number must be no less than {floor}.')
            if ceil is not None and result > ceil:
                raise ValueError(f'Number must be no greater than {ceil}.')
        except ValueError as e:
            print(e)
            result = None
        if result is not None or not repeat:
            return result
5,344,535
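An illustrative call to read_number above, re-prompting until the user enters an integer in range.

choice = read_number(int, prompt="Pick a number 1-10: ", floor=1, ceil=10, repeat=True)
print(f"You picked {choice}")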
def dem_adjust( da_elevtn: xr.DataArray, da_flwdir: xr.DataArray, da_rivmsk: Optional[xr.DataArray] = None, flwdir: Optional[pyflwdir.FlwdirRaster] = None, connectivity: int = 4, river_d8: bool = False, logger=logger, ) -> xr.DataArray: """Returns hydrologically conditioned elevation. The elevation is conditioned to D4 (`connectivity=4`) or D8 (`connectivity=8`) flow directions based on the algorithm described in Yamazaki et al. [1]_ The method assumes the original flow directions are in D8. Therefore, if `connectivity=4`, an intermediate D4 conditioned elevation raster is derived first, based on which new D4 flow directions are obtained used to condition the original elevation. Parameters ---------- da_elevtn, da_flwdir, da_rivmsk : xr.DataArray elevation [m+REF] D8 flow directions [-] binary river mask [-], optional flwdir : pyflwdir.FlwdirRaster, optional D8 flow direction raster object. If None it is derived on the fly from `da_flwdir`. connectivity: {4, 8} D4 or D8 flow connectivity. river_d8 : bool If True and `connectivity==4`, additionally condition river cells to D8. Requires `da_rivmsk`. Returns ------- xr.Dataset Dataset with hydrologically adjusted elevation ('elevtn') [m+REF] References ---------- .. [1] Yamazaki et al. (2012). Adjustment of a spaceborne DEM for use in floodplain hydrodynamic modeling. Journal of Hydrology, 436-437, 81–91. https://doi.org/10.1016/j.jhydrol.2012.02.045 See Also -------- pyflwdir.FlwdirRaster.dem_adjust pyflwdir.FlwdirRaster.dem_dig_d4 """ # get flow directions for entire domain and for rivers if flwdir is None: flwdir = flwdir_from_da(da_flwdir, mask=False) if connectivity == 4 and river_d8 and da_rivmsk is None: raise ValueError('Provide "da_rivmsk" in combination with "river_d8"') elevtn = da_elevtn.values nodata = da_elevtn.raster.nodata logger.info(f"Condition elevation to D{connectivity} flow directions.") # get D8 conditioned elevation elevtn = flwdir.dem_adjust(elevtn) # get D4 conditioned elevation (based on D8 conditioned!) if connectivity == 4: rivmsk = da_rivmsk.values == 1 if da_rivmsk is not None else None # derive D4 flow directions with forced pits at original locations d4 = pyflwdir.dem.fill_depressions( elevtn=flwdir.dem_dig_d4(elevtn, rivmsk=rivmsk, nodata=nodata), nodata=nodata, connectivity=connectivity, idxs_pit=flwdir.idxs_pit, )[1] # condition the DEM to the new D4 flow dirs flwdir_d4 = pyflwdir.from_array( d4, ftype="d8", transform=flwdir.transform, latlon=flwdir.latlon ) elevtn = flwdir_d4.dem_adjust(elevtn) # condition river cells to D8 if river_d8: flwdir_river = flwdir_from_da(da_flwdir, mask=rivmsk) elevtn = flwdir_river.dem_adjust(elevtn) # assert np.all((elv2 - flwdir_d4.downstream(elv2))>=0) # save to dataarray da_out = xr.DataArray( data=elevtn, coords=da_elevtn.raster.coords, dims=da_elevtn.raster.dims, ) da_out.raster.set_nodata(nodata) da_out.raster.set_crs(da_elevtn.raster.crs) return da_out
5,344,536
def trainModel(label,bestModel,obs,trainSet,testSet,modelgrid,cv,optMetric='auc'): """ Train a message classification model """ from copy import copy from numpy import zeros, unique from itertools import product pred = zeros(len(obs)) fullpred = zeros((len(obs),len(unique(obs)))) model = copy(bestModel.model) #find the best model via tuning grid for tune in [dict(list(zip(modelgrid, v))) for v in product(*list(modelgrid.values()))]: for k in list(tune.keys()): setattr(model,k,tune[k]) i = 0 for tr, vl in cv: model.fit(trainSet.ix[tr].values,obs[tr]) pred[vl] = model.predict_proba(trainSet.ix[vl].values)[:,1] fullpred[vl,:] = model.predict_proba(trainSet.ix[vl].values) i += 1 bestModel.updateModel(pred,fullpred,obs,model,trainSet.columns.values,tune,optMetric=optMetric) #re-train with all training data bestModel.model.fit(trainSet.values,obs) print(bestModel) return {label: {'pred': pred, 'test_pred':bestModel.model.predict_proba(testSet)[:,1]}}
5,344,537
def vm_deploy(vm, force_stop=False): """ Internal API call used for finishing VM deploy; Actually cleaning the json and starting the VM. """ if force_stop: # VM is running without OS -> stop cmd = 'vmadm stop %s -F >/dev/null 2>/dev/null; vmadm get %s 2>/dev/null' % (vm.uuid, vm.uuid) else: # VM is stopped and deployed -> start cmd = 'vmadm start %s >/dev/null 2>/dev/null; vmadm get %s 2>/dev/null' % (vm.uuid, vm.uuid) msg = 'Deploy server' lock = 'vmadm deploy ' + vm.uuid meta = { 'output': { 'returncode': 'returncode', 'stderr': 'message', 'stdout': 'json' }, 'replace_stderr': ((vm.uuid, vm.hostname),), 'msg': msg, 'vm_uuid': vm.uuid } callback = ('api.vm.base.tasks.vm_deploy_cb', {'vm_uuid': vm.uuid}) return execute(ERIGONES_TASK_USER, None, cmd, meta=meta, lock=lock, callback=callback, queue=vm.node.fast_queue, nolog=True, ping_worker=False, check_user_tasks=False)
5,344,538
def get_cpuinfo():
    """Returns the flags of the processor."""
    if sys.platform == 'darwin':
        return platforms.osx.get_cpuinfo()
    if sys.platform == 'win32':
        return platforms.win.get_cpuinfo()
    if sys.platform == 'linux2':
        return platforms.linux.get_cpuinfo()
    return {}
5,344,539
def previous_analytics(request, package, id): """ Return a list of previous analytics for the given package. Only shows analytics which the user can access. Also limits to the last 100 of them! """ context = [] profile = request.user.get_profile() #TODO: this code block needs to go into a separate method # together with the cut-off logic in _appdetails_get_objects_fast() if profile.is_subscribed(): if (profile.get_subscription_plan_name() == 'Beaker'): # show everything cut_off = datetime.now() else: # show everything older than one week cut_off = datetime.now() - timedelta(days=7) else: # show everything older than one month cut_off = datetime.now() - timedelta(days=30) #TODO: this query can be very slow if there are # large number of previous analytics available for adv in Advisory.objects.filter( status=STATUS_LIVE, old__package=id, new__released_on__lte=cut_off, ).order_by( '-new__released_on', '-old__released_on' )[:100]: context.append( { 'name' : adv.__unicode__(), 'url' : adv.get_full_path(), } ) return render( request, 'previous_analytics.html', { 'context' : context } )
5,344,540
def get_monitor_value(image, monitor_key):
    """Return the monitor value from an image using a header key.

    :param fabio.fabioimage.FabioImage image: Image containing the header
    :param str monitor_key: Key containing the monitor
    :return: returns the monitor else returns 1.0
    :rtype: float
    """
    if monitor_key is None or monitor_key == "":
        return 1.0
    try:
        monitor = header_utils.get_monitor_value(image, monitor_key)
        return monitor
    except header_utils.MonitorNotFound:
        logger.warning("Monitor %s not found. No normalization applied.", monitor_key)
        return 1.0
    except Exception as e:
        logger.warning("Fail to load monitor. No normalization applied. %s", str(e))
        return 1.0
5,344,541
def resources(request): """ Page for accessing RMG resources, including papers and presentations """ folder = os.path.join(settings.STATIC_ROOT, 'presentations') files = [] if os.path.isdir(folder): files = os.listdir(folder) toRemove = [] for f in files: if not os.path.isfile(os.path.join(folder, f)): # Remove any directories toRemove.append(f) elif f[0] == '.': # Remove any hidden files toRemove.append(f) for item in toRemove: files.remove(item) # Parse file names for information to display on webpage presentations = [] if files: files.sort() for f in files: name = os.path.splitext(f)[0] parts = name.split('_') date = parts[0] date = date[0:4] + '-' + date[4:6] + '-' + date[6:] title = ' '.join(parts[1:]) title = title.replace('+', ' and ') presentations.append((title, date, f)) return render(request, 'resources.html', {'presentations': presentations})
5,344,542
def plot_data(t, r, v, a, j, unit, legenda, ax): """Plot kinematics of minimum jerk trajectories.""" try: import matplotlib.pyplot as plt except ImportError: print('matplotlib is not available.') return if ax is None: _, ax = plt.subplots(1, 4, sharex=True, figsize=(10, 3)) ax[0].plot(t, r) ax[0].set_title(r'Displacement [$\mathrm{%s}$]'%unit) ax[0].legend(legenda, framealpha=.5, loc='best') ax[1].plot(t, v) ax[1].set_title(r'Velocity [$\mathrm{%s/s}$]'%unit) ax[2].plot(t, a) ax[2].set_title(r'Acceleration [$\mathrm{%s/s^2}$]'%unit) ax[3].plot(t, j) ax[3].set_title(r'Jerk [$\mathrm{%s/s^3}$]'%unit) for i, axi in enumerate(ax.flat): axi.set_xlabel(r'Time [$s$]') axi.xaxis.set_major_locator(plt.MaxNLocator(4)) axi.yaxis.set_major_locator(plt.MaxNLocator(4)) plt.tight_layout() plt.show()
5,344,543
def cmd(ctx, url, key, secret, whitelist=None, **kwargs):
    """Get assets using a saved query."""
    client = ctx.obj.start_client(url=url, key=key, secret=secret)

    kwargs["report_software_whitelist"] = load_whitelist(whitelist)

    p_grp = ctx.parent.command.name
    apiobj = getattr(client, p_grp)

    with ctx.obj.exc_wrap(wraperror=ctx.obj.wraperror):
        apiobj.get_by_saved_query(**kwargs)
5,344,544
def _log_histograms(writer: tensorboard.SummaryWriter,
                    model: models.NerfModel,
                    state: model_utils.TrainState):
    """Log histograms to Tensorboard."""
    step = int(state.optimizer.state.step)
    params = state.optimizer.target['model']
    if 'appearance_encoder' in params:
        embeddings = params['appearance_encoder']['embed']['embedding']
        writer.histogram('appearance_embedding', embeddings, step)
    if 'camera_encoder' in params:
        embeddings = params['camera_encoder']['embed']['embedding']
        writer.histogram('camera_embedding', embeddings, step)
    if 'warp_field' in params and model.warp_metadata_encoder_type == 'glo':
        embeddings = params['warp_field']['metadata_encoder']['embed']['embedding']
        writer.histogram('warp_embedding', embeddings, step)
5,344,545
def default_thread_index(value, threads):
    """
    Find the index of the given value in the threads list.

    :param value:
    :param threads:
    :return:
    """
    value_index = threads.index(value)
    return value_index
5,344,546
def _create_sky_model(sky_file, ra, dec, stokes_i_flux): """Create an OSKAR sky model. Args: sky_file (string): filename path of the sky model being created. ra (float list): Right ascension, in degrees, of sources to put in the sky model. dec (float list): Declination, in degrees, of sources to put in the sky model. stokes_i_flux (float list): Stokes-I flux, in Jy, of sources to put in the sky model. """ if not os.path.isdir(os.path.dirname(sky_file)): os.makedirs(os.path.dirname(sky_file)) fh = open(sky_file, 'w') for ra_, dec_, I_ in zip(ra, dec, stokes_i_flux): fh.write('%.14f, %.14f, %.3f\n' % (ra_, dec_, I_)) fh.close()
5,344,547
def delay_fast_forward_until_set(event):
    """adds the given event to a set that will delay fast_forwards until they are set
    (does not need to be removed, as it's a weak ref)"""
    _virtual_time_state.acquire()
    try:
        _fast_forward_delay_events.add(event)
    finally:
        _virtual_time_state.release()
5,344,548
def new_things(url):
    """Attempts to register new things on the directory

    Takes 1 argument:
    url - URL containing thing descriptions to register
    """
    response = requests.post('{}/things/register_url'.format(settings.THING_DIRECTORY_HOST),
                             headers={
                                 'Authorization': settings.THING_DIRECTORY_KEY,
                             }, json={'url': url})
    response.raise_for_status()
    return response.json()['uuids']
5,344,549
def isID(value):
    """Checks if value looks like a Ulysses ID; i.e. is 22 characters long.

    Not an exact science, but good enough to prevent most mistakes.
    """
    return len(value) == 22
5,344,550
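Quick illustration of isID above; the 22-character string is a made-up example, not a real Ulysses ID.

print(isID("hZ7tR2mQv9XcL5nB8pYwKd"))  # True  (22 characters)
print(isID("not-an-id"))               # False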
def tool_on_path(tool: str) -> str:
    """
    Helper function to determine if a given tool is on the user's PATH variable.
    Wraps around runspv.tool_on_path().

    :param tool: the tool's filename to look for.
    :return: the path of the tool, else ToolNotOnPathError if the tool isn't on the PATH.
    """
    return runspv.tool_on_path(tool)
5,344,551
def DefineDecode(i, n, invert=False): """ Decode the n-bit number i. @return: 1 if the n-bit input equals i """ class _Decode(Circuit): name = 'Decode_{}_{}'.format(i, n) IO = ['I', In(Bits[ n ]), 'O', Out(Bit)] @classmethod def definition(io): if n <= 8: j = 1 << i if invert: m = 1 << n mask = (1 << m) - 1 j = mask & (~j) decode = ROMN(j, n) else: nluts = (n + 3) // 4 data = nluts * [0] for j in range(nluts): data[j] = (i >> 4*j) & 0xf # 4-bit pieces decode = FlatHalfCascade(n, 4, data, ZERO, 1) wire(io.I, decode.I) wire(decode.O, io.O) return _Decode
5,344,552
def absent(name, database, **client_args): """ Ensure that given continuous query is absent. name Name of the continuous query to remove. database Name of the database that the continuous query was defined on. """ ret = { "name": name, "changes": {}, "result": True, "comment": "continuous query {0} is not present".format(name), } if __salt__["influxdb.continuous_query_exists"](database, name, **client_args): if __opts__["test"]: ret["result"] = None ret["comment"] = ( "continuous query {0} is present and needs to be removed" ).format(name) return ret if __salt__["influxdb.drop_continuous_query"](database, name, **client_args): ret["comment"] = "continuous query {0} has been removed".format(name) ret["changes"][name] = "Absent" return ret else: ret["comment"] = "Failed to remove continuous query {0}".format(name) ret["result"] = False return ret return ret
5,344,553
def save_graph(path, G, pos): """ Saves a networkx graph in a json file and its nodal layoput for plotting in npz to be loaded later with `nnc.helpers.graph_helpers.load_graph) method. :param path: The path to the file :param G: The `networkx.Graph` object :param pos: The position dictionary with format `node_id : [x, y]` denoting note index and layout coordinates. :return: None """ pos = {x: tuple(y.tolist()) for x, y in pos.items()} nx.set_node_attributes(G, values=pos, name='pos') graph_json = json_graph.node_link_data(G) graph_path = os.path.join(path, 'graph.json') os.makedirs(path, exist_ok=True) with open(graph_path, "w+") as f1: json.dump(graph_json, f1) pos_path = os.path.join(path, 'pos.npz') np.savez(pos_path, pos)
5,344,554
def get_ads(client, customer_id, new_ad_resource_names): """Retrieves a google.ads.google_ads.v4.types.AdGroupAd instance. Args: client: A google.ads.google_ads.client.GoogleAdsClient instanc e. customer_id: (str) Customer ID associated with the account. new_ad_resource_names: (str) Resource name associated with the Ad group. Returns: An instance of the google.ads.google_ads.v4.types.AdGroupAd message class of the newly created ad group ad. """ def formatter(given_string): """This helper function is used to assign ' ' to names of resources so that this formatted string can be used within an IN clause. Args: given_string: (str) The string to be formatted. """ results = [] for i in given_string: results.append(repr(i)) return ','.join(results) resouce_names = formatter(new_ad_resource_names) ga_service = client.get_service('GoogleAdsService', version='v4') query = ('SELECT ad_group_ad.ad.id, ' 'ad_group_ad.ad.expanded_text_ad.headline_part1, ' 'ad_group_ad.ad.expanded_text_ad.headline_part2, ' 'ad_group_ad.status, ad_group_ad.ad.final_urls, ' 'ad_group_ad.resource_name ' 'FROM ad_group_ad ' 'WHERE ad_group_ad.resource_name in ({}) '. format(resouce_names)) response = ga_service.search(customer_id, query, PAGE_SIZE) response =iter(response) ads = [] while response: try: current_row = next(response) ads.append(current_row.ad_group_ad) except StopIteration: break return ads
5,344,555
def corrfact_vapor_rosolem(h, h_ref=None, const=0.0054): """Correction factor for vapor correction from absolute humidity (g/m3). The equation was suggested by Rosolem et al. (2013). If no reference value for absolute humidity ``h_ref`` is provided, the average value will be used. Parameters ---------- h : float or array of floats Absolute humidity (g / m3) h_ref : float Reference value for absolute humidity const : float Empirical constant, defaults to 0.0054 Returns ------- output : float or array of floats Correction factor for water vapor effect (dimensionless) """ if h_ref is None: h_ref = np.mean(h) return 1 + const * (h - h_ref)
5,344,556
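A short example for corrfact_vapor_rosolem above, using illustrative humidity values.

import numpy as np

h = np.array([5.0, 10.0, 15.0])            # absolute humidity in g/m3
print(corrfact_vapor_rosolem(h))           # h_ref defaults to mean(h)=10 -> ~[0.973, 1.0, 1.027]
print(corrfact_vapor_rosolem(h, h_ref=0))  # fixed reference -> ~[1.027, 1.054, 1.081]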
def sine_ease_out(p):
    """Modeled after quarter-cycle of sine wave (different phase)"""
    return sin(p * tau / 4)
5,344,557
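Sampling the quarter-cycle form of sine_ease_out above, assuming sin and tau (2*pi) come from the math module as the snippet implies.

from math import sin, tau

for p in (0.0, 0.25, 0.5, 1.0):
    print(p, round(sine_ease_out(p), 3))  # 0.0, 0.383, 0.707, 1.0 -- fast start, gentle finish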
def _extract_codes_from_element_text(dataset, parent_el_xpath, condition=None): # pylint: disable=invalid-name """Extract codes for checking from a Dataset. The codes are being extracted from element text. Args: dataset (iati.data.Dataset): The Dataset to check Codelist values within. parent_el_xpath (str): An XPath to locate the element(s) with the attribute of interest. condition (str): An optional XPath expression to limit the scope of what is extracted. Returns: list of tuple: A tuple in the format: `(str, int)` - The `str` is a matching code from within the Dataset; The `int` is the sourceline at which the parent element is located. """ # include the condition if condition: parent_el_xpath = parent_el_xpath + '[' + condition + ']' parents_to_check = dataset.xml_tree.xpath(parent_el_xpath) located_codes = list() for parent in parents_to_check: located_codes.append((parent.text, parent.sourceline)) return located_codes
5,344,558
def test_gram_intercept_constrained_projection():
    """Constrained projection should error"""
    X = jax.random.uniform(random.generate_key(), shape=(5, 10))
    XTX = OnlineGram(10)
    XTX.update(X)

    with pytest.raises(ValueError):
        XTX.fit_intercept(projection=1, input_dim=2)
5,344,559
def rotate_im(img, angle, interpolation=cv2.INTER_LINEAR, border_mode=cv2.BORDER_REFLECT_101, value=None): """Rotate the image. Rotate the image such that the rotated image is enclosed inside the tightest rectangle. The area not occupied by the pixels of the original image is colored black. Parameters ---------- image : numpy.ndarray numpy image angle : float angle by which the image is to be rotated Returns ------- numpy.ndarray Rotated Image """ # grab the dimensions of the image and then determine the # centre (h, w) = img.shape[:2] (cX, cY) = (w // 2, h // 2) # grab the rotation matrix (applying the negative of the # angle to rotate clockwise), then grab the sine and cosine # (i.e., the rotation components of the matrix) M = cv2.getRotationMatrix2D((cX, cY), angle, 1.0) cos = np.abs(M[0, 0]) sin = np.abs(M[0, 1]) # compute the new bounding dimensions of the image nW = int((h * sin) + (w * cos)) nH = int((h * cos) + (w * sin)) # adjust the rotation matrix to take into account translation M[0, 2] += (nW / 2) - cX M[1, 2] += (nH / 2) - cY warp_fn = _maybe_process_in_chunks( cv2.warpAffine, M=M, dsize=(nW, nH), flags=interpolation, borderMode=border_mode, borderValue=value ) return warp_fn(img)
5,344,560
def list_modules(curdir=CURDIR, pattern=MOD_FILENAME_RE):
    """List names from {ok,ng}*.py.
    """
    return sorted(
        m.name.replace('.py', '')
        for m in curdir.glob('*.py') if pattern.match(m.name)
    )
5,344,561
def save_model_h5py(model, save_path="model_save"):
    """saves a keras model to h5py file (also makes the 'model_save/' directory
    if it wasn't there before)

    :param model: model to save
    :param save_path: path where to save model, starting from the directory of
        the simulation (default "model_save")"""
    # directory
    if not os.path.exists(save_path):
        os.makedirs(save_path)
    # serialize model to JSON
    model_json = model.to_json()
    with open(save_path + "/model.json", "w") as json_file:
        json_file.write(model_json)
    # serialize weights to HDF5
    model.save_weights(save_path + "/model.h5")
5,344,562
def main(argv=None):
    """Process the job."""
    argv = sys.argv[1:] if argv is None else argv
    cyclonedx_lint.main(argv)
5,344,563
def encode_set_validator_config_and_reconfigure_script( validator_account: AccountAddress, consensus_pubkey: bytes, validator_network_addresses: bytes, fullnode_network_addresses: bytes, ) -> Script: """# Summary Updates a validator's configuration, and triggers a reconfiguration of the system to update the validator set with this new validator configuration. Can only be successfully sent by a Validator Operator account that is already registered with a validator. # Technical Description This updates the fields with corresponding names held in the `ValidatorConfig::ValidatorConfig` config resource held under `validator_account`. It then emits a `DiemConfig::NewEpochEvent` to trigger a reconfiguration of the system. This reconfiguration will update the validator set on-chain with the updated `ValidatorConfig::ValidatorConfig`. # Parameters | Name | Type | Description | | ------ | ------ | ------------- | | `validator_operator_account` | `&signer` | Signer reference of the sending account. Must be the registered validator operator for the validator at `validator_address`. | | `validator_account` | `address` | The address of the validator's `ValidatorConfig::ValidatorConfig` resource being updated. | | `consensus_pubkey` | `vector<u8>` | New Ed25519 public key to be used in the updated `ValidatorConfig::ValidatorConfig`. | | `validator_network_addresses` | `vector<u8>` | New set of `validator_network_addresses` to be used in the updated `ValidatorConfig::ValidatorConfig`. | | `fullnode_network_addresses` | `vector<u8>` | New set of `fullnode_network_addresses` to be used in the updated `ValidatorConfig::ValidatorConfig`. | # Common Abort Conditions | Error Category | Error Reason | Description | | ---------------- | -------------- | ------------- | | `Errors::NOT_PUBLISHED` | `ValidatorConfig::EVALIDATOR_CONFIG` | `validator_address` does not have a `ValidatorConfig::ValidatorConfig` resource published under it. | | `Errors::REQUIRES_ROLE` | `Roles::EVALIDATOR_OPERATOR` | `validator_operator_account` does not have a Validator Operator role. | | `Errors::INVALID_ARGUMENT` | `ValidatorConfig::EINVALID_TRANSACTION_SENDER` | `validator_operator_account` is not the registered operator for the validator at `validator_address`. | | `Errors::INVALID_ARGUMENT` | `ValidatorConfig::EINVALID_CONSENSUS_KEY` | `consensus_pubkey` is not a valid ed25519 public key. | | `Errors::INVALID_STATE` | `DiemConfig::EINVALID_BLOCK_TIME` | An invalid time value was encountered in reconfiguration. Unlikely to occur. | # Related Scripts * `Script::create_validator_account` * `Script::create_validator_operator_account` * `Script::add_validator_and_reconfigure` * `Script::remove_validator_and_reconfigure` * `Script::set_validator_operator` * `Script::set_validator_operator_with_nonce_admin` * `Script::register_validator_config` """ return Script( code=SET_VALIDATOR_CONFIG_AND_RECONFIGURE_CODE, ty_args=[], args=[ TransactionArgument__Address(value=validator_account), TransactionArgument__U8Vector(value=consensus_pubkey), TransactionArgument__U8Vector(value=validator_network_addresses), TransactionArgument__U8Vector(value=fullnode_network_addresses), ], )
5,344,564
def _do_ecf_reference_data_import( import_method, widget, logwidget=None, specification_items=None, ecfdate=None, datecontrol=None, ): """Import a new ECF club file. widget - the manager object for the ecf data import tab """ ecffile = widget.datagrid.get_data_source().dbhome # The commented code fails if tkinter is compiled without --enable-threads # as in OpenBSD 5.7 i386 packages. The standard build from FreeBSD ports # until early 2015 at least, when this change was introduced, is compiled # with --enable-threads so the commented code worked. Not sure if the # change in compiler on FreeBSD from gcc to clang made a difference. The # Microsoft Windows' Pythons seem to be compiled with --enable-threads # because the commented code works in that environment. The situation on # OS X, and any GNU-Linux distribution, is not known. # Comparison with the validate_and_copy_ecf_ogd_players_post_2006_rules() # method in the sibling module sqlite3ecfogddataimport, which worked on # OpenBSD 5.7 as it stood, highlighted the changes needed. # ecfdate = widget.get_ecf_date() if not ecffile: return False if not ecfdate: return False results = widget.get_appsys().get_results_database() if not results: return False results.do_database_task( import_method, logwidget=logwidget, taskmethodargs=dict( ecffile=ecffile, ecfdate=ecfdate, parent=widget.get_widget(), # datecontrol=widget.ecfdatecontrol.get(), datecontrol=datecontrol, # See --enable-threads comment just above. ), use_specification_items=specification_items, ) return True
5,344,565
def ast_operators(node): """Return a set of all operators and calls in the given AST, or return an error if any are invalid.""" if isinstance(node, (ast.Name, ast.Constant)): return set() elif isinstance(node, ast.BinOp): return {type(node.op)} | ast_operators(node.left) | ast_operators(node.right) elif isinstance(node, ast.UnaryOp): return {type(node.op)} | ast_operators(node.operand) elif isinstance(node, ast.Call): if node.func.id not in METRIC_OPS: raise ValueError(f"Unknown fn `{node.func.id}` in metric equation.") # Make sure the number of args matches the fn signature fn_argspec = inspect.getfullargspec(METRIC_OPS[node.func.id]) if (not node.args or (fn_argspec.varargs is None and fn_argspec.varkw is None and len(node.args) != len(fn_argspec.args))): raise ValueError(f"Unexpected number of args to {node.func.id}") return {node.func.id}.union(*(ast_operators(arg) for arg in node.args)) else: raise TypeError(node)
5,344,566
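# Illustrative check with ast_operators above: parse a metric expression in eval
# mode and collect the operator types it uses. No function calls appear here, so
# METRIC_OPS is not consulted; a Call node would additionally need its name
# registered there.
import ast

expr = ast.parse("precision + recall / 2", mode="eval")
print(ast_operators(expr.body))  # {<class 'ast.Add'>, <class 'ast.Div'>}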
def validate_k(data, k):
    """
    Validates that the number of folds is valid for the given data.

    :param pd.DataFrame data: the dataset to be split into folds
    :param int k: the number of folds
    :raises InvalidFoldError: if the number of folds is not supported by the data
    """
    if k > min(data.shape):
        raise InvalidFoldError()
5,344,567
async def async_turn_on(hass, entity_id, speed=None): """Turn fan on.""" data = { key: value for key, value in [ (ATTR_ENTITY_ID, entity_id), (ATTR_SPEED, speed), ] if value is not None } await hass.services.async_call( DOMAIN, SERVICE_TURN_ON, data, blocking=True)
5,344,568
def dump_json(containers): """ Function to output the autoprotocol json instructions """ for key in containers: with open("json\\" + containers[key].barcode + ".json", 'w') as json_file: json.dump(containers[key].p.as_dict(), json_file, indent=2)
5,344,569
def calibrate_clock(out, tolerance=0.002, dcor=False):
    """\
    currently for F2xx only: recalculate the clock calibration values and write
    them to the flash.
    """
    device = get_msp430_type() >> 8
    variables = {}
    if device == 0xf2:
        # first read the segment from the device, so that only the calibration values
        # are updated; any other data in SegmentA is left unchanged.
        segment_a = memory.Memory()
        segment_a.append(memory.Segment(0x10c0, jtag._parjtag.memread(0x10c0, 64)))
        # get the settings for all the frequencies
        for frequency in calibvalues_memory_map:
            measured_frequency, dco, bcs1 = clock.setDCO(
                frequency * (1 - tolerance),
                frequency * (1 + tolerance),
                maxrsel=15,
                dcor=dcor
            )
            variables['f%dMHz_dcoctl' % (frequency / 1e6)] = TYPE_8BIT, dco
            variables['f%dMHz_bcsctl1' % (frequency / 1e6)] = TYPE_8BIT, bcs1
            out.write('BCS settings for %s: DCOCTL=0x%02x BCSCTL1=0x%02x\n' % (
                nice_frequency(measured_frequency), dco, bcs1)
            )
            segment_a.setMem(calibvalues_memory_map[frequency]['DCO'], chr(dco))
            segment_a.setMem(calibvalues_memory_map[frequency]['BCS1'], chr(bcs1))
        # erase segment and write new values
        jtag._parjtag.memerase(jtag.ERASE_SEGMENT, segment_a[0].startaddress)
        jtag._parjtag.memwrite(segment_a[0].startaddress, segment_a[0].data)
    else:
        raise NotImplementedError("--calibrate is not supported on %Xxx" % device)
    return variables
5,344,570
def print_dtypes(title: str, ds: pd.Series, show_numbers: bool = True) -> None:
    """
    Display dataframe dtypes (a pd.Series) on console output.
    """
    method = f"{inspect.currentframe().f_code.co_name}()"
    if isinstance(ds, pd.Series):
        count = 0
        print(f"\n{method} {title}")
        for key, val in ds.items():
            if show_numbers:
                print(f"\t{str(val):16}\t{key:48}\tcol_{count:02}")
            else:
                print(f"\t{str(val):16}\t{key:48}")
            count += 1
    else:
        print(f"{method} {FAILURE} invalid type {type(ds)}")
5,344,571
def emit_trinop(v1, v2, v3, v4, v5): """Emit trin op.""" for ii in range(0, flag_copies): sys.stdout.write(" %s%d = (%s%d - %s%d) ^ " "(%s%d + %s%d) ;\n" % (v1, ii, v2, ii, v3, ii, v4, ii, v5, ii))
5,344,572
def get_rejection_listings(username): """ Get Rejection Listings for a user Args: username (str): username for user """ activities = models.ListingActivity.objects.for_user(username).filter( action=models.ListingActivity.REJECTED) return activities
5,344,573
def quadratic_weighted_kappa(y_true, y_pred):
    """
    QWK (Quadratic Weighted Kappa) Score

    Args:
        y_true: target array.
        y_pred: prediction array; must contain discrete (integer) labels.

    Returns:
        QWK score
    """
    return cohen_kappa_score(y_true, y_pred, weights='quadratic')
5,344,574
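# Tiny sanity check for quadratic_weighted_kappa above (requires scikit-learn).
y_true = [0, 1, 2, 3, 2]
y_pred = [0, 1, 1, 3, 2]
print(quadratic_weighted_kappa(y_true, y_pred))  # about 0.91: a single adjacent-class error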
def statistika():
    """Update the data and render statistika.html."""
    check_user_id()
    data_manager.load_data_from_file()
    data_manager.data_for_stats()
    return bottle.template("statistika.html", data_manager=data_manager)
5,344,575
def value_iter_run(test_episode_num=1_000_000): """ Run value iteration algorithm. """ model = ValueIteration(env) model.value_iteration() model.test_policy(episode_num=test_episode_num) model.save_fig(fig_path) model.save_result(log_path)
5,344,576
def decrement(x): """Given a number x, returns x - 1 unless that would be less than zero, in which case returns 0.""" x -= 1 if x < 0: return 0 else: return x
5,344,577
def simple_page(httpserver, browser, simple_page_content): """Serve simple html page.""" httpserver.serve_content( simple_page_content, code=200, headers={'Content-Type': 'text/html'}) browser.visit(httpserver.url)
5,344,578
def get_dagmaf(maf: msa.Maf) -> DAGMaf.DAGMaf: """Converts MAF to DagMaf. Args: maf: MAF to be converted. Returns: DagMaf built from the MAF. """ sorted_blocks = sort_mafblocks(maf.filecontent) dagmafnodes = [ DAGMaf.DAGMafNode(block_id=b.id, alignment=b.alignment, orient=b.orient, order=b.order(), out_edges=b.out_edges) for b in sorted_blocks ] return DAGMaf.DAGMaf(dagmafnodes)
5,344,579
def optdat10(area, lpdva, ndvab, nglb):
    """Provides the data for the optimization problem."""
    # Objective function type: tpobj==1 --- Weight
    #                          tpobj==2 --- Energy
    #                          tpobj==3 --- Maximum stress
    #                          tpobj==4 --- Maximum displacement
    #
    tpobj = 1
    #
    # Constraint function type: tpres==1 --- Weight
    #                           tpres==2 --- Stress
    #                           tpres==3 --- Stress and displacement
    #                           tpres==4 --- Displacement
    #                           tpres==5 --- Energy
    tpres = 2
    #
    # Bounds on the design variables
    #   vlb --- lower bounds
    #   vub --- upper bounds
    #   x0  --- initial values
    #
    xpdva = np.zeros(ndvab)
    for idvab in range(ndvab):
        iel = lpdva[idvab]
        xpdva[idvab] = area[iel]
    x0 = xpdva
    vlb = 0.1 * np.ones(ndvab)
    vub = 10 * np.ones(ndvab)
    #
    # Bounds on the constraints
    #   clb --- lower bounds
    #   cub --- upper bounds
    cones = np.ones(len(area))  # one entry per element
    cones2 = np.ones(nglb)      # one entry per degree of freedom
    clb1 = -250 * cones
    cub1 = 250 * cones
    # clb1 = -20*cones
    # cub1 = 20*cones
    # dlb1 = -0.4*cones2
    # dub1 = 0.4*cones2
    clbv = 1.5e+06 - 2.2204e-16  # 0
    cubv = 1.5e+06
    clbd = -1 * (10 ** -3) * cones2
    cubd = 1 * (10 ** -3) * cones2
    elbv = 2e-2
    eubv = 2e-2
    if tpres == 1:  # VOLUME
        cub = cubv
        clb = clbv
    elif tpres == 2:  # STRESSES
        clb = clb1
        cub = cub1
    elif tpres == 3:  # STRESSES and DISPLACEMENTS
        clb = [clb1, clbd]
        cub = [cub1, cubd]
    elif tpres == 4:  # DISPLACEMENTS
        clb = clbd
        cub = cubd
    else:  # ENERGY
        clb = elbv
        cub = eubv

    dadosoptdat10 = [tpobj, tpres, vlb, vub, x0, clb, cub]
    return dadosoptdat10
5,344,580
def rotvec2quat(vec): """ A rotation vector is a 3 dimensional vector which is co-directional to the axis of rotation and whose norm gives the angle of rotation (in radians). Args: vec (list or np.ndarray): a rotational vector. Its norm represents the angle of rotation. Returns: np.ndarray: quaternion [x,y,z,w] (shape: :math:`[4,]`). """ r = R.from_rotvec(vec) return r.as_quat()
5,344,581
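# Example for rotvec2quat above: a rotation of pi/2 radians about the z-axis.
import numpy as np

print(rotvec2quat([0.0, 0.0, np.pi / 2]))
# -> approximately [0, 0, 0.7071, 0.7071] in [x, y, z, w] order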
def print_properties(props): """Print a ResourceGroup properties instance.""" if props and props.provisioning_state: print("\tProperties:") print("\t\tProvisioning State: {}".format(props.provisioning_state)) print("\n\n")
5,344,582
def generate_pop(pop_size, length):
    """
    Initialize the population.

    :param pop_size: population size
    :param length: encoding length (number of bits per chromosome)
    :return bin_population: binary-encoded population
    """
    decim_population = np.random.randint(0, 2**length-1, pop_size)
    print(decim_population)
    bin_population = [('{:0%sb}'%length).format(x) for x in decim_population]
    return bin_population
5,344,583
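# Small demo for generate_pop above: five individuals encoded on 8 bits each,
# then decoded back to integers for inspection.
pop = generate_pop(pop_size=5, length=8)
print(pop)                                          # e.g. ['01101100', '00011111', ...]
print([int(chromosome, 2) for chromosome in pop])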
def identifyAltIsoformsProteinComp(probeset_gene_db,species,array_type,protein_domain_db,compare_all_features,data_type): """ This function is used by the module IdentifyAltIsoforms to run 'characterizeProteinLevelExonChanges'""" global protein_ft_db; protein_ft_db = protein_domain_db; protein_domain_db=[] exon_db={} ### Create a simplified version of the exon_db dictionary with probesets that map to a match and null protein for probeset in probeset_gene_db: gene, exon_id = probeset_gene_db[probeset] ep = ExonProteinAlignmentData(gene,probeset,exon_id,'',''); exon_db[probeset] = ep global protein_sequence_db if compare_all_features == 'yes': type = 'seqcomp' else: type = 'exoncomp' if (array_type == 'junction' or array_type == 'RNASeq') and data_type != 'null': exon_protein_sequence_file = 'AltDatabase/'+species+'/'+array_type+'/'+data_type+'/'+'SEQUENCE-protein-dbase_'+type+'.txt' else: exon_protein_sequence_file = 'AltDatabase/'+species+'/'+array_type+'/'+'SEQUENCE-protein-dbase_'+type+'.txt' probeset_protein_db,protein_sequence_db = importExonSequenceBuild(exon_protein_sequence_file,exon_db) exon_hits={} for probeset in probeset_protein_db: gene = probeset_protein_db[probeset].GeneID() exon_hits[gene,probeset]=[] include_sequences = 'no' ### Sequences for comparisons are unnecessary to store. List array-type as exon since AltMouse data has been re-organized, later get rid of AltMouse specific functionality in this function functional_attribute_db,protein_features = characterizeProteinLevelExonChanges(species,exon_hits,probeset_protein_db,'exon',include_sequences) if (array_type == 'junction' or array_type == 'RNASeq') and data_type != 'null': export_file = 'AltDatabase/'+species+'/'+array_type+'/'+data_type+'/probeset-domain-annotations-'+type+'.txt' else: export_file = 'AltDatabase/'+species+'/'+array_type+'/probeset-domain-annotations-'+type+'.txt' formatAttributeForExport(protein_features,export_file) if (array_type == 'junction' or array_type == 'RNASeq') and data_type != 'null': export_file = 'AltDatabase/'+species+'/'+array_type+'/'+data_type+'/probeset-protein-annotations-'+type+'.txt' else: export_file = 'AltDatabase/'+species+'/'+array_type+'/probeset-protein-annotations-'+type+'.txt' formatAttributeForExport(functional_attribute_db,export_file)
5,344,584
def prepare_argument_parser(): """ Set up the argument parser for the different commands. Return: Configured ArgumentParser object. """ argument_parser = argparse.ArgumentParser( description='Build source code libraries from modules.') argument_parser.add_argument( '-r', '--repository', metavar="REPO", dest='repositories', action='append', default=[], help="Repository file(s) which should be available for the current library. " "The loading of repository files from a VCS is only supported through " "the library configuration file.") argument_parser.add_argument( '-c', '--config', dest='config', default='project.xml', help="Project configuration file. " "Specifies the required repositories, modules and options " "(default: '%(default)s').") argument_parser.add_argument( '-C', '--cwd', dest='cwd', default=None, help="Current working directory (default: '.').") argument_parser.add_argument( '-p', '--path', dest='path', default=None, help="Path in which the library will be generated (default: CWD).") argument_parser.add_argument( '-D', '--option', metavar='OPTION', dest='options', action='append', type=str, default=[], help="Additional options. Options given here will be merged with options " "from the configuration file and will overwrite the configuration " "file definitions.") argument_parser.add_argument( '--collect', metavar='COLLECTOR', dest='collectors', action='append', type=str, default=[], help="Additional collectors. Values given here will be merged with collectors " "from the configuration file.") argument_parser.add_argument( '-v', '--verbose', action='count', default=0, dest='verbose') argument_parser.add_argument( "--plain", dest="plain", action="store_true", default=(not sys.stdout.isatty() or not sys.stderr.isatty()), help="Disable styled output, only output plain ASCII.") argument_parser.add_argument( '--version', action='version', version='%(prog)s {}'.format(__version__), help="Print the lbuild version number and exit.") subparsers = argument_parser.add_subparsers( title="Actions", dest="action") actions = [ DiscoverAction(), DiscoverOptionsAction(), SearchAction(), ValidateAction(), BuildAction(), CleanAction(), InitAction(), UpdateAction(), DependenciesAction(), ] for action in actions: action.register(subparsers) return argument_parser
5,344,585
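# Sketch of using prepare_argument_parser above. The repository file name and the
# option string are made up; no subcommand is given, so args.action stays None.
parser = prepare_argument_parser()
args = parser.parse_args(["-r", "repo.lb", "-D", "target=hosted"])
print(args.repositories, args.options, args.action)  # ['repo.lb'] ['target=hosted'] None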
def CountClusterSizes(clusterLabels):
    """ This function takes the labels produced by spectral clustering (or another
    clustering algorithm) and counts the members in each cluster. This is primarily
    to see the distribution of cluster sizes over all windows, particularly to see
    if there are singleton clusters or a significant number of clusters with a
    small number of members.

    Parameters
    ---------
    clusterLabels: numpy array of int (clustered customers) - the cluster label of each customer

    Returns
    -------
    clusterCounts: numpy array of int (0,k) - the number of customers in each cluster

    """

    currentK = len(np.unique(clusterLabels))
    clusterCounts = np.zeros((1,currentK),dtype=int)
    for clustCtr in range(0,currentK):
        indices = np.where(clusterLabels==clustCtr)[0]
        clusterCounts[0,clustCtr] = len(indices)
    return clusterCounts
5,344,586
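# Example for CountClusterSizes above: three clusters with 3, 1 and 2 members.
import numpy as np

labels = np.array([0, 0, 1, 2, 0, 2])
print(CountClusterSizes(labels))  # [[3 1 2]]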
def solver_problem1(digits_list):
    """Count how many output digits are a 1, 4, 7 or 8, identified by their
    unique segment counts (2, 4, 3 and 7 segments respectively)."""
    cnt = 0
    for digits in digits_list:
        for d in digits:
            if len(d) in [2, 3, 4, 7]:
                cnt += 1
    return cnt
5,344,587
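# Example for solver_problem1 above, using Advent of Code 2021 day 8 style output
# digits; patterns of length 2, 3, 4 or 7 correspond to the digits 1, 7, 4 and 8.
digits_list = [
    ["fdgacbe", "cefdb", "cefbgd", "gcbe"],  # lengths 7, 5, 6, 4 -> two hits
    ["fcgedb", "cgb", "dgebacf", "gc"],      # lengths 6, 3, 7, 2 -> three hits
]
print(solver_problem1(digits_list))  # 5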
def spam_dotprods(rhoVecs, povms): """SPAM dot products (concatenates POVMS)""" nEVecs = sum(len(povm) for povm in povms) ret = _np.empty((len(rhoVecs), nEVecs), 'd') for i, rhoVec in enumerate(rhoVecs): j = 0 for povm in povms: for EVec in povm.values(): ret[i, j] = _np.vdot(EVec.todense(), rhoVec.todense()); j += 1 # todense() gives a 1D array, so no need to transpose EVec return ret
5,344,588
def is_just_monitoring_error(unique_message):
    """
    Return True if the unique_message is an intentional error raised just for
    monitoring (meaning that it contains one of the JUST_MONITORING_ERROR_MARKERS
    somewhere in its exc_text or message)
    """
    if sys.version_info[0] == 2:
        exc_text = unicode(unique_message.exc_text)
        message = unicode(unique_message.message)
    else:
        exc_text = str(unique_message.exc_text)
        message = str(unique_message.message)
    return any([(marker in exc_text or marker in message)
                for marker in setting('MONITORING_ERROR_MARKERS')])
5,344,589
def get_chi_atom_indices():
    """Returns atom indices needed to compute chi angles for all residue types.

    Returns:
        A tensor of shape [residue_types=21, chis=4, atoms=4]. The residue types are
        in the order specified in rc.restypes + unknown residue type at the end. For
        chi angles which are not defined on the residue, the position indices are by
        default set to 0.
    """
    chi_atom_indices = []
    for residue_name in rc.restypes:
        residue_name = rc.restype_1to3[residue_name]
        residue_chi_angles = rc.chi_angles_atoms[residue_name]
        atom_indices = []
        for chi_angle in residue_chi_angles:
            atom_indices.append([rc.atom_order[atom] for atom in chi_angle])
        for _ in range(4 - len(atom_indices)):
            atom_indices.append(
                [0, 0, 0, 0]
            )  # For chi angles not defined on the AA.
        chi_atom_indices.append(atom_indices)

    chi_atom_indices.append([[0, 0, 0, 0]] * 4)  # For UNKNOWN residue.

    return chi_atom_indices
5,344,590
def test_part1() -> None: """ Examples for Part 1. """ initial = State(0, "...........", ("BA", "CD", "BC", "DA"), 2) assert ( State.from_input( "\n".join( ( "#############", "#...........#", "###B#C#B#D###", " #A#D#C#A#", " #########", ) ) ) == initial ) one = State(40, "...B.......", ("BA", "CD", "C", "DA"), 2) assert initial.distance(0, 0) == 3 assert initial.distance(1, 9) == 6 assert initial.distance(1, 2) == -1 assert one.distance(1, 1) == -1 assert one.distance(2, 7) == 3 assert one in initial.valid_moves() assert not one.is_ready(0) assert not one.is_ready(1) assert one.is_ready(2) assert not one.is_ready(3) two = State(240, "...B.C.....", ("BA", "D", "C", "DA"), 2) assert two in one.valid_moves() three = State(440, "...B.......", ("BA", "D", "CC", "DA"), 2) assert three in two.valid_moves() ten = State(8513, ".....D...A.", ("A", "BB", "CC", "D"), 2) eleven = State(12513, ".........A.", ("A", "BB", "CC", "DD"), 2) assert eleven in ten.valid_moves() solution = State(12521, "...........", ("AA", "BB", "CC", "DD"), 2) assert solution in eleven.valid_moves() assert not initial.is_solved() assert not one.is_solved() assert solution.is_solved() assert initial.solve() == solution
5,344,591
def get_all_device_stats(): """Obtain and return statistics for all attached devices.""" devices = get_devices() stats = {} for serial in devices: model, device_stats = get_device_stats(serial) if not stats.get(model): stats[model] = {} stats[model][serial] = device_stats return stats
5,344,592
def geojson2shp(in_filename, out_filename, source_epsg, target_epsg, sigevent_url): """ Converts GeoJSON into Esri Shapefile. Arguments: in_filename -- the input GeoJSON out_filename -- the output Shapefile source_epsg -- the EPSG code of source file target_epsg -- the EPSG code of target file sigevent_url -- the URL for SigEvent """ if source_epsg == target_epsg: ogr2ogr_command_list = ['ogr2ogr', '-f', 'ESRI Shapefile', '-fieldTypeToString', 'Date,Time,DateTime', out_filename, in_filename] else: ogr2ogr_command_list = ['ogr2ogr', '-f', 'ESRI Shapefile', '-fieldTypeToString', 'Date,Time,DateTime', '-s_srs', source_epsg, '-t_srs', target_epsg, out_filename, in_filename] run_command(ogr2ogr_command_list, sigevent_url)
5,344,593
def plot_coefs(coefficients, nclasses): """ Plot the coefficients for each label coefficients: output from clf.coef_ nclasses: total number of possible classes """ scale = np.max(np.abs(coefficients)) p = plt.figure(figsize=(25, 5)) for i in range(nclasses): p = plt.subplot(1, nclasses, i + 1) p = plt.imshow(coefficients[i].reshape(28, 28), cmap=plt.cm.RdBu, vmin=-scale, vmax=scale) p = plt.axis('off') p = plt.title('Class %i' % i) return None
5,344,594
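# Sketch of calling plot_coefs above with random "coefficients" of the expected
# shape (10 classes x 784 pixels, as LogisticRegression().coef_ would give on MNIST).
import numpy as np

fake_coefs = np.random.randn(10, 784)
plot_coefs(fake_coefs, nclasses=10)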
def timeleft(cli, nick, chan, rest): """Returns the time left until the next day/night transition.""" if (chan != nick and var.LAST_TIME and var.LAST_TIME + timedelta(seconds=var.TIME_RATE_LIMIT) > datetime.now()): cli.notice(nick, ("This command is rate-limited. Please wait a while " "before using it again.")) return if chan != nick: var.LAST_TIME = datetime.now() if var.PHASE in var.TIMERS: t = var.TIMERS[var.PHASE] remaining = int((t[1] + t[2]) - time.time()) if var.PHASE == "day": what = "sunset" elif var.PHASE == "night": what = "sunrise" elif var.PHASE == "join": what = "game is canceled" msg = "There is \u0002{0[0]:0>2}:{0[1]:0>2}\u0002 remaining until {1}.".format(divmod(remaining, 60), what) else: msg = "{0} timers are currently disabled.".format(var.PHASE.capitalize()) if nick == chan: pm(cli, nick, msg) elif nick not in var.list_players() and var.PHASE not in ("none", "join"): cli.notice(nick, msg) else: cli.msg(chan, msg)
5,344,595
def shade_pixels(shader):
    """Set all pixels using a pixel-shader style function

    :param shader: A function which accepts the x and y positions of a pixel
                   and returns an (r, g, b) tuple

    For example, this would be synonymous to clear::

        shade_pixels(lambda x, y: (0, 0, 0))

    Or perhaps we want to map red along the horizontal axis, and blue along
    the vertical::

        shade_pixels(lambda x, y: ((x / 7.0) * 255, 0, (y / 7.0) * 255))

    """
    width, height = get_shape()
    for x in range(width):
        for y in range(height):
            r, g, b = shader(x, y)
            set_pixel(x, y, r, g, b)
5,344,596
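# Example shader for shade_pixels above: red grows along x and blue along y,
# normalised by whatever grid size get_shape() reports.
def gradient_shader(x, y):
    width, height = get_shape()
    return (int(255 * x / max(width - 1, 1)), 0, int(255 * y / max(height - 1, 1)))

shade_pixels(gradient_shader)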
def infect(): """Return a function that calls the infect endpoint on app.""" def inner(users, qs): app.debug = True with app.test_client() as client: headers = {'Content-Type': 'application/json'} data = json.dumps(users) rv = client.post('/infect?{0}'.format(qs), data=data, headers=headers) return json.loads(rv.data.decode()) return inner
5,344,597
def get_description(): """ Return a dict describing how to call this plotter """ desc = dict() desc['data'] = True desc['description'] = """This plot shows the number of days with a high temperature at or above a given threshold. You can optionally generate this plot for the year to date period. """ today = datetime.date.today() desc['arguments'] = [ dict(type='station', name='station', default='IA2203', label='Select Station:', network='IACLIMATE'), dict(type="year", name="year", default=today.year, label="Year to Compare:"), dict(type='select', options=PDICT, default='full', label='Day Period Limit:', name='limit'), ] return desc
5,344,598
def calc_cost_of_buying(count, price):
    """Calculate the total cost and the commission fee required to buy the shares.
    """
    subtotal = int(count * price)
    fee = calc_fee(subtotal)
    return subtotal + fee, fee
5,344,599
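# Example for calc_cost_of_buying above: 100 shares at 1,234 yen each. The exact
# total depends on the fee table implemented in calc_fee().
total, fee = calc_cost_of_buying(100, 1234)
print(total, fee)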