content
stringlengths
22
815k
id
int64
0
4.91M
def unknown(bot, update):
    """Reply sent when an unrecognized command is received."""
    chat = update.message.chat_id
    bot.send_message(chat_id=chat, text="No compriendo nu caz.")
5,323,700
def SMA(value, day):
    """Return the simple moving average (SMA) series.

    The input may be a list or a sequence type.  The output is the
    simple-moving-average series from the start of history up to the
    current period.  (Translated from the original Chinese docstring.)
    """
    # talib is imported lazily so the module loads even without TA-Lib.
    import talib
    result = talib.SMA(value, day)
    return result
5,323,701
def get_filtered_ecs_service_names(ecs_client, ecs_cluster_name, name_prefix):
    """Retrieves the service names in the given cluster that start with a prefix.

    Keyword arguments:
    ecs_client -- ECS boto3 client (if None, one is created via get_client)
    ecs_cluster_name -- the name of the cluster the services are in
    name_prefix -- prefix used (via str.startswith) to filter service names
    """
    service_names = []
    # Fall back to a freshly created ECS client when none was supplied.
    ecs_client_qualified = (
        ecs_client if ecs_client is not None else get_client(service_name="ecs")
    )
    service_arns = ecs_client_qualified.list_services(cluster=ecs_cluster_name)[
        "serviceArns"
    ]
    # describe_services accepts at most 10 services per call, hence batching.
    batch_size = 10
    for i in range(0, len(service_arns), batch_size):
        batch = service_arns[i : i + batch_size]
        services = ecs_client_qualified.describe_services(
            cluster=ecs_cluster_name, services=batch
        )
        service_names_for_batch = [
            service["serviceName"] for service in services["services"]
        ]
        for service_name in service_names_for_batch:
            if service_name.startswith(name_prefix):
                service_names.append(service_name)
    console_printer.print_info(f"Retrieved service name list as '{service_names}'")
    return service_names
5,323,702
def read_stdin(format='fasta'):
    """Read sys.stdin and check whether
    a) a line is a read or not
    b) if a read: convert it to fasta/fastq on stdout.

    A line counts as a read when it splits into exactly 14 tab-separated
    fields (presumably a SAM-style alignment line -- TODO confirm).
    Consecutive lines for the same read name (field 0) are written once.
    """
    last_written_read = ""
    for line in sys.stdin:
        la = line.strip().split("\t")
        if (len(la)) == 14 and last_written_read != la[0]:
            # yep, this is a read
            if format == "fasta":
                write_fasta_stdout(la)
            if format == "fastq":
                write_fastq_stdout(la)
            last_written_read = la[0]
        else:
            pass
5,323,703
def main(
        folder_toxic: str,
        folder_unintended: str,
        output: str,
        unintended_threshold: float,
        folder_ruddit: str,
        text_process: bool,
        preprocess_type: str
) -> None:
    """Tool to convert test and train dataset from
    https://www.kaggle.com/c/jigsaw-toxic-comment-classification-challenge/data?select=train.csv.zip
    and
    https://www.kaggle.com/c/jigsaw-unintended-bias-in-toxicity-classification/data?select=all_data.csv
    to train dataset.
    """
    print('Toxic Comment Classification Challenge')
    toxic_df = read_toxic_data(folder_toxic)
    print('Ruddit dataset')
    ruddit = read_ruddit(folder_ruddit)
    print('Jigsaw Unintended Bias in Toxicity Classification')
    # Two modes: 'dense' scores the toxic data alone and concatenates all
    # three sources; otherwise the unintended data is thresholded first and
    # scoring is done on the merged toxic+unintended frame.
    if preprocess_type == 'dense':
        toxic_df = calculate_score(toxic_df)
        unintented_df = read_unintended_data_dense(folder_unintended)
        total = pd.concat([toxic_df, unintented_df, ruddit])
    else:
        unintented_df = read_unintended_data_sparse(folder_unintended, unintended_threshold)
        total = pd.concat([toxic_df, unintented_df])
        total = calculate_score(total)
        total = pd.concat([total, ruddit])
    # Clamp the score at its upper bound of 1.
    total.loc[total['offensiveness_score'] > 1, 'offensiveness_score'] = 1
    print('Data preprocessing')
    tqdm.pandas()
    total['comment_text'] = total['comment_text'].progress_apply(process_text, full_process=text_process)
    # Drop duplicated comments introduced by merging the three sources.
    num_duplicates = total.duplicated(subset='comment_text').sum()
    if num_duplicates > 0:
        print(f'Founded {num_duplicates} duplicated rows, will be deleted')
        total = total.drop_duplicates(subset='comment_text')
    print('Shape of united data after filtering:', total.shape)
    total.to_csv(output, index=False)
    print('Making sample dataset to check quality')
    sample = make_sample(total)
    sample.to_csv('sample.csv', index=False)
5,323,704
def remove_zero_pairs(xy):
    """Returns a new xy-pair Numpy array with zero pairs removed.

    NOTE(review): the mask keeps rows where BOTH coordinates are nonzero,
    i.e. it removes any row where x == 0 OR y == 0 -- the original
    docstring claimed only x=y=0 pairs are removed; confirm which
    behavior is intended.

    Arguments:
        xy (numpy array): input array; columns indexed by module-level
            __X and __Y.
    """
    mask = np.where((xy[:, __X] != 0.0) & (xy[:, __Y] != 0.0))[0]
    return xy[mask, :]
5,323,705
def AddMEBTChopperPlatesAperturesToSNS_Lattice(accLattice, aprtNodes):
    """
    Function will add two Aperture nodes at the entrance and exit of
    MEBT chopper plates. It returns the list of Aperture nodes.
    """
    # Full opening 60 mm x 18 mm; halved below when building the node.
    # NOTE(review): the meaning of shape code 3 is defined by
    # LinacApertureNode -- confirm against its documentation.
    x_size = 0.060
    y_size = 0.018
    shape = 3
    node_pos_dict = accLattice.getNodePositionsDict()
    node1 = accLattice.getNodesForName("MEBT:ChpPlt:Entr")[0]
    node2 = accLattice.getNodesForName("MEBT:ChpPlt:Exit")[0]
    for node in [node1, node2]:
        node_name = node.getName()
        (posBefore, posAfter) = node_pos_dict[node]
        apertureNode = LinacApertureNode(shape, x_size/2.0, y_size/2.0, posBefore)
        apertureNode.setName(node_name + ":Aprt")
        apertureNode.setSequence(node.getSequence())
        node.addChildNode(apertureNode, node.ENTRANCE)
        aprtNodes.append(apertureNode)
    # Keep the accumulated aperture nodes ordered by longitudinal position.
    aprtNodes = sorted(aprtNodes, key=lambda x: x.getPosition(), reverse=False)
    return aprtNodes
5,323,706
def build_parser():
    """Build the argument parser for the APBS input-file generator.

    Returns:
        argparse.ArgumentParser with all supported options and the
        mandatory positional ``filename``.  Defaults come from ``psize``.
    """
    parse = argparse.ArgumentParser(description=("Use this script to generate new APBS input "
                                                 "files or split an existing parallel input "
                                                 "file into multiple async files"),
                                    formatter_class=argparse.ArgumentDefaultsHelpFormatter)
    parse.add_argument("--asynch", action="store_true",
                       help="Perform an asynchronous parallel calculation.")
    parse.add_argument("--split", action="store_true",
                       help=("Split an existing parallel input file to multiple "
                             "async input files."))
    parse.add_argument("--potdx", action="store_true",
                       help=("Create an input to compute an electrostatic potential map."))
    parse.add_argument("--method",
                       help=("Force output file to write a specific APBS ELEC method."),
                       choices=["para", "auto", "manual", "async"])
    parse.add_argument("--cfac", type=float, default=psize.CFAC,
                       help=("Factor by which to expand molecular dimensions to "
                             "get coarse grid dimensions."))
    parse.add_argument("--fadd", type=float, default=psize.FADD,
                       help=("Amount to add to molecular dimensions to get fine "
                             "grid dimensions."))
    parse.add_argument("--space", type=float, default=psize.SPACE,
                       help="Desired fine mesh resolution")
    parse.add_argument("--gmemfac", type=int, default=psize.GMEMFAC,
                       help=("Number of bytes per grid point required for sequential "
                             "MG calculation"))
    parse.add_argument("--gmemceil", type=int, default=psize.GMEMCEIL,
                       help=("Max MB allowed for sequential MG calculation. Adjust "
                             "this to force the script to perform faster calculations "
                             "(which require more parallelism)"))
    parse.add_argument("--ofrac", type=float, default=psize.OFRAC,
                       help="Overlap factor between mesh partitions (parallel)")
    parse.add_argument("--redfac", type=float, default=psize.REDFAC,
                       help=("The maximum factor by which a domain dimension can "
                             "be reduced during focusing"))
    # Fixed typo in the user-visible help text: "anc" -> "and".
    parse.add_argument("--istrng",
                       help="Ionic strength (M). Na+ and Cl- ions will be used")
    parse.add_argument("filename")
    return parse
5,323,707
def find_adjective(sent):
    """Return the first adjective (POS tag 'JJ') found in *sent*, or None."""
    for word, tag in sent.pos_tags:
        if tag == 'JJ':
            # First adjective wins.
            return word
    return None
5,323,708
def verify_signatures(item: Any, verify_types: bool = True) -> None:
    """Verifies the signature of all examples associated with the provided item.

    Provided item should be of type function, module, or module name.

    - *verify_types*: If `True` all examples will have their types checked
      against their associated functions type annotations.

    Currently unimplemented: always raises NotImplementedError.
    """
    message = f"Currently examples can not be attached to {type(item)}."
    raise NotImplementedError(message)
5,323,709
def tensor_index_by_number(data, number):
    """Tensor getitem by a Number which may be an integer/float/bool value.

    Dispatches on the classified index type:
      - bool -> tensor_index_by_bool
      - int  -> tensor_index_by_integer
      - anything else (e.g. float) -> raise an index error
    """
    number_type = const_utils.check_number_index_type(number)
    if number_type == const_utils.BOOL_:
        return tensor_index_by_bool(data, number)
    if number_type == const_utils.INT_:
        return tensor_index_by_integer(data, number)
    return const_utils.raise_index_error("Only support integers, slices(`:`), ellipsis(`...`), None and bool.")
5,323,710
def data_overview(human_file, disease_file):
    """Print summary statistics about a curated disease gene set within the
    human interactome.

    Args:
        human_file: tab-separated interactome edge list with columns 'A' and 'B'.
        disease_file: tab-separated curated disease genes with at least
            columns 'geneId' and 'diseaseClass'.
    """
    curated_df = pd.read_csv(disease_file, sep='\t')
    interactome_df = pd.read_csv(human_file, sep='\t')
    interactome_g = nx.from_pandas_edgelist(interactome_df, source='A', target='B')
    # Subgraph of the interactome induced by the curated disease genes.
    curated_g = interactome_g.subgraph(curated_df['geneId'].to_list())
    # Largest connected component of that subgraph.
    list_con_comp = sorted(nx.connected_components(curated_g), key=len, reverse=True)
    lcc = curated_g.subgraph(list_con_comp[0])
    print('Number of genes associated with the disease:', curated_df['geneId'].nunique())
    print('Classes of the disease:', curated_df['diseaseClass'].unique())
    print('Number of genes present in the interactome:', curated_g.number_of_nodes())
    print('Largest connected component:', lcc.number_of_nodes())
    # Seed genes that never appear in the interactome edge list.
    nodes_in_g = set(interactome_df['A'].to_list() + interactome_df['B'].to_list())
    seed_genes = set(curated_df['geneId'].to_list())
    missing_gene = seed_genes.difference(nodes_in_g)
    print('Number of genes in the interactome:', len(nodes_in_g))
    print('Missing gene:', missing_gene)
5,323,711
def _get_stop_as_datetime(event_json)->datetime: """Reads the stop timestamp of the event and returns it as a datetime object. Args: event_json (json): The event encapsulated as json. Returns datetime: Timestamp of the stop of the event. """ name = event_json['info']['name'] payload_stop = 'meta.raw_payload.' + name + '-stop' stop_timestamp_string = event_json['info'][payload_stop]['timestamp'] stop_date_string, stop_time_string = stop_timestamp_string.split('T') stop_time_string, _ = stop_time_string.split('.') date_and_time_string = stop_date_string + ' ' + stop_time_string return datetime.strptime(date_and_time_string, '%Y-%m-%d %H:%M:%S')
5,323,712
def handle_nssend(bot, ievent):
    """ arguments: <txt> - send string to the nickserv. """
    # NickServ only exists on IRC; do nothing for Jabber bots.
    if bot.jabber:
        return
    if not ievent.rest:
        ievent.missing('<txt>')
        return
    nsauth.sendstring(bot, ievent.rest)
    ievent.reply('send')
5,323,713
def fetch_incidents_command():
    """
    Fetches incidents from the ProofPoint API.

    Keeps per-state bookmarks (last fetch time and last fetched incident id)
    in the integration LastRun object, pulls a batch per configured state and
    pushes the results to Demisto.
    """
    integration_params = demisto.params()
    last_fetch = demisto.getLastRun().get('last_fetch', {})
    last_fetched_id = demisto.getLastRun().get('last_fetched_incident_id', {})
    fetch_delta = integration_params.get('fetch_delta', '6 hours')
    fetch_limit = integration_params.get('fetch_limit', '50')
    incidents_states = integration_params.get('states')
    # Seed bookmarks for states that have never been fetched.
    for state in incidents_states:
        if not last_fetch.get(state):
            last_fetch[state] = FIRST_FETCH
    for state in incidents_states:
        if not last_fetched_id.get(state):
            last_fetched_id[state] = '0'
    incidents = []
    for state in incidents_states:
        request_params = {
            'created_after': last_fetch[state],
            'last_fetched_id': last_fetched_id[state],
            'fetch_delta': fetch_delta,
            'state': state,
            'fetch_limit': fetch_limit
        }
        id = last_fetched_id[state]
        incidents_list = get_incidents_batch_by_time_request(request_params)
        for incident in incidents_list:
            id = incident.get('id')
            inc = {
                'name': 'ProofPoint_TRAP - ID {}'.format(id),
                'rawJSON': json.dumps(incident),
                'occurred': incident['created_at']
            }
            incidents.append(inc)
        if incidents:
            # Move the bookmark one minute back from the newest incident so
            # incidents created within the same minute are not skipped.
            last_fetch_time = incidents[-1]['occurred']
            last_fetch[state] = \
                (datetime.strptime(last_fetch_time, TIME_FORMAT) - timedelta(minutes=1)).isoformat().split('.')[0] + 'Z'
            last_fetched_id[state] = id
    demisto.debug("End of current fetch function with last_fetch {} and last_fetched_id {}".format(str(last_fetch), str(
        last_fetched_id)))
    # Bug fix: setLastRun replaces the whole LastRun object, so the original
    # pair of calls dropped 'last_fetch' every run.  Persist both keys in a
    # single call instead.
    demisto.setLastRun({'last_fetch': last_fetch,
                        'last_fetched_incident_id': last_fetched_id})
    demisto.info('extracted {} incidents'.format(len(incidents)))
    demisto.incidents(incidents)
5,323,714
def ascon_finalize(S, rate, a, key):
    """
    Ascon finalization phase - internal helper function.

    S: Ascon state, a list of 5 64-bit integers
    rate: block size in bytes (8 for Ascon-128, Ascon-80pq; 16 for Ascon-128a)
    a: number of initialization/finalization rounds for permutation
    key: a bytes object of size 16 (for Ascon-128, Ascon-128a; 128-bit security)
         or 20 (for Ascon-80pq; 128-bit security)
    returns the tag, updates S
    """
    assert(len(key) in [16,20])
    # XOR the key into the state words immediately following the rate part.
    # For a 16-byte key the third XOR uses key[16:] == b'' (presumably a
    # zero value from bytes_to_int -- defined elsewhere; confirm).
    S[rate//8+0] ^= bytes_to_int(key[0:8])
    S[rate//8+1] ^= bytes_to_int(key[8:16])
    S[rate//8+2] ^= bytes_to_int(key[16:])
    ascon_permutation(S, a)
    # Tag = last two state words XORed with the last 16 key bytes.
    S[3] ^= bytes_to_int(key[-16:-8])
    S[4] ^= bytes_to_int(key[-8:])
    tag = int_to_bytes(S[3], 8) + int_to_bytes(S[4], 8)
    if debug: printstate(S, "finalization:")
    return tag
5,323,715
def test_generate_end_connection():
    """
    Tests the generate_end_connection function of Road
    :return: Tests pass if end connection is generated properly. Fail if otherwise.
    """
    start = Coordinates(90, 70)
    end = Coordinates(70, 70)
    length = 20
    out_ln = 1
    in_ln = 1
    angle = math.pi / 2
    speed_limit = 40
    road = Road(start, end, length, out_ln, in_ln, angle, speed_limit, 'Test')
    # A freshly built road has no connections on either side.
    assert (road.get_start_connection() is None)
    assert (road.get_end_connection() is None)
    generate_end_connection(road, 15, 25)
    # Only the end connection must have been created.
    assert (road.get_start_connection() is None)
    assert (road.get_end_connection() is not None)
    i = road.get_end_connection()
    assert i.get_radius() == 15
    assert i.get_center().get_x() == 85
    assert i.get_center().get_y() == 70
5,323,716
def _click_command(
    state: State,
    path: str,
    files: str,
    batch: int,
    runid_log: str = None,
    wait: bool = False,
    skip_existing: bool = False,  # annotation fixed: default/use is boolean
    simulate: bool = False,
):
    """Ingest files into OSDU.

    Thin CLI wrapper that forwards all arguments unchanged to ``ingest``.
    """
    return ingest(state, path, files, batch, runid_log, wait, skip_existing, simulate)
5,323,717
def datasheet_search_query(doctype, txt, searchfield, start, page_len, filters):
    """Search-query callback for DC_Doc_Datasheet_Meta documents.

    :param doctype: doctype being searched (unused; required by the
        frappe search-query callback signature)
    :param txt: search text matched (SQL LIKE) against name and title
    :param searchfield: unused
    :param start: unused (no pagination applied)
    :param page_len: unused
    :param filters: unused
    :return: rows of (name, title) ordered by name, then title
    """
    db_name = frappe.conf.get("db_name")
    # Parameterized query: the search text is bound via %(search)s, only the
    # db/table identifiers are interpolated.
    sql = f"""
        SELECT
             `m`.`name`
            ,`m`.`title`
        FROM `{db_name}`.tabDC_Doc_Datasheet_Meta AS `m`
        WHERE (`m`.`name` LIKE %(search)s
            OR `m`.`title` LIKE %(search)s)
        ORDER BY
             `m`.`name` ASC
            ,`m`.`title` ASC"""
    res = frappe.db.sql(
        sql + ';', {
            'search': '%{}%'.format(txt)
        }
    )
    return res
5,323,718
def client():
    """Define client connection to server BaseManager

    Returns:
        BaseManager object, already connected, with the remote
        'set_event' callable registered.
    """
    port, auth = get_auth()
    # Empty host '' means connect to the local machine.
    mgr = BaseManager(address=('', port), authkey=auth)
    mgr.register('set_event')
    mgr.connect()
    return mgr
5,323,719
def test_SetView_ior(testcase, setview, other, exp_result): """ Test function for SetView: self |= other (= __ior__() / __or__() + assign) """ # Don't change the testcase data, but a copy setview_copy = SetView(setview) setview_copy_id = id(setview_copy) # The code to be tested setview_copy |= other # Ensure that exceptions raised in the remainder of this function # are not mistaken as expected exceptions assert testcase.exp_exc_types is None # Verify the left hand object is a new object. # In Python, augmented assignment (e.g. `+=` is not guaranteed to # modify the left hand object in place, but can result in a new object. # Reason is that not implementing __ior__() results in Python calling # __or__() and assignment. # For details, see # https://docs.python.org/3/reference/datamodel.html#object.__ior__. # For SetView, the `|=` operator results in a new SetView object on a new # underlying set object. assert id(setview_copy) != setview_copy_id assert_equal(setview_copy, exp_result)
5,323,720
def end_message():
    """
    Prints an end message to the log file.

    Parameters
    ----------
    None

    Returns
    -------
    None
    """
    # Logger named after this function ("end_message").
    current_function = sys._getframe().f_code.co_name  # pylint: disable=protected-access
    logger = logging.getLogger(current_function)
    # dump logo
    logger.info("Calculations finished.\n\n\n//// End of calculations ////\n")
5,323,721
def set_remote_sense(is_remote=False):
    """Send command 0x56 with *is_remote* encoded as 0/1 and return the
    device response.

    NOTE(review): the command's meaning (remote sensing on/off) is inferred
    from the function name -- confirm against the device protocol docs.
    """
    built_packet = build_cmd(0x56, value=int(is_remote))
    resp = send_recv_cmd(built_packet)
    return resp
5,323,722
def printImproperDihedral(dihedral, shift, molecule, alchemicalTransformation):
    """Generate improper dihedral line

    Parameters
    ----------
    dihedral : improper dihedral object (holds atomA..atomD and V2 terms;
        originally documented as "Angle Object")
    shift : int
        Shift produced by structural dummy atoms
    molecule : molecule object
        Molecule object
    alchemicalTransformation : bool
        True if alchemical transformation (adds the B-state columns)

    Returns
    -------
    dihedralLine : str
        Dihedral line data
    """
    # Phase angle 180 deg, half barrier V2/2 converted kcal -> kJ,
    # multiplicity 2, function type 4.
    # NOTE(review): the column layout looks like a GROMACS [dihedrals]
    # entry -- confirm against the target topology format.
    a = 180.0
    V1 = (dihedral.V2*0.5)*kcalToKj
    V2 = 2.0
    ftype = 4
    line = ''
    # Map original serials to current serials, corrected by the dummy shift.
    atomAdihedral = molecule.atoms[dihedral.atomA.serialOriginal -1].serial-shift
    atomBdihedral = molecule.atoms[dihedral.atomB.serialOriginal -1].serial-shift
    atomCdihedral = molecule.atoms[dihedral.atomC.serialOriginal -1].serial-shift
    atomDdihedral = molecule.atoms[dihedral.atomD.serialOriginal -1].serial-shift
    if alchemicalTransformation:
        V1_B = (dihedral.V2_B*0.5)*kcalToKj
        line = '%5d%5d%5d%5d %d %6.3f %6.3f %6d %6.3f %6.3f %6d \n' % \
            (atomAdihedral, atomBdihedral, atomCdihedral, atomDdihedral, ftype, a, V1, V2, a, V1_B, V2)
    else:
        line = '%5d%5d%5d%5d %d %6.3f %6.3f %6d \n' % \
            (atomAdihedral, atomBdihedral, atomCdihedral, atomDdihedral, ftype, a, V1, V2)
    return line
5,323,723
def test_xibo_time_format_new_york(epoch_ms, expected_formatted_time, new_york_datetime_creator):
    """Test creating date/times in New York and formatting them for the Meetup API."""
    # Parametrized fixtures supply the epoch value and expected string.
    a_datetime = new_york_datetime_creator.from_epoch_ms(epoch_ms)
    assert expected_formatted_time == xibo_time_format(a_datetime)
5,323,724
def check_null_records(field, table, cursor, record_count, report_width):
    """Check for null records in the field <field> in the table <table>.
    Used by the script "check_stocking_data.py"

    Arguments:
    - `field`: column to test for NULL/empty values
    - `table`: table to scan
    - `cursor`: open DB-API cursor
    - `record_count`: max number of offending stock_ids shown as examples
    - `report_width`: width used to dot-pad the OK message
    """
    sql = "select stock_id from [{}] where [{}] is null or [{}]='';"
    cursor.execute(sql.format(table, field, field))
    rs = cursor.fetchall()
    if len(rs):
        missing = [str(x[0]) for x in rs[:record_count]]
        msg = "Oh-oh! Found records where {} is null or empty(n={}) for example:\n\t"
        msg = msg.format(field, len(rs)) + ",\n\t".join(missing)
    else:
        msg = "Checking for records with null or empty {}".format(field)
        msg = "{msg:.<{width}}OK".format(width=report_width, msg=msg)
    print(msg)
5,323,725
def logDB2OwnedSemaphores(logFn="db2andsyssems.csv"):
    """
    Retrieve the system-wide max semaphore count, the semaphore counts
    allocated by the db2 instance user, the fmp user and the rest of the
    system, then append one csv record to the logFn file.

    Row structure: ts,db2inst_semcnt,db2fmp_semcnt,sys_semcnt,max_sem
    """
    db2instusr, db2fmpusr = get_db2_inst_fmp_user()
    ts = time.time()
    maxsems = getSystemWideMaxSemaphores()
    m = getDB2OwnedSemaphores(db2instusr, db2fmpusr)
    # Write the csv header only when creating a new file.
    write_header = not os.path.exists(logFn)
    # Fixed: the original used the Python 2 ``print >> out`` syntax (a
    # SyntaxError on Python 3) and closed the handle manually; a context
    # manager guarantees the file is closed even on error.
    with open(logFn, "a") as out:
        if write_header:
            out.write("ts,db2inst_semcnt,db2fmp_semcnt,sys_semcnt,max_sem\n")
        out.write("%s,%s,%s,%s,%s\n" % (
            ts, m[db2instusr], m[db2fmpusr], m['sys'], maxsems))
5,323,726
def initdb(drop):
    """Initialize the database.

    When *drop* is true, asks the user for confirmation (aborting when
    declined) and drops all tables before recreating them.
    """
    if drop:
        # click.confirm(..., abort=True) raises click.Abort when declined.
        click.confirm('This operation will delete the database, do you want to continue?', abort=True)
        db.drop_all()
        click.echo('Drop tables.')
    db.create_all()
    click.echo('Initialized database.')
5,323,727
def mmd_est(x, y, c):
    """
    Function for estimating the MMD between samples x and y using Gaussian
    RBF with scale c.

    Args:
        x (np.ndarray): (n_samples, n_dims) samples from first distribution.
        y (np.ndarray): (n_samples, n_dims) samples from second distribution.
        c: kernel scale, passed through to _gauss_rbf.

    Returns:
        float: The mmd estimate."""
    n_x = x.shape[0]
    n_y = y.shape[0]
    # Within-sample term for x: mean kernel over i != j pairs (unbiased).
    factor1 = 0.
    for i in range(n_x):
        for j in range(n_x):
            if (j == i):
                continue
            factor1 += _gauss_rbf(x[i:i+1], x[j:j+1], c)
    factor1 /= (n_x*(n_x-1))
    # Within-sample term for y.
    factor2 = 0.
    for i in range(n_y):
        for j in range(n_y):
            if (j == i):
                continue
            factor2 += _gauss_rbf(y[i:i+1], y[j:j+1], c)
    factor2 /= (n_y*(n_y-1))
    # Cross term between x and y, weighted by 2/(n_x*n_y).
    factor3 = 0.
    for i in range(n_x):
        for j in range(n_y):
            factor3 += _gauss_rbf(x[i:i+1], y[j:j+1], c)
    factor3 *= 2/(n_x*n_y)
    return factor1 + factor2 - factor3
5,323,728
def contents_append_notable_sequence_event_types(sequence, asset_sequence_id) -> Dict:
    """Build one context entry for a notable sequence event type.

    Args:
        sequence: sequence object (dict-like) holding eventType,
            displayName and count
        asset_sequence_id: asset sequence ID

    Returns:
        Dict with the relevant notable sequence event type fields.
    """
    return {
        'eventType': sequence.get('eventType'),
        'displayName': sequence.get('displayName'),
        'count': sequence.get('count'),
        'sequenceId': asset_sequence_id,
    }
5,323,729
def get_tx_in_db(session: Session, tx_sig: str) -> bool:
    """Checks if the transaction signature already exists for Challenge
    Disbursements.

    Args:
        session: SQLAlchemy session.
        tx_sig: transaction signature to look up.

    Returns:
        True when at least one ChallengeDisbursement row carries this
        signature, else False.
    """
    tx_sig_db_count = (
        session.query(ChallengeDisbursement).filter(
            ChallengeDisbursement.signature == tx_sig
        )
    ).count()
    exists = tx_sig_db_count > 0
    return exists
5,323,730
def demo_version():
    """Get version

    Issues a GET against the module-level ``end_point`` using ``hdrs``
    and logs the raw response body.
    """
    v = requests.get(end_point, headers=hdrs)
    LOG.info('Version: {0}\n'.format(v.text))
5,323,731
def saturation_correlate(Ch_L, L_L):
    """
    Returns the correlate of *saturation* :math:`S_L`.

    Parameters
    ----------
    Ch_L : numeric or array_like
        Correlate of *chroma* :math:`Ch_L`.
    L_L : numeric or array_like
        Correlate of *Lightness* :math:`L_L`.

    Returns
    -------
    numeric or ndarray
        Correlate of *saturation* :math:`S_L`.

    Examples
    --------
    >>> Ch_L = 0.008650662051714
    >>> L_L = 37.368047493928195
    >>> saturation_correlate(Ch_L, L_L)  # doctest: +ELLIPSIS
    0.0002314...
    """
    chroma = as_float_array(Ch_L)
    lightness = as_float_array(L_L)
    # Saturation is simply chroma normalised by lightness.
    return chroma / lightness
5,323,732
def test_get_model_results__multiple_finds(ihme):
    """Expect an exception if no db and table are given and the mvid is found
    in multiple locations"""
    with pytest.raises(ValueError):
        # mvid known to exist in more than one backing store.
        model_version_id = 265844
        db = None
        table = None
        _get_model_results(model_version_id, db, table)
5,323,733
def teardown():
    """Called at end of any test @with_setup()"""
    # Remove every dakota scratch file that still exists on disk.
    for path in list(dakota_files.values()):
        if os.path.exists(path):
            os.remove(path)
5,323,734
async def test_async_setup_entry_hosts(opp, config_entry, config, soco):
    """Test static setup."""
    await setup_platform(opp, config_entry, config)
    # Exactly the statically configured speaker should have been discovered.
    speakers = list(opp.data[DATA_SONOS].discovered.values())
    speaker = speakers[0]
    assert speaker.soco == soco
    media_player = opp.states.get("media_player.zone_a")
    assert media_player.state == STATE_IDLE
5,323,735
def RHS(qmc_data):
    """
    RHS(qmc_data)
    -------------
    We solve A x = b with a Krylov method.  This function extracts b from
    the qmc_data structure by doing a transport sweep with a zero
    scattering term.

    Returns the result of SI_Map applied to an all-zero (Nx, G) source.
    """
    G = qmc_data.G
    Nx = qmc_data.Nx
    # Zero scattering source: the sweep then yields the pure RHS vector b.
    # (Removed unused local Nv = Nx*G from the original.)
    zed = np.zeros((Nx, G))
    bout = SI_Map(zed, qmc_data)
    return bout
5,323,736
def parseThesaurus(eInfo):
    """Return thesaurus objects parsed from each element of *eInfo*.

    Tries to parse every entry with parseJsonDatum; if that fails for any
    reason (blank entries or payloads it cannot handle), falls back to
    mapping None -> "" and leaving other values untouched.

    Args:
        eInfo (pd.Series): raw per-element thesaurus payloads.

    Returns:
        pd.Series: parsed (or passed-through) values.
    """
    assert isinstance(eInfo, pd.Series)
    try:
        res = eInfo.apply(parseJsonDatum)
    # Fixed: the original bare ``except:`` also swallowed SystemExit and
    # KeyboardInterrupt; Exception keeps the intended best-effort fallback.
    except Exception:
        res = eInfo.apply(lambda x: "" if x is None else x)
    return res
5,323,737
def gen_pass(length=8, no_numerical=False, punctuation=False):
    """Generate a random password.

    Every selected character class (letters, optionally digits, optionally
    punctuation) is represented by at least one character whenever the
    requested length allows it; the remaining characters are drawn from the
    union of the selected classes.  Uses a cryptographically strong RNG.

    Parameters
    ----------
    length : int
        The length of the password.
    no_numerical : bool, optional
        If true the password will be generated without 0-9.
    punctuation : bool, optional
        If true the password will be generated with punctuation.

    Returns
    -------
    string
        The generated password.
    """
    rng = random.SystemRandom()
    char_sets = [string.ascii_letters]
    if not no_numerical:
        char_sets.append(string.digits)
    if punctuation:
        char_sets.append(string.punctuation)
    # Fixed: the original length bookkeeping raised ValueError
    # (randint(1, 0)) when length < number of character sets.  Guarantee
    # one char per set as far as the length allows, then fill the rest
    # from the combined alphabet.
    password_chars = [rng.choice(char_set) for char_set in char_sets[:length]]
    all_chars = ''.join(char_sets)
    password_chars += [rng.choice(all_chars)
                       for _ in range(length - len(password_chars))]
    # Shuffle so the guaranteed characters are not clustered at the front.
    rng.shuffle(password_chars)
    return ''.join(password_chars)
5,323,738
def document_version_title(context):
    """Document version title.

    Returns the ``title`` attribute of the supplied context object.
    """
    version_title = context.title
    return version_title
5,323,739
def get_validation_errors(schema, value, validate_invariants=True):
    """
    Validate that *value* conforms to the schema interface *schema*.

    This includes checking for any schema validation errors (using
    `get_schema_validation_errors`). If that succeeds, and
    *validate_invariants* is true, then we proceed to check for any
    declared invariants.

    Note that this does not include a check to see if the *value* actually
    provides the given *schema*.

    :return: If there were any validation errors, either schema or
        invariant, return a two tuple (schema_error_dict,
        invariant_error_list). If there were no errors, returns a
        two-tuple where both members are empty.
    """
    schema_error_dict = get_schema_validation_errors(schema, value)
    invariant_errors = []
    # Only validate invariants if there were no previous errors. Previous
    # errors could be missing attributes which would most likely make an
    # invariant raise an AttributeError.
    if validate_invariants and not schema_error_dict:
        try:
            schema.validateInvariants(value, invariant_errors)
        except Invalid:
            # validateInvariants raises a wrapper error around
            # all the errors it got if it got errors, in addition
            # to appending them to the errors list. We don't want
            # that, we raise our own error.
            pass
    return (schema_error_dict, invariant_errors)
5,323,740
def kruskal_suboptimal_mst(graph):
    """
    Computes the MST of a given graph using Kruskal's algorithm.

    Complexity: O(m*n) - dominated by the cycle check (a BFS, O(n)) run for
    every candidate edge. This implementation does not use union-find.
    Discovered in 1956 by Joseph Kruskal.

    Args:
        graph: object, data structure to hold the graph data.

    Returns:
        A Graph instance representing the MST.
    """
    num_vertices = len(graph.get_vertices())
    # Consider edges in ascending order of weight.
    edges = sorted(graph.get_edges(), key=lambda e: e[2])
    mst = Graph.build(edges=[], directed=False)
    num_mst_edges = 0
    # Fixed: the original loop ran ``while index < num_vertices`` and so
    # only inspected the first n edges of the sorted edge list instead of
    # all m edges, producing an incomplete tree for most graphs (and an
    # IndexError when m < n).  Scan all edges, stopping once the tree has
    # the full n-1 edges.
    for edge in edges:
        if num_mst_edges >= num_vertices - 1:
            break
        [tail, head, __] = graph.split_edge(edge)
        # Adding the edge creates a cycle iff head is already reachable
        # from tail within the partial MST.
        explored = bfs(mst, tail)
        if head not in explored:
            mst.add_edge(edge)
            num_mst_edges += 1
    return mst
5,323,741
async def aenumerate(iterable: AsyncIterable[T], start: int = 0) -> AsyncIterator[Tuple[int, T]]:
    """enumerate for async iterators

    :param iterable: An async iterable
    :param start: The starting value
    """
    __tracebackhide__ = True  # for pytest
    index = start
    async for item in iterable:
        yield index, item
        index += 1
5,323,742
def set_url_for_recrawl(db, url):
    """Set url for recrawl later.

    Clears the 'queued' and 'visited' flags on the Urls document whose
    _id is the hash of *url*.

    Returns:
        True when a matching document was found (find_one_and_update
        returns the pre-update document, or None when nothing matched).
    """
    url_hash = urls.hash(url)
    result = db['Urls'].find_one_and_update({'_id': url_hash},
                                            {'$set': {'queued': False,
                                                      'visited': False}})
    return result is not None
5,323,743
def plotScores(data, palette, pdf):
    """
    Appends one 2D scatter plot per pairwise combination of the columns of
    *data* (e.g. the 3 principal components: PC1 vs PC2, PC1 vs PC3,
    PC2 vs PC3) to an open PDF.

    :Arguments:
        :type data: pandas.core.frame.DataFrame
        :param data: Data frame with the scores to plot (one column per
            component).

        :param palette: palette object providing design colors, unique
            group colors (ugColors) and the combined group name (combName).
            NOTE(review): exact type inferred from usage -- confirm.

        :param pdf: open PdfPages-like object the figures are appended to.
            NOTE(review): inferred from the addToPdf(pdfPages=...) call.

    :Return:
        :rtype PDF: file
        :return PDF: file with one scatter plot per column pair.
    """
    for x, y in list(itertools.combinations(data.columns.tolist(), 2)):
        # Creating a figure handler object
        fh = figureHandler(proj="2d", figsize=(14, 8))
        # Creating title for the figure
        title = "{0} vs {1}".format(x, y)
        # Creating the scatterplot 2D
        scatter.scatter2D(
            ax=fh.ax[0],
            x=list(data[x]),
            y=list(data[y]),
            colorList=palette.design.colors.tolist(),
        )
        # Despine axis
        fh.despine(fh.ax[0])
        fh.makeLegend(ax=fh.ax[0], ucGroups=palette.ugColors, group=palette.combName)
        # Shrinking the plot so everything fits
        fh.shrink()
        # Format Axis
        fh.formatAxis(
            figTitle=title,
            xTitle="Scores on {0}".format(x),
            yTitle="Scores on {0}".format(y),
            grid=False,
        )
        # Adding figure to pdf
        fh.addToPdf(dpi=90, pdfPages=pdf)
5,323,744
def vertical(hfile):
    """Reads psipred output .ss2 file.

    @param hfile psipred .ss2 file (iterable of lines)
    @return secondary structure string (third column of each data line).
    """
    structure = []
    for raw_line in hfile:
        # Skip comment header lines and blank lines.
        if raw_line.startswith('#') or not raw_line.strip():
            continue
        structure.append(raw_line.split()[2])
    return ''.join(structure)
5,323,745
def run(command, use_sudo=False, user='', group='', freturn=False, err_to_out=False, input=None, use_which=True, sumout='', sumerr='', status=0):
    """Dummy executing command on host via ssh or subprocess.

    If use_which is not False, the original run command will be executed
    with the 'which' command, and its returns will be used as new sumout,
    sumerr, status if originals are not given.

    Args:
        command (str): command for executing
        use_sudo (bool): running with sudo prefix if True and current user
            not root, default is False
        user (str): username for sudo -u prefix
        group (str): group for sudo -g prefix
        freturn (bool): return tuple if True, else return str, default False
        err_to_out (bool): redirect stderr to stdout if True, default False
        input (str or tuple of str): str will be flushed to stdin after
            executed command, default is None
        use_which (bool): tries to strip the command line and run 'which'
            for each binary, default is True; works only for unix
        sumout (str): fake string containing all stdout messages, default ''
        sumerr (str): fake string containing all stderr, default ''
        status (int): fake return code of command, default 0

    Return:
        str if freturn is False: string containing all stdout messages
        tuple if freturn is True:
            string containing all stdout messages,
            string containing all stderr,
            int return code of command
    """
    logger = envs.connect.logger
    logger.debug('executing dry-run function')
    logger.debug('arguments for executing and another locals: %s', locals())
    original_command = command
    command = command_patching_for_sudo(command, use_sudo, user, group)
    # logging
    write_message_to_log(command, 'dry-in: ')
    if use_which:
        # separate sudo modificator
        if original_command != command:
            st = command.find(original_command)
            command = command[:st] + '|' + command[st:]
        # Build a 'which <binary>' invocation for each &/|/; separated part.
        ncommand = ''
        command = re.split('\\&|\\||\\;', command)
        for part in command:
            ncommand += '{0} {1}; '.format(
                envs.common.which_binary,
                re.findall(r"[\w']+", part)[0]
            )
        # import current run implementation
        # NOTE(review): the following block is reconstructed as nested
        # inside ``if use_which:`` -- ncommand is only defined here, so it
        # cannot run otherwise; confirm against the original layout.
        try:
            run = envs.common.functions['run']
        except KeyError:
            from operations import run
        if not (sumout and sumerr and status):
            sumout, sumerr, status = run(ncommand, freturn=True, err_to_out=err_to_out, force=True)
        else:
            run(ncommand, err_to_out=err_to_out, force=True)
    if freturn:
        logger.debug('return sumout %s, sumerr %s, status %s', sumout, sumerr, status)
        return (sumout, sumerr, status)
    logger.debug('return sumout %s', sumout)
    return sumout
5,323,746
def decide_play(lst):
    """
    Decide whether the user may continue the game.
    ----------------------------------------------------------------------------
    :param lst: (list) a list storing the input characters.
    :return: (bool) True only when the list holds exactly 4 entries and
        every entry is a single alphabetic character.
    """
    if len(lst) != 4:
        return False
    return all(entry.isalpha() and len(entry) == 1 for entry in lst)
5,323,747
def vocabfile_to_hashdict(vocabfile):
    """
    A basic vocabulary hashing strategy: each word is hashed to its line
    index in the vocabulary file.  Unique hashes are therefore assigned
    sequentially and the mapping is trivially reversible for easy
    re-translation.
    """
    hash_dict = {}
    with open(vocabfile, "rb") as handle:
        for index, raw in enumerate(handle):
            word = raw.decode('utf-8').strip().replace('\n', '')
            hash_dict[word] = index
    return hash_dict
5,323,748
def lnprior(theta, ref_time, fit_qm=False, prior_params=prior_params_default):
    """
    Function to compute the value of ln(prior) for a given set of parameters.

    We compute the prior using fixed definitions for the prior distributions
    of the parameters, allowing some optional parameters for some of them via
    the 'prior_params' dictionary. Except for T0 and P the priors on each of
    the parameters is taken to be independent of each other, and defined in
    the following way:

    - Omega_angle: uniform between 0-180 degrees
    - omega_angle: uniform between -180 - 180 degrees
    - i_angle: uniform between 0 - 90 degrees
    - a_axis: half-normal with optional loc/scale
    - ecc: uniform between 0 - 1
    - period: uniform with optional loc/scale
    - T0: uniform between ref_time and period (this is to restrict the result
      to a single-valued parameter)
    - mu_delta, mu_alpha: normal with optional loc/scale
    - pi_p: halfnormal with optional loc/scale
    - Ddelta_ref, Dalpha_ref: normal with optional loc/scale
    - q_m (only considered if fit_qm=True): halfnormal with optional loc/scale

    INPUT:
    theta: array of parameters (ndim=12 or 13), contains the model parameters
           following the ordering defined by param_list_all (with or without
           q_m at the end)
    ref_time: reference time, used to define the prior on T0 [years]
    fit_qm: whether we are fitting for the mass ratio q_m or not
    prior_params: dictionary containing the optional parameters for some of
                  the model parameters (should be defined as
                  prior_params_default)

    OUTPUT:
    lprior: value of ln(prior) at this position in parameter space
    """
    # unpack the flat parameter vector; ordering must match param_list_all
    if fit_qm:
        Omega_angle, omega_angle, i_angle, a_axis, ecc, period, T0, mu_delta, \
            mu_alpha, pi_p, Ddelta_ref, Dalpha_ref, q_m = theta
    else:
        Omega_angle, omega_angle, i_angle, a_axis, ecc, period, T0, mu_delta, \
            mu_alpha, pi_p, Ddelta_ref, Dalpha_ref = theta
    # independent priors: ln(prior) is the sum of the individual log-pdfs
    lprior = 0
    lprior += st.uniform.logpdf(Omega_angle, loc=0, scale=180)
    lprior += st.uniform.logpdf(omega_angle, loc=-180, scale=360)
    lprior += st.uniform.logpdf(i_angle, loc=0, scale=90)
    lprior += st.halfnorm.logpdf(a_axis, **prior_params['a_axis'])
    lprior += st.uniform.logpdf(ecc, loc=0, scale=1)
    lprior += st.uniform.logpdf(period, **prior_params['period'])
    # T0 prior depends on period: restricted to one orbit after ref_time
    lprior += st.uniform.logpdf(T0, loc=ref_time, scale=period)
    lprior += st.norm.logpdf(mu_delta, **prior_params['mu_delta'])
    lprior += st.norm.logpdf(mu_alpha, **prior_params['mu_alpha'])
    lprior += st.halfnorm.logpdf(pi_p, **prior_params['pi_p'])
    lprior += st.norm.logpdf(Ddelta_ref, **prior_params['Ddelta_ref'])
    lprior += st.norm.logpdf(Dalpha_ref, **prior_params['Dalpha_ref'])
    if fit_qm:
        lprior += st.halfnorm.logpdf(q_m, **prior_params['q_m'])
    return lprior
5,323,749
def doy_to_month(year, doy):
    """
    Convert a day-of-year into the zero-padded two-digit month it falls in,
    taking leap years into account.

    :param year: four-digit year (int or str)
    :param doy: day of the year, 1-366 (int or str)
    :return: two-digit month string, e.g. ``'02'``
    """
    parsed = datetime.datetime.strptime(f'{year} {doy}', '%Y %j')
    return format(parsed.month, '02d')
5,323,750
def process_shot(top, full_prefix):
    """
    Given the top directory and full prefix, return essential info about the shot

    Parameters
    ----------

    top: string
        directory place of the shot

    full_prefix: string
        shot description

    returns: tuple
        shot parameters: cup identifiers, collimator, shot position, dose
        maximum, and the (left, right) window bounds at the 20%/50%/80%
        levels along each of the x, y, z axes
    """
    # decode shot position and cup/collimator identity from the file prefix
    shot_y, shot_z = names_helper.parse_shot( full_prefix )
    radUnit, outerCup, innerCupSer, innerCupNum, coll = names_helper.parse_file_prefix( full_prefix )
    tddose = get_3ddose(top, full_prefix)
    # locate the dose maximum dm and extract the 1-D profiles through it
    sh_x, sh_y, sh_z, dm = dmax_shot(tddose, shot_y, shot_z)
    bx, cx = dmax_curve_x(tddose, shot_y, shot_z)
    by, cy = dmax_curve_y(tddose, shot_y, shot_z)
    bz, cz = dmax_curve_z(tddose, shot_y, shot_z)
    # for k in range(0, len(cx)):
    #    print(bx[k], bx[k+1], cx[k])
    # print("=====================\n")
    # for k in range(0, len(cy)):
    #    print(by[k], by[k+1], cy[k])
    # print("=====================\n")
    # for k in range(0, len(cz)):
    #    print(bz[k], bz[k+1], cz[k])
    # print("=====================\n")
    # window widths at 20%, 50% and 80% of the dose maximum per axis
    xw25 = calc_window(bx, cx, 0.20*dm)
    xw50 = calc_window(bx, cx, 0.50*dm)
    xw75 = calc_window(bx, cx, 0.80*dm)
    yw25 = calc_window(by, cy, 0.20*dm)
    yw50 = calc_window(by, cy, 0.50*dm)
    yw75 = calc_window(by, cy, 0.80*dm)
    zw25 = calc_window(bz, cz, 0.20*dm)
    zw50 = calc_window(bz, cz, 0.50*dm)
    zw75 = calc_window(bz, cz, 0.80*dm)
    return (innerCupSer, innerCupNum, coll, shot_y, shot_z, dm,
            xw25[0], xw25[1], xw50[0], xw50[1], xw75[0], xw75[1],
            yw25[0], yw25[1], yw50[0], yw50[1], yw75[0], yw75[1],
            zw25[0], zw25[1], zw50[0], zw50[1], zw75[0], zw75[1])
5,323,751
def open(filename, debug=False):
    """This function opens an existing object pool, returning a
    :class:`PersistentObjectPool`.

    Raises RuntimeError if the file cannot be opened or mapped.

    :param filename: Filename must be an existing file containing an object
        pool as created by :func:`nvm.pmemlog.create`.
        The application must have permission to open the file and memory map
        it with read/write permissions.
    :return: a :class:`PersistentObjectPool` instance that manages the pool.
        When the pool is opened, if the previous shutdown was not clean the
        pool is cleaned up, including running the 'gc' method.
    """
    # NOTE: deliberately shadows the builtin open() at module level, mirroring
    # the stdlib convention of gzip.open / shelve.open
    log.debug('open: %s, debug=%s', filename, debug)
    # Make sure the file exists.
    # flag='w' presumably means "open existing, read/write" -- confirm against
    # PersistentObjectPool's flag semantics
    return PersistentObjectPool(filename, flag='w', debug=debug)
5,323,752
def test_get_queryset(entry_comment_factory, entry_factory):
    """
    The view should operate on the comments that belong to the journal
    entry specified in the URL.
    """
    entry = entry_factory()
    entry_comment_factory(entry=entry)
    # a comment on a *different* entry, which the queryset must exclude
    entry_comment_factory()
    view = views.EntryCommentListView()
    # simulate the URL kwarg the router would supply
    view.kwargs = {"pk": entry.pk}
    assert list(view.get_queryset()) == list(entry.comments.all())
5,323,753
def dplnckqn(spectral, temperature):
    """Temperature derivative of Planck function in wavenumber domain for photon rate.

    Args:
        | spectral (scalar, np.array (N,) or (N,1)):  wavenumber vector in   [cm^-1]
        | temperature (scalar, list[M], np.array (M,), (M,1) or (1,M)): Temperature in [K]

    Returns:
        | (scalar, np.array[N,M]):  spectral radiant exitance in  q/(s.m^2.cm^-1)

    Raises:
        | No exception is raised, returns None on error.
    """
    # dimensionless ratio c2*nu/T; compute its exponential once and reuse it
    # (the original re-evaluated np.exp for the same argument twice)
    xx = (pconst.c2n * spectral / temperature)
    expxx = np.exp(xx)
    f = xx * expxx / (temperature * (expxx - 1))
    # photon-rate Planck function in the wavenumber domain
    y = pconst.c1qn * spectral ** 2 / (expxx - 1)
    return f * y
5,323,754
def specificity(ground_true, predicted):
    """Computes the specificity (true-negative rate).

    Args:
        ground_true (np.ndarray[bool]): ground-truth mask to be compared with
            the predicted one.
        predicted (np.ndarray[bool]): predicted mask. Should be the same
            dimension as `ground_true`.

    Returns:
        double: the specificity TN / N, i.e. the fraction of actual negatives
        that were correctly predicted as negative.
    """
    # N: number of actual negatives in the ground truth
    N = np.prod(ground_true.shape) - np.sum(ground_true)
    # TN: entries negative in both masks
    TN = np.sum(np.logical_not(ground_true) * np.logical_not(predicted))
    # specificity is defined as TN / N; the original returned the inverted
    # ratio N / TN, which is >= 1 and diverges when TN == 0
    return TN / N
5,323,755
def _get_node_feature_mapper(
    node_feature_mapper_cls: Type[NodeFeatureMapper],
    current_state: FrozenSet[Proposition],
    problem: STRIPSProblem,
) -> NodeFeatureMapper:
    """
    Instantiate a node feature mapper for the given state and problem.

    Node feature mappers depend on both the current state and the goal
    states, so a separate instance is required for each state and planning
    problem.

    Parameters
    ----------
    current_state: the current state
    problem: the STRIPS problem

    Returns
    -------
    NodeFeatureMapper

    Raises
    ------
    RuntimeError: if the requested mapper class is not supported.
    """
    # guard clause: only PropositionInStateAndGoal is supported
    if node_feature_mapper_cls != PropositionInStateAndGoal:
        raise RuntimeError(
            f"Unsupported node feature mapper {node_feature_mapper_cls}"
        )
    # build the mapper for the current state and the problem's goal
    return PropositionInStateAndGoal(
        current_state=current_state, goal_state=problem.goals
    )
5,323,756
def localize(pathToFolder, pathToDictionary, thresholdsAndLengths, support, confidence, allMalBehaviors, expId):
    """
    Localize malicious segments by generating association rules on the maliciously classified files with the tool 'Apyori'

    :param pathToFolder: path to the folder containing the classification results
    :param pathToDictionary: path to the dictionary used to convert the method names of the traces into numerical values
    :param thresholdsAndLengths: list of the lengths to which the traces should be fixed with their regarding classification thresholds
    :param support: the minimum support value that the generated association rules have to fulfill
    :param confidence: the minimum confidence value that the generated association rules have to meet
    :param allMalBehaviors: list of the defined malicious behaviors that were inserted into the benign files
    :param expId: id of the current experiment in the database
    :type pathToFolder: string
    :type pathToDictionary: string
    :type thresholdsAndLengths: list
    :type support: float
    :type confidence: float
    :type allMalBehaviors: list
    :type expId: int
    """
    resultingData = []
    # one mining pass per (threshold, fixed trace length) configuration
    for i in range(len(thresholdsAndLengths)):
        threshold = thresholdsAndLengths[i][0]
        fixedLength = thresholdsAndLengths[i][1]
        # create a .tsv file from the input data, because this is needed for the 'Apyori' tool
        pathToTSVFile = createTSVFile(threshold, fixedLength, pathToFolder, pathToDictionary, len(allMalBehaviors))
        # calculate the assocation rules with the 'Apyori' tool
        rules = calc(support, confidence, pathToTSVFile)
        # write rules to file and check whether the inserted malicious behaviors could be found
        foundMalBehaviors = writeRules(threshold, fixedLength, rules, pathToDictionary, pathToFolder, allMalBehaviors)
        resultingData.append([fixedLength, foundMalBehaviors])
    # persist per-length results for this experiment id
    writeResult(resultingData, pathToFolder, expId)
5,323,757
def curvInterp(curv,p1,p2,size):
    """
    Resample a 2-D curve to *size* points and rescale it so that it spans
    from p1 to p2.

    Args:
        curv: 2D ndarray
            N-by-2 matrix, N points
        p1,p2: list or ndarray
            length = 2, p1 left point, p2 right point
        size: int
            the size of new curv (number of points)
    """
    # the x coordinates must be increasing end-to-end for np.interp
    if curv[0, 0] > curv[-1, 0]:
        print("\033[1;35mError occured. Feature points x coordinates will be exchanged\033[0m")
        curv[0, 0], curv[-1, 0] = curv[-1, 0], curv[0, 0]
    xs = np.linspace(curv[0, 0], curv[-1, 0], size)
    ys = np.interp(xs, curv[:, 0], curv[:, 1])
    # linearly remap both axes so the endpoints coincide with p1 and p2
    xs = (xs - xs[0]) / (xs[-1] - xs[0]) * (p2[0] - p1[0]) + p1[0]
    ys = (ys - ys[0]) / (ys[-1] - ys[0]) * (p2[1] - p1[1]) + p1[1]
    return np.column_stack((xs, ys))
5,323,758
def test_too_many_returns_in_fixture(absolute_path):
    """End-to-End test to check returns count."""
    filename = absolute_path('fixtures', 'complexity', 'wrong_returns.py')
    # run flake8 as a real subprocess so the plugin is exercised end-to-end
    process = subprocess.Popen(
        ['flake8', filename],
        stdout=subprocess.PIPE,
        stderr=subprocess.PIPE,
    )
    stdout, _ = process.communicate()
    # the fixture is expected to trigger exactly one WPS153 violation
    assert stdout.count(b'WPS153') == 1
5,323,759
def create_key_pair(key_pair_name):
    """Create a new EC2 key pair and store its private key to a local .pem file.

    :param key_pair_name: name of the key pair; also used as the output
        file name (``<name>.pem``)
    :return: the raw ``create_key_pair`` response from the EC2 client
    """
    # call the API first so a failed request does not leave an empty file
    response = ec2.create_key_pair(KeyName=key_pair_name)
    key_material = str(response.key_material)
    # context manager guarantees the file is flushed and closed (the
    # original left the handle open)
    with open(f"{key_pair_name}.pem", "w") as pem_outfile:
        pem_outfile.write(key_material)
    print(f"Create Key Pair: {response}")
    return response
5,323,760
async def 秒数3(ctx, zan, la, la2):
    """?秒数 戦闘時間 残HP ひとりめ ふたりめ =持越し秒数がでる フルタイム前提"""
    # NOTE(review): the docstring doubles as the Discord command's help text,
    # so it is intentionally kept in Japanese.
    # Carry-over seconds calculation, assuming a full 90-second battle window.
    # Case 1: the first player attacks last -- remaining HP after the second
    # player's hit (b) determines the fraction of the 90s the first player
    # needed; 20s is added back as carry-over bonus.
    b = int(zan) - int(la2)
    c = 90 - (int(b) / int(la)) * 90 + 20
    if c > 90:
        c = 90  # carry-over cannot exceed the full 90 seconds
    # Case 2 (symmetric): the second player attacks last
    b2 = int(zan) - int(la)
    c2 = 90 - (int(b2) / int(la2)) * 90 + 20
    if c2 > 90:
        c2 = 90
    # reply with both scenarios, seconds rounded up
    await ctx.send(ctx.author.mention + '\nひとりめが後の場合\n' + str(math.ceil(c)) + '秒\nふたりめが後の場合\n' + str(math.ceil(c2)) + "秒")
5,323,761
def chunk_hash(data):
    """
    Return the MD5 digest of *data* as a base64-encoded ASCII string.

    Used to fingerprint a data-stream chunk before storing the hash in mongo.
    Note: ``bytes.encode('base64')`` only existed in Python 2 and raises
    AttributeError on Python 3; ``base64.b64encode`` is the portable
    replacement (it omits the trailing newline the old codec appended).
    """
    import base64  # local import keeps the module's import surface unchanged
    return base64.b64encode(hashlib.md5(data).digest()).decode('ascii')
5,323,762
def jsonify(obj, builtin_types=(int, float, string_t), key=None, keyfilter=None, unknown_type_filter=None):
    """Transforms object making it suitable for json serialization.

    Recursively converts containers, Kombu objects and datetime types into
    JSON-friendly values.

    :param obj: object to convert
    :param builtin_types: types passed through unchanged
    :param key: name of the parent key (used in error messages)
    :param keyfilter: optional predicate; dict keys for which it is falsy
        are dropped
    :param unknown_type_filter: fallback converter for unsupported types;
        if None, a ValueError is raised instead
    """
    from kombu.abstract import Object as KombuDictType
    # recursion helper that carries the configuration through nested calls
    _jsonify = partial(jsonify, builtin_types=builtin_types, key=key,
                       keyfilter=keyfilter,
                       unknown_type_filter=unknown_type_filter)
    if isinstance(obj, KombuDictType):
        obj = obj.as_dict(recurse=True)
    if obj is None or isinstance(obj, builtin_types):
        return obj
    elif isinstance(obj, (tuple, list)):
        return [_jsonify(v) for v in obj]
    elif isinstance(obj, dict):
        return dict((k, _jsonify(v, key=k)) for k, v in items(obj)
                    if (keyfilter(k) if keyfilter else 1))
    elif isinstance(obj, datetime.datetime):
        # See "Date Time String Format" in the ECMA-262 specification.
        r = obj.isoformat()
        if obj.microsecond:
            # truncate microseconds to milliseconds
            r = r[:23] + r[26:]
        if r.endswith('+00:00'):
            r = r[:-6] + 'Z'
        return r
    elif isinstance(obj, datetime.date):
        return obj.isoformat()
    elif isinstance(obj, datetime.time):
        r = obj.isoformat()
        if obj.microsecond:
            r = r[:12]
        return r
    elif isinstance(obj, datetime.timedelta):
        return str(obj)
    else:
        if unknown_type_filter is None:
            raise ValueError(
                'Unsupported type: {0!r} {1!r} (parent: {2})'.format(
                    type(obj), obj, key))
        return unknown_type_filter(obj)
5,323,763
def pack(code, *args):
    """Original struct.pack with the decorator applied.

    Will change the code according to the system's architecture.

    :param code: struct format string (presumably rewritten for the host
        architecture by a decorator not visible here -- TODO confirm)
    :param args: values to pack
    :return: the packed bytes
    """
    return struct.pack(code, *args)
5,323,764
def send_email(
    template,
    context,
    subject="",
    to=None,
    from_email=None,
    bcc=None,
    connection=None,
    attachments=None,
    headers=None,
    alternatives=None,
    cc=None,
    reply_to=None,
):
    """
    Send an email given a template, context, and to email.

    The HTML body is rendered from ``template`` with ``context``; a
    plain-text alternative is derived from it via ``html_to_text`` and both
    parts are sent as a multipart message.

    :param to: a single recipient address or a list of addresses
    :param from_email: sender; defaults to ``email_settings.FROM_EMAIL``
    The remaining parameters are forwarded to ``EmailMultiAlternatives``
    unchanged.
    """
    html_content = render_to_string(template, context)
    text_content = html_to_text(html_content)
    # normalize a single address into the list the mail API expects
    send_to = to if isinstance(to, list) else [to]
    from_email = from_email if from_email is not None else email_settings.FROM_EMAIL
    msg = EmailMultiAlternatives(
        subject,
        text_content,
        from_email,
        send_to,
        bcc,
        connection,
        attachments,
        headers,
        alternatives,
        cc,
        reply_to,
    )
    msg.attach_alternative(html_content, "text/html")
    msg.send(fail_silently=False)
5,323,765
def find_faces(image: Image) -> Iterable[CropData]:
    """
    Get a list of the location of each face found in an image.

    Uses OpenCV's Haar-cascade frontal-face detector; returns the rectangles
    produced by ``detectMultiScale``.
    """
    detector = cv2.CascadeClassifier(
        str(
            MODELS_DIR
            / "haarcascades"
            / "haarcascade_frontalface_default.xml"
        )
    )
    # Haar cascades operate on single-channel images
    grayscale_image: Image = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
    return detector.detectMultiScale(
        grayscale_image, scaleFactor=1.3, minNeighbors=5
    )
5,323,766
def getpath():
    """
    Resolve the directory that contains the present file.

    :return: absolute, symlink-resolved filepath of this file's directory.
    :rtype: str
    """
    here = os.path.dirname(__file__)
    return os.path.realpath(os.path.join(os.getcwd(), here))
5,323,767
async def test_websocket_transport(unused_tcp_port_factory):
    """ Test WebSocket Transport: duplex round-trip, close semantics. """
    port = unused_tcp_port_factory()
    conn_queue = asyncio.Queue()
    transport = websocket.WebSocketInboundTransport(conn_queue, port=port)
    transport_task = create_task(transport.accept())
    out_conn = await websocket.WebSocketOutboundConnection.open(
        serviceEndpoint='http://localhost:%d/' % port
    )
    assert out_conn.is_duplex()
    in_conn = await conn_queue.get()
    assert in_conn.is_duplex()
    # round-trip a message in each direction
    await out_conn.send(b'test')
    async for msg in in_conn.recv():
        assert msg == b'test'
        break
    await in_conn.send(b'test2')
    async for msg in out_conn.recv():
        assert msg == b'test2'
        break
    await asyncio.sleep(.05)
    await out_conn.close()
    await in_conn.close()
    # sending on a closed connection must raise on either end
    with pytest.raises(ConnectionClosed):
        await out_conn.send(b'test')
    with pytest.raises(ConnectionClosed):
        await in_conn.send(b'test')
    transport_task.cancel()
    # give the cancelled accept() task time to unwind
    await asyncio.sleep(.25)
5,323,768
def connect_nodes(contents: List[str]) -> path.NODES:
    """Build the cave system's adjacency map from ``start-end`` input pairs.

    Args:
        contents (List[str]): the file contents, one edge per line

    Returns:
        path.NODES: a mapping of each cave to the caves reachable from it
    """
    adjacency: path.NODES = defaultdict(list)
    for edge in contents:
        left, right = edge.split('-')
        # the caves are connected bidirectionally, so record both directions
        adjacency[left].append(right)
        adjacency[right].append(left)
    return adjacency
5,323,769
def create_clients(KEY, SECRET):
    """
    Creates the necessary resources and clients that will be used to create
    the redshift cluster.

    :param KEY: AWS access key id
    :param SECRET: AWS secret access key
    :return: ec2 resource, iam client and redshift client, all bound to
        the us-west-2 region
    """
    ec2 = boto3.resource(
        'ec2',
        region_name="us-west-2",
        aws_access_key_id=KEY,
        aws_secret_access_key=SECRET
    )
    iam = boto3.client(
        'iam',
        aws_access_key_id=KEY,
        aws_secret_access_key=SECRET,
        region_name='us-west-2'
    )
    redshift = boto3.client(
        'redshift',
        region_name="us-west-2",
        aws_access_key_id=KEY,
        aws_secret_access_key=SECRET
    )
    user_feedback('Clients Received.')
    return ec2, iam, redshift
5,323,770
def create_app() -> FastAPI:
    """Create and do initial configuration of fastapi app.

    Bootstraps the database before constructing the FastAPI instance.
    """
    db = Database()
    try:
        db.create_database()
    except Exception:  # pylint: disable=broad-except
        # NOTE(review): any bootstrap failure aborts the process silently --
        # consider logging the exception before exiting
        sys.exit(1)
    app_ = FastAPI()
    # Add routers
    # app_.include_router(project_controller.router, prefix=API_VERSION_1)
    # app_.include_router(user_controller.router, prefix=API_VERSION_1)
    # app_.include_router(model_controller.router, prefix=API_VERSION_1)
    # app_.include_router(data_source_controller.router, prefix=API_VERSION_1)
    # Overide exception handlers
    # Override default class method
    return app_
5,323,771
def _md_fix(text): """ sanitize text data that is to be displayed in a markdown code block """ return text.replace("```", "``[`][markdown parse fix]")
5,323,772
def play_sequence(): """Play the entire sequence for a given round by animating the buttons""" #Change button label change_label("Playing!") #Without delay, all buttons will animate at the same time. The delay adds the 'time' variable to each .after() delay = 0 for value in game_sequence: if value == 1: root.after(delay, lambda:animate(white_button)) elif value == 2: root.after(delay, lambda:animate(magenta_button)) elif value == 3: root.after(delay, lambda:animate(cyan_button)) elif value == 4: root.after(delay, lambda:animate(yellow_button)) #Increment delay for next iteration of loop delay += time
5,323,773
def nameOrIdentifier(token):
    """
    Determine if the given object is a name or an identifier, and return the
    textual value of that name or identifier.

    Handles what appear to be sqlparse tokens: Identifier groups, plain Name
    tokens, single-quoted string literals (unquoted via _destringify) and
    keywords.

    @rtype: L{str}
    """
    if isinstance(token, Identifier):
        return token.get_name()
    elif token.ttype == Name:
        return token.value
    elif token.ttype == String.Single:
        # strip quoting/escaping from the string literal
        return _destringify(token.value)
    elif token.ttype == Keyword:
        return token.value
    else:
        raise ViolatedExpectation("identifier or name", repr(token))
5,323,774
def exact_kinematic_aug_diff_f(t, y, args_tuple):
    """
    Augmented ODE right-hand side: evaluates the wrapped diff_f and returns,
    alongside the dynamics, the divergence (trace term) and a kinetic
    regularisation term.

    NOTE(review): assumes diff_f(t, y, (params,)) returns
    (dynamics, scales, translations) and that the exact divergence equals
    jnp.sum(scales) -- confirm against diff_f's definition.
    """
    # only the state component of the augmented tuple is propagated
    _y, _, _ = y
    _params, _key, diff_f = args_tuple
    aug_diff_fn = lambda __y: diff_f(t, __y, (_params,))
    _f, scales, translations = aug_diff_fn(_y)
    # divergence contribution
    trace = jnp.sum(scales)
    # third output: kinetic/regularisation energy of scales and translations
    return _f, trace, jnp.sum(scales**2) + jnp.sum(translations**2)
5,323,775
def inverse_hybrid_transform(value):
    """
    Transform back from the IRAF-style hybrid log values.

    This takes the hybrid log value and transforms it back to the
    actual value. That value is returned. Unlike the hybrid_transform
    function, this works on single values not a numpy array. That is
    because one is not going to have a data array in the hybrid
    transformation form.

    Parameters
    ----------
        value :  A real number to be transformed back from the hybrid
                 log scaling

    Returns
    -------
        newvalue :  The associated value that maps to the given hybrid
                    log value.
    """
    # remember the sign and work with the magnitude only
    sign = -1.0 if value < 0. else +1.0
    magnitude = abs(value)
    # below 1 the mapping is linear (x10); above it is logarithmic (10**x)
    if magnitude < 1.0:
        return sign * (10. * magnitude)
    return sign * (10. ** magnitude)
5,323,776
def regress_trend_channel(arr): """ 通过arr计算拟合曲线及上下拟合通道曲线,返回三条拟合曲线,组成拟合通道 :param arr: numpy array :return: y_below, y_fit, y_above """ # 通过ABuRegUtil.regress_y计算拟合曲线和模型reg_mode,不使用缩放参数zoom reg_mode, y_fit = ABuRegUtil.regress_y(arr, zoom=False) reg_params = reg_mode.params x = np.arange(0, len(arr)) a = reg_params[0] b = reg_params[1] # 通过argmin寻找出原始序列和拟合序列差值的最小点,差值最小代表点位离拟合曲线远,eg: 100 - 80 < 100 - 90 min_ind = (arr.T - y_fit).argmin() # 根据a, b计算出below值, 注意这里是差,eg: below:100 - 80 = 20 below = x[min_ind] * b + a - arr[min_ind] # 计算x * b + a但- below,即拟合曲线保持相同的斜率整体下移below值 y_below = x * b + a - below # 通过argmax寻找出原始序列和拟合序列差值的最大点,差值最小代表点位离拟合曲线远,eg: 120 - 100 > 120 - 110 max_ind = (arr.T - y_fit).argmax() # 根据a, b计算出above值, 注意这里是差,eg: above 100 - 120 = -20, 即above是负数 above = x[max_ind] * b + a - arr[max_ind] # 计算x * b + a但整天- above,由于above是负数,即相加 即拟合曲线保持相同的斜率整体上移above值 y_above = x * b + a - above return y_below, y_fit, y_above
5,323,777
def test_graceful_exit_outside_notebook():
    """Test if running interact outside of a notebook fails gracefully."""
    try:
        import bokeh
        tpf = KeplerTargetPixelFile(filename_tpf_one_center)
        # outside a notebook, interact() should return None rather than raise
        result = tpf.interact()
        assert (result is None)
    except ImportError:
        # bokeh is an optional dependency
        pass
5,323,778
def assert_tap_mysql_row_count_equals(tap_mysql_query_runner_fn: callable, target_query_runner_fn: callable):
    """Count the rows in tap mysql and in a target database and compare row counts.

    Both queries produce one (table_name, row_count) pair per table, ordered
    identically, so the result sets can be compared directly.
    """
    row_counts_in_tap_mysql = tap_mysql_query_runner_fn("""
    SELECT tbl, row_count
      FROM (     SELECT 'address' AS tbl, COUNT(*) AS row_count FROM address
            UNION SELECT 'area_code' AS tbl, COUNT(*) AS row_count FROM area_code
            UNION SELECT 'order' AS tbl, COUNT(*) AS row_count FROM `order`
            UNION SELECT 'weight_unit' AS tbl, COUNT(*) AS row_count FROM weight_unit) x
    ORDER BY tbl, row_count
    """)
    row_counts_in_target_postgres = target_query_runner_fn("""
    SELECT tbl, row_count
      FROM (     SELECT 'address' AS tbl, COUNT(*) AS row_count FROM mysql_grp24.address
            UNION SELECT 'area_code' AS tbl, COUNT(*) AS row_count FROM mysql_grp24.area_code
            UNION SELECT 'order' AS tbl, COUNT(*) AS row_count FROM mysql_grp24.order
            UNION SELECT 'weight_unit' AS tbl, COUNT(*) AS row_count FROM mysql_grp24.weight_unit) x
    ORDER BY tbl, row_count
    """)
    # Compare the results from source and target databases
    assert row_counts_in_target_postgres == row_counts_in_tap_mysql
5,323,779
def answers(provider):
    """Default answers data for copier.

    :param provider: charm provider, stored under ``charm_type``
    :return: dict of default copier answers
    """
    # Note: "TestCharm" can't be used as the class name -- it collides with
    # the name of the default unit-test class
    return {
        "class_name": "TemplateTestCharm",
        "charm_type": provider,
    }
5,323,780
def test_register_boundary():
    """capsys -- object created by pytest to capture stdout and stderr"""
    # All five boundary cases run the same pipeline; iterate instead of the
    # original five copy-pasted subprocess invocations.
    for stdin_case in (expected_in1, expected_in2, expected_in3,
                       expected_in4, expected_in5):
        output = subprocess.run(
            ['python', '-m', 'qbay'],
            stdin=stdin_case,
            capture_output=True,
        ).stdout.decode()
        # remove all whitespace / control characters before comparing
        convert = re.sub(r'[\x00-\x1f]+', '', output)
        print('outputs', convert)
        assert convert.strip() == expected_succeeded_out.strip()
5,323,781
def pytest_configure(config):
    """Register the custom ``test_number`` marker so pytest does not warn."""
    config.addinivalue_line('markers', 'test_number(number): test case number')
5,323,782
def encode(ds, is_implicit_vr, is_little_endian): """Encode a *pydicom* :class:`~pydicom.dataset.Dataset` `ds`. Parameters ---------- ds : pydicom.dataset.Dataset The dataset to encode is_implicit_vr : bool The element encoding scheme the dataset will be encoded with, ``True`` for implicit VR, ``False`` for explicit VR. is_little_endian : bool The byte ordering the dataset will be encoded in, ``True`` for little endian, ``False`` for big endian. Returns ------- bytes or None The encoded dataset as :class:`bytes` (if successful) or ``None`` if the encoding failed. """ # pylint: disable=broad-except fp = DicomBytesIO() fp.is_implicit_VR = is_implicit_vr fp.is_little_endian = is_little_endian try: write_dataset(fp, ds) except Exception as ex: LOGGER.error("pydicom.write_dataset() failed:") LOGGER.error(ex) fp.close() return None bytestring = fp.parent.getvalue() fp.close() return bytestring
5,323,783
def DetectGae():
    """Determine whether or not we're running on GAE.

    This is based on:
      https://developers.google.com/appengine/docs/python/#The_Environment

    Returns:
      True iff we're running on GAE.
    """
    server_software = os.environ.get('SERVER_SOFTWARE', '')
    # str.startswith accepts a tuple of prefixes -- one call instead of an
    # `or` chain
    return server_software.startswith(('Development/', 'Google App Engine/'))
5,323,784
def set_static_assets(all_objects, log):
    """Save reloading the same thing over and over.

    Pre-loads fonts for text objects and pre-renders (rotation + opacity)
    the source images for image objects, mutating each object in place.
    """
    new_objects = []
    if len(all_objects) > 0:
        try:
            from PIL import Image, ImageDraw, ImageFont, ImageChops
        except ImportError:
            log.import_error('Pillow')
        for obj in all_objects:
            if obj._type == 'text':
                try:
                    # replace the font name with a loaded font object
                    obj.font = ImageFont.truetype(obj.font, obj.size)
                except OSError:
                    if obj.font == 'default':
                        obj.font = ImageFont.load_default()
                    else:
                        log.error(f"Font '{obj.font}' not found.")
            if obj._type == 'image':
                source = Image.open(obj.src)
                source = source.convert('RGBA')
                source = source.rotate(obj.rotate, expand=True)
                # bake the requested opacity into the alpha channel
                source = ImageChops.multiply(source,
                                             Image.new('RGBA',
                                                       source.size,
                                                       (255, 255, 255,
                                                        int(obj.opacity * 255))
                                                       )
                                             )
                # replace the path with the pre-rendered image
                obj.src = source
            new_objects.append(obj)
    return new_objects
5,323,785
def get_side_effects_from_sider(meddra_all_se_file):
    """ Get the most frequent side effects from SIDER """
    pubchem_to_umls = {}
    umls_to_name = {}
    with open(meddra_all_se_file, 'r') as med_fd:
        for raw_line in med_fd:
            fields = raw_line.strip().split('\t')
            # strip the 'CID' prefix and leading zeros from the compound id
            compound_id = str(int(fields[1].split('CID')[1]))
            concept_type = fields[3].upper()
            umls_id = fields[4].upper()
            umls_term = fields[5].lower()
            # keep only MedDRA preferred terms (PT)
            if concept_type != 'PT':
                continue
            pubchem_to_umls.setdefault(compound_id, set()).add(umls_id)
            umls_to_name[umls_id] = umls_term
    print('NUMBER OF PUBCHEM IDS ASSOCIATED WITH UMLS: {}'.format(len(pubchem_to_umls)))
    return pubchem_to_umls, umls_to_name
5,323,786
def beam_constraint_I_design_jac(samples):
    """
    Jacobian with respect to the design variables (w, t).
    Desired behavior is when constraint is less than 0.

    :param samples: array of shape (6, M) whose rows are X, Y, E, R, w, t
    :return: (M, 2) array with the negated partial derivatives d/dw, d/dt
    """
    X, Y, E, R, w, t = samples  # E is not used in the gradient
    L = 100
    # partial derivatives of the stress constraint w.r.t. width and thickness
    dgdw = (L * (12 * t * X + 6 * w * Y)) / (R * t ** 2 * w ** 3)
    dgdt = (L * (6 * t * X + 12 * w * Y)) / (R * t ** 3 * w ** 2)
    # negate: desired behaviour is constraint < 0
    return -np.stack((dgdw, dgdt), axis=1)
5,323,787
def get_env_var(name, default_value=None):
    """Get the value of an environment variable, if defined.

    :param name: environment variable name
    :param default_value: value returned when the variable is unset
    :raises RuntimeError: when the variable is unset and no default was given
    """
    value = os.environ.get(name)
    if value is not None:
        return value
    if default_value is not None:
        return default_value
    raise RuntimeError('Required environment variable %s not found' % name)
5,323,788
def save_model(model: pl.LightningModule):
    """Saves the current model state_dict to disk under STATE_DICT_DIR.

    The file name combines the model's class name with a timestamp.

    :param model: model
    :type model: pl.LightningModule
    """
    state_dict_dir = STATE_DICT_DIR
    # make sure the target directory exists
    if not os.path.isdir(state_dict_dir):
        pathlib.Path(state_dict_dir).mkdir(parents=True, exist_ok=True)
    state_dict_name = f"{model.__class__.__name__}_{datetime.now().strftime('%Y-%m-%d_%H-%M-%S')}.pt"
    state_dict_path = os.path.join(state_dict_dir, state_dict_name)
    print("Saving model at:", state_dict_path)
    torch.save(model.state_dict(), state_dict_path)
5,323,789
def check_usage_quota(vol_size_in_MB, tenant_uuid, datastore_url, privileges, vm_datastore_url):
    """ Check if the volume can be created without violating the quota.

    :param vol_size_in_MB: requested volume size in MB
    :param tenant_uuid: tenant owning the volume
    :param datastore_url: datastore the volume would be created on
    :param privileges: tenant privilege record (holds the usage quota)
    :param vm_datastore_url: datastore of the requesting VM
    :return: True if creation stays within the quota (or no quota applies)
    """
    if privileges:
        error_msg, total_storage_used = get_total_storage_used(tenant_uuid, datastore_url, vm_datastore_url)
        if error_msg:
            # cannot get the total_storage_used, to be safe, return False
            return False
        usage_quota = privileges[auth_data_const.COL_USAGE_QUOTA]
        logging.debug("total_storage_used=%d, usage_quota=%d", total_storage_used, usage_quota)
        # if usage_quota which read from DB is 0, which means
        # no usage_quota, function should return True
        if usage_quota == 0:
            return True
        return vol_size_in_MB + total_storage_used <= usage_quota
    else:
        # no privileges
        return True
5,323,790
def _css_to_rect(css):
    """
    Convert a tuple in (top, right, bottom, left) order to a dlib `rect` object

    :param css: plain tuple representation of the rect in (top, right, bottom, left) order
    :return: a dlib `rect` object
    """
    # dlib.rectangle takes (left, top, right, bottom); the original passed
    # (bottom, right, top, left), producing an inverted rectangle
    return dlib.rectangle(css[3], css[0], css[1], css[2])
5,323,791
def parse_arguments():
    """
    Parse the command-line arguments with argparse.

    :return: the parsed ``argparse.Namespace`` (note: not the parser itself)
    """
    ap = standard_parser()
    # register the tool-specific argument groups on the shared parser
    add_annotations_arguments(ap)
    add_task_arguments(ap)
    return ap.parse_args()
5,323,792
def get_alert_contacts(alert_contact_name: Optional[str] = None,
                       email: Optional[str] = None,
                       ids: Optional[Sequence[str]] = None,
                       name_regex: Optional[str] = None,
                       output_file: Optional[str] = None,
                       phone_num: Optional[str] = None,
                       opts: Optional[pulumi.InvokeOptions] = None) -> AwaitableGetAlertContactsResult:
    """
    This data source provides the Arms Alert Contacts of the current Alibaba Cloud user.

    > **NOTE:** Available in v1.129.0+.

    ## Example Usage

    Basic Usage

    ```python
    import pulumi
    import pulumi_alicloud as alicloud

    ids = alicloud.arms.get_alert_contacts()
    pulumi.export("armsAlertContactId1", ids.contacts[0].id)
    name_regex = alicloud.arms.get_alert_contacts(name_regex="^my-AlertContact")
    pulumi.export("armsAlertContactId2", name_regex.contacts[0].id)
    ```


    :param str alert_contact_name: The name of the alert contact.
    :param str email: The email address of the alert contact.
    :param Sequence[str] ids: A list of Alert Contact IDs.
    :param str name_regex: A regex string to filter results by Alert Contact name.
    :param str output_file: File name where the data source results are saved.
    :param str phone_num: The mobile number of the alert contact.
    """
    # marshal the filter arguments for the provider invoke call
    __args__ = dict()
    __args__['alertContactName'] = alert_contact_name
    __args__['email'] = email
    __args__['ids'] = ids
    __args__['nameRegex'] = name_regex
    __args__['outputFile'] = output_file
    __args__['phoneNum'] = phone_num
    if opts is None:
        opts = pulumi.InvokeOptions()
    if opts.version is None:
        opts.version = _utilities.get_version()
    __ret__ = pulumi.runtime.invoke('alicloud:arms/getAlertContacts:getAlertContacts', __args__, opts=opts, typ=GetAlertContactsResult).value

    return AwaitableGetAlertContactsResult(
        alert_contact_name=__ret__.alert_contact_name,
        contacts=__ret__.contacts,
        email=__ret__.email,
        id=__ret__.id,
        ids=__ret__.ids,
        name_regex=__ret__.name_regex,
        names=__ret__.names,
        output_file=__ret__.output_file,
        phone_num=__ret__.phone_num)
5,323,793
def read_response(rfile, request_method, body_size_limit, include_body=True):
    """
    Return an (httpversion, code, msg, headers, content) tuple, carried in a
    Response object.

    By default, both response header and body are read.
    If include_body=False is specified, content may be one of the
    following:
    - None, if the response is technically allowed to have a response body
    - "", if the response must not have a response body (e.g. it's a
    response to a HEAD request)
    """
    line = rfile.readline()
    # Possible leftover from previous message
    if line == b"\r\n" or line == b"\n":
        line = rfile.readline()
    if not line:
        raise HttpErrorConnClosed(502, "Server disconnect.")
    parts = parse_response_line(line)
    if not parts:
        raise HttpError(502, "Invalid server response: %s" % repr(line))
    proto, code, msg = parts
    httpversion = parse_http_protocol(proto)
    if httpversion is None:
        raise HttpError(502, "Invalid HTTP version in line: %s" % repr(proto))
    headers = read_headers(rfile)
    if headers is None:
        raise HttpError(502, "Invalid headers.")
    if include_body:
        content = read_http_body(
            rfile, headers, body_size_limit, request_method, code, False
        )
    else:
        # if include_body==False then a None content means the body should be
        # read separately
        content = None
    return Response(httpversion, code, msg, headers, content)
5,323,794
def epi_approx_tiramisu(image_shape: tuple, num_classes: int,
    class_weights=None,
    initial_filters: int=48,
    growth_rate: int=16,
    layer_sizes: list=[4, 5, 7, 10, 12],
    bottleneck_size: int=15,
    dropout: float=0.2,
    learning_rate: float=1e-3,
    momentum: float=0.75,
):
    """
    Build a Tiramisu model that estimates Epistemic uncertainty.

    Args:
        image_shape: the image shape to create the model for
        num_classes: the number of classes to segment for (e.g. c)
        class_weights: the weights for each class
        initial_filters: the number of filters in the first convolution layer
        growth_rate: the growth rate to use for the network (e.g. k)
        layer_sizes: a list with the size of each dense down-sample block,
            reversed to determine the size of the up-sample blocks
        bottleneck_size: the number of convolutional layers in the bottleneck
        dropout: the dropout rate to use in dropout layers
        learning_rate: the learning rate for the RMSprop optimizer
        momentum: the momentum for the exponential moving average

    Returns:
        a compiled model of the Tiramisu architecture + Epistemic approximation
    """
    # Core Tiramisu trunk producing per-pixel logits.
    base_inputs, base_logits = build_tiramisu(image_shape, num_classes,
        initial_filters=initial_filters,
        growth_rate=growth_rate,
        layer_sizes=layer_sizes,
        bottleneck_size=bottleneck_size,
        dropout=dropout,
    )
    # Turn the logits into class probabilities and wrap the trunk as its
    # own named sub-model so it can be referenced in the compile dicts.
    probabilities = Activation('softmax')(base_logits)
    tiramisu = Model(inputs=[base_inputs], outputs=[probabilities], name='tiramisu')

    # Outer model: feed fresh inputs through the trunk and track an
    # exponential moving average of the softmax output to approximate a
    # Monte Carlo simulation over predictions.
    mc_inputs = Input(image_shape)
    predictions = tiramisu(mc_inputs)
    running_mean = MovingAverage(momentum=momentum)(predictions)
    # Epistemic uncertainty is the entropy of the averaged predictions.
    uncertainty = Entropy(name='entropy')(running_mean)

    model = Model(inputs=[mc_inputs],
                  outputs=[predictions, uncertainty])
    # Only the segmentation head ('tiramisu') carries a loss/metric; the
    # entropy output is a pure uncertainty read-out.
    model.compile(
        optimizer=RMSprop(lr=learning_rate),
        loss={'tiramisu': build_categorical_crossentropy(class_weights)},
        metrics={'tiramisu': [build_categorical_accuracy(weights=class_weights)]},
    )
    return model
5,323,795
def matrix_modinv(matrix, m):
    """Return inverse of the matrix modulo m.

    Uses the adjugate identity inv(A) = adj(A) / det(A): the float inverse
    is rescaled by det(A) to recover (near-)integer adjugate entries, then
    multiplied by the modular inverse of |det(A)| mod m, with the sign of
    the determinant folded back in. Entries are not reduced mod m here.
    """
    det = int(round(linalg.det(matrix)))
    # Multiplication order mirrors the scalar/matrix product exactly to
    # keep floating-point rounding identical.
    scaled = modinv(abs(det), m) * linalg.inv(matrix)
    return scaled * det * sign(det)
5,323,796
def watch(
    ctx, settings_file: click.Path, output_dir: Optional[click.Path], clean: bool
):
    # NOTE: this docstring doubles as the CLI help text — kept verbatim.
    """Watch input_dir and rebuild when changes are detected."""
    ctx.ensure_object(dict)
    ctx.obj = populate_context(settings_file, output_dir)

    # Build the site from the resolved settings and attach a dispatcher
    # that watches for filesystem changes.
    site = Site.from_settings_file(ctx.obj["settings_file"], ctx.obj["output_dir"])
    watcher = MudiDispatcher(site)

    # Optionally wipe previous output, do one initial full build, then
    # block in the watch loop.
    if clean:
        site.clean()
    site.build()
    watcher.watch()
5,323,797
def _check_java_jar_classes(sources, classes_dir):
    """Check if all the classes are generated into classes_dir completely.

    Polls classes_dir (up to 3 passes, 0.5s apart) until every expected
    top-level ``.class`` file derived from *sources* is present. Returns
    silently on success (or when nothing is expected); otherwise logs the
    two lists and aborts via ``console.error_exit``.

    Args:
        sources: paths of the input source files (assumed ``.java`` —
            the 5-char extension is stripped below; TODO confirm).
        classes_dir: directory the compiler emits ``.class`` files into.
    """
    # Only sources whose basename starts with an uppercase letter are
    # expected to produce a same-named public class (Java convention).
    sources = sorted([os.path.basename(s) for s in sources])
    sources = [s for s in sources if s[0].isupper()]
    # Strip the '.java' suffix (5 chars) to form the expected class files.
    classes = ['%s.class' % s[:-5] for s in sources]
    if not classes:
        return
    generated_classes = []
    # `paths` remembers class files already seen so repeated walks across
    # retries only append newly appeared files to `generated_classes`.
    paths = set()
    retry = 0
    while retry < 3:
        for dir, subdirs, files in os.walk(classes_dir):
            for f in files:
                if f.endswith('.class'):
                    f = os.path.relpath(os.path.join(dir, f), classes_dir)
                    if f not in paths:
                        paths.add(f)
                        name = os.path.basename(f)
                        # Skip inner/anonymous classes such as Foo$Bar.class.
                        if '$' not in name:
                            generated_classes.append(name)
        generated_classes.sort()
        # Two-pointer scan over the two sorted lists: advance i only on a
        # match; success once every expected class has been matched in order.
        i, j = 0, 0
        while j != len(generated_classes):
            if classes[i] == generated_classes[j]:
                i += 1
                if i == len(classes):
                    return
            j += 1
        # Not all classes present yet — give the compiler a moment and retry.
        time.sleep(0.5)
        retry += 1
    console.debug('Classes: %s Generated classes: %s' % (classes, generated_classes))
    console.error_exit('Missing class files in %s' % classes_dir)
5,323,798
def hexStringToRGB(hex):
    """
    Converts hex color string to RGB values.

    :param hex: color string in format: #rrggbb or rrggbb with 8-bit
        values in hexadecimal system
    :return: tuple containing the RGB color values as ints from 0 to 255
        each, or None if the string is not a valid 6-digit hex color
    """
    # NOTE: the parameter name shadows the builtin hex(); kept for
    # backward compatibility with existing callers.
    # startswith() also handles the empty string (the original indexed
    # hex[0] and crashed on "").
    digits = hex[1:] if hex.startswith("#") else hex
    if len(digits) != 6:
        return None
    try:
        channels = bytearray.fromhex(digits)
    except ValueError:
        # Non-hex characters: report failure the same way as a bad length
        # instead of propagating an exception to the caller.
        return None
    return channels[0], channels[1], channels[2]
5,323,799