content — string (lengths 22 to 815k)
id — int64 (values 0 to 4.91M)
def calc_c_o(row):
    """C or O excess.

    if (C/O > 1): excess = log10 [(YC/YH) - (YO/YH)] + 12
    if C/O < 1:   excess = log10 [(YO/YH) - (YC/YH)] + 12

    where
        YC = X(C12)/12 + X(C13)/13
        YO = X(O16)/16 + X(O17)/17 + X(O18)/18
        YH = XH/1.00794
    """
    yh = row['H'] / 1.00794
    yc = row['C12'] / 12. + row['C13'] / 13.
    yo = row['O16'] / 16. + row['O17'] / 17. + row['O18'] / 18.
    if row['CO'] > 1:
        excess = np.log10((yc / yh) - (yo / yh)) + 12.
    else:
        excess = np.log10((yo / yh) - (yc / yh)) + 12.
    return excess
5,338,300
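A minimal usage sketch for calc_c_o above, assuming np is NumPy and that the row is a pandas Series carrying the columns the function reads (H, C12, C13, O16, O17, O18 mass fractions plus a precomputed CO ratio); the numbers below are purely illustrative.

import numpy as np
import pandas as pd

# Hypothetical mass fractions for a carbon-rich row (C/O > 1).
row = pd.Series({
    'H': 0.7, 'C12': 3.0e-3, 'C13': 4.0e-5,
    'O16': 1.0e-3, 'O17': 4.0e-7, 'O18': 2.0e-6,
    'CO': 2.0,  # C/O > 1 selects the carbon-excess branch
})
print(calc_c_o(row))  # log10(YC/YH - YO/YH) + 12 for this row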
def trip_duration_stats(df):
    """Displays statistics on the total and average trip duration."""

    print('\nCalculating Trip Duration...\n')
    start_time = time.time()

    # display total travel time
    total_travel_time = df["Trip Duration"].sum()
    print("the total travel time in hours:", total_travel_time/3600)

    # display mean travel time
    mean_travel_time = df["Trip Duration"].mean()
    print("the mean travel time in hours:", mean_travel_time/3600)

    print("\nThis took %s seconds." % (time.time() - start_time))
    print('-'*40)
5,338,301
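A toy call of trip_duration_stats above (a sketch; it assumes the function and the time module are in scope, and only needs a "Trip Duration" column in seconds).

import time
import pandas as pd

df = pd.DataFrame({"Trip Duration": [3600, 7200, 5400]})  # seconds
trip_duration_stats(df)  # reports 4.5 total hours and a 1.5 hour mean for this frame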
def create_job_from_file(job_file):
    """Creates a job from a JSON job specification.

    :param job_file: Path to job file.
    :type job_file: str
    :returns: Job object of specified type.
    """
    logger.info("Creating Job from {}.".format(job_file))
    with open(job_file) as f:
        params = json.loads(f.read())
    try:
        if not params['type'] in job_types:
            raise utils.JobDescriptionValueError(
                'Job type {} is not valid.'.format(params['type']))
    except KeyError as e:
        raise utils.JobDescriptionKeyError(e.message)
    params['job_file'] = job_file
    return job_types[params['type']](params)
5,338,302
def test_user_config_json_error_conflict():
    """Test that @arg.user_config_json causes CommandLineTool.run() to exit
    cleanly with return value 1 if both user_config (-c) and
    --user-config-json parameters are given
    """

    class T(CommandLineTool):
        """Test class"""

        @arg.user_config
        @arg.user_config_json()
        @arg()
        def t(self):
            """t"""

    error_conflict_arg = ["t", "-c", "userconfkey=val",
                          "--user-config-json", '{"foo":"bar"}']
    test_class = T("avn")
    ret = test_class.run(args=error_conflict_arg)
    assert ret == 1
5,338,303
def compile_insert_unless_conflict(
    stmt: irast.InsertStmt,
    typ: s_objtypes.ObjectType, *,
    ctx: context.ContextLevel,
) -> irast.OnConflictClause:
    """Compile an UNLESS CONFLICT clause with no ON

    This requires synthesizing a conditional based on all the exclusive
    constraints on the object.
    """
    pointers = _get_exclusive_ptr_constraints(typ, ctx=ctx)
    obj_constrs = typ.get_constraints(ctx.env.schema).objects(ctx.env.schema)

    select_ir, always_check, _ = compile_conflict_select(
        stmt, typ,
        constrs=pointers,
        obj_constrs=obj_constrs,
        parser_context=stmt.context, ctx=ctx)

    return irast.OnConflictClause(
        constraint=None, select_ir=select_ir, always_check=always_check,
        else_ir=None)
5,338,304
def plot_to_image(figure):
    """
    Converts the matplotlib plot specified by "figure" to a PNG image and
    returns it. The supplied figure is closed and inaccessible after this call.
    """
    # Save the plot to a PNG in memory
    buf = io.BytesIO()
    figure.savefig(buf, format="png")
    buf.seek(0)

    # Convert PNG buffer to TF image
    trans = transforms.ToTensor()
    image = buf.getvalue()
    image = Image.open(io.BytesIO(image))
    image = trans(image)

    return image
5,338,305
def test_skasch() -> None: """ Run `python -m pytest ./day-19/part-2/skasch.py` to test the submission. """ assert ( SkaschSubmission().run( """ --- scanner 0 --- 404,-588,-901 528,-643,409 -838,591,734 390,-675,-793 -537,-823,-458 -485,-357,347 -345,-311,381 -661,-816,-575 -876,649,763 -618,-824,-621 553,345,-567 474,580,667 -447,-329,318 -584,868,-557 544,-627,-890 564,392,-477 455,729,728 -892,524,684 -689,845,-530 423,-701,434 7,-33,-71 630,319,-379 443,580,662 -789,900,-551 459,-707,401 --- scanner 1 --- 686,422,578 605,423,415 515,917,-361 -336,658,858 95,138,22 -476,619,847 -340,-569,-846 567,-361,727 -460,603,-452 669,-402,600 729,430,532 -500,-761,534 -322,571,750 -466,-666,-811 -429,-592,574 -355,545,-477 703,-491,-529 -328,-685,520 413,935,-424 -391,539,-444 586,-435,557 -364,-763,-893 807,-499,-711 755,-354,-619 553,889,-390 --- scanner 2 --- 649,640,665 682,-795,504 -784,533,-524 -644,584,-595 -588,-843,648 -30,6,44 -674,560,763 500,723,-460 609,671,-379 -555,-800,653 -675,-892,-343 697,-426,-610 578,704,681 493,664,-388 -671,-858,530 -667,343,800 571,-461,-707 -138,-166,112 -889,563,-600 646,-828,498 640,759,510 -630,509,768 -681,-892,-333 673,-379,-804 -742,-814,-386 577,-820,562 --- scanner 3 --- -589,542,597 605,-692,669 -500,565,-823 -660,373,557 -458,-679,-417 -488,449,543 -626,468,-788 338,-750,-386 528,-832,-391 562,-778,733 -938,-730,414 543,643,-506 -524,371,-870 407,773,750 -104,29,83 378,-903,-323 -778,-728,485 426,699,580 -438,-605,-362 -469,-447,-387 509,732,623 647,635,-688 -868,-804,481 614,-800,639 595,780,-596 --- scanner 4 --- 727,592,562 -293,-554,779 441,611,-461 -714,465,-776 -743,427,-804 -660,-479,-426 832,-632,460 927,-485,-438 408,393,-506 466,436,-512 110,16,151 -258,-428,682 -393,719,612 -211,-452,876 808,-476,-593 -575,615,604 -485,667,467 -680,325,-822 -627,-443,-432 872,-547,-609 833,512,582 807,604,487 839,-516,451 891,-625,532 -652,-548,-490 30,-46,-14 """.strip() ) == 3621 )
5,338,306
def sent2vec(model, words):
    """Convert text into a vector.

    Arguments:
        model {[type]} -- Doc2Vec model
        words {[type]} -- tokenized text

    Returns:
        [type] -- vector array
    """
    vect_list = []
    for w in words:
        try:
            vect_list.append(model.wv[w])
        except:
            continue
    vect_list = np.array(vect_list)
    vect = vect_list.sum(axis=0)
    return vect / np.sqrt((vect ** 2).sum())
5,338,307
def postXML(server: HikVisionServer, path, xmldata=None):
    """
    This returns the response of the DVR to the following POST request

    Parameters:
        server (HikvisionServer): The basic info about the DVR
        path (str): The ISAPI path that will be executed
        xmldata (str): This should be formatted using `utils.dict2xml`.
            This is the data that will be transmitted to the server.
            It is optional.
    """
    headers = {'Content-Type': 'application/xml'}
    responseRaw = requests.post(
        server.address() + path,
        data=xmldata,
        headers=headers,
        auth=HTTPDigestAuth(server.user, server.password))
    if responseRaw.status_code == 401:
        raise Exception("Wrong username or password")
    responseXML = responseRaw.text
    return responseXML
5,338,308
def get_government_trading(gov_type: str, ticker: str = "") -> pd.DataFrame: """Returns the most recent transactions by members of government Parameters ---------- gov_type: str Type of government data between: 'congress', 'senate', 'house', 'contracts', 'quarter-contracts' and 'corporate-lobbying' ticker : str Ticker to get congress trading data from Returns ------- pd.DataFrame Most recent transactions by members of U.S. Congress """ if gov_type == "congress": if ticker: url = ( f"https://api.quiverquant.com/beta/historical/congresstrading/{ticker}" ) else: url = "https://api.quiverquant.com/beta/live/congresstrading" elif gov_type.lower() == "senate": if ticker: url = f"https://api.quiverquant.com/beta/historical/senatetrading/{ticker}" else: url = "https://api.quiverquant.com/beta/live/senatetrading" elif gov_type.lower() == "house": if ticker: url = f"https://api.quiverquant.com/beta/historical/housetrading/{ticker}" else: url = "https://api.quiverquant.com/beta/live/housetrading" elif gov_type.lower() == "contracts": if ticker: url = ( f"https://api.quiverquant.com/beta/historical/govcontractsall/{ticker}" ) else: url = "https://api.quiverquant.com/beta/live/govcontractsall" elif gov_type.lower() == "quarter-contracts": if ticker: url = f"https://api.quiverquant.com/beta/historical/govcontracts/{ticker}" else: url = "https://api.quiverquant.com/beta/live/govcontracts" elif gov_type.lower() == "corporate-lobbying": if ticker: url = f"https://api.quiverquant.com/beta/historical/lobbying/{ticker}" else: url = "https://api.quiverquant.com/beta/live/lobbying" else: return pd.DataFrame() headers = { "accept": "application/json", "X-CSRFToken": "TyTJwjuEC7VV7mOqZ622haRaaUr0x0Ng4nrwSRFKQs7vdoBcJlK9qjAS69ghzhFu", # pragma: allowlist secret "Authorization": f"Token {API_QUIVERQUANT_KEY}", } response = requests.get(url, headers=headers) if response.status_code == 200: if gov_type in ["congress", "senate", "house"]: return pd.DataFrame(response.json()).rename( columns={"Date": "TransactionDate", "Senator": "Representative"} ) return pd.DataFrame(response.json()) return pd.DataFrame()
5,338,309
def print_bytes(data):
    """
    Prints a given string as an array of unsigned bytes
    :param data:
    :return:
    """
    raw_arr = struct.unpack('<%dB' % len(data), data)
    print(raw_arr)
5,338,310
def pf_mobility(phi, gamma):
    """ Phase field mobility function. """
    # return gamma * (phi**2-1.)**2
    # func = 1.-phi**2
    # return 0.75 * gamma * 0.5 * (1. + df.sign(func)) * func
    return gamma
5,338,311
def print_df_stats(df: pd.DataFrame, df_train: pd.DataFrame, df_val: pd.DataFrame,
                   df_test: pd.DataFrame, label_encoder, prediction):
    """ Print some statistics of the split dataset. """
    try:
        labels = list(label_encoder.classes_)
    except AttributeError:
        labels = []

    headers = ["Images"]
    for label in labels:
        headers.append("-> " + str(label))

    def get_stats(df):
        lengths = [len(df)]
        for label in range(len(labels)):
            df_label = df[df[DF_DICT[prediction]] == label]
            lengths.append(
                str(len(df_label)) + " (" + str(round((len(df_label) / len(df)), 2)) + ")"
            )
        return lengths

    stats = []
    stats.append(["All"] + get_stats(df))
    stats.append(["Train"] + get_stats(df_train))
    stats.append(["Val"] + get_stats(df_val))
    stats.append(["Test"] + get_stats(df_test))

    print(tabulate(stats, headers=headers))
    print()
5,338,312
def hivtrace(id, input, reference, ambiguities, threshold, min_overlap, compare_to_lanl, fraction, strip_drams_flag=False, filter_edges="no", handle_contaminants="remove", skip_alignment=False, save_intermediate=True, prior=None ): """ PHASE 1) Pad sequence alignment to HXB2 length with bealign PHASE 2) Convert resulting bam file back to FASTA format PHASE 2b) Rename any duplicates in FASTA file PHASE 3) Strip DRAMs if requested PHASE 3b) Filtering contaminants before TN93 run if requested PHASE 4) TN93 analysis on the supplied FASTA file alone PHASE 5) Run hivclustercsv to return clustering information in JSON format PHASE 5b) Attribute annotations to results from (4) PHASE 6) Run tn93 against LANL if user elects to PHASE 6b) Concatenate results from pre-run LANL tn93, user tn93, and (5) analyses PHASE 6c) Flag any potential HXB2 sequences PHASE 7) Run hivclustercsv to return clustering information in json format """ results_json = {} # Declare reference file resource_dir = os.path.join( os.path.dirname(os.path.realpath(__file__)), 'rsrc') # These should be defined in the user's environment env_dir = os.path.dirname(sys.executable) PYTHON = sys.executable # Try python's system executable first, then the user's path. if (os.path.isfile(os.path.join(env_dir, 'bealign'))): BEALIGN = os.path.join(env_dir, 'bealign') else: BEALIGN = 'bealign' if (os.path.isfile(os.path.join(env_dir, 'bam2msa'))): BAM2MSA = os.path.join(env_dir, 'bam2msa') else: BAM2MSA = 'bam2msa' if (os.path.isfile(os.path.join(env_dir, 'hivnetworkcsv'))): HIVNETWORKCSV = os.path.join(env_dir, 'hivnetworkcsv') else: HIVNETWORKCSV = 'hivnetworkcsv' TN93DIST = 'tn93' # This will have to be another parameter LANL_FASTA = os.path.join(resource_dir, 'LANL.FASTA') LANL_TN93OUTPUT_CSV = os.path.join(resource_dir, 'LANL.TN93OUTPUT.csv') DEFAULT_DELIMITER = '|' # Check if LANL files exists. 
If not, then check if zip file exists, # otherwise throw error try: if not os.path.isfile(LANL_FASTA): lanl_zip = os.path.join(resource_dir, 'LANL.FASTA.gz') gunzip_file(lanl_zip, LANL_FASTA) if not os.path.isfile(LANL_TN93OUTPUT_CSV): lanl_tn93output_zip = os.path.join(resource_dir, 'LANL.TN93OUTPUT.csv.gz') gunzip_file(lanl_tn93output_zip, LANL_TN93OUTPUT_CSV) except e: # pragma: no cover print("Oops, missing a resource file") raise # Python Parameters SCORE_MATRIX = 'HIV_BETWEEN_F' OUTPUT_FORMAT = 'csv' SEQUENCE_ID_FORMAT = 'plain' # Intermediate filenames tmp_path = tempfile.mkdtemp(prefix='hivtrace-') basename = os.path.basename(input) BAM_FN = os.path.join(tmp_path, basename + '_output.bam') # Check if save output_fasta_fn OUTPUT_FASTA_FN = os.path.join(tmp_path, basename + '_output.fasta') if save_intermediate: OUTPUT_FASTA_FN = input + '_output.fasta' OUTPUT_TN93_FN = os.path.join(tmp_path, basename + '_user.tn93output.csv') OUTPUT_TN93_CONTAM_FN = os.path.join(tmp_path, basename + '_contam.tn93output.csv') DEST_TN93_FN = OUTPUT_TN93_FN if save_intermediate: DEST_TN93_FN = input + '_user.tn93output.csv' JSON_TN93_FN = os.path.join(tmp_path, basename + '_user.tn93output.json') JSON_TN93_CONTAM_FN = os.path.join(tmp_path, basename + '_contam.tn93output.json') OUTPUT_COMBINED_SEQUENCE_FILE = os.path.join( tmp_path, basename + "_combined_user_lanl.fasta") OUTPUT_CLUSTER_JSON = os.path.join(tmp_path, basename + '_user.trace.json') LANL_OUTPUT_CLUSTER_JSON = os.path.join(tmp_path, basename + '_lanl_user.trace.json') OUTPUT_USERTOLANL_TN93_FN = os.path.join( tmp_path, basename + '_usertolanl.tn93output.csv') USER_LANL_TN93OUTPUT = os.path.join(tmp_path, basename + '_userlanl.tn93output.csv') USER_FILTER_LIST = os.path.join(tmp_path, basename + '_user_filter.csv') CONTAMINANT_ID_LIST = os.path.join(tmp_path, basename + '_contaminants.csv') # File handler for output we don't care about DEVNULL = open(os.devnull, 'w') EXCLUSION_LIST = None # Check for incompatible statements if skip_alignment and compare_to_lanl: raise Exception( "You have passed arguments that are incompatible! You cannot compare to the public database if you elect to submit a pre-made alignment! Please consider the issue before trying again." 
) if skip_alignment: # Check for equal length in all sequences seqs = fasta_iter(input) seq_length = len(seqs.__next__()[1]) if (any(len(seq[1]) != seq_length for seq in seqs)): raise Exception("Not all input sequences have the same length!") # copy input file to output fasta file shutil.copyfile(input, OUTPUT_FASTA_FN) else: # PHASE 1 update_status(id, phases.ALIGNING, status.RUNNING) if handle_contaminants is None: handle_contaminants = 'no' bealign_process = [ BEALIGN, '-q', '-r', reference, '-m', SCORE_MATRIX, '-R', input, BAM_FN ] if handle_contaminants != 'no': bealign_process.insert(-3, '-K') logging.debug(' '.join(bealign_process)) subprocess.check_call(bealign_process, stdout=DEVNULL) update_status(id, phases.ALIGNING, status.COMPLETED) # PHASE 2 update_status(id, phases.BAM_FASTA_CONVERSION, status.RUNNING) bam_process = [BAM2MSA, BAM_FN, OUTPUT_FASTA_FN] logging.debug(' '.join(bam_process)) subprocess.check_call(bam_process, stdout=DEVNULL) update_status(id, phases.BAM_FASTA_CONVERSION, status.COMPLETED) if handle_contaminants != 'no' and handle_contaminants != 'separately': with (open(OUTPUT_FASTA_FN, 'r')) as msa: reference_name = next(SeqIO.parse(msa, 'fasta')).id logging.debug('Reference name set to %s' % reference_name) with open(CONTAMINANT_ID_LIST, 'w') as contaminants: print(reference_name, file=contaminants) # Ensure unique ids # Warn of duplicates by annotating with an attribute rename_duplicates(OUTPUT_FASTA_FN, DEFAULT_DELIMITER) attribute_map = ('SOURCE', 'SUBTYPE', 'COUNTRY', 'ACCESSION_NUMBER', 'YEAR_OF_SAMPLING') # PHASE 3 # Strip DRAMS if strip_drams_flag: OUTPUT_FASTA_FN_TMP = OUTPUT_FASTA_FN + ".spool" with open(str(OUTPUT_FASTA_FN_TMP), 'w') as output_file: for (seq_id, data) in sd.strip_drams(OUTPUT_FASTA_FN, strip_drams_flag): print(">%s\n%s" % (seq_id, data), file=output_file) shutil.move(OUTPUT_FASTA_FN_TMP, OUTPUT_FASTA_FN) # PHASE 3b Filter contaminants if handle_contaminants == 'separately': update_status(id, phases.FILTER_CONTAMINANTS, status.RUNNING) with open(JSON_TN93_CONTAM_FN, 'w') as tn93_contam_fh: tn93_contam_process = [ TN93DIST, '-q', '-o', OUTPUT_TN93_CONTAM_FN, '-t', '0.015', '-a', 'resolve', '-l', min_overlap, '-g', '1.0', '-s', reference, '-f', OUTPUT_FORMAT, OUTPUT_FASTA_FN ] logging.debug(' '.join(tn93_contam_process)) subprocess.check_call( tn93_contam_process, stdout=tn93_contam_fh, stderr=tn93_contam_fh) # shutil.copyfile(OUTPUT_TN93_FN, DEST_TN93_FN) update_status(id, phases.FILTER_CONTAMINANTS, status.COMPLETED) # Process output for contaminants and remove them from the file # Store the contaminants for reporting later with open(OUTPUT_TN93_CONTAM_FN, 'r') as tn93_contam_fh: tn93reader = csv.reader( tn93_contam_fh, delimiter=',', quotechar='|') tn93reader.__next__() contams = [row[0] for row in tn93reader] OUTPUT_FASTA_FN_TMP = OUTPUT_FASTA_FN + ".contam.tmp" # Remove contams from FASTA file with (open(OUTPUT_FASTA_FN, 'r')) as msa_fn: msa = SeqIO.parse(msa_fn, 'fasta') filtered_msa = filter(lambda x: x.id not in contams, msa) # Write to new TMP file with open(OUTPUT_FASTA_FN_TMP, "w") as output_handle: SeqIO.write(filtered_msa, output_handle, "fasta") shutil.move(OUTPUT_FASTA_FN_TMP, OUTPUT_FASTA_FN) # PHASE 4 update_status(id, phases.COMPUTE_TN93_DISTANCE, status.RUNNING) with open(JSON_TN93_FN, 'w') as tn93_fh: tn93_process = [ TN93DIST, '-q', '-0', '-o', OUTPUT_TN93_FN, '-t', threshold, '-a', ambiguities, '-l', min_overlap, '-g', fraction if ambiguities == 'resolve' else '1.0', '-f', OUTPUT_FORMAT, OUTPUT_FASTA_FN ] 
logging.debug(' '.join(tn93_process)) subprocess.check_call(tn93_process, stdout=tn93_fh, stderr=tn93_fh) if OUTPUT_TN93_FN != DEST_TN93_FN: shutil.copyfile(OUTPUT_TN93_FN, DEST_TN93_FN) update_status(id, phases.COMPUTE_TN93_DISTANCE, status.COMPLETED) # raise an exception if tn93 file is empty if is_tn93_file_empty(DEST_TN93_FN): raise Exception(' '.join(tn93_process) + "returned empty file") # send contents of tn93 to status page id_dict = id_to_attributes(OUTPUT_TN93_FN, attribute_map, DEFAULT_DELIMITER) if type(id_dict) is ValueError: update_status(id, "Error: " + id_dict.args[0]) raise id_dict # PHASE 5 update_status(id, phases.INFERRING_NETWORK, status.RUNNING) output_cluster_json_fh = open(OUTPUT_CLUSTER_JSON, 'w') hivnetworkcsv_process = [ HIVNETWORKCSV, '-i', OUTPUT_TN93_FN, '-t', threshold, '-f', SEQUENCE_ID_FORMAT, '-J', '-q' ] if filter_edges and filter_edges != 'no': hivnetworkcsv_process.extend( ['-n', filter_edges, '-s', OUTPUT_FASTA_FN]) if handle_contaminants == 'report' or handle_contaminants == 'remove': hivnetworkcsv_process.extend( ['-C', handle_contaminants, '-F', CONTAMINANT_ID_LIST]) if prior: hivnetworkcsv_process.extend( ['--prior', prior]) # hivclustercsv uses stderr for status updates complete_stderr = '' returncode = None logging.debug(' '.join(hivnetworkcsv_process)) with subprocess.Popen( hivnetworkcsv_process, stdout=output_cluster_json_fh, stderr=PIPE, bufsize=1, universal_newlines=True) as p: for line in p.stderr: complete_stderr += line update_status(id, phases.INFERRING_NETWORK, status.RUNNING, complete_stderr) p.wait() if p.returncode != 0: raise subprocess.CalledProcessError( returncode, ' '.join(hivnetworkcsv_process), complete_stderr) update_status(id, phases.INFERRING_NETWORK, status.COMPLETED, complete_stderr) output_cluster_json_fh.close() # Read and print output_cluster_json results_json["trace_results"] = json.loads( open(OUTPUT_CLUSTER_JSON, 'r').read()) # Place singleton count in Network Summary # Place contaminant nodes in Network Summary if handle_contaminants == 'separately': results_json['trace_results']['Network Summary'][ 'contaminant_sequences'] = contams if not compare_to_lanl: return results_json if compare_to_lanl: # PHASE 6 update_status(id, phases.PUBLIC_COMPUTE_TN93_DISTANCE, status.RUNNING) lanl_tn93_process = '' if ambiguities != 'resolve': lanl_tn93_process = [ TN93DIST, '-q', '-o', OUTPUT_USERTOLANL_TN93_FN, '-t', threshold, '-a', ambiguities, '-f', OUTPUT_FORMAT, '-l', min_overlap, '-s', LANL_FASTA, OUTPUT_FASTA_FN ] else: lanl_tn93_process = [ TN93DIST, '-q', '-o', OUTPUT_USERTOLANL_TN93_FN, '-t', threshold, '-a', ambiguities, '-f', OUTPUT_FORMAT, '-g', fraction, '-l', min_overlap, '-s', LANL_FASTA, OUTPUT_FASTA_FN ] logging.debug(' '.join(lanl_tn93_process)) subprocess.check_call(lanl_tn93_process, stdout=DEVNULL) update_status(id, phases.PUBLIC_COMPUTE_TN93_DISTANCE, status.COMPLETED) # send contents of tn93 to status page # PHASE 6b # Perform concatenation # This is where reference annotation becomes an issue concatenate_data(USER_LANL_TN93OUTPUT, LANL_TN93OUTPUT_CSV, OUTPUT_USERTOLANL_TN93_FN, OUTPUT_TN93_FN) lanl_id_dict = id_to_attributes(OUTPUT_TN93_FN, attribute_map, DEFAULT_DELIMITER) # Create a list from TN93 csv for hivnetworkcsv filter create_filter_list(OUTPUT_TN93_FN, USER_FILTER_LIST) # PHASE 7 update_status(id, phases.PUBLIC_INFERRING_CONNECTIONS, status.RUNNING) lanl_output_cluster_json_fh = open(LANL_OUTPUT_CLUSTER_JSON, 'w') if filter_edges and filter_edges != 'no': with open(OUTPUT_COMBINED_SEQUENCE_FILE, 
'w') as combined_fasta: for f_path in (LANL_FASTA, OUTPUT_FASTA_FN): with open(f_path) as src_file: shutil.copyfileobj(src_file, combined_fasta) print("\n", file=combined_fasta) lanl_hivnetworkcsv_process = [ PYTHON, HIVNETWORKCSV, '-i', USER_LANL_TN93OUTPUT, '-t', threshold, '-f', SEQUENCE_ID_FORMAT, '-J', '-q', '-k', USER_FILTER_LIST, '-n', filter_edges, '-s', OUTPUT_COMBINED_SEQUENCE_FILE ] else: lanl_hivnetworkcsv_process = [ PYTHON, HIVNETWORKCSV, '-i', USER_LANL_TN93OUTPUT, '-t', threshold, '-f', SEQUENCE_ID_FORMAT, '-J', '-q', '-k', USER_FILTER_LIST ] if handle_contaminants == 'report' or handle_contaminants == 'remove': lanl_hivnetworkcsv_process.extend( ['-C', handle_contaminants, '-F', CONTAMINANT_ID_LIST]) logging.debug(' '.join(lanl_hivnetworkcsv_process)) # hivclustercsv uses stderr for status updates complete_stderr = '' with subprocess.Popen( lanl_hivnetworkcsv_process, stdout=lanl_output_cluster_json_fh, stderr=PIPE, bufsize=1, universal_newlines=True) as p: for line in p.stderr: complete_stderr += line update_status(id, phases.PUBLIC_INFERRING_CONNECTIONS, status.RUNNING, complete_stderr) p.wait() if p.returncode != 0: raise subprocess.CalledProcessError( returncode, ' '.join(lanl_hivnetworkcsv_process), complete_stderr) lanl_output_cluster_json_fh.close() update_status(id, phases.PUBLIC_INFERRING_CONNECTIONS, status.COMPLETED) #Annotate LANL nodes with id json_info = open(LANL_OUTPUT_CLUSTER_JSON, 'r').read() if json_info: # Only include clusters that are connected to supplied nodes annotate_lanl(LANL_OUTPUT_CLUSTER_JSON, LANL_FASTA) lanl_trace_results = json.loads(json_info) results_json['lanl_trace_results'] = lanl_trace_results else: logging.debug('no lanl results!') DEVNULL.close() return results_json
5,338,313
def parse_author_mail(author):
    """Split a string of the form ``author <author-mail>`` into author and mail."""
    pat = author_mail_re.search(author)
    return (pat.group(1), pat.group(2)) if pat else (author, None)
5,338,314
def tag(name, content='', nonclosing=False, **attrs): """ Wraps content in a HTML tag with optional attributes. This function provides a Pythonic interface for writing HTML tags with a few bells and whistles. The basic usage looks like this:: >>> tag('p', 'content', _class="note", _id="note1") '<p class="note" id="note1">content</p>' Any attribute names with any number of leading underscores (e.g., '_class') will have the underscores strpped away. If content is an iterable, the tag will be generated once per each member. >>> tag('span', ['a', 'b', 'c']) '<span>a</span><span>b</span><span>c</span>' It does not sanitize the tag names, though, so it is possible to specify invalid tag names:: >>> tag('not valid') '<not valid></not valid> .. warning:: Please ensure that ``name`` argument does not come from user-specified data, or, if it does, that it is properly sanitized (best way is to use a whitelist of allowed names). Because attributes are specified using keyword arguments, which are then treated as a dictionary, there is no guarantee of attribute order. If attribute order is important, don't use this function. This module contains a few partially applied aliases for this function. These mostly have hard-wired first argument (tag name), and are all uppercase: - ``A`` - alias for ``<a>`` tag - ``BUTTON`` - alias for ``<button>`` tag - ``HIDDEN`` - alias for ``<input>`` tag with ``type="hidden"`` attribute - ``INPUT`` - alias for ``<input>`` tag with ``nonclosing`` set to ``True`` - ``LI`` - alias for ``<li>`` tag - ``OPTION`` - alias for ``<option>`` tag - ``P`` - alias for ``<p>`` tag - ``SELECT`` - alias for ``<select>`` tag - ``SPAN`` - alias for ``<span>`` tag - ``SUBMIT`` - alias for ``<button>`` tag with ``type="submit"`` attribute - ``TEXTAREA`` - alias for ``<textarea>`` tag - ``UL`` - alias for ``<ul>`` tag """ open_tag = '<%s>' % name close_tag = '</%s>' % name attrs = ' '.join([attr(k.lstrip('_'), to_unicode(v)) for k, v in attrs.items()]) if attrs: open_tag = '<%s %s>' % (name, attrs) if nonclosing: content = '' close_tag = '' if not isinstance(content, basestring): try: return ''.join(['%s%s%s' % (open_tag, to_unicode(c), close_tag) for c in content]) except TypeError: pass return '%s%s%s' % (open_tag, to_unicode(content), close_tag)
5,338,315
def _reconcile_phenotype(meth, fba_model_id, phenotype_id, out_model_id): """Run Gapfilling on an FBA Model [16] :param fba_model_id: an FBA model id [16.1] :type fba_model_id: kbtypes.KBaseFBA.FBAModel :ui_name fba_model_id: FBA Model ID :param phenotype_id: a phenotype simulation ID [16.2] :type phenotype_id: kbtypes.KBasePhenotypes.PhenotypeSimulationSet :ui_name phenotype_id: Phenotype Simulation Dataset ID :param out_model_id: a name for the generated FBA Model (optional) [16.3] :type out_model_id: kbtypes.KBaseFBA.FBAModel :ui_name out_model_id: Output FBA Model Name :return: something :rtype: kbtypes.Unicode :output_widget: kbaseModelMetaNarrative """ if not out_model_id: out_model_id = "model_" + ''.join([chr(random.randrange(0, 26) + ord('A')) for _ in xrange(8)]) token = os.environ['KB_AUTH_TOKEN'] workspace = os.environ['KB_WORKSPACE_ID'] fbaClient = fbaModelServices(service.URLS.fba) wildtype_phenotype_reconciliation_params = { 'auth': token, 'model_workspace': workspace, 'model': fba_model_id, 'phenotypeSet_workspace': workspace, 'phenotypeSet': phenotype_id, 'workspace': workspace, 'out_model': out_model_id, } job_id = fbaClient.queue_wildtype_phenotype_reconciliation(wildtype_phenotype_reconciliation_params)['id'] return json.dumps({'ws_name': workspace, 'model_id': out_model_id, 'job_id': job_id})
5,338,316
def label_global_entities(ax, cmesh, edim, color='b', fontsize=10):
    """
    Label mesh topology entities using global ids.
    """
    coors = cmesh.get_centroids(edim)
    coors = _to2d(coors)
    dim = cmesh.dim

    ax = _get_axes(ax, dim)

    for ii, cc in enumerate(coors):
        ax.text(*cc.T, s=ii, color=color, fontsize=fontsize)

    return ax
5,338,317
def build_filename():
    """Build out the filename based on current UTC time."""
    now = datetime.datetime.utcnow()
    fname = now.strftime('rib.%Y%m%d.%H00.bz2')
    hour = int(now.strftime('%H'))
    if not hour % 2 == 0:
        if len(str(hour)) == 1:
            hour = "0%d" % (hour - 1)
        else:
            hour = hour - 1
        fname = now.strftime('rib.%Y%m%d.')
        fname = fname + str(hour) + '00.bz2'
    return fname
5,338,318
def sample_sequence(model, length, context=None, temperature=1.0, top_k=10, sample=True, device='cuda', use_constrained_decoding=False, constrained_decoding_threshold=0.3, person_to_category_to_salient_ngram_embed=(), word_embeds=(), tokenizer=None): """ :param model: :param length: :param context: :param temperature: :param top_k: :param sample: :param device: :param use_constrained_decoding: :param constrained_decoding_threshold: :param person_to_category_to_salient_ngram_embed: :param word_embeds: :param tokenizer: :return: """ # Assume batch size of 1. context = torch.tensor(context, device=device, dtype=torch.long).unsqueeze(0) orig_context_length = context.size()[-1] prev = context output = context past = None k_sample_history = torch.tensor([], device=device, dtype=torch.float) sampling_path = [] # List of (timestep, token)s tried. Could be moving forward, alternate, or backward in timestep. backtrack = 0 with torch.no_grad(): while output.size()[-1] < orig_context_length + length: # when using `past`, the context for the next call should be only # the previous token: https://github.com/huggingface/transformers/issues/1749 logits, past = model(prev, past=past) logits = logits[:, -1, :] / temperature logits = top_k_logits(logits, k=top_k) log_probs = F.softmax(logits, dim=-1) prev, output, k_sample_history, backtrack, past = sampling( output, log_probs, k_sample_history, use_constrained_decoding, constrained_decoding_threshold, sample, sampling_path, backtrack, person_to_category_to_salient_ngram_embed, word_embeds, past, tokenizer, device) if prev == tokenizer.eos_token_id: break return output, sampling_path
5,338,319
def test_hive_partition_sensor_async_execute_failure(context):
    """Tests that an AirflowException is raised in case of error event"""
    task = HivePartitionSensorAsync(
        task_id="task-id",
        table=TEST_TABLE,
        partition=TEST_PARTITION,
        metastore_conn_id=TEST_METASTORE_CONN_ID,
    )
    with pytest.raises(AirflowException):
        task.execute_complete(
            context=None, event={"status": "error", "message": "test failure message"})
5,338,320
def test_cache_add(cache: Cache):
    """Test that cache.add() sets a cache key but only if it doesn't exist."""
    key, value = ("key", "value")
    ttl = 2

    cache.add(key, value, ttl)
    assert cache.get(key) == value
    assert cache.expire_times()[key] == ttl

    cache.add(key, value, ttl + 1)
    assert cache.expire_times()[key] == ttl

    cache.set(key, value, ttl + 1)
    assert cache.expire_times()[key] == ttl + 1
5,338,321
def main(debug=False, args=None):
    """Start the app. We will see if we need this anyway."""
    log.info('>>>>> Starting development server at http://{}/api/ <<<<<'.format(
        flask_app.config['SERVER_NAME']))
    # flask_app.run(debug=settings.FLASK_DEBUG)
    # flask_app.run(debug=config_json["FLASK_DEBUG"])
    flask_app.run(debug=debug)
    return 0
5,338,322
def tf1 ( fun , **kwargs ) :
    """Convert function object to TF1
    """
    from ostap.math.models import tf1 as _tf1
    return _tf1 ( fun , **kwargs )
5,338,323
def normalize_requires(filename, **kwargs):
    """Return the contents of filename, with all [Require]s split out and
    ordered at the top.

    Preserve any leading whitespace/comments.
    """
    if filename[-2:] != '.v':
        filename += '.v'
    kwargs = fill_kwargs(kwargs)
    lib = lib_of_filename(filename, **kwargs)
    all_imports = run_recursively_get_imports(lib, **kwargs)

    v_name = filename_of_lib(lib, ext='.v', **kwargs)
    contents = get_file(v_name, **kwargs)
    header, contents = split_leading_comments_and_whitespace(contents)
    contents = strip_requires(contents)
    contents = ''.join('Require %s.\n' % i for i in all_imports[:-1]) + '\n' + contents.strip() + '\n'
    return header + contents
5,338,324
def deprecate_module_with_proxy(module_name, module_dict, deprecated_attributes=None): """ Usage: deprecate_module_with_proxy(__name__, locals()) # at bottom of module """ def _ModuleProxy(module, depr): """Return a wrapped object that warns about deprecated accesses""" # http://stackoverflow.com/a/922693/2127762 class Wrapper(object): def __getattr__(self, attr): if depr is None or attr in depr: warnings.warn("Property %s is deprecated" % attr) return getattr(module, attr) def __setattr__(self, attr, value): if depr is None or attr in depr: warnings.warn("Property %s is deprecated" % attr) return setattr(module, attr, value) return Wrapper() deprecated_import(module_name) deprs = set() for key in deprecated_attributes or module_dict: if key.startswith('_'): continue if callable(module_dict[key]) and not isbuiltin(module_dict[key]): module_dict[key] = deprecated(module_dict[key]) else: deprs.add(key) sys.modules[module_name] = _ModuleProxy(sys.modules[module_name], deprs or None)
5,338,325
def nexthop_vr(pano, base_xpath, static_route_name, nexthop, destination):
    """
    Sets static route with a nexthop VR

    Parameters
    ----------
    pano : Panorama
        A PanDevice for Panorama
    base_xpath : str
        The initial API command for the virtual router
    static_route_name : str
        The name of the static route
    nexthop : str
        The VR of the next hop
    destination : str
        The route IP address and netmask
    """
    entry_element = ('<entry name="{}"><path-monitor><enable>no</enable>'
                     '<failure-condition>any</failure-condition><hold-time>2'
                     '</hold-time></path-monitor><nexthop><next-vr>{}'
                     '</next-vr></nexthop><bfd><profile>None</profile></bfd>'
                     '<metric>10</metric><destination>{}</destination>'
                     '<route-table><unicast/></route-table></entry>'
                     .format(static_route_name, nexthop, destination))

    pano.xapi.set(xpath=base_xpath, element=entry_element)
5,338,326
def index(): """ Returns: render_template (flask method): contains data required to render visualizations """ graphs = [] # extract data needed for visuals # TODO: Below is an example - modify to extract data for your own visuals genre_counts = df.groupby('genre')['message'].count().reset_index().sort_values( 'message',ascending=False) genre_names = list(genre_counts.genre) graph_one=[] graph_one.append( Bar( x = genre_names, y = genre_counts.message.tolist()) ) layout_one = dict(title = 'Distribution of Message Genres', xaxis = dict(title = 'Count',), yaxis = dict(title = 'Genre'), ) graphs.append(dict(data=graph_one, layout=layout_one)) most_common_categories = Y.sum().sort_values(ascending=False).head() graph_two = [] graph_two.append( Bar( x = list(most_common_categories.index), y =list(most_common_categories.values) )) layout_two = dict(title = 'Most Common Categories in Training Data', xaxis = dict(title = 'Count',), yaxis = dict(title = 'Category'), ) graphs.append(dict(data=graph_two, layout=layout_two)) # encode plotly graphs in JSON ids = ["graph-{}".format(i) for i, _ in enumerate(graphs)] graphJSON = json.dumps(graphs, cls=plotly.utils.PlotlyJSONEncoder) # render web page with plotly graphs return render_template('master.html', ids=ids, graphJSON=graphJSON)
5,338,327
def test_pseudonymize__day__1():
    """It makes sure that the pseudonymized day is not bigger than 28."""
    from gocept.pseudonymize import day
    assert 19 == pseudo(10, day)
5,338,328
def date(
    instance,
    fg_color=[255, 255, 255],
    bg_color=[0, 0, 0],
    scroll=0.1
):
    """Display the date in the french format (dd/mm/yyyy)"""
    instance.show_message(
        strftime("%d/%m/%Y"),
        text_colour=fg_color,
        back_colour=bg_color,
        scroll_speed=scroll
    )
5,338,329
def uniform_dec(num):
    """
    Declination distribution: uniform in sin(dec), which corresponds to
    declinations drawn uniformly over the sphere.

    Parameters
    ----------
    num : int
        The number of random declinations to produce.
    """
    return (numpy.pi / 2.) - numpy.arccos(2 * random.random_sample(num) - 1)
5,338,330
def run_vcfeval(job, context, sample, vcf_tbi_id_pair, vcfeval_baseline_id, vcfeval_baseline_tbi_id, fasta_path, fasta_id, bed_id, out_name = None, score_field=None): """ Run RTG vcf_eval to compare VCFs. Return a results dict like: { "f1": f1 score as float, "summary": summary file ID, "archive": output archive ID, "snp": ROC .tsv.gz data file ID for SNPs, "non_snp": ROC .tsv.gz data file ID for non-SNP variants, "weighted": ROC .tsv.gz data file ID for a weighted combination of SNP and non-SNP variants } Some ROC data file IDs may not be present if they were not calculated. """ # make a local work directory work_dir = job.fileStore.getLocalTempDir() # download the vcf call_vcf_id, call_tbi_id = vcf_tbi_id_pair[0], vcf_tbi_id_pair[1] call_vcf_name = "calls.vcf.gz" job.fileStore.readGlobalFile(vcf_tbi_id_pair[0], os.path.join(work_dir, call_vcf_name)) job.fileStore.readGlobalFile(vcf_tbi_id_pair[1], os.path.join(work_dir, call_vcf_name + '.tbi')) # and the truth vcf vcfeval_baseline_name = 'truth.vcf.gz' job.fileStore.readGlobalFile(vcfeval_baseline_id, os.path.join(work_dir, vcfeval_baseline_name)) job.fileStore.readGlobalFile(vcfeval_baseline_tbi_id, os.path.join(work_dir, vcfeval_baseline_name + '.tbi')) # download the fasta (make sure to keep input extension) fasta_name = "fa_" + os.path.basename(fasta_path) job.fileStore.readGlobalFile(fasta_id, os.path.join(work_dir, fasta_name)) # download the bed regions bed_name = "bed_regions.bed" if bed_id else None if bed_id: job.fileStore.readGlobalFile(bed_id, os.path.join(work_dir, bed_name)) # use out_name if specified, otherwise sample if sample and not out_name: out_name = sample if out_name: out_tag = '{}_vcfeval_output'.format(out_name) else: out_tag = 'vcfeval_output' # output directory out_name = out_tag # indexed sequence sdf_name = fasta_name + ".sdf" # make an indexed sequence (todo: allow user to pass one in) context.runner.call(job, ['rtg', 'format', fasta_name, '-o', sdf_name], work_dir=work_dir) # run the vcf_eval command cmd = ['rtg', 'vcfeval', '--calls', call_vcf_name, '--baseline', vcfeval_baseline_name, '--template', sdf_name, '--output', out_name, '--threads', str(context.config.vcfeval_cores)] if bed_name is not None: cmd += ['--evaluation-regions', bed_name] if context.config.vcfeval_opts: cmd += context.config.vcfeval_opts # override score field from options with one from parameter if score_field: for opt in ['-f', '--vcf-score-field']: if opt in cmd: opt_idx = cmd.index(opt) del cmd[opt_idx] del cmd[opt_idx] cmd += ['--vcf-score-field', score_field] if sample: # Pass the sample name along, since it is needed if the truth VCF has multiple samples cmd += ['--sample', sample] try: context.runner.call(job, cmd, work_dir=work_dir) except: # Dump everything we need to replicate the alignment logging.error("VCF evaluation failed. Dumping files.") context.write_output_file(job, os.path.join(work_dir, call_vcf_name)) context.write_output_file(job, os.path.join(work_dir, vcfeval_baseline_name)) # TODO: Dumping the sdf folder doesn't seem to work right. 
But we can dump the fasta context.write_output_file(job, os.path.join(work_dir, fasta_name)) if bed_name is not None: context.write_output_file(job, os.path.join(work_dir, bed_name)) raise # copy results to outstore # vcfeval_output_summary.txt out_summary_id = context.write_output_file(job, os.path.join(work_dir, out_tag, 'summary.txt'), out_store_path = '{}_summary.txt'.format(out_tag)) # vcfeval_output.tar.gz -- whole shebang context.runner.call(job, ['tar', 'czf', out_tag + '.tar.gz', out_tag], work_dir = work_dir) out_archive_id = context.write_output_file(job, os.path.join(work_dir, out_tag + '.tar.gz')) # truth VCF context.write_output_file(job, os.path.join(work_dir, vcfeval_baseline_name)) context.write_output_file(job, os.path.join(work_dir, vcfeval_baseline_name + '.tbi')) # vcfeval_output_f1.txt (used currently by tests script) f1 = parse_f1(os.path.join(work_dir, os.path.basename(out_name), "summary.txt")) f1_path = os.path.join(work_dir, "f1.txt") with open(f1_path, "w") as f: f.write(str(f1)) context.write_output_file(job, f1_path, out_store_path = '{}_f1.txt'.format(out_tag)) # Start the output dict out_dict = { "f1": f1, "summary": out_summary_id, "archive": out_archive_id } # roc data (written to outstore to allow re-plotting) for roc_name in ['snp', 'non_snp', 'weighted']: roc_file = os.path.join(work_dir, out_tag, '{}_roc.tsv.gz'.format(roc_name)) if os.path.isfile(roc_file): # Save this one dest_file = os.path.join('roc', out_tag, '{}_roc.tsv.gz'.format(roc_name)) out_dict[roc_name] = context.write_output_file(job, roc_file, dest_file) return out_dict
5,338,331
def helicsInputGetBytes(ipt: HelicsInput) -> bytes:
    """
    Get the raw data for the latest value of a subscription.

    **Parameters**

    - **`ipt`** - The input to get the data for.

    **Returns**: Raw string data.
    """
    if HELICS_VERSION == 2:
        f = loadSym("helicsInputGetRawValue")
    else:
        f = loadSym("helicsInputGetBytes")
    err = helicsErrorInitialize()
    maxDataLen = helicsInputGetByteCount(ipt) + 1024
    data = ffi.new("char[{maxDataLen}]".format(maxDataLen=maxDataLen))
    actualSize = ffi.new("int[1]")
    f(ipt.handle, data, maxDataLen, actualSize, err)
    if err.error_code != 0:
        raise HelicsException("[" + str(err.error_code) + "] " + ffi.string(err.message).decode())
    else:
        return ffi.unpack(data, length=actualSize[0])
5,338,332
def named_char_class(char_class, min_count=0):
    """Return a predefined character class.

    The result of this function can be passed to :func:`generate_password`
    as one of the character classes to use in generating a password.

    :param char_class: Any of the character classes named in
        :const:`CHARACTER_CLASSES`
    :param min_count: The minimum number of members of this class to appear
        in a generated password
    """
    assert char_class in CHARACTER_CLASSES
    return CharClass(frozenset(_char_class_members[char_class]), min_count)
5,338,333
def parse_args(): """Parse commandline arguments.""" parser = argparse.ArgumentParser() parser.add_argument('--minSdkVersion', default='', dest='min_sdk_version', help='specify minSdkVersion used by the build system') parser.add_argument('--targetSdkVersion', default='', dest='target_sdk_version', help='specify targetSdkVersion used by the build system') parser.add_argument('--raise-min-sdk-version', dest='raise_min_sdk_version', action='store_true', help='raise the minimum sdk version in the manifest if necessary') parser.add_argument('--library', dest='library', action='store_true', help='manifest is for a static library') parser.add_argument('--uses-library', dest='uses_libraries', action='append', help='specify additional <uses-library> tag to add. android:requred is set to true') parser.add_argument('--optional-uses-library', dest='optional_uses_libraries', action='append', help='specify additional <uses-library> tag to add. android:requred is set to false') parser.add_argument('--uses-non-sdk-api', dest='uses_non_sdk_api', action='store_true', help='manifest is for a package built against the platform') parser.add_argument('--logging-parent', dest='logging_parent', default='', help=('specify logging parent as an additional <meta-data> tag. ' 'This value is ignored if the logging_parent meta-data tag is present.')) parser.add_argument('--use-embedded-dex', dest='use_embedded_dex', action='store_true', help=('specify if the app wants to use embedded dex and avoid extracted,' 'locally compiled code. Must not conflict if already declared ' 'in the manifest.')) parser.add_argument('--extract-native-libs', dest='extract_native_libs', default=None, type=lambda x: (str(x).lower() == 'true'), help=('specify if the app wants to use embedded native libraries. Must not conflict ' 'if already declared in the manifest.')) parser.add_argument('--has-no-code', dest='has_no_code', action='store_true', help=('adds hasCode="false" attribute to application. Ignored if application elem ' 'already has a hasCode attribute.')) parser.add_argument('input', help='input AndroidManifest.xml file') parser.add_argument('output', help='output AndroidManifest.xml file') return parser.parse_args()
5,338,334
def make_grid(spatial_dim: Sequence[int]) -> torch.Tensor:
    """Make the grid of coordinates for the Fourier neural operator input.

    Args:
        spatial_dim: A sequence of spatial dimensions `(height, width)`.

    Returns:
        A torch.Tensor with the grid of coordinates of size `(1, height, width, 2)`.
    """
    grids = []
    grids.append(np.linspace(0, 1, spatial_dim[0]))
    grids.append(np.linspace(0, 1, spatial_dim[1]))
    grid = np.vstack([u.ravel() for u in np.meshgrid(*grids)]).T
    grid = grid.reshape(1, spatial_dim[0], spatial_dim[1], 2)
    grid = grid.astype(np.float32)
    return torch.tensor(grid)
5,338,335
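A shape sanity check for make_grid above (a sketch, assuming the function and its np/torch imports are in scope).

import torch

grid = make_grid((16, 32))
assert grid.shape == (1, 16, 32, 2)
assert grid.dtype == torch.float32
# coordinates are normalized to [0, 1]
assert float(grid.min()) == 0.0 and float(grid.max()) == 1.0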
def _default_handlers(stream, logging_level, include_time): """Return a list of the default logging handlers to use. Args: stream: See the configure_logging() docstring. include_time: See the configure_logging() docstring. """ # Create the filter. def should_log(record): """Return whether a logging.LogRecord should be logged.""" if record.name.startswith('blinkpy.third_party'): return False return True logging_filter = logging.Filter() logging_filter.filter = should_log # Create the handler. handler = logging.StreamHandler(stream) if include_time: prefix = '%(asctime)s - ' else: prefix = '' if logging_level == logging.DEBUG: formatter = logging.Formatter(prefix + '%(name)s: [%(levelname)s] %(message)s') else: formatter = logging.Formatter(prefix + '%(message)s') handler.setFormatter(formatter) handler.addFilter(logging_filter) return [handler]
5,338,336
def find_spec2d_from_spec1d(spec1d_files):
    """
    Find the spec2d files corresponding to the given list of spec1d files.
    This looks for the spec2d files in the same directory as the spec1d files.
    It will exit with an error if a spec2d file cannot be found.

    Args:
        spec1d_files (list of str): List of spec1d files generated by PypeIt.

    Returns:
        list of str: List of the matching spec2d files.
    """
    spec2d_files = []
    for spec1d_file in spec1d_files:
        # Check for a corresponding 2d file
        (path, filename) = os.path.split(spec1d_file)
        spec2d_file = os.path.join(path, filename.replace('spec1d', 'spec2d', 1))

        if not os.path.exists(spec2d_file):
            msgs.error(f'Could not find matching spec2d file for {spec1d_file}')

        spec2d_files.append(spec2d_file)

    return spec2d_files
5,338,337
def bootstrap_cfg(): """Allow PyScaffold to be used to package itself. Usually, running ``python setup.py egg_info --egg-base .`` first is a good idea. """ src_dir = os.path.join(__location__, 'src') egg_info_dir = os.path.join(__location__, 'PyScaffold.egg-info') has_entrypoints = os.path.isdir(egg_info_dir) import pkg_resources sys.path.insert(0, src_dir) pkg_resources.working_set.add_entry(src_dir) from pyscaffold.utils import check_setuptools_version from pyscaffold.contrib.setuptools_scm import get_version from pyscaffold.contrib.setuptools_scm.hacks import parse_pkginfo from pyscaffold.contrib.setuptools_scm.git import parse as parse_git from pyscaffold.integration import local_version2str, version2str check_setuptools_version() def parse(root): try: return parse_pkginfo(root) except IOError: return parse_git(root) config = dict( version_scheme=version2str, local_scheme=local_version2str, ) if has_entrypoints: return dict(use_pyscaffold=True) else: return dict( version=get_version( root=__location__, parse=parse, **config) )
5,338,338
def datedif(ctx, start_date, end_date, unit):
    """
    Calculates the number of days, months, or years between two dates.
    """
    start_date = conversions.to_date(start_date, ctx)
    end_date = conversions.to_date(end_date, ctx)
    unit = conversions.to_string(unit, ctx).lower()

    if start_date > end_date:
        raise ValueError("Start date cannot be after end date")

    if unit == 'y':
        return relativedelta(end_date, start_date).years
    elif unit == 'm':
        delta = relativedelta(end_date, start_date)
        return 12 * delta.years + delta.months
    elif unit == 'd':
        return (end_date - start_date).days
    elif unit == 'md':
        return relativedelta(end_date, start_date).days
    elif unit == 'ym':
        return relativedelta(end_date, start_date).months
    elif unit == 'yd':
        return (end_date - start_date.replace(year=end_date.year)).days

    raise ValueError("Invalid unit value: %s" % unit)
5,338,339
def tan(data):
    """Compute elementwise tan of data.

    Parameters
    ----------
    data : relay.Expr
        The input data

    Returns
    -------
    result : relay.Expr
        The computed result.
    """
    return _make.tan(data)
5,338,340
def first_facility():
    """Last business day before or on 5th day of the Submission month, 8:00am"""
    facility_partial(5)
5,338,341
def default_main(puzzle_class: Type[Puzzle]):
    """A default main function for puzzle scripts."""
    parser = argparse.ArgumentParser()
    parser.add_argument("-i", "--interactive", action="store_true")
    parser.add_argument("file", help="file containing Alloy instance txt")
    args = parser.parse_args()
    with open(args.file) as f:
        puzzle = puzzle_class(parse_instance(f))
    if args.interactive:
        puzzle.interactive()
    else:
        puzzle.dump()
5,338,342
def get_battery_data(battery, user=None, start = None, end = None): """ Returns a DataFrame with battery data for a user. Parameters ---------- battery: DataFrame with battery data user: string, optional start: datetime, optional end: datetime, optional """ assert isinstance(battery, pd.core.frame.DataFrame), "data is not a pandas DataFrame" if(user!= None): assert isinstance(user, str),"user not given in string format" battery_data = battery[(battery['user']==user)] else: battery_data = battery if(start!=None): start = pd.to_datetime(start) else: start = battery_data.iloc[0]['datetime'] if(end!= None): end = pd.to_datetime(end) else: end = battery_data.iloc[len(battery_data)-1]['datetime'] battery_data = battery_data[(battery_data['datetime']>=start) & (battery_data['datetime']<=end)] battery_data['battery_level'] = pd.to_numeric(battery_data['battery_level']) #df['column'].fillna(pd.Timedelta(seconds=0)) #df.dropna() battery_data = battery_data.drop_duplicates(subset=['datetime','user','device'],keep='last') battery_data = battery_data.drop(['user','device','time','datetime'],axis=1) return battery_data
5,338,343
def fix_brushes(brushes, thresh, vmf_in, snaplo, snaphi): """ Find and fix brushes with floating point plane coordinates. Returns a tuple containing the total number of brushes whose coordinates were rounded, a list of tuples which pairs suspicious brush IDs with the greatest deviation any one of their coordinates makes from the nearest multiple of snaplo, and a fixed version of vmf_in. Keyword arguments: brushes: list of brush strings to search thresh: threshold between snaplo and snaphi vmf_in: string containing input VMF contents snaplo: deviations less than thresh will be rounded to the nearest multiple of this value snaphi: deviations equal to or greater than thresh will be rounded to the nearest multiple of this value """ vmf_out = vmf_in rounded_count = 0 percent = len(brushes) / 100.0 suspects = [] for i, brush in enumerate(brushes): brush_id = int(re.search(r'"id"\s"(\d+)"', brush).group(1)) float_planes = [] for plane in re.findall(r'"plane"\s".*?"', brush, re.DOTALL): if '.' in plane: float_planes.append(plane) if not float_planes: continue max_dev = get_max_dev(float_planes, snaplo) if max_dev < thresh or snaphi is not None: brush_new = brush for plane in float_planes: plane_new = fix_plane(plane, thresh, snaplo, snaphi) brush_new = brush_new.replace(plane, plane_new) vmf_out = vmf_out.replace(brush, brush_new) rounded_count += 1 else: suspects.append((brush_id, max_dev)) sys.stdout.write('\r%s%% complete' % str(int(i / percent))) sys.stdout.flush() sys.stdout.write("\r \n") sys.stdout.flush() return (rounded_count, suspects, vmf_out)
5,338,344
def mutate():
    """
    Handles the '/mutate' path and accepts CREATE and UPDATE requests.
    Sends its response back, which either denies or allows the request.
    """
    try:
        logging.debug(request.json)
        admission_request = AdmissionRequest(request.json)
        response = __admit(admission_request)
    except Exception as err:
        if isinstance(err, BaseConnaisseurException):
            err_log = str(err)
            msg = err.user_msg  # pylint: disable=no-member
        else:
            err_log = str(traceback.format_exc())
            msg = "unknown error. please check the logs."
        send_alerts(admission_request, False, msg)
        logging.error(err_log)
        return jsonify(
            get_admission_review(
                admission_request.uid,
                False,
                msg=msg,
                detection_mode=DETECTION_MODE,
            )
        )
    send_alerts(admission_request, True)
    return jsonify(response)
5,338,345
def _data_writer(data, file_path, sr=16000):
    """ A wrapper to write raw binary data or waveform
    """
    file_name, file_ext = os.path.splitext(file_path)
    if file_ext == '.wav':
        nii_wav_tk.waveFloatToPCMFile(data, file_path, sr=sr)
    elif file_ext == '.txt':
        nii_warn.f_die("Cannot write to %s" % (file_path))
    else:
        nii_io_tk.f_write_raw_mat(data, file_path)
    return
5,338,346
def ascii_to_raster(input_ascii, output_raster, input_type=np.float32, input_proj=None): """Convert an ASCII raster to a different file format Args: input_ascii (str): output_raster (str): input_type (): input_proj (): Returns: None """ if input_proj is None: input_proj = env.snap_proj ## Read in the ASCII header with open(input_ascii, 'r') as input_f: input_header = input_f.readlines()[:6] input_cols = float(input_header[0].strip().split()[-1]) input_rows = float(input_header[1].strip().split()[-1]) ## DEADBEEF - I need to check cell corner vs. cell center here input_xmin = float(input_header[2].strip().split()[-1]) input_ymin = float(input_header[3].strip().split()[-1]) input_cs = float(input_header[4].strip().split()[-1]) input_nodata = float(input_header[5].strip().split()[-1]) input_geo = ( input_xmin, input_cs, 0., input_ymin + input_cs * input_rows, 0., -input_cs) output_array, output_nodata = ascii_to_array( input_ascii, input_type, input_nodata) ## Save the array to a raster array_to_raster(output_array, output_raster, input_geo, input_proj)
5,338,347
def pds_p_score(studydata, column, context):
    """Please split this field between 'pds_pv_boy_tanner' for boys and
    'pds_pv_girl_tanner' for girls."""
    studydata['pds_pv_girl_tanner'] = column.where(studydata.gender == 'F')
    studydata['pds_pv_boy_tanner'] = column.where(studydata.gender == 'M')
5,338,348
def generate_options_for_resource_group(control_value=None, **kwargs) -> List:
    """Dynamically generate options for resource group form field based on the
    user's selection for Environment."""
    if control_value is None:
        return []

    # Get the environment
    env = Environment.objects.get(id=control_value)

    # Get the Resource Groups as defined on the Environment. The Resource Group is a
    # CustomField that is only updated on the Env when the user syncs this field on the
    # Environment specific parameters.
    resource_groups = env.custom_field_options.filter(field__name="resource_group_arm")
    return [rg.str_value for rg in resource_groups]
5,338,349
def printallspoff(pths):
    """
    Print SP_OFF values for all CORRTAG files (should be same for RAWTAG files)

    Parameters
    ----------
    pths : list of str
        list of paths to the files

    Returns
    -------
    """
    for ifldpth in pths:
        raws = glob.glob(ifldpth + '*corrtag_*.fits')
        for raw in raws:
            with fits.open(raw) as f:
                print(raw, f[1].header['SP_OFF_A'], f[1].header['SP_OFF_B'])
5,338,350
def plot_map(fvcom, tide_db_path, threshold=np.inf, legend=False, **kwargs): """ Plot the tide gauges which fall within the model domain (in space and time) defined by the given FileReader object. Parameters ---------- fvcom : PyFVCOM.read.FileReader FVCOM model data as a FileReader object. tide_db_path : str Path to the tidal database. threshold : float, optional Give a threshold distance (in spherical units) beyond which a gauge is considered too far away. legend : bool, optional Set to True to add a legend to the plot. Defaults to False. Any remaining keyword arguments are passed to PyFVCOM.plot.Plotter. Returns ------- plot : PyFVCOM.plot.Plotter The Plotter object instance for the map """ tide_db = TideDB(tide_db_path) gauge_names, gauge_locations = tide_db.get_gauge_locations(long_names=True) gauges_in_domain = [] fvcom_nodes = [] for gi, gauge in enumerate(gauge_locations): river_index = fvcom.closest_node(gauge, threshold=threshold) if river_index: gauge_id, gauge_dist = tide_db.get_nearest_gauge_id(*gauge) times, data = tide_db.get_tidal_series(gauge_id, np.min(fvcom.time.datetime), np.max(fvcom.time.datetime)) if not np.any(data): continue gauges_in_domain.append(gi) fvcom_nodes.append(river_index) plot = Plotter(fvcom, **kwargs) fx, fy = plot.m(fvcom.grid.lon, fvcom.grid.lat) plot.plot_field(-fvcom.grid.h) plot.axes.plot(fx[fvcom_nodes], fy[fvcom_nodes], 'ro', markersize=3, zorder=202, label='Model') # Add the gauge locations. rx, ry = plot.m(gauge_locations[:, 0], gauge_locations[:, 1]) plot.axes.plot(rx, ry, 'wo', label='Gauges') for xx, yy, name in zip(rx, ry, gauge_names[gauges_in_domain]): plot.axes.text(xx, yy, name, fontsize=10, rotation=45, rotation_mode='anchor', zorder=203) if legend: plot.axes.legend(numpoints=1, scatterpoints=1, ncol=2, loc='upper center', fontsize=10) return plot
5,338,351
def generate_vocab_file(corpus_dir): """ Generate the vocab.txt file for the training and prediction/inference. Manually remove the empty bottom line in the generated file. """ data_list = [] vocab_list = [] freq_dist = {} seq_len_dict = {} # Special tokens, with IDs: 0, 1, 2 for t in ['_unk_', '_bos_', '_eos_']: vocab_list.append(t) # The word following this punctuation should be capitalized in the prediction output. for t in ['.', '!', '?']: vocab_list.append(t) # The word following this punctuation should not precede with a space in the prediction output. for t in ['(', '[', '{', '``', '$']: vocab_list.append(t) for fd in range(2, -1, -1): if fd == 0: file_dir = os.path.join(corpus_dir, AUG0_FOLDER) elif fd == 1: file_dir = os.path.join(corpus_dir, AUG1_FOLDER) else: file_dir = os.path.join(corpus_dir, AUG2_FOLDER) for data_file in sorted(os.listdir(file_dir)): full_path_name = os.path.join(file_dir, data_file) if os.path.isfile(full_path_name) and data_file.lower().endswith('.txt'): with open(full_path_name, 'r') as f: for line in f: l = line.strip() if not l: # If skipped, we still need to write it to the final file data_list.append(l) continue if l.startswith("Q:") or l.startswith("A:"): # Tokenize (excluding Q/A) tokens = word_tokenize(l[2:]) # Store tokenized string (including Q/A) token_str = l[:2] + ' ' + ' '.join(tokens) data_list.append(token_str) # Cache long sequences n = len(tokens) if n > DEFAULT_SEQ_LEN: seq_len_dict[token_str] = n # Handle tokens for vocabulary for t in tokens: if len(t) and t != ' ': # Add token to vocabulary if t not in vocab_list: vocab_list.append(t) # If token is in vocabulary, increment its frequency if t in freq_dist.keys(): freq_dist[t] += 1 else: # Otherwise add it and set to 1 freq_dist[t] = 1 print("Vocab size after all base data files scanned: {}".format(len(vocab_list))) # clear generated files from prior runs and create blanks for f in [DATA_FILE, VOCAB_FILE, FREQ_FILE, SEQ_LEN_FILE]: if os.path.isfile(f): os.remove(f) if not os.path.isfile(f): open(f, 'w').close() # Write objects to files (could be abstracted but more clear this way) with open(DATA_FILE, 'a') as f_out: for line in data_list: f_out.write(f"{line}\n") with open(VOCAB_FILE, 'a') as f_voc: for v in vocab_list: f_voc.write(f"{v}\n") with open(FREQ_FILE, 'a') as f_freq: for t, f in freq_dist.items(): f_freq.write(f"{t}|{f}\n") with open(SEQ_LEN_FILE, 'w') as f_seq: for seq, n in seq_len_dict.items(): f_seq.write(f"{seq}|{n}\n") print("The final vocab file generated. Vocab size: {}".format(len(vocab_list)))
5,338,352
def test_cli_with_no_login_or_password(config, capsys, valid_connection): """Test empty login parameters.""" testargs = ["yessssms", "-m", "test"] # "-l", "\"\"", "-p", "\"\""] # print("test:..." + str(YesssSMS.const.CONFIG_FILE_PATHS)) with (mock.patch.object(sys, "argv", testargs)): with pytest.raises(SystemExit) as wrapped_e: CLI() assert wrapped_e.type == SystemExit assert wrapped_e.value.code == 2 captured = capsys.readouterr() assert "error: no username or password defined " in captured.out
5,338,353
def insertDataCaller(columns):
    """Calls the insertData function a few times to insert info into the DB."""
    cols = ['transformer', 'timestamp', 'vlt_a', 'vlt_b', 'vlt_c', 'volt']
    insertData(['transformerOutput.csv'], 'TransformerData', cols)

    cols = ['circuit', 'timestamp', 'amp_a', 'amp_b', 'amp_c', 'mvar', 'mw']
    insertData(['circuitOutput.csv'], 'CircuitData', cols)

    cols = ['sensor_id', 'irradiance_w_per_m2', 'timestamp']
    insertData(['irradianceOutput.csv'], 'IrradianceData', cols)

    # dict.has_key() was removed in Python 3; use the "in" operator instead.
    if 'tapCol' in columns:
        cols = ['timestamp', 'tap_setting', 'substation', 'transformer']
        insertData(['tapOutput.csv'], 'TapData', cols)

    if 'humidityCol' in columns:
        cols = ['timestamp', 'met_air_temp_degf', 'met_rel_humid_pct']
        insertData(['weatherOutput.csv'], 'KiheiSCADATemperatureHumidity', cols)

    if 'batterySoc' in columns:
        cols = ['timestamp', 'kvar', 'kw', 'soc', 'pwr_ref_volt']
        insertData(['batteryOutput.csv'], 'BatteryWailea', cols)
5,338,354
def test_get_set_head():
    """Test public header CRUD via slashpath."""
    mydat = mkh5.mkh5(TEST_H5)
    mydat.reset_all()
    mydat.create_mkdata(S01["gid"], S01["eeg_f"], S01["log_f"], S01["yhdr_f"])

    # test get->set->get round trip
    head_pattern = "S01/dblock_0/streams/MiPa/"
    before_head = mydat.gethead(head_pattern)
    mydat.sethead(before_head)
    after_head = mydat.gethead(head_pattern)
    assert before_head == after_head

    # test get-set with changes
    print("# ------------------------------------------------------------")
    print("# Before sethead ...")
    print("# ------------------------------------------------------------")
    mydat = mkh5.mkh5(TEST_H5)  # re-open w/out obliteration
    # raw string so \b is a regex word boundary rather than a backspace character
    mydat.headinfo(r"S01/dblock_0.*(experiment\b|dblock_ticks|crw_ticks)")
    mydat.sethead(
        [
            ("S01/dblock_0/streams/dblock_ticks/new_key", "new_value"),
            ("S01/dblock_0/streams/dblock_ticks/name", "new_dblock_ticks"),
            ("S01/dblock_0/streams/crw_ticks/stream", "new_crw_ticks"),
            ("S01/dblock_0/experiment", "new_expt_name"),
            ("S01/dblock_0/runsheet/new_data", [1, 2, 3]),
            ("S01/dblock_0/streams/dblock_ticks/name", "new_dblock_ticks"),
        ]
    )

    print("# ------------------------------------------------------------")
    print("# After sethead ...")
    print("# ------------------------------------------------------------")
    mydat = mkh5.mkh5(TEST_H5)  # re-open w/out obliteration
    mydat.headinfo(r"S01/dblock_0.*(experiment\b|dblock_ticks|crw_ticks)")

    # inspect via h5py directly
    hio = mydat.HeaderIO()
    with h5py.File(TEST_H5, "r") as h5:
        myblock = h5[S01["gid"] + "/dblock_0"]
        hio.get(myblock)
        for c in ["dblock_ticks", "crw_ticks", "MiPa"]:
            pprint.pprint(hio.header["streams"][c])

    mydat = mkh5.mkh5(TEST_H5)  # re-open w/out obliteration
    new_info = mydat.gethead(
        "S01/dblock_0.*(experiment|(MiPa|dblock_ticks|crw_ticks))"
    )
    test_vals = [
        (k, v) for k, v in new_info if "dblock_0/streams" in k or "/experiment" in k
    ]

    h5_path = "S01/dblock_0"
    for k, v in test_vals:
        # print(k)
        if k == h5_path + "/experiment" and v != "new_expt_name":
            msg = (
                "sethead failed to assign key=value: "
                "experiment='new_expt_name'"
            )
            raise ValueError(msg)
        if (
            k == h5_path + "/streams/dblock_ticks/name"
            and v != "new_dblock_ticks"
        ):
            msg = "sethead failed to assign {0} new_dblock_ticks: {1}".format(
                k, v
            )
            raise ValueError(msg)
        if k == h5_path + "/streams/crw_ticks/stream" and v != "new_crw_ticks":
            msg = "sethead failed to assign {0} new_crw_ticks: {1}".format(k, v)
            raise ValueError(msg)
        if k == h5_path + "/streams/dblock_ticks/new_key" and v != "new_value":
            msg = "sethead failed to create new {0}: {1}".format(k, v)
            raise ValueError(msg)
    os.remove(TEST_H5)
5,338,355
def create_transfer_event(imsi, old_credit, new_credit, reason, from_number=None, to_number=None): """Creates a credit transfer event.""" _create_event(imsi, old_credit, new_credit, reason, from_number=from_number, to_number=to_number)
5,338,356
def get_element_block(
    xml_string: str,
    first_name: str,
    second_name: str = None,
    include_initial: bool = True,
    include_final: bool = True
) -> str:
    """
    warning: use great caution if attempting to apply this function, or
    anything like it, to tags that may appear more than once in the label.
    this _general type of_ approach to XML parsing works reliably only in the
    special case where tag names (or sequences of tag names, etc.) are unique
    (or their number of occurrences are otherwise precisely known)
    """
    if second_name is None:
        element_names = [first_name]
    else:
        element_names = [first_name, second_name]
    split = tuple(split_at(
        xml_string.splitlines(),
        are_in(element_names, or_),
        keep_separator=True
    ))
    chunk = split[2]
    if include_initial:
        chunk = split[1] + chunk
    if include_final:
        chunk = chunk + split[3]
    return "\n".join(chunk)
5,338,357
def eval_eu_loss(ambiguity_values, dfs_ambiguity):
    """Calculate the expected utility loss that results from a setting that
    incorporates different levels of ambiguity.

    Args:
        ambiguity_values (dict): Dictionary with various levels of ambiguity to be
            implemented (key = name of scenario).
        dfs_ambiguity (list): List of pd.DataFrame objects that contain the results
            of the simulated models.

    Returns:
        df_EU (pd.DataFrame): Dataframe that summarizes the expected utility loss
            under the various ambiguity scenarios.

    """
    EU, EU_Loss = {}, {}
    ambiguity_labels = get_dict_labels(ambiguity_values)

    # KW94 specific
    index_value_func = [
        "Value_Function_A",
        "Value_Function_B",
        "Value_Function_Edu",
        "Value_Function_Home",
    ]

    # Calculate the Expected Utility and EU loss for each ambiguity value
    # Expected utility = value function at the initial period
    for df, ambiguity_label in zip(dfs_ambiguity, ambiguity_labels):
        EU[ambiguity_label] = []
        EU_Loss[ambiguity_label] = []
        # Retrieve the last identifier within looped dataframe
        for i in range(0, df.index[-1][0] + 1):
            EU[ambiguity_label].append(df[index_value_func].loc[(i, 0)].max())

        EU[ambiguity_label] = np.mean(EU[ambiguity_label])
        EU_Loss[ambiguity_label] = np.abs(
            (EU[ambiguity_label] - EU["absent"]) / EU["absent"]
        )

    # Assemble data frames
    df_EU = pd.DataFrame.from_dict(EU, orient="index", columns=["EU"])
    df_EU["EU_Loss"] = pd.Series(EU_Loss)

    return df_EU
5,338,358
def reset(ip: str = None, username: str = None) -> int: """ Reset records that match IP or username, and return the count of removed attempts. This utility method is meant to be used from the CLI or via Python API. """ attempts = AccessAttempt.objects.all() if ip: attempts = attempts.filter(ip_address=ip) if username: attempts = attempts.filter(username=username) count, _ = attempts.delete() log.info('AXES: Reset %s access attempts from database.', count) return count
5,338,359
def try_patch_column(meta_column: MetaColumn) -> bool: """Try to patch the meta column from request.json. Generator assignment must be checked for errors. Disallow column type change when a generator is assigned and when the column is imported. An error is raised in that case. """ if 'col_type' in request.json and request.json['col_type'] != meta_column.col_type: if meta_column.reflected_column_idf is not None: raise ColumnError('cannot change the type of an imported column', meta_column) if meta_column.generator_setting is not None: raise ColumnError('cannot change the type of a column with an assigned generator', meta_column) patch_all_from_json(meta_column, ['name', 'col_type', 'nullable']) generator_setting_id = request.json.get('generator_setting_id') if generator_setting_id is not None: facade = inject(GeneratorFacade) return facade.update_column_generator(meta_column, generator_setting_id) return True
5,338,360
def get_subvs(parent):
    """
    Return the entries of ``parent`` that are btrfs subvolumes.

    :param parent: directory to scan for subvolumes
    :return: list of names of the subvolumes directly under ``parent``
    """
    import btrfsutil
    # Join each name with the parent path so the check does not depend on the
    # current working directory.
    return [name for name in os.listdir(parent)
            if btrfsutil.is_subvolume(os.path.join(parent, name))]
5,338,361
def readout(x, mask, aggr='add'): """ Args: x: (B, N_max, F) mask: (B, N_max) Returns: (B, F) """ return aggregate(x=x, dim=1, aggr=aggr, mask=mask, keepdim=False)
5,338,362
def _update_ipython_ns(shell, globals, locals): """Update the IPython 0.11 namespace at every visit""" shell.user_ns = locals.copy() try: shell.user_global_ns = globals except AttributeError: class DummyMod: """A dummy module used for IPython's interactive namespace.""" pass user_module = DummyMod() user_module.__dict__ = globals shell.user_module = user_module shell.init_history() shell.init_user_ns() shell.init_completer()
5,338,363
def debug_ssh(function): """Decorator to generate extra debug info in case off SSH failure""" def wrapper(self, *args, **kwargs): try: return function(self, *args, **kwargs) except tempest.lib.exceptions.SSHTimeout: try: original_exception = sys.exc_info() caller = test_utils.find_test_caller() or "not found" if self.server: msg = 'Caller: %s. Timeout trying to ssh to server %s' LOG.debug(msg, caller, self.server) if self.log_console and self.servers_client: try: msg = 'Console log for server %s: %s' console_log = ( self.servers_client.get_console_output( self.server['id'])['output']) LOG.debug(msg, self.server['id'], console_log) except Exception: msg = 'Could not get console_log for server %s' LOG.debug(msg, self.server['id']) # re-raise the original ssh timeout exception six.reraise(*original_exception) finally: # Delete the traceback to avoid circular references _, _, trace = original_exception del trace return wrapper
5,338,364
def editTags( tagPaths, # type: List[String] attributes, # type: Dict parameters, # type: Dict accessRights, # type: String overrides, # type: Dict alarmList, # type: String alarmConfig, # type: Dict provider="", # type: Optional[String] json=None, # type: Optional[String] ): # type: (...) -> None """Edit multiple existing Tags in Ignition with a single call. This will not work on Client Tags, because there is a Client Provider for each project. Args: tagPaths: The full path to the Tag you want to edit. For members of UDT instances, the tagPath will be the path to the UDT instance, with the overrides parameter listing out the member Tags to edit. Note: you can specify the Tag provider name in square brackets at the beginning of the parentPath string. Example: "[myTagProvider]MyTagsFolder". If the Tag provider name is left off then the project default provider will be used. attributes: The Tag's configuration attributes. parameters: The parameters for a UDT instance Tag. accessRights: The access rights for a Tag. Possible values are Read_Only, Read_Write, and Custom. overrides: All of the overrides for a UDT instance Tag. alarmList: List of legacy alarms for the Tag. The legacy alarm system was retired in 7.6.0, so the alarmConfig parameter should be utilized on newer versions. alarmConfig: The alarm configuration for the Tag. provider: The name of the Tag provider, used in conjunction with the JSON argument. The default value is the default Tag provider. Optional. json: The properties to edit on Tags, represented as a JSON object. When using this, it acts as a replacement for other parameters. Optional. """ print( tagPaths, attributes, parameters, accessRights, overrides, alarmList, alarmConfig, provider, json, )
5,338,365
def cases(): """ Loads all filenames of the pre-calculated test cases. """ case_dir = os.path.join( os.path.dirname(os.path.realpath(__file__)), 'cases' ) cases = [] for dir_path, _, files in os.walk(case_dir): cases = cases + [os.path.join(dir_path, f) for f in files] return cases
5,338,366
def remove_old_ckpts(model_dir, reverse=False):
    """ Keep only the three best checkpoints and delete the rest.
        reverse=False -> scores are losses (lower is better),
        reverse=True -> scores are rewards (higher is better).
    """
    ckpts = os.listdir(join(model_dir, 'ckpt'))
    score_list = [float(ckpt.split('-')[-1]) for ckpt in ckpts]
    ckpts_score_sorted = sorted(zip(score_list, ckpts), key=lambda p: p[0], reverse=reverse)
    _, ckpts_sorted = zip(*ckpts_score_sorted)

    for ckpt in ckpts_sorted[3:]:
        os.remove(join(model_dir, 'ckpt', ckpt))
    logging.info("Best model: {}".format(join(model_dir, 'ckpt', ckpts_sorted[0])))
5,338,367
def axisAligned(angle, tol=None, axis=None):
    """
    Determine if a line (represented by its angle) is aligned with an axis.

    Parameters
    ----------
    angle : float
        The line's angle of inclination (in radians)
    tol : float
        Maximum distance from `axis` for which `angle` is still considered
        to be aligned.
    axis : {'horizontal', 'vertical'}
        The reference axis.

    Returns
    -------
    is_aligned : bool
        True if `angle` is within `tol` radians of `axis`.
    """
    import math

    if axis == 'horizontal':
        target_angle = math.pi / 2
    elif axis == 'vertical':
        target_angle = 0.0
    else:
        raise ValueError("axis must be 'horizontal' or 'vertical'")

    distance = abs(target_angle - abs(angle))
    is_aligned = distance < tol
    return is_aligned
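A quick usage sketch with illustrative numbers: with math.pi / 2 as the horizontal target, an angle of 1.62 rad is within a 0.1 rad tolerance of horizontal, while 0.25 rad is not vertical at that tolerance.

# Illustrative values only.
print(axisAligned(1.62, tol=0.1, axis='horizontal'))  # True: |pi/2 - 1.62| ~ 0.05
print(axisAligned(0.25, tol=0.1, axis='vertical'))    # False: |0.0 - 0.25| = 0.25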
5,338,368
def gpio_pin_expression(conf): """Generate an expression for the given pin option. This is a coroutine, you must await it with a 'yield' expression! """ if conf is None: return from esphome import pins for key, (func, _) in pins.PIN_SCHEMA_REGISTRY.items(): if key in conf: yield coroutine(func)(conf) return number = conf[CONF_NUMBER] mode = conf[CONF_MODE] inverted = conf.get(CONF_INVERTED) yield GPIOPin.new(number, RawExpression(mode), inverted)
5,338,369
def make_results_dict( mesh_data,key_descriptor, key_transformation=None, verbose=False ): """Load mesh data into dictionary, using specified parameter tuple as key. Example key descriptor: (("Nsigmamax",int),("Nmax",int),("hw",float)) Example: >>> KEY_DESCRIPTOR_NMAX_HW = (("Nmax",int),("hw",float)) For now, in the event that the same mesh point arises multiple times on input (i.e., a given value for the key tuple is duplicated), the final occurrence overwrites any earlier occurrences. In the future, a more sophisticated "merging" process might be appropriate. An optional key transformation is useful for, e.g., shifting the Nmax value stored in the dictionary when results are to be used as reference results for the space of opposite parity. Arguments: mesh_data (list of ResultsData): data for mesh points key_descriptor (tuple of tuple): dtype descriptor for key key_transformation (callable,optional): transformation function to apply to key tuple verbose (bool,optional): verbose output Returns: (dict): mapping from key tuple to data object """ key_function = make_key_function(key_descriptor) results_dict = dict() for mesh_point in mesh_data: # make key key = key_function(mesh_point) if (key_transformation is not None): key = key_transformation(key) if (verbose): print(" make_results_dict: filename {} key {}".format(mesh_point.filename,key)) # store data point if (key not in results_dict): # save mesh point results_dict[key] = mesh_point else: # TODO: do smart merge "update" on existing mesh point # overwrite mesh point results_dict[key] = mesh_point return results_dict
5,338,370
def plot_1d(x_test, mean, var):
    """
    Description
    ----------
    Function to plot one dimensional gaussian process regressor mean and variance.

    Parameters
    ----------
    x_test: array_like
        Array containing one dimensional inputs of the gaussian process model.

    mean: array_like
        An array with the values of the mean function of the gaussian process.

    var: array_like
        The variance around the values of the mean function of the gaussian process.

    Returns
    ----------
    Matplotlib plot of mean function and variance of the gaussian process model.
    """
    x_test = exactly_1d(x_test)
    mean = exactly_1d(mean)
    var = exactly_1d(var)
    plt.fill_between(x_test,
                     mean-.674*np.sqrt(var),
                     mean+.674*np.sqrt(var),
                     color='k', alpha=.4,
                     label='50% Credible Interval')
    plt.fill_between(x_test,
                     mean-1.150*np.sqrt(var),
                     mean+1.150*np.sqrt(var),
                     color='k', alpha=.3,
                     label='75% Credible Interval')
    plt.fill_between(x_test,
                     mean-1.96*np.sqrt(var),
                     mean+1.96*np.sqrt(var),
                     color='k', alpha=.2,
                     label='95% Credible Interval')
    # 2.576 is the two-sided 99% normal quantile (2.326 is the one-sided value).
    plt.fill_between(x_test,
                     mean-2.576*np.sqrt(var),
                     mean+2.576*np.sqrt(var),
                     color='k', alpha=.1,
                     label='99% Credible Interval')
    plt.plot(x_test, mean, c='w')
    return None
5,338,371
def sentence_to_windows(sentence, min_window, max_window): """ Create window size chunks from a sentence, always starting with a word """ windows = [] words = sentence.split(" ") curr_window = "" for idx, word in enumerate(words): curr_window += (" " + word) curr_window = curr_window.lstrip() next_word_len = len(words[idx+1]) + 1 if idx+1 < len(words) else 0 if len(curr_window) + next_word_len > max_window: curr_window = clean_sentence(curr_window) if validate_sentence(curr_window, min_window): windows.append(curr_window.strip()) curr_window = "" if len(curr_window) >= min_window: windows.append(curr_window) return windows
5,338,372
def get_master_name(els): """Function: get_master_name Description: Return name of the master node in a Elasticsearch cluster. Arguments: (input) els -> ElasticSearch instance. (output) Name of master node in ElasticSearch cluster. """ return els.cat.master().strip().split(" ")[-1]
5,338,373
def flop_turn_river(dead: Sequence[str]) -> Sequence[str]: """ Get flop turn and river cards. Args: dead: Dead cards. Returns: 5 cards. """ dead_concat = "".join(dead) deck = [card for card in DECK if card not in dead_concat] return random.sample(deck, 5)
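A short usage sketch; the DECK constant and the random import live at module level in the original code, so they are stubbed here only for illustration.

import random

DECK = [rank + suit for rank in "23456789TJQKA" for suit in "cdhs"]  # assumed card codes
board = flop_turn_river(["As", "Kd"])  # the two hole cards are excluded from the draw
print(len(board), set(board) & {"As", "Kd"})  # 5 set()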
5,338,374
def smith_gassmann(kstar, k0, kfl2, phi): """ Applies the Gassmann equation. Returns Ksat2. """ a = (1 - kstar/k0)**2.0 b = phi/kfl2 + (1-phi)/k0 - (kstar/k0**2.0) ksat2 = kstar + (a/b) return ksat2
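A minimal usage sketch with illustrative (not measured) rock-physics values in GPa: a dry-rock modulus of 12, a mineral modulus of 37, a brine modulus of 2.2 and 25% porosity give a saturated modulus of roughly 15.6 GPa.

# Illustrative inputs only (all moduli in GPa).
ksat = smith_gassmann(kstar=12.0, k0=37.0, kfl2=2.2, phi=0.25)
print(round(ksat, 2))  # ~15.65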
5,338,375
async def _preflight_cors(request): """Respond to preflight CORS requests and load parameters.""" if request.method == "OPTIONS": return textify("ok", headers=generate_cors_headers(request)) request['args'] = {} if request.form: for key in request.form: key_lower = key.lower() if key_lower in _MUST_BE_GET_PARAM: raise UserException(CANNOT_BE_POST_PARAM % key) request['args'][key_lower] = request.form[key][0] elif request.json: for key in request.json: key_lower = key.lower() if key_lower in _MUST_BE_GET_PARAM: raise UserException(CANNOT_BE_POST_PARAM % key) # Make all url parameters strings if isinstance(request.json[key], list): request['args'][key_lower] = json.dumps(request.json[key]) else: request['args'][key_lower] = str(request.json[key]) # Take all Get parameters for key, value in list(request.raw_args.items()): key_lower = key.lower() if key_lower in _MUST_BE_POST_PARAM: raise UserException(CANNOT_BE_GET_PARAM % key) request['args'][key_lower] = value
5,338,376
def login_teacher(): """ Login User and redirect to index page. """ # forget any user session.clear() # if user reached via route POST if request.method == "POST": # check user credentials email_id = request.form.get("email_id") passw = request.form.get("password") result = db.execute("SELECT * FROM registrants WHERE email_id = :email", email = email_id) if len(result) != 1 or not pwd_context.verify(passw, result[0]['hash']): return "INVALID USERNAME/PASSWORD" else: folder_id = db.execute('SELECT folder_id FROM shared_folder WHERE user_id = :user_id', user_id = result[0]['id']) print(folder_id) session["user_id"] = result[0]["id"] session['folder_id'] = folder_id[0]['folder_id'] return redirect(url_for('index')) else: return render_template('login.html')
5,338,377
def upload_file(): """Upload files""" print("UPLOADED FILES", len(request.files)) if not os.path.exists(FILE_START_PATH): os.makedirs(FILE_START_PATH) # Set the upload folder for this user if it hasn't been set yet # pylint: disable=consider-using-with if 'upload_folder' not in session or session['upload_folder'] is None or not os.path.isdir(session['upload_folder']): session['upload_folder'] = tempfile.mkdtemp(dir=FILE_START_PATH) loaded_filenames = [] for file_id in request.files: one_file = request.files[file_id] save_path = os.path.join(session['upload_folder'], secure_filename(one_file.filename)) if os.path.exists(save_path): os.unlink(save_path) one_file.save(save_path) loaded_filenames.append(one_file.filename) return json.dumps(loaded_filenames)
5,338,378
def mult_to_bytes(obj: object) -> bytes: """Convert given {array of bits, bytes, int, str, b64} to bytes""" if isinstance(obj, list): i = int("".join(["{:01b}".format(x) for x in obj]), 2) res = i.to_bytes(bytes_needed(i), byteorder="big") elif isinstance(obj, int): res = obj.to_bytes(bytes_needed(obj), "big") elif isBase64(obj): res = base64.b64decode(obj) elif isinstance(obj, bytes): res = obj elif isinstance(obj, str): alphabet = max([int(c) for c in obj]) + 1 res = int(obj, alphabet) return mult_to_bytes(res) else: res = bytes(obj) return res
5,338,379
def get_barrier(loopy_opts, local_memory=True, **loopy_kwds): """ Returns the correct barrier type depending on the vectorization type / presence of atomics Parameters ---------- loopy_opts: :class:`loopy_utils.loopy_opts` The loopy options used to create this kernel. local_memory: bool [True] If true, this barrier will be used for memory in the "local" address spaces. Only applicable to OpenCL loopy_kwds: dict Any other loopy keywords to put in the instruction options Returns ------- barrier: str The built barrier instruction """ mem_kind = '' barrier_kind = 'nop' if use_atomics(loopy_opts): mem_kind = 'local' if local_memory else 'global' barrier_kind = 'lbarrier' loopy_kwds['mem_kind'] = mem_kind return '...' + barrier_kind + '{' + ', '.join([ '{}={}'.format(k, v) for k, v in six.iteritems(loopy_kwds)]) + '}'
5,338,380
from contextlib import contextmanager


# The contextmanager decorator is required so the generator below can be used
# in a with statement, as the docstring describes.
@contextmanager
def envset(**kwargs):
    """
    Set environment variables that will last for the duration of the with
    statement.  To unset a variable temporarily, pass its value as None.
    """
    prev = {}
    try:
        # record the original values
        for name in kwargs:
            prev[name] = os.getenv(name)

        # set the new values
        for name in kwargs:
            if kwargs[name] is None:
                if name in os.environ:
                    del os.environ[name]
            else:
                os.environ[name] = kwargs[name]
        yield
    finally:
        for name in kwargs:
            if prev[name] is not None:
                os.environ[name] = prev[name]
            elif os.getenv(name) is not None:
                del os.environ[name]
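A usage sketch for the context manager above; the variable names are hypothetical.

import os

os.environ["KEEP_ME"] = "original"
with envset(KEEP_ME="temporary", DROP_ME=None):
    assert os.getenv("KEEP_ME") == "temporary"
    assert os.getenv("DROP_ME") is None    # unset for the duration of the block
assert os.getenv("KEEP_ME") == "original"  # restored afterwards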
5,338,381
def monthly_rain(year, from_month, x_months, bound):
    """
    This function downloads the data embedded tif files from the SILO Longpaddock
    Dataset and creates a cumulative total by stacking the xarrays. This function
    is embedded in the get_rainfall function or can be used separately.

    Parameters
    ----------
    input : year (integer) value of the year for the data to be pulled
            from_month (integer) value of the first month for the data to be pulled
            x_months (integer) value of the last month to be pulled (inclusive)
            bound (shapefile) area of interest for the final calculated tif to be clipped to

    Returns
    ------
    output : rioxarray item representing each of the months pulled and summed up
             for the months selected
    """
    # create month string as pandas frame
    mon_string = pd.DataFrame({'mon': ['01', '02', '03', '04', '05', '06',
                                       '07', '08', '09', '10', '11', '12']})
    # assign year column
    mon_string['year'] = str(year)
    # assign yearmon column
    mon_string['yearmon'] = mon_string['year'] + mon_string['mon']
    # filter to the selected months
    mon_select = mon_string[from_month-1:x_months]
    # set base url
    base = 'https://s3-ap-southeast-2.amazonaws.com/silo-open-data/monthly/monthly_rain'

    rain_stack = []

    # loop to download tifs, reproject, stack, sum and clip
    for index, i in mon_select.iterrows():
        call = base + '/' + i['year'] + '/' + i['yearmon'] + '.monthly_rain.tif'
        month_rain = rxr.open_rasterio(call, masked=True).squeeze()
        rain_stack.append(month_rain)

    # use the first raster's CRS so a single-month request also works
    bound_crs = bound.to_crs(rain_stack[0].rio.crs)

    stacked_rain = sum(rain_stack).rio.clip(bound_crs.geometry)

    return stacked_rain
5,338,382
def organize_by_chromosome(genes, transcripts):
    """ Iterate through genes and transcripts and group them by chromosome """

    gene_dict = {}
    transcript_dict = {}

    for ID in genes:
        gene = genes[ID]
        chromosome = gene.chromosome
        if chromosome not in gene_dict:
            gene_dict[chromosome] = {}
        gene_dict[chromosome][ID] = gene

    for ID in transcripts:
        transcript = transcripts[ID]
        chromosome = transcript.chromosome
        if chromosome not in transcript_dict:
            transcript_dict[chromosome] = {}
        transcript_dict[chromosome][ID] = transcript

    return gene_dict, transcript_dict
5,338,383
def get_synth_stations(settings, wiggle=0):
    """
    Compute synthetic station locations.
    Values for mode "grid" and "uniform" are currently for tests on global Earth geometry.
    TODO: incorporate into settings.yml

    :param settings: dict holding all info for project
    :type settings: dict
    :param wiggle: adds random variations in interval [-wiggle, wiggle] to locations,
    defaults to 0
    :type wiggle: float, optional
    :return: array containing station locations longitude/x and latitude/y coordinates.
    shape = (n, 2)
    :rtype: numpy.ndarray
    """
    from itertools import product

    mode = settings["synth_stations_mode"]
    n = settings["synth_stations_n"]
    if mode == "grid":
        lons = np.linspace(-180, 180 - (360 / int(np.sqrt(n))), int(np.sqrt(n)))
        lats = np.linspace(-75, 75, int(np.sqrt(n)))
        station_locations = list(product(lons, lats))
    elif mode == "uniform":
        lons = np.random.uniform(low=-180, high=180, size=n)
        lats = np.random.uniform(low=-75, high=75, size=n)
        station_locations = list(zip(lons, lats))
    elif mode == "partial_circle":
        n_total = settings["synth_stations_circle_max"]
        radius = settings["synth_stations_circle_radius"]
        n_used = settings["synth_stations_circle_n"]
        azimuths = np.linspace(0, 2 * np.pi, n_total)
        azimuths_used = azimuths[:n_used]
        lons = radius * np.cos(azimuths_used)
        lats = radius * np.sin(azimuths_used)
        station_locations = list(zip(lons, lats))
    elif mode == "file":
        import pandas as pd

        df = pd.read_csv(settings["synth_stations_file"])
        lons = df["x"].values
        lats = df["y"].values
        station_locations = list(zip(lons, lats))
    else:
        raise ValueError("Unknown synth_stations_mode: {}".format(mode))

    if wiggle != 0:
        # Perturb the stations that were actually generated; iterating over
        # product(lons, lats) would only be correct for the "grid" mode.
        station_locations = [
            [
                sta_lon + np.random.uniform(-wiggle, wiggle),
                sta_lat + np.random.uniform(-wiggle, wiggle),
            ]
            for sta_lon, sta_lat in station_locations
        ]

    station_locations = np.array(station_locations)
    return station_locations
5,338,384
def test_show_chromosome_labels(dash_threaded): """Test the display/hiding of chromosomes labels.""" prop_type = 'bool' def assert_callback(prop_value, nclicks, input_value): answer = '' if nclicks is not None: answer = FAIL if PROP_TYPES[prop_type](input_value) == prop_value: answer = PASS return answer template_test_component( dash_threaded, APP_NAME, assert_callback, ideogram_test_props_callback, 'showChromosomeLabels', 'True', prop_type=prop_type, component_base=COMPONENT_REACT_BASE, **BASIC_PROPS ) driver = dash_threaded.driver # assert the absence of chromosomes' labels labels = driver.find_elements_by_class_name('chrLabel') assert len(labels) == 0 # trigger a change of the component prop btn = wait_for_element_by_css_selector(driver, '#test-{}-btn'.format(APP_NAME)) btn.click() # assert the presence of chromosomes' labels labels = wait_for_elements_by_css_selector(driver, '.chrLabel') assert len(labels) > 0
5,338,385
async def async_unload_entry(hass: HomeAssistant, entry: ConfigEntry): """Unload a config entry.""" name = entry.data.get(CONF_NAME) ha = get_ha(hass, name) if ha is not None: await ha.async_remove() clear_ha(hass, name) return True
5,338,386
def relay_state(pin): """Take in pin, return string state of the relay""" logger.debug("relay_state() for pin %s", pin) disabled = GPIO.digitalRead(pin) logger.debug("Pin %s disabled: %s", pin, disabled) state = "off" if not disabled: state = "on" logger.debug("Relay state for pin %s is %s", pin, state) return state
5,338,387
def calc_fn(grid, size, coefficients=(-0.005, 10)): """ Apply the FitzHugh-Nagumo equations to a given grid""" a, b, *_ = coefficients out = np.zeros(size) out[0] = grid[0] - grid[0] ** 3 - grid[1] + a out[1] = b * (grid[0] - grid[1]) return out
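A short usage sketch (shapes and values are illustrative); the function returns the time derivatives of the two fields under the system it implements, u' = u - u^3 - v + a and v' = b * (u - v).

import numpy as np

# Two state fields (u, v) on a 1-D grid of 100 points.
grid = np.zeros((2, 100))
grid[0, 50] = 1.0  # small excitation in the u field
deriv = calc_fn(grid, size=(2, 100), coefficients=(-0.005, 10))
print(deriv.shape)  # (2, 100)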
5,338,388
def test_str():
    """Check the textual representation."""
    c = Carousel(window=4)
    assert str(c) == 'Carousel([], window=4)'

    c = Carousel([1, 2, 3])
    assert str(c) == 'Carousel([1, 2, 3], window=3)'

    c = Carousel([1, 2, 3], window=2)
    assert str(c) == 'Carousel([2, 3], window=2)'
5,338,389
def destr(screenString):
    """
    Return a valid Screen object as defined by the input string
    (think depickling).
    """
    rowList = []
    curRow = []
    curAsciiStr = ""
    curStr = ""
    for ch in screenString:
        if ch == '\n':
            # the row is complete: append it and start a new one
            rowList.append(curRow)
            curRow = []
        elif ch == '|':
            # ready to build the current AsciiPixel from the ascii and colour parts
            curAsciiPixel = AsciiPixel(int(curAsciiStr), int(curStr))
            curAsciiStr = ""
            curRow.append(curAsciiPixel)
            curStr = ""
        elif ch == ',':
            # the ascii part is done; start building the colour string
            curAsciiStr = curStr[:]
            curStr = ""
        else:
            curStr += ch
    ret = Screen(rowList)
    return ret
5,338,390
def analytical_solution_with_penalty(train_X, train_Y, lam, poly_degree):
    """
    Closed-form (analytical) solution with a penalty term (ridge regression).
    :param poly_degree: degree of the fitted polynomial
    :param train_X: X matrix of the training set
    :param train_Y: Y vector of the training set
    :param lam: penalty (regularisation) coefficient
    :return: fitted polynomial (np.poly1d built from the solution vector)
    """
    X, Y = normalization(train_X, train_Y, poly_degree)
    matrix = np.linalg.inv(X.T.dot(X) + lam * np.eye(X.shape[1])).dot(X.T).dot(Y)
    w_result = np.poly1d(matrix[::-1].reshape(poly_degree + 1))
    # print("w result analytical")
    # print(w_result)
    return w_result
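For reference, the closed form evaluated above is the ridge-regression normal equation; with $X$ the (normalised) polynomial design matrix, $Y$ the targets and $\lambda$ the penalty coefficient,

$$w^{*} = (X^{\top}X + \lambda I)^{-1} X^{\top} Y,$$

which is exactly what the `np.linalg.inv(...)` line computes.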
5,338,391
def formule_haversine(lat1: float, lon1: float, lat2: float, lon2: float) -> float:
    """
    Description:
        Compute the distance between two points using the haversine formula.

    Parameters:
        lat1: {float} -- Latitude of the first point.
        lon1: {float} -- Longitude of the first point.
        lat2: {float} -- Latitude of the second point.
        lon2: {float} -- Longitude of the second point.

    Returns:
        {float} -- Distance between the two points, in kilometres.

    Example:
        >>> formule_haversine(0, 0, 1, 1)
        157.24938127194397
    """
    EARTH_RADIUS = 6371e3
    dLat = radians(lat2 - lat1)
    dLon = radians(lon2 - lon1)
    lat1 = radians(lat1)
    lat2 = radians(lat2)

    a = sin(dLat/2)**2 + cos(lat1) * cos(lat2) * sin(dLon/2)**2
    c = 2 * atan2(sqrt(a), sqrt(1-a))

    return (EARTH_RADIUS * c) / 1000
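For reference, the function implements the standard haversine formula: with $\varphi$ latitude, $\lambda$ longitude and $R$ the Earth radius in metres,

$$a = \sin^{2}\!\left(\frac{\Delta\varphi}{2}\right) + \cos\varphi_{1}\cos\varphi_{2}\sin^{2}\!\left(\frac{\Delta\lambda}{2}\right), \qquad d = 2R\,\operatorname{atan2}\!\left(\sqrt{a},\ \sqrt{1-a}\right),$$

and the returned value is $d / 1000$, i.e. the distance in kilometres.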
5,338,392
def __load_functions__(symtbl):
    """Loads all Python functions from the module specified in the
    ``functions`` configuration parameter (in config.yaml) into the given
    symbol table (Python dictionary).
    """
    modname = ait.config.get('functions', None)

    if modname:
        module = pydoc.locate(modname)

        if module is None:
            # The module name is a string, so format it with %s (not %d).
            msg = 'No module named %s (from config.yaml functions: parameter)'
            raise ImportError(msg % modname)

        for name in dir(module):
            func = getattr(module, name)
            if callable(func):
                symtbl[name] = func
5,338,393
def get_word_combinations(word): """ 'one-two-three' => ['one', 'two', 'three', 'onetwo', 'twothree', 'onetwothree'] """ permutations = [] parts = [part for part in word.split(u'-') if part] for count in range(1, len(parts) + 1): for index in range(len(parts) - count + 1): permutations.append(u''.join(parts[index:index+count])) return permutations
5,338,394
def env_to_file(env_variables, destination_path=None, posix=True):
    """
    Write environment variables to a file.

    :param env_variables: environment variables
    :param destination_path: destination path of a file where the environment
                             variables will be stored. the stored variables will be a
                             bash script you can then source.
    :param posix: false if the target of the generated file will be a windows machine
    """
    if not env_variables:
        return None
    if not destination_path:
        destination_path = tempfile.mkstemp(suffix='env')[1]

    if posix:
        linesep = '\n'
    else:
        linesep = '\r\n'
    with open(destination_path, 'w') as f:
        if posix:
            f.write('#!/bin/bash')
            f.write(linesep)
            f.write('# Environment file generated by Cloudify. Do not delete '
                    'unless you know exactly what you are doing.')
            f.write(linesep)
            f.write(linesep)
        else:
            f.write('rem Environment file generated by Cloudify. Do not '
                    'delete unless you know exactly what you are doing.')
            f.write(linesep)

        for key, value in env_variables.items():
            if posix:
                f.write('export {0}={1}'.format(key, value))
                f.write(linesep)
            else:
                f.write('set {0}={1}'.format(key, value))
                f.write(linesep)
        f.write(linesep)

    return destination_path
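A usage sketch; the destination path and variable name are illustrative only.

path = env_to_file({"CLOUDIFY_EXAMPLE": "1"}, destination_path="/tmp/example_env.sh")
# The file now starts with a bash shebang and comment, followed by:
#     export CLOUDIFY_EXAMPLE=1
print(path)  # /tmp/example_env.sh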
5,338,395
def _phase_norm(signal, reference_channel=0): """Unit normalization. Args: signal: STFT signal with shape (..., T, D). Returns: Normalized STFT signal with same shape. """ angles = np.angle(signal[..., [reference_channel]]) return signal * np.exp(-1j * angles)
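A small self-contained check with random data (shapes are illustrative): after normalisation the reference channel carries zero phase, i.e. it is purely real.

import numpy as np

rng = np.random.default_rng(0)
stft = rng.standard_normal((4, 2)) + 1j * rng.standard_normal((4, 2))  # (T, D)
normed = _phase_norm(stft, reference_channel=0)
print(np.allclose(np.angle(normed[..., 0]), 0.0, atol=1e-12))  # True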
5,338,396
def maintenance_(): """Render a maintenance page while on maintenance mode.""" return render_template("maintenance/maintenance.html")
5,338,397
def CanEditHotlist(effective_ids, hotlist): """Return True if a user is editor(add/remove issues and change rankings).""" return any([user_id in (hotlist.owner_ids + hotlist.editor_ids) for user_id in effective_ids])
5,338,398
def key_inbetween(): """ keys inbetweens of selected objects (only selected channels) """ selected = cmds.ls(sl=True) for sel in selected: cmds.setKeyframe(sel, hierarchy="none", shape=False, an=True)
5,338,399