content: string (lengths 22 – 815k)
id: int64 (0 – 4.91M)
def move_songpos(context, songpos, to):
    """See :meth:`move_range`."""
    songpos = int(songpos)
    to = int(to)
    context.core.tracklist.move(songpos, songpos + 1, to)
18,700
def photo_fit_bprior(time, ptime, nflux, flux_err, guess_transit, guess_ew, rho_star, e, w, directory, nwalk, nsteps, ndiscard, plot_transit=True, plot_burnin=True, plot_corner=True, plot_Tburnin=True, plot_Tcorner=True): """Fit eccentricity for a planet. Applies Bayesian beta-dist prior from Kipping 2014 Parameters ---------- time: np.array Light curve time nflux: np.array Light curve flux flux_err: np.array Light curve flux errors guess_transit: np.array (length 4) Initial guess for MCMC transit fitting. Passed into mcmc_fitter(). guess_ew: np.array (length 2) Initial guess for MCMC e and w fitting. [e guess, w guess] rho_star: np.array "True" stellar density distribution e: float True eccentricity (just to name plots) w: float True longitude of periastron (just to name plots) directory: str Directory to save plots nwalk: int Number of walkers nsteps: int Number of steps to run in MCMC. Passed into mcmc_fitter(). ndiscard: int Number of steps to discard in MCMC. Passed into mcmc_fitter(). plot_transit: boolean, default True Save transit light curve plot + fit in specified directory. Returns ------- fite: float Best-fit eccentricity (mean of MCMC distribution) fitw: float Best-fit longitude of periastron (mean of MCMC distribution) gs: np.array "g" distribution for planet g_mean: float Mean of g distribution g_sigmas: list (length 2) [(-) sigma, (+) sigma] of g distribution zsc: list (length 2) Number of sigmas away [fit e, fit w] are from true [e, w] """ # EMCEE Transit Model Fitting _, _, pdist, rdist, adist, idist, t0dist = mcmc_fitter(guess_transit, time, ptime, nflux, flux_err, nwalk, nsteps, ndiscard, e, w, directory, plot_Tburnin=True, plot_Tcorner=True) p_f, perr_f = mode(pdist), get_sigmas(pdist) rprs_f, rprserr_f = mode(rdist), get_sigmas(rdist) a_f, aerr_f = mode(adist), get_sigmas(adist) i_f, ierr_f = mode(idist), get_sigmas(idist) t0_f, t0err_f = mode(t0dist), get_sigmas(t0dist) # Create a light curve with the fit parameters # Boobooboo fit = integratedlc_fitter(time, p_f, rprs_f, a_f, i_f, t0_f) if plot_transit==True: plt.cla() plt.errorbar(time, nflux, yerr=flux_err, c='blue', fmt='o', alpha=0.5, label='Original LC') plt.scatter(time, fit, c='red', alpha=1.0) plt.plot(time, fit, c='red', alpha=1.0, label='Fit LC') #plt.xlim(-0.1, 0.1) plt.legend() plt.savefig(directory + 'lightcurve_fitp' + str(p_f) + '_fitrprs' + str(rprs_f) + '_fitars' + str(a_f) + '_fiti' + str(i_f) + '.png') plt.close() print('Fit params:') print('Period (days): ', p_f) print('Rp/Rs: ', rprs_f) print('a/Rs: ', a_f) print('i (deg): ', i_f) T14dist = get_T14(pdist, rdist, adist, idist) T23dist = get_T23(pdist, rdist, adist, idist) gs, rho_c = get_g_distribution(rho_star, pdist, rdist, T14dist, T23dist) g_mean = mode(gs) g_sigma_min, g_sigma_plus = get_sigmas(gs) g_sigmas = [g_sigma_min, g_sigma_plus] #Guesses w_guess = guess_ew[1] e_guess = guess_ew[0] solnx = (w_guess, e_guess) pos = solnx + 1e-4 * np.random.randn(32, 2) nwalkers, ndim = pos.shape sampler = emcee.EnsembleSampler(nwalkers, ndim, bprior_log_probability, args=(g_mean, np.nanmean(g_sigmas)), threads=4) print('-------MCMC------') sampler.run_mcmc(pos, 5000, progress=True); flat_samples_e = sampler.get_chain(discard=1000, thin=15, flat=True) if plot_burnin==True: fig, axes = plt.subplots(2, figsize=(10, 7), sharex=True) samples = sampler.get_chain() labels = ["w", "e"] for i in range(ndim): ax = axes[i] ax.plot(samples[:, :, i], "k", alpha=0.3) ax.set_xlim(0, len(samples)) ax.set_ylabel(labels[i]) ax.yaxis.set_label_coords(-0.1, 0.5) 
axes[-1].set_xlabel("step number"); fig.savefig(directory + 'e_g_burnin.png') plt.close(fig) edist = flat_samples_e[:,1] wdist = flat_samples_e[:,0] fite = np.percentile(edist, 50) fitw = np.percentile(wdist, 50) mcmc_e = np.percentile(edist, [16, 50, 84]) q_e = np.diff(mcmc_e) mcmc_w = np.percentile(wdist, [16, 50, 84]) q_w = np.diff(mcmc_w) if plot_corner==True: fig = corner.corner(flat_samples_e, labels=labels, show_titles=True, title_kwargs={"fontsize": 12}, truths=[w, e], quantiles=[0.16, 0.5, 0.84], plot_contours=True); fig.savefig(directory + 'corner_fit_e' + str(fite) + '_fit_w' + str(fitw) + '_fit_g' + str(g_mean) + '.png') plt.close(fig) return p_f, rprs_f, a_f, i_f, fite, fitw, edist, wdist, gs, g_mean, g_sigmas, T14dist, T23dist
18,701
def ClientInit():
    """Run all startup routines for the client."""
    registry_init.RegisterClientActions()
    stats_collector_instance.Set(default_stats_collector.DefaultStatsCollector())
    config_lib.SetPlatformArchContext()
    config_lib.ParseConfigCommandLine()
    client_logging.LogInit()
    all_parsers.Register()

    if not config.CONFIG.ContextApplied(contexts.CLIENT_BUILD_CONTEXT):
        config.CONFIG.Persist("Client.labels")
        config.CONFIG.Persist("Client.proxy_servers")
        config.CONFIG.Persist("Client.tempdir_roots")
18,702
def runSwitchVerifier(node_address):
    """Wrap the SwitchVerifier binary.

    Configuration parameters of the switchVerifier binary are retrieved
    from the driverHPESettings class.
    """
    logger.debug("Running switchVerifier binary")
    # Run switchVerifier -c config.json -k pub.key -conf refconf -json [ip_addr]
    command = (SWITCH_VERIFIER_PATH + " -c " + SWITCH_VER_CONFIG_PATH +
               " -k " + SWITCH_PCR_PUB_KEY_PATH + " -conf " +
               SWITCH_REF_CONFIG_PATH + " -json " + node_address)
    logger.debug("SwitchVerifier run with args: " + command)
    output = runSSHCommand(command)
    logger.debug("SwitchVerifier returned output: " + output)
    return output
18,703
def unary_op(op, suty):
    """/Users/mahmoud/src/py-in-java/pycryptodome/lib/transform.py
    /Users/mahmoud/src/py2star/transform1.py

    Converts a method like:

    ``self.failUnless(True)``

    to

    asserts.assert_that(value).is_true()
    """
    sut = _codegen.code_for_node(suty.value)
    return cst.parse_expression(f"asserts.assert_that({sut}).{op}()")
18,704
def canny_edges(image, minedges=5000, maxedges=15000, low_thresh=50, minEdgeRadius=20, maxEdgeRadius=None): """ Compute Canny edge detection on an image """ t0 = time.time() dx = ndimage.sobel(image,0) dy = ndimage.sobel(image,1) mag = numpy.hypot(dx, dy) mag = mag / mag.max() ort = numpy.arctan2(dy, dx) edge_map = non_maximal_edge_suppresion(mag, ort, minEdgeRadius, maxEdgeRadius) edge_map = numpy.logical_and(edge_map, mag > low_thresh) labels, numlabels = ndimage.measurements.label(edge_map, numpy.ones((3,3))) #print "labels", len(labels) #print maxs maxs = ndimage.measurements.maximum(mag, labels, range(1,numlabels+1)) maxs = numpy.array(maxs, dtype=numpy.float64) high_thresh = maxs.mean() minThresh = maxs.min() #print time.time() - t0 edge_count = edge_map.sum() count = 0 while count < 25: t0 = time.time() count += 1 maxs = ndimage.measurements.maximum(mag, labels, range(1,numlabels+1)) maxs = numpy.array(maxs, dtype=numpy.float64) good_label = (maxs > high_thresh) good_label = numpy.append([False, ], good_label) numgood = good_label.sum() if numgood == numlabels and high_thresh > minThresh: print "ERROR" maxs.sort() print high_thresh print maxs[:3], maxs[-3:] print maxs[0], ">", high_thresh, "=", maxs[0] > high_thresh good_label = numpy.zeros((numlabels+1,), dtype=numpy.bool) good_label[1:] = maxs > high_thresh print good_label[:3], good_label[-3:] time.sleep(10) newedge_map = good_label[labels] #for i in range(len(maxs)): # #if max(mag[labels==i]) < high_thresh: # if maxs[i] < high_thresh: # edge_map[labels==i] = False edge_count = newedge_map.sum() print "canny edges=%d, (thresh=%.3f) time=%.6f"%(edge_count, high_thresh, time.time() - t0) if edge_count > maxedges: rand = math.sqrt(random.random()) new_thresh = high_thresh / rand # fix for too large values #print rand, new_thresh if new_thresh < 1.0: high_thresh = new_thresh else: high_thresh = math.sqrt(high_thresh) elif edge_count < minedges and high_thresh > minThresh: rand = math.sqrt(random.random()) new_thresh = high_thresh * rand #print rand, new_thresh, minThresh high_thresh = new_thresh else: break #print time.time() - t0 return newedge_map
18,705
def test_exception_connection_error(port):
    """
    ConnectionError exception raising test. Wrong network address,
    firewall or rest api connection limit ...
    """
    array = hpestorapi.StoreServ('wrong-address', 'user', 'password',
                                 ssl=False, port=port)
    with pytest.raises(requests.exceptions.ConnectionError):
        array.open()
18,706
def test_classifier(executable):
    """Test the classification task."""
    num_machines = 2
    data = create_data(task='binary-classification')
    partitions = np.array_split(data, num_machines)
    train_params = {
        'objective': 'binary',
        'num_machines': num_machines,
    }
    clf = DistributedMockup(executable)
    clf.fit(partitions, train_params)
    y_probas = clf.predict()
    y_pred = y_probas > 0.5
    assert accuracy_score(clf.label_, y_pred) == 1.
18,707
def reduce_false_positives(txt_file_path, mean_brightness_retained_patches, max_fp,
                           output_folder_path, out_filename, mapping_centers_masks):
    """If more than "max_fp" candidate centers were predicted, this function reduces this number to exactly
    "max_fp", retaining only the most probable (i.e. brightest) candidate aneurysm centers. The rationale behind
    this choice is that it's extremely unlikely that a subject has more than "max_fp" aneurysms in one scan.

    Args:
        txt_file_path (str): path to the output txt file
        mean_brightness_retained_patches (list): contains the average intensity of the patches corresponding to the predicted centers
        max_fp (int): maximum allowed number of aneurysms per patient
        output_folder_path (str): path to output folder for this subject
        out_filename (str): filename of the output segmentation mask
        mapping_centers_masks (dict): it contains the centers of the predictions as keys and the coordinates of non-zero voxels as values
    Returns:
        None
    Raises:
        AssertionError: if input path does not exist
    """
    assert os.path.exists(txt_file_path), "Path {} does not exist".format(txt_file_path)

    if not os.stat(txt_file_path).st_size == 0:  # if the output file is not empty (i.e. there's at least one predicted aneurysm location)
        df_txt_file = pd.read_csv(txt_file_path, header=None)  # type: pd.DataFrame # load txt file with pandas
        new_rows = []  # type: list # will contain the new rows of the output file
        if df_txt_file.shape[0] > max_fp:  # if the dataframe has more than max_fp rows (i.e. if there are more than max_fp predicted aneurysms)
            # find indexes corresponding to the "max_fp" most probable aneurysms (i.e. the brightest)
            idxs_brightest_patches = sorted(range(len(mean_brightness_retained_patches)),
                                            key=lambda k: mean_brightness_retained_patches[k])[-max_fp:]
            for idx in idxs_brightest_patches:
                lesion_center = np.asarray(df_txt_file.iloc[idx])
                new_rows.append(lesion_center)
            # save modified txt file (it overwrites the previous one by default)
            np.savetxt(txt_file_path, np.asarray(new_rows), delimiter=",", fmt='%i')
            # also remove less probable connected components from segmentation map
            reduce_fp_in_segm_map(txt_file_path, output_folder_path, out_filename, mapping_centers_masks)
18,708
def scale_matrix(t):
    """
    Given a d-dim vector t, returns (d+1)x(d+1) matrix M such that left
    multiplication by M on a homogeneous (d+1)-dim vector v scales v by t
    (assuming the last coordinate of v is 1).
    """
    t = asarray(t).ravel()
    d = len(t)
    m = identity(d+1)
    for i in range(d):
        m[i, i] = t[i]
    return asmatrix(m)
18,709
def validate_rule_paths(sched: schedule.Schedule) -> schedule.Schedule:
    """A validator to be run after schedule creation to ensure each path
    contains at least one rule with a temperature expression. A ValueError
    is raised when this check fails."""

    for path in sched.unfold():
        if path.is_final and not list(path.rules_with_temp):
            raise ValueError(
                "No temperature specified for any rule along the path {}."
                .format(path)
            )

    return sched
18,710
def descendants(ctx, *args, **kwargs):
    """Recursively get all children of an Interface."""
    callbacks.list_subcommand(
        ctx, display_fields=DISPLAY_FIELDS, my_name=ctx.info_name
    )
18,711
def check_input_array(xarr, shape=None, chunks=None,
                      grid_location=None, ndims=None):
    """Return True if xarr is a dataarray with the expected shape, chunks and
    grid_location attribute. Raise an error if one of the tests fails.

    Parameters
    ----------
    xarr : xarray.DataArray
        xarray dataarray whose attributes should be tested.
    shape : tuple
        expected shape of the xarray dataarray xarr
    chunks : list-like of list-like object
        expected chunks of the xarray dataarray xarr
    grid_location : str
        string describing the expected grid location : eg 'u','v','t','f'...
    ndims : int
        number of dimensions over which chunks should be compared.

    Returns
    -------
    test : bool
        boolean value of the test.
    """
    if hasattr(xarr, 'name'):
        arrayname = xarr.name
    else:
        arrayname = 'array'
    if not is_xarray(xarr):
        raise TypeError(arrayname + ' is expected to be a xarray.DataArray')
    if not _chunks_are_compatible(xarr.chunks, chunks, ndims=ndims):
        raise ChunkError()
    if not _grid_location_equals(xarr, grid_location):
        raise GridLocationError()
    return True
18,712
def test_load_dump(engine):
    """_load and _dump should be symmetric"""
    user = User(id="user_id", name="test-name", email="email@domain.com", age=31,
                joined=datetime.datetime.now(datetime.timezone.utc))
    serialized = {
        "id": {"S": user.id},
        "age": {"N": "31"},
        "name": {"S": "test-name"},
        "email": {"S": "email@domain.com"},
        "j": {"S": user.joined.isoformat()}
    }

    loaded_user = engine._load(User, serialized)

    missing = object()
    for attr in (c.name for c in User.Meta.columns):
        assert getattr(loaded_user, attr, missing) == getattr(user, attr, missing)

    assert engine._dump(User, user) == serialized
    assert engine._dump(User, loaded_user) == serialized
18,713
def ancestor_width(circ, supp, verbose=False):
    """
    Args:
        circ(list(list(tuple))): Circuit
        supp(list): List of integers
    Returns:
        int: Width of the past causal cone of supp
    """
    circ_rev = circ[::-1]
    supp_coded = 0
    for s in supp:
        supp_coded |= (1 << s)
    for unitcirc in circ_rev:
        for gate in unitcirc:
            if verbose:
                print("gate={}".format(gate))
            if (1 << gate[0]) & supp_coded:
                if not ((1 << gate[1]) & supp_coded):
                    supp_coded |= (1 << gate[1])
            elif (1 << gate[1]) & supp_coded:
                supp_coded |= (1 << gate[0])
    return bin(supp_coded).count('1')
18,714
def find_genes( interactions, fragment_database_fp, gene_bed_fp, gene_dict_fp, output_dir, suppress_intermediate_files=False): """Identifies genes in fragments that interact with SNP fragments. Args: interactions: The dictionary fragements that interact with SNP fragments returned from find_interactions fragment_database_fp: ../../lib/Homo_sapiens.GRCh37.75.dna.fragments.db gene_bed_fp: ../../lib/gene_reference.bed gene_dict_fp: The database containing GENCODE ids for genes output_dir: User-specified directory for results. Defaults to inputs directory. suppress_intermediate_files: if 'False', snps.txt file is written to output_dir Returns: A dict named 'genes' containing genes in fragments that interact with SNP fragments e.g. {'rs9462794':{ # SNP rsID 'PHACTR1':{ # Gene 'gene_id': 'ENSG00000112137.12', 'cell_lines': 'GM12878_Rao2014': {'interactions': 2, 'replicates': 23, 'rep_present': ['GSM1551552_HIC003_merged_nodups.db', 'GSM1551553_HIC004_merged_nodups.db']}, 'KBM7_Rao2014': {'interactions': 1, 'replicates': 5, 'rep_present': ['GSM1551624_HIC075_merged_nodups.db']} } 'EDN1':{...} } 'rs12198798':{..} 'rs6909834':{...} } If suppress_intermediate_files=False, SNP-gene pairs with HiC contacts in only one libary replicate in only one cell line are written to genes_to_remove.txt.The others interactions are written to genes.txt. Each file has the ff columns: 1. SNP rsID 2. Gene name 3. Gene ID 4. Cell line 5. HiC contact counts 6. Replicates in which contact is found 7. Number of cell line replicates """ print("Identifying interactions with genes...") global hs_gene_bed hs_gene_bed = pybedtools.BedTool(gene_bed_fp) genes = {} enzyme_count = 0 for enzyme in interactions: print("\tin libraries restricted with " + enzyme) enzyme_count += 1 manager = multiprocessing.Manager() genes_dict = manager.dict() current_process = psutil.Process() num_processes = 8 pool = multiprocessing.Pool(processes=min(num_processes, len(current_process.cpu_affinity()))) arglist = [(snp, enzyme, interactions[enzyme][snp], genes_dict, fragment_database_fp, gene_bed_fp, gene_dict_fp, output_dir) for snp in interactions[enzyme]] pool.map(find_snp_genes, arglist) pool.close() pool.join() if enzyme_count < 2: genes.update(genes_dict) genes.update() else: genes = dict_merge(genes, genes_dict) temp_files = [os.path.join(output_dir, temp_file) for temp_file in os.listdir(output_dir) if temp_file.startswith('temp_snp_bed')] for temp_file in temp_files: os.remove(temp_file) snps_to_remove = {} for enzyme in interactions: snps_to_remove[enzyme] = [] for snp in interactions[enzyme]: if not snp in genes: print("\tNo SNP-gene spatial interactions detected for %s, \ removing from analysis" % (snp,)) snps_to_remove[enzyme].append(snp) for enzyme in snps_to_remove: # Update snps and interactions mappings for snp in snps_to_remove[enzyme]: for i, frag in enumerate(snps[snp]['fragments']): if frag['enzyme'] == enzyme: snps[snp]['fragments'].remove(snps[snp]['fragments'][i]) del interactions[enzyme][snp] genes_to_remove = [] del_genefile = open(os.path.join(output_dir, 'genes_removed.txt'), 'w') dwriter = csv.writer(del_genefile, delimiter='\t') for snp in genes: for gene in genes[snp]: num_cell_line = len(genes[snp][gene]['cell_lines']) for cell_line in genes[snp][gene]['cell_lines']: rep_present = len( set(genes[snp][gene]['cell_lines'][cell_line]['rep_present'])) interactions = genes[snp][gene]['cell_lines'][cell_line]['interactions'] replicates = genes[snp][gene]['cell_lines'][cell_line]['replicates'] if interactions/replicates <= 1 
and rep_present < 2 and\ num_cell_line < 2: genes_to_remove.append((snp, gene)) dwriter.writerow((snp, gene, genes[snp][gene]['gene_id'], cell_line, interactions, rep_present, replicates)) del_genefile.close() for pair in genes_to_remove: del genes[pair[0]][pair[1]] if not suppress_intermediate_files: genefile = open(output_dir + "/genes.txt", 'w') gwriter = csv.writer(genefile, delimiter='\t') for snp in genes: for gene in genes[snp]: num_cell_line = len(genes[snp][gene]) for cell_line in genes[snp][gene]['cell_lines']: rep_present = len( set(genes[snp][gene]['cell_lines'][cell_line]['rep_present'])) interactions = genes[snp][gene]['cell_lines'][cell_line]['interactions'] replicates = genes[snp][gene]['cell_lines'][cell_line]['replicates'] gwriter.writerow((snp, gene, genes[snp][gene]['gene_id'], cell_line, genes[snp][gene]['cell_lines'][cell_line]['interactions'], genes[snp][gene]['cell_lines'][cell_line]['rep_present'], genes[snp][gene]['cell_lines'][cell_line]['replicates'])) return genes
18,715
def get_bprop_argmax(self):
    """Generate bprop for Argmax"""

    def bprop(x, out, dout):
        return (zeros_like(x),)

    return bprop
18,716
def register_extensions(app):
    """Register extensions."""
    db.init_app(app)
    login_manager.init_app(app)

    # flask-admin configs
    admin.init_app(app)
    admin.add_view(ModelView(User))
    admin.add_view(ModelView(Role))

    login_manager.login_view = 'auth.login'

    @login_manager.user_loader
    def load_user(user_id):
        return User.objects(id=user_id).first()

    # jwt config
    def jwt_authenticate(username, password):
        logging.info("username:{}\npassword:{}\n".format(username, password))
        user = User.objects(name=username, password=password).first()
        return user

    def jwt_identity(payload):
        logging.info("payload:{}".format(payload))
        user_id = payload['identity']
        return User.objects(id=user_id).first()

    def make_payload(identity):
        iat = datetime.utcnow()
        exp = iat + current_app.config.get('JWT_EXPIRATION_DELTA')
        nbf = iat + current_app.config.get('JWT_NOT_BEFORE_DELTA')
        identity = str(identity.id)
        return {'exp': exp, 'iat': iat, 'nbf': nbf, 'identity': identity}

    jwt.authentication_handler(jwt_authenticate)
    jwt.identity_handler(jwt_identity)
    jwt.jwt_payload_handler(make_payload)
    jwt.init_app(app)
18,717
def get_description():
    """ Return a dict describing how to call this plotter """
    desc = dict()
    desc['cache'] = 86400
    desc['description'] = """This plot presents a histogram of the change
    in some observed variable over a given number of hours."""
    desc['arguments'] = [
        dict(type='zstation', name='zstation', default='DSM',
             label='Select Station:', network='IA_ASOS'),
        dict(
            type='select', options=PDICT, default='tmpf', name='var',
            label='Select Variable'
        ),
        dict(type='int', name='hours', default=24, label='Hours:'),
        dict(type='float', name='interval', default=1,
             label="Histogram Binning Width (unit of variable)"),
    ]
    return desc
18,718
def extract_content(basepath, exclude=None):
    """
    Get all non-comment lines from BUILD files under the given basepath.

    :param Path basepath: The path to recursively crawl
    :param list exclude: The paths to exclude
    :rtype: str
    """
    if basepath.is_file():
        content = basepath.read_text(errors='replace')
    elif basepath.is_dir():
        content = ''
        for dirpath, dirnames, filenames in os.walk(str(basepath)):
            # skip sub-directories starting with a dot
            dirnames[:] = filter(lambda d: not d.startswith('.'), dirnames)
            dirnames.sort()

            for name in sorted(filenames):
                if name != 'BUILD.bazel':
                    continue
                path = Path(dirpath) / name
                if path in (exclude or []):
                    continue
                content += path.read_text(errors='replace') + '\n'
    else:
        return ''
    return _remove_bazel_comments(content)
18,719
def read_format_from_metadata(text, ext):
    """Return the format of the file, when that information is available from the metadata"""
    metadata = read_metadata(text, ext)
    rearrange_jupytext_metadata(metadata)
    return format_name_for_ext(metadata, ext, explicit_default=False)
18,720
def request_requires_retry(err: Exception) -> bool:
    """Does the error mean that a retry should be performed?"""
    if not isinstance(err, ClientError):
        return False
    code = err.response.get('Error', {}).get('Code', '').lower()
    message = err.response.get('Error', {}).get('Message', '')

    # This covers:
    #   ExpiredToken
    #   OperationAborted
    #   RequestTimeout
    #   SlowDown
    #   Busy
    #   RequestLimitExceeded
    # It might need to cover these, but it doesn't.
    #   RestoreAlreadyInProgress
    m_low = message.lower()
    if (
            'exceeded' in m_low or 'exceeded' in code
            or 'expire' in m_low or 'expire' in code
            or 'aborted' in m_low or 'aborted' in code
            or 'timeout' in m_low or 'timeout' in code
            or 'slow' in m_low or 'slow' in code
            or 'busy' in m_low or 'busy' in code
    ):
        log('INFO', "Reporting error {msg} as requiring a retry", msg=message)
        return True
    return False
18,721
def update_DMSP_ephemeris(self, ephem=None):
    """Updates DMSP instrument data with DMSP ephemeris

    Parameters
    ----------
    ephem : pysat.Instrument or NoneType
        dmsp_ivm_ephem instrument object

    Returns
    ---------
    Updates 'mlt' and 'mlat'
    """
    # Ensure the right ephemera is loaded
    if ephem is None:
        print('No ephemera provided for {:}'.format(self.date))
        self.data = pds.DataFrame(None)
        return

    if ephem.sat_id != dmsp.sat_id:
        raise ValueError('ephemera provided for the wrong satellite')

    if ephem.date != self.date:
        ephem.load(date=self.date, verifyPad=True)

        if ephem.data.empty:
            print('unable to load ephemera for {:}'.format(self.date))
            self.data = pds.DataFrame(None)
            return

    # Reindex the ephemeris data
    ephem.data = ephem.data.reindex(index=self.data.index, method='pad')
    ephem.data = ephem.data.interpolate('time')

    # Update the DMSP instrument
    self['mlt'] = ephem['SC_AACGM_LTIME']
    self['mlat'] = ephem['SC_AACGM_LAT']

    return
18,722
def show_all_in_dict(dict):
    """
    Print all the keys in dict.

    Arguments:
    dict -- a dictionary whose keys need to be printed.
    """
    print('We know the birthdays of:')
    for key in dict:
        print(key)
18,723
def to_jd(y, m, d, method=None):
    """Convert Armenian date to Julian day count. Use the method of Sarkawag if requested."""
    # Sanity check values
    legal_date(y, m, d, method)
    yeardays = (m - 1) * 30 + d
    if method == "sarkawag":
        # Calculate things
        yeardelta = y - 533
        leapdays = trunc(yeardelta / 4)
        return EPOCH_SARKAWAG + (365 * yeardelta) + leapdays + yeardays
    else:
        return EPOCH + (365 * y) + yeardays
18,724
def unitVector(vector):
    """
    Returns the unit vector of a given input vector.

    Params:
        vector -> input vector.

    Returns:
        numpy.array().
    """
    # Divide the input vector by its magnitude.
    return vector / np.linalg.norm(vector)
18,725
def find_poly_intervals(p):
    """
    Find the intervals of 1D-polynomial (numpy.polynomial) where the
    polynomial is negative.
    """
    assert(np.abs(p.coef[-1]) > 1e-14)
    r = p.roots()
    # remove imaginary roots, multiple roots, and sort
    r = np.unique(np.extract(np.abs(r.imag) < 1e-14, r).real)

    ints = []
    for ii in range(r.size-1):
        rmean = 0.5*(r[ii]+r[ii+1])
        if p(rmean) < 0:
            ints.append([r[ii], r[ii+1]])

    sign_pinf = np.sign(p.coef[-1])
    if p.coef[-1] < 0:  # polynomial sign at plus infinity
        ints.append([r[-1], np.inf])
    if (-1)**p.degree()*sign_pinf < 0:  # polynomial sign at minus infinity
        ints.append([-np.inf, r[0]])
    return np.array(ints)
18,726
def user_numforms_next(*args):
    """
    user_numforms_next(p) -> user_numforms_iterator_t

    Move to the next element.

    @param p (C++: user_numforms_iterator_t)
    """
    return _ida_hexrays.user_numforms_next(*args)
18,727
def test_task_test_all():
    """Test task_test_all."""
    result = task_test_all()

    actions = result['actions']
    assert len(actions) == 1
    assert str(actions[0]).endswith('" --ff -vv')
18,728
def main(): """ annotate a file with the neearest features in another. """ p = argparse.ArgumentParser(description=__doc__, prog=sys.argv[0]) p.add_argument("-a", dest="a", help="file to annotate") p.add_argument("-b", dest="b", help="file with annotations") p.add_argument("--upstream", dest="upstream", type=int, default=None, help="distance upstream of [a] to look for [b]") p.add_argument("--downstream", dest="downstream", type=int, default=None, help="distance downstream of [a] to look for [b]") p.add_argument("--report-distance", dest="report_distance", default=False, help="report the distance, not just the genes", action="store_true") args = p.parse_args() if (args.a is None or args.b is None): sys.exit(not p.print_help()) c = add_closest(args.a, args.b) b = BedTool(args.b) # TODO: support --report-distance for up/downstream. if args.upstream: c = add_xstream(c, b, args.upstream, "up", args.report_distance) if args.downstream: c = add_xstream(c, b, args.downstream, "down", args.report_distance) for row in c.sort(): print(row)
18,729
def list_cms() -> Dict[Text, CalculationModule]:
    """List all cms available on a celery queue."""
    app = get_celery_app()
    try:
        app_inspector = app.control.inspect()
        nodes = app_inspector.registered("cm_info")
    except (redis.exceptions.ConnectionError, kombu.exceptions.OperationalError) as err:
        # If redis is down, we just don't expose any calculation module
        logging.error("Connection to celery broker failed with error: %s", err)
        return {}
    if not nodes:
        return {}
    cms = {}
    for node in nodes.values():
        for entry in node:
            try:
                cm = from_registration_string(entry)
            except InvalidRegistrationString as e:
                # invalid cm was encountered, skip it
                logging.error(e)
                continue
            cms[cm.name] = cm
    return cms
18,730
def _a_in_b(first, second):
    """Check if interval a is inside interval b."""
    return first.start >= second.start and first.stop <= second.stop
18,731
def validate(val_loader, c_model, r_model, c_criterion, r_criterion): """ One epoch's validation. : param val_loader: DataLoader for validation data : param model: model : param criterion: MultiBox loss : return: average validation loss """ c_model.eval() # eval mode disables dropout r_model.eval() # eval mode disables dropout batch_time = AverageMeter() losses = AverageMeter() losses2 = AverageMeter() start = time.time() # Prohibit gradient computation explicity because I had some problems with memory with torch.no_grad(): # Batches for i_batch, (images, labels, coords) in enumerate(train_loader): # Move to default device images = images.to(device) labels = labels.to(device) coords = coords.to(device) # CLASSIFICATION Eval predicted_class, all_crops, cropCoords = c_model(images) loss1 = c_criterion(predicted_class, labels) all_crops = all_crops.to(device) cropCoords = cropCoords.to(device) # REGRESSION Eval for i in range(9): batchcrop = all_crops[:, i, :, :] batchcrop.unsqueeze_(1) offset = cropCoords[i] offset = offset.repeat(all_crops.size(0), 1) offset = torch.cat((offset, torch.zeros((all_crops.size(0), 1)).to(device)), dim=1) center_truth = coords[:, i, :] center_est = r_model(batchcrop).to(device) center_est = center_est + offset loss2 = regrCriterion(center_truth, center_est) losses2.update(loss2.item()) losses.update(loss1.item()) batch_time.update(time.time() - start) start = time.time() # Print status if i_batch % print_freq == 0: print( "[{0}/{1}]\t" "Batch Time {batch_time.val:.3f} ({batch_time.avg:.3f})\t" "Loss {loss.val:.4f} ({loss.avg:.4f})\t".format(i_batch, len(val_loader), batch_time=batch_time, loss=losses), "Regr Loss {loss.val:.4f} ({loss.avg:.4f})\t".format(i_batch, len(val_loader), batch_time=batch_time, loss=losses2) ) print("\n * LOSS - {loss.avg:.3f}\n".format(loss=losses)) print(" * REGR LOSS - {loss.avg:.3f}\n".format(loss=losses2)) return losses.avg, losses2.avg
18,732
def test_monotonic(example_df):
    """Columns that should be monotonic but are not"""
    example_df['cycle_number'] = [2, 1]
    with raises(ValueError) as exc:
        CyclingData.validate_dataframe(example_df)
    assert 'monotonic' in str(exc)

    example_df['cycle_number'] = [1, 1]
    CyclingData.validate_dataframe(example_df)
18,733
def radixsort(list, k=10, d=0):
    """
    Sort the list. This method has been used to sort punched cards.
    @param k: number of different characters in a number (base)
    @param d: maximum number of digits of list elements
    """
    if len(list) == 0:
        return []
    elif d == 0:
        d = max(map(lambda x: len(str(abs(x))), list))

    for x in range(d):
        # create an empty bin for each possible digit
        bins = [[] for i in range(k)]
        # sort the numbers into the bins according to the digit at position x
        for el in list:
            bins[(el // 10**x) % k].append(el)
        # merge all bins to one list
        list = []
        for section in bins:
            list.extend(section)
    return list
18,734
def create_datacatalog(glue_client):
    """ used to create a data catalog """
    # TODO:
    # check if this is needed
    # and to be improved accordingly
    response = glue_client.create_data_catalog(
        Name="string",
        Type="GLUE",  # one of "LAMBDA", "GLUE" or "HIVE"
        Description="Test catalog",
        Parameters={"string": "string"},
        Tags=[
            {"Key": "string", "Value": "string"},
        ],
    )
    return response
18,735
def feature_stat_str(x, y, delimiter='~', n_lines=40, width=20):
    """Compute the input feature's sample distribution in string format for printing.
    The distribution table returned (in string format) contains the sample sizes,
    event sizes and event proportions of each feature value.

    Parameters
    ----------
    x: numpy.array, shape (number of examples,)
        The discretizated feature array. Each value represent a right-closed
        interval of the input feature. e.g. '1~8'

    y: numpy.array, shape (number of examples,)
        The binary dependent variable with 1 represents the target event
        (positive class).

    delimiter: python string. Default is '~'
        The symbol that separates the boundaries of a interval in array x.

    n_lines: integer. Default is 40.
        The number of '- ' used. This controls the length of horizontal lines
        in the table.

    width: integer. Default is 20.
        This controls the width of each column.

    Returns
    -------
    table_string: python string
        The feature distribution table in string format
    """
    res = feature_stat(x, y, delimiter)  # Compute the feature distribution table
    list_str = []  # String table will be constructed line by line

    # Table header
    for i in range(res.shape[1]):
        list_str.extend([str(res.columns[i]),
                         ' '*(width-len(res.columns[i].encode('gbk')))])
    list_str.append('\n(right-closed)')
    list_str.extend(['\n', '- '*n_lines, '\n'])

    # Table body
    for i in range(res.shape[0]):
        for j in range(res.shape[1]):
            list_str.extend([str(res.iloc[i, j]),
                             ' '*(width-len(str(res.iloc[i, j])))])
        list_str.extend(['\n', '- '*n_lines, '\n'])

    # Put everything together
    table_string = ''.join(list_str)
    return table_string
18,736
def dice_similarity_u(output, target):
    """Computes the Dice similarity"""
    #batch_size = target.size(0)
    total_dice = 0
    output = output.clone()
    target = target.clone()
    # print('target:',target.sum())
    for i in range(1, output.shape[1]):
        target_i = torch.zeros(target.shape)
        target_i = target_i.cuda().clone()
        target_i[target == i] = 1
        output_i = output[:, i:i+1].clone()
        dice_i = dice_similarity(output_i, target_i)
        # print('dice_: ',i,dice_i.data)
        # print('target_i: ',target_i.sum())
        # print('output_i: ',output_i.sum())
        total_dice += dice_i
    total_dice = total_dice / (output.shape[1] - 1)
    #print(intersection, union, dice)
    return total_dice
18,737
def establish_service():
    """
    Authorizes and establishes the Gmail service using the API.
    :return: authorized Gmail service instance
    """
    # If modifying these scopes, delete the file token.pickle.
    SCOPES = ['https://www.googleapis.com/auth/gmail.send']
    creds = None
    # The file token.pickle stores the user's access and refresh tokens, and is
    # created automatically when the authorization flow completes for the first
    # time.
    if os.path.exists('token.pickle'):
        with open('token.pickle', 'rb') as token:
            creds = pickle.load(token)
    # If there are no (valid) credentials available, let the user log in.
    if not creds or not creds.valid:
        if creds and creds.expired and creds.refresh_token:
            creds.refresh(Request())
        else:
            flow = InstalledAppFlow.from_client_secrets_file(
                'credentials.json', SCOPES)
            creds = flow.run_local_server()
        # Save the credentials for the next run
        with open('token.pickle', 'wb') as token:
            pickle.dump(creds, token)

    service = build('gmail', 'v1', credentials=creds)
    return service
18,738
def get_meaning(searchterm):
    """
    Fetches the meaning of the word specified
    :param searchterm: the word for which you want to fetch the meaning
    :return: json object of the meaning
    """
    # finds the input field by id in the webpage
    sbox = driver.find_element_by_id('word')
    sbox.clear()  # clears the input field
    sbox.send_keys(searchterm)  # enters the word specified in the input field

    # find the 'CALL THE API' button
    submit = driver.find_element_by_id("getWord")
    submit.click()  # invoking the click event on the button

    # waiting for the results to come
    time.sleep(1)

    # find the code tag in the webpage where the meaning of the word (result) is present
    code = driver.find_element_by_tag_name("code")

    # condition if the meaning is not found
    if code.text == "No results for that word.":
        return {'word': searchterm}

    # converting the meaning of the word from string to json
    meaning = json.loads(code.text)

    # returning the meaning of the word in json format
    return meaning
18,739
def set_permissions_manager(permissions_manager):
    """Set the IPermissionsManager implementation to use."""
    global _permissions_manager
    _permissions_manager = permissions_manager
18,740
def main(): """Main logic""" # logger.info("Check hour range: {}:00:00 - {}:00:00".format(start_hour, end_hour)) with open(LIST_YAML_PATH, encoding="utf8") as list_file: yml_data = yaml.load(list_file, Loader=yaml.FullLoader) # Add schedule for each item for item_obj in yml_data: # Create helper object and add add to schedule for urls_type in ["comic_urls", "novel_urls"]: helper = get_helper(item_obj, urls_type) if helper: add_schedule(helper, urls=item_obj[urls_type]) if len(schedule.jobs) > 0: logger.info("Scheduled %s checker(s).", len(schedule.jobs)) else: raise ValueError( "No schedule job found, please check format in list.yaml" ) # Run the scheduler while True: schedule.run_pending() time.sleep(1)
18,741
def cat_artifact(cache_path, pk, artifact_rpath):
    """Print the contents of a cached artefact."""
    db = get_cache(cache_path)
    with db.cache_artefacts_temppath(pk) as path:
        artifact_path = path.joinpath(artifact_rpath)
        if not artifact_path.exists():
            click.secho("Artifact does not exist", fg="red")
            sys.exit(1)
        if not artifact_path.is_file():
            click.secho("Artifact is not a file", fg="red")
            sys.exit(1)
        text = artifact_path.read_text(encoding="utf8")
    click.echo(text)
18,742
def test_xdawn_picks():
    """Test picking with Xdawn."""
    data = np.random.RandomState(0).randn(10, 2, 10)
    info = create_info(2, 1000., ('eeg', 'misc'))
    epochs = EpochsArray(data, info)
    xd = Xdawn(correct_overlap=False)
    xd.fit(epochs)
    epochs_out = xd.apply(epochs)['1']
    assert epochs_out.info['ch_names'] == epochs.ch_names
    assert not (epochs_out.get_data()[:, 0] != data[:, 0]).any()
    assert_array_equal(epochs_out.get_data()[:, 1], data[:, 1])
18,743
def get_state(entity_id):
    """
    Return the state of an entity
    """
    try:
        entity_state = ''
        entity_state = Gb.hass.states.get(entity_id).state

        if entity_state in IOS_TRIGGER_ABBREVIATIONS:
            state = IOS_TRIGGER_ABBREVIATIONS[entity_state]
        else:
            state = Gb.state_to_zone.get(entity_state, entity_state.lower())

        # _trace(f"{entity_id=} {entity_state=}-->{state=} ")

    except Exception as err:
        # When starting iCloud3, the device_tracker for the iosapp might
        # not have been set up yet. Catch the entity_id error here.
        # _LOGGER.exception(err)
        state = NOT_SET

    # if Gb.log_rawdata_flag:
    #     _trace(f"  > {entity_id} > {entity_state=} {state=}")

    return state
18,744
def show(): """ show source data """ SUP = "".join(str(i) * 10 for i in range(ALL//10))[:ALL] METR = "0123456789" * (ALL//10) print ("\n" + SUP) print (METR, EOL) for s in a: print (" " * (s[1]-1), "*" * s[0], EOL, end="") print (METR, "\n"+SUP, EOL)
18,745
def loss_eval(net, data, labels, numclass=2, rs=40):
    """Evaluate the network performance on test samples"""
    loss = np.zeros([labels.shape[0]])
    for i in range(len(loss)):
        label_input = condition_reshape(
            label=labels[i, np.newaxis],
            numclass=numclass,
            imgshape=(rs, rs))
        img_est = net.sess.run(
            net.output_flatten_de,
            feed_dict={
                net.inputs: data[i].reshape(-1, rs, rs, 1),
                net.conditions: labels[i].reshape([1, labels[i].shape[0]]),
                net.conditions_input: label_input,
                net.is_training: False,
                net.keep_prob: 1.0})
        img_est = (img_est - img_est.min()) / (img_est.max() - img_est.min())
        loss[i] = np.mean((data[i] - img_est)**2)
        # loss[i] = np.sum((data[i] - img_est)**2) / (40**2)
        # print(data[i].shape)

    # evaluation
    loss_mean = np.mean(loss)
    loss_std = np.std(loss)

    return loss, loss_mean, loss_std
18,746
def send_instructions(message):
    """/start, /help"""
    msg_content: str = (
        "*Available commands:*\n\n/download - downloads pinterest images"
    )
    bot.send_message(
        message.chat.id,
        msg_content,
        parse_mode="markdown",
    )
18,747
def wide_resnet101(input_shape, num_classes, dense_classifier=False, pretrained=False):
    """ return a ResNet 101 object """
    return _resnet('resnet101', BottleNeck, [3, 4, 23, 3], 64 * 2,
                   num_classes, dense_classifier, pretrained)
18,748
def is_empty(ir: irast.Base) -> bool:
    """Return True if the given *ir* expression is an empty set
    or an empty array.
    """
    return (
        isinstance(ir, irast.EmptySet)
        or (isinstance(ir, irast.Array) and not ir.elements)
        or (
            isinstance(ir, irast.Set)
            and ir.expr is not None
            and is_empty(ir.expr)
        )
    )
18,749
def read_official_corner_lut(filename, y_grid='lat_grid', x_grid='lon_grid',
                             x_corners=['nwlon', 'swlon', 'selon', 'nelon'],
                             y_corners=['nwlat', 'swlat', 'selat', 'nelat']):
    """ Read a MATLAB file containing corner point lookup data.

    Returns lons, lats, corner_lut.

    lons, lats: arrays, shape (N,M), of longitude and latitude giving the
        locations of the corresponding offsets in corner_points
    corner_lut: array, shape (N,M,4,2)
        Corners of the pixel quadrilateral are given in order along the third
        dimension. Longitude and latitudes are indexes 0 and 1 in the trailing
        dimension, respectively.

    Latitudes, longitudes, and offsets are defined with east and north positive
    """
    from scipy.io import loadmat
    nav = loadmat(filename)
    lats = nav[y_grid]
    lons = nav[x_grid]
    corner_lut = np.zeros((lats.shape[0], lats.shape[1], 4, 2), dtype='f8')
    corner_lut[:, :, 0, 0] = nav[x_corners[0]]
    corner_lut[:, :, 1, 0] = nav[x_corners[1]]
    corner_lut[:, :, 2, 0] = nav[x_corners[2]]
    corner_lut[:, :, 3, 0] = nav[x_corners[3]]
    corner_lut[:, :, 0, 1] = nav[y_corners[0]]
    corner_lut[:, :, 1, 1] = nav[y_corners[1]]
    corner_lut[:, :, 2, 1] = nav[y_corners[2]]
    corner_lut[:, :, 3, 1] = nav[y_corners[3]]
    return lons, lats, corner_lut
18,750
def get_component_name(job_type):
    """Gets component name for a job type."""
    job = data_types.Job.query(data_types.Job.name == job_type).get()
    if not job:
        return ''

    match = re.match(r'.*BUCKET_PATH[^\r\n]*-([a-zA-Z0-9]+)-component',
                     job.get_environment_string(), re.DOTALL)
    if not match:
        return ''

    component_name = match.group(1)
    return component_name
18,751
def group_by(source: ObservableBase, key_mapper, element_mapper=None) -> ObservableBase:
    """Groups the elements of an observable sequence according to a specified
    key mapper function and comparer and selects the resulting elements by
    using a specified function.

    1 - observable.group_by(lambda x: x.id)
    2 - observable.group_by(lambda x: x.id, lambda x: x.name)
    3 - observable.group_by(
        lambda x: x.id,
        lambda x: x.name,
        lambda x: str(x))

    Keyword arguments:
    key_mapper -- A function to extract the key for each element.
    element_mapper -- [Optional] A function to map each source element to
        an element in an observable group.

    Returns a sequence of observable groups, each of which corresponds to a
    unique key value, containing all elements that share that same key value.
    """
    def duration_mapper(_):
        return Observable.never()

    return source.group_by_until(key_mapper, element_mapper, duration_mapper)
18,752
def quit_program():
    """Quit program"""
    clear_screen()
    sys.exit()
18,753
def gauss_pdf(x, norm, mu, sigma):
    """
    The method calculates the value of the Gaussian probability density
    function (using scipy.stats routines) for a value/array x, for a given
    mean (mu) and standard deviation (sigma). The result is normalized
    (multiplied by) 'norm', and so 'norm' should equal 1.000 unless you have
    a reason for it to be otherwise.
    """
    if any(np.isnan([norm, mu, sigma])) or any(np.isnan(x)):
        return np.NaN

    GaussPdf = norm * scipy.stats.norm.pdf(x, mu, sigma)
    return GaussPdf
18,754
def max_dbfs(sample_data: np.ndarray):
    """Peak dBFS based on the maximum energy sample.

    Args:
        sample_data ([np.ndarray]): float array, [-1, 1].

    Returns:
        float: dBFS
    """
    # Peak dBFS based on the maximum energy sample. Will prevent overdrive if used for normalization.
    return rms_to_dbfs(max(abs(np.min(sample_data)), abs(np.max(sample_data))))
18,755
def close_managers():
    """ Do cleanup on all the cookie jars """
    for prefix in MANAGERS:
        MANAGERS[prefix].cookie_jar.store_cookies()
        print("done closing manager...", prefix)
    print("EXITING...\n\n\n\n\n\n\n\n\n\n")
    _exit(0)
18,756
def youngGsmatchwinners(atpmatches):
    """calculates young grand slam match winners"""
    matches = atpmatches[(atpmatches['tourney_level'] == 'G') & (atpmatches['winner_age'] < 18)]
    print(matches[['tourney_date', 'tourney_name', 'winner_name', 'winner_age',
                   'loser_name', 'loser_age']].to_csv(sys.stdout, index=False))
18,757
def createOrUpdateAppUser(accountID=None):
    """Create or update the app user record for the given account ID."""
    accountUser = User.objects.get(pk=accountID)
    if accountUser:
        accountUser.clientes.update_or_create()
18,758
def get_deltaF_v2(specfile, res, spec_res, addpix=int(10), CNR=None, CE=None, MF_corr=True, domask=True): """Get the optical depth and return the realistic mock spectra specfile : Address to the spectra. It should be in the foramt as the fale_spectra outputs spec_res : spectral resolution in units of voxels along the spectrum addpix : make a coarser spectrum by averaging this number of consecutive pixels along the line-of-sight CNR : Continuum to Nosie ratio CE : Continumm error MF_corr : If true, correct the mean flux of the spectra domask : If true, mask strong absorbtions along the spectrum """ ps = PS(res=res, num = 1, base='./', savedir='', savefile=specfile) spec_file = h5py.File(specfile, 'r') if MF_corr: try : # If HI density is recorded, do not use the high column density # sightlines for fixing the mean flux. NHI = spec_file['colden/H/1'][:] ind = np.where(np.sum(NHI,axis=1)<10**19) except (KeyError, np.AxisError, AttributeError): # It is only for FGPA spectra, as we do not know the exact HI density ind = np.ones_like(spec_file['tau/H/1/1215'][:], dtype=bool) mean_flux_desired = get_mean_flux(z=spec_file['Header'].attrs['redshift']) flux = correct_mean_flux(tau=spec_file['tau/H/1/1215'][:], mean_flux_desired=mean_flux_desired, ind=ind) from scipy.ndimage.filters import gaussian_filter1d flux = gaussian_filter1d(flux, spec_res, axis=-1, mode='wrap') L = np.shape(flux)[1] # Check if the last pixel is fixed t = np.arange(0,L+1,addpix) new_flux = np.zeros(shape=(np.shape(flux)[0], t.size-1)) #new_NHI = np.zeros(shape=(np.shape(NHI)[0], t.size)) # Averaging over the flux within a pixel for i in range(t.size-1) : new_flux[:,i] = (np.sum(flux[:,t[i]:t[i+1]], axis=1))/addpix if CE is not None: if CNR is not None: # the order below is important (new_flux, delta) = ps.add_cont_error(CE=CE, flux=new_flux) # A bit of hack, solve it later ps.nbins = int(L/addpix) (new_flux,noise_array) = ps.add_noise(snr=CNR, flux=new_flux) else: (new_flux, delta) = ps.add_cont_error(CE=CE, flux=new_flux) else: if CNR is not None: ps.nbins = int(L/addpix) (new_flux, noise_array) = ps.add_noise(snr=CNR, flux=new_flux) if domask : mask = np.zeros_like(new_flux,dtype=bool) for i in range(new_flux.shape[0]): mask[i,:] = mask_strong_absb_v2(deltav=addpix*ps.dvbin, Fnorm=new_flux[i,:], CNR=CNR[i]*np.ones(shape=(new_flux.shape[1],)), maxdv=1000, Fm=np.mean(new_flux), ewmin=5) else : mask = np.zeros(shape=new_flux.shape, dtype=bool) new_flux = np.ravel(new_flux) current_mean_flux = np.mean(np.ravel(new_flux)) print('mean flux after noise =', current_mean_flux) print ("*** Error on mean flux :*** ", current_mean_flux-mean_flux_desired) # flux contrast for each pixel #deltaF = (new_flux/(1.0*np.mean(new_flux))) - 1 deltaF = (new_flux/current_mean_flux) - 1 return (deltaF, current_mean_flux, mask)
18,759
def quick_amplitude(x, y, x_err, y_err):
    """
    Assume y = ax
    Calculate the amplitude only.
    """
    #x[x<0] = 1E-5
    #y[y<0] = 1E-5

    xy = x*y
    xx = x*x
    xy[xy < 0] = 1E-10

    A = np.ones(x.shape[0])
    for i in np.arange(5):
        weight = 1./(np.square(y_err)+np.square(A).reshape(A.size, 1)*np.square(x_err))
        #weight = 1./(np.square(y_err)+np.square(A)*np.square(x_err))
        A = np.einsum('ij, ij->i', xy, weight)/np.einsum('ij, ij->i', xx, weight)

    chi2 = np.einsum('ij, ij->i', np.square(A.reshape(A.size, 1)*x - y), weight)
    #chi2 = np.einsum('ij, ij->i', np.square(A*x - y), weight)

    return (A, chi2)
18,760
def get_password(config, name):
    """Read password"""
    passfile = config.passstore / name
    with open(passfile, 'r') as fd:
        return fd.read()
18,761
def attribute_string(s):
    """return a python code string for a string variable"""
    if s is None:
        return "\"\""
    # escape any ' characters
    #s = s.replace("'", "\\'")
    return "\"%s\"" % s
18,762
def inflate_cell(cell, distance):
    """
    Expand the current cell in all directions and return the set.
    """
    newset = set(cell)
    if distance == 0:
        return newset
    # recursively call this based on the distance
    for offset in direction.all_offsets():
        # FIXME: If distance is large this will be inefficient, but it is like 1 or 2
        newset.update(inflate_cell(cell+offset, distance-1))
    return newset
18,763
def log_exception(logger, err):  # pylint: disable=unused-variable
    """ Log a general exception """
    separator = "\n"
    exception_name = type(err).__name__
    exception_message = str(err)
    string_buffer = (
        "Exception:",
        "Name: {0}.".format(exception_name),
        "Message: {0}.".format(exception_message)
    )
    content = separator.join(string_buffer)
    logger.exception(content)
18,764
def read_dfcpp_test_results(d4cpp_output_dir):
    """ Returns test results """
    test_results = {}
    for dir_name in os.listdir(d4cpp_output_dir):
        path_to_dir = os.path.join(d4cpp_output_dir, dir_name)
        if not os.path.isdir(path_to_dir):
            continue
        case = dir_name.split('-')[-1]
        result_path = os.path.join(path_to_dir, f"{case}.test")
        with open(result_path, 'r') as f:
            test_results[case] = f.read().strip()
    return test_results
18,765
def poi(request, id=None): """ */entry/pois/<id>*, */entry/pois/new* The entry interface's edit/add/delete poi view. This view creates the edit page for a given poi, or the "new poi" page if it is not passed an ID. It also accepts POST requests to create or edit pois. If called with DELETE, it will return a 200 upon success or a 404 upon failure. This is to be used as part of an AJAX call, or some other API call. """ if request.method == 'DELETE': poi = get_object_or_404(PointOfInterest, pk=id) poi.delete() return HttpResponse() if request.method == 'POST': message = '' post_data = request.POST.copy() errors = [] try: try: post_data['location'] = fromstr( 'POINT(%s %s)' % (post_data['longitude'], post_data['latitude']), srid=4326) except: coordinates = coordinates_from_address( post_data['street'], post_data['city'], post_data['state'], post_data['zip']) post_data['location'] = fromstr( 'POINT(%s %s)' % (coordinates[1], coordinates[0]), srid=4326) # Bad Address will be thrown if Google does not return coordinates for # the address, and MultiValueDictKeyError will be thrown if the POST # data being passed in is empty. except (MultiValueDictKeyError, BadAddressException): errors.append("Full address is required.") try: categories = [Category.objects.get( pk=int(c)) for c in post_data.get( 'category_ids', None).split(',')] except: errors.append("You must choose at least one category.") poi_form = PointOfInterestForm(post_data) if poi_form.is_valid() and not errors: image_keys = post_data.get('image_ids', None) images = [] if image_keys: images = [Image.objects.get( pk=int(i)) for i in image_keys.split(',')] video_keys = post_data.get('video_ids', None) videos = [] if video_keys: videos = [Video.objects.get( pk=int(v)) for v in video_keys.split(',')] hazard_keys = post_data.get('hazard_ids', None) hazards = [] if hazard_keys: hazards = [Hazard.objects.get( pk=int(h)) for h in hazard_keys.split(',')] if id: poi = PointOfInterest.objects.get(id=id) # process images existing_images = poi.images.all() for image in existing_images: if image not in images: poi.images.remove(image) for image in images: if image not in existing_images: poi.images.add(image) # process videos existing_videos = poi.videos.all() for video in existing_videos: if video not in videos: poi.videos.remove(video) for video in videos: if video not in existing_videos: poi.videos.add(video) # process hazards existing_hazards = poi.hazards.all() for hazard in existing_hazards: if hazard not in hazards: poi.hazards.remove(hazard) for hazard in hazards: if hazard not in existing_hazards: poi.hazards.add(hazard) # process categories existing_categories = poi.categories.all() for category in existing_categories: if category not in categories: poi.categories.remove(category) for category in categories: if category not in existing_categories: poi.categories.add(category) poi.__dict__.update(**poi_form.cleaned_data) poi.save() else: poi = poi_form.save() for image in images: poi.images.add(image) for video in videos: poi.videos.add(video) for hazard in hazards: poi.hazards.add(hazard) for category in categories: poi.categories.add(category) return HttpResponseRedirect( "%s?saved=true" % reverse('entry-list-pois')) else: pass else: errors = [] message = '' if id: poi = PointOfInterest.objects.get(id=id) poi.latitude = poi.location[1] poi.longitude = poi.location[0] title = "Edit {0}".format(poi.name) post_url = reverse('edit-poi', kwargs={'id': id}) poi_form = PointOfInterestForm( instance=poi, initial={'latitude': poi.latitude, 
'longitude': poi.longitude}) existing_images = poi.images.all() existing_videos = poi.videos.all() existing_categories = poi.categories.all() existing_hazards = poi.hazards.all() if request.GET.get('success') == 'true': message = "Item saved successfully!" elif request.method != 'POST': poi_form = PointOfInterestForm() post_url = reverse('new-poi') title = "New Item" existing_images = [] existing_videos = [] existing_categories = [] existing_hazards = [] else: post_url = reverse('new-poi') title = "New Item" existing_images = [] existing_videos = [] existing_categories = [] existing_hazards = [] data = {'images': [], 'videos': [], 'categories': [], 'hazards': []} for image in Image.objects.all(): data['images'].append({ 'id': image.id, 'name': image.name }) for video in Video.objects.all(): data['videos'].append({ 'id': video.id, 'name': video.name }) for hazard in Hazard.objects.all(): data['hazards'].append({ 'id': hazard.id, 'name': hazard.name }) for category in Category.objects.all(): data['categories'].append({ 'id': category.id, 'category': category.category }) return render(request, 'poi.html', { 'parent_url': [ {'url': reverse('home'), 'name': 'Home'}, {'url': reverse('entry-list-pois'), 'name': 'Points OfInterest'} ], 'existing_images': existing_images, 'existing_videos': existing_videos, 'existing_hazards': existing_hazards, 'existing_categories': existing_categories, 'data_json': json.dumps(data), 'data_dict': data, 'title': title, 'message': message, 'post_url': post_url, 'errors': errors, 'poi_form': poi_form, })
18,766
def relativeScope(fromScope, destScope):
    """relativeScope variant that handles invented fromScopes"""
    rs = idlutil.relativeScope(fromScope, destScope)
    if rs[0] is None:
        try:
            rd = idlast.findDecl(destScope)
        except idlast.DeclNotFound:
            return rs

        new_rs = rs
        while new_rs[0] is None and len(fromScope) > 1:
            fromScope = fromScope[:-1]
            new_rs = idlutil.relativeScope(fromScope, destScope)

        if new_rs[0] is not None:
            return new_rs

    return rs
18,767
def srbt(peer, pkts, inter=0.1, *args, **kargs):
    """send and receive using a bluetooth socket"""
    s = conf.BTsocket(peer=peer)
    a, b = sndrcv(s, pkts, inter=inter, *args, **kargs)
    s.close()
    return a, b
18,768
def p_specification_1(t):
    """specification : definition specification
                     | program_def specification"""
    t[0] = Specification(t[1], t[2])
18,769
def export(state, from_dir, filter):
    """Export grades for uploading."""
    grading_manager = GradingManager(state.get_assignment(), from_dir, filter)

    if not grading_manager.submission_count():
        raise click.ClickException('no submissions match the filter given!')

    state.grades = [grade for grade in grading_manager.grades() if grade.grade_ready()]
18,770
def testgetpars(times=range(5000), lmodel='alpha', pmodel='quadratic', beta=0.07, nradii=101, n=[4,101], disp=0, optimize=None, save=False, ret=True, test=False, corr='tor', d=0.01, a=0.52, Ra=1.50): """Use data Ip, Btw, Btave(, and perhaps B0_MSE) values for range of indices times to generate values for the model parameters using getpars for comparison to database values. Each comparison is expressed in terms of the log10 of the absolute value of the relative error.""" import numpy as np import scipy.io as si pars0 = [3.25, 4.0] parnames = ['lam0', 'alpha'] npars = len(pars0) # Get data dire = '../idl_adhoc/' rs = si.readsav(dire + 'test_b0-1080219003.sav') time = rs['test']['t'][0] ip = rs['test']['ip'][0] ip = ip * a / 0.52 btw = rs['test']['btw'][0] btave = rs['test']['btave'][0] #b0 = rs['test']['b0'][0] alpha = rs['test']['alpha'][0] lam0 = rs['test']['lam0'][0] # Take sub-arrays ip = ip[times] btw = btw[times] btave = btave[times] #b0 = b0[times] time = time[times] alpha = alpha[times] lam0 = lam0[times] ntimes = len(times) pars = np.zeros([ntimes, npars]) # pars will be arrays of shape ntimes, npars. warnflag = np.zeros(ntimes).astype(int) # Get pars for all times for time in range(0, ntimes): pars[time, :], warnflag[time] = getpars(pars0, ip=ip[time], btw=btw[time], btave=btave[time], lmodel=lmodel, pmodel=pmodel, beta=beta, n=n, disp=disp, corr=corr, d=d, a=a, Ra=Ra) # print ( time, # np.log10(np.abs((pars[time, 0] # - lam0[time])/lam0[time])), # np.log10(np.abs((pars[time, 1] # - alpha[time])/alpha[time])) ) print ( time, pars[time, 0], lam0[time], pars[time, 1], alpha[time] )
18,771
def version(): """ The function that gets invoked by the subcommand "version" """ click.echo(f"Python {platform.python_version()}\n" f"{{ cookiecutter.project_slug }} {__version__}")
18,772
def rotation_matrix_from_vectors(vector1, vector2):
    """ Finds a rotation matrix that can rotate vector1 to align with vector2

    Args:
        vector1: np.narray (3)
            Vector we would apply the rotation to
        vector2: np.narray (3)
            Vector that will be aligned to

    Returns:
        rotation_matrix: np.narray (3,3)
            Rotation matrix that when applied to vector1 will turn it to the
            same direction as vector2
    """
    if all(np.abs(vector1) == np.abs(vector2)):
        return np.eye(3)
    a, b = (vector1 / np.linalg.norm(vector1)).reshape(3), (vector2 / np.linalg.norm(vector2)).reshape(3)
    v = np.cross(a, b)
    c = np.dot(a, b)
    s = np.linalg.norm(v)
    matrix = np.array([[0, -v[2], v[1]],
                       [v[2], 0, -v[0]],
                       [-v[1], v[0], 0]])
    rotation_matrix = np.eye(3) + matrix + matrix.dot(matrix) * ((1 - c) / (s ** 2))
    return rotation_matrix
18,773
def disconnect():
    """Disconnect from the POP and SMTP connections"""
    cpop = connectPop()
    csmtp = connectSmtp()
    cpop.quit()  # close connection, mark messages as read
    csmtp.close()
18,774
def read_LUT_SonySPI1D(path: str) -> Union[LUT1D, LUT3x1D]: """ Read given *Sony* *.spi1d* *LUT* file. Parameters ---------- path *LUT* path. Returns ------- :class:`colour.LUT1D` or :class:`colour.LUT3x1D` :class:`LUT1D` or :class:`LUT3x1D` class instance. Examples -------- Reading a 1D *Sony* *.spi1d* *LUT*: >>> import os >>> path = os.path.join( ... os.path.dirname(__file__), 'tests', 'resources', 'sony_spi1d', ... 'eotf_sRGB_1D.spi1d') >>> print(read_LUT_SonySPI1D(path)) LUT1D - eotf sRGB 1D -------------------- <BLANKLINE> Dimensions : 1 Domain : [-0.1 1.5] Size : (16,) Comment 01 : Generated by "Colour 0.3.11". Comment 02 : "colour.models.eotf_sRGB". Reading a 3x1D *Sony* *.spi1d* *LUT*: >>> path = os.path.join( ... os.path.dirname(__file__), 'tests', 'resources', 'sony_spi1d', ... 'eotf_sRGB_3x1D.spi1d') >>> print(read_LUT_SonySPI1D(path)) LUT3x1D - eotf sRGB 3x1D ------------------------ <BLANKLINE> Dimensions : 2 Domain : [[-0.1 -0.1 -0.1] [ 1.5 1.5 1.5]] Size : (16, 3) Comment 01 : Generated by "Colour 0.3.11". Comment 02 : "colour.models.eotf_sRGB". """ title = path_to_title(path) domain_min, domain_max = np.array([0, 1]) dimensions = 1 data = [] comments = [] with open(path) as spi1d_file: lines = filter(None, (line.strip() for line in spi1d_file.readlines())) for line in lines: if line.startswith("#"): comments.append(line[1:].strip()) continue tokens = line.split() if tokens[0] == "Version": continue if tokens[0] == "From": domain_min, domain_max = as_float_array(tokens[1:]) elif tokens[0] == "Length": continue elif tokens[0] == "Components": component = as_int_scalar(tokens[1]) attest( component in (1, 3), "Only 1 or 3 components are supported!", ) dimensions = 1 if component == 1 else 2 elif tokens[0] in ("{", "}"): continue else: data.append(tokens) table = as_float_array(data) LUT: Union[LUT1D, LUT3x1D] if dimensions == 1: LUT = LUT1D( np.squeeze(table), title, np.array([domain_min, domain_max]), comments=comments, ) elif dimensions == 2: LUT = LUT3x1D( table, title, np.array( [ [domain_min, domain_min, domain_min], [domain_max, domain_max, domain_max], ] ), comments=comments, ) return LUT
18,775
def _compare_manifests(path_local, path_irods, logger): """Compare manifests at paths ``path_local`` and ``path_irods``.""" # Load file sizes and checksums. info_local = {} with open(path_local, "rt") as inputf: for line in inputf: if line.startswith("#") or line.startswith("%"): continue line = line.strip() size, chksum, path = line.split(",", 2) info_local[path] = (size, chksum) info_irods = {} with open(path_irods, "rt") as inputf: for line in inputf: line = line.strip() size, chksum, path = line.split(",", 2) if chksum.startswith("sha2:"): chksum = base64.b64decode(chksum[5:]).hex() info_irods[path] = (size, chksum) # Compare file sizes and checksums. problem = None for path in sorted(info_local.keys() & info_irods.keys()): size_local, chksum_local = info_local[path] size_irods, chksum_irods = info_irods[path] if size_local != size_irods: problem = "file size mismatch %s vs %s for %s" % (size_local, size_irods, path) logger.error( "file size does not match %s vs %s for %s" % (size_local, size_irods, path) ) if chksum_local != chksum_irods: problem = "file checksum mismatch %s vs %s for %s" % (chksum_local, chksum_irods, path) logger.error( "file checksum does not match %s vs %s for %s" % (chksum_local, chksum_irods, path) ) # Find extra items on either side. extra_local = info_local.keys() - info_irods.keys() if sorted(extra_local): problem = "extra file in local: %s" % list(sorted(extra_local))[0] logger.error( "%d items locally that are not in irods, up to 10 shown:\n %s" % (len(extra_local), " \n".join(list(sorted(extra_local))[:10])) ) extra_irods = info_irods.keys() - info_local.keys() if sorted(extra_irods): problem = "extra file in irods : %s" % list(sorted(extra_irods))[0] logger.error( "%d items in irods that are not present locally, up to 10 shown:\n %s" % (len(extra_irods), " \n".join(list(sorted(extra_irods))[:10])) ) if problem: raise RuntimeError("Difference in manifests: %s" % problem)
18,776
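# Minimal, self-contained usage sketch for _compare_manifests above. The file
# names and manifest contents are made up for illustration; each manifest line
# is "size,checksum,path", and since the two manifests agree the call returns
# without raising.
import logging
import os
import tempfile

def _demo_compare_manifests():
    lines = "1024,abc123,data/sample.fastq.gz\n2048,def456,data/sample.bam\n"
    with tempfile.TemporaryDirectory() as tmp:
        path_local = os.path.join(tmp, "local.manifest")
        path_irods = os.path.join(tmp, "irods.manifest")
        with open(path_local, "wt") as outf:
            outf.write("# local manifest header\n" + lines)
        with open(path_irods, "wt") as outf:
            outf.write(lines)
        _compare_manifests(path_local, path_irods, logging.getLogger("demo"))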
def test_fee_estimate(flat_fee, prop_fee_cli, max_lin_imbalance_fee, target_amount, expected_fee):
    """ Tests the backwards fee calculation. """
    capacity = TA(10_000)
    prop_fee = ppm_fee_per_channel(ProportionalFeeAmount(prop_fee_cli))
    imbalance_fee = None
    if max_lin_imbalance_fee > 0:
        # This creates a simple asymmetric imbalance fee
        imbalance_fee = [(0, 0), (capacity, 0), (2 * capacity, max_lin_imbalance_fee)]

    tn = TokenNetworkForTests(
        channels=[dict(participant1=1, participant2=2), dict(participant1=2, participant2=3)],
        default_capacity=capacity,
    )

    tn.set_fee(2, 1, flat=flat_fee, proportional=prop_fee, imbalance_penalty=imbalance_fee)
    tn.set_fee(2, 3, flat=flat_fee, proportional=prop_fee, imbalance_penalty=imbalance_fee)
    assert tn.estimate_fee(1, 3, value=PA(target_amount)) == expected_fee
18,777
def jaxpr_replicas(jaxpr) -> int:
  """The number of replicas needed for a jaxpr.

  For an eqn, multiply the `axis_size` with the `jaxpr_replicas` of the
  subjaxprs. For a list of eqns, take the maximum number of replicas.
  """
  if isinstance(jaxpr, core.ClosedJaxpr):
    jaxpr = jaxpr.jaxpr
  return max(unsafe_map(eqn_replicas, jaxpr.eqns), default=1)
18,778
def tokens2ELMOids(tokens, sent_length): """ Transform input tokens to elmo ids. :param tokens: a list of words. :param sent_length: padded sent length. :return: numpy array of elmo ids, sent_length * 50 """ elmo_ids = batch_to_ids([tokens]).squeeze(0) pad_c = (0, 0, 0, sent_length - elmo_ids.size(0)) # assume PAD_id = 0 elmo_ids = torch.nn.functional.pad(elmo_ids, pad_c, value=0) elmo_ids = elmo_ids.data.cpu().numpy() return elmo_ids
18,779
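# Hypothetical usage sketch for tokens2ELMOids above, assuming torch and
# allennlp's batch_to_ids are importable as in the original module. Each token
# becomes a row of 50 character ids, zero-padded up to sent_length rows.
elmo_ids = tokens2ELMOids(["the", "cat", "sat"], sent_length=10)
print(elmo_ids.shape)  # expected: (10, 50)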
def change_order_line_quantity(line, new_quantity): """Change the quantity of ordered items in a order line.""" line.quantity = new_quantity line.save() if not line.delivery_group.get_total_quantity(): line.delivery_group.delete() order = line.delivery_group.order if not order.get_items(): order.change_status(OrderStatus.CANCELLED) order.create_history_entry( status=OrderStatus.CANCELLED, comment=pgettext_lazy( 'Order status history entry', 'Order cancelled. No items in order'))
18,780
def load_configs(default, user):
    """Read the given config file paths into their respective config object."""
    global default_config_path
    global user_config_path

    default_config.read(default)
    user_config.read(user)

    default_config_path = default
    user_config_path = user
18,781
def log(content): """Prints out a string prepended by the current timestamp""" print('\033[0;33m' + str(datetime.now()) + "\033[00m " + str(content))
18,782
def write_mapfile(stream, fh, checksum_func=None): """ Write an esgpublish mapfile from a stream of tuples (filepath, drs). :param checksum_func: A callable of one argument (path) which returns (checksum_type, checksum) or None """ for path, drs in stream: file_stat = os.stat(path) size = file_stat[stat.ST_SIZE] mtime = file_stat[stat.ST_MTIME] params = [drs.to_dataset_id(with_version=False), path, str(size), "mod_time=%f"%float(mtime)] if checksum_func: ret = checksum_func(path) if ret is not None: checksum_type, checksum = ret params.append('checksum_type=%s' % checksum_type) params.append('checksum=%s' % checksum) print >>fh, ' | '.join(params)
18,783
def get_section_endpoints(section_name): """Get the [lon, lat] endpoints associated with a pre-defined section e.g. >> pt1, pt2 = get_section_endpoints('Drake Passage') pt1 = [-68, -54] pt2 = [-63, -66] These sections mirror the gcmfaces definitions, see gcmfaces/gcmfaces_calc/gcmfaces_lines_pairs.m Parameters ---------- section_name : str name of the section to compute transport across Returns ------- pt1, pt2 : array_like array with two values, [lon, lat] of each endpoint or None if section_name is not in the pre-defined list of sections """ # Set to input lower case and remove spaces/tabs section_name = ''.join(section_name.lower().split()) # Test to see if name exists in list section_list = get_available_sections() section_list = [''.join(name.lower().split()) for name in section_list] if section_name not in section_list: warnings.warn('\nSection name %s unavailable as pre-defined section' % section_name) return None if section_name == 'drakepassage': pt1 = [-68, -54] pt2 = [-63, -66] elif section_name == 'beringstrait': pt1 = [-173, 65.5] pt2 = [-164, 65.5] elif section_name == 'gibraltar': pt1 = [-5, 34] pt2 = [-5, 40] elif section_name == 'floridastrait': pt1 = [-81, 28] pt2 = [-77, 26] elif section_name == 'floridastraitw1': pt1 = [-81, 28] pt2 = [-79, 22] elif section_name == 'floridastraits1': pt1 = [-76, 21] pt2 = [-76, 8] elif section_name == 'floridastraite1': pt1 = [-77, 26] pt2 = [-77, 24] elif section_name == 'floridastraite2': pt1 = [-77, 24] pt2 = [-77, 22] elif section_name == 'floridastraite3': pt1 = [-76, 21] pt2 = [-72, 18.5] elif section_name == 'floridastraite4': pt1 = [-72, 18.5] pt2 = [-72, 10] elif section_name == 'davisstrait': pt1 = [-65, 66] pt2 = [-50, 66] elif section_name == 'denmarkstrait': pt1 = [-35, 67] pt2 = [-20, 65] elif section_name == 'icelandfaroe': pt1 = [-16, 65] pt2 = [ -7, 62.5] elif section_name == 'scotlandnorway': pt1 = [-4, 57] pt2 = [ 8, 62] elif section_name == 'indonesiaw1': pt1 = [103, 4] pt2 = [103,-1] elif section_name == 'indonesiaw2': pt1 = [104, -3] pt2 = [109, -8] elif section_name == 'indonesiaw3': pt1 = [113, -8.5] pt2 = [118, -8.5] elif section_name == 'indonesiaw4': pt1 = [118, -8.5] pt2 = [127, -15] elif section_name == 'australiaantarctica': pt1 = [127, -25] pt2 = [127, -68] elif section_name == 'madagascarchannel': pt1 = [38, -10] pt2 = [46, -22] elif section_name == 'madagascarantarctica': pt1 = [46, -22] pt2 = [46, -69] elif section_name == 'southafricaantarctica': pt1 = [20, -30] pt2 = [20, -69.5] return pt1, pt2
18,784
def get_vm_metrics(monitor_client, resource_id):
    """Get metrics for the given vm. Returns row of cpu, disk, network activity"""
    today = datetime.utcnow().date()
    last_week = today - timedelta(days=7)

    metrics_data = monitor_client.metrics.list(
        resource_id,
        timespan="{}/{}".format(last_week, today),
        interval="PT12H",
        metricnames="Percentage CPU,Disk Read Bytes,Disk Write Bytes,Network In Total,Network Out Total",
        aggregation="Minimum,Average,Maximum",
    )

    ave_cpu, min_cpu, max_cpu = [], [], []
    ave_disk_read, ave_disk_write = [], []
    ave_network_in, ave_network_out = [], []

    # Map each metric name to the list that collects its average values.
    averages = {
        "Percentage CPU": ave_cpu,
        "Disk Read Bytes": ave_disk_read,
        "Disk Write Bytes": ave_disk_write,
        "Network In Total": ave_network_in,
        "Network Out Total": ave_network_out,
    }

    for item in metrics_data.value:
        target = averages.get(item.name.value)
        if target is None:
            continue
        for timeserie in item.timeseries:
            for data in timeserie.data:
                if data.average:
                    target.append(data.average)
                # Minimum/maximum are only tracked for CPU.
                if item.name.value == "Percentage CPU":
                    if data.minimum:
                        min_cpu.append(data.minimum)
                    if data.maximum:
                        max_cpu.append(data.maximum)

    row = (
        get_mean(ave_cpu),
        get_min(min_cpu),
        get_max(max_cpu),
        get_mean(ave_disk_read),
        get_mean(ave_disk_write),
        get_mean(ave_network_in),
        get_mean(ave_network_out),
    )
    return row
18,785
def geo_api(): """ GeoAPI fixture data. See more at: http://doc.pytest.org/en/latest/fixture.html """ return GeoApi(os.getenv("TEST_AIRBUS_API_KEY"))
18,786
def current_time_id(): """ Returns the current time ID in milliseconds """ return int(round(time.time() * 1000))
18,787
def get_db(): """ Connects to the database. Returns a database object that can be queried. """ if 'db' not in g: g.db = sqlite3.connect( 'chemprop.sqlite3', detect_types=sqlite3.PARSE_DECLTYPES ) g.db.row_factory = sqlite3.Row return g.db
18,788
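# Usage sketch for get_db above; `app` is a hypothetical Flask application
# created only for this example, and the original module is assumed to import
# flask's `g` and sqlite3. get_db must be called inside an application context.
from flask import Flask

app = Flask(__name__)
with app.app_context():
    db = get_db()
    tables = db.execute("SELECT name FROM sqlite_master").fetchall()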
def test_invariance_tests(kwargs, model, dummy_house):
    """
    Keeping all except 1 feature at a time the same
    Changing these features a bit should not result in a noticeable
    difference in the model's prediction compared with the ground truth
    """
    changed_score, unchanged_score = get_test_case(
        dummy_house,
        model,
        **kwargs,
    )

    # check that there's at most about a $5k difference between unchanged and
    # changed house prices, so use an absolute (not relative) tolerance
    # $5k is something I feel makes sense, obviously domain knowledge plays
    # a big role in coming up with these test parameters
    assert math.isclose(
        changed_score,
        unchanged_score,
        abs_tol=5e3,
    )
18,789
def test_concurrent_persistent_group(dev, apdev): """Concurrent P2P persistent group""" logger.info("Connect to an infrastructure AP") hostapd.add_ap(apdev[0]['ifname'], { "ssid": "test-open", "channel": "2" }) dev[0].global_request("SET p2p_no_group_iface 0") dev[0].connect("test-open", key_mgmt="NONE", scan_freq="2417") logger.info("Run persistent group test while associated to an AP") form(dev[0], dev[1]) [go_res, cli_res] = invite_from_cli(dev[0], dev[1]) if go_res['freq'] != '2417': raise Exception("Unexpected channel selected: " + go_res['freq']) [go_res, cli_res] = invite_from_go(dev[0], dev[1]) if go_res['freq'] != '2417': raise Exception("Unexpected channel selected: " + go_res['freq'])
18,790
def computePatientConfusionMatrix(patient_prediction_location, patient_ground_truth_location, labels_names_file):
    """
    @brief: Compute the patient confusion matrix given the location of its prediction and ground truth.
    @param patient_prediction_location : folder containing the prediction data
    @param patient_ground_truth_location : folder containing the ground truth data
    @param labels_names_file : file containing the name of the labels (stored as integers)

    We define the confusion matrix as the length confusion matrix with column normalization.
    It represents the distribution (ratio) of predicted labels for a given GT label.

    As for the length confusion matrix, it is defined with the following convention:
    - each line corresponds to a given prediction class
    - each column corresponds to a given ground truth class

    Both folders are assumed to have a particular hierarchy:
    - The folder patient_ground_truth_location:
        * all branches named "branch????.txt"
        * a "branch_labels.txt" file
    - The folder patient_prediction_location:
        * all branches named "branch????.txt"
        * a file "recomputed_labels.txt"

    N.B. It is assumed that the number of branches in both folders is identical and
    that the files storing labels have the same number of lines.
    """
    # compute the patient length confusion matrix:
    (resulting_confusion_matrix, label_legend) = computePatientLengthConfusionMatrix(patient_prediction_location,
                                                                                     patient_ground_truth_location,
                                                                                     labels_names_file)

    # normalize each column:
    totalColumnLength = sum(resulting_confusion_matrix, axis=0)
    totalColumnLength = maximum(totalColumnLength, MY_EPSILON)  # prevent 0-division
    resulting_confusion_matrix /= totalColumnLength

    # return the confusion matrix with legend
    return (resulting_confusion_matrix, label_legend)
18,791
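# Illustration (not project code) of the column normalization performed in
# computePatientConfusionMatrix above: each ground-truth column of the length
# confusion matrix is rescaled so that it sums to one; the epsilon value
# below stands in for MY_EPSILON.
import numpy as np

length_cm = np.array([[3.0, 1.0],
                      [1.0, 4.0]])  # rows: predicted label, columns: GT label
eps = 1e-12
normalized = length_cm / np.maximum(length_cm.sum(axis=0), eps)
print(normalized)  # [[0.75, 0.2], [0.25, 0.8]]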
async def ping_server(): """ Ping Server =========== Returns the message "The Optuna-server is alive!" if the server is running. Parameters ---------- None Returns ------- msg : str A message witnessing that the server is running. """ msg = 'The Optuna-server is alive!' return msg
18,792
def treynor(rp: np.ndarray, rb: np.ndarray, rf: np.ndarray) -> np.ndarray: """Returns the treynor ratios for all pairs of p portfolios and b benchmarks Args: rp (np.ndarray): p-by-n matrix where the (i, j) entry corresponds to the j-th return of the i-th portfolio rb (np.ndarray): b-by-n matrix where the (i, j) entry corresponds to the j-th return of the i-th benchmark rf (np.ndarray): Scalar risk-free rate (as a 0-D tensor) Returns: np.ndarray: p-by-b matrix where the (i, j) entry corresponds to the treynor ratio for the i-th portfolio and j-th benchmark """ __expect_rp_rb_rf(rp, rb, rf) return kernels.treynor(rp, rb, rf)
18,793
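# Plain-NumPy sketch of one common textbook definition of the Treynor ratio
# (mean excess return divided by the portfolio's beta against the benchmark).
# The kernels.treynor call used above may rely on a different estimator, so
# this is only for orientation, not a drop-in replacement.
import numpy as np

def treynor_reference(rp, rb, rf):
    excess = rp.mean(axis=1) - rf                       # (p,) mean excess returns
    out = np.empty((rp.shape[0], rb.shape[0]))
    for j in range(rb.shape[0]):
        var_b = rb[j].var(ddof=1)
        for i in range(rp.shape[0]):
            beta = np.cov(rp[i], rb[j])[0, 1] / var_b   # beta of portfolio i vs. benchmark j
            out[i, j] = excess[i] / beta
    return out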
def virus_tsne_list(tsne_df, virus_df):
    """Return a dict with the t-SNE coordinates and virus UMI count per barcode."""
    tsne_df.rename(columns={"Unnamed: 0": "barcode"}, inplace=True)
    df = pd.merge(tsne_df, virus_df, on="barcode", how="left")
    df["UMI"] = df["UMI"].fillna(0)
    tSNE_1 = list(df.tSNE_1)
    tSNE_2 = list(df.tSNE_2)
    virus_UMI = list(df.UMI)
    res = {"tSNE_1": tSNE_1, "tSNE_2": tSNE_2, "virus_UMI": virus_UMI}
    return res
18,794
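# Small usage sketch for virus_tsne_list above with made-up data; pandas is
# assumed to be imported as pd in the original module.
import pandas as pd

tsne_df = pd.DataFrame({
    "Unnamed: 0": ["AAAC", "AAAG"],  # barcode index column as written by to_csv
    "tSNE_1": [1.0, -2.0],
    "tSNE_2": [0.5, 3.0],
})
virus_df = pd.DataFrame({"barcode": ["AAAC"], "UMI": [7]})
print(virus_tsne_list(tsne_df, virus_df))
# {'tSNE_1': [1.0, -2.0], 'tSNE_2': [0.5, 3.0], 'virus_UMI': [7.0, 0.0]}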
def sometimes(aug): """ Return a shortcut for iaa.Sometimes :param aug: augmentation method :type aug: iaa.meta.Augmenter :return: wrapped augmentation method :rtype: iaa.meta.Augmenter """ return iaa.Sometimes(0.5, aug)
18,795
def test_BLPS_02_AC(): """ simple binary lens with extended source and different methods to evaluate magnification - version with adaptivecontouring """ params = mm.ModelParameters({ 't_0': t_0, 'u_0': u_0, 't_E': t_E, 'alpha': alpha, 's': s, 'q': q, 'rho': rho}) model = mm.Model(parameters=params) t = np.array([6112.5, 6113., 6114., 6115., 6116., 6117., 6118., 6119]) t += 2450000. ac_name = 'Adaptive_Contouring' methods = [2456113.5, 'Quadrupole', 2456114.5, 'Hexadecapole', 2456116.5, ac_name, 2456117.5] accuracy_1 = {'accuracy': 0.04} accuracy_2 = {'accuracy': 0.01, 'ld_accuracy': 0.00001} model.set_magnification_methods(methods) model.set_magnification_methods_parameters({ac_name: accuracy_1}) data = mm.MulensData(data_list=[t, t*0.+16., t*0.+0.01]) result = model.get_magnification(data.time) expected = np.array([4.69183078, 2.87659723, 1.83733975, 1.63865704, 1.61038135, 1.63603122, 1.69045492, 1.77012807]) almost(result, expected, decimal=3) # Below we test passing the limb coeff to VBBL function. # data.bandpass = 'I' model.set_limb_coeff_u('I', 10.) # This is an absurd value but I needed something quick. model.set_magnification_methods_parameters({ac_name: accuracy_2}) # result = model.data_magnification[0] result = model.get_magnification( data.time, gamma=model.get_limb_coeff_gamma('I')) almost(result[5], 1.6366862, decimal=3)
18,796
def rev_to_b10(letters): """Convert an alphabet number to its decimal representation""" return sum( (ord(letter) - A_UPPERCASE + 1) * ALPHABET_SIZE**i for i, letter in enumerate(reversed(letters.upper())) )
18,797
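# Worked example for rev_to_b10 above. The module-level constants are not
# shown in the snippet; the values below are the usual assumptions for
# spreadsheet-style column labels (A=1, ..., Z=26, AA=27, ...).
A_UPPERCASE = ord('A')
ALPHABET_SIZE = 26

print(rev_to_b10("A"))   # 1
print(rev_to_b10("Z"))   # 26
print(rev_to_b10("AB"))  # 1*26 + 2 = 28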
def AddPath(match):
  """Helper for adding file path for WebRTC header files, ignoring others."""
  file_to_examine = match.group(1) + '.h'
  # TODO(mflodman) Use current directory and find webrtc/.
  for path, _, files in os.walk('./webrtc'):
    for filename in files:
      if fnmatch.fnmatch(filename, file_to_examine):
        path_name = os.path.join(path, filename).replace('./', '')
        return '#include "%s"\n' % path_name

  # No path found, return original string.
  return '#include "' + file_to_examine + '"\n'
18,798
def emailcallback(pattern, line, lines, filename): """Send an email with the log context, when there is a match.""" _dum, just_the_name = os.path.split(filename) subject = "{} in {}".format(pattern, just_the_name) body = ''.join(lines) SendEmail(subject, body).start() logger.info("{} in {}".format(pattern, filename))
18,799