content
stringlengths
35
762k
sha1
stringlengths
40
40
id
int64
0
3.66M
import warnings


def certification_to_csv(stats, filepath, product_id):
    """Write certification outputs to the CSV file specified.

    Parameters
    ----------
    stats : list of dict
        List of statistical outputs from the function
        `thermostat.compute_summary_statistics()`.
    filepath : str
        Filepath specification for location of the output CSV file.
    product_id : str
        Product identifier written into every output row.

    Returns
    -------
    df : pd.DataFrame or None
        DataFrame containing the data written to CSV, or None when
        there is nothing to export.
    """
    if stats is None:
        warnings.warn("No certification data to export.")
        return None

    # Labels index the stats rows; the software version is taken from a
    # known reference row.
    labels = [entry.get("label") for entry in stats]
    sw_version = stats[labels.index("all_tau_cvrmse_savings_p01_filter_heating")][
        "sw_version"
    ]

    def _row(column_filter, column_data):
        # One certification row: identity, metric/filter metadata, value.
        value = stats[labels.index(column_filter)].get(column_data, None)
        return [
            product_id,
            sw_version,
            COLUMN_LOOKUP[column_data]["metric"],
            FILTER_LOOKUP[column_filter]["filter"],
            FILTER_LOOKUP[column_filter]["region"],
            COLUMN_LOOKUP[column_data]["statistic"],
            FILTER_LOOKUP[column_filter]["season"],
            value,
        ]

    certification_rows = [
        _row(column_filter, column_data)
        for column_filter, column_data in DATA_COLUMNS
    ]

    output_dataframe = pd.DataFrame(certification_rows, columns=CERTIFICATION_HEADERS)
    output_dataframe.to_csv(
        filepath, index=False, columns=CERTIFICATION_HEADERS, float_format="%.2f"
    )
    return output_dataframe
23f1a84fa2d9c5ad25eb04d23ea9646cf2849286
24,400
def get_ipns_link(name: str) -> str:
    """Look up an IPNS link by the (case-insensitive) name we remember it by.

    Args:
        name (str): Name we call the IPNS link.

    Returns:
        str: The IPNS url.

    Raises:
        ValueError: If no key with that name is found.
    """
    key_listing = IPFS_CLIENT.key.list()
    wanted = name.lower()
    matches = [entry for entry in key_listing['Keys'] if entry['Name'] == wanted]
    if not matches:
        raise ValueError(f'IPNS link not found with name: "{name}"!')
    # Use the first match, mirroring the previous behaviour.
    ipns_id = matches[0]['Id']
    return f'{IPNS_PATH}{ipns_id}'
1c171e0539013aecd3e45a7cf7ca2c2907df3955
24,401
def joint_sim(num_samp, num_dim, noise=0.5):
    """
    Generate a joint-normal simulation.

    :param num_samp: number of samples for the simulation
    :param num_dim: number of dimensions for the simulation
    :param noise: noise level of the simulation, defaults to 0.5
    :return: the data matrix ``x`` and response array ``y``, each of
        shape ``(num_samp, num_dim)``
    """
    gauss_noise = np.random.normal(loc=0, scale=1, size=(num_samp, 1))
    # Noise is only injected in the multivariate case (kappa acts as a switch).
    kappa = 1 if num_dim > 1 else 0
    rho = 1 / (2 * num_dim)
    # Block covariance: identity with constant cross-correlation rho
    # between the x-block and the y-block.
    sig = np.diag(np.ones(shape=(2 * num_dim)))
    sig[num_dim:(2 * num_dim), 0:num_dim] = rho
    sig[0:num_dim, num_dim:(2 * num_dim)] = rho
    samp = np.random.multivariate_normal(
        cov=sig, mean=np.zeros(2 * num_dim), size=num_samp
    )
    # BUG FIX: the original used samp[:, (num_dim+1):(2*num_dim)] when
    # num_dim > 1, silently dropping one column of y. Both halves of the
    # joint sample are num_dim wide, so one slice works for every num_dim.
    x = samp[:, 0:num_dim]
    y = samp[:, num_dim:(2 * num_dim)] + kappa * noise * gauss_noise
    return x, y
71296c5093aa3113b7df70cb8966ac9ca06ccb31
24,402
def calculate_seasonal_tilt(axial_tilt, degrees):
    """Compute the seasonal tilt offset from axial tilt and orbital position.

    axial_tilt -- The planet's tilt, e.g. Earth's is 23.44 degrees.
    degrees -- Position along the orbit, between 0 and 360.
               0/360 and 180 are equinoxes; 90 and 270 are solstices.

    NOTE: in reality a planet's tilt does not change along the orbit —
    what changes is the *relative* angle of incoming sunlight, which
    this sinusoid models.
    """
    orbit_radians = degrees * np.pi / 180
    return np.sin(orbit_radians) * axial_tilt
a31e072f95d9d856b2c2d7549b7f96a97a4d6b60
24,403
import logging


def extractWithoutOrder(query, choices, processor=default_processor, scorer=default_scorer, score_cutoff=0):
    """Select the best match in a list or dictionary of choices.

    Find matches in a list or dictionary of choices and yield tuples of the
    match and its score. With dictionary choices, the key of each match is
    also yielded.

    Arguments:
        query: An object representing the thing we want to find.
        choices: An iterable or dictionary-like object containing choices
            to be matched against the query. Dictionary arguments of
            {key: value} pairs will attempt to match the query against
            each value.
        processor: Optional function of the form f(a) -> b, where a is the
            query or individual choice and b is the choice to be used in
            matching. Defaults to fuzzywuzzy.utils.full_process().
        scorer: Optional scoring function f(query, choice) -> int.
            By default fuzz.WRatio() is used; it expects both query and
            choice to be strings.
        score_cutoff: Optional score threshold; matches scoring below it
            are not yielded. Defaults to 0.

    Returns:
        Generator of (match, score) tuples for list-like choices, or
        (match, score, key) tuples for dictionary-like choices.
    """

    def passthrough(value):
        return value

    # Catch empty choices up front; generators without a length fall through.
    try:
        if choices is None or len(choices) == 0:
            return
    except TypeError:
        pass

    # If the processor was removed by setting it to None,
    # perform a noop as it still needs to be a function.
    if processor is None:
        processor = passthrough

    # Run the processor on the input query.
    processed_query = processor(query)
    if len(processed_query) == 0:
        logging.warning(u"Applied processor reduces input query to empty string, "
                        "all comparisons will have score 0. "
                        "[Query: \'{0}\']".format(query))

    # Don't run full_process twice.
    if scorer in (fuzz.WRatio, fuzz.QRatio,
                  fuzz.token_set_ratio, fuzz.token_sort_ratio,
                  fuzz.partial_token_set_ratio, fuzz.partial_token_sort_ratio,
                  fuzz.UWRatio, fuzz.UQRatio) \
            and processor == utils.full_process:
        processor = passthrough

    # Only process the query once instead of once per choice.
    if scorer in (fuzz.UWRatio, fuzz.UQRatio):
        pre_processor = partial(utils.full_process, force_ascii=False)
        scorer = partial(scorer, full_process=False)
    elif scorer in (fuzz.WRatio, fuzz.QRatio,
                    fuzz.token_set_ratio, fuzz.token_sort_ratio,
                    fuzz.partial_token_set_ratio, fuzz.partial_token_sort_ratio):
        pre_processor = partial(utils.full_process, force_ascii=True)
        scorer = partial(scorer, full_process=False)
    else:
        pre_processor = passthrough
    processed_query = pre_processor(processed_query)

    try:
        # Dictionary-like choices: yield the key along with each match.
        for key, choice in choices.items():
            processed = pre_processor(processor(choice))
            score = scorer(processed_query, processed)
            if score >= score_cutoff:
                yield (choice, score, key)
    except AttributeError:
        # List-like choices: just iterate.
        for choice in choices:
            processed = pre_processor(processor(choice))
            score = scorer(processed_query, processed)
            if score >= score_cutoff:
                yield (choice, score)
784a619b06fed48a5b7d4c8f4c711da954125c9c
24,404
def mark_dts_nn(marked_dict):
    """Mark the first matching noun after each focus-marked determiner.

    Walks a dictionary representation of the XML-text in which determiners
    already carry "focus": 1. For each such determiner, the remaining words
    of the same sentence are scanned and the first noun whose msd tag is one
    of the singular-indefinite noun tags is marked with "focus": 2; the scan
    then moves on to the next determiner.

    Single-word sentences (often just a bracket or similar character in the
    XML-text) are skipped via the isinstance() checks on the word metadata —
    a band-aid fix kept from the original, since such sentences are unlikely
    to be relevant here.

    Args:
        marked_dict: dictionary representation of the XML-text, with the
            word metadata attribute "focus" added (only determiners marked).

    Returns:
        nn_marked_dict: a deep copy of marked_dict with the "focus"
            attribute set for both determiners (1) and their nouns (2).
    """
    noun_tags = ('NN.NEU.SIN.IND.NOM', 'NN.UTR.SIN.IND.NOM', 'NN.UTR.SIN.IND.GEN')

    def _mark_first_noun(words, determiner):
        # Scan from the determiner's position to the end of the sentence and
        # mark the first noun of an accepted type, then stop.
        for candidate in words[words.index(determiner):]:
            if candidate['msd'] in noun_tags:
                candidate['focus'] = 2
                break

    nn_marked_dict = deepcopy(marked_dict)
    for paragraph in nn_marked_dict['corpus']['text']['lessontext']['paragraph']:
        sentence_lvl = paragraph['sentence']
        if isinstance(sentence_lvl, dict):
            for word_meta in sentence_lvl['w']:
                if isinstance(word_meta, dict) and word_meta['focus'] == 1:
                    _mark_first_noun(sentence_lvl['w'], word_meta)
        elif isinstance(sentence_lvl, list):
            for sentence in sentence_lvl:
                for word_meta in sentence['w']:
                    if isinstance(word_meta, dict) and word_meta['focus'] == 1:
                        _mark_first_noun(sentence['w'], word_meta)
        else:
            print("Found something that is not a dict/list!")
    return nn_marked_dict
2db6e21a3ea1f4249ef13fd7a235839a8a2d1871
24,405
def reservation_rollback(context, reservations, project_id=None, user_id=None):
    """Roll back quota reservations.

    Thin pass-through to the configured backend implementation (IMPL);
    all arguments are forwarded unchanged.
    """
    return IMPL.reservation_rollback(
        context,
        reservations,
        project_id=project_id,
        user_id=user_id,
    )
fcb5a82522320ffe6a6d262eb6571153aaa55b29
24,406
def encrypt(
    security_control: SecurityControlField,
    system_title: bytes,
    invocation_counter: int,
    key: bytes,
    plain_text: bytes,
    auth_key: bytes,
) -> bytes:
    """
    Encrypt and authenticate ``plain_text`` according to the security context.

    :param security_control: security control field describing the suite and
        encryption/authentication flags
    :param system_title: 8-byte system title used as the IV prefix
    :param invocation_counter: frame counter, packed big-endian into the IV
    :param key: the (global/dedicated) encryption key
    :param plain_text: data to encrypt
    :param auth_key: authentication key included in the associated data
    :return: ciphertext with the truncated GCM authentication tag appended
    :raises NotImplementedError: for non authenticated-encryption contexts
    :raises ValueError: if the system title is not 8 bytes long
    """
    # NOTE(review): this guard only rejects the neither-encrypted-nor-
    # authenticated case; encrypted-only or authenticated-only contexts fall
    # through even though the message says only authenticated encryption is
    # handled — confirm whether those modes should also be rejected.
    if not security_control.encrypted and not security_control.authenticated:
        raise NotImplementedError("encrypt() only handles authenticated encryption")

    if len(system_title) != 8:
        # FIX: corrected "lenght" typo in the error message.
        raise ValueError(f"System Title must be of length 8, not {len(system_title)}")

    # The initialization vector is 12 bytes long and consists of the
    # system_title (8 bytes) and invocation_counter (4 bytes).
    iv = system_title + invocation_counter.to_bytes(4, "big")

    # Make sure the keys are of correct length for the specified security suite.
    validate_key(security_control.security_suite, key)
    validate_key(security_control.security_suite, auth_key)

    # Construct an AES-GCM Cipher object with the given key and iv. Allow for
    # truncating the auth tag.
    encryptor = Cipher(
        algorithms.AES(key),
        modes.GCM(initialization_vector=iv, tag=None, min_tag_length=TAG_LENGTH),
    ).encryptor()

    # associated_data is authenticated but not encrypted; it must also be
    # passed in on decryption.
    associated_data = security_control.to_bytes() + auth_key
    encryptor.authenticate_additional_data(associated_data)

    # Encrypt the plaintext and get the associated ciphertext.
    # GCM does not require padding.
    ciphertext = encryptor.update(plain_text) + encryptor.finalize()

    # DLMS uses a tag length of 12, not the default of 16. Since the minimum
    # tag length was set to 12, it is OK to truncate the tag to 12 bytes.
    tag = encryptor.tag[:TAG_LENGTH]

    return ciphertext + tag
f03066da2ab54e784063f01255b9f3f53050a2cf
24,407
def leftmost_turn(segment, intersection, zs):
    """Find the continuation making the leftmost turn relative to a segment.

    FIX: the original used Python-2 tuple parameter unpacking
    (``def f(((x0, y0), (x1, y1)), (x, y), zs)``), which is a SyntaxError in
    Python 3; the tuples are now unpacked in the body. Callers pass the same
    arguments as before.

    Arguments:
        segment: ((x0, y0), (x1, y1)) — where we started and the direction
            point we were travelling toward.
        intersection: (x, y) — where we intersected one or more alternative
            line segments.
        zs: set of points defining the directions we could move in.
            NOTE: when len(zs) == 1 the single element is *popped*
            (mutating the set), matching the original behaviour.

    Returns:
        ((x, y), z): the intersection point and the chosen continuation
        point — the one at the most counter-clockwise angle relative to
        the current heading, ties broken by segment length.
    """
    (x0, y0), (x1, y1) = segment
    x, y = intersection
    if len(zs) == 1:
        return (x, y), zs.pop()

    theta = atan2(y1 - y, x1 - x)  # Direction currently headed.

    def turn_key(candidate):
        xn, yn = candidate
        # Angle of the candidate relative to the heading, wrapped to (-pi, pi].
        phi = atan2(yn - y, xn - x) - theta
        if phi < -pi:
            phi += 2 * pi
        elif phi > pi:
            phi -= 2 * pi
        # Tie-breaker is the squared length of the segment.
        len2 = (yn - y) * (yn - y) + (xn - x) * (xn - x)
        return phi, len2

    return (x, y), max(zs, key=turn_key)
bfe1650a92e38612461942ddfcee5faaad96ad5f
24,408
def maya_window():
    """Return Maya's main window wrapped as a Qt object.

    Returns:
        QtWidgets.QWidget: the Maya main window, as produced by
        ``to_qwidget("MayaWindow")``.
    """
    return to_qwidget("MayaWindow")
bea4ef97a14bb93a461f0dd54dbb6e9a25a14a63
24,409
def soap2Dict(soapObj):
    """Recursively convert a suds object tree into plain dicts and lists.

    A recursive version of sudsobject.asdict: suds objects become dicts,
    lists are converted element-wise, and anything else is returned as-is.
    """
    if isinstance(soapObj, sudsobject.Object):
        return {field: soap2Dict(value) for field, value in soapObj}
    if isinstance(soapObj, list):
        return [soap2Dict(item) for item in soapObj]
    return soapObj
46d5b767640a1b8c506f85d03580508d9b2278f0
24,410
def generate_sequential_BAOAB_string(force_group_list, symmetric=True):
    """Generate BAOAB-like schemes where "V R" is split into sequential per-force-group updates.

    Each force group i contributes a "V{i} R" pair before (and, when
    symmetric, after, in reverse order) the "O" step.

    E.g. force_group_list=(0,1,2), symmetric=True -->
        "V0 R V1 R V2 R O R V2 R V1 R V0"
    force_group_list=(0,1,2), symmetric=False -->
        "V0 R V1 R V2 R O V0 R V1 R V2 R"
    """
    vr_steps = [
        token
        for group in force_group_list
        for token in ("V{}".format(group), "R")
    ]
    if symmetric:
        schedule = vr_steps + ["O"] + vr_steps[::-1]
    else:
        schedule = vr_steps + ["O"] + vr_steps
    return " ".join(schedule)
7710775e365f0caae81a9737feec18c662790bde
24,411
def _get_active_tab(visible_tabs, request_path): """ return the tab that claims the longest matching url_prefix if one tab claims '/a/{domain}/data/' and another tab claims '/a/{domain}/data/edit/case_groups/' then the second tab wins because it's a longer match. """ matching_tabs = sorted( (url_prefix, tab) for tab in visible_tabs for url_prefix in tab.url_prefixes if request_path.startswith(url_prefix) ) if matching_tabs: _, tab = matching_tabs[-1] return tab
ac9cd34d4b4ee1c1c0356499b389c1f6a7195585
24,412
import os


def path_normalize(path, target_os=None):
    """Normalize ``path`` (like os.path.normpath) for the given OS.

    >>> path_normalize('foo/toto/../bar') == os.path.normpath('foo/toto/../bar')
    True

    Currently this uses os.path, i.e. the separator and rules of the
    machine running the code. A NotImplementedError is raised if the
    ``target_os`` argument differs from ``os.name``.

    :param path: path to normalize.
    :param target_os: optional ``os.name``-style identifier (e.g. 'posix',
        'nt'); when given it must match the host's ``os.name``.
    :raises NotImplementedError: if ``target_os`` names a different OS.
    """
    # FIX: compare strings with != instead of `is not` — identity comparison
    # on strings only works by accident of CPython interning.
    if target_os and target_os != os.name:
        raise NotImplementedError('Cannot join path with "{target}" style. '
                                  'Host OS is "{host}".'.format(
                                      target=target_os, host=os.name))
    return os.path.normpath(path)
581713d5ffa48db4f0c368a69ad2cfc932f92a51
24,413
import logging


def fit_scale_heights(data, masks, min_lat=None, max_lat=None, deredden=False,
                      fig_names=None, return_smoothed=False, smoothed_width=None,
                      xlim=None, ylim=None, robust=True, n_boot=10000):
    """
    Fit scale height data and return slopes.

    Parameters
    ----------
    data: `skySurvey`
        WHAM skySurvey object of full sky (requires track keyword), or
        spiral arm section
    masks: `list like`
        longitude masks to use
    min_lat: `u.Quantity`
        min latitude to fit (bare numbers are interpreted as degrees)
    max_lat: `u.Quantity`
        max latitude to fit
    deredden: `bool`
        if True, also fits dereddened slopes
    fig_names: `str`
        if provided, saves figures following this name
    return_smoothed: `bool`
        if True, returns smoothed longitude and slope estimates
    smoothed_width: `u.Quantity`
        width to smooth data to in longitude
    xlim, ylim: array-like, optional
        plotting limits used when fig_names is provided
    robust: `bool`
        if True, uses statsmodels robust linear model with bootstrapping
    n_boot: `int`
        number of bootstrap resamples (only when robust=True)

    Returns
    -------
    results : dict
        arrays of slopes / intercepts (and errors in the robust case),
        plus smoothed estimates when return_smoothed is True.

    Fixes relative to the previous revision:
    - the sparse-data branch (fewer than 5 points on either side)
      referenced ``boot_pos``/``boot_neg`` before they were defined
      (NameError on the first mask; stale values afterwards) — it now
      records NaNs, which the NaN-aware smoothing already tolerates;
    - ``results["median_distance"]`` was stored as a 1-tuple because of a
      trailing comma — now stored as the array itself;
    - the smoothing window compared a Python list against a scalar — the
      longitudes are now converted to an ndarray first.
    """
    # Default values (bare numbers are interpreted as degrees).
    if min_lat is None:
        min_lat = 5 * u.deg
    elif not hasattr(min_lat, "unit"):
        min_lat *= u.deg
    if max_lat is None:
        max_lat = 35 * u.deg
    elif not hasattr(max_lat, "unit"):
        max_lat *= u.deg
    if smoothed_width is None:
        smoothed_width = 5 * u.deg
    elif not hasattr(smoothed_width, "unit"):
        smoothed_width *= u.deg

    # Initialize data arrays.
    slopes_pos = []
    slopes_neg = []
    slopes_pos_dr = []
    slopes_neg_dr = []
    intercept_pos = []
    intercept_neg = []
    intercept_pos_dr = []
    intercept_neg_dr = []
    slopes_pos_err = []
    slopes_neg_err = []
    slopes_pos_dr_err = []
    slopes_neg_dr_err = []
    intercept_pos_err = []
    intercept_neg_err = []
    intercept_pos_dr_err = []
    intercept_neg_dr_err = []
    median_longitude = []
    median_distance = []

    # NOTE(review): with an empty `masks`, `results` is never assigned and
    # the final return raises NameError — confirm callers never pass [].
    for ell2 in range(len(masks)):
        xx = data["tan(b)"][masks[ell2]]
        yy = np.log(data["INTEN"][masks[ell2]])
        nan_mask = np.isnan(yy)
        nan_mask |= np.isinf(yy)
        if deredden:
            zz = np.log(data["INTEN_DERED"][masks[ell2]])
            nan_mask_z = np.isnan(zz)
            nan_mask_z |= np.isinf(zz)
        median_longitude.append(np.median(data["GAL-LON"][masks[ell2]]))
        if deredden:
            median_distance.append(np.median(data["DISTANCE"][masks[ell2]]))
        y_min = np.tan(min_lat)
        y_max = np.tan(max_lat)
        if not robust:
            # Non-robust path: Siegel (or Theil-Sen fallback) slope fits.
            if hasattr(stats, "siegelslopes"):
                slope_estimator = stats.siegelslopes
            else:
                logging.warning("Installed version of scipy does not have the siegelslopes method in scipy.stats!")
                slope_estimator = stats.theilslopes
            siegel_result_pos = slope_estimator(yy[(xx > y_min) & (xx < y_max) & ~nan_mask],
                                                xx[(xx > y_min) & (xx < y_max) & ~nan_mask])
            siegel_result_neg = slope_estimator(yy[(xx < -y_min) & (xx > -y_max) & ~nan_mask],
                                                xx[(xx < -y_min) & (xx > -y_max) & ~nan_mask])
            if deredden:
                siegel_result_pos_dr = slope_estimator(zz[(xx > y_min) & (xx < y_max) & ~nan_mask_z],
                                                       xx[(xx > y_min) & (xx < y_max) & ~nan_mask_z])
                siegel_result_neg_dr = slope_estimator(zz[(xx < -y_min) & (xx > -y_max) & ~nan_mask_z],
                                                       xx[(xx < -y_min) & (xx > -y_max) & ~nan_mask_z])
            slopes_pos.append(siegel_result_pos[0])
            slopes_neg.append(siegel_result_neg[0])
            intercept_pos.append(siegel_result_pos[1])
            intercept_neg.append(siegel_result_neg[1])
            if deredden:
                slopes_pos_dr.append(siegel_result_pos_dr[0])
                slopes_neg_dr.append(siegel_result_neg_dr[0])
                intercept_pos_dr.append(siegel_result_pos_dr[1])
                intercept_neg_dr.append(siegel_result_neg_dr[1])
            if fig_names is not None:
                figure_name = "{0}_{1}.png".format(fig_names, ell2)
                if xlim is None:
                    xlim = np.array([-0.9, 0.9])
                if ylim is None:
                    ylim = np.array([-4.6, 3.2])
                fig = plt.figure()
                ax = fig.add_subplot(111)
                ax2 = ax.twiny()
                ax.scatter(xx, yy, color="k", alpha=0.8)
                if deredden:
                    ax.scatter(xx, zz, color="grey", alpha=0.8)
                ax.set_xlabel(r"$\tan$(b)", fontsize=12)
                ax.set_ylabel(r"$\log$($H\alpha$ Intensity / R)", fontsize=12)
                ax.set_title(r"${0:.1f} < l < {1:.1f}$".format(data["GAL-LON"][masks[ell2]].min(),
                                                               data["GAL-LON"][masks[ell2]].max()),
                             fontsize=14)
                ax2.plot(np.degrees(np.arctan(xlim)), np.log([0.1, 0.1]),
                         ls=":", lw=1, color="k", label="0.1 R")
                ax2.fill_between([-min_lat, min_lat] * u.deg,
                                 [ylim[0], ylim[0]], [ylim[1], ylim[1]],
                                 color=pal[1], alpha=0.1, label=r"$|b| < 5\degree$")
                line_xx = np.linspace(y_min, y_max, 10)
                line_yy_pos = siegel_result_pos[0] * line_xx + siegel_result_pos[1]
                line_yy_neg = siegel_result_neg[0] * -line_xx + siegel_result_neg[1]
                ax.plot(line_xx, line_yy_pos, color="r", lw=3, alpha=0.9,
                        label=r"$H_{{n_e^2}} = {0:.2f} D$".format(1 / -siegel_result_pos[0]))
                ax.plot(-line_xx, line_yy_neg, color="b", lw=3, alpha=0.9,
                        label=r"$H_{{n_e^2}} = {0:.2f} D$".format(1 / siegel_result_neg[0]))
                if deredden:
                    line_yy_pos_dr = siegel_result_pos_dr[0] * line_xx + siegel_result_pos_dr[1]
                    line_yy_neg_dr = siegel_result_neg_dr[0] * -line_xx + siegel_result_neg_dr[1]
                    ax.plot(line_xx, line_yy_pos_dr, color="r", lw=3, alpha=0.9, ls="--",
                            label=r"Dered: $H_{{n_e^2}} = {0:.2f} D$".format(1 / -siegel_result_pos_dr[0]))
                    ax.plot(-line_xx, line_yy_neg_dr, color="b", lw=3, alpha=0.9, ls="--",
                            label=r"Dered: $H_{{n_e^2}} = {0:.2f} D$".format(1 / siegel_result_neg_dr[0]))
                ax.set_xlim(xlim)
                ax.set_ylim(ylim)
                ax2.set_xlabel(r"$b$ (deg)", fontsize=12)
                ax2.set_xlim(np.degrees(np.arctan(xlim)))
                ax.legend(fontsize=12, loc=1)
                ax2.legend(fontsize=12, loc=2)
                plt.tight_layout()
                plt.savefig(figure_name, dpi=300)
                del fig
                plt.close()
            results = {
                "median_longitude": np.array(median_longitude),
                "slopes_pos": np.array(slopes_pos),
                "slopes_neg": np.array(slopes_neg),
                "intercept_pos": np.array(intercept_pos),
                "intercept_neg": np.array(intercept_neg)
            }
            if deredden:
                # FIX: removed trailing comma that made this a 1-tuple.
                results["median_distance"] = np.array(median_distance)
                results["slopes_pos_dr"] = np.array(slopes_pos_dr)
                results["slopes_neg_dr"] = np.array(slopes_neg_dr)
                results["intercept_pos_dr"] = np.array(intercept_pos_dr)
                results["intercept_neg_dr"] = np.array(intercept_neg_dr)
        else:
            # Robust path: statsmodels RLM with bootstrap error estimates.
            yy_pos = yy[(xx > y_min) & (xx < y_max) & ~nan_mask]
            xx_pos = xx[(xx > y_min) & (xx < y_max) & ~nan_mask]
            yy_neg = yy[(xx < -y_min) & (xx > -y_max) & ~nan_mask]
            xx_neg = xx[(xx < -y_min) & (xx > -y_max) & ~nan_mask]
            if (len(yy_pos) < 5) | (len(yy_neg) < 5):
                # FIX: the original appended statistics of boot_pos/boot_neg
                # here, but those are only defined in the else branch below
                # (NameError on the first sparse mask, stale values after).
                # Record NaNs instead; the smoothing step uses nanpercentile.
                slopes_pos.append(np.nan)
                slopes_neg.append(np.nan)
                slopes_pos_err.append(np.nan)
                slopes_neg_err.append(np.nan)
                intercept_pos.append(np.nan)
                intercept_neg.append(np.nan)
                intercept_pos_err.append(np.nan)
                intercept_neg_err.append(np.nan)
            else:
                if deredden:
                    zz_dr_pos = zz[(xx > y_min) & (xx < y_max) & ~nan_mask_z]
                    xx_dr_pos = xx[(xx > y_min) & (xx < y_max) & ~nan_mask_z]
                    zz_dr_neg = zz[(xx < -y_min) & (xx > -y_max) & ~nan_mask_z]
                    xx_dr_neg = xx[(xx < -y_min) & (xx > -y_max) & ~nan_mask_z]

                    def slope_int_estimator_pos_dr(inds, YY=zz_dr_pos, XX=xx_dr_pos):
                        """estimate slope using sm.RLM"""
                        XX = XX[inds]
                        YY = YY[inds]
                        XX = sm.add_constant(XX)
                        res = sm.RLM(YY, XX, M=sm.robust.norms.HuberT()).fit()
                        return res.params

                    def slope_int_estimator_neg_dr(inds, YY=zz_dr_neg, XX=xx_dr_neg):
                        """estimate slope using sm.RLM"""
                        XX = XX[inds]
                        YY = YY[inds]
                        XX = sm.add_constant(XX)
                        res = sm.RLM(YY, XX, M=sm.robust.norms.HuberT()).fit()
                        return res.params

                def slope_int_estimator_pos(inds, YY=yy_pos, XX=xx_pos):
                    """estimate slope using sm.RLM"""
                    XX = XX[inds]
                    YY = YY[inds]
                    XX = sm.add_constant(XX)
                    res = sm.RLM(YY, XX, M=sm.robust.norms.HuberT()).fit()
                    return res.params

                def slope_int_estimator_neg(inds, YY=yy_neg, XX=xx_neg):
                    """estimate slope using sm.RLM"""
                    XX = XX[inds]
                    YY = YY[inds]
                    XX = sm.add_constant(XX)
                    res = sm.RLM(YY, XX, M=sm.robust.norms.HuberT()).fit()
                    return res.params

                boot_pos = bootstrap(np.arange(len(yy_pos)),
                                     func=slope_int_estimator_pos, n_boot=n_boot)
                boot_neg = bootstrap(np.arange(len(yy_neg)),
                                     func=slope_int_estimator_neg, n_boot=n_boot)
                slopes_pos.append(np.mean(boot_pos[:, 1], axis=0))
                slopes_neg.append(np.mean(boot_neg[:, 1], axis=0))
                slopes_pos_err.append(np.std(boot_pos[:, 1], axis=0))
                slopes_neg_err.append(np.std(boot_neg[:, 1], axis=0))
                intercept_pos.append(np.mean(boot_pos[:, 0], axis=0))
                intercept_neg.append(np.mean(boot_neg[:, 0], axis=0))
                intercept_pos_err.append(np.std(boot_pos[:, 0], axis=0))
                intercept_neg_err.append(np.std(boot_neg[:, 0], axis=0))
                if deredden:
                    boot_pos_dr = bootstrap(np.arange(len(zz_dr_pos)),
                                            func=slope_int_estimator_pos_dr, n_boot=n_boot)
                    boot_neg_dr = bootstrap(np.arange(len(zz_dr_neg)),
                                            func=slope_int_estimator_neg_dr, n_boot=n_boot)
                    slopes_pos_dr.append(np.mean(boot_pos_dr[:, 1], axis=0))
                    slopes_neg_dr.append(np.mean(boot_neg_dr[:, 1], axis=0))
                    slopes_pos_dr_err.append(np.std(boot_pos_dr[:, 1], axis=0))
                    slopes_neg_dr_err.append(np.std(boot_neg_dr[:, 1], axis=0))
                    intercept_pos_dr.append(np.mean(boot_pos_dr[:, 0], axis=0))
                    intercept_neg_dr.append(np.mean(boot_neg_dr[:, 0], axis=0))
                    intercept_pos_dr_err.append(np.std(boot_pos_dr[:, 0], axis=0))
                    intercept_neg_dr_err.append(np.std(boot_neg_dr[:, 0], axis=0))
                if fig_names is not None:
                    figure_name = "{0}_{1}.png".format(fig_names, ell2)
                    if xlim is None:
                        xlim = np.array([-0.9, 0.9])
                    if ylim is None:
                        ylim = np.array([-4.6, 3.2])
                    fig = plt.figure()
                    ax = fig.add_subplot(111)
                    ax2 = ax.twiny()
                    ax.scatter(xx, yy, color="k", alpha=0.8)
                    if deredden:
                        ax.scatter(xx, zz, color="grey", alpha=0.8)
                    ax.set_xlabel(r"$\tan$(b)", fontsize=12)
                    ax.set_ylabel(r"$\log$($H\alpha$ Intensity / R)", fontsize=12)
                    ax.set_title(r"${0:.1f} < l < {1:.1f}$".format(data["GAL-LON"][masks[ell2]].min(),
                                                                   data["GAL-LON"][masks[ell2]].max()),
                                 fontsize=14)
                    ax2.plot(np.degrees(np.arctan(xlim)), np.log([0.1, 0.1]),
                             ls=":", lw=1, color="k", label="0.1 R")
                    ax2.fill_between([-min_lat, min_lat] * u.deg,
                                     [ylim[0], ylim[0]], [ylim[1], ylim[1]],
                                     color=pal[1], alpha=0.1, label=r"$|b| < 5\degree$")
                    line_xx = np.linspace(y_min, y_max, 100)

                    def get_slope_conf_band(boot_res, X=line_xx):
                        # 5th-95th percentile envelope of the bootstrapped lines.
                        yy = [[res[0] + res[1] * X] for res in boot_res]
                        yy = np.vstack(yy)
                        return np.percentile(yy, (5, 95), axis=0)

                    line_yy_pos = slopes_pos[-1] * line_xx + intercept_pos[-1]
                    line_yy_neg = slopes_neg[-1] * -line_xx + intercept_neg[-1]
                    line_yy_pos_range = get_slope_conf_band(boot_pos)
                    line_yy_neg_range = get_slope_conf_band(boot_neg, X=-line_xx)
                    ax.plot(line_xx, line_yy_pos, color="r", lw=3, alpha=0.9,
                            label=r"$H_{{n_e^2}} = ({0:.2f} \pm {1:.2f}) D$".format(
                                1 / -slopes_pos[-1],
                                np.abs(1 / slopes_pos[-1] * slopes_pos_err[-1] / slopes_pos[-1])))
                    ax.fill_between(line_xx, line_yy_pos_range[0], line_yy_pos_range[1],
                                    color="r", alpha=0.2)
                    # NOTE(review): the uncertainty in this negative-side label
                    # reuses slopes_pos/slopes_pos_err — looks like a copy-paste
                    # slip; kept as-is to preserve output, confirm intent.
                    ax.plot(-line_xx, line_yy_neg, color="b", lw=3, alpha=0.9,
                            label=r"$H_{{n_e^2}} = ({0:.2f} \pm {1:.2f}) D$".format(
                                1 / slopes_neg[-1],
                                np.abs(-1 / slopes_pos[-1] * slopes_pos_err[-1] / slopes_pos[-1])))
                    ax.fill_between(-line_xx, line_yy_neg_range[0], line_yy_neg_range[1],
                                    color="b", alpha=0.2)
                    if deredden:
                        line_yy_pos_dr = slopes_pos_dr[-1] * line_xx + intercept_pos_dr[-1]
                        line_yy_neg_dr = slopes_neg_dr[-1] * -line_xx + intercept_neg_dr[-1]
                        line_yy_pos_range_dr = get_slope_conf_band(boot_pos_dr)
                        line_yy_neg_range_dr = get_slope_conf_band(boot_neg_dr, X=-line_xx)
                        ax.plot(line_xx, line_yy_pos_dr, color="r", lw=3, alpha=0.9, ls="--",
                                label=r"Dered: $H_{{n_e^2}} = ({0:.2f} \pm {1:.2f}) D$".format(
                                    1 / -slopes_pos_dr[-1],
                                    np.abs(1 / slopes_pos_dr[-1] * slopes_pos_dr_err[-1] / slopes_pos_dr[-1])))
                        ax.fill_between(line_xx, line_yy_pos_range_dr[0], line_yy_pos_range_dr[1],
                                        color="r", alpha=0.2)
                        ax.plot(-line_xx, line_yy_neg_dr, color="b", lw=3, alpha=0.9, ls="--",
                                label=r"Dered: $H_{{n_e^2}} = ({0:.2f} \pm {1:.2f}) D$".format(
                                    1 / slopes_neg_dr[-1],
                                    np.abs(-1 / slopes_pos_dr[-1] * slopes_pos_dr_err[-1] / slopes_pos_dr[-1])))
                        ax.fill_between(-line_xx, line_yy_neg_range_dr[0], line_yy_neg_range_dr[1],
                                        color="b", alpha=0.2)
                    ax.set_xlim(xlim)
                    ax.set_ylim(ylim)
                    ax2.set_xlabel(r"$b$ (deg)", fontsize=12)
                    ax2.set_xlim(np.degrees(np.arctan(xlim)))
                    ax.legend(fontsize=12, loc=1)
                    ax2.legend(fontsize=12, loc=2)
                    plt.tight_layout()
                    plt.savefig(figure_name, dpi=300)
                    del fig
                    plt.close()
            results = {
                "median_longitude": np.array(median_longitude),
                "slopes_pos": np.array(slopes_pos),
                "slopes_neg": np.array(slopes_neg),
                "intercept_pos": np.array(intercept_pos),
                "intercept_neg": np.array(intercept_neg),
                "slopes_pos_err": np.array(slopes_pos_err),
                "slopes_neg_err": np.array(slopes_neg_err),
                "intercept_pos_err": np.array(intercept_pos_err),
                "intercept_neg_err": np.array(intercept_neg_err)
            }
            if deredden:
                # FIX: removed trailing comma that made this a 1-tuple.
                results["median_distance"] = np.array(median_distance)
                results["slopes_pos_dr"] = np.array(slopes_pos_dr)
                results["slopes_neg_dr"] = np.array(slopes_neg_dr)
                results["intercept_pos_dr"] = np.array(intercept_pos_dr)
                results["intercept_neg_dr"] = np.array(intercept_neg_dr)
                results["slopes_pos_dr_err"] = np.array(slopes_pos_dr_err)
                results["slopes_neg_dr_err"] = np.array(slopes_neg_dr_err)
                results["intercept_pos_dr_err"] = np.array(intercept_pos_dr_err)
                results["intercept_neg_dr_err"] = np.array(intercept_neg_dr_err)

    if return_smoothed:
        results["smoothed_longitude"] = np.arange(np.min(median_longitude),
                                                  np.max(median_longitude), 0.25)
        if deredden:
            distance_interp = interp1d(median_longitude, median_distance)
            results["smoothed_distance"] = distance_interp(results["smoothed_longitude"])
        smoothed_slope_pos_ha = np.zeros((3, len(results["smoothed_longitude"])))
        smoothed_slope_neg_ha = np.zeros((3, len(results["smoothed_longitude"])))
        smoothed_slope_pos_ha_dr = np.zeros((3, len(results["smoothed_longitude"])))
        smoothed_slope_neg_ha_dr = np.zeros((3, len(results["smoothed_longitude"])))
        # FIX: comparisons below need an ndarray, not a Python list.
        median_longitude_arr = np.asarray(median_longitude)
        half_width = smoothed_width.value / 2
        for ell, lon in enumerate(results["smoothed_longitude"]):
            in_window = ((median_longitude_arr <= lon + half_width)
                         & (median_longitude_arr > lon - half_width))
            smoothed_slope_pos_ha[:, ell] = np.nanpercentile(
                np.array(slopes_pos)[in_window], (10, 50, 90))
            smoothed_slope_neg_ha[:, ell] = np.nanpercentile(
                np.array(slopes_neg)[in_window], (10, 50, 90))
            if deredden:
                smoothed_slope_pos_ha_dr[:, ell] = np.nanpercentile(
                    np.array(slopes_pos_dr)[in_window], (10, 50, 90))
                smoothed_slope_neg_ha_dr[:, ell] = np.nanpercentile(
                    np.array(slopes_neg_dr)[in_window], (10, 50, 90))
        results["smoothed_slopes_pos"] = smoothed_slope_pos_ha
        results["smoothed_slopes_neg"] = smoothed_slope_neg_ha
        if deredden:
            results["smoothed_slopes_pos_dr"] = smoothed_slope_pos_ha_dr
            results["smoothed_slopes_neg_dr"] = smoothed_slope_neg_ha_dr
    return results
fe2cd6d1cc1dfa18b7a78593e326f80ee99222bc
24,414
from typing import List
from typing import Dict
from typing import Tuple
from typing import Set
import warnings


def _check_meas_specs_still_todo(
    meas_specs: List[_MeasurementSpec],
    accumulators: Dict[_MeasurementSpec, BitstringAccumulator],
    stopping_criteria: StoppingCriteria,
) -> Tuple[List[_MeasurementSpec], int]:
    """Filter `meas_specs` in case some are done.

    In the sampling loop in `measure_grouped_settings`, each `meas_spec`
    is submitted in chunks. This function contains the logic for dropping
    finished `meas_spec`s from the loop, and for throttling the per-call
    repetition count so one batched job never exceeds
    MAX_REPETITIONS_PER_JOB shots.
    """
    still_todo: List[_MeasurementSpec] = []
    repetitions_set: Set[int] = set()
    for spec in meas_specs:
        n_more = stopping_criteria.more_repetitions(accumulators[spec])
        if n_more < 0:
            raise ValueError(
                "Stopping criteria's `more_repetitions` should return 0 or a positive number."
            )
        if n_more > 0:
            repetitions_set.add(n_more)
            still_todo.append(spec)

    if not still_todo:
        return still_todo, 0

    repetitions = _aggregate_n_repetitions(repetitions_set)
    if len(still_todo) * repetitions > MAX_REPETITIONS_PER_JOB:
        old_repetitions = repetitions
        repetitions = MAX_REPETITIONS_PER_JOB // len(still_todo)
        if repetitions < 10:
            raise ValueError(
                "You have requested too many parameter settings to batch your job effectively. "
                "Consider fewer sweeps or manually splitting sweeps into multiple jobs."
            )
        warnings.warn(
            f"The number of requested sweep parameters is high. To avoid a batched job with more "
            f"than {MAX_REPETITIONS_PER_JOB} shots, the number of shots per call to run_sweep "
            f"(per parameter value) will be throttled from {old_repetitions} to {repetitions}."
        )
    return still_todo, repetitions
bacb0a7b666a1a59bb0df722fe60530b0d4f4d6e
24,415
def get_aircon_mock(said):
    """Get a mock of an air conditioner.

    :param said: appliance SAID the mock reports.
    :return: a Mock configured with AsyncMock connect/fetch/set methods and
        canned getter return values for tests.
    """
    mock_aircon = mock.Mock(said=said)
    mock_aircon.connect = AsyncMock()
    mock_aircon.fetch_name = AsyncMock(return_value="TestZone")
    # Canned state reads used by the tests.
    mock_aircon.get_online.return_value = True
    mock_aircon.get_power_on.return_value = True
    mock_aircon.get_mode.return_value = whirlpool.aircon.Mode.Cool
    mock_aircon.get_fanspeed.return_value = whirlpool.aircon.FanSpeed.Auto
    mock_aircon.get_current_temp.return_value = 15
    mock_aircon.get_temp.return_value = 20
    mock_aircon.get_current_humidity.return_value = 80
    mock_aircon.get_humidity.return_value = 50
    mock_aircon.get_h_louver_swing.return_value = True
    # Async setters.
    # FIX: removed a duplicate `mock_aircon.set_mode = AsyncMock()` line
    # that re-assigned the attribute a second time.
    mock_aircon.set_power_on = AsyncMock()
    mock_aircon.set_mode = AsyncMock()
    mock_aircon.set_temp = AsyncMock()
    mock_aircon.set_humidity = AsyncMock()
    mock_aircon.set_fanspeed = AsyncMock()
    mock_aircon.set_h_louver_swing = AsyncMock()
    return mock_aircon
68833445b94b2194f73c9b699d925bb92dca010b
24,416
def mutation(individual):
    """
    Shuffle certain parameters of the network to keep evolving it.
    Concretely:
    - thresh, tau_v, tau_t, alpha_v, alpha_t, q

    Returns the individual as a one-tuple (DEAP mutation convention).
    """
    network = individual[0]
    network.update_params()
    return (individual,)
8ccd373f991cbf2e8161e6bbe32375ca8826e48c
24,417
def truncate_repeated_single_step_traversals_in_sub_queries(
    compound_match_query: CompoundMatchQuery,
) -> CompoundMatchQuery:
    """For each sub-query, remove one-step traversals that overlap a previous traversal location."""
    rewritten_queries = [
        truncate_repeated_single_step_traversals(sub_query)
        for sub_query in compound_match_query.match_queries
    ]
    return compound_match_query._replace(match_queries=rewritten_queries)
b5d264640fb65ff7162209a714257b0a65128e89
24,418
def intersect(list1, list2):
    """
    Compute the intersection of two sorted lists.

    Returns a new sorted list containing only elements that are in
    both list1 and list2. Uses a two-pointer merge, so it runs in
    O(len(list1) + len(list2)); duplicate values are emitted once per
    matching pair in both inputs.

    Fix: the original had a Python-2 ``print`` statement (a syntax error
    on Python 3) inside an unreachable ``else`` branch — by trichotomy
    of <, ==, > the branch could never fire — so it was removed.
    """
    result_list = []
    idx1 = 0
    idx2 = 0
    while idx1 < len(list1) and idx2 < len(list2):
        if list1[idx1] == list2[idx2]:
            result_list.append(list1[idx1])
            idx1 += 1
            idx2 += 1
        elif list1[idx1] < list2[idx2]:
            idx1 += 1
        else:  # list1[idx1] > list2[idx2]
            idx2 += 1
    return result_list
d0f50b466108f685dc74d227554ab057cac018ae
24,419
import typing


def get_parent_project_ids(project_id: int, only_if_child_can_add_users_to_parent: bool = False) -> typing.List[int]:
    """
    Return the list of parent project IDs for an existing project.

    :param project_id: the ID of an existing project
    :param only_if_child_can_add_users_to_parent: whether or not to only show
        those parent projects, which someone with GRANT permissions on this
        project can add users to (transitively)
    :return: list of project IDs
    """
    relationships: typing.Iterable[SubprojectRelationship] = SubprojectRelationship.query.filter_by(
        child_project_id=project_id
    ).all()
    # Keep every parent, unless filtering is requested and the relationship
    # does not allow adding users to the parent.
    return [
        relationship.parent_project_id
        for relationship in relationships
        if not only_if_child_can_add_users_to_parent or relationship.child_can_add_users_to_parent
    ]
b0c9d2241a0b114b3fcf531592b7f05000596fec
24,420
import glob
import os


def get_all_object_names(bucket, prefix=None, without_prefix=False):
    """
    Returns the names of all objects in the passed bucket

    An "object" is any path ending in a "._data" marker file; the search
    descends into subdirectories breadth-first starting at bucket/prefix.

    Args:
        bucket (str): Bucket path
        prefix (str, default=None): Prefix for keys
        without_prefix (bool, default=False): If True, strip the prefix
            from the returned names as well.

    Returns:
        list: List of object names
    """
    # Search root: the bucket, optionally narrowed by the prefix.
    root = bucket
    if prefix is not None:
        root = f"{bucket}/{prefix}"
    # Length of "<bucket>/" — used below to strip the bucket part (but not
    # the prefix) from each match.
    root_len = len(bucket) + 1
    if without_prefix:
        prefix_len = len(prefix)
    # NOTE(review): glob(f"{root}*") also matches sibling paths that merely
    # start with the prefix string (e.g. prefix "ab" matches "abc") — confirm
    # this partial-prefix matching is intended.
    subdir_names = glob.glob(f"{root}*")
    object_names = []
    # Breadth-first traversal: each pass consumes the current frontier and
    # gathers the next level of subdirectories.
    while True:
        names = subdir_names
        subdir_names = []
        for name in names:
            if name.endswith("._data"):
                # remove the ._data at the end
                name = name[root_len:-6]
                while name.endswith("/"):
                    name = name[0:-1]
                if without_prefix:
                    name = name[prefix_len:]
                    while name.startswith("/"):
                        name = name[1:]
                if len(name) > 0:
                    object_names.append(name)
            elif os.path.isdir(name):
                subdir_names += glob.glob(f"{name}/*")
        if len(subdir_names) == 0:
            break
    return object_names
c3f0757cd8416cc966fb0b11fbb74fb348ea7f48
24,421
def integral_length(v):
    """
    Compute the integral length of a given rational vector.

    INPUT:

    - ``v`` - any object which can be converted to a list of rationals

    OUTPUT:

    Rational number ``r`` such that ``v = r u``, where ``u`` is the
    primitive integral vector in the direction of ``v``.

    EXAMPLES::

        sage: lattice_polytope.integral_length([1, 2, 4])
        1
        sage: lattice_polytope.integral_length([2, 2, 4])
        2
        sage: lattice_polytope.integral_length([2/3, 2, 4])
        2/3
    """
    # Convert to rationals, then combine the gcd of the numerators with
    # the lcm of the denominators: gcd(n_i) / lcm(d_i).
    rationals = [QQ(entry) for entry in list(v)]
    numerators = [r.numerator() for r in rationals]
    denominators = [r.denominator() for r in rationals]
    return gcd(numerators) / lcm(denominators)
54d2b2726bea848e1a5836425516371fc09f54b3
24,422
def load_classification_pipeline(
    model_dir: str = "wukevin/tcr-bert", multilabel: bool = False, device: int = 0
) -> TextClassificationPipeline:
    """
    Load the pipeline object that does classification

    :param model_dir: HuggingFace hub id or local checkpoint directory.
    :param multilabel: if True, build a multi-label pipeline
        (one score per label) instead of a single-label pipeline.
    :param device: device index passed to the pipeline.
    :return: a pipeline wrapping the loaded model and tokenizer.
    """
    try:
        tok = ft.get_pretrained_bert_tokenizer(model_dir)
    except OSError:
        # No pretrained tokenizer shipped with the checkpoint; fall back to
        # the plain amino-acid BERT tokenizer (max length 64).
        tok = ft.get_aa_bert_tokenizer(64)

    if multilabel:
        model = BertForSequenceClassificationMulti.from_pretrained(model_dir)
        pipeline = TextMultiClassificationPipeline(
            model=model,
            tokenizer=tok,
            device=device,
            framework="pt",
            # NOTE(review): "mulitlabel" looks like a typo for "multilabel" —
            # confirm nothing matches on this task string before fixing it.
            task="mulitlabel_classification",
            return_all_scores=True,
        )
    else:
        model = BertForSequenceClassification.from_pretrained(model_dir)
        pipeline = TextClassificationPipeline(
            model=model, tokenizer=tok, return_all_scores=True, device=device
        )
    return pipeline
0811cdc4ddaac3992e1cec7f43d88df276356c5c
24,423
def skew_image(img, angle):
    """
    Skew image using some math

    :param img: PIL image object
    :param angle: Angle in radians (function doesn't do well outside the range -1 -> 1, but still works)
    :return: PIL image object
    """
    width, height = img.size

    # Get the width that is to be added to the image based on the angle of skew
    xshift = tan(abs(angle)) * height
    new_width = width + int(xshift)

    # NOTE(review): xshift is always >= 0 (tan of abs), so new_width can never
    # be negative; this guard appears to be purely defensive — confirm.
    if new_width < 0:
        return img

    # Apply transform.  The 6-tuple is the inverse affine matrix
    # (a, b, c, d, e, f) mapping output (x, y) to input
    # (a*x + b*y + c, d*x + e*y + f): a horizontal shear by `angle`,
    # shifted left by xshift for positive angles so the result stays
    # inside the widened canvas.
    img = img.transform(
        (new_width, height),
        Image.AFFINE,
        (1, angle, -xshift if angle > 0 else 0, 0, 1, 0),
        Image.BICUBIC
    )

    return img
5b52a87edc44669e9fad82efd5c594df12edee41
24,424
import logging


def test_process_bto_order_high_risk(monkeypatch, capsys, caplog):
    """BTO order should be correctly processed with high risk flag set
    """
    caplog.set_level(logging.INFO)
    # Pin user settings so the expected limit/stop/quantity are deterministic.
    monkeypatch.setitem(USR_SET, "high_risk_ord_value", 1000)
    monkeypatch.setitem(USR_SET, "buy_limit_percent", 0.03)
    monkeypatch.setitem(USR_SET, "SL_percent", 0.25)
    monkeypatch.setitem(VALID_ORD_INPUT, "contract_price", 2.00)
    flags = {"SL": None, "risk_level": "high risk", "reduce": None}
    monkeypatch.setitem(VALID_ORD_INPUT, "flags", flags)

    def mock_place_order(acct_num, order_spec):
        # Expected values from the pinned settings:
        # limit = 2.00 * 1.03 = 2.06; stop = 2.00 * (1 - 0.25) = 1.50;
        # quantity presumably floor(1000 / (2.06 * 100)) = 4 — TODO confirm
        # against am.process_bto_order's sizing logic.
        built_order = order_spec.build()
        assert built_order["price"] == "2.06"
        assert built_order["orderLegCollection"][0]["quantity"] == 4
        assert built_order["orderStrategyType"] == "TRIGGER"
        assert built_order["childOrderStrategies"][0]["orderType"] == "STOP"
        assert built_order["childOrderStrategies"][0]["stopPrice"] == "1.50"
        return "PASSAR"

    # Patch at the class level so any Client instance uses the mock.
    client = tda.client.Client
    monkeypatch.setattr(client, "place_order", mock_place_order)
    am.process_bto_order(client, "1234567890", VALID_ORD_INPUT, USR_SET)
    # The sentinel return value should surface in both stdout and the log.
    captured = capsys.readouterr()
    assert captured.out.split()[-1] == "PASSAR"
    logged = caplog.text
    assert logged.split()[-1] == "PASSAR"
0981a09686670ad8d941a438514832c17b541863
24,425
import types
import doctest


def _load_tests_from_module(tests, module, globs, setUp=None, tearDown=None):
    """Load tests from module, iterating through submodules.

    For every public attribute of *module* that is itself a module, a
    doctest suite (with ELLIPSIS enabled) is built and added to *tests*.

    :param tests: a unittest.TestSuite to extend (returned for chaining)
    :param module: the package whose public submodules are scanned
    :param globs: globals dict made available to the doctests
    :param setUp: optional per-test setUp callback passed to DocTestSuite
    :param tearDown: optional per-test tearDown callback
    :return: the same *tests* suite, extended in place
    """
    for attr in (getattr(module, x) for x in dir(module) if not x.startswith("_")):
        if isinstance(attr, types.ModuleType):
            suite = doctest.DocTestSuite(
                attr,
                globs,
                setUp=setUp,
                tearDown=tearDown,
                # was `+doctest.ELLIPSIS`: the unary plus was a no-op typo
                optionflags=doctest.ELLIPSIS,
            )
            tests.addTests(suite)
    return tests
068eb24fd826192730bfb7dde2c978ef42fb8475
24,426
def calculate_full_spectrum(xs, cp, ep=None, betas=(0,0), data=None):
    """Direct solution of the k-eigenvalue problem in integral transport
    by the collision probability method. Input data are the xs list and
    the collision probabilities in cp. Only isotropic scattering is allowed.
    A relation of albedo for the partial currents can be used at the
    boundary."""
    # xs layout: total, scattering, fission spectrum, nu*Sigma_f
    st, ss, chi, nsf = xs
    G, I = nsf.shape  # G energy groups, I spatial cells
    check_xs(xs)
    betaL, betaR = betas
    if (betaL < 0) or (betaL > 1):
        raise ValueError("betaL (left albedo) is not in valid range")
    elif betaL > 0:
        if ep is None:
            raise ValueError("betaL > 0, but no input escape probs")
        if data is None:
            raise ValueError("input mesh data is needed for VjoSbL")
        else:
            # r, geo, V, Sb = data.xi, data.geometry_type, data.Vi, data.Si[0]
            # V = calculate_volumes(r, geo)
            # Sb = calculate_surfaces(r, geo)[0]
            # volume-to-boundary-surface ratio at the left boundary
            VjoSbL = data.Vi / data.Si[0]
    if (betaR < 0) or (betaR > 1):
        raise ValueError("betaR (right albedo) is not in valid range")
    elif betaR > 0:
        if ep is None:
            raise ValueError("betaR > 0, but no input escape probs")
        if data is None:
            raise ValueError("input mesh data is needed for VjoSbR")
        else:
            VjoSbR = data.Vi / data.Si[-1]

    def get_rt(rpjx, st):
        # Split the uncollided fraction into reflection/transmission:
        # in slab geometry it is transmitted, otherwise reflected.
        total_collision = np.dot(rpjx, st)
        if data.geometry_type != 'slab':
            reflection, transmission = 1 - total_collision, 0
        else:
            reflection, transmission = 0, 1 - total_collision
        return reflection, transmission

    GI = G * I
    PS = np.zeros((GI, GI),)
    PX, F = np.zeros((GI, I),), np.zeros((I, GI),)
    # X = np.zeros_like(PX)
    Is = np.arange(I)
    for g in range(G):
        idx = slice(I * g, I * (g + 1))
        pji = np.transpose(cp[g,:,:] / st[g,:])  # reduced CP
        # apply b.c.
        eaj, ebj = -ep[g,0,:,1], ep[g,-1,:,0]
        if betaL > 0:
            # pja and pjb are both needed if refl at both sides
            pja = 4 * VjoSbL * eaj
            if betaR > 0:
                pjb = 4 * VjoSbR * ebj
        # NOTE(review): if betaR > 0 while betaL == 0, pjb is referenced
        # below without being assigned — confirm whether both albedos are
        # always used together or pjb's assignment should be unconditional.
        if betaL > 0:
            # multiple-reflection geometric series between the two boundaries
            r, t = get_rt(pja, st[g,:])
            coef = betaL / (1 - betaL * (r + t**2 * betaR / (1 - betaR * r)))
            pji += coef * np.dot(np.diag(eaj), np.tile(pja, (I, 1)))
            if betaR > 0:
                coef *= betaR * t
                pji += coef * np.dot(np.diag(eaj), np.tile(pjb, (I, 1)))
        if betaR > 0:
            r, t = get_rt(pjb, st[g,:])
            coef = betaR / (1 - betaR * (r + t**2 * betaL / (1 - betaL * r)))
            pji += coef * np.dot(np.diag(ebj), np.tile(pjb, (I, 1)))
            if betaL > 0:
                coef *= betaL * t
                pji += coef * np.dot(np.diag(ebj), np.tile(pja, (I, 1)))
        # X[Is + g * I, Is] = chi[g,:]
        F[Is, Is + g * I] = nsf[g,:]
        PX[idx,:] = pji * chi[g,:]
        for gg in range(G):
            jdx = slice(I * gg, I * (gg + 1))
            PS[idx, jdx] = pji * ss[g,gg,0,:]
    # Assemble (Id - P S) and solve the eigenproblem for H = F (Id - PS)^-1 P X.
    PS *= -1
    PS[np.diag_indices_from(PS)] += 1
    H = np.dot(F, np.dot(np.linalg.inv(PS), PX))
    return np.linalg.eig(H)
ad145dc3fc5ae57f6512cb01b1119a2fc150b4bd
24,427
def get_connectors_by_type(type : str):
    """
    Convenience method for `get_connectors()`.

    :param type: connector type to filter by. (The parameter shadows the
        builtin ``type``; kept as-is because keyword callers depend on it.)
    :return: whatever ``get_connectors(type)`` returns for this type.
    """
    return get_connectors(type)
7e41c2a37173a4d72d7d947aa5a166c23f102da0
24,428
def crawl(alphabet, initial, accepts, follow):
    """
    Create a new FSM from the above conditions.

    Breadth-first exploration of an implicit state space: ``initial`` is
    the start state, ``accepts(state)`` reports whether a state accepts,
    and ``follow(state, symbol)`` yields the successor (raising
    OblivionError when there is no transition).  Discovered states are
    renumbered 0..n-1 in order of first visit.
    """
    states = [initial]
    accepting = set()
    transition = dict()

    # `states` grows while we iterate, so a manual index (not a for-loop
    # over the list) is required.
    i = 0
    while i < len(states):
        state = states[i]

        if accepts(state):
            accepting.add(i)

        transition[i] = dict()
        for symbol in alphabet:
            try:
                next_states = follow(state, symbol)
            except OblivionError:
                # No transition on this symbol: leave the entry out
                # (partial transition map).
                continue
            else:
                try:
                    # Already discovered? Reuse its index.
                    j = states.index(next_states)
                except ValueError:
                    # New state: assign the next index and enqueue it.
                    j = len(states)
                    states.append(next_states)
                transition[i][symbol] = j
        i += 1

    return FSM(
        alphabet=alphabet,
        states=range(len(states)),
        initial=0,
        accepting=accepting,
        transition=transition,
        # states/transitions are constructed consistently above, so the
        # FSM's own validation pass is skipped.
        __validation__=False
    )
c72b743ed4d06691fea020e2e66236a54d53df5f
24,429
def mpncovresnet101(pretrained=False, **kwargs):
    """Constructs a ResNet-101 model.

    Args:
        pretrained (bool): If True, returns a model pre-trained on ImageNet
        **kwargs: forwarded to the MPNCOVResNet constructor.
    """
    # [3, 4, 23, 3] is the standard ResNet-101 stage layout.
    model = MPNCOVResNet(Bottleneck, [3, 4, 23, 3], **kwargs)
    if pretrained:
        # Download (and cache) the checkpoint via torch's model zoo.
        model.load_state_dict(model_zoo.load_url(model_urls['mpncovresnet101']))
    return model
bad52e5b47a84faabdb9d82fb50e585ee287b392
24,430
def cathegory_encoder(data, labelCathegory=labelCathegory):
    """
    Encode cathegorical labels

    Replaces, in place, each column listed in ``labelCathegory`` with its
    integer-encoded values and returns the same frame.

    NOTE(review): the default ``labelCathegory=labelCathegory`` captures the
    module-level value at definition time; rebinding the global afterwards
    will not be seen here — confirm that is intended.
    """
    for k in labelCathegory:
        # A fresh encoder per column; the fitted mapping is not retained,
        # so it cannot be reused later to transform unseen data.
        encoder = sklearn.preprocessing.LabelEncoder()
        encoder.fit(list(data[k].values))
        data[k] = encoder.transform(list(data[k].values))
    return data
dc4c549e58097d219ade1b7140a9e09356692cd8
24,431
import logging
import os


def _make_symbols_cg_df(symbols, benchmark):
    """
    Child-process worker for correlation data collection.  The child process
    uses make_kl_df to collect the financial data for the `symbols` delegated
    by the parent process, and finally returns a pd.DataFrame of the
    percentage changes of all the financial time series.

    :param symbols: iterable of symbols; each element is a str
    :param benchmark: benchmark object used to fix the data time range,
        an AbuBenchmark instance
    :return: pd.DataFrame of percentage changes of all the time series,
        or None if nothing could be fetched
    """
    # Child-process data collection: since this already runs inside a child
    # process, make_kl_df is not invoked in parallel mode here — the parent
    # handles multi-task distribution and decides the task count.
    panel = ABuSymbolPd.make_kl_df(symbols, data_mode=EMarketDataSplitMode.E_DATA_SPLIT_UNDO,
                                   benchmark=benchmark, show_progress=True)

    if panel is None or panel.empty:
        logging.info('pid {} panel is None'.format(os.getpid()))
        return None

    # Swap the panel axes so any single column of all the financial time
    # series can be fetched conveniently.
    panel = panel.swapaxes('items', 'minor')
    net_cg_df = panel['p_change'].fillna(value=0)
    """
    After the axis swap, 'p_change' directly yields the pd.DataFrame of
    percentage changes of all the financial time series, e.g.:
                  usF  usFCAU   usGM  usHMC   usTM  usTSLA  usTTM
    2015-06-25 -0.387  -0.517 -1.308  0.522 -0.391   1.365 -0.029
    2015-06-26 -0.259   1.300 -0.922  0.366  0.437  -0.632 -0.229
    2015-06-29 -2.468  -6.799 -3.345 -2.676 -2.222  -1.898 -2.550
    2015-06-30 -0.067   0.000  0.301  1.250  0.982   2.381  1.353
    2015-07-01 -0.133   0.688 -0.870 -1.605 -0.112   0.332  0.261
    .................................................................
    """
    return net_cg_df
6472a2e77d0d6c5ef1c3639cb48e0cd80461a4c1
24,432
def split_dataset(args, dataset):
    """Split the dataset

    Parameters
    ----------
    args : dict
        Settings; must contain 'split_ratio' (a "train,val,test" string of
        floats) and 'split' ('scaffold' or 'random').
    dataset
        Dataset instance

    Returns
    -------
    train_set
        Training subset
    val_set
        Validation subset
    test_set
        Test subset

    Raises
    ------
    ValueError
        If ``args['split']`` is neither 'scaffold' nor 'random'.
    """
    train_ratio, val_ratio, test_ratio = map(float, args['split_ratio'].split(','))
    if args['split'] == 'scaffold':
        train_set, val_set, test_set = ScaffoldSplitter.train_val_test_split(
            dataset, frac_train=train_ratio, frac_val=val_ratio,
            frac_test=test_ratio, scaffold_func='smiles')
    elif args['split'] == 'random':
        train_set, val_set, test_set = RandomSplitter.train_val_test_split(
            dataset, frac_train=train_ratio, frac_val=val_ratio, frac_test=test_ratio)
    else:
        # Bug fix: this previously `return`ed the ValueError instance instead
        # of raising it, handing the caller an exception object as if it were
        # the three subsets.
        raise ValueError("Expect the splitting method to be 'scaffold', got {}".format(args['split']))

    return train_set, val_set, test_set
1fbaac75655694bc1ca3a5e8ed06d31401d3dd9c
24,433
def depthwise_conv2d_nchw(inputs, weight, bias=None, stride=1, padding=0, dilation=1):
    """Depthwise convolution 2d NCHW layout

    Args:
    -----------------------------
    inputs  : tvm.te.tensor.Tensor
        shape [batch, channel, height, width]
    weight  : tvm.te.tensor.Tensor
        shape [in_channel, factor, kernel_height, kernel_width]
    bias    : (optional:None) tvm.te.tensor.Tensor
        shape [out_channel]
    stride  : (optional:1) int or tuple
    padding : (optional:0) int or tuple
    dilation: (optional:1) int
    -----------------------------

    Returns:
    -----------------------------
    tvm.te.tensor.Tensor
        shape [batch, out_channel, output_height, output_width]
    -----------------------------
    """
    batch_size, in_channel, in_h, in_w = inputs.shape
    _in_channel, factor, k_h, k_w = weight.shape
    assert_print(_in_channel.value == in_channel.value)
    # depthwise: each input channel produces `factor` output channels
    out_channel = in_channel * factor

    # normalize scalar arguments to (height, width) 2-tuples
    stride = (stride, stride) if isinstance(stride, (int, tvm.tir.IntImm)) else stride
    padding = (padding, padding) if isinstance(padding, (int, tvm.tir.IntImm)) else padding
    dilation = (dilation, dilation) if isinstance(dilation, (int, tvm.tir.IntImm)) else dilation
    assert_print(isinstance(stride, tuple) and len(stride) == 2)
    assert_print(isinstance(padding, tuple) and len(padding) == 2)
    assert_print(isinstance(dilation, tuple) and len(dilation) == 2)

    # standard dilated-convolution output-size formula
    out_h = (in_h + 2 * padding[0] - dilation[0] * (k_h - 1) - 1) // stride[0] + 1
    out_w = (in_w + 2 * padding[1] - dilation[1] * (k_w - 1) - 1) // stride[1] + 1
    # reduction axes over the kernel window
    rh = tvm.te.reduce_axis((0, k_h))
    rw = tvm.te.reduce_axis((0, k_w))

    padded = zero_pad2d(inputs, padding=padding)
    # output channel c reads input channel c//factor with filter index c%factor
    output = tvm.te.compute(
        (batch_size, out_channel, out_h, out_w),
        lambda b, c, h, w: tvm.te.sum(
            (padded[b, c//factor,
                    h * stride[0] + rh * dilation[0], w * stride[1] + rw * dilation[1]]
            * weight[c//factor, c%factor, rh, rw]),
            axis=[rw, rh]
        )
    )
    if bias is not None:
        # broadcast-add the per-channel bias in a second compute stage
        output = tvm.te.compute(
            (batch_size, out_channel, out_h, out_w),
            lambda b, c, h, w: output[b, c, h, w] + bias[c]
        )
    return output
bd4f5f0f7dc3a12adefce0e19fa010919e9b9407
24,434
def xtransformed(geo, transformation):
    """Returns a copy of the transformed Rhino Geometry object.

    The input geometry is left untouched: it is duplicated first and the
    transform is applied to the copy only.

    Args:
        geo (:class:`Rhino.Geometry.GeometryBase`): a Rhino Geometry object
        transformation (:class:`Transformation`): the transformation.

    Returns:
        (:class:`Rhino.Geometry.GeometryBase`): the transformed geometry
    """
    # convert the transformation into a Rhino transform matrix
    T = xform_from_transformation(transformation)
    geo_copy = geo.Duplicate()
    geo_copy.Transform(T)  # Transform mutates in place, hence the Duplicate
    return geo_copy
9d21ad58358bff07b10e18c7c3593cca68f07541
24,435
def function_calls(libfuncs):
    """
    libfuncs is the collection of library functions called in script.
    Returns the sorted list of all library functions required in script,
    i.e. the transitive closure of `called_functions` over `libfuncs`.

    Fix: the worklist is now a private copy, so the caller's collection is
    no longer drained as a side effect (the old version `pop`ped the
    argument until it was empty).
    """
    pending = set(libfuncs)  # private worklist; do not mutate the argument
    required = set()
    while pending:
        func = pending.pop()
        required.add(func)
        for callee in called_functions(func):
            # set.add is idempotent, so re-adding a pending callee is harmless;
            # only functions already fully processed are skipped.
            if callee not in required:
                pending.add(callee)
    return sorted(required)
3c6e29930f0a59cc2ad5a3b24ca22c07f3fca28b
24,436
def preprocess_yaml_config(config: SimpleNamespace, prefix_keys=False) -> SimpleNamespace:
    """
    Preprocess a simple namespace.

    Currently,
    - optionally prepend the prefix key to all the configuration parameters
    - change 'None' strings to None values

    :param config: The SimpleNamespace containing the configuration;
        must have a ``prefix`` attribute.
    :param prefix_keys: If True, rename every key ``k`` to ``{prefix}_{k}``
        and drop the ``prefix`` entry itself.
    :return: Preprocessed configuration as a SimpleNamespace
    """
    # Make sure there's a prefix in the configuration
    assert 'prefix' in config.__dict__, 'Please include a prefix in the yaml.'

    if prefix_keys:
        # Grab the prefix from the yaml file
        prefix = config.prefix

        # Prepend the prefix to all the keys, and get rid of the prefix entry.
        # Bug fix: the exclusion previously compared each key against the
        # prefix *value* (`k != prefix`), so the 'prefix' key itself leaked
        # through as '{prefix}_prefix' whenever its value wasn't literally
        # the string 'prefix'.
        config = SimpleNamespace(
            **{f'{prefix}_{k}': v for k, v in config.__dict__.items() if k != 'prefix'})

    # Change 'None' to None in the top level: recommended behavior is to use null instead of None in the yaml
    for key, value in config.__dict__.items():
        config.__dict__[key] = value if value != 'None' else None

    return config
52c4e79334bc95c573b795a6962e83d949cf9639
24,437
# Fix: import from the public `numpy` namespace — `numpy.core` is a private
# alias that was deprecated and removed in NumPy 2.0; the objects are the same.
from numpy import isinf, errstate


def gisinf(x):
    """
    Like isinf, but always raise an error if type not supported instead of
    returning a TypeError object.

    Notes
    -----
    `isinf` and other ufunc sometimes return a NotImplementedType object
    instead of raising any exception. This function is a wrapper to make sure
    an exception is always raised.

    This should be removed once this problem is solved at the Ufunc level.
    """
    # invalid-value FP warnings are irrelevant here; only the type-support
    # failure mode matters
    with errstate(invalid="ignore"):
        st = isinf(x)
        if isinstance(st, type(NotImplemented)):
            raise TypeError("isinf not supported for this type")
    return st
cc525ffc10e87b44a5cee3e93fc1c4466bc7a171
24,438
def make_const(g,  # type: base_graph.BaseGraph
               name,  # type: str
               value,  # type: np.ndarray
               uniquify_name=False  # type: bool
               ):
    """
    Convenience method to add a `Const` op to a `gde.Graph`.

    Args:
      g: The graph that the node should be added to
      name: Name for the new `Const` node
      value: Value to use for the constant
      uniquify_name: if True, generate unique names by appending a numeric
        suffix in the event of a name collision. Otherwise name collisions
        result in an error.

    Returns `gde.Node` object representing the new node.
    """
    # derive the TF dtype from the numpy array's dtype
    dtype = tf.as_dtype(value.dtype)
    ret = g.add_node(name, "Const", uniquify_name=uniquify_name)
    ret.add_attr("dtype", dtype)
    ret.add_attr("value", value)
    # a Const node has exactly one output whose type/shape mirror the value
    ret.set_outputs_from_pairs([(dtype, tf.TensorShape(value.shape))])
    return ret
fd8493c6ea33c2fd4f930f78fd906ddb5fcdf12e
24,439
def find_maxima(x):
    """Find the indices of the local maxima of a sequence.

    An interior index ``i`` is a maximum when ``x[i]`` is strictly greater
    than both neighbours; an endpoint counts when it is strictly greater
    than its single neighbour.

    Fixes: inputs with fewer than two elements used to raise IndexError;
    now ``[]`` yields ``[]`` and a single sample is reported as a trivial
    maximum. (Docstring translated from the original Spanish: "Halla los
    índices de los máximos relativos".)

    :param x: indexable sequence of comparable values
    :return: list of indices of the local maxima, in increasing order
    """
    N = len(x)
    if N == 0:
        return []
    if N == 1:
        return [0]
    idx = []
    # left endpoint: compare against its only neighbour
    if x[1] < x[0]:
        idx.append(0)
    for i in range(1, N - 1):
        if x[i-1] < x[i] and x[i+1] < x[i]:
            idx.append(i)
    # right endpoint: compare against its only neighbour
    if x[-2] < x[-1]:
        idx.append(N - 1)
    return idx
8be862981e46ac2534a78354adf52993ca78426a
24,440
def _transform_rankings(Y):
    """Transform the rankings to integer."""
    finite_mask = np.isfinite(Y)
    nan_mask = np.isnan(Y)
    inf_mask = np.isinf(Y)

    # Finite entries keep their value (truncated to int64); NaN marks a
    # randomly-missed class and +/-inf marks a top-k entry.
    transformed = np.zeros(Y.shape, dtype=np.int64)
    transformed[finite_mask] = Y[finite_mask]
    transformed[nan_mask] = RANK_TYPE.RANDOM.value
    transformed[inf_mask] = RANK_TYPE.TOP.value
    return transformed
7a89bc4dd2ff1ad8b00456198f4051ab9030ccbc
24,441
import os
import json


def get_config(key=None, default=None, raise_error=False):
    """Read expyfun preference from env, then expyfun config

    Parameters
    ----------
    key : str | None
        The preference key to look for. The os environment is searched
        first, then the expyfun config file is parsed. If None, the whole
        parsed config dict is returned instead of a single value.
    default : str | None
        Value to return if the key is not found.
    raise_error : bool
        If True, raise an error if the key is not found (instead of
        returning default).

    Returns
    -------
    value : str | None
        The preference key value.
    """
    if key is not None and not isinstance(key, string_types):
        raise ValueError('key must be a string')

    # first, check to see if key is in env
    if key is not None and key in os.environ:
        return os.environ[key]

    # second, look for it in expyfun config file
    config_path = get_config_path()
    if not op.isfile(config_path):
        key_found = False
        val = default
    else:
        with open(config_path, 'r') as fid:
            config = json.load(fid)
        # key=None means "give me the whole parsed config"
        if key is None:
            return config
        key_found = True if key in config else False
        val = config.get(key, default)

    if not key_found and raise_error is True:
        # build an actionable message showing both ways to set the key
        meth_1 = 'os.environ["%s"] = VALUE' % key
        meth_2 = 'expyfun.utils.set_config("%s", VALUE)' % key
        raise KeyError('Key "%s" not found in environment or in the '
                       'expyfun config file:\n%s\nTry either:\n'
                       '    %s\nfor a temporary solution, or:\n'
                       '    %s\nfor a permanent one. You can also '
                       'set the environment variable before '
                       'running python.'
                       % (key, config_path, meth_1, meth_2))
    return val
7f05779658ddffd6008cd44984a34b52ef9f3ac9
24,442
import os
import sys


def check_exist(path, mode, flag_exit=True):
    """
    function to check for file existence
    @param path(str): target file path
    @param mode(int): 1(existence) / 2(existence for file) / 3(existence for dir)
    @param flag_exit(bool): Exit if not present (Default: True)
    @return (bool) or exit(None)
    """
    # mode -> (predicate, noun used in the error message); replaces the
    # previous triplicated if/elif branches with a single dispatch table
    checkers = {
        1: (os.path.exists, "path"),
        2: (os.path.isfile, "file"),
        3: (os.path.isdir, "directory"),
    }
    if mode not in checkers:
        sys.stderr.write("ERROR: Subroutine error: Not specified mode\n")
        sys.exit(1)
    predicate, noun = checkers[mode]
    if not predicate(path):
        sys.stderr.write("ERROR: No such %s (%s)\n" % (noun, path))
        if flag_exit:
            sys.exit(1)
        return False
    return True
4eda0d241b61d1b813593d91edbf8a14bd4df036
24,443
import math def convert_pf_patch_to_cg_patch(p, simnum): """Converts a pfpatch p to a CG patch.""" # Print patch info LOGGER.info("Macro patch is:") LOGGER.info(str(p)) LOGGER.info("Patch id = {}".format(p.id)) LOGGER.info("Protein bead ids = {}".format(p.protein_ids)) LOGGER.info("Protein bead states = {}".format(p.protein_states)) LOGGER.info("Protein bead pos. = {}".format(p.protein_positions)) LOGGER.info("Macro simname = {}".format(p.config.simname)) LOGGER.info("Macro filenames = {}".format(p.config.filenames)) LOGGER.info("Macro simname = {} / {}".format(p.config.simtime, p.config.tunit)) # Remove any suppriusly negative densities - this can happen in the macro model due to noise term p.concentrations = np.maximum(p.concentrations, 0) # For testing of lipid consentrations #np.save("concentrations_macro_all.npy", p.concentrations) # Convert patch from native 37x37 to 5x5 patch size # @TOOD Harsh is looking at this now #lconcentrations = p.subsample() #lconcentrations = p._subsample_mean(p.concentrations, 30.0, 5) #lconcentrations = p.subsample_intg() lconcentrations = p.concentrations # For testing of lipid consentrations #np.save("concentrations_macro.npy", lconcentrations) LOGGER.info("Lipid nat. grid = {}".format(p.concentrations.shape)) LOGGER.info("Lipid subs.grid = {}".format(lconcentrations.shape)) # Get protein bead posisions - these should all be within the patch # Get RAS posision within the patch - RAS is now back to be patch centric #localRasPos = p.rasPositions - p.extents[0] #LOGGER.info("RAS pos = {}".format(p.rasPositions)) #LOGGER.info("RAS local pos = {}".format(localRasPos)) #LOGGER.info("Protein bead pos. 
= {}".format(p.protein_positions)) # Convert patch to CG input # Info Patch.LIPID_NAMES = # ['nInner_POPC', 'nInner_PAPC', 'nInner_POPE', 'nInner_DIPE', 'nInner_DPSM', 'nInner_PAPS', 'nInner_PAP6', 'nInner_CHOL', # 'nOuter_POPC', 'nOuter_PAPC', 'nOuter_POPE', 'nOuter_DIPE', 'nOuter_DPSM', 'nOuter_CHOL'] lipidTypes = ["POPX", "POPC", "PAPC", "POPE", "DIPE", "DPSM", "PAPS", "PAP6", "CHOL"] lInner = lconcentrations[:, :, 0:8] lOuter = lconcentrations[:, :, 8:14] sumOuter = np.sum(lOuter) / (lconcentrations.shape[0]*lconcentrations.shape[1]) sumInner = np.sum(lInner) / (lconcentrations.shape[0]*lconcentrations.shape[1]) asym = np.rint(1600 * (1 - (sumInner / sumOuter))).astype(int) # For testing of lipid consentrations #np.save("concentrations_asym.npy", asym) if (lInner.min() < 0 or lOuter.min() < 0): error = "---> Negative lipid consentrations found " LOGGER.error(error) raise ValueError(error) if (asym > 500 or asym < -500): error = "---> Bilayer asymmetry is all to high asym = " + str(asym) LOGGER.error(error) raise ValueError(error) # @WARNING this should not be here - remove after C3 - a temp fix to not build patches with more than 4 protein - used in last part of C3 if (len(p.protein_ids) > 4): error = "---> To many protein beads in patch - stop build. Current p.protein_ids count = " + str(len(p.protein_ids)) LOGGER.error(error) raise ValueError(error) # @TODO change this to based on 64 lipids per subpatch (to explore rounding) # @TODO depdning on variations in cons from PF maybe to "proper" pre rounding here ??? #old_lOuter = np.rint(64 * lOuter / sumOuter).astype(int) #old_lInner = np.rint(64 * lInner / sumInner).astype(int) # Compute a probablistic construction of the particles in a cell. # We use a cumlative distribution function to # TODO: We should really assert or something here to verify that the # inner and outer leaflets are the same size. # Iterate through each grid point and convert to the number of particles # based on the probability. 
for i in range(0, lInner.shape[0]): for j in range(0, lInner.shape[1]): lInner[i, j] = generate_probabilistic_patch(lInner[i, j], 64) lInner = lInner.astype(int) for i in range(0, lOuter.shape[0]): for j in range(0, lOuter.shape[1]): lOuter[i, j] = generate_probabilistic_patch(lOuter[i, j], 64) lOuter = lOuter.astype(int) # @TODO Use this to explor lipid consetnrations - excel magic ( a 0 values will be left out of the list but 1/1000 is ok) # not needed for production and makes other consentrations a little strange # lOuter[lOuter == 0] = 1 # lInner[lInner == 0] = 1 # For testing of lipid consentrations # @TODO add this to a extra debug / analysis flag #saveCons = np.zeros(lconcentrations.shape) #saveCons[:, :, 0:8] = lInner #saveCons[:, :, 8:14] = lOuter #np.save("concentrations_macro_int_"+simnum+".npy", saveCons) # Convet into the right shape (adding empty lipids as 0 and chansing 5x5 to 25) # lipidUpperRatioArray = [[243, 0, 121, 20, 61, 242, 0, 0, 313]] # lipidLowerRatioArray = [[0, 139, 75, 54, 161, 108, 161, 22, 280]] lipidUpperRatioArray = np.insert(np.insert(np.insert(lOuter, 1, 0, axis=2), 6, 0, axis=2), 7, 0, axis=2) lipidLowerRatioArray = np.insert(lInner, 0, 0, axis=2) lipidUpperRatioArray = np.reshape(lipidUpperRatioArray, (lconcentrations.shape[0]*lconcentrations.shape[1], len(lipidTypes))) lipidLowerRatioArray = np.reshape(lipidLowerRatioArray, (lconcentrations.shape[0]*lconcentrations.shape[1], len(lipidTypes))) # Save full lipid type asignmet patch for insane to read lipid_types_inner = ["POPC", "PAPC", "POPE", "DIPE", "DPSM", "PAPS", "PAP6", "CHOL"] lipid_types_outer = ["POPX", "PAPC", "POPE", "DIPE", "DPSM", "CHOL"] #p.concentrations.shape # (37, 37, 14) (for new patches) #lconcentrations_full = pdcglob.reinterp(37, 37, 14, p.concentrations, 40, 40) # To fit protein placement we need to use the transpose of x,y columns lconcentrations_full = pdcglob.reinterp(37, 37, 14, p.concentrations.transpose((1,0,2)), 40, 40) lInner = 
lconcentrations_full[:, :, 0:8] lOuter = lconcentrations_full[:, :, 8:14] sumOuter = np.sum(lOuter) / (lconcentrations_full.shape[0]*lconcentrations_full.shape[1]) sumInner = np.sum(lInner) / (lconcentrations_full.shape[0]*lconcentrations_full.shape[1]) asym_full = np.rint(1600 * (1 - (sumInner / sumOuter))).astype(int) LOGGER.info("Both asym(s) shoudl be the same asym {} and asym_full {}".format(asym, asym_full)) lipid_counts_inner = pdcglob.pdcglob(40, 40, 8, lInner) lipid_counts_outer = pdcglob.pdcglob(40, 40, 6, lOuter) lipid_counts_file = "lipid_counts.npz" np.savez_compressed(lipid_counts_file, lipid_counts_inner = lipid_counts_inner, lipid_counts_outer = lipid_counts_outer, asym = asym_full, lipid_types_inner = lipid_types_inner, lipid_types_outer = lipid_types_outer) LOGGER.info("Full patch lipid type assignment saved to {}, asym {}, shape".format(lipid_counts_file, asym_full, lconcentrations_full.shape)) # Convert protein bead list to protein "unit" list - where each unit is inserted a one piece later in the pipeline # For each protein unit - get inital structure, insertion properties (location, angle etc), and charge LOGGER.info("Protein bead ids = {}".format(p.protein_ids)) LOGGER.info("Protein bead states = {}".format(p.protein_states)) LOGGER.info("Protein bead pos. = {}".format(p.protein_positions)) LOGGER.info("Protein types = {}".format(p.type_c3())) LOGGER.info("Protein complex ids = {}".format(p.complex_ids)) #LOGGER.info("Protein comple dis. = {}".format(p.complex_dists)) # removed from storage! 
# Initial shift not needed anymore as first beads is at center pixel (not 0,0 but within 1/2 pixel of that) # posArray = p.protein_positions - p.protein_positions[0] # Center at 0,0 # Shift from box center at 0,0 to box lower/left edge at 0,0 as used in the CG simulations posArray = [] if len(p.protein_positions) > 0: box_dim = 31.0 # @WARNING hared coded, also in particesim.py half_box_dim = box_dim / 2.0 # Get box center to move protein 0,0 to box edge posArray = p.protein_positions # First bead in the patche should be RAS and now centerd (close to 0,0) posArray = posArray + [half_box_dim, half_box_dim] if (posArray.max() > 31 or posArray.min() < 0): error = "---> Protein posisions are outside the CG box size; pos are: " + np.array2string(krasPosArray) LOGGER.error(error) raise ValueError(error) LOGGER.info("Protein bead pos ce = {}".format(posArray)) # Change from RAS4B to RAS4A for C4 hearling run (code below in if for both RAS-only and RAS-RAF) # If False this will be a normal 4B setup if True change to 4A c4_healing_convert_to_4A = True # @TODO fix this? this is an evil hack to get the protein unites form current complexes - Harsh make RAS also complex? 
# works as we only have x2 types of proteins proteinUnits = [] proteinUnitCount = len(p.protein_ids) - len(p.complex_ids) # Currently each complex has x2 beads LOGGER.info("Protein unit count = {}".format(proteinUnitCount)) remainig_ids = p.protein_ids for i in range(proteinUnitCount): cProteinDict = {} cProteinDict["id"] = i # check if current bead belongs to a complex if (len(p.complex_ids) > 0 and any(remainig_ids[0] in string for string in p.complex_ids)): cComplex = [] if c4_healing_convert_to_4A: # Convert 4B to 4A for C4 hearling runs # Now we are adding a RAS4A-RAF cProteinDict["type"] = "RAS4A-RAF" for x in p.complex_ids: if remainig_ids[0] in x: cComplex = x break ras_bead_id = cComplex[0] raf_bead_id = cComplex[1] # Flip beads if RAS/RAF bead order was reverse if "RAF" in p.protein_states[np.where(p.protein_ids == ras_bead_id)][0]: ras_bead_id = cComplex[1] raf_bead_id = cComplex[0] cProteinDict["beads"] = cComplex ras_state = p.protein_states[np.where(p.protein_ids == ras_bead_id)][0] cProteinDict["state"] = ras_state[0] + ras_state[-1] cPosRAS = posArray[np.where(p.protein_ids == ras_bead_id)][0] cPosRAF = posArray[np.where(p.protein_ids == raf_bead_id)][0] deltaPost = cPosRAF - cPosRAS rafAngle = math.degrees(math.atan2(deltaPost[1], deltaPost[0])) % 360 # atan2 returns in radians -pi to pi and takes in y,x so flip, convert and set to 0-360 cProteinDict["angle"] = rafAngle LOGGER.info("RAF angle RAS.pos {} RAF.pos {}, delta.pos {}, angle {}".format(cPosRAS, cPosRAF, deltaPost, rafAngle)) # RAS-RAF is places usign RAS farnesyl x,y pos cProteinDict["position"] = cPosRAS assert cProteinDict["position"].shape[0] == 2 cProteinDict["structure"] = "N/A" #cProteinDict["structure"] = "/g/g90/helgi/mummi_resources/martini/test_RAS4A/KRAS4A_RAF_shift_updateitp.gro" #cProteinDict["structure"] = "/g/g90/helgi/mummi_resources/martini/test_RAS4A/KRAS4A_RAF_shift.gro" # @TODO change to N/A and - after we fix library for 4A - cProteinDict["structure"] = "N/A" 
cProteinDict["charge"] = -1 LOGGER.info("Protein unit {} converting 4B to 4A - adding RAS4A-RAF complex for bead ids {} full info: {}".format(i, cComplex, str(cProteinDict))) else: # Now we are adding a RAS-RAF cProteinDict["type"] = "RAS-RAF" for x in p.complex_ids: if remainig_ids[0] in x: cComplex = x break ras_bead_id = cComplex[0] raf_bead_id = cComplex[1] # Flip beads if RAS/RAF bead order was reverse if "RAF" in p.protein_states[np.where(p.protein_ids == ras_bead_id)][0]: ras_bead_id = cComplex[1] raf_bead_id = cComplex[0] cProteinDict["beads"] = cComplex ras_state = p.protein_states[np.where(p.protein_ids == ras_bead_id)][0] cProteinDict["state"] = ras_state[0] + ras_state[-1] # @TODO get the angle orientation of RAS-RAF cPosRAS = posArray[np.where(p.protein_ids == ras_bead_id)][0] cPosRAF = posArray[np.where(p.protein_ids == raf_bead_id)][0] deltaPost = cPosRAF - cPosRAS rafAngle = math.degrees(math.atan2(deltaPost[1], deltaPost[0])) % 360 # atan2 returns in radians -pi to pi and takes in y,x so flip, convert and set to 0-360 cProteinDict["angle"] = rafAngle LOGGER.info("RAF angle RAS.pos {} RAF.pos {}, delta.pos {}, angle {}".format(cPosRAS, cPosRAF, deltaPost, rafAngle)) # RAS-RAF is places usign RAS farnesyl x,y pos cProteinDict["position"] = cPosRAS assert cProteinDict["position"].shape[0] == 2 # Now structure is assigned in placeprotein ''' # Numer of diffrent configurations saved for each RAS-RAF state # @WARNING hardcoded _0, _1, ... 
_librarySize-1 librarySize = 5000 cInt = randint(0, librarySize - 1) cStructureName = "{}/{}".format(Naming.dir_res('structures'), Naming.protein_structure(cProteinDict["type"], cProteinDict["state"], cInt)) LOGGER.info("Get {} structure: state {}, randint {}, filename {}".format(cProteinDict["type"], cProteinDict["state"], cInt, cStructureName)) if not os.path.isfile(cStructureName): error = "---> Protein structure file {} not found.".format(cStructureName) LOGGER.error(error) raise ValueError(error) cProteinDict["structure"] = cStructureName ''' cProteinDict["structure"] = "N/A" cProteinDict["charge"] = -1 LOGGER.info("Protein unit {} adding RAS-RAF complex for bead ids {} full info: {}".format(i, cComplex, str(cProteinDict))) # Remove both complex beads from list remainig_ids = remainig_ids[~np.isin(remainig_ids, cComplex)] else: if c4_healing_convert_to_4A: # Convert 4B to 4A for C4 hearling runs # Now we are adding a RAS4A-only cProteinDict["type"] = "RAS4A-ONLY" cProteinDict["beads"] = [remainig_ids[0]] cProteinDict["state"] = p.protein_states[np.where(p.protein_ids == remainig_ids[0])][0][3:] cProteinDict["position"] = posArray[np.where(p.protein_ids == remainig_ids[0])][0] assert cProteinDict["position"].shape[0] == 2 #cProteinDict["structure"] = "/g/g90/helgi/mummi_resources/martini/test_RAS4A/KRAS4A_shift.gro" #cProteinDict["structure"] = "/g/g90/helgi/mummi_resources/martini/test_RAS4A/KRAS4A_shift_updateitp.gro" cProteinDict["structure"] = "N/A" # @TODO change to N/A and - after we fix library for 4A - cProteinDict["structure"] = "N/A" cProteinDict["charge"] = -2 LOGGER.info("Protein unit {} converting 4B to 4A - adding RAS4A-ONLY for bead id {} full info: {}".format(i, remainig_ids[0], str(cProteinDict))) else: # Now we are adding a RAS-only cProteinDict["type"] = "RAS-ONLY" cProteinDict["beads"] = [remainig_ids[0]] cProteinDict["state"] = p.protein_states[np.where(p.protein_ids == remainig_ids[0])][0][3:] cProteinDict["position"] = 
posArray[np.where(p.protein_ids == remainig_ids[0])][0] assert cProteinDict["position"].shape[0] == 2 # Now structure is assigned in placeprotein ''' # Numer of diffrent configurations saved for each RAS-ONLY state # @WARNING hardcoded _0, _1, ... _librarySize-1 librarySize = 5000 cInt = randint(0, librarySize - 1) cStructureName = "{}/{}".format(Naming.dir_res('structures'), Naming.protein_structure(cProteinDict["type"], cProteinDict["state"], cInt)) LOGGER.info("Get {} structure: state {}, randint {}, filename {}".format(cProteinDict["type"], cProteinDict["state"], cInt, cStructureName)) if not os.path.isfile(cStructureName): error = "---> Protein structure file {} not found.".format(cStructureName) LOGGER.error(error) raise ValueError(error) cProteinDict["structure"] = cStructureName ''' cProteinDict["structure"] = "N/A" ''' @TODO, this # Temp add for RAS-ONLY seperate build using old patches cProteinDict["beads"] = 1 cProteinDict["state"] = "n/a" cProteinDict["position"] = posArray[0] assert cProteinDict["position"].shape[0] == 2 #/p/gpfs1/helgi/init_structures_ras/chris_fixed_10/sr2_pfpatch_000000343624_1000ns.gro cStructureName = "/p/gpfs1/helgi/init_structures_ras/chris_fixed_10/{}".format(simnum) LOGGER.info("Get {} structure: state {}, filename {}".format(cProteinDict["type"], cProteinDict["state"], cStructureName)) if not os.path.isfile(cStructureName): error = "---> Protein structure file {} not found.".format(cStructureName) LOGGER.error(error) raise ValueError(error) cProteinDict["structure"] = cStructureName # Temp end add for RAS-ONLY seperate build using old patches ''' cProteinDict["charge"] = -2 LOGGER.info("Protein unit {} adding RAS-ONLY for bead id {} full info: {}".format(i, remainig_ids[0], str(cProteinDict))) # remove current bead from list remainig_ids = remainig_ids[~np.isin(remainig_ids, remainig_ids[0])] #LOGGER.debug("Ids {}".format(remainig_ids)) proteinUnits.append(cProteinDict) # Print CG patch input LOGGER.info("CG_patch input:") 
LOGGER.info("macro patch summ_outer = {}".format(sumOuter)) LOGGER.info("macro patch summ_inner = {}".format(sumInner)) LOGGER.info("patch asym = {}".format(asym)) LOGGER.info("Lipids outer cons. = {}".format(str(lipidUpperRatioArray))) LOGGER.info("Lipids inner cons. = {}".format(str(lipidLowerRatioArray))) LOGGER.info("Protein units all = {}".format(proteinUnits)) ''' @TODO fix this and make it general for RAS and RAF # Select KRAS structures acording to states - pfPatch state to saved files + random value (e.g. 1-1000) # "KRAS-s"+str(cS)+"-r"+str(randint(1, librarySize))+".gro" #librarySize = 1000 # numer of diffrent configurations saved for each state #krasStructs = [] for i in range(len(p.rasStates)): cInt = randint(1, librarySize) cState = int(p.rasStates[i]) if (cState != 1 and cState != 2): error = "---> KRAS state {} is not supported: ".format(cState) LOGGER.error(error) raise ValueError(error) krasStruct = "{}/random_state{}.{}.KRAS.pbc.mol.gro".format(particlesim.dirKRASStructures, cState, cInt) LOGGER.info("Get KRAS structure file: KRAS # {}, in state {} and rand {}, name {}".format(i, cState, cInt, krasStruct)) krasStructs.append(krasStruct) # @TODO remove RAS-RAF temp test - NOW this is RAS only campain 1* structs krasStructs = [] #krasStruct = particlesim.dirKRASStructures + "/KRAS-04-protein-cg-M22-em-shift.gro" #krasStruct = "/g/g90/helgi/mummi_resources/kras-structures/RAS-RAF-01-fix-shift.gro" structNumber = simnum.split("_")[2] # get XXX number only RAS_RAF_XXX #structPath = "/p/gpfs1/helgi/init_structures_ras_craf/crafForHelgi_2020June20/sel_1000_weight_random" structPath = "/p/gpfs1/helgi/init_structures_ras/converterd" fileMatch = fnmatch.filter(os.listdir(structPath), '{}_*.gro'.format(structNumber)) krasStruct = "{}/{}".format(structPath, fileMatch[0]) for i in range(len(p.protein_states)): krasStructs.append(krasStruct) #print("s num {} file {}".format(structNumber, krasStruct)) ## @TODO remove this - it'shere Make this a none RAS patch: 
#krasStructs = [] #krasPosArray = [] #exit() ''' # Add simnum after name, if provided (shoud be empty in MuMMI runs) simName=p.id if len(simnum) > 0: simName += "_" + simnum patchSim = particlesim.ParticleSim(lipidTypes, lipidUpperRatioArray, lipidLowerRatioArray, [lconcentrations.shape[0], lconcentrations.shape[1]], proteinUnits, asym=-asym, simName=simName, lipidFullAsignment=True) return patchSim
91a455104f2319d041a6ccafa3b73c65c1b7a491
24,444
def find_error_detect(image_path):
    """
    Run both the EAST and CTPN detectors on one image and judge whether the
    CTPN detection is correct; when it is not, record the image path.

    :param image_path: path of the image file to check
    :return: 1 if CTPN detection is judged correct, 0 otherwise
    """
    save_path = './ctpn_detect_error.txt'
    image = cv2.imread(image_path)
    # Payload sent to the detection services.
    data = {'fname': image_path, 'img_str': _img_to_str_base64(image)}

    # Detect with the EAST model.
    res_east_detect = c_det_east.detect(data, 0.8, False)
    bboxdict_east = res_east_detect['data']
    boxes_east = []
    for inst in bboxdict_east:
        boxes_east.append([int(inst['x0']), int(inst['y0']),
                           int(inst['x2']), int(inst['y2'])])

    # Detect with the CTPN model.
    res_frcnn_detect = c_det_frcnn.detect(data)
    bboxdict_frcnn = res_frcnn_detect['data']['bbox_list']
    boxes_ctpn = [_['bbox'] for _ in bboxdict_frcnn]  # (n, 4)

    list_rec_ctpn = [rectangle(_[0], _[1], _[2], _[3]) for _ in boxes_ctpn]
    list_rec_east = [rectangle(_[0], _[1], _[2], _[3]) for _ in boxes_east]

    # Collect EAST boxes that have no sufficiently-overlapping CTPN box.
    list_rec_onlyeast = []
    for one_east_rec in list_rec_east:
        matched = False
        for one_ctpn_rec in list_rec_ctpn:
            overlap_area, iou_width = iou_area(one_east_rec, one_ctpn_rec)
            # Boxes are considered the same box when the overlap covers at
            # least 80% of either one of them.
            if (overlap_area * 1. / one_east_rec.area > 0.8
                    or overlap_area * 1. / one_ctpn_rec.area > 0.8):
                matched = True
                break
        if not matched:
            list_rec_onlyeast.append(one_east_rec)

    detect_right = 1
    # An extra EAST box hugging the top or bottom edge is assumed to be one
    # CTPN intentionally filtered out; anywhere else means CTPN missed text.
    for east_rec in list_rec_onlyeast:
        if east_rec.y0 <= 2 or east_rec.y0 > image.shape[0] - 60:
            continue
        detect_right = 0
        break

    if not detect_right:
        # If detection is wrong, record the image path for later review.
        # Bug fix: append a newline so each recorded path sits on its own line.
        with open(save_path, 'a') as f:
            f.write(image_path + '\n')
    return detect_right
e76d6f2b21d0b735a4e811d5aacfde0e273075d2
24,445
def appendItem():
    """Add a product to the current invoice, then redirect to the index page."""
    app.logger.debug('This is appendItem to PO process')
    if request.method == 'POST':
        prod_properties, check_up = checkProduct(request.form)
        # Only append the product when validation succeeded.
        if check_up:
            appendProduct(prod_properties, session['userID'])
        session['active_tab'] = 'p_agg'
    return redirect(url_for('index'))
3f5b77b8817f5f5d068e86bcd1b8a5137aab9113
24,446
import io
import gzip


def gzip_bytes(bytes_obj):
    """Compress a string or bytes with gzip, entirely in memory.

    A ``str`` input is first encoded (UTF-8 default); the compressed payload
    is returned wrapped in an ``io.BytesIO`` buffer.
    """
    payload = bytes_obj.encode() if isinstance(bytes_obj, str) else bytes_obj
    buffer = io.BytesIO()
    with gzip.GzipFile(fileobj=buffer, mode='w') as gz:
        gz.write(payload)
    return buffer
68d0a6b3c64b8633a3084114f617ccd792a688f9
24,447
def ones_like(other_ary):
    """Return a new PitchArray filled with ones, matching the shape and
    dtype of *other_ary*."""
    filled = PitchArray(other_ary.shape, other_ary.dtype)
    filled.fill(1)
    return filled
7bbdbdaa409de3986db66c98eedc3670d2483b2b
24,448
def inference_multiview(views, n_classes, keep_prob):
    """
    Build the multi-view CNN (MVCNN) inference graph.

    Every view is pushed through shared AlexNet-style convolution layers,
    the per-view features are merged by a view-pooling layer, and fully
    connected layers produce the class logits.

    Args:
        views: N x V x W x H x C tensor (V views per sample).
        n_classes: number of output classes.
        keep_prob: dropout keep probability for the FC layers.

    Returns:
        Logits tensor of shape N x n_classes.
    """
    n_views = views.get_shape().as_list()[1]

    # transpose views : (NxVxWxHxC) -> (VxNxWxHxC) so tf.gather can pick
    # out one view at a time.
    views = tf.transpose(views, perm=[1, 0, 2, 3, 4])

    view_pool = []
    for i in range(n_views):  # fix: use range() so this also runs on Python 3
        # set reuse True for i > 0, for weight-sharing across views
        reuse = (i != 0)
        view = tf.gather(views, i)  # NxWxHxC

        conv1 = _conv('conv1', view, [11, 11, 3, 96], [1, 4, 4, 1], 'VALID',
                      reuse=reuse)
        pool1 = _maxpool('pool1', conv1, ksize=[1, 3, 3, 1],
                         strides=[1, 2, 2, 1], padding='VALID')
        conv2 = _conv('conv2', pool1, [5, 5, 96, 256], group=2, reuse=reuse)
        pool2 = _maxpool('pool2', conv2, ksize=[1, 3, 3, 1],
                         strides=[1, 2, 2, 1], padding='VALID')
        conv3 = _conv('conv3', pool2, [3, 3, 256, 384], reuse=reuse)
        conv4 = _conv('conv4', conv3, [3, 3, 384, 384], group=2, reuse=reuse)
        conv5 = _conv('conv5', conv4, [3, 3, 384, 256], group=2, reuse=reuse)
        pool5 = _maxpool('pool5', conv5, ksize=[1, 3, 3, 1],
                         strides=[1, 2, 2, 1], padding='VALID')

        # Flatten the per-view feature map before view pooling.
        dim = np.prod(pool5.get_shape().as_list()[1:])
        reshape = tf.reshape(pool5, [-1, dim])
        view_pool.append(reshape)

    pool5_vp = _view_pool(view_pool, 'pool5_vp')
    print('pool5_vp', pool5_vp.get_shape().as_list())

    fc6 = _fc('fc6', pool5_vp, 4096, dropout=keep_prob)
    fc7 = _fc('fc7', fc6, 4096, dropout=keep_prob)
    fc8 = _fc('fc8', fc7, n_classes)

    return fc8
b9fd30db4d130aad29333d80a24c9cac6a6ce580
24,449
def trueReturn(data, msg):
    """Build the JSON response for a successful operation."""
    payload = {
        "status": True,
        "data": data,
        "msg": msg,
    }
    return JSONResponse(content=payload)
7eabfe62bb0cf11b92d146cae3171fe391c27d5f
24,450
from pathlib import Path
import re


def parse_slurm_times(job_id: str, path: Path = Path.cwd()) -> float:
    """Performs the parsing of the file slurm-{job_id}.out by returning in
    milliseconds the time measured by Slurm.

    Args:
        job_id (str): The job id whose slurm output file is parsed.
        path (Path): The path where to look for the slurm output.
            Defaults to the current working directory (note: the default is
            evaluated once, at import time).

    Returns:
        float: The time elapsed by the application, in milliseconds.

    Raises:
        ValueError: If no time could be parsed from the output file.
        FileNotFoundError: If the slurm output file does not exist.
    """
    real_time = None
    out_file = path / f"slurm-{job_id}.out"
    try:
        with open(out_file, "r") as file:
            for line in file:
                # The `time` utility writes e.g. "real\t0m1.234s".
                if line.startswith("real"):
                    time = re.split("\t", line)[-1].strip()
                    real_time = parse_milliseconds(time)
        # Bug fix: compare against None so a measured time of 0.0 is still
        # treated as a valid (truthy-falsy) result.
        if real_time is not None:
            return real_time
        raise ValueError(
            "Could not parse time of slurm output,"
            f"content set to {real_time} !"
        )
    except FileNotFoundError:
        raise FileNotFoundError("Slurm output was not generated.")
22cc642aa711ab302772273d3d05f7d5615e21d1
24,451
import torch


def bprl(positive: torch.Tensor, negative: torch.Tensor) -> torch.Tensor:
    """
    Bayesian Personalized Ranking Loss.

    Computes ``-mean(logsigmoid(positive - negative))``, pushing positive
    item scores above negative ones.
    https://arxiv.org/ftp/arxiv/papers/1205/1205.2618.pdf

    Args:
        positive: scores for positive (observed) items.
        negative: scores for negative (sampled) items, same shape.

    Returns:
        Scalar loss tensor.
    """
    dist = positive - negative
    # Fix: the original referenced an undefined name ``F``; use the fully
    # qualified functional API instead.
    return -torch.nn.functional.logsigmoid(dist).mean()
0fb13f41c27880e821548298a369091f0b96c0c1
24,452
def select2_js_url():
    """
    Return the full url to the Select2 JavaScript library

    Default: ``None``

    # Example

        {% select2_js_url %}
    """
    # Delegate to the select2 helper module.
    url = sl2.select2_js_url()
    return url
6866c7ad1a00e8d23c15f94fd9169412213aa4f0
24,453
def _SharedSuffix(pattern1, pattern2):
    """Returns the shared suffix of two patterns."""
    # Reverse both patterns, take their shared prefix, then reverse it back.
    reversed_prefix = _SharedPrefix(pattern1[::-1], pattern2[::-1])
    return reversed_prefix[::-1]
c48792aaaf3e470571cbf4d16f6af0b00a671c3f
24,454
import credstash
import argparse
import getpass


def run(args):
    """Handle credstash script.

    Parses the CLI arguments (action plus optional name/value), ensures the
    credstash DynamoDB table exists, then performs the requested
    get/put/del/list operation, printing results to stdout.
    Returns 0 on success paths that return, 1 when a required name is missing.
    """
    parser = argparse.ArgumentParser(
        description=("Modify Home Assistant secrets in credstash."
                     "Use the secrets in configuration files with: "
                     "!secret <name>"))
    parser.add_argument(
        '--script', choices=['credstash'])
    parser.add_argument(
        'action', choices=['get', 'put', 'del', 'list'],
        help="Get, put or delete a secret, or list all available secrets")
    parser.add_argument(
        'name', help="Name of the secret", nargs='?', default=None)
    parser.add_argument(
        'value', help="The value to save when putting a secret",
        nargs='?', default=None)

    # pylint: disable=import-error, no-member
    args = parser.parse_args(args)
    table = _SECRET_NAMESPACE

    # Probe the table; any failure is taken to mean it does not exist yet,
    # so it gets created on demand.
    try:
        credstash.listSecrets(table=table)
    except Exception:  # pylint: disable=broad-except
        credstash.createDdbTable(table=table)

    if args.action == 'list':
        # credstash stores one entry per version; dedupe by name for display.
        secrets = [i['name'] for i in credstash.listSecrets(table=table)]
        deduped_secrets = sorted(set(secrets))

        print('Saved secrets:')
        for secret in deduped_secrets:
            print(secret)
        return 0

    if args.name is None:
        parser.print_help()
        return 1

    if args.action == 'put':
        # Prompt for the secret when no value was given on the command line.
        if args.value:
            the_secret = args.value
        else:
            the_secret = getpass.getpass('Please enter the secret for {}: '
                                         .format(args.name))
        # Versions are monotonically increasing integers per secret name.
        current_version = credstash.getHighestVersion(args.name, table=table)
        credstash.putSecret(args.name,
                            the_secret, version=int(current_version) + 1,
                            table=table)
        print('Secret {} put successfully'.format(args.name))
    elif args.action == 'get':
        the_secret = credstash.getSecret(args.name, table=table)
        if the_secret is None:
            print('Secret {} not found'.format(args.name))
        else:
            print('Secret {}={}'.format(args.name, the_secret))
    elif args.action == 'del':
        credstash.deleteSecrets(args.name, table=table)
        print('Deleted secret {}'.format(args.name))
832abd0f137e31f2045883edc32ef2a13d408589
24,455
import os
import mimetypes


def handle_request_files_upload(request):
    """
    Handle request.FILES if len(request.FILES) == 1.

    Returns tuple(upload, filename, is_raw, mime_type) where upload is the
    uploaded file object itself.

    Raises:
        UploadException: if the declared MIME type does not correspond to
            the uploaded file's extension.
    """
    # FILES is a dictionary in Django but Ajax Upload gives the uploaded file
    # an ID based on a random number, so it cannot be guessed here in the code.
    # Rather than editing Ajax Upload to pass the ID in the querystring,
    # note that each upload is a separate request so FILES should only
    # have one entry.
    # Thus, we can just grab the first (and only) value in the dict.
    is_raw = False
    upload = list(request.FILES.values())[0]
    filename = upload.name
    _, iext = os.path.splitext(filename)
    mime_type = upload.content_type.lower()
    if iext not in mimetypes.guess_all_extensions(mime_type):
        # Bug fix: the template previously hard-coded "(unknown)" and ignored
        # the filename passed to .format(); include the real filename.
        msg = "MIME-Type '{mimetype}' does not correspond to file extension of '{filename}'."
        raise UploadException(msg.format(mimetype=mime_type, filename=filename))
    return upload, filename, is_raw, mime_type
437be6e8bf6881224900034e51df04dc754fe716
24,456
def vgg16(num_class):
    """VGG 16-layer model (configuration "D") with batch normalization."""
    feature_layers = make_layers(cfg['D'], batch_norm=True)
    return VGG(feature_layers, num_classes=num_class)
abdb0a48bd5190cd7c7e50193f3d950af5195770
24,457
def IFS(*args) -> Function:
    """
    Evaluates multiple conditions and returns a value that corresponds to the
    first true condition.

    Learn more: https://support.google.com/docs/answer/7014145
    """
    name = "IFS"
    return Function(name, args)
395c67b524b4cccbeabba73666bc1a8f78668ff2
24,458
def idwt_joined_(w, rec_lo, rec_hi, mode):
    """Single-level inverse discrete wavelet reconstruction from a joined
    coefficient vector.

    The first half of *w* holds the approximation coefficients and the
    second half the detail coefficients.
    """
    half = len(w) // 2
    approx, detail = w[:half], w[half:]
    return idwt_(approx, detail, rec_lo, rec_hi, mode)
4b7371a36abc4bd094a3cd86faa1005ff5d6fd69
24,459
def _get_pattern_nts(rule):
    """ Return a list of NT names present in given rule. """
    # Collect the nonterminal names from the rule's pattern bits, in order.
    return [bt.nonterminal_name()
            for bt in rule.ipattern.bits
            if bt.is_nonterminal()]
e690e9187aaff0cf3138444db085e15adfda3847
24,460
def stopping_player(bot, state):
    """A Player that never moves: it always returns the bot's current
    position."""
    return bot.position
72628e39d26760eedc9a0e85a8279ac530ab851d
24,461
def check_continue(transformer: transformer_class.Transformer, check_md: dict, transformer_md: dict, full_md: dict) -> tuple:
    """Checks if conditions are right for continuing processing
    Arguments:
        transformer: instance of transformer class (unused here)
        check_md: request metadata; must provide a 'list_files' callable
        transformer_md: metadata associated with this transformer (unused)
        full_md: the full request metadata (unused)
    Return:
        Returns a tuple containing the return code for continuing or not, and
        an error message if there's an error
    """
    # Continue only when at least one raw '.bin' image is in the file list.
    have_file = False
    for one_file in check_md['list_files']():
        if one_file.endswith('.bin'):
            have_file = True
            break
    # Bug fix: '(0)' is just the int 0, not a tuple, contradicting the
    # documented tuple return; return a real one-element tuple so callers
    # can uniformly index the return code.
    return (0, ) if have_file else (-1, "Missing raw image bin file from list of files")
78348046acde489a129fc8a4426a9b11ee2e2238
24,462
def getFlatten(listToFlat):
    """
    Flatten a Maya selection list.

    :param listToFlat: anything, preferably list of strings
    :return: flattened list (list of strings)
    #sacred
    """
    # Remember the user's current selection so it can be restored afterwards.
    previous_selection = mc.ls(sl=True, fl=True)
    mc.select(cl=1)
    mc.select(listToFlat)
    flattened = mc.ls(sl=True, fl=True)
    mc.select(previous_selection)
    return flattened
91d1376d81140fd258c80bcc23cb220ce0f99926
24,463
def can_exit_room(state: State, slot: int) -> bool:
    """
    Return True if amphipod can escape a room because all amphipods are in their place
    Not exhaustive! If there are amphipods above it, it may still be stuck
    """
    amphipod = state[slot]
    assert amphipod != EMPTY_SLOT
    # Rooms hold 4 slots each; integer division maps a slot to its room index.
    room = slot // 4
    # The bottom slot of this room.
    bottom_amphipod = state[room * 4 + 3]
    # If the bottom occupant is not "home" (its type differs from the room
    # index), everything above it will eventually have to move out.
    if bottom_amphipod != room:
        return True
    assert bottom_amphipod != EMPTY_SLOT
    # Walk from the second-to-bottom slot up to the top slot of the room.
    for i in range(room * 4 + 2, room * 4 - 1, -1):
        if state[i] == EMPTY_SLOT:
            # Only correctly-placed amphipods below the empty space: stay put.
            return False
        if state[i] != bottom_amphipod:
            # A mismatched amphipod occupies the room, so this one may leave.
            return True
    return False
914881e90c2e9b357d49fb44d56b7f864b4973c0
24,464
def square_matrix(square):
    """
    Calculate the blurred pixel value x for a 3*3 blur window.

    :param square: 2D indexable of pixel values, at least 3x3; only the
        top-left 3x3 window is read.
    :return: integer mean (sum // 9) of the 3x3 window
    """
    # Sum exactly the first three rows and columns, matching the 3x3 kernel.
    tot_sum = sum(square[i][j] for i in range(3) for j in range(3))
    return tot_sum // 9
4f378736c19c33f104be462939b834ece403f713
24,465
def before_after_text(join_set, index, interval_list):
    """
    Extracts any preceding or following markup to be joined to an
    interval's text.
    """
    def _joinable(text):
        # Keep only the characters that belong to the join set.
        return ''.join(ch for ch in text if ch in join_set)

    before_text = ''
    after_text = ''
    # Only bother when there is some markup to join with at all.
    if join_set:
        if index > 0:
            before_text = _joinable(interval_list[index - 1][2])
        if index < len(interval_list) - 1:
            after_text = _joinable(interval_list[index + 1][2])
    return before_text, after_text
b2c63fe1e7ea5bb204e41b27bc79d2c81964369a
24,466
import os


def load_data(outputpath):
    """Load the numpy data as stored in directory outputpath.

    Parameters
    ----------
    outputpath : str
        directory where the numpy files are stored

    Returns
    -------
    X_train, y_train, X_val, y_val, X_test, y_test : numpy arrays
    """
    names = ('X_train', 'y_train', 'X_val', 'y_val', 'X_test', 'y_test')
    # Load each split from its '<name>.npy' file, in the documented order.
    return tuple(np.load(os.path.join(outputpath, name + '.npy'))
                 for name in names)
46f355fdafcc73b371514b3dcb5c32b6935f26a0
24,467
import socket
import ssl


def create_server_ssl(addr, port, backlog):
    """Create a TLS-wrapped listening server socket, register it with the
    event machinery, and return it.
    """
    listener = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
    listener.bind((addr, port))
    listener.listen(backlog)

    # Server-side TLS context with the system default certificates.
    context = ssl.SSLContext(ssl.PROTOCOL_TLS_SERVER)
    context.load_default_certs()
    wrapped = context.wrap_socket(
        listener, do_handshake_on_connect=False, server_side=True)

    sserver = SuperSocketSSL(wrapped)
    ServerSSL(sserver)
    sserver.add_map(SSL_ACCEPT, handles_wrapper)
    return sserver
3bcd3d8a157401f23c50e6c35fe9b8e45f4659d6
24,468
def other_ops(request):
    """ Other Operations View """
    # Status codes 0..5 map to these display names, in order.
    status_names = ('pending', 'active', 'done', 'cancelled', 'passed', 'failed')
    args = {name: OtherOperation.objects.filter(status=code).count()
            for code, name in enumerate(status_names)}
    args['other_ops'] = OtherOperation.objects.all()
    args['a'] = 'other-ops'
    return render(request, 'operations/other-ops.html', args)
727e620d0ba5798eb0bcdc31e31a831a9332e802
24,469
def distance_point_2_line(point, seg):
    """Finds the minimum distance and closest point between a point and a line

    Args:
        point ([float, float]): (x,y) point to test
        seg ([[float, float], [float, float]]): two points defining the line

    Returns:
        A list of two items:

        * Distance between the point and line
        * The (x,y) value on the line that is the closest point
    """
    dseg = seg[1] - seg[0]
    dpt = point - seg[0]
    # Orthogonal projection of the point onto the (infinite) line.
    proj = (np.dot(dpt, dseg) / np.dot(dseg, dseg)) * dseg
    # Bug fix: np.linalg.norm(dpt, proj) passed the projection vector as the
    # 'ord' argument; the distance is the norm of the residual dpt - proj.
    dist = np.linalg.norm(dpt - proj)
    return dist, seg[0] + proj
4627639f4b900b72a0b88104df44e498ef123cb4
24,470
def load_glove_from_file(glove_filepath):
    """
    Load the GloVe embeddings

    Args:
        glove_filepath (str): path to the glove embeddings file
    Returns:
        word_to_index (dict), embeddings (numpy.ndarray)
    """
    word_to_index = {}
    embeddings = []
    with open(glove_filepath, "r") as handle:
        # Each line looks like: "word num1 num2 ..."
        for row_index, row in enumerate(handle):
            parts = row.split(" ")
            word_to_index[parts[0]] = row_index
            embeddings.append(np.array([float(v) for v in parts[1:]]))
    return word_to_index, np.stack(embeddings)
30d8a0fb8e1b0728ae9943dd0f5c2387dbcdb778
24,471
def make_pd(space: gym.Space):
    """Create `ProbabilityDistribution` from gym.Space"""
    if isinstance(space, gym.spaces.Discrete):
        return CategoricalPd(space.n)
    if isinstance(space, gym.spaces.Box):
        # Only flat (1-D) continuous spaces are supported.
        assert len(space.shape) == 1
        return DiagGaussianPd(space.shape[0])
    if isinstance(space, gym.spaces.MultiBinary):
        return BernoulliPd(space.n)
    raise TypeError(space)
0849e947061221ba08bf113f6576c531ca2df2cd
24,472
import typing
import requests


def download_file_from_google_drive(
        gdrive_file_id: typing.AnyStr,
        destination: typing.AnyStr,
        chunk_size: int = 32768
) -> typing.AnyStr:
    """
    Downloads a file from google drive, bypassing the confirmation prompt.

    Args:
        gdrive_file_id: ID string of the file to download from google drive.
        destination: where to save the file.
        chunk_size: chunk size for gradual downloads.

    Returns:
        The path to the downloaded file.
    """
    # taken from this StackOverflow answer: https://stackoverflow.com/a/39225039
    url = "https://docs.google.com/uc?export=download"
    session = requests.Session()
    response = session.get(url, params={'id': gdrive_file_id}, stream=True)
    # Google serves a confirmation page for large files; the token needed to
    # bypass it is delivered in a 'download_warning' cookie.
    token = next((value for key, value in response.cookies.items()
                  if key.startswith('download_warning')), None)
    if token:
        response = session.get(
            url, params={'id': gdrive_file_id, 'confirm': token}, stream=True)
    with open(destination, "wb") as f:
        for chunk in response.iter_content(chunk_size):
            if chunk:  # filter out keep-alive new chunks
                f.write(chunk)
    return destination
29cdcc509aa21a6f2ae14ed18f2c0523bbdbd5a4
24,473
import inspect
import functools


def attach(func, params):
    """
    Given a function and a namespace of possible parameters, bind any params
    matching the signature of the function to that function.
    """
    signature = inspect.signature(func)
    # Keep only the params that appear in the function's signature.
    matching = Projection(signature.parameters.keys(), params)
    return functools.partial(func, **matching)
35116b9b3be12f1e19789e2b1c36b7c34b6138ea
24,474
def question_route():
    """Question-bank page view."""
    # Get the current user's uid.
    useruid = current_user.get_id()
    # Try to keep the session active; log the user out if that fails.
    if not keep_active(useruid):
        logout_user()
    return question_page(useruid)
1b752709aa8264fdc19aaa44f2233b2e0382e1b5
24,475
import base64


def generate_qrcode(url: str, should_cache: bool = True) -> str:
    """
    Generate a QR code (as data URI) to a given URL.

    :param url: the url the QR code should reference
    :param should_cache: whether or not the QR code should be cached
    :return: a data URI to a base64 encoded SVG image
    """
    # Serve a previously generated data URI when caching is enabled.
    if should_cache and url in qrcode_cache:
        return qrcode_cache[url]

    svg_image = qrcode.make(url, image_factory=qrcode.image.svg.SvgPathFillImage)
    stream = BytesIO()
    svg_image.save(stream)
    stream.seek(0)
    encoded = base64.b64encode(stream.read()).decode('utf-8')
    qrcode_url = 'data:image/svg+xml;base64,' + encoded

    if should_cache:
        qrcode_cache[url] = qrcode_url
    return qrcode_url
ab89cf09d7d50217960f48f75ff17b1d46513f52
24,476
def get_conflict_fks_versions(obj, version, revision, exclude=None): """ Lookup for deleted FKs for obj, expects version to be obj version from the same revision. If exclude provided - excludes based on that from versions to check. Expects exclude to be a dict of filter string, value i.e {'pk': 1}. Returns versions for deleted fks. """ # TODO: get all conflicts, return a tuple/dict with required and not. fk_relations = get_fk_models(obj) versions_to_check = [] for relation in fk_relations: found_versions = revision.version_set.exclude( pk=version.pk).filter(content_type=relation['content_type']) versions_to_check += list(found_versions.values_list('pk', flat=True)) # convert to versions queryset instead of a list versions_to_check_qs = revision.version_set.filter(pk__in=versions_to_check) if exclude is not None: versions_to_check_qs = versions_to_check_qs.exclude(**exclude) conflict_fks_versions = get_deleted_objects_versions( versions_to_check_qs) return conflict_fks_versions
2e4e3b8842b1c17cc6973143254f39ae4a42da68
24,477
def hz2mel(f):
    """Convert an array of frequency in Hz into mel.

    Uses the O'Shaughnessy formula: mel = 1127.01048 * ln(1 + f / 700).
    """
    ratio = f / 700 + 1
    return 1127.01048 * np.log(ratio)
84522419c972bf9b78c9931aef871f97a8a0d292
24,478
def figure(figsize=None, logo="iem", title=None, subtitle=None, **kwargs):
    """Return an opinionated matplotlib figure.

    Parameters:
      figsize (width, height): in inches for the figure, defaults to something
        good for twitter.
      dpi (int): dots per inch
      logo (str): Currently, 'iem', 'dep' is supported. `None` disables.
      title (str): Title to place on the figure.
      subtitle (str): SubTitle to place on the figure.
    """
    if figsize is None:
        figsize = TWITTER_RESOLUTION_INCH
    fig = plt.figure(figsize=figsize, **kwargs)
    draw_logo(fig, logo)
    # Bounding box for the title: [left, right, bottom, top] in figure coords.
    titlebounds = [0.1, 0.9, 0.91, 0.98]
    if subtitle is not None:
        # Shrink the title's top so the subtitle fits below it.
        titlebounds[2] = 0.94
    fitbox(fig, title, *titlebounds)
    # NOTE(review): fitbox is also called when subtitle is None — presumably
    # fitbox tolerates None text; confirm against its implementation.
    fitbox(fig, subtitle, 0.1, 0.9, 0.91, 0.935)
    return fig
fd89e550a891ccf6f639f8c981215aa25fa0ad06
24,479
def run_epoch(session, model, eval_op=None, verbose=False):
    """Runs the model on the given data.

    Iterates model.input.epoch_size steps, threading the RNN state between
    steps, and accumulates cost and accuracy. Returns (total cost, mean
    accuracy, output_y) where output_y holds one 0/1 label per example
    (1 when the step's y_new first column is 0).
    """
    costs = 0.0
    iters = 0
    state = session.run(model.initial_state)
    # Graph nodes fetched on every step.
    fetches = {
        "cost": model.cost,
        "final_state": model.final_state,
        "accuracy":model.accuracy,
        "y_new":model.y_new,
        "y_target":model.y_target
    }
    accuracys = 0.0
    if eval_op is not None:
        # Also run the training/eval op (e.g. the optimizer) each step.
        fetches["eval_op"] = eval_op
    output_y = []
    for step in range(model.input.epoch_size):
        feed_dict = {}
        # Feed the previous step's final state back in as the initial state.
        feed_dict[model.initial_state] = state
        vals = session.run(fetches, feed_dict)
        cost = vals["cost"]
        state = vals["final_state"]
        accuracy = vals["accuracy"]
        y_new = vals["y_new"]
        y_target = vals["y_target"]
        costs += cost
        accuracys += accuracy
        #iters += model.input.num_steps
        iters = iters + 1
        # Convert the step's predictions into 0/1 labels, one per example.
        for i in range(model.input.batch_size):
            if y_new[i,0] == 0:
                output_y.append(1)
            else:
                output_y.append(0)
    return costs, accuracys / iters, output_y
a69ed33e930245118e0d4054a10d6c1fd61cc0da
24,480
from typing import Any


def is_scoo(x: Any) -> bool:
    """check if an object is an `SCoo` (a SAX sparse S-matrix representation in COO-format)"""
    # An SCoo is represented as a 4-element tuple or list.
    if not isinstance(x, (tuple, list)):
        return False
    return len(x) == 4
96d3937d9884198b75440e3de75949c713b8e16a
24,481
import base64
import os


def createNonce():
    """Creates a new nonce, stores it in the session, and returns it."""
    # 32 random bytes, base64-encoded so the nonce is transport-safe.
    nonce = base64.b64encode(os.urandom(32))
    flask_session['nonce'] = nonce
    return nonce
0a4135537d9bce3a35cb2ee681f16b2dffda2d13
24,482
def project_rename_folder(object_id, input_params={}, always_retry=False, **kwargs):
    """
    Invokes the /project-xxxx/renameFolder API method.

    For more info, see: https://documentation.dnanexus.com/developer/api/data-containers/folders-and-deletion#api-method-class-xxxx-renamefolder
    """
    resource = '/%s/renameFolder' % object_id
    return DXHTTPRequest(resource, input_params, always_retry=always_retry, **kwargs)
60bfe648eb9846bf06125fd65436e9c7cf5c2fd6
24,483
def full(shape, fill_value, dtype=None):
    """Returns a new array of given shape and dtype, filled with a given value.

    This function currently does not support ``order`` option.

    Args:
        shape (tuple of ints): Dimensionalities of the array.
        fill_value: A scalar value to fill a new array.
        dtype: Data type specifier.

    Returns:
        cupy.ndarray: An array filled with ``fill_value``.

    .. seealso:: :func:`numpy.full`
    """
    # TODO(beam2d): Support ordering option
    out = empty(shape, dtype)
    out.fill(fill_value)
    return out
99d1505382395c4990b35115edfaef267d00f3f9
24,484
def is_seq(a):
    """Return `True` if `a` is a Z3 sequence expression.

    >>> print (is_seq(Unit(IntVal(0))))
    True
    >>> print (is_seq(StringVal("abc")))
    True
    """
    result = isinstance(a, SeqRef)
    return result
1429fb3fd800a3688700a62dd0665df7536b56d9
24,485
from typing import TypeVar

# Fix: `T` was previously imported from `re` (the re.TEMPLATE flag), which
# is not a valid type variable; declare a proper TypeVar instead.
T = TypeVar("T")


def identity(__obj: T, /) -> T:
    """Return the given object unchanged (identity function)."""
    return __obj
8c96839e48e1ec270bd57616abcc3234b6f0958f
24,486
def lines_in_file(filename: str) -> int:
    """
    Count the number of lines in a file

    :param filename: A string containing the relative or absolute path to a file
    :returns: The number of lines in the file
    """
    # Stream line-by-line instead of materializing the whole file in memory
    # (f.readlines() loads every line just to take the list's length).
    with open(filename, "r") as f:
        return sum(1 for _ in f)
d71b5c8de1b4eb9a45988e06c17a129f4a19f221
24,487
import argparse
# Fix: the path helpers used below (isfile/split/exists/isdir) live in
# os.path; the original imported `path` from `sys`, which is a list and has
# none of them, and `makedirs` was never imported at all.
from os import path, makedirs


def readargs():
    """
    Read input arguments if run as separate program

    Returns
    -------
    tuple: (input file path, output directory, debug flag)
    """
    parser = argparse.ArgumentParser(description=(
        'Convert data from WiPL format to binary SimRadar-compatible format.'
    ))
    parser.add_argument('input', type=str,
                        help='[REQUIRED] File to be converted')
    parser.add_argument('--outdir', '-o', type=str,
                        help='Output directory for rcs files, default as location of input file',
                        default='.')
    parser.add_argument('--debug', '-d', type=str,
                        help=('[True/False] Flag for debug mode - writes file back out again in human readable'
                              + ' format after conversion to allow validation of converted data'),
                        default='False')
    args = parser.parse_args()

    infile = args.input
    if not path.isfile(infile):
        raise ArgumentsError("Could not find file {}\n If file exists try again with absolute path rather than relative path")

    path_out = args.outdir
    if path_out == '.':
        # Default the output directory to the input file's directory.
        inpath = path.split(infile)[0]
        if inpath != '':
            path_out = inpath
    if not path.exists(path_out):
        print('Directory to write rcs files to'
              + ' does not exist\nAttempting to create:')
        try:
            makedirs(path_out)
        except OSError:
            raise FatalError('Could not create directory ' + path_out + '\n')
        else:
            print("Success!\n")
    if path_out and not path.isdir(path_out):
        raise ArgumentsError(path_out + ' exists but is not a directory\n')

    debug_str = args.debug
    if debug_str.lower() == "true" or debug_str.lower() == "t":
        debug = True
    elif debug_str.lower() == "false" or debug_str.lower() == "f":
        debug = False
    else:
        raise ArgumentsError("Value for debug should be True/False. Value read was {}".format(debug_str))

    return (infile, path_out, debug)
51cac24d106f8776e27aa78841870926b05c1ef7
24,488
import click


def validate_input_parameters(live_parameters, original_parameters):
    """Return validated input parameters."""
    # Iterate over a snapshot of the keys so entries can be removed from
    # live_parameters while looping.
    for parameter in list(live_parameters):
        if parameter not in original_parameters:
            click.echo(
                click.style('Given parameter - {0}, is not in '
                            'reana.yaml'.format(parameter), fg='red'),
                err=True)
            del live_parameters[parameter]
    return live_parameters
226b95d0d9b42e586e395107def239d4e61c057a
24,489
def _upper_zero_group(match: ty.Match, /) -> str: """ Поднимает все символы в верхний регистр у captured-группы `let`. Используется для конвертации snake_case в camelCase. Arguments: match: Регекс-группа, полученная в результате `re.sub` Returns: Ту же букву из группы, но в верхнем регистре """ return match.group("let").upper()
311dbc41c17b1c6fde39b30d8126eb4c867d7a6f
24,490
def _concatenate_shapes(shapes, axis):
    """Compute the concatenated shape and per-array slice prefixes.

    For arrays with the given ``shapes``, the returned ``shape`` satisfies
    ``shape == concatenate(arrs, axis).shape``, and each element of
    ``slice_prefixes`` is a one-element tuple ``(slice(start, end),)``
    locating the corresponding array along ``axis`` inside the concatenated
    result, i.e. ``ret[(slice(None),) * axis + sl_i] == arrs[i]``.

    The prefixes are returned as tuples so the recursive blocking algorithm
    can cheaply prepend them to the rest of a slice computed deeper in the
    recursion, without building a new tuple every time.

    Raises
    ------
    ValueError
        If the shapes disagree on any axis other than ``axis``.
    """
    sizes_along_axis = [s[axis] for s in shapes]

    # Use the first shape as the template all others must match off-axis.
    template = shapes[0]
    leading = template[:axis]
    trailing = template[axis + 1:]

    for s in shapes:
        if s[:axis] != leading or s[axis + 1:] != trailing:
            raise ValueError(
                "Mismatched array shapes in block along axis {}.".format(axis))

    shape = leading + (sum(sizes_along_axis),) + trailing

    ends = _accumulate(sizes_along_axis)
    starts = [0] + ends
    slice_prefixes = [(slice(lo, hi),) for lo, hi in zip(starts, ends)]
    return shape, slice_prefixes
2ca93f3c656f1629fa3fdb7f5c8cb325abd40cf2
24,491
import re


def md_changes(seq, md_tag):
    """Recreate, as far as the MD tag can represent, the reference sequence
    of an aligned segment.

    Note: Used in conjunction with ``cigar_changes`` to recreate the
    complete reference sequence.

    Args:
        seq (str): aligned segment sequence
        md_tag (str): MD tag for associated sequence

    Returns:
        str: a version of the aligned segment's reference sequence with the
        changes recorded in the MD tag applied

    Raises:
        ValueError: if MD tag is None

    Example:
        >>> md_changes('CTTATATTGGCCTT', '3C4AT4')
        'CTTCTATTATCCTT'
    """
    if md_tag is None:
        raise ValueError('No MD tag found or given for sequence')

    pieces = []
    cursor = 0
    token_pattern = r'(?P<matches>\d+)|(?P<del>\^\w+?(?=\d))|(?P<sub>\w)'
    for token in re.finditer(token_pattern, md_tag):
        kind = token.lastgroup  # name of whichever alternative matched
        if kind == 'matches':
            # Run of bases identical to the reference: copy from seq.
            run = int(token.group('matches'))
            pieces.append(seq[cursor:cursor + run])
            cursor += run
        elif kind == 'del':
            # Bases deleted from the read follow the '^' marker; they exist
            # only in the reference, so the cursor does not advance.
            pieces.append(token.group('del')[1:])
        elif kind == 'sub':
            # Single-base substitution: take the reference base from the tag.
            pieces.append(token.group('sub'))
            cursor += 1
    return ''.join(pieces)
f8591d0084f6c10c9bbd1a39b3f9e13cfe952e68
24,492
def _get_partitions(dev):
    """Return partition information (num, size, type) for a device."""
    dev_path = utils.make_dev_path(dev)
    out, _err = utils.execute('parted', '--script', '--machine', dev_path,
                              'unit s', 'print', run_as_root=True)
    # parted --machine output: two header records, then one record per
    # partition, each ';'-terminated with ':'-separated fields.
    records = [ln for ln in out.split('\n') if ln]
    partitions = []
    LOG.debug("Partitions:")
    for record in records[2:]:
        num, start, end, size, fstype, name, flags = record.rstrip(';').split(':')
        num = int(num)
        start = int(start.rstrip('s'))
        end = int(end.rstrip('s'))  # parsed for validation; not reported
        size = int(size.rstrip('s'))
        LOG.debug("  %(num)s: %(fstype)s %(size)d sectors",
                  {'num': num, 'fstype': fstype, 'size': size})
        partitions.append((num, start, size, fstype, name, flags))
    return partitions
35f671609b7776166263163d712ecd85c9ceb7d2
24,493
def get_cs_token(accesskey="", secretkey="", identity_url="", tenant_id=""):
    """Exchange an access/secret key pair for a Keystone token.

    Posts apiAccessKeyCredentials to ``identity_url``/tokens and, when the
    returned token belongs to ``tenant_id``, returns its token id.

    Returns False on HTTP failure or tenant mismatch.
    """
    payload = json.dumps({
        'auth': {
            'apiAccessKeyCredentials': {
                'accessKey': accesskey,
                'secretKey': secretkey
            },
            "tenantId": tenant_id
        }
    })
    request = urllib2.Request(identity_url + "/tokens", payload,
                              {'Content-type': 'application/json'})
    try:
        raw_response = urllib2.urlopen(request).read()
    except urllib2.HTTPError as e:
        log("HTTP Error: " + str(e))
        return False

    token_info = json.loads(raw_response)
    if token_info['access']['token']['tenant']['id'] == tenant_id:
        return token_info['access']['token']['id']
    return False
a14324651039687bb52e47f4068fcee74c34aa65
24,494
def get_auto_scaling_group(asg, asg_name: str):
    """Get boto3 Auto Scaling Group by name or raise an exception.

    Args:
        asg: boto3 Auto Scaling client used to look up the group.
        asg_name: Name of the Auto Scaling Group.

    Returns:
        The group description dict for ``asg_name``.

    Raises:
        ValueError: If no group with the given name exists.  (Narrowed from
            a bare ``Exception``; still caught by existing
            ``except Exception`` callers.)
    """
    result = asg.describe_auto_scaling_groups(AutoScalingGroupNames=[asg_name])
    groups = result["AutoScalingGroups"]
    if not groups:
        raise ValueError("Auto Scaling Group {} not found".format(asg_name))
    return groups[0]
07176e538cdb265ae86b16a5d36bf1b274f45c19
24,495
def guiraud_r(txt_len: int, vocab_size: int) -> np.float64:
    """Guiraud's R (root type-token ratio, Guiraud 1960).

    A simple length correction of the plain TTR: vocabulary size divided by
    the square root of the text length.
    """
    root_length = np.sqrt(txt_len)
    return vocab_size / root_length
9c054d6d741fabb64ec0659b280474385b5cfa79
24,496
def serialize_dagster_namedtuple(nt: tuple, **json_kwargs) -> str:
    """Serialize *nt*, a whitelisted named tuple, to a JSON-encoded string."""
    check.tuple_param(nt, "nt")
    return _serialize_dagster_namedtuple(
        nt, whitelist_map=_WHITELIST_MAP, **json_kwargs
    )
fbe6606d0001d425593c0f4f880a6b314f69b94b
24,497
def join_epiweek(year, week):
    """Combine a (year, week) pair into a single epiweek integer.

    For example ``join_epiweek(2020, 5)`` -> ``202005``.
    """
    return 100 * year + week
fdbc50f8a953ef7307e9558019b3c2b50bc65be4
24,498
def get_or_create_api_key(datastore: data_store.DataStore, project_id: str) -> str:
    """Return API key of existing project or create a new project and API key.

    If the project exists, return its API key; otherwise create a new
    project with the provided project ID and return its freshly generated
    API key.

    Args:
        datastore: The datastore used for reading / writing the project.
        project_id: The ID of the project to get or write.

    Returns:
        The API key associated with the project.
    """
    try:
        existing = datastore.read_by_proto_ids(project_id=project_id)
    except data_store.NotFoundError:
        # No such project yet: mint a key and persist a new project record.
        fresh_key = unique_id.generate_base64_id()
        new_project = data_store_pb2.Project(
            project_id=project_id, name=project_id, api_key=fresh_key)
        datastore.write(new_project)
        return fresh_key
    return existing.api_key
2cb5b04dcf44b0e39d171683a0bd184d582eaf34
24,499