content
stringlengths
22
815k
id
int64
0
4.91M
def render_heat_table(frame: pd.DataFrame) -> None: """ renders a dataframe as a heatmap :param frame: the frame to render :return: None """ cm = mpl.cm.get_cmap('RdYlGn') styled = frame.style.background_gradient(cmap=cm, axis=0).format('{:.2f}').set_properties( **{'text-align': 'center'}) render_table(styled)
35,800
def test_arguments(): """Test arguments passed to bermuda.""" args = get_arguments(['--config', 'path/to/config']) config_dir = args.config assert config_dir == 'path/to/config'
35,801
def reservation_rollback(context, reservations, project_id=None, user_id=None):
    """Roll back quota reservations."""
    # Pure delegation to the configured backend implementation.
    return IMPL.reservation_rollback(
        context,
        reservations,
        project_id=project_id,
        user_id=user_id,
    )
35,802
def encrypt( security_control: SecurityControlField, system_title: bytes, invocation_counter: int, key: bytes, plain_text: bytes, auth_key: bytes, ) -> bytes: """ Encrypts bytes according the to security context. """ if not security_control.encrypted and not security_control.authenticated: raise NotImplementedError("encrypt() only handles authenticated encryption") if len(system_title) != 8: raise ValueError(f"System Title must be of lenght 8, not {len(system_title)}") # initialization vector is 12 bytes long and consists of the system_title (8 bytes) # and invocation_counter (4 bytes) iv = system_title + invocation_counter.to_bytes(4, "big") # Making sure the keys are of correct length for specified security suite validate_key(security_control.security_suite, key) validate_key(security_control.security_suite, auth_key) # Construct an AES-GCM Cipher object with the given key and iv. Allow for # truncating the auth tag encryptor = Cipher( algorithms.AES(key), modes.GCM(initialization_vector=iv, tag=None, min_tag_length=TAG_LENGTH), ).encryptor() # associated_data will be authenticated but not encrypted, # it must also be passed in on decryption. associated_data = security_control.to_bytes() + auth_key encryptor.authenticate_additional_data(associated_data) # Encrypt the plaintext and get the associated ciphertext. # GCM does not require padding. ciphertext = encryptor.update(plain_text) + encryptor.finalize() # dlms uses a tag lenght of 12 not the default of 16. Since we have set the minimum # tag length to 12 it is ok to truncated the tag down to 12 bytes. tag = encryptor.tag[:TAG_LENGTH] return ciphertext + tag
35,803
def leftmost_turn(((x0, y0), (x1, y1)), (x, y), zs): """Find the line segment intersecting at the leftmost angle relative to initial segment. Arguments: (x0, y0) – where we started (x1, x2) – direction travelling in (x, y) – where intersected one or more alternative line segments zs – set of points definign direction to move in """ if len(zs) == 1: return (x, y), zs.pop() theta = atan2(y1 - y, x1 - x) # Direction currently headed. def fun((xn, yn)): phi = atan2(yn - y, xn - x) - theta if phi < -pi: phi += 2 * pi elif phi > pi: phi -= 2 * pi # Tie-breaker is length of segment: len2 = (yn - y) * (yn - y) + (xn - x) * (xn - x) return phi, len2 zm = max(zs, key=fun) return (x, y), zm
35,804
def _merge_timeseries(rows, cols, params): """ Merge time series output """ log.info("Merging timeseries output") shape, tiles, ifgs_dict = _merge_setup(rows, cols, params) # load the first tsincr file to determine the number of time series tifs tsincr_file = join(params[cf.TMPDIR], 'tsincr_0.npy') tsincr = np.load(file=tsincr_file) # pylint: disable=no-member no_ts_tifs = tsincr.shape[2] # create 2 x no_ts_tifs as we are splitting tsincr and tscuml to all processes. process_tifs = mpiops.array_split(range(2 * no_ts_tifs)) # depending on nvelpar, this will not fit in memory # e.g. nvelpar=100, nrows=10000, ncols=10000, 32bit floats need 40GB memory # 32 * 100 * 10000 * 10000 / 8 bytes = 4e10 bytes = 40 GB # the double for loop helps us overcome the memory limit log.info('Process {} writing {} timeseries tifs of ' 'total {}'.format(mpiops.rank, len(process_tifs), no_ts_tifs * 2)) for i in process_tifs: if i < no_ts_tifs: tscum_g = assemble_tiles(shape, params[cf.TMPDIR], tiles, out_type='tscuml', index=i) _save_merged_files(ifgs_dict, params[cf.OUT_DIR], tscum_g, out_type='tscuml', index=i, savenpy=params["savenpy"]) else: i %= no_ts_tifs tsincr_g = assemble_tiles(shape, params[cf.TMPDIR], tiles, out_type='tsincr', index=i) _save_merged_files(ifgs_dict, params[cf.OUT_DIR], tsincr_g, out_type='tsincr', index=i, savenpy=params["savenpy"]) mpiops.comm.barrier() log.debug('Process {} finished writing {} timeseries tifs of ' 'total {}'.format(mpiops.rank, len(process_tifs), no_ts_tifs * 2))
35,805
def maya_window(): """Get Maya MainWindow as Qt. Returns: QtWidgets.QWidget: Maya main window as QtObject """ return to_qwidget("MayaWindow")
35,806
def soap2Dict(soapObj): """A recursive version of sudsobject.asdict""" if isinstance(soapObj, sudsobject.Object): return {k: soap2Dict(v) for k, v in soapObj} elif isinstance(soapObj, list): return [soap2Dict(v) for v in soapObj] return soapObj
35,807
def generate_sequential_BAOAB_string(force_group_list, symmetric=True): """Generate BAOAB-like schemes that break up the "V R" step into multiple sequential updates E.g. force_group_list=(0,1,2), symmetric=True --> "V0 R V1 R V2 R O R V2 R V1 R V0" force_group_list=(0,1,2), symmetric=False --> "V0 R V1 R V2 R O V0 R V1 R V2 R" """ VR = [] for i in force_group_list: VR.append("V{}".format(i)) VR.append("R") if symmetric: return " ".join(VR + ["O"] + VR[::-1]) else: return " ".join(VR + ["O"] + VR)
35,808
def _get_active_tab(visible_tabs, request_path): """ return the tab that claims the longest matching url_prefix if one tab claims '/a/{domain}/data/' and another tab claims '/a/{domain}/data/edit/case_groups/' then the second tab wins because it's a longer match. """ matching_tabs = sorted( (url_prefix, tab) for tab in visible_tabs for url_prefix in tab.url_prefixes if request_path.startswith(url_prefix) ) if matching_tabs: _, tab = matching_tabs[-1] return tab
35,809
def path_normalize(path, target_os=None): """Normalize path (like os.path.normpath) for given os. >>> from piecutter.engines.jinja import path_normalize >>> path_normalize('foo/bar') 'foo/bar' >>> path_normalize('foo/toto/../bar') 'foo/bar' Currently, this is using os.path, i.e. the separator and rules for the computer running Jinja2 engine. A NotImplementedError exception will be raised if 'os' argument differs from 'os.name'. >>> import os >>> os.name == 'posix' # Sorry if you are running tests on another OS. True >>> path_normalize('foo/bar', target_os='nt') # Doctest: +ELLIPSIS Traceback (most recent call last): ... NotImplementedError: Cannot join path with "nt" style. Host OS is "posix". """ if target_os and target_os is not os.name: raise NotImplementedError('Cannot join path with "{target}" style. ' 'Host OS is "{host}".'.format( target=target_os, host=os.name)) return os.path.normpath(path)
35,810
def fit_scale_heights(data, masks, min_lat = None, max_lat = None, deredden = False, fig_names = None, return_smoothed = False, smoothed_width = None, xlim = None, ylim = None, robust = True, n_boot = 10000): """ Fits scale height data and returns slopes Parameters ---------- data: `skySurvey` WHAM skySurvey object of full sky (requires track keyword), or spiral arm section masks: `list like` longitude masks to use min_lat: `u.Quantity` min latitude to fit max_lat: `u.Quantity` max latitude to fit deredden: `bool` if True, also fits dereddened slopes fig_names: `str` if provided, saves figures following this name return_smoothed: `bool` if True, returns smoothed longitude and slope estimates smoothed_width: `u.Quantity` width to smooth data to in longitude robust: `bool` if True, uses stats.models.robust_linear_model n_boot: `int` only if robust = True number of bootstrap resamples """ # Default values if min_lat is None: min_lat = 5*u.deg elif not hasattr(min_lat, "unit"): min_lat *= u.deg if max_lat is None: max_lat = 35*u.deg elif not hasattr(max_lat, "unit"): max_lat *= u.deg if smoothed_width is None: smoothed_width = 5*u.deg elif not hasattr(smoothed_width, "unit"): smoothed_width *= u.deg #initialize data arrays slopes_pos = [] slopes_neg = [] slopes_pos_dr = [] slopes_neg_dr = [] intercept_pos = [] intercept_neg = [] intercept_pos_dr = [] intercept_neg_dr = [] slopes_pos_err = [] slopes_neg_err = [] slopes_pos_dr_err = [] slopes_neg_dr_err = [] intercept_pos_err = [] intercept_neg_err = [] intercept_pos_dr_err = [] intercept_neg_dr_err = [] median_longitude = [] median_distance = [] for ell2 in range(len(masks)): xx = data["tan(b)"][masks[ell2]] yy = np.log(data["INTEN"][masks[ell2]]) nan_mask = np.isnan(yy) nan_mask |= np.isinf(yy) if deredden: zz = np.log(data["INTEN_DERED"][masks[ell2]]) nan_mask_z = np.isnan(zz) nan_mask_z |= np.isinf(zz) median_longitude.append(np.median(data["GAL-LON"][masks[ell2]])) if deredden: 
median_distance.append(np.median(data["DISTANCE"][masks[ell2]])) y_min = np.tan(min_lat) y_max = np.tan(max_lat) if not robust: if hasattr(stats, "siegelslopes"): slope_estimator = stats.siegelslopes else: logging.warning("Installed version of scipy does not have the siegelslopes method in scipy.stats!") slope_estimator = stats.theilslopes siegel_result_pos = slope_estimator(yy[(xx > y_min) & (xx < y_max) & ~nan_mask], xx[(xx > y_min) & (xx < y_max) & ~nan_mask]) siegel_result_neg = slope_estimator(yy[(xx < -y_min) & (xx > -y_max) & ~nan_mask], xx[(xx < -y_min) & (xx > -y_max) & ~nan_mask]) if deredden: siegel_result_pos_dr = slope_estimator(zz[(xx > y_min) & (xx < y_max) & ~nan_mask_z], xx[(xx > y_min) & (xx < y_max) & ~nan_mask_z]) siegel_result_neg_dr = slope_estimator(zz[(xx < -y_min) & (xx > -y_max) & ~nan_mask_z], xx[(xx < -y_min) & (xx > -y_max) & ~nan_mask_z]) slopes_pos.append(siegel_result_pos[0]) slopes_neg.append(siegel_result_neg[0]) intercept_pos.append(siegel_result_pos[1]) intercept_neg.append(siegel_result_neg[1]) if deredden: slopes_pos_dr.append(siegel_result_pos_dr[0]) slopes_neg_dr.append(siegel_result_neg_dr[0]) intercept_pos_dr.append(siegel_result_pos_dr[1]) intercept_neg_dr.append(siegel_result_neg_dr[1]) if fig_names is not None: figure_name = "{0}_{1}.png".format(fig_names, ell2) if xlim is None: xlim = np.array([-0.9, 0.9]) if ylim is None: ylim = np.array([-4.6, 3.2]) fig = plt.figure() ax = fig.add_subplot(111) ax2 = ax.twiny() ax.scatter(xx, yy, color ="k", alpha = 0.8) if deredden: ax.scatter(xx, zz, color ="grey", alpha = 0.8) ax.set_xlabel(r"$\tan$(b)", fontsize= 12) ax.set_ylabel(r"$\log$($H\alpha$ Intensity / R)", fontsize= 12) ax.set_title(r"${0:.1f} < l < {1:.1f}$".format(data["GAL-LON"][masks[ell2]].min(), data["GAL-LON"][masks[ell2]].max()), fontsize = 14) ax2.plot(np.degrees(np.arctan(xlim)), np.log([0.1,0.1]), ls = ":", lw = 1, color = "k", label = "0.1 R") ax2.fill_between([-min_lat, min_lat]*u.deg, [ylim[0], ylim[0]], 
[ylim[1], ylim[1]], color = pal[1], alpha = 0.1, label = r"$|b| < 5\degree$") line_xx = np.linspace(y_min, y_max, 10) line_yy_pos = siegel_result_pos[0] * line_xx + siegel_result_pos[1] line_yy_neg = siegel_result_neg[0] * -line_xx + siegel_result_neg[1] ax.plot(line_xx, line_yy_pos, color = "r", lw = 3, alpha = 0.9, label = r"$H_{{n_e^2}} = {0:.2f} D$".format(1/-siegel_result_pos[0])) ax.plot(-line_xx, line_yy_neg, color = "b", lw = 3, alpha = 0.9, label = r"$H_{{n_e^2}} = {0:.2f} D$".format(1/siegel_result_neg[0])) if deredden: line_yy_pos_dr = siegel_result_pos_dr[0] * line_xx + siegel_result_pos_dr[1] line_yy_neg_dr = siegel_result_neg_dr[0] * -line_xx + siegel_result_neg_dr[1] ax.plot(line_xx, line_yy_pos_dr, color = "r", lw = 3, alpha = 0.9, ls = "--", label = r"Dered: $H_{{n_e^2}} = {0:.2f} D$".format(1/-siegel_result_pos_dr[0])) ax.plot(-line_xx, line_yy_neg_dr, color = "b", lw = 3, alpha = 0.9, ls = "--", label = r"Dered: $H_{{n_e^2}} = {0:.2f} D$".format(1/siegel_result_neg_dr[0])) ax.set_xlim(xlim) ax.set_ylim(ylim) ax2.set_xlabel(r"$b$ (deg)", fontsize = 12) ax2.set_xlim(np.degrees(np.arctan(xlim))) ax.legend(fontsize = 12, loc = 1) ax2.legend(fontsize = 12, loc = 2) plt.tight_layout() plt.savefig(figure_name, dpi = 300) del(fig) plt.close() results = { "median_longitude":np.array(median_longitude), "slopes_pos":np.array(slopes_pos), "slopes_neg":np.array(slopes_neg), "intercept_pos":np.array(intercept_pos), "intercept_neg":np.array(intercept_neg) } if deredden: results["median_distance"] = np.array(median_distance), results["slopes_pos_dr"] = np.array(slopes_pos_dr) results["slopes_neg_dr"] = np.array(slopes_neg_dr) results["intercept_pos_dr"] = np.array(intercept_pos_dr) results["intercept_neg_dr"] = np.array(intercept_neg_dr) else: yy_pos = yy[(xx > y_min) & (xx < y_max) & ~nan_mask] xx_pos = xx[(xx > y_min) & (xx < y_max) & ~nan_mask] yy_neg = yy[(xx < -y_min) & (xx > -y_max) & ~nan_mask] xx_neg = xx[(xx < -y_min) & (xx > -y_max) & ~nan_mask] if 
((len(yy_pos) < 5) | (len(yy_neg) < 5)): slopes_pos.append(np.mean(boot_pos[:,1], axis = 0)) slopes_neg.append(np.mean(boot_neg[:,1], axis = 0)) slopes_pos_err.append(np.std(boot_pos[:,1], axis = 0)) slopes_neg_err.append(np.std(boot_neg[:,1], axis = 0)) intercept_pos.append(np.mean(boot_pos[:,0], axis = 0)) intercept_neg.append(np.mean(boot_neg[:,0], axis = 0)) intercept_pos_err.append(np.std(boot_pos[:,0], axis = 0)) intercept_neg_err.append(np.std(boot_neg[:,0], axis = 0)) else: if deredden: zz_dr_pos = zz[(xx > y_min) & (xx < y_max) & ~nan_mask_z] xx_dr_pos = xx[(xx > y_min) & (xx < y_max) & ~nan_mask_z] zz_dr_neg = zz[(xx < -y_min) & (xx > -y_max) & ~nan_mask_z] xx_dr_neg = xx[(xx < -y_min) & (xx > -y_max) & ~nan_mask_z] def slope_int_estimator_pos_dr(inds, YY = zz_dr_pos, XX = xx_dr_pos): """ estimate slope using sm.RLM """ XX = XX[inds] YY = YY[inds] XX = sm.add_constant(XX) res = sm.RLM(YY, XX, M=sm.robust.norms.HuberT()).fit() return res.params def slope_int_estimator_neg_dr(inds, YY = zz_dr_neg, XX = xx_dr_neg): """ estimate slope using sm.RLM """ XX = XX[inds] YY = YY[inds] XX = sm.add_constant(XX) res = sm.RLM(YY, XX, M=sm.robust.norms.HuberT()).fit() return res.params def slope_int_estimator_pos(inds, YY = yy_pos, XX = xx_pos): """ estimate slope using sm.RLM """ XX = XX[inds] YY = YY[inds] XX = sm.add_constant(XX) res = sm.RLM(YY, XX, M=sm.robust.norms.HuberT()).fit() return res.params def slope_int_estimator_neg(inds, YY = yy_neg, XX = xx_neg): """ estimate slope using sm.RLM """ XX = XX[inds] YY = YY[inds] XX = sm.add_constant(XX) res = sm.RLM(YY, XX, M=sm.robust.norms.HuberT()).fit() return res.params boot_pos = bootstrap(np.arange(len(yy_pos)), func = slope_int_estimator_pos, n_boot = n_boot) boot_neg = bootstrap(np.arange(len(yy_neg)), func = slope_int_estimator_neg, n_boot = n_boot) slopes_pos.append(np.mean(boot_pos[:,1], axis = 0)) slopes_neg.append(np.mean(boot_neg[:,1], axis = 0)) slopes_pos_err.append(np.std(boot_pos[:,1], axis = 0)) 
slopes_neg_err.append(np.std(boot_neg[:,1], axis = 0)) intercept_pos.append(np.mean(boot_pos[:,0], axis = 0)) intercept_neg.append(np.mean(boot_neg[:,0], axis = 0)) intercept_pos_err.append(np.std(boot_pos[:,0], axis = 0)) intercept_neg_err.append(np.std(boot_neg[:,0], axis = 0)) if deredden: boot_pos_dr = bootstrap(np.arange(len(zz_dr_pos)), func = slope_int_estimator_pos_dr, n_boot = n_boot) boot_neg_dr = bootstrap(np.arange(len(zz_dr_neg)), func = slope_int_estimator_neg_dr, n_boot = n_boot) slopes_pos_dr.append(np.mean(boot_pos_dr[:,1], axis = 0)) slopes_neg_dr.append(np.mean(boot_neg_dr[:,1], axis = 0)) slopes_pos_dr_err.append(np.std(boot_pos_dr[:,1], axis = 0)) slopes_neg_dr_err.append(np.std(boot_neg_dr[:,1], axis = 0)) intercept_pos_dr.append(np.mean(boot_pos_dr[:,0], axis = 0)) intercept_neg_dr.append(np.mean(boot_neg_dr[:,0], axis = 0)) intercept_pos_dr_err.append(np.std(boot_pos_dr[:,0], axis = 0)) intercept_neg_dr_err.append(np.std(boot_neg_dr[:,0], axis = 0)) if fig_names is not None: figure_name = "{0}_{1}.png".format(fig_names, ell2) if xlim is None: xlim = np.array([-0.9, 0.9]) if ylim is None: ylim = np.array([-4.6, 3.2]) fig = plt.figure() ax = fig.add_subplot(111) ax2 = ax.twiny() ax.scatter(xx, yy, color ="k", alpha = 0.8) if deredden: ax.scatter(xx, zz, color ="grey", alpha = 0.8) ax.set_xlabel(r"$\tan$(b)", fontsize= 12) ax.set_ylabel(r"$\log$($H\alpha$ Intensity / R)", fontsize= 12) ax.set_title(r"${0:.1f} < l < {1:.1f}$".format(data["GAL-LON"][masks[ell2]].min(), data["GAL-LON"][masks[ell2]].max()), fontsize = 14) ax2.plot(np.degrees(np.arctan(xlim)), np.log([0.1,0.1]), ls = ":", lw = 1, color = "k", label = "0.1 R") ax2.fill_between([-min_lat, min_lat]*u.deg, [ylim[0], ylim[0]], [ylim[1], ylim[1]], color = pal[1], alpha = 0.1, label = r"$|b| < 5\degree$") line_xx = np.linspace(y_min, y_max, 100) def get_slope_conf_band(boot_res, X = line_xx): yy = [[res[0] + res[1] * X] for res in boot_res] yy = np.vstack(yy) return np.percentile(yy, 
(5,95), axis = 0) line_yy_pos = slopes_pos[-1] * line_xx + intercept_pos[-1] line_yy_neg = slopes_neg[-1] * -line_xx + intercept_neg[-1] line_yy_pos_range = get_slope_conf_band(boot_pos) line_yy_neg_range = get_slope_conf_band(boot_neg, X = -line_xx) ax.plot(line_xx, line_yy_pos, color = "r", lw = 3, alpha = 0.9, label = r"$H_{{n_e^2}} = ({0:.2f} \pm {1:.2f}) D$".format(1/-slopes_pos[-1], np.abs(1/slopes_pos[-1] * slopes_pos_err[-1] / slopes_pos[-1]))) ax.fill_between(line_xx, line_yy_pos_range[0], line_yy_pos_range[1], color = "r", alpha = 0.2) ax.plot(-line_xx, line_yy_neg, color = "b", lw = 3, alpha = 0.9, label = r"$H_{{n_e^2}} = ({0:.2f} \pm {1:.2f}) D$".format(1/slopes_neg[-1], np.abs(-1/slopes_pos[-1] * slopes_pos_err[-1] / slopes_pos[-1]))) ax.fill_between(-line_xx, line_yy_neg_range[0], line_yy_neg_range[1], color = "b", alpha = 0.2) if deredden: line_yy_pos_dr = slopes_pos_dr[-1] * line_xx + intercept_pos_dr[-1] line_yy_neg_dr = slopes_neg_dr[-1] * -line_xx + intercept_neg_dr[-1] line_yy_pos_range_dr = get_slope_conf_band(boot_pos_dr) line_yy_neg_range_dr = get_slope_conf_band(boot_neg_dr, X = -line_xx) ax.plot(line_xx, line_yy_pos_dr, color = "r", lw = 3, alpha = 0.9, ls = "--", label = r"Dered: $H_{{n_e^2}} = ({0:.2f} \pm {1:.2f}) D$".format(1/-slopes_pos_dr[-1], np.abs(1/slopes_pos_dr[-1] * slopes_pos_dr_err[-1] / slopes_pos_dr[-1]))) ax.fill_between(line_xx, line_yy_pos_range_dr[0], line_yy_pos_range_dr[1], color = "r", alpha = 0.2) ax.plot(-line_xx, line_yy_neg_dr, color = "b", lw = 3, alpha = 0.9, ls = "--", label = r"Dered: $H_{{n_e^2}} = ({0:.2f} \pm {1:.2f}) D$".format(1/slopes_neg_dr[-1], np.abs(-1/slopes_pos_dr[-1] * slopes_pos_dr_err[-1] / slopes_pos_dr[-1]))) ax.fill_between(-line_xx, line_yy_neg_range_dr[0], line_yy_neg_range_dr[1], color = "b", alpha = 0.2) ax.set_xlim(xlim) ax.set_ylim(ylim) ax2.set_xlabel(r"$b$ (deg)", fontsize = 12) ax2.set_xlim(np.degrees(np.arctan(xlim))) ax.legend(fontsize = 12, loc = 1) ax2.legend(fontsize = 12, loc 
= 2) plt.tight_layout() plt.savefig(figure_name, dpi = 300) del(fig) plt.close() results = { "median_longitude":np.array(median_longitude), "slopes_pos":np.array(slopes_pos), "slopes_neg":np.array(slopes_neg), "intercept_pos":np.array(intercept_pos), "intercept_neg":np.array(intercept_neg), "slopes_pos_err":np.array(slopes_pos_err), "slopes_neg_err":np.array(slopes_neg_err), "intercept_pos_err":np.array(intercept_pos_err), "intercept_neg_err":np.array(intercept_neg_err) } if deredden: results["median_distance"] = np.array(median_distance), results["slopes_pos_dr"] = np.array(slopes_pos_dr) results["slopes_neg_dr"] = np.array(slopes_neg_dr) results["intercept_pos_dr"] = np.array(intercept_pos_dr) results["intercept_neg_dr"] = np.array(intercept_neg_dr) results["slopes_pos_dr_err"] = np.array(slopes_pos_dr_err) results["slopes_neg_dr_err"] = np.array(slopes_neg_dr_err) results["intercept_pos_dr_err"] = np.array(intercept_pos_dr_err) results["intercept_neg_dr_err"] = np.array(intercept_neg_dr_err) if return_smoothed: results["smoothed_longitude"] = np.arange(np.min(median_longitude), np.max(median_longitude), 0.25) if deredden: distance_interp = interp1d(median_longitude, median_distance) results["smoothed_distance"] = distance_interp(results["smoothed_longitude"]) smoothed_slope_pos_ha = np.zeros((3,len(results["smoothed_longitude"]))) smoothed_slope_neg_ha = np.zeros((3,len(results["smoothed_longitude"]))) smoothed_slope_pos_ha_dr = np.zeros((3,len(results["smoothed_longitude"]))) smoothed_slope_neg_ha_dr = np.zeros((3,len(results["smoothed_longitude"]))) for ell,lon in enumerate(results["smoothed_longitude"]): smoothed_slope_pos_ha[:,ell] = np.nanpercentile(np.array(slopes_pos)[(median_longitude <= lon + smoothed_width.value/2) & (median_longitude > lon - smoothed_width.value/2)], (10, 50, 90)) smoothed_slope_neg_ha[:,ell] = np.nanpercentile(np.array(slopes_neg)[(median_longitude <= lon + smoothed_width.value/2) & (median_longitude > lon - smoothed_width.value/2)], 
(10, 50, 90)) if deredden: smoothed_slope_pos_ha_dr[:,ell] = np.nanpercentile(np.array(slopes_pos_dr)[(median_longitude <= lon + smoothed_width.value/2) & (median_longitude > lon - smoothed_width.value/2)], (10, 50, 90)) smoothed_slope_neg_ha_dr[:,ell] = np.nanpercentile(np.array(slopes_neg_dr)[(median_longitude <= lon + smoothed_width.value/2) & (median_longitude > lon - smoothed_width.value/2)], (10, 50, 90)) results["smoothed_slopes_pos"] = smoothed_slope_pos_ha results["smoothed_slopes_neg"] = smoothed_slope_neg_ha if deredden: results["smoothed_slopes_pos_dr"] = smoothed_slope_pos_ha_dr results["smoothed_slopes_neg_dr"] = smoothed_slope_neg_ha_dr return results
35,811
def _check_meas_specs_still_todo( meas_specs: List[_MeasurementSpec], accumulators: Dict[_MeasurementSpec, BitstringAccumulator], stopping_criteria: StoppingCriteria, ) -> Tuple[List[_MeasurementSpec], int]: """Filter `meas_specs` in case some are done. In the sampling loop in `measure_grouped_settings`, we submit each `meas_spec` in chunks. This function contains the logic for removing `meas_spec`s from the loop if they are done. """ still_todo = [] repetitions_set: Set[int] = set() for meas_spec in meas_specs: accumulator = accumulators[meas_spec] more_repetitions = stopping_criteria.more_repetitions(accumulator) if more_repetitions < 0: raise ValueError( "Stopping criteria's `more_repetitions` should return 0 or a positive number." ) if more_repetitions == 0: continue repetitions_set.add(more_repetitions) still_todo.append(meas_spec) if len(still_todo) == 0: return still_todo, 0 repetitions = _aggregate_n_repetitions(repetitions_set) total_repetitions = len(still_todo) * repetitions if total_repetitions > MAX_REPETITIONS_PER_JOB: old_repetitions = repetitions repetitions = MAX_REPETITIONS_PER_JOB // len(still_todo) if repetitions < 10: raise ValueError( "You have requested too many parameter settings to batch your job effectively. " "Consider fewer sweeps or manually splitting sweeps into multiple jobs." ) warnings.warn( f"The number of requested sweep parameters is high. To avoid a batched job with more " f"than {MAX_REPETITIONS_PER_JOB} shots, the number of shots per call to run_sweep " f"(per parameter value) will be throttled from {old_repetitions} to {repetitions}." ) return still_todo, repetitions
35,812
def get_aircon_mock(said): """Get a mock of an air conditioner.""" mock_aircon = mock.Mock(said=said) mock_aircon.connect = AsyncMock() mock_aircon.fetch_name = AsyncMock(return_value="TestZone") mock_aircon.get_online.return_value = True mock_aircon.get_power_on.return_value = True mock_aircon.get_mode.return_value = whirlpool.aircon.Mode.Cool mock_aircon.get_fanspeed.return_value = whirlpool.aircon.FanSpeed.Auto mock_aircon.get_current_temp.return_value = 15 mock_aircon.get_temp.return_value = 20 mock_aircon.get_current_humidity.return_value = 80 mock_aircon.get_humidity.return_value = 50 mock_aircon.get_h_louver_swing.return_value = True mock_aircon.set_power_on = AsyncMock() mock_aircon.set_mode = AsyncMock() mock_aircon.set_temp = AsyncMock() mock_aircon.set_humidity = AsyncMock() mock_aircon.set_mode = AsyncMock() mock_aircon.set_fanspeed = AsyncMock() mock_aircon.set_h_louver_swing = AsyncMock() return mock_aircon
35,813
def mutation(individual): """ Shuffle certain parameters of the network to keep evolving it. Concretely: - thresh, tau_v, tau_t, alpha_v, alpha_t, q """ individual[0].update_params() return individual,
35,814
async def test_migrate_entry(hass): """Test successful migration of entry data.""" legacy_config = { axis.CONF_DEVICE: { axis.CONF_HOST: "1.2.3.4", axis.CONF_USERNAME: "username", axis.CONF_PASSWORD: "password", axis.CONF_PORT: 80, }, axis.CONF_MAC: "mac", axis.device.CONF_MODEL: "model", axis.device.CONF_NAME: "name", } entry = MockConfigEntry(domain=axis.DOMAIN, data=legacy_config) assert entry.data == legacy_config assert entry.version == 1 await axis.async_migrate_entry(hass, entry) assert entry.data == { axis.CONF_DEVICE: { axis.CONF_HOST: "1.2.3.4", axis.CONF_USERNAME: "username", axis.CONF_PASSWORD: "password", axis.CONF_PORT: 80, }, axis.CONF_HOST: "1.2.3.4", axis.CONF_USERNAME: "username", axis.CONF_PASSWORD: "password", axis.CONF_PORT: 80, axis.CONF_MAC: "mac", axis.device.CONF_MODEL: "model", axis.device.CONF_NAME: "name", } assert entry.version == 2
35,815
def truncate_repeated_single_step_traversals_in_sub_queries( compound_match_query: CompoundMatchQuery, ) -> CompoundMatchQuery: """For each sub-query, remove one-step traversals that overlap a previous traversal location.""" lowered_match_queries = [] for match_query in compound_match_query.match_queries: new_match_query = truncate_repeated_single_step_traversals(match_query) lowered_match_queries.append(new_match_query) return compound_match_query._replace(match_queries=lowered_match_queries)
35,816
def intersect(list1, list2): """ Compute the intersection of two sorted lists. Returns a new sorted list containing only elements that are in both list1 and list2. This function can be iterative. """ result_list = [] idx1 = 0 idx2 = 0 while idx1 < len(list1) and idx2 < len(list2): if list1[idx1] == list2[idx2]: result_list.append(list1[idx1]) idx1 += 1 idx2 += 1 elif list1[idx1] < list2[idx2]: idx1 += 1 elif list1[idx1] > list2[idx2]: idx2 += 1 else: print 'error in func intersect!!!' return return result_list
35,817
def get_parent_project_ids(project_id: int, only_if_child_can_add_users_to_parent: bool = False) -> typing.List[int]: """ Return the list of parent project IDs for an existing project. :param project_id: the ID of an existing project :param only_if_child_can_add_users_to_parent: whether or not to only show those parent projects, which someone with GRANT permissions on this project can add users to (transitively) :return: list of project IDs """ subproject_relationships: typing.Iterable[SubprojectRelationship] = SubprojectRelationship.query.filter_by( child_project_id=project_id ).all() parent_project_ids = [] for subproject_relationship in subproject_relationships: if subproject_relationship.child_can_add_users_to_parent or not only_if_child_can_add_users_to_parent: parent_project_ids.append(subproject_relationship.parent_project_id) return parent_project_ids
35,818
def get_all_object_names(bucket, prefix=None, without_prefix=False): """ Returns the names of all objects in the passed bucket Args: bucket (str): Bucket path prefix (str, default=None): Prefix for keys withot_prefix (bool, default=False) Returns: list: List of object names """ root = bucket if prefix is not None: root = f"{bucket}/{prefix}" root_len = len(bucket) + 1 if without_prefix: prefix_len = len(prefix) subdir_names = glob.glob(f"{root}*") object_names = [] while True: names = subdir_names subdir_names = [] for name in names: if name.endswith("._data"): # remove the ._data at the end name = name[root_len:-6] while name.endswith("/"): name = name[0:-1] if without_prefix: name = name[prefix_len:] while name.startswith("/"): name = name[1:] if len(name) > 0: object_names.append(name) elif os.path.isdir(name): subdir_names += glob.glob(f"{name}/*") if len(subdir_names) == 0: break return object_names
35,819
def integral_length(v): """ Compute the integral length of a given rational vector. INPUT: - ``v`` - any object which can be converted to a list of rationals OUTPUT: Rational number ``r`` such that ``v = r u``, where ``u`` is the primitive integral vector in the direction of ``v``. EXAMPLES:: sage: lattice_polytope.integral_length([1, 2, 4]) 1 sage: lattice_polytope.integral_length([2, 2, 4]) 2 sage: lattice_polytope.integral_length([2/3, 2, 4]) 2/3 """ data = [QQ(e) for e in list(v)] ns = [e.numerator() for e in data] ds = [e.denominator() for e in data] return gcd(ns)/lcm(ds)
35,820
def load_classification_pipeline( model_dir: str = "wukevin/tcr-bert", multilabel: bool = False, device: int = 0 ) -> TextClassificationPipeline: """ Load the pipeline object that does classification """ try: tok = ft.get_pretrained_bert_tokenizer(model_dir) except OSError: tok = ft.get_aa_bert_tokenizer(64) if multilabel: model = BertForSequenceClassificationMulti.from_pretrained(model_dir) pipeline = TextMultiClassificationPipeline( model=model, tokenizer=tok, device=device, framework="pt", task="mulitlabel_classification", return_all_scores=True, ) else: model = BertForSequenceClassification.from_pretrained(model_dir) pipeline = TextClassificationPipeline( model=model, tokenizer=tok, return_all_scores=True, device=device ) return pipeline
35,821
def rest_target_disable(args): """ :param argparse.Namespace args: object containing the processed command line arguments; need args.target :raises: IndexError if target not found """ for target in args.target: rtb, rt = _rest_target_find_by_id(target) rtb.rest_tb_target_disable(rt, ticket = args.ticket)
35,822
def skew_image(img, angle): """ Skew image using some math :param img: PIL image object :param angle: Angle in radians (function doesn't do well outside the range -1 -> 1, but still works) :return: PIL image object """ width, height = img.size # Get the width that is to be added to the image based on the angle of skew xshift = tan(abs(angle)) * height new_width = width + int(xshift) if new_width < 0: return img # Apply transform img = img.transform( (new_width, height), Image.AFFINE, (1, angle, -xshift if angle > 0 else 0, 0, 1, 0), Image.BICUBIC ) return img
35,823
def delete_empty_folders(logger, top, error_dir): """ This function recursively walks through a given directory (`top`) using depth-first search (bottom up) and deletes any directory that is empty (ignoring hidden files). If there are directories that are not empty (except hidden files), it will save the absolute directory in a Pandas dataframe and export it as a `not-empty-folders.csv` to `error_dir`. Parameters ---------- top : {str} The directory to iterate through. error_dir : {str} The directory to save the `not-empty-folders.csv` to. Returns ------- None """ try: curdir_list = [] files_list = [] for (curdir, dirs, files) in os.walk(top=top, topdown=False): if curdir != str(top): dirs.sort() files.sort() print(f"WE ARE AT: {curdir}") print("=" * 10) print("List dir:") directories_list = [ f for f in os.listdir(curdir) if not f.startswith(".") ] print(directories_list) if len(directories_list) == 0: print("DELETE") shutil.rmtree(curdir, ignore_errors=True) elif len(directories_list) > 0: print("DON'T DELETE") curdir_list.append(curdir) files_list.append(directories_list) print() print("Moving one folder up...") print("-" * 40) print() if len(curdir_list) > 0: not_empty_df = pd.DataFrame( list(zip(curdir_list, files_list)), columns=["curdir", "files"] ) to_save_path = os.path.join(error_dir, "not-empty-folders.csv") not_empty_df.to_csv(to_save_path, index=False) except Exception as e: # logger.error(f'Unable to delete_empty_folders!\n{e}') print((f"Unable to delete_empty_folders!\n{e}"))
35,824
def test_process_bto_order_high_risk(monkeypatch, capsys, caplog):
    """BTO order should be correctly processed with high risk flag set """
    caplog.set_level(logging.INFO)
    # High-risk orders size the position off high_risk_ord_value ($1000)
    # with a 3% buy-limit markup and a 25% stop-loss.
    monkeypatch.setitem(USR_SET, "high_risk_ord_value", 1000)
    monkeypatch.setitem(USR_SET, "buy_limit_percent", 0.03)
    monkeypatch.setitem(USR_SET, "SL_percent", 0.25)
    monkeypatch.setitem(VALID_ORD_INPUT, "contract_price", 2.00)
    flags = {"SL": None, "risk_level": "high risk", "reduce": None}
    monkeypatch.setitem(VALID_ORD_INPUT, "flags", flags)

    def mock_place_order(acct_num, order_spec):
        # Stand-in for the TDA API call: inspect the built order instead of
        # sending it. Expected: limit = 2.00 * 1.03 = 2.06 and a child stop
        # order at 2.00 * (1 - 0.25) = 1.50; quantity 4 is presumably
        # 1000 / (2.06 * 100) rounded down — confirm against process_bto_order.
        built_order = order_spec.build()
        assert built_order["price"] == "2.06"
        assert built_order["orderLegCollection"][0]["quantity"] == 4
        assert built_order["orderStrategyType"] == "TRIGGER"
        assert built_order["childOrderStrategies"][0]["orderType"] == "STOP"
        assert built_order["childOrderStrategies"][0]["stopPrice"] == "1.50"
        return "PASSAR"

    client = tda.client.Client
    monkeypatch.setattr(client, "place_order", mock_place_order)
    am.process_bto_order(client, "1234567890", VALID_ORD_INPUT, USR_SET)
    # The sentinel returned by the mock must surface both on stdout and in
    # the log, proving process_bto_order reports the API response.
    captured = capsys.readouterr()
    assert captured.out.split()[-1] == "PASSAR"
    logged = caplog.text
    assert logged.split()[-1] == "PASSAR"
35,825
def _load_tests_from_module(tests, module, globs, setUp=None, tearDown=None): """Load tests from module, iterating through submodules. """ for attr in (getattr(module, x) for x in dir(module) if not x.startswith("_")): if isinstance(attr, types.ModuleType): suite = doctest.DocTestSuite( attr, globs, setUp=setUp, tearDown=tearDown, optionflags=+doctest.ELLIPSIS, ) tests.addTests(suite) return tests
35,826
def p_expr_comment(p):
    # PLY/yacc parser rule. The docstring below *is* the grammar production
    # (expr -> COMMENT) and must not be edited as documentation.
    """expr : COMMENT
    """
    # p[1] holds the matched COMMENT token's value; wrap it in an AST node.
    p[0] = Comment(p[1])
35,827
def setup_node(config, envs):
    """
    Install node dependencies

    language: node_js
    node_js:
      - "0.12"
    """
    versions = listify(config.get('node_js', []))
    for version in versions:
        # One setup script (a list of shell commands) per requested version.
        commands = [
            apt_get('ca-certificates', 'curl'),
            ('curl --location'
             ' https://raw.github.com/creationix/nvm/master/nvm.sh'
             ' -o /tmp/nvm.sh'),
            # NVM is not errexit-clean, so suspend error checking while
            # sourcing it (https://github.com/creationix/nvm/issues/721).
            'set +o errexit',
            '. /tmp/nvm.sh',
            'set -o errexit',
            'nvm install {}'.format(version),
        ]
        envs.append(commands)
35,828
def run(command, *args):
    """Application caller: wrap a console call 'command args'.

    Python 2 code. Expects args[0] to be an iterable of arguments; each
    element is stringified and appended to the command, which is then
    executed through the shell.
    """
    # NOTE(review): args_str always starts with a space, so the length
    # check below is always true and two spaces end up between the command
    # and its arguments (harmless for the shell) — confirm intent.
    args_str = " " + " ".join(str(s) for s in list(args[0]))
    if len(args_str) > 0 :
        command += " " + args_str
    print "Running", command
    # check_call raises CalledProcessError on a non-zero exit status and
    # otherwise returns 0.
    retcode = check_call(command, shell=True)
    # NOTE(review): check_call never returns a negative value (it raises
    # instead), so this branch is unreachable — confirm whether
    # subprocess.call was intended here.
    if retcode < 0:
        raise RuntimeError("Child was terminated by signal")
35,829
def info(ctx, objects):
    """Show information: overall status when no objects are given,
    otherwise details for each requested object."""
    if objects:
        for item in objects:
            process_object(ctx, item)
    else:
        # No explicit targets: fall back to a general status report.
        status(ctx)
35,830
def pp():
    """Payback Period

    Interactive (Python 2) calculator: prompts on stdin and prints the
    payback period of an investment, for either equal or unequal annual
    net cash inflows.
    """
    a = input("For Payback with Equal Annual Net Cash Inflows Press 1\nFor Payback with Unequal Net Cash Inflows Press 2: ")
    if (a == 1):
        # Equal inflows: payback = investment / annual inflow.
        b = float(input("Please Enter Total Amount Invested: "))
        c = float(input("Please Enter Expected Annual Net Cash Inflow: "))
        d = float(input("Please Enter Investment Useful Life (Years): "))
        # Total inflows over the useful life minus the initial investment.
        e = (float(c)*float(d))-float(b)
        f = float(b)/float(c)
        print ">> Your Payback Period is",f,"Years"
        print ">> Total Amount Remaining After Payback Period is",e,"(Residual Value Not Included)"
    elif(a == 2):
        # Unequal inflows: accumulate year by year until the investment is
        # recovered. Py2 input() evaluates the typed text, so entering e.g.
        # 100,200,300 yields a tuple; list() turns it into a list.
        b = float(input("Please Enter Total Amount Invested: "))
        c = list(input("Please Enter Expected Annual Net Cash Inflow: "))
        n = 0
        t = 0
        ss = sum(c)
        am = float(ss)-float(b)
        if(ss < b):
            # Inflows never cover the investment: report the loss.
            print ">> Your Loss On Investment is",abs(am)
        elif(ss > b):
            # Walk the inflows until the running total t reaches b.
            # NOTE(review): the ss == b case falls through silently with no
            # output — confirm whether that is intended.
            while t <= b:
                e = c[0 + n]
                t += float(e)
                n += 1
            gg = c[n - 1]
            # Fraction of the final year actually needed, subtracted from n.
            ii = (float(t)-float(b))/float(gg)
            ccc = float(n)-float(ii)
            print ">> Your Payback Period is",ccc,"Years"
            print ">> Total Amount Remaining After Payback Period is",am
35,831
def calculate_full_spectrum(xs, cp, ep=None, betas=(0,0), data=None):
    """Direct solution of the k-eigenvalue problem in integral transport
    by the collision probability method.

    Input data are the xs list (st, ss, chi, nsf arrays) and the collision
    probabilities in cp. Only isotropic scattering is allowed. A relation
    of albedo for the partial currents can be used at the boundary: betas
    holds the (left, right) albedos; when an albedo is non-zero, the escape
    probabilities ep and the mesh data object are required.

    Returns the (eigenvalues, eigenvectors) pair from np.linalg.eig applied
    to the fission-production operator H.
    """
    st, ss, chi, nsf = xs
    G, I = nsf.shape
    check_xs(xs)
    betaL, betaR = betas
    if (betaL < 0) or (betaL > 1):
        raise ValueError("betaL (left albedo) is not in valid range")
    elif betaL > 0:
        if ep is None:
            raise ValueError("betaL > 0, but no input escape probs")
        if data is None:
            raise ValueError("input mesh data is needed for VjoSbL")
        else:
            # Volume-to-boundary-surface ratio at the left boundary.
            VjoSbL = data.Vi / data.Si[0]
    if (betaR < 0) or (betaR > 1):
        raise ValueError("betaR (right albedo) is not in valid range")
    elif betaR > 0:
        if ep is None:
            raise ValueError("betaR > 0, but no input escape probs")
        if data is None:
            raise ValueError("input mesh data is needed for VjoSbR")
        else:
            VjoSbR = data.Vi / data.Si[-1]

    def get_rt(rpjx, st):
        # Reflection/transmission split of the uncollided fraction:
        # curved geometries reflect it, the slab transmits it.
        total_collision = np.dot(rpjx, st)
        if data.geometry_type != 'slab':
            reflection, transmission = 1 - total_collision, 0
        else:
            reflection, transmission = 0, 1 - total_collision
        return reflection, transmission

    GI = G * I
    PS = np.zeros((GI, GI))
    PX, F = np.zeros((GI, I)), np.zeros((I, GI))
    Is = np.arange(I)
    for g in range(G):
        idx = slice(I * g, I * (g + 1))
        pji = np.transpose(cp[g,:,:] / st[g,:])  # reduced CP
        # apply b.c.
        # Bug fix: the escape probabilities were read unconditionally,
        # crashing with the default ep=None even for vacuum boundaries
        # (betas == (0, 0)); they are only needed when an albedo is set.
        if betaL > 0 or betaR > 0:
            eaj, ebj = -ep[g,0,:,1], ep[g,-1,:,0]
        if betaL > 0:
            # pja and pjb are both needed if refl at both sides
            pja = 4 * VjoSbL * eaj
        if betaR > 0:
            pjb = 4 * VjoSbR * ebj
        if betaL > 0:
            r, t = get_rt(pja, st[g,:])
            # Geometric series of multiple reflections between boundaries.
            coef = betaL / (1 - betaL * (r + t**2 * betaR / (1 - betaR * r)))
            pji += coef * np.dot(np.diag(eaj), np.tile(pja, (I, 1)))
            if betaR > 0:
                coef *= betaR * t
                pji += coef * np.dot(np.diag(eaj), np.tile(pjb, (I, 1)))
        if betaR > 0:
            r, t = get_rt(pjb, st[g,:])
            coef = betaR / (1 - betaR * (r + t**2 * betaL / (1 - betaL * r)))
            pji += coef * np.dot(np.diag(ebj), np.tile(pjb, (I, 1)))
            if betaL > 0:
                coef *= betaL * t
                pji += coef * np.dot(np.diag(ebj), np.tile(pja, (I, 1)))
        # Fission production (F), fission source placement (PX) and
        # scattering transfer (PS) blocks for group g.
        F[Is, Is + g * I] = nsf[g,:]
        PX[idx,:] = pji * chi[g,:]
        for gg in range(G):
            jdx = slice(I * gg, I * (gg + 1))
            PS[idx, jdx] = pji * ss[g,gg,0,:]
    # Form (I - PS) and the eigenproblem H = F (I - PS)^-1 PX.
    PS *= -1
    PS[np.diag_indices_from(PS)] += 1
    H = np.dot(F, np.dot(np.linalg.inv(PS), PX))
    return np.linalg.eig(H)
35,832
def plot_confusion_matrix(cm, classes, normalize=False, cmap=None):
    """Render a confusion matrix on the current matplotlib figure.

    Args:
        cm: square confusion-matrix array (ground truth on rows).
        classes: tick labels, one per class.
        normalize: if True, rescale each row to sum to 1 before plotting.
        cmap: matplotlib colormap; defaults to Blues.
    """
    cmap = plt.cm.Blues if cmap is None else cmap

    if normalize:
        # Divide every row by its total so cells become per-class rates.
        row_totals = cm.sum(axis=1)[:, np.newaxis]
        cm = cm.astype('float') / row_totals
        logging.info("Normalized confusion matrix.")
    else:
        logging.info(
            'Confusion matrix will be computed without normalization.')

    plt.imshow(cm, interpolation='nearest', cmap=cmap)
    plt.colorbar()
    ticks = np.arange(len(classes))
    plt.xticks(ticks, classes, rotation=90)
    plt.yticks(ticks, classes)
    plt.tight_layout()
    plt.ylabel('Ground truth')
    plt.xlabel('Predicted label')
35,833
def get_connectors_by_type(type : str):
    """
    Convenience wrapper: fetch connectors of the given type via
    `get_connectors()`.
    """
    connectors = get_connectors(type)
    return connectors
35,834
def test_heart_get_params(withings_api: WithingsApi) -> None:
    """heart_get should call /v2/heart with action=get and the signal id."""
    # Register the canned HTTP response for the heart endpoint.
    responses_add_heart_get()
    withings_api.heart_get(signalid=1234567)

    # The signal id must be forwarded as a query parameter...
    assert_url_query_equals(
        responses.calls[0].request.url,
        {"signalid": "1234567"},
    )
    # ...against the v2 heart endpoint, using the 'get' action.
    assert_url_path(responses.calls[0].request.url, "/v2/heart")
    assert_url_query_equals(responses.calls[0].request.url, {"action": "get"})
35,835
def test_scipy_dense_solvers_num_argument():
    """
    By default, the Scipy dense solvers compute all eigenpairs of the
    eigenproblem. We can limit the number of solutions returned by
    specifying the 'num' argument, either directly when instantiating the
    solver or when calling the eigenproblem.solve() method. This test
    checks that the expected number of solutions is returned in all cases.
    """
    diagproblem = DiagonalEigenproblem()

    # Dense solvers should yield all solutions by default
    solver1 = ScipyLinalgEig()
    solver2 = ScipyLinalgEigh()
    omega1, w1, _ = diagproblem.solve(solver1, N=10, dtype=float)
    omega2, w2, _ = diagproblem.solve(solver2, N=10, dtype=float)
    assert(len(omega1) == 10)
    assert(len(omega2) == 10)
    # Eigenvectors are returned row-wise: one row per eigenpair.
    assert(w1.shape == (10, 10))
    assert(w2.shape == (10, 10))

    # By initialising them with the 'num' argument they should only
    # yield the first 'num' solutions.
    solver3 = ScipyLinalgEig(num=5)
    solver4 = ScipyLinalgEigh(num=3)
    omega3, w3, _ = diagproblem.solve(solver3, N=10, dtype=float)
    omega4, w4, _ = diagproblem.solve(solver4, N=10, dtype=float)
    assert(len(omega3) == 5)
    assert(len(omega4) == 3)
    assert(w3.shape == (5, 10))
    assert(w4.shape == (3, 10))

    # We can also provide the 'num' argument in the solve() method
    # directly. If provided, this argument should be used. Otherwise
    # The fallback value from the solver initialisation is used.
    solver5 = ScipyLinalgEig(num=7)
    solver6 = ScipyLinalgEig()
    solver7 = ScipyLinalgEigh(num=3)
    omega5, w5, _ = diagproblem.solve(solver5, N=10, num=5, dtype=float)
    omega6, w6, _ = diagproblem.solve(solver6, N=10, num=8, dtype=float)
    omega7, w7, _ = diagproblem.solve(solver7, N=10, num=6, dtype=float)
    # num=None must fall back to the solver's own value (3), not "all".
    omega8, w8, _ = diagproblem.solve(solver7, N=10, num=None, dtype=float)
    assert(len(omega5) == 5)
    assert(len(omega6) == 8)
    assert(len(omega7) == 6)
    assert(len(omega8) == 3)
    assert(w5.shape == (5, 10))
    assert(w6.shape == (8, 10))
    assert(w7.shape == (6, 10))
    assert(w8.shape == (3, 10))
35,836
def crawl(alphabet, initial, accepts, follow):
    """Build an FSM by exhaustive exploration from an initial state.

    Starting at `initial`, repeatedly applies `follow(state, symbol)` to
    discover reachable states; `accepts(state)` marks accepting states.
    A `follow` call that raises OblivionError means "no successor" for
    that symbol, so the transition is simply omitted. State objects are
    numbered in discovery order, with the initial state as 0.
    """
    discovered = [initial]
    final = set()
    delta = {}

    cursor = 0
    while cursor < len(discovered):
        current = discovered[cursor]
        if accepts(current):
            final.add(cursor)
        row = {}
        delta[cursor] = row
        for sym in alphabet:
            try:
                successor = follow(current, sym)
            except OblivionError:
                # Dead end: leave this symbol out of the transition row.
                continue
            if successor in discovered:
                row[sym] = discovered.index(successor)
            else:
                # Brand-new state: assign it the next index.
                row[sym] = len(discovered)
                discovered.append(successor)
        cursor += 1

    return FSM(
        alphabet=alphabet,
        states=range(len(discovered)),
        initial=0,
        accepting=final,
        transition=delta,
        __validation__=False
    )
35,837
def mpncovresnet101(pretrained=False, **kwargs):
    """Construct an MPN-COV ResNet-101 model.

    Args:
        pretrained (bool): If True, returns a model pre-trained on ImageNet
    """
    net = MPNCOVResNet(Bottleneck, [3, 4, 23, 3], **kwargs)
    if pretrained:
        # Fetch the published ImageNet weights and load them in place.
        weights = model_zoo.load_url(model_urls['mpncovresnet101'])
        net.load_state_dict(weights)
    return net
35,838
def cathegory_encoder(data, labelCathegory=labelCathegory):
    """Label-encode the categorical columns of `data` in place.

    Each column named in `labelCathegory` is replaced by the integer codes
    produced by a freshly fitted sklearn LabelEncoder, and the (mutated)
    frame is returned.
    """
    for column in labelCathegory:
        values = list(data[column].values)
        enc = sklearn.preprocessing.LabelEncoder()
        enc.fit(values)
        data[column] = enc.transform(values)
    return data
35,839
def _make_symbols_cg_df(symbols, benchmark): """ 相关性金融数据收集,子进程委托函数,子进程通过make_kl_df完成主进程委托的symbols个 金融数据收集工作,最终返回所有金融时间序列涨跌幅度pd.DataFrame对象 :param symbols: 可迭代symbols序列,序列中的元素为str对象 :param benchmark: 进行数据收集使用的标尺对象,数据时间范围确定使用,AbuBenchmark实例对象 :return: 所有金融时间序列涨跌幅度pd.DataFrame对象 """ # 子进程金融数据收集工作, 由于本事是在子进程内工作,所以不再make_kl_df中使用parallel模式,上层进行多任务分配及任务数确定 panel = ABuSymbolPd.make_kl_df(symbols, data_mode=EMarketDataSplitMode.E_DATA_SPLIT_UNDO, benchmark=benchmark, show_progress=True) if panel is None or panel.empty: logging.info('pid {} panel is None'.format(os.getpid())) return None # 转换panel轴方向,即可方便获取所有金融时间数据的某一个列 panel = panel.swapaxes('items', 'minor') net_cg_df = panel['p_change'].fillna(value=0) """ 转轴后直接获取p_change,即所有金融时间序列涨跌幅度pd.DataFrame对象,形如下所示: usF usFCAU usGM usHMC usTM usTSLA usTTM 2015-06-25 -0.387 -0.517 -1.308 0.522 -0.391 1.365 -0.029 2015-06-26 -0.259 1.300 -0.922 0.366 0.437 -0.632 -0.229 2015-06-29 -2.468 -6.799 -3.345 -2.676 -2.222 -1.898 -2.550 2015-06-30 -0.067 0.000 0.301 1.250 0.982 2.381 1.353 2015-07-01 -0.133 0.688 -0.870 -1.605 -0.112 0.332 0.261 ................................................................. """ return net_cg_df
35,840
def split_dataset(args, dataset):
    """Split the dataset into train/validation/test subsets.

    Parameters
    ----------
    args : dict
        Settings; uses ``args['split_ratio']`` (comma-separated floats
        giving the train/val/test fractions) and ``args['split']``
        (either ``'scaffold'`` or ``'random'``).
    dataset
        Dataset instance

    Returns
    -------
    train_set
        Training subset
    val_set
        Validation subset
    test_set
        Test subset

    Raises
    ------
    ValueError
        If ``args['split']`` is neither ``'scaffold'`` nor ``'random'``.
    """
    train_ratio, val_ratio, test_ratio = map(float, args['split_ratio'].split(','))
    if args['split'] == 'scaffold':
        train_set, val_set, test_set = ScaffoldSplitter.train_val_test_split(
            dataset, frac_train=train_ratio, frac_val=val_ratio,
            frac_test=test_ratio, scaffold_func='smiles')
    elif args['split'] == 'random':
        train_set, val_set, test_set = RandomSplitter.train_val_test_split(
            dataset, frac_train=train_ratio, frac_val=val_ratio,
            frac_test=test_ratio)
    else:
        # Bug fix: this previously *returned* a ValueError instance instead
        # of raising it, so callers crashed later when unpacking the result.
        raise ValueError(
            "Expect the splitting method to be 'scaffold' or 'random', "
            "got {}".format(args['split']))
    return train_set, val_set, test_set
35,841
def test_join_smart_url_with_path_and_basename():
    """Test join_smart functionality."""
    expected = "gsiftp://gridftp.zeuthen.desy.de:2811/pnfs/ifh.de/acs/icecube/archive/data/exp/IceCube/2015/filtered/level2/0320/fdd3c3865d1011eb97bb6224ddddaab7.zip"
    parts = [
        "gsiftp://gridftp.zeuthen.desy.de:2811/pnfs/ifh.de/acs/icecube/archive/",
        "/data/exp/IceCube/2015/filtered/level2/0320",
        "fdd3c3865d1011eb97bb6224ddddaab7.zip",
    ]
    # Leading/trailing slashes must collapse into a single separator.
    assert join_smart_url(parts) == expected
35,842
def test_ky_mat_update(params):
    """Check that get_ky_mat_update incrementally extends a kernel matrix
    to the same result as rebuilding it from scratch, in serial, parallel
    and multi-hyperparameter modes."""

    hyps, name, kernel, cutoffs, \
        kernel_m, hyps_list, hyps_mask_list = params

    # prepare old data set as the starting point: the first n training
    # points are registered under the name 'old'.
    n = 5
    training_data = flare.gp_algebra._global_training_data[name]
    flare.gp_algebra._global_training_data['old'] = training_data[:n]
    training_labels = flare.gp_algebra._global_training_labels[name]
    flare.gp_algebra._global_training_labels['old'] = training_labels[:n]

    func = [get_ky_mat, get_ky_mat_update]

    # get the reference: full matrix for all data, and the sub-matrix for
    # the 'old' subset that the update will extend.
    ky_mat0 = func[0](hyps, name, kernel[0], cutoffs)
    ky_mat_old = func[0](hyps, 'old', kernel[0], cutoffs)

    # update: extending the old matrix must exactly reproduce the full one.
    ky_mat = func[1](ky_mat_old, hyps, name, kernel[0], cutoffs)
    diff = (np.max(np.abs(ky_mat-ky_mat0)))
    assert (diff==0), "update function is wrong"

    # parallel version
    ky_mat = func[1](ky_mat_old, hyps, name, kernel[0], cutoffs,
                     n_cpus=2, n_sample=20)
    diff = (np.max(np.abs(ky_mat-ky_mat0)))
    assert (diff==0), "parallel implementation is wrong"

    # check multi hyps implementation
    for i in range(len(hyps_list)):

        hyps = hyps_list[i]
        hyps_mask = hyps_mask_list[i]
        # A None mask means the plain kernel applies; otherwise use the
        # masked (multi-hyperparameter) kernel.
        if hyps_mask is None:
            ker = kernel[0]
        else:
            ker = kernel_m[0]

        # serial implementation (tolerance relaxed to floating-point noise)
        ky_mat = func[1](ky_mat_old, hyps, name, ker,
                         cutoffs, hyps_mask)
        diff = (np.max(np.abs(ky_mat-ky_mat0)))
        assert (diff<1e-12), "multi hyps parameter implementation is wrong"

        # parallel implementation
        ky_mat = func[1](ky_mat_old, hyps, name, ker,
                         cutoffs, hyps_mask,
                         n_cpus=2, n_sample=20)
        diff = (np.max(np.abs(ky_mat-ky_mat0)))
        assert (diff<1e-12), "multi hyps parameter parallel "\
                             "implementation is wrong"
35,843
async def test_multi_level_wildcard_topic_not_matching(hass, mock_device_tracker_conf):
    """Test not matching multi level wildcard topic."""
    dev_id = "paulus"
    entity_id = ENTITY_ID_FORMAT.format(dev_id)
    # The device subscribes under /location/#; the published topic lives
    # under /somewhere/..., which the wildcard must NOT match.
    subscription = "/location/#"
    topic = "/somewhere/room/paulus"
    location = "work"

    hass.config.components = set(["mqtt", "zone"])
    assert await async_setup_component(
        hass,
        device_tracker.DOMAIN,
        {
            device_tracker.DOMAIN: {
                CONF_PLATFORM: "mqtt",
                "devices": {dev_id: subscription},
            }
        },
    )
    async_fire_mqtt_message(hass, topic, location)
    await hass.async_block_till_done()
    # No state entity should have been created for the non-matching topic.
    assert hass.states.get(entity_id) is None
35,844
def depthwise_conv2d_nchw(inputs, weight, bias=None, stride=1, padding=0, dilation=1):
    """Depthwise convolution 2d NCHW layout

    Args:
    -----------------------------
    inputs  : tvm.te.tensor.Tensor
        shape [batch, channel, height, width]
    weight  : tvm.te.tensor.Tensor
        shape [in_channel, factor, kernel_height, kernel_width]
    bias    : (optional:None) tvm.te.tensor.Tensor
        shape [out_channel]
    stride  : (optional:1) int or tuple
    padding : (optional:0) int or tuple
    dilation: (optional:1) int
    -----------------------------

    Returns:
    -----------------------------
    tvm.te.tensor.Tensor
        shape [batch, out_channel, output_height, output_width]
    -----------------------------
    """
    batch_size, in_channel, in_h, in_w = inputs.shape
    _in_channel, factor, k_h, k_w = weight.shape
    assert_print(_in_channel.value == in_channel.value)
    # Depthwise: each input channel produces `factor` output channels.
    out_channel = in_channel * factor

    # Normalize scalar arguments into (h, w) tuples.
    stride = (stride, stride) if isinstance(stride, (int, tvm.tir.IntImm)) else stride
    padding = (padding, padding) if isinstance(padding, (int, tvm.tir.IntImm)) else padding
    dilation = (dilation, dilation) if isinstance(dilation, (int, tvm.tir.IntImm)) else dilation
    assert_print(isinstance(stride, tuple) and len(stride) == 2)
    assert_print(isinstance(padding, tuple) and len(padding) == 2)
    assert_print(isinstance(dilation, tuple) and len(dilation) == 2)

    # Standard convolution output-size formula.
    out_h = (in_h + 2 * padding[0] - dilation[0] * (k_h - 1) - 1) // stride[0] + 1
    out_w = (in_w + 2 * padding[1] - dilation[1] * (k_w - 1) - 1) // stride[1] + 1
    # Reduction axes over the kernel window only (no channel reduction —
    # that is what makes this convolution depthwise).
    rh = tvm.te.reduce_axis((0, k_h))
    rw = tvm.te.reduce_axis((0, k_w))

    padded = zero_pad2d(inputs, padding=padding)
    # Output channel c reads input channel c//factor with the kernel slice
    # indexed by (c//factor, c%factor).
    output = tvm.te.compute(
        (batch_size, out_channel, out_h, out_w),
        lambda b, c, h, w: tvm.te.sum(
            (padded[b, c//factor,
                    h * stride[0] + rh * dilation[0], w * stride[1] + rw * dilation[1]]
            * weight[c//factor, c%factor, rh, rw]),
            axis=[rw, rh]
        )
    )
    if bias is not None:
        # Broadcast the per-channel bias over batch and spatial dims.
        output = tvm.te.compute(
            (batch_size, out_channel, out_h, out_w),
            lambda b, c, h, w: output[b, c, h, w] + bias[c]
        )
    return output
35,845
def xtransformed(geo, transformation):
    """Returns a copy of the transformed Rhino Geometry object.

    Args:
        geo (:class:`Rhino.Geometry.GeometryBase`): a Rhino Geometry object
        transformation (:class:`Transformation`): the transformation.

    Returns:
        (:class:`Rhino.Geometry.GeometryBase`): the transformed geometry
    """
    # Work on a duplicate so the caller's geometry stays untouched.
    duplicate = geo.Duplicate()
    duplicate.Transform(xform_from_transformation(transformation))
    return duplicate
35,846
def function_calls(libfuncs):
    """Compute the transitive closure of library-function dependencies.

    Args:
        libfuncs: set of library functions called directly in the script.
            NOTE: consumed (emptied) in place by this function.

    Returns:
        Sorted list of all library functions required by the script: the
        direct calls plus everything reachable through called_functions().
    """
    required = set()
    while libfuncs:
        func = libfuncs.pop()
        required.add(func)
        # Queue any dependency not yet processed or already pending.
        # (Renamed the loop variable: it previously shadowed `func`.)
        for dep in called_functions(func):
            if dep not in libfuncs and dep not in required:
                libfuncs.add(dep)
    # sorted() already returns a list; the old sorted(list(...)) was redundant.
    return sorted(required)
35,847
def test_stack_component_dict_only_contains_public_attributes(
    stub_component,
):
    """Tests that the `dict()` method which is used to serialize stack
    components does not include private attributes."""
    # Sanity check: the private attribute exists on the instance...
    assert stub_component._some_private_attribute_name == "Also Aria"

    # ...but it must not leak into the serialized representation.
    expected_dict_keys = {"some_public_attribute_name", "name", "uuid"}
    assert stub_component.dict().keys() == expected_dict_keys
35,848
def preprocess_yaml_config(config: SimpleNamespace, prefix_keys=False) -> SimpleNamespace:
    """
    Preprocess a simple namespace. Currently,
    - optionally prepend the prefix to all the configuration parameters
      (dropping the 'prefix' entry itself)
    - change 'None' strings to None values

    :param config: The SimpleNamespace containing the configuration.
    :param prefix_keys: If True, rename each key `k` to `<prefix>_k`.
    :return: Preprocessed configuration as a SimpleNamespace
    """
    # Make sure there's a prefix in the configuration
    assert 'prefix' in config.__dict__, 'Please include a prefix in the yaml.'

    if prefix_keys:
        # Grab the prefix from the yaml file
        prefix = config.prefix

        # Prepend the prefix to all the keys, and get rid of the prefix.
        # Bug fix: the comparison must be against the literal key name
        # 'prefix' (it previously compared against the prefix *value*, so
        # the 'prefix' entry leaked into the output as '<prefix>_prefix').
        config = SimpleNamespace(**{f'{prefix}_{k}': v
                                    for k, v in config.__dict__.items()
                                    if k != 'prefix'})

    # Change 'None' to None in the top level: recommended behavior is to use
    # null instead of None in the yaml.
    for key, value in config.__dict__.items():
        config.__dict__[key] = value if value != 'None' else None

    return config
35,849
def TerminateProcess(hProcess, uExitCode):
    """
    Terminates the specified process and all of its threads.

    .. seealso::

        https://msdn.microsoft.com/en-us/library/ms686714

    :param pywincffi.wintypes.HANDLE hProcess:
        A handle to the process to be terminated.

    :param int uExitCode:
        The exit code of the processes and threads as a result of calling
        this function.
    """
    # Validate argument types before touching the C API.
    input_check("hProcess", hProcess, HANDLE)
    input_check("uExitCode", uExitCode, integer_types)
    ffi, library = dist.load()
    # Convert the wrapper types into the raw cdata the Win32 call expects.
    code = library.TerminateProcess(
        wintype_to_cdata(hProcess),
        ffi.cast("UINT", uExitCode)
    )
    # TerminateProcess returns non-zero on success; raise a WindowsAPIError
    # (via error_check) otherwise.
    error_check("TerminateProcess", code=code, expected=NON_ZERO)
35,850
def gisinf(x):
    """
    Like isinf, but always raise an error if type not supported instead of
    returning a TypeError object.

    Notes
    -----
    `isinf` and other ufunc sometimes return a NotImplementedType object
    instead of raising any exception. This function is a wrapper to make
    sure an exception is always raised.

    This should be removed once this problem is solved at the Ufunc level.
    """
    from numpy.core import isinf, errstate
    # Suppress "invalid value" warnings that isinf may trigger on NaNs.
    with errstate(invalid="ignore"):
        result = isinf(x)
    if isinstance(result, type(NotImplemented)):
        raise TypeError("isinf not supported for this type")
    return result
35,851
def do_configuration_list(cs, args):
    """Lists all configuration groups."""
    groups = cs.configurations.list(limit=args.limit, marker=args.marker)
    # Columns shown in the tabular CLI output.
    columns = ['id', 'name', 'description', 'datastore_name',
               'datastore_version_name']
    utils.print_list(groups, columns)
35,852
def make_const(g,  # type: base_graph.BaseGraph
               name,  # type: str
               value,  # type: np.ndarray
               uniquify_name=False  # type: bool
               ):
    """
    Convenience method to add a `Const` op to a `gde.Graph`.

    Args:
      g: The graph that the node should be added to
      name: Name for the new `Const` node
      value: Value to use for the constant
      uniquify_name: if True, generate unique names by appending a numeric
        suffix in the event of a name collision. Otherwise name collisions
        result in an error.

    Returns `gde.Node` object representing the new node.
    """
    dtype = tf.as_dtype(value.dtype)
    node = g.add_node(name, "Const", uniquify_name=uniquify_name)
    # A Const node carries its value and dtype as attributes, and exposes
    # a single output whose shape matches the constant's.
    node.add_attr("dtype", dtype)
    node.add_attr("value", value)
    node.set_outputs_from_pairs([(dtype, tf.TensorShape(value.shape))])
    return node
35,853
def find_maxima(x):
    """Return the indices of the local (relative) maxima of ``x``.

    An interior element is a maximum when it is strictly greater than both
    neighbours; an endpoint when it is strictly greater than its single
    neighbour. Handles degenerate inputs: an empty sequence yields an
    empty list and a single element is reported as a maximum (the
    original raised IndexError for sequences shorter than 2).
    """
    n = len(x)
    if n == 0:
        return []
    if n == 1:
        return [0]
    idx = []
    # Left endpoint: only its right neighbour to compare against.
    if x[1] < x[0]:
        idx.append(0)
    # Interior points: strictly greater than both neighbours.
    for i in range(1, n - 1):
        if x[i-1] < x[i] and x[i+1] < x[i]:
            idx.append(i)
    # Right endpoint: only its left neighbour to compare against.
    if x[-2] < x[-1]:
        idx.append(n - 1)
    return idx
35,854
def _transform_rankings(Y):
    """Transform the rankings to integer."""
    finite_mask = np.isfinite(Y)
    Yt = np.zeros(Y.shape, dtype=np.int64)
    # Finite entries keep their rank value (truncated to int64).
    Yt[finite_mask] = Y[finite_mask]
    # Missing ranks (NaN) are tagged RANDOM; infinite ones are tagged TOP.
    Yt[np.isnan(Y)] = RANK_TYPE.RANDOM.value
    Yt[np.isinf(Y)] = RANK_TYPE.TOP.value
    return Yt
35,855
async def lumberjack(ctx):
    """a class that Dylan wanted to play as for some reason"""
    # NOTE: the docstring above is user-visible help text for the discord
    # command, so it is left untouched.
    # Only show the overview embed when no subcommand was invoked.
    if ctx.invoked_subcommand is None:
        embed=discord.Embed(title="LUMBERJACK", url="https://docs.google.com/document/d/1ZpodR2KdgA-BPQdYwoeiewuONXkjLt3vMwlK6JHzB_o/edit#bookmark=id.j8e62xi5b8cu", description=":evergreen_tree::hammer:", color=0xB6D7A8)
        embed.set_thumbnail(url="http://magic.wizards.com/sites/mtg/files/image_legacy_migration/images/magic/daily/arcana/953_orcishlumberjack.jpg")
        embed.add_field(name="-----", value="[Here's a list of Lumberjack Abilites!](https://drive.google.com/file/d/1CZUa9FzNthWtPwi_n4i8G4OO9E0g6rbY/view)", inline=False)
        await bot.say(embed=embed)
35,856
def get_config(key=None, default=None, raise_error=False):
    """Read expyfun preference from env, then expyfun config

    Parameters
    ----------
    key : str | None
        The preference key to look for. The os environment is searched
        first, then the expyfun config file is parsed. If None, the whole
        parsed config dict is returned.
    default : str | None
        Value to return if the key is not found.
    raise_error : bool
        If True, raise an error if the key is not found (instead of
        returning default).

    Returns
    -------
    value : str | None
        The preference key value.
    """
    if key is not None and not isinstance(key, string_types):
        raise ValueError('key must be a string')

    # first, check to see if key is in env
    if key is not None and key in os.environ:
        return os.environ[key]

    # second, look for it in expyfun config file
    config_path = get_config_path()
    if not op.isfile(config_path):
        # No config file at all: fall back to the default value.
        key_found = False
        val = default
    else:
        with open(config_path, 'r') as fid:
            config = json.load(fid)
        if key is None:
            # No specific key requested: hand back the whole config dict.
            return config
        key_found = True if key in config else False
        val = config.get(key, default)

    if not key_found and raise_error is True:
        # Build a helpful error suggesting both the temporary (env var)
        # and permanent (config file) ways to set the key.
        meth_1 = 'os.environ["%s"] = VALUE' % key
        meth_2 = 'expyfun.utils.set_config("%s", VALUE)' % key
        raise KeyError('Key "%s" not found in environment or in the '
                       'expyfun config file:\n%s\nTry either:\n'
                       '    %s\nfor a temporary solution, or:\n'
                       '    %s\nfor a permanent one. You can also '
                       'set the environment variable before '
                       'running python.'
                       % (key, config_path, meth_1, meth_2))
    return val
35,857
def check_exist(path, mode, flag_exit=True):
    """
    function to check for file existence
    @param path(str): target file path
    @param mode(int): 1(existence) / 2(existence for file) / 3(existence for dir)
    @param flag_exit(bool): Exit if not present (Default: True)
    @return (bool): True when the check passes; False when it fails and
        flag_exit is False (otherwise the process exits with status 1)
    """
    # Predicate and human-readable label per supported mode; this replaces
    # three near-identical if/elif branches.
    checks = {
        1: (os.path.exists, "path"),
        2: (os.path.isfile, "file"),
        3: (os.path.isdir, "directory"),
    }
    if mode not in checks:
        sys.stderr.write("ERROR: Subroutine error: Not specified mode\n")
        sys.exit(1)
    predicate, label = checks[mode]
    if not predicate(path):
        # Same messages as before: "No such path/file/directory (<path>)".
        sys.stderr.write("ERROR: No such %s (%s)\n" % (label, path))
        if flag_exit:
            sys.exit(1)
        return False
    return True
35,858
def convert_pf_patch_to_cg_patch(p, simnum):
    """Converts a pfpatch p to a CG patch.

    Parameters
    ----------
    p : macro patch object (project type)
        Carries lipid concentrations, protein bead ids/states/positions,
        complex ids and the macro config.
    simnum : str
        Optional simulation-number suffix for the patch name (empty in
        normal MuMMI runs).

    Returns
    -------
    particlesim.ParticleSim
        The CG simulation setup built from the patch.

    Raises
    ------
    ValueError
        On negative lipid concentrations, excessive bilayer asymmetry,
        too many protein beads, or protein positions outside the CG box.
    """
    # Print patch info
    LOGGER.info("Macro patch is:")
    LOGGER.info(str(p))
    LOGGER.info("Patch id = {}".format(p.id))
    LOGGER.info("Protein bead ids = {}".format(p.protein_ids))
    LOGGER.info("Protein bead states = {}".format(p.protein_states))
    LOGGER.info("Protein bead pos. = {}".format(p.protein_positions))
    LOGGER.info("Macro simname = {}".format(p.config.simname))
    LOGGER.info("Macro filenames = {}".format(p.config.filenames))
    LOGGER.info("Macro simname = {} / {}".format(p.config.simtime, p.config.tunit))

    # Remove any spuriously negative densities - this can happen in the
    # macro model due to the noise term.
    p.concentrations = np.maximum(p.concentrations, 0)

    # Patch is used at native resolution; subsampling (37x37 -> 5x5) is
    # currently disabled.  @TODO Harsh is looking at this now
    lconcentrations = p.concentrations
    LOGGER.info("Lipid nat. grid = {}".format(p.concentrations.shape))
    LOGGER.info("Lipid subs.grid = {}".format(lconcentrations.shape))

    # Channel layout follows Patch.LIPID_NAMES: 8 inner-leaflet species
    # followed by 6 outer-leaflet species.
    lipidTypes = ["POPX", "POPC", "PAPC", "POPE", "DIPE", "DPSM", "PAPS", "PAP6", "CHOL"]
    lInner = lconcentrations[:, :, 0:8]
    lOuter = lconcentrations[:, :, 8:14]

    # Per-pixel mean lipid count per leaflet; used for the bilayer asymmetry.
    sumOuter = np.sum(lOuter) / (lconcentrations.shape[0]*lconcentrations.shape[1])
    sumInner = np.sum(lInner) / (lconcentrations.shape[0]*lconcentrations.shape[1])
    asym = np.rint(1600 * (1 - (sumInner / sumOuter))).astype(int)

    if (lInner.min() < 0 or lOuter.min() < 0):
        error = "---> Negative lipid consentrations found "
        LOGGER.error(error)
        raise ValueError(error)

    if (asym > 500 or asym < -500):
        error = "---> Bilayer asymmetry is all to high asym = " + str(asym)
        LOGGER.error(error)
        raise ValueError(error)

    # @WARNING this should not be here - remove after C3 - a temp fix to not
    # build patches with more than 4 proteins - used in last part of C3
    if (len(p.protein_ids) > 4):
        error = "---> To many protein beads in patch - stop build. Current p.protein_ids count = " + str(len(p.protein_ids))
        LOGGER.error(error)
        raise ValueError(error)

    # Probabilistic conversion of concentrations to integer particle
    # counts (64 lipids per sub-patch cell).
    for i in range(0, lInner.shape[0]):
        for j in range(0, lInner.shape[1]):
            lInner[i, j] = generate_probabilistic_patch(lInner[i, j], 64)
    lInner = lInner.astype(int)
    for i in range(0, lOuter.shape[0]):
        for j in range(0, lOuter.shape[1]):
            lOuter[i, j] = generate_probabilistic_patch(lOuter[i, j], 64)
    lOuter = lOuter.astype(int)

    # Pad the missing species with zeros so both leaflets list the same
    # 9 lipid types, then flatten the grid to (cells, types).
    lipidUpperRatioArray = np.insert(np.insert(np.insert(lOuter, 1, 0, axis=2), 6, 0, axis=2), 7, 0, axis=2)
    lipidLowerRatioArray = np.insert(lInner, 0, 0, axis=2)
    lipidUpperRatioArray = np.reshape(lipidUpperRatioArray, (lconcentrations.shape[0]*lconcentrations.shape[1], len(lipidTypes)))
    lipidLowerRatioArray = np.reshape(lipidLowerRatioArray, (lconcentrations.shape[0]*lconcentrations.shape[1], len(lipidTypes)))

    # Save full lipid type asignmet patch for insane to read
    lipid_types_inner = ["POPC", "PAPC", "POPE", "DIPE", "DPSM", "PAPS", "PAP6", "CHOL"]
    lipid_types_outer = ["POPX", "PAPC", "POPE", "DIPE", "DPSM", "CHOL"]
    # To fit protein placement we need to use the transpose of x,y columns
    lconcentrations_full = pdcglob.reinterp(37, 37, 14, p.concentrations.transpose((1,0,2)), 40, 40)
    lInner = lconcentrations_full[:, :, 0:8]
    lOuter = lconcentrations_full[:, :, 8:14]
    sumOuter = np.sum(lOuter) / (lconcentrations_full.shape[0]*lconcentrations_full.shape[1])
    sumInner = np.sum(lInner) / (lconcentrations_full.shape[0]*lconcentrations_full.shape[1])
    asym_full = np.rint(1600 * (1 - (sumInner / sumOuter))).astype(int)
    LOGGER.info("Both asym(s) shoudl be the same asym {} and asym_full {}".format(asym, asym_full))
    lipid_counts_inner = pdcglob.pdcglob(40, 40, 8, lInner)
    lipid_counts_outer = pdcglob.pdcglob(40, 40, 6, lOuter)
    lipid_counts_file = "lipid_counts.npz"
    np.savez_compressed(lipid_counts_file,
                        lipid_counts_inner = lipid_counts_inner,
                        lipid_counts_outer = lipid_counts_outer,
                        asym = asym_full,
                        lipid_types_inner = lipid_types_inner,
                        lipid_types_outer = lipid_types_outer)
    LOGGER.info("Full patch lipid type assignment saved to {}, asym {}, shape".format(lipid_counts_file, asym_full, lconcentrations_full.shape))

    # Convert protein bead list to protein "unit" list - where each unit is
    # inserted as one piece later in the pipeline.
    LOGGER.info("Protein bead ids = {}".format(p.protein_ids))
    LOGGER.info("Protein bead states = {}".format(p.protein_states))
    LOGGER.info("Protein bead pos. = {}".format(p.protein_positions))
    LOGGER.info("Protein types = {}".format(p.type_c3()))
    LOGGER.info("Protein complex ids = {}".format(p.complex_ids))

    # Shift from box center at 0,0 to box lower/left edge at 0,0 as used
    # in the CG simulations.
    posArray = []
    if len(p.protein_positions) > 0:
        box_dim = 31.0  # @WARNING hard coded, also in particlesim.py
        half_box_dim = box_dim / 2.0  # Get box center to move protein 0,0 to box edge
        posArray = p.protein_positions
        # First bead in the patch should be RAS and is already centered
        # (close to 0,0 within half a pixel).
        posArray = posArray + [half_box_dim, half_box_dim]
        if (posArray.max() > 31 or posArray.min() < 0):
            # Bug fix: previously formatted the undefined name `krasPosArray`,
            # which raised NameError instead of the intended ValueError.
            error = "---> Protein posisions are outside the CG box size; pos are: " + np.array2string(posArray)
            LOGGER.error(error)
            raise ValueError(error)
        LOGGER.info("Protein bead pos ce = {}".format(posArray))

    # Change from RAS4B to RAS4A for C4 healing run (applies to both the
    # RAS-only and RAS-RAF branches below).
    # If False this will be a normal 4B setup, if True change to 4A.
    c4_healing_convert_to_4A = True

    # @TODO fix this? this is an evil hack to get the protein units from
    # current complexes - works as we only have x2 types of proteins.
    proteinUnits = []
    proteinUnitCount = len(p.protein_ids) - len(p.complex_ids)  # Currently each complex has x2 beads
    LOGGER.info("Protein unit count = {}".format(proteinUnitCount))
    remainig_ids = p.protein_ids
    for i in range(proteinUnitCount):
        cProteinDict = {}
        cProteinDict["id"] = i
        # check if current bead belongs to a complex
        if (len(p.complex_ids) > 0 and any(remainig_ids[0] in string for string in p.complex_ids)):
            cComplex = []
            if c4_healing_convert_to_4A:
                # Convert 4B to 4A for C4 healing runs: add a RAS4A-RAF
                cProteinDict["type"] = "RAS4A-RAF"
                for x in p.complex_ids:
                    if remainig_ids[0] in x:
                        cComplex = x
                        break
                ras_bead_id = cComplex[0]
                raf_bead_id = cComplex[1]
                # Flip beads if RAS/RAF bead order was reverse
                if "RAF" in p.protein_states[np.where(p.protein_ids == ras_bead_id)][0]:
                    ras_bead_id = cComplex[1]
                    raf_bead_id = cComplex[0]
                cProteinDict["beads"] = cComplex
                ras_state = p.protein_states[np.where(p.protein_ids == ras_bead_id)][0]
                cProteinDict["state"] = ras_state[0] + ras_state[-1]
                cPosRAS = posArray[np.where(p.protein_ids == ras_bead_id)][0]
                cPosRAF = posArray[np.where(p.protein_ids == raf_bead_id)][0]
                deltaPost = cPosRAF - cPosRAS
                # atan2 returns radians in (-pi, pi] and takes (y, x), so
                # flip, convert to degrees, and map to [0, 360).
                rafAngle = math.degrees(math.atan2(deltaPost[1], deltaPost[0])) % 360
                cProteinDict["angle"] = rafAngle
                LOGGER.info("RAF angle RAS.pos {} RAF.pos {}, delta.pos {}, angle {}".format(cPosRAS, cPosRAF, deltaPost, rafAngle))
                # RAS-RAF is placed using the RAS farnesyl x,y position.
                cProteinDict["position"] = cPosRAS
                assert cProteinDict["position"].shape[0] == 2
                # Structure is assigned later, in placeprotein.
                cProteinDict["structure"] = "N/A"
                cProteinDict["charge"] = -1
                LOGGER.info("Protein unit {} converting 4B to 4A - adding RAS4A-RAF complex for bead ids {} full info: {}".format(i, cComplex, str(cProteinDict)))
            else:
                # Now we are adding a RAS-RAF
                cProteinDict["type"] = "RAS-RAF"
                for x in p.complex_ids:
                    if remainig_ids[0] in x:
                        cComplex = x
                        break
                ras_bead_id = cComplex[0]
                raf_bead_id = cComplex[1]
                # Flip beads if RAS/RAF bead order was reverse
                if "RAF" in p.protein_states[np.where(p.protein_ids == ras_bead_id)][0]:
                    ras_bead_id = cComplex[1]
                    raf_bead_id = cComplex[0]
                cProteinDict["beads"] = cComplex
                ras_state = p.protein_states[np.where(p.protein_ids == ras_bead_id)][0]
                cProteinDict["state"] = ras_state[0] + ras_state[-1]
                cPosRAS = posArray[np.where(p.protein_ids == ras_bead_id)][0]
                cPosRAF = posArray[np.where(p.protein_ids == raf_bead_id)][0]
                deltaPost = cPosRAF - cPosRAS
                rafAngle = math.degrees(math.atan2(deltaPost[1], deltaPost[0])) % 360
                cProteinDict["angle"] = rafAngle
                LOGGER.info("RAF angle RAS.pos {} RAF.pos {}, delta.pos {}, angle {}".format(cPosRAS, cPosRAF, deltaPost, rafAngle))
                # RAS-RAF is placed using the RAS farnesyl x,y position.
                cProteinDict["position"] = cPosRAS
                assert cProteinDict["position"].shape[0] == 2
                # Structure is assigned later, in placeprotein.
                cProteinDict["structure"] = "N/A"
                cProteinDict["charge"] = -1
                LOGGER.info("Protein unit {} adding RAS-RAF complex for bead ids {} full info: {}".format(i, cComplex, str(cProteinDict)))
            # Remove both complex beads from list
            remainig_ids = remainig_ids[~np.isin(remainig_ids, cComplex)]
        else:
            if c4_healing_convert_to_4A:
                # Convert 4B to 4A for C4 healing runs: add a RAS4A-only
                cProteinDict["type"] = "RAS4A-ONLY"
                cProteinDict["beads"] = [remainig_ids[0]]
                cProteinDict["state"] = p.protein_states[np.where(p.protein_ids == remainig_ids[0])][0][3:]
                cProteinDict["position"] = posArray[np.where(p.protein_ids == remainig_ids[0])][0]
                assert cProteinDict["position"].shape[0] == 2
                # Structure is assigned later, in placeprotein.
                cProteinDict["structure"] = "N/A"
                cProteinDict["charge"] = -2
                LOGGER.info("Protein unit {} converting 4B to 4A - adding RAS4A-ONLY for bead id {} full info: {}".format(i, remainig_ids[0], str(cProteinDict)))
            else:
                # Now we are adding a RAS-only
                cProteinDict["type"] = "RAS-ONLY"
                cProteinDict["beads"] = [remainig_ids[0]]
                cProteinDict["state"] = p.protein_states[np.where(p.protein_ids == remainig_ids[0])][0][3:]
                cProteinDict["position"] = posArray[np.where(p.protein_ids == remainig_ids[0])][0]
                assert cProteinDict["position"].shape[0] == 2
                # Structure is assigned later, in placeprotein.
                cProteinDict["structure"] = "N/A"
                cProteinDict["charge"] = -2
                LOGGER.info("Protein unit {} adding RAS-ONLY for bead id {} full info: {}".format(i, remainig_ids[0], str(cProteinDict)))
            # remove current bead from list
            remainig_ids = remainig_ids[~np.isin(remainig_ids, remainig_ids[0])]
        proteinUnits.append(cProteinDict)

    # Print CG patch input
    LOGGER.info("CG_patch input:")
    LOGGER.info("macro patch summ_outer = {}".format(sumOuter))
    LOGGER.info("macro patch summ_inner = {}".format(sumInner))
    LOGGER.info("patch asym = {}".format(asym))
    LOGGER.info("Lipids outer cons. = {}".format(str(lipidUpperRatioArray)))
    LOGGER.info("Lipids inner cons. = {}".format(str(lipidLowerRatioArray)))
    LOGGER.info("Protein units all = {}".format(proteinUnits))

    # Add simnum after name, if provided (shoud be empty in MuMMI runs)
    simName = p.id
    if len(simnum) > 0:
        simName += "_" + simnum

    patchSim = particlesim.ParticleSim(lipidTypes, lipidUpperRatioArray, lipidLowerRatioArray,
                                       [lconcentrations.shape[0], lconcentrations.shape[1]],
                                       proteinUnits, asym=-asym, simName=simName,
                                       lipidFullAsignment=True)
    return patchSim
35,859
def find_error_detect(image_path):
    """
    Given one image, judge whether the CTPN text detection is correct;
    if it looks wrong, record the image path for later inspection.

    :param image_path: path of the image to check (str)
    :return: 1 if the detection looks correct, 0 otherwise
    """
    save_path = './ctpn_detect_error.txt'
    image = cv2.imread(image_path)
    # Payload sent to the detection services: path plus base64-encoded image.
    data = {'fname': image_path, 'img_str': _img_to_str_base64(image)}
    # test by EAST mode
    res_east_detect = c_det_east.detect(data, 0.8, False)  # Debug mode, test image is need labeled?
    bboxdict_east = res_east_detect['data']
    boxes_east = list()
    for idx, inst in enumerate(bboxdict_east):
        # Use opposite corners (x0,y0)-(x2,y2) as an axis-aligned box.
        x0, y0, x1, y1 = int(inst['x0']), int(inst['y0']), int(inst['x2']), int(inst['y2'])
        boxes_east.append([x0, y0, x1, y1])
    # test by CTPN mode
    res_frcnn_detect = c_det_frcnn.detect(data)  #
    bboxdict_frcnn = res_frcnn_detect['data']['bbox_list']
    boxes_ctpn = [_['bbox'] for _ in bboxdict_frcnn]  # (n[4])

    list_rec_ctpn = [rectangle(_[0], _[1], _[2], _[3]) for _ in boxes_ctpn]
    list_rec_east = [rectangle(_[0], _[1], _[2], _[3]) for _ in boxes_east]
    list_rec_onlyeast = list()
    for east_idx, one_east_rec in enumerate(list_rec_east):
        flag = 0
        for ctpn_idx, one_ctpn_rec in enumerate(list_rec_ctpn):
            overlap_area, iou_width = iou_area(one_east_rec, one_ctpn_rec)
            if overlap_area*1./one_east_rec.area > 0.8 or overlap_area*1./one_ctpn_rec.area > 0.8:
                # If the overlap reaches 0.8 of either box's area, treat
                # the two boxes as the same detection.
                flag = 1
                break
            else:
                continue
        # No CTPN box overlapped this EAST box well, so this EAST box is
        # likely an extra region EAST found but CTPN missed.
        if not flag:
            list_rec_onlyeast.append(one_east_rec)
    detect_right = 1
    # For each extra EAST box, check whether it sits in the top/bottom
    # margin that CTPN deliberately filters; if not, CTPN missed a region.
    for east_rec in list_rec_onlyeast:
        if east_rec.y0 <= 2 or east_rec.y0 > image.shape[0]-60:
            continue
        else:
            detect_right = 0
            break
    if not detect_right:
        # Detection judged wrong: append the image path and report failure.
        # NOTE(review): no newline is written between paths — confirm the
        # downstream reader expects concatenated paths.
        with open(save_path, 'a') as f:
            f.write(image_path)
    return detect_right
35,860
def MPS_SimRemoveRule(rule_id: int) -> None:
    """Removes a simulation rule

    Parameters
    ----------
    rule_id : int
        Rule identifier

    Raises
    ------
    CTS3Exception
        If the native call reports an error code.
    """
    # Reject values that do not fit in an unsigned 32-bit integer.
    _check_limits(c_uint32, rule_id, 'rule_id')
    # Forward to the native library; the first two arguments are fixed
    # to 0 here (presumably reserved — confirm against the MPS API).
    CTS3Exception._check_error(_MPuLib.MPS_SimRemoveRule(
        c_uint8(0),
        c_uint32(0),
        c_uint32(rule_id)))
35,861
def appendItem():
    """Includes product into invoice and redirect"""
    app.logger.debug('This is appendItem to PO process')
    if request.method == 'POST':
        # Validate the submitted form; check_up is truthy when the
        # product data is acceptable.
        (prod_properties, check_up) = checkProduct(request.form)
        if check_up:
            # Attach the product to the current user's invoice.
            appendProduct(prod_properties, session['userID'])
        # Keep the product-aggregation tab active after the round trip.
        session['active_tab'] = 'p_agg'
    # NOTE(review): non-POST requests fall through to the redirect
    # without touching the session — confirm that is intended.
    return redirect(url_for('index'))
35,862
def gzip_bytes(bytes_obj):
    """Gzip-compress a string or bytes object in memory.

    :param bytes_obj: ``bytes`` payload (a ``str`` is encoded first).
    :return: ``io.BytesIO`` whose buffer holds the gzip stream.
    """
    payload = bytes_obj.encode() if isinstance(bytes_obj, str) else bytes_obj
    stream = io.BytesIO()
    with gzip.GzipFile(fileobj=stream, mode='w') as gz:
        gz.write(payload)
    return stream
35,863
def ones_like(other_ary):
    """
    Create a PitchArray with all entry equal 1,
    whose shape and dtype is the same as other_ary
    """
    # Allocate an array with matching shape/dtype, then set every entry to 1.
    result = PitchArray(other_ary.shape, other_ary.dtype)
    result.fill(1)
    return result
35,864
def inference_multiview(views, n_classes, keep_prob):
    """Build the multi-view CNN inference graph.

    Parameters
    ----------
    views : N x V x W x H x C tensor (batch, views, width, height, channels)
    n_classes : int
        Number of output classes.
    keep_prob : dropout keep probability for the fully-connected layers.

    Returns
    -------
    Logits tensor of shape N x n_classes.
    """
    n_views = views.get_shape().as_list()[1]

    # transpose views : (NxVxWxHxC) -> (VxNxWxHxC) so each view can be
    # gathered along the first axis.
    views = tf.transpose(views, perm=[1, 0, 2, 3, 4])

    view_pool = []
    # Bug fix: `xrange` is Python 2 only and raises NameError on Python 3
    # (this file uses f-strings elsewhere, so it runs under Python 3).
    for i in range(n_views):
        # set reuse True for i > 0, for weight-sharing across views
        reuse = (i != 0)
        view = tf.gather(views, i)  # NxWxHxC

        conv1 = _conv('conv1', view, [11, 11, 3, 96], [1, 4, 4, 1], 'VALID', reuse=reuse)
        pool1 = _maxpool('pool1', conv1, ksize=[1, 3, 3, 1],
                         strides=[1, 2, 2, 1], padding='VALID')
        conv2 = _conv('conv2', pool1, [5, 5, 96, 256], group=2, reuse=reuse)
        pool2 = _maxpool('pool2', conv2, ksize=[1, 3, 3, 1],
                         strides=[1, 2, 2, 1], padding='VALID')
        conv3 = _conv('conv3', pool2, [3, 3, 256, 384], reuse=reuse)
        conv4 = _conv('conv4', conv3, [3, 3, 384, 384], group=2, reuse=reuse)
        conv5 = _conv('conv5', conv4, [3, 3, 384, 256], group=2, reuse=reuse)
        pool5 = _maxpool('pool5', conv5, ksize=[1, 3, 3, 1],
                         strides=[1, 2, 2, 1], padding='VALID')

        # Flatten each view's final pooled activation for view pooling.
        dim = np.prod(pool5.get_shape().as_list()[1:])
        reshape = tf.reshape(pool5, [-1, dim])

        view_pool.append(reshape)

    # Pool the per-view features into a single descriptor.
    pool5_vp = _view_pool(view_pool, 'pool5_vp')
    print('pool5_vp', pool5_vp.get_shape().as_list())

    fc6 = _fc('fc6', pool5_vp, 4096, dropout=keep_prob)
    fc7 = _fc('fc7', fc6, 4096, dropout=keep_prob)
    fc8 = _fc('fc8', fc7, n_classes)

    return fc8
35,865
def trueReturn(data, msg):
    """
    Successful-operation result: wrap payload and message in a JSON response.
    """
    body = {"status": True, "data": data, "msg": msg}
    return JSONResponse(content=body)
35,866
def active_power_from_waveform(t, V, I, sampling_rate, ts):
    """Compute the average active power from sampled waveforms.

    Active power is the time average of the instantaneous power
    p(t) = v(t) * i(t); for uniformly sampled data this is the mean of
    the element-wise product of the two vectors.

    :param t: time vector (numpy) — unused, kept for interface compatibility
    :param V: voltage vector (numpy)
    :param I: current vector (numpy)
    :param sampling_rate: sampling rate (int) — unused, samples are
        assumed uniformly spaced
    :param ts: test script with logging capabilities (may be None)
    :return: avg_P - Average active power (float)
    """
    # Mean of the instantaneous power over the captured window.
    avg_P = float(np.mean(np.asarray(V) * np.asarray(I)))
    return avg_P
35,867
def parse_slurm_times(job_id: str, path: Path = Path.cwd()) -> float:
    """Performs the parsing of the file slurm-{job_id}.out by returning in
    milliseconds the time measured by Slurm.

    Args:
        job_id (str): The job identifier used in the slurm output filename.
        path (Path): The path where to look for the slurm output.
            Defaults to the current working directory.

    Returns:
        float: The time elapsed by the application, in milliseconds.

    Raises:
        FileNotFoundError: If the slurm output file does not exist.
        ValueError: If no time could be parsed from the file.
    """
    real_time = None
    out_file = path / f"slurm-{job_id}.out"
    try:
        with open(out_file, "r") as handle:
            contents = handle.readlines()
        for line in contents:
            # The `time` output line looks like 'real\t1m2.345s'.
            if line.startswith("real"):
                raw_time = re.split("\t", line)[-1].strip()
                real_time = parse_milliseconds(raw_time)
        if real_time:
            return real_time
        raise ValueError(
            "Could not parse time of slurm output,"
            f"content set to {real_time} !"
        )
    except FileNotFoundError:
        raise FileNotFoundError("Slurm output was not generated.")
35,868
def bprl(positive: torch.Tensor, negative: torch.Tensor) -> torch.Tensor:
    """
    Bayesian Personalized Ranking Loss
    https://arxiv.org/ftp/arxiv/papers/1205/1205.2618.pdf
    """
    # -mean(log sigmoid(pos - neg)): smaller when positives outrank negatives.
    margin = positive - negative
    return -F.logsigmoid(margin).mean()
35,869
def select2_js_url():
    """
    Return the full url to the Select2 JavaScript library

    Default: ``None``

    # Example

        {% select2_js_url %}
    """
    # Thin template-tag wrapper: delegate to the library helper.
    return sl2.select2_js_url()
35,870
def _SharedSuffix(pattern1, pattern2):
    """Returns the shared suffix of two patterns."""
    # A shared suffix is the shared prefix of the reversed patterns,
    # reversed back.
    reversed_prefix = _SharedPrefix(pattern1[::-1], pattern2[::-1])
    return reversed_prefix[::-1]
35,871
def run(args):
    """Handle credstash script.

    Parses the CLI arguments, makes sure the backing table exists, then
    dispatches on the requested action (get / put / del / list).
    Returns a shell-style exit code (0/1) for list and usage errors,
    otherwise None.
    """
    parser = argparse.ArgumentParser(
        description=("Modify Home Assistant secrets in credstash."
                     "Use the secrets in configuration files with: "
                     "!secret <name>"))
    parser.add_argument(
        '--script', choices=['credstash'])
    parser.add_argument(
        'action', choices=['get', 'put', 'del', 'list'],
        help="Get, put or delete a secret, or list all available secrets")
    parser.add_argument(
        'name', help="Name of the secret", nargs='?', default=None)
    parser.add_argument(
        'value', help="The value to save when putting a secret",
        nargs='?', default=None)

    # Imported lazily so the dependency is only needed when this script runs.
    # pylint: disable=import-error, no-member
    import credstash

    args = parser.parse_args(args)
    table = _SECRET_NAMESPACE
    try:
        credstash.listSecrets(table=table)
    except Exception:  # pylint: disable=broad-except
        # Any failure is treated as "table missing": create it on demand.
        credstash.createDdbTable(table=table)

    if args.action == 'list':
        secrets = [i['name'] for i in credstash.listSecrets(table=table)]
        # Versioned secrets repeat per version; de-duplicate for display.
        deduped_secrets = sorted(set(secrets))

        print('Saved secrets:')
        for secret in deduped_secrets:
            print(secret)
        return 0

    # Every remaining action requires a secret name.
    if args.name is None:
        parser.print_help()
        return 1

    if args.action == 'put':
        # Use the value from the CLI, or prompt without echoing.
        if args.value:
            the_secret = args.value
        else:
            the_secret = getpass.getpass('Please enter the secret for {}: '
                                         .format(args.name))
        # Store as a new version on top of the current highest one.
        current_version = credstash.getHighestVersion(args.name, table=table)
        credstash.putSecret(args.name,
                            the_secret, version=int(current_version) + 1,
                            table=table)
        print('Secret {} put successfully'.format(args.name))
    elif args.action == 'get':
        the_secret = credstash.getSecret(args.name, table=table)
        if the_secret is None:
            print('Secret {} not found'.format(args.name))
        else:
            print('Secret {}={}'.format(args.name, the_secret))
    elif args.action == 'del':
        credstash.deleteSecrets(args.name, table=table)
        print('Deleted secret {}'.format(args.name))
35,872
def cromwell_version(host):
    """Return the version of this Cromwell server"""
    # Build a client for the given host and query its version endpoint.
    client = CromwellClient(host)
    data = call_client_method(client.version)
    # Print the result to stdout (click CLI command).
    click.echo(data)
35,873
def test_transform_section_data() -> None:
    """
    Test transform_section_data
    """
    # The raw section fixture should be transformed into the structured
    # fixture; both are defined at module level.
    expected = CON333_LEC2001
    actual = a2_part4.transform_section_data(CON333_LEC2001_RAW)
    assert actual == expected
35,874
def export_urls(urls, file_path):
    """Write the extracted URLs, one per line, next to the result file.

    Arguments:
        urls {[list]} -- [extracted urls]
        file_path {[str]} -- [result text file path; the output name is
            derived by replacing ".txt" with "_links.txt"]
    """
    target_path = file_path.replace(".txt", "_links.txt")
    with open(target_path, "w") as handle:
        handle.write("\n".join(urls))
35,875
def handle_request_files_upload(request):
    """
    Handle request.FILES if len(request.FILES) == 1.
    Returns tuple(upload, filename, is_raw, mime_type) where upload is file itself.

    Raises UploadException when the declared MIME type does not match the
    uploaded file's extension.
    """
    # FILES is a dictionary in Django but Ajax Upload gives the uploaded file
    # an ID based on a random number, so it cannot be guessed here in the code.
    # Rather than editing Ajax Upload to pass the ID in the querystring,
    # note that each upload is a separate request so FILES should only
    # have one entry.
    # Thus, we can just grab the first (and only) value in the dict.
    is_raw = False
    upload = list(request.FILES.values())[0]
    filename = upload.name
    _, iext = os.path.splitext(filename)
    mime_type = upload.content_type.lower()
    if iext not in mimetypes.guess_all_extensions(mime_type):
        # Bug fix: the message previously hard-coded "(unknown)" while a
        # filename keyword was passed to .format() but never interpolated.
        msg = "MIME-Type '{mimetype}' does not correspond to file extension of '{filename}'."
        raise UploadException(msg.format(mimetype=mime_type, filename=filename))
    return upload, filename, is_raw, mime_type
35,876
def vgg16(num_class):
    """VGG 16-layer model (configuration "D") with batch normalization

    Args:
        num_class: number of output classes for the final classifier.
    """
    # cfg['D'] is the VGG-16 layer specification; batch_norm=True inserts
    # normalization layers into the feature extractor.
    model = VGG(make_layers(cfg['D'], batch_norm=True), num_classes=num_class)
    return model
35,877
def setup_logging(
    config_path="logging.yaml",
    config_path_env="LOG_CFG_PATH"
):
    """Setup the python logger

    Args:
        config_path (str): Path to a default config file
        config_path_env (str): Env variable to specify alternate logging config
    """
    # The environment variable, when set, overrides the default path.
    config_override = os.getenv(config_path_env, None)
    config_path = config_path if not config_override else config_override
    if os.path.exists(config_path):
        import yaml
        with open(config_path, "rt") as yaml_file:
            # Bug fix: use safe_load — yaml.load without an explicit Loader
            # is deprecated and can construct arbitrary Python objects.
            config = yaml.safe_load(yaml_file.read())
        logging.config.dictConfig(config)
    else:
        # No config file available: fall back to module-level defaults.
        logging.basicConfig(**DEFAULT_LOGGING_CONFIG)
35,878
def test_eval_dlM_dm_02():
    """
    Tests evaluation of derivative of log M w.r.t. mu for k = 0, l = 2.
    """
    # Problem sizes: N nodes -> C connections, U subjects.
    (N, U) = (6, 5)
    C = fcdiff.util.N_to_C(N)
    # Random probability matrices for the normal and mixture terms.
    norm = rand_prob((C, U))
    mix = rand_prob((C, U))
    mu = 0.14
    sigma = 0.02
    epsilon = 0.07
    eta = 0.29
    # Value under test for (k, l) = (0, 2).
    act_dlM_dm = fcdiff.fit._eval_dlM_dm(norm, mix, mu, sigma, eta, epsilon, 0, 2)
    dlN_dm = fcdiff.fit._eval_dlN_dm(norm, mu, sigma)
    # Expected closed form: eps combines eta and epsilon, and
    # dlM/dm = 0.5 * (1 - eps) * dlN/dm / mix.
    eps = eta * epsilon + (1 - eta) * (1 - epsilon)
    exp_dlM_dm = 0.5 * (1 - eps) * dlN_dm / mix
    nptest.assert_allclose(act_dlM_dm, exp_dlM_dm)
35,879
def __update_dict(orig, update):
    """Deep update of a dictionary

    For each entry (k, v) in update such that both orig[k] and v are
    dictionaries, orig[k] is recursively updated to v. For all other
    entries (k, v), orig[k] is set to v.
    """
    for key, value in update.items():
        both_mappings = (key in orig
                         and isinstance(value, collections.abc.Mapping)
                         and isinstance(orig[key], collections.abc.Mapping))
        if both_mappings:
            # Recurse so nested keys are merged rather than replaced.
            __update_dict(orig[key], value)
        else:
            orig[key] = value
35,880
def EMA(moving_average_model: nn.Module, current_model: nn.Module, beta: float):
    """Exponential Moving Average: blend current weights into the
    moving-average model in place."""
    pairs = zip(current_model.parameters(), moving_average_model.parameters())
    for current_param, average_param in pairs:
        # new_avg = beta * old_avg + (1 - beta) * current
        average_param.data = (beta * average_param.data
                              + (1 - beta) * current_param.data)
35,881
def IFS(*args) -> Function:
    """
    Evaluates multiple conditions and returns a value that corresponds to
    the first true condition.

    Learn more: https://support.google.com/docs/answer/7014145
    """
    # Alternating condition/value arguments are forwarded to the formula.
    return Function("IFS", args)
35,882
def idwt_joined_(w, rec_lo, rec_hi, mode):
    """Computes single level discrete wavelet reconstruction

    The approximation (first half) and detail (second half) coefficients
    are stored concatenated in ``w``.
    """
    half = len(w) // 2
    # Split the joined vector and delegate to the two-band reconstruction.
    return idwt_(w[:half], w[half:], rec_lo, rec_hi, mode)
35,883
def _get_pattern_nts(rule):
    """ Return a list of NT names present in given rule. """
    # Collect the name of every nonterminal bit in the rule's input
    # pattern, preserving order (duplicates included).
    return [bit.nonterminal_name()
            for bit in rule.ipattern.bits
            if bit.is_nonterminal()]
35,884
def collision_time(results_dir):
    """Calculate the time of the collision.

    Scans the plotfiles in ``results_dir`` from newest to oldest for the
    point where the central density crosses a threshold, then writes
    that dataset's time to ``collision_time.txt``. Does nothing if the
    result file already exists.
    """
    out_file = results_dir + '/collision_time.txt'

    # Already computed on a previous run; skip the expensive scan.
    if os.path.isfile(out_file):
        return

    print("Calculating collision time in directory " + results_dir)

    # yt is imported lazily: it is heavy and only needed here.
    import yt

    # Load all the output files in the output directory.
    # Search them one by one (in reverse) until we find
    # the time where the density threshold is crossed
    # at the center of the simulation.

    density_threshold = 1.0e2

    plots = wdmerger.get_plotfiles(results_dir, prefix = 'smallplt')
    plots.reverse()

    if (plots == []) or (plots == None):
        return

    for i, plot in enumerate(plots):
        ds = yt.load(results_dir + '/output/' + plot)

        # Get a region that is only one coarse zone-width wide
        dx = ds.domain_width / ds.domain_dimensions
        sph = ds.sphere([0.0, 0.0, 0.0], max(dx))

        # Find the maximum density in that region
        rho_max = sph.max("density")

        # Scanning newest-to-oldest: the first plot below the threshold
        # means the previous (newer) plot is the collision time.
        if rho_max < density_threshold:
            idx = i - 1
            break

    # NOTE(review): if no plot falls below the threshold, `idx` is never
    # assigned and the next line raises NameError — confirm this cannot
    # happen for valid runs.
    ds = yt.load(results_dir + '/output/' + plots[idx])

    file = open(out_file, 'w')
    file.write(str(ds.current_time.v))
    file.close()
35,885
async def on_message(msg):
    """Ignore messages not sent via DM or in the bot's designated channel."""
    # msg.guild is None for direct messages; otherwise only messages in
    # the announcements channel are forwarded to the command processor.
    if msg.guild is None or msg.channel.id == ANNOUNCEMENTS_CHANNEL_ID:
        await bot.process_commands(msg)
35,886
def do_extend(cs, args):
    """Increases the size of an existing share."""
    # Resolve the share by ID or name, then request the new (larger) size.
    share = _find_share(cs, args.share)
    cs.shares.extend(share, args.new_size)
35,887
def stopping_player(bot, state):
    """ A Player that just stands still. """
    # Returning the current position means the bot never moves.
    return bot.position
35,888
def check_continue(transformer: transformer_class.Transformer, check_md: dict, transformer_md: dict, full_md: dict) -> tuple:
    """Checks if conditions are right for continuing processing
    Arguments:
        transformer: instance of transformer class
        check_md: request-specific metadata; must provide a 'list_files' callable
        transformer_md: metadata associated with this transformer (unused)
        full_md: the full set of metadata (unused)
    Return:
        Returns a tuple containining the return code for continuing or not, and
        an error message if there's an error
    """
    # Continue only when at least one raw .bin image file is listed.
    have_file = any(one_file.endswith('.bin') for one_file in check_md['list_files']())

    # Fix: the original success path returned bare `(0)` -- the int 0, not a
    # tuple -- contradicting the documented and annotated tuple return type.
    return (0,) if have_file else (-1, "Missing raw image bin file from list of files")
35,889
def getFlatten(listToFlat):
    """
    Flatten a Maya node list by round-tripping it through the selection.

    :param listToFlat: anything, preferably list of strings
    :return: flattened list (list of strings)
    """
    # Remember the user's selection so it can be restored afterwards.
    saved_selection = mc.ls(sl=True, fl=True)
    mc.select(cl=1)
    mc.select(listToFlat)
    flattened = mc.ls(sl=True, fl=True)
    mc.select(saved_selection)
    return flattened
35,890
def can_exit_room(state: State, slot: int) -> bool:
    """
    Return True if amphipod can escape a room because all amphipods are in their place
    Not exhaustive! If there are amphipods above it, it may still be stuck
    """
    # NOTE(review): slots appear to be grouped 4 per room (room depth 4), with
    # slot // 4 the room index and room*4+3 the bottom slot; amphipod values
    # seem to be encoded as their target-room index -- confirm against State.
    amphipod = state[slot]
    assert amphipod != EMPTY_SLOT
    room = slot // 4
    bottom_amphipod = state[room * 4 + 3]
    # The bottom occupant does not belong to this room, so everything above
    # it (including this amphipod) must eventually leave.
    if bottom_amphipod != room:
        return True
    assert bottom_amphipod != EMPTY_SLOT
    # Scan upward from the slot just above the bottom toward the room's top.
    for i in range(room * 4 + 2, room * 4 - 1, -1):
        if state[i] == EMPTY_SLOT:
            # Only correctly-placed amphipods below the gap: no reason to exit.
            return False
        if state[i] != bottom_amphipod:
            # A misplaced amphipod sits in the room; occupants must move out.
            return True
    return False
35,891
def square_matrix(square):
    """
    Return the blurred pixel value for a 3*3 neighbourhood: the floor of the
    mean of the nine pixels in the top-left 3*3 region of ``square``.
    """
    # Sum the nine pixels, then integer-divide to get the average.
    total = sum(sum(row[:3]) for row in square[:3])
    return total // 9
35,892
def create_continuous_plots(df: pd.DataFrame,
                            reporting_features: List[str],
                            model_config: Dict[str, Any]) -> None:
    """Create line plot to show how target varies according to feature.

    No returns; saves assets to model folder.

    Args:
        df: Full, feature rich dataframe, must contain config specified target,
            and numeric feature columns specified by `reporting_features`
        reporting_features: List of features to produce individual plots
        model_config: Loaded model experiment config
    """
    # NOTE: mutates the caller's dataframe in place by casting these columns.
    df[reporting_features] = df[reporting_features].astype(float)

    for feature in reporting_features:
        # Fix: the original called plt.figure() *and* plt.subplots(), leaking
        # one empty, never-used figure per feature until the final close.
        plot_fig, plot_ax = plt.subplots()

        data = df.groupby(feature)[model_config["target"]].mean()
        title = f"Continuous plot of {model_config['target']} and {feature}"
        sns.lineplot(data=data, ax=plot_ax).set_title(title)

        # in case <na> comes in from dummy variables
        feature = feature.replace("<", "").replace(">", "")
        output_path = Path(utils.get_model_path(model_config),
                           f"plots_continuous_{feature}.png")
        logging.info(f"Saving to: {output_path}")
        plot_fig.savefig(output_path)
        plt.close("all")
35,893
def before_after_text(join_set, index, interval_list):
    """
    Extracts any preceeding or following markup to be joined to an interval's text.
    """
    before, after = '', ''
    # Only join when a set of joinable markup characters was supplied.
    if join_set:
        if index > 0:
            prev_text = interval_list[index - 1][2]
            before = ''.join(ch for ch in prev_text if ch in join_set)
        if index < len(interval_list) - 1:
            next_text = interval_list[index + 1][2]
            after = ''.join(ch for ch in next_text if ch in join_set)
    return before, after
35,894
def validate_kubernetes_tag(config):
    """
    Checks the presence of Kubernetes tag

    :param config: deployment configuration mapping; must contain the
        consts.K8S_KEY entry
    :raises: whatever validate_dict_data raises when the key is missing
    """
    logger.info("checking kubernetes tag")
    validate_dict_data(config, consts.K8S_KEY)
35,895
def run_unpack(target_archive: str, target_folder: str, *target_files: str):
    """
    creates target_folder where it unpacks listed target_files from target_archive
    then exits program
    :param target_archive: the path to an already created archive
    :param target_folder: the path to the unpack location directory; should not already exist
    :param target_files: a list of files inside target_archive (forward-slash paths)
    :return: void
    :raises CustomError: if a requested file is not present in the archive
    """
    prepare_archive_unpacking(target_archive, target_folder)
    metadata_length, files = get_archive_content(target_archive)

    # Archive entries may use backslashes; normalize for comparison.
    possible_files = [name.replace("\\", "/") for name, _size in files]
    for requested in target_files:
        if requested not in possible_files:
            # Fix: message had an unbalanced bracket ("...is not in:<list>]").
            raise CustomError(f"{requested} is not in: [{', '.join(possible_files)}]")

    with open(target_archive, "rb") as fp:
        # Skip past the metadata header before reading file payloads.
        fp.read(metadata_length)
        for file, size in files:
            if file.replace("\\", "/") in target_files:
                unpack_file(file=target_folder + "/" + file, size=int(size), fp=fp)
            else:
                # Not requested: consume its bytes so the stream stays aligned.
                empty_read_archive(size=int(size), fp=fp)
35,896
def calculate_platform_symmetries(cfg):
    """Calculate the Automorphism Group of a Platform Graph

    This task expects three hydra parameters to be available.

    **Hydra Parameters**:

    * **platform:** the input platform. The task expects a configuration dict
      that can be instantiated to a :class:`~mocasin.common.platform.Platform`
      object.
    * **out:** the output file (extension will be added)
    * **mpsym:** a boolean value selecting mpsym as backend (and JSON as
      output) Otherwise it outputs plaintext from the python implementation.
    """
    platform = hydra.utils.instantiate(cfg["platform"])
    log.info("start converting platform to edge graph for automorphisms.")
    plat_graph = platform.to_adjacency_dict(include_proc_type_labels=True)
    use_mpsym = cfg["mpsym"]
    (
        adjacency_dict,
        num_vertices,
        coloring,
        nodes_correspondence,
    ) = aut.to_labeled_edge_graph(plat_graph)
    log.info("done converting platform to edge graph for automorphisms.")
    log.info(
        "start calculating the automorphism group of the (edge) graph with "
        + str(num_vertices)
        + " nodes using nauty."
    )
    nautygraph = pynauty.Graph(num_vertices, True, adjacency_dict, coloring)
    autgrp_edges = pynauty.autgrp(nautygraph)
    log.info(
        "done calculating the automorphism group of the (edge) graph using nauty."
    )

    log.info("start coverting automorhpism of edges to nodes.")
    autgrp, new_nodes_correspondence = aut.edge_to_node_autgrp(
        autgrp_edges[0], nodes_correspondence
    )
    permutations_lists = map(aut.list_to_tuple_permutation, autgrp)
    log.info("done coverting automorhpism of edges to nodes.")

    log.info("start writing to file.")
    if use_mpsym:
        # mpsym is an optional dependency; fall back to the plaintext python
        # output if the module was never imported successfully.
        try:
            mpsym
        except NameError:
            # Fix: this message was a single string literal broken across a
            # raw newline (a syntax error as written); rejoin it via implicit
            # string concatenation.
            log.error(
                "Configured for mpsym output but could not load mpsym. \n"
                "Fallback to python implementation"
            )
            use_mpsym = False

    if use_mpsym:
        out_filename = str(cfg["out_file"])
        mpsym_autgrp = mpsym.ArchGraphAutomorphisms(
            [mpsym.Perm(g) for g in autgrp]
        )
        json_out = mpsym_autgrp.to_json()
        with open(out_filename, "w") as f:
            f.write(json_out)
    else:
        out_filename = cfg["out_file"]
        with open(out_filename, "w") as f:
            f.write("Platform Graph:")
            f.write(str(plat_graph))
            f.write("Symmetry group generators:")
            f.write(str(list(permutations_lists)))
            f.write("\nCorrespondence:")
            f.write(str(new_nodes_correspondence))
    log.info("done writing to file.")
35,897
def load_data(outputpath):
    """Load the numpy data as stored in directory outputpath.

    Parameters
    ----------
    outputpath : str
        directory where the numpy files are stored

    Returns
    -------
    x_train
    y_train_binary
    x_val
    y_val_binary
    x_test
    y_test_binary
    """
    def _load(name):
        # Each split lives in its own <name>.npy file inside outputpath.
        return np.load(os.path.join(outputpath, name + '.npy'))

    split_names = ('X_train', 'y_train', 'X_val', 'y_val', 'X_test', 'y_test')
    return tuple(_load(name) for name in split_names)
35,898
def create_server_ssl(addr, port, backlog):
    """Create a TLS-wrapped listening socket and register it with ServerSSL.

    :param addr: interface address to bind
    :param port: TCP port to listen on
    :param backlog: listen() backlog size
    :return: the SuperSocketSSL wrapping the listening socket
    """
    listener = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
    listener.bind((addr, port))
    listener.listen(backlog)

    context = ssl.SSLContext(ssl.PROTOCOL_TLS_SERVER)
    context.load_default_certs()

    # Handshake is deferred (do_handshake_on_connect=False); it is driven
    # later through the SSL_ACCEPT handler registered below.
    wrapped = context.wrap_socket(
        listener, do_handshake_on_connect=False, server_side=True
    )

    sserver = SuperSocketSSL(wrapped)
    ServerSSL(sserver)
    sserver.add_map(SSL_ACCEPT, handles_wrapper)
    return sserver
35,899