content
stringlengths
22
815k
id
int64
0
4.91M
def calculate_mean_probas(time_ser, model):
    """Calculate the metric to evaluate based on average probabilities

    Args:
        time_ser (np.ndarray): dynophore time series
        model (HMM): Fitted HMM

    Returns:
        np.float: Probability of predicting the given time series based on
            the fitted model
    """
    probas = model.predict_proba(time_ser)
    states = model.predict(time_ser)
    # Keep only the probability of the predicted state at each time step;
    # every other entry stays zero.  Fancy indexing replaces the original
    # per-row Python loop with one vectorized assignment.
    prob_ser = np.zeros(probas.shape)
    rows = np.arange(len(states))
    prob_ser[rows, states] = probas[rows, states]
    # Mean over time steps, then over states.
    return np.mean(np.mean(prob_ser, axis=0))
29,300
def baxter_callback(data):
    """! Computes the configuration of baxter's arm whenever the data are
    available, then extracts the rotation matrix from 0 to e.e and the
    position of the e.e. with respect to 0. It also computes the jacobian
    matrix. In case all the other callbacks have been called then it
    computes the velocity of the e.e. with respect to 0 and the end it
    calls the main_callback.

    @param data: coming from baxter node which provides a JointState message.
    """
    global axis_vect, ini_bax, q, R0e_kmin1, R0e_ini, Jkmin1, x_0e_kmin1B, x_0e_kmin1, v_0e_kmin1B, key_bax, key_dot, key_smart, flag_bax, flag_dot
    # NOTE(review): `key` is read here but is not in the global list above —
    # presumably a module-level flag set elsewhere; confirm.
    if (key == 1 or ini_bax == 0):
        #start = time.time()
        ####################################################
        # Read from publisher of v-rep the q configuration.
        ####################################################
        if ini_bax != 0:
            flag_bax = data.effort[0]
        # configuration at time kmin1
        # NOTE(review): joint angles are taken from the `velocity` field of
        # the JointState message — confirm the publisher packs q there.
        q = np.array(data.velocity)
        #print("~~~~~")
        #print(int(flag_bax))
        #print(int(flag_dot))
        #print(int(flag_bax) == int(flag_dot))
        #print("~~~~~")
        # Only proceed when this sample is synchronized with the dot callback.
        if int(flag_bax) == int(flag_dot):
            # relative T's with the configuration passed.
            T_rel_kmin1 = t.transformations(T_dh, q, info)
            #print(T_rel_kmin1)
            # absolute T's
            T_abs_kmin1 = t.abs_trans(T_rel_kmin1)
            # geometric vectors needed to compute jacobian.
            geom = j.geometric_vectors(T_abs_kmin1)
            ###############################
            # New part
            # axes of joints projected on zero
            i_j = j.i_j(T_abs_kmin1)
            # vector containing some axes.
            axis_vect = j.axis_vector(i_j[0], i_j[1], geom[0])
            ###############################
            # jacobian computation
            Jkmin1 = j.jacob(geom[0], geom[1], n_joints, info)
            # Transformation matrix from 0 to end effector at time k
            T0e_kmin1 = T_abs_kmin1[7]
            ## print("T0e, ini: ")
            ## print(T0e_kmin1)
            # end effector position of baxter at time k
            for i in range(3):
                x_0e_kmin1B[i] = T0e_kmin1[i][3]
            # end effector orientation of baxter at time k. At time 0 i have the
            # orientation of zero with respect of inertial frame also.
            for i in range(3):
                for k in range(3):
                    R0e_kmin1[i][k] = T0e_kmin1[i][k]
            #print("----")
            #print("first TOe:")
            #print(T0e_kmin1)
            if ini_bax == 0:
                #print("Init bax")
                #R0inert = R0e_kmin1 # Constant in time.
                #print(R0e_kmin1)
                R0e_ini = R0e_kmin1 # equal at starting configuration
                #x_0e_kmin1 = x_0e_kmin1B # Initially they are equal
                x_0e_kmin1 = np.array([[ 1.1759, -4.3562e-06, 0.1913]]).transpose() # Initially they are equa
                ini_bax = ini_bax + 1
            key_bax = key_bax + 1
            # Once both the arm and the dot callbacks have fired, compute the
            # e.e. velocity; when the smart sensor fired too, reset the gate
            # counters and run the main step.
            if (key_bax >= 1 and key_dot >= 1):
                x_dot = np.dot(Jkmin1, q_dot)
                for i in range(3):
                    v_0e_kmin1B[i][0] = x_dot[i][0]
                if(key_smart >= 1):
                    key_bax = 0
                    key_dot = 0
                    key_smart = 0
                    main_callback()
    #end = time.time()
    #print("Bax Frequency: " + str(1/(end-start)))
29,301
def compute_t(i, automata_list, target_events):
    """
    Compute alphabet needed for processing L{automata_list}[i-1] in the
    sequential abstraction procedure.

    @param i: Number of the automaton in the L{automata_list}
    @type  i: C{int} in range(1, len(automata_list)+1)

    @param automata_list: List of automata
    @type  automata_list: C{list} of L{Automaton}

    @param target_events: List of events to preserve after abstraction
    @type  target_events: C{set} of L{Event}

    @return: New alphabet for the next step in sequential abstraction
    @rtype:  C{set} of L{Event}
    """
    # Events of the automata already handled (indices 0 .. i-1).
    processed = set()
    for automaton in automata_list[:i]:
        processed |= automaton.alphabet

    # Events still to be handled, plus the events that must survive.
    unprocessed = target_events.copy()
    for automaton in automata_list[i:]:
        unprocessed |= automaton.alphabet

    result = processed & unprocessed
    # Drop the intermediate sets eagerly; only the intersection is returned.
    processed.clear()
    unprocessed.clear()
    return result
29,302
def cleaner(f_stop):
    """ Clear the data from the data dictionary

    :param f_stop: threading.Event; when set, the periodic re-scheduling
        below stops.
    """
    # `data` is a module-level dict shared with the rest of the application.
    if len(data) > 0:
        print(f'LOG: Cleaned {len(data)} items!')
        data.clear()
    # Re-schedule this function until the stop event is set.
    # NOTE(review): Timer takes seconds, so 5000 is ~83 minutes between
    # runs — confirm this is intended (5 seconds would be Timer(5, ...)).
    if not f_stop.is_set():
        threading.Timer(5000, cleaner, [f_stop]).start()
29,303
def cal_min_sim(y): """Calculate the minimal value given multiple trajectories from different isomers""" y = y.copy() if len(y.shape) == 2: # add one more dimension if only two provided y = y[np.newaxis, :] n_sim, nT, nP = y.shape y_min_sim = np.min(y, axis = 0) return y_min_sim
29,304
async def create_rsa_key(
    hub,
    ctx,
    name,
    vault_url,
    key_ops=None,
    enabled=None,
    expires_on=None,
    not_before=None,
    tags=None,
    **kwargs,
):
    """
    .. versionadded:: 2.0.0

    Create a new RSA key or, if name is already in use, create a new version of the key.
    Requires the keys/create permission. Key properties can be specified as keyword arguments.

    :param name: The name of the new key. Key names can only contain alphanumeric characters and dashes.

    :param vault_url: The URL of the vault that the client will access.

    :param key_ops: A list of permitted key operations. Possible values include: 'decrypt', 'encrypt',
        'sign', 'unwrap_key', 'verify', 'wrap_key'.

    :param enabled: Whether the key is enabled for use.

    :param expires_on: When the key will expire, in UTC. This parameter must be a string representation
        of a Datetime object in ISO-8601 format.

    :param not_before: The time before which the key can not be used, in UTC. This parameter must be a
        string representation of a Datetime object in ISO-8601 format.

    :param tags: Application specific metadata in the form of key-value pairs.

    CLI Example:

    .. code-block:: bash

        azurerm.keyvault.key.create_rsa_key test_name test_vault
    """
    result = {}
    # Key client bound to the target vault; extra kwargs carry credentials.
    kconn = await hub.exec.azurerm.keyvault.key.get_key_client(ctx, vault_url, **kwargs)

    try:
        key = kconn.create_rsa_key(
            name=name,
            key_operations=key_ops,
            enabled=enabled,
            expires_on=expires_on,
            not_before=not_before,
            tags=tags,
        )

        result = _key_as_dict(key)
    except (KeyVaultErrorException, ValidationError, HttpResponseError) as exc:
        # Errors are returned in the result dict, not raised, per the
        # module's convention.
        result = {"error": str(exc)}

    return result
29,305
def _parse_size_string(size): """ Parse a capacity string. Takes a string representing a capacity and returns the size in bytes, as an integer. Accepts strings such as "5", "5B", "5g", "5GB", " 5 GiB ", etc. Case insensitive. See `man virsh` for more details. :param size: The size string to parse. :returns: The number of bytes represented by `size`, as an integer. """ # Base values for units. BIN = 1024 DEC = 1000 POWERS = {"": 0, "k": 1, "m": 2, "g": 3, "t": 4} # If an integer is passed, treat it as a string without units. size = str(size).lower() regex = r"\s*(\d+)\s*([%s])?(i?b)?\s*$" % "".join(POWERS.keys()) match = re.compile(regex).match(size) if not match: msg = "The size string '%s' is not of a valid format." % size raise AnsibleFilterError(to_text(msg)) number = match.group(1) power = match.group(2) unit = match.group(3) if not power: power = "" if unit == "b": base = DEC else: base = BIN return int(number) * (base ** POWERS[power])
29,306
def connect(cfg=None, jar=None):
    """ Connect to MF using a token with authority to access the data collection.

    :param cfg: optional path to the aterm config file; defaults to the
        ATERMCFG environment variable, then "$HOME/aterm.cfg".
    :param jar: optional path to the aterm jar; defaults to the ATERMJAR
        environment variable, then "$HOME/aterm.jar".
    :return: A new :py:class:`Session`

    Example::

        >>> from MFQuery.MF import MF
        >>> wath = MF.connect("$HOME/aterm.cfg", "$HOME/aterm.jar")  # doctest: +SKIP
        >>> outputs = wath.query()  # doctest: +SKIP
    """
    # If the caller did not supply configuration and jar paths, fall back
    # to environment overrides and finally to files in $HOME.
    if cfg is None:
        cfg = os.environ.get('ATERMCFG', "$HOME/aterm.cfg")
    if jar is None:
        jar = os.environ.get('ATERMJAR', "$HOME/aterm.jar")
    return Session(cfg, jar)
29,307
def main():  # pragma: no cover
    """ Main func

    Parses CLI arguments, configures logging, and triggers the webhook call.
    """
    parser = setup_argparser()
    args = parser.parse_args()
    # The verbose flag only raises log verbosity; default stays at INFO.
    log_level = "DEBUG" if args.verbose else "INFO"
    setup_logger(log_level)
    call_ibm_webhook(args)
29,308
def get_exp_lr(base_lr, xs, power=4e-10):
    """Get learning rates for each step.

    Args:
        base_lr (float): learning rate at step 0.
        xs (iterable): step indices.
        power (float): decay coefficient of the Gaussian-shaped schedule.

    Returns:
        list: ``base_lr / exp(power * x**2)`` for each ``x`` in ``xs``.
    """
    # Comprehension replaces the original append loop (same values, same order).
    return [base_lr / np.exp(power * x ** 2) for x in xs]
29,309
def dashboard():
    """ Main dashboard function. Run stats across all accounts.

    Returns:
        dict: wall-clock time plus per-resource totals (instances, users,
        security groups, ELBs) aggregated over every known AWS account.
    """
    start = time.time()
    instance_count = 0
    user_count = 0
    sg_count = 0
    elb_count = 0
    aws_accounts = AwsAccounts()
    accounts = aws_accounts.all()
    # Fan the per-account stat collection out over 10 workers.
    pool = Pool(10)
    results = pool.map(get_account_stats, accounts)
    pool.close()
    pool.join()
    # Aggregate per-account results into the grand totals.
    for acc_result in results:
        instance_count += acc_result['InstanceCount']
        user_count += acc_result['UserCount']
        sg_count += acc_result['SecurityGroupCount']
        elb_count += acc_result['ELBCount']
    end = time.time()
    result = dict(
        Time=(end - start),
        Summary=dict(
            AccountsCount=len(accounts),
            InstanceCount=instance_count,
            UserCount=user_count,
            SecurityGroupCount=sg_count,
            ELBCount=elb_count))
    return result
29,310
def strip_new_line(str_json):
    """ Remove every newline character from the given string.

    :param str_json: string
    :return: string with all '\\n' characters removed (kills line breaks
        caused by triple quoted raw strings)
    """
    return str_json.replace('\n', '')
29,311
def rebuild_from_dat(inputDatfile, outputSessionName):
    """ Rebuilds a pymanip HDF5 file from the ASCII dat file.

    :param inputDatfile: path-like object for the space-separated dat file
        (must expose ``open()``, e.g. ``pathlib.Path``).
    :param outputSessionName: name of the pymanip Session to create.
    """
    if not has_panda:
        print("Pandas is not available.")
    else:
        with inputDatfile.open() as in_f:
            data = pd.read_csv(in_f, sep=" ")
            # Every column except "Time" becomes a logged session variable.
            liste_var = list(data.keys())
            liste_var.remove("Time")
            MI = Session(outputSessionName, liste_var)
            # Replay each row into the session with its original timestamp.
            for line in data.iterrows():
                MI.log_addline(timestamp=line[1].Time, dict_caller=dict(line[1]))
            MI.Stop()
29,312
def fromAtoB(x1, y1, x2, y2, color='k', connectionstyle="arc3,rad=-0.4",
             shrinkA=10, shrinkB=10, arrowstyle="fancy", ax=None):
    """
    Draws an arrow from point A=(x1,y1) to point B=(x2,y2) on the (optional)
    axis ``ax``.

    .. note::

        See matplotlib documentation.

    :param x1, y1: coordinates of point A.
    :param x2, y2: coordinates of point B.
    :param ax: axes to draw on; when ``None``, the current pylab figure
        is used instead.
    :return: the annotation object created by ``annotate``.
    """
    # Both pylab and an Axes object expose the same annotate() API, so the
    # two previously duplicated call sites collapse into one.
    target = pl if ax is None else ax
    return target.annotate("",
                           xy=(x2, y2), xycoords='data',
                           xytext=(x1, y1), textcoords='data',
                           arrowprops=dict(
                               arrowstyle=arrowstyle,
                               color=color,
                               shrinkA=shrinkA, shrinkB=shrinkB,
                               patchA=None,
                               patchB=None,
                               connectionstyle=connectionstyle),
                           )
29,313
def test_bandit(src_dir):
    """Run Bandit.

    Runs the bandit security linter over ``src_dir`` from the project root
    and prints any findings.
    """
    bandit = plumbum.local["bandit"]
    with plumbum.local.cwd(PROJECT_ROOT_DIR):
        # -ll: report medium severity and above; -r: recurse into src_dir.
        result = bandit("-ll", "-r", src_dir)
    if result:
        print("\nBandit:", result)
29,314
def exp(input_):
    """Wrapper of `torch.exp`.

    Parameters
    ----------
    input_ : DTensor
        Input dense tensor.

    Returns
    -------
    torch.Tensor
        Element-wise exponential of the wrapped tensor.
    """
    # DTensor keeps its payload in the private ``_data`` attribute.
    return torch.exp(input_._data)
29,315
def capsule_sdf(mesh_verts, mesh_normals, query_points, query_normals, caps_rad, caps_top, caps_bot, foreach_on_mesh): """ Find the SDF of query points to mesh verts Capsule SDF formulation from https://iquilezles.org/www/articles/distfunctions/distfunctions.htm :param mesh_verts: (batch, V, 3) :param mesh_normals: (batch, V, 3) :param query_points: (batch, Q, 3) :param caps_rad: scalar, radius of capsules :param caps_top: scalar, distance from mesh to top of capsule :param caps_bot: scalar, distance from mesh to bottom of capsule :param foreach_on_mesh: boolean, foreach point on mesh find closest query (V), or foreach query find closest mesh (Q) :return: normalized sdf + 1 (batch, V or Q) """ # TODO implement normal check? if foreach_on_mesh: # Foreach mesh vert, find closest query point knn_dists, nearest_idx, nearest_pos = pytorch3d.ops.knn_points(mesh_verts, query_points, K=1, return_nn=True) # TODO should attract capsule middle? capsule_tops = mesh_verts + mesh_normals * caps_top capsule_bots = mesh_verts + mesh_normals * caps_bot delta_top = nearest_pos[:, :, 0, :] - capsule_tops normal_dot = torch.sum(mesh_normals * batched_index_select(query_normals, 1, nearest_idx.squeeze(2)), dim=2) else: # Foreach query vert, find closest mesh point knn_dists, nearest_idx, nearest_pos = pytorch3d.ops.knn_points(query_points, mesh_verts, K=1, return_nn=True) # TODO should attract capsule middle? 
closest_mesh_verts = batched_index_select(mesh_verts, 1, nearest_idx.squeeze(2)) # Shape (batch, V, 3) closest_mesh_normals = batched_index_select(mesh_normals, 1, nearest_idx.squeeze(2)) # Shape (batch, V, 3) capsule_tops = closest_mesh_verts + closest_mesh_normals * caps_top # Coordinates of the top focii of the capsules (batch, V, 3) capsule_bots = closest_mesh_verts + closest_mesh_normals * caps_bot delta_top = query_points - capsule_tops normal_dot = torch.sum(query_normals * closest_mesh_normals, dim=2) bot_to_top = capsule_bots - capsule_tops # Vector from capsule bottom to top along_axis = torch.sum(delta_top * bot_to_top, dim=2) # Dot product top_to_bot_square = torch.sum(bot_to_top * bot_to_top, dim=2) h = torch.clamp(along_axis / top_to_bot_square, 0, 1) # Could avoid NaNs with offset in division here dist_to_axis = torch.norm(delta_top - bot_to_top * h.unsqueeze(2), dim=2) # Distance to capsule centerline return dist_to_axis / caps_rad, normal_dot
29,316
def test_just_single_point_plotting():
    """
    Testing this because this has caused problems since for a single point
    min == max
    """
    # A one-element series degenerates the axis range (min == max);
    # plot() must not crash on it.
    x = [2.34]
    plot(x)
29,317
def get_lat_lon(fp, fs=FS):
    """ get lat lon values for concat dataset

    :param fp: remote path of the dataset file to open.
    :param fs: filesystem object used to open ``fp`` (must expose ``open``).
    :return: tuple ``(lat, lon)`` of the dataset's ``latitude`` and
        ``longitude`` variable values.
    """
    logger.info(f"{str(datetime.datetime.now())} : Retrieving lat lon")
    # The context manager closes the dataset once the values are copied out.
    with xr.open_dataset(fs.open(fp)) as ds:
        lat, lon = ds["latitude"].values, ds["longitude"].values
    logger.info(f"{str(datetime.datetime.now())} : Retrieved lat lon")
    return lat, lon
29,318
def one_hot_encode(vec, vals=10):
    """
    One-hot encode the given label vector.

    :param vec: sequence of integer class labels.
    :param vals: number of possible labels (default 10).
    :return: (len(vec), vals) array with a 1 in each row's label column.
    """
    encoded = np.zeros((len(vec), vals))
    # Set one entry per row: row index paired with its label column.
    encoded[np.arange(len(vec)), vec] = 1
    return encoded
29,319
def COUNTA(*args) -> Function:
    """
    Returns a count of the number of values in a dataset.

    Learn more: https://support.google.com/docs/answer/3093991
    """
    return Function("COUNTA", args)
29,320
def stop():
    """
    Stop the development server (delegates to ``stop_django``).
    """
    stop_django()
29,321
def decode_url_json_string(json_string):
    """
    Load a string representing serialised json into Python objects.

    :param json_string: JSON text, HTML-unescaped via ``h.unescape``
        before parsing.
    :return: the deserialised structure; object key/value pairs are run
        through ``parse_json_pairs`` — presumably a project hook that
        normalises pairs; confirm its contract at the definition site.
    """
    strings = json.loads(h.unescape(json_string), object_pairs_hook=parse_json_pairs)
    return strings
29,322
def syn_test_helper():
    """Provides the SynapseTestHelper as a fixture per function."""
    helper = SynapseTestHelper()
    # Yield so the test body runs, then clean up the helper's resources.
    yield helper
    helper.dispose()
29,323
def init_argparser():
    """ Define and parse commandline arguments.

    Returns:
        argparse.ArgumentParser: parser pre-loaded with all experiment
        options. Note: despite the original summary, parsing itself is
        left to the caller (``parse_args`` is not invoked here).
    """
    # training settings
    parser = argparse.ArgumentParser(description="PyTorch MNIST Example")
    parser.add_argument("--experiment", type=str, help="Choose the experiment.")
    parser.add_argument(
        "--batch-size",
        type=int,
        default=64,
        metavar="N",
        help="input batch size for training (default: 64)",
    )
    parser.add_argument(
        "--test-batch-size",
        type=int,
        metavar="N",
        help="input batch size for testing (default: same as --batch-size)",
    )
    parser.add_argument(
        "--log-level",
        default="info",
        choices=["verbose", "info", "warning", "error", "debug"],
        help="Log level",
    )
    parser.add_argument(
        "--result-dir",
        default="results",
        help="path to the result directory",
        metavar="DIR",
    )
    parser.add_argument(
        "--reuse-base-dir",
        help="path to the an already existing base directory (e.g. to continue certain experiments)",
        metavar="DIR",
    )
    parser.add_argument(
        "--epochs",
        type=int,
        default=10,
        metavar="N",
        help="number of epochs to train (default: 10)",
    )
    parser.add_argument(
        "--lr",
        type=float,
        default=0.01,
        metavar="N",
        help="learning rate (default: 0.01)",
    )
    parser.add_argument(
        "--cuda", action="store_true", default=False, help="Enable CUDA training"
    )
    parser.add_argument(
        "--cuda-device-id",
        nargs="+",
        type=int,
        default=[0],
        help="Cuda device ids. E.g. [0,1,2]. Use -1 for all GPUs available and -2 for cpu only.",
    )
    parser.add_argument(
        "--debug", action="store_true", default=False, help="Enable debugging."
    )
    parser.add_argument(
        "--experiment-name", type=str, help="Set the experiment name", required=True
    )
    parser.add_argument("--net", type=str, help="Define network", required=True)
    parser.add_argument(
        "--n-gaussians",
        type=int,
        default=3,
        metavar="N",
        help="number of possible independence combinations of gaussians",
    )
    parser.add_argument(
        "--njobs",
        type=int,
        default=4,
        metavar="S",
        help="Number of threads (default: 4)",
    )
    parser.add_argument(
        "--seed", type=int, default=1, metavar="S", help="random seed (default: 1)"
    )
    parser.add_argument(
        "--tag",
        default="",
        type=str,
        help="Tag to identify runs in the result directory and tensorboard overviews",
    )
    parser.add_argument(
        "--resnet-arch",
        default="resnet18",
        type=str,
        choices=["resnet18", "resnet34", "resnet50", "resnet101", "resnet152"],
        help="Resnet architecture",
    )
    parser.add_argument(
        "--log-interval",
        type=int,
        default=10,
        metavar="N",
        help="how many batches to wait before logging training status",
    )
    parser.add_argument(
        "--dataset",
        type=str,
        choices=[
            "iris-2d",
            "wine-2d",
            "diabetes",
            "audit",
            "banknotes",
            "ionosphere",
            "sonar",
            "wheat-2d",
            "synth-8-easy",
            "synth-64-easy",
            "synth-8-hard",
            "synth-64-hard",
        ],
    )
    parser.add_argument(
        "--force-overfit",
        action="store_true",
        default=False,
        help="Force overfitting (set num train samples to 1000)",
    )
    parser.add_argument(
        "--save-model",
        action="store_true",
        default=False,
        help="For Saving the current Model",
    )
    parser.add_argument(
        "--l2",
        type=float,
        default=0.0,
        help="L2 weight decay parameter. (default: 0.0)",
    )
    return parser

    # args = parser.parse_args()
    # ensure_dir(args.result_dir)
    # if args.debug:
    #     args.epochs = 2
    # if args.n_digits > args.n_labels:
    #     raise Exception("Option --n-digits has to be <= --n-labels.")
    # return args
29,324
def generate(traj: pca3dvis.trajectory.ProjectedTrajectory,
             markers: typing.Tuple[typing.Tuple[np.ndarray, dict]],
             titles: typing.Tuple[str],
             outfolder: str,
             draft: bool = False,
             clusters: bool = True):
    """Generates a video and corresponding snapshots into the specified
    directory. If draft is true, this uses the draft settings. Otherwise,
    this uses high-quality production settings. For fine control over the
    settings it is recommended that you use the underlying pympanim
    functions.

    Args:
        traj (ProjectedTrajectory): the trajectory to plot
        markers (tuple[tuple[ndarray, dict]]): each element is a boolean
            sample mask plus the kwargs applied to those samples
        titles (tuple[str]): one title per snapshot in the trajectory
        outfolder (str): where to save. must not already exist
        draft (bool): if true, lower quality settings are used
        clusters (bool): if clusters are detected and zoomed to
    """
    # --- argument validation ---
    tus.check(
        traj=(traj, pca3dvis.trajectory.ProjectedTrajectory),
        markers=(markers, (list, tuple)),
        titles=(titles, (list, tuple)),
        outfolder=(outfolder, str),
        draft=(draft, bool),
        clusters=(clusters, bool)
    )
    for i, marker in enumerate(markers):
        tus.check(**{f'marker[{i}]': (marker, (list, tuple))})
        if len(marker) != 2:
            raise ValueError(
                f'expected marker[{i}] is (ndarray, dict), got {marker}')
        mask, kwargs = marker
        tus.check_ndarrays(**{
            f'marker[{i}][0]': (mask, (('samples', traj.num_samples),), 'bool')
        })
        tus.check(**{
            f'marker[{i}][1]': (kwargs, dict)
        })
    tus.check_listlike(titles=(titles, str, traj.num_snapshots))

    # Fails if outfolder already exists (by design, per the docstring).
    os.makedirs(outfolder)
    os.makedirs(os.path.join(outfolder, 'snapshots'))

    state = pca3dvis.state.ProjectedState(traj, markers)
    # Draft mode renders a smaller figure at fewer rotations.
    rend = pca3dvis.renderer.ProjectedRenderer(
        (19.2, 10.8) if not draft else (6.4, 4.8),
        100
    )

    # --- still snapshots: one image per title at several rotations ---
    for i, title in enumerate(titles):
        filepath = os.path.join(outfolder, 'snapshots', f'snapshot_{i}')
        ext = '.png' if draft else '.pdf'
        transp = not draft
        state.set_snapshot_visible(i, True)
        state.title = title
        for rot in range(15, 375, 60 if draft else 30):
            state.rotation = (30, rot)
            fig = rend.render_mpl(state)
            fname = filepath + f'_rot{rot}' + ext
            fig.savefig(fname, transparent=transp, dpi=rend.dpi)
            plt.close(fig)

    # --- opening scene: dwell on the first snapshot with a full rotation ---
    pts = traj.snapshots[0].projected_samples
    zoom = pca3dvis.state.get_square_bounds_for(pts)
    my_scene = (
        acts.FluentScene(scenes.SnapshotScene(0))
        .join(scenes.FixedTitleScene(titles[0]), False)
        .join(scenes.RotationScene((30, 45), (30, 45 + 360)), False)
        .join(scenes.FixedZoomScene(zoom), False)
        .dilate(pytweening.easeInOutSine)
        .time_rescale_exact(12, 's')
    )

    if clusters:
        _cluster_scene(my_scene, traj, 0, titles[0], draft)

    # --- transitions between consecutive snapshots ---
    for snap_ind in range(1, traj.num_snapshots):
        snap = traj.snapshots[snap_ind]
        npts = snap.projected_samples
        nzoom = pca3dvis.state.get_square_bounds_for(npts)
        mzoom = pca3dvis.state.get_square_bounds_for_all((pts, npts))
        ntitle = titles[snap_ind]
        ititle = titles[snap_ind - 1] + ' -> ' + ntitle
        if not np.allclose(zoom, mzoom):
            # Zoom out to bounds covering both snapshots before morphing.
            (my_scene.push(scenes.ZoomScene(zoom, mzoom))
             .join(scenes.SnapshotScene(snap_ind - 1), False)
             .join(scenes.FixedTitleScene(ititle), False)
             .join(scenes.FixedRotationScene((30, 45)), False)
             .dilate(pympanim.easing.smoothstep)
             .time_rescale_exact(2, 's')
             .pop()
             )
        # Interpolate samples from the previous snapshot to this one while
        # rotating a full turn.
        (my_scene.push(scenes.InterpScene(snap_ind - 1, snap_ind))
         .dilate(pytweening.easeInOutCirc)
         .join(scenes.FixedZoomScene(mzoom), False)
         .join(scenes.FixedTitleScene(ititle), False)
         .push(scenes.RotationScene((30, 45), (30, 45 + 360)))
         .dilate(pytweening.easeInOutSine)
         .dilate(pympanim.easing.squeeze, {'amt': 0.1})
         .pop('join')
         .time_rescale_exact(6, 's')
         .pop()
         )
        if not np.allclose(mzoom, nzoom):
            # Zoom in to the new snapshot's own bounds.
            (my_scene.push(scenes.ZoomScene(mzoom, nzoom))
             .join(scenes.SnapshotScene(snap_ind), False)
             .join(scenes.FixedTitleScene(ititle), False)
             .join(scenes.FixedRotationScene((30, 45)), False)
             .dilate(pympanim.easing.smoothstep)
             .time_rescale_exact(2, 's')
             .pop()
             )
        # Dwell on the new snapshot with a full rotation.
        (my_scene.push(scenes.SnapshotScene(snap_ind))
         .join(scenes.FixedTitleScene(ntitle), False)
         .join(scenes.RotationScene((30, 45), (30, 45 + 360)), False)
         .join(scenes.FixedZoomScene(nzoom), False)
         .dilate(pytweening.easeInOutSine)
         .time_rescale_exact(10, 's')
         .pop()
         )
        if clusters:
            _cluster_scene(my_scene, traj, snap_ind, ntitle, draft)
        pts = npts
        zoom = nzoom

    if draft:
        # Speed the whole act up 5x for quick previews.
        my_scene.time_rescale(5)

    pympanim.worker.produce(
        acts.Act(state, rend, [my_scene.build()]),
        60 if not draft else 30,
        100, -1,
        os.path.join(outfolder, 'video.mp4' if not draft else 'draft.mp4')
    )
29,325
def write_enum(enum, writer):
    """
    Write class representing Avro enum schema

    :param schema.EnumSchema enum:
    :param TabbedWriter writer:
    :return:
    """
    # NOTE(review): fullname and namespace are computed but only type_name
    # is used below — confirm whether the first two are still needed.
    fullname = clean_fullname(enum.fullname)
    namespace, type_name = ns_.split_fullname(enum.fullname)
    writer.write('''\nclass {name}Class(object):'''.format(name=type_name))
    with writer.indent():
        writer.write('\n\n')
        # Emit the enum's doc (if any) as the generated class docstring.
        writer.write('"""\n')
        writer.write(enum.doc or '')
        writer.write('\n')
        writer.write('"""\n\n')
        # One class attribute per enum symbol, named after its value.
        for field in enum.symbols:
            writer.write('{name} = "{name}"\n'.format(name=field))
29,326
def svn_wc_merge2(*args):
    """
    svn_wc_merge2(enum svn_wc_merge_outcome_t merge_outcome, char left, char right,
        char merge_target, svn_wc_adm_access_t adm_access, char left_label,
        char right_label, char target_label, svn_boolean_t dry_run,
        char diff3_cmd, apr_array_header_t merge_options,
        apr_pool_t pool) -> svn_error_t
    """
    # SWIG-generated thin wrapper around the C binding. `apply` is a
    # Python 2 builtin (equivalent to `_wc.svn_wc_merge2(*args)`).
    return apply(_wc.svn_wc_merge2, args)
29,327
def test_finetuning_callback_warning(tmpdir):
    """Test finetuning callbacks works as expected."""

    seed_everything(42)

    class FinetuningBoringModel(BoringModel):
        def __init__(self):
            super().__init__()
            # The flag on the backbone records whether forward() used it.
            self.backbone = nn.Linear(32, 2, bias=False)
            self.layer = None
            self.backbone.has_been_used = False

        def training_step(self, batch, batch_idx):
            output = self(batch)
            loss = self.loss(batch, output)
            return {"loss": loss}

        def forward(self, x):
            self.backbone.has_been_used = True
            x = self.backbone(x)
            return x

        def train_dataloader(self):
            return DataLoader(RandomDataset(32, 64), batch_size=2)

        def configure_optimizers(self):
            optimizer = torch.optim.SGD(self.parameters(), lr=0.1)
            return optimizer

    chk = ModelCheckpoint(dirpath=tmpdir, save_last=True)

    model = FinetuningBoringModel()
    model.validation_step = None
    callback = TestBackboneFinetuningWarningCallback(unfreeze_backbone_at_epoch=3, verbose=False)

    # The optimizer is created before the backbone is unfrozen, so the
    # finetuning callback is expected to warn about it.
    with pytest.warns(UserWarning, match="Did you init your optimizer in"):
        trainer = Trainer(limit_train_batches=1, default_root_dir=tmpdir, callbacks=[callback, chk], max_epochs=2)
        trainer.fit(model)

    assert model.backbone.has_been_used
    # Resume from the last checkpoint and train one more epoch.
    trainer = Trainer(max_epochs=3)
    trainer.fit(model, ckpt_path=chk.last_model_path)
29,328
def idempotent(function):
    """Swallows creation-conflict errors, making actions repeatable.

    NOTE(review): the original docstring said "304 errors", but the
    suppressed exception is ``GitlabCreateError`` (raised on create
    conflicts) — confirm which status code is actually meant.
    """
    @wraps(function)
    def decorator(*args, **kwargs):
        # If the resource already exists, ignore the error and return None
        # so re-running the action is safe.
        with suppress(GitlabCreateError):
            return function(*args, **kwargs)
    return decorator
29,329
def write_binary_file(output_path, data):
    """Writes the given bytes stored in the bytearray 'data' to a binary
    file at the location pointed to by 'output_path'."""
    with open(output_path, "wb") as out_file:
        out_file.write(data)
29,330
def enclose(g):
    """
    Create a one-wide permimeter along the edges of the grid.

    Blocks every border cell of ``g`` (top row, right column, left column,
    bottom row, in that order) by calling ``g.put(x, y, False)``.
    """
    width, height = g.size()
    for col in range(width):          # Top
        g.put(col, 0, False)
    for row in range(height):         # Right
        g.put(width - 1, row, False)
    for row in range(height):         # Left
        g.put(0, row, False)
    for col in range(width):          # Bottom
        g.put(col, height - 1, False)
29,331
def newline_formatter(func):
    """
    Wrap a formatter function so a newline is appended if needed to the output
    """

    def __wrapped_func(*args, **kwargs):
        """ Wrapper function that appends a newline to result of original function """
        result = func(*args, **kwargs)
        # Formatters may return text or bytes (which differ on Python 3);
        # pick the matching terminator, treating anything else as text.
        if isinstance(result, bytes):
            terminator = b"\n"
        else:
            terminator = "\n"
        # Avoid double line endings
        if not result.endswith(terminator):
            result += terminator
        return result

    # Return the wrapper
    return __wrapped_func
29,332
def sigma_splitter(float_arr: List[float]) -> Tuple[List[List[int]], List[List[int]], List[List[int]]]:
    """
    separates the NCOF score into the 1-3 sigma outliers for the NCOF input

    @param float_arr: List[float]
    @return: inliers, pos_outliers, neg_outliers:
        List[int], List[List[int]], List[List[int]] — inliers is the list of
        all inlier indexes; pos_outliers/neg_outliers each hold three lists
        corresponding to the 1, 2, 3 sigma bands above/below the mean.
    """
    # The original used bare string literals as comments; those are no-op
    # expression statements, so they are real comments here instead.
    arr = np.asarray(float_arr)
    mean = np.mean(arr)
    std = np.std(arr)

    # Inliers: within one standard deviation of the mean (inclusive).
    inliers = np.where((arr >= mean - std) & (arr <= mean + std))[0].tolist()

    # Positive outliers per band: [m+s, m+2s), [m+2s, m+3s), [m+3s, inf).
    pos_bands = [
        np.where((mean + std <= arr) & (arr < mean + 2 * std))[0],
        np.where((mean + 2 * std <= arr) & (arr < mean + 3 * std))[0],
        np.where(mean + 3 * std <= arr)[0],
    ]
    # Negative outliers mirror the positive bands below the mean:
    # (m-2s, m-s], (m-3s, m-2s], (-inf, m-3s].
    neg_bands = [
        np.where((mean - 2 * std < arr) & (arr <= mean - std))[0],
        np.where((mean - 3 * std < arr) & (arr <= mean - 2 * std))[0],
        np.where(arr <= mean - 3 * std)[0],
    ]

    pos_outliers = [band.tolist() for band in pos_bands]
    neg_outliers = [band.tolist() for band in neg_bands]
    return inliers, pos_outliers, neg_outliers
29,333
def populate_diff_chunks(files, enable_syntax_highlighting=True, request=None):
    """Populates a list of diff files with chunk data.

    This accepts a list of files (generated by get_diff_files) and generates
    diff chunk data for each file in the list. The chunk data is stored in
    the file state.
    """
    from reviewboard.diffviewer.chunk_generator import get_diff_chunk_generator

    for diff_file in files:
        generator = get_diff_chunk_generator(
            request,
            diff_file['filediff'],
            diff_file['interfilediff'],
            diff_file['force_interdiff'],
            enable_syntax_highlighting,
            base_filediff=diff_file.get('base_filediff'))
        chunks = list(generator.get_chunks())

        diff_file.update({
            'chunks': chunks,
            'num_chunks': len(chunks),
            'changed_chunk_indexes': [],
            # Starts True whenever chunks exist; cleared below as soon as a
            # non-whitespace change is found.
            'whitespace_only': len(chunks) > 0,
        })

        for j, chunk in enumerate(chunks):
            chunk['index'] = j

            if chunk['change'] != 'equal':
                diff_file['changed_chunk_indexes'].append(j)
                meta = chunk.get('meta', {})

                if not meta.get('whitespace_chunk', False):
                    diff_file['whitespace_only'] = False

        diff_file.update({
            'num_changes': len(diff_file['changed_chunk_indexes']),
            'chunks_loaded': True,
        })
29,334
def cosine_beta_schedule(timesteps, s = 0.008, thres = 0.999):
    """
    cosine schedule
    as proposed in https://openreview.net/forum?id=-NEXDKk8gZ

    Returns a tensor of `timesteps` beta values, each clipped to
    [0, thres].
    """
    # Evaluate the squared-cosine alpha-bar curve on timesteps + 1 points.
    grid = torch.linspace(0, timesteps, timesteps + 1, dtype=torch.float64)
    curve = torch.cos((grid / timesteps + s) / (1 + s) * torch.pi * 0.5) ** 2
    # Normalize so alpha-bar starts at 1, then derive betas from the
    # ratio of consecutive alpha-bar values.
    alpha_bar = curve / curve[0]
    betas = 1 - alpha_bar[1:] / alpha_bar[:-1]
    return torch.clip(betas, 0, thres)
29,335
def start_generator(args):
    """
    Triggers the execution of the generator given the parameters from arguments

    Every CLI option falls back to the corresponding ``env`` default when
    unset.
    """
    # NOTE(review): "".split(',') returns [''] which is truthy, so the
    # brokerServers fallback only fires when the attribute itself is falsy
    # (e.g. None) — confirm that is the intended behaviour.
    start(
        brokerServers=args.brokerServers.split(',') or env.DEFAULT_BROKER_SERVERS.split(','),
        topic=args.topic or env.DEFAULT_TOPIC,
        total_messages=int(
            args.messages) if args.messages else env.DEFAULT_NUMBER_OF_MESSAGES,
        available_time_in_secs=int(
            args.time) if args.time else env.DEFAULT_TIME_IN_SECS,
        generators=int(
            args.generators) if args.generators else env.DEFAULT_NUMBER_OF_GENERATORS,
        wait_time_in_secs=int(
            args.waitTime) if args.waitTime else env.DEFAULT_WAIT_TIME_IN_SECS
    )
29,336
def construct_from_yaml(
    constructor: Callable[..., T],
    yaml_dict: Optional[Dict[str, Any]] = None,
) -> T:
    """Build ``constructor`` from ``yaml_dict``

    Args:
        constructor (Callable): The constructor to test (such as an Hparams class)
        yaml_dict (Dict[str, Any], optional): The YAML. Defaults to ``None``, which is equivalent
            to an empty dictionary.

    Returns:
        T: the instance produced by ``hp.create``.
    """
    yaml_dict = {} if yaml_dict is None else yaml_dict
    # ensure that yaml_dict is actually a dictionary of only json-serializable objects
    # (round-tripping through safe_dump raises on anything non-serializable)
    yaml_dict = yaml.safe_load(yaml.safe_dump(yaml_dict))
    instance = hp.create(constructor, yaml_dict, cli_args=False)
    return instance
29,337
def text_to_document(text, language="en"):
    """ Returns string text as list of Sentences

    :param text: raw text (byte string or unicode).
    :param language: key into the module-level ``_sentence_splitters``.
    :return: list with one tokenized element per sentence.
    """
    splitter = _sentence_splitters[language]
    # Python 2 compatibility: decode byte strings to unicode first.
    # NOTE(review): on Python 3 this raises NameError for str input
    # (`unicode` no longer exists) — this module appears to target Python 2.
    utext = unicode(text, 'utf-8') if isinstance(text, str) else text
    sentences = splitter.tokenize(utext)
    # NOTE: the loop variable deliberately shadows the `text` parameter;
    # each sentence is tokenized individually.
    return [tokenize(text, language) for text in sentences]
29,338
def get_top_playlists_route(type):
    """An endpoint to retrieve the "top" of a certain demographic of playlists or albums.
    This endpoint is useful in generating views like:
        - Top playlists
        - Top Albums
        - Top playlists of a certain mood
        - Top playlists of a certain mood from people you follow

    Args:
        type: (string) The `type` (same as repost/save type) to query from.
        limit?: (number) default=16, max=100
        mood?: (string) default=None
        filter?: (string) Optional filter to include (supports 'followees') default=None
    """
    args = to_dict(request.args)
    # Cap the requested page size at 100; default to 16.
    if 'limit' in request.args:
        args['limit'] = min(request.args.get('limit', type=int), 100)
    else:
        args['limit'] = 16

    if 'mood' in request.args:
        args['mood'] = request.args.get('mood')
    else:
        args['mood'] = None
    if "with_users" in request.args:
        args["with_users"] = parse_bool_param(request.args.get("with_users"))
    try:
        playlists = get_top_playlists(type, args)
        return api_helpers.success_response(playlists)
    except exceptions.ArgumentError as e:
        # Bad query parameters surface as a 400 with the error message.
        return api_helpers.error_response(str(e), 400)
29,339
def parse_host(incomplete_uri: str) -> str:
    """Extract the netloc/host portion of an incomplete URI.

    Prefixing ``//`` makes urllib treat the input as network-located;
    without it the string would be parsed as a relative path and the
    netloc would come back empty.
    """
    prefixed = "//" + incomplete_uri
    return urllib.parse.urlparse(prefixed).netloc
29,340
def inner_by_delta(vec1: Vec, vec2: Vec):
    """Compute the inner product of two vectors as a product of deltas.

    The two vectors are assumed to be from the same base and have the same
    number of indices, or ValueError will be raised.

    Args:
        vec1: The first vector.
        vec2: The second vector.

    Returns:
        The product of one Kronecker delta per paired index; ``Integer(1)``
        when the vectors carry no indices.

    Raises:
        ValueError: When the labels or the index counts of the two vectors
            differ.
    """
    indices1 = vec1.indices
    indices2 = vec2.indices
    if vec1.label != vec2.label or len(indices1) != len(indices2):
        # Typo fixed in the message: "computer" -> "compute".
        raise ValueError(
            'Invalid vectors to compute inner product by delta',
            (vec1, vec2)
        )

    return functools.reduce(operator.mul, (
        KroneckerDelta(i, j) for i, j in zip(indices1, indices2)
    ), Integer(1))
29,341
def setup_drake(*, version, build='nightly'):
    """Installs drake on Google's Colaboratory and (if necessary) adds the
    installation location to `sys.path`.  This will take approximately two
    minutes, mostly to provision the machine with drake's prerequisites, but
    the server should remain provisioned for 12 hours.  Colab may ask you to
    "Reset all runtimes"; say no to save yourself the reinstall.

    Args:
        version: A string to identify which revision of drake to install.
        build: An optional string to specify the hosted directory on
            https://drake-packages.csail.mit.edu/drake/ of the build
            identified by version.  Current options are 'nightly',
            'continuous', or 'experimental'.  Default is 'nightly', which is
            recommended.

    Note: Possible version names vary depending on the build.
        - Nightly builds are versioned by date, e.g., '20200725', and the
          date represents the *morning* (not the prior evening) of the build.
          You can also use 'latest'.
        - Continuous builds are only available with the version 'latest'.
        - (Advanced) Experimental builds use the version name
          '<timestamp>-<commit>'.  See
          https://drake.mit.edu/jenkins#building-binary-packages-on-demand
          for information on obtaining a binary from an experimental branch.

    See https://drake.mit.edu/from_binary.html for more information.

    Note: If you already have pydrake installed to the target location, this
        will confirm that the build/version are the same as the installed
        version, otherwise it will overwrite the previous installation.  If
        you have pydrake available on your ``sys.path`` in a location that is
        different than the target installation, this script will throw an
        Exception to avoid possible confusion.  If you had already imported
        pydrake, this script will throw an assertion to avoid promising that
        we can successfully reload the module.
    """
    # Only meaningful on Colab, and a previously-imported pydrake cannot be
    # reliably reloaded, so both are hard preconditions.
    assert 'google.colab' in sys.modules, (
        "This script is intended for use on Google Colab only.")
    assert 'pydrake' not in sys.modules, (
        "You have already imported a version of pydrake.  Please choose "
        "'Restart runtime' from the menu to restart with a clean environment.")

    # Check for conflicting pydrake installations.
    v = sys.version_info
    path = f"/opt/drake/lib/python{v.major}.{v.minor}/site-packages"
    spec = importlib.util.find_spec('pydrake')
    if spec is not None and path not in spec.origin:
        raise Exception("Found a conflicting version of pydrake on your "
                        f"sys.path at {spec.origin}.  Please remove it from "
                        "the path to avoid ambiguity.")

    # Check to see if this build/version is already installed.
    # The token file records which build/version provisioned /opt/drake.
    setup_version_info = {"version": version, "build": build}
    setup_version_file = "/opt/drake/.setup_drake_colab_token.json"
    already_installed = False
    if os.path.isfile(setup_version_file):
        with open(setup_version_file, "r") as file:
            if json.load(file) == setup_version_info:
                already_installed = True

    # Download the binaries and install.
    if not already_installed:
        # Wipe any stale installation before extracting the new one.
        if os.path.isdir('/opt/drake'):
            shutil.rmtree('/opt/drake')
        #base_url = 'https://drake-packages.csail.mit.edu/drake/'
        #urlretrieve(f"{base_url}{build}/drake-{version}-bionic.tar.gz",
        #            'drake.tar.gz')
        # THESE ARE A WORKAROUND FOR COLAB WITH PYTHON3.7
        # NOTE(review): this pinned URL ignores the `version`/`build`
        # arguments entirely while the workaround is in place.
        urlretrieve("https://drake-packages.csail.mit.edu/tmp/drake-20210409-pip-snopt-bionic.tar.gz",
                    'drake.tar.gz')
        subprocess.run(["pip3", "install", "meshcat"])
        # END PYTHON3.7 WORKAROUND
        subprocess.run(['tar', '-xzf', 'drake.tar.gz', '-C', '/opt'],
                       check=True)
        # Provision the machine with drake's apt prerequisites.
        subprocess.run(['apt-get', 'update', '-o',
                        'APT::Acquire::Retries=4', '-qq'], check=True)
        with open("/opt/drake/share/drake/setup/packages-bionic.txt",
                  "r") as f:
            packages = f.read().splitlines()
        subprocess.run(['apt-get', 'install', '-o',
                        'APT::Acquire::Retries=4', '-o', 'Dpkg::Use-Pty=0',
                        '-qy', '--no-install-recommends'] + packages,
                       check=True)

        # Write setup information to disk (so that we can avoid re-running it
        # if the machine is already provisioned).
        with open(setup_version_file, "w") as file:
            json.dump(setup_version_info, file)

    # Check if our new installation is already in the path.
    spec = importlib.util.find_spec('pydrake')
    if spec is None:
        sys.path.append(path)
        spec = importlib.util.find_spec('pydrake')

    # Confirm that we now have pydrake on the path.
    assert spec is not None, (
        "Installation failed.  find_spec('pydrake') returned None.")
    assert path in spec.origin, (
        "Installation failed.  find_spec is locating pydrake, but not in the "
        "expected path.")
29,342
def remove_mapping(rxn_smi: str, keep_reagents: bool = False) -> str:
    """Strip every atom-map number from a reaction SMILES string.

    Parameters
    ----------
    rxn_smi : str
        The reaction SMILES string whose atom mapping is to be removed
    keep_reagents : bool (Default = False)
        whether to keep the reagents in the output reaction SMILES string

    Returns
    -------
    str
        The reaction SMILES string with all atom mapping removed

    Also see: clean_rxn_smis_50k_one_phase, clean_rxn_smis_FULL_one_phase
    """
    rxn = rdChemReactions.ReactionFromSmarts(rxn_smi, useSmiles=True)
    if not keep_reagents:
        rxn.RemoveAgentTemplates()

    # Clear the map-number property on every atom of every product and
    # reactant template (products first, matching the original pass order).
    for mol in list(rxn.GetProducts()) + list(rxn.GetReactants()):
        for atom in mol.GetAtoms():
            if atom.HasProp("molAtomMapNumber"):
                atom.ClearProp("molAtomMapNumber")

    return rdChemReactions.ReactionToSmiles(rxn)
29,343
def test_multiple_jobs(caplog, no_job_dirs):
    """Test with multiple jobs.

    From: https://ci.appveyor.com/project/racker-buildbot/luv

    :param caplog: pytest extension fixture.
    :param str no_job_dirs: Test with --no-job-dirs.
    """
    # Two job IDs, each producing the same four artifact file names, so every
    # name collides across jobs.
    jobs_artifacts = [
        ('v5wnn9k8auqcqovw', 'luajit.exe', 675840),
        ('v5wnn9k8auqcqovw', 'luv.dll', 891392),
        ('v5wnn9k8auqcqovw', '.coverage', 123),
        ('v5wnn9k8auqcqovw', 'no_ext', 456),
        ('bpgcbvqmawv1jw06', 'luajit.exe', 539136),
        ('bpgcbvqmawv1jw06', 'luv.dll', 718336),
        ('bpgcbvqmawv1jw06', '.coverage', 789),
        ('bpgcbvqmawv1jw06', 'no_ext', 101),
    ]
    config = dict(always_job_dirs=False, no_job_dirs=no_job_dirs, dir=None)

    # Handle collision.
    if no_job_dirs == 'unknown':
        with pytest.raises(HandledError):
            artifacts_urls(config, jobs_artifacts)
        assert caplog.records[-2].message.startswith('Collision:')
        return

    actual = artifacts_urls(config, jobs_artifacts)
    expected = dict()
    messages = [r.message for r in caplog.records]

    # Test-specific API URL.
    url = API_PREFIX + '/buildjobs/%s/artifacts/%s'

    # Default behavior: conflicts force per-job directories, so every
    # artifact is keyed by "<job_id>/<file>".
    if not no_job_dirs:
        assert 'Multiple job IDs with file conflicts, automatically setting job_dirs = True' in messages
        expected[py.path.local('v5wnn9k8auqcqovw/luajit.exe')] = (url % ('v5wnn9k8auqcqovw', 'luajit.exe'), 675840)
        expected[py.path.local('v5wnn9k8auqcqovw/luv.dll')] = (url % ('v5wnn9k8auqcqovw', 'luv.dll'), 891392)
        expected[py.path.local('v5wnn9k8auqcqovw/.coverage')] = (url % ('v5wnn9k8auqcqovw', '.coverage'), 123)
        expected[py.path.local('v5wnn9k8auqcqovw/no_ext')] = (url % ('v5wnn9k8auqcqovw', 'no_ext'), 456)
        expected[py.path.local('bpgcbvqmawv1jw06/luajit.exe')] = (url % ('bpgcbvqmawv1jw06', 'luajit.exe'), 539136)
        expected[py.path.local('bpgcbvqmawv1jw06/luv.dll')] = (url % ('bpgcbvqmawv1jw06', 'luv.dll'), 718336)
        expected[py.path.local('bpgcbvqmawv1jw06/.coverage')] = (url % ('bpgcbvqmawv1jw06', '.coverage'), 789)
        expected[py.path.local('bpgcbvqmawv1jw06/no_ext')] = (url % ('bpgcbvqmawv1jw06', 'no_ext'), 101)
    else:
        assert 'Multiple job IDs with file conflicts, automatically setting job_dirs = True' not in messages

    # 'skip' keeps the first job's artifacts and logs a skip per conflict.
    if no_job_dirs == 'skip':
        assert any(re.match(r'Skipping.*luajit\.exe.*bpgcbvqmawv1jw06', m) for m in messages)
        assert any(re.match(r'Skipping.*luv\.dll.*bpgcbvqmawv1jw06', m) for m in messages)
        assert any(re.match(r'Skipping.*\.coverage.*bpgcbvqmawv1jw06', m) for m in messages)
        assert any(re.match(r'Skipping.*no_ext.*bpgcbvqmawv1jw06', m) for m in messages)
        expected[py.path.local('luajit.exe')] = (url % ('v5wnn9k8auqcqovw', 'luajit.exe'), 675840)
        expected[py.path.local('luv.dll')] = (url % ('v5wnn9k8auqcqovw', 'luv.dll'), 891392)
        expected[py.path.local('.coverage')] = (url % ('v5wnn9k8auqcqovw', '.coverage'), 123)
        expected[py.path.local('no_ext')] = (url % ('v5wnn9k8auqcqovw', 'no_ext'), 456)
    else:
        assert not any(re.match(r'Skipping.*luajit\.exe.*bpgcbvqmawv1jw06', m) for m in messages)
        assert not any(re.match(r'Skipping.*luv\.dll.*bpgcbvqmawv1jw06', m) for m in messages)
        assert not any(re.match(r'Skipping.*\.coverage.*bpgcbvqmawv1jw06', m) for m in messages)
        assert not any(re.match(r'Skipping.*no_ext.*bpgcbvqmawv1jw06', m) for m in messages)

    # 'overwrite' keeps the second job's artifacts and logs an overwrite.
    if no_job_dirs == 'overwrite':
        assert any(re.match(r'Overwriting.*luajit\.exe.*v5wnn9k8auqcqovw.*bpgcbvqmawv1jw06', m) for m in messages)
        assert any(re.match(r'Overwriting.*luv\.dll.*v5wnn9k8auqcqovw.*bpgcbvqmawv1jw06', m) for m in messages)
        assert any(re.match(r'Overwriting.*\.coverage.*v5wnn9k8auqcqovw.*bpgcbvqmawv1jw06', m) for m in messages)
        assert any(re.match(r'Overwriting.*no_ext.*v5wnn9k8auqcqovw.*bpgcbvqmawv1jw06', m) for m in messages)
        expected[py.path.local('luajit.exe')] = (url % ('bpgcbvqmawv1jw06', 'luajit.exe'), 539136)
        expected[py.path.local('luv.dll')] = (url % ('bpgcbvqmawv1jw06', 'luv.dll'), 718336)
        expected[py.path.local('.coverage')] = (url % ('bpgcbvqmawv1jw06', '.coverage'), 789)
        expected[py.path.local('no_ext')] = (url % ('bpgcbvqmawv1jw06', 'no_ext'), 101)
    else:
        assert not any(re.match(r'Overwriting.*luajit\.exe.*v5wnn9k8auqcqovw.*bpgcbvqmawv1jw06', m) for m in messages)
        assert not any(re.match(r'Overwriting.*luv\.dll.*v5wnn9k8auqcqovw.*bpgcbvqmawv1jw06', m) for m in messages)
        assert not any(re.match(r'Overwriting.*\.coverage.*v5wnn9k8auqcqovw.*bpgcbvqmawv1jw06', m) for m in messages)
        assert not any(re.match(r'Overwriting.*no_ext.*v5wnn9k8auqcqovw.*bpgcbvqmawv1jw06', m) for m in messages)

    # 'rename' keeps both, suffixing the second job's file stems with '_'.
    if no_job_dirs == 'rename':
        assert any(re.match(r'Renaming.*luajit\.exe.*luajit_\.exe.*bpgcbvqmawv1jw06', m) for m in messages)
        assert any(re.match(r'Renaming.*luv\.dll.*luv_\.dll.*bpgcbvqmawv1jw06', m) for m in messages)
        assert any(re.match(r'Renaming.*\.coverage.*\.coverage_.*bpgcbvqmawv1jw06', m) for m in messages)
        assert any(re.match(r'Renaming.*no_ext.*no_ext_.*bpgcbvqmawv1jw06', m) for m in messages)
        expected[py.path.local('luajit.exe')] = (url % ('v5wnn9k8auqcqovw', 'luajit.exe'), 675840)
        expected[py.path.local('luv.dll')] = (url % ('v5wnn9k8auqcqovw', 'luv.dll'), 891392)
        expected[py.path.local('.coverage')] = (url % ('v5wnn9k8auqcqovw', '.coverage'), 123)
        expected[py.path.local('no_ext')] = (url % ('v5wnn9k8auqcqovw', 'no_ext'), 456)
        expected[py.path.local('luajit_.exe')] = (url % ('bpgcbvqmawv1jw06', 'luajit.exe'), 539136)
        expected[py.path.local('luv_.dll')] = (url % ('bpgcbvqmawv1jw06', 'luv.dll'), 718336)
        expected[py.path.local('.coverage_')] = (url % ('bpgcbvqmawv1jw06', '.coverage'), 789)
        expected[py.path.local('no_ext_')] = (url % ('bpgcbvqmawv1jw06', 'no_ext'), 101)
    else:
        assert not any(re.match(r'Renaming.*luajit\.exe.*luajit_\.exe.*bpgcbvqmawv1jw06', m) for m in messages)
        assert not any(re.match(r'Renaming.*luv\.dll.*luv_\.dll.*bpgcbvqmawv1jw06', m) for m in messages)
        assert not any(re.match(r'Renaming.*\.coverage.*\.coverage_.*bpgcbvqmawv1jw06', m) for m in messages)
        assert not any(re.match(r'Renaming.*no_ext.*no_ext_.*bpgcbvqmawv1jw06', m) for m in messages)

    assert actual == expected
29,344
def generate_bot_master_get_results_message(message_id, receiving_host, receiving_port):
    """Build a BOTMASTERGETRESULT protobuf message.

    :rtype : fortrace.net.proto.genericmessage_pb2.GenericMessage
    :type receiving_port: int
    :type receiving_host: str
    :type message_id: long
    :param message_id: the id of this message
    :param receiving_host: the host that receives the order
    :param receiving_port: the host's port
    :return: the message to be generated
    """
    msg = genericmessage_pb2.GenericMessage()
    msg.message_type = messagetypes_pb2.BOTMASTERGETRESULT
    msg.message_id = message_id
    # Fill in the bot-master result extension with the target endpoint.
    result_ext = msg.Extensions[botmastermessage_pb2.bm_result]
    result_ext.receiving_host = receiving_host
    result_ext.receiving_port = receiving_port
    assert msg.IsInitialized()
    return msg
29,345
def update_comments_in_parent(reference_doctype, reference_name, _comments):
    """Updates `_comments` property in parent Document with given dict.

    :param reference_doctype: DocType of the parent document.
    :param reference_name: Name of the parent document.
    :param _comments: Dict of comments.  NOTE(review): the value is sliced
        with ``[-50:]`` below, which suggests a list of comment dicts rather
        than a plain dict — confirm against callers.
    """
    # Nothing to update without a concrete parent; single DocTypes have no
    # per-record `_comments` column.
    if not reference_doctype or not reference_name or frappe.db.get_value("DocType", reference_doctype, "issingle"):
        return
    try:
        # use sql, so that we do not mess with the timestamp
        # Only the 50 most recent comments are persisted.
        frappe.db.sql("""update `tab{0}` set `_comments`=%s where name=%s""".format(reference_doctype),  # nosec
            (json.dumps(_comments[-50:]), reference_name))
    except Exception as e:
        if frappe.db.is_column_missing(e) and getattr(frappe.local, 'request', None):
            # missing column and in request, add column and update after commit
            frappe.local._comments = (getattr(frappe.local, "_comments", [])
                + [(reference_doctype, reference_name, _comments)])
        elif frappe.db.is_data_too_long(e):
            # Payload exceeds the column size; surface as a typed exception.
            raise frappe.DataTooLongException
        else:
            # Any other DB error would have caused an implicit commit.
            raise ImplicitCommitError
    else:
        # On success, invalidate the website route cache for the parent
        # document (skipped while running patches).
        if not frappe.flags.in_patch:
            reference_doc = frappe.get_doc(reference_doctype, reference_name)
            if getattr(reference_doc, "route", None):
                clear_cache(reference_doc.route)
29,346
def print_encoding_dic(obj, f=stdout):
    """Generate and print the Rust definition of the encoding dictionary.

    Emits a ``MAP_TO_HFS`` lazy-static AHashMap mapping each composed
    (normal) character to its decomposed (HFS+) components.

    :param obj: Mapping from composed ``char`` to its decomposed string.
    :param f: Writable text stream the Rust source is printed to.
    """
    header = """\
/// map from composed character (normal) to decomposed components (HFS+)
///
/// # Examples
///
/// ```ignore
/// assert_eq!((*MAP_TO_HFS).get(&'\\u{00E9}').unwrap(), "e\\u{0301}");
/// ```
pub static ref MAP_TO_HFS: AHashMap<char, &'static str> = {
        let mut map = AHashMap::new();"""
    print(header, file=f)
    for compose, decompose in obj.items():
        # Each entry becomes: map.insert('\u{XXXX}', "\u{YYYY}...");
        components = "".join(f"\\u{{{ord(c):04X}}}" for c in decompose)
        entry = f"        map.insert('\\u{{{ord(compose):04X}}}', \"" + components + '");'
        print(entry, file=f)
    print("        return map;\n    };", file=f)
29,347
def readLogData(username,level,root='.'):
    """
    Extracts key events from a log

    :param username: User whose log file is read (resolved via getFilename).
    :param level: Game level; also selects the WAYPOINTS table.
    :param root: Directory containing the log file.
    :return: List of event dicts, most recent first (entries are prepended).
        NOTE(review): assumes a CREATE_TAG line precedes every other event —
        otherwise ``now-start`` below would fail on ``start`` being None.
    """
    filename = getFilename(username,level,extension='log',root=root)
    log = []
    start = None
    for line in fileinput.input(filename):
        elements = line.split()
        # Timestamps are parsed from "[YYYY-MM-DD HH:MM:SS]"; the slices
        # strip the surrounding brackets.
        if elements[2] == MESSAGE_TAG:
            now = datetime.datetime.strptime('%s %s' % (elements[0][1:],elements[1][:-1]),'%Y-%m-%d %H:%M:%S')
            log.insert(0,{'type': 'message','content': ' '.join(elements[3:]),
                'time': now-start})
        elif elements[2] == LOCATION_TAG:
            now = datetime.datetime.strptime('%s %s' % (elements[0][1:],elements[1][:-1]),'%Y-%m-%d %H:%M:%S')
            index = symbol2index(elements[3],level)
            waypoint = WAYPOINTS[level][index]
            log.insert(0,{'type': 'location','destination': waypoint['name'],
                'buildingNo': index+1,'buildingTotal': len(WAYPOINTS[level]),
                'time': now-start})
        elif elements[2] == CREATE_TAG:
            # Session start; all later event times are offsets from this.
            start = datetime.datetime.strptime('%s %s' % (elements[0][1:],elements[1][:-1]),'%Y-%m-%d %H:%M:%S')
            log.insert(0,{'type': 'create', 'time': 'Start','start': start})
        elif elements[2] == COMPLETE_TAG:
            now = datetime.datetime.strptime('%s %s' % (elements[0][1:],elements[1][:-1]),'%Y-%m-%d %H:%M:%S')
            log.insert(0,{'type': 'complete','success': elements[3] == 'success',
                'time': now-start})
        elif elements[2] == USER_TAG:
            # USER lines annotate the most recent event (log[0]) in place.
            log[0]['choice'] = elements[3]
            log[0]['location'] = WAYPOINTS[level][symbol2index(elements[4],level)]['name']
            log[0]['danger'] = elements[5]
            log[0]['dead'] = elements[6]
            log[0]['image'] = elements[7]
            # Content is wrapped in parentheses; an optional ") (" separates
            # the message from its acknowledgement.
            log[0]['content'] = ' '.join(elements[8:])[1:-1]
            if ') (' in log[0]['content']:
                log[0]['content'],log[0]['ack'] = log[0]['content'].split(') (')
            else:
                log[0]['ack'] = ''
    fileinput.close()
    return log
29,348
def get_type1(pkmn):
    """Return Type 1 of the Pokémon with the name ``pkmn``.

    Looks the name up in the module-level ``__pokemon__`` table.
    """
    entry = __pokemon__[pkmn]
    return entry['Type 1']
29,349
def load_json(fname):
    """Load a JSON file containing a riptide object (or list/dict/composition
    thereof).

    :param fname: Path of the JSON file to read.
    :return: The deserialized riptide object.
    """
    with open(fname, 'r') as handle:
        contents = handle.read()
    return from_json(contents)
29,350
def build(dockerfile, force_rm, no_cache, quiet, rm, tag, path):
    """
    Build a new image from the source code at PATH.

    When ``dockerfile`` is given it is copied into PATH for the duration of
    the build and removed afterwards; the build refuses to run if PATH
    already contains a Dockerfile.
    """
    build_dockerfile = os.path.join(path, DOCKERFILE)
    if dockerfile and os.path.exists(build_dockerfile):
        click.echo('A Dockerfile already exists at {}, refusing to run!'
                   .format(path), err=True)
        sys.exit(1)

    # Translate the boolean flags into docker's --flag=true/false form.
    options = [DOCKER, 'build']
    for flag, value in (('force-rm', force_rm), ('no-cache', no_cache),
                        ('quiet', quiet), ('rm', rm)):
        options.append('--{}={}'.format(flag, str(value).lower()))
    if tag:
        options.append('--tag={}'.format(tag))
    options.append(path)

    if dockerfile:
        shutil.copyfile(dockerfile, build_dockerfile)
    try:
        subprocess.call(options)
    finally:
        # Always remove the temporary Dockerfile copy, even on failure.
        if dockerfile:
            os.remove(build_dockerfile)
29,351
def cargo_build(name, srcs, binaries, cargo_flags, profile = "release", target = None, env_paths = {}, deps = []):
    """ Builds cargo binaries.

    Args:
      name: name of the target.
      srcs: list of input labels.
      binaries: names of binaries to build.
      cargo_flags: extra flags to pass to cargo.
      profile: cargo profile to build.
      target: the build target.
      env_paths: environment variables passing paths to files.
      deps: prerequisites for the cargo build.
    """
    # Base cargo invocation; $$CARGO is resolved in the genrule cmd below.
    args = ["$$CARGO", "build", "--profile", profile]
    if target:
        args += ["--target", target]

    # Wasm targets produce `<bin>.wasm`; native targets have no suffix.
    suffix = ".wasm" if target and target.startswith("wasm") else ""

    # Cargo places outputs under <target-dir>[/<target-triple>]/<profile>.
    out_dir = "$$CARGO_TARGET_DIR/"
    if target:
        out_dir = out_dir + target + "/"
    out_dir = out_dir + profile

    # One --bin flag, one copy command and one declared output per binary.
    cp_cmds = []
    outs = []
    for bin in binaries:
        args += ["--bin", bin]
        bin_name = bin + suffix
        cp_cmds.append("".join(["cp ", out_dir, "/", bin_name, " $(location ", bin_name, ")"]))
        outs.append(bin_name)

    args.extend(cargo_flags)

    # Export each configured path as an absolute environment variable.
    env_cmds = []
    for (k, v) in env_paths.items():
        env_cmds.append("export %s=$$PWD/%s" % (k, v))

    cargo_cmd = " ".join(args)
    cmds = "\n".join(env_cmds + [cargo_cmd] + cp_cmds)
    native.genrule(
        name = name,
        srcs = srcs + deps,
        message = "Cargo build",
        tools = [
            "@rules_rust//rust/toolchain:current_exec_cargo_files",
            "@rules_rust//rust/toolchain:current_exec_rustc_files",
            "@rules_rust//rust/toolchain:current_exec_rustfmt_files",
        ],
        outs = outs,
        cmd = """
        export CARGO=$(location @rules_rust//rust/toolchain:current_exec_cargo_files)
        export RUSTC=$(location @rules_rust//rust/toolchain:current_exec_rustc_files)
        export RUSTFMT=$$(realpath $(location @rules_rust//rust/toolchain:current_exec_rustfmt_files))
        export CARGO_TARGET_DIR=$(BINDIR)/cargo/target
        export CARGO_HOME=$(BINDIR)/cargo/home
        """ + cmds,
    )
29,352
def get_stock_data(symbol, start_date, end_date, source="phisix", format="c"):
    """Returns pricing data for a specified stock and source.

    Parameters
    ----------
    symbol : str
        Symbol of the stock in the PSE or Yahoo.
        You can refer to these links:
        PHISIX: https://www.pesobility.com/stock
        YAHOO: https://www.nasdaq.com/market-activity/stocks/screener?exchange=nasdaq
    start_date : str
        Starting date (YYYY-MM-DD) of the period that you want to get data on
    end_date : str
        Ending date (YYYY-MM-DD) of the period you want to get data on
    source : str
        First source to query from ("pse", "yahoo").
        If the stock is not found in the first source,
        the query is run on the other source.
    format : str
        Format of the output data

    Returns
    -------
    pandas.DataFrame
        Stock data (in the specified `format`) for the specified company and
        date range
    """
    # Map each format character to its output column name.
    df_columns = [DATA_FORMAT_COLS[c] for c in format]

    if source == "phisix":
        # The query is run on 'phisix', but if the symbol isn't found, the
        # same query is run on 'yahoo'.
        # NOTE(review): the fallback calls drop the `format` argument that
        # the primary phisix call forwards — confirm whether get_yahoo_data /
        # get_pse_data need it here.
        df = get_pse_data(symbol, start_date, end_date, format=format)
        if df is None:
            df = get_yahoo_data(symbol, start_date, end_date)
    elif source == "yahoo":
        # The query is run on 'yahoo', but if the symbol isn't found, the
        # same query is run on 'phisix'.
        df = get_yahoo_data(symbol, start_date, end_date)
        if df is None:
            df = get_pse_data(symbol, start_date, end_date)
    else:
        raise Exception("Source must be either 'phisix' or 'yahoo'")

    missing_columns = [col for col in df_columns if col not in df.columns]

    # Fill missing columns with np.nan
    for missing_column in missing_columns:
        df[missing_column] = np.nan

    if len(missing_columns) > 0:
        print("Missing columns filled w/ NaN:", missing_columns)

    # Return only the requested columns, in the requested order.
    return df[df_columns]
29,353
def extract_borderless(result) -> list:
    """Extract borderless-table bounding boxes from a detection result.

    Args:
        result: Detection output; ``result[0][2]`` holds candidate boxes as
            arrays of ``[x1, y1, x2, y2, confidence]``.

    Returns:
        A list of int arrays describing the borderless tables.  Each array
        holds the two opposite corners (top right and bottom left) of a
        bounding box whose confidence exceeded the 0.85 threshold.
    """
    candidates = result[0][2]
    # Keep only detections above the confidence threshold, dropping the
    # confidence value itself and rounding coordinates down to ints.
    return [box[:4].astype(int) for box in candidates if box[4] > .85]
29,354
def get_frequent_length_k_itemsets(transactions, min_support=0.2, k=1, frequent_sub_itemsets=None):
    """Returns all the length-k itemsets, from the transactions, that satisfy
    min_support.

    Parameters
    ----------
    transactions : list of list
    min_support : float, optional
        From 0.0 to 1.0. Percentage of transactions that should contain an
        itemset for it to be considered frequent.
    k : int, optional
        Length that the frequent itemsets should be
    frequent_sub_itemsets : frozenset of frozenset, optional
        Facilitates candidate pruning by the Apriori property. Length-k
        itemset candidates that aren't supersets of at least 1 frequent
        sub-itemset are pruned.

    Returns
    -------
    list of frozenset
    list of float

    Raises
    ------
    ValueError
        When min_support is outside (0, 1] or k is not positive.
    """
    if min_support <= 0 or min_support > 1:
        raise ValueError('min_support must be greater than 0 and less than or equal to 1.0')
    if k <= 0:
        raise ValueError('k must be greater than 0')

    # Candidate items come from the frequent sub-itemsets when given
    # (Apriori: every item of a frequent k-itemset appears in some frequent
    # sub-itemset), otherwise from the transactions themselves.
    all_items = set()
    if frequent_sub_itemsets:
        for sub_itemset in frequent_sub_itemsets:
            all_items = all_items.union(sub_itemset)
    else:
        for transaction in transactions:
            all_items = all_items.union(transaction)

    # combinations() yields exactly the k-distinct-item candidates, replacing
    # the original product(..., repeat=k) + length filter which enumerated
    # n**k tuples only to discard every tuple containing a repeated item.
    all_length_k_itemsets = frozenset(
        frozenset(itemset) for itemset in itertools.combinations(all_items, k))

    # Remove itemsets that don't have a frequent sub-itemset to take
    # advantage of the Apriori property (any() short-circuits on first hit).
    pruned_length_k_itemsets = all_length_k_itemsets
    if frequent_sub_itemsets:
        pruned_length_k_itemsets = {
            itemset for itemset in all_length_k_itemsets
            if any(sub_itemset.issubset(itemset)
                   for sub_itemset in frequent_sub_itemsets)
        }

    # Keep only candidates whose support clears the threshold.
    frequent_itemsets = []
    frequent_supports = []
    supports = support(transactions, pruned_length_k_itemsets)
    for itemset, itemset_support in supports.items():
        if itemset_support >= min_support:
            frequent_itemsets.append(itemset)
            frequent_supports.append(itemset_support)
    return frequent_itemsets, frequent_supports
29,355
def long_slice(image_path, out_name, out_dir, slice_size):
    """Slice an image vertically into parts at most ``slice_size`` tall.

    Each slice is saved as ``<out_dir>/slice_<out_name>_<i>.png``.

    Args:
        image_path: Path of the source image.
        out_name: Stem used in each output filename.
        out_dir: Directory the slices are written to.
        slice_size: Maximum height, in pixels, of each slice.
    """
    # BUG FIX: the original called the undefined ``enum`` (NameError),
    # compared ``i == slices`` (never true for a 0-based index), and set
    # ``lower = i * slice_size`` which produced empty/inverted crop boxes.
    img = Image.open(image_path)
    width, height = img.size
    slices = int(math.ceil(height / slice_size))
    upper = 0
    for i in range(slices):
        # The final slice extends to the image bottom, so it may be shorter
        # than slice_size.
        lower = height if i == slices - 1 else upper + slice_size
        working_slice = img.crop((0, upper, width, lower))
        working_slice.save(
            os.path.join(out_dir, "slice_" + out_name + "_" + str(i) + ".png"))
        upper += slice_size
29,356
def create_news_markup():
    """Create the reply keyboard used for movie news.

    :return: telebot.types.ReplyKeyboardMarkup containing a single row with
        the "go back" command.
    """
    markup = types.ReplyKeyboardMarkup()
    markup.row(Commands.GET_BACK_COMMAND)
    return markup
29,357
def NVDA_restarts():
    """Ensure NVDA can be restarted from keyboard.

    Drives the running NVDA instance through NVDA+q -> "Restart", then waits
    for the old process's message window to disappear and for the new
    instance's welcome dialog to gain focus.
    """
    spy = _nvdaLib.getSpyLib()
    spy.wait_for_specific_speech("Welcome to NVDA")  # ensure the dialog is present.
    spy.wait_for_speech_to_finish()
    # Get handle of the message window for the currently running NVDA
    oldMsgWindowHandle = _getNvdaMessageWindowhandle()
    spy.emulateKeyPress("NVDA+q")
    spy.wait_for_specific_speech("Exit NVDA")
    _builtIn.sleep(0.5)  # the dialog is not always receiving the enter keypress, wait a little longer for it
    # Move from the default "Exit" choice to "Restart" before confirming.
    spy.emulateKeyPress("downArrow")
    spy.wait_for_specific_speech("Restart")
    spy.emulateKeyPress("enter", blockUntilProcessed=False)  # don't block so NVDA can exit
    # The old process is gone once its message window no longer exists.
    _blockUntilConditionMet(
        getValue=lambda: windowWithHandleExists(oldMsgWindowHandle) is False,
        giveUpAfterSeconds=10,
        errorMessage="Old NVDA is still running"
    )
    _builtIn.should_not_be_true(
        windowWithHandleExists(oldMsgWindowHandle),
        msg="Old NVDA process is stil running"
    )
    waitUntilWindowFocused("Welcome to NVDA")
29,358
def get_bibtex_query_set(params):
    """Returns bibtex objects which match the search parameters.

    Args:
        params: dict which is maded by `parse_GET_params`

    Returns:
        QuerySet request_dict

    NOTE(review): an unrecognized ``sort`` value falls off the end of the
    final if/elif chain and returns None — confirm whether callers rely on
    that, and note the accepted value is spelled "desending".
    """
    bibtex_queryset = Bibtex.objects.all()

    # Book_style
    book_style = params.get("book_style")
    if (book_style is not None) and (book_style != "ALL"):
        # TODO: Make it more better (remove if sentence)
        # AWARD/KEYNOTE are stored directly on the bibtex type; every other
        # style is resolved through the related book.
        if (book_style == "AWARD") or (book_style == "KEYNOTE"):
            bibtex_queryset = bibtex_queryset.filter(bib_type=book_style)
        else:
            bibtex_queryset = bibtex_queryset.filter(
                book__style=book_style,
                bib_type="SAMEASBOOK",
            )

    # Filter by published year.  The academic year runs April 1 through
    # March 31 of the following calendar year.
    period_method = params.get("period_method", "ACADEMIC_YEAR")
    year = params.get("period_year", datetime.datetime.now().year)
    if period_method == "YEAR":
        bibtex_queryset = bibtex_queryset.filter(
            pub_date__gte=datetime.date(int(year), 1, 1),
            pub_date__lte=datetime.date(int(year), 12, 31),
        )
    elif period_method == "ACADEMIC_YEAR":
        bibtex_queryset = bibtex_queryset.filter(
            pub_date__gte=datetime.date(int(year), 4, 1),
            pub_date__lte=datetime.date(int(year) + 1, 3, 31),
        )
    else:
        pass

    # Keywords: space-separated terms, each term must match at least one of
    # the title/book/author fields (terms are ANDed together).
    keywords = params.get("keywords")
    if keywords is not None:
        keywords_list = keywords.split(" ")
        for keyword in keywords_list:
            bibtex_queryset = bibtex_queryset.filter(
                Q(title__icontains=keyword)
                | Q(book__title__icontains=keyword)
                | Q(book__abbr__icontains=keyword)
                | Q(authors__name_en__icontains=keyword)
                | Q(authors__name_ja__icontains=keyword)
            ).distinct()

    # Tags: space-separated, ANDed together.
    tags = params.get("tags")
    if tags is not None:
        tags_list = tags.split(" ")
        for tag in tags_list:
            bibtex_queryset = bibtex_queryset.filter(
                Q(tags__name__icontains=tag)
            ).distinct()

    # Sort: default and "ascending" both order newest-first.
    sort = params.get("sort")
    if sort is None:
        return bibtex_queryset.order_by("-pub_date", "book", "title")
    elif sort == "ascending":
        return bibtex_queryset.order_by("-pub_date", "book", "title")
    elif sort == "desending":
        return bibtex_queryset.order_by("pub_date", "book", "title")
29,359
def _generate_description_from(command, name, description):
    """
    Generates description from the command and it's optionally given description.

    If both `description` and `command.__doc__` is missing, defaults to `name`.

    Parameters
    ----------
    command : `None` or `callable`
        The command's function.
    name : `str` or `None`
        The command's name, if name defaulting should be applied.
    description : `Any`
        The command's description.

    Returns
    -------
    description : `str`
        The generated description.

    Raises
    ------
    ValueError
        If `description` length is out of range [2:100].
    """
    # The while-True is used as a structured goto: each break falls through
    # to normalization, and the bare `return` exits with None when no
    # description source is available at all.
    while True:
        if (description is not None) or isinstance(description, str):
            break

        if command is not None:
            # Fall back to the command's docstring.
            description = getattr(command, '__doc__', None)
            if (description is not None) and isinstance(description, str):
                break

        if name is not None:
            # Last resort: use the command's name as its description.
            description = name
            break

        return

    description = normalize_description(description)

    if description is None:
        description_length = 0
    else:
        description_length = len(description)

    if (
        description_length < APPLICATION_COMMAND_DESCRIPTION_LENGTH_MIN
        or description_length > APPLICATION_COMMAND_DESCRIPTION_LENGTH_MAX
    ):
        raise ValueError(
            f'`description` length is out of the expected range '
            f'[{APPLICATION_COMMAND_DESCRIPTION_LENGTH_MIN}:{APPLICATION_COMMAND_DESCRIPTION_LENGTH_MAX}], got '
            f'{description_length!r}; {description!r}.'
        )

    return description
29,360
def test_source_observation(gcc_bin: str):
    """Test observation spaces.

    Checks that the gcc-v0 environment exposes the benchmark source via
    ``env.source`` and that its first line is a C-preprocessor linemarker
    for adpcm.c.
    """
    with gym.make("gcc-v0", gcc_bin=gcc_bin) as env:
        env.reset()
        lines = env.source.split("\n")
        # Preprocessed output begins with a linemarker: `# <line> "adpcm.c"`.
        assert re.match(r"# \d+ \"adpcm.c\"", lines[0])
29,361
def uses_na_format(station: str) -> bool:
    """Return True when the station reports in the North American format,
    False when it uses the International format.

    Single-character region prefixes are consulted before the two-character
    (military) prefixes, preserving the precedence of the original lookup
    chain.

    Raises:
        BadStation: When no known region prefix matches.
    """
    lookups = (
        (station[0], NA_REGIONS, True),
        (station[0], IN_REGIONS, False),
        (station[:2], M_NA_REGIONS, True),
        (station[:2], M_IN_REGIONS, False),
    )
    for prefix, regions, is_na in lookups:
        if prefix in regions:
            return is_na
    raise BadStation("Station doesn't start with a recognized character set")
29,362
def openFile(prompt,key = "r",defaulttype = None, defaultname = None):
    """
    Method to open a text file with sanity checking, optional defaults and
    reprompt on failure. This is the main used callable function to open
    files.

    :param prompt: the prompt to be displayed
    :type prompt: str
    :param key: the key passed to open, default is "r" (read)
    :type key: str
    :param defaulttype: the default extension which will be added if not supplied, (default to None)
    :type defaulttype: str
    :param defaultname: the default filename, (defaults to None)
    :type defaultname: str
    :return: the opened file descriptor. The file name is processed to expand
        environmental variables and user names, so for example
        $ENV/dir/file.data or ~user/dir/file.data are expanded
    """
    # Keep prompting until a file opens successfully; each failure is
    # logged and the user is asked again.
    while True:
        filename = getFilename(prompt,defaulttype,defaultname)
        try:
            return open(filename,str(key))
        except IOError:
            logger.error("Failed to open file '{0:s}' with key '{1:s}'".\
                         format(filename,str(key)))
29,363
def dsphere(n=100, d=2, r=1, noise=None, ambient=None):
    """Sample `n` data points on a d-sphere.

    Parameters
    -----------
    n : int
        Number of data points in shape.
    d : int
        Intrinsic dimension; points live in (d+1)-dimensional space.
    r : float
        Radius of sphere.
    noise : float, default=None
        When given, Gaussian noise of this scale is added to each point.
    ambient : int, default=None
        Embed the sphere into a space with ambient dimension equal to
        `ambient`. The sphere is randomly rotated in this high dimensional
        space.
    """
    samples = np.random.randn(n, d+1)

    # Normalize points to the sphere of radius r.
    samples = r * samples / np.sqrt(np.sum(samples**2, 1)[:, None])

    if noise:
        samples += noise * np.random.randn(*samples.shape)

    if ambient:
        assert ambient > d, "Must embed in higher dimensions"
        samples = embed(samples, ambient)

    return samples
29,364
def topological_sort_by_down(start_nodes=None, all_nodes=None):
    """
    Topological sort method by down stream direction.
    'start_nodes' and 'all_nodes' only one needs to be given.

    Args:
        start_nodes (list[NodeGraphQt.BaseNode]):
            (Optional) the start update nodes of the graph.
        all_nodes (list[NodeGraphQt.BaseNode]):
            (Optional) if 'start_nodes' is None the function can calculate
            start nodes from 'all_nodes'.

    Returns:
        list[NodeGraphQt.BaseNode]: sorted nodes.
    """
    # Neither argument given: nothing to sort.
    if not start_nodes and not all_nodes:
        return []
    # Backdrop nodes are visual-only and excluded from the sort.
    if start_nodes:
        start_nodes = __remove_BackdropNode(start_nodes)
    if all_nodes:
        all_nodes = __remove_BackdropNode(all_nodes)
    # Derive start nodes as those without any input connection.
    if not start_nodes:
        start_nodes = [n for n in all_nodes if not _has_input_node(n)]
    if not start_nodes:
        return []

    # If no start node has a downstream connection there is nothing to
    # order — return the start nodes as-is.
    if not [n for n in start_nodes if _has_output_node(n)]:
        return start_nodes

    graph = _build_down_stream_graph(start_nodes)
    return _sort_nodes(graph, start_nodes, True)
29,365
def generate_and_upload_doxygen(): """Generate Doxygen.""" # Create empty dir and add static_footer.txt recreate_dir(DOXYGEN_WORKING_DIR) static_footer_path = os.path.join(DOXYGEN_WORKING_DIR, 'static_footer.txt') shutil.copyfile(os.path.join('tools', 'doxygen_footer.txt'), static_footer_path) # Make copy of doxygen config file, overriding any necessary configs, # and run doxygen. recreate_dir(DOXYGEN_CONFIG_DIR) modified_doxyfile = os.path.join(DOXYGEN_CONFIG_DIR, DOXYFILE_BASENAME) with open(DOXYFILE_BASENAME, 'r') as reader: with open(modified_doxyfile, 'w') as writer: shutil.copyfileobj(reader, writer) writer.write('OUTPUT_DIRECTORY = %s\n' % DOXYGEN_WORKING_DIR) writer.write('HTML_FOOTER = %s\n' % static_footer_path) subprocess.check_call([DOXYGEN_BINARY, modified_doxyfile]) # Create iframe_footer.html with open(os.path.join(DOXYGEN_WORKING_DIR, 'iframe_footer.html'), 'w') as f: f.write(IFRAME_FOOTER_TEMPLATE % ( datetime.datetime.now().isoformat(' '), subprocess.check_output([DOXYGEN_BINARY, '--version']).rstrip())) # Upload. cmd = ['gsutil', 'cp', '-a', 'public-read', '-R', DOXYGEN_WORKING_DIR, DOXYGEN_GS_PATH] subprocess.check_call(cmd)
29,366
def test_sigmat():
    """
    Test the support functionality for attached signature cryptographic material
    """
    # Material-less construction must be rejected.
    with pytest.raises(EmptyMaterialError):
        sigmet = SigMat()

    # Derivation codes and fully-qualified sizes for the two signature types.
    assert SigTwoDex.Ed25519 == 'A'  # Ed25519 signature.
    assert SigTwoDex.ECDSA_256k1 == 'B'  # ECDSA secp256k1 signature.

    assert SigTwoSizes[SigTwoDex.Ed25519] == 88
    assert SigTwoSizes[SigTwoDex.ECDSA_256k1] == 88

    # Round-trip the int <-> Base64 helpers over boundary values.
    cs = IntToB64(0)
    assert cs == "A"
    i = B64ToInt(cs)
    assert i == 0

    cs = IntToB64(27)
    assert cs == "b"
    i = B64ToInt(cs)
    assert i == 27

    cs = IntToB64(27, l=2)  # l pads the encoding to 2 characters
    assert cs == "Ab"
    i = B64ToInt(cs)
    assert i == 27

    cs = IntToB64(80)
    assert cs == "BQ"
    i = B64ToInt(cs)
    assert i == 80

    cs = IntToB64(4095)  # largest 2-char value
    assert cs == '__'
    i = B64ToInt(cs)
    assert i == 4095

    cs = IntToB64(4096)  # rolls over to 3 chars
    assert cs == 'BAA'
    i = B64ToInt(cs)
    assert i == 4096

    cs = IntToB64(6011)
    assert cs == "Bd7"
    i = B64ToInt(cs)
    assert i == 6011

    # Test attached signature code (empty raw)
    qsc = SigCntDex.Base64 + IntToB64(0, l=2)
    assert qsc == '-AAA'
    qscb = qsc.encode("utf-8")

    # The same material must round-trip through every constructor form.
    sigmat = SigMat(raw=b'', code=SigCntDex.Base64, index=0)
    assert sigmat.raw == b''
    assert sigmat.code == SigCntDex.Base64
    assert sigmat.index == 0
    assert sigmat.qb64 == qsc
    assert sigmat.qb64b == qscb
    assert sigmat.qb2 == b'\xf8\x00\x00'

    sigmat = SigMat(qb64b=qscb)
    assert sigmat.raw == b''
    assert sigmat.code == SigCntDex.Base64
    assert sigmat.index == 0
    assert sigmat.qb64 == qsc
    assert sigmat.qb64b == qscb
    assert sigmat.qb2 == b'\xf8\x00\x00'

    sigmat = SigMat(qb64=qsc)
    assert sigmat.raw == b''
    assert sigmat.code == SigCntDex.Base64
    assert sigmat.index == 0
    assert sigmat.qb64 == qsc
    assert sigmat.qb64b == qscb
    assert sigmat.qb2 == b'\xf8\x00\x00'

    sigmat = SigMat(qb64=qscb)  # also works for bytes
    assert sigmat.raw == b''
    assert sigmat.code == SigCntDex.Base64
    assert sigmat.index == 0
    assert sigmat.qb64 == qsc
    assert sigmat.qb64b == qscb
    assert sigmat.qb2 == b'\xf8\x00\x00'

    # Non-zero index is encoded in the qb64 suffix and qb2 tail byte.
    idx = 5
    qsc = SigCntDex.Base64 + IntToB64(idx, l=2)
    assert qsc == '-AAF'
    qscb = qsc.encode("utf-8")

    sigmat = SigMat(raw=b'', code=SigCntDex.Base64, index=idx)
    assert sigmat.raw == b''
    assert sigmat.code == SigCntDex.Base64
    assert sigmat.index == 5
    assert sigmat.qb64 == qsc
    assert sigmat.qb64b == qscb
    assert sigmat.qb2 == b'\xf8\x00\x05'

    # Test signatures
    sig = (b"\x99\xd2<9$$0\x9fk\xfb\x18\xa0\x8c@r\x122.k\xb2\xc7\x1fp\x0e'm\x8f@"
           b'\xaa\xa5\x8c\xc8n\x85\xc8!\xf6q\x91p\xa9\xec\xcf\x92\xaf)\xde\xca'
           b'\xfc\x7f~\xd7o|\x17\x82\x1d\xd4<o"\x81&\t')
    assert len(sig) == 64

    sig64b = encodeB64(sig)
    sig64 = sig64b.decode("utf-8")
    assert len(sig64) == 88
    assert sig64 == 'mdI8OSQkMJ9r-xigjEByEjIua7LHH3AOJ22PQKqljMhuhcgh9nGRcKnsz5KvKd7K_H9-1298F4Id1DxvIoEmCQ=='

    qsig64 = 'AAmdI8OSQkMJ9r-xigjEByEjIua7LHH3AOJ22PQKqljMhuhcgh9nGRcKnsz5KvKd7K_H9-1298F4Id1DxvIoEmCQ'
    assert len(qsig64) == 88
    qsig64b = qsig64.encode("utf-8")

    qbin = decodeB64(qsig64b)
    assert len(qbin) == 66
    assert qbin == (b'\x00\t\x9d#\xc3\x92BC\t\xf6\xbf\xb1\x8a\x08\xc4\x07!#"\xe6\xbb,q\xf7'
                    b'\x00\xe2v\xd8\xf4\n\xaaX\xcc\x86\xe8\\\x82\x1fg\x19\x17\n\x9e\xcc'
                    b'\xf9*\xf2\x9d\xec\xaf\xc7\xf7\xedv\xf7\xc1x!\xddC\xc6\xf2(\x12`\x90')

    sigmat = SigMat(raw=sig)
    assert sigmat.raw == sig
    assert sigmat.code == SigTwoDex.Ed25519
    assert sigmat.index == 0
    assert sigmat.qb64 == qsig64
    assert sigmat.qb2 == qbin

    # test wrong size of raw
    longsig = sig + bytes([10, 11, 12])
    # NOTE(review): oversized raw is accepted without error here --
    # presumably trimmed internally; only the short case must raise.
    sigmat = SigMat(raw=longsig)

    shortsig = sig[:-3]
    with pytest.raises(ValidationError):
        sigmat = SigMat(raw=shortsig)

    sigmat = SigMat(qb64b=qsig64b)
    assert sigmat.raw == sig
    assert sigmat.code == SigTwoDex.Ed25519
    assert sigmat.index == 0

    sigmat = SigMat(qb64=qsig64)
    assert sigmat.raw == sig
    assert sigmat.code == SigTwoDex.Ed25519
    assert sigmat.index == 0

    sigmat = SigMat(qb64=qsig64b)  # test with bytes not str
    assert sigmat.raw == sig
    assert sigmat.code == SigTwoDex.Ed25519
    assert sigmat.index == 0

    # test wrong size of qb64
    longqsig64 = qsig64 + "ABCD"
    oksigmat = SigMat(qb64=longqsig64)  # extra characters are ignored
    assert len(oksigmat.qb64) == SigSizes[oksigmat.code]

    shortqsig64 = qsig64[:-4]  # too short
    with pytest.raises(ShortageError):
        oksigmat = SigMat(qb64=shortqsig64)

    sigmat = SigMat(qb64=qsig64.encode("utf-8"))  # test bytes not str
    assert sigmat.code == SigTwoDex.Ed25519
    assert sigmat.raw == sig
    assert sigmat.qb64 == qsig64
    assert sigmat.qb64b == qsig64.encode("utf-8")

    sigmat = SigMat(qb2=qbin)
    assert sigmat.raw == sig
    assert sigmat.code == SigTwoDex.Ed25519
    assert sigmat.index == 0

    # Explicit index=5 changes the second character of the qualified form.
    sigmat = SigMat(raw=sig, code=SigTwoDex.Ed25519, index=5)
    assert sigmat.raw == sig
    assert sigmat.code == SigTwoDex.Ed25519
    assert sigmat.index == 5
    qsig64 = 'AFmdI8OSQkMJ9r-xigjEByEjIua7LHH3AOJ22PQKqljMhuhcgh9nGRcKnsz5KvKd7K_H9-1298F4Id1DxvIoEmCQ'
    assert sigmat.qb64 == qsig64
    qbin = (b'\x00Y\x9d#\xc3\x92BC\t\xf6\xbf\xb1\x8a\x08\xc4\x07!#"\xe6\xbb,q\xf7'
            b'\x00\xe2v\xd8\xf4\n\xaaX\xcc\x86\xe8\\\x82\x1fg\x19\x17\n\x9e\xcc'
            b'\xf9*\xf2\x9d\xec\xaf\xc7\xf7\xedv\xf7\xc1x!\xddC\xc6\xf2(\x12`\x90')
    assert sigmat.qb2 == qbin

    sigmat = SigMat(qb64=qsig64)
    assert sigmat.raw == sig
    assert sigmat.code == SigTwoDex.Ed25519
    assert sigmat.index == 5

    sigmat = SigMat(qb2=qbin)
    assert sigmat.raw == sig
    assert sigmat.code == SigTwoDex.Ed25519
    assert sigmat.index == 5

    """ Done Test """
29,367
def test_fail(testdir):
    """Fail example.  Should fail for several reasons.

    Builds a cram-style .t file whose expected output deliberately
    mismatches, then asserts the plugin reports the failure with escaped
    control characters and two diff hunks.
    """
    # NOTE(review): inner indentation of the .t body reconstructed to the
    # two-space cram convention -- confirm against the original fixture.
    testdir.makefile('.t', r"""
Output needing escaping:
  $ printf '\00\01\02\03\04\05\06\07\010\011\013\014\016\017\020\021\022\n'
  foo
  $ printf '\023\024\025\026\027\030\031\032\033\034\035\036\037\040\047\n'
  bar
Wrong output and bad regexes:
  $ echo 1
  2
  $ printf '1\nfoo\n1\n'
  +++ (re)
  foo\ (re)
   (re)
Filler to force a second diff hunk:
Offset regular expression:
  $ printf 'foo\n\n1\n'
  \d (re)
""")
    # Subprocess needed for these weird shell commands
    result = testdir.runpytest()
    assert result.ret != 0
    result.stdout.fnmatch_lines(["test_fail.t F*",
                                 "@@ -1,18 +1,18 @@",
                                 r"+*\x11\x12 (esc)",
                                 r"*\x1e\x1f ' (esc)",
                                 "+ 1",
                                 "@@ -20,5 +20,6 @@",
                                 "+ foo",
                                 "*1 failed*"])
29,368
def check_directories(directories):
    """Checks if all given directories are really directories and on the
    same device.

    Parameters:
      directories (list of strings) - The directories to check.

    Returns:
      The tuple (ok, ok_dirs) where ok is a boolean and ok_dirs a list of
      directories (as strings). If the given directories contained no
      existing directories or it contained at least two directories that
      are not on the same device, then ok is False and ok_dirs is empty.
      Otherwise ok is True and ok_dirs contains all directories in the
      given directories that really exist.
    """
    # Keep only paths that exist and really are directories.
    existing = []
    for path in directories:
        if not os.path.exists(path):
            print("'%s' does not exist. Ignoring." % path)
        elif not os.path.isdir(path):
            print("'%s' is no directory. Ignoring." % path)
        else:
            existing.append(path)

    if not existing:
        print("No existing directory given. Exiting.")
        return False, []

    # All surviving directories must live on the same device.
    last_path = None
    last_device = None
    for path in existing:
        device = os.stat(path).st_dev
        if last_device is not None and device != last_device:
            print("'%s' and '%s' are not on the same device. Exiting." %
                  (path, last_path))
            return False, []
        last_path = path
        last_device = device

    return True, existing
29,369
def pqm_incoming_message_thread(thread_name, dealer_id_string, poll_timeout, no_activity_sleep,
                                action_queue, notification_queue, received_message_queue):
    """
    This thread is responsible for handling incoming messages from PQMs.
    An outside thread can command this thread to take action via the action queue.
    Results and notifications are put in the notification queue (when necessary).
    A PQM connection action will create a dealer socket to the desired PQM.
    This thread will then watch for incoming messages from that PQM.
    Messages received from PQMs will be put on the PQM received message queue.
    Note that this ROUTER/DEALER connection relationship is modeled after PUSH/PULL
    for asynchronous data transmission.

    :param thread_name: label used in all system/notification messages
    :param dealer_id_string: NOTE(review): not referenced in the body; the
        identity used is message.dealer_id_string from connection requests.
    :param poll_timeout: ZMQ poll timeout (per loop iteration)
    :param no_activity_sleep: sleep duration when an iteration did no work
    :param action_queue: inbound commands (connect/disconnect/stop)
    :param notification_queue: outbound status/error messages
    :param received_message_queue: outbound (peer_id, message) tuples
    """
    # Send a system message to notify we have started our thread.
    notification_queue.put(system_messages.SystemThreadStateMessage(thread_name, True, os.getpid()))

    # We will need to track our current dealer sockets and which peer queue manager they map to.
    dealer_socket_to_id_string_map = dict()

    # Create our ZMQ context.
    zmq_context = zmq.Context(1)

    # Use a poller so we can receive with timeouts (receive is blocking; we can poll with a timeout).
    # This will allow us to see if we should shut down the socket in between polls.
    # This will also allow us to check our request queue to add more connections to the poller.
    poller = zmq.Poller()

    # Enter our loop.
    # We have two steps:
    #   1) Accept any data sent to our dealer sockets by PQMs (poll to check for existence).
    #   2) Respond to requests put this thread's action queue.
    is_running = True
    while is_running == True:

        # Track if we have handled any data in each iteration so we know if we should sleep or not to give the CPU some rest.
        handled_data = False

        # 1) Accept any data sent to our dealer sockets by PQMs.
        try:
            # Poll for socket events.
            socket_events_dictionary = dict(poller.poll(poll_timeout))

            # Check each dealer socket in our map.
            for dealer_socket in list(dealer_socket_to_id_string_map.keys()):

                # If the dealer socket has data waiting, process it.
                if dealer_socket in socket_events_dictionary and socket_events_dictionary[dealer_socket] == zmq.POLLIN:

                    # Receive.
                    received_message = SocketWrapper.pull_message(dealer_socket)

                    # Place the PQM's id string and received message in our received queue.
                    # Received messages are handled outside of this thread; we must allow it to continue receiving uninterrupted.
                    received_message_queue.put((dealer_socket_to_id_string_map[dealer_socket], received_message))

                    # Denote data was handled.
                    handled_data = True

        except KeyboardInterrupt:
            # Ignore keyboard interrupts; the main thread will handle as desired.
            pass

        except:
            # When we get an exception, log it and break from our loop.
            notification_queue.put(system_messages.SystemErrorMessage(thread_name, "Failed to receive an incoming PQM message: " + ExceptionFormatter.get_message()))
            break

        # 2) Respond to requests put this thread's action queue.
        action_queue_exception = False
        while True:

            try:
                # Get a new message; initialize to not being handled.
                # Note that we will be routed to our empty message exception if no messages are waiting.
                message = action_queue.get(False)
                message_handled = False

                # Denote data was handled.
                handled_data = True

                # Based on the message type, handle.
                # NOTE(review): "UDPATE" is a typo carried in the constant name itself.
                if message.get_type() == message_types.SYSTEM_PEER_CONNECTION_UDPATE:

                    # If we should connect.
                    if message.connect_flag == True:

                        # This message is requesting we make a DEALER socket connection to the given peer.
                        # Create the dealer socket, setting the identity to this QM.
                        # This will tell the corresponding ROUTER running on the PQM to only send data intended for this QM.
                        dealer_socket = zmq_context.socket(zmq.DEALER)
                        dealer_socket.setsockopt(zmq.IDENTITY, message.dealer_id_string.encode())
                        dealer_socket.connect(message.peer_socket_connection_string)

                        # Send a system message to notify we have opened our socket.
                        notification_queue.put(system_messages.SystemSocketStateMessage(thread_name, True, "Dealer connected to {0} in response to action request.".format(message.peer_socket_connection_string)))

                        # Register in our map.
                        dealer_socket_to_id_string_map[dealer_socket] = message.peer_id_string

                        # Register in our poller.
                        poller.register(dealer_socket, zmq.POLLIN)

                    # If we should disconnect.
                    else:

                        # This message is requesting we remove a DEALER socket connection from the given peer.
                        # Get the dealer socket from the ID string.
                        # NOTE(review): if no socket matches peer_id_string, dealer_socket
                        # stays None and the calls below raise AttributeError -- the outer
                        # except reports it and stops the action loop. Confirm intended.
                        dealer_socket = None
                        for test_socket, test_id_string in list(dealer_socket_to_id_string_map.items()):
                            if message.peer_id_string == test_id_string:
                                dealer_socket = test_socket
                                break

                        # Close and remove from our poller.
                        dealer_socket.setsockopt(zmq.LINGER, 0)
                        dealer_socket.close()
                        poller.unregister(dealer_socket)

                        # Remove the dealer socket.
                        del dealer_socket_to_id_string_map[dealer_socket]

                        # Send a system message to notify we have closed the socket.
                        notification_queue.put(system_messages.SystemSocketStateMessage(thread_name, False, "Dealer disconnected from {0} in response to action request.".format(message.peer_socket_connection_string)))

                    # Denote the message has been handled.
                    message_handled = True

                elif message.get_type() == message_types.SYSTEM_STOP_THREAD:
                    notification_queue.put(system_messages.SystemNotificationMessage(thread_name, "Shutting down per main thread request"))
                    message_handled = True
                    is_running = False

                # If the message hasn't been handled, notify.
                if message_handled == False:
                    notification_queue.put(system_messages.SystemErrorMessage(thread_name, "Action queue message was not handled: {0}".format(message), thread_name))

            except EmptyQueueException:
                # We are looping as we read from our queue object without waiting; as soon as we hit an empty message, we should break from the loop.
                break

            except KeyboardInterrupt:
                # Ignore keyboard interrupts; the main thread will handle as desired.
                pass

            except:
                # When we get an exception, log it and break from our loop.
                notification_queue.put(system_messages.SystemErrorMessage(thread_name, "Action queue message processing raised exception: " + ExceptionFormatter.get_message()))
                try:
                    notification_queue.put(system_messages.SystemErrorMessage(thread_name, str(message)))
                except:
                    pass
                action_queue_exception = True
                break

        # If we had an action queue exception, we must exit out of our thread loop.
        if action_queue_exception == True:
            break

        # If we handled no messages, sleep.
        if handled_data == False:
            time.sleep(no_activity_sleep)

    # Close all sockets.
    for dealer_socket in list(dealer_socket_to_id_string_map.keys()):

        # Close and remove from our poller.
        dealer_socket.setsockopt(zmq.LINGER, 0)
        dealer_socket.close()
        poller.unregister(dealer_socket)

        # Send a system message to notify we have closed the socket.
        notification_queue.put(system_messages.SystemSocketStateMessage(thread_name, False, "Dealer: {0}".format(dealer_socket_to_id_string_map[dealer_socket])))

    # Send a system message to notify we have shut down.
    notification_queue.put(system_messages.SystemThreadStateMessage(thread_name, False))
29,370
def download_accessions(force_download=False):
    """Downloads the compound accessions

    :param bool force_download: If true, overwrites a previously cached file
    :rtype: str
    """
    # Reuse the cached file unless the caller explicitly forces a refresh.
    cached = os.path.exists(ACCESSION_DATA_PATH) and not force_download
    if cached:
        log.info('using cached data at %s', ACCESSION_DATA_PATH)
    else:
        log.info('downloading %s to %s', ACCESSION_URL, ACCESSION_DATA_PATH)
        urlretrieve(ACCESSION_URL, ACCESSION_DATA_PATH)
    return ACCESSION_DATA_PATH
29,371
def ssh_zero_retry():
    """
    Fixture to provide quick access to changing the ssh retry count.
    This is useful for speeding up mocked ssh commands.
    """
    # Remember the configured retry count so it can be restored afterwards.
    saved_retries = environment.EPYTHON_SSH_RETRIES
    # Disable retries for the duration of the test.
    environment.EPYTHON_SSH_RETRIES = 0

    yield

    # Restore the pre-fixture value.
    environment.EPYTHON_SSH_RETRIES = saved_retries
29,372
def parseYear(year, patterns):
    """Return a string representing a year based on the input and a list of
    possible date patterns. If no pattern matches, the input is returned
    unchanged.

    >>> parseYear('2021', ['%Y'])
    '2021'
    >>> parseYear('2021', ['(%Y)', '%Y'])
    '2021'
    >>> parseYear('(2021)', ['%Y', '(%Y)'])
    '2021'
    """
    for pattern in patterns:
        try:
            # First matching pattern wins.
            return str(datetime.strptime(year, pattern).date().year)
        except ValueError:
            continue
    # No pattern matched: fall back to the raw input.
    return year
29,373
def fit_oxy_nii(target_row, velocity_column = None, data_column = None, IP = "center", **kwargs):
    """
    Fits oxygen bright line to spectrum for future subtraction

    Parameters
    ----------
    target_row: `SkySurvey` row
        Row to match spectra to
    velocity_column: 'str', optional, must be keyword
        Name of velocity column, default of "VELOCITY_GEO"
    data_column: 'str', optional, must be keyword
        Name of data column, default of "DATA"
    IP: 'str', optional, must be keyword
        passed through to c_component (default "center")
    **kwargs: dict
        keywords passed to Model.fit()
    """
    # Fall back to the default column names when not supplied.
    if velocity_column is None:
        velocity_column = "VELOCITY_GEO"
    if data_column is None:
        data_column = "DATA"

    def bright_atm(x, baseline, amp, mean, std):
        # Constant baseline plus a single line component from c_component.
        g = c_component(amp, mean, std, IP = IP)
        y = np.zeros_like(x)
        y += baseline
        y += g(x)
        return y

    bright_atm_model = Model(bright_atm)

    # Initial guesses: baseline from the spectrum minimum, amplitude from
    # the maximum, line center near -281.3 (bright line position).
    params = Parameters()
    params.add("baseline", value = np.nanmin(target_row[data_column]))
    params.add("amp", value = np.nanmax(target_row[data_column]))
    params.add("mean", value = -281.3)
    params.add("std", value = 3)

    # Restrict the fit to the velocity window [-315, -215] around the line.
    exclusion_mask = (target_row[velocity_column] < -315) | (target_row[velocity_column] > -215)

    res = bright_atm_model.fit(target_row[data_column][np.invert(exclusion_mask)],
                               x = target_row[velocity_column][np.invert(exclusion_mask)],
                               params = params,
                               nan_policy = "omit",
                               **kwargs)

    return res
29,374
def get(url, accept=None, headers=None):
    """
    Make a basic HTTP GET call to CMR.

    Parameters:
        url (string): resource to get
        accept (string): encoding of the returned data, some form of json is expected
        headers (dictionary): HTTP headers to apply
    Returns:
        parsed JSON (dict) on success -- bare lists are wrapped as
        {"hits": n, "items": [...]}; for 204 a dict with only
        'http-headers'; on errors a parsed JSON error body with
        'code'/'reason' added, or the raw body when it is not JSON.
    """
    logger.debug(" Headers->CMR= %s", headers)
    req = urllib.request.Request(url)
    if accept is not None:
        apply_headers_to_request(req, {'Accept': accept})
    apply_headers_to_request(req, headers)
    try:
        #pylint: disable=R1732 # the mock code does not support this in tests
        resp = urllib.request.urlopen(req)
        response = resp.read()
        raw_response = response.decode('utf-8')
        if resp.status == 200:
            obj_json = json.loads(raw_response)
            if isinstance(obj_json, list):
                # Normalize a bare list into the standard response envelope.
                data = obj_json
                obj_json = {"hits": len(data), "items": data}
            head_list = {}
            for head in resp.getheaders():
                head_list[head[0]] = head[1]
            if logger.getEffectiveLevel() == logging.DEBUG:
                # Mask credentials before logging response headers.
                stringified = str(common.mask_dictionary(head_list, ["cmr-token", "authorization"]))
                logger.debug(" CMR->Headers = %s", stringified)
        elif resp.status == 204:
            # No Content: return only the response headers.
            obj_json = {}
            head_list = {}
            for head in resp.getheaders():
                head_list[head[0]] = head[1]
            obj_json['http-headers'] = head_list
        else:
            # Other success-ish statuses: parse if it looks like JSON.
            if raw_response.startswith("{") and raw_response.endswith("}"):
                return json.loads(raw_response)
            return raw_response
        return obj_json
    except urllib.error.HTTPError as exception:
        raw_response = exception.read()
        try:
            obj_json = json.loads(raw_response)
            obj_json['code'] = exception.code
            obj_json['reason'] = exception.reason
            return obj_json
        except json.decoder.JSONDecodeError:
            # Fix: previously the JSONDecodeError object itself was returned
            # and the intended `return raw_response` line was unreachable.
            return raw_response
29,375
def image_reproject_from_healpix_to_file(source_image_hdu, target_image_hdu_header, filepath=None):
    """ reproject from healpix image to normal wcs image

    :param source_image_hdu: the HDU object of source image (healpix)
    :param target_image_hdu_header: the HDU object of target image (wcs)
    :param filepath: the output file path; when None the result is returned instead
    :return: array, footprint (only when no filepath is given)
    """
    reprojected, coverage = reproject_from_healpix(source_image_hdu,
                                                   target_image_hdu_header)
    if filepath is None:
        # No output path: hand the result back to the caller.
        return reprojected, coverage
    # Persist to disk; clobber=True overwrites an existing file.
    fits.writeto(filepath, reprojected, target_image_hdu_header, clobber=True)
29,376
def flux_reddening_wl(wl, flux_wl, ebv, Rv=None, law=LawFitz, mode=ReddeningLaw.MW):
    """
    Apply extinction curves to flux(lambda) values.

    :param wl: [A]
    :param flux_wl: [ergs s^-1 cm^-2 A^-1]
    :param ebv: E(B-V)
    :param Rv: R_V; when None the law's default for `mode` is used
    :param law: the variant of extinction curves
    :param mode: type of extinction curves (MW, LMC, SMC)
    :return: reddening flux
    """
    rv = Rv if Rv is not None else law.Rv[mode]
    extinction = law.Almd(wl, ebv, Rv=rv)
    # Convert magnitudes of extinction into a multiplicative flux factor.
    return flux_wl * 10 ** (-0.4 * extinction)
29,377
def test_cray_artifacts_create(cli_runner):
    """ Test cray artifacts create ... """
    runner, cli, _ = cli_runner
    # Invoking without the required bucket name must exit with usage error 2.
    outcome = runner.invoke(cli, ['artifacts', 'create'])
    assert outcome.exit_code == 2
29,378
def genRandomString(
        size: int = 5,
        upper: bool = False,
        lower: bool = False,
        mix: bool = False,
        numbers: bool = True) -> str:
    """
    Generates a random string of the given size and content.

    :param size: Size of the desired string.
    :param upper: Uppercase letters only. Default False.
    :param lower: Lowercase letters only. Default False.
    :param mix: Mix lowercase and uppercase letters. Default False.
    :param numbers: Numbers are included in the string. Default True.
    :return: String
    :raises ValueError: if all letter flags are False and numbers is False,
        leaving no characters to choose from.
    """
    chars = ''
    if upper:
        chars = string.ascii_uppercase
    elif lower:
        chars = string.ascii_lowercase
    elif mix:
        chars = string.ascii_letters
    if numbers:
        chars = chars + string.digits
    if not chars:
        # Previously this fell through to choice('') and crashed with an
        # obscure IndexError; fail loudly with a clear message instead.
        raise ValueError("no character classes selected for random string")
    return ''.join(choice(chars) for _ in range(size))
29,379
def process_whole_image(model, images, num_crops=4, receptive_field=61, padding=None):
    """Slice images into num_crops * num_crops pieces, and use the model to
    process each small image.

    Args:
        model: model that will process each small image
        images: numpy array that is too big for model.predict(images)
        num_crops: number of slices for the x and y axis to create sub-images
        receptive_field: receptive field used by model, required to pad images
        padding: type of padding for input images, one of {'reflect', 'zero'}

    Returns:
        model_output: numpy array containing model outputs for each sub-image
    """
    # Resolve axis indices for either data layout.
    if K.image_data_format() == 'channels_first':
        channel_axis = 1
        row_axis = len(images.shape) - 2
        col_axis = len(images.shape) - 1
    else:
        channel_axis = len(images.shape) - 1
        row_axis = len(images.shape) - 3
        col_axis = len(images.shape) - 2

    # Infer the padding mode from the model's own padding layers when the
    # caller did not specify one.
    if not padding:
        padding_layers = get_padding_layers(model)
        if padding_layers:
            padding = 'reflect' if 'reflect' in padding_layers[0] else 'zero'

    if str(padding).lower() not in {'reflect', 'zero'}:
        raise ValueError('Expected `padding_mode` to be either `zero` or '
                         '`reflect`.  Got ', padding)

    # Split the frames into quarters, as the full image size is too large
    crop_x = images.shape[row_axis] // num_crops
    crop_y = images.shape[col_axis] // num_crops

    # Set up receptive field window for padding
    win_x, win_y = (receptive_field - 1) // 2, (receptive_field - 1) // 2

    # instantiate matrix for model output
    model_output_shape = tuple(list(model.layers[-1].output_shape)[1:])
    if channel_axis == 1:
        output = np.zeros((images.shape[0], model_output_shape[1], *images.shape[2:]))
    else:
        output = np.zeros((*images.shape[0:-1], model_output_shape[-1]))

    expected_input_shape = get_cropped_input_shape(images, num_crops, receptive_field)
    if expected_input_shape != model.input_shape[1:]:
        raise ValueError('Expected model.input_shape to be {}. Got {}. Use '
                         '`get_cropped_input_shape()` to recreate your model '
                         ' with the proper input_shape'.format(
                             expected_input_shape, model.input_shape[1:]))

    # pad the images only in the x and y axes
    pad_width = []
    for i in range(len(images.shape)):
        if i == row_axis:
            pad_width.append((win_x, win_x))
        elif i == col_axis:
            pad_width.append((win_y, win_y))
        else:
            pad_width.append((0, 0))

    if str(padding).lower() == 'reflect':
        padded_images = np.pad(images, pad_width, mode='reflect')
    else:
        padded_images = np.pad(images, pad_width, mode='constant', constant_values=0)

    for i in range(num_crops):
        for j in range(num_crops):
            # Crop bounds: input slice includes the receptive-field halo.
            e, f = i * crop_x, (i + 1) * crop_x + 2 * win_x
            g, h = j * crop_y, (j + 1) * crop_y + 2 * win_y

            # ndim == 5 means an extra (e.g. time/frame) dimension is present.
            if images.ndim == 5:
                if channel_axis == 1:
                    predicted = model.predict(padded_images[:, :, :, e:f, g:h])
                else:
                    predicted = model.predict(padded_images[:, :, e:f, g:h, :])
            else:
                if channel_axis == 1:
                    predicted = model.predict(padded_images[:, :, e:f, g:h])
                else:
                    predicted = model.predict(padded_images[:, e:f, g:h, :])

            # if using skip_connections, get the final model output
            if isinstance(predicted, list):
                predicted = predicted[-1]

            # if the model uses padding, trim the output images to proper shape
            # if model does not use padding, images should already be correct
            if padding:
                predicted = trim_padding(predicted, win_x, win_y)

            # Output slice excludes the halo.
            a, b = i * crop_x, (i + 1) * crop_x
            c, d = j * crop_y, (j + 1) * crop_y
            if images.ndim == 5:
                if channel_axis == 1:
                    output[:, :, :, a:b, c:d] = predicted
                else:
                    output[:, :, a:b, c:d, :] = predicted
            else:
                if channel_axis == 1:
                    output[:, :, a:b, c:d] = predicted
                else:
                    output[:, a:b, c:d, :] = predicted

    return output
29,380
def compute_covariance(model, xy, XY=None):
    """Returns the covariance matrix for a given set of data.

    :param model: callable mapping a distance array to covariance values
    :param xy: coordinate array of sample points
    :param XY: optional second coordinate array; when given the cross
        distances between xy and XY are used
    :return: covariance matrix produced by `model`
    """
    if xy.size == 1:
        # A single point has zero distance to itself.
        distances = 0
    elif XY is None:
        # Symmetric pairwise distance matrix within xy.
        distances = squareform(pdist(xy))
    else:
        # Cross distances between the two point sets.
        distances = cdist(xy, XY)
    return model(distances)
29,381
def prob1(cur: sqlite3.Cursor) -> pd.DataFrame:
    """List how many stops are in the database.

    Parameters
    ----------
    cur (sqlite3.Cursor) : The cursor for the database we're accessing.

    Returns
    -------
    (pd.DataFrame) : Table with the solution.
    """
    # cursor.execute returns the cursor itself, so the call can be chained.
    rows = cur.execute("SELECT COUNT(*) FROM stops;").fetchall()
    return pd.DataFrame(rows)
29,382
def q_fn(x):
    """
    The Q-function assesses all possible actions that can be taken, given a state.
    Two layer feed forward neural network. All layers are fully connected,
    biases initialized with 0. The constants above define the layer sizes.

    :param x: Batch input tensor to the network, shape [batch, INPUT_SIZE].
    :return: Raw Q-values per action, shape [batch, NUM_ACTIONS] (no softmax
        is applied here despite the original phrasing -- the final layer is
        a plain affine transform).
    """
    # Hidden layer 1: sigmoid(x @ W + b), weights ~ N(0, 1/DENSE1_UNITS).
    with tf.variable_scope('dense1') as scope:
        weights = tf.get_variable('weights', [INPUT_SIZE, DENSE1_UNITS], dtype=tf.float32,
                                  initializer=tf.truncated_normal_initializer(stddev=1.0 / DENSE1_UNITS))
        biases = tf.get_variable('biases', shape=[DENSE1_UNITS], dtype=tf.float32,
                                 initializer=tf.constant_initializer(0.0, dtype=tf.float32))
        pre_activation = tf.add(tf.matmul(x, weights), biases, name='pre_activation')
        dense1 = tf.sigmoid(pre_activation, name=scope.name)

    # Hidden layer 2: same structure as layer 1.
    with tf.variable_scope('dense2') as scope:
        weights = tf.get_variable('weights', [DENSE1_UNITS, DENSE2_UNITS], dtype=tf.float32,
                                  initializer=tf.truncated_normal_initializer(stddev=1.0 / DENSE2_UNITS))
        biases = tf.get_variable('biases', shape=[DENSE2_UNITS], dtype=tf.float32,
                                 initializer=tf.constant_initializer(0.0, dtype=tf.float32))
        pre_activation = tf.add(tf.matmul(dense1, weights), biases, name='pre_activation')
        dense2 = tf.sigmoid(pre_activation, name=scope.name)

    # Output layer: linear Q-value per action (no activation).
    with tf.variable_scope('actions') as scope:
        weights = tf.get_variable('weights', [DENSE2_UNITS, NUM_ACTIONS], dtype=tf.float32,
                                  initializer=tf.truncated_normal_initializer(stddev=1.0 / NUM_ACTIONS))
        biases = tf.get_variable('biases', shape=[NUM_ACTIONS], dtype=tf.float32,
                                 initializer=tf.constant_initializer(0.0, dtype=tf.float32))
        action_q = tf.add(tf.matmul(dense2, weights), biases, name='action_q_value')

    return action_q
29,383
def getPlayer(env, name, decoder):
    """Get user's player data.

    :param env: environment handle passed through to getPlayers
    :param name: player name to look up
    :param decoder: decoder passed through to getPlayers
    :return: the player's data, or False when the name is unknown
    """
    # dict.get with an explicit False default preserves the historical
    # "False means not found" contract and avoids the double lookup of
    # `name in players.keys()` followed by `players[name]`.
    return getPlayers(env, decoder).get(name, False)
29,384
def test_esd_lbaas_stcp_lbaas_persist(track_bigip_cfg, ESD_Pairs_Experiment):
    """Validate application of a pair of tags.

    Delegates to the shared apply/validate/remove/validate helper using the
    ESD_Pairs_Experiment fixture; track_bigip_cfg is only needed for its
    fixture side effects.
    """
    apply_validate_remove_validate(ESD_Pairs_Experiment)
29,385
def set_nonblocking_pipe(pipe):
    # type: (typing.Any) -> None
    """Set PIPE unblocked to allow polling of all pipes in parallel.

    On POSIX this flips O_NONBLOCK via fcntl; on Windows it calls
    SetNamedPipeHandleState with PIPE_NOWAIT through ctypes. On any other
    platform the function is a no-op.
    """
    descriptor = pipe.fileno()  # pragma: no cover

    if _posix:  # pragma: no cover
        # Get flags
        flags = fcntl.fcntl(descriptor, fcntl.F_GETFL)
        # Set nonblock mode
        fcntl.fcntl(descriptor, fcntl.F_SETFL, flags | os.O_NONBLOCK)
    elif _win:  # pragma: no cover
        # Declare the Win32 signature so ctypes marshals arguments correctly.
        # noinspection PyPep8Naming
        SetNamedPipeHandleState = windll.kernel32.SetNamedPipeHandleState
        SetNamedPipeHandleState.argtypes = [
            wintypes.HANDLE,
            wintypes.LPDWORD,
            wintypes.LPDWORD,
            wintypes.LPDWORD
        ]
        SetNamedPipeHandleState.restype = wintypes.BOOL
        # noinspection PyPep8Naming
        PIPE_NOWAIT = wintypes.DWORD(0x00000001)
        # Translate the CRT file descriptor into an OS pipe handle.
        handle = msvcrt.get_osfhandle(descriptor)
        windll.kernel32.SetNamedPipeHandleState(
            handle, ctypes.byref(PIPE_NOWAIT), None, None
        )
29,386
def step_impl(context):
    """Check that context.last_executor_id is executed.

    Looks up the executor recorded by a previous step and asserts its
    `executed` flag was set to True.
    """
    executor = context.manager.get_executor(context.last_executor_id)
    assert executor.executed is True
29,387
def get_config_errors(conf, filename="<no name>"):
    """
    Validate a configuration object and return the list of errors found.

    :param conf: parsed configuration; expected to be a dict with a
        'db_objects' entry
    :param filename: name used when formatting error locations
    :return: list of located error message strings, sorted by line number
    """
    rv = []

    # Give a clearer error message than what jsonschema would give
    # Something like: None is not of type 'object'
    if not isinstance(conf, dict):
        msg = "config must be an object containing 'db_objects'"
        rv.append(located_message(None, filename, msg))
        return rv

    # Schema-level errors from the jsonschema validator.
    errors = list(validator.iter_errors(conf))
    for error in errors:
        loc = location_from_error(conf, error)
        rv.append(located_message(loc, filename, error.message))

    # Per-object rule errors; non-dict entries were already reported above.
    for obj in conf.get("db_objects", ()):
        if isinstance(obj, dict):
            rv.extend(_get_rule_errors(obj, filename))

    # sort by line number (extracted from the ":<line>" part of the message;
    # messages without one sort first)
    def lineno(s):
        m = re.search(r":(\d+)", s)
        return int(m.group(1)) if m is not None else 0

    rv.sort(key=lineno)

    return rv
29,388
def process_vcf( info ): """ pass izip object of line object and other needed vars info[0] = list of vcf lines from VCF object iterator. info[1] = clf object info[2] = dataset dictionary info[3] = filter arg supplied by user info[4] = min classification frequency supplied by user (defaults to None) """ #sys.stderr.write("... running process VCF with job id %d \n" %(os.getpid() ) ) #parse the args to function line_list = info[0] #list of lines from VCF obj clf = info[1] #randomForest object dataset = info[2] #dataset with class names filter = info[3] #filter arg supplied by user minclassfreq = info[4] #iterate over lines in the chunked data return_list = [] for line in line_list: line = line.strip().split("\t") vdat = parse_vcf_data( line[7] ) #parse all of vcf appended data filter_bool = run_filters( vdat, filtering=filter ) #boolean of whether line info passes filters if filter_bool: _x = vdat[ 'AT' ].split(",") #create list from data in 'AT' field _x = _x[1:] #results = classify_data( _x, clf, dataset['target_names'] ) results = classify_data( _x, clf, dataset['target_names'], minclassfreq ) line[7] = line[7] + ";" + results #append data to correct vcf column #print "\t".join( line ) #print results to stdout print_line = "\t".join( line ) return_list.append( print_line ) else: return_list.append( None ) #return the full list of updated line data return( return_list )
29,389
def createLayerOnFrameDepend(job, layer, onjob, onlayer, onframe):
    """Creates a layer on frame dependency
    @type job: string
    @param job: the name of the dependant job
    @type layer: string
    @param layer: the name of the dependant layer
    @type onjob: string
    @param onjob: the name of the job to depend on
    @type onlayer: string
    @param onlayer: the name of the layer to depend on
    @type onframe: int
    @param onframe: the number of the frame to depend on
    @rtype: Depend
    @return: the created dependency"""
    # Validate every argument before touching the API, in the same
    # order the individual checks were originally performed.
    for value, err in ((job, ERR_INVALID_ER_JOB),
                       (layer, ERR_INVALID_ER_LAYER),
                       (onjob, ERR_INVALID_ON_JOB),
                       (onlayer, ERR_INVALID_ON_LAYER),
                       (onframe, ERR_INVALID_ON_FRAME)):
        __is_valid(value, err)

    logger.debug(
        "creating lof depend from %s/%s to %s/%s-%04d",
        job, layer, onjob, onlayer, onframe)

    er_layer = opencue.api.findLayer(job, layer)
    on_frame = opencue.api.findFrame(onjob, onlayer, onframe)
    return er_layer.createDependencyOnFrame(on_frame)
29,390
def compute_task1_f1_score(truth, solutions):
    """
    compute f1 score for task 1
    :param truth: list of ground truth values for all problem-ids
    :param solutions: list of solutions for all problem-ids
    :return: f1 score
    """
    # Pull out the 'multi-author' entries shared by truth and solutions,
    # then score them with micro-averaged F1.
    y_true, y_pred = extract_task_results(truth, solutions, 'multi-author')
    return f1_score(y_true, y_pred, average='micro')
29,391
def multiply(t1, t2):
    """
    Multiplies (expands) two binary expressions t1 and t2 based on the
    distributive rule.

    Args:
        t1 (str): first binary expression, terms separated by '+'
        t2 (str): second binary expression, terms separated by '+'

    Returns:
        A string with the expanded product, terms separated by '+'.
        When a term is multiplied by itself the idempotent law is
        applied (m * m == m), so the term appears once, unconcatenated.
    """
    terms1 = t1.split('+')
    terms2 = t2.split('+')

    # Bug fix: the original detected the last term pair with
    # t1.index(m)/t2.index(n), which returns the FIRST occurrence of a
    # value; duplicate terms (e.g. "a+a") therefore broke the
    # trailing-'+' logic and produced output like "ab+ab+".  Building
    # the term list and joining once avoids positional bookkeeping.
    parts = []
    for m in terms1:
        for n in terms2:
            # Idempotent law: m * m == m; otherwise concatenate literals.
            parts.append(m if m == n else m + n)
    return '+'.join(parts)
29,392
def source_remove_all(obj_type, obj_id, name, analyst=None):
    """
    Remove all instances of a source from a top-level object.

    :param obj_type: The CRITs type of the top-level object.
    :type obj_type: str
    :param obj_id: The ObjectId to search for.
    :type obj_id: str
    :param name: The name of the source.
    :type name: str
    :param analyst: The user performing the removal.
    :type analyst: str
    :returns: dict with keys "success" (boolean) and "message" (str)
    """
    obj = class_from_id(obj_type, obj_id)
    if not obj:
        return {'success': False,
                'message': 'Unable to find object in database.'}
    try:
        # remove_all=True strips every occurrence of this source name.
        result = obj.remove_source(source=name, remove_all=True)
        obj.save(username=analyst)
        return result
    # Bug fix: "except ValidationError, e" is Python-2-only syntax and a
    # SyntaxError on Python 3; "as" is valid on Python 2.6+ and 3.x.
    except ValidationError as e:
        return {'success': False, 'message': e}
29,393
def driver(dbname):
    """
    Determine driver module

    :Parameters:
      `dbname` : ``str``
        DB name (section token in db.conf)

    :Return: Driver module
    :Rtype: ``module``

    :Exceptions:
      - `DBConfigurationError` : DB not configured
      - `KeyError` : DB name not found
      - `ImportError` : Driver not found
    """
    # Pure delegation: the module-level `_connection` object resolves the
    # configured driver module for this DB section; the exceptions listed
    # above are presumably raised inside that call -- confirm in _connection.
    return _connection.driver(dbname)
29,394
def d1tile_x_d2(d1: Union[float, np.ndarray], d2: np.ndarray) -> np.ndarray:
    """
    Create array of repeated values with dimensions that match those of energy array
    Useful to multiply frequency-dependent values to frequency-time matrices

    :param d1: scalar or 1D input vector, nominally frequency/scale multipliers;
        for a 2D ``d2`` its length must match ``d2.shape[0]``
    :param d2: 1D or 2D array, first dimension should be the same as d1
    :return: elementwise product of the tiled ``d1`` with ``d2``
    :raises TypeError: if ``d2`` has more than 2 dimensions, or if the tiled
        ``d1`` does not match ``d2``'s shape
    """
    shape_out = d2.shape
    if len(shape_out) == 1:
        # Repeat d1 along the single axis of d2 (intended for scalar d1).
        d1_matrix = np.tile(d1, (shape_out[0]))
    elif len(shape_out) == 2:
        # Tile d1 across columns, then transpose so rows align with d2's rows.
        d1_matrix = np.tile(d1, (shape_out[1], 1)).T
    else:
        # Bug fix: the original formatted d1.shape here, which (a) named the
        # wrong array -- d2 is the offending operand -- and (b) raised
        # AttributeError instead of TypeError when d1 is a scalar float.
        raise TypeError('Cannot handle an array of shape {}.'.format(shape_out))

    if d1_matrix.shape != d2.shape:
        # np.shape works for scalars too (returns ()), unlike the .shape
        # attribute access the original error path relied on.
        raise TypeError('Tiled d1 of shape {} does not match d2 of shape {}.'
                        .format(np.shape(d1_matrix), d2.shape))
    return d1_matrix * d2
29,395
def load_ascii_font(font_name):
    """
    Load ascii font from a txt file.

    Parameter
    ---------
    font_name : name of the font (str).

    Return
    ------
    font : font face from the file (dic).

    Version
    -------
    Specification : Nicolas Van Bossuyt (v1. 27/02/17)

    Notes
    -----
    Load font in figlet format (http://www.figlet.org).
    """
    # Character set covered by a figlet font, listed in figlet's standard
    # order (printable ASCII 32-126 followed by the German extension glyphs).
    # NOTE(review): the body appears truncated here -- `chars` is never used,
    # `font_name` is never read, and no dict is returned despite the
    # docstring's contract. Confirm against the complete source file.
    chars = " !\"#$%&'()*+,-./0123456789:;<=>?@ABCDEFGHIJKLMNOPQRSTUVWXYZ[\]^_'abcdefghijklmnopqrstuvwxyz{|}~ÄÖÜäöüβ"
29,396
def get_all_gradients_for_Q4( theta, X, Y ): """ Do the same thing as Q(iv) but it is actually only for storing and observing the sample gradient and whole gradient for the Q(iv) step Output the sample grdient and whole grdient data """ # Get difference of uclidean distance def get_difference( old_theta, new_theta ): difference_mat = old_theta - new_theta difference_square = np.multiply( difference_mat, difference_mat ) difference = math.sqrt( np.sum( difference_square ) ) return difference # Contains all gradient_i grad_i_val_observe = [] grad_val_observe = [] # Set random seed random.seed( 1 ) # Get updated theta def get_new_theta( old_theta, eta ): # Code for using single sample gradient random_i = random.randint( 0, X.shape[0] - 1 ) grad_i_val = get_grad_f_i( old_theta, X, Y, random_i ) # Get the whole gradient to observe grad_val = get_grad_f( old_theta, X, Y ) # Scale by the size N (multiply by 10,000) grad_i_val = grad_i_val * X.shape[0] # Store grad_val to observe Q(v) grad_i_val_list = grad_i_val.tolist() grad_i_val_list = grad_i_val_list[0] grad_val_list = grad_val.tolist() grad_val_list = grad_val_list[0] grad_i_val_observe.append( grad_i_val_list ) grad_val_observe.append( grad_val_list ) new_theta = old_theta - ( eta * grad_i_val ) return new_theta ############################################################ precision = 0.01 # eta = 0.000000008 # ############################################################ old_theta = theta new_theta = get_new_theta( old_theta, eta ) difference = get_difference( old_theta, new_theta ) while difference > precision: old_theta = new_theta new_theta = get_new_theta( old_theta, eta ) # Get new difference difference = get_difference( old_theta, new_theta ) value = op_func( new_theta, X, Y ) # Showing information... print print "difference: " + str( difference ) print "theta: " print new_theta print "function value: " + str( value ) return grad_i_val_observe, grad_val_observe
29,397
def static(ctx):
    """Run static analysis."""
    # The original f-string had no placeholders, so a plain literal is
    # byte-for-byte identical output.
    print('🎉🌩️ All static analysis passed.')
29,398
def findSubsetIndices(min_lat, max_lat, min_lon, max_lon, lats, lons):
    """Find the grid indices that bound the requested lat/lon box.

    Returns a float64 array [minI, maxI, minJ, maxJ], where I indexes the
    `lons` axis and J indexes the `lats` axis, each entry being the index
    of the grid point closest to the corresponding box edge.
    """
    res = np.zeros((4), dtype=np.float64)

    def closest(points, target, base):
        # Build (squared distance, value, index) triples with indices
        # counted from `base`; min() on tuples reproduces the original
        # sort-then-take-first tie-breaking (distance, then value, then
        # index). Max-edge searches count from 1, min-edge from 0, exactly
        # as the original `index` / `index - 1` bookkeeping did.
        return min((np.dot(target - p, target - p), p, i)
                   for i, p in enumerate(points, start=base))

    maxJ = closest(lats, max_lat, 1)[2]
    minJ = closest(lats, min_lat, 0)[2]
    maxI = closest(lons, max_lon, 1)[2]
    minI = closest(lons, min_lon, 0)[2]

    # Save final product: order is minI, maxI, minJ, maxJ.
    res[0] = minI
    res[1] = maxI
    res[2] = minJ
    res[3] = maxJ
    return res
29,399