content
stringlengths
22
815k
id
int64
0
4.91M
def divide(x, y):
    """Divide ``x`` by ``y`` and round the quotient to the nearest integer.

    Note: Python's ``round`` uses banker's rounding (half-to-even).
    Raises ZeroDivisionError when ``y`` is 0, like plain division.
    """
    quotient = x / y
    return round(quotient)
24,400
def mark_bad_wavelength_groups(src_file: H5File, filtered_paths: List[str]) -> None:
    """Set the ``isbad`` attribute on every wavelength group.

    A group is marked bad (``isbad = True``) when at least one signal
    dataset below it appears in ``filtered_paths``; otherwise ``isbad``
    is set to ``False``.

    Args:
        src_file (H5File): The file containing the experimental data.
        filtered_paths (List[str]): A list of paths to signal datasets
            that have been caught by filters due to noise, etc.
    """
    bad_paths = set(filtered_paths)
    for wav_path in rawnav.wavelengths_under_rounds_paths(src_file):
        paths_below = dataset_paths_below(src_file, wav_path)
        # A single filtered dataset is enough to taint the whole group.
        # (Replaces a redundant if/else pair and a dead trailing return.)
        src_file[wav_path].attrs['isbad'] = not bad_paths.isdisjoint(paths_below)
24,401
def create_root_ca_cert(root_common_name, root_private_key, days=365):
    """
    This method will create a root ca certificate.

    :param root_common_name: The common name for the certificate (text).
    :param root_private_key: The private key for the certificate.
    :param days: The number of days for which the certificate is valid.
        The default is 1 year or 365 days.

    :return: The root certificate.
    :rtype: :class:`x509.Certificate`
    """
    # NOTE: the output path is hard-coded to the demo CA layout.
    file_root_certificate = "demoCA/newcerts/ca_cert.pem"
    root_public_key = root_private_key.public_key()
    # FIX: the original did str.encode(name).decode("utf-8"), a no-op
    # round-trip for any str input; pass the common name directly.
    subject = x509.Name(
        [x509.NameAttribute(NameOID.COMMON_NAME, root_common_name)]
    )
    builder = create_cert_builder(
        subject=subject, issuer_name=subject, public_key=root_public_key,
        days=days, is_ca=True
    )
    root_cert = builder.sign(
        private_key=root_private_key, algorithm=hashes.SHA256(),
        backend=default_backend()
    )
    # Persist the PEM-encoded certificate alongside returning it.
    with open(file_root_certificate, "wb") as f:
        f.write(root_cert.public_bytes(serialization.Encoding.PEM))
    return root_cert
24,402
def hist1d(arr, bins=None, amp_range=None, weights=None, color=None, show_stat=True, log=False,
           figsize=(6,5), axwin=(0.15, 0.12, 0.78, 0.80),
           title=None, xlabel=None, ylabel=None, titwin=None):
    """Make a 1-D histogram figure from the values in ``arr``.

    Values are binned into ``bins`` bins (default 100) over the range
    ``amp_range=(amin, amax)``.  Returns ``(fig, axhi, hi)`` where ``hi``
    is the ``(weights, bins, patches)`` triple from ``Axes.hist``, or
    ``(None, None, None)`` when ``arr`` is empty.
    """
    if arr.size==0: return None, None, None
    fig = plt.figure(figsize=figsize, dpi=80, facecolor='w', edgecolor='w', frameon=True)
    # Window title: explicit titwin wins, else fall back to the plot title.
    # NOTE(review): fig.canvas.set_window_title is deprecated in newer
    # matplotlib (moved to fig.canvas.manager) — confirm target version.
    if titwin is not None: fig.canvas.set_window_title(titwin)
    elif title is not None: fig.canvas.set_window_title(title)
    axhi = fig.add_axes(axwin)
    hbins = bins if bins is not None else 100
    hi = axhi.hist(arr.ravel(), bins=hbins, range=amp_range, weights=weights, color=color, log=log)
    if amp_range is not None: axhi.set_xlim(amp_range)
    if title is not None: axhi.set_title(title, color='k', fontsize=20)
    if xlabel is not None: axhi.set_xlabel(xlabel, fontsize=14)
    if ylabel is not None: axhi.set_ylabel(ylabel, fontsize=14)
    if show_stat:
        # NOTE: deliberately shadows the `weights`/`bins` parameters with
        # the histogram results before handing them to add_stat_text.
        weights, bins, patches = hi
        add_stat_text(axhi, weights, bins)
    return fig, axhi, hi
24,403
def format_validate_parameter(param):
    """
    Format a template parameter for validate template API call

    Formats a template parameter and its schema information from the
    engine's internal representation (i.e. a Parameter object and its
    associated Schema object) to a representation expected by the
    current API (for example to be compatible to CFN syntax).
    """
    schema = param.schema

    # Map internal Schema types onto the types the validate API expects.
    api_type_map = {
        schema.STRING: api.PARAM_TYPE_STRING,
        schema.NUMBER: api.PARAM_TYPE_NUMBER,
        schema.LIST: api.PARAM_TYPE_COMMA_DELIMITED_LIST,
        schema.MAP: api.PARAM_TYPE_JSON,
        schema.BOOLEAN: api.PARAM_TYPE_BOOLEAN,
    }

    result = {
        api.PARAM_TYPE: api_type_map.get(schema.type, schema.type),
        api.PARAM_DESCRIPTION: param.description(),
        api.PARAM_NO_ECHO: 'true' if param.hidden() else 'false',
        api.PARAM_LABEL: param.label(),
    }

    if param.has_value():
        result[api.PARAM_DEFAULT] = param.value()

    descriptions = []

    # Translate each schema constraint into its API counterpart.
    for constraint in schema.constraints:
        if isinstance(constraint, constr.Length):
            if constraint.min is not None:
                result[api.PARAM_MIN_LENGTH] = constraint.min
            if constraint.max is not None:
                result[api.PARAM_MAX_LENGTH] = constraint.max
        elif isinstance(constraint, constr.Range):
            if constraint.min is not None:
                result[api.PARAM_MIN_VALUE] = constraint.min
            if constraint.max is not None:
                result[api.PARAM_MAX_VALUE] = constraint.max
        elif isinstance(constraint, constr.AllowedValues):
            result[api.PARAM_ALLOWED_VALUES] = list(constraint.allowed)
        elif isinstance(constraint, constr.AllowedPattern):
            result[api.PARAM_ALLOWED_PATTERN] = constraint.pattern
        elif isinstance(constraint, constr.CustomConstraint):
            result[api.PARAM_CUSTOM_CONSTRAINT] = constraint.name
        if constraint.description:
            descriptions.append(constraint.description)

    if descriptions:
        result[api.PARAM_CONSTRAINT_DESCRIPTION] = " ".join(descriptions)

    return result
24,404
def get_template(name):
    """Retrieve a packaged template by name.

    Args:
        name: base name of the template (without the ``.template`` suffix)

    Returns:
        :obj:`string.Template`: template loaded from the package data
    """
    resource = "{name}.template".format(name=name)
    raw = resource_string("pyscaffoldext.beeproject.templates", resource)
    return string.Template(raw.decode("UTF-8"))
24,405
def create_benefits_online(context):
    """Behave step: construct a BenefitsOnline client and stash it on the
    test context for later steps to use."""
    username = context.bol_username
    password = context.password
    context.benefits_online = BenefitsOnline(username, password)
24,406
def run_kitti_native_script_with_05_iou(kitti_native_code_copy, checkpoint_name, score_threshold, global_step):
    """Run the kitti native evaluation script at 0.5 IoU.

    Args:
        kitti_native_code_copy: path to the copied native-code directory
            containing ``run_eval_05_iou.sh``.
        checkpoint_name: experiment/checkpoint name passed to the script.
        score_threshold: detection score threshold (rounded to 3 decimals).
        global_step: training step whose predictions are evaluated.
    """
    make_script = kitti_native_code_copy + '/run_eval_05_iou.sh'
    script_folder = kitti_native_code_copy
    # FIX: removed a dead first assignment that pointed at
    # '/experiments/results/'; only the 0.5-IoU directory is used.
    results_dir = _sys_init.root_dir() + '/experiments/results_05_iou/'
    # Round this because protobuf encodes default values as full decimal
    score_threshold = round(score_threshold, 3)
    subprocess.call([make_script, script_folder,
                     str(score_threshold),
                     str(global_step),
                     str(checkpoint_name),
                     str(results_dir)])
24,407
def box3d_overlap(boxes, qboxes, criterion=-1, z_axis=1, z_center=1.0):
    """Compute 3D overlap between two box sets (kitti camera format, z_axis=1).

    Drops the vertical axis (position ``z_axis`` and its size at
    ``z_axis + 3``) from the 7-dof box encoding to get BEV boxes,
    computes rotated BEV intersections, then refines them in-place into
    3D overlaps via ``box3d_overlap_kernel``.

    # NOTE(review): boxes appear to be 7-dof (3 position, 3 size, 1 yaw)
    # given the pops below — confirm against the box encoding used upstream.
    """
    bev_axes = list(range(7))
    # Remove the size component of the vertical axis first (higher index),
    # then the vertical position itself, leaving the 5 BEV columns.
    bev_axes.pop(z_axis + 3)
    bev_axes.pop(z_axis)
    # Rotated IoU of the BEV footprints; mode 2 is the intersection criterion.
    rinc = rotate_iou_gpu_eval(boxes[:, bev_axes], qboxes[:, bev_axes], 2)
    # Refine BEV intersections into full 3D overlaps (modifies rinc in place).
    box3d_overlap_kernel(boxes, qboxes, rinc, criterion, z_axis, z_center)
    return rinc
24,408
def _apply_graph_transform_tool_rewrites(g, input_node_names, output_node_names):
    # type: (gde.Graph, List[str], List[str]) -> tf.GraphDef
    """
    Use the [Graph Transform Tool](
    https://github.com/tensorflow/tensorflow/blob/master/tensorflow/tools/
    graph_transforms/README.md)
    to perform a series of pre-deployment rewrites.

    Args:
        g: GDE representation of the core graph.
        input_node_names: Names of placeholder nodes that are used as inputs
            to the graph for inference. Placeholders NOT on this list will be
            considered dead code.
        output_node_names: Names of nodes that produce tensors that are
            outputs of the graph for inference purposes. Nodes not necessary
            to produce these tensors will be considered dead code.

    Returns:
        GraphDef representation of rewritten graph.
    """
    # Invoke the Graph Transform Tool using the undocumented Python APIs
    # under tensorflow.tools.graph_transforms.
    after_tf_rewrites_graph_def = graph_transforms.TransformGraph(
        g.to_graph_def(),
        inputs=input_node_names,
        outputs=output_node_names,
        # Use the set of transforms recommended in the README under
        # "Optimizing for Deployment".
        # NOTE(review): the shape "1,299,299,3" is hard-coded (Inception-style
        # input) — confirm it matches the model being deployed.
        transforms=['strip_unused_nodes(type=float, shape="1,299,299,3")',
                    'remove_nodes(op=Identity, op=CheckNumerics)',
                    'fold_constants(ignore_errors=true)',
                    'fold_batch_norms',
                    'fold_old_batch_norms']
    )
    return after_tf_rewrites_graph_def
24,409
def draw_embedding_rel_space(h_emb, r_emb, t_emb, h_name, r_name, t_name, resultpath, algos, show_label):
    """Draw head/relation/tail embeddings in relation space as a graph plot.

    Each triple (h, r, t) becomes three graph nodes placed at the given 2-D
    embedding coordinates, connected h -> r -> t.  Entities and relations get
    colors assigned from the matplotlib named-color palette (cycled).

    Args:
        h_emb (matrix): Two dimensional embeddings of head.
        r_emb (matrix): Two dimensional embeddings of relation.
        t_emb (matrix): Two dimensional embeddings of tail.
        h_name (list): List of string name of the head.
        r_name (list): List of string name of the relation.
        t_name (list): List of string name of the tail.
        resultpath (str): Path where the result will be saved
            (used with `/`, so presumably a pathlib.Path — verify at caller).
        algos (str): Name of the algorithm which generated the embeddings
            (used in the output file name).
        show_label (bool): If True, prints the string names of the entities
            and relations.
    """
    print("\t drawing figure!")
    pos = {}
    node_color_mp_ent = {}
    node_color_mp_rel = {}
    unique_ent = set(h_name) | set(t_name)
    unique_rel = set(r_name)
    colors = list(dict(mcolors.BASE_COLORS, **mcolors.CSS4_COLORS).keys())
    tot_col = len(colors)
    j = 0
    # Assign each unique entity a color, cycling through the palette.
    for i, e in enumerate(unique_ent):
        node_color_mp_ent[e] = colors[j]
        j += 1
        if j >= tot_col:
            j = 0
    tot_col = len(colors)
    j = 0
    # Same cycling assignment for relations (palette restarts from 0).
    for i, r in enumerate(unique_rel):
        node_color_mp_rel[r] = colors[j]
        j += 1
        if j >= tot_col:
            j = 0
    G = nx.DiGraph()
    idx = 0
    head_colors = []
    rel_colors = []
    tail_colors = []
    head_nodes = []
    tail_nodes = []
    rel_nodes = []
    # Build three nodes per triple (idx, idx+1, idx+2) with edges h->r->t,
    # positioned at the corresponding embedding coordinates.
    for i in range(len(h_name)):
        G.add_edge(idx, idx + 1)
        G.add_edge(idx + 1, idx + 2)
        head_nodes.append(idx)
        rel_nodes.append(idx + 1)
        tail_nodes.append(idx + 2)
        head_colors.append(node_color_mp_ent[h_name[i]])
        rel_colors.append(node_color_mp_rel[r_name[i]])
        tail_colors.append(node_color_mp_ent[t_name[i]])
        pos[idx] = h_emb[i]
        pos[idx + 1] = r_emb[i]
        pos[idx + 2] = t_emb[i]
        idx += 3
    plt.figure()
    # Heads as circles, relations as diamonds, tails as stars.
    nodes_draw = nx.draw_networkx_nodes(G, pos, nodelist=head_nodes, node_color=head_colors, node_shape='o', node_size=50)
    nodes_draw.set_edgecolor('k')
    # NOTE(review): with_labels is not a documented draw_networkx_nodes
    # kwarg in modern networkx — may be ignored or rejected; confirm version.
    nodes_draw = nx.draw_networkx_nodes(G, pos, nodelist=rel_nodes, node_color=rel_colors, node_size=50, node_shape='D', with_labels=show_label)
    nodes_draw.set_edgecolor('k')
    nodes_draw = nx.draw_networkx_nodes(G, pos, nodelist=tail_nodes, node_color=tail_colors, node_shape='*', node_size=50)
    nodes_draw.set_edgecolor('k')
    if show_label:
        nx.draw_networkx_labels(G, pos, font_size=8)
    nx.draw_networkx_edges(G, pos, arrows=True, width=0.5, alpha=0.5)
    if not os.path.exists(resultpath):
        os.mkdir(resultpath)
    # Number the output file after the existing plots for this algorithm.
    files = os.listdir(resultpath)
    file_no = len([c for c in files if algos + '_embedding_plot' in c])
    plt.savefig(str(resultpath / (algos + '_embedding_plot_' + str(file_no) + '.png')), bbox_inches='tight', dpi=300)
    # plt.show()
24,410
def big_bcast(comm, objs, root=0, return_split_info=False, MAX_BYTES=INT_MAX):
    """
    Broadcast operation that can exceed the MPI limit of ~4 GiB.

    See documentation on :meth:`big_gather` for details.

    Parameters
    ----------
    comm: mpi4py.MPI.Intracomm
        MPI communicator to use.
    objs: objects
        Data to broadcast from the root process.
    root: int
        Rank of the process holding the data to send.
    return_split_info: bool
        On root process, also return a dictionary describing how the
        data were split. Used for testing.
    MAX_BYTES: int
        Maximum bytes per chunk. Defaults to the INT_MAX of 32 bit
        integers. Used for testing.

    Returns
    -------
    object:
        The broadcast data, reconstructed on every process (numpy arrays
        are sent raw and rebuilt; everything else is pickled).
    dict:
        If return_split_info, a dictionary containing:
            - ranges: list of (start, end) byte offsets of each chunk.
            - MAX_BYTES: the size limit that was used.

    Notes
    -----
    Running this on MPI.COMM_WORLD means that every process gets a full
    copy of `objs`, potentially using up available memory. This function
    is currently used to send large data once to each node, to be put in
    shared memory.
    """
    bufsize = None
    nopickle = False
    shape = None
    dtype = None
    if comm.rank == root:
        # numpy arrays are shipped as raw bytes plus (shape, dtype) so they
        # can be rebuilt without pickling; everything else is pickled.
        if isinstance(objs, np.ndarray):
            shape = objs.shape
            dtype = objs.dtype
            buf = objs.tobytes()
            nopickle = True
        else:
            buf = dumps(objs)
        bufsize = len(buf)
    # Tell every rank how much data is coming and in which form.
    bufsize = comm.bcast(bufsize, root=root)
    nopickle = comm.bcast(nopickle, root=root)
    if nopickle:
        shape = comm.bcast(shape, root=root)
        dtype = comm.bcast(dtype, root=root)
    if comm.rank != root:
        # NOTE(review): np.empty(bufsize, dtype=bytes) yields a zero-itemsize
        # 'S' dtype array — presumably relied upon as a writable byte buffer
        # for Bcast; confirm this matches the mpi4py buffer protocol use.
        buf = np.empty(bufsize, dtype=bytes)
    # Split the payload into <= MAX_BYTES chunks and broadcast each in turn,
    # since a single MPI message is limited by the 32-bit count.
    start = 0
    end = 0
    ranges = []
    while end < bufsize:
        end = min(start + MAX_BYTES, bufsize)
        ranges.append((start, end))
        start += MAX_BYTES
    for start, end in ranges:
        comm.Bcast([buf[start:end], MPI.BYTE], root=root)
    # Reassemble the original object on every rank.
    if nopickle:
        result = np.frombuffer(buf, dtype=dtype)
        result = result.reshape(shape)
    else:
        result = loads(buf)
    split_info_dict = {'MAX_BYTES': MAX_BYTES, 'ranges': ranges}
    if return_split_info:
        return result, split_info_dict
    return result
24,411
def main(config):
    """Entry point: collect input ``.h5`` files and run processing.

    Resolves the input directory relative to the original (pre-hydra)
    working directory, globs the matching ``.h5`` files, and either runs
    a single process or fans out via ``mp.spawn`` according to
    ``config['multiprocessing']``.
    """
    working_dir = os.getcwd()
    # hydra changes the cwd per run; recover the project root.
    project_dir = hydra.utils.get_original_cwd()
    folder_path = os.path.join(project_dir, config['input_dir'])
    if config['anatomy'] == 'stanford_knees':
        # Stanford-knees files are filtered by acceleration factor R.
        files = get_all_files(folder_path, pattern=f'*R{config["R"]}*.h5')
    else:
        files = get_all_files(folder_path, pattern='*.h5')
    if not config['multiprocessing']:
        # Single-process run; rank argument is fixed at 0.
        mp_run(0, config, project_dir, working_dir, files)
    else:
        # One worker per configured world_size; mp.spawn prepends the rank.
        mp.spawn(mp_run,
                 args=(config, project_dir, working_dir, files),
                 nprocs=config['world_size'],
                 join=True)
24,412
def get_task(appname, taskqueue, identifier):
    """Fetch a single task from a taskqueue by identifier.

    Request
    -------
    ```
    GET http://asynx.host/apps/:appname/taskqueues/:taskqueue/tasks/:identifier
    ```

    Parameters:
      - appname: url param, string, the application name under which
        the queue lies
      - taskqueue: url param, string, the name of the taskqueue in
        which the task belongs
      - identifier: url param, string, the identifier to the task.
        the identifier can be:
          - id, form: {integer} or id:{integer};
          - uuid, form: uuid:{string}
          - cname, form: cname:{string}

    Request body:
        Do not supply a request body with this method

    Response
    --------
    Task resource same as `insert_task`.
    """
    # Reject identifiers that don't match any of the accepted forms.
    try:
        id_type, id_value = validate(forms.identifier_form, identifier)
    except MultipleInvalid as e:
        raise IdentifierNotFound(str(e))
    queue = TaskQueue(appname, taskqueue)
    # Dispatch on the identifier kind produced by the validator.
    if id_type == 'id':
        task = queue.get_task(id_value)
    elif id_type == 'uuid':
        task = queue.get_task_by_uuid(id_value)
    elif id_type == 'cname':
        task = queue.get_task_by_cname(id_value)
    return jsonify(task)
24,413
def GetBuiltins(stdlib=True):
    """Get the "default" AST used to lookup built in types.

    Get an AST for all Python builtins as well as the most commonly used
    standard libraries.

    Args:
        stdlib: Whether to load the standard library, too. If this is False,
            TypeDeclUnit.modules will be empty. If it's True, it'll contain
            modules like itertools and signal.

    Returns:
        A pytd.TypeDeclUnit instance. It'll directly contain the builtin
        classes and functions, and submodules for each of the standard
        library modules.
    """
    # Results are memoized per stdlib flag in the module-level cache.
    cache_key = stdlib
    if cache_key in _cached_builtins:
        return _cached_builtins[cache_key]
    # TODO: This can be fairly slow; suggest pickling the result and
    # reusing if possible (see lib2to3.pgen2.grammar)

    # We use the same parser instance to parse all builtin files. This changes
    # the run time from 1.0423s to 0.5938s (for 21 builtins).
    p = parser.TypeDeclParser(parser.DEFAULT_VERSION)
    builtins = p.Parse(_FindBuiltinFile("__builtin__.pytd"))
    # We list modules explicitly, because we might have to extract them out of
    # a PAR file, which doesn't have good support for listing directories.
    modules = ["array", "codecs", "errno", "fcntl", "gc", "itertools",
               "marshal", "os", "posix", "pwd", "select", "signal", "_sre",
               "StringIO", "strop", "_struct", "sys", "_warnings", "warnings",
               "_weakref"]
    if stdlib:
        for mod in modules:
            builtins.modules[mod] = p.Parse(_FindBuiltinFile(mod + ".pytd"))
    _cached_builtins[cache_key] = builtins
    return builtins
24,414
def assign_style_props(df, color=None, marker=None, linestyle=None, cmap=None):
    """Assign the style properties for a plot

    Parameters
    ----------
    df : pd.DataFrame
        data to be used for style properties
    color, marker, linestyle : str, optional
        column names whose unique values receive per-line style properties
    cmap : str, optional
        colormap name; requires `color` to be given

    Returns
    -------
    dict
        mapping of style kind -> {column value -> style property}
    """
    if color is None and cmap is not None:
        # FIX: the original message had the dependency inverted.
        raise ValueError('`color` must be provided when using `cmap`')

    # determine color, marker, and linestyle for each line
    n = len(df[color].unique()) if color in df.columns else \
        len(df[list(set(df.columns) & set(IAMC_IDX))].drop_duplicates())
    defaults = default_props(reset=True, num_colors=n, colormap=cmap)

    props = {}
    rc = run_control()

    kinds = [('color', color), ('marker', marker), ('linestyle', linestyle)]

    for kind, var in kinds:
        rc_has_kind = kind in rc
        if var in df.columns:
            rc_has_var = rc_has_kind and var in rc[kind]
            props_for_kind = {}

            for val in df[var].unique():
                if rc_has_var and val in rc[kind][var]:
                    props_for_kind[val] = rc[kind][var][val]
                    # cycle any way to keep defaults the same
                    next(defaults[kind])
                else:
                    props_for_kind[val] = next(defaults[kind])
            props[kind] = props_for_kind

    # update for special properties only if they exist in props
    if 'color' in props:
        d = props['color']
        values = list(d.values())
        # find if any colors in our properties corresponds with special
        # colors we know about
        overlap_idx = np.in1d(values, list(PYAM_COLORS.keys()))
        if overlap_idx.any():
            # some exist in our special set
            keys = np.array(list(d.keys()))[overlap_idx]
            values = np.array(values)[overlap_idx]
            # translate each from pyam name, like AR6-SSP2-45 to proper
            # color designation
            for k, v in zip(keys, values):
                d[k] = PYAM_COLORS[v]
            # replace props with updated dict without special colors
            props['color'] = d
    return props
24,415
def to_dbtext(text):
    """Helper to turn a string into a db.Text instance.

    (Python 2 code: distinguishes ``unicode`` from byte strings.)

    Args:
        text: a string.

    Returns:
        A db.Text instance.
    """
    # db.Text raises TypeError when given unicode together with an
    # encoding, so unicode input is wrapped directly.
    if isinstance(text, unicode):
        return db.Text(text)
    # Byte strings: decode as UTF-8 when possible, otherwise fall back
    # to Latin-1, which accepts any byte sequence.
    try:
        return db.Text(text, encoding='utf-8')
    except UnicodeDecodeError:
        return db.Text(text, encoding='latin-1')
24,416
def write_vis_file_ring(numCameras, numNeighbors=1, visFilePath=Path('vis.dat')):
    """
    Generate a vis.dat file that pmvs2 expects for a camera array with ring
    topology and a configurable number of neighbors to be used for
    reconstruction.

    Inputs:
    numCameras -- The number of cameras in the ring
    numNeighbors -- For any camera, the number of other adjacent cameras to
        use for matching, i.e. 1 for stereo, 2 for trinocular...
    visFilePath -- Output path for the generated file.
    """
    with visFilePath.open('w') as fd:
        fd.write('VISDATA\n')
        fd.write(str(numCameras) + '\n')
        assert numNeighbors >= 1
        # FIX: the original `assert(numNeighbors+1)` was vacuous (always
        # true for any numNeighbors >= 0); the intended invariant is that a
        # camera cannot have more neighbors than there are other cameras.
        assert numNeighbors + 1 <= numCameras
        for center_camera in range(numCameras):
            # Odd neighbor counts put the extra neighbor on the positive side.
            numPositiveNeighbors = int(numNeighbors) // 2 + numNeighbors % 2
            numNegativeNeighbors = int(numNeighbors) // 2
            fd.write(str(center_camera) + ' ')
            fd.write(str(numNeighbors) + ' ')
            # Neighbors wrap around the ring in both directions.
            for i in range(numPositiveNeighbors):
                neighbor_camera = (center_camera + i + 1) % numCameras
                fd.write(str(neighbor_camera) + ' ')
            for i in range(numNegativeNeighbors):
                neighbor_camera = (center_camera - i - 1) % numCameras
                fd.write(str(neighbor_camera) + ' ')
            fd.write('\n')
24,417
def zmq_init(pub_port, sub_port_list):
    """
    Initialize the ZeroMQ publisher and subscriber.

    `My` publisher publishes `my` data to the neighbors. `My` subscriber
    listens to the ports of other neighbors. `sub_port_list` stores all the
    possible neighbors' TCP ports.

    The data packs are wrapped as an XBee interface, compatible with the
    XBee transmission and reception functions in this module.

    Args:
        pub_port(str/int): TCP port for the publisher.
        sub_port_list(list): TCP port list for the subscriber to listen to.

    Returns:
        list: `my` publisher and `my` subscriber (i.e. listener).
    """
    pub = zmq.Context().socket(zmq.PUB)
    pub.bind('tcp://*:%s' % pub_port)
    sub = zmq.Context().socket(zmq.SUB)
    # FIX: the original iterated the list's *values* and then indexed the
    # list with each value (sub_port_list[port]), which only works by
    # accident when ports happen to be valid indices. Iterate the ports
    # directly and skip our own publisher port.
    for port in sub_port_list:
        if port != pub_port:
            sub.connect('tcp://127.0.0.1:%s' % port)
            time.sleep(0.05)
    # NOTE(review): on Python 3 / recent pyzmq the subscription topic must
    # be bytes (b'XBEE') — confirm target runtime before changing.
    sub.setsockopt(zmq.SUBSCRIBE, 'XBEE')
    return [pub, sub]
24,418
def test_kb_invalid_entity_vector(nlp):
    """A KB must reject entity vectors whose length differs from its
    configured ``entity_vector_length``."""
    kb = KnowledgeBase(nlp.vocab, entity_vector_length=3)

    # A matching 3-dimensional vector is accepted.
    kb.add_entity(entity="Q1", freq=19, entity_vector=[1, 2, 3])

    # A 1-dimensional vector must raise, since the KB expects length 3.
    with pytest.raises(ValueError):
        kb.add_entity(entity="Q2", freq=5, entity_vector=[2])
24,419
def convert(conf_dict):
    """Thin wrapper: delegate the conversion described by ``conf_dict``
    to ``converter.run_convert``."""
    converter.run_convert(conf_dict)
24,420
def initialise_harness_from_file(file_name = None):
    """Initialise interoperability test harness.

    This function creates an instance of
    :class:`prov_interop.harness.HarnessResources` and then configures it
    using configuration loaded from a YAML file (using
    :func:`prov_interop.files.load_yaml`). The file loaded is:

    - `file_name` if this argument is provided (when called from within
      this module itself, no value is provided).
    - Else, the file named in an environment variable with name
      ``PROV_HARNESS_CONFIGURATION``, if such an environment variable has
      been defined.
    - Else, ``localconfig/harness.yaml``.

    The function will not reinitialise the
    :class:`prov_interop.harness.HarnessResources` instance once it has
    been created and initialised.

    A valid YAML configuration file, which, when loaded, yields a Python
    dictionary holding the configuration required by
    :class:`prov_interop.harness.HarnessResources` is::

        ---
        test-cases: /home/user/test-cases
        comparators:
          ProvPyComparator:
            class: prov_interop.provpy.comparator.ProvPyComparator
            executable: prov-compare
            arguments: -f FORMAT1 -F FORMAT2 FILE1 FILE2
            formats: [provx, json]

    :param file_name: Configuration file name (optional)
    :type file_name: str or unicode
    :raises IOError: if the file is not found.
    :raises ConfigError: if the configuration in the file does not contain
        the configuration properties expected by
        :class:`prov_interop.harness.HarnessResources`
    :raises YamlError: if the file is an invalid YAML file
    """
    global harness_resources
    global CONFIGURATION_FILE_ENV
    global DEFAULT_CONFIGURATION_FILE
    # NOTE(review): the init-and-report body below is placed inside the
    # None-guard (matching the "will not reinitialise" contract) — confirm
    # against the original layout.
    if harness_resources is None:
        harness_resources = HarnessResources()
        config = load_yaml(CONFIGURATION_FILE_ENV,
                           DEFAULT_CONFIGURATION_FILE,
                           file_name)
        harness_resources.configure(config)
        # Report what was configured, for the operator's benefit.
        print("Comparators available:")
        for format in harness_resources.format_comparators:
            print((" " + format + ":" +
                   harness_resources.format_comparators[format].__class__.__name__))
        print("Test cases directory:")
        print((harness_resources.test_cases_dir))
        print("Test cases available:")
        num_test_cases = 0
        for (index, format1, _, format2, _) in harness_resources.test_cases_generator():
            num_test_cases += 1
            print((str(index) + ":" + format1 + "->" + format2))
        print("Total: " + str(num_test_cases))
24,421
def register_uri_image_loader(scheme, loader):
    """Register a loader for images referenced as "scheme://path".

    The image will later be retrieved by calling
    ``Image.open(loader(path))``.

    Args:
        scheme: URI scheme to register (the part before ``://``).
        loader: callable mapping the path part to something that
            ``Image.open`` accepts (presumably a path or file-like
            object — confirm against callers).
    """
    logger.info(
        "Register image loader for scheme: {} with loader: {}".format(scheme, loader)
    )
    # Later registrations for the same scheme overwrite earlier ones.
    _IMAGE_LOADER_REGISTRY[scheme] = loader
24,422
def get_all_match_fractions( residuals: Dict[str, np.ndarray], roi_mask: np.ndarray, hypotheses: np.ndarray, parang: np.ndarray, psf_template: np.ndarray, frame_size: Tuple[int, int], n_roi_splits: int = 1, roi_split: int = 0, ) -> Tuple[np.ndarray, np.ndarray, np.ndarray]: """ This is essentially a convenience function which wraps the loop over the ROI and calls :func:`get_match_fraction_for_position()` for every spatial pixel. Args: residuals: A dictionary containing the full residuals as they are produced by :func:`hsr4hci.training.train_all_models`. hypotheses: A 2D numpy array containing the hypotheses map. parang: A 1D numpy array of shape `(n_frames, )` containing the parallactic angle for every frame. psf_template: A 2D numpy array containing the unsaturated PSF template. frame_size: A tuple `(x_size, y_size)` containing the spatial size of the input stack in pixels. n_roi_splits: Total number of splits for the ROI if we want to compute the match fraction map in parallel. roi_split: Index of the ROI split that we want to process here. Returns: A 3-tuple consisting of 1. ``mean_mfs``: A 2D numpy array containing the match fraction map when using the mean to average. 2. ``median_mfs``: A 2D numpy array containing the match fraction map when using the median to average. 3. ``affected_pixels``: A 4D numpy array containing which, for each position `(x, y)` contains a 2D binary mask with the affected mask (see :func:`get_match_fraction_for_position`). 
""" # Initialize array for the match fractions (mean and median) mean_mfs = np.full(frame_size, np.nan) median_mfs = np.full(frame_size, np.nan) # Define an array in which we keep track of the "affected pixels" (i.e., # the planet traces for every hypothesis), mostly for debugging purposes affected_pixels = np.full(frame_size + frame_size, np.nan) # Define positions for which to run (= subset of the ROI) positions = get_positions_from_mask(roi_mask)[roi_split::n_roi_splits] # Get signal times based on the keys of the given results dictionary _digit_keys = filter(lambda _: _.isdigit(), residuals.keys()) signal_times = np.array(sorted(list(map(int, _digit_keys)))) # Loop over (subset of) ROI and compute match fractions for position in tqdm(positions, ncols=80): mean_mf, median_mf, affected_mask = get_match_fraction_for_position( position=position, hypothesis=hypotheses[position[0], position[1]], residuals=residuals, parang=parang, psf_template=psf_template, signal_times=signal_times, frame_size=frame_size, ) mean_mfs[position] = mean_mf median_mfs[position] = median_mf affected_pixels[position] = affected_mask return mean_mfs, median_mfs, affected_pixels
24,423
def test_predict_test_data():
    """The number of predictions must equal the number of test labels."""
    n_trees = 1000
    seed = 42
    max_features = float(1/3)

    # Nine samples, each a row of six identical values 1..9.
    features = np.array([[value] * 6 for value in range(1, 10)])
    labels = list(range(1, 10))

    forest = create_random_forest(n_trees, seed, max_features, features, labels)
    predictions = predict_test_data(forest, features)

    assert len(predictions) == len(labels)
24,424
def test_tensorboard_dir_script_specify_tensorboard_dir():
    """In script mode, passing `export_tensorboard` and `tensorboard_dir`
    together makes the hook adopt the given tensorboard directory."""
    with ScriptSimulator(tensorboard_dir="/tmp/tensorboard_dir") as sim:
        hook = smd.Hook(
            out_dir=sim.out_dir, export_tensorboard=True, tensorboard_dir=sim.tensorboard_dir
        )
        # The explicitly-passed directory must win.
        assert hook.tensorboard_dir == sim.tensorboard_dir
24,425
def dot(u, v):
    """Return the dot product of two equal-sized vectors.

    Both arguments must expose ``size`` (component count) and ``store``
    (the component sequence), as the surrounding ``Vec`` type does.

    >>> dot(Vec([1, 2]), Vec([1, 2]))
    5

    Raises:
        AssertionError: if the vectors differ in size.
    """
    assert u.size == v.size
    # FIX: the original shadowed the builtin `sum` with a local and used
    # enumerate() without ever using the index; a generator over zip()
    # expresses the same accumulation directly.
    return sum(a * b for a, b in zip(u.store, v.store))
24,426
def isvalid(number, numbers, choices=2):
    """Return True if `number` equals a sum of `choices` values drawn
    from `numbers` (delegates enumeration to the module-level ``sums``).

    >>> isvalid(40, (35, 20, 15, 25, 47))
    True
    >>> isvalid(62, (20, 15, 25, 47, 40))
    True
    >>> isvalid(127, (182, 150, 117, 102, 95))
    False
    """
    return number in sums(numbers, choices)
24,427
def from_rotation_matrix(rotation_matrix: type_alias.TensorLike,
                         name: str = "quaternion_from_rotation_matrix"
                         ) -> tf.Tensor:
    """Converts a rotation matrix representation to a quaternion.

    Warning:
        This function is not smooth everywhere.

    Note:
        In the following, A1 to An are optional batch dimensions.

    Args:
        rotation_matrix: A tensor of shape `[A1, ..., An, 3, 3]`, where the
            last two dimensions represent a rotation matrix.
        name: A name for this op that defaults to
            "quaternion_from_rotation_matrix".

    Returns:
        A tensor of shape `[A1, ..., An, 4]`, where the last dimension
        represents a normalized quaternion.

    Raises:
        ValueError: If the shape of `rotation_matrix` is not supported.
    """
    with tf.name_scope(name):
        rotation_matrix = tf.convert_to_tensor(value=rotation_matrix)

        # Enforce a trailing 3x3 matrix (with arbitrary batch dims).
        shape.check_static(
            tensor=rotation_matrix,
            tensor_name="rotation_matrix",
            has_rank_greater_than=1,
            has_dim_equals=((-1, 3), (-2, 3)))
        rotation_matrix = rotation_matrix_3d.assert_rotation_matrix_normalized(
            rotation_matrix)

        trace = tf.linalg.trace(rotation_matrix)
        eps_addition = asserts.select_eps_for_addition(rotation_matrix.dtype)
        # entries[i][j] is matrix element (i, j), unstacked for elementwise use.
        rows = tf.unstack(rotation_matrix, axis=-2)
        entries = [tf.unstack(row, axis=-1) for row in rows]

        def tr_positive():
            # Branch used when the trace is positive: qw dominates.
            sq = tf.sqrt(trace + 1.0) * 2.  # sq = 4 * qw.
            qw = 0.25 * sq
            qx = safe_ops.safe_unsigned_div(entries[2][1] - entries[1][2], sq)
            qy = safe_ops.safe_unsigned_div(entries[0][2] - entries[2][0], sq)
            qz = safe_ops.safe_unsigned_div(entries[1][0] - entries[0][1], sq)
            return tf.stack((qx, qy, qz, qw), axis=-1)

        def cond_1():
            # Branch used when m00 is the largest diagonal entry: qx dominates.
            sq = tf.sqrt(1.0 + entries[0][0] - entries[1][1] - entries[2][2] +
                         eps_addition) * 2.  # sq = 4 * qx.
            qw = safe_ops.safe_unsigned_div(entries[2][1] - entries[1][2], sq)
            qx = 0.25 * sq
            qy = safe_ops.safe_unsigned_div(entries[0][1] + entries[1][0], sq)
            qz = safe_ops.safe_unsigned_div(entries[0][2] + entries[2][0], sq)
            return tf.stack((qx, qy, qz, qw), axis=-1)

        def cond_2():
            # Branch used when m11 is the largest diagonal entry: qy dominates.
            sq = tf.sqrt(1.0 + entries[1][1] - entries[0][0] - entries[2][2] +
                         eps_addition) * 2.  # sq = 4 * qy.
            qw = safe_ops.safe_unsigned_div(entries[0][2] - entries[2][0], sq)
            qx = safe_ops.safe_unsigned_div(entries[0][1] + entries[1][0], sq)
            qy = 0.25 * sq
            qz = safe_ops.safe_unsigned_div(entries[1][2] + entries[2][1], sq)
            return tf.stack((qx, qy, qz, qw), axis=-1)

        def cond_3():
            # Branch used when m22 is the largest diagonal entry: qz dominates.
            sq = tf.sqrt(1.0 + entries[2][2] - entries[0][0] - entries[1][1] +
                         eps_addition) * 2.  # sq = 4 * qz.
            qw = safe_ops.safe_unsigned_div(entries[1][0] - entries[0][1], sq)
            qx = safe_ops.safe_unsigned_div(entries[0][2] + entries[2][0], sq)
            qy = safe_ops.safe_unsigned_div(entries[1][2] + entries[2][1], sq)
            qz = 0.25 * sq
            return tf.stack((qx, qy, qz, qw), axis=-1)

        def cond_idx(cond):
            # Broadcast a boolean batch condition over the 4 quaternion
            # components so it can drive tf.where element-wise.
            cond = tf.expand_dims(cond, -1)
            cond = tf.tile(cond, [1] * (rotation_matrix.shape.ndims - 2) + [4])
            return cond

        # Select the numerically best branch per matrix: first among the
        # diagonal-dominant cases, then prefer the positive-trace branch.
        where_2 = tf.where(
            cond_idx(entries[1][1] > entries[2][2]), cond_2(), cond_3())
        where_1 = tf.where(
            cond_idx((entries[0][0] > entries[1][1])
                     & (entries[0][0] > entries[2][2])), cond_1(), where_2)
        quat = tf.where(cond_idx(trace > 0), tr_positive(), where_1)
        return quat
24,428
def set_config(target_offload=None, allow_fallback_to_host=None, **sklearn_configs):
    """Set global configuration

    Parameters
    ----------
    target_offload : string or dpctl.SyclQueue, default=None
        The device primarily used to perform computations.
        If string, expected to be "auto" (the execution context
        is deduced from input data location),
        or SYCL* filter selector string.
        Global default: "auto".

    allow_fallback_to_host : bool, default=None
        If True, allows to fallback computation to host device
        in case particular estimator does not support the selected one.
        Global default: False.

    See Also
    --------
    config_context : Context manager for global configuration.
    get_config : Retrieve current values of the global configuration.
    """
    # Anything not understood here is forwarded to scikit-learn.
    skl_set_config(**sklearn_configs)

    # Only overwrite thread-local settings that were explicitly given;
    # None means "leave the current value alone".
    local_config = _get_sklearnex_threadlocal_config()
    for key, value in (("target_offload", target_offload),
                       ("allow_fallback_to_host", allow_fallback_to_host)):
        if value is not None:
            local_config[key] = value
24,429
def mark_text(text):
    """Compact rules processor.

    Scores a piece of text for "junkiness" and returns
    ``(weight, rules, attrs)`` where ``weight`` is the accumulated junk
    score, ``rules`` lists the triggered rule codes (SP = spaced letters,
    MS = misspelling, LT = latin word, JU = whole text is junk) and
    ``attrs`` carries per-text statistics (length, word counts,
    junk share, surviving "meaningful" words).

    NOTE(review): the original was whitespace-mangled; the nesting below
    is a best-effort reconstruction consistent with the token order —
    verify against the upstream source.
    """
    attrs = {}
    rules = []
    weight = 0
    attrs['len'] = len(text)
    # Normalize: punctuation to spaces, lower-case.
    text = text.replace('.', ' ').replace(',', ' ').replace(u'№', ' ').strip().lower()
    words = text.split()
    textjunk = []  # NOTE(review): never populated; leftover from an older version.
    spaced = 0
    attrs['wl'] = len(words)
    attrs['junkl'] = 0
    attrs['mwords'] = []
    for w in words:
        n = len(w)
        curw = 0
        # Rule SP: runs of single characters suggest s p a c e d text.
        if len(w) == 1:
            if w.isdigit():
                if n > 3:
                    curw += 1
                    if 'SP' not in rules:
                        rules.append('SP')
                    spaced = 0
            else:
                spaced += 1
        else:
            if spaced > 3:
                curw += 1
                if 'SP' not in rules:
                    rules.append('SP')
            spaced = 0
        # Rule MS: word appears in the misspellings dictionary (keyed by length).
        if n in MISSPELL_WORDS.keys():
            if w in MISSPELL_WORDS[n]:
                curw += 1
                if 'MS' not in rules:
                    rules.append('MS')
        # Rule LT: latin-script word contributes its own weight.
        pat, latweight = is_latin_word(w)
        if latweight > 0:
            curw += latweight
            if 'LT' not in rules:
                rules.append('LT')
        junk = 0
        # A word is junk if any rule fired, if it is a known dictionary
        # word, or if it is very short / numeric.
        if curw > 0:
            junk = 1
        else:
            if n in ALLDICT_WORDS.keys():
                if w in ALLDICT_WORDS[n]:
                    junk = 1
            elif len(w) < 3 or w.isdigit():
                junk = 1
        attrs['junkl'] += junk
        if junk == 0:
            attrs['mwords'].append(w)
        weight += curw
    # Trailing run of spaced single letters also triggers SP.
    if spaced > 3:
        if 'SP' not in rules:
            rules.append('SP')
        weight += 1
    isjunk = attrs['wl'] == attrs['junkl']
    attrs['junksh'] = attrs['junkl'] * 100.0 / attrs['wl'] if attrs['wl'] > 0 else 0
    # Rule JU: every word was junk, so the whole text is junk.
    if isjunk:
        weight += 10
        rules.append('JU')
    return weight, rules, attrs
24,430
def create_faucet(client):
    """Create a wallet using the testnet faucet.

    Args:
        client: XRPL network client used to request funds from the faucet.

    Returns:
        The newly generated, faucet-funded wallet.

    Note:
        ``debug=True`` makes the underlying helper print progress output.
    """
    test_wallet = generate_faucet_wallet(client, debug=True)
    return test_wallet
24,431
async def test_loading_extra_values(hass, hass_storage):
    """Test we load extra data from the registry.

    Seeds the storage backend with five pre-existing entries (named,
    unnamed, user-disabled, integration-disabled, and one with an invalid
    entity_id) and verifies that loading the registry preserves names and
    disabled flags while dropping the invalid entry.
    """
    hass_storage[entity_registry.STORAGE_KEY] = {
        "version": entity_registry.STORAGE_VERSION,
        "data": {
            "entities": [
                {
                    "entity_id": "test.named",
                    "platform": "super_platform",
                    "unique_id": "with-name",
                    "name": "registry override",
                },
                {
                    "entity_id": "test.no_name",
                    "platform": "super_platform",
                    "unique_id": "without-name",
                },
                {
                    "entity_id": "test.disabled_user",
                    "platform": "super_platform",
                    "unique_id": "disabled-user",
                    "disabled_by": "user",
                },
                {
                    "entity_id": "test.disabled_hass",
                    "platform": "super_platform",
                    "unique_id": "disabled-hass",
                    "disabled_by": "hass",
                },
                {
                    # Double underscore makes this entity_id invalid; it
                    # should be filtered out during load (hence 4 below).
                    "entity_id": "test.invalid__entity",
                    "platform": "super_platform",
                    "unique_id": "invalid-hass",
                    "disabled_by": "hass",
                },
            ]
        },
    }

    await entity_registry.async_load(hass)
    registry = entity_registry.async_get(hass)

    # The invalid entry must have been dropped.
    assert len(registry.entities) == 4

    # async_get_or_create with a known unique_id returns the stored entry.
    entry_with_name = registry.async_get_or_create(
        "test", "super_platform", "with-name"
    )
    entry_without_name = registry.async_get_or_create(
        "test", "super_platform", "without-name"
    )
    assert entry_with_name.name == "registry override"
    assert entry_without_name.name is None
    assert not entry_with_name.disabled

    entry_disabled_hass = registry.async_get_or_create(
        "test", "super_platform", "disabled-hass"
    )
    entry_disabled_user = registry.async_get_or_create(
        "test", "super_platform", "disabled-user"
    )
    assert entry_disabled_hass.disabled
    assert entry_disabled_hass.disabled_by == entity_registry.DISABLED_HASS
    assert entry_disabled_user.disabled
    assert entry_disabled_user.disabled_by == entity_registry.DISABLED_USER
24,432
def handle_offer_creation(create_offer, header, state):
    """Handle Offer creation.

    Args:
        create_offer (CreateOffer): The transaction.
        header (TransactionHeader): The header of the Transaction.
        state (MarketplaceState): The wrapper around the context.

    Raises:
        InvalidTransaction
            - There is already an Offer with the same identifier.
            - The txn signer does not have an Account.
            - Either the source or target Holding account is not the signer.
            - The source is unset.
            - The source_quantity is unset or 0.
            - The target or target_quantity are set while the other is unset.
            - The source is not a holding.
            - The target is not a holding.
    """
    # Offer ids must be unique.
    if state.get_offer(identifier=create_offer.id):
        raise InvalidTransaction(
            "Failed to create Offer, id {} already exists.".format(
                create_offer.id))

    # The signer must already have an Account.
    if not state.get_account(public_key=header.signer_public_key):
        raise InvalidTransaction(
            "Failed to create offer, transaction signer {} does "
            "not have an Account.".format(header.signer_public_key))

    if not create_offer.source:
        raise InvalidTransaction(
            "Failed to create Offer, Offer source is not specified.")

    if create_offer.source_quantity == 0:
        raise InvalidTransaction("Failed to create Offer, source_quantity "
                                 "was unset or 0")

    # The source must be an existing Holding owned by the signer, holding
    # a transferable asset.
    source_holding = state.get_holding(identifier=create_offer.source)
    if not source_holding:
        raise InvalidTransaction(
            "Failed to create Offer, Holding id {} listed as source "
            "does not refer to a Holding.".format(create_offer.source))

    if not source_holding.account == header.signer_public_key:
        raise InvalidTransaction(
            "Failed to create Offer, source Holding account {} not "
            "owned by txn signer {}".format(source_holding.account,
                                            header.signer_public_key))

    source_asset = state.get_asset(source_holding.asset)
    if _is_not_transferable(source_asset, header.signer_public_key):
        raise InvalidTransaction(
            "Failed to create Offer, source asset {} are not "
            "transferable".format(source_asset.name))

    # target and target_quantity are all-or-nothing: both set or both unset.
    if create_offer.target and not create_offer.target_quantity or \
            create_offer.target_quantity and not create_offer.target:
        raise InvalidTransaction("Failed to create Offer, target and "
                                 "target_quantity must both be set or "
                                 "both unset.")

    # Same validation chain for the (optional) target Holding.
    if create_offer.target:
        target_holding = state.get_holding(identifier=create_offer.target)
        if not target_holding:
            raise InvalidTransaction(
                "Failed to create Offer, Holding id {} listed as target "
                "does not refer to a Holding.".format(create_offer.target))

        if not target_holding.account == header.signer_public_key:
            raise InvalidTransaction(
                "Failed to create Offer, target Holding account {} not "
                "owned by txn signer {}".format(target_holding.account,
                                                header.signer_public_key))

        target_asset = state.get_asset(target_holding.asset)
        if _is_not_transferable(target_asset, header.signer_public_key):
            raise InvalidTransaction(
                "Failed to create Offer, target asset {} is not "
                "transferable".format(target_asset.name))

    # All checks passed: persist the new Offer.
    state.set_create_offer(
        identifier=create_offer.id,
        label=create_offer.label,
        description=create_offer.description,
        owners=[header.signer_public_key],
        source=create_offer.source,
        source_quantity=create_offer.source_quantity,
        target=create_offer.target,
        target_quantity=create_offer.target_quantity,
        rules=create_offer.rules)
24,433
def kill_and_exit(all_p):
    """
    Kills main, service and daemon's processes if one fails.

    Sends SIGTERM to every process in ``all_p`` (errors from already-dead
    processes are printed and ignored), then terminates the current
    process with exit status 1.

    Args:
        all_p: iterable of process-like objects exposing a ``pid`` attribute.
    """
    for p in all_p:
        try:
            os.kill(p.pid, signal.SIGTERM)
        except Exception as e:
            # Best-effort: a process may already be gone; log and continue.
            print(e)
    # NOTE(review): exit is unconditional once all kills were attempted.
    exit(1)
24,434
def model_flux(t_dec, B, P_max, R, Ne, d_l, z, mp, me, e, c, sigma_t, time, nu,
               Gamma, E_k, n, eps_b, eps_e, p, j_ang):
    """
    Derive the afterglow flux for a spectrum or light curve at a given
    time and frequency.

    Parameters mirror the standard synchrotron afterglow quantities
    (deceleration time t_dec, magnetic field B, peak power P_max,
    electron count Ne, luminosity distance d_l, redshift z, particle
    masses/charge, observation time, frequency nu, bulk Lorentz factor
    Gamma, kinetic energy E_k, density n, equipartition fractions,
    electron index p, and jet opening angle j_ang).

    Returns
    -------
    flux_n : float
        Flux density at (time, nu), in the units produced by spec_flux.
    """
    # Characteristic Lorentz factors and break frequencies at t_dec.
    gamma_m = Gamma * eps_e * ((p - 2) / (p - 1)) * (mp / me)
    gamma_c = (6 * np.pi * me * c) / (sigma_t * Gamma * B**2 * time)
    gamma_crit = (6 * np.pi * me * c) / (sigma_t * Gamma * B**2 * t_dec)
    # Jet-break time (seconds); standard scaling with E_k, n, z and angle.
    t_jb = 86400 * (((1 / 0.057) * j_ang * ((1 + z) / 2)**(3 / 8) *
                     (E_k / 1e53)**(1 / 8) * (n / 0.1)**(-1 / 8))**(8 / 3))
    nu_m0 = (gamma_m**2 * Gamma * e * B) / (2 * np.pi * me * c)
    nu_c0 = (gamma_c**2 * Gamma * e * B) / (2 * np.pi * me * c)
    flux_max = (Ne * P_max * 1e26) / (4 * np.pi * d_l**2)

    # Before deceleration: flux rises as t^3 toward the t_dec value.
    if time <= t_dec:
        flux_n = spec_flux(flux_max, time, nu, p, nu_m0, nu_c0)
        flux_n = flux_n * (time / t_dec)**3
        return flux_n

    # After deceleration: evolve the break frequencies.
    # BUGFIX: the original used `if p > 2` / `if p < 2`, leaving nu_m
    # undefined (NameError) for p == 2 exactly. At p == 2 the p<2 exponent
    # (-3(p+2))/(8(p-1)) equals -3/2, identical to the p>2 branch, so an
    # `else` is both safe and continuous in p.
    if p > 2:
        nu_m = nu_m0 * (time / t_dec)**(-3 / 2)
        nu_c = nu_c0 * (time / t_dec)**(-1 / 2)
    else:
        nu_m = nu_m0 * (time / t_dec)**((-3 * (p + 2)) / (8 * (p - 1)))
        nu_c = nu_c0 * (time / t_dec)**(-1 / 2)

    # Post jet-break: steeper decay of flux_max and nu_m.
    if time > t_jb:
        nu_c = nu_c0 * (t_jb / t_dec)**(-1 / 2)
        flux_max = flux_max * (time / t_jb)**(-1)
        if p > 2:
            nu_m = nu_m0 * (t_jb / t_dec)**(-3 / 2) * (time / t_jb)**(-2)
        else:
            # Exponents also coincide with the p>2 branch at p == 2.
            nu_m = (nu_m0 * (t_jb / t_dec)**((-3 * (p + 2)) / (8 * (p - 1))) *
                    (time / t_jb)**(-(p + 2) / (2 * (p - 1))))

    flux_n = spec_flux(flux_max, time, nu, p, nu_m, nu_c)
    return flux_n
24,435
def fix_lng_degrees(lng: float) -> float:
    """
    Normalize a longitude to the interval [-180, 180], where
    -180 means 180°W and 180 means 180°E.

    Values outside the interval are first wrapped modulo 360 (keeping
    the original sign), then folded across the antimeridian.
    """
    direction = -1 if lng <= 0 else 1
    wrapped = direction * (abs(lng) % 360)

    if wrapped > 180:
        # East of the antimeridian: fold into the western hemisphere.
        return wrapped % 180 - 180
    if wrapped < -180:
        # West of the antimeridian: fold into the eastern hemisphere.
        return wrapped % 180
    return wrapped
24,436
def calibrate_healpix(pix, version, nside=64, redo=False):
    """
    This program is a wrapper around NSC_INSTCAL_CALIBRATE
    for all exposures in the same region of the sky.
    It retrieves the reference catalogs ONCE which saves time.

    Parameters
    ----------
    pix       The HEALPix pixel number.
    version   The version name, e.g. 'v3'.
    =nside    The HEALPix nside to use.  Default is 64.
    /redo     Rerun on exposures that were previously processed.

    Returns
    -------
    Nothing is returned; each matching exposure is calibrated on disk.

    Example
    -------

    calibrate_healpix(1045,'v3')

    By D. Nidever 2017
    Translated to Python by D. Nidever, May 2022
    """
    # Main NOAO DECam source catalog
    lsdir, mssdir, localdir = utils.rootdirs()
    # NOTE(review): dldir is assumed to be a module-level path constant;
    # it is not defined in this function.
    fdir = dldir + 'users/dnidever/nsc/instcal/' + version + '/'
    tmpdir = localdir + 'dnidever/nsc/instcal/' + version + '/tmp/'
    if os.path.exists(fdir) == False:
        os.makedirs(fdir + 'logs/')
    if os.path.exists(tmpdir) == False:
        os.makedirs(tmpdir)
    t00 = time.time()

    # Load the list of exposures
    listfile = fdir + '/lists/nsc_calibrate_healpix_list.fits'
    if os.path.exists(listfile) == False:
        print(listfile, ' NOT FOUND')
        return
    hplist = Table.read(listfile)

    # Get the exposures for this healpix
    print('Calibrating InstCal SExtractor catalogs for Healpix pixel = ' + str(pix))
    ind1, ind2 = dln.match(hplist['pix'], pix)
    nind = len(ind1)
    if nind == 0:
        print('No exposures')
        return
    print('NEXPOSURES = ' + str(nind))
    # BUGFIX: original indexed with undefined name `ind`; ind1 holds the
    # matching rows of hplist returned by dln.match().
    hplist1 = hplist[ind1]
    hplist1['expdir'] = np.char.array(hplist1['expdir']).strip()
    # BUGFIX: original assigned to key 'instrument ' (trailing space),
    # creating a new column instead of cleaning the existing one.
    hplist1['instrument'] = np.char.array(hplist1['instrument']).strip()
    hplist1['filter'] = np.char.array(hplist1['filter']).strip()

    # Central coordinates
    cenra, cendec = hp.pix2ang(nside, pix, lonlat=True)
    print('RA  = %.6f' % cenra)
    print('DEC = %.6f' % cendec)
    cencoo = SkyCoord(ra=cenra, dec=cendec, unit='deg')
    glon = cencoo.galactic.l.degree
    glat = cencoo.galactic.b.degree
    print('L = %.6f' % glon)
    print('B = %.6f' % glat)

    # List of instrument-filters (instrument + first two filter chars)
    filters = np.char.array(hplist1['instrument']).strip() + '-' + \
        np.char.array([f.strip()[0:2] for f in hplist1['filter']]).strip()
    filters = np.unique(filters)

    # Get required radius
    #  DECam      needs 1.1 deg
    #  Mosaic3    needs 0.43 deg
    #  Bok90prime needs 0.75 deg
    nc4d = np.sum(np.char.array(hplist1['instrument']).find('c4d') > -1)
    nksb = np.sum(np.char.array(hplist1['instrument']).find('ksb') > -1)
    minradius = 0.43
    if nksb > 0:
        minradius = np.maximum(minradius, 0.75)
    if nc4d > 0:
        minradius = np.maximum(minradius, 1.1)
    # Add extra area for the size of the healpix pixel
    #   nside=128 is roughly 27' across
    #   nside=64 is roughly 55' across
    #   nside=32 is roughly 110' across
    radius = minradius + 0.5

    # Get all of the reference data that we need
    print('')
    ref = getrefdata(filters, cenra, cendec, radius)

    # Loop over the exposures
    for i in range(nind):
        print('')
        print('---- EXPOSURE ' + str(i + 1) + ' OF ' + str(nind) + ' ----')
        print('')
        expdir = hplist1['expdir'][i]
        # Remap the archive path onto the local data root.
        lo = expdir.find('/d1')
        expdir = dldir + expdir[lo + 5:]
        calibrate(expdir, ref, redo=redo)

    print('')
    print('Total time = %.2f sec' % (time.time() - t00))
24,437
def log_train_val_stats(args: Namespace,
                        it: int,
                        train_loss: float,
                        train_acc: float,
                        valid,
                        log_freq: int = 10,
                        ckpt_freq: int = 50,
                        mdl_watch_log_freq: int = 50,
                        force_log: bool = False,  # e.g. at the final it/epoch
                        save_val_ckpt: bool = False,
                        log_to_tb: bool = False,
                        log_to_wandb: bool = False
                        ):
    """
    Log train and val stats.

    Evaluates the model via `valid`, records stats to the stats collector,
    and optionally mirrors them to wandb / tensorboard. Also saves a
    checkpoint every `ckpt_freq` iterations.

    Note:
        Unlike save ckpt, this one does need it to be passed explicitly
        (so it can save it in the stats collector).
    """
    import wandb

    # - is it epoch or iteration
    it_or_epoch: str = 'epoch_num' if args.training_mode == 'epochs' else 'it'

    # if its
    total_its: int = args.num_empochs if args.training_mode == 'epochs' else args.num_its

    # Only the lead worker logs, and only at log_freq/last-it/forced points.
    if (it % log_freq == 0 or is_lead_worker(args.rank) or it == total_its - 1 or force_log) and is_lead_worker(args.rank):
        # - get eval stats
        val_loss, val_acc = valid(args, args.mdl, save_val_ckpt=save_val_ckpt)

        # - save args
        uutils.save_args(args, args_filename='args.json')

        # - print
        args.logger.log('\n')
        args.logger.log(f"{it_or_epoch}={it}: {train_loss=}, {train_acc=}")
        args.logger.log(f"{it_or_epoch}={it}: {val_loss=}, {val_acc=}")

        # - record into stats collector
        args.logger.record_train_stats_stats_collector(it, train_loss, train_acc)
        args.logger.record_val_stats_stats_collector(it, val_loss, val_acc)
        args.logger.save_experiment_stats_to_json_file()
        args.logger.save_current_plots_and_stats()

        # - log to wandb
        if log_to_wandb:
            if it == 0:
                wandb.watch(args.mdl, args.criterion, log="all", log_freq=mdl_watch_log_freq)
            wandb.log(data={'train loss': train_loss,
                            'train acc': train_acc,
                            'val loss': val_loss,
                            'val acc': val_acc},
                      step=it, commit=True)
            # wandb.log(data={'it': it}, step=it, commit=True)
            if it == total_its - 1:
                pass

        # - log to tensorboard
        if log_to_tb:
            log_2_tb_supervisedlearning(args.tb, args, it, train_loss, train_acc, 'train')
            # BUGFIX: the original logged train_loss/train_acc under the
            # 'val' tag; the validation metrics belong here.
            log_2_tb_supervisedlearning(args.tb, args, it, val_loss, val_acc, 'val')

    # - log ckpt
    if (it % ckpt_freq == 0 or it == total_its - 1 or force_log) and is_lead_worker(args.rank):
        save_ckpt(args, args.mdl, args.optimizer)
24,438
def create_code(traits):
    """Assign bits to list of traits.

    Builds a mapping where the INVALID sentinel owns bit 0 (value 1) and
    each trait, in order, owns the next power of two.  An empty or falsy
    `traits` yields only the INVALID entry.
    """
    result = {INVALID: 1}
    if not traits:
        return result
    # Trait at position i (1-based) gets bit value 2**i.
    for position, trait in enumerate(traits, start=1):
        result[trait] = 1 << position
    return result
24,439
def clean(ctx):
    """Run all clean sub-tasks.

    Aggregator task: the body is intentionally empty; the actual work is
    presumably done by pre-tasks attached via a decorator outside this
    view -- TODO confirm.
    """
    pass
24,440
def load_tensorrt_plugin():
    """Load the TensorRT custom-op plugins library.

    Idempotent: uses the module-level ``plugin_is_loaded`` flag to ensure
    the shared library is dlopen'ed at most once per process.  Silently
    does nothing if the library file does not exist on disk.
    """
    global plugin_is_loaded
    lib_path = get_tensorrt_op_path()
    if (not plugin_is_loaded) and os.path.exists(lib_path):
        # Loading the .so registers the plugins with TensorRT as a side
        # effect; the handle itself is not needed afterwards.
        ctypes.CDLL(lib_path)
        plugin_is_loaded = True
24,441
def get_current_table(grid_id: str) -> pd.DataFrame:
    """ Get current data from the grid.

    Args:
        grid_id: Grid ID to retrieve data from.

    Returns:
        pd.DataFrame: Existing grid data as a DataFrame.
        (BUGFIX: the original annotation/docstring claimed a list, but the
        function has always returned ``pd.DataFrame(current_table)``.)

    Raises:
        ValueError: If the incident has no custom field named ``grid_id``.
    """
    current_table: Optional[List[dict]] = demisto.incidents()[0].get("CustomFields", {}).get(grid_id)
    if current_table is None:
        raise ValueError(f"The grid id isn't valid: {grid_id}")

    return pd.DataFrame(current_table)
24,442
def read_graph(filepath):
    """Creates a graph based on the content of the file at given filepath.

    Parameters
    ----------
    filepath : str or Path
        Path to a file containing a comma-separated adjacency matrix.

    Returns
    -------
    networkx.Graph built from the adjacency matrix.
    """
    # BUGFIX: the original passed an anonymous open() handle to np.loadtxt
    # and never closed it; `with` guarantees the file is released.
    with open(filepath, "rb") as handle:
        g_data = np.loadtxt(handle, delimiter=",")
    return nx.from_numpy_matrix(g_data)
24,443
def test_decompose_basic_concat(namespace: TestNamespace, check: CheckFunc) -> None:
    """Test construction of ConcatenatedBits.

    Concatenates three registers (13 + 3 + 7 = 23 bits) and checks the
    decomposition lists the full bit range of each source, lowest bits
    (R2, passed first to ConcatenatedBits) appearing first.
    """
    ref0 = namespace.addArgument("R0", IntType.u(7))
    ref1 = namespace.addArgument("R1", IntType.u(3))
    ref2 = namespace.addArgument("R2", IntType.u(13))
    concat = ConcatenatedBits(ref2.bits, ref1.bits, ref0.bits)
    expected = namespace.parse("R2[0:13]", "R1[0:3]", "R0[0:7]")
    check(namespace, concat, expected)
24,444
def get_seed(seed=None):
    """Return a valid NumPy random seed value.

    Draws a non-negative integer below 2**31 - 1 from a RandomState
    initialized with `seed`, so the result is deterministic for a given
    seed and freshly random when `seed` is None.
    """
    # https://groups.google.com/forum/#!topic/briansupport/9ErDidIBBFM
    generator = np.random.RandomState(seed)
    return generator.randint(0, 2147483647)
24,445
def test_catalog_wrong_action_argument():
    """
    Test if error is raised when wrong action argument is supplied.

    Calling a Catalog instance with an unrecognized action string must
    raise ValueError.
    """
    with pytest.raises(ValueError):
        ctlg = Catalog()
        ctlg('wrong_action')
24,446
def test_init(isolated_runner):
    """Test project initialization.

    Exercises `renku init` end to end: implicit directory creation,
    init into an existing directory, re-init of an existing repo
    (with and without --force), and git-lfs configuration flags.
    """
    runner = isolated_runner

    # 1. the directory should be automatically created
    new_project = Path('test-new-project')
    assert not new_project.exists()
    result = runner.invoke(cli.cli, ['init', 'test-new-project'])
    assert 0 == result.exit_code
    assert new_project.exists()

    # 2. test project directory creation
    os.mkdir('test-project')
    result = runner.invoke(cli.cli, ['init', 'test-project'])
    assert 0 == result.exit_code
    assert os.stat(os.path.join('test-project', '.git'))
    assert os.stat(os.path.join('test-project', '.renku'))

    # 3. test project init from already existing renku repository
    os.chdir('test-project')
    result = runner.invoke(cli.cli, ['init'])
    assert 0 != result.exit_code

    # 4. in case of init failure because of existing .git folder
    #    .renku directory should not exist
    assert not os.path.exists(os.path.join('test-project', '.renku'))

    result = runner.invoke(cli.cli, ['init', '--force'])
    assert 0 == result.exit_code
    assert os.stat(os.path.join('.git'))
    assert os.stat(os.path.join('.renku'))

    # 5. check git lfs init options
    os.chdir('../')
    shutil.rmtree('test-project')
    os.mkdir('test-project')
    os.chdir('test-project')
    result = runner.invoke(cli.cli, ['init', '--no-external-storage'])
    with open('.git/config') as f:
        config = f.read()
    assert 'filter "lfs"' not in config

    result = runner.invoke(cli.cli, ['init', '-S'])
    with open('.git/config') as f:
        config = f.read()
    assert 'filter "lfs"' not in config

    result = runner.invoke(cli.cli, ['init', '--force'])
    with open('.git/config') as f:
        config = f.read()
    assert 'filter "lfs"' in config
24,447
def explain_predictions_best_worst(pipeline, input_features, y_true, num_to_explain=5, top_k_features=3,
                                   include_shap_values=False, metric=None, output_format="text"):
    """Creates a report summarizing the top contributing features for the best and worst points in the dataset as measured by error to true labels.

    XGBoost models and CatBoost multiclass classifiers are not currently supported.

    Arguments:
        pipeline (PipelineBase): Fitted pipeline whose predictions we want to explain with SHAP.
        input_features (ww.DataTable, pd.DataFrame): Input data to evaluate the pipeline on.
        y_true (ww.DataColumn, pd.Series): True labels for the input data.
        num_to_explain (int): How many of the best, worst, random data points to explain.
        top_k_features (int): How many of the highest/lowest contributing feature to include in the table for each data point.
        include_shap_values (bool): Whether SHAP values should be included in the table. Default is False.
        metric (callable): The metric used to identify the best and worst points in the dataset. Function must accept the true labels
            and predicted value or probabilities as the only arguments and lower values must be better. By default, this will be the absolute error for regression problems and cross entropy loss for classification problems.
        output_format (str): Either "text" or "dict". Default is "text".

    Returns:
        str, dict, or pd.DataFrame - A report explaining the top contributing features for the best/worst predictions in the input_features.
            For each of the best/worst rows of input_features, the predicted values, true labels, metric value,
            feature names, prediction contribution, and SHAP Value (optional) will be listed.

    Raises:
        ValueError: if input_features does not have more than twice the requested features to explain.
        ValueError: if y_true and input_features have mismatched lengths.
        ValueError: if an output_format outside of "text", "dict" or "dataframe is provided.
    """
    # Normalize inputs to plain pandas objects.
    input_features = infer_feature_types(input_features)
    input_features = _convert_woodwork_types_wrapper(input_features.to_dataframe())
    y_true = infer_feature_types(y_true)
    y_true = _convert_woodwork_types_wrapper(y_true.to_series())

    # Need at least num_to_explain best rows AND num_to_explain worst rows.
    if not (input_features.shape[0] >= num_to_explain * 2):
        raise ValueError(f"Input features must be a dataframe with more than {num_to_explain * 2} rows! "
                         "Convert to a dataframe and select a smaller value for num_to_explain if you do not have "
                         "enough data.")
    if y_true.shape[0] != input_features.shape[0]:
        raise ValueError("Parameters y_true and input_features must have the same number of data points. Received: "
                         f"true labels: {y_true.shape[0]} and {input_features.shape[0]}")
    if output_format not in {"text", "dict", "dataframe"}:
        raise ValueError(f"Parameter output_format must be either text, dict, or dataframe. Received {output_format}")
    if not metric:
        metric = DEFAULT_METRICS[pipeline.problem_type]

    # Score every row with `metric`; any exception during prediction is
    # surfaced as a PipelineScoreError with the traceback attached.
    try:
        if is_regression(pipeline.problem_type):
            if is_time_series(pipeline.problem_type):
                y_pred = pipeline.predict(input_features, y=y_true).to_series()
            else:
                y_pred = pipeline.predict(input_features).to_series()
            y_pred_values = None
            y_true_no_nan, y_pred_no_nan = drop_rows_with_nans(y_true, y_pred)
            errors = metric(y_true_no_nan, y_pred_no_nan)
        else:
            # Classification: metric works on probabilities, the report also
            # needs the discrete predicted values.
            if is_time_series(pipeline.problem_type):
                y_pred = pipeline.predict_proba(input_features, y=y_true).to_dataframe()
                y_pred_values = pipeline.predict(input_features, y=y_true).to_series()
            else:
                y_pred = pipeline.predict_proba(input_features).to_dataframe()
                y_pred_values = pipeline.predict(input_features).to_series()
            y_true_no_nan, y_pred_no_nan, y_pred_values_no_nan = drop_rows_with_nans(y_true, y_pred, y_pred_values)
            errors = metric(pipeline._encode_targets(y_true_no_nan), y_pred_no_nan)
    except Exception as e:
        tb = traceback.format_tb(sys.exc_info()[2])
        raise PipelineScoreError(exceptions={metric.__name__: (e, tb)}, scored_successfully={})

    errors = pd.Series(errors, index=y_pred_no_nan.index)
    # Lower metric = better, so the head of the sort is "best".
    sorted_scores = errors.sort_values()
    best_indices = sorted_scores.index[:num_to_explain]
    worst_indices = sorted_scores.index[-num_to_explain:]
    index_list = best_indices.tolist() + worst_indices.tolist()

    pipeline_features = pipeline.compute_estimator_features(input_features, y_true).to_dataframe()

    data = _ReportData(pipeline, pipeline_features, input_features, y_true, y_pred, y_pred_values, errors, index_list, metric)

    report_creator = _report_creator_factory(data, report_type="explain_predictions_best_worst",
                                             output_format=output_format, top_k_features=top_k_features,
                                             include_shap_values=include_shap_values, num_to_explain=num_to_explain)
    return report_creator(data)
24,448
def resolve_lookup(
    context: dict, lookup: str, call_functions: bool = True
) -> typing.Any:
    """
    Helper function to extract a value out of a context-dict.

    A lookup string can access attributes, dict-keys, methods without
    parameters and indexes by using the dot-accessor (e.g. ``person.name``).

    For each dotted component the resolution order is:
    dict-style ``[key]`` -> ``getattr`` -> integer index ``[int(key)]``;
    an unresolvable component yields ``None``.  If the final value is a
    zero-argument callable and ``call_functions`` is True, it is called
    and its result returned.

    This is based on the implementation of the variable lookup of the django
    template system:
    https://github.com/django/django/blob/master/django/template/base.py
    """
    current = context
    for bit in lookup.split("."):
        try:
            current = current[bit]
        except (TypeError, AttributeError, KeyError, ValueError, IndexError):
            try:
                current = getattr(current, bit)
            except (TypeError, AttributeError):
                # Reraise if the exception was raised by a @property:
                # the attribute *exists*, so swallowing would hide a bug.
                if not isinstance(current, dict) and bit in dir(current):
                    raise
                try:  # list-index lookup
                    current = current[int(bit)]
                except (
                    IndexError,  # list index out of range
                    ValueError,  # invalid literal for int()
                    KeyError,  # current is a dict without `int(bit)` key
                    TypeError,
                ):  # unsubscriptable object
                    return None
                    # raise LookupError(
                    #     "Failed lookup for key " "[%s] in %r", (bit, current)
                    # )  # missing attribute
    if callable(current) and call_functions:
        try:  # method call (assuming no args required)
            current = current()
        except TypeError:
            signature = inspect.signature(current)  # type: ignore
            try:
                signature.bind()
            except TypeError:  # arguments *were* required
                # but we continue because we might use an attribute on the
                # object instead of calling it
                pass
            else:
                raise
    return current
24,449
def app_config(
    simple_config: Configurator, document: t.IO
) -> t.Generator[Configurator, None, None]:
    """Incremented fixture that loads the DOCUMENT above into the config.

    Registers the OpenAPI spec file from the ``document`` fixture under
    the ``/foo.yaml`` route, then yields the configurator for the test.
    """
    simple_config.pyramid_openapi3_spec(
        document.name, route="/foo.yaml", route_name="foo_api_spec"
    )
    yield simple_config
24,450
def check_actions_tool(tool):
    """2.2.x to 2.3.0 upgrade step checker.

    Returns True if any of the required portal_actions entries is
    missing (i.e. the upgrade step still needs to run), False when all
    of them are present.
    """
    atool = getToolByName(tool, 'portal_actions')
    # (category, action id) pairs the 2.3.0 profile must provide; the
    # original code checked each with its own copy-pasted try/except.
    required_actions = (
        ('user', 'change_password'),
        ('global', 'members_register'),
        ('global', 'search_form'),
        ('global', 'search'),
        ('global', 'syndication'),
    )
    for category, action_id in required_actions:
        try:
            atool[category][action_id]
        except KeyError:
            return True
    return False
24,451
def integer_byte_length(number):
    """
    Number of bytes needed to represent a integer excluding any prefix
    0 bytes.

    :param number:
        Integer value. If num is 0, returns 0.
    :returns:
        The number of bytes in the integer.
    """
    # Ceiling division of the bit length by 8.
    bits = integer_bit_length(number)
    return (bits + 7) // 8
24,452
def ones(input_dim, output_dim, name=None):
    """All ones.

    Create a (input_dim, output_dim) float32 tf.Variable filled with ones.
    (The original docstring said "All zeros", which contradicted the
    tf.ones call below.)
    """
    initial = tf.ones((input_dim, output_dim), dtype=tf.float32)
    return tf.Variable(initial, name=name)
24,453
def enrichment_score2(mat, idx, line_width, norm_factors, distance_range=(20, 40),
                      window_size=10, stats_test_log=({}, {})):
    """
    Calculate the enrichment score of a stripe given its location, width and the contact matrix

    Parameters:
    ----------
    mat: np.array (2D)
        Contact matrix generated with strata2horizontal() or strata2vertical()

    idx: int
        The location (index) of the candidate stripe

    line_width: int
        Stripe width (# of bins)

    norm_factors: np.array (1D)
        The vector of normalization factors of the contact map.

    distance_range: tuple
        The distance range (# of bins) for the diagonal for calculating the scores

    window_size: int
        Window size (# of bins)

    stats_test_log: tuple of dict
        Previous log for accelerating statistical tests.  Mutated in place,
        so repeated calls share and grow the same cache.

    Returns
    ----------
    new_mat: np.array (1D)
        The enrichment score (-log10 Poisson p-value) of each pixel along
        the candidate stripe
    """
    _calculated_values, _poisson_stats = stats_test_log
    half = int(line_width // 2)
    x1, x2 = idx - half, idx - half + line_width
    # Guarantee the stripe slice is at least one bin wide.
    if x1 == x2:
        x2 += 1
    new_mat = np.zeros((distance_range[1] - distance_range[0],))
    for j in range(distance_range[0], distance_range[1]):
        y = j - distance_range[0]
        # Observed signal: median over the stripe window at this distance.
        _min_temp = subsetNpMatrix(mat, (x1, x2),
                                   (j - window_size - half, j + window_size + half + 1))
        line_min = np.median([_min_temp])
        # print(_min_temp, line_min)

        # Expected signal: the stronger of the two flanking neighborhoods.
        _inner_neighbor = subsetNpMatrix(mat, (idx - half - window_size, x1),
                                         (j - window_size - half, j + window_size + half + 1))
        _outer_neighbor = subsetNpMatrix(mat, (x2 + 1, idx + half + window_size + 1),
                                         (j - window_size - half, j + window_size + half + 1))

        if _outer_neighbor.size == 0 or _inner_neighbor.size == 0:
            continue
        neighbor_mean = max(np.mean(_inner_neighbor), np.mean(_outer_neighbor))

        # There should be a lower bound for the expected value,
        # otherwise situations like (exp=0.01 and obs=0.02) would also be significant
        # Currently we can set this to 0 until KR norm factors can be loaded
        lower_b = 1 / norm_factors[idx]  # This should be (1 / KR_norm_factors) if we refer to JuiceTools HICCUPS

        _exp = max(neighbor_mean, lower_b)
        _obs = int(line_min)  # the same as floor function when line_min > 0

        # _calculated_values: store all calculated exp-obs pairs in dictionary, in which keys are obs since
        # they are always integers. Each _calculated_values[obs] is a binary tree for quick searching,
        # and each tree leaf is a exp value corresponding to the obs value. Since exp values are float,
        # there is also an integer index attached for searching the exp-obs in dictionary _poisson_stats
        # (float cannot be dict keys).
        # _poisson_stats: record all calculated result in a dict. It should be
        # _poisson_stats[(_exp, _obs)] = -log10(p). But _exp is a float and cannot be a dict key, we give
        # each _exp a unique index and use the index.
        # stats_log: record all p value calculation. Just for benchmarking. Delete this when publishing.
        # global _calculated_values, _poisson_stats  # , stats_log
        tolerance = 0.02

        # check if obs is a value calculated before
        if _obs in _calculated_values:
            # Find the nearest _exp values which were calculated before
            # One larger, one smaller
            (_upper, _lower) = _calculated_values[_obs].search(_exp)
            # If _upper is close enough to _exp, directly use the p value from (_upper-_obs) pair
            if _upper is not None and (_upper.key - _exp) < tolerance * _exp:
                _exp = _upper.key
                _exp_idx = _upper.val  # The integer index for _upper (float cannot be dict keys!)
                mlog_p_val = _poisson_stats[(_exp_idx, _obs)]
            else:
                # Else, calculate p value for _obs-_exp pair and store them in _calculated_values and _poisson_stats
                _exp_idx = _calculated_values[_obs].insert(_exp)  # insert to the binary tree and return an index
                Poiss = poisson(_exp)
                p_val = 1 - Poiss.cdf(_obs)
                if 0 < p_val < 1:
                    mlog_p_val = - np.log10(p_val)
                else:  # Some p values are too small, -log(0) will return an error, so we use -1 to temporarily replace
                    mlog_p_val = -1
                _poisson_stats[(_exp_idx, _obs)] = mlog_p_val
                # stats_log.append([_exp, _obs, mlog_p_val])
        else:
            # If _obs is not used before, generate a new binary tree _calculated_values[_obs]
            _calculated_values[_obs] = AVLTree()
            _exp_idx = _calculated_values[_obs].insert(_exp)
            # calculate p value for _obs-_exp pair and store them in _calculated_values and _poisson_stats
            Poiss = poisson(_exp)
            p_val = 1 - Poiss.cdf(_obs)
            if 0 < p_val < 1:
                mlog_p_val = - np.log10(p_val)
            else:  # Some p values are too small, -log(0) will return an error, so we use -1 to temporarily replace
                mlog_p_val = -1
            _poisson_stats[(_exp_idx, _obs)] = mlog_p_val
            # stats_log.append([_exp, _obs, mlog_p_val])

        # Store enrichment score in new_mat
        new_mat[y] = mlog_p_val

    new_mat[new_mat < 0] = np.max(new_mat)  # Replace all "-1"s with the largest -log(p)
    return new_mat
24,454
def paginate(data, page=1, per_page=None):
    """Create a paginated response of the given query set.

    Arguments:
        data -- A flask_mongoengine.BaseQuerySet instance
    """
    # Any falsy per_page (None, 0) falls back to the app-wide default.
    if not per_page:
        per_page = app.config['DEFAULT_PER_PAGE']

    page_obj = data.paginate(page=page, per_page=per_page)
    return {
        'data': build_pagination_data(page_obj),
        'meta': build_pagination_metadata(page_obj),
    }
24,455
def test_cli_help(help_option, capfd):
    """
    Test for the help option : -h, --help

    The CLI parser exits when a help flag is given (argparse behavior),
    so SystemExit is expected and the help text is checked on stdout.

    :param help_option: the help flag under test (parametrized fixture)
    :param capfd: pytest fixture capturing stdout/stderr
    :return:
    """
    args = [__prog__, help_option]
    cli = PlaybookGrapherCLI(args)
    with pytest.raises(SystemExit) as exception_info:
        cli.parse()
    out, err = capfd.readouterr()
    assert "Make graph from your Playbook." in out
24,456
def compute_mean_wind_dirs(res_path, dset, gids, fracs):
    """
    Compute mean wind directions for given dset and gids.

    Uses the circular (vector) mean: directions are decomposed into
    sin/cos components, averaged with `fracs` as per-gid weights, and
    recombined via arctan2 so that e.g. 350 and 10 degrees average to 0,
    not 180.  Results are mapped into [0, 360).
    """
    with Resource(res_path) as f:
        wind_dirs = np.radians(f[dset, :, gids])
    sin = np.mean(np.sin(wind_dirs) * fracs, axis=1)
    cos = np.mean(np.cos(wind_dirs) * fracs, axis=1)

    mean_wind_dirs = np.degrees(np.arctan2(sin, cos))
    # arctan2 returns (-180, 180]; shift negatives into [0, 360).
    mask = mean_wind_dirs < 0
    mean_wind_dirs[mask] += 360

    return mean_wind_dirs
24,457
def plot_area_and_score(samples: SampleList, compound_name: str, include_none: bool = False):
    """
    Plot the peak area and score for the compound with the given name.

    Draws a horizontal log-scale bar chart of peak areas with the score
    for each sample overlaid as a scatter on a twin x-axis.

    :param samples: A list of samples to plot on the chart
    :param compound_name: Name of the compound to look up in each sample.
    :param include_none: Whether samples where the compound was not found should be plotted.

    :return: The figure, the area axis, and the score (twin) axis.
    """
    peak_areas, scores = samples.get_areas_and_scores(compound_name, include_none)

    fig, ax1 = plt.subplots()

    # Space the bars out (1.5 units apart) and centre each bar on its slot.
    y_positions = numpy.arange(len(peak_areas))
    y_positions = [x * 1.5 for x in y_positions]
    bar_width = 0.5

    offset = bar_width / 2
    area_y_pos = [x + offset for x in y_positions]
    area_bar = ax1.barh(
        area_y_pos,
        list(peak_areas.values()),
        label="Peak Area",
        color="tab:orange",
        height=bar_width,
    )
    ax1.set_xscale("log")
    ax1.set_xlabel("Log10(Peak Area)")

    # Scores share the y-axis but get their own (linear) x-axis on top.
    ax2 = ax1.twiny()
    score_scatter = ax2.scatter(list(scores.values()), area_y_pos, label="Score", color="tab:blue")
    ax2.set_xlabel("Score")

    # Invisible bar so "Score" appears in ax1's combined legend.
    ax1.barh([], [], label="Score", color="tab:blue", height=bar_width)

    ax1.set_yticks(y_positions)
    ax1.set_yticklabels(list(peak_areas.keys()))

    fig.suptitle(f"Peak Area and Score for {compound_name}\n")
    fig.set_size_inches(A4_landscape)
    plt.tight_layout()
    plt.subplots_adjust(top=0.9)

    ax1.legend()

    return fig, ax1, ax2
24,458
def _raise_on_error(data: dict) -> None: """Raise appropriately when a returned data payload contains an error.""" if data["code"] == 0: return raise_error(data)
24,459
def get_strides(fm: NpuFeatureMap) -> NpuShape3D:
    """Calculates STRIDE_C/Y/X.

    If the feature map carries explicit strides, those win; otherwise the
    strides are derived from shape, element size and layout (NHWC, or the
    brick format where the channel dimension is padded to multiples of 16).
    """
    if fm.strides is not None:
        return fm.strides
    elem_size = fm.data_type.size_in_bytes()
    if fm.layout == NpuLayout.NHWC:
        # Contiguous channels-last: C is densest, then X, then Y.
        stride_c = elem_size
        stride_x = fm.shape.depth * stride_c
        stride_y = fm.shape.width * stride_x
    else:
        # Brick layout: 16-channel bricks; depth rounded up to 16 for Y.
        stride_x = 16 * elem_size
        stride_c = stride_x * fm.shape.width
        stride_y = elem_size * fm.shape.width * numeric_util.round_up(fm.shape.depth, 16)
    # NOTE(review): field names reuse NpuShape3D - depth/height/width carry
    # stride_c/stride_y/stride_x respectively.
    return NpuShape3D(depth=stride_c, height=stride_y, width=stride_x)
24,460
def gram_linear(x):
    """Compute Gram (kernel) matrix for a linear kernel.

    Args:
      x: A num_examples x num_features matrix of features.

    Returns:
      A num_examples x num_examples Gram matrix of examples.
    """
    # Linear kernel is just the matrix of pairwise inner products.
    return x @ x.T
24,461
def enable_maintenance(*app_list):
    """Enable a maintenance page for publishers and serve a 503.

    Only to be run on loadbalancers. (The original had this note as a second,
    dead bare-string statement; it is folded into the docstring here.)

    For each named app the nginx maintenance include is replaced with
    ``set $maintenance 1;`` and nginx is reloaded.
    """
    if not fabric.contrib.files.exists(maintenance_config):
        abort("Sorry this task can only currently be run on loadbalancers")
    puppet.disable("Maintenance mode enabled")

    # Derive the environment URL suffix from the jumpbox hostname.
    # BUG FIX: str.lstrip() strips a *character set*, not a prefix, so the old
    # lstrip("jumpbox.") could also eat leading characters of the environment
    # name itself. Strip the literal prefix instead.
    gateway = fabric.state.env.gateway
    prefix = "jumpbox."
    env_url_post = gateway[len(prefix):] if gateway.startswith(prefix) else gateway

    app_list = list(app_list)
    if not valid_apps.issuperset(app_list):
        print("{} are not valid apps for this maintenance.".format(
            list(set(app_list).difference(valid_apps))))
        exit(1)

    for app in app_list:
        app_hostname = "{}.{}".format(app, env_url_post)
        app_config_file = "/etc/nginx/sites-enabled/{}".format(app_hostname)
        maintenance_setting = "set $maintenance 1;"
        fabric.contrib.files.sed(
            app_config_file,
            "include includes/maintenance.conf;",
            maintenance_setting,
            use_sudo=True,
            backup=".maint-bak"
        )

    # Clean up sed backups and apply the new config.
    with cd('/etc/nginx/sites-enabled/'):
        sudo('rm -f *.maint-bak')
        sudo('service nginx reload')
24,462
def to_feature(shape, properties={}):
    """
    Create a GeoJSON Feature object for the given shapely.geometry :shape:.

    Optionally give the Feature a :properties: dict.
    """
    # Build a one-feature collection and pull the single feature back out.
    feature = to_feature_collection(shape)["features"][0]
    feature["properties"] = properties

    # Strip fields that are redundant for a standalone feature.
    feature.pop("id", None)
    if "bbox" in feature and isinstance(shape, shapely.geometry.Point):
        del feature["bbox"]

    return dict(feature)
24,463
def test_py30303_disc():
    """Smoke-test the d30303 discovery cycle: construct, probe, tear down."""
    scanner = py30303_disc.d30303()
    scanner.send_discovery()
    scanner.end_discovery()
24,464
def read_responses(file):
    """
    Read dialogue samples from a dataset file.

    The file is a sequence of samples delimited by ``<|endoftext|>``;
    whatever precedes the first delimiter is discarded.

    :param file: str, file path to the dataset
    :return: list, one string per sample contained in the file
    """
    # Pin the encoding so results don't depend on the platform locale;
    # the context manager releases the handle before we return.
    with open(file, 'r', encoding='utf-8') as f:
        samples = f.read().split('<|endoftext|>')
    # Drop the (possibly empty) chunk before the first delimiter.
    return samples[1:]
24,465
def build_parametric_ev(data, onset, name, value, duration=None,
                        center=None, scale=None):
    """Make design info for a multi-column constant-value ev.

    Parameters
    ----------
    data : DataFrame
        Input data; must have "run" column and any others specified.
    onset : string
        Column name containing event onset information.
    name : string
        Condition name to use for this ev.
    value : string
        Column name containing event amplitude information.
    duration : string, float, or ``None``
        Column name containing event duration information, or a value
        to use for all events, or ``None`` to model events as impulses.
    center : float, optional
        Value to center the ``value`` column at before scaling. If absent,
        center at the mean across runs.
    scale : callable, optional
        Function to scale the centered value column with.

    Returns
    -------
    ev : DataFrame
        Returned DataFrame will have "run", "onset", "duration", "value",
        and "condition" columns.

    """
    ev = data[["run", onset, value]].copy()
    ev.columns = ["run", "onset", "value"]

    # Center at the requested value, or demean across runs when no center
    # is given; then optionally rescale.
    centered = ev["value"] - (ev.value.mean() if center is None else center)
    ev["value"] = centered if scale is None else scale(centered)

    # Tag every event with the condition name.
    ev["condition"] = name

    # Attach duration information (impulse / constant / column-driven).
    return _add_duration_information(data, ev, duration)
24,466
def test_alert_schedule(cinq_test_service):
    """
    Test whether the auditor respects the alert schedule
    """
    # Set up a mock AWS account and S3 support for the test service.
    setup_info = setup_test_aws(cinq_test_service)
    account = setup_info['account']

    prep_s3_testing(cinq_test_service)

    # Add resources
    client = aws_get_client('s3')
    bucket_name = dbconfig.get('test_bucket_name', NS_CINQ_TEST, default='testbucket')
    client.create_bucket(Bucket=bucket_name)

    # Collect resources
    collect_resources(account=account, resource_types=['s3'])

    # Initialize auditor
    auditor = MockRequiredTagsAuditor()

    # Test 1 --- The auditor should not alert again as we are not at the next scheduled alert time
    auditor.run()
    # First run produces a notice for the untagged bucket.
    assert auditor._cinq_test_notices
    auditor.run()
    # Second run within the schedule window must stay silent.
    assert not auditor._cinq_test_notices
24,467
def time_aware_indexes(t, train_size, test_size, granularity, start_date=None):
    """Return a list of indexes that partition the list t by time.

    Sorts the list of dates t before dividing into training and testing
    partitions, ensuring a 'history-aware' split in the ensuing
    classification task.

    Args:
        t (np.ndarray): Array of timestamp tags.
        train_size (int): The training window size W (in τ).
        test_size (int): The testing window size Δ (in τ).
        granularity (str): The unit of time τ, used to denote the window
            size. Acceptable values are 'year|quarter|month|week|day'.
        start_date (date): The date to begin partitioning from (eg. to
            align with the start of the year).

    Returns:
        (list, list): Indexing for the training partition.
            List of indexings for the testing partitions.
    """
    # Stable argsort of the timestamps: original positions in
    # chronological order.
    order = sorted(range(len(t)), key=lambda pos: t[pos])
    dates = [t[pos] for pos in order]

    # Anchor the first window at the supplied date or the earliest sample.
    window_start = utils.resolve_date(start_date) if start_date else dates[0]

    # Slice out the training partition.
    boundary = window_start + get_relative_delta(train_size, granularity)
    cut = bisect.bisect_left(dates, boundary)
    train = order[:cut]

    # Slice out successive testing partitions until the data is exhausted.
    tests = []
    while cut < len(order):
        boundary += get_relative_delta(test_size, granularity)
        lo, cut = cut, bisect.bisect_left(dates, boundary)
        tests.append(order[lo:cut])

    return train, tests
24,468
def send_invite_mail(invite, request):
    """
    Send an email invitation to user not yet registered in the system.

    :param invite: ProjectInvite object
    :param request: HTTP request
    :return: Amount of sent email (int)
    """
    expiry = localtime(invite.date_expire).strftime('%Y-%m-%d %H:%M')

    # Assemble the message: invitation body, optional issuer message, footer.
    body = ''.join(
        [
            get_invite_body(
                project=invite.project,
                issuer=invite.issuer,
                role_name=invite.role.name,
                invite_url=build_invite_url(invite, request),
                date_expire_str=expiry,
            ),
            get_invite_message(invite.message),
            get_email_footer(),
        ]
    )
    subject = get_invite_subject(invite.project)
    return send_mail(subject, body, [invite.email], request)
24,469
def copy_optimizer_params_to_model(named_params_model, named_params_optimizer):
    """Utility function for optimize_on_cpu and 16-bits training.

    Copy the parameters optimized on CPU/RAM back to the model on GPU.

    Both arguments are iterables of ``(name, parameter)`` pairs and must be
    aligned name-for-name in the same order.

    Raises:
        ValueError: if the two sequences are not aligned name-for-name.
    """
    for (name_opti, param_opti), (name_model, param_model) in zip(
        named_params_optimizer, named_params_model
    ):
        if name_opti != name_model:
            logger.error("name_opti != name_model: {} {}".format(name_opti, name_model))
            # Carry the mismatch detail in the exception, not only the log
            # (the original raised a bare, message-less ValueError).
            raise ValueError(
                "name_opti != name_model: {} {}".format(name_opti, name_model)
            )
        param_model.data.copy_(param_opti.data)
24,470
def enable_extra_meshes():
    """
    Enable the Blender "Add Mesh Extra Objects" addon.

    https://docs.blender.org/manual/en/3.0/addons/add_mesh/mesh_extra_objects.html
    """
    enable_addon(addon_module_name="add_mesh_extra_objects")
24,471
def get_credentials(quota_project_id=None):
    """Obtain credentials object from json file and environment configuration."""
    credentials_path = os.environ.get("GOOGLE_APPLICATION_CREDENTIALS")

    with open(credentials_path, "r", encoding="utf8") as file_handle:
        credentials_dict = json.loads(file_handle.read())

    # Service-account keys carry a client_email; workload-identity
    # (identity pool) configurations carry an audience.
    if "client_email" in credentials_dict:
        return service_account.Credentials.from_service_account_file(credentials_path)
    if "audience" in credentials_dict:
        return identity_pool.Credentials.from_info(credentials_dict)

    # Neither recognised shape: fall back to Application Default Credentials.
    credentials, _project = google.auth.default(quota_project_id=quota_project_id)
    return credentials
24,472
def create_model_file(path, dz, vp, vs, rho):
    """
    Write a 1D model to an ASCII file, to be used as input by the
    Computer Programs in Seismology.

    :param path: destination file path
    :param dz: layer thicknesses
    :param vp: P-wave velocities
    :param vs: S-wave velocities
    :param rho: densities
    """
    # Q factors (qp, qs) and eta coefficients default to zero; reference
    # frequencies (frefp, frefs) default to 1 Hz — same per-layer length as dz.
    zeros = np.zeros_like(dz)
    ones = np.ones_like(dz)
    columns = np.vstack((dz, vp, vs, rho, zeros, zeros, zeros, zeros, ones, ones))

    # Context manager guarantees the handle is closed even if a write fails
    # (the original open/close pair leaked the handle on error).
    with open(path, mode='w') as f:
        f.write(MODEL_HEADER)
        # One line per layer: thickness, velocities, density, Q/eta/fref.
        for col in columns.T:
            f.write('\n')
            col.tofile(f, sep=' ')
24,473
async def test_file_inspection(make_file, loop):
    """
    Checks:
        Basic test file inspection
    """
    # NOTE(review): assumes the ``make_file`` fixture writes this content to
    # TEST_DIR / "test_file_read.txt" — confirm against the fixture.
    content = """
    RAMADA RAMZAN
    xd bobby pin bobby pun
    """
    make_file(content)

    buffer = []
    # inspect() yields the file contents chunk by chunk; re-joining the
    # chunks must reproduce the file exactly.
    async for chunk in inspect(TEST_DIR / "test_file_read.txt", loop):
        buffer.append(chunk)

    actual = "".join(buffer)
    assert actual == content
24,474
def update():
    """Update the feature with updates committed to develop.

    This will merge current develop into the current branch.
    """
    branch = git.current_branch(refresh=True)
    develop = conf.get('git.devel_branch', 'develop')

    # Refuse to run on anything that is not a feature branch.
    common.assert_branch_type('feature')

    # Bring develop up to date first, then merge it into the feature branch.
    common.git_checkout(develop)
    common.git_pull(develop)
    common.git_checkout(branch.name)
    common.git_merge(branch.name, develop)
24,475
def binary_n(total_N, min_n=50):
    """
    Creates a list of values by successively halving the total length total_N
    until the resulting value is less than min_n.
    Non-integer results are rounded down.

    Args:
        total_N (int): total length
    Kwargs:
        min_n (int): minimal length after division
    Returns:
        list of integers:
            total_N/2, total_N/4, total_N/8, ... until total_N/2^i < min_n
    """
    # Number of halvings that keep the result >= min_n.
    halvings = int(np.floor(np.log2(1.0 * total_N / min_n)))
    return [int(np.floor(1.0 * total_N / 2 ** k)) for k in range(1, halvings + 1)]
24,476
def thresholding(pred, label, thres):
    """
    Binarise predictions and labels at each threshold and collect the
    confusion counts.

    For every cutoff in ``thres`` the entries of ``pred`` and ``label`` are
    mapped to True if strictly greater than the cutoff, and the resulting
    confusion values are accumulated.

    Returns a (len(thres), 4) array of confusion counts.
    """
    flat = []
    for cutoff in thres:
        flat.extend(confusion(pred > cutoff, label > cutoff))
    return np.array(flat).reshape(-1, 4)
24,477
def WriteWavFile(signal, sample_rate, file_name, bitdepth=16, normalize=True):
    """Write a .wav file from a numpy array.

    Args:
      signal: 2-dimensional numpy array, of size (num_samples, num_channels).
      sample_rate: int. sample rate of the signal in Hz.
      file_name: string. name of the destination file.
      bitdepth: int. bitdepth in bits (default 16).
      normalize: boolean. if set to True, scale the data to the [-1, 1] range
          before writing.
    """
    if signal.dtype == numpy.uint8 or signal.dtype == numpy.int16:
        # Already quantized: keep the data and derive the bitdepth from it.
        bitdepth = signal.dtype.itemsize * 8
        scaled_signal = signal
    else:
        scaled_signal = Quantize(signal, bitdepth, normalize=normalize)
    if scaled_signal.ndim == 1:
        num_channels = 1
    else:
        num_channels = scaled_signal.shape[1]

    # Integer division: struct.pack('<...H...') rejects floats on Python 3.
    bytes_per_sample = bitdepth // 8

    # Compute the total size of the output .wav file, minus the size of the
    # first two fields of the RIFF header.
    # RIFF Format.
    total_size = _RIFF_FORMAT_DESCRIPTOR_SIZE
    # 'fmt ' chunk.
    total_size += _FMT_CHUNK_HEADER_SIZE + _FMT_CHUNK_DATA_SIZE
    # 'data' chunk.
    total_size += _DATA_CHUNK_HEADER_SIZE + scaled_signal.nbytes

    # BUG FIX: the original used the Python 2 ``file()`` builtin and text
    # mode 'w', which fails on Python 3 and corrupts binary data via newline
    # translation; write bytes through a context manager instead.
    with open(file_name, 'wb') as f:
        f.write(b'RIFF')
        f.write(struct.pack('<L', total_size))
        f.write(b'WAVEfmt ')
        bitrate = sample_rate * num_channels * bytes_per_sample
        block_align = num_channels * bytes_per_sample
        # 16 = PCM fmt chunk size, 1 = PCM format tag.
        f.write(struct.pack('<LHHLLHH', 16, 1, num_channels, sample_rate,
                            bitrate, block_align, bitdepth))
        f.write(b'data')
        f.write(struct.pack('<L', scaled_signal.nbytes))
        scaled_signal.tofile(f)
24,478
def set_pay_to_address_name_loop(context: X12ParserContext, segment_data: Dict) -> None:
    """
    Sets the Billing Provider Pay to Address Name Loop 2010AB

    :param context: The X12Parsing context which contains the current loop and transaction record.
    :param segment_data: The current segment data
    """
    # Only applicable while parsing within a 2010A* loop.
    if "loop_2010a" not in context.loop_name:
        return

    billing_provider = _get_billing_provider(context)
    # Create the 2010AB container on first use, then enter it.
    pay_to_address_loop = billing_provider.setdefault(
        TransactionLoops.BILLING_PROVIDER_PAY_TO_ADDRESS,
        {"ref_segment": [], "per_segment": []},
    )
    context.set_loop_context(
        TransactionLoops.BILLING_PROVIDER_PAY_TO_ADDRESS, pay_to_address_loop
    )
24,479
def unicode_test(request, oid):
    """Simple view to test funky characters from the database."""
    context = {'funky': News.objects.using('livewhale').get(pk=oid)}
    return render(request, 'bridge/unicode.html', context)
24,480
def _field_to_schema_object(field: BaseType, apistrap: Optional[Apistrap]) -> Optional[Dict[str, Any]]:
    """
    Convert a field definition to OpenAPI 3 schema.

    :param field: the field to be converted
    :param apistrap: the extension used for adding reusable schema definitions
    :return: a schema, or None if the field kind is not recognised
    """
    # NOTE: this isinstance chain is order-sensitive — more specific types
    # (DiscriminatedModelType before PolyModelType; Model/List/Dict/String
    # before the final BaseType fallback) must be tested first.
    if isinstance(field, ModelType):
        return _model_field_to_schema_object(field, apistrap)
    elif isinstance(field, ListType):
        if isinstance(field.field, ModelType):
            return _model_array_to_schema_object(field, apistrap)
        elif isinstance(field.field, BaseType):
            return _primitive_array_to_schema_object(field)
    elif isinstance(field, DictType):
        if isinstance(field.field, ModelType):
            return _model_dict_to_schema_object(field, apistrap)
        elif isinstance(field.field, UnionType):
            return _union_dict_to_schema_object(field, apistrap)
        elif isinstance(field.field, ListType) and isinstance(field.field.field, ModelType):
            return _dict_of_model_lists_to_schema_object(field, apistrap)
        elif isinstance(field.field, BaseType):
            return _primitive_dict_to_schema_object(field)
    elif isinstance(field, StringType):
        return _string_field_to_schema_object(field, apistrap)
    elif isinstance(field, AnyType):
        # AnyType maps to the empty (unconstrained) schema.
        return {}
    elif isinstance(field, UnionType):
        return _union_field_to_schema_object(field, apistrap)
    elif isinstance(field, DiscriminatedModelType):
        return _discriminated_model_field_to_schema_object(field, apistrap)
    elif isinstance(field, PolyModelType):
        return _poly_model_field_to_schema_object(field, apistrap)
    elif isinstance(field, BaseType):
        # Fallback for all remaining primitive schematics types.
        return _primitive_field_to_schema_object(field)

    # Unrecognised field kind (or a List/Dict with an unsupported inner
    # field): nothing to emit.
    return None
24,481
def linux_gcc_name():
    """Returns the name of the `gcc` compiler.

    Might happen that we are cross-compiling and the compiler has a longer
    name, taken from the ``CC`` environment variable.

    Returns:
        str: Name of the `gcc` compiler, or None if none is runnable.
    """
    def _runnable(compiler):
        # Run `<compiler> --version` to completion. The original used bare
        # subprocess.Popen, whose return value is always truthy, never waited
        # on the child, and let FileNotFoundError escape when the binary was
        # missing instead of returning None.
        try:
            subprocess.run(
                [compiler, "--version"],
                stdout=subprocess.DEVNULL,
                stderr=subprocess.DEVNULL,
                check=True,
            )
            return True
        except (OSError, subprocess.CalledProcessError):
            return False

    cc_env = os.getenv('CC')
    if cc_env is not None and _runnable(cc_env):
        return cc_env
    return "gcc" if _runnable("gcc") else None
24,482
def drop_test(robot, *, z_rot: float, min_torque: bool, initial_height: float = 1.) -> Dict[str, Any]:
    """Set up a drop test: the robot starts stationary at ``initial_height``
    (rotated by ``z_rot`` about the vertical axis) and the objective penalises
    foot slip, plus torque when ``min_torque`` is set.

    Params which have been tested for this task:
        nfe = 20, total_time = 1.0, vary_timestep_with=(0.8,1.2),
        5 mins for solving
    if min_torque is True, quite a bit more time is needed as IPOPT refines
    things
    """
    nfe = len(robot.m.fe)
    ncp = len(robot.m.cp)

    tested_models = ('3D monoped', '3D biped', '3D quadruped', '3D prismatic monoped')
    if not robot.name in tested_models:
        visual.warn(
            f'This robot configuration ("{robot.name}") hasn\'t been tested!\n'
            f'Tested models are: {tested_models}')

    body = robot['base_B'] if robot.name == '3D quadruped' else robot['base']

    # start at the origin
    body['q'][1, ncp, 'x'].fix(0)
    body['q'][1, ncp, 'y'].fix(0)
    body['q'][1, ncp, 'z'].fix(initial_height)

    # fix initial angle
    for link in robot.links:
        for ang in ('phi', 'theta'):
            link['q'][1, ncp, ang].fix(0)

        link['q'][1, ncp, 'psi'].fix(z_rot)

    # start stationary
    for link in robot.links:
        for q in link.pyomo_sets['q_set']:
            link['dq'][1, ncp, q].fix(0)

    # init to y plane
    for link in robot.links:
        for ang in ('phi', 'theta'):
            link['q'][:, :, ang].value = 0

        link['q'][:, :, 'psi'].value = z_rot

    # legs slightly forward at the end — the upper/lower link-name pairs
    # depend on which robot model this is
    uplopairs = (('upper', 'lower'),) if robot.name == '3D monoped' \
        else (('UL', 'LL'), ('UR', 'LR')) if robot.name == '3D biped' \
        else (('UFL', 'LFL'), ('UFR', 'LFR'), ('UBL', 'LBL'), ('UBR', 'LBR')) if robot.name == '3D quadruped' \
        else tuple()  # <- iterating over this will result in the body not being evaluated

    for upper, lower in uplopairs:
        # back legs of the quadruped (second letter 'B') angle the other way
        ang = 0.01 if not (
            robot.name == '3D quadruped' and upper[1] == 'B') else -0.01
        robot[upper]['q'][nfe, ncp, 'theta'].setlb(ang)
        robot[lower]['q'][nfe, ncp, 'theta'].setub(-ang)

    # but not properly fallen over
    body['q'][nfe, ncp, 'z'].setlb(0.2)

    # objective: reduce CoT, etc
    utils.remove_constraint_if_exists(robot.m, 'cost')

    torque_cost = torque_squared_penalty(robot)
    pen_cost = feet_penalty(robot)
    robot.m.cost = Objective(expr=(torque_cost if min_torque else 0) + 1000*pen_cost)

    return {'torque': torque_cost, 'penalty': pen_cost}
24,483
def login(api, key, secret):
    """
    Authenticates with Coach.
    Get your API key here: https://coach.lkuich.com/

    On success the credentials are written to ``creds.json`` inside the
    config folder.
    """
    try:
        # The first five characters of the API key double as the account id
        # (renamed from ``id`` to avoid shadowing the builtin).
        api_id = api[0:5]
        profile = coach.get_profile(api, api_id)
    except Exception as e:
        click.echo(f"Failed to authenticate:\n{e}")
        return

    if profile is None:
        return
    if 'bucket' not in profile:
        click.echo(f"Failed to authenticate, invalid API/ID: {api_id}")
        return

    if not os.path.exists(config_folder):
        os.mkdir(config_folder)

    creds = os.path.join(config_folder, 'creds.json')
    click.echo(f"Storing credentials in: {creds}")
    # json.dump replaces the manual dumps+write; the redundant close()
    # inside the with-block is gone — the context manager handles it.
    with open(creds, 'w') as creds_file:
        json.dump(
            {
                'api': api,
                'key': key,
                'secret': secret,
                'bucket': profile['bucket'],
                'id': api_id,
            },
            creds_file,
        )
24,484
def get_user_list(
    *, client: Client, an_enum_value: List[AnEnum], some_date: Union[date, datetime],
) -> Union[
    List[AModel], HTTPValidationError,
]:
    """Get a list of things.

    :param client: configured API client supplying base URL and headers
    :param an_enum_value: enum values, serialised by value into query params
    :param some_date: date or datetime, serialised via isoformat()
    :raises ApiResponseError: on any status other than 200 or 422
    """
    url = "{}/tests/".format(client.base_url)
    headers: Dict[str, Any] = client.get_headers()

    json_an_enum_value = [item.value for item in an_enum_value]

    # Both date and datetime serialise via isoformat(); the original had an
    # isinstance branch whose two arms were identical.
    json_some_date = some_date.isoformat()

    params: Dict[str, Any] = {
        "an_enum_value": json_an_enum_value,
        "some_date": json_some_date,
    }

    response = httpx.get(url=url, headers=headers, params=params,)

    if response.status_code == 200:
        return [AModel.from_dict(item) for item in cast(List[Dict[str, Any]], response.json())]
    if response.status_code == 422:
        return HTTPValidationError.from_dict(cast(Dict[str, Any], response.json()))
    else:
        raise ApiResponseError(response=response)
24,485
def download_from_s3(source_path, outdir, s3_client=None):
    """Download a file from CreoDIAS S3 storage to the given location
    (works only when used from a CreoDIAS VM).

    Parameters
    ----------
    source_path : str
        CreoDIAS path to the S3 object, e.g. ``/eodata/...``.
    outdir : str
        Directory into which the product folder is written.
    s3_client : optional
        Pre-configured boto3 S3 client; a default client for
        data.cloudferro.com is created when omitted.
    """
    import boto3
    from botocore.client import Config
    import os
    from creodias_finder.creodias_storage import S3Storage

    if not s3_client:
        s3_client = boto3.client(
            's3',
            endpoint_url='http://data.cloudferro.com/',
            use_ssl=False,
            aws_access_key_id='access',
            aws_secret_access_key='secret',
            config=Config(
                signature_version='s3',
                connect_timeout=60,
                read_timeout=60,
            )
        )
    storage_client = S3Storage(s3_client)

    # BUG FIX: str.lstrip('/eodata/') strips any leading run of the
    # characters {'/', 'e', 'o', 'd', 'a', 't'}, which can also eat the
    # start of the product path. Strip the literal prefix instead.
    prefix = '/eodata/'
    if source_path.startswith(prefix):
        source_path = source_path[len(prefix):]
    else:
        source_path = source_path.lstrip('/')

    product_folder = source_path.split('/')[-1]
    storage_client.download_product('DIAS', source_path, os.path.join(outdir, product_folder))
24,486
def old_func5(self, x):
    """Summary.

    Bizarre indentation.
    """
    # Identity: returns the argument untouched. NOTE(review): the odd
    # docstring suggests this is a docstring-formatting test fixture —
    # confirm before altering the docstring itself.
    return x
24,487
def add_node_hash(node):
    """
    Recursively adds all missing commitments and hashes to a verkle trie structure.
    """
    # Leaf nodes: hash of the (key, value) pair.
    if node["node_type"] == "leaf":
        node["hash"] = hash([node["key"], node["value"]])
    if node["node_type"] == "inner":
        # NOTE(review): lagrange_polynomials is never used — candidate for
        # removal.
        lagrange_polynomials = []
        values = {}
        # Children are keyed by their index 0..WIDTH-1; recurse to fill in
        # any missing child hashes before committing to them.
        for i in range(WIDTH):
            if i in node:
                if "hash" not in node[i]:
                    add_node_hash(node[i])
                values[i] = int.from_bytes(node[i]["hash"], "little")
        # Commit to the child hashes as polynomial evaluations in Lagrange
        # basis, then hash the compressed commitment for the parent.
        commitment = kzg_utils.compute_commitment_lagrange(values)
        node["commitment"] = commitment
        node["hash"] = hash(commitment.compress())
24,488
def set_(
    computer_policy=None,
    user_policy=None,
    cumulative_rights_assignments=True,
    adml_language="en-US",
):
    """
    Set a local server policy.

    Args:

        computer_policy (dict):
            A dictionary of "policyname: value" pairs of computer policies to
            set. 'value' should be how it is displayed in the gpedit GUI, i.e.
            if a setting can be 'Enabled'/'Disabled', then that should be
            passed

            Administrative Template data may require dicts within dicts, to
            specify each element of the Administrative Template policy.
            Administrative Templates policies are always cumulative.

            Policy names can be specified in a number of ways based on the
            type of policy:

                Windows Settings Policies:

                    These policies can be specified using the GUI display name
                    or the key name from the _policy_info class in this
                    module. The GUI display name is also contained in the
                    _policy_info class in this module.

                Administrative Template Policies:

                    These can be specified using the policy name as displayed
                    in the GUI (case sensitive). Some policies have the same
                    name, but a different location (for example, "Access data
                    sources across domains"). These can be differentiated by
                    the "path" in the GUI (for example, "Windows
                    Components\\Internet Explorer\\Internet Control
                    Panel\\Security Page\\Internet Zone\\Access data sources
                    across domains").

                    Additionally, policies can be specified using the "name"
                    and "id" attributes from the ADMX files.

                    For Administrative Templates that have policy elements,
                    each element can be specified using the text string as
                    seen in the GUI or using the ID attribute from the ADMX
                    file. Due to the way some of the GUI text is laid out,
                    some policy element names could include descriptive text
                    that appears before the policy element in the GUI.

                    Use the get_policy_info function for the policy name to
                    view the element ID/names that the module will accept.

        user_policy (dict):
            The same setup as the computer_policy, except with data to
            configure the local user policy.

        cumulative_rights_assignments (bool):
            Determine how user rights assignment policies are configured.

            If True, user right assignment specifications are simply added to
            the existing policy

            If False, only the users specified will get the right (any
            existing will have the right revoked)

        adml_language (str):
            The language files to use for looking up Administrative Template
            policy data (i.e. how the policy is displayed in the GUI).
            Defaults to 'en-US' (U.S. English).

    Returns:
        bool: True is successful, otherwise False

    CLI Example:

    .. code-block:: bash

        salt '*' lgpo.set computer_policy="{'LockoutDuration': 2, 'RestrictAnonymous': 'Enabled', 'AuditProcessTracking': 'Succes, Failure'}"
    """
    if computer_policy and not isinstance(computer_policy, dict):
        raise SaltInvocationError("computer_policy must be specified as a dict")
    if user_policy and not isinstance(user_policy, dict):
        raise SaltInvocationError("user_policy must be specified as a dict")
    policies = {}
    policies["User"] = user_policy
    policies["Machine"] = computer_policy
    if policies:
        adml_policy_resources = _get_policy_resources(language=adml_language)
        for p_class in policies:
            # Each mechanism (registry, secedit, netsh, advaudit, modal sets,
            # ADM templates, LSA rights) gets its own staging dict; values are
            # validated and collected first, then applied per mechanism below.
            _secedits = {}
            _netshs = {}
            _advaudits = {}
            _modal_sets = {}
            _admTemplateData = {}
            _regedits = {}
            _lsarights = {}
            _policydata = _policy_info()
            if policies[p_class]:
                for policy_name in policies[p_class]:
                    _pol = None
                    policy_key_name = policy_name
                    if policy_name in _policydata.policies[p_class]["policies"]:
                        _pol = _policydata.policies[p_class]["policies"][policy_name]
                    else:
                        # Case-sensitive search first
                        for policy in _policydata.policies[p_class]["policies"]:
                            _p = _policydata.policies[p_class]["policies"][policy][
                                "Policy"
                            ]
                            if _p == policy_name:
                                _pol = _policydata.policies[p_class]["policies"][policy]
                                policy_key_name = policy
                        if _pol is None:
                            # Still not found, case-insensitive search
                            for policy in _policydata.policies[p_class]["policies"]:
                                _p = _policydata.policies[p_class]["policies"][policy][
                                    "Policy"
                                ]
                                # Case-insensitive comparison
                                if _p.lower() == policy_name.lower():
                                    _pol = _policydata.policies[p_class]["policies"][
                                        policy
                                    ]
                                    policy_key_name = policy
                    if _pol:
                        # transform and validate the setting
                        _value = _transform_value(
                            value=policies[p_class][policy_name],
                            policy=_policydata.policies[p_class]["policies"][
                                policy_key_name
                            ],
                            transform_type="Put",
                        )
                        if not _validateSetting(
                            value=_value,
                            policy=_policydata.policies[p_class]["policies"][
                                policy_key_name
                            ],
                        ):
                            raise SaltInvocationError(
                                "The specified value {} is not an acceptable setting"
                                " for policy {}.".format(
                                    policies[p_class][policy_name], policy_name
                                )
                            )
                        if "Registry" in _pol:
                            # set value in registry
                            log.trace("%s is a registry policy", policy_name)
                            _regedits[policy_name] = {"policy": _pol, "value": _value}
                        elif "Secedit" in _pol:
                            # set value with secedit
                            log.trace("%s is a Secedit policy", policy_name)
                            if _pol["Secedit"]["Section"] not in _secedits:
                                _secedits[_pol["Secedit"]["Section"]] = []
                            _secedits[_pol["Secedit"]["Section"]].append(
                                " ".join([_pol["Secedit"]["Option"], "=", str(_value)])
                            )
                        elif "NetSH" in _pol:
                            # set value with netsh
                            log.trace("%s is a NetSH policy", policy_name)
                            _netshs.setdefault(
                                policy_name,
                                {
                                    "profile": _pol["NetSH"]["Profile"],
                                    "section": _pol["NetSH"]["Section"],
                                    "option": _pol["NetSH"]["Option"],
                                    "value": str(_value),
                                },
                            )
                        elif "AdvAudit" in _pol:
                            # set value with advaudit
                            _advaudits.setdefault(
                                policy_name,
                                {
                                    "option": _pol["AdvAudit"]["Option"],
                                    "value": str(_value),
                                },
                            )
                        elif "NetUserModal" in _pol:
                            # set value via NetUserModal
                            log.trace("%s is a NetUserModal policy", policy_name)
                            if _pol["NetUserModal"]["Modal"] not in _modal_sets:
                                _modal_sets[_pol["NetUserModal"]["Modal"]] = {}
                            _modal_sets[_pol["NetUserModal"]["Modal"]][
                                _pol["NetUserModal"]["Option"]
                            ] = _value
                        elif "LsaRights" in _pol:
                            log.trace("%s is a LsaRights policy", policy_name)
                            _lsarights[policy_name] = {"policy": _pol, "value": _value}
                    else:
                        # Not a known Windows Settings policy: treat it as an
                        # Administrative Template policy and look it up in the
                        # ADMX data.
                        _value = policies[p_class][policy_name]
                        log.trace('searching for "%s" in admx data', policy_name)
                        (
                            success,
                            the_policy,
                            policy_name_list,
                            msg,
                        ) = _lookup_admin_template(
                            policy_name=policy_name,
                            policy_class=p_class,
                            adml_language=adml_language,
                        )
                        if success:
                            policy_name = the_policy.attrib["name"]
                            policy_namespace = the_policy.nsmap[the_policy.prefix]
                            if policy_namespace not in _admTemplateData:
                                _admTemplateData[policy_namespace] = {}
                            _admTemplateData[policy_namespace][policy_name] = _value
                        else:
                            raise SaltInvocationError(msg)
                        if (
                            policy_namespace
                            and policy_name in _admTemplateData[policy_namespace]
                            and the_policy is not None
                        ):
                            log.trace(
                                "setting == %s",
                                str(
                                    _admTemplateData[policy_namespace][policy_name]
                                ).lower(),
                            )
                            log.trace(
                                str(
                                    _admTemplateData[policy_namespace][policy_name]
                                ).lower()
                            )
                            # "disabled" / "not configured" need no element
                            # validation; anything else must satisfy the
                            # policy's element definitions from the ADMX file.
                            if (
                                str(
                                    _admTemplateData[policy_namespace][policy_name]
                                ).lower()
                                != "disabled"
                                and str(
                                    _admTemplateData[policy_namespace][policy_name]
                                ).lower()
                                != "not configured"
                            ):
                                if ELEMENTS_XPATH(the_policy):
                                    if isinstance(
                                        _admTemplateData[policy_namespace][policy_name],
                                        dict,
                                    ):
                                        for elements_item in ELEMENTS_XPATH(the_policy):
                                            for child_item in elements_item:
                                                # check each element
                                                log.trace(
                                                    "checking element %s",
                                                    child_item.attrib["id"],
                                                )
                                                temp_element_name = None
                                                this_element_name = _getFullPolicyName(
                                                    policy_item=child_item,
                                                    policy_name=child_item.attrib["id"],
                                                    return_full_policy_names=True,
                                                    adml_language=adml_language,
                                                )
                                                log.trace(
                                                    'id attribute == "%s" '
                                                    ' this_element_name == "%s"',
                                                    child_item.attrib["id"],
                                                    this_element_name,
                                                )
                                                # The user may key an element
                                                # either by its GUI name or by
                                                # its ADMX id attribute.
                                                if (
                                                    this_element_name
                                                    in _admTemplateData[
                                                        policy_namespace
                                                    ][policy_name]
                                                ):
                                                    temp_element_name = (
                                                        this_element_name
                                                    )
                                                elif (
                                                    child_item.attrib["id"]
                                                    in _admTemplateData[
                                                        policy_namespace
                                                    ][policy_name]
                                                ):
                                                    temp_element_name = (
                                                        child_item.attrib["id"]
                                                    )
                                                else:
                                                    raise SaltInvocationError(
                                                        'Element "{}" must be included'
                                                        " in the policy configuration"
                                                        " for policy {}".format(
                                                            this_element_name,
                                                            policy_name,
                                                        )
                                                    )
                                                if (
                                                    "required" in child_item.attrib
                                                    and child_item.attrib[
                                                        "required"
                                                    ].lower()
                                                    == "true"
                                                ):
                                                    if not _admTemplateData[
                                                        policy_namespace
                                                    ][policy_name][temp_element_name]:
                                                        raise SaltInvocationError(
                                                            'Element "{}" requires a value '
                                                            "to be specified".format(
                                                                temp_element_name
                                                            )
                                                        )
                                                # Per-element type validation
                                                # based on the ADMX element
                                                # kind.
                                                if (
                                                    etree.QName(child_item).localname
                                                    == "boolean"
                                                ):
                                                    if not isinstance(
                                                        _admTemplateData[
                                                            policy_namespace
                                                        ][policy_name][
                                                            temp_element_name
                                                        ],
                                                        bool,
                                                    ):
                                                        raise SaltInvocationError(
                                                            "Element {} requires a boolean "
                                                            "True or False".format(
                                                                temp_element_name
                                                            )
                                                        )
                                                elif (
                                                    etree.QName(child_item).localname
                                                    == "decimal"
                                                    or etree.QName(child_item).localname
                                                    == "longDecimal"
                                                ):
                                                    min_val = 0
                                                    max_val = 9999
                                                    if "minValue" in child_item.attrib:
                                                        min_val = int(
                                                            child_item.attrib[
                                                                "minValue"
                                                            ]
                                                        )
                                                    if "maxValue" in child_item.attrib:
                                                        max_val = int(
                                                            child_item.attrib[
                                                                "maxValue"
                                                            ]
                                                        )
                                                    if (
                                                        int(
                                                            _admTemplateData[
                                                                policy_namespace
                                                            ][policy_name][
                                                                temp_element_name
                                                            ]
                                                        )
                                                        < min_val
                                                        or int(
                                                            _admTemplateData[
                                                                policy_namespace
                                                            ][policy_name][
                                                                temp_element_name
                                                            ]
                                                        )
                                                        > max_val
                                                    ):
                                                        raise SaltInvocationError(
                                                            'Element "{}" value must be between '
                                                            "{} and {}".format(
                                                                temp_element_name,
                                                                min_val,
                                                                max_val,
                                                            )
                                                        )
                                                elif (
                                                    etree.QName(child_item).localname
                                                    == "enum"
                                                ):
                                                    # make sure the value is in the enumeration
                                                    found = False
                                                    for enum_item in child_item:
                                                        if (
                                                            _admTemplateData[
                                                                policy_namespace
                                                            ][policy_name][
                                                                temp_element_name
                                                            ]
                                                            == _getAdmlDisplayName(
                                                                adml_policy_resources,
                                                                enum_item.attrib[
                                                                    "displayName"
                                                                ],
                                                            ).strip()
                                                        ):
                                                            found = True
                                                            break
                                                    if not found:
                                                        raise SaltInvocationError(
                                                            'Element "{}" does not have'
                                                            " a valid value".format(
                                                                temp_element_name
                                                            )
                                                        )
                                                elif (
                                                    etree.QName(child_item).localname
                                                    == "list"
                                                ):
                                                    if (
                                                        "explicitValue"
                                                        in child_item.attrib
                                                        and child_item.attrib[
                                                            "explicitValue"
                                                        ].lower()
                                                        == "true"
                                                    ):
                                                        if not isinstance(
                                                            _admTemplateData[
                                                                policy_namespace
                                                            ][policy_name][
                                                                temp_element_name
                                                            ],
                                                            dict,
                                                        ):
                                                            raise SaltInvocationError(
                                                                "Each list item of element "
                                                                '"{}" requires a dict '
                                                                "value".format(
                                                                    temp_element_name
                                                                )
                                                            )
                                                    elif not isinstance(
                                                        _admTemplateData[
                                                            policy_namespace
                                                        ][policy_name][
                                                            temp_element_name
                                                        ],
                                                        list,
                                                    ):
                                                        raise SaltInvocationError(
                                                            'Element "{}" requires a'
                                                            " list value".format(
                                                                temp_element_name
                                                            )
                                                        )
                                                elif (
                                                    etree.QName(child_item).localname
                                                    == "multiText"
                                                ):
                                                    if not isinstance(
                                                        _admTemplateData[
                                                            policy_namespace
                                                        ][policy_name][
                                                            temp_element_name
                                                        ],
                                                        list,
                                                    ):
                                                        raise SaltInvocationError(
                                                            'Element "{}" requires a'
                                                            " list value".format(
                                                                temp_element_name
                                                            )
                                                        )
                                                # Re-key the element under its
                                                # ADMX id for the writer below.
                                                _admTemplateData[policy_namespace][
                                                    policy_name
                                                ][
                                                    child_item.attrib["id"]
                                                ] = _admTemplateData[
                                                    policy_namespace
                                                ][
                                                    policy_name
                                                ].pop(
                                                    temp_element_name
                                                )
                                    else:
                                        raise SaltInvocationError(
                                            'The policy "{}" has elements which must be'
                                            " configured".format(policy_name)
                                        )
                                else:
                                    if (
                                        str(
                                            _admTemplateData[policy_namespace][
                                                policy_name
                                            ]
                                        ).lower()
                                        != "enabled"
                                    ):
                                        raise SaltInvocationError(
                                            'The policy {} must either be "Enabled", '
                                            '"Disabled", or "Not Configured"'.format(
                                                policy_name
                                            )
                                        )
                # --- Apply phase: each staged mechanism is written out ---
                if _regedits:
                    for regedit in _regedits:
                        log.trace("%s is a Registry policy", regedit)
                        # if the value setting is None or "(value not set)", we will delete the value from the registry
                        if (
                            _regedits[regedit]["value"] is not None
                            and _regedits[regedit]["value"] != "(value not set)"
                        ):
                            _ret = __utils__["reg.set_value"](
                                _regedits[regedit]["policy"]["Registry"]["Hive"],
                                _regedits[regedit]["policy"]["Registry"]["Path"],
                                _regedits[regedit]["policy"]["Registry"]["Value"],
                                _regedits[regedit]["value"],
                                _regedits[regedit]["policy"]["Registry"]["Type"],
                            )
                        else:
                            _ret = __utils__["reg.read_value"](
                                _regedits[regedit]["policy"]["Registry"]["Hive"],
                                _regedits[regedit]["policy"]["Registry"]["Path"],
                                _regedits[regedit]["policy"]["Registry"]["Value"],
                            )
                            if _ret["success"] and _ret["vdata"] != "(value not set)":
                                _ret = __utils__["reg.delete_value"](
                                    _regedits[regedit]["policy"]["Registry"]["Hive"],
                                    _regedits[regedit]["policy"]["Registry"]["Path"],
                                    _regedits[regedit]["policy"]["Registry"]["Value"],
                                )
                        if not _ret:
                            raise CommandExecutionError(
                                "Error while attempting to set policy {} via the"
                                " registry. Some changes may not be applied as"
                                " expected".format(regedit)
                            )
                if _lsarights:
                    for lsaright in _lsarights:
                        _existingUsers = None
                        if not cumulative_rights_assignments:
                            _existingUsers = _getRightsAssignments(
                                _lsarights[lsaright]["policy"]["LsaRights"]["Option"]
                            )
                        if _lsarights[lsaright]["value"]:
                            for acct in _lsarights[lsaright]["value"]:
                                _ret = _addAccountRights(
                                    acct,
                                    _lsarights[lsaright]["policy"]["LsaRights"][
                                        "Option"
                                    ],
                                )
                                if not _ret:
                                    raise SaltInvocationError(
                                        "An error occurred attempting to configure the"
                                        " user right {}.".format(lsaright)
                                    )
                        # Non-cumulative mode: revoke the right from anyone
                        # not in the requested list.
                        if _existingUsers:
                            for acct in _existingUsers:
                                if acct not in _lsarights[lsaright]["value"]:
                                    _ret = _delAccountRights(
                                        acct,
                                        _lsarights[lsaright]["policy"]["LsaRights"][
                                            "Option"
                                        ],
                                    )
                                    if not _ret:
                                        raise SaltInvocationError(
                                            "An error occurred attempting to remove previously "
                                            "configured users with right {}.".format(
                                                lsaright
                                            )
                                        )
                if _secedits:
                    # we've got secedits to make
                    log.trace(_secedits)
                    ini_data = "\r\n".join(["[Unicode]", "Unicode=yes"])
                    _seceditSections = [
                        "System Access",
                        "Event Audit",
                        "Registry Values",
                        "Privilege Rights",
                    ]
                    for _seceditSection in _seceditSections:
                        if _seceditSection in _secedits:
                            ini_data = "\r\n".join(
                                [
                                    ini_data,
                                    "".join(["[", _seceditSection, "]"]),
                                    "\r\n".join(_secedits[_seceditSection]),
                                ]
                            )
                    ini_data = "\r\n".join(
                        [ini_data, "[Version]", 'signature="$CHICAGO$"', "Revision=1"]
                    )
                    log.trace("ini_data == %s", ini_data)
                    if not _write_secedit_data(ini_data):
                        raise CommandExecutionError(
                            "Error while attempting to set policies via "
                            "secedit. Some changes may not be applied as "
                            "expected"
                        )
                if _netshs:
                    # we've got netsh settings to make
                    for setting in _netshs:
                        log.trace("Setting firewall policy: %s", setting)
                        log.trace(_netshs[setting])
                        _set_netsh_value(**_netshs[setting])
                if _advaudits:
                    # We've got AdvAudit settings to make
                    for setting in _advaudits:
                        log.trace("Setting Advanced Audit policy: %s", setting)
                        log.trace(_advaudits[setting])
                        _set_advaudit_value(**_advaudits[setting])
                if _modal_sets:
                    # we've got modalsets to make
                    log.trace(_modal_sets)
                    for _modal_set in _modal_sets:
                        try:
                            _existingModalData = win32net.NetUserModalsGet(
                                None, _modal_set
                            )
                            _newModalSetData = dictupdate.update(
                                _existingModalData, _modal_sets[_modal_set]
                            )
                            log.trace("NEW MODAL SET = %s", _newModalSetData)
                            _ret = win32net.NetUserModalsSet(
                                None, _modal_set, _newModalSetData
                            )
                        # TODO: This needs to be more specific
                        except Exception as exc:  # pylint: disable=broad-except
                            msg = (
                                "An unhandled exception occurred while "
                                "attempting to set policy via "
                                "NetUserModalSet\n{}".format(exc)
                            )
                            log.exception(msg)
                            raise CommandExecutionError(msg)
                if _admTemplateData:
                    _ret = False
                    log.trace(
                        "going to write some adm template data :: %s", _admTemplateData
                    )
                    _ret = _writeAdminTemplateRegPolFile(
                        _admTemplateData,
                        adml_language=adml_language,
                        registry_class=p_class,
                    )
                    if not _ret:
                        raise CommandExecutionError(
                            "Error while attempting to write Administrative Template"
                            " Policy data. Some changes may not be applied as expected"
                        )
        return True
    else:
        raise SaltInvocationError("You have to specify something!")
24,489
def get_dotenv_variable(var_name: str) -> str:
    """Return the value of an environment variable from the dotenv config.

    Parameters
    ----------
    var_name : str
        Name of the environment variable to look up.

    Returns
    -------
    str
        The configured value for ``var_name``.

    Raises
    ------
    ImproperlyConfigured
        If ``var_name`` is not defined in the environment configuration.
    """
    try:
        return config.get(var_name)
    except KeyError as exc:
        error_msg = f"{var_name} not found!\nSet the '{var_name}' environment variable"
        # Chain the original KeyError so the traceback shows where the
        # lookup actually failed.
        raise ImproperlyConfigured(error_msg) from exc
24,490
def test_send_user_to_hubspot(mocker, settings):
    """Verify send_user_to_hubspot posts the expected form data to Hubspot."""
    requests_mock = mocker.patch("authentication.pipeline.user.requests")
    log_mock = mocker.patch("authentication.pipeline.user.log")
    fake_request = mocker.Mock(COOKIES={"hubspotutk": "somefakedata"})

    # Without the Hubspot settings configured, the call is a no-op that
    # logs an error and returns an empty dict.
    result = user_actions.send_user_to_hubspot(
        fake_request, details={"email": "test@test.co"}
    )
    assert result == {}
    log_mock.error.assert_called_once()

    # With portal id and form id configured, a form POST should be issued.
    settings.HUBSPOT_CONFIG["HUBSPOT_PORTAL_ID"] = "123456"
    settings.HUBSPOT_CONFIG["HUBSPOT_CREATE_USER_FORM_ID"] = "abcdefg"
    result = user_actions.send_user_to_hubspot(
        fake_request, details={"email": "test@test.co"}
    )
    assert result == {}
    requests_mock.post.assert_called_with(
        data={"email": "test@test.co", "hs_context": '{"hutk": "somefakedata"}'},
        headers={"Content-Type": "application/x-www-form-urlencoded"},
        url="https://forms.hubspot.com/uploads/form/v2/123456/abcdefg?&",
    )
24,491
def load_dataset():
    """Build the PyTorch image dataset with normalization applied.

    Returns an ``ImageFolder`` over the ``images`` directory whose samples
    are converted to tensors and normalized channel-wise.

    Notes
    -----
    - Mean/std were computed over the whole dataset; see
      https://discuss.pytorch.org/t/computing-the-mean-and-std-of-dataset/34949
    """
    dataset_mean = (0.9720, 0.9720, 0.9720)
    dataset_std = (0.1559, 0.1559, 0.1559)
    pipeline = transforms.Compose([
        transforms.ToTensor(),
        transforms.Normalize(dataset_mean, dataset_std),
    ])
    return ImageFolder(root='images', transform=pipeline)
24,492
def load():
    """Load and return the configured rights-management backend module.

    The backend is chosen via the ``type`` option of the ``[rights]``
    configuration section: ``"custom"`` imports an arbitrary dotted module
    path from ``custom_handler``; any other value falls back to the bundled
    ``regex`` backend.  The chosen module's ``authorized`` callable is
    re-exported on this module.
    """
    storage_type = config.get("rights", "type")
    if storage_type == "custom":
        # User-supplied backend: import it by dotted path, then pull the fully
        # initialized leaf module out of sys.modules (``__import__`` returns
        # the top-level package, not the leaf module).
        rights_module = config.get("rights", "custom_handler")
        __import__(rights_module)
        module = sys.modules[rights_module]
    else:
        # Bundled backend.  level=2 makes this a relative import resolved two
        # package levels up from this module — presumably the project's root
        # package; TODO(review) confirm against the package layout.
        root_module = __import__("rights.regex", globals=globals(), level=2)
        module = root_module.regex
    # Re-export the backend's ``authorized`` hook on this module so callers
    # can use it without knowing which backend was selected.
    sys.modules[__name__].authorized = module.authorized
    return module
24,493
def refresh_window(tot_before, tot_after, pnic_before, pnic_after):
    """Redraw the screen with aggregate and per-interface network stats."""
    global lineno

    # Aggregate counters across all interfaces.
    print_line("total bytes: sent: %-10s received: %s" \
        % (bytes2human(tot_after.bytes_sent),
           bytes2human(tot_after.bytes_recv))
    )
    print_line("total packets: sent: %-10s received: %s" \
        % (tot_after.packets_sent, tot_after.packets_recv)
    )

    # Per-interface breakdown, busiest interfaces first.
    print_line("")
    templ = "%-15s %15s %15s"
    busiest_first = sorted(pnic_after, key=lambda n: sum(pnic_after[n]),
                           reverse=True)
    for nic in busiest_first:
        before = pnic_before[nic]
        after = pnic_after[nic]
        print_line(templ % (nic, "TOTAL", "PER-SEC"), highlight=True)
        print_line(templ % (
            "bytes-sent",
            bytes2human(after.bytes_sent),
            bytes2human(after.bytes_sent - before.bytes_sent) + '/s',
        ))
        print_line(templ % (
            "bytes-recv",
            bytes2human(after.bytes_recv),
            bytes2human(after.bytes_recv - before.bytes_recv) + '/s',
        ))
        print_line(templ % (
            "pkts-sent",
            after.packets_sent,
            after.packets_sent - before.packets_sent,
        ))
        print_line(templ % (
            "pkts-recv",
            after.packets_recv,
            after.packets_recv - before.packets_recv,
        ))
        print_line("")
    win.refresh()
    lineno = 0
24,494
def vote(pred1, pred2, pred3=None):
    """Combine binary predictions from an ensemble by hard voting.

    With two predictors, ties (mean of 0.5) defer to ``pred2``.  With three
    predictors, the majority wins.  Positions where the models disagreed are
    collected in the returned index list.
    """
    votes = []
    disagreements = []
    if pred3 is None:
        averaged = np.mean([pred1, pred2], axis=0)
        for pos, avg in enumerate(averaged):
            if avg in (0, 1):
                # Unanimous: both models agree.
                votes.append(int(avg))
            else:
                # Tie-break: defer to the second model's prediction.
                votes.append(pred2[pos])
                disagreements.append(pos)
    else:
        averaged = np.mean([pred1, pred2, pred3], axis=0)
        for pos, avg in enumerate(averaged):
            if avg in (0, 1):
                # Unanimous: all three models agree.
                votes.append(int(avg))
            else:
                # Majority decides; record the disagreement either way.
                votes.append(1 if avg > 0.5 else 0)
                disagreements.append(pos)
    return averaged, votes, disagreements
24,495
def move(cardinal):
    """Attempt to move the player in the given cardinal direction.

    Movement only happens when the target room exists and its door is open;
    otherwise a message is printed and the player stays put.

    Parameters:
        cardinal(str): A valid cardinal, one of {North, East, South, West}
    """
    current_room = __current_position()
    destination = room_m.move(cardinal, current_room)
    if destination is None:
        print('No movement was made!')
        return
    char_m.update_player_position(destination)
    room_to_screen()
24,496
def generate_master_flat(
    science_frame : CCDData,
    bias_path : Path,
    dark_path : Path,
    flat_path : Path,
    use_cache : bool=True
) -> CCDData:
    """Create (or load from cache) the flat frame matching a science frame.

    Parameters
    ----------
    science_frame : CCDData
        The science exposure the flat must match.
    bias_path : Path
        Directory containing bias frames used for calibration.
    dark_path : Path
        Directory containing dark frames used for calibration.
    flat_path : Path
        Directory containing the raw flat frames.
    use_cache : bool, optional
        When True (default) and a previously calibrated set of flats exists
        in the cache directory, reuse it instead of recalibrating.

    Returns
    -------
    CCDData
        The flat frame selected as the best match for ``science_frame``.
    """
    cache_path = generate_cache_path(science_frame, flat_path) / 'flat'

    if use_cache and cache_path.is_dir():
        # Reuse the previously calibrated flats stored in the cache.
        flat_frames = ccdp.ImageFileCollection(location=cache_path)
    else:
        cache_path.mkdir(parents=True, exist_ok=True)
        flat_frames = calibrate_flat(science_frame=science_frame, bias_path=bias_path,
                                     dark_path=dark_path, flat_path=flat_path,
                                     output_path=cache_path)

    return select_flat_frame(science_frame=science_frame, flat_frames=flat_frames)
24,497
def submit_remote(c, node_count=int(env_values["CLUSTER_MAX_NODES"])):
    """Template task for submitting a GPU training run to a remote cluster.

    Not usable as shipped: it raises immediately, and the code after the
    ``raise`` is deliberately unreachable example code.  Fill in the
    experiment name, training script and argument placeholders, then remove
    the ``raise`` to enable it.

    NOTE(review): the ``node_count`` default is evaluated once at import
    time from ``env_values["CLUSTER_MAX_NODES"]``.
    """
    raise NotImplementedError(
        "You need to modify this call before being able to use it"
    )
    # --- Example submission code (unreachable until the raise is removed) ---
    from aml_compute import PyTorchExperimentCLI
    exp = PyTorchExperimentCLI("<YOUR-EXPERIMENT-NAME>")
    run = exp.submit(
        os.path.join(_BASE_PATH, "src"),
        "<YOUR-TRAINING-SCRIPT>",
        {"YOUR": "ARGS"},
        node_count=node_count,
        dependencies_file=os.path.join(_BASE_PATH, "environment_gpu.yml"),
        wait_for_completion=True,
    )
    print(run)
24,498
def natrix_mqttclient(client_id):
    """Build a natrix MQTT client wired with all connection configuration.

    The ``client_id`` uniquely identifies the MQTT connection; it carries
    the device serial number used to identify this device.

    :param client_id: unique identifier for the MQTT connection.
    :return: a configured ``NatrixMQTTClient`` instance.
    """
    return NatrixMQTTClient(client_id)
24,499