content
stringlengths
22
815k
id
int64
0
4.91M
def list_upcoming_assignments_calendar_events(request_ctx, **request_kwargs):
    """
    Return the current user's upcoming events, i.e. the same things shown
    in the dashboard 'Coming Up' sidebar.

    :param request_ctx: The request context
    :type request_ctx: :class:RequestContext
    :param request_kwargs: extra keyword arguments forwarded to the HTTP client
    :return: List upcoming assignments, calendar events
    :rtype: requests.Response (with void data)
    """
    # The path contains no placeholders, so the original `path.format()`
    # call was a no-op; use the path directly.
    path = '/v1/users/self/upcoming_events'
    url = request_ctx.base_api_url + path
    response = client.get(request_ctx, url, **request_kwargs)
    return response
5,324,700
def sync(erpnext_support_issue=None):
    """Sync open Support Issues with the remote support server.

    Args:
        erpnext_support_issue: Optional name of a single issue to sync;
            when None, every eligible open issue is synced.
    """
    fields = ["name", "frappe_issue_id", "status", "last_sync_on"]
    filters = [
        ["status", "=", "Open"],
        ["frappe_issue_id", "!=", ""],
        ["last_sync_on", "!=", ""]
    ]
    if erpnext_support_issue:
        filters.append(["name", "=", erpnext_support_issue])

    support_issues = frappe.get_all("ERPNext Support Issue", filters=filters, fields=fields)
    if not support_issues:
        return

    erpnext_support_issues = []
    # Batch issue sync requests to 10 per call
    for idx, issue in enumerate(support_issues):
        issue.last_sync_on = get_datetime_str(issue.last_sync_on)
        erpnext_support_issues.append(issue)

        # Flush a full batch, or the final (possibly partial) batch.
        # BUGFIX: the end-of-list check must compare idx against the total
        # number of issues, not the size of the batch accumulator; the old
        # comparison meant the trailing partial batch was never sent.
        if erpnext_support_issues and ((idx and idx % 10 == 0) or idx == len(support_issues) - 1):
            params = {"erpnext_support_issues": json.dumps(erpnext_support_issues)}
            response = call_remote_method("serve_communications_and_statuses", params)
            # Best-effort sync: skip (and retry with the next batch) on failure.
            if not response or (not isinstance(response, string_types) and response.get('failed')):
                continue
            update_erpnext_support_issue_status_and_communications(erpnext_support_issues, json.loads(response))
            erpnext_support_issues = []
5,324,701
def test_whitelist_nested_classes(
    assert_errors,
    parse_ast_tree,
    whitelist_name,
    code,
    default_options,
    mode,
):
    """Testing that it is possible to nest whitelisted classes."""
    source = mode(code.format(whitelist_name))
    tree = parse_ast_tree(source)

    checker = NestedComplexityVisitor(default_options, tree=tree)
    checker.run()

    assert_errors(checker, [])
5,324,702
def mse_loss(y, loc):
    """
    Mean squared error loss function.

    Uses mean-squared error to regress to the expected value.

    Parameters:
        y: observed target values
        loc: mean (predicted expected value)
    """
    squared_error = (y - loc) ** 2
    return K.mean(squared_error)
5,324,703
def _get_license_key_outputs(session):
    """Return (account id, policy ARN) for the license key secret, if present.

    Results are memoized in module-level globals so the stack outputs are
    fetched at most once.
    """
    global __cached_license_key_nr_account_id
    global __cached_license_key_policy_arn

    cached = (__cached_license_key_nr_account_id, __cached_license_key_policy_arn)
    if all(cached):
        # Both values already known: serve from the cache.
        return cached

    outputs = _get_stack_output_value(session, ["NrAccountId", "ViewPolicyARN"])
    __cached_license_key_nr_account_id = outputs.get("NrAccountId")
    __cached_license_key_policy_arn = outputs.get("ViewPolicyARN")
    return __cached_license_key_nr_account_id, __cached_license_key_policy_arn
5,324,704
def get_sample_untransformed(shape, distribution_type, distribution_params, seed):
    """Draw a sample tensor from the specified distribution.

    When `distribution_params` is a list, each list entry parameterizes one
    column of the resulting 2-D sample matrix; otherwise the same parameters
    are used for the whole matrix.

    Args:
        shape: Tuple/List representing the shape of the output
        distribution_type: DistributionType object
        distribution_params: Dict of distribution parameters, or a list of such dicts
        seed: random seed to be used

    Returns:
        sample: TF Tensor with a sample from the distribution
    """
    # Simple case first: one parameter set shared by the whole tensor.
    if not isinstance(distribution_params, list):
        return get_one_sample_untransformed(shape, distribution_type,
                                            distribution_params, seed)

    if len(shape) != 2 or len(distribution_params) != shape[1]:
        raise ValueError("If distribution_params is a list, the desired 'shape' "
                         "should be 2-dimensional and number of elements in the "
                         "list should match 'shape[1]'")
    # One column per parameter set, concatenated along axis 1.
    columns = [
        get_one_sample_untransformed([shape[0], 1], distribution_type,
                                     curr_params, seed)
        for curr_params in distribution_params
    ]
    return tf.concat(columns, axis=1)
5,324,705
def __convert_sysctl_dict_to_text():
    """
    Convert sysctl configuration dict to text with each property/value pair
    separated on a new line.

    Raises:
        Exception: if any configured value fails the numeric validation check.
    """
    import params
    lines = ["### HAWQ System Parameters ###########\n"]
    # Py3 fix: dict.iteritems() no longer exists; items() is equivalent here.
    # Building a list and joining once also avoids quadratic string +=.
    for key, value in params.hawq_sysctl.items():
        if not __valid_input(value):
            raise Exception("Value {0} for parameter {1} contains non-numeric characters which are not allowed (except whitespace), please fix the value and retry".format(value, key))
        lines.append("{0} = {1}\n".format(key, value))
    return "".join(lines)
5,324,706
def vizualScript(
    inputds: str,
    script: Union[List[Dict[str, Any]], Dict[str, Any]],
    script_needs_compile: bool = False,
    properties: Optional[Dict[str, Any]] = None
) -> Dict[str, Any]:
    """
    Create a view that implements a sequence of vizual commands over a fixed
    input table.

    Parameters
    ----------
    inputds: string
        The internal name of the dataset to apply the input script to
    script: list[dictionary] or dictionary
        The sequence of vizual commands to apply to the input. If not a
        list, the parameter is assumed to be a singleton command and is
        wrapped in a list.
    script_needs_compile: boolean
        Set to true if mimir should preprocess the script to provide more
        spreadsheet-like semantics (e.g., lazy evaluation of expression cells)
    properties: dictionary, optional
        Extra view properties forwarded to the server (defaults to {}).

    Returns
    -------
    dictionary of
      - "name": The name of the created view
      - "script": The compiled version of the script (or just script if
        script_needs_compile = False)
    """
    properties = {} if properties is None else properties
    # Idiom fix: isinstance instead of `type(script) is list`, so list
    # subclasses are accepted as scripts as well.
    script_list: List[Dict[str, Any]]
    if isinstance(script, list):
        script_list = cast(List[Dict[str, Any]], script)
    else:
        script_list = [cast(Dict[str, Any], script)]
    req_json = {
        "input": inputds,
        "script": script_list,
        "compile": script_needs_compile,
        "properties": properties
    }
    resp = readResponse(requests.post(_mimir_url + 'vizual/create', json=req_json))
    # The server contract guarantees these keys on success.
    assert("name" in resp)
    assert("script" in resp)
    return resp
5,324,707
def test_multiple_fits_different_columns():
    """HyperTransformer should be usable multiple times regardless of the data.

    Fitting, transforming and reverse transforming should work when called
    on different data.
    """
    # Setup
    original = pd.DataFrame({'col1': [1, 2, 3], 'col2': [1.0, 0.0, 0.0]})
    new_data = pd.DataFrame({'col3': [1, 2, 3], 'col4': [1.0, 0.0, 0.0]})
    ht = HyperTransformer()

    # Run: fit on one frame, then re-detect and fit on an unrelated one.
    ht.detect_initial_config(original)
    ht.fit(original)
    ht.detect_initial_config(new_data)
    ht.fit(new_data)

    transformed1 = ht.transform(new_data)
    transformed2 = ht.transform(new_data)
    reverse1 = ht.reverse_transform(transformed1)
    reverse2 = ht.reverse_transform(transformed2)

    # Assert
    expected = pd.DataFrame({'col3.value': [1, 2, 3],
                             'col4.value': [1.0, 0.0, 0.0]})
    pd.testing.assert_frame_equal(transformed1, expected)
    pd.testing.assert_frame_equal(transformed2, expected)
    pd.testing.assert_frame_equal(reverse1, new_data)
    pd.testing.assert_frame_equal(reverse2, new_data)
5,324,708
def _to_sparse_input_and_drop_ignore_values(input_tensor, ignore_value=None):
    """Converts a `Tensor` to a `SparseTensor`, dropping ignore_value cells.

    If `input_tensor` is already a `SparseTensor`, just return it.

    Args:
        input_tensor: A string or integer `Tensor`.
        ignore_value: Entries in `dense_tensor` equal to this value will be
            absent from the resulting `SparseTensor`. If `None`, default value of
            `dense_tensor`'s dtype will be used ('' for `str`, -1 for `int`).

    Returns:
        A `SparseTensor` with the same shape as `input_tensor`.

    Raises:
        ValueError: when `input_tensor`'s rank is `None`.
    """
    input_tensor = sparse_tensor_lib.convert_to_tensor_or_sparse_tensor(
        input_tensor)
    if isinstance(input_tensor, sparse_tensor_lib.SparseTensor):
        # Already sparse: nothing to drop.
        return input_tensor
    with ops.name_scope(None, 'to_sparse_input', (input_tensor, ignore_value,)):
        if ignore_value is None:
            # Pick a per-dtype default "missing" marker.
            if input_tensor.dtype == dtypes.string:
                # Exception due to TF strings are converted to numpy objects by default.
                ignore_value = ''
            elif input_tensor.dtype.is_integer:
                ignore_value = -1  # -1 has a special meaning of missing feature
            else:
                # NOTE: `as_numpy_dtype` is a property, so with the parentheses this is
                # constructing a new numpy object of the given type, which yields the
                # default value for that type.
                ignore_value = input_tensor.dtype.as_numpy_dtype()
        ignore_value = math_ops.cast(
            ignore_value, input_tensor.dtype, name='ignore_value')
        # Cells differing from the ignore value become the sparse indices;
        # gather_nd pulls the matching values out of the dense tensor.
        indices = array_ops.where_v2(
            math_ops.not_equal(input_tensor, ignore_value), name='indices')
        return sparse_tensor_lib.SparseTensor(
            indices=indices,
            values=array_ops.gather_nd(input_tensor, indices, name='values'),
            dense_shape=array_ops.shape(
                input_tensor, out_type=dtypes.int64, name='dense_shape'))
5,324,709
def parse_date(date_str):
    """
    >>> parse_date("22 April 2011 at 20:34")
    datetime.datetime(2011, 4, 22, 20, 34)
    >>> parse_date("9 July 2011")
    datetime.datetime(2011, 7, 9, 0, 0)
    >>> parse_date("September 2003")
    datetime.datetime(2003, 9, 1, 0, 0)
    """
    # Fast path: fully specified "day month year at HH:MM" dates.
    try:
        return datetime.strptime(date_str, "%d %B %Y at %H:%M")
    except Exception:
        logging.info("Parsing date: {0} - date incomplete".format(date_str))

    # We could directly parse all dates with parser, but this two-step
    # approach allows logging only for incomplete dates.
    try:
        return parser.parse(
            date_str,
            default=datetime(year=datetime.now().year, month=1, day=1))
    except Exception:
        pass

    # Fall back to relative ("2 days ago") and fuzzy ("yesterday at 5")
    # interpretations before giving up entirely.
    relative_time = parse_relative_time(date_str)
    if relative_time:
        return datetime.now() - relative_time

    fuzzy_t = parse_fuzzy_time(date_str)
    if fuzzy_t:
        fuzzy_time, delta = fuzzy_t[0], fuzzy_t[1]
        return datetime.now().replace(
            hour=fuzzy_time.hour,
            minute=fuzzy_time.minute,
            second=fuzzy_time.second) - delta

    logging.error("Failed to parse date: {0}".format(date_str))
    return datetime.now()
5,324,710
def readTmpFile(processPid):
    """Read and decode the BSON temp status file for the given process pid.

    Returns:
        The single decoded BSON document, or None when the file is missing,
        is a legacy JSON file (which is deleted), or does not contain
        exactly one document.
    """
    fileName = os.path.join(tempfile.gettempdir(), 'mms-' + str(processPid))
    if not os.path.isfile(fileName):
        return None
    # Context manager replaces the original manual try/finally close.
    with open(fileName) as f:
        fileContent = f.read()
        # Handle the legacy json files
        if fileContent.startswith('{'):
            os.remove(fileName)
            return None
        resBson = bson.decode_all(fileContent)
        if len(resBson) != 1:
            return None
        return resBson[0]
5,324,711
def save_transitions_to_file(dynamics, state_space, action_space, N, T, filename,
                             verbose=True, sample_trajectories=False):
    """Sample transition data and save it to `filename` in npz format.

    Draws N datasets of T samples each from (dynamics, state_space,
    action_space).

    sample_trajectories: if false, samples independent transitions
    """
    if sample_trajectories:
        dataset = DynamicsTrajDataset(dynamics, RandomAgent(action_space))
    else:
        dataset = DynamicsRandomDataset(dynamics, state_space, action_space)

    X, Y = dataset.sample(N, T, verbose=verbose)
    maybe_makedir(filename)
    np.savez(filename, X=X, Y=Y)
    if verbose:
        print("Saved transitions to ", filename)
5,324,712
def pkcs7_unpad_strict(data, block_size=16):
    """Same as `pkcs7_unpad`, but throw exception on incorrect padding.

    Mostly used to showcase the padding oracle attack.

    Args:
        data: padded text (str); the last `pad` characters must all equal
            chr(pad).
        block_size: cipher block size; valid pad lengths are 1..block_size.

    Raises:
        Exception: if the input is empty, the padding length is out of
            range, or any padding character is wrong.
    """
    # Robustness fix: empty input used to raise a bare IndexError.
    if not data:
        raise Exception('Invalid padding length')
    pad = data[-1]
    pad_len = ord(pad)  # hoisted: ord() was recomputed on every check
    if not 1 <= pad_len <= block_size:
        raise Exception('Invalid padding length')
    # Every one of the last pad_len characters must equal the pad byte;
    # data[-1] is trivially equal, so checking starts at -2.
    for i in range(2, pad_len + 1):
        if data[-i] != pad:
            raise Exception('Invalid padding character')
    return data[:-pad_len]
5,324,713
def cut(d1: dict, d2: dict) -> dict:
    """Return the entries of `d2` whose keys are not present in `d1`
    (non-mutating action).

    Delegates to ``cut_``; per the example below, keys found in both dicts
    are dropped and only `d2`'s unmatched entries remain — TODO confirm
    against ``cut_``'s definition.

    Examples:

        .. highlight:: python
        .. code-block:: python

            from map_ops.operations import cut

            d1 = {"foo": 1, "bar": 1}
            d2 = {"foo": 2, "baz": 2}

            cut(d1, d2)
            {"baz": 2}

    Args:
        d1: A Python dict
        d2: A Python dict

    Returns:
        A Python dict
    """
    return cut_(d1, d2)
5,324,714
def chiresponse(A, x):
    """
    Deprecated, just use the normal "response" function instead!

    Response function used in the chi-squared fitting portion of the
    simulation; imitates the actual response of a scintillator with a
    cos^x dependence. NOTE: mutates and returns the input array `A`.

    Parameters
    ----------
    A : numpy array of floats
        Angles between the vector pairs whose response is imitated.
    x : float
        Exponent of the cosine dependence.

    Returns
    -------
    A : numpy array of floats
        Cosine-power response; entries with angular separation beyond
        pi/2 are masked to 0, imitating a GRB that misses the detector
        face. Further simulation of this effect in a different software
        package would be needed to confirm the assumption.
    """
    # Angles past pi/2 never strike the detector face: zero them out.
    beyond_face = A > np.pi / 2.
    A[beyond_face] = 0
    A[~beyond_face] = np.abs(np.cos(A[~beyond_face])) ** x
    return A
5,324,715
def app_config_js():
    """
    Render app_config.js to file.
    """
    from static import _app_config_js

    # Render the view inside a faked request context, then persist it.
    with _fake_context('/js/includes/app_config.js'):
        rendered = _app_config_js()
        with open('www/js/includes/app_config.js', 'w') as out:
            out.write(rendered.data)
5,324,716
def _remove_trailing_string(content, trailing): """ Strip trailing component `trailing` from `content` if it exists. Used when generating names from view classes. """ if content.endswith(trailing) and content != trailing: return content[:-len(trailing)] return content
5,324,717
def text_save(final_result, section, path):
    """Save the extracted text to a CSV file.

    Columns written: index, start_time, end_time, section, contents.

    :param final_result: preprocessed text, one entry per position
    :param section: section label for each position
    :param path: path of the CSV file to write
    """
    f = open(path, 'w', encoding='utf-8', newline='')
    wr = csv.writer(f)
    wr.writerow(['index', 'start_time', 'end_time', 'section', 'contents'])
    num = 1
    index=[]
    # Map each position to itself, or to the previous position when the text
    # repeats, so consecutive duplicates share one index entry.
    for i in range(0, len(final_result)):
        if(i==0):
            index.insert(0, 0)
            pass
        else:
            if final_result[i-1] == final_result[i]:
                index.insert(i, i-1)
            else:
                index.insert(i, i)
    final_content=[]
    for i in range(0, len(final_result)):
        d_content={}
        if (index.count(i) == 0):
            # Position i was folded into an earlier run: skip it.
            pass
        else :
            # NOTE(review): positions appear to be treated as seconds, and
            # index.count(i) as the duration of the run starting at i —
            # TODO confirm against the caller's timing model.
            d_content.update([("start", time.strftime("%H:%M:%S", time.gmtime(i))), ("end", time.strftime("%H:%M:%S", time.gmtime(i+index.count(i)))) , ('section', section[i]), ("contents", final_result[i])])
            final_content.append(d_content)
    for j in range(0, len(final_content)):
        wr.writerow([j, final_content[j]["start"], final_content[j]["end"], final_content[j]["section"], final_content[j]["contents"]])
    f.close()
5,324,718
def execute_no_overwrite_part1(tmpdir):
    """Create expected output."""
    parser = configparser.ConfigParser()
    parser.read(pathlib.Path('tests/data/tasks/osco/demo-osco-to-oscal.config'))

    # Redirect the task output into the test's temp directory.
    section = parser['task.osco-to-oscal']
    section['output-dir'] = str(tmpdir)

    task = osco_to_oscal.OscoToOscal(section)
    assert task.execute() == TaskOutcome.SUCCESS
    assert len(os.listdir(str(tmpdir))) == 1
5,324,719
def data_downloader(genome_ids: List[str],
                    output_directory: Optional[str] = None,
                    metadata: Optional[str] = None) -> List[str]:
    """Download (or locate) fasta files for the requested assembly accessions.

    Parameters
    ----------
    genome_ids
        A list of assembly accession id's
    output_directory
        Directory to look for and save data into to
    metadata
        A file containing metadata for the genomes to be downloaded

    Returns
    -------
    List[str]
        The filepaths to the fasta files for each id requested.

    Raises
    ------
    ValueError
        If `metadata` is not a valid filepath, lacks required columns, or
        an accession id is not present in the metadata.
    """
    metadata_cols = ['ftp_path', '# assembly_accession']
    if metadata is None:
        genomes_metadata = pd.read_csv(default_metadata(), sep='\t',
                                       index_col=False)
    elif os.path.exists(metadata):
        genomes_metadata = pd.read_csv(metadata, sep='\t', index_col=False)
        # BUGFIX: pandas `Index` has no `.contains` method — membership is
        # tested with the `in` operator.
        if not all(col in genomes_metadata.columns for col in metadata_cols):
            raise ValueError("metadata must at least contain columns "
                             "for all of the following: {}"
                             .format(metadata_cols))
    else:
        raise ValueError("Argument `metadata` must be a valid filepath or "
                         "default `None`")

    if output_directory is None:
        output_directory = os.path.curdir

    genomes_metadata.set_index('# assembly_accession', inplace=True)
    possible_ids = set(genomes_metadata.index)
    for id_ in genome_ids:
        if id_ not in possible_ids:
            raise ValueError('Assembly accession ID \'{}\' is not in metadata'
                             .format(id_))

    # make sure all genomes are downloaded (download if not)
    fasta_filenames = _ensure_all_data(genome_ids, genomes_metadata,
                                       output_directory)
    return fasta_filenames
5,324,720
def find_loop_size(public_key, subject=7):
    """
    To transform a subject number, start with the value 1. Then, a number of
    times called the loop size, perform the following steps:

    - Set the value to itself multiplied by the subject number.
    - Set the value to the remainder after dividing the value by 20201227

    After the desired loop size, the subject number 7 is transformed into
    the public key itself; this function recovers that loop size.
    """
    value, loops = 1, 0
    while value != public_key:
        value = (value * subject) % 20201227
        loops += 1
    return loops
5,324,721
def update_learning_curves(learning_curves, results):
    """Append the current epoch's metric values to the learning curves.

    Parameters
    ----------
    learning_curves : dict
        Learning curves, keyed by phase then metric name.
    results : dict
        Dictionary containing results of current epoch.
    """
    for phase, metrics in results.items():
        phase_curves = learning_curves[phase]
        for metric, value in metrics.items():
            phase_curves[metric].append(value)
5,324,722
def lambda_local_ep(ngl, ind_passive, passive_el, disp_vector, dyna_stif, coord, connect, E, v, rho):
    """ Calculates the lambda parameter of the local elastic potential energy function.

    Args:
        ngl (:obj:`int`): Degrees of freedom.
        ind_passive (:obj:`numpy.array`): Index of passive elements.
        passive_el (:obj:`numpy.array`): Passive element nodes.
        disp_vector (:obj:`numpy.array`): Displacement vector.
        dyna_stif (:obj:`numpy.array`): Dynamic stiffness matrix.
        coord (:obj:`numpy.array`): Coordinates of the element.
        connect (:obj:`numpy.array`): Element connectivity.
        E (:obj:`float`): Elastic modulus.
        v (:obj:`float`): Poisson's ratio.
        rho (:obj:`float`): Density.

    Returns:
        Lambda parameter solution.
    """
    # Accumulate the adjoint load: for each passive element, apply its
    # element stiffness to the conjugated displacement at that element's
    # dofs, reusing aux1 as a scratch vector that is zeroed each pass.
    aux1 = np.zeros(ngl, dtype=complex)
    fadj = 0
    for i, el in enumerate(passive_el):
        Ke, _ = fc.matricesQ4(el, coord, connect, E, v, rho)
        aux1[ind_passive[i]] = Ke@disp_vector[ind_passive[i]].conjugate()
        fadj += aux1
        aux1[:] = 0
    fadj *= -1/2
    # Solve dyna_stif @ lam = fadj for the adjoint variable.
    lam = spsolve(dyna_stif, fadj)
    return lam
5,324,723
async def test_verify_image_signatures( fake_registry_v2_image_source: FakeRegistryV2ImageSourceNoLabels, image_name: ImageName, ): """Test verifying the signatures within the image configuration.""" # An exception should be raised if the image configuration is not signed with pytest.raises(NoSignatureError) as exception: await fake_registry_v2_image_source.verify_image_signatures(image_name) assert str(exception.value) == "Image does not contain any signatures!" # Sign await fake_registry_v2_image_source.quick_sign(image_name) # Replace the class method for resolving signature providers ... original_method = Signer.for_signature Signer.for_signature = _signer_for_signature result = await fake_registry_v2_image_source.verify_image_signatures(image_name) assert result.image_config assert result.signatures # Make sure that signer_kwargs are passed correctly ... assignable_value = time() fake_registry_v2_image_source.signer_kwargs = { FakeSigner.__name__: {"assignable_value": assignable_value} } result = await fake_registry_v2_image_source.verify_image_signatures(image_name) assert result.image_config assert result.signatures fake_signer_verify = cast(FakeSignerVerify, result.signatures.results[0]) assert fake_signer_verify.assignable_value == assignable_value assert fake_signer_verify.type == "fake" assert fake_signer_verify.valid # Restore the original class method Signer.for_signature = original_method
5,324,724
def from_spanning_matroid(matroid: tuple[set[T], list[set[T]]]) -> list[set[T]]:
    """Construct flats from a matroid defined by spanning sets.

    Args:
        matroid (tuple[set[T], list[set[T]]]): A matroid defined by spanning sets.

    Returns:
        list[set[T]]: The flats of a given matroid.
    """
    # Derive the closure operator from the spanning sets, then reuse the
    # closure-based flat construction on the same ground set.
    ground_set = matroid[0]
    closure = closure_function.from_spanning_matroid(matroid)
    return from_closure_matroid((ground_set, closure))
5,324,725
def train_concise_ch11(trainer_fn, hyperparams, data_iter, num_epochs=4): """Defined in :numref:`sec_minibatches`""" # 初始化模型 net = nn.Sequential(nn.Linear(5, 1)) def init_weights(m): if type(m) == nn.Linear: torch.nn.init.normal_(m.weight, std=0.01) net.apply(init_weights) optimizer = trainer_fn(net.parameters(), **hyperparams) loss = nn.MSELoss() # 注意: L2 Loss = 1/2 * MSE Loss。 # PyTorch的MSE损失与MXNet的L2损失大概相差2倍。 # 因此,我们将PyTorch中的损失减半 animator = d2l.Animator(xlabel='epoch', ylabel='loss', xlim=[0, num_epochs], ylim=[0.22, 0.35]) n, timer = 0, d2l.Timer() for _ in range(num_epochs): for X, y in data_iter: optimizer.zero_grad() out = net(X) y = y.reshape(out.shape) l = loss(out, y)/2 l.backward() optimizer.step() n += X.shape[0] if n % 200 == 0: timer.stop() animator.add(n/X.shape[0]/len(data_iter), (d2l.evaluate_loss(net, data_iter, loss)/2,)) timer.start() print(f'loss: {animator.Y[0][-1]:.3f}, {timer.avg():.3f} sec/epoch')
5,324,726
def select_angles_from_sites(networkx_graph, top, Atom1, Atom2, Atom3):
    """Return angles based on interactive selection."""
    params_list = select_params_on_networkx(
        networkx_graph, [Atom1, Atom2, Atom3]
    )
    if not params_list:
        # Nothing matched: show the graph without highlighted edges.
        plot_networkx_params(networkx_graph, list_of_edges=[])
        return

    edges_widget = widgets.Dropdown(
        options=params_list,
        layout=widgets.Layout(width="60%"),
        style=dict(description_width="initial"),
        description="Selected Edge",
    )
    interact(
        select_edges_on_networkx,
        networkx_graph=fixed(networkx_graph),
        top=fixed(top),
        list_of_params=edges_widget,
    )
5,324,727
def suppress_traceback(debug: bool = True):
    """
    Decorator factory that suppresses tracebacks unless in debug mode.

    FIX: the original return annotation (`-> None`) was wrong — this
    function returns the decorator, not None.

    Parameters
    ----------
    debug: bool
        turn on debug mode or not

    Returns
    -------
    The decorator, which wraps a function so its body runs inside a
    `_DebugTraceback` context.
    """
    def decorator(func):
        @functools.wraps(func)
        def wrapper(*args, **kwargs):
            # The context manager decides whether to show the traceback.
            with _DebugTraceback(debug):
                return func(*args, **kwargs)
        return wrapper
    return decorator
5,324,728
def reset_logger():
    """Makes sure that mutations to the logger don't persist between tests."""
    logger = logging.getLogger('awsprocesscreds')
    original_level = logger.level
    original_handlers = copy.copy(logger.handlers)
    original_filters = copy.copy(logger.filters)
    # Everything after the yield will be called during test cleanup.
    yield
    logger.setLevel(original_level)
    # BUGFIX: iterate over snapshots — removeHandler/removeFilter mutate the
    # very lists being iterated, which silently skipped every other entry.
    for handler in list(logger.handlers):
        if handler not in original_handlers:
            logger.removeHandler(handler)
    for log_filter in list(logger.filters):
        if log_filter not in original_filters:
            logger.removeFilter(log_filter)
5,324,729
def statistics_on_atlases(in_registered_map, name_map, prefix_file=None):
    """Computes a list of statistics files for each atlas.

    Args:
        in_registered_map (str): Map already registered on atlases.
        name_map (str): Name of the registered map in CAPS format.
        prefix_file (Opt[str]):
            <prefix_file>_space-<atlas_name>_map-<name_map>_statistics.tsv

    Returns:
        List of paths leading to the statistics TSV files.
    """
    import os

    from nipype.utils.filemanip import split_filename

    from clinica.utils.atlas import (
        AtlasAbstract,
        JHUDTI811mm,
        JHUTracts01mm,
        JHUTracts251mm,
    )
    from clinica.utils.statistics import statistics_on_atlas

    # The three JHU white-matter atlases statistics are computed against.
    in_atlas_list = [JHUDTI811mm(), JHUTracts01mm(), JHUTracts251mm()]

    atlas_statistics_list = []
    for atlas in in_atlas_list:
        if not isinstance(atlas, AtlasAbstract):
            raise TypeError("Atlas element must be an AtlasAbstract type")

        # Without a prefix, reuse the input map's basename for the output.
        if prefix_file is None:
            _, base, _ = split_filename(in_registered_map)
            filename = (
                f"{base}_space-{atlas.get_name_atlas()}"
                f"_res-{atlas.get_spatial_resolution()}_map-{name_map}_statistics.tsv"
            )
        else:
            filename = (
                f"{prefix_file}_space-{atlas.get_name_atlas()}"
                f"_res-{atlas.get_spatial_resolution()}_map-{name_map}_statistics.tsv"
            )

        out_atlas_statistics = os.path.abspath(os.path.join(os.getcwd(), filename))
        statistics_on_atlas(in_registered_map, atlas, out_atlas_statistics)
        atlas_statistics_list.append(out_atlas_statistics)

    return atlas_statistics_list
5,324,730
def render_projection_from_filelist(files: list) -> str:
    """Render a full projection montage from the given list of files

    Returns the filename to the output image, which must be manually
    deleted after use.
    """
    temp_dir = tempfile.TemporaryDirectory()
    try:
        convert_filelist_to_tempfiles(files, temp_dir.name)

        # by using jpeg for the output format, we avoid including the
        # rendered projections in the subsequent renders
        max_file = os.path.join(temp_dir.name, "max.jpg")
        min_file = os.path.join(temp_dir.name, "min.jpg")
        avg_file = os.path.join(temp_dir.name, "avg.jpg")
        convert_tempfiles_to_projection(temp_dir.name, max_file, ProjectionType.MAX)
        convert_tempfiles_to_projection(temp_dir.name, min_file, ProjectionType.MIN)
        convert_tempfiles_to_projection(temp_dir.name, avg_file, ProjectionType.AVG)

        # FIX: tempfile.mktemp is deprecated and race-prone; mkstemp creates
        # the output file atomically (fd closed so make_montage can write it).
        fd, output_filename = tempfile.mkstemp(suffix='.jpg')
        os.close(fd)
        make_montage(min_file, max_file, avg_file, output_filename)
        return output_filename
    finally:
        # FIX: cleanup now also happens when a render step raises.
        temp_dir.cleanup()
5,324,731
def get_value_for_attribute(attribute):
    """For a given key return the value.

    Args:
        attribute (str): Some metadata key.

    Returns:
        str: The value of the requested key, if key isn't present then None.
    """
    path = '/computeMetadata/v1/instance/attributes/%s' % attribute
    try:
        response = _issue_http_request(HTTP_GET, path, REQUIRED_METADATA_HEADER)
        return response.read()
    except (TypeError, ValueError, errors.MetadataServerHttpError):
        # Best-effort lookup: log the failure and report the key as absent.
        LOGGER.exception('Unable to read value for attribute key %s '
                         'from metadata server.', attribute)
        return None
5,324,732
def list_selected_groups(remote):
    """Returns a list of unique facegroup IDs for the current face selection
    (requires an active selection)"""
    # Queue the query command and run it on the remote session.
    cmd = mmapi.StoredCommands()
    result_key = cmd.AppendSelectCommand_ListSelectedFaceGroups()
    remote.runCommand(cmd)

    # Copy the result vector out of the command object.
    groups = mmapi.vectori()
    cmd.GetSelectCommandResult_ListSelectedFaceGroups(result_key, groups)
    return vectori_to_list(groups)
5,324,733
def do_3009(pc, target_id, x, y, skill_id, skill_lv):
    """Fire Blast: performs a flame attack on the target area."""
    monster = get_monster(pc, target_id, x, y, skill_id, skill_lv)
    if monster is None:
        return
    # 500 is presumably the cast time in milliseconds — TODO confirm
    # against start_cast's definition.
    start_cast(pc, target_id, x, y, skill_id, skill_lv, 500)
    # NOTE(review): (3, 3) looks like the area-of-effect size and 50 the
    # attack power; verify against skill_attack_monster_range.
    monsters.skill_attack_monster_range(pc, monster, (3, 3), 50, skill_id, skill_lv)
    pc.set_battlestatus(1)
5,324,734
def mock_cali_query(tmpdir):
    """Create a mock cali-query executable and put it on PATH (fixture)."""
    bin_dir = tmpdir.mkdir("tmp-bin")
    cali_query = bin_dir.join("cali-query")
    with cali_query.open("w") as file:
        file.write("")
    st = os.stat(str(cali_query))
    os.chmod(str(cali_query), st.st_mode | stat.S_IEXEC)

    # save current PATH variable (may legitimately be unset)
    old_path = os.environ.get("PATH")
    # FIX: use os.pathsep instead of a hard-coded ':' for portability, and
    # avoid producing the literal string "None" when PATH was unset.
    if old_path is None:
        os.environ["PATH"] = str(bin_dir)
    else:
        os.environ["PATH"] = "%s%s%s" % (str(bin_dir), os.pathsep, old_path)

    # send it
    yield bin_dir

    # restore original PATH variable
    if old_path is None:
        os.environ.pop("PATH", None)
    else:
        os.environ["PATH"] = old_path
5,324,735
def make_user_variable(
        id_name, cluster_name, w_name, d_name, y_tree_name, y_name,
        x_name_ord, x_name_unord, x_name_always_in_ord, z_name_list,
        x_name_always_in_unord, z_name_split_ord, z_name_split_unord,
        z_name_mgate, z_name_amgate, x_name_remain_ord, x_name_remain_unord,
        x_balance_name_ord, x_balance_name_unord):
    """Put variable names in dictionary, normalising None to []."""
    raw = {
        'id_name': id_name,
        'cluster_name': cluster_name,
        'w_name': w_name,
        'd_name': d_name,
        'y_tree_name': y_tree_name,
        'y_name': y_name,
        'x_name_ord': x_name_ord,
        'x_name_unord': x_name_unord,
        'x_name_always_in_ord': x_name_always_in_ord,
        'z_name_list': z_name_list,
        'x_name_always_in_unord': x_name_always_in_unord,
        'z_name_ord': z_name_split_ord,
        'z_name_unord': z_name_split_unord,
        'z_name_mgate': z_name_mgate,
        'z_name_amgate': z_name_amgate,
        'x_name_remain_ord': x_name_remain_ord,
        'x_name_remain_unord': x_name_remain_unord,
        'x_balance_name_ord': x_balance_name_ord,
        'x_balance_name_unord': x_balance_name_unord,
    }
    # A missing (None) name list becomes an empty list.
    return {key: [] if value is None else value for key, value in raw.items()}
5,324,736
def _quotes_add_night(quotes):
    """Add a night trading session for products that should have one but are
    missing it in the latest-price contract file."""
    for quote in quotes.values():
        product_id = quote["product_id"]
        trading_time = quote.get("trading_time")
        if trading_time and product_id in night_trading_table:
            # setdefault keeps any night session the file already provided.
            trading_time.setdefault("night", [night_trading_table[product_id]])
5,324,737
def show(red, green, blue, blink):
    """
    CLI tool to write a single color to busylight.
    """
    light = busylight.BusyLight(red=red, green=green, blue=blue, blink=blink)
    light.write()
5,324,738
def get_skip_report_step_by_index(skip_report_list):
    """Build a lookup mapping a skip-report line index to its missed step.

    Each line of the skip report is one entry; the leading integer of a
    well-formed entry is the missed step. An out-of-range index, a
    malformed entry, or an empty report yields None.
    """
    pattern = "^([0-9]+),0x[0-9,a-f]+,[0-9,-]+ [0-9,:]+$"

    def extract_step(index):
        entry = (
            skip_report_list[index] if index < len(skip_report_list) else ""
        )
        matches = re.findall(pattern, entry.strip())
        # Exactly one capture means the entry parsed cleanly.
        return int(matches[0]) if len(matches) == 1 else None

    return extract_step
5,324,739
def write_api(entrypoint, kind="node", pkg_path=None, overwrite=False):
    """Generate the public and internal "core" API class files for one
    entrypoint manifest.

    Builds documentation/argument metadata from the entrypoint, then
    writes two Python modules under ``pkg_path``: the public class and
    the internal core class.  Raises FileExistsError when a target file
    already exists and ``overwrite`` is False.

    NOTE(review): relies on module-level names defined elsewhere in this
    file (DocBuilder, DocParameter, Missing, create_py, module_to_path,
    write_class, write_core_class, COPYRIGHT_NOTICE, CODEGEN_WARNING,
    verbose).
    """
    entrypoint_name = entrypoint['Name'].replace(".", "_").lower()
    class_name = entrypoint['NewName']
    class_dir = entrypoint['Module']
    class_type = entrypoint['Type']
    class_file = class_name.lower()

    # Two builders: one for the public class, one for the core class.
    doc_builder = DocBuilder()
    doc_builder.class_name = class_name
    doc_builder.class_module = class_dir
    doc_builder.desc = entrypoint['Desc']

    doc_builder_core = DocBuilder()
    doc_builder_core.class_name = class_name
    doc_builder_core.class_module = class_dir
    doc_builder_core.desc = entrypoint['Desc']

    banner = COPYRIGHT_NOTICE + CODEGEN_WARNING

    if verbose:
        print(class_name)

    ###################################
    # create function
    funobjs = create_py(entrypoint, kind=kind)
    # Visible args are those without a 'hidden' marker.
    visible_args = [
        arg for arg in funobjs['inputs'] if isinstance(
            arg.hidden, Missing)]
    # The 'column' argument is documented/handled separately everywhere.
    doc_args = [
        DocParameter(
            name=arg.new_name_converted,
            desc=arg.desc) for arg in visible_args
        if arg.name_converted != 'column']
    doc_builder.add_manifest_args(doc_args)
    doc_builder_core.add_manifest_args(doc_args)

    # see what column param type is
    column_arg = [
        arg for arg in visible_args if arg.name_converted == 'column']

    # columns for entrypoint
    hidden_args = [arg for arg in funobjs['inputs']
                   if not isinstance(arg.hidden, Missing)]
    columns_entrypoint = [arg for arg in hidden_args]

    # In a function header, arguments must appear in this order:
    #   * any normal arguments(name);
    #   * any default arguments (name=value);
    #   * the *name (or* in 3.X) form;
    #   * any name or name=value keyword-only arguments (in 3.X);
    #   * the **name form.
    # Hence: required (no-default) args first, then defaulted ones.
    class_args = [arg.get_arg() for arg in visible_args if isinstance(
        arg.default, Missing) and arg.name_converted != 'column']
    class_args += [arg.get_arg() for arg in visible_args if not isinstance(
        arg.default, Missing) and arg.name_converted != 'column']
    class_args = ',\n '.join(class_args)

    # "entrypoint-name=value" pairs for the generated entrypoint call.
    entrypoint_args_map = [arg for arg in visible_args if isinstance(
        arg.default, Missing) and arg.name_converted != 'column']
    entrypoint_args_map += [arg for arg in visible_args if not isinstance(
        arg.default, Missing) and arg.name_converted != 'column']
    entrypoint_args_map = [
        "%s=%s" % (arg.name_converted, arg.name_assignment)
        for arg in entrypoint_args_map]
    entrypoint_args_map = '\n'.join(entrypoint_args_map)

    # Same ordering, used for both the public and the core arg maps.
    args_map = [arg for arg in visible_args if isinstance(
        arg.default, Missing) and arg.name_converted != 'column']
    args_map += [arg for arg in visible_args if not isinstance(
        arg.default, Missing) and arg.name_converted != 'column']
    api_args_map = [
        "%s=%s" % (arg.new_name_converted, arg.new_name_converted)
        for arg in args_map]
    api_args_map = '\n'.join(api_args_map)
    core_args_map = [
        "%s=%s" % (arg.new_name_converted, arg.name_core_assignment)
        for arg in args_map]
    core_args_map = '\n'.join(core_args_map)

    # Components additionally carry a generated settings body.
    fun_settings_body = None
    if class_type == 'Component':
        fun_settings_body = "\n ".join(
            [arg.get_body() for arg in funobjs['settings']])

    # Relative-import depth depends on how deeply the module is nested.
    dots = "..."
    if "." in class_dir:
        dots = "...."
    imports = [
        arg.get_import(
            prefix=(
                "%sentrypoints."
                % dots)) for arg in visible_args
        if arg.get_import() is not None]
    imports = '\n'.join(imports)

    # write the class to a file
    py_path = module_to_path(class_dir, pkg_path)
    if not os.path.exists(py_path):
        os.makedirs(py_path)
    file = os.path.join(py_path, ".".join([class_file, "py"]))
    if os.path.exists(file) and not overwrite:
        raise FileExistsError(
            "file {} exists, set 'overwrite = TRUE' to overwrite.".format(
                file))
    write_class(
        entrypoint,
        class_name,
        class_type,
        file,
        class_file,
        class_dir,
        banner,
        class_args,
        api_args_map,
        doc_builder,
        column_arg,
        hidden_args)

    # Generating test classes is broken. Commented out for now.
    # write the class test to a file
    # py_path = os.path.join(pkg_path, "tests", *class_dir.split("."))
    # if not os.path.exists(py_path): os.makedirs(py_path)
    #
    # file = os.path.join(py_path, "test_" + ".".join([class_file, "py"]))
    # if os.path.exists(file) and not overwrite:
    #    raise FileExistsError("file {} exists, set 'overwrite = TRUE'
    #    to overwrite.".format(file))
    #
    # write_class_test(class_name, file)

    # write the core class to a file
    py_path = os.path.join(pkg_path, "internal", "core",
                           *class_dir.split("."))
    if not os.path.exists(py_path):
        os.makedirs(py_path)
    file = os.path.join(py_path, ".".join([class_file, "py"]))
    if os.path.exists(file) and not overwrite:
        raise FileExistsError(
            "file {} exists, set 'overwrite = TRUE' to overwrite.".format(
                file))
    write_core_class(
        entrypoint,
        entrypoint_name,
        class_name,
        class_type,
        file,
        class_file,
        class_dir,
        banner,
        imports,
        class_args,
        core_args_map,
        entrypoint_args_map,
        doc_builder_core,
        column_arg,
        columns_entrypoint,
        fun_settings_body,
        hidden_args)
    return funobjs
5,324,740
def constraint_notes_are(sequence: FiniteSequence,
                         beat_offset: int,
                         pitches: List[int]) -> bool:
    """Check whether the event at beat_offset carries exactly the given pitches.

    Offsets beyond the end of the sequence are vacuously accepted.
    """
    if beat_offset > sequence.duration:
        return True
    event = sequence.event_at(beat_offset)
    return sorted(pitches) == sorted(event.pitches)
5,324,741
async def test_setup_g1(opp):
    """Test setup with a G1 vehicle."""
    config_entry = await setup_subaru_integration(
        opp,
        vehicle_list=[TEST_VIN_1_G1],
        vehicle_data=VEHICLE_DATA[TEST_VIN_1_G1],
    )
    # The entry must be registered and fully loaded after setup.
    loaded_entry = opp.config_entries.async_get_entry(config_entry.entry_id)
    assert loaded_entry
    assert loaded_entry.state is ConfigEntryState.LOADED
5,324,742
def display_progress_bar(current_value: int,
                         goal_value: int,
                         bar_length: Optional[int] = 50,
                         bar_title: Optional[str] = "progress",
                         hash_symbol: Optional[str] = "#",
                         bar_title_color: Fore = Fore.MAGENTA,
                         bar_active_fore_color: Fore = Fore.WHITE,
                         bar_active_back_color: Back = Back.LIGHTMAGENTA_EX,
                         bar_inactive_fore_color: Fore = Fore.WHITE,
                         bar_inactive_back_color: Back = Back.MAGENTA,
                         bar_pctval_color: Fore = Fore.BLUE,
                         bar_background_color: Back = Back.BLACK) -> None:
    """Print a colored progress bar for current_value out of goal_value.

    The bar is bar_length characters wide; progress past 100% is
    indicated with a "|+" terminator.  Colors are colorama Fore/Back
    values.
    """
    percent_to_goal: float = (current_value / goal_value) * 100
    # Scale directly by bar_length.  The previous form,
    # int(percent_to_goal // (100 // bar_length)), mis-scaled bars whose
    # length does not divide 100 and raised ZeroDivisionError for
    # bar_length > 100.
    num_hashes: int = int(percent_to_goal * bar_length // 100)
    if (num_hashes > bar_length):
        num_hashes = bar_length
    result_str: str = ""
    result_str = result_str + bar_title_color + bar_title + Style.RESET_ALL + " |"
    # Filled portion.
    result_str = result_str + bar_active_fore_color + bar_active_back_color
    result_str = result_str + f"{hash_symbol * num_hashes}"
    # Remaining (inactive) portion.
    result_str = result_str + bar_inactive_fore_color + bar_inactive_back_color
    result_str = result_str + f"{'-' * (bar_length - num_hashes)}"
    if percent_to_goal > 100:
        result_str = result_str + Style.RESET_ALL + "|+ "
    else:
        result_str = result_str + Style.RESET_ALL + "| "
    result_str = result_str + bar_pctval_color + f"{percent_to_goal:5.2f}"
    result_str = result_str + Style.RESET_ALL + "%"
    print(result_str)
5,324,743
def make_anuga_params():
    """Function to make the example ANUGA parameters."""
    params = pt.modelParams()
    data_path = os.path.join(os.path.dirname(__file__), 'ex_anuga_data.npz')
    archive = np.load(data_path)
    # pull depth, stage and discharge components from the archive
    depth = archive['depth']
    params.stage = np.copy(depth)
    params.depth = depth
    params.qx = archive['qx']
    params.qy = archive['qy']
    params.dx = 10.
    params.theta = 1.0
    params.model = 'Anuga'
    return params
5,324,744
def test_enum_validation_in_template(tiny_template, tmpdir):
    """Test that the reader can load from a small xlsx file"""
    out_path = tmpdir.join("test_enum_validation.xlsx")
    XlTemplateWriter().write(out_path, tiny_template)
    worksheet = openpyxl.load_workbook(out_path)["TEST_SHEET"]
    expected = f"{XlTemplateWriter._data_dict_sheet_name!r}!$D$2:$D$3"
    for validation in worksheet.data_validations.dataValidation:
        if validation.type != "list":
            continue
        # checking enum in .xlsx
        assert str(validation.formula1) == expected
5,324,745
def clip(tensor: T.Tensor,
         a_min: T.Scalar = None,
         a_max: T.Scalar = None) -> T.Tensor:
    """
    Return a tensor with its values clipped between a_min and a_max.

    Args:
        tensor: A tensor.
        a_min (optional): The desired lower bound on the elements of the tensor.
        a_max (optional): The desired upper bound on the elements of the tensor.

    Returns:
        tensor: A new tensor with its values clipped between a_min and a_max.

    """
    clipped = tensor.clip(a_min, a_max)
    return clipped
5,324,746
def remove_objects_from_args(args,  # type: Iterable[Any]
                             kwargs,  # type: Dict[str, Any]
                             pvalue_class  # type: Union[Type[T], Tuple[Type[T], ...]]
                             ):
    # type: (...) -> Tuple[List[Any], Dict[str, Any], List[T]]
    """For internal use only; no backwards-compatibility guarantees.

    Replaces all objects of a given type in args/kwargs with a placeholder.

    Args:
      args: A list of positional arguments.
      kwargs: A dictionary of keyword arguments.
      pvalue_class: A class object representing the types of arguments that
        must be replaced with a placeholder value (instance of
        ArgumentPlaceholder).

    Returns:
      A 3-tuple containing a modified list of positional arguments, a modified
      dictionary of keyword arguments, and a list of all objects replaced with
      a placeholder value.
    """
    replaced = []

    def _swap_out(value):
        replaced.append(value)
        return ArgumentPlaceholder()

    new_args = [
        _swap_out(v) if isinstance(v, pvalue_class) else v for v in args]
    # Process keyword items in sorted-key order so the replacement order
    # is predictable when the placeholders are later substituted back.
    new_kwargs = {
        k: (_swap_out(v) if isinstance(v, pvalue_class) else v)
        for k, v in sorted(kwargs.items())}
    return (new_args, new_kwargs, replaced)
5,324,747
def reminder_validator(input_str):
    """
    Allows a string that matches utils.REMINDER_REGEX, or the literal
    '.' which is passed through unchanged.
    Raises ValidationError otherwise.
    """
    if input_str == '.' or re.match(REMINDER_REGEX, input_str):
        return input_str
    raise ValidationError('Expected format: <number><w|d|h|m> '
                          '<popup|email|sms>. (Ctrl-C to exit)\n')
5,324,748
def evaluate_nll(confidences, true_labels, log_input=True, eps=1e-8, reduction="mean"):
    """Compute the negative log-likelihood of true_labels under confidences.

    Args:
        confidences (Array): [N, K] class confidences; log-probabilities
            when log_input is True, probabilities otherwise.
        true_labels (Array): [N,] integer class labels.
        log_input (bool): Whether confidences are already log values.
        eps (float): Stabilizer added before the log when log_input is False.
        reduction (str): "none", "mean" or "sum".

    Returns:
        [N,] raw NLL values for reduction="none", otherwise a scalar.
    """
    if log_input:
        log_confs = confidences
    else:
        log_confs = jnp.log(confidences + eps)
    targets = onehot(true_labels, num_classes=log_confs.shape[1])
    nll = -jnp.sum(targets * log_confs, axis=-1)
    if reduction == "none":
        return nll
    if reduction == "mean":
        return jnp.mean(nll)
    if reduction == "sum":
        return jnp.sum(nll)
    raise NotImplementedError(f'Unknown reduction=\"{reduction}\"')
5,324,749
def start(filename=None, level=logging.INFO, debug=False):
    """After initialization, start file logging.

    Swaps the buffered in-memory handler installed at configuration time
    for a real handler (stderr when ``debug`` is true, otherwise a
    rotating file at ``filename``), then flushes everything buffered so
    far.  Subsequent calls are no-ops (guarded by _logging_started).

    :param filename: log-file path (ignored when ``debug`` is true)
    :param level: level to set on the root logger
    :param debug: log to the console instead of a rotating file
    """
    global _logging_started

    assert _logging_configured
    if _logging_started:
        return

    if debug:
        handler = logging.StreamHandler()
    else:
        # ~1 MB per file, keeping up to 9 rotated backups.
        handler = logging.handlers.RotatingFileHandler(filename,
                                                       maxBytes=1000 * 1024,
                                                       backupCount=9)
    # Reuse the formatter chosen at configuration time.
    handler.setFormatter(_mem_handler.formatter)
    _mem_handler.setTarget(handler)

    # root logger
    logger = logging.getLogger()
    logger.removeHandler(_mem_handler)
    logger.addHandler(handler)
    logger.addFilter(PrivacyFilter())
    logger.setLevel(level)

    # flush what we have stored from the plugin initialization
    _mem_handler.flush()
    _logging_started = True
5,324,750
def parse(features: str) -> AirPlayFlags:
    """Parse an AirPlay feature string and return what is supported.

    A feature string have one of the following formats:
      - 0x12345678
      - 0x12345678,0xabcdef12 => 0xabcdef1212345678
    """
    match = re.match(r"^0x([0-9A-Fa-f]{1,8})(?:,0x([0-9A-Fa-f]{1,8})|)$", features)
    if match is None:
        raise ValueError(f"invalid feature string: {features}")

    lower, upper = match.groups()
    # The optional second word carries the upper 32 bits.
    combined = lower if upper is None else upper + lower
    return AirPlayFlags(int(combined, 16))
5,324,751
def _prefix_with_swift_module(path, resource_info): """Prepends a path with the resource info's Swift module, if set. Args: path: The path to prepend. resource_info: The resource info struct. Returns: The path with the Swift module name prepended if it was set, or just the path itself if there was no module name. """ swift_module = resource_info.swift_module if swift_module: return swift_module + "-" + path return path
5,324,752
def str2datetime(dt, format=None):
    """
    convert a string into a datetime object, it can be:

    - 2013-05-24 18:49:46
    - 2013-05-24 18:49:46.568

    @param      dt          string
    @param      format      format for the conversion, the most complete one
                            is ``%Y-%m-%d %H:%M:%S.%f`` which you get by
                            default
    @rtype      datetime
    @return     datetime
    """
    # Drop a trailing UTC-offset suffix ("+hh:mm" or " -hh:mm") first.
    if "+" in dt:
        dt = dt.split("+")[0].strip()
    elif " -" in dt:
        dt = dt.split(" -")[0].strip()

    if format is not None:
        return datetime.datetime.strptime(dt, format)

    # No explicit format: infer one from the separators present.
    if " " in dt:
        sep = " "
    elif "T" in dt:
        sep = "T"
    else:
        return datetime.datetime.strptime(dt, "%Y-%m-%d")
    fmt = "%Y-%m-%d{0}%H:%M:%S".format(sep)
    if "." in dt:
        fmt += ".%f"
    return datetime.datetime.strptime(dt, fmt)
5,324,753
def run_func_motion_correct(func_reorient, out_dir=None, run=True):
    """Run the 'func_motion_correct_workflow' function to execute the modular
    workflow with the provided inputs.

    :type func_reorient: str
    :param func_reorient: Filepath to the deobliqued, reoriented functional
                          timeseries.
    :type out_dir: str
    :param out_dir: (default: None) The output directory to write the results
                    to; if left as None, will write to the current directory.
    :type run: bool
    :param run: (default: True) Will run the workflow; if set to False, will
                connect the Nipype workflow and return the workflow object
                instead.
    :rtype: str
    :return: (if run=True) The filepath of the generated anatomical_reorient
             file.
    :rtype: Nipype workflow object
    :return: (if run=False) The connected Nipype workflow object.
    :rtype: str
    :return: (if run=False) The base directory of the workflow if it were to
             be run.
    """
    import os
    import glob

    import nipype.interfaces.io as nio
    import nipype.pipeline.engine as pe

    output = "func_motion_correct"

    workflow = pe.Workflow(name='%s_workflow' % output)

    if not out_dir:
        out_dir = os.getcwd()

    workflow_dir = os.path.join(out_dir, "workflow_output", output)
    workflow.base_dir = workflow_dir

    resource_pool = {}
    config = {}
    num_cores_per_subject = 1

    resource_pool["func_reorient"] = func_reorient

    # Builds the motion-correction nodes and registers their outputs in
    # the resource pool.
    workflow, resource_pool = \
        func_motion_correct_workflow(workflow, resource_pool, config)

    # Sink the motion-corrected timeseries into the workflow directory.
    ds = pe.Node(nio.DataSink(), name='datasink_func_motion_correct')
    ds.inputs.base_directory = workflow_dir

    node, out_file = resource_pool["func_motion_correct"]

    workflow.connect(node, out_file, ds, 'func_motion_correct')

    # Sink the motion parameters (coordinate transformation) alongside.
    ds = pe.Node(nio.DataSink(), name='datasink_coordinate_transformation')
    ds.inputs.base_directory = workflow_dir

    node, out_file = resource_pool["coordinate_transformation"]

    workflow.connect(node, out_file, ds, 'coordinate_transformation')

    if run:
        workflow.run(plugin='MultiProc', plugin_args= \
            {'n_procs': num_cores_per_subject})
        # Return the single file the datasink produced.
        outpath = glob.glob(os.path.join(workflow_dir,
                                         "func_motion_correct",
                                         "*"))[0]
        return outpath
    else:
        return workflow, workflow.base_dir
5,324,754
def add_ackers(parser):
    """ Add --ackers option to parser """
    # The value is stored into the shared "options" dict (dest="options")
    # under the Storm config key "topology.acker.executors" rather than
    # as a standalone attribute.
    parser.add_argument(
        "-a",
        "--ackers",
        help="Set number of acker executors for your topology. "
        "Defaults to the number of worker nodes in your "
        "Storm environment.",
        type=option_alias("topology.acker.executors"),
        action=_StoreDictAction,
        dest="options",
    )
5,324,755
def get_zoomin(self, scale=1.0):
    """
    Returns a spherical region encompassing maximally refined cells.
    Moved from Amr class.  What should it do??

    Parameters
    ----------
    scale : float
        The radius of the returned sphere is scaled by 'scale'.
    """
    from ..utils import sampling

    # Cells with the minimum mass are the most refined ones.
    finest = np.where(self.dm['m'] == self.dm['m'].min())

    center = []
    spans = []
    for axis in ('px', 'py', 'pz'):
        coords = self.dm[axis][finest]
        lo, hi = coords.min(), coords.max()
        center.append(0.5 * (lo + hi))
        spans.append(hi - lo)

    # Sphere radius: half the largest extent, scaled.
    radius = 0.5 * max(spans) * scale
    return sampling.set_region(centers=center, radius=radius)
5,324,756
def _CreateTsMonFlushingProcess(options): """Creates a separate process to flush ts_mon metrics. Useful for multiprocessing scenarios where we don't want multiple ts-mon threads send contradictory metrics. Instead, functions in chromite.lib.metrics will send their calls to a Queue, which is consumed by a dedicated flushing process. Args: options: An argparse options object to configure ts-mon with. Side effects: Sets chromite.lib.metrics.MESSAGE_QUEUE, which causes the metric functions to send their calls to the Queue instead of creating the metrics. """ # If this is nested, we don't need to create another queue and another # message consumer. Do nothing to continue to use the existing queue. if metrics.MESSAGE_QUEUE or metrics.FLUSHING_PROCESS: return with parallel.Manager() as manager: message_q = manager.Queue() metrics.FLUSHING_PROCESS = multiprocessing.Process( target=lambda: _SetupAndConsumeMessages(message_q, options)) metrics.FLUSHING_PROCESS.start() # this makes the chromite.lib.metric functions use the queue. # note - we have to do this *after* forking the ConsumeMessages process. metrics.MESSAGE_QUEUE = message_q try: yield message_q finally: _CleanupMetricsFlushingProcess()
5,324,757
def parse_repeating_time_interval_to_days(date_str):
    """Parse an ISO 8601 repeating-interval string (e.g. "R/P1Y") into an
    approximate number of days.  Returns 0 for malformed input; valid
    sub-day periods are rounded up to 1 day.
    """
    factors = {'Y': 365, 'M': 30, 'W': 7, 'D': 1, 'H': 0, 'S': 0}

    # A valid periodicity must start with the "R/P" prefix.
    if date_str.find('R/P') != 0:
        return 0
    date_str = date_str.strip('R/P')

    days = 0
    start = 0
    for designator, factor in factors.items():
        end = date_str.find(designator)
        if end < 0:
            continue
        try:
            days += int(float(date_str[start:end]) * factor)
        except ValueError:
            # Unparseable amount: treat this component as 0.
            continue
        start = end

    # Round sub-day periods up to one day.
    return max(days, 1)
5,324,758
def get_pk_and_validate(model):
    """Return the single primary-key field of ``model``.

    Raises a programming error when the model declares zero or more than
    one primary key.  (The error message, in Chinese, says: a model must
    define exactly one primary key, and lists the offending fields.)
    """
    pk_fields = [
        field for field in get_model_fields(model)
        if field.field_info.extra.get('primary_key')
    ]
    if len(pk_fields) != 1:
        raise ERRORS.PROGRAMMING_ERROR.exception(
            '错误:模型{}已定义了{}个主键,分别是{}.提示:一个模型有且只有一个主键'.format(
                model, len(pk_fields),
                ','.join([hit.name for hit in pk_fields]))
        )
    return pk_fields[0]
5,324,759
def test_from_partial():
    """Test from_partial works correctly."""
    # Probability mass omitted from the partial mapping is assigned to
    # outcome 0, so the full distribution sums to 1.
    assert Dice.from_partial({1: Fraction(1, 2),}) == Dice.from_full(
        {0: Fraction(1, 2), 1: Fraction(1, 2),}
    )
    assert Dice.from_partial({1: Fraction(1, 3), 2: Fraction(1, 3),}) == Dice.from_full(
        {0: Fraction(1, 3), 1: Fraction(1, 3), 2: Fraction(1, 3),}
    )
5,324,760
def lat_to_y(lat):
    """Convert latitude to Web-Mercator

    Args:
        lat: a latitude value in degrees

    Returns:
        float: a Web-Mercator y coordinate
    """
    earth_radius = 6_378_137  # radius of the Earth at the equator [m]
    angle = (90 + lat) * pi / 360
    return earth_radius * log(tan(angle))
5,324,761
def AddPrivateNetworkIpArgs(parser):
    """Set arguments for choosing the network IP address."""
    parser.add_argument(
        '--private-network-ip',
        help="""\
      Specifies the RFC1918 IP to assign to the instance. The IP should
      be in the subnet or legacy network IP range.
      """)
5,324,762
def test_bucket_dump_returns_objs_in_compartment_order(get_model_instances):
    """
    Bucket.dump should return a list of objects that have been "put"
    into the bucket in compartment and then PK order.
    """
    # Compartments follow constructor order (EndNode before
    # ReferenceNode), regardless of put() order; within a compartment,
    # objects come back in PK order (ref0 before ref2, although they
    # were put as ref2, ref0).
    bucket = relationtrees.Bucket([m.EndNode, m.ReferenceNode])
    bucket.put(get_model_instances(['ref2', 'ref0']))
    bucket.put(get_model_instances(['end0', 'end1', 'end2']))
    exp = (list(get_model_instances(['end0', 'end1', 'end2'])) +
           list(get_model_instances(['ref0', 'ref2'])))
    assert bucket.dump() == exp
5,324,763
def find_file(filename):
    """Find a file of given name on the file system.

    This function is intended to use in tests and demo applications to
    locate data files without resorting to absolute paths. You may use it
    for your code as well. It looks in the following locations:

    * If an absolute filename is given, it is used
    * Check whether the given relative path exists with respect to the
      current working directory
    * Check whether the given relative path exists with respect to the
      specified XDG data directory (e.g. through the environment variable
      :code:`XDG_DATA_DIRS`).

    :param filename: The (relative) filename to search for
    :type filename: str
    :return: An absolute filename
    """
    # If the path is absolute, do not change it
    if os.path.isabs(filename):
        return filename

    # Gather a list of candidate paths for relative path
    candidates = []

    # Use the current working directory
    candidates.append(os.path.join(os.getcwd(), filename))

    # Use the XDG data directories
    if platform.system() in ["Linux", "Darwin"]:
        for xdg_dir in xdg.xdg_data_dirs():
            candidates.append(os.path.join(xdg_dir, filename))

    # Iterate through the list to check for file existence
    for candidate in candidates:
        if os.path.exists(candidate):
            return candidate

    # Report the actual filename that was searched for; the message
    # previously contained the literal placeholder "(unknown)".
    raise FileNotFoundError(
        f"Cannot locate file {filename}. "
        f"Tried the following locations: {', '.join(candidates)}"
    )
5,324,764
def test_createIngress(kubeConfig):
    """
    Do I create a Ingress kubernetes resource from a yaml manifest file?
    """
    namespace = 'g-se-com'
    fileData = 'data'
    # Patch open() so reading the manifest yields fileData, and stub out
    # the kubernetes API so no real cluster is contacted.
    pOpen = patch_open(fileData)
    pCall = patch.object(lib.TxKubernetesClient, 'call')
    pApiMethod = patch.object(client,
                              'ExtensionsV1beta1Api',
                              return_value=Mock(
                                  create_namespaced_ingress='a',
                              ),
                              autospec=True,
                              )
    with pApiMethod as mApiMethod, pCall as mCall, pOpen:
        # NOTE(review): yield suggests a Twisted inlineCallbacks-style
        # test — the decorator is applied elsewhere; confirm.
        yield lib.createIngress('/path', namespace)

        # The API class is constructed once and call() receives the
        # create_namespaced_ingress method, namespace and manifest data.
        mApiMethod.assert_called_once()
        mCall.assert_called_once_with('a', namespace, fileData)
5,324,765
def test_epo_move_system_command(requests_mock):
    """
    Unit test to validate epo_move_system_command
    Args:
        requests_mock ():mocking the http GET request
    Returns:
        test Passed or Failed
    """
    # Mock the ePO remote API endpoint; "OK:\ntrue" is the raw success
    # payload for a system.move request.
    requests_mock.get(f'{EPO_URL}/remote/system.move?names=TIE&parentGroupId=2',
                      text='OK:\ntrue')
    client = Client(
        base_url=f'{EPO_URL}/remote',
        headers={},
        auth=('', '')
    )
    args = {
        'names': 'TIE',
        'parentGroupId': '2'
    }
    result = epo_move_system_command(client, args)
    # A successful move produces only a human-readable message.
    assert result.outputs is None
    assert result.readable_output == 'System(s) TIE moved successfully to GroupId 2'
5,324,766
def generate_lasso_mask(image, selectedData):
    """
    Generates a polygon mask using the given lasso coordinates

    :param selectedData: The raw coordinates selected from the data
    :return: The polygon mask generated from the given coordinate
    """
    # Lasso y-coordinates are measured from the opposite edge, so flip
    # them into image space before drawing.
    height = image.size[1]
    xs = selectedData["lassoPoints"]["x"]
    ys = [height - y for y in selectedData["lassoPoints"]["y"]]
    polygon = list(zip(xs, ys))

    mask = Image.new("L", image.size)
    ImageDraw.Draw(mask).polygon(polygon, fill=255)
    return mask
5,324,767
def test_no_folding(runner, foo_package):
    """Must create all missing folders."""
    result = runner.invoke(steeve.cli, ['--no-folding', 'stow', 'foo', '1.0'])
    assert result.exit_code == 0
    # With --no-folding, 'bin' must be a real directory (not a symlink
    # to the package tree), while the file inside it is still a link.
    assert not os.path.islink('bin')
    assert os.path.islink(os.path.join('bin', 'foo'))
5,324,768
def test_medicationstatement_2(base_settings):
    """No. 2 tests collection for MedicationStatement.
    Test File: medicationstatementexample4.json
    """
    src = base_settings["unittest_data_dir"] / "medicationstatementexample4.json"
    parsed = medicationstatement.MedicationStatement.parse_file(
        src, content_type="application/json", encoding="utf-8"
    )
    assert parsed.resource_type == "MedicationStatement"
    impl_medicationstatement_2(parsed)

    # Round-trip: dump to a dict, rebuild, and re-run the same checks.
    dumped = parsed.dict()
    assert dumped["resourceType"] == "MedicationStatement"
    rebuilt = medicationstatement.MedicationStatement(**dumped)
    impl_medicationstatement_2(rebuilt)
5,324,769
def plot_scatter(
    title, x_metric, y_metrics, x_axis_title, y_axis_title, df_aggregated
):
    """
    Function to plot and format a scatterplot from the aggregated dataframe
    """
    shapes = list()
    # One line trace per requested y metric, all sharing the index as x.
    traces = [
        go.Scatter(
            x=df_aggregated.index,
            y=df_aggregated[metric],
            mode='lines',
            marker=dict(opacity=0.8, line=dict(width=0)),
            name=metric,
        )
        for metric in y_metrics
    ]

    fig = go.Figure(data=traces)
    fig.update_layout(
        title=title,
        xaxis_title=x_axis_title,
        yaxis_title=y_axis_title,
        legend=go.layout.Legend(
            x=0,
            y=1,
            traceorder="normal",
            font=dict(family="sans-serif", size=8, color="black"),
            bgcolor="LightSteelBlue",
            bordercolor="Black",
            borderwidth=2,
        ),
        shapes=shapes,
    )
    st.plotly_chart(fig)
5,324,770
def guitar(C):
    """Triangular wave (pulled guitar string).

    Simulates a plucked string of length L released from a triangular
    initial shape with peak amplitude ``a`` at ``x0``, driven through the
    module's ``viz`` helper with Courant number ``C``.
    """
    L = 0.75            # string length
    x0 = 0.8*L          # pluck position
    a = 0.005           # pluck amplitude
    freq = 440          # fundamental frequency
    wavelength = 2*L
    c = freq*wavelength  # wave speed from c = freq * wavelength
    from math import pi
    w = 2*pi*freq
    num_periods = 1
    T = 2*pi/w*num_periods
    # Choose dt the same as the stability limit for Nx=50
    dt = L/50./c

    def I(x):
        # Triangular initial displacement peaking at x0.
        return a*x/x0 if x < x0 else a/(L-x0)*(L-x)

    umin = -1.2*a; umax = -umin
    cpu, all_u = viz(I, 0, 0, c, L, dt, C, T, umin, umax,
                     animate=True, tool='scitools')
    # checking
    #for e in all_u:
    #    print e[int(len(all_u[1])/2)]
5,324,771
def start_threads_dict():
    """Return the configured start-URL -> threads mapping.

    Reads the ``Auto_Test.assign_start_threads`` section of the config
    and returns it only when its ENABLE flag is the string "true"
    (case-insensitive); otherwise returns an empty dict.
    """
    flag = const.CONF.get('Auto_Test.assign_start_threads', 'ENABLE')
    if isinstance(flag, str) and flag.lower() == 'true':
        return const.CONF['Auto_Test.assign_start_threads']
    return dict()
5,324,772
def exists_in_s3(s3_path: str) -> Optional[bool]:
    """Check whether a fully specified s3 path exists.

    Args:
        s3_path: Full path on s3 in format "s3://<bucket_name>/<obj_path>".

    Returns:
        Boolean of whether the file exists on s3 (None if there was an error.)
    """
    bucket, key = decompose_s3_path(s3_path)
    client = boto3.client("s3")
    try:
        client.head_object(Bucket=bucket, Key=key)
        return True
    except botocore.exceptions.ClientError:
        # head_object raises ClientError for a missing (or forbidden)
        # object — report plain non-existence.
        return False
    except Exception as e:
        print(
            f"ERROR: unexpected exception checking existence of s3_path={s3_path}"
            f": {e}"
        )
        return None
5,324,773
def validate_single_matching_uri(all_blockchain_uris: List[str], w3: Web3) -> str:
    """
    Return a single block URI after validating that it is the *only* URI in
    all_blockchain_uris that matches the w3 instance.
    """
    from ethpm.uri import check_if_chain_matches_chain_uri

    matching_uris = []
    for uri in all_blockchain_uris:
        if check_if_chain_matches_chain_uri(w3, uri):
            matching_uris.append(uri)

    if not matching_uris:
        raise EthPMValidationError("Package has no matching URIs on chain.")
    if len(matching_uris) != 1:
        raise EthPMValidationError(
            f"Package has too many ({len(matching_uris)}) matching URIs: {matching_uris}."
        )
    return matching_uris[0]
5,324,774
def test_count_for_loops():
    """Test case for counting the for loops."""
    # The module under analysis is expected to report exactly one for
    # loop — see the test_pycount fixture module.
    expected_for = 1
    assert test_pycount.count_for_loops() == expected_for
5,324,775
def ListAndWaitForObjects(service, counting_start_time, expected_set_of_objects,
                          object_prefix):
    """List objects and wait for consistency.

    Args:
      service: the ObjectStorageServiceBase object to use.
      counting_start_time: The start time used to count for the inconsistency
          window.
      expected_set_of_objects: The set of expectation.
      object_prefix: The prefix of objects to list from.

    Returns:
      result_consistent: Is the list consistent
      list_count: Count of the lists before the result is consistent.
      list_latency: Latency of the list request that is consistent.
      total_wait_time: Total time waited before it's consistent.
    """
    total_wait_time = 0
    list_count = 0
    result_consistent = False
    list_latency = 0
    while total_wait_time < LIST_CONSISTENCY_WAIT_TIME_LIMIT:
        list_start_time = time.time()
        list_result = service.ListObjects(FLAGS.bucket, object_prefix)
        list_count += 1
        list_latency = time.time() - list_start_time

        if expected_set_of_objects.difference(set(list_result)):
            # Some expected objects are still missing: update the elapsed
            # time (measured from counting_start_time, not from loop
            # entry) and retry.  NOTE(review): there is no sleep here, so
            # this busy-polls the service — confirm that is intended.
            total_wait_time = time.time() - counting_start_time
            continue
        else:
            result_consistent = True
            break
    # On success, total_wait_time keeps the value from the last failed
    # iteration (0 if the very first listing was already consistent).
    return result_consistent, list_count, list_latency, total_wait_time
5,324,776
def test_breadth(capsys, tree):
    """Correct output order."""
    # Print each node during breadth-first traversal (captured on
    # stdout), write the expected order to stderr, then compare the two
    # captured streams.
    tree.breadth_first_traversal(lambda n: print(n.__str__()))
    sys.stderr.write("10\n5\n3\n6\nmeow\n15\n11\n")
    out, err = capsys.readouterr()
    assert out == err
5,324,777
def load_mbb_player_boxscore(seasons: List[int]) -> pd.DataFrame:
    """Load men's college basketball player boxscore data

    Example:
        `mbb_df = sportsdataverse.mbb.load_mbb_player_boxscore(seasons=range(2002,2022))`

    Args:
        seasons (list): Used to define different seasons. 2002 is the earliest
            available season.

    Returns:
        pd.DataFrame: Pandas dataframe containing the player boxscores
        available for the requested seasons.

    Raises:
        SeasonNotFoundError: If any `season` is less than 2002.
    """
    # Allow a bare int season for convenience.
    if type(seasons) is int:
        seasons = [seasons]
    frames = []
    for season in seasons:
        if int(season) < 2002:
            raise SeasonNotFoundError("season cannot be less than 2002")
        frames.append(
            pd.read_parquet(MBB_PLAYER_BOX_URL.format(season=season),
                            engine='auto', columns=None)
        )
    if not frames:
        return pd.DataFrame()
    # Concatenate once with a fresh unique index; the previous per-season
    # DataFrame.append was O(n^2) and has been removed in pandas 2.0.
    return pd.concat(frames, ignore_index=True)
5,324,778
def countdown(code, input):
    """
    .countdown <month> <day> <year> - displays a countdown to a given date.
    """
    error = '{red}Please use correct format: %scountdown <month> <day> <year>' % code.prefix
    text = input.group(2).strip()
    # Accept space-, slash-, or dot-separated dates.
    if ' ' in text:
        text = text.split()
    elif '/' in text:
        text = text.split('/')
    elif '.' in text:
        text = text.split('.')
    else:
        return code.say(error)
    if len(text) != 3:
        return code.say(error)
    if not text[0].isdigit() or not text[1].isdigit() or not text[2].isdigit():
        return code.say(error)
    month, day, year = text
    try:
        diff = datetime.datetime(
            int(year), int(month), int(day)) - datetime.datetime.today()
    except ValueError:
        return code.say('{red}Incorrect input!')
    # Use floor division; under Python 3 the original true division
    # produced float hours and an always-zero minutes term.
    hours, remainder = divmod(diff.seconds, 3600)
    minutes = remainder // 60
    output = []
    output.append(str(diff.days) + " day(s)")
    output.append(str(hours) + " hour(s)")
    output.append(str(minutes) + " minute(s)")
    output.append(month + "/" + day + "/" + year)
    code.say(' - '.join(output))
5,324,779
def split_rdd(rdd):
    """
    Separate a rdd into two weighted rdds train(70%) and test(30%)

    :param rdd
    """
    SPLIT_WEIGHT = 0.7
    weights = [SPLIT_WEIGHT, 1 - SPLIT_WEIGHT]
    rdd_train, rdd_test = rdd.randomSplit(weights)
    return rdd_train, rdd_test
5,324,780
def QuickSort(A, l, r):
    """
    Sort A[l..r] with quicksort, counting element comparisons.

    Arguments:
    A -- total number list (partitioned in place)
    l -- left index of input list
    r -- right index of input list

    Returns:
    ASorted -- sorted list of the elements in A[l..r]
    cpNum -- number of comparisons performed
    """
    span = r - l  # comparisons made at this recursion level

    # Base cases: single-element or empty range.
    if span == 0:
        return [A[l]], 0
    if span < 0:
        return [], 0

    # Partition around the last element, moved to the front first.
    A[l], A[r] = A[r], A[l]
    pivot = A[l]
    boundary = l + 1
    for scan in range(l + 1, r + 1):
        if A[scan] < pivot:
            A[scan], A[boundary] = A[boundary], A[scan]
            boundary += 1
    A[l], A[boundary - 1] = A[boundary - 1], A[l]

    # Recurse on both sides of the pivot and combine the results.
    left_sorted, left_count = QuickSort(A, l, boundary - 2)
    right_sorted, right_count = QuickSort(A, boundary, r)
    return (left_sorted + [pivot] + right_sorted,
            span + left_count + right_count)
5,324,781
def get_cache_dir():
    """get directory to store data cached by application

    The cache lives in the CACHE_DIR subdirectory of the per-user data
    directory.  Note: the directory is not created here.
    """
    return os.path.join(get_userdata_dir(), CACHE_DIR)
5,324,782
def checksum(input):
    """ Checksum by counting items that have duplicates and/or triplicates and multiplying"""
    doubles = 0
    triples = 0
    for box_id in input:
        letter_counts = Counter(box_id).values()
        # An id counts at most once per category, even with several
        # doubled (or tripled) letters.
        doubles += 2 in letter_counts
        triples += 3 in letter_counts
    return doubles * triples
5,324,783
def Rescale(UnscaledMatrix, Scales):
    """Force a matrix of raw (user-supplied) information into svd-appropriate
    range.

    Column i is shifted by ``Scales[i]["min"]`` and divided by the span
    ``Scales[i]["max"] - Scales[i]["min"]``.  NaN entries survive untouched.
    """
    # Per-column multiplicative factors (1 / span).
    inv_span = [1 / float(scale["max"] - scale["min"]) for scale in Scales]

    # Shift every column so its configured minimum maps to zero.
    rescaled = ma.copy(UnscaledMatrix)
    for col in range(UnscaledMatrix.shape[1]):
        rescaled[:, col] -= Scales[col]["min"]

    # Temporarily zero the NaNs so the matrix product is defined, apply the
    # per-column scaling, then restore the NaNs.
    missing = isnan(rescaled)
    rescaled[missing] = 0
    rescaled = dot(rescaled, diag(inv_span))
    rescaled[missing] = nan
    return rescaled
5,324,784
def test_is_url_image(url, expected_result, expected_error):
    """Check that ``is_url_image`` classifies *url* as expected, raising (or
    not) according to the ``expected_error`` context manager."""
    with expected_error:
        result = is_url_image(url)
        assert result == expected_result
5,324,785
def winrate_of(node: sgf.Node) -> float:
    """Return the winrate of the node/position, defined as the winrate
    reported by its most-visited child move.

    Returns ``None`` when no child carries parseable visit data.
    """
    max_visits = 0
    winrate = 0
    # Consider the main-line successor (if any) followed by the variations.
    variations = ([] if node.next is None else [node.next]) + node.variations
    for child in variations:
        if "B" in child.properties or "W" in child.properties:
            try:
                # parse_comment returns (winrate, visits); keep the winrate
                # of the child with the highest visit count.
                info = parse_comment(child.properties["C"][0])
                if info[1] > max_visits:
                    max_visits = info[1]
                    winrate = info[0]
            except Exception:
                # Best effort: skip children with a missing "C" property or
                # a malformed comment.  (Was a bare except, which would also
                # swallow KeyboardInterrupt/SystemExit.)
                pass
    if max_visits == 0:
        return None
    return winrate
5,324,786
def extract_kernel_version(kernel_img_path):
    """
    Extracts the kernel version out of the given image path.

    The extraction logic is designed to closely mimick the logic Zipl
    configuration to BLS conversion script works, so that it is possible to
    identify the possible issues with kernel images.

    :param str kernel_img_path: The path to the kernel image.
    :returns: Extracted kernel version from the given path
    :rtype: str
    """
    # Mimick bash substitution used in the conversion script, see:
    # https://github.com/ibm-s390-linux/s390-tools/blob/b5604850ab66f862850568a37404faa647b5c098/scripts/zipl-switch-to-blscfg#L168
    # Split on the '/vmlinuz-' prefix when present; otherwise take the
    # path's basename.  When the separator does not occur, the whole input
    # is returned unchanged (single-fragment case).
    separator = '/vmlinuz-' if 'vmlinuz-' in kernel_img_path else '/'
    fragments = kernel_img_path.rsplit(separator, 1)
    return fragments[-1]
5,324,787
def about(request):
    """About view.

    Renders the about page with the description of the (singleton) ``About``
    record, falling back to a stub message when the record is missing or the
    lookup fails.
    """
    try:
        about = About.objects.get().description
    except Exception:
        # DoesNotExist / MultipleObjectsReturned / any lookup failure: show
        # the fallback.  (Was a bare except, which would also swallow
        # KeyboardInterrupt/SystemExit.)
        about = "No information here yet."
    return render(request, 'about_page.html', {'about': about})
5,324,788
def concat_strings(string_list):
    """
    Concatenate all the strings in possibly-nested string_list.

    @param list[str]|str string_list: a list of strings
    @rtype: str

    >>> list_ = (["The", "cow", "goes", "moo", "!"])
    >>> concat_strings(list_)
    'The cow goes moo !'
    >>> list_ = (["This", "sentence", "is actually", \
    "constructed", ["from", ["other"], "smaller"], "strings"])
    >>> concat_strings(list_)
    'This sentence is actually constructed from other smaller strings'
    """
    # A bare string is already fully concatenated.
    if isinstance(string_list, str):
        return string_list
    # Otherwise flatten every element recursively and join with spaces.
    flattened = [concat_strings(item) for item in string_list]
    return " ".join(flattened)
5,324,789
def produce_grid(tuple_of_limits, grid_spacing):
    """Produce a 2D grid for the simulation system.

    The grid is based on the tuple of Cartesian Coordinate limits calculated
    in an earlier step.

    Parameters
    ----------
    tuple_of_limits : tuple
        ``x_min, x_max, y_min, y_max``
    grid_spacing : float
        grid size in all directions in ångström

    Returns
    -------
    grid : array
        ``numpy.mgrid[x_min:x_max:grid_spacing, y_min:y_max:grid_spacing]``
    """
    x_lo, x_hi, y_lo, y_hi = tuple_of_limits
    return np.mgrid[x_lo:x_hi:grid_spacing, y_lo:y_hi:grid_spacing]
5,324,790
def visualize(**images):
    """Plot the given keyword-named images side by side in a single row."""
    count = len(images)
    plt.figure(figsize=(16, 9))
    # Subplot indices are 1-based, hence the start=1.
    for idx, (name, image) in enumerate(images.items(), start=1):
        plt.subplot(1, count, idx)
        plt.xticks([])
        plt.yticks([])
        # Turn e.g. "ground_truth" into the title "Ground Truth".
        title = ' '.join(name.split('_')).title()
        plt.title(title, color='white')
        plt.imshow(image)
    plt.show()
5,324,791
def run_disruptions():
    """
    Only check for disruptions
    """
    # Placeholder command: disruption checking is not implemented yet, so
    # just warn the operator in red via click.
    click.secho('Needs implementing', fg='red')
5,324,792
def intensity_histogram_measures(regionmask, intensity):
    """Compute intensity-distribution features for a masked region.

    The ten features are the five quartile boundaries (min, Q1, median, Q3,
    max) plus mean, mode, standard deviation, skewness and kurtosis of the
    pixel intensities selected by ``regionmask``.

    Args:
        regionmask: binary image (boolean mask selecting the region's pixels;
            assumed same shape as ``intensity`` — TODO confirm at call site)
        intensity: intensity image

    Returns:
        An ``Intensity_Histogram_Measures`` instance wrapping the ten values.
    """
    # Percentiles 0 and 100 are the region's minimum and maximum.
    # NOTE(review): stats.mode(...)[0][0] relies on an older SciPy return
    # shape (mode as an array) — verify against the pinned SciPy version.
    feat= Intensity_Histogram_Measures([np.percentile(intensity[regionmask], 0),
                             np.percentile(intensity[regionmask], 25),
                             np.percentile(intensity[regionmask], 50),
                             np.percentile(intensity[regionmask], 75),
                             np.percentile(intensity[regionmask], 100),
                             np.mean(intensity[regionmask]),
                             stats.mode(intensity[regionmask],axis = None)[0][0],
                             np.std(intensity[regionmask]),
                             stats.skew(intensity[regionmask]),
                             stats.kurtosis(intensity[regionmask])]
                            )
    return feat
5,324,793
def match(input_character, final_answer):
    """
    :param input_character: str, allow users to input a string that will be
        verified whether there are any matches with the final answer.
    :param final_answer: str, the final answer.
    :return: str, return the matching result that could consist of '-' and
        letters.
    """
    # Build the mask in one pass: reveal matching letters, hide the rest.
    result = ''.join(
        input_character if letter == input_character else '-'
        for letter in final_answer
    )
    if input_character in final_answer:
        print('You are correct!')
    else:
        print('There is no ' + input_character + '\'s in the word.')
    return result
5,324,794
def read_json(filename, **kwargs): """Read JSON. Parameters ---------- filename : str **kwargs Keyword arguments into :meth:`~astropy.cosmology.Cosmology.from_format` Returns ------- `~astropy.cosmology.Cosmology` instance """ # read if isinstance(filename, (str, bytes, os.PathLike)): with open(filename, "r") as file: data = file.read() else: # file-like : this also handles errors in dumping data = filename.read() mapping = json.loads(data) # parse json mappable to dict # deserialize Quantity with u.add_enabled_units(cu.redshift): for k, v in mapping.items(): if isinstance(v, dict) and "value" in v and "unit" in v: mapping[k] = u.Quantity(v["value"], v["unit"]) for k, v in mapping.get("meta", {}).items(): # also the metadata if isinstance(v, dict) and "value" in v and "unit" in v: mapping["meta"][k] = u.Quantity(v["value"], v["unit"]) return Cosmology.from_format(mapping, format="mapping", **kwargs)
5,324,795
def menu_load_notes(window, text1, text2, com_socket):
    """Handle the menu's "load notes" selection by delegating to load_notes.

    :param window: parent UI window
    :param text1: first text widget to populate
    :param text2: second text widget to populate
    :param com_socket: socket used to communicate with the server
    """
    load_notes(window, text1, text2, com_socket)
5,324,796
def render_error(request, status=500, title=_('Oops!'),
                 err_msg=_('An error occured')):
    """Render any error page with a given error code, title and text body

    Title and description are passed through as-is to allow html.  Make sure
    no user input is contained therein for security reasons.  The description
    will be wrapped in <p></p> tags by the template.
    """
    context = {'err_code': status, 'title': title, 'err_msg': err_msg}
    body = render_template(request, 'mediagoblin/error.html', context)
    return Response(body, status=status)
5,324,797
def affaire_spatial(request):
    """
    Get modification affaire by affaire_fille
    """
    # Reject anonymous callers.
    if not check_connected(request):
        raise exc.HTTPForbidden()

    # Open affaires that were neither sent nor abandoned and carry real
    # coordinates.  NOTE: '== None' / '== False' are intentional — SQLAlchemy
    # overloads these operators to emit SQL; 'is None' would break the query.
    query = request.dbsession.query(VAffaire)
    query = query.filter(VAffaire.date_cloture == None)
    query = query.filter(VAffaire.date_envoi == None)
    query = query.filter(VAffaire.abandon == False)
    query = query.filter(VAffaire.localisation_e != 0)
    query = query.filter(VAffaire.localisation_n != 0)

    # Build a GeoJSON-style feature list, one point per affaire.
    affaires = []
    for idx, row in enumerate(query.all()):
        affaires.append({
            'type': 'Feature',
            'id': idx,
            'geometry': {
                'type': 'Point',
                'coordinates': [row.localisation_e, row.localisation_n]
            },
            'properties': {
                'number': str(row.id)
            }
        })
    return affaires
5,324,798
def get_cosets(big_galois: GaloisGroup, small_galois: GaloisGroup) -> SetOfCosets:
    """
    Given a big group `big_galois` and a subgroup `small_galois`, return the
    cosets of small_galois \\ big_galois.

    Args:
        big_galois: A `GaloisGroup` whose cosets to examine
        small_galois: The acting subgroup of big_galois

    Returns:
        A collection of cosets.  Each coset is a frozenset of `Permutation`s.
    """
    cosets = set()
    for g in big_galois:
        # The right coset of g under the subgroup's left action.
        cosets.add(frozenset(h * g for h in small_galois))
    return frozenset(cosets)
5,324,799