content
stringlengths
22
815k
id
int64
0
4.91M
def parse_request_GET(request):
    """Extract JSON-RPC fields from the request's query string.

    Populates ``request.rpc_args``, ``request.rpc_method``,
    ``request.rpc_id`` and ``request.rpc_version`` from the ``params``,
    ``method``, ``id`` and ``jsonrpc`` query parameters.  A missing
    ``params`` yields an empty argument tuple; a ``params`` value that is
    not valid JSON raises ``JsonRpcParseError``.
    """
    raw_params = request.GET.get('params')
    if raw_params is None:
        request.rpc_args = ()
    else:
        try:
            request.rpc_args = json.loads(raw_params)
        except ValueError:
            raise JsonRpcParseError
    request.rpc_method = request.GET.get('method')
    request.rpc_id = request.GET.get('id')
    request.rpc_version = request.GET.get('jsonrpc')
5,324,000
def analyze_integration_target_dependencies(integration_targets):  # type: (t.List[IntegrationTarget]) -> t.Dict[str, t.Set[str]]
    """Analyze the given list of integration test targets and return a dictionary expressing target names and the target names which depend on them."""
    # Real filesystem root of the integration targets; used to resolve symlink dependencies.
    real_target_root = os.path.realpath(data_context().content.integration_targets_path) + '/'
    role_targets = [target for target in integration_targets if target.type == 'role']
    hidden_role_target_names = set(target.name for target in role_targets if 'hidden/' in target.aliases)
    # Maps a target name -> set of target names that depend on it.
    dependencies = collections.defaultdict(set)
    # handle setup dependencies
    for target in integration_targets:
        for setup_target_name in target.setup_always + target.setup_once:
            dependencies[setup_target_name].add(target.name)
    # handle target dependencies
    for target in integration_targets:
        for need_target in target.needs_target:
            dependencies[need_target].add(target.name)
    # handle symlink dependencies between targets
    # this use case is supported, but discouraged
    for target in integration_targets:
        for path in data_context().content.walk_files(target.path):
            if not os.path.islink(to_bytes(path.rstrip(os.path.sep))):
                continue
            real_link_path = os.path.realpath(path)
            if not real_link_path.startswith(real_target_root):
                continue
            # First path component under the targets root is the linked-to target's name.
            link_target = real_link_path[len(real_target_root):].split('/')[0]
            if link_target == target.name:
                continue
            dependencies[link_target].add(target.name)
    # intentionally primitive analysis of role meta to avoid a dependency on pyyaml
    # script based targets are scanned as they may execute a playbook with role dependencies
    for target in integration_targets:
        meta_dir = os.path.join(target.path, 'meta')
        if not os.path.isdir(meta_dir):
            continue
        meta_paths = data_context().content.get_files(meta_dir)
        for meta_path in meta_paths:
            if os.path.exists(meta_path):
                # try and decode the file as a utf-8 string, skip if it contains invalid chars (binary file)
                try:
                    meta_lines = read_text_file(meta_path).splitlines()
                except UnicodeDecodeError:
                    continue
                for meta_line in meta_lines:
                    # Skip full-line comments and blank lines before scanning for role names.
                    if re.search(r'^ *#.*$', meta_line):
                        continue
                    if not meta_line.strip():
                        continue
                    for hidden_target_name in hidden_role_target_names:
                        if hidden_target_name in meta_line:
                            dependencies[hidden_target_name].add(target.name)
    # Propagate dependencies transitively: iterate until a full pass adds nothing new.
    while True:
        changes = 0
        for dummy, dependent_target_names in dependencies.items():
            for dependent_target_name in list(dependent_target_names):
                new_target_names = dependencies.get(dependent_target_name)
                if new_target_names:
                    for new_target_name in new_target_names:
                        if new_target_name not in dependent_target_names:
                            dependent_target_names.add(new_target_name)
                            changes += 1
        if not changes:
            break
    # Report the computed dependency map at high verbosity for debugging.
    for target_name in sorted(dependencies):
        consumers = dependencies[target_name]
        if not consumers:
            continue
        display.info('%s:' % target_name, verbosity=4)
        for consumer in sorted(consumers):
            display.info(' %s' % consumer, verbosity=4)
    return dependencies
5,324,001
def newton_cotes(order, domain=(0, 1), growth=False, segments=1):
    """Generate the abscissas and weights in Newton-Cotes quadrature.

    Newton-Cotes quadrature is a family of formulas for numerical
    integration that evaluate the integrand at equally spaced points.

    Args:
        order (int, numpy.ndarray):
            Quadrature order.
        domain (:func:`chaospy.Distribution`, :class:`numpy.ndarray`):
            Either a distribution or the bounds of the interval to
            integrate over.
        growth (bool):
            If True, restrict the growth rule to orders that enhance
            nested samples (order -> 2**order).
        segments (int):
            Split intervals into N subintervals and create a patched
            quadrature based on the segmented quadrature. Can not be lower
            than `order`. If 0 is provided, default to square root of
            `order`. Nested samples only exist when the number of segments
            are fixed.

    Returns:
        (numpy.ndarray, numpy.ndarray):
            abscissas:
                The quadrature points for where to evaluate the model
                function with ``abscissas.shape == (len(dist), N)`` where
                ``N`` is the number of samples.
            weights:
                The quadrature weights with ``weights.shape == (N,)``.

    Examples:
        >>> abscissas, weights = chaospy.quadrature.newton_cotes(4)
        >>> abscissas.round(4)
        array([[0.  , 0.25, 0.5 , 0.75, 1.  ]])
        >>> weights.round(4)
        array([0.0778, 0.3556, 0.1333, 0.3556, 0.0778])
        >>> abscissas, weights = chaospy.quadrature.newton_cotes(4, segments=2)
        >>> abscissas.round(4)
        array([[0.  , 0.25, 0.5 , 0.75, 1.  ]])
        >>> weights.round(4)
        array([0.0833, 0.3333, 0.1667, 0.3333, 0.0833])
    """
    order = numpy.asarray(order)
    # Element-wise: with growth enabled, a positive order n becomes 2**n.
    grown_order = numpy.where(order, 2 ** order, 0)
    order = numpy.where(growth, grown_order, order)
    return hypercube_quadrature(
        _newton_cotes,
        order=order,
        domain=domain,
        segments=segments,
    )
5,324,002
def rename_img(path):
    """Rename every file in *path* to a sequential name: 2.jpg, 3.jpg, ...

    The i-th directory entry (in ``os.listdir`` order) becomes
    ``str(i + 2) + '.jpg'``; each new name is printed before the rename.

    :param path: directory whose files are renamed in place
    :return: None
    """
    for index, entry in enumerate(os.listdir(path)):
        source = os.path.join(path, entry)
        destination = os.path.join(path, str(index + 2) + '.jpg')
        print(destination)
        os.rename(source, destination)
5,324,003
def internalpackage_remove(package_name):
    """Removes an internal package by deleting its folder in the PyPi packages dir.

    :param package_name: String name of the package to remove
    """
    target = os.path.join(PACKAGES_DIR, package_name)
    # Missing directory is treated as already removed: do nothing.
    if os.path.exists(target):
        shutil.rmtree(target)
5,324,004
def transpose(a, axes=None):
    """Return *a* with its dimensions permuted according to *axes*.

    If *axes* is None (the default) the dimensions are reversed.
    """
    # The axes-is-None defaulting is handled inside multiarray.transpose.
    return multiarray.transpose(a, axes)
5,324,005
def test_gan_loss():
    """Exercise GANLoss for every supported gan_type.

    Checks generator- and discriminator-side losses (``is_disc``) against
    precomputed values for the vanilla, lsgan, wgan and hinge variants, and
    verifies that an unknown gan_type is rejected.
    """
    # An unsupported gan_type must fail at construction time.
    with pytest.raises(NotImplementedError):
        loss_cfg = dict(
            type='GANLoss',
            gan_type='test',
            real_label_val=1.0,
            fake_label_val=0.0,
            loss_weight=1)
        _ = build_loss(loss_cfg)
    input_1 = torch.ones(1, 1)
    input_2 = torch.ones(1, 3, 6, 6) * 2
    # vanilla
    loss_cfg = dict(
        type='GANLoss',
        gan_type='vanilla',
        real_label_val=1.0,
        fake_label_val=0.0,
        loss_weight=2.0)
    gan_loss = build_loss(loss_cfg)
    loss = gan_loss(input_1, True, is_disc=False)
    assert_almost_equal(loss.item(), 0.6265233)
    loss = gan_loss(input_1, False, is_disc=False)
    assert_almost_equal(loss.item(), 2.6265232)
    # Discriminator losses are not scaled by loss_weight (half the generator values).
    loss = gan_loss(input_1, True, is_disc=True)
    assert_almost_equal(loss.item(), 0.3132616)
    loss = gan_loss(input_1, False, is_disc=True)
    assert_almost_equal(loss.item(), 1.3132616)
    # lsgan
    loss_cfg = dict(
        type='GANLoss',
        gan_type='lsgan',
        real_label_val=1.0,
        fake_label_val=0.0,
        loss_weight=2.0)
    gan_loss = build_loss(loss_cfg)
    loss = gan_loss(input_2, True, is_disc=False)
    assert_almost_equal(loss.item(), 2.0)
    loss = gan_loss(input_2, False, is_disc=False)
    assert_almost_equal(loss.item(), 8.0)
    loss = gan_loss(input_2, True, is_disc=True)
    assert_almost_equal(loss.item(), 1.0)
    loss = gan_loss(input_2, False, is_disc=True)
    assert_almost_equal(loss.item(), 4.0)
    # wgan
    loss_cfg = dict(
        type='GANLoss',
        gan_type='wgan',
        real_label_val=1.0,
        fake_label_val=0.0,
        loss_weight=2.0)
    gan_loss = build_loss(loss_cfg)
    loss = gan_loss(input_2, True, is_disc=False)
    assert_almost_equal(loss.item(), -4.0)
    loss = gan_loss(input_2, False, is_disc=False)
    assert_almost_equal(loss.item(), 4)
    loss = gan_loss(input_2, True, is_disc=True)
    assert_almost_equal(loss.item(), -2.0)
    loss = gan_loss(input_2, False, is_disc=True)
    assert_almost_equal(loss.item(), 2.0)
    # hinge
    loss_cfg = dict(
        type='GANLoss',
        gan_type='hinge',
        real_label_val=1.0,
        fake_label_val=0.0,
        loss_weight=2.0)
    gan_loss = build_loss(loss_cfg)
    loss = gan_loss(input_2, True, is_disc=False)
    assert_almost_equal(loss.item(), -4.0)
    loss = gan_loss(input_2, False, is_disc=False)
    assert_almost_equal(loss.item(), -4.0)
    loss = gan_loss(input_2, True, is_disc=True)
    assert_almost_equal(loss.item(), 0.0)
    loss = gan_loss(input_2, False, is_disc=True)
    assert_almost_equal(loss.item(), 3.0)
5,324,006
def migrate(engine: Engine) -> None:
    """Create every database object known to ``Base.metadata``.

    A very simple, create-only migration: tables that already exist are
    left alone and model changes are not reconciled.

    Parameters
    ----------
    engine: Engine
        SQLAlchemy engine.
    """
    Base.metadata.create_all(engine)
5,324,007
def find_next_prime(N: int) -> int:
    """Find the next prime >= N.

    Parameters
    ----------
    N : int
        Starting point of the search.

    Returns
    -------
    int
        The first prime found at or after N.
    """
    def _is_odd_prime(candidate):
        # Trial division by odd divisors only; every candidate here is odd.
        divisor = 3
        while divisor * divisor <= candidate:
            if candidate % divisor == 0:
                return False
            divisor += 2
        return True

    if N < 3:
        return 2
    if N % 2 == 0:
        N += 1
    # By Bertrand's postulate a prime exists between N and 2N.
    for candidate in range(N, 2 * N, 2):
        if _is_odd_prime(candidate):
            return candidate
5,324,008
def in_16(library, session, space, offset, extended=False):
    """Reads a 16-bit value from the specified memory space and offset.

    Corresponds to the viIn16* functions of the VISA library.

    :param library: the visa library wrapped by ctypes.
    :param session: Unique logical identifier to a session.
    :param space: Specifies the address space. (Constants.*SPACE*)
    :param offset: Offset (in bytes) of the address or register to read from.
    :param extended: Use a 64-bit offset independent of the platform.
    :return: Data read from memory, return value of the library call.
    :rtype: int, :class:`pyvisa.constants.StatusCode`
    """
    read_fn = library.viIn16Ex if extended else library.viIn16
    result = ViUInt16()
    status = read_fn(session, space, offset, byref(result))
    return result.value, status
5,324,009
def match_head(subject, pattern):
    """Return True iff the subject's head is compatible with the pattern's head."""
    if isinstance(pattern, Pattern):
        pattern = pattern.expression
    pattern_head = get_head(pattern)
    # A pattern without a head, or headed by a OneIdentityOperation,
    # matches any subject.
    if pattern_head is None or issubclass(pattern_head, OneIdentityOperation):
        return True
    subject_head = get_head(subject)
    assert subject_head is not None
    return issubclass(subject_head, pattern_head)
5,324,010
def update(number):
    """Update a document in the user's Firestore "todos" subcollection.

    The request body must be JSON containing a custom ``id`` plus the
    fields to update, e.g. ``json={'id': '1', 'title': 'Write a blog post
    today'}``.

    :param number: document id of the user whose todo list is updated
    :return: (JSON list of all todos, 200) on success, or an error string
    """
    try:
        todo_ref = user_ref.document(number).collection("todos")
        doc_id = request.json['id']  # renamed from `id` to avoid shadowing the builtin
        todo_ref.document(doc_id).update(request.json)
        all_todos = [doc.to_dict() for doc in todo_ref.stream()]
        return jsonify(all_todos), 200
    except Exception as e:
        # NOTE(review): broad catch keeps the endpoint from 500-ing but also
        # hides client errors (e.g. a missing 'id' key); consider narrowing.
        return f"An Error Occurred: {e}"
5,324,011
def test_using_name_in_text_without_explicit_timer(capsys):
    """Test that the name of the timer can be referenced in the text"""
    label = "NamedTimer"
    with Timer(name=label, text="{name}: {:.2f}"):
        waste_time()
    captured_out, _ = capsys.readouterr()
    expected = label + ": " + r"0\.\d{2}"
    assert re.match(expected, captured_out)
5,324,012
def sfn_result(session, arn, wait=10):
    """Poll a StepFunction execution until it finishes and return its result.

    Args:
        session (Session): Boto3 session
        arn (string): ARN of the execution to get the results of
        wait (int): Seconds to wait between polling

    Returns:
        dict|None: Parsed JSON output on success, the failure event details
            for Failed/Aborted/TimedOut executions, or None if no failure
            output could be located
    """
    client = session.client('stepfunctions')
    while True:
        resp = client.describe_execution(executionArn = arn)
        if resp['status'] == 'RUNNING':
            time.sleep(wait)
            continue
        if 'output' in resp:
            return json.loads(resp['output'])
        # No output: inspect the most recent history event for failure details.
        history = client.get_execution_history(executionArn = arn, reverseOrder = True)
        last_event = history['events'][0]
        for suffix in ['Failed', 'Aborted', 'TimedOut']:
            details_key = 'execution{}EventDetails'.format(suffix)
            if details_key in last_event:
                return last_event[details_key]
        return None
5,324,013
def forum_latest_user_posts(parser, token):
    """
    {% forum_latest_user_posts user [number] as [context_var] %}

    Template tag: renders the latest posts for a user.  ``number`` defaults
    to 5 and ``context_var`` defaults to ``latest_user_posts`` when the
    short forms are used.
    """
    bits = token.contents.split()
    if len(bits) not in (2, 3, 5):
        raise TemplateSyntaxError('%s tag requires one, two or four arguments' % bits[0])
    # BUG FIX: the original accessed bits[3] unconditionally, raising
    # IndexError for the documented one- and two-argument forms, and its
    # `if not bits[2]` defaulting could never fire (split() never yields
    # empty strings).  Apply the defaults explicitly instead.
    number = bits[2] if len(bits) >= 3 else 5
    context_var = 'latest_user_posts'
    if len(bits) == 5:
        if bits[3] != 'as':
            raise TemplateSyntaxError("Second argument to %s tag must be 'as'" % bits[0])
        context_var = bits[4]
    return ForumLatestUserPostsNode(bits[1], number, context_var)
5,324,014
def gene_symbol_writeout(outdir, gene_set):
    """Write the targeted gene symbols to ``<outdir>/gene_set.txt``, one per line.

    :param outdir: absolute filepath to output directory
    :param gene_set: first output of determine_gene_targets()
    :return: writes out a file with gene symbols that are targeted
    """
    with open('%s/gene_set.txt' % (outdir), 'w') as out_handle:
        out_handle.writelines('%s\n' % gene for gene in gene_set)
    sys.stdout.write('gene symbols for targeted genes written out\n')
5,324,015
def parse_args(args):
    """Parse command line arguments.

    :param args: list of command-line tokens (e.g. ``sys.argv[1:]``)
    :return: the populated ``argparse.Namespace``
    """
    parser = argparse.ArgumentParser(description='YouTube Subscription Search')
    # (flags, options) pairs, registered in order so help output is stable.
    option_specs = [
        (('-s', '--secrets-file'),
         dict(default='client_id.json',
              help='Client secret file. See README.md on how to get this file.')),
        (('-p', '--set-playlist'),
         dict(action='store_true',
              help='Set the playlist to save videos to.')),
        (('-P', '--just-set-playlist'),
         dict(action='store_true',
              help='Just set playlist to save videos to and exit.')),
        (('-r', '--refresh-subscriptions'),
         dict(action='store_true',
              help='Force a refresh of subscriptions, and search subs.')),
        (('-R', '--just-refresh-subscriptions'),
         dict(action='store_true',
              help='Refresh subscriptions, and do not search subs.')),
        (('-v', '--verbose'),
         dict(action='store_true', help='Verbose output')),
        (('-d', '--debug'),
         dict(action='store_true', help='Debug output')),
    ]
    for flags, options in option_specs:
        parser.add_argument(*flags, **options)
    return parser.parse_args(args)
5,324,016
def test_irc_nickname_strings(test_input, rescue_plain_fx: Rescue):
    """Verify the IRC nickname can be set from a plain string.

    Args:
        test_input (str): nickname value under test
        rescue_plain_fx (Rescue): Rescue fixture
    """
    rescue = rescue_plain_fx
    rescue.irc_nickname = test_input
    assert test_input == rescue.irc_nickname
5,324,017
def PureMultiHeadedAttention(x, params, num_heads=8, dropout=0.0, mode='train', **kwargs):
    """Pure transformer-style multi-headed attention.

    Args:
      x: inputs ((q, k, v), mask)
      params: parameters (none)
      num_heads: int: number of attention heads
      dropout: float: dropout rate
      mode: str: 'train' or 'eval'
      **kwargs: other arguments including the rng

    Returns:
      Pure multi-headed attention result, and the mask.
    """
    del params
    rng = kwargs.get('rng', None)
    (q, k, v), mask = x
    feature_depth = q.shape[-1]
    assert feature_depth % num_heads == 0
    head_depth = feature_depth // num_heads
    nbatch = np.shape(q)[0]

    def split_heads(t):  # pylint: disable=invalid-name
        # nbatch, seqlen, feature_depth --> nbatch, num_heads, seqlen, head_depth
        return np.transpose(
            np.reshape(t, (nbatch, -1, num_heads, head_depth)), (0, 2, 1, 3))

    def join_heads(t):  # pylint: disable=invalid-name
        # nbatch, num_heads, seqlen, head_depth --> nbatch, seqlen, feature_depth
        return np.reshape(
            np.transpose(t, (0, 2, 1, 3)), (nbatch, -1, num_heads * head_depth))

    # Split heads, dot-product attention, rejoin heads.
    attended = DotProductAttention(
        split_heads(q), split_heads(k), split_heads(v), mask,
        dropout=dropout, mode=mode, rng=rng)
    return join_heads(attended), mask  # Keep the mask.
5,324,018
def validate_options(options):
    """
    Validate the options and return bool.

    Note: normalizes some values in place ('exclusions' string -> list,
    'max_attempts' -> int).

    :param options: options to validate
    :type options: dict
    :rtype: bool
    """
    pywikibot.log('Options:')
    notice_keys = [
        'email_subject',
        'email_subject2',
        'email_text',
        'email_text2',
        'note_summary',
        'note_summary2',
        'note_text',
        'note_text2',
    ]
    required_keys = notice_keys + ['date', 'exclusions']
    has_keys = []
    result = True
    for key, value in options.items():
        pywikibot.log(f'-{key} = {value}')
        if key in required_keys:
            has_keys.append(key)
        if key == 'date':
            # 'date' must already be a datetime.date instance.
            if not isinstance(value, date):
                result = False
        elif key == 'exclusions':
            # Normalize a single string to a one-element list; otherwise
            # require a list of strings.
            if isinstance(value, str):
                options[key] = [value]
            elif not isinstance(value, list):
                result = False
            else:
                for item in value:
                    if not isinstance(item, str):
                        result = False
        elif key == 'max_attempts':
            # Coerce to int; a non-numeric value invalidates the options.
            try:
                options[key] = int(value)
            except ValueError:
                result = False
        elif key in notice_keys:
            if not isinstance(value, str):
                result = False
        else:
            # Unknown keys are not allowed.
            result = False
        pywikibot.log(f'\u2192{key} = {options[key]}')
    # Every required key must be present (and none counted twice).
    if sorted(has_keys) != sorted(required_keys):
        result = False
    return result
5,324,019
def verify() -> bool:
    """Verify access to the NFVIS Device.

    Runs two checks against the device returned by ``nvfis_getgcred()``:
    an ICMP ping (output appended to nfvis_tests.txt) and a REST API GET.
    Returns True only if both succeed.
    """
    print("==> Verifying access to the NFVIS Device Environment.")
    nip, url, login, password = nvfis_getgcred()
    s = requests.Session()
    s.auth = (login, password)
    s.headers = ({'Content-type': 'application/vnd.yang.data+json', 'Accept': 'application/vnd.yang.data+json'})
    # TLS certificate verification is disabled (self-signed device certs assumed).
    s.verify = False
    # Test: Is device pingable
    # NOTE(review): the device address is interpolated into a shell command;
    # fine for trusted config, but unsafe if `nip` could be attacker-controlled.
    response = os.system(
        "ping -c 2 {} >> nfvis_tests.txt".format(nip)
    )
    # and then check the response...
    if response == 0:
        pingstatus = "Ping Success"
        print(" " + pingstatus)
    else:
        pingstatus = "Ping Failed"
        print(" " + pingstatus)
        return False
    # Test: Is the REST API running
    r = nfv_get_networks_configuration(s, url)
    if r.status_code == 200:
        print(" REST API Success")
    else:
        print(" REST API Failed")
        return False
    print("Tests complete.\n")
    return True
5,324,020
def main(
    domain: InnerEnv,
    planner: planning_types.Planner,
    belief: belief_types.Belief,
    runs: int,
    logging_level: str,
) -> List[Dict[str, Any]]:
    """plan online function of online planning

    Handles calling :func:`episode`::

        for r in runs:
            rewards = episode(domain, planner, belief)

    In the episode actions are taken in ``domain`` according to the
    ``planner``, which uses a belief maintained by ``belief``.

    Returns a one-dimensional list of dictionaries. This is a flat
    concatenation of the results returned by :func:`episode`. Each entry
    (dict) has a key "run" that indicates the number of the run it was
    produced in.

    :param domain: environment the episodes are run in
    :param planner: picks actions given the current belief
    :param belief: belief/state-estimate maintained across steps
    :param runs: number of episodes to run
    :param logging_level: logging verbosity passed to the utils helper
    :return: flat concatenation of the results of each episode
    """
    utils.set_logging_options(logging_level)
    logger = logging.getLogger("plan-online")
    logger.info("starting %s run(s)", runs)
    output: List[Dict[str, Any]] = []
    for run in range(runs):
        # NOTE(review): assigns the attribute itself, without calling it —
        # presumably `functional_reset` is a sampler/distribution object;
        # confirm it should not be `domain.functional_reset()`.
        belief.distribution = domain.functional_reset
        episode_output = episode(
            planner=planner,
            belief=belief,
            domain=domain,
        )
        # here we explicitly add the information of which run the result was
        # generated to each entry in the results
        for o in episode_output:
            o["run"] = run
        # extend -- flat concatenation -- of our results
        output.extend(episode_output)
        logger.info(
            "run %s/%s terminated: r(%s)",
            run + 1,
            runs,
            utils.discounted_return([t["reward"] for t in episode_output], 0.95),
        )
    return output
5,324,021
def convert_to_target(filepath, data_ann_path_out, target_map):
    """Remap annotation labels and save the result as an image in the data path.

    Labels present in ``target_map.MAP`` are translated to their target ids;
    every other pixel becomes ``target_map.UNLABELLED``.

    :param filepath: source annotation file
    :param data_ann_path_out: output directory for the remapped mask
    :param target_map: mapping object providing read(), MAP and UNLABELLED
    :return: False if the file is missing or no known label was found,
        True once the remapped mask has been saved
    """
    if not os.path.isfile(filepath):
        print("No such file found: ", filepath)
        return False
    ann_file = os.path.basename(filepath)
    # open the image
    mask = target_map.read(filepath)
    remapped = np.ones_like(mask) * target_map.UNLABELLED
    mapped_count = 0
    for cat_id in set(np.unique(mask)):
        # skip categories that are not covered by the mapping
        if cat_id not in target_map.MAP:
            continue
        remapped[mask == cat_id] = target_map.MAP[cat_id]
        mapped_count += 1
    if mapped_count == 0:
        return False
    Image.fromarray(remapped).convert("L").save(os.path.join(data_ann_path_out, ann_file))
    return True
5,324,022
def mvrth(value):
    """Relative move of kphi by *value*.

    Generator plan: delegates to ``_quickmove_rel_plan`` targeting the
    ``arpes_motors.m4`` (kphi) motor.

    :param value: signed amount to move by, in the motor's native units
    """
    yield from _quickmove_rel_plan(value,arpes_motors.m4)
5,324,023
def format_color(
        color: Union[ColorInputType, Any],
        warn_if_invalid: bool = True
) -> Union[ColorType, Any]:
    """
    Format color from string, int, or tuple to tuple type.

    Available formats:

    - Color name str: name of the color to use, e.g. ``"red"`` (all the supported name
      strings can be found in the colordict module, see
      https://github.com/pygame/pygame/blob/main/src_py/colordict.py)
    - HTML color format str: ``"#rrggbbaa"`` or ``"#rrggbb"``, where rr, gg, bb, and aa
      are 2-digit hex numbers in the range of ``0`` to ``0xFF`` inclusive, the aa
      (alpha) value defaults to ``0xFF`` if not provided
    - Hex number str: ``"0xrrggbbaa"`` or ``"0xrrggbb"``, where rr, gg, bb, and aa are
      2-digit hex numbers in the range of ``0x00`` to ``0xFF`` inclusive, the aa
      (alpha) value defaults to ``0xFF`` if not provided
    - int: int value of the color to use, using hex numbers can make this parameter
      more readable, e.g. ``0xrrggbbaa``, where rr, gg, bb, and aa are 2-digit hex
      numbers in the range of ``0x00`` to ``0xFF`` inclusive, note that the aa (alpha)
      value is not optional for the int format and must be provided
    - tuple/list of int color values: ``(R, G, B, A)`` or ``(R, G, B)``, where R, G, B,
      and A are int values in the range of ``0`` to ``255`` inclusive, the A (alpha)
      value defaults to ``255`` (opaque) if not provided

    :param color: Color to format. If format is valid returns the same input value
    :param warn_if_invalid: If ``True`` warns if the color is invalid
    :return: Color in (r, g, b, a) format
    """
    # Values that are not color-like at all are passed through unchanged.
    if not isinstance(color, ColorInputInstance):
        return color
    if not isinstance(color, pygame.Color):
        try:
            if isinstance(color, VectorInstance) and 3 <= len(color) <= 4:
                # Tuple/list form: pygame 2 rejects non-integer components.
                if PYGAME_V2:
                    for j in color:
                        if not isinstance(j, int):
                            raise ValueError('color cannot contain floating point values')
                c = pygame.Color(*color)
            else:
                c = pygame.Color(color)
        except ValueError:
            # Invalid color: either warn and return the input unchanged,
            # or re-raise when warnings are disabled.
            if warn_if_invalid:
                warn(f'invalid color value "{color}"')
            else:
                raise
            return color
    else:
        c = color
    return c.r, c.g, c.b, c.a
5,324,024
def is_yaml_requested(
    content_type: str = None,
    proto: ExtendedProto = None,
    path_suffix: Optional[str] = None,
) -> bool:
    """Checks whether YAML is requested by the user, depending on params.

    Any one of the three signals (content type containing yaml/yml, a yaml
    proto, or a .yaml/.yml path suffix) is sufficient.
    """
    wants_yaml = False
    if content_type is not None:
        wants_yaml = "yaml" in content_type or "yml" in content_type
    if proto is not None:
        wants_yaml = wants_yaml or proto == "yaml"
    if path_suffix is not None:
        wants_yaml = wants_yaml or path_suffix in [".yaml", ".yml"]
    return wants_yaml
5,324,025
def _pqs_in_range(dehn_pq_limit, num_cusps): """ Return an iterator. This iterator, at each step, returns a tuple. The contents of this tuple are num_cusps other tuples, and each of these is of the form (p,q), where 0 <= p <= dehn_pq_limit, -dehn_pq_limit <= q <= dehn_pq_limit, and gcd(p,q) <= 1. Ex: pqs_in_range(3, 2) returns ((-3, 1), (-3, 1)), ((-3, 1), (-3, 2)), ((-3, 1), (-2, 1)), ((-3, 1), (-2, 3)), ((-3, 1), (-1, 0)), ... ((3, 2), (2, 1)) ((3, 2), (2, 3)) ((3, 2), (3, 1)) ((3, 2), (3, 2)) """ pqs = list() for p in range(-1 * dehn_pq_limit, dehn_pq_limit + 1): for q in range(0, dehn_pq_limit + 1): if abs(gcd(p,q)) <= 1: pqs.append((p,q)) # pqs_mult = [ pqs, pqs, pqs... ] # because we wish to return pqs x pqs x pqs ... x pqs pqs_mult = list() for i in range(0, num_cusps): pqs_mult.append(pqs) return product(*pqs_mult)
5,324,026
def _units_defaults(calendar, has_year_zero=None):
    """
    Set calendar specific default units as 'days since reference_date'.

    Day 0 of *excel* and *excel1900* starts at 1899-12-31 00:00:00, day 0
    of *excel1904* starts at 1903-12-31 00:00:00.  The decimal calendars
    *decimal*, *decimal360*, *decimal365*, and *decimal366* do not need
    units, so 0001-01-01 00:00:00 is taken.  Day 0 of *julian*,
    *gregorian* and *standard* starts at -4713-01-01 12:00:00 without a
    year zero and at -4712-01-01 12:00:00 with one.  Day 0 of
    *proleptic_gregorian* starts at -4714-11-24 12:00:00 without a year
    zero and at -4713-11-24 12:00:00 with one.  Day 0 of the idealized
    calendars *360_day*, *365_day*, *366_day*, *all_leap*, and *noleap*
    starts at 0000-01-01 12:00:00.

    Parameters
    ----------
    calendar : str
        One of the supported calendar names in *_cfcalendars* and
        *_noncfcalendars*
    has_year_zero : bool, optional
        Astronomical year numbering is used, i.e. year zero exists, if
        True and possible for the given *calendar*. If *None* (default),
        calendar-specific defaults are assumed.

    Returns
    -------
    str
        'days since reference_date' with calendar-specific reference_date

    Examples
    --------
    >>> print(_units_defaults('Excel'))
    days since 1899-12-31 00:00:00
    """
    calendar = calendar.lower()
    if has_year_zero is None:
        has_year_zero = _year_zero_defaults(calendar)
    if calendar in ['standard', 'gregorian', 'julian']:
        return ('days since -4712-01-01 12:00:00' if has_year_zero
                else 'days since -4713-01-01 12:00:00')
    if calendar in ['proleptic_gregorian']:
        return ('days since -4713-11-24 12:00:00' if has_year_zero
                else 'days since -4714-11-24 12:00:00')
    if calendar in _idealized_cfcalendars:
        return 'days since 0000-01-01 12:00:00'
    if calendar in ['excel', 'excel1900']:
        return 'days since 1899-12-31 00:00:00'
    if calendar in ['excel1904']:
        return 'days since 1903-12-31 00:00:00'
    if calendar in _decimalcalendars:
        return 'days since 0001-01-01 00:00:00'
    raise ValueError(f'Unknown calendar: {calendar}')
5,324,027
def test_use_mount(aggregator, instance_basic_mount, gauge_metrics, rate_metrics):
    """
    Same as above, using mount to tag
    """
    check = Disk('disk', None, {}, [instance_basic_mount])
    check.check(instance_basic_mount)
    # Gauges are tagged with the mount point, rates with the device name.
    for metric_name, metric_value in iteritems(gauge_metrics):
        aggregator.assert_metric(
            metric_name, value=metric_value,
            tags=['device:{}'.format(DEFAULT_MOUNT_POINT)])
    for metric_name, metric_value in iteritems(rate_metrics):
        aggregator.assert_metric(
            metric_name, value=metric_value,
            tags=['device:{}'.format(DEFAULT_DEVICE_NAME)])
    aggregator.assert_all_metrics_covered()
5,324,028
def submit_a_feed(request):
    """Handle a user-submitted custom feed (subscription source).

    Dispatches the URL to the matching handler (self-hosted, QNMLGB host,
    podcast, or plain RSS/Atom), marks the site active, auto-subscribes a
    logged-in user, and returns the handler's result as JSON.  Responds
    404 when the URL is missing or parsing fails.
    """
    feed_url = request.POST.get('url', '').strip()[:1024]
    user = get_login_user(request)
    if feed_url:
        host = get_host_name(feed_url)
        if host in settings.ALLOWED_HOSTS:
            # The URL points at this service itself.
            rsp = add_self_feed(feed_url)
        elif settings.QNMLGB_HOST in host:
            rsp = add_qnmlgb_feed(feed_url)
        else:
            # Distinguish podcast feeds from ordinary RSS/Atom feeds.
            feed_obj = feedparser.parse(feed_url)
            if is_podcast_feed(feed_obj):
                rsp = add_postcast_feed(feed_obj)
            else:
                rsp = add_atom_feed(feed_obj)
        if rsp:
            logger.warning(f"有新订阅源被提交:`{feed_url}")
            set_active_site(rsp['site'])
            # A logged-in user is subscribed to the new feed automatically.
            if user:
                add_user_sub_feeds(user.oauth_id, [rsp['site'], ])
            # if rsp.get('creator') == 'user':
            #     # only newly added plain RSS triggers the async update task
            #     django_rq.enqueue(update_sites_async, [rsp['site'], ], result_ttl=1, ttl=3600, failure_ttl=3600)
            return JsonResponse(rsp)
        else:
            logger.warning(f"RSS 解析失败:`{feed_url}")
    return HttpResponseNotFound("Param Error")
5,324,029
def check_existing_credendtials(account_Name):
    """Return True if credentials stored under *account_Name* exist.

    Note: the function name keeps its historical spelling for caller
    compatibility.
    """
    exists = Credentials.credential_exist(account_Name)
    return exists
5,324,030
def hash_paths(paths, log_interval):
    """Return a map of the base64 hash to the basename for all files in *paths*.

    Progress is printed every *log_interval* files.  Note that files with
    identical contents collapse onto a single hash key.
    """
    hashes = {}
    since_last_log = 0
    for path in paths:
        hashes[hash_file(path)] = os.path.basename(path)
        since_last_log += 1
        if since_last_log >= log_interval:
            print("Hashed {} of {} files: {}".format(
                len(hashes), len(paths), os.path.basename(path)))
            since_last_log = 0
    return hashes
5,324,031
def scmplx(p,a,b):
    """Build a VSIP complex scalar from real part *a* and imaginary part *b*.

    :param p: type designator string, either 'scalar_f' or 'scalar_d'
    """
    if p == 'scalar_d':
        return vsip_cmplx_d(a, b)
    if p == 'scalar_f':
        return vsip_cmplx_f(a, b)
    assert False,'Type %s not defined for cmplx.'%p
5,324,032
def ips_description(request):
    """See :class:`bgpranking.api.get_ips_descs`"""
    asn = request.get('asn')
    block = request.get('block')
    # Both asn and block are mandatory; answer with an empty object otherwise.
    if asn is None or block is None:
        return json.dumps({})
    descriptions = bgpranking.get_ips_descs(
        asn, block, request.get('date'), request.get('sources'))
    return json.dumps(descriptions)
5,324,033
def handle_msg(msg: dict) ->list:
    """Handle an incoming message request object.

    Sends an alert for the message, then returns the response produced by
    ``parse_msg`` when it yields one, falling back to ``get_response``.

    :param msg: message request with at least 'From' and 'Body' keys
    :return: the response for the message
    """
    msg_alert(msg['From'], msg['Body'])
    msg, parsed_response = parse_msg(msg)
    # BUG FIX: the original built an unused `log_msg` list from the
    # reassigned `msg` (a latent KeyError if parse_msg changes its shape)
    # and used a redundant `elif lol is None`; both removed.
    if parsed_response is not None:
        resp = parsed_response
    else:
        resp = get_response(msg)
    return resp
5,324,034
def create_app(register_blueprints=True):
    """Instantiate, configure, and return the Flask application.

    :param register_blueprints: when True, also attach the app's blueprints
    :return: the configured Flask app
    """
    app = Flask(__name__, instance_relative_config=True)
    app.config.from_object('app.default_config')  # default config
    # A server-specific config file would be loaded here via
    # app.config.from_pyfile('application.cfg.py') (not kept in versioning).
    db.init_app(app)
    api = Api(app)
    api.add_resource(UserList, '/api/users')
    if register_blueprints:
        register_blueprints_on_app(app)
    return app
5,324,035
def RoleAdmin():
    """Return the role id of the super administrator."""
    return 1
5,324,036
def _get_proj_info():
    """Information on system PROJ

    Returns
    -------
    proj_info: dict
        pyproj version, PROJ version, and the PROJ data directory
        (None when the data directory cannot be located)
    """
    import pyproj
    from pyproj.exceptions import DataDirError

    try:
        data_dir = pyproj.datadir.get_data_dir()
    except DataDirError:
        data_dir = None
    return {
        "pyproj": pyproj.__version__,
        "PROJ": pyproj.proj_version_str,
        "data dir": data_dir,
    }
5,324,037
def run_program(intcodes):
    """Run an Intcode program stored as a dict of step: intcode pairs.

    Opcode 1 adds, opcode 2 multiplies (both read operands at the addresses
    in the next two cells and store at the address in the third), and 99
    halts.  Mutates *intcodes* in place and returns it on a clean halt.

    :param intcodes: mapping of position -> opcode/operand value
    :return: the (mutated) program memory after opcode 99
    :raises Exception: on an invalid opcode, a truncated instruction, or
        running past the end without reaching opcode 99
    """
    pc = 0
    last = len(intcodes) - 1
    while pc <= last:
        opcode = intcodes[pc]
        if opcode in (1, 2):
            # Shared operand decoding for add/multiply (deduplicated).
            if pc + 3 > last:
                raise Exception("out of opcodes")
            arg1 = intcodes[pc + 1]
            arg2 = intcodes[pc + 2]
            dest = intcodes[pc + 3]
            if opcode == 1:
                intcodes[dest] = intcodes[arg1] + intcodes[arg2]
            else:
                intcodes[dest] = intcodes[arg1] * intcodes[arg2]
            pc += 4
        elif opcode == 99:
            # end program
            return intcodes
        else:
            raise Exception("invalid opcode: {}".format(opcode))
    # only reached if the end of memory is hit before a stop instruction
    raise Exception("ran out of intcodes before program stop reached")
5,324,038
def concat(lst, cat_symb=None, append_to_end=False):
    """Concatenates `lst` of Tensors, optionally with a join symbol.

    Args:
        lst: list of Tensors to concatenate.
        cat_symb: concatenation symbol inserted between tensors.
        append_to_end: if ``True``, also add `cat_symb` after the last tensor.

    Returns:
        cat_tens (Tensor): concatenated tensor where sub-tensors are
            separated by 'cat_symb'.
    """
    assert isinstance(lst, list)
    if cat_symb is None:
        return T.cat(lst)
    separator = cat_symb if isinstance(cat_symb, list) else [cat_symb]
    separator = T.tensor(separator).to(lst[0].device)
    pieces = []
    for piece in lst:
        pieces.append(piece)
        pieces.append(separator)
    if not append_to_end:
        # Drop the trailing separator that the loop always appends.
        pieces.pop()
    return T.cat(pieces)
5,324,039
async def get_the_pointer_there():
    """
    - Figure out where we are currently pointing (read acc/mag)
    - Calculate diff with forward azimuth
    - Turn the pointer

    Runs STEPPER_MOVES_PER_CYCLE iterations; each reads the accelerometer
    and magnetometer, computes a tilt-compensated compass heading, and
    steps the pointer toward FWD_AZMT unless already within TOLERANCE.
    """
    for cycle in range(STEPPER_MOVES_PER_CYCLE):
        acc_x, acc_y, acc_z = accel.acceleration # m/s^2
        mag_x, mag_y, mag_z = mag.magnetic # micro-Teslas
        #print('Acceleration (m/s^2): ({0:10.3f}, {1:10.3f}, {2:10.3f})'.format(acc_x, acc_y, acc_z))
        #print('Magnetometer (mcr-T): ({0:10.3f}, {1:10.3f}, {2:10.3f})'.format(mag_x, mag_y, mag_z))
        # Pitch/roll from gravity direction (normalized acceleration vector).
        # NOTE(review): assumes the board's X/Y axes map to pitch/roll this way
        # for its mounting orientation — confirm against the sensor datasheet.
        acc_norm = math.sqrt(acc_x * acc_x + acc_y * acc_y + acc_z * acc_z)
        pitch = math.asin(acc_x/acc_norm)
        roll = math.asin(acc_y/acc_norm)
        #print('Pitch : {}'.format(math.degrees(pitch)))
        #print('Roll : {}'.format(math.degrees(roll)))
        # Could normalize mag vals as above but ehhh
        # Tilt-compensated magnetic sensor measurements
        tilt_mag_x = mag_x * math.cos(pitch) - mag_z * math.sin(pitch)
        tilt_mag_y = mag_y * math.cos(roll) - mag_z * math.sin(roll)
        #print('Tilt-comp mag : ({0:10.3f}, {1:10.3f})'.format(tilt_mag_x, tilt_mag_y))
        # Heading normalized to [0, 360).
        heading = math.degrees(math.atan2(tilt_mag_y, tilt_mag_x))
        heading = 360 + heading if heading < 0 else heading
        print('Heading: {}'.format(heading))
        # Difference, wrapped to (-180, 180] so we turn the short way.
        diff = FWD_AZMT - heading
        diff = diff % 360
        diff = diff - 360 if diff > 180 else diff
        print("Diff: {0:.6f} degrees".format(diff))
        # Move the pointer
        if diff < 0 - TOLERANCE:
            print("Go anti-clockwise!")
            await step_anticlockwise(STEPPER_STEPS)
        elif diff > 0 + TOLERANCE:
            print("Go clockwise!")
            await step_clockwise(STEPPER_STEPS)
        else:
            print("You're in range... FOLLOW THAT ARROWWWWWWW")
5,324,040
def find_mcs(mols):
    """Return the SMARTS string of the maximum common substructure (MCS)
    of all molecules produced by the *mols* iterator.

    NOTE(review): the previous docstring ("count the number of molecules")
    did not match the code, which computes and returns an MCS SMARTS string.
    """
    out_mols = ROMol_Vect()
    while mols.hasNext():
        molobj = mols.next()
        rdmol, molobj = get_or_create_rdmol(molobj)
        # Add this mol to that vector
        out_mols.add(rdmol)
    # Now find the MCS (commented args show the full findMCS signature)
    mcs=RDKFuncs.findMCS(out_mols)#,True,1,60,False,False,False,False,AtomComparator.AtomCompareElements,BondComparator.BondCompareAny)
    # Return the MCS as a SMARTS string
    return mcs.getSmartsString()
5,324,041
def save_csv(df, data_dir, filename):
    """
    Saves a pandas DataFrame df inside a data_dir folder
    with a filename.csv extension

    Robust for all OS because of pathlib module

    Args:
        df: DataFrame to write.
        data_dir: Directory in which to place the file.
        filename: Name of the output file (including extension).
    """
    from pathlib import Path
    # The original also imported pandas here; it was never used, so it has
    # been removed.
    df.to_csv(Path(data_dir) / filename, index=False)
5,324,042
def build_karel3():
    """Builds the third karel (head, eyes, hands, body, feet, head label).

    All shapes are placed on the module-level `window`; draw order matters,
    as later shapes are stacked on top of earlier ones.
    """
    add = 2  # horizontal slot index for this karel
    x0 = 120 * add  # shared x offset for every part

    def _place(shape, border, fill):
        """Fill *shape*, set its outline/fill colors, and add it to the window."""
        shape.filled = True
        shape.color = border
        shape.fill_color = fill
        window.add(shape)

    _place(GOval(80, 55, x=190 + x0, y=167), 'black', 'gray')          # head
    _place(GRect(13, 13, x=212 + x0, y=189), 'black', 'blue')          # right eye
    _place(GRect(13, 13, x=235 + x0, y=189), 'black', 'blue')          # left eye
    _place(GRect(105, 45, x=177 + x0, y=237), 'black', 'lime')         # hands
    _place(GRect(60, 65, x=201 + x0, y=223), 'black', 'blue')          # upper body
    _place(GRect(80, 60, x=190 + x0, y=230), 'black', 'blue')          # lower body
    _place(GOval(29, 24, x=190 + x0, y=290), 'black', 'red')           # right foot
    _place(GOval(29, 24, x=241 + x0, y=290), 'black', 'red')           # left foot
    _place(GOval(22, 22, x=218 + x0, y=130), 'goldenrod', 'goldenrod') # head label
5,324,043
def to_numpy(tensor):
    """Convert a 3-D torch tensor to a 3-D numpy array, moving axis 0 to the
    end (e.g. CHW -> HWC).

    Args:
        tensor: Tensor to be converted.
    """
    # permute(1, 2, 0) is exactly transpose(0, 1) followed by transpose(1, 2).
    return tensor.permute(1, 2, 0).clone().numpy()
5,324,044
def calib_constants(det, exp=None, ctype='pedestals', run=None, time_sec=None, vers=None, url=cc.URL) :
    """Return (calibration constants, metadata document) for the query.

    To get meaningful constants, at least a few parameters must be
    specified, e.g. (det, ctype, time_sec), (det, ctype, version),
    (det, exp, ctype, run), (det, exp, ctype, time_sec),
    (det, exp, ctype, run, version), etc.
    """
    db_det, db_exp, colname, query = dbnames_collection_query(det, exp, ctype, run, time_sec, vers)
    logger.debug('get_constants: %s %s %s %s' % (db_det, db_exp, colname, str(query)))
    # Use the experiment-scoped DB when an experiment is given, otherwise the
    # detector-scoped DB.
    dbname = db_exp if exp is not None else db_det
    doc = find_doc(dbname, colname, query, url)
    if doc is None :
        logger.warning('document is not available for query: %s' % str(query))
        return (None, None)
    return (get_data_for_doc(dbname, colname, doc, url), doc)
5,324,045
def decode(src, dst):
    """Decodes media file at src and stores it as raw video at dst.

    Args:
        src: Path of the input media file.
        dst: Path of the output raw-video file.

    Raises:
        DecodeFailed: If ffmpeg exits with a non-zero status.
    """
    # Pass arguments as a list instead of shlex-splitting an f-string: the
    # original broke on paths containing spaces or shell metacharacters.
    cmd = [
        "ffmpeg", "-y", "-hide_banner", "-nostats", "-v", "warning",
        "-i", str(src), "-c:v", "rawvideo", "-an", str(dst),
    ]
    try:
        subprocess.check_call(cmd)
    except subprocess.CalledProcessError as err:
        raise DecodeFailed(f"Failed to decode '{src}' - {err}") from err
5,324,046
def check_a_better_than_b(model_a, model_b, X_y_func, metric):
    """Trains two models online and asserts that model_a beats model_b."""
    from .. import model_selection

    def _score(model):
        # Each model gets a fresh data stream and its own deep copy of the
        # metric so the two runs are independent.
        return model_selection.online_score(
            X_y=X_y_func(), model=model, metric=copy.deepcopy(metric)
        )

    score_a = _score(model_a)
    score_b = _score(model_b)

    if metric.bigger_is_better:
        assert score_a.get() > score_b.get()
    else:
        assert score_a.get() < score_b.get()
5,324,047
def is_cluster_healthy(admin, zk, retries=10, retry_wait=30):
    """Return true if cluster is healthy.

    Args:
        admin: Admin endpoint used to fetch cluster metadata.
        zk: ZooKeeper handle passed to the health check.
        retries: Maximum number of health-check attempts.
        retry_wait: Seconds to sleep between attempts.

    Returns:
        bool: True as soon as one attempt reports healthy, False otherwise.
    """
    for attempt in range(retries, 0, -1):
        md = _request_meta(admin)
        if md is not None and not _unhealthy(md, zk):
            logger.info("Cluster is healthy!")
            return True
        logger.warning(
            "Cluster is not healthy, retries left/total = %d/%d.",
            attempt,
            retries,
        )
        # Don't waste a full retry_wait after the final failed attempt
        # (the original slept once more before giving up).
        if attempt > 1:
            time.sleep(retry_wait)
    logger.error("Cluster was found to be un-healthy after multiple retries.")
    return False
5,324,048
def load_data(loc):
    """ Load in the csv file and return it as a numpy array.

    Args:
        loc: Path to the csv file.

    Returns:
        numpy.ndarray: Row-major array of the csv contents, with missing
        values replaced by empty strings.
    """
    df = pd.read_csv(loc, engine = "python", encoding = "utf-8")
    # Bug fix: fillna returns a new frame; the original discarded the result,
    # so NaNs leaked into the output.
    df = df.fillna("")
    return np.asarray(df)
5,324,049
def listdir(folder, suffix):
    """Recursively collect paths of files under *folder* ending with *suffix*.

    Symlinked directories are followed.
    """
    matches = []
    for root, _, filenames in os.walk(folder, followlinks=True):
        matches.extend(
            osp.join(root, name) for name in filenames if name.endswith(suffix)
        )
    return matches
5,324,050
def mqtt_subscribe_cb(topic: str, msg: str):
    """ Callback executed whenever an MQTT message is received.

    Args:
        topic (str): The MQTT topic that was received
        msg (str): The MQTT message that was received

    NOTE(review): despite the str annotations, the comparisons below expect
    bytes payloads (b'notification' / b'received') -- confirm against caller.
    """
    print((topic, msg))
    is_hello = topic == b'notification' and msg == b'received'
    if is_hello:
        print('ESP received hello message')
5,324,051
def book_meta_data(book_link, book_title, author_name):
    """Fetch one book's metadata page, append a row to metadata.csv, then
    trigger the download of the book itself.

    book_link: Direct link to the book
    book_title: Title of the book
    author_name: Name of the author
    """
    global soup
    # NOTE(review): `i` is never incremented, so the URLError branch below
    # can only ever take the `time.sleep(42)` path and never actually retries.
    i, retries = 0, 3
    # fetching the metadata for each of the books
    try:
        print("Fetching book metadata...")
        request = urlopen(
            Request(
                book_link,
                headers={
                    "Connection": "close",
                    "User-Agent": get_user_agent()})).read()
        soup = BeautifulSoup(request, 'html.parser')
    except HTTPError as e:
        print(e)
    except URLError:
        if i + 1 == retries:
            print('The server could not be found!')
        else:
            time.sleep(42)
    else:
        print("Metadata Fetched!")
        print("=" * 40)
    metadict = {}  # dictionary to hold the fetched details
    # Expected metadata keys on the details page (ISBN is dropped later).
    columns = [
        "Categories",
        "File",
        "ISBN",
        "Language",
        "Pages",
        "Publisher",
        "Year"]
    try:
        # fetch the book details by requesting direct link of books
        for child in soup.find('div', {'class': 'bookDetailsBox'}).children:
            # check if the parsed content (child) is NavigableString
            if isinstance(child, NavigableString):
                continue
            # check if the parsed content (child) is Tag
            if isinstance(child, Tag):
                # get the keys and values
                metadata = soup.find('div', {'class': child['class'][1]}).get_text().split()
                # create a dictionary entry: "Key:" -> first value token
                metadict[metadata[0].replace(":", "")] = metadata[1]
    except AttributeError:
        print("No Children found!")
    # removing ISBN as it is not useful here
    if 'ISBN' in metadict.keys():
        del metadict['ISBN']
    if 'ISBN' in columns:
        columns.remove('ISBN')
    # sorting the dictionary by keys
    metadict = dict(sorted(metadict.items()))
    """ logic to add NaN values """
    # check if the length of columns list and fetched data keys are same
    if len(columns) != len(metadict.keys()):
        # if not then find the columns which are not there
        not_present = set(columns) - set(metadict.keys())
        # added them to dictionary with 'NaN' placeholder values
        for col in not_present:
            metadict[col] = 'NaN'
        # sort the data again for sanity
        metadict = OrderedDict(sorted(metadict.items()))
    # cleaning 'File' column
    # NOTE(review): assumes 'File' is always present; a page with no File
    # entry would raise KeyError here.
    metadict['File'] = metadict['File'].replace(",", "")
    # cleaning 'Categories' column
    if 'Categories' in metadict.keys():
        metadict['Categories'] = metadict['Categories'].replace("\\\\", ",")
    # adding author column to the front of the dictionary
    metadict = OrderedDict([('Author', author_name)] + list(metadict.items()))
    # adding title column to the front of the dictionary
    metadict = OrderedDict([('Title', book_title)] + list(metadict.items()))
    csv_file = "metadata.csv"  # name of the csv file
    print("Creating Excel File... Adding row...")
    # open the file in append mode and write the dictionary content
    try:
        with open(csv_file, 'a+', newline='') as csvfile:
            writer = csv.DictWriter(
                csvfile, fieldnames=metadict.keys(), delimiter=',')
            # Only write the header when the file is empty (fresh file).
            if csvfile.tell() == 0:
                writer.writeheader()
            writer.writerow(metadict)
    except IOError:
        print("I/O error")
    print("Row Added!")
    print("=" * 40)
    # call the download function
    download(book_link)
5,324,052
def clif_deps_to_cclibs(labels):
    """Gets the cc_library name for each of label as a list."""
    cc_libs = []
    for label in labels:
        cc_libs.append(_clif_to_lib(label, PYCLIF_CC_LIB_SUFFIX))
    return cc_libs
5,324,053
def _get_cache_filename(year=2020):
    """Returns the `Path` to the COBS data file for a given year."""
    return CACHEDIR / 'cobs{}.feather'.format(year)
5,324,054
def get_critical_hours_end(critical_ffmc: float, solar_noon_ffmc: float, critical_hour_start: float):
    """ Returns the hour of day (on 24H clock) at which the hourly FFMC drops
    below the threshold of critical_ffmc.
    Should only be called if critical_hour_start is not None.
    """
    if critical_hour_start is None:
        return None
    # A morning start means the diurnal curve keeps FFMC critical into the
    # afternoon, so begin scanning from 14:00; otherwise scan from the hour
    # after the critical window opened.
    clock_time = 14.0 if critical_hour_start < 13 else critical_hour_start + 1.0
    # Walk forward in 1-hour increments while FFMC stays at/above threshold.
    while get_afternoon_overnight_diurnal_ffmc(clock_time, solar_noon_ffmc) >= critical_ffmc:
        clock_time += 1.0
        if clock_time >= 32:
            # 32 corresponds to 08:00 of the next day; stop scanning there.
            break
    # Back up past the hour that caused FFMC to drop below critical_ffmc.
    clock_time -= 1.0
    return clock_time - 24.0 if clock_time >= 24.0 else clock_time
5,324,055
def dolpc(x, model_order=8):
    """
    Function dolpc computes the autoregressive model from spectral magnitude samples.

    @param x: Critical band filters, shape (num_bands, num_frames).
    @param model_order: Order of model. Default is 8.

    @returns: Autoregressive model from spectral magnitude samples,
        shape (model_order + 1, num_frames), each polynomial normalized
        by its prediction-error gain.
    """
    num_bands, num_frames = x.shape

    # Calculate autocorrelation:
    # mirror the band axis (even extension) so the inverse FFT of the
    # magnitude spectrum yields a real autocorrelation sequence.
    R = np.zeros((2 * (num_bands - 1), num_frames))
    R[0:num_bands, :] = x
    for i in range(num_bands - 1):
        R[i + num_bands - 1, :] = x[num_bands - (i + 1), :]

    r = fft.ifft(R.T).real.T
    # Keep only the first num_bands autocorrelation lags.
    r = r[0:num_bands, :]

    y = np.ones((num_frames, model_order + 1))
    e = np.zeros((num_frames, 1))

    # Find LPC coeffs by durbin (Levinson-Durbin recursion per frame)
    if model_order == 0:
        for i in range(num_frames):
            _ , e_tmp, _ = spectrum.LEVINSON(r[:, i], model_order, allow_singularity = True)
            e[i, 0] = e_tmp
    else:
        for i in range(num_frames):
            y_tmp, e_tmp, _ = spectrum.LEVINSON(r[:, i], model_order, allow_singularity = True)
            # Column 0 stays 1 (leading AR coefficient); fill the rest.
            y[i, 1:model_order + 1] = y_tmp
            e[i, 0] = e_tmp

    # Normalize each poly by gain; the 1e-8 guards against division by a
    # zero prediction error.
    y = np.divide(y.T, np.add(np.tile(e.T, (model_order + 1, 1)), 1e-8))

    return y
5,324,056
def filter_domains(domains, by="evalue", coverage_pct=0.5, tolerance_pct=0.1):
    """Filter overlapping Domain objects and test adjcency rules.

    Adjacency rules are tested again here, in case they are missed within
    overlap groups. For example, the NRPS-para261 domain is not always
    entirely contained by a condensation domain, so should be caught by
    this pass.

    Parameters:
        domains (list): Domain instances to be filtered
        by (str): Metric used to choose representative domain hit (def. 'evalue')
        coverage_pct (float): Conserved domain coverage percentage threshold
        tolerance_pct (float): CD length tolerance percentage threshold
    Returns:
        list: Domain objects remaining after filtering
    """
    domains = [
        choose_representative_domain(group, by)
        for group in group_overlapping_hits(domains)
    ]

    # Merge adjacent fragments of the same domain type.
    # Bug fix: the original cached len(domains) once before the loop; after a
    # `del domains[i]` the stale bound could let the 2-element slice unpack
    # raise ValueError when several consecutive merges shrank the list.
    # Recomputing the bound every iteration fixes that (and also lets the
    # final adjacent pair be considered for merging).
    i = 1
    while i < len(domains):
        previous, current = domains[i - 1], domains[i]
        # When domains are likely together, e.g. two small C domain hits right
        # next to each other or multiple Methyltransf_X domains, extend border
        if previous.type == current.type and is_fragmented_domain(
            previous, current, coverage_pct, tolerance_pct
        ):
            previous.end = current.end
            del domains[i]
        else:
            i += 1
    return domains
5,324,057
def _replicate_and_maybe_restore_latest_checkpoint(
    unreplicated_optimizer_state,
    unreplicated_params,
    unreplicated_batch_stats,
    unreplicated_training_metrics_grabber,
    train_dir,
    use_deprecated_checkpointing):
  """Restore from the latest checkpoint, if it exists.

  Args:
    unreplicated_optimizer_state: Single-device optimizer state.
    unreplicated_params: Single-device model parameters.
    unreplicated_batch_stats: Single-device batch statistics.
    unreplicated_training_metrics_grabber: Single-device metrics grabber.
    train_dir: Directory scanned for the latest checkpoint.
    use_deprecated_checkpointing: Forwarded to the checkpoint helpers.

  Returns:
    Tuple of (optimizer_state, params, batch_stats,
    training_metrics_grabber, global_step, sum_train_cost,
    preemption_count, is_restored), with the pytree entries replicated
    across local devices.
  """
  # Sentinel so we can tell "no checkpoint found" apart from a real restore.
  uninitialized_global_step = -1
  unreplicated_checkpoint_state = dict(
      params=unreplicated_params,
      optimizer_state=unreplicated_optimizer_state,
      batch_stats=unreplicated_batch_stats,
      training_metrics_grabber=unreplicated_training_metrics_grabber,
      global_step=uninitialized_global_step,
      preemption_count=0,
      sum_train_cost=0.0)
  latest = checkpoint.load_latest_checkpoint(
      train_dir,
      target=unreplicated_checkpoint_state,
      recents_filename='latest',
      use_deprecated_checkpointing=use_deprecated_checkpointing)
  # A checkpoint only counts as found if it overwrote the sentinel step.
  found_checkpoint = (
      latest and latest['global_step'] != uninitialized_global_step)

  # Replicate the freshly-initialized state first; it is returned as-is when
  # no checkpoint was found.
  optimizer_state = jax_utils.replicate(unreplicated_optimizer_state)
  params = jax_utils.replicate(unreplicated_params)
  batch_stats = jax_utils.replicate(unreplicated_batch_stats)
  training_metrics_grabber = jax_utils.replicate(
      unreplicated_training_metrics_grabber)

  if not found_checkpoint:
    return (
        optimizer_state,
        params,
        batch_stats,
        training_metrics_grabber,
        0,  # global_step
        0.0,  # sum_train_cost
        0,  # preemption_count
        False)  # is_restored

  pytree_dict, extra_state = restore_checkpoint(
      latest,
      pytree_keys=[
          'optimizer_state',
          'params',
          'batch_stats',
          'training_metrics_grabber',
      ],
      use_deprecated_checkpointing=use_deprecated_checkpointing)
  return (
      pytree_dict['optimizer_state'],
      pytree_dict['params'],
      pytree_dict['batch_stats'],
      pytree_dict['training_metrics_grabber'],
      extra_state['global_step'],
      extra_state['sum_train_cost'],
      extra_state['preemption_count'],
      True)
5,324,058
def minkowskiSum(obj1, obj2):
    """ Minkowski sum of two polygon objects

    Args:
        obj1, obj2: (n,2) array of corner point

    Return:
        poly: (m,2) array of minkowski polygon vertices centered at (0, 0)
        bound: dict with 'min' ([min_x, min_y]) and 'max' ([max_x, max_y])
            signed distances from vertices to center of polygon
    """
    obj1 = np.asarray(obj1, dtype=float)
    obj2 = np.asarray(obj2, dtype=float)
    # All pairwise vertex sums at once. This replaces the original O(n*m)
    # np.vstack loop and also drops the `np.float` alias, which was removed
    # in NumPy 1.24 (it raised AttributeError there).
    sums = (obj1[:, None, :] + obj2[None, :, :]).reshape(-1, 2)
    # Recenter so the summed polygon is expressed about (0, 0).
    poly00 = sums - np.mean(obj1, axis=0) - np.mean(obj2, axis=0)
    hull = ConvexHull(poly00)
    poly = poly00[hull.vertices]
    bound = {'max': hull.max_bound, 'min': hull.min_bound}
    return poly, bound
5,324,059
def get_rotated_image_from_contour(img, contour, rotation=90):
    """
    Returns a rotated version of img based on cv2.minAreaRect of contour.
    First side, (i.e most left to top edge) is always "Width" from minAreaRect.
    If our width > height, we know we have the sheet rotated to the right.
    We need to shift our angle and width 90 degree in order to get the correct
    coordinates from cv2.boxPoints
    """
    rotated_rect = cv2.minAreaRect(contour)
    # Get the center x,y and width and height.
    x_center = int(rotated_rect[0][0])
    y_center = int(rotated_rect[0][1])
    width = int(rotated_rect[1][0])
    height = int(rotated_rect[1][1])
    angle_degrees = rotated_rect[2]
    # Normalize so width <= height; minAreaRect's angle is relative to its
    # "width" side, so swapping sides requires shifting the angle by 90.
    if(width > height):
        temp_height = height
        height = width
        width = temp_height
        angle_degrees = 90 + angle_degrees
    # Reassign rotated rect with updated values
    rotated_rect = ((x_center, y_center), (width, height), angle_degrees)
    # Find the 4 (x,y) coordinates for the rotated rectangle, order: bl, tl,tr, br
    rect_box_points = cv2.boxPoints(rotated_rect)
    # Debug visualisation side effect: draws the contour and opens a window.
    img_debug_contour = img.copy()
    cv2.drawContours(img_debug_contour, [contour], 0, (0, 0, 255), 3)
    show_window('biggest_contour', img_debug_contour)
    img_debug = img.copy()
    cv2.drawContours(img_debug, [np.int0(rect_box_points)], 0, (0, 255, 255), 3)
    # show_window('min_area_rect_original_image', img_debug)
    # Prepare for rotation transformation: map bl/tl/tr of the box onto the
    # corners of an upright width x height image.
    src_pts = rect_box_points.astype("float32")
    dst_pts = np.array([
        [0, height-1],  # Bottom Left
        [0, 0],  # Top Left
        [width-1, 0],  # Top Right
    ], dtype="float32")
    # Affine rotation transformation (three point pairs fully determine it)
    ROTATION_MAT = cv2.getAffineTransform(src_pts[:3], dst_pts)
    rotated = cv2.warpAffine(
        img, ROTATION_MAT, (width, height))
    # Final extra rotation (default 90 degrees) requested by the caller.
    return ndimage.rotate(rotated, rotation)
5,324,060
def test_set_const_crashes__setattr():
    """Attempting to set a const field via attribute access crashes.

    Only use attribute access to avoid interaction with __setitem__.
    """
    expected = re.escape(
        "Cannot assign to immutable field: <binobj.fields.stringlike.Bytes("
        "name='header')"
    )
    with pytest.raises(errors.ImmutableFieldError, match=expected):
        struct = BasicStructWithArray()
        struct.header = b"ABC"
5,324,061
def config(config: Config, key: str, value: str):
    """Change the configuration by setting *key* to *value*."""
    config.update(key, value)
5,324,062
def fix_dataset_dims(d):
    """Given one of the dataset files given by the organizers, fix its
    dimensions so its easier to concatenate and use with xr.open_mfdataset.

    Arguments:
      d. xr.Dataset. The dataset you get when you open one of the provided files.
    """
    first_time = d.forecast_time[0]
    label = "{:02}{:02}".format(int(first_time.dt.month), int(first_time.dt.day))

    # Tag the file with a month/day coordinate so files from different start
    # dates can be concatenated.
    monthday = xr.DataArray([label], dims="forecast_monthday")
    out = d.expand_dims("forecast_monthday").assign_coords(forecast_monthday=monthday)
    out = out.assign_coords(forecast_year=out.forecast_time.dt.year).swap_dims(
        forecast_time="forecast_year"
    )

    # Reorder the dimensions to something that is more intuitive (according to me).
    fixed = ("forecast_monthday", "forecast_year", "latitude", "longitude")
    middle = set(out.dims) - set(fixed)
    out = out.transpose(
        "forecast_year", "forecast_monthday", *middle, "latitude", "longitude"
    )

    # out = out.chunk(chunks="auto")

    return out
5,324,063
def prettyprint_xml(element):
    """
    A rough and dirty way to prettyprint an Element with indention.

    :param lxml.etree._Element element: The Element or ElementTree to format.

    :rtype: str
    :returns: A prettyprinted representation of the element.
    """
    raw = etree.tostring(element, pretty_print=True)
    return raw.decode('utf-8')
5,324,064
def load_vimba_lib(vimba_project: str):
    """ Load shared library shipped with the Vimba installation

    Arguments:
        vimba_project - Library name without prefix or extension

    Return:
        CDLL or WinDLL Handle on loaded library

    Raises:
        VimbaSystemError if given library could not be loaded.
    """
    # Dispatch on platform; anything outside this table is unsupported.
    loader = {
        'linux': _load_under_linux,
        'win32': _load_under_windows
    }.get(sys.platform)

    if loader is None:
        msg = 'Abort. Unsupported Platform ({}) detected.'
        raise VimbaSystemError(msg.format(sys.platform))

    return loader(vimba_project)
5,324,065
def decorator(IterativeReconAlg, name=None, docstring=None):
    """Wrap an IterativeReconAlg class into a plain callable.

    The returned function instantiates the algorithm with the supplied
    parameters, runs its main iteration loop and returns the result
    (plus the error history when the instance computes the l2 norm).

    :param IterativeReconAlg: obj, class instance of IterativeReconAlg
    :param name: str for name of func
    :param docstring: str other documentation that may need to be included
        from external source; prepended to the class docstring.
    :return: func

    Examples
    --------
    >>> import tigre
    >>> from tigre.demos.Test_data.data_loader import load_head_phantom
    >>> geo = tigre.geometry_defaut(high_quality=False)
    >>> src = load_head_phantom(number_of_voxels=geo.nVoxel)
    >>> proj = Ax(src,geo,angles)
    >>> angles = np.linspace(0,2*np.pi,100)
    >>> iterativereconalg = decorator(IterativeReconAlg)
    >>> output = iterativereconalg(proj,geo,angles, niter=50)
    """
    def iterativereconalg(proj, geo, angles, niter, **kwargs):
        solver = IterativeReconAlg(proj, geo, angles, niter, **kwargs)
        if name is not None:
            solver.name = name
        solver.run_main_iter()
        if solver.computel2:
            return solver.getres(), solver.geterrors()
        return solver.getres()

    base_doc = IterativeReconAlg.__doc__
    iterativereconalg.__doc__ = base_doc if docstring is None else docstring + base_doc
    if name is not None:
        iterativereconalg.__name__ = name
    return iterativereconalg
5,324,066
def read_json_file(filepath: str) -> Any:
    """Read and parse a JSON document from disk.

    Args:
        filepath (str): Path to file

    Returns:
        Any: The parsed JSON
    """
    with open(filepath, 'r') as handle:
        return json.load(handle)
5,324,067
def penalized_loss(loss_func, model, inputs, targets, output_regularization,
                   l2_regularization=0.0, use_dnn=False):
    """Computes penalized loss with L2 regularization and output penalty.

    Args:
        loss_func: Loss function.
        model: Neural network model.
        inputs: Input values to be fed into the model for computing predictions.
        targets: Target values containing either real values or binary labels.
        output_regularization: Coefficient for feature output penalty.
        l2_regularization: Coefficient for L2 regularization.
        use_dnn: Whether using DNN or not when computing L2 regularization.

    Returns:
        The penalized loss.
    """
    base_loss = loss_func(model, inputs, targets)
    penalty = 0.0
    if output_regularization > 0:
        penalty += output_regularization * feature_output_regularization(
            model, inputs)
    if l2_regularization > 0:
        # A DNN counts as a single network; NAM-style models have one
        # sub-network per feature.
        n_networks = 1 if use_dnn else len(model.feature_nns)
        penalty += l2_regularization * weight_decay(
            model, num_networks=n_networks)
    return base_loss + penalty
5,324,068
async def test_platform_manually_configured(hass):
    """Test that we do not discover anything or try to set up a gateway."""
    # Setting up the climate platform manually must succeed...
    assert await async_setup_component(hass, climate.DOMAIN, {
        'climate': {
            'platform': deconz.DOMAIN
        }
    }) is True
    # ...but must not create any deCONZ gateway data.
    assert deconz.DOMAIN not in hass.data
5,324,069
def main(json_file):
  """Extracts information about the tools present on this host.

  Args:
    json_file: Path of the file to write JSON containing the tools
        information; falsy to skip writing.
  """
  info = {}
  info['Xcode Version'], info['Xcode Build Version'] = extract_xcode_version(
      utils.call('xcodebuild', '-version').stdout)
  info['Xcode SDKs'] = extract_sdks(
      utils.call('xcodebuild', '-showsdks').stdout)
  info['Free Space'] = get_free_disk_space()
  info['Logical CPUs'] = get_num_cpus()
  info['Python Version'] = get_python_version()
  info['Python Location'] = get_python_location()
  info['Mac OS X Version'] = get_osx_version()
  info['Available Tools'], info['Missing Tools'] = check_for_tools()

  if json_file:
    # Use a distinct name for the handle; the original shadowed the
    # `json_file` path argument inside the `with` block.
    with open(json_file, 'w') as json_fp:
      json.dump(info, json_fp)
5,324,070
def test_clean_output(_py_controller, msg):
    """Check if output text is getting cleaned from Result.

    When `Result: Hello` is present, clean output should return only Hello.
    If output is only `Hello`, clean output should do nothing
    """
    cleaned = _py_controller._clean_output(msg)
    assert cleaned == 'Hello'
5,324,071
def load_corpus(corpus_id: str, download_if_missing=False) -> Optional[list]:
    """Loads a corpus that has previously been downloaded

    Parameters
    ----------
    corpus_id: str
        The id of the corpus to load.

    download_if_missing: bool, False
        If the corpus is not present on disk, should Canary attempt to download it?

    Returns
    -------
    Union[list, None]
        if a corpus can be loaded, a list of relevant dataset files will be returned.
        Otherwise nothing will be returned.

    Raises
    -------
    UserWarning
        A warning is raised if the requested corpus cannot be found.
    """

    # Valid ids: corpora already on disk...
    allowed_values = [x.stem for x in Path(CANARY_CORPORA_LOCATION).iterdir() if x.is_dir()]

    # ...plus every id listed in the bundled corpora manifest.
    with open(f"{CANARY_ROOT_DIR}/_data/corpora.json") as corpora:
        corpora = json.load(corpora)
        corpora_ids = [corpus['id'] for corpus in corpora]
        allowed_values += corpora_ids

    if corpus_id not in allowed_values:
        raise ValueError(f"Incorrect corpus id supplied. Allowed values are: {allowed_values}")

    # Corpus id should now be valid and will be in corpora root if downloaded
    corpus_location = Path(CANARY_CORPORA_LOCATION) / corpus_id
    if os.path.isdir(corpus_location) is False and download_if_missing is True:
        download_corpus(corpus_id)
        # NOTE(review): the recursive call does not forward
        # download_if_missing, so a failed download falls through to the
        # UserWarning branch on the second pass.
        return load_corpus(corpus_id)

    if os.path.isdir(corpus_location) is False and download_if_missing is False:
        raise UserWarning("It appears the requested corpus has not been downloaded and is not present on disk. "
                          "Have you downloaded it? You can set download_if_missing to True and the "
                          "corpus will be downloaded. Alternatively, use the function download_corpus.")

    import glob
    return glob.glob(f"{corpus_location}/*")
5,324,072
def tx_deserialize( tx_hex ):
    """
    Given a serialized transaction, return its inputs, outputs, locktime, and version
    Each input will have:
    * txid: string
    * vout: int
    * [optional] sequence: int
    * [optional] scriptSig: {"asm": ..., "hex": ...}

    Each output will have:
    * value: Decimal (in BTC)
    * script_hex: string

    Return tx, formatted as {'locktime': ..., 'version': ..., 'vin': ..., 'vout': ...}
    """
    tx = bitcoin.deserialize( tx_hex )

    inputs = tx["ins"]
    outputs = tx["outs"]

    ret_inputs = []
    ret_outputs = []

    for inp in inputs:
        ret_inp = {
            "txid": inp["outpoint"]["hash"],
            "vout": int(inp["outpoint"]["index"]),
        }

        if "sequence" in inp:
            ret_inp["sequence"] = int(inp["sequence"])

        if "script" in inp:
            ret_inp["scriptSig"] = {
                "asm": tx_script_to_asm(inp['script']),
                "hex": inp["script"]
            }

        ret_inputs.append( ret_inp )

    # Py3 fix: enumerate replaces the Python-2-only xrange index loop.
    for i, out in enumerate(outputs):
        assert len(out['script']) > 0, "Invalid transaction scriptpubkey:\n%s" % simplejson.dumps(tx, indent=4, sort_keys=True)
        assert out['value'] < 1000 * (10**8), "High transaction value\n%s" % simplejson.dumps(tx, indent=4, sort_keys=True)

        ret_out = {
            "n": i,
            "value": Decimal(out["value"]) / 10**8,
            "scriptPubKey": {
                "hex": out["script"],
                "asm": tx_script_to_asm(out['script'])
            },
            # compat with pybitcoin
            "script_hex": out["script"]
        }

        ret_outputs.append( ret_out )

    ret = {
        "txid": bitcoin.txhash(tx_hex),
        "hex": tx_hex,
        # Py3 fix: floor division keeps the byte count an int (the hex string
        # always has even length, so the value is unchanged).
        "size": len(tx_hex) // 2,
        "locktime": tx['locktime'],
        "version": tx['version'],
        "vin": ret_inputs,
        "vout": ret_outputs
    }

    return ret
5,324,073
def wait_for_result(polling_function, polling_config):
    """
    wait_for_result will periodically run `polling_function` using the
    parameters described in `polling_config` and return the output of the
    polling function.

    Args:
        polling_config (PollingConfig): The parameters to use to poll the db.
        polling_function (Callable[[], (bool, Any)]): The function being
            polled. The function takes no arguments and must return a status
            which indicates if the function was succesful or not, as well as
            some return value.

    Returns:
        Any: The output of the polling function, if it is succesful,
        None otherwise.

    Raises:
        AssertionError: If polling never succeeds and the config is strict.
    """
    if polling_config.polling_interval == 0:
        iterations = 1
    else:
        iterations = int(polling_config.timeout // polling_config.polling_interval) + 1

    for _ in range(iterations):
        (status, result) = polling_function()
        if status:
            return result
        time.sleep(polling_config.polling_interval)

    if polling_config.strict:
        # Bug fix: `assert False` is stripped when Python runs with -O, which
        # silently disabled strict mode. Raise explicitly instead.
        raise AssertionError(
            "Polling function never succeeded within the timeout")

    return None
5,324,074
def regrid(idx): """ Decorator factory to compute a model on a constant grid, then interpolate. This is to be used for reconvolution fits when the independant axis isn't evently spaced. This function returns a decorator. You should call the result of this function with the model to regrid. The constant grid Parameters ---------- idx : int Index of variable to regrid in client function. Returns ------- regridder : decorator Example ------- ``` def model(x, amp, tau, t0, sig): # Convolution assumes constant grid spacing. return convolve(step(x)*exp_decay(x, amp, tau), gauss_kernel(x, t0, sig)) deco = regrid(1) regridded = deco(model) # Or, on a single line regridded = regrid(1)(model) # compute on first axis # Or, during definition @regrid(1) def model(x, *args): ... ``` """ #logger.debug("Applying 'regrid' decorator") def _regrid(func, *args, **kw): #logger.debug("Regridding func {}".format(func.__name__)) x = args[idx] #print("regridding...") mn, mx = np.min(x), np.max(x) extension=1 margin = (mx-mn)*extension dx = np.abs(np.min(x[1:]-x[:-1])) #print("regrid args", args) #print("regrid kw", kw) #print("regrid func", func) grid = np.arange(mn-margin, mx+margin+dx, dx) args = list(args) args[idx] = grid y = func(*args, **kw) #print("y", y) intrp = interp1d(grid, y, kind=3, copy=False, assume_sorted=True) return intrp(x) return decorator(_regrid)
5,324,075
def create_random_bytes(
    min_length: Optional[int] = None, max_length: Optional[int] = None, lower_case: bool = False
) -> bytes:
    """Generates a random bytes given the constraints.

    Returns hex-encoded random bytes whose encoded length falls within
    [min_length (or 0), max_length] when max_length is truthy.
    """
    if min_length is None:
        min_length = 0
    if max_length is None:
        # NOTE(review): `min_length + 1 * 2` evaluates as `min_length + 2`;
        # the intent may have been `(min_length + 1) * 2` -- confirm before
        # changing, as callers may rely on the current range.
        max_length = min_length + 1 * 2
    length = randint(min_length, max_length)
    # hexlify doubles the size: `length` random bytes become 2*length hex
    # characters, which is why the result may exceed max_length and gets
    # truncated below.
    result = hexlify(urandom(length))

    if lower_case:
        # NOTE(review): hexlify already emits lowercase hex digits, so this
        # appears to be a no-op.
        result = result.lower()

    if max_length and len(result) > max_length:
        end = randint(min_length or 0, max_length)
        return result[0:end]

    return result
5,324,076
def calculate_v_correction(df, photopic_response):
    """ Calculate the v correction factor from an angle-resolved dataframe.

    Each non-wavelength column of *df* holds a spectrum measured at the angle
    given by the column name (degrees); *photopic_response* provides the
    photopic weighting curve. The weighted intensities, relative to the
    0-degree spectrum, are integrated over the forward hemisphere.
    """
    photopic = photopic_response["photopic_response"].to_numpy()

    def calculate_vfactor(column):
        """Photopically weighted intensity relative to the 0-degree spectrum."""
        return sum(column * photopic) / sum(df["0.0"] * photopic)

    # Some measurement files carry an extra "0_deg" column; drop it when
    # present. Bug fix: the bare `except:` clauses have been narrowed to
    # KeyError so real errors are no longer swallowed.
    try:
        v_factor = df.drop(["0_deg", "wavelength"], axis=1).apply(calculate_vfactor)
    except KeyError:
        v_factor = df.drop(["wavelength"], axis=1).apply(calculate_vfactor)

    # It is now important to only integrate from 0 to 90 degrees and not the
    # entire angular range.
    angles = np.array(v_factor.index).astype(float)
    in_range = np.logical_and(angles >= 0, angles <= 90)
    relevant_v_factor = v_factor.loc[in_range]
    relevant_angles = angles[in_range]

    # Discrete integral: sum of v * sin(theta) * d(theta), with a uniform
    # angular step taken from the first interval.
    return np.sum(
        relevant_v_factor
        * np.sin(np.deg2rad(relevant_angles))
        * np.deg2rad(np.diff(relevant_angles)[0])
    )
5,324,077
def wrap_statement(token_str):
    """
    Wraps a long string of space-separated tokens or a list of tokens.
    """
    text = ' '.join(token_str) if isinstance(token_str, list) else token_str
    # Continuation lines are indented four levels deep.
    continuation = '\n' + INDENT * 4
    return continuation.join(gtextWrapper.wrap(text))
5,324,078
def mlas_packb(B, K, N, transb_size, transb=True):
    """Pre-pack a constant B matrix for mlas_matmul, C = A * B^T.

    It only supports float32 datatype.

    Parameters
    ----------
    B : tvm.te.Tensor
        The second input of mlas_matmul.
    K : int
        The number of colums of A.
    N : int
        The number of colums of output C.
    transb_size : int
        The size (in bytes) of the output pre-packed B matrix.
    transb : bool
        Whether the B matrix is transposed.

    Returns
    -------
    PackedB: tvm.te.Tensor
        The pre-packed B matrix.
    """
    # Leading dimension of B depends on whether it is stored transposed.
    ldb = K if transb else N

    def _packb(ins, outs):
        return tvm.tir.call_packed(
            "tvm.contrib.mlas.gemm_packb", N, K, ldb, transb, ins[0], outs[0]
        )

    return te.extern((transb_size), [B], _packb, name="PackedB")
5,324,079
def file_version_summary(list_of_files):
    """
    Given the result of list_file_versions, returns a list of
    all file versions, with "+" for upload and "-" for hide,
    looking like this:

       ['+ photos/a.jpg', '- photos/b.jpg', '+ photos/c.jpg']
    """
    summary = []
    for entry in list_of_files:
        prefix = '+ ' if entry['action'] == 'upload' else '- '
        summary.append(prefix + entry['fileName'])
    return summary
5,324,080
def make_players(data, what_to_replace_null_data_with):
    """
    1. feature selection
    2. replacing null values

    :param data: DataFrame containing at least the selected FIFA skill columns.
    :param what_to_replace_null_data_with: accepted values: "1", "mean", "median"
    :return: players -- selected columns with numeric NaNs filled and rows
        with a missing Position dropped.
    :raises ValueError: if the fill strategy is not one of the accepted values.
    """
    # Validate up front instead of raising lazily inside the fill loop.
    if what_to_replace_null_data_with not in ("1", "mean", "median"):
        raise ValueError("Invalid value for second parameter")

    # .copy() so the fills below never operate on (or warn about) a view of
    # the caller's frame -- the original used fillna(inplace=True) on a
    # column selection, which triggers pandas' chained-assignment problems.
    players = data[["Overall", "Potential", "Position", "Skill Moves",
                    "Crossing", "Finishing", "HeadingAccuracy", "ShortPassing",
                    "Volleys", "Dribbling", "Curve", "FKAccuracy", "LongPassing",
                    "BallControl", "Acceleration", "SprintSpeed", "Agility",
                    "Reactions", "Balance", "ShotPower", "Jumping", "Stamina",
                    "Strength", "LongShots", "Aggression", "Interceptions",
                    "Positioning", "Vision", "Penalties", "Composure", "Marking",
                    "StandingTackle", "SlidingTackle", "GKDiving", "GKHandling",
                    "GKKicking", "GKPositioning", "GKReflexes"]].copy()

    for col in players:
        if col == "Position":
            continue
        if what_to_replace_null_data_with == "1":
            fill_value = 1
        elif what_to_replace_null_data_with == "mean":
            fill_value = players[col].mean()
        else:
            fill_value = players[col].median()
        players[col] = players[col].fillna(fill_value)

    # drop rows with NA positions from dataframe (the only column left unfilled)
    players = players.dropna()
    return players
5,324,081
def pwm_to_duty_cycle(pulsewidth_micros, pwm_params):
    """Convert a PWM pulse width (microseconds) to a GPIO duty-cycle value.

    Parameters
    ----------
    pulsewidth_micros : float
        Width of the pwm signal in microseconds.
    pwm_params : PWMParams
        Object exposing ``freq`` (PWM frequency) and ``range`` (duty-cycle
        resolution) attributes.

    Returns
    -------
    int
        Integer duty cycle corresponding to the pulse width.
    """
    pulsewidth_seconds = pulsewidth_micros / 1e6
    duty_fraction = pulsewidth_seconds * pwm_params.freq
    return int(duty_fraction * pwm_params.range)
5,324,082
def update_plugin_packages_in_kv(rid, runit):
    """Update the plugin packages for this unit in the kv store.

    It returns a tuple of 'install_packages' and 'purge_packages' that are
    different from that which was previously stored.

    :param rid: The relation_id of the unit
    :type rid: str
    :param runit: The unit name of the unit
    :type runit: str
    :returns: tuple of (added, removed) packages.
    :rtype: Tuple[List[Str],List[str]]
    """
    previous = get_plugin_packages_from_kv(rid, runit)
    rdata = relation_get(unit=runit, rid=rid)
    install_packages = json.loads(rdata.get("install-packages", "[]"))
    conflicting_packages = json.loads(rdata.get("conflicting-packages", "[]"))

    install_now = set(install_packages)
    conflict_now = set(conflicting_packages)
    install_before = set(previous['install_packages'])
    conflict_before = set(previous['conflicting_packages'])

    # A package is "removed" when it is no longer requested for install, or
    # when it has newly become conflicting; "added" is the mirror image.
    removed = list((install_before - install_now) | (conflict_now - conflict_before))
    added = list((install_now - install_before) | (conflict_before - conflict_now))

    store_plugin_packages_in_kv(
        rid, runit, conflicting_packages, install_packages)
    return (added, removed)
5,324,083
def get_a_record(dns_name, zone_name):
    """Look up an 'A' record with the supplied name.

    Args:
      dns_name: DNS name of the resource.
      zone_name: Cloud DNS managed zone name.

    Returns:
      The first A record for the DNS resource, or None if not found.
    """
    response = api.CLIENTS.dns.resourceRecordSets().list(
        managedZone=zone_name,
        project=zones.CONFIG.managed_zone_project,
        name=dns_name,
        type='A').execute()
    # There should only be one record set with this name.
    record_sets = response.get('rrsets', [])
    if record_sets:
        return record_sets[0]
    return None
5,324,084
def get_file_obj(fname, mode='r', encoding=None):
    """
    Light wrapper to handle strings and let files (anything else) pass
    through. It also handles '.gz' files.

    Parameters
    ----------
    fname: string or file-like object
        File to open / forward
    mode: string
        Argument passed to the 'open' or 'gzip.open' function
    encoding: string
        For Python 3 only, specify the encoding of the file

    Returns
    -------
    A file-like object that is always a context-manager. If the `fname` was
    already a file-like object, the returned context manager *will not close
    the file*.

    Raises
    ------
    ValueError
        If `fname` is neither a string nor an object exposing the read/write
        methods implied by `mode`.
    """
    if _is_string_like(fname):
        return _open(fname, mode, encoding)
    # Explicit checks instead of `assert`: asserts are stripped when Python
    # runs with -O, which would have let invalid objects slip through.
    # Callers still see the same ValueError as before.
    if 'r' in mode and not hasattr(fname, 'read'):
        raise ValueError('fname must be a string or a file-like object')
    if ('w' in mode or 'a' in mode) and not hasattr(fname, 'write'):
        raise ValueError('fname must be a string or a file-like object')
    return EmptyContextManager(fname)
5,324,085
def decode(codes, alphabet):
    """
    Convert a sequence of integer codes (e.g. argmaxed one-hot encodings)
    to a string.

    Parameters
    ----------
    codes : iterable of int-castable values
        Indices into `alphabet`.
    alphabet : Alphabet
        Indexable mapping from integer code to letter.

    Returns
    -------
    str
        The decoded string.
    """
    return ''.join(alphabet[int(code)] for code in codes)
5,324,086
def GetSuites(milo_client, waterfall, builder_name, build_number):
    """Gets a list of suite ids for a given build from Milo.

    Args:
      milo_client: MiloClient object.
      waterfall: Buildbot waterfall.
      builder_name: Buildbot builder name.
      build_number: Buildbot build number.

    Returns:
      A set of suite ids.
    """
    buildinfo = milo_client.BuildInfoGetBuildbot(waterfall, builder_name,
                                                 build_number)
    suite_ids = set()
    for step_data in buildinfo['steps'].values():
        for link in step_data.get('otherLinks', []):
            if link.get('label') != 'Link to suite':
                continue
            url = link.get('url')
            match = SUITE_RE.search(url)
            if match:
                suite_ids.add(match.group(1))
            else:
                logging.error('Unable to parse suite link for %s: %s' %
                              (step_data['name'], url))
    return suite_ids
5,324,087
def test_sdfi(X_raw):
    """Test SimpleDataFrameImputer: fit on a train split, impute a test split."""
    train_df = X_raw.iloc[:30, :]
    test_df = X_raw.iloc[30:, :]
    print('\ntrain.shape:', train_df.shape)
    print('test.shape:', test_df.shape)
    imputer = SimpleDataFrameImputer(median_cols=['Age'],
                                     mode_cols=['Embarked'])
    imputer.fit(train_df)
    test_imputed = imputer.transform(test_df)
    print('test_imputed:\n', test_imputed)
    print('test_imputed.shape:', test_imputed.shape)
    # Expected imputed values: median Age / mode Embarked of the train split.
    assert (test_imputed.loc[43, 'Age'] == 34) and (test_imputed.loc[45, 'Age'] == 34)
    assert (test_imputed.loc[44, 'Embarked'] == 'S') and (test_imputed.loc[47, 'Embarked'] == 'S')
5,324,088
def getNormform_space(synonym):
    """Lower-case `synonym` and replace every character that is not an
    ASCII lower-case letter or digit with a single space."""
    lowered = synonym.lower()
    return ''.join(
        ch if ('a' <= ch <= 'z' or '0' <= ch <= '9') else ' '
        for ch in lowered
    )
5,324,089
def generate_chart_dataset(path_to_csv, dataset_type):
    """Create the chart dataset by running the three chart generators in
    parallel processes."""
    tasks = (
        fn_task_generate_filled_line_fusion,
        fn_task_generate_candlestick_fusion,
        fn_task_generate_barline_fusion,
    )
    workers = [
        multiprocessing.Process(target=task, args=(path_to_csv, dataset_type))
        for task in tasks
    ]
    # Start all workers first so the generators run concurrently,
    # then wait for every one of them to finish.
    for worker in workers:
        worker.start()
    for worker in workers:
        worker.join()
5,324,090
def electrolyte_conductivity_PeymanMPM(c_e, T):
    """
    Conductivity of LiPF6 in EC:DMC as a function of ion concentration.
    The original data is from [1]. The fit is from Dualfoil [2].

    References
    ----------
    .. [1] C Capiglia et al. 7Li and 19F diffusion coefficients and thermal
    properties of non-aqueous electrolyte solutions for rechargeable lithium
    batteries. Journal of power sources 81 (1999): 859-862.
    .. [2] http://www.cchem.berkeley.edu/jsngrp/fortran.html

    Parameters
    ----------
    c_e: :class:`pybamm.Symbol`
        Dimensional electrolyte concentration
    T: :class:`pybamm.Symbol`
        Dimensional temperature

    Returns
    -------
    :class:`pybamm.Symbol`
        Electrolyte conductivity
    """
    # NOTE: in this MPM fit the conductivity is treated as
    # concentration-independent, so `c_e` is accepted but unused.
    reference_conductivity = 1.3
    activation_energy = 34700
    arrhenius_factor = exp(
        activation_energy / constants.R * (1 / 298.15 - 1 / T))
    return reference_conductivity * arrhenius_factor
5,324,091
def turn_off():
    """
    Turn off the coffee machine by exiting the program.

    Raises
    ------
    SystemExit
        Always; this function does not return.
    """
    # NOTE(review): the original set a *local* `machine_on = False`, which had
    # no effect outside this function. If a module-level flag is intended, it
    # needs a `global machine_on` declaration — confirm against module state.
    # `raise SystemExit` replaces `quit()`: quit() is an interactive helper
    # injected by the `site` module and may be absent (e.g. under `python -S`),
    # while both ultimately raise SystemExit.
    raise SystemExit
5,324,092
def rename_leaves_taxids(tree):
    """
    Rename each leaf node with just its NCBI taxonomy ID when the leaf name
    contains one in the form '[12345]'; leaves without an ID keep their name.

    :param tree: the tree to rename
    :return: the tree with renamed leaves
    """
    taxid_pattern = re.compile(r'\[(\d+)\]')
    for leaf in tree.get_leaves():
        match = taxid_pattern.search(leaf.name)
        if match:
            leaf.name = match.group(1)
    return tree
5,324,093
def hshift(x, shifts=0):
    """Shift a batch of images horizontally (roll along the width axis).

    `shifts` is a fraction of the image width; rolled pixels wrap around to
    the opposite edge.
    """
    width = x.shape[3]
    return paddle.roll(x, int(shifts * width), axis=3)
5,324,094
def getFirstCatalogKeyPath(cataloglist, keypath, default = None):
    """
    Return the value at `keypath` from the first catalog in `cataloglist`
    that contains it, or `default` if no catalog does.
    """
    for catalog_name in cataloglist:
        catalog = getCatalog(catalog_name)
        if catalog is None:
            continue
        value = valueForKeyPath(catalog, keypath)
        if value is not None:
            return value
    return default
5,324,095
def load_context():
    """Load and parse command line arguments and create runtime context.

    Parse command line arguments and create runtime context. The cutoff
    argument is normalised from its string form to a datetime.

    Returns:
        context: argparse.Namespace with `source`, `cutoff` (datetime) and
            `output` attributes.

    Raises:
        Exception: any parsing/conversion error is printed and re-raised.
    """
    try:
        parser = argparse.ArgumentParser(
            description='scrape applied for jobs from recruiter web pages')
        parser.add_argument('-s', '--source', default=os.getcwd(),
                            help='Source folder')
        parser.add_argument(
            '-c', '--cutoff',
            default=JobSite.convert_from_datetime(
                datetime.today() - timedelta(weeks=2)),
            help='Cutoff date')
        parser.add_argument('-o', '--output', default='jobs_applied_for.csv',
                            help='CSV output file')
        context = parser.parse_args()
        # Normalise the cutoff (default or user-supplied string) to datetime.
        context.cutoff = JobSite.convert_to_datetime(context.cutoff)
    except Exception as e:
        print(e)
        # Bare `raise` preserves the original traceback (`raise e` rewrites it).
        raise
    return context
5,324,096
def pinv(a: numpy.ndarray, rcond: float):
    """
    Signature-only stub (body intentionally empty) — appears to be an
    auto-generated record of an observed API call; the counter below tracks
    how often the downstream project used it.

    usage.statsmodels: 1
    """
    ...
5,324,097
def HT_DCPHASE(ds, count):
    """Hilbert Transform - Dominant Cycle Phase.

    Thin wrapper that delegates to `call_talib_with_ds`, passing the data
    series `ds`, the `count` argument, and the `talib.HT_DCPHASE` indicator
    function; see `call_talib_with_ds` for how `ds` and `count` are applied.
    """
    return call_talib_with_ds(ds, count, talib.HT_DCPHASE)
5,324,098
def split_every(n, iterable):
    """Lazily yield lists of up to `n` consecutive items from `iterable`.

    https://stackoverflow.com/questions/9475241/split-string-every-nth-character
    """
    source = iter(iterable)
    while True:
        chunk = list(islice(source, n))
        if not chunk:
            return
        yield chunk
5,324,099