Dataset columns: content — string (lengths 22 to 815k characters); id — int64 (values 0 to 4.91M).
def ax2cu(ax): """Axis angle pair to cubochoric vector.""" return Rotation.ho2cu(Rotation.ax2ho(ax))
21,000
def test_transfer( stubbed_sender, stubbed_receiver_token_account_pk, stubbed_sender_token_account_pk, test_token ): # pylint: disable=redefined-outer-name """Test token transfer.""" expected_amount = 500 assert_valid_response( test_token.transfer( source=stubbed_sender_token_account_pk, dest=stubbed_receiver_token_account_pk, owner=stubbed_sender, amount=expected_amount, opts=TxOpts(skip_confirmation=False), ) ) resp = test_token.get_balance(stubbed_receiver_token_account_pk) balance_info = resp["result"]["value"] assert balance_info["amount"] == str(expected_amount) assert balance_info["decimals"] == 6 assert balance_info["uiAmount"] == 0.0005
21,001
def print_menu(): """Display a table with list of tasks and their associated commands. """ speak("I can do the following") table = Table(title="\nI can do the following :- ", show_lines = True) table.add_column("Sr. No.", style="cyan", no_wrap=True) table.add_column("Task", style="yellow") table.add_column("Command", justify="left", style="green") table.add_row("1", "Speak Text entered by User", "text to speech") table.add_row("2", "Search anything on Google", "Search on Google") table.add_row("3", "Search anything on Wikipedia", "Search on Wikipedia") table.add_row("4", "Read a MS Word(docx) document", "Read MS Word document") table.add_row("5", "Convert speech to text", "Convert speech to text") table.add_row("6", "Read a book(PDF)", "Read a book ") table.add_row("7", "Quit the program", "Python close") console = Console() console.print(table)
21,002
def test_cancel_user_bad_token_no_email(app, session): """Ensure a token without an email subject is rejected.""" u = auth_service.register_user(session, 'test@jadetree.io', 'hunter2JT', 'Test User') token = auth_service.encodeJwt( app, subject=auth_service.JWT_SUBJECT_CANCEL_EMAIL, ) with pytest.raises(JwtPayloadError) as exc_data: auth_service.cancel_registration_with_token(session, token) assert 'email claim' in str(exc_data.value) assert u.active is False assert u.confirmed is False
21,003
def download_and_store( feed_url: Text, ignore_file: Optional[Text], storage_path: Text, proxy_string: Optional[Text], link: urllib.parse.ParseResult) -> Dict: """Download and store a link. Storage defined in args""" # check if the actual url is in the ignore file. If so, no download will take place. if analyze.in_ignore_file(link.geturl(), ignore_file): logging.info("Download link [%s] in ignore file.", link.geturl()) return {} logging.info("downloading %s", link.geturl()) # if netloc does not contain a hostname, assume a relative path to the feed url if link.netloc == '': parsed_feed_url = urllib.parse.urlparse(feed_url) link = link._replace(scheme=parsed_feed_url.scheme, netloc=parsed_feed_url.netloc, path=link.path) logging.info("possible relative path %s, trying to append host: %s", link.path, parsed_feed_url.netloc) try: req = requests.get(link.geturl(), proxies=proxies(proxy_string), headers=default_headers(), verify=False, stream=True, timeout=60) except requests.exceptions.ReadTimeout: logging.info("%s timed out", link.geturl()) return {} except requests.exceptions.ConnectionError: logging.info("%s connection error", link.geturl()) return {} except requests.exceptions.MissingSchema: logging.info("%s missing schema", link.geturl()) return {} if req.status_code >= 400: logging.info("Status %s - %s", req.status_code, link) return {} basename = extract.safe_filename(os.path.basename(link.path)) fname = os.path.join(storage_path, "download", basename) # check if the filename on disk is in the ignore file. If so, do not return filename # for upload. This differ from URL in the ignore file as the file is in fact downloaded by # the feed worker, but not uploaded to Scio. if analyze.in_ignore_file(basename, ignore_file): logging.info("Ignoring %s based on %s", fname, ignore_file) return {} with open(fname, "wb") as download_file: logging.info("Writing %s", fname) req.raw.decode_content = True shutil.copyfileobj(req.raw, download_file) return {'filename': fname, 'uri': link.geturl()}
21,004
def define_genom_loc(current_loc, pstart, p_center, pend, hit_start, hit_end, hit_strand, ovl_range): """ [Local] Returns location label to be given to the annotated peak, if upstream/downstream or overlapping one edge of feature.""" all_pos = ["start", "end"] closest_pos, dmin = distance_to_peak_center( p_center, hit_start, hit_end, hit_strand, all_pos) if current_loc == "not.specified": # Not internal if closest_pos == "end" and any(ovl_range): return "overlapEnd" elif closest_pos == "start" and any(ovl_range): return "overlapStart" elif not any(ovl_range): # Check about direction :"upstream", "downstream" current_loc = find_peak_dir( hit_start, hit_end, hit_strand, pstart, p_center, pend) return current_loc return current_loc
21,005
def define_macro(preprocessor: Preprocessor, name: str, args: List[str], text: str) -> None: """Defines a macro. Inputs: - preprocessor - the object to which the macro is added - name: str - the name of the new macro - args: List[str] - List or arguments name - text: str - the text the command prints. Occurences of args will be replaced by the corresponding value during the call. will only replace occurence that aren't part of a larger word """ # replace arg occurences with placeholder for i, arg in enumerate(args): text = re.sub( REGEX_IDENTIFIER_WRAPPED.format(re.escape(arg)), # pattern "\\1{}\\3".format("\000(arg {})\000".format(i)), # placeholder text, flags = re.MULTILINE ) # define the command def cmd(pre: Preprocessor, cmd_args: List[str], text: str = text, ident: str = name) -> str: """a defined macro command""" for i, arg in enumerate(cmd_args): pattern = re.escape("\000(arg {})\000".format(i)) text = re.sub(pattern, arg, text, flags=re.MULTILINE) pre.context.update( pre.current_position.cmd_argbegin, 'in expansion of defined command {}'.format(ident) ) parsed = pre.parse(text) pre.context.pop() return parsed cmd.doc = "{} {}".format(name, " ".join(args)) # type: ignore # place it in command_vars if "def" not in preprocessor.command_vars: preprocessor.command_vars["def"] = dict() preprocessor.command_vars["def"]["{}:{}".format(name, len(args))] = cmd overloads = [] usages = [] for key, val in preprocessor.command_vars["def"].items(): if key.startswith(name+":"): overloads.append(int(key[key.find(":") + 1])) usages.append(val.doc) usage = "usage: " + "\n ".join(usages) overload_nb = rreplace(", ".join(str(x) for x in sorted(overloads)), ", ", " or ") # overwrite defined command def defined_cmd(pre: Preprocessor, args_string: str) -> str: """This is the actual command, parses arguments and calls the correct overload""" split = pre.split_args(args_string) try: arguments = macro_parser.parse_args(split) except argparse.ArgumentError: pre.send_error("invalid-argument", "invalid argument for macro.\n{}".format(usage) ) if len(arguments.vars) not in overloads: pre.send_error("invalid-argument",( "invalid number of arguments for macro.\nexpected {} got {}.\n" "{}").format(overload_nb, len(arguments.vars), usage) ) return pre.command_vars["def"]["{}:{}".format(name, len(arguments.vars))]( pre, arguments.vars ) defined_cmd.__doc__ = "Defined command for {} (expects {} arguments)\n{}".format( name, overload_nb, usage ) defined_cmd.doc = defined_cmd.__doc__ # type: ignore defined_cmd.__name__ = """def_cmd_{}""".format(name) preprocessor.commands[name] = defined_cmd
21,006
def actp(Gij, X0, jacobian=False): """ action on point cloud """ X1 = Gij[:,:,None,None] * X0 if jacobian: X, Y, Z, d = X1.unbind(dim=-1) o = torch.zeros_like(d) B, N, H, W = d.shape if isinstance(Gij, SE3): Ja = torch.stack([ d, o, o, o, Z, -Y, o, d, o, -Z, o, X, o, o, d, Y, -X, o, o, o, o, o, o, o, ], dim=-1).view(B, N, H, W, 4, 6) elif isinstance(Gij, Sim3): Ja = torch.stack([ d, o, o, o, Z, -Y, X, o, d, o, -Z, o, X, Y, o, o, d, Y, -X, o, Z, o, o, o, o, o, o, o ], dim=-1).view(B, N, H, W, 4, 7) return X1, Ja return X1, None
21,007
def teardown_module(module): """ Delete necessary files. """ os.chdir("..") rmtree("temp_utilities", ignore_errors=True)
21,008
def isPalindrome(s): """Assumes s is a str Returns True if s is a palindrome; False otherwise. Punctuation marks, blanks, and capitalization are ignored.""" def toChars(s): s = s.lower() letters = '' for c in s: if c in 'abcdefghijklmnopqrstuvwxyz': letters = letters + c return letters def isPal(s): print(' isPal called with', s) if len(s) <= 1: print(' About to return True from base case') return True else: answer = s[0] == s[-1] and isPal(s[1:-1]) print(' About to return', answer, 'for', s) return answer return isPal(toChars(s))
21,009
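A minimal usage sketch for the palindrome checker above, assuming isPalindrome is in scope; the nested isPal prints its recursion trace before the final result:

    # Hypothetical calls; isPalindrome is the function defined in the row above.
    print(isPalindrome('Able was I, ere I saw Elba'))   # trace of isPal calls, then True
    print(isPalindrome('Guttag'))                       # not a palindrome, so False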
def find_make_workdir(subdir, despike, spm, logger=None): """ generates realign directory to query based on flags and if it exists and create new workdir """ rlgn_dir = utils.defaults['realign_ants'] if spm: rlgn_dir = utils.defaults['realign_spm'] if despike: rlgn_dir = utils.defaults['despike'] + rlgn_dir rlgn_dir = os.path.join(subdir, rlgn_dir) if not os.path.isdir(rlgn_dir): if logger: logger.error('{0} doesnt exist skipping'.format(rlgn_dir)) raise IOError('{0} doesnt exist skipping'.format(rlgn_dir)) if logger: logger.info(rlgn_dir) workdirnme = utils.defaults['coreg'] workdir, exists = utils.make_dir(rlgn_dir, workdirnme) if not exists: if logger: logger.error('{0}: skipping {1} doesnt exist'.format(subdir, workdir)) raise IOError('{0}: MISSING, Skipping'.format(workdir)) bpdirnme = utils.defaults['bandpass'] bpdir, exists = utils.make_dir(workdir, bpdirnme) if exists: if logger: logger.error('{0}: skipping {1} existS'.format(subdir, bpdir)) raise IOError('{0}: EXISTS, Skipping'.format(bpdir)) return rlgn_dir, workdir, bpdir
21,010
def _SendChangeSVN(options): """Send a change to the try server by committing a diff file on a subversion server.""" if not options.svn_repo: raise NoTryServerAccess('Please use the --svn_repo option to specify the' ' try server svn repository to connect to.') values = _ParseSendChangeOptions(options) description = ''.join("%s=%s\n" % (k, v) for k, v in values) logging.info('Sending by SVN') logging.info(description) logging.info(options.svn_repo) logging.info(options.diff) if options.dry_run: return # Create a temporary directory, put a uniquely named file in it with the diff # content and svn import that. temp_dir = tempfile.mkdtemp() temp_file = tempfile.NamedTemporaryFile() try: try: # Description temp_file.write(description) temp_file.flush() # Diff file current_time = str(datetime.datetime.now()).replace(':', '.') file_name = (Escape(options.user) + '.' + Escape(options.name) + '.%s.diff' % current_time) full_path = os.path.join(temp_dir, file_name) with open(full_path, 'wb') as f: f.write(options.diff) # Committing it will trigger a try job. if sys.platform == "cygwin": # Small chromium-specific issue here: # git-try uses /usr/bin/python on cygwin but svn.bat will be used # instead of /usr/bin/svn by default. That causes bad things(tm) since # Windows' svn.exe has no clue about cygwin paths. Hence force to use # the cygwin version in this particular context. exe = "/usr/bin/svn" else: exe = "svn" command = [exe, 'import', '-q', temp_dir, options.svn_repo, '--file', temp_file.name] if scm.SVN.AssertVersion("1.5")[0]: command.append('--no-ignore') subprocess2.check_call(command) except subprocess2.CalledProcessError, e: raise NoTryServerAccess(str(e)) finally: temp_file.close() shutil.rmtree(temp_dir, True)
21,011
def compute_irs(ground_truth_data, representation_function, random_state, diff_quantile=0.99, num_train=gin.REQUIRED, batch_size=gin.REQUIRED): """Computes the Interventional Robustness Score. Args: ground_truth_data: GroundTruthData to be sampled from. representation_function: Function that takes observations as input and outputs a dim_representation sized representation for each observation. random_state: Numpy random state used for randomness. diff_quantile: Float value between 0 and 1 to decide what quantile of diffs to select (use 1.0 for the version in the paper). num_train: Number of points used for training. batch_size: Batch size for sampling. Returns: Dict with IRS and number of active dimensions. """ logging.info("Generating training set.") mus, ys = utils.generate_batch_factor_code(ground_truth_data, representation_function, num_train, random_state, batch_size) assert mus.shape[1] == num_train ys_discrete = utils.make_discretizer(ys) active_mus = _drop_constant_dims(mus) if not active_mus.any(): irs_score = 0.0 else: irs_score = scalable_disentanglement_score(ys_discrete.T, active_mus.T, diff_quantile)["avg_score"] score_dict = {} score_dict["IRS"] = irs_score score_dict["num_active_dims"] = np.sum(active_mus) return score_dict
21,012
def Grab_Pareto_Min_Max(ref_set_array, objective_values, num_objs, num_dec_vars, objectives_names=[], create_txt_file='No'): """ Purposes: Identifies the operating policies producing the best and worst performance in each objective. Gets called automatically by processing_reference_set.Reference_Set() Required Args: 1. ref_set_array: an array of P arrays, P=number of points in the reference set. Each of the P arrays contains N=num_objs+num_vars (number of objective values in optimization problem and number of decision variables). Decision variables come first, followed by objective values. 2. objective_values: The objective value portion of the ref_set_array. It is also an array of P arrays, where each of the P arrays is of length num_objs. 3. num_objs = integer number of objective values (e.g., 5) 4. num_dec_vars: integer number of decision variable values (e.g., 30) Optional Args: 5. objectives_names: (list of names of objectives for objective_values returned from Reference_Set, as defined above). Example: ['Sediment', 'Hydropower']. Used to name .txt file and provide output dictionary keys. 6. create_text_file: String of 'Yes' or 'No'. Indicates whether users wants function to produce text files of operating policies. Returns: 1. indices of ref_set_array that correspond to the points of the highest and lowest value in each of the objectives. 2. Various text files that store the DPS parameters corresponding to the operating policy, if the user wishes to create such files. """ # Find operating policy parameters corresponding to the largest objective # List of index of (1) highest and (2) lowest values (column in objective_values array) indices = [[0 for i in range(2)] for j in range(num_objs)] indices_dict = {} for obj in range(num_objs): indices[obj][0] = np.argmin(objective_values[obj]) # MIN for each objective indices[obj][1] = np.argmax(objective_values[obj]) # MAX for each objective if create_txt_file == 'Yes': # Save max and min policies so PySedSim can import them. np.savetxt('RBF_Parameters_max_' + objectives_names[obj] + '.txt', ref_set_array[indices[1][obj]][0:num_dec_vars], newline=' ') np.savetxt('RBF_Parameters_min_' + objectives_names[obj] + '.txt', ref_set_array[indices[0][obj]][0:num_dec_vars], newline=' ') indices = np.asarray(indices) # cast list as array indices_dict[objectives_names[obj]] = {'Min': indices[obj][0], 'Max': indices[obj][1]} return indices_dict
21,013
def edist(x, y): """ Compute the Euclidean distance between two samples x, y \in R^d.""" try: dist = np.sqrt(np.sum((x-y)**2)) except ValueError: print 'Dimensionality of samples must match!' else: return dist
21,014
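For valid same-shape inputs, edist above computes the ordinary Euclidean norm of the difference; a quick standalone check of that identity with plain NumPy (toy vectors, not project data):

    import numpy as np
    x = np.array([0.0, 3.0, 4.0])
    y = np.zeros(3)
    # sqrt(sum((x - y)**2)) is exactly the L2 norm of (x - y)
    assert np.isclose(np.sqrt(np.sum((x - y) ** 2)), np.linalg.norm(x - y))
    assert np.isclose(np.linalg.norm(x - y), 5.0)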
def figure(fnum=None, pnum=(1, 1, 1), title=None, figtitle=None, doclf=False, docla=False, projection=None, **kwargs): """ http://matplotlib.org/users/gridspec.html Args: fnum (int): fignum = figure number pnum (int, str, or tuple(int, int, int)): plotnum = plot tuple title (str): (default = None) figtitle (None): (default = None) docla (bool): (default = False) doclf (bool): (default = False) Returns: mpl.Figure: fig CommandLine: python -m plottool.custom_figure --exec-figure:0 --show python -m plottool.custom_figure --exec-figure:1 --show Example: >>> fnum = 1 >>> fig = figure(fnum, (2, 2, 1)) >>> gca().text(0.5, 0.5, "ax1", va="center", ha="center") >>> fig = figure(fnum, (2, 2, 2)) >>> gca().text(0.5, 0.5, "ax2", va="center", ha="center") >>> ut.show_if_requested() Example: >>> fnum = 1 >>> fig = figure(fnum, (2, 2, 1)) >>> gca().text(0.5, 0.5, "ax1", va="center", ha="center") >>> fig = figure(fnum, (2, 2, 2)) >>> gca().text(0.5, 0.5, "ax2", va="center", ha="center") >>> fig = figure(fnum, (2, 4, (1, slice(1, None)))) >>> gca().text(0.5, 0.5, "ax3", va="center", ha="center") >>> ut.show_if_requested() """ import matplotlib.pyplot as plt import matplotlib.gridspec as gridspec def ensure_fig(fnum=None): if fnum is None: try: fig = plt.gcf() except Exception as ex: fig = plt.figure() else: try: fig = plt.figure(fnum) except Exception as ex: fig = plt.gcf() return fig def _convert_pnum_int_to_tup(int_pnum): # Convert pnum to tuple format if in integer format nr = int_pnum // 100 nc = int_pnum // 10 - (nr * 10) px = int_pnum - (nr * 100) - (nc * 10) pnum = (nr, nc, px) return pnum def _pnum_to_subspec(pnum): if isinstance(pnum, six.string_types): pnum = list(pnum) nrow, ncols, plotnum = pnum # if kwargs.get('use_gridspec', True): # Convert old pnums to gridspec gs = gridspec.GridSpec(nrow, ncols) if isinstance(plotnum, (tuple, slice, list)): subspec = gs[plotnum] else: subspec = gs[plotnum - 1] return (subspec,) def _setup_subfigure(pnum): if isinstance(pnum, int): pnum = _convert_pnum_int_to_tup(pnum) if doclf: fig.clf() axes_list = fig.get_axes() if docla or len(axes_list) == 0: if pnum is not None: assert pnum[0] > 0, 'nRows must be > 0: pnum=%r' % (pnum,) assert pnum[1] > 0, 'nCols must be > 0: pnum=%r' % (pnum,) subspec = _pnum_to_subspec(pnum) ax = fig.add_subplot(*subspec, projection=projection) if len(axes_list) > 0: ax.cla() else: ax = plt.gca() else: if pnum is not None: subspec = _pnum_to_subspec(pnum) ax = plt.subplot(*subspec) else: ax = plt.gca() fig = ensure_fig(fnum) if pnum is not None: _setup_subfigure(pnum) # Set the title / figtitle if title is not None: ax = plt.gca() ax.set_title(title) if figtitle is not None: fig.suptitle(figtitle) return fig
21,015
def start_wifi(): """Start a new WIFI connection if none is found.""" if not _wifi.is_connected(): display.show(["Connecting wifi."]) if not _wifi.connect(): error("NO INTERNET!")
21,016
def allsec_preorder(h): """ Alternative to using h.allsec(). This returns all sections in order from the root. Traverses the topology of each neuron in "pre-order". """ #Iterate over all sections, find roots roots = root_sections(h) # Build list of all sections sec_list = [] for r in roots: add_pre(h,sec_list,r) return sec_list
21,017
def get_func_bytes(*args): """get_func_bytes(func_t pfn) -> int""" return _idaapi.get_func_bytes(*args)
21,018
def test_givens(seed, dtype): """ Tests that when the Givens factors produced by `solve._compute_givens_rotation` are applied to a two-vector `v = (a, b)` via `solve._apply_ith_rotation`, the result is `h = (||v||, 0)` up to a sign. """ np.random.seed(seed) v = np.random.randn(2).astype(dtype) v = jnp.array(v) cs, sn = solve._compute_givens_rotation(v[0], v[1]) cs = jnp.full(1, cs) sn = jnp.full(1, sn) r = np.sqrt(v[0] ** 2 + v[1] ** 2) h, _, _ = solve._apply_ith_rotation(0, (v, cs, sn)) expected = np.array([r, 0.]) eps = jnp.finfo(dtype).eps * r testutils.assert_allclose(np.abs(h), expected, atol=eps)
21,019
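The property being tested above — a Givens rotation sends (a, b) to (±||(a, b)||, 0) — can be reproduced with plain NumPy. This sketch uses one common sign convention, which may differ from the library's _compute_givens_rotation:

    import numpy as np
    a, b = 3.0, 4.0
    r = np.hypot(a, b)
    c, s = a / r, b / r
    G = np.array([[c, s], [-s, c]])      # Givens rotation built from (a, b)
    h = G @ np.array([a, b])
    assert np.allclose(h, [r, 0.0])      # first entry is the norm, second is annihilated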
def balanced_accuracy(y_true, y_score): """Compute balanced accuracy using one-hot representations.""" if isinstance(y_true, list) and isinstance(y_score, list): # Online scenario if y_true[0].ndim == 2 and y_score[0].ndim == 2: # Flatten to single (very long prediction) y_true = np.concatenate(y_true, axis=0) y_score = np.concatenate(y_score, axis=0) if y_score.ndim == 3 and y_score.shape[-1] == 1: y_score = np.ravel(y_score) y_true = np.ravel(y_true).astype(int) y_score = np.around(y_score).astype(int) if y_true.ndim == 2 and y_true.shape[-1] != 1: y_true = np.argmax(y_true, axis=-1) if y_true.ndim == 2 and y_true.shape[-1] == 1: y_true = np.round(y_true).astype(int) if y_score.ndim == 2 and y_score.shape[-1] != 1: y_score = np.argmax(y_score, axis=-1) if y_score.ndim == 2 and y_score.shape[-1] == 1: y_score = np.round(y_score).astype(int) return balanced_accuracy_score(y_true, y_score)
21,020
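The wrapper above ultimately defers to sklearn's balanced_accuracy_score. A toy illustration (hand-picked labels, unrelated to the snippet's data) of why that metric differs from plain accuracy on imbalanced classes:

    from sklearn.metrics import accuracy_score, balanced_accuracy_score
    y_true = [0, 0, 0, 0, 1]
    y_pred = [0, 0, 0, 0, 0]                         # always predicts the majority class
    print(accuracy_score(y_true, y_pred))            # 0.8
    print(balanced_accuracy_score(y_true, y_pred))   # 0.5, the mean of per-class recalls (1.0 and 0.0)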
def main(): """ The main function of the program. First, the input parameters are collected and validated. Then the repayment information is computed and returned. """ locale.setlocale(locale.LC_ALL, 'en_gb') # Changes the locale settings to deal with pounds market_file, loan_amount = _get_input() # Collects the inputs valid_request = _is_loan_request_valid(loan_amount) # Validates the loan amount if valid_request: # If the request is valid... rates_cache = _get_rates_cache(market_file) # Computes the hash map of the available rates/amounts quote_available = _can_be_quoted(loan_amount, rates_cache.values()) # Checks if a quote is available... if quote_available: # If it is... rate, monthly_repay, total_repay = _get_repayments(loan_amount, rates_cache) # Gets repayments information _display_results(loan_amount, rate, monthly_repay, total_repay) # Displays the results else: # ... else returns an error message print "We're very sorry but it's not possible to provide a quote at this time." else: print "We're very sorry but you entered an invalid request!" print 'You can request a loan for at least 1000 pound and at most 15000 pound with a 100 pound increment only.'
21,021
async def get_weather(weather): """ For .weather command, gets the current weather of a city. """ if not OWM_API: await weather.reply( f"`{JAVES_NNAME}:` **Get an API key from** https://openweathermap.org/ `first.`") return APPID = OWM_API if not weather.pattern_match.group(1): CITY = DEFCITY if not CITY: await weather.reply( f"`{JAVES_NNAME}:` **Please specify a city or set one as default using the WEATHER_DEFCITY config variable.**" ) return else: CITY = weather.pattern_match.group(1) timezone_countries = { timezone: country for country, timezones in c_tz.items() for timezone in timezones } if "," in CITY: newcity = CITY.split(",") if len(newcity[1]) == 2: CITY = newcity[0].strip() + "," + newcity[1].strip() else: country = await get_tz((newcity[1].strip()).title()) try: countrycode = timezone_countries[f'{country}'] except KeyError: await weather.reply("`Invalid country.`") return CITY = newcity[0].strip() + "," + countrycode.strip() url = f'https://api.openweathermap.org/data/2.5/weather?q={CITY}&appid={APPID}' request = get(url) result = json.loads(request.text) if request.status_code != 200: await weather.reply(f"`Invalid country.`") return cityname = result['name'] curtemp = result['main']['temp'] humidity = result['main']['humidity'] min_temp = result['main']['temp_min'] max_temp = result['main']['temp_max'] desc = result['weather'][0] desc = desc['main'] country = result['sys']['country'] sunrise = result['sys']['sunrise'] sunset = result['sys']['sunset'] wind = result['wind']['speed'] winddir = result['wind']['deg'] ctimezone = tz(c_tz[country][0]) time = datetime.now(ctimezone).strftime("%A, %I:%M %p") fullc_n = c_n[f"{country}"] dirs = ["N", "NE", "E", "SE", "S", "SW", "W", "NW"] div = (360 / len(dirs)) funmath = int((winddir + (div / 2)) / div) findir = dirs[funmath % len(dirs)] kmph = str(wind * 3.6).split(".") mph = str(wind * 2.237).split(".") def fahrenheit(f): temp = str(((f - 273.15) * 9 / 5 + 32)).split(".") return temp[0] def celsius(c): temp = str((c - 273.15)).split(".") return temp[0] def sun(unix): xx = datetime.fromtimestamp(unix, tz=ctimezone).strftime("%I:%M %p") return xx await weather.reply( f"**Temperature:** `{celsius(curtemp)}°C | {fahrenheit(curtemp)}°F`\n" + f"**Min. Temp.:** `{celsius(min_temp)}°C | {fahrenheit(min_temp)}°F`\n" + f"**Max. Temp.:** `{celsius(max_temp)}°C | {fahrenheit(max_temp)}°F`\n" + f"**Humidity:** `{humidity}%`\n" + f"**Wind:** `{kmph[0]} kmh | {mph[0]} mph, {findir}`\n" + f"**Sunrise:** `{sun(sunrise)}`\n" + f"**Sunset:** `{sun(sunset)}`\n\n" + f"**{desc}**\n" + f"`{cityname}, {fullc_n}`\n" + f"`{time}`")
21,022
def read_table(source, columns=None, nthreads=1, metadata=None, use_pandas_metadata=False): """ Read a Table from Parquet format Parameters ---------- source: str or pyarrow.io.NativeFile Location of Parquet dataset. If a string passed, can be a single file name or directory name. For passing Python file objects or byte buffers, see pyarrow.io.PythonFileInterface or pyarrow.io.BufferReader. columns: list If not None, only these columns will be read from the file. nthreads : int, default 1 Number of columns to read in parallel. Requires that the underlying file source is threadsafe metadata : FileMetaData If separately computed use_pandas_metadata : boolean, default False If True and file has custom pandas schema metadata, ensure that index columns are also loaded Returns ------- pyarrow.Table Content of the file as a table (of columns) """ if is_string(source): fs = LocalFileSystem.get_instance() if fs.isdir(source): return fs.read_parquet(source, columns=columns, metadata=metadata) pf = ParquetFile(source, metadata=metadata) return pf.read(columns=columns, nthreads=nthreads, use_pandas_metadata=use_pandas_metadata)
21,023
def preprocess_img(image, segnet_stream='fstream'): """Preprocess the image to adapt it to network requirements Args: Image we want to input the network in (W,H,3) or (W,H,4) numpy array Returns: Image ready to input to the network of (1,W,H,3) or (1,W,H,4) shape Note: This is really broken if the input is a file path for the astream since there is no motion flow attached... TODO How would you fix this? """ if type(image) is not np.ndarray: image = np.array(Image.open(image), dtype=np.uint8) if segnet_stream == 'astream': input = np.subtract(image.astype(np.float32), np.array((104.00699, 116.66877, 122.67892, 128.), dtype=np.float32)) else: # input = image -> this leads to NaNs input = np.subtract(image.astype(np.float32), np.array((0., 0., 128.), dtype=np.float32)) # TODO What preprocessing should we apply to "low amplitude" optical flows and "high amplitude" warped mask # input = tf.subtract(tf.cast(input, tf.float32), np.array((104.00699, 116.66877, 122.67892), dtype=np.float32)) input = np.expand_dims(input, axis=0) return input
21,024
def redefine_colors(color_map, file=sys.stdout): """Redefine the base console colors with a new mapping.""" _redefine_colors(color_map, file)
21,025
def benchmark_step(config): """Utility function to benchmark speed of 'stepping', i.e. recurrent view. Unused for main train logic""" pl.seed_everything(config.train.seed, workers=True) model = SequenceLightningModule(config) model.setup() model.to("cuda") print("Num Parameters: ", sum(p.numel() for p in model.parameters())) print( "Num Trainable Parameters: ", sum(p.numel() for p in model.parameters() if p.requires_grad), ) model._on_post_move_to_device() for module in model.modules(): if hasattr(module, "setup_step"): module.setup_step() model.eval() val_dataloaders = model.val_dataloader() dl = val_dataloaders[0] if utils.is_list(val_dataloaders) else val_dataloaders import benchmark for batch in dl: benchmark.utils.benchmark( model.forward_recurrence, batch, config.train.benchmark_step_k, T=config.train.benchmark_step_T, ) break
21,026
def confirm_buildstream_installed(): """Confirms that BuildStream is installed, so it can be run using subprocess.run""" if not shutil.which("bst"): # shutil.which will return None if the bst executable isn't installed echo("Error, BuildStream does not seem to be installed.") echo("(bst_license_checker needs BuildStream to run)") abort()
21,027
def _load_announce_signal_handlers() -> None: """Import modules containing handlers so they connect to the corresponding signals. """ from .announce import connections
21,028
def _export_cert_from_task_keystore( task, keystore_path, alias, password=KEYSTORE_PASS): """ Retrieves certificate from the keystore with given alias by executing a keytool in context of running container and loads the certificate to memory. Args: task (str): Task id of container that contains the keystore keystore_path (str): Path inside container to keystore containing the certificate alias (str): Alias of the certificate in the keystore Returns: x509.Certificate object """ args = ['-rfc'] if password: args.append('-storepass "{password}"'.format(password=password)) args_str = ' '.join(args) cert_bytes = sdk_tasks.task_exec( task, _keystore_export_command(keystore_path, alias, args_str) )[1].encode('ascii') return x509.load_pem_x509_certificate( cert_bytes, DEFAULT_BACKEND)
21,029
def eta_expand( path: qlast.Path, stype: s_types.Type, *, ctx: context.ContextLevel, ) -> qlast.Expr: """η-expansion of an AST path""" if not ALWAYS_EXPAND and not stype.contains_object(ctx.env.schema): # This isn't strictly right from a "fully η expanding" perspective, # but for our uses, we only need to make sure that objects are # exposed to the output, so we can skip anything not containing one. return path if isinstance(stype, s_types.Array): return eta_expand_array(path, stype, ctx=ctx) elif isinstance(stype, s_types.Tuple): return eta_expand_tuple(path, stype, ctx=ctx) else: return path
21,030
def NBAccuracy(features_train, labels_train, features_test, labels_test): """ compute the accuracy of your Naive Bayes classifier """ from sklearn.naive_bayes import GaussianNB from sklearn.metrics import accuracy_score # create classifier clf = GaussianNB() # fit the classifier on the training features and labels timeit(lambda: clf.fit(features_train, labels_train), "fit") # use the trained classifier to predict labels for the test features labels_pred = timeit(lambda: clf.predict(features_test), "predict") # calculate and return the accuracy on the test data # this is slightly different than the example, # where we just print the accuracy # you might need to import an sklearn module accuracy = accuracy_score(labels_test, labels_pred) return accuracy
21,031
async def test_write_diff_file(snapshot, tmp_path): """ Test that a diff file is written correctly. """ (tmp_path / "history").mkdir() with open(TEST_DIFF_PATH, "r") as f: diff = json.load(f) await write_diff_file(tmp_path, "foo", "1", diff) path = tmp_path / "history" / "foo_1.json" with open(path, "r") as f: assert json.load(f) == snapshot
21,032
def _remove(ctx, config, remote, debs): """ Removes Debian packages from remote, rudely TODO: be less rude (e.g. using --force-yes) :param ctx: the argparse.Namespace object :param config: the config dict :param remote: the teuthology.orchestra.remote.Remote object :param debs: list of packages names to install """ log.info("Removing packages: {pkglist} on Debian system.".format( pkglist=", ".join(debs))) # first ask nicely remote.run( args=[ 'for', 'd', 'in', ] + debs + [ run.Raw(';'), 'do', 'sudo', 'DEBIAN_FRONTEND=noninteractive', 'apt-get', '-y', '--force-yes', '-o', run.Raw('Dpkg::Options::="--force-confdef"'), '-o', run.Raw( 'Dpkg::Options::="--force-confold"'), 'purge', run.Raw('$d'), run.Raw('||'), 'true', run.Raw(';'), 'done', ]) # mop up anything that is broken remote.run( args=[ 'dpkg', '-l', run.Raw('|'), # Any package that is unpacked or half-installed and also requires # reinstallation 'grep', '^.\(U\|H\)R', run.Raw('|'), 'awk', '{print $2}', run.Raw('|'), 'sudo', 'xargs', '--no-run-if-empty', 'dpkg', '-P', '--force-remove-reinstreq', ]) # then let apt clean up remote.run( args=[ 'sudo', 'DEBIAN_FRONTEND=noninteractive', 'apt-get', '-y', '--force-yes', '-o', run.Raw('Dpkg::Options::="--force-confdef"'), '-o', run.Raw( 'Dpkg::Options::="--force-confold"'), 'autoremove', ], )
21,033
def loadPlugin(filename : str): """Loads the given Python file as a FSLeyes plugin. """ # strip underscores to handle e.g. __init__.py, # as pkg_resources might otherwise have trouble name = op.splitext(op.basename(filename))[0].strip('_') modname = 'fsleyes_plugin_{}'.format(name) mod = _importModule(filename, modname) _registerEntryPoints(name, mod, True)
21,034
def main(): """Command line entry point""" opt_short = 'c:dvVh' opt_long = ['config=', 'debug' , 'verbose', 'version', 'help'] try: opts, args = getopt.getopt(sys.argv[1:], opt_short, opt_long) except getopt.GetoptError, e: fatal(e) options = { 'config_file': None, 'verbose': False, 'debug': False, } for name, value in opts: if name in ('-h', '--help'): print usage.format(os.path.basename(sys.argv[0])) sys.exit(0) elif name in ('-c', '--config'): options['config_file'] = value elif name in ('-d', '--debug'): options['debug'] = True elif name in ('-v', '--verbose'): options['verbose'] = True elif name in ('-V', '--version'): print USER_AGENT sys.exit(0) # Check for mandatory options if not options['config_file']: fatal("You must specify a configuration file") # Set the global logging level root_log = logging.getLogger() if options['debug']: root_log.setLevel(logging.DEBUG) elif options['verbose']: root_log.setLevel(logging.INFO) else: root_log.setLevel(logging.WARNING) start_bot(options)
21,035
def verify(body): # noqa: E501 """verify Verifies user with given user id. # noqa: E501 :param body: User id that is required for verification. :type body: dict | bytes :rtype: UserVerificationResponse """ if connexion.request.is_json: body = VerifyUser.from_dict(connexion.request.get_json()) # noqa: E501 user_id = body.user_id user_json = store.value_of(user_id) if user_json == None: response = Error(code=400, message="Invalid user id.") return response, 400 user_dict = json.loads(user_json) user = User.from_dict(user_dict) texts = get_texts(user_id) if not texts: response = Error( code=400, message="Can not recognize characters from identity card." ) return response, 400 language = body.language doc_text_label = get_doc(texts, language=language) user_text_label = create_user_text_label(user) text_validation_point = validate_text_label(doc_text_label, user_text_label) print("text_validation_point: " + str(text_validation_point)) names = recognize_face(user_id) if not names: response = Error( code=400, message="Can not recognize face from identity card." ) return response, 400 face_validation_point = point_on_recognition(names, user_id) print("face_validation_point: " + str(face_validation_point)) verification_rate = text_validation_point + face_validation_point response = UserVerificationResponse( code=200, verification_rate=verification_rate ) return response, 200 else: error = Error(code=400, message="Provide a json payload that contains userId") return error, 400
21,036
def load_interface(interface_name, data): """ Load an interface :param interface_name: a string representing the name of the interface :param data: a dictionary of arguments to be used for initializing the interface :return: an Interface object of the appropriate type """ if interface_name not in _interfaces: raise Exception('Unknown interface') return load_class_from_data(_interfaces[interface_name], data)
21,037
def labotter_in(client: BaseClient): """らぼいん!""" msg = "らぼいんに失敗したっぽ!(既に入っているかもしれないっぽ)" user_id = client.get_send_user() flag, start_time = labo_in(user_id) if flag: msg = "らぼいんしたっぽ! \nいん時刻: {}".format(start_time) client.post(msg)
21,038
def as_observation_matrix(cnarr, variants=None): """Extract HMM fitting values from `cnarr`. For each chromosome arm, extract log2 ratios as a numpy array. Future: If VCF of variants is given, or 'baf' column has already been added to `cnarr` from the same, then the BAF values are a second row/column in each numpy array. Returns: List of numpy.ndarray, one per chromosome arm. """ # TODO incorporate weights -- currently handled by smoothing # TODO incorporate inter-bin distances observations = [arm.log2.values for _c, arm in cnarr.by_arm()] return observations
21,039
def test_generate_stl(): """Check generator builds all STL files""" root = "tests/test_data/model" gen = Generator(root) tw = TreeWalker(root, "scad", None) tw.clean("stl") gen.process_all("stl") assert os.path.isfile(os.path.join(root, "wing/wing.stl"))
21,040
def get_full_word(*args): """get_full_word(ea_t ea) -> ulonglong""" return _idaapi.get_full_word(*args)
21,041
def full_setup(battery_chemistry): """This function gets the baseline vehicle and creates modifications for different configurations, as well as the mission and analyses to go with those configurations.""" # Collect baseline vehicle data and changes when using different configuration settings vehicle = vehicle_setup() # Modify Battery net = vehicle.networks.battery_propeller bat = net.battery if battery_chemistry == 'NMC': bat = SUAVE.Components.Energy.Storages.Batteries.Constant_Mass.Lithium_Ion_LiNiMnCoO2_18650() elif battery_chemistry == 'LFP': bat = SUAVE.Components.Energy.Storages.Batteries.Constant_Mass.Lithium_Ion_LiFePO4_18650() bat.mass_properties.mass = 500. * Units.kg bat.max_voltage = 500. initialize_from_mass(bat) # Assume a battery pack module shape. This step is optional but # required for thermal analysis of the pack number_of_modules = 10 bat.module_config.total = int(np.ceil(bat.pack_config.total/number_of_modules)) bat.module_config.normal_count = int(np.ceil(bat.module_config.total/bat.pack_config.series)) bat.module_config.parallel_count = int(np.ceil(bat.module_config.total/bat.pack_config.parallel)) net.battery = bat net.battery = bat net.voltage = bat.max_voltage configs = configs_setup(vehicle) # Get the analyses to be used when different configurations are evaluated configs_analyses = analyses_setup(configs) # Create the mission that will be flown mission = mission_setup(configs_analyses, vehicle) missions_analyses = missions_setup(mission) # Add the analyses to the proper containers analyses = SUAVE.Analyses.Analysis.Container() analyses.configs = configs_analyses analyses.missions = missions_analyses return configs, analyses
21,042
def sph_yn_exact(n, z): """Return the value of y_n computed using the exact formula. The expression used is http://dlmf.nist.gov/10.49.E4 . """ zm = mpmathify(z) s1 = sum((-1)**k*_a(2*k, n)/zm**(2*k+1) for k in xrange(0, int(n/2) + 1)) s2 = sum((-1)**k*_a(2*k+1, n)/zm**(2*k+2) for k in xrange(0, int((n-1)/2) + 1)) return -cos(zm - n*pi/2)*s1 + sin(zm - n*pi/2)*s2
21,043
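Transcribed from the code above (with a_k(n) standing for whatever the module's _a helper returns, presumably the standard DLMF 10.49 series coefficients), the value being computed is:

    y_n(z) = -\cos\left(z - \frac{n\pi}{2}\right)\sum_{k=0}^{\lfloor n/2 \rfloor}\frac{(-1)^k\, a_{2k}(n)}{z^{2k+1}}
             + \sin\left(z - \frac{n\pi}{2}\right)\sum_{k=0}^{\lfloor (n-1)/2 \rfloor}\frac{(-1)^k\, a_{2k+1}(n)}{z^{2k+2}}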
def f_bis(n1 : float, n2 : float, n3 : float) -> str: """ ... cf ci-dessus ... """ if n1 < n2: if n2 < n3: return 'cas 1' elif n1 < n3: return 'cas 2' else: return 'cas 5' elif n1 < n3: return 'cas 3' elif n2 < n3: return 'cas 4' else: return 'cas 6'
21,044
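The six return values above correspond to the six possible orderings of (n1, n2, n3); a doctest-style check derived directly from the branch logic (assuming f_bis is in scope):

    assert f_bis(1, 2, 3) == 'cas 1'   # n1 < n2 < n3
    assert f_bis(1, 3, 2) == 'cas 2'   # n1 < n3 <= n2
    assert f_bis(2, 1, 3) == 'cas 3'   # n2 <= n1 < n3
    assert f_bis(3, 1, 2) == 'cas 4'   # n2 < n3 <= n1
    assert f_bis(2, 3, 1) == 'cas 5'   # n3 <= n1 < n2
    assert f_bis(3, 2, 1) == 'cas 6'   # n3 <= n2 <= n1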
def calc_B_effective(*B_phasors): """It calculates the effective value of the magnetic induction field B (microTesla) in a given point, considering the magnetic induction of all the cables provided. Firstly, the function computes the resulting real and imaginary parts of the x and y magnetic induction field components considering all the contributing cables given as input (typically three or six cables). The 'B_components' 2x2 numpy matrix indicates this intermediate step. Secondly, the module of the effective magnetic induction field B is calculated as the squared root of the sum of the squares of the components mentioned above. Lastly, the result is transformed from Tesla units to micro Tesla units. Parameters ------------------- *B_phasors : numpy.ndarray Respectively the real and imaginary part (columns) of the x and y components (rows) of the magnetic induction field B produced by a single cable in a given point Returns ------------------- B_effective_microT : float Effective magnetic induction field B (microTesla) calculated in the given point Notes ------------------- The current function implements the calculations present both in [1]_"Norma Italiana CEI 106-11" formulas (3-4) and [2]_"Norma Italiana CEI 211-4" formulas (17). References ------------------- ..[1] Norma Italiana CEI 106-11, "Guide for the determination of the respect widths for power lines and substations according to DPCM 8 July 2003 (Clause 6) - Part 1: Overhead lines and cables", first edition, 2006-02. ..[2] Norma Italiana CEI 211-4, "Guide to calculation methods of electric and magnetic fields generated by power-lines and electrical substations", second edition, 2008-09. """ B_components = 0 for B_phasor in B_phasors: B_components += B_phasor B_effective_T = np.sqrt(np.sum(B_components**2)) B_effective_microT = B_effective_T*10**(6) return B_effective_microT
21,045
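Written out as a formula, the computation above (sum the 2x2 phasor matrices over all cables, take the root of the sum of squares of the four real/imaginary components, convert T to µT) is:

    B_{eff} = 10^{6}\,\sqrt{\Big(\sum_i \mathrm{Re}\,B_{x,i}\Big)^{2} + \Big(\sum_i \mathrm{Im}\,B_{x,i}\Big)^{2} + \Big(\sum_i \mathrm{Re}\,B_{y,i}\Big)^{2} + \Big(\sum_i \mathrm{Im}\,B_{y,i}\Big)^{2}}\quad[\mu\mathrm{T}]

where i runs over the contributing cables.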
def elision_count(l): """Returns the number of elisions in a given line Args: l (a bs4 <line>): The line Returns: (int): The number of elisions """ return sum([(1 if _has_elision(w) else 0) for w in l("word")])
21,046
def test_enhanced_list_method(service2): """ service2 implements an enhanced list_example_models method using a custom "list" method name. """ container = service2.container record_1 = {'id': 1, 'name': 'Bob Dobalina'} # write through the service with entrypoint_hook( container, "create_example_model" ) as create_example_model: result = create_example_model(record_1) assert result == record_1 # call the list method with entrypoint_hook( container, "list_example_models" ) as list_example_models: results = list_example_models() assert results[0]['id'] == 1 assert results[0]['name'] == 'Bob Dobalina' assert results[0]['more'] == 'data'
21,047
def get_final_df(model, data): """ This function takes the `model` and `data` dict to construct a final dataframe that includes the features along with true and predicted prices of the testing dataset """ # if predicted future price is higher than the current, # then calculate the true future price minus the current price, to get the buy profit buy_profit = lambda current, pred_future, true_future: true_future - current if pred_future > current else 0 # if the predicted future price is lower than the current price, # then subtract the true future price from the current price sell_profit = lambda current, pred_future, true_future: current - true_future if pred_future < current else 0 X_test = data["X_test"] y_test = data["y_test"] # perform prediction and get prices y_pred = model.predict(X_test) if SCALE: y_test = np.squeeze(data["column_scaler"]["adjclose"].inverse_transform(np.expand_dims(y_test, axis=0))) y_pred = np.squeeze(data["column_scaler"]["adjclose"].inverse_transform(y_pred)) test_df = data["test_df"] # add predicted future prices to the dataframe test_df[f"adjclose_{LOOKUP_STEP}"] = y_pred # add true future prices to the dataframe test_df[f"true_adjclose_{LOOKUP_STEP}"] = y_test # sort the dataframe by date test_df.sort_index(inplace=True) final_df = test_df # add the buy profit column final_df["buy_profit"] = list(map(buy_profit, final_df["adjclose"], final_df[f"adjclose_{LOOKUP_STEP}"], final_df[f"true_adjclose_{LOOKUP_STEP}"]) # since we don't have profit for last sequence, add 0's ) # add the sell profit column final_df["sell_profit"] = list(map(sell_profit, final_df["adjclose"], final_df[f"adjclose_{LOOKUP_STEP}"], final_df[f"true_adjclose_{LOOKUP_STEP}"]) # since we don't have profit for last sequence, add 0's ) return final_df
21,048
def read_fragment_groups(input_string,natoms,num_channels): """ read in the fragment groups for each channel """ inp_line = _get_integer_line(input_string,'FragmentGroups',natoms) assert inp_line is not None out=' '.join(inp_line) return out
21,049
def _assertVolume(volume: float) -> None: """ Check that the volume is between 0.0 and 1.0 """ if volume < 0.0 or 1.0 < volume: raise InvalidVolumeError(volume)
21,050
def save(program, model_path, protocol=4, **configs): """ :api_attr: Static Graph This function save parameters, optimizer information and network description to model_path. The parameters contains all the trainable Tensor, will save to a file with suffix ".pdparams". The optimizer information contains all the Tensor used by optimizer. For Adam optimizer, contains beta1, beta2, momentum etc. All the information will save to a file with suffix ".pdopt". (If the optimizer have no Tensor need to save (like SGD), the fill will not generated). The network description is the description of the program. It's only used for deployment. The description will save to a file with a suffix ".pdmodel". Args: program(Program) : The program to saved. model_path(str): the file prefix to save the program. The format is "dirname/file_prefix". If file_prefix is empty str. A exception will be raised protocol(int, optional): The protocol version of pickle module must be greater than 1 and less than 5. Default: 4 configs(dict, optional) : optional keyword arguments. Returns: None Examples: .. code-block:: python import paddle import paddle.static as static paddle.enable_static() x = static.data(name="x", shape=[10, 10], dtype='float32') y = static.nn.fc(x, 10) z = static.nn.fc(y, 10) place = paddle.CPUPlace() exe = static.Executor(place) exe.run(static.default_startup_program()) prog = static.default_main_program() static.save(prog, "./temp") """ base_name = os.path.basename(model_path) assert base_name != "", \ "The input model_path MUST be format of dirname/filename [dirname\\filename in Windows system], but received model_path is empty string." if 'pickle_protocol' in configs: protocol = configs['pickle_protocol'] warnings.warn( "'pickle_protocol' is a deprecated argument. Please use 'protocol' instead." ) if not isinstance(protocol, int): raise ValueError("The 'protocol' MUST be `int`, but received {}".format( type(protocol))) if protocol < 2 or protocol > 4: raise ValueError("Expected 1<'protocol'<5, but received protocol={}". format(protocol)) dir_name = os.path.dirname(model_path) if dir_name and not os.path.exists(dir_name): os.makedirs(dir_name) def get_tensor(var): t = global_scope().find_var(var.name).get_tensor() return np.array(t) parameter_list = list(filter(is_parameter, program.list_vars())) param_dict = {p.name: get_tensor(p) for p in parameter_list} param_dict = _unpack_saved_dict(param_dict, protocol) # When value of dict is lager than 4GB ,there is a Bug on 'MAC python3' if sys.platform == 'darwin' and sys.version_info.major == 3: pickle_bytes = pickle.dumps(param_dict, protocol=protocol) with open(model_path + ".pdparams", 'wb') as f: max_bytes = 2**30 for i in range(0, len(pickle_bytes), max_bytes): f.write(pickle_bytes[i:i + max_bytes]) else: with open(model_path + ".pdparams", 'wb') as f: pickle.dump(param_dict, f, protocol=protocol) optimizer_var_list = list( filter(is_belong_to_optimizer, program.list_vars())) opt_dict = {p.name: get_tensor(p) for p in optimizer_var_list} with open(model_path + ".pdopt", 'wb') as f: pickle.dump(opt_dict, f, protocol=protocol) main_program = program.clone() program.desc.flush() main_program.desc._set_version() paddle.fluid.core.save_op_version_info(program.desc) with open(model_path + ".pdmodel", "wb") as f: f.write(program.desc.serialize_to_string())
21,051
def did_discover_device(odrive, logger, app_shutdown_token): """ Handles the discovery of new devices by displaying a message and making the device available to the interactive console """ serial_number = odrive.serial_number if hasattr(odrive, 'serial_number') else "[unknown serial number]" if serial_number in discovered_devices: verb = "Reconnected" index = discovered_devices.index(serial_number) else: verb = "Connected" discovered_devices.append(serial_number) index = len(discovered_devices) - 1 interactive_name = "odrv" + str(index) # Publish new ODrive to interactive console interactive_variables[interactive_name] = odrive globals()[interactive_name] = odrive # Add to globals so tab complete works logger.notify("{} to ODrive {:012X} as {}".format(verb, serial_number, interactive_name)) # Subscribe to disappearance of the device odrive.__channel__._channel_broken.subscribe(lambda: did_lose_device(interactive_name, logger, app_shutdown_token))
21,052
def retrain_different_dataset(index): """ This function is to evaluate all different datasets in the model with one function call """ from utils.helper_functions import load_flags data_set_list = ["Peurifoy"] # data_set_list = ["Chen"] # data_set_list = ["Yang"] #data_set_list = ["Peurifoy","Chen","Yang_sim"] for eval_model in data_set_list: flags = load_flags(os.path.join("models", eval_model+"_best_model")) flags.model_name = "retrain" + str(index) + eval_model flags.train_step = 500 flags.test_ratio = 0.2 training_from_flag(flags)
21,053
def pad_tile_on_edge(tile, tile_row, tile_col, tile_size, ROI): """ add the padding to the tile on the edges. If the tile's center is outside of ROI, move it back to the edge Args: tile: tile value tile_row: row number of the tile relative to its ROI tile_col: col number of the tile relative to its ROI tile_size: default tile size which may be different from the input tile ROI: ROI value which contains the input tile Return: the padded tile """ tile_height, tile_width, tile_channel = tile.shape tile_row_lower = tile_row tile_row_upper = tile_row + tile_height tile_col_lower = tile_col tile_col_upper = tile_col + tile_width # if the tile's center is outside of ROI, move it back to the edge, # and then add the padding if tile_height < tile_size / 2: tile_row_lower = tile_row_upper - tile_size // 2 tile_height = tile_size // 2 if tile_width < tile_size / 2: tile_col_lower = tile_col_upper - tile_size // 2 tile_width = tile_size // 2 tile = ROI[tile_row_lower: tile_row_upper, tile_col_lower: tile_col_upper, ] padding = ((0, tile_size - tile_height), (0, tile_size - tile_width), (0, 0)) return np.pad(tile, padding, "reflect")
21,054
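The final step above relies on NumPy's "reflect" padding mode; a tiny standalone illustration with a toy 3x3 single-channel tile (not actual ROI data):

    import numpy as np
    tile = np.arange(9, dtype=np.uint8).reshape(3, 3, 1)   # toy tile
    padding = ((0, 1), (0, 1), (0, 0))                     # pad rows/cols up to 4x4, channels untouched
    padded = np.pad(tile, padding, "reflect")
    print(padded.shape)      # (4, 4, 1)
    print(padded[:, :, 0])   # new row/column mirror the interior values, edge not repeated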
def move_away_broken_database(dbfile: str) -> None: """Move away a broken sqlite3 database.""" isotime = dt_util.utcnow().isoformat() corrupt_postfix = f".corrupt.{isotime}" _LOGGER.error( "The system will rename the corrupt database file %s to %s in order to allow startup to proceed", dbfile, f"{dbfile}{corrupt_postfix}", ) for postfix in SQLITE3_POSTFIXES: path = f"{dbfile}{postfix}" if not os.path.exists(path): continue os.rename(path, f"{path}{corrupt_postfix}")
21,055
def calc_kfold_score(model, df, y, n_splits=3, shuffle=True): """ Calculate the cross-validation score for the given model and data. Uses sklearn's KFold with shuffle=True. :param model: an instance of sklearn-model :param df: the dataframe with training data :param y: the dependent variable :param n_splits: the number of splits (i.e. K in K-fold) :param shuffle: whether to shuffle or not :return: mean, std """ kf = KFold(n_splits=n_splits, shuffle=shuffle) scores = list(calc_kfold_score_helper(model, kf, df, y)) mean = np.mean(scores) std = np.std(scores) return mean, std
21,056
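calc_kfold_score_helper is not shown in this row; presumably it fits the model on each training split and scores it on the matching test split. A hypothetical sketch under that assumption (names and indexing are illustrative, not the original project's code):

    def calc_kfold_score_helper(model, kf, df, y):
        """Hypothetical helper: yield one score per KFold split."""
        for train_idx, test_idx in kf.split(df):
            model.fit(df.iloc[train_idx], y.iloc[train_idx])
            yield model.score(df.iloc[test_idx], y.iloc[test_idx])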
def obter_forca (unidade): """Returns the attack strength of the unit given as argument.""" return unidade[2]
21,057
def get_default_configuration(cookiecutter_json: CookiecutterJson) -> Dict[str, str]: """ Get the default values for the cookiecutter configuration. """ default_options = dict() for key, value in cookiecutter_json.items(): if isinstance(value, str) and "{{" not in value: # ignore templated values default_options[key] = value elif isinstance(value, list): assert len(value) > 0, "Option list must have at least one element" default_options[key] = value[0] return default_options
21,058
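A small worked example of the selection rules above — templated strings are skipped and the first element of a list is taken as the default (assuming the function is in scope and CookiecutterJson is an ordinary mapping):

    cookiecutter_json = {
        "project_name": "My Project",
        "repo_name": "{{ cookiecutter.project_name }}",   # templated value, ignored
        "license": ["MIT", "BSD-3-Clause", "GPL-3.0"],     # list, first element wins
    }
    print(get_default_configuration(cookiecutter_json))
    # {'project_name': 'My Project', 'license': 'MIT'}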
def test_normalize_attribute_name(name): """Test the attribute name normalization.""" normalized = normalize_attribute_name(name) assert ( normalized.isidentifier() ), f'Attribute "{name}" was not normalized to a valid identifier! (Normalized: "{normalized}")' assert not iskeyword( normalized ), f'Attribute "{name}" was normalized to a python keyword!'
21,059
def random_nodes_generator(num_nodes, seed=20): """ :param int num_nodes: An Integer denoting the number of nodes :param int seed: (Optional) Integer specifying the seed for controlled randomization. :return: A dictionary containing the coordinates. :rtype: dict """ np.random.seed(seed) max_coord_val = num_nodes num_coord_grid = max_coord_val * max_coord_val index = np.arange(max_coord_val * max_coord_val) np.random.shuffle(index) random_slice_start = np.random.randint(0, num_coord_grid - num_nodes) coord_index = index[random_slice_start:random_slice_start + num_nodes] x_array = np.arange(max_coord_val).repeat(max_coord_val) y_array = np.tile(np.arange(max_coord_val), max_coord_val) node_coord = np.empty((num_nodes, 2), dtype=np.int32) node_coord[:, 0] = x_array[coord_index] node_coord[:, 1] = y_array[coord_index] node_dict = {} for i in range(num_nodes): node_dict[i] = (x_array[coord_index[i]], y_array[coord_index[i]]) return node_dict
21,060
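A quick usage sketch (assuming the function and its NumPy import are available): with num_nodes=5 the coordinates are drawn without replacement from a 5x5 grid, so the checks below hold regardless of the seed:

    nodes = random_nodes_generator(5, seed=20)
    print(len(nodes))                                                  # 5
    print(all(0 <= x < 5 and 0 <= y < 5 for x, y in nodes.values()))   # True
    print(len(set(nodes.values())) == 5)                               # True, coordinates are unique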
def OpenDocumentTextMaster(): """ Creates a text master document """ doc = OpenDocument('application/vnd.oasis.opendocument.text-master') doc.text = Text() doc.body.addElement(doc.text) return doc
21,061
def draw_cell(cell, color=COLOR.GREEN.value): """ Draw a single cell. :param cell: cell position :param color: cell color """ (x, y) = cell x = x * CELL_SIZE y = y * CELL_SIZE outer_rect = pygame.Rect(x, y, CELL_SIZE, CELL_SIZE) padding_rect = pygame.Rect(x + 2, y + 2, CELL_SIZE - 4, CELL_SIZE - 4) inner_rect = pygame.Rect(x + 4, y + 4, CELL_SIZE - 8, CELL_SIZE - 8) pygame.draw.rect(screen, color, outer_rect) pygame.draw.rect(screen, COLOR.BG_COLOR.value, padding_rect) pygame.draw.rect(screen, color, inner_rect)
21,062
def get_visible_desktops(): """ Returns a list of visible desktops. The first desktop is on Xinerama screen 0, the second is on Xinerama screen 1, etc. :return: A list of visible desktops. :rtype: util.PropertyCookie (CARDINAL[]/32) """ return util.PropertyCookie(util.get_property(root, '_NET_VISIBLE_DESKTOPS'))
21,063
def get_env_string(env_key, fallback): """ Reads a string value from the environment. No literal evaluation is performed, since environment values are always strings; an unset or empty value falls back to the given default. :param str env_key: key to read :param str fallback: fallback value :rtype: str :return: environment value as a string """ assert isinstance(fallback, str), "fallback should be str instance" return os.environ.get(env_key) or fallback
21,064
def spherical_noise( gridData=None, order_max=8, kind="complex", spherical_harmonic_bases=None ): """Returns order-limited random weights on a spherical surface. Parameters ---------- gridData : io.SphericalGrid SphericalGrid containing azimuth and colatitude order_max : int, optional Spherical order limit [Default: 8] kind : {'complex', 'real'}, optional Spherical harmonic coefficients data type [Default: 'complex'] spherical_harmonic_bases : array_like, optional Spherical harmonic base coefficients (not yet weighted by spatial sampling grid) [Default: None] Returns ------- noisy_weights : array_like, complex Noisy weights """ if spherical_harmonic_bases is None: if gridData is None: raise TypeError( "Either a grid or the spherical harmonic bases have to be provided." ) gridData = SphericalGrid(*gridData) spherical_harmonic_bases = sph_harm_all( order_max, gridData.azimuth, gridData.colatitude, kind=kind ) else: order_max = _np.int(_np.sqrt(spherical_harmonic_bases.shape[1]) - 1) return _np.inner( spherical_harmonic_bases, _np.random.randn((order_max + 1) ** 2) + 1j * _np.random.randn((order_max + 1) ** 2), )
21,065
def rollback_command(): """Command to perform a rollback of the repo.""" return Command().command(_rollback_command).require_clean().require_migration().with_database()
21,066
def circumcenter(vertices): """ Compute the circumcenter of a triangle (the center of the circle which passes through all the vertices of the triangle). :param vertices: The triangle vertices (3 by n matrix with the vertices as rows (where n is the dimension of the space)). :returns: The triangle circumcenter. :rtype: n-dimensional vector """ # Compute trilinear coordinates trilinear = numpy.zeros(3) for i in range(3): trilinear[i] = numpy.cos(angle(vertices, i)) bary = trilinear_to_barycentric(trilinear, vertices) return barycentric_to_cartesian(bary, vertices)
21,067
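The loop above builds the classical trilinear coordinates of the circumcenter before converting trilinear → barycentric → Cartesian; for reference, with A, B, C the triangle's interior angles:

    \text{circumcenter trilinears: } \cos A : \cos B : \cos C
    \quad\Longleftrightarrow\quad
    \text{barycentrics: } \sin 2A : \sin 2B : \sin 2C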
def run(): """Run example for Doc-2-Vec method and IMDB dataset.""" log.info('START') data = {'test-neg.txt': 'TEST_NEG', 'test-pos.txt': 'TEST_POS', 'train-neg.txt': 'TRAIN_NEG', 'train-pos.txt': 'TRAIN_POS', 'train-unsup.txt': 'TRAIN_UNS'} data = {join(IMDB_MERGED_PATH, k): v for k, v in data.iteritems()} sentences = Doc2VecGenerator(data) vector_size = 400 models_path = '/datasets/amazon-data/csv/models/doc2vec/' if not exists(models_path): makedirs(models_path) log.info('Directory: {} has been created'.format(models_path)) f_name = 'imdb-{}.d2v'.format(vector_size) f_model = join(models_path, f_name) log.info('Model Load or Save') if isfile(f_model): model = Doc2Vec.load(f_model) log.info('Model has been loaded from: {}'.format(f_model)) else: cores = multiprocessing.cpu_count() model = Doc2Vec(min_count=1, window=10, size=vector_size, sample=1e-4, negative=5, workers=cores) model.build_vocab(sentences.to_array()) log.info('Epochs') for epoch in range(10): log.info('EPOCH: #{}'.format(epoch)) model.train(sentences.sentences_perm()) model.save(f_model) log.info('Sentiment') train_arrays = numpy.zeros((25000, vector_size)) train_labels = numpy.zeros(25000) for i in range(12500): log.debug('TRAIN_{}'.format(i)) prefix_train_pos = 'TRAIN_POS_' + str(i) prefix_train_neg = 'TRAIN_NEG_' + str(i) train_arrays[i] = model.docvecs[prefix_train_pos] train_arrays[12500 + i] = model.docvecs[prefix_train_neg] train_labels[i] = 1 train_labels[12500 + i] = 0 test_arrays = numpy.zeros((25000, vector_size)) test_labels = numpy.zeros(25000) for i in range(12500): log.debug('TEST_{}'.format(i)) prefix_test_pos = 'TEST_POS_' + str(i) prefix_test_neg = 'TEST_NEG_' + str(i) test_arrays[i] = model.docvecs[prefix_test_pos] test_arrays[12500 + i] = model.docvecs[prefix_test_neg] test_labels[i] = 1 test_labels[12500 + i] = 0 log.info('Fitting') classifiers = { 'BernoulliNB': BernoulliNB(), 'GaussianNB': GaussianNB(), 'DecisionTreeClassifier': DecisionTreeClassifier(), 'AdaBoostClassifier': AdaBoostClassifier(), 'RandomForestClassifier': RandomForestClassifier(), 'LogisticRegression': LogisticRegression(), 'SVC': SVC(), 'LinearSVC': LinearSVC() } results = {} for classifier_name, classifier in classifiers.iteritems(): log.info('Clf: {}'.format(classifier_name)) classifier.fit(train_arrays, train_labels) # # LogisticRegression(C=1.0, class_weight=None, dual=False, fit_intercept=True, # intercept_scaling=1, penalty='l2', random_state=None, # tol=0.0001) result = classifier.score(test_arrays, test_labels) log.info('Clf acc: {}'.format(result)) results[classifier_name] = result log.info(results) with open(models_path + 'results-{}'.format(f_name)) as res: pickle.dump(results, res)
21,068
def testing_submodules_repo(testing_workdir, request): """Initialize a new git directory with two submodules.""" subprocess.check_call(['git', 'init']) # adding a commit for a readme since git diff behaves weird if # submodules are the first ever commit subprocess.check_call(['touch', 'readme.txt']) with open('readme.txt', 'w') as readme: readme.write('stuff') subprocess.check_call(['git', 'add', '.']) subprocess.check_call(['git', 'commit', '-m', 'Added readme']) subprocess.check_call(['git', 'submodule', 'add', 'https://github.com/conda-forge/conda-feedstock.git']) subprocess.check_call(['git', 'submodule', 'add', 'https://github.com/conda-forge/conda-build-feedstock.git']) subprocess.check_call(['git', 'add', '.']) subprocess.check_call(['git', 'commit', '-m', 'Added conda and cb submodules']) # a second commit, for testing trips back in history subprocess.check_call(['git', 'submodule', 'add', 'https://github.com/conda-forge/conda-build-all-feedstock.git']) subprocess.check_call(['git', 'add', '.']) subprocess.check_call(['git', 'commit', '-m', 'Added cba submodule']) return testing_workdir
21,069
def get_ROC_curve_naive(values, classes):
    """ Naive implementation of a ROC curve generator that iterates over a number of thresholds. """
    # get number of positives and negatives:
    n_values = len(values)
    totalP = len(np.where(classes > 0)[0])
    totalN = n_values - totalP

    min_val = np.min(values)
    max_val = np.max(values)
    thresholds = np.arange(min_val, max_val, 1.0)
    n_thresholds = len(thresholds)
    TP = np.zeros([n_thresholds, 1])
    FP = np.zeros([n_thresholds, 1])
    for t in range(n_thresholds):
        inds = np.where(values >= thresholds[t])
        P = np.sum(classes[inds[0]])
        TP[t] = P / totalP
        F = len(inds[0]) - P
        FP[t] = F / totalN
    return TP, FP
21,070
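A small illustrative call for get_ROC_curve_naive above: ten scores, binary labels, and the per-threshold true/false positive rates it returns.

import numpy as np

values = np.array([1.0, 3.0, 5.0, 7.0, 9.0, 2.0, 4.0, 6.0, 8.0, 10.0])
classes = np.array([0, 0, 0, 0, 0, 1, 1, 1, 1, 1])  # 5 negatives, 5 positives

TP, FP = get_ROC_curve_naive(values, classes)
print(TP.ravel())  # true-positive rate at each threshold
print(FP.ravel())  # false-positive rate at each threshold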
def worker(path, opt): """Worker for each process. Args: path (str): Image path. opt (dict): Configuration dict. It contains: crop_size (int): Crop size. step (int): Step for overlapped sliding window. thresh_size (int): Threshold size. Patches whose size is smaller than thresh_size will be dropped. save_folder (str): Path to save folder. compression_level (int): for cv2.IMWRITE_PNG_COMPRESSION. Returns: process_info (str): Process information displayed in progress bar. """ crop_size = opt['crop_size'] step = opt['step'] thresh_size = opt['thresh_size'] img_name, extension = osp.splitext(osp.basename(path)) # remove the x2, x3, x4 and x8 in the filename for DIV2K img_name = re.sub('x[2348]', '', img_name) img = mmcv.imread(path, flag='unchanged') if img.ndim == 2 or img.ndim == 3: h, w = img.shape[:2] else: raise ValueError(f'Image ndim should be 2 or 3, but got {img.ndim}') h_space = np.arange(0, h - crop_size + 1, step) if h - (h_space[-1] + crop_size) > thresh_size: h_space = np.append(h_space, h - crop_size) w_space = np.arange(0, w - crop_size + 1, step) if w - (w_space[-1] + crop_size) > thresh_size: w_space = np.append(w_space, w - crop_size) index = 0 for x in h_space: for y in w_space: index += 1 cropped_img = img[x:x + crop_size, y:y + crop_size, ...] cv2.imwrite( osp.join(opt['save_folder'], f'{img_name}_s{index:03d}{extension}'), cropped_img, [cv2.IMWRITE_PNG_COMPRESSION, opt['compression_level']]) process_info = f'Processing {img_name} ...' return process_info
21,071
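A hedged sketch of how worker above might be driven; the image path and option values are placeholders, and mmcv, cv2, and an existing PNG are assumed.

import os

opt = {
    'crop_size': 480,            # side length of each square patch
    'step': 240,                 # 50% overlap between neighbouring patches
    'thresh_size': 0,            # keep even small leftover patches
    'save_folder': './patches',  # hypothetical output directory
    'compression_level': 3,      # cv2.IMWRITE_PNG_COMPRESSION level
}
os.makedirs(opt['save_folder'], exist_ok=True)

# 'data/0001x2.png' is a placeholder input path; the x2 suffix is stripped by worker.
print(worker('data/0001x2.png', opt))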
def RobotNet(images, dropout):
    """ Build the model used as RobotNet.

    Args:
        images: 4-D tensor with shape [batch_size, height, width, channels].
        dropout: A Python float. The probability that each element is kept.

    Returns:
        Output tensor with the computed classes.
    """
    # _X = tf.reshape(images, shape=[-1, IMAGE_HEIGTH, IMAGE_WIDTH, IMAGE_CHANNAL])
    # X = tf.cast(_X, tf.float32)
    X = tf.cast(images, tf.float32)

    weights1 = tf.Variable(tf.random_normal([11, 11, 3, 96], stddev=0.01))
    biases1 = tf.Variable(tf.zeros([96]))
    conv1 = conv2d('conv1', X, weights1, biases1, stride=[4, 4], padding='SAME')
    norm1 = norm('norm1', conv1, lsize=2)
    pool1 = max_pool('pool1', norm1, 3, 2)

    weights2 = tf.Variable(tf.random_normal([5, 5, 96, 256], stddev=0.01))
    biases2 = tf.Variable(tf.constant(0.1, shape=[256]))
    conv2 = conv2d('conv2', pool1, weights2, biases2, stride=[1, 1], padding='SAME')
    norm2 = norm('norm2', conv2, lsize=2)
    pool2 = max_pool('pool2', norm2, 3, 2)

    weights3 = tf.Variable(tf.random_normal([3, 3, 256, 384], stddev=0.01))
    biases3 = tf.Variable(tf.zeros([384]))
    conv3 = conv2d('conv3', pool2, weights3, biases3, stride=[1, 1], padding='SAME')

    weights4 = tf.Variable(tf.random_normal([3, 3, 384, 384], stddev=0.01))
    biases4 = tf.Variable(tf.constant(0.1, shape=[384]))
    conv4 = conv2d('conv4', conv3, weights4, biases4, stride=[1, 1], padding='SAME')

    weights5 = tf.Variable(tf.random_normal([3, 3, 384, 256], stddev=0.01))
    biases5 = tf.Variable(tf.constant(0.1, shape=[256]))
    conv5 = conv2d('conv5', conv4, weights5, biases5, stride=[1, 1], padding='SAME')
    pool5 = max_pool('pool5', conv5, 3, 2)

    p_h = pool5.get_shape().as_list()[1]
    p_w = pool5.get_shape().as_list()[2]
    print('p_h:', p_h)
    print('p_w:', p_w)

    weights6 = tf.Variable(tf.random_normal([p_h * p_w * 256, 4096], stddev=0.005))
    biases6 = tf.Variable(tf.constant(0.1, shape=[4096]))
    dense1 = tf.reshape(pool5, [-1, weights6.get_shape().as_list()[0]])
    fc6 = tf.nn.relu(tf.matmul(dense1, weights6) + biases6, name='fc6')
    drop6 = tf.nn.dropout(fc6, dropout)

    weights7 = tf.Variable(tf.random_normal([4096, 4096], stddev=0.005))
    biases7 = tf.Variable(tf.constant(0.1, shape=[4096]))
    fc7 = tf.nn.relu(tf.matmul(drop6, weights7) + biases7, name='fc7')
    drop7 = tf.nn.dropout(fc7, dropout)

    weights8 = tf.Variable(tf.random_normal([4096, 2], stddev=0.01))
    biases8 = tf.Variable(tf.zeros([2]))
    net_out = tf.matmul(drop7, weights8) + biases8

    saver = tf.train.Saver({v.op.name: v for v in [weights1, biases1, weights2, biases2, weights3, biases3,
                                                   weights4, biases4, weights5, biases5, weights6, biases6,
                                                   weights7, biases7, weights8, biases8]})
    return net_out, saver
21,072
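A minimal graph-construction sketch for RobotNet above, assuming TensorFlow 1.x semantics (placeholders, tf.train.Saver) and the conv2d, norm, and max_pool helpers defined elsewhere in the module; the 227x227 input size is an assumption, not something the snippet fixes.

import tensorflow as tf  # TensorFlow 1.x API assumed

images = tf.placeholder(tf.float32, [None, 227, 227, 3], name='images')
keep_prob = tf.placeholder(tf.float32, name='keep_prob')

logits, saver = RobotNet(images, keep_prob)
print(logits.get_shape())  # (?, 2) -- two output classes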
def cvReleaseMemStorage(*args): """cvReleaseMemStorage(PyObject obj)""" return _cv.cvReleaseMemStorage(*args)
21,073
def run_config_filename(conf_filename): """ Runs xNormal using the path to a configuration file. """ retcode = os.system("\"%s\" %s" % (path, conf_filename)) return retcode
21,074
def test_fetchjson_with_destination_int_old(mock_s3): """Test outKey still works for backwards compatibility.""" mock_body = Mock() bunch_of_bytes = bytes(json.dumps([1, 2, 3]), 'utf-8') mock_body.read.return_value = bunch_of_bytes mock_s3.side_effect = [{'Body': mock_body}] context = Context({ 'k1': 'v1', 's3Fetch': { 'clientArgs': {'ck1': 'cv1', 'ck2': 'cv2'}, 'methodArgs': {'Bucket': 'bucket name', 'Key': 'key name', 'SSECustomerAlgorithm': 'sse alg', 'SSECustomerKey': 'sse key'}, 'outKey': 99}, 99: 'blah' }) s3fetchjson.run_step(context) assert context[99] == [1, 2, 3] assert len(context) == 3
21,075
def GetCommitsInOrder( repo: git.Repo, head_ref: str = "HEAD", tail_ref: typing.Optional[str] = None) -> typing.List[git.Commit]: """Get a list of all commits, in chronological order from old to new. Args: repo: The repo to list the commits of. head_ref: The starting point for iteration, e.g. the commit closest to head. tail_ref: The end point for iteration, e.g. the commit closest to tail. This commit is NOT included in the returned values. Returns: A list of git.Commit objects. """ def TailCommitIterator(): stop_commit = repo.commit(tail_ref) for commit in repo.iter_commits(head_ref): if commit == stop_commit: break yield commit if tail_ref: commit_iter = TailCommitIterator() else: commit_iter = repo.iter_commits(head_ref) try: return list(reversed(list(commit_iter))) except git.GitCommandError: # If HEAD is not found, an exception is raised. return []
21,076
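A brief usage sketch for GetCommitsInOrder above, assuming GitPython is installed and the current directory is a git checkout; HEAD~5 is just an illustrative tail ref.

import git

repo = git.Repo('.')  # any existing repository path works here

# Oldest-to-newest commits after HEAD~5 (exclusive) up to HEAD (inclusive).
for commit in GetCommitsInOrder(repo, head_ref='HEAD', tail_ref='HEAD~5'):
    print(commit.hexsha[:8], commit.summary)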
def sell(): """Sell shares of stock""" # return apology("TODO") if request.method == 'GET': return render_template('sell_stock.html') else: symbol = request.form['symbol'] shares = int(request.form['shares']) return sell_stock(Symbol=symbol, Shares=shares, id=session['user_id'])
21,077
def return_sw_checked(softwareversion, osversion): """ Check software existence, return boolean. :param softwareversion: Software release version. :type softwareversion: str :param osversion: OS version. :type osversion: str """ if softwareversion is None: serv = bbconstants.SERVERS["p"] softwareversion = networkutils.sr_lookup(osversion, serv) softwareversion, swchecked = sw_check_contingency(softwareversion) else: swchecked = True return softwareversion, swchecked
21,078
def apply( f: tp.Callable[..., None], obj: A, *rest: A, inplace: bool = False, _top_inplace: tp.Optional[bool] = None, _top_level: bool = True, ) -> A: """ Applies a function to all `to.Tree`s in a Pytree. Works very similar to `jax.tree_map`, but its values are `to.Tree`s instead of leaves, also `f` should apply the changes inplace to Tree object. Arguments: f: The function to apply. obj: a pytree possibly containing Trees. *rest: additional pytrees. inplace: If `True`, the input `obj` is mutated. Returns: A new pytree with the updated Trees or the same input `obj` if `inplace` is `True`. """ if _top_inplace is None: _top_inplace = inplace if _top_level: rest = copy(rest) if not inplace: obj = copy(obj) objs = (obj,) + rest def nested_fn(obj, *rest): if isinstance(obj, Tree): apply( f, obj, *rest, inplace=True, _top_inplace=_top_inplace, _top_level=False, ) jax.tree_map( nested_fn, *objs, is_leaf=lambda x: isinstance(x, Tree) and not x in objs, ) if isinstance(obj, Tree): if _top_inplace or obj._mutable: f(obj, *rest) else: with _make_mutable_toplevel(obj): f(obj, *rest) return obj
21,079
def test_equality_inverse():
    """URLs with different addresses must compare as not equal"""
    loc_1 = 'http://example.com/foo_bar.html'
    loc_2 = 'http://example.com/bar_foo.html'
    assert URL(loc_1) != URL(loc_2)
21,080
def cv_data_gen(ad_sc, ad_sp, mode='loo'):
    """ This function generates cross validation datasets

    Args:
        ad_sc: AnnData, single cell data
        ad_sp: AnnData, gene spatial data
        mode: string, supports 'loo' and 'kfold'
    """
    genes_array = np.array(list(set(ad_sc.var.index.values)))

    if mode == 'loo':
        cv = LeaveOneOut()
    elif mode == 'kfold':
        cv = KFold(n_splits=10)
    else:
        raise ValueError("mode must be 'loo' or 'kfold', got {!r}".format(mode))

    for train_idx, test_idx in cv.split(genes_array):
        train_genes = genes_array[train_idx]
        test_genes = list(genes_array[test_idx])
        ad_sc_train, ad_sp_train = ad_sc[:, train_genes], ad_sp[:, train_genes]
        yield ad_sc_train, ad_sp_train, test_genes
21,081
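A hedged sketch of iterating cv_data_gen above; ad_sc and ad_sp are assumed to be AnnData objects that share gene names in .var.index, as the docstring implies.

# ad_sc, ad_sp: AnnData objects prepared elsewhere (placeholders here).
for ad_sc_train, ad_sp_train, test_genes in cv_data_gen(ad_sc, ad_sp, mode='kfold'):
    # Train a mapping on the held-in genes, then evaluate on the held-out ones.
    print(len(test_genes), 'genes held out in this fold')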
def test_dedup_access_contiguous(): """ A test where there is a non-square shape that, based on whether contiguity is prioritized, might give different results. Subset is: j 6543 21012 3456 _____ ____| |____ 2 | | | | 1 | | | | 0 i |____| |____| 1 |_____| 2 A square of size 5x5, with two 4x3 squares on each side """ @dace.program def datest(A: dace.float64[N, N], B: dace.float64[N, N]): for i, j in dace.map[6:N - 6, 6:N - 6]: tmp = np.ndarray([3], dace.float64) with dace.tasklet: a << A[i - 2:i + 3, j - 2:j + 3] b >> tmp[0] b = a[2, 2] * 5.0 with dace.tasklet: a << A[i - 1:i + 2, j - 6:j + 5] b >> tmp[1] b = a[0, 0] * 4.0 with dace.tasklet: a << A[i - 1:i + 2, j - 2:j + 7] b >> tmp[2] b = a[0, 0] * 3.0 with dace.tasklet: inp << tmp out >> B[i, j] B[i, j] = (inp[0] + inp[1] + inp[2]) / 3.0 # j contiguous dimension sdfg: dace.SDFG = datest.to_sdfg(strict=True) nodes_before = sdfg.node(0).number_of_nodes() assert sdfg.apply_transformations(DeduplicateAccess) == 1 nodes_after = sdfg.node(0).number_of_nodes() assert nodes_after == nodes_before + 2 # i contiguous dimension sdfg: dace.SDFG = datest.to_sdfg(strict=True) sdfg.arrays['A'].strides = [1, N] nodes_before = sdfg.node(0).number_of_nodes() assert sdfg.apply_transformations(DeduplicateAccess) == 1 nodes_after = sdfg.node(0).number_of_nodes() assert nodes_after == nodes_before + 3
21,082
def run():
    """
    Main program entry point
    :return: None
    """
    GFWeather().run()
21,083
def imageSearch(query, top=10):
    """Returns the decoded json response content

    :param query: query for search
    :param top: number of search results
    """

    # set search url
    query = '%27' + parse.quote_plus(query) + '%27'
    # image search base url (Bing Search API on Azure Datamarket)
    base_url = 'https://api.datamarket.azure.com/Bing/Search/v1/Image'
    url = base_url + '?Query=' + query + '&$top=' + str(top) + '&$format=json&ImageFilters=%27Aspect%3ASquare%27'

    # create credential for authentication
    user_agent = "Mozilla/5.0 (Windows NT 10.0; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/47.0.2526.73 Safari/537.36"
    # create auth object
    auth = HTTPBasicAuth("", API_KEY)
    # set headers
    headers = {'User-Agent': user_agent}

    # get response from search url
    response_data = requests.get(url, headers=headers, auth=auth)
    # decode json response content
    json_result = response_data.json()

    return json_result['d']['results']
21,084
def pack_inputs(inputs): """Pack a list of `inputs` tensors to a tuple. Args: inputs: a list of tensors. Returns: a tuple of tensors. if any input is None, replace it with a special constant tensor. """ inputs = tf.nest.flatten(inputs) outputs = [] for x in inputs: if x is None: outputs.append(tf.constant(0, shape=[], dtype=tf.int32)) else: outputs.append(x) return tuple(outputs)
21,085
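A short example for pack_inputs above, showing how a None entry is replaced by a scalar int32 constant while real tensors pass through untouched.

import tensorflow as tf

a = tf.ones([2, 3])
packed = pack_inputs([a, None, a])

print(len(packed))      # 3
print(packed[1].dtype)  # int32 -- the special constant standing in for None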
def entries_as_dict(month_index): """Convert index xml list to list of dictionaries.""" # Search path findentrylist = etree.ETXPath("//section[@id='month-index']/ul/li") # Extract data entries_xml = findentrylist(month_index) entries = [to_entry_dict(entry_index_xml) for entry_index_xml in entries_xml] return entries
21,086
def resnet50():
    """Returns the block type and layer configuration for a ResNet-50 model.
    """
    return Bottleneck, [3, 4, 6, 3]
21,087
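A hedged sketch of how the resnet50 configuration above would typically be consumed; the ResNet constructor named in the comment is an assumption (torchvision-style), not part of the snippet.

block, layers = resnet50()
print(layers)  # [3, 4, 6, 3] -- the stage depths of ResNet-50

# Hypothetical: feed the configuration into a ResNet class defined elsewhere.
# model = ResNet(block, layers, num_classes=1000)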
def build_service_job_mapping(client, configured_jobs): """ :param client: A Chronos client used for getting the list of running jobs :param configured_jobs: A list of jobs configured in Paasta, i.e. jobs we expect to be able to find :returns: A dict of {(service, instance): last_chronos_job} where last_chronos_job is the latest job matching (service, instance) or None if there is no such job """ service_job_mapping = {} all_chronos_jobs = client.list() for job in configured_jobs: # find all the jobs belonging to each service matching_jobs = chronos_tools.filter_chronos_jobs( jobs=all_chronos_jobs, service=job[0], instance=job[1], include_disabled=True, include_temporary=True, ) matching_jobs = chronos_tools.sort_jobs(matching_jobs) # Only consider the most recent one service_job_mapping[job] = matching_jobs[0] if len(matching_jobs) > 0 else None return service_job_mapping
21,088
def _validate_labels(labels, lon=True): """ Convert labels argument to length-4 boolean array. """ if labels is None: return [None] * 4 which = 'lon' if lon else 'lat' if isinstance(labels, str): labels = (labels,) array = np.atleast_1d(labels).tolist() if all(isinstance(_, str) for _ in array): bool_ = [False] * 4 opts = ('left', 'right', 'bottom', 'top') for string in array: if string in opts: string = string[0] elif set(string) - set('lrbt'): raise ValueError( f'Invalid {which}label string {string!r}. Must be one of ' + ', '.join(map(repr, opts)) + " or a string of single-letter characters like 'lr'." ) for char in string: bool_['lrbt'.index(char)] = True array = bool_ if len(array) == 1: array.append(False) # default is to label bottom or left if len(array) == 2: if lon: array = [False, False, *array] else: array = [*array, False, False] if len(array) != 4 or any(isinstance(_, str) for _ in array): raise ValueError(f'Invalid {which}label spec: {labels}.') return array
21,089
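A worked example for _validate_labels above, assuming numpy is imported as np at module level as the function body requires: the compact string 'lr' expands to the four-element [left, right, bottom, top] boolean list.

print(_validate_labels('lr'))             # [True, True, False, False]
print(_validate_labels(None))             # [None, None, None, None]
print(_validate_labels(True, lon=False))  # [True, False, False, False] -- a single value labels the left side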
def copy_template(template, new_file, name):
    """
    Args:
        template: The absolute path to the template.
        new_file: The absolute path to the new file.
        name: The name of the new project

    Returns:
        None
    """
    if os.path.isfile(new_file):
        print("Warning: the existing file {} will be overwritten.".format(new_file))
    with open(template, 'r') as temp_file, open(new_file, 'w') as proj_file:
        for line in temp_file:
            line = re.sub('mvp', name, line)
            proj_file.write(line)
21,090
def files_all(args): """Executes all the files commands from individual databases""" for name in [k.name() for k in args.modules]: parsed = args.parser.parse_args([name, 'files']) parsed.func(parsed)
21,091
def celsius_to_fahrenheit(temperature_C): """ converts C -> F """ return temperature_C * 9.0 / 5.0 + 32.0
21,092
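Two quick sanity checks for celsius_to_fahrenheit above, using the freezing and boiling points of water.

assert celsius_to_fahrenheit(0.0) == 32.0     # freezing point
assert celsius_to_fahrenheit(100.0) == 212.0  # boiling point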
def _parse_date_time(date): """Parse time string. This matches 17:29:43. Args: date (str): the date string to be parsed. Returns: A tuple of the format (date_time, nsec), where date_time is a datetime.time object and nsec is 0. Raises: ValueError: if the date format does not match. """ pattern = re.compile( r'^(?P<hour>\d{2}):(?P<min>\d{2}):(?P<sec>\d{2})$' ) if not pattern.match(date): raise ValueError('Wrong date format: {}'.format(date)) hour = pattern.search(date).group('hour') minute = pattern.search(date).group('min') sec = pattern.search(date).group('sec') nsec = 0 time = datetime.time(int(hour), int(minute), int(sec)) return time, nsec
21,093
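A worked call for _parse_date_time above, assuming re and datetime are imported at module level as the function body requires.

time, nsec = _parse_date_time('17:29:43')
print(time)  # 17:29:43 (a datetime.time object)
print(nsec)  # 0

# A malformed string raises ValueError:
# _parse_date_time('17:29')  ->  ValueError: Wrong date format: 17:29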
def _output(line, lines, stream=None): """Internal function: Add line to output lines and optionally print to stream.""" lines.append(line) if stream: print(line, file=stream)
21,094
def parsed_user(request, institute_obj): """Return user info""" user_info = { 'email': 'john@doe.com', 'name': 'John Doe', 'location': 'here', 'institutes': [institute_obj['internal_id']], 'roles': ['admin'] } return user_info
21,095
def lastFromUT1(ut1, longitude):
    """Convert from universal time (MJD) to local apparent sidereal time (deg).

    Inputs:
    - ut1       UT1 MJD
    - longitude longitude east (deg)

    Returns:
    - last      local apparent sidereal time (deg)

    History:
    2002-08-05 ROwen    First version, loosely based on the TCC's tut_LAST.
    2014-04-25 ROwen    Add from __future__ import division, absolute_import and use relative import.
    """
    # convert UT1 to local mean sidereal time, in degrees
    lmst = lmstFromUT1(ut1, longitude)

    # find apparent - mean sidereal time, in degrees
    # note: this wants the TDB date, but UT1 is probably close enough
    appMinusMean = llv.eqeqx(ut1) / opscore.RO.PhysConst.RadPerDeg

    # find local apparent sidereal time, in degrees, in range [0, 360)
    return opscore.RO.MathUtil.wrapPos(lmst + appMinusMean)
21,096
def get_dataset(opts): """ Dataset And Augmentation """ train_transform = transform.Compose([ transform.RandomResizedCrop(opts.crop_size, (0.5, 2.0)), transform.RandomHorizontalFlip(), transform.ToTensor(), transform.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225]), ]) if opts.crop_val: val_transform = transform.Compose([ transform.Resize(size=opts.crop_size), transform.CenterCrop(size=opts.crop_size), transform.ToTensor(), transform.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225]), ]) else: # no crop, batch size = 1 val_transform = transform.Compose([ transform.ToTensor(), transform.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225]), ]) labels, labels_old, path_base = tasks.get_task_labels(opts.dataset, opts.task, opts.step) labels_cum = labels_old + labels if opts.dataset == 'voc': dataset = VOCSegmentationIncremental elif opts.dataset == 'ade': dataset = AdeSegmentationIncremental else: raise NotImplementedError if opts.overlap: path_base += "-ov" if not os.path.exists(path_base): os.makedirs(path_base, exist_ok=True) train_dst = dataset(root=opts.data_root, train=True, transform=train_transform, labels=list(labels), labels_old=list(labels_old), idxs_path=path_base + f"/train-{opts.step}.npy", masking=not opts.no_mask, overlap=opts.overlap) if not opts.no_cross_val: # if opts.cross_val: train_len = int(0.8 * len(train_dst)) val_len = len(train_dst)-train_len train_dst, val_dst = torch.utils.data.random_split(train_dst, [train_len, val_len]) else: # don't use cross_val val_dst = dataset(root=opts.data_root, train=False, transform=val_transform, labels=list(labels), labels_old=list(labels_old), idxs_path=path_base + f"/val-{opts.step}.npy", masking=not opts.no_mask, overlap=True) image_set = 'train' if opts.val_on_trainset else 'val' test_dst = dataset(root=opts.data_root, train=opts.val_on_trainset, transform=val_transform, labels=list(labels_cum), idxs_path=path_base + f"/test_on_{image_set}-{opts.step}.npy") return train_dst, val_dst, test_dst, len(labels_cum)
21,097
def strip_translations_header(translations: str) -> str: """ Strip header from translations generated by ``xgettext``. Header consists of multiple lines separated from the body by an empty line. """ return "\n".join(itertools.dropwhile(len, translations.splitlines()))
21,098
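A small example for strip_translations_header above: everything before the first blank line (the xgettext header) is dropped, so the output starts at that blank line.

catalog = (
    'msgid ""\n'
    'msgstr "Project-Id-Version: demo\\n"\n'
    '\n'
    'msgid "Hello"\n'
    'msgstr "Hallo"'
)

print(strip_translations_header(catalog))
# prints a leading blank line, then:
# msgid "Hello"
# msgstr "Hallo"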
def test_safety_interlock_during_init(switch_driver, caplog):
    """
    Check that a warning is logged when the instrument is initialized with a
    module in the safety interlock state. This test has to be placed first if
    the scope is set to be "module".
    """
    msg = [
        x.message
        for x in caplog.get_records('setup')
        if x.levelno == logging.WARNING
    ]
    assert "safety interlock" in msg[0]
21,099