content
stringlengths
35
416k
sha1
stringlengths
40
40
id
int64
0
710k
def filter_fires(im):
    """Earth engine QA filter for fires.

    Masks a burned-area image so only pixels with a plausible burn date
    and an acceptable QA value remain.

    :param im: image with "BurnDate" and "QA" bands (Earth Engine image).
    :return: the input image with the combined validity mask applied.
    """
    burn_dates = im.select("BurnDate")
    # Valid day-of-year burn dates fall in 1..366.
    valid_dates = burn_dates.gt(0).And(burn_dates.lt(367))
    valid_qa = im.select("QA").lte(4)  # keep QA values 1-4 (5 is detection over agricultural areas)
    mask = valid_dates.And(valid_qa)
    return im.updateMask(mask)
8e5d71058d39e8b5cf4c78f0e50db80d36d343cc
38,131
def attack(encrypt_oracle, decrypt_oracle, iv, c, t):
    """
    Uses a chosen-ciphertext attack to decrypt the ciphertext.
    :param encrypt_oracle: the encryption oracle
    :param decrypt_oracle: the decryption oracle
    :param iv: the initialization vector
    :param c: the ciphertext
    :param t: the tag corresponding to the ciphertext
    :return: the plaintext
    """
    # Prepend a zero block so the target material (iv || c) becomes part of
    # the plaintext handed to the encryption oracle.
    p_ = bytes(16) + iv + c
    iv_, c_, t_ = encrypt_oracle(p_)
    c__ = iv + c
    # NOTE(review): c_[-32:-16] is presumably the 16-byte tag slice for the
    # re-encrypted message -- confirm against the oracle's output layout.
    p__ = decrypt_oracle(iv_, c__, c_[-32:-16])
    # Strip the leading zero block to recover the original plaintext.
    return p__[16:]
a7ad7b0b3a73c3af9711a51e45a4f2659527f65a
38,132
def word_overlap(left_words, right_words):
    """Return the Jaccard similarity between two sets of topic words.

    Note
    ----
    The topics are considered sets of words, and not distributions.

    Parameters
    ----------
    left_words : set
        The set of words for first topic
    right_words : set
        The set of words for the other topic

    Returns
    -------
    jaccard_similarity : float
    """
    shared = left_words & right_words
    combined = left_words | right_words
    return len(shared) / len(combined)
3baa3ec5605bef4658815ac4d546539c480ae4b5
38,133
def shorten(name, max_len=10):
    """Shorten a name and add an ellipsis if it exceeds *max_len*.

    Generalized: the previously hard-coded limit of 10 is now a parameter
    with the same default, so existing callers are unaffected.

    :param name: string to shorten.
    :param max_len: maximum number of characters kept before the ellipsis.
    :return: the (possibly truncated) name.
    """
    if len(name) > max_len:
        name = name[:max_len] + '...'
    return name
3b2a0bcdff21d41ff28b99dfbd833cc3e6d7d497
38,134
import os
import sys


def getPythonCommand():
    """
    Method to get the prefered python command.
    @ In, None
    @ Out, pythonCommand, str, the name of the command to use.
    """
    # Windows conda/python.org installs expose "python"; elsewhere the
    # currently running interpreter is the safest choice (run_tests and
    # raven_framework already honour PYTHON_COMMAND before reaching here).
    return "python" if os.name == "nt" else sys.executable
be40fe72fd0c3ade3c3e8f4691a433c9d0191c2a
38,135
import argparse


def parse_args():
    """Parse command line arguments"""
    options = (
        ('-w', '--watson', 'watson (top strand) .vcf file input.'),
        ('-c', '--crick', 'crick (bottom strand) .vcf file input.'),
        ('-o', '--output', 'output custom tabular format'),
    )
    parser = argparse.ArgumentParser(description='Process input files')
    # All options share the same shape: optional string, default None.
    for short_flag, long_flag, help_text in options:
        parser.add_argument(short_flag, long_flag, type=str, default=None,
                            help=help_text)
    return parser.parse_args()
fbe7583ab1f4d9302209d546730254644da24d61
38,137
def convert_weight(val, old_scale="kg", new_scale="pound"):
    """
    Convert from a weight scale to another one among kg, gram, and pound.

    Parameters
    ----------
    val: float or int
        Value of the weight to be converted expressed in the original scale.
    old_scale: str
        Original scale from which the weight value will be converted.
        Supported scales are Kilogram ['Kilogram', 'kilogram', 'kg'],
        Gram ['Gram', 'gram', 'gr'] or Pound ['Pound', 'pound', 'pd'].
    new_scale: str
        New scale to which the weight value will be converted.
        Supported scales are the same as for old_scale.

    Raises
    ------
    AttributeError
        If either of the scales is not one of the supported ones.
        (Docstring fix: the code has always raised AttributeError, not
        NotImplementedError as previously documented.)

    Returns
    -------
    res: float
        Value of the converted weight expressed in the new scale.
    """
    # Convert from 'old_scale' to kg.
    old = old_scale.lower()
    if old in ('kilogram', 'kg'):
        temp = val
    elif old in ('gram', 'gr'):
        temp = val / 1000.0
    elif old in ('pound', 'pd'):
        temp = 0.4535924 * val
    else:
        raise AttributeError(
            f'{old_scale} is unsupported. kg, gr, and pound are supported')
    # ...and from kg to 'new_scale'.
    new = new_scale.lower()
    if new in ('kilogram', 'kg'):
        result = temp
    elif new in ('gram', 'gr'):
        result = 1000 * temp
    elif new in ('pound', 'pd'):
        result = temp / 0.4535924
    else:
        raise AttributeError(
            f'{new_scale} is unsupported. kg, gr, and pound are supported')
    return result
d38fed48ac998b8c21b8dd25fb497479d04e899a
38,138
def list_filter(data, sequence):
    """Filter a question-bank list, keeping only entries whose index is in *sequence*.

    (Docstring translated from Chinese; the index-loop over
    ``data.__len__()`` is replaced by the idiomatic ``enumerate``
    comprehension -- same result.)

    :param data: the list to filter.
    :param sequence: collection of indices to keep.
    :return: new list with only the selected entries, original order kept.
    """
    return [item for index, item in enumerate(data) if index in sequence]
9381fcdbdc6c21c5a9a062fe02576ef0c43ee5a3
38,139
import itertools


def NumOfNonTrivialPairs(n):
    """Count non-trivial pairings of n items into two halves.

    Example: (a,b)(c,d) or (a,c)(b,d) are trivial, but (a,d)(b,c) is not.

    Bug fix: the original used Python 2 integer division (``n/2``), which
    yields floats on Python 3 and breaks ``range``/``combinations``; all
    divisions are now floor divisions.

    :param n: even number of items.
    :return: number of non-trivial pairings (int).
    """
    half = n // 2
    assert n == half * 2, "n must be even"
    num = 0
    for seta in itertools.combinations(range(n), half):
        setb = [i for i in range(n) if i not in seta]
        altb = [True] * half
        for i in range(half):
            if seta[i] < setb[i]:
                altb[i] = False
        # Mixed orientation => the split is non-trivial.
        if any(altb) and not all(altb):
            num += 1
    # Each split is counted twice (once per side), so halve the total.
    return num // 2
27e1f5caa13431a68e1211f695ae9c6cb1f43564
38,140
def is_chinese(target_str):
    """
    determine whether the string contains any Chinese character

    Args:
        target_str (str): target string
    """
    # CJK Unified Ideographs block: U+4E00 .. U+9FFF.
    return any('\u4e00' <= ch <= '\u9fff' for ch in target_str)
ee75d9ea6d6e396964f511c10eaeeaf96a836aaa
38,141
from pathlib import Path


def find_in_nbs(fullname, path=None):
    """ lookup a notebook file to import """
    exact = '/nbs/' + fullname + '.ipynb'
    # allow import Notebook_Name for Notebook Name.ipynb
    candidates = (exact, exact.replace("_", " "))
    for candidate in candidates:
        if Path(candidate).is_file():
            return candidate
    return None
9e2009311e53c964972ae23eb47c2382e1e0cb57
38,142
import sys


def _environment():
    """Collect some useful system information"""
    # stdout may have no encoding (e.g. when redirected); fall back to the
    # filesystem encoding in that case.
    encoding = sys.stdout.encoding or sys.getfilesystemencoding()
    return {
        'os': sys.platform,
        'pyversion': format(sys.hexversion, 'x'),
        'encoding': encoding,
    }
71caf742e766b9f9937243977c0f2093a129b9a4
38,143
def format_record(record):
    """Format float values to high precision, not in exponential form."""
    formatted = []
    for value in record:
        # Fixed-point with 15 decimals; non-floats pass through untouched.
        formatted.append(f'{value:.15f}' if isinstance(value, float) else value)
    return formatted
a4b6b25ac429129d843ef59b25d9ae9fc7531969
38,144
import math


def inverseJukesCantor(d):
    """Takes a substitution distance and calculates the number of expected
    changes per site (inverse jukes cantor)

    d = -3/4 * log(1 - 4/3 * p)
    exp(-4/3 * d) = 1 - 4/3 * p
    4/3 * p = 1 - exp(-4/3 * d)
    p = 3/4 * (1 - exp(-4/3 * d))
    """
    assert d >= 0.0
    decay = math.exp(-4.0 * d / 3.0)
    return 0.75 * (1 - decay)
648f091d2a8daf0b41cf939c007e50f6c0eef52a
38,145
def cnvtol(self, lab="", value="", toler="", norm="", minref="", **kwargs):
    """Sets convergence values for nonlinear analyses.

    APDL Command: CNVTOL

    Parameters
    ----------
    lab
        Valid convergence labels. If STAT, list the status of the currently
        specified criteria.

    value
        Typical reference value for the specified convergence label (Lab).

    toler
        Tolerance; defaults to 0.005 (0.5%) for force and moment, 1.0E-4
        (0.01%) for DVOL, 0.05 (5%) for displacement when rotational DOFs
        are not present, and 0.05 (5%) for HDSP.

    norm
        Specifies norm selection:

        2 - L2 norm (check SRSS value). Default, except for Lab = U.

        1 - L1 norm (check absolute value sum).

        0 - Infinite norm (check each DOF separately). Default for Lab = U.

    minref
        The minimum value allowed for the program calculated reference
        value. If negative, no minimum is enforced. Used only if VALUE is
        blank. Defaults to 0.01 for force, moment, and volume convergence,
        1.0E-6 for heat flow, 1.0E-12 for VLTG and CHRG, 1.0E-6 for HDSP,
        and 0.0 otherwise.

    Notes
    -----
    This command is usually not needed because the default convergence
    criteria are sufficient for most nonlinear analyses. In rare cases, you
    may need to use this command to diagnose convergence difficulties.

    Values may be set for the degrees of freedom (DOF) and/or the
    out-of-balance load for the corresponding forcing quantities.

    Issuing CNVTOL to set a convergence criterion for a specific
    convergence label (Lab) does not affect the convergence criterion for
    any other label. All other convergence criteria will remain at their
    default setting or at the value set by a previous CNVTOL command.

    When the GUI is on, if a "Delete" operation in a Nonlinear Convergence
    Criteria dialog box writes this command to a log file (Jobname.LOG or
    Jobname.LGW), you will observe that Lab is blank, VALUE = -1, and TOLER
    is an integer number. In this case, the GUI has assigned a value of
    TOLER that corresponds to the location of a chosen convergence label in
    the dialog box's list. It is not intended that you type in such a
    location value for TOLER in an ANSYS session. However, a file that
    contains a GUI-generated CNVTOL command of this form can be used for
    batch input or with the /INPUT command.

    Convergence norms specified with CNVTOL may be graphically tracked
    while the solution is in process using the ANSYS program's Graphical
    Solution Tracking (GST) feature. Use the /GST command to turn GST on or
    off. By default, GST is ON for interactive sessions and OFF for batch
    runs.

    This command is also valid in PREP7.
    """
    # Assemble the comma-separated APDL command from the raw arguments.
    fields = ("CNVTOL", lab, value, toler, norm, minref)
    command = ",".join(str(field) for field in fields)
    return self.run(command, **kwargs)
632a895db755cdf23c88b1df88700cb9c0529f69
38,147
import os


def setup_dirs(config):
    """Set dirs from config, because now we doesn't use hydra. Only for demo notebook"""
    task = config.data.task_name
    # model_save_dir: <abs model_dir>/<model name>/<task>
    config.model_dir = os.path.join(
        os.path.abspath(config.model_dir),
        config.model.model_name_or_path,
        task,
    )
    # output_dir: <abs output_dir>/<task>
    config.output_dir = os.path.join(os.path.abspath(config.output_dir), task)
    # cache_dir: absolute path only
    config.cache_dir = os.path.abspath(config.cache_dir)
    for directory in (config.model_dir, config.output_dir, config.cache_dir):
        os.makedirs(directory, exist_ok=True)
    return config
b2af330b9bf8fdcf7e1db3dbf3a7c3f22f7beaed
38,148
from typing import Any


def find_key(d: dict, key: str) -> Any:
    """Finds a key nested arbitrarily deeply inside a dict.

    Principally useful since the structure of NRPE relation data is not
    completely reliable. Returns ``None`` when the key is absent.

    Bug fix: the original tested ``if val:`` on the recursive result, so a
    nested value of ``0``, ``""`` or ``False`` was treated as "not found".
    Only ``None`` now means absent (a genuinely stored ``None`` is still
    indistinguishable from a missing key).
    """
    if key in d:
        return d[key]
    for child in d.values():
        if not isinstance(child, dict):
            continue
        val = find_key(child, key)
        if val is not None:
            return val
    return None
e6b176450d25ea1e194019c7d4bdb85d500488ae
38,149
def gather_results(detectors):
    """Run each detector and collect its output.

    Execute the ``compute`` method of every given detector, then store the
    value of ``getDetectorOutput()`` under the key ``getName()`` in the
    returned dictionary.

    :param detectors: iterable of detector objects.
    :return: dict mapping detector name to detector output.
    """
    collected = {}
    for detector in detectors:
        detector.compute()  # side effect: populates the detector's output
        collected[detector.getName()] = detector.getDetectorOutput()
    return collected
10366cc7880474f54d093c5f4bd8c11b4b454aab
38,150
def _find_label_rows(sheet):
    """Search excel file column A for cells containing 'Label'.

    Return a list of zero-indexed rows.
    """
    return [row for row in range(sheet.nrows)
            if "Label" in sheet.cell_value(row, 0)]
5515874500c5ef514df02019e745d609b0474b2f
38,151
def get_request_data(request, keys):
    """For HTTP functions.

    Collect *keys* from the request's JSON body first, falling back to the
    query parameters; keys found in neither map to ``None``.

    Bug fix: ``request.get_json()`` returns ``None`` when there is no JSON
    body, which made ``key in json_data`` raise ``TypeError``; it is now
    normalised to an empty dict.
    """
    data = {key: None for key in keys}
    json_data = request.get_json() or {}
    param_data = request.args
    for key in keys:
        if key in json_data:
            data[key] = json_data[key]
        elif key in param_data:
            data[key] = param_data[key]
    return data
1904bd3083a764b7fa85199d11176423f2143775
38,153
import argparse


def _parse_argument():
    """Return arguments for Model Freezer for NeuroPilot Model Hub."""
    specs = (
        ('--in_path', 'Path to input checkpoint.', 'model.ckpt'),
        ('--out_path', 'Path to the output pb.', 'model.pb'),
        ('--out_nodes', 'Output node names.', 'generator/add_308'),
    )
    parser = argparse.ArgumentParser(
        description='Model Freezer for NeuroPilot Model Hub.',
        formatter_class=argparse.ArgumentDefaultsHelpFormatter
    )
    # NOTE: defaults are never used while required=True, but they still
    # appear in --help via ArgumentDefaultsHelpFormatter (as originally).
    for flag, help_text, default in specs:
        parser.add_argument(flag, help=help_text, type=str,
                            default=default, required=True)
    return parser.parse_args()
a1d1505223522e7dfb15a92e8975e1d5f71df044
38,155
import hashlib


def hash_all(strs, digest=None):
    """Returns a hash of the concatenation of all the strings in strs.

    If a hashlib message digest is not supplied a new sha1 message digest
    is used.
    """
    if not digest:
        digest = hashlib.sha1()
    for chunk in strs:
        digest.update(chunk)
    return digest.hexdigest()
585496aaae534d24cba512482765aeb9250ef6b8
38,156
import socket
import struct


def parse_ipv6(addr):
    """ Return a numeric representation of the given IPv6 address. """
    packed = socket.inet_pton(socket.AF_INET6, addr)
    # Network byte order: two big-endian 64-bit halves.
    upper, lower = struct.unpack('!QQ', packed)
    return (upper << 64) | lower
6456d02ae7b4b5eadd2666fad7308ef5547b98dc
38,157
import os
import shutil


def delete_folder(path):
    """
    Deletes the folder from the given path

    :param path: path to folder to delete
    :return: True if the directory was successfully deleted, False otherwise

    Bug fix: the original returned ``os.path.isdir(path)`` after the
    removal, i.e. ``False`` on success -- the exact opposite of the
    documented contract. Success now means the directory no longer exists.
    """
    if not os.path.isdir(path):
        return False
    shutil.rmtree(path)
    return not os.path.isdir(path)
aa40026fe7c36e8eba6a5dddd40744ad9380056d
38,158
def get_protein(chain):
    """ Get protein (residues without hetero-residues). """
    residues = []
    for residue in chain.get_residues():
        # full_id[3] is the residue id tuple; its first field is the
        # hetero flag -- a single space marks a standard residue.
        hetero_flag = residue.full_id[3][0]
        if hetero_flag == ' ':
            residues.append(residue)
    return residues
f5d20e0a7edf15f3a90a4b2c3f889211cb44d87d
38,159
import zlib


def text_decompress(text) -> str:
    """Decompress text.

    Args:
        text (str or bytes): text to decompress. ``bytes`` input is assumed
            to be zlib-compressed; ``str`` input is returned unchanged.

    Returns:
        str: the decompressed text
    """
    # isinstance is the idiomatic type test and, unlike comparing
    # type(...).__name__, also accepts bytes subclasses.
    if isinstance(text, bytes):
        return zlib.decompress(text).decode()
    return text
933d8fe5ac4f615b60831551deee80fc429171ac
38,160
def is_rect_intersection(minlat, maxlat, minlon, maxlon, latitude, longitude):
    """
    Checks whether a latitude/longitude point lies inside a rectangular
    boundary. Bounds that are ``None`` are treated as unconstrained.

    (Docstring fix: the original text described a "radial intersection
    between a point radius boundary", which is a different check; the code
    has always performed a rectangular bounds test.)

    :param minlat: minimum rectangular latitude, or None (float)
    :param maxlat: maximum rectangular latitude, or None (float)
    :param minlon: minimum rectangular longitude, or None (float)
    :param maxlon: maximum rectangular longitude, or None (float)
    :param latitude: latitude of the point to check (float)
    :param longitude: longitude of the point to check (float)
    :return: True if the point is inside the (possibly open) rectangle
    """
    if minlat is not None and float(minlat) > float(latitude):
        return False
    if minlon is not None and float(minlon) > float(longitude):
        return False
    if maxlat is not None and float(maxlat) < float(latitude):
        return False
    if maxlon is not None and float(maxlon) < float(longitude):
        return False
    return True
39f872e74a9cf6d77521a5a5666bf8701662ba0c
38,161
import numpy


def vectorspaced(document, all_terms):
    """
    Build the weight vector of *document* over *all_terms*.

    :param document: document object
    :type document: TrainingDocument
    :returns: numpy array of term weights (0 for terms absent from the
        document)
    """
    weights = document.termsWithWeights
    return numpy.array([weights.get(term, 0) for term in all_terms])
e95c4b90818e77df874377b1f9853e5b85d78805
38,162
from datetime import datetime
import time


def rddToFileName(prefix, suffix, timestamp):
    """
    Return string prefix-time(.suffix)

    >>> rddToFileName("spark", None, 12345678910)
    'spark-12345678910'
    >>> rddToFileName("spark", "tmp", 12345678910)
    'spark-12345678910.tmp'
    """
    if isinstance(timestamp, datetime):
        # Convert a datetime to epoch milliseconds.
        seconds = time.mktime(timestamp.timetuple())
        timestamp = int(seconds * 1000) + timestamp.microsecond // 1000
    name = prefix + "-" + str(timestamp)
    if suffix is not None:
        name = name + "." + suffix
    return name
ecf09d0fc16b23b892635197c87495ad8aa42bdf
38,163
def get_word_by_id(word_id):  # noqa: E501
    """Find word by ID

    Returns a single word # noqa: E501

    :param word_id: ID of word to return
    :type word_id: str

    :rtype: Word
    """
    # Stub generated by the API scaffold -- real lookup not implemented yet.
    return 'do some magic!'
fbbb62d8f89637c304df0175c2846f098ae08c12
38,164
def group_com(mat_A, mat_B):
    """Compute the group commutator A B A^dagger B^dagger for two matrices A, B."""
    a_dagger = mat_A.T.conj()
    b_dagger = mat_B.T.conj()
    return mat_A @ mat_B @ a_dagger @ b_dagger
b84170a21fc85f7a3dc68014483137804661f3c0
38,165
import subprocess


def run_command(cmd):
    """
    Run a shell command

    The stdout is shown. Returns the error code

    :param cmd: command line, passed verbatim to the system shell.
    :return: the process exit status (int).

    .. warning:: ``shell=True`` hands *cmd* to the shell, so this must
       never be called with untrusted input (command injection risk).
    """
    rv = subprocess.call(cmd, shell=True)
    return rv
e761609a5338a0d5e5273d5cfcfbe7f923f5d4f6
38,167
import json


def parse_json(j):
    """
    Parse a restricted subset of JSON strings into Python dicts.

    Security fix: the original used ``eval`` with ``true``/``false``/``null``
    mapped by hand, which executes arbitrary Python expressions embedded in
    *j*. The stdlib JSON parser handles the documented input safely.
    """
    return json.loads(j)
27036ce0fd6adaedacf9cf81fadf12a14305939f
38,168
from pathlib import Path
import os


def obtain_base_dir():
    """Obtains the base directory to use

    Returns:
        A Path object
    """
    base = Path(os.path.expanduser("~") + "/tfbasemodels")
    # Create the directory on first use; existing dirs are fine.
    base.mkdir(exist_ok=True)
    return base
d6dbf240fceb09d7a5a70aed242df8e91a959d64
38,169
def compare_dict_keys(dict_a, dict_b, compare_keys):
    """Compare two dictionaries with the specified keys

    Only keys of *dict_a* that appear in *compare_keys* are checked; a key
    listed in *compare_keys* but absent from *dict_a* is silently ignored.
    """
    for key in dict_a:
        if key in compare_keys and dict_a[key] != dict_b[key]:
            return False
    return True
00de8cc97f8b56608575570150a97038ed61b997
38,170
def OverrideToImplementCustomLogic(obj):
    """Users should override this in their sub-classes to implement custom logic.

    Used in Trainer and Policy to tag methods that need overriding, e.g.
    `Policy.loss()`.
    """
    # Identity decorator: purely a documentation marker, no behavior change.
    return obj
3d3993be19e3f6315bfb7a77f1d9ef1946722eca
38,171
def get_index(sheet, *names):
    """
    Returns the column index for the first matching name, assuming first row
    is header

    Matching is done with leading and trailing whitespace stripped, and case
    insensitive
    """
    # Normalise the header row once; empty cells become "".
    header = [(cell.value or "").strip().lower() for cell in sheet[1]]
    for candidate in names:
        try:
            return header.index(candidate.strip().lower())
        except ValueError:
            continue
    raise AttributeError(f"None of {names} found in header {header}")
45af5638cc66fb51a4addbf8ecec27ffadd2cf09
38,174
import sys
import os


def mainopt_mac_uk(i):
    """<from-format> [<text>] Speak text in Mac OS 10.7+ British voices while using a lexicon converted in from <from-format>. As these voices do not have user-modifiable lexicons, lexconvert must binary-patch your system's master lexicon; this is at your own risk! (Superuser privileges are needed the first time. A backup of the system file is made, and all changes are restored on normal exit but if you force-quit then you might need to restore the backup manually. Text speaking needs to be under lexconvert's control because it usually has to change the input words to make them fit the available space in the binary lexicon.) By default the Daniel voice is used; Emily or Serena can be selected by setting the MACUK_VOICE environment variable."""
    # NOTE: the docstring above doubles as the CLI help text, so it is kept
    # verbatim. lexFormats, get_macuk_lexicon, getInputText and
    # MacBritish_System_Lexicon are defined elsewhere in lexconvert.
    # If you have xterm etc, then text will also be printed, with words from the altered lexicon underlined.
    assert sys.version_info[0]==2, "--mac-uk has not been tested with Python 3, I don't want to risk messing up your system files, please use Python 2"
    fromFormat = sys.argv[i+1]
    if not fromFormat in lexFormats: return "No such format "+repr(fromFormat)+" (use --formats to see a list of formats)"
    lex = get_macuk_lexicon(fromFormat)
    try:
        for line in getInputText(i+2,"text",True):
            # Patch the system lexicon for this line, speak it, and always
            # restore the backup afterwards -- even if speaking fails.
            m = MacBritish_System_Lexicon(line,os.environ.get("MACUK_VOICE","Daniel"))
            try: m.readWithLex(lex)
            finally: m.close()
    except KeyboardInterrupt:
        sys.stderr.write("Interrupted\n")
04199be2cae16010323cadcf2ce2bab96d1bd20f
38,175
def get_min_move_unit(stock_code):
    """Get the minimum price-move unit of a security.

    Stocks tick at 0.01; ETFs, graded funds and the like tick at 0.001.
    (Docstring translated from Chinese.)
    """
    code = int(stock_code)
    # One-cent tick: Shenzhen main/SME board (codes below 100000),
    # Shanghai main board (600xxx) and ChiNext (300xxx).
    if code < 100000 or 600000 <= code <= 699999 or 300000 <= code <= 399999:
        return 0.01
    return 0.001
2d1173bde9935bddd907fd2c2254674508f30104
38,179
def _hex_to_triplet(h):
    """Convert an hexadecimal color to a triplet of int8 integers."""
    if h.startswith('#'):
        h = h[1:]
    # Two hex digits per channel, in R, G, B order.
    red, green, blue = (int(h[i:i + 2], 16) for i in (0, 2, 4))
    return (red, green, blue)
e84b3de0d94eda11a63390cfd0448708bd69cc66
38,180
def create_routes(name: str) -> str:
    """
    Mutations Routes

    Render the Create/Update/Delete graphene mutation classes for the
    entity *name* as source code.

    Bug fix: the Delete mutation's generated handler read
    ``except Exception:`` followed by ``print(e)``, a NameError at runtime;
    it now binds the exception (``except Exception as e:``) like the other
    two handlers.

    :param name: entity name (lower-case; also the DataFrame variable name)
    :return: the generated source code as a string
    """
    routes = '''
class Create%s(graphene.Mutation):
    """ Create %s Record """

    class Arguments:
        createRecord = %sRecord(required=True)

    message = graphene.String()
    id = graphene.Int()

    @staticmethod
    def mutate(root, info, createRecord):
        try:
            data = jsonable_encoder(createRecord)
            data["id"] = int(%s["id"].max()) + 1
            print(data)
            column_order = list(%s.columns)
            data_new = {col:data[col] for col in column_order}
            %s.loc[len(%s.index)] = list(data_new.values())
            path = Path(str(%s_path))
            file_name = str(path.name).split(".")[0]
            %s.to_json(f"{path.parent.absolute()}{sep}{file_name}.json",orient="records",indent=4)
            return Create%s(message="success",id=data["id"])
        except Exception as e:
            print(e)
            raise GraphQLError("Internal Server Error")


class Update%s(graphene.Mutation):
    """ Update %s Record """

    class Arguments:
        id = graphene.Int(required=True)
        updateRecord = %sRecord(required=True)

    message = graphene.String()

    @staticmethod
    def mutate(root, info, id, updateRecord):
        try:
            data = jsonable_encoder(updateRecord)
            %s.loc[%s['id'] == int(id), list(data.keys())] = list(data.values())
            path = Path(str(%s_path))
            file_name = str(path.name).split(".")[0]
            %s.to_json(f"{path.parent.absolute()}{sep}{file_name}.json",orient="records",indent=4)
            return Update%s(message="success")
        except Exception as e:
            print(e)
            raise GraphQLError("Internal Server Error")


class Delete%s(graphene.Mutation):
    """ Delete %s """

    class Arguments:
        id = graphene.Int(required=True)

    message = graphene.String()

    @staticmethod
    def mutate(root, info, id):
        try:
            %s.drop(%s[%s['id'] == int(id)].index, inplace = True)
            path = Path(str(%s_path))
            file_name = str(path.name).split(".")[0]
            %s.to_json(f"{path.parent.absolute()}{sep}{file_name}.json",orient="records",indent=4)
            return Delete%s(message="success")
        except Exception as e:
            print(e)
            message = "failed"
            raise GraphQLError("Internal Server Error")
''' % (
        name.title(), name.title(), name.title(), name, name,
        name, name, name, name, name.title(),
        name.title(), name, name.title(), name, name,
        name, name, name.title(), name.title(), name,
        name, name, name, name, name,
        name.title())
    return routes
d228cc9839e27a9ce72fa3152e86f6797d07319d
38,181
def get_json(self, request): """ Retrieves the loaded JSON information from the request this method assumes that the request is properly formed and that the header information is set in accordance with JSON. :type request: Request :param request: The request to be used. :rtype: Object :return: The object that represents the parsed JSON information that was passed inside the request data. """ # processes (and retrieves) the data map from the # request and then tries to retrieves the JSON # data from it in case it does not exists the complete # maps is returned as the JSON value form_data_map = self.process_json_data(request) json_v = form_data_map.get("root", form_data_map) return json_v
a170154e0a166a495f60246158b15267dc768772
38,185
def unique_color_from_identifier(identifier):
    """Return unique color as RGB tuple.

    Useful for creating PNG images where each color is used as an
    identifier.

    Raises TypeError if the identifier is not an integer.
    Raises ValueError if the identifier is not in the range 0 to 16777215
    inclusive.

    :param identifier: positive integer in range from 0 to 16777215 inclusive
    :raises: TypeError, ValueError
    :returns: RGB tuple
    """
    if not isinstance(identifier, int):
        raise(TypeError("Identifier is not an integer {}".format(identifier)))
    if identifier < 0:
        raise(ValueError("Negative identifier not allowed"))
    if identifier >= 256*256*256:
        raise(ValueError("Identifier {} >= {}".format(identifier, 256*256*256)))
    # Decompose into base-256 digits: red is the most significant byte.
    red, remainder = divmod(identifier, 256 * 256)
    green, blue = divmod(remainder, 256)
    return (red, green, blue)
dcf3555c95e6799c1d9042c2342d9181e44d56cd
38,186
def calc_tstop(num_bins, binsize, t_start):
    """
    Calculates the stop point from given parameter.

    Calculates the stop point :attr:`t_stop` from the three parameter
    :attr:`t_start`, :attr:`num_bins` and :attr:`binsize`.

    Parameters
    ----------
    num_bins: int
        Number of bins
    binsize: quantities.Quantity
        Size of bins
    t_start: quantities.Quantity
        Start time

    Returns
    -------
    t_stop : quantities.Quantity
        Stoping point calculated from given parameter.
        Returns None implicitly when any argument is None.
    """
    if num_bins is not None and binsize is not None and t_start is not None:
        # Rescale t_start to the binsize units so the addition is unit-safe.
        return t_start.rescale(binsize.units) + num_bins * binsize
b8e6e7fc3cb92b8f505f757d2e75259d9a208fa8
38,187
from re import search
from dateutil.parser import parse


def detect_date(token: str):
    """
    Attempts to convert string to date if found

    mask_test = r'(Jan(?:uary)?|Feb(?:ruary)?|Mar(?:ch)?|Apr(?:il)?|May|Jun(?:e)?|Jul(?:y)?|Aug(?:ust)?|Sep(?:tember)?|Oct(?:ober)?|Nov(?:ember)?|Dec(?:ember)?)\s+(\d{1,2})\s+(\d{4})'

    Returns a datetime.date when any mask matches and dateutil can parse
    the token; otherwise returns None.
    """
    token = token.lower()
    # Feb 2010  (abbreviated and full month names followed by a day number)
    mask1 = r"((jan)|(feb)|(mar)|(apr)|(may)|(jun)|(jul)|(aug)|(sep)|(oct)|(nov)|(dec))\s([1-9]|([12][0-9])|(3[04]))"
    mask4 = r"((january)|(february)|(march)|(april)|(may)|(june)|(july)|(august)|(september)|(october)|(november)|(december))\s([1-9]|([12][0-9])|(3[04]))"
    date1 = search(mask1, token)
    # 12-09-1991  (day-first numeric dates with leap-year handling;
    # NOTE(review): the alternation repeats 'january' where 'june' would be
    # expected -- kept verbatim, confirm against the original project)
    mask2 = r'(?:(?:31(\/|-|\.)(?:0?[13578]|1[02]|(?:january|march|may|july|august|october|december)))\1|(?:(?:29|30)(\/|-|\.)(?:0?[1,3-9]|1[0-2]|(?:january|march|april|may|january|july|august|september|october|november|december))\2))(?:(?:1[6-9]|[2-9]\d)?\d{2})$|^(?:29(\/|-|\.)(?:0?2|(?:february))\3(?:(?:(?:1[6-9]|[2-9]\d)?(?:0[48]|[2468][048]|[13579][26])|(?:(?:16|[2468][048]|[3579][26])00))))$|^(?:0?[1-9]|1\d|2[0-8])(\/|-|\.)(?:(?:0?[1-9]|(?:january|february|march|april|may|june|july|august|september))|(?:1[0-2]|(?:october|november|december)))\4(?:(?:1[6-9]|[2-9]\d)?\d{2})$'
    date2 = search(mask2, token)
    # 09/2020, 09-2020, 09.2020 dates
    mask3 = r'[0-9]{2}(-|.|/)[0-9]{4}'
    date3 = search(mask3, token)
    date4 = search(mask4, token)
    if date1 or date2 or date3 or date4:
        try:
            # Case with 1999 2003 is faulty -> gives mask as 99 2003
            if len(token.split(' ')[0]) == len(token.split(' ')[1]) == 4:
                token = token.split(' ')[0]
            date = parse(token).date()
            return date
        except Exception as e:
            print("Date Parse Error: {}".format(e))
    return None
ddc72ba38b6a2c757d6ce366a45e2a5a689e6b9c
38,188
def get_recall(rec, tru):
    """Recommendation recall: |{R & P}|/|P| (R - recommended products, P - relevant products)"""
    if not tru:
        # No relevant products: recall is defined as 0 here.
        return 0
    return len(rec & tru) / len(tru)
83ec9f53a43a8d4f0b2d6174457cd8a7937a1bed
38,189
import json


def load_config(config_files):
    """
    loads json configuration files
    the latter configs overwrite the previous configs
    """
    merged = {}
    for path in config_files:
        with open(path, 'rt') as handle:
            merged.update(json.load(handle))
    return merged
4a61ca063bf8147a0f2576cddc8bf438b33f8792
38,192
def lex_next_bits(i):
    """Return next number with same number of set bits as i.

    Classic "Gosper's hack" bit trick.

    Bug fix: the original used true division (``/``) for the final shift,
    which yields a float on Python 3 and corrupts the bit pattern; it is
    now integer division.
    """
    # i = xxx0 1111 0000
    smallest = i & -i              # 0000 0001 0000
    ripple = i + smallest          # xxx1 0000 0000
    ones = i ^ ripple              # 0001 1111 0000
    ones = (ones >> 2) // smallest # 0000 0000 0111
    return ripple | ones           # xxx1 0000 0111
72cf874bdf4045bf9c9d0d03a0016c48df469df6
38,193
import os


def read_tracks(filename):
    """Read the text file and load it into a dictionary with frame number as
    the keys and the objects and its positions as values.

    Parameters
    ----------
    filename : str
        The directory of the video whose tracking data should be shown.
        The text file will have the same name but '.txt' extention

    See Also
    --------
    read_tracks : It loads the tracking data into a dictionary.
    """
    # input : video name
    filedir,filename = os.path.split(filename)
    tracking_data = {}
    with open(os.path.join(filedir,filename+'.txt'),mode='r') as f:
        while True:
            line = f.readline().split()
            # Stop at the first blank line (also triggers at end of file).
            if len(line)<1:
                break
            frame_id = int(line[0]) # frame_id
            class_id = int(line[5])
            track_id = int(line[6])
            # NOTE(review): "angel" is presumably "angle"; kept as-is.
            angel = int(line[7])
            # Columns 1-4 carry bracketed "[x, y," style values; the slicing
            # strips the surrounding punctuation before int conversion.
            box = [int(line[1][1:-1]),int(line[2][:-1]),int(line[3][:-1]),int(line[4][:-1])]
            if frame_id in tracking_data: # box is x,y,w,h
                tracking_data[frame_id].append((box,class_id,track_id,angel))
            else:
                tracking_data[frame_id] = [(box,class_id,track_id,angel)]
    return tracking_data
53935da33cfe5fed488a68ea5e0a38b66a3df517
38,194
from typing import Optional


def askyn(question: str, default: Optional[bool] = None) -> bool:
    """
    Asks a yes or no question and returns a bool.

    REF: https://gist.github.com/garrettdreyfus/8153571

    :param question: prompt shown to the user (a [y/n] suffix is appended,
        with the default capitalised when one is given).
    :param default: value returned for an empty reply; None forces the user
        to answer explicitly.
    :return: True for yes, False for no.
    """
    # Modify the question with the default value capitalized
    if default is not None:
        if default:
            question += " [Y/n]: "
        else:
            question += " [y/N]: "
    else:
        question += " [y/n]: "
    # Iterate until an answer is determined
    while True:
        reply = str(input(question).lower().strip())
        if reply == "" and default is not None:
            return default
        elif reply in ("y", "yes"):
            return True
        if reply in ("n", "no"):
            return False
        else:
            # Unknown reply: re-prompt.
            print(f"Unrecognized answer: '{reply}'")
f038ffce000e5c39d707dd61d3f1967195df4e6d
38,195
import torch


def box_center_to_corners(b):
    """
    Converts a set of oriented bounding boxes from centered representation
    (x_c, y_c, w, h, theta) to corner representation (x0, y0, ..., x3, y3).

    Arguments:
        b (Tensor[N, 6]): boxes to be converted. They are expected to be in
            (x_c, y_c, w, h, c, s) format.
            * c, s: unnormalized cos, sin

    Returns:
        c (Tensor[N, 8]): converted boxes in (x0, y0, ..., x3, y3) format,
            where the corners are sorted counterclockwise.
    """
    # print(b.shape)
    x_c, y_c, w, h, c, s = b.unbind(-1)  # [N,]
    # print(x_c.shape)
    # NOTE(review): maps s from [0, 1] to [-1, 1] -- presumably the sin
    # channel is produced by a sigmoid upstream; confirm with the model.
    s = 2 * s - 1
    # center = torch.stack([x_c, y_c], dim=-1).repeat(1, 4)  # [N, 8]
    center = torch.stack([x_c, y_c, x_c, y_c, x_c, y_c, x_c, y_c], dim=-1)
    dx = 0.5 * w
    dy = 0.5 * h
    # Epsilons guard against a zero-length (c, s) vector before normalizing.
    c = c + 1e-5
    s = s + 1e-5
    cos = c / ((c ** 2 + s ** 2).sqrt() + 1e-10)
    sin = s / ((c ** 2 + s ** 2).sqrt() + 1e-10)
    dxcos = dx * cos
    dxsin = dx * sin
    dycos = dy * cos
    dysin = dy * sin
    # Rotated half-extent offsets for the four corners, counterclockwise.
    dxy = [
        -dxcos + dysin, -dxsin - dycos,
        dxcos + dysin, dxsin - dycos,
        dxcos - dysin, dxsin + dycos,
        -dxcos - dysin, -dxsin + dycos,
    ]
    return center + torch.stack(dxy, dim=-1)
0ab937e31fc8c2e67748b5d791a7061fa0fb70fe
38,196
from typing import List


def get_requirements(fname: str) -> List[str]:
    """Read this package's dependency list from requirements.txt.

    (Docstring translated from Japanese.)

    Args:
        fname (str): requirements.txt

    Returns:
        The list of packages this package depends on.
    """
    with open(fname) as fp:
        return [line.strip() for line in fp]
b2e22394f16eeda41ac34f31cdad6e3a3732da31
38,197
def make_iterator(it):
    """ Create iterator from iterable. """
    # Thin wrapper over the builtin; exists to give the operation a name.
    return iter(it)
ec683c4d109fd9afedc57b1a9476080400930122
38,199
from pathlib import Path


def get_datapath_base(data_type: str, filename: str) -> Path:
    """Return the path to the footprints test data file"""
    # NOTE(review): the trailing path component is the literal "(unknown)"
    # and the `filename` parameter is unused -- this looks like a lost
    # f-string substitution (likely `{filename}`); confirm against the
    # original project source before relying on this path.
    return Path(__file__).resolve(strict=True).parent.joinpath(f"../data/{data_type}/(unknown)")
9bccb92b1c4a5dbaa625b2fa52b3c77163104c11
38,200
from typing import Collection


def find_valid_words(dictionary: Collection[str], candidates: Collection[str]) -> Collection[str]:
    """Finds valid words from 'candidates' as found in the given words list.

    dictionary: the list to be used as a dictionary. Only strings in the
        dictionary are considered valid words
    candidates: strings to be tested for validity
    """
    # Set intersection keeps exactly the candidates present in the dictionary.
    return set(dictionary).intersection(candidates)
14706ca99787869d0eee1c77a929bc1f34cf2238
38,201
def matchingTest(vector):
    """
    input: a list of corr coeff scores from moments
    output: match or not
    criteria: all > 0.9 - yes
              all > 0.8, all but one > 0.9, four > .99 - yes
              else - no
    """
    above99 = sum(1 for score in vector if score > 0.99)
    above90 = sum(1 for score in vector if score > 0.90)
    above80 = sum(1 for score in vector if score > 0.80)
    if above90 == 6:
        return True
    return above90 == 5 and above99 >= 4 and above80 == 6
bfc0bd830948cb0c5f4f6342c699ef280d0c4480
38,202
def drop_column(df, columns_to_drop):
    """
    Removes columns from a DataFrame in place (via ``del df[name]``).

    Args:
        df (`pandas.DataFrame`): The dataframe to drop columns on
        columns_to_drop (:type:`list` of :type:`str`): A list of the columns
            to remove

    Returns:
        `pandas.DataFrame`: `df` with the provided columns removed
    """
    for column_name in columns_to_drop:
        del df[column_name]
    return df
1eadbf301aff80752c93ca4393910dfa19a76b3a
38,203
def match_children(node, node_function_dict):
    """Return a list of (node, function) pairs.

    Matches the children of the passed in node to parse functions in the
    passed in dictionary. The node tags are used as keys. Children whose
    tag has no entry are ignored.
    """
    matched = []
    for child in node:
        if child.tag in node_function_dict:
            matched.append((child, node_function_dict[child.tag]))
    return matched
1de01787a452b7784f6cefe7c37327fe6ce043d4
38,207
def makeScalarProduct(vector1, vector2):
    """Return the scalar (dot) product of the first three components
    of vector1 and vector2."""
    return sum(vector1[i] * vector2[i] for i in range(3))
1b8d4f3478dd16630336c02917fc60e3479f689d
38,208
def mini_mock():
    """Build a bare-bones stand-in class.

    Allows the tests to assert more accurately about the state of the
    object's properties than a full-blown mock allows.  The returned class
    accepts (and ignores) arbitrary keyword arguments on construction.
    """
    return type("MiniMock", (object,), {
        "__init__": lambda self, **kwargs: None,
    })
64d2c6a2377d495c53056421d93f11fe86de5e39
38,209
def add_commas(num: int) -> str:
    """Format an integer with thousands separators.

    - 1000 -> 1,000
    - 100000 -> 100,000
    - 1000000 -> 1,000,000

    Args:
        num (int): The number

    Returns:
        str: The number with commas
    """
    return f"{num:,}"
9f63a6389df5b46ebbe0dc7489d4967919da18d7
38,210
import subprocess
import os


def upscale_ngx(inpath, outpath, workingImage, settings):
    """Run the NGX image super-resolution executable on a texture.

    inpath / outpath: source and destination image paths.
    workingImage: unused here; kept for interface compatibility.
    settings: dict providing "NGX_ISR_Exe" and "NGX_ISR_ScalingFactor".

    Returns True when the output file exists afterwards, False otherwise.
    """
    command = [
        settings["NGX_ISR_Exe"],
        "--input", inpath,
        "--output", outpath,
        "--factor", str(settings["NGX_ISR_ScalingFactor"]),
    ]
    subprocess.call(command)
    if os.path.isfile(outpath):
        return True
    print("The image failed to generate at " + outpath)
    return False
acc3cb535b53ed76783803267c0590dba98cc55e
38,211
from typing import Iterable
from typing import List
import locale


def sorted_locale(iterable: Iterable[str], *, reverse: bool = False) -> List[str]:
    """Sort strings using the current locale's collation order.

    Parameters
    ----------
    iterable : iterable of str
        A list of strings.
    reverse : bool, default=False
        If ``True``, reverse the sorted result.

    Returns
    -------
    sorted_list : list
        The sorted list of strings.
    """
    # locale.strxfrm maps each string to a locale-aware sort key.
    collation_key = locale.strxfrm
    return sorted(iterable, key=collation_key, reverse=reverse)
e5092e7343989757ffe5fad9bfaf26637b6b5834
38,212
def gram_matrix(y):
    """Compute per-sample Gram matrices of a feature map.

    Flattens each channel map of ``y`` (shape (B, C, H, W)) to a vector,
    takes every pairwise channel inner product, and normalises by the
    number of elements per channel map times C.

    Returns a (B, C, C) tensor.
    """
    batch, channels, height, width = y.size()
    flat = y.view(batch, channels, width * height)
    gram = flat.bmm(flat.transpose(1, 2))
    return gram / (channels * height * width)
7026a6b6f42f10e1c6c1625a32c75c823c1bb57a
38,214
from math import sin, cos, sqrt, atan, radians


def calc_distance(position_start: tuple, position_end: tuple) -> float:
    """Approximate the distance between two positions given as (lat, lon).

    Uses a flattened-ellipsoid formula; returns 0.0 for coincident points.
    """
    flattening = 1 / 298.257223563
    equatorial_radius = 6378173
    lat_mean = radians((position_start[0] + position_end[0]) / 2.0)
    lat_half_diff = radians((position_start[0] - position_end[0]) / 2.0)
    lon_half_diff = radians((position_start[1] - position_end[1]) / 2.0)
    S = sin(lat_half_diff) ** 2 * cos(lon_half_diff) ** 2 + cos(lat_mean) ** 2 * sin(lon_half_diff) ** 2
    C = cos(lat_half_diff) ** 2 * cos(lon_half_diff) ** 2 + sin(lat_mean) ** 2 * sin(lon_half_diff) ** 2
    omega = atan(sqrt(S / C))
    if float(omega) == 0.0:
        # Identical points: avoid division by zero below.
        return 0.0
    sphere_distance = 2 * omega * equatorial_radius
    T = sqrt(S * C) / omega
    H_1 = (3 * T - 1) / (2 * C)
    H_2 = (3 * T + 1) / (2 * S)
    return sphere_distance * (1 + flattening * H_1 * sin(lat_mean) ** 2 * cos(lat_half_diff) ** 2 - flattening * H_2 * cos(lat_mean) ** 2 * sin(lat_half_diff) ** 2)
c330e7e0e45f7643c7c14c6b066a854a97bd196a
38,215
import pandas as pds


def same_missing_modality_tsv(file1, file2):
    """
    Function that is used to compare 2 TSV files generated by the iotool
    ComputeMissingModalities.

    Only the fields participant_id, pet_acq-AV45, pet_acq-FDG, t1w and
    func_task-rest are compared. Line order does not matter: rows are
    re-sorted by participant_id before comparison.

    Args:
        (string) file1: path to first tsv
        (string) file2: path to second tsv

    Returns:
        (bool) True if file1 and file2 contains the same information
    """
    # Read dataframe with pandas
    df1 = pds.read_csv(file1, sep="\t")
    df2 = pds.read_csv(file2, sep="\t")
    # Extract data and form lists for both files
    subjects1 = list(df1.participant_id)
    pet_AV45_1 = list(df1["pet_acq-AV45"])
    pet_FDG_1 = list(df1["pet_acq-FDG"])
    t1w1 = list(df1.t1w)
    func_task_rest1 = list(df1["func_task-rest"])
    subjects2 = list(df2.participant_id)
    pet_AV45_2 = list(df2["pet_acq-AV45"])
    pet_FDG_2 = list(df2["pet_acq-FDG"])
    t1w2 = list(df2.t1w)
    func_task_rest2 = list(df2["func_task-rest"])
    # Subjects are sorted in alphabetical order. The same permutation of
    # elements is applied on each column: zip/sort/unzip keeps every column
    # aligned with its (sorted) participant_id.
    subjects1_sorted, pet_AV45_1 = (list(t) for t in zip(*sorted(zip(subjects1, pet_AV45_1))))
    subjects2_sorted, pet_AV45_2 = (list(t) for t in zip(*sorted(zip(subjects2, pet_AV45_2))))
    subjects1_sorted, pet_FDG_1 = (list(t) for t in zip(*sorted(zip(subjects1, pet_FDG_1))))
    subjects2_sorted, pet_FDG_2 = (list(t) for t in zip(*sorted(zip(subjects2, pet_FDG_2))))
    subjects1_sorted, t1w1 = (list(t) for t in zip(*sorted(zip(subjects1, t1w1))))
    subjects2_sorted, t1w2 = (list(t) for t in zip(*sorted(zip(subjects2, t1w2))))
    subjects1_sorted, func_task_rest1 = (
        list(t) for t in zip(*sorted(zip(subjects1, func_task_rest1)))
    )
    subjects2_sorted, func_task_rest2 = (
        list(t) for t in zip(*sorted(zip(subjects2, func_task_rest2)))
    )
    # Test is positive when all the sorted lists are equal.
    # NOTE(review): '&' between the boolean comparison results behaves like
    # 'and' here. List equality treats NaN != NaN, so rows with missing
    # (NaN) cells would never compare equal — presumably missing values are
    # encoded as strings in these TSVs; TODO confirm against the iotool.
    return (
        (subjects1_sorted == subjects2_sorted)
        & (pet_AV45_1 == pet_AV45_2)
        & (pet_FDG_1 == pet_FDG_2)
        & (t1w1 == t1w2)
        & (func_task_rest1 == func_task_rest2)
    )
3d15797f9070af31c65bc9278cd95a1058e4061b
38,216
def find_common_parent(a, b):
    """Find a common parent for 2 elements.

    Returns the element itself when a == b, one of the two when it is an
    ancestor of the other, and otherwise the nearest shared ancestor
    (implicitly None when the elements share no ancestor).
    """
    ancestors_a = list(a.iterancestors())
    ancestors_b = list(b.iterancestors())
    set_a = set(ancestors_a)
    set_b = set(ancestors_b)

    if a == b:
        return a
    if b in set_a:
        return b
    if a in set_b:
        return a

    # Walk the shorter ancestor chain and return the first element that is
    # the other node or one of its ancestors.
    if len(ancestors_a) < len(ancestors_b):
        chain, other, other_set = ancestors_a, b, set_b
    else:
        chain, other, other_set = ancestors_b, a, set_a
    for candidate in chain:
        if candidate == other or candidate in other_set:
            return candidate
b99a865cde17f79b159d26528f5db97137d6c162
38,217
def parse_style(style_string: str) -> dict:
    """Parse a ``key:value;key:value`` style string into a dict.

    Values made up only of digits are converted to int; everything else is
    kept as a string.

    Args:
        style_string (str): semicolon-separated ``key:value`` pairs.

    Returns:
        dict: mapping of keys to int or str values.
    """
    parsed = {}
    for pair in style_string.split(';'):
        parts = pair.split(':')
        key, value = parts[0], parts[1]
        parsed[key] = int(value) if value.isdigit() else value
    return parsed
4aef99cdb82628ee03f183bf9d876032f80d226c
38,218
def simple_features():
    """Create a list of features representing a network.

    Three LineString features laid end-to-end along the x-axis from (0, 0)
    to (300, 0); each feature is 100 units in length.
    """
    features = []
    for edge_id in (1, 2, 3):
        start_x = (edge_id - 1) * 100
        features.append({
            "properties": {"EDGE_ID": edge_id},
            "geometry": {
                "type": "LineString",
                "coordinates": [(start_x, 0), (start_x + 100, 0)],
            },
        })
    return features
d9692e15ae9e4bb1828cc3035c38616847852a3b
38,219
def strip_schema_version(json_dict):
    """Remove the schema version key from a JSON dict, if present.

    :param json_dict: The JSON dict
    :type json_dict: dict
    :returns: The same dict (mutated in place) without its schema version
    :rtype: dict
    """
    json_dict.pop('version', None)
    return json_dict
2c5e7b5bfb401e1adef5479f0d787c2788d1d735
38,220
def get_machine_from_parent(self):
    """Get machine object from parent

    Walks up the ``parent`` chain looking first for an ancestor exposing a
    ``machine`` attribute (output.simu.elec.eec), then for one exposing
    ``simu`` (output.elec.eec).

    Parameters
    ----------
    self : EEC
        an EEC object

    Returns
    ----------
    machine : Machine
        a Machine object, or None when no suitable ancestor exists
    """
    def _climb(start, attribute):
        # Walk the parent chain until a node carrying ``attribute`` is found.
        node = start
        while node is not None and not hasattr(node, attribute):
            node = node.parent
        return node

    # Try to find simulation parent (output.simu.elec.eec)
    holder = _climb(self.parent, "machine")
    if holder is not None:
        return holder.machine
    # Try to find output parent (output.elec.eec)
    holder = _climb(self.parent, "simu")
    if holder is not None:
        return holder.simu.machine
    return None
82ccddac9f58727f36a9a78be741d2b5dba3369c
38,224
def sum_square_difference(ceiling):
    """Compute the difference between the square of the sum and the sum of
    squares of the natural numbers up to and including *ceiling*.
    """
    numbers = range(ceiling + 1)
    square_of_sum = sum(numbers) ** 2
    sum_of_squares = sum(n * n for n in numbers)
    return square_of_sum - sum_of_squares
5898969697c2c8500dda0d0ef9ca5f3e7125ff77
38,225
def sine(r, periods):
    """Sine-wave primitive.

    Returns a recorder callable that draws an alternating up/down wave of
    cubic curve segments across rectangle *r*, one segment per
    ``r.w / periods`` of width, and records the path onto the given pen.
    """
    def _record(pen):
        path = type(pen)()
        segment_w = r.w / periods
        cursor = r.point("SW")
        right_edge = r.point("SE")
        path.moveTo(cursor)
        rising = True
        while True:
            amplitude = r.h if rising else -r.h
            ctrl_a = cursor.offset(segment_w / 2, 0)
            ctrl_b = cursor.offset(segment_w / 2, amplitude)
            segment_end = cursor.offset(segment_w, amplitude)
            path.curveTo(ctrl_a, ctrl_b, segment_end)
            cursor = segment_end
            if cursor.x >= right_edge.x:
                break
            rising = not rising
        pen.record(path)
    return _record
b1fdee05797e9e7d3e398f639fd8ed2b7a10668c
38,226
import fnmatch


def fnmatch_all(names, patterns):
    """Determine whether all strings in `names` match at least one of the
    `patterns`, which should be shell glob expressions.
    """
    return all(
        any(fnmatch.fnmatch(name, pattern) for pattern in patterns)
        for name in names
    )
52be9c216d222fed331a836d529bc1399ba8e9b4
38,227
import sys


def ask_user(dapr_name, query_type, results):
    """Ask the user to choose from a list of results.

    Prints the numbered results, then prompts until the user enters a
    number in range.  0 (or EOF) means "skip": the full result list is
    returned unchanged; otherwise a one-element list with the chosen
    result is returned.
    """
    total = len(results)
    print("%-8s %-6s got %2d results. Which is it?" % (dapr_name, query_type, total))
    for index, entry in enumerate(results, start=1):
        # Encode defensively so un-printable characters cannot crash the prompt.
        info = entry['info'].encode(sys.stdout.encoding, errors='replace')
        print("#%2d: %s" % (index, info))
    while True:
        try:
            choice = int(input("Please choose #[1-%d] (0 to skip): " % total))
        except ValueError:
            choice = None  # non-numeric input: re-prompt
        except EOFError:
            choice = 0  # treat EOF as "skip"
            print()
        if choice in range(total + 1):
            break
    if choice:
        return [results[choice - 1]]
    return results
bac1fae96505d3745c40717937e3523182be84f5
38,228
def read_seq(handle):
    """
    Read sequence from plain text file (no format).
    Used for importing reference sequence.

    :param handle: iterable of lines (open file handle or list of strings)
    :return: str, sequence with surrounding whitespace stripped per line
    """
    return ''.join(line.strip() for line in handle)
063f9e5300093537d81ed6ee8eb96579ae0dfcf5
38,230
def split(n):
    """Split a string or list into two halves (first half shorter when the
    length is odd).

    >>> split("hello")
    ['he', 'llo']
    >>> split([1,2,3,1,2,4])
    [[1, 2, 3], [1, 2, 4]]
    """
    midpoint = len(n) // 2
    return [n[:midpoint], n[midpoint:]]
ab132de4077bbc390a8b4f2f38c5154ecd75d579
38,231
from datetime import datetime
import argparse


def valid_date(date):
    """Validate the user-supplied date is a valid format to parse.

    Returns the date string unchanged when it matches YYYY-MM-DD;
    raises argparse.ArgumentTypeError otherwise.
    """
    try:
        datetime.strptime(date, '%Y-%m-%d')
    except ValueError:
        raise argparse.ArgumentTypeError('Provide the format: YYYY-MM-DD')
    return date
8068aef68c2a7d667e570790b834bfee75b89b3f
38,232
from typing import Union
from typing import Tuple
from typing import Optional
import os


def get_job_server_fd(
    job_server_fd: Union[int, Tuple[()], None]) -> Optional[int]:
    """Get the job server file descriptor from env var if input is a tuple.

    Args:
        job_server_fd: If this is not a tuple, return it directly.

    Returns:
        The job server file descriptor, or None.
    """
    if not isinstance(job_server_fd, tuple):
        return job_server_fd
    env_value = os.environ.get("JOB_SERVER_FD")
    return int(env_value) if env_value is not None else None
f1a1d0e7a982d0ec3efceeb6399b523d0c1c8f5a
38,235
def parse_tile(tile_string):
    """Walk a hex-grid path string and return the final (ew, ns) offset.

    Tokens are e, w (pure east/west) and n/s followed by e/w (diagonals).

    >>> parse_tile("esew")
    (1, -1)
    >>> parse_tile("esewnw")
    (0, 0)
    >>> parse_tile("nwwswee")
    (0, 0)
    """
    ew = 0
    ns = 0
    i = 0
    length = len(tile_string)
    while i < length:
        step = tile_string[i]
        i += 1
        if step == 'w':
            ew -= 1
        elif step == 'e':
            ew += 1
        elif step == 's':
            follower = tile_string[i]
            i += 1
            ns -= 1
            if follower == 'e':
                ew += 1
        elif step == 'n':
            follower = tile_string[i]
            i += 1
            ns += 1
            if follower == 'w':
                ew -= 1
        else:
            raise RuntimeError("")
    return ew, ns
7b6ee9725a65e88f3110c288000fb8d9626826b7
38,236
import os


def is_newer(src: str, dest: str) -> bool:
    """Return whether *src* was modified more recently than *dest*.

    A missing destination file counts as "src is newer".
    """
    if os.path.isfile(dest):
        return os.stat(src).st_mtime > os.stat(dest).st_mtime
    return True
ddb66cbacfe81d81707784b0dfea74ed3373432a
38,237
def parse_storage_mappings(storage_mappings):
    """
    Given the 'storage_mappings' API field, returns a tuple with the
    'default' option, the 'backend_mappings' and 'disk_mappings'.
    """
    # NOTE: the 'storage_mappings' property is Nullable:
    if storage_mappings is None:
        return None, {}, {}

    backend_mappings = {}
    for entry in storage_mappings.get("backend_mappings", []):
        backend_mappings[entry['source']] = entry['destination']

    disk_mappings = {}
    for entry in storage_mappings.get("disk_mappings", []):
        disk_mappings[entry['disk_id']] = entry['destination']

    return (
        storage_mappings.get("default"), backend_mappings, disk_mappings)
ea182c91ff5e2fe1e9a7a7066071a618eb039a5f
38,239
def bbox3d2result(bboxes, scores, labels, attrs=None):
    """Pack detection results into a dict of CPU tensors.

    Args:
        bboxes (torch.Tensor): Bounding boxes with shape (N, 5).
        scores (torch.Tensor): Scores with shape (N, ).
        labels (torch.Tensor): Labels with shape (N, ).
        attrs (torch.Tensor, optional): Attributes with shape (N, ).
            Defaults to None.

    Returns:
        dict[str, torch.Tensor]: keys ``boxes_3d``, ``scores_3d``,
        ``labels_3d`` and, when *attrs* is given, ``attrs_3d`` — all moved
        to CPU.
    """
    packed = {
        'boxes_3d': bboxes.to('cpu'),
        'scores_3d': scores.cpu(),
        'labels_3d': labels.cpu(),
    }
    if attrs is not None:
        packed['attrs_3d'] = attrs.cpu()
    return packed
d31481229e17bc25d4f1d6fe5b9ac757b194357e
38,242
import pathlib


def _load_file_contents(path: pathlib.Path) -> str:
    """Return the contents of a file."""
    # Path.read_text opens in text mode with the default encoding,
    # matching the behaviour of path.open("r").read().
    return path.read_text()
ca90bcc6f346e69f10323388b8bc2faf49e2553d
38,244
import random


def mutate_all_even(even_numbers):
    """Replace every character with a random even digit (0/2/4/6/8).

    :param even_numbers: string of even numbers
    :return: string of other (random) even numbers, same length as input
    """
    # One draw per input character, from the five even digits 0..8.
    return "".join(str(random.choice(range(0, 9, 2))) for _ in even_numbers)
57f2348c3e6a8a8926ebb3ebd130e872c68254f0
38,246
import configparser


def __read_option(file_path, section, option):
    """
    Parse the config file at *file_path* and read one option's value.

    Returns the option value as a string, or "" when the section or option
    is missing.
    """
    parser = configparser.RawConfigParser()
    parser.read(file_path)
    try:
        value = parser.get(section, option)
    except (configparser.NoSectionError, configparser.NoOptionError):
        value = ""
    return str(value)
d414c37247b8008ece9ce7c337799c6ddbe0451d
38,247
def filter_rr(rr):
    """Placeholder filter for RR data: currently returns the input unchanged."""
    return rr
ba903e89c73d75f741325439bc6cba7f54a8b327
38,248
def recordCounter(x):
    """Simple record sizer: every record counts as exactly 1."""
    return 1
a9b735c34b7978a866de6ffcf268875940664160
38,249
def q_mult(q1, q2):
    """Hamilton product of two quaternions given as (w, x, y, z) tuples."""
    aw, ax, ay, az = q1
    bw, bx, by, bz = q2
    return (
        aw * bw - ax * bx - ay * by - az * bz,
        aw * bx + ax * bw + ay * bz - az * by,
        aw * by + ay * bw + az * bx - ax * bz,
        aw * bz + az * bw + ax * by - ay * bx,
    )
fdf917dafd2a28d4d9105cd5d88d4da2794d71a2
38,250
import hashlib


def sha256(byte_array) -> bytes:
    """
    Perform a SHA256 operation on the input.

    :param byte_array: data to hash.
    :type byte_array: bytearray or bytes
    :return: hashed data
    :rtype: bytes
    """
    hasher = hashlib.sha256()
    hasher.update(byte_array)
    return hasher.digest()
9017ccfa9f548502fcebdc61aedec85924907225
38,251
def get_first_and_last_line(fname):
    """
    Get the first and last line of the file.

    Since the common_crawl files are alphabetical, we can use this information
    to determine the alphabetic range of entries covered by the file. This
    information will later be used to limit the zone comparison to only those
    zones that would be within that alphabetic range. This is a speed
    improvement.

    :param fname: The filename to examine for the first and last lines
    :return: Two strings representing the first and last lines, respectively.
    """
    with open(fname, "rb") as fh:
        # The first line is simply the first item yielded by the iterator.
        first = next(fh)
        # Seek backwards from the end of the file (whence=2) in
        # exponentially growing steps until the tail chunk contains at
        # least two lines; that guarantees the final element of the chunk
        # is a complete last line rather than a truncated one.
        offs = -10
        while True:
            fh.seek(offs, 2)
            lines = fh.readlines()
            if len(lines) > 1:
                last = lines[-1]
                break
            offs *= 2
        # NOTE(review): seek(-10, 2) raises OSError for files shorter than
        # 10 bytes, and a one-line file can never satisfy len(lines) > 1 —
        # presumably inputs are always large multi-line files; confirm.
        # Return lines by converting bytes back to strings
        return (first.decode("utf-8"), last.decode("utf-8"))
6c69957698cf9357c0c223ec55ba01ecf1aff6ea
38,252
def AAPIUnLoad():
    """Execute commands while Aimsun is closing."""
    return 0  # 0 is the return value the Aimsun API callback expects
72814f6b67712ca51f384273f86546558dd637bf
38,253
def get_deltah_multipliers(accuracy_order):  # Test Function Written
    """Return the finite-difference step multipliers for an accuracy order.

    Args:
        accuracy_order (int): even accuracy order (2, 4, 6 or 8); the
            stencil spans ``accuracy_order // 2`` points on each side of 0.

    Returns:
        list of float: delta-h multipliers, e.g. [-1., 0., 1.] for order 2.
    """
    deltah_multipliers = [
        [-1., 0., 1.],
        [-2., -1., 0., 1., 2.],
        [-3., -2., -1., 0., 1., 2., 3.],
        [-4., -3., -2., -1., 0., 1., 2., 3., 4.],
    ]
    # Bug fix: '/' is float division in Python 3 and a float is not a valid
    # list index; use integer division instead.
    return deltah_multipliers[accuracy_order // 2 - 1]
e0f026784a78e7670b698147e3a16ed7c679061d
38,255
def obtain_points(func, theta_0, theta_1, min_x, max_x, step=0.1):
    """Sample ``func(x, theta_0, theta_1)`` from min_x to max_x inclusive.

    Returns a tuple ``(x_values, y_values)``.
    """
    xs = []
    ys = []
    current = min_x
    # Float accumulation (current += step) is kept deliberately so the
    # sampled grid matches the original behaviour exactly.
    while current <= max_x:
        xs.append(current)
        ys.append(func(current, theta_0, theta_1))
        current += step
    return (xs, ys)
a603660d60a8fdc8c99b5fb05c756afe972dd1ab
38,256
def create_aln_expr(id, start=None, stop=None):
    """
    Create an alignment expression, such as ``n2[5:8]`` or ``tw1``
    given an id, and start/stop range.

    :param id: ID with which to align
    :type id: str
    :param start: Range at which to start
    :type start: int
    :param stop: Range at which to stop
    :type stop: int
    """
    if start is None and stop is None:
        return id
    if start is not None and stop is not None:
        return '%s[%d:%d]' % (id, start, stop)
    # One-sided ranges are not supported.
    raise Exception('Invalid alignment expression request')
e06c4e65beffc1ec14bd4b16e34e6f14b22f2576
38,257
def normalize_by_mean(nodes, edges, feature_list, sequence_info):
    """Normalize over all existing edges in the batch.

    For every feature name F in *feature_list* that also has an
    ``F_MNORM_`` entry in the list, writes
    ``edges["F_MNORM_"] = F / (max(F) + 0.0001)``.
    (Despite the name, the code divides by the maximum, not the mean.)

    Returns the (mutated) edges mapping.
    """
    targets = [name for name in feature_list if name + "_MNORM_" in feature_list]
    for name in targets:
        values = edges[name].clone()
        edges[name + "_MNORM_"] = values / (values.max() + 0.0001)
    return edges
ebbf23de962d9e566c3832e96e18206a6593ce40
38,259
def compressFeatureMap(featureMap, ignoreGaps=0, terse=0):
    """Given a feature map as returned by ClientDirectory.getFeatureMap,
    compress the data from each server's server descriptors.

    The default behavior is: if a server has two server descriptors such
    that one becomes valid immediately after the other becomes invalid,
    and they have the same features, compress the two entries into one.

    If ignoreGaps is true, the requirement for sequential lifetimes is
    omitted.

    If terse is true, server descriptors are compressed even if their
    features don't match.  If a feature has different values at different
    times, they are concatenated with ' / '.
    """
    result = {}
    for nickname in featureMap.keys():
        # Bug fix (Python 3): dict.items() returns a view with no .sort();
        # use sorted() instead of items().sort().
        byStartTime = sorted(featureMap[nickname].items())
        r = []
        for (va, vu), features in byStartTime:
            if not r:
                r.append((va, vu, features))
                continue
            lastva, lastvu, lastfeatures = r[-1]
            # Merge when lifetimes are contiguous/overlapping (or gaps are
            # ignored) and the feature dicts are identical.
            if (ignoreGaps or lastva <= va <= lastvu) and lastfeatures == features:
                r[-1] = lastva, vu, features
            else:
                r.append((va, vu, features))
        result[nickname] = {}
        for va, vu, features in r:
            result[nickname][(va, vu)] = features

        if not terse:
            continue
        if not result[nickname]:
            continue

        # Terse mode: collapse everything into one interval spanning the
        # earliest valid-after to the latest valid-until, joining
        # conflicting feature values with ' / '.
        ritems = sorted(result[nickname].items())
        minva = min(va for (va, vu), features in ritems)
        maxvu = max(vu for (va, vu), features in ritems)
        rfeatures = {}
        for (va, vu), features in ritems:
            for f, val in features.items():
                if rfeatures.setdefault(f, val) != val:
                    rfeatures[f] += " / %s" % val
        result[nickname] = {(minva, maxvu): rfeatures}
    return result
6ec3413b7a8de44eb3e0d872576f4cb02e571075
38,260
import sys


def read_input():
    """Read all of stdin and evaluate it as a Python dict.

    Assumes input is provided in dictionary format; newlines are removed
    before evaluation.

    Returns:
        dict: the parsed input.
    """
    # Improvement: read the stream in one call instead of concatenating
    # line by line (the += loop was quadratic).
    data = sys.stdin.read()
    # remove the new lines and convert to dictionary
    # SECURITY: eval() executes arbitrary code from stdin. This is only
    # acceptable for trusted input; for untrusted data use
    # ast.literal_eval instead.
    data = data.replace("\n", "")
    return eval(data)
06a5d1d84b856682f851a6bacc646dcb194b1695
38,261
import torch


def ellip_gaussian2D(radius,
                     sigma_x,
                     sigma_y,
                     dtype=torch.float32,
                     device='cpu'):
    """Generate 2D ellipse gaussian kernel.

    Args:
        radius (tuple(int)): Ellipse radius (radius_x, radius_y) of the
            gaussian kernel.
        sigma_x (int): X-axis sigma of the gaussian function.
        sigma_y (int): Y-axis sigma of the gaussian function.
        dtype (torch.dtype, optional): Dtype of gaussian tensor.
            Default: torch.float32.
        device (str, optional): Device of gaussian tensor. Default: 'cpu'.

    Returns:
        h (Tensor): Gaussian kernel with a
        ``(2 * radius_y + 1) * (2 * radius_x + 1)`` shape; entries smaller
        than machine epsilon relative to the maximum are zeroed.
    """
    radius_x, radius_y = radius[0], radius[1]
    xs = torch.arange(
        -radius_x, radius_x + 1, dtype=dtype, device=device).view(1, -1)
    ys = torch.arange(
        -radius_y, radius_y + 1, dtype=dtype, device=device).view(-1, 1)
    exponent = (-(xs * xs) / (2 * sigma_x * sigma_x)
                - (ys * ys) / (2 * sigma_y * sigma_y))
    kernel = exponent.exp()
    # Suppress numerically negligible tails.
    kernel[kernel < torch.finfo(kernel.dtype).eps * kernel.max()] = 0
    return kernel
97caa00f535321b4c7831c251d3f0d6aaf3d2e32
38,262
import heapq


def _simple_chooser(queue, remaining):
    """Default contraction chooser that simply takes the minimum cost option.

    Pops the cheapest candidate off *queue*; returns None when either of
    its operands is no longer in *remaining* (the candidate is obsolete).
    """
    cost, k1, k2, k12 = heapq.heappop(queue)
    if k1 in remaining and k2 in remaining:
        return cost, k1, k2, k12
    return None  # candidate is obsolete
5bb92184767ba68247b124d4a935ea9dab327f96
38,263