content
stringlengths
35
416k
sha1
stringlengths
40
40
id
int64
0
710k
def boolean(flag):
    """Convert a string flag to a boolean.

    '1'/'yes'/'true' map to True, '0'/'no'/'false' to False
    (case-insensitive).  Anything else raises ValueError.
    """
    lowered = flag.lower()
    if lowered in ('1', 'yes', 'true'):
        return True
    if lowered in ('0', 'no', 'false'):
        return False
    raise ValueError('Unknown flag %r' % lowered)
9469314a87b048d428691d06722c374898fb848c
32,475
import random


def choose(population, weights=None, k=None):
    """Choose from *population*, optionally weighted.

    :param population: the population to chose from
    :param weights: the weights attached to each population element
    :param k: the amount of times to chose
    :return: an element of the list if k = None, or a k-sized list of choices
    """
    picks = random.choices(population, weights=weights, k=k or 1)
    if k:
        return picks
    return picks[0]
4a78dde05dba4f9774ae64f0b85bd89e61204b89
32,477
def _get_arg_config_file(args): """Return String yaml config file path.""" if not args.config_file: raise ValueError("YAML Config File was not supplied.") return args.config_file
34e4570cee420035cbaeab3c852069c4abf6a3ae
32,478
def normalize_whitespace(value):
    """Collapse all runs of whitespace in *value* to single spaces."""
    return " ".join(value.split())
e268e12665bb50d96c9418dfc999be20a2c96b37
32,480
import json


def getEveryoneSingleFrame(path_json4, path_json6, frame_num, filtered_index_C4, filtered_index_C6):
    """Collect the keypoints of everyone visible in one frame.

    Args:
        path_json4: path of json for C4
        path_json6: path of json for C6
        frame_num: frame number, parsed from 'image_id' minus its
            4-character extension
        filtered_index_C4: annotation ids kept for C4
        filtered_index_C6: annotation ids kept for C6

    Returns:
        (list_kp_4, list_kp_6): keypoint lists, one per camera
    """
    def _collect(path, keep_ids):
        # One keypoint entry per annotation matching the frame and id filter.
        with open(path) as fh:
            annotations = json.load(fh)
        keypoints = []
        for entry in annotations:
            frame_idx = int(entry['image_id'][:-4])
            if frame_idx == frame_num and entry['id'] in keep_ids:
                keypoints.append(entry['keypoints'])
        return keypoints

    return _collect(path_json4, filtered_index_C4), _collect(path_json6, filtered_index_C6)
34965277c6b2edda15aa41a268e2b47499fd5770
32,481
import requests


def get_access_token(access_code, redirect_uri, client_id, client_secret):
    """Exchange a Spotify authorization code for an access token.

    :access_code: The user's access code after they login to their account
    :returns: The access token linked to the access code
    """
    request_body = {
        'grant_type': 'authorization_code',
        'code': access_code,
        'redirect_uri': redirect_uri,
        'client_id': client_id,
        'client_secret': client_secret,
    }
    response = requests.post('https://accounts.spotify.com/api/token',
                             data=request_body)
    return response.json()['access_token']
22e855a10e63c2ed86788fca93516abebe6054d2
32,483
def LegalCharacter(character) -> bool:
    """Check whether *character* is legal: alphanumeric or underscore.

    param {*} character: single-character string to test
    return {*} bool value
    """
    # Return the boolean expression directly instead of an if/else ladder.
    return character.isalnum() or character == "_"
a7fe44e7c0983a3a4e852f5971ae9e2dec6b44a6
32,485
def p0_reciprocal(xs, ys):
    """Initial parameter guess [a0, b0, 1] for a reciprocal fit.

    a0 is the last y value; b0 makes the curve pass through the first
    data point.
    """
    a0 = ys[-1]
    b0 = ys[0] * xs[0] - a0 * xs[0]
    return [a0, b0, 1]
fe1615294ede978a7c41bff1f9af5eb28c3e8728
32,487
import hashlib


def getKeyHashFromKey(keyPath):
    """Return the SHA-512 hex digest of the key file at *keyPath*.

    The file is streamed in 64 KiB chunks so large keys need not fit in
    memory.

    Args:
        * keyPath: path to the file used as key

    Returns:
        Key's hash in hexdigest format.
    """
    print("Generating hash of key, this might take some time")
    chunk_size = 65536
    digest = hashlib.sha512()
    with open(keyPath, 'rb') as fh:
        for chunk in iter(lambda: fh.read(chunk_size), b''):
            digest.update(chunk)
    return digest.hexdigest()
4557029a5e7b2250f6e63d559ca103f11689ab16
32,488
def multisample_vcf_file(tmpdir, multisample_vcf):
    """Write *multisample_vcf* to tmpdir/input/input.vcf and return its path."""
    target = tmpdir.mkdir("input").join("input.vcf")
    with open(str(target), "wt") as fh:
        fh.write(multisample_vcf)
    return str(target)
314b0fd4f1c37ef3ca6d24e0957a712e37baab15
32,489
def abbr_status(value):
    """Abbreviate an RFC status name; unknown values pass through unchanged."""
    abbreviations = {
        'Proposed Standard': 'PS',
        'Draft Standard': 'DS',
        'Standard': 'S',
        'Historic': 'H',
        'Informational': 'I',
        'Experimental': 'E',
        'Best Current Practice': 'BCP',
        'Internet Standard': 'IS',
    }
    return abbreviations.get(value, value)
08025ed44e9c8ea725755f9a07b6a5a04834f896
32,490
def encode_base64(base64_string: str) -> bytes:
    """Encode a base64 string into its UTF-8 byte representation."""
    return base64_string.encode("UTF8")
ba0263624b4ce25bac4f19684cee60db351d9b40
32,491
async def async_handle_webhook(cloud, payload):
    """Forward an incoming IoT cloud-webhook message to the client handler."""
    handler = cloud.client.async_webhook_message
    return await handler(payload)
588fcae8e6895f3889981ead06a4a72f92b20377
32,492
def CheckHeader(inFile):
    """Heuristically decide whether a PlugX input file is XOR encoded.

    XOR-encoded files typically carry the key in the first ~10 bytes,
    terminated by a NULL.  Scan the first 40 bytes for that NULL; if none
    appears, assume the file is not XOR encoded (or unknown).

    IN: Data to search
    OUT: (bool, int) — whether a NULL was found and its offset (0 if not).
    """
    for offset in range(40):
        if inFile[offset] != 0x00:
            continue
        # See if the XOR key repeats in the file.
        # NOTE(review): this searches the slice within itself, which is
        # always 0, so the "key not found" branch below can never fire —
        # behaviour preserved as-is; confirm the intended search target.
        KeyCheck = inFile[offset:].find(inFile[offset:])
        if KeyCheck == -1:
            # Key was not found
            return False, 0
        return True, offset
    return False, 0
23b3cdd425c2da090f9b775d64e93db5a3fbbcc0
32,493
def between(min, max):
    """Return a predicate testing whether a value lies strictly between
    min and max; failures are reported via print and return False.
    """
    def op(x):
        if not (min < x < max):
            print("ERROR: Value must be between {} and {}".format(min, max))
            return False
        return True
    return op
2ccc2b78be3f4a85fb910e392f2c9f291e15d19b
32,494
def obtain_common_duration(score):
    """Return the most frequent non-zero duration throughout the melody.

    Each event in *score* is a sequence whose first element is the
    duration; zero durations are filtered out.  Ties break in favour of
    the duration appearing first in the melody.
    """
    # Parse durations and filter out 0s
    durs = [event[0] for event in score if event[0]]
    unique_durs = []
    for dur in durs:
        if dur not in unique_durs:
            unique_durs.append(dur)
    # How many such durations occur throughout the melody?
    counts = [durs.count(dur) for dur in unique_durs]
    highest = max(counts)
    # BUG FIX: the original zipped `durs` (every occurrence) with the
    # per-unique-duration counts, misaligning durations and counts, so the
    # wrong duration could be returned.  Pair each unique duration with
    # its own count instead.
    for dur, count in zip(unique_durs, counts):
        if count == highest:
            return dur
f14bf116d812806ab8bb42fdb7f0ec2e546274f4
32,495
async def hello_user(name='Regina'):
    """Return a simple greeting 👋 keyed on 'user_name'."""
    return {'user_name': f'Hello {name}'}
5fb582fed75d9abee001fb2f0c1425d5a0abddf4
32,496
import requests


def get_proxyscape_proxies():
    """Load a list of `{ip_address}:{port}` entries for public HTTP proxies."""
    PROXY_TIMOUT_LIMIT = "1000"
    url = ("https://api.proxyscrape.com/?request=getproxies"
           "&proxytype=http&country=all&ssl=yes&anonymity=all"
           "&timeout=" + PROXY_TIMOUT_LIMIT)
    response = requests.get(url)
    return response.text.split("\r\n")
841cfbc94b6c1c198f683886f82de05402edb995
32,497
def atkin_sieve(max_value: int):
    """Generate a list of primes below *max_value* with a sieve of Atkin
    (yassified Eratosthenes).

    Primes below 60 are seeded from a hard-coded list; the quadratic-form
    sieve covers the rest.  BUG FIX: the seed list is now clipped to
    max_value, so small thresholds no longer return primes >= max_value.
    """
    seed_primes = [2, 3, 5, 7, 11, 13, 17, 19, 23, 29, 31, 37, 41, 43, 47, 53, 59]
    primes = [p for p in seed_primes if p < max_value]
    class1 = [1, 13, 17, 29, 37, 41, 49, 53]
    class2 = [7, 19, 31, 43]
    class3 = [11, 23, 47, 59]
    # Generating sieve
    sieve = [False] * max_value
    x = 1
    while x ** 2 < max_value:
        y = 1
        while y ** 2 < max_value:
            # ^= flips the tile: flipping an even number of times changes nothing.
            # Class 1
            current = 4 * x ** 2 + y ** 2
            if current < max_value and current % 60 in class1:
                sieve[current] ^= True
            # Class 2
            current = 3 * x ** 2 + y ** 2
            if current < max_value and current % 60 in class2:
                sieve[current] ^= True
            # Class 3
            current = 3 * x ** 2 - y ** 2
            if current < max_value and x > y and current % 60 in class3:
                sieve[current] ^= True
            y += 1
        x += 1
    # Remove multiples of prime squares.
    nb = 7
    while nb ** 2 < max_value:
        if sieve[nb]:
            for i in range(nb ** 2, max_value, nb ** 2):
                sieve[i] = False
        nb += 1
    for i in range(60, max_value):
        if sieve[i]:
            primes.append(i)
    return primes
e08bb63210dfb91ecd552e3a0ccae357a023bd71
32,499
def children(obj):
    """Return the child (name, value) items of *obj*.

    Arguments:
        obj {object} -- the object to check

    Returns:
        dict items for objects with a __dict__ or for mapping-like
        objects; None otherwise.
    """
    if hasattr(obj, '__dict__'):
        return vars(obj).items()
    if hasattr(obj, 'keys'):
        return obj.items()
8e82502e7554baa117175245edb7a8cade868910
32,500
def _slice_extent_axis(ext_min, ext_max, fctr): """Slice an extent into multiple extents along an axis.""" strd = (ext_max - ext_min) / fctr return [(ext_min + i * strd, ext_max - (fctr - i - 1) * strd) for i in range(fctr)]
3365dd289cc822bed0e7c6075587ddae15806c50
32,501
def encode_special_characters(user_string):
    """Encode special characters in a user's search string for Elasticsearch.

    Args:
        user_string (string): raw string to encode (None is treated as empty)

    Returns:
        str: the string with every reserved character backslash-escaped
    """
    if user_string is None:
        return ""
    # List of special characters can be found here: https://www.elastic.co/guide/en/elasticsearch/reference/2.4/query-dsl-query-string-query.html#_reserved_characters
    sp_chars = {'+', '-', '=', '|', '<', '>', '!', '(', ')', '{', '}',
                '[', ']', '^', '"', '~', '*', '?', ':', '\\', '/'}
    # Simplified: `char.replace(char, '\\' + char)` is just '\\' + char;
    # a set gives O(1) membership tests.
    return "".join('\\' + char if char in sp_chars else char
                   for char in user_string)
00d31e32a823028a08333bd59000f52b64dafcf9
32,502
def calc_insurance_premium(exposed_value_euro: float, flood_prob: float,) -> float:
    """Expected-loss premium: flood probability times exposed value.

    Reasonable for a self-insuring entity such as a government; includes a
    zero risk premium — a strong simplification!
    """
    return exposed_value_euro * flood_prob
831e734751299dc7f5595226be8a330f2e761443
32,503
import configparser


def read_config(filename='config.ini', section='default'):
    """Read an INI config file and return one of its sections."""
    parser = configparser.ConfigParser()
    parser.read(filename)
    return parser[section]
96f50f0439ddd2c05d6de876d30ae2d4dc5e3a2a
32,505
def getMapFeature2Property(options):
    """Get a map of features to a property.

    Reads options.filename_properties as a tab-separated file whose first
    two columns are (feature, property); lines starting with '#' are
    skipped.  Returns None when no filename is configured.

    Fixed: the input file is now closed via a context manager (the
    original leaked the open file handle).
    """
    if not options.filename_properties:
        return None
    map_feature2property = {}
    with open(options.filename_properties, "r") as infile:
        for line in infile:
            if line[0] == "#":
                continue
            # `prop` avoids shadowing the builtin `property`.
            feature, prop = line[:-1].split("\t")[:2]
            map_feature2property[feature] = prop
    return map_feature2property
5c9ff20c9ca9588487bc23eeb7d34aea4ad52eae
32,506
from importlib import import_module
from typing import Callable


def import_function(function_path: str) -> Callable:
    """Import a function from a dotted path.

    Example:
        >>> import_function("generate_changelog.pipeline.noop_func")
        <function noop_func at 0x11016d280>

    Args:
        function_path: A dotted path to a function

    Returns:
        The callable function
    """
    module_path, _, function_name = function_path.rpartition(".")
    return getattr(import_module(module_path), function_name)
3105abee396d73e3580d737b1149dbdd00e0751b
32,507
def enable_auto_tray(on=0):
    """Automatically hide inactive icons in the system tray.

    DESCRIPTION
        Controls whether programs running in the tray are hidden
        automatically when inactive.

    COMPATIBILITY
        Windows XP

    MODIFIED VALUES
        EnableAutoTray : dword : 00000000 = show inactive icons;
        00000001 = hide inactive icons.
    """
    value = '00000001' if on else '00000000'
    return ('[HKEY_CURRENT_USER\\Software\\Microsoft\\Windows\\'
            'CurrentVersion\\Explorer]\n'
            '"EnableAutoTray"=dword:' + value)
eaad18f19813b1cf8509970fcd543780a29654f5
32,508
def should_include_rustc_srcs(repository_ctx):
    """Determine whether or not to include rustc sources in the toolchain.

    Args:
        repository_ctx (repository_ctx): The repository rule's context object

    Returns:
        bool: Whether to include rustc source files in a `rustc_toolchain`
    """
    # The environment variable always takes precedence over the attribute.
    env_value = repository_ctx.os.environ.get("RULES_RUST_TOOLCHAIN_INCLUDE_RUSTC_SRCS")
    if env_value == None:
        return getattr(repository_ctx.attr, "include_rustc_srcs", False)
    return env_value.lower() in ["true", "1"]
a6b4c093d44d77880edc7310980fd27791a47c37
32,509
def get_table_4(air_type):
    """Table 4: thermal resistance of the air layer inside the envelope.

    Args:
        air_type (str): kind of air layer — 'AirTight' (sealed by facing
            material), 'OnSiteNonConnected' (not connected to other
            spaces) or 'OnSiteConnected' (connected to other spaces)

    Returns:
        float: thermal resistance of the air layer

    Raises:
        ValueError: for an unknown *air_type*
    """
    resistances = {'AirTight': 0.09,
                   'OnSiteNonConnected': 0,
                   'OnSiteConnected': 0}
    try:
        return resistances[air_type]
    except KeyError:
        raise ValueError(air_type)
66f28b535f9ef69525cf1e74e0af4bbf155ec458
32,511
def create_mapping(dict_times):
    """Map each time key of *dict_times* to its integer index.

    If times are not integers, transform them into integers.

    :param dict_times: Dict where keys are times.
    :return: dict mapping each original time stamp to its position
        (insertion order).
    """
    # enumerate + dict comprehension replaces the manual index loop.
    return {key: index for index, key in enumerate(dict_times)}
75893a6419b61d86bc0d4d0693bbc1b25f111a75
32,512
def sphinx_lang(env, default_value='en'):
    """Return the language defined in the Sphinx configuration.

    @param env environment
    @param default_value value returned when no language is configured
    @return language code

    BUG FIX: *default_value* was previously ignored and 'en' was
    hard-coded in both fallback branches; the parameter is now honoured.
    The default behaviour ('en') is unchanged.
    """
    if hasattr(env, "settings") and hasattr(env.settings, "language_code"):
        return env.settings.language_code  # pragma: no cover
    return default_value  # pragma: no cover
728d1c066e2f0786bd19a6903afe747791bf7743
32,513
def package(label):
    """Given the label object supplied, returns its string representation.

    Args:
        label: a Label object.

    Returns:
        A string, describing the full path of the package.

    Example:
        >>> package(Label(":target"))
        "@enkit//bazel/utils:target"
    """
    return "%s//%s:%s" % (label.workspace_name, label.package, label.name)
0f82ce092a806823c6e9248addeea6fabd14100b
32,514
def get_hashtag_counts(key, val, collection):
    """Fetch a region document and merge hashtag counts across matches.

    Queries *collection* for ``{key: val}``.  For states the document
    holds 'name', 'fips', 'counts', 'abbrev', 'landarea'; for counties
    'name', 'state_fips', 'county_fips', 'geoid', 'landarea', etc.  The
    first match seeds the result; the 'counts' sub-dicts of any further
    matches are merged in.

    Examples:
        hashtag_counts_by_state('name', 'Washington', collection)
        hashtag_counts_by_state('fips', 53, collection)
        hashtag_counts_by_state('abbrev', 'WA', collection)
    """
    merged = {}
    for doc in collection.find({key: val}):
        if merged:
            # After the first one, we must explictly update the inner dict.
            merged['counts'].update(doc['counts'])
        else:
            merged.update(doc)
    return merged
ae152f15dd1dd7bb0f15ef9c181adf820c3cb32b
32,515
def normalized_dt(mol, dt, n, T):
    """Return *dt* expressed in units of the molecule's mean free time.

    Args:
        mol: object exposing mean_free_time(n, T)
        dt: time step
        n: number density
        T: temperature
    """
    return dt / mol.mean_free_time(n, T)
7ee48712122eb01a6454fcacb7b5ab3b457ccbe7
32,516
import torch


def ex_net_svd(model, in_dim):
    """Singular Value Decomposition of a model's end-to-end weight product.

    Multiplies the parameters of *model* (in iteration order) onto an
    identity of size *in_dim*, then decomposes the product.

    Args:
        model (torch.nn.Module): neural network model
        in_dim (int): the input dimension of the model

    Returns:
        U, Σ, V (Tensors): Orthogonal, diagonal, and orthogonal matrices
    """
    total = torch.eye(in_dim)
    for weight in model.parameters():
        total = weight @ total
    U, Σ, V = torch.svd(total)
    return U, Σ, V
5a37aed05f8685683f986da2794569c7ae9a2291
32,517
import socket
import json


def get_json_data(stats_sock):
    """Return uwsgi stats from the unix socket file as a dict.

    Best effort: any error is printed and an empty dict is returned.
    """
    raw = ""
    data_dict = {}
    try:
        with socket.socket(socket.AF_UNIX, socket.SOCK_STREAM) as s:
            s.connect(stats_sock)
            while True:
                chunk = s.recv(4096)
                if len(chunk) < 1:
                    break
                raw += chunk.decode("utf8", "ignore")
            s.close()
        data_dict = json.loads(raw)
    except Exception as e:
        print(e)
    return data_dict
67b6687fabecfeb84c4c460d133a93da21cc8f5e
32,518
import math


def pathlength(x, y):
    """Sum sqrt(x_i^2 + y_i^2) over paired coordinate lists.

    Returns 0 when the lists differ in length.

    NOTE(review): despite the name this sums each point's distance from
    the origin, not the distance between consecutive points — confirm
    against the formula the original docstring refers to.
    """
    if len(x) != len(y):
        return 0
    return sum(math.sqrt(xi ** 2 + yi ** 2) for xi, yi in zip(x, y))
c90cecf35bf8e7a442463763dca153d513855e8a
32,519
def contains_prefix(root, input_prefix):
    """Walk the trie from *root* along *input_prefix*.

    Returns (True, num_words_with_prefix) when the prefix exists,
    otherwise (False, 0).
    """
    current = root
    for char in input_prefix:
        # for/else: the else fires when no child matched this character.
        for child in current.nodes:
            if child.char == char:
                current = child
                break
        else:
            return False, 0
    return True, current.num_words_at_letter
b658cb31f54c4d1de2534e27bf1b057d72ccf254
32,520
import warnings
import os
import shelve


def data_read(fname, keys=None, path="data"):
    """DEPRECATED; USE :func:`load`.

    Parameters
    ----------
    fname : str
        File name.
    keys : str, list of str, or None; optional
        Name(s) of the values to get from file.  If None, returns
        everything as a dict.  Default is None.
    path : str, optional
        Absolute or relative path where fname is stored. Default is 'data'.

    Returns
    -------
    out : values or dict
        Requested value(s) or dict containing everything if keys=None.
    """
    # Issue warning
    mesg = ("\n The use of `data_write` and `data_read` is deprecated.\n"
            " These functions will be removed before v1.0.\n"
            " Use `save` and `load` instead.")
    warnings.warn(mesg, DeprecationWarning)

    # Get absolute path.
    path = os.path.abspath(path)
    full_path = os.path.join(path, fname)

    # Bail out with None placeholder(s) if the shelve files are absent.
    for extension in [".dat", ".bak", ".dir"]:
        if not os.path.isfile(full_path+extension):
            print(f" > File <{full_path+extension}> does not exist.")
            if isinstance(keys, (list, tuple)):
                return len(keys)*(None, )
            return None

    # Get it from shelve.
    with shelve.open(path + "/" + fname) as db:
        if keys is None:
            # Everything as a dict.
            return {key: item for key, item in db.items()}
        if not isinstance(keys, (list, tuple)):
            # Single parameter.
            return db[keys]
        # Lists/tuples of parameters.
        return [db[key] for key in keys]
a4ef8af7861edcfc6d66dbbee514d293e202a26d
32,521
from typing import Dict
from typing import Any
import requests


def _execute_monkey_patch(self) -> Dict[str, Any]:
    """Temporary method to enable syncronous client code.

    Dispatches self's HTTP method through the blocking `requests` API and
    returns the JSON payload plus status code.
    """
    method: str = self.http_method.lower()
    additional_kwargs: Dict[str, Any] = {}
    dispatch = {
        "get": requests.get,
        "post": requests.post,
        "put": requests.put,
        "patch": requests.patch,
        "delete": requests.delete,
    }
    if method not in dispatch:
        raise NotImplementedError(f"Method '{method}' not recognised.")
    func = dispatch[method]
    if method == "post":
        # Additionally requires the json body (e.g on insert, self.json==row).
        additional_kwargs = {"json": self.json}
    url: str = str(self.session.base_url).rstrip("/")
    query: str = str(self.session.params)
    response = func(f"{url}?{query}", headers=self.session.headers,
                    **additional_kwargs)
    return {
        "data": response.json(),
        "status_code": response.status_code,
    }
bbe1a9ec5974e07ce1ad83645cd66b025df44388
32,524
def slope_bound(init_point, final_point, delta):
    """Slope of the flow joining two (t, x) points, with ±delta bounds.

    Used to obtain a flow in the hybrid automaton H close to slope form.

    Returns:
        (slope, slope_lower, slope_upper): the exact slope plus the
        slopes obtained when the final value is perturbed by -/+ delta.
    """
    init_t, init_x = init_point[0], init_point[1]
    final_t, final_x = final_point[0], final_point[1]
    dt = final_t - init_t
    # Exact slope
    slope = (final_x - init_x) / dt
    # Upper / lower bounds from perturbing the final value by delta.
    slope_upper = (final_x + delta - init_x) / dt
    slope_lower = (final_x - delta - init_x) / dt
    return slope, slope_lower, slope_upper
3e215ce6b9fdfde80386f9cee6887da73cb49d6e
32,525
def add_to_station(instrument, station):
    """Add *instrument* to *station*, replacing any same-named component."""
    if instrument.name in station.components:
        del station.components[instrument.name]
    station.add_component(instrument)
    return station
902565f2735df5f6fa8c2dbdf0e4fd5eed962c4f
32,526
def get_size_json(doc):
    """Return the node count of the JSON tree rooted at *doc*.

    Objects count 1 plus one node per key, arrays count 1 plus one order
    node per element; children are counted recursively; scalars count 1.
    """
    if isinstance(doc, dict):
        # OBJECT: object node + key nodes + children.
        return 1 + len(doc) + sum(get_size_json(v) for v in doc.values())
    if isinstance(doc, list):
        # ARRAY: array node + order nodes + children.
        return 1 + len(doc) + sum(get_size_json(v) for v in doc)
    # VALUE
    return 1
00dc1e532599359885640c76696e9d05004dbc0f
32,529
import numpy


def expected_radius_of_isogloss(time_from_origin, graph_diameter=1):
    """Expected isogloss radius decaying exponentially with time.

    Equivalent to graph_diameter * 2**(-time_from_origin / t).  The half
    life of social influence $t$ is to be taken from the social
    psychology literature.
    """
    half_life = 30
    return graph_diameter * numpy.exp(-time_from_origin / half_life * numpy.log(2))
4eac63f0d354904f7cb170686098f48ba5f45797
32,530
def normalize_run_info(run_info):
    """Normalize all dictionaries describing a run.

    Args:
        run_info (List[Dict]): The list of dictionaries to be normalized.

    Returns:
        List[Dict]: The input data with each dictionary carrying the
        union of all keys; keys missing from a dictionary get an empty
        string value.
    """
    all_keys = set()
    for run in run_info:
        all_keys.update(run)
    # Fill missing keys with '' so every dict has the same shape.
    return [{key: run.get(key, '') for key in all_keys} for run in run_info]
1f80c1a14bae565dc1e55ba552d281a7249918f1
32,531
def fake_response_accepting_class(response):
    """Fake a class that accepts a response parameter; echoes it back."""
    return response
f4443e73d5e95b0d281f892958cfc436b1743e3d
32,532
def short_stat(decoded_diff):
    """Parse `git shortstat` output into (added, deleted) line counts.

    Parts without a '+' / '-' marker contribute nothing; missing markers
    yield 0.
    """
    added = 0
    deleted = 0
    for part in decoded_diff.split(","):
        nums = [int(tok) for tok in part.split() if tok.isdigit()]
        if "+" in part:
            added = nums[0]
        if "-" in part:
            deleted = nums[0]
    return (added, deleted)
81fe46515f7f10627ae560315791a50bf33741bd
32,533
def linear_interpolate(x, y, x0, y0, x1, y1):
    """Format a SQL call to linear_interpolate for the given arguments and
    expected result *y*, including an equality check aliased as `match`.
    """
    call = "linear_interpolate({x}, {x0}, {y0}, {x1}, {y1})".format(
        x=x, x0=x0, y0=y0, x1=x1, y1=y1)
    return "\nselect {call}, {y} as answer, {y} = {call} as match\n;".format(
        call=call, y=y)
4a4137f1c004a32f933700618fec3948fcadea50
32,534
def replace_with_dict(string, str_func, replacements, count=-1):
    """:yaql:replace

    Returns a string with all occurrences of replacements' keys replaced
    with corresponding replacements' values.  If count is specified, only
    the first count occurrences of every key are replaced.

    :signature: string.replace(replacements, count => -1)
    :receiverArg string: input string
    :argType string: string
    :arg replacements: dict of replacements in format {old => new ...}
    :argType replacements: mapping
    :arg count: how many first occurrences of every key are replaced.
        -1 by default, which means to do all replacements
    :argType count: integer
    :returnType: string

    .. code::

        yaql> "abc ab abc".replace({abc => xx, ab => yy})
        "xx yy xx"
        yaql> "abc ab abc".replace({ab => yy, abc => xx})
        "yyc yy yyc"
        yaql> "abc ab abc".replace({ab => yy, abc => xx}, 1)
        "yyc ab xx"
    """
    # six.iteritems -> dict.items(): drops the Python-2 compatibility
    # shim; behaviour on Python 3 is identical.
    for key, value in replacements.items():
        string = string.replace(str_func(key), str_func(value), count)
    return string
d2c22d59dc030a600cf886afb98c015a572eed3d
32,535
import string
import random


def get_random_string(uppercase=True, alphanum=True, length=32):
    """Generate a random string.

    :param uppercase: include uppercase characters
    :param alphanum: include numbers
    :param length: result length
    """
    pool = string.ascii_letters if uppercase else string.ascii_lowercase
    if alphanum:
        pool += string.digits
    return u''.join(random.choice(pool) for _ in range(length))
8a4c26886e9b9ba3acd9b17c84d468cf9c399f9e
32,536
import json


def create_user(user_info, app_client):
    """Create a user, providing back the id and the token.

    @param user_info: dictionary with email and password
    @param app_client: a Flask app client to create against
    @returns user_id, token for newly created user
    """
    response = app_client.post(
        '/api/create_user',
        data=json.dumps(user_info),
        content_type='application/json'
    )
    payload = json.loads(response.data.decode("utf-8"))
    return payload['id'], payload['token']
26661ddb485ab8be600f53b63e2dc5ca744c342c
32,537
def email_is_string(email):
    """Check if the email is a string.

    :param email: The email to be tested.
    :type email: str
    :return: True if the email is a string, else false.
    :rtype: bool
    """
    return isinstance(email, str)
9262fa4fbfdbaeaa2d605f695b94fbba93de813c
32,539
import base64


def np_to_base64(a):
    """base64 encode the raw bytes of the input NumPy array.

    Args:
        a (array): numpy array

    Returns:
        str: Encoded string
    """
    encoded = base64.b64encode(a)
    return encoded.decode("utf-8")
9f88796f019ce1f1191c4b0492029ec8e737ffe2
32,540
def get_transport(url):
    """Return the transport scheme of *url*, or '' when none is present.

    Splits on the literal '://', which is more accurate than the urlparse
    module's plain colon split.
    """
    parts = str(url).split("://", 1)
    return parts[0] if len(parts) > 1 else ""
59ae526154bfdd7b4a6ebfbea6400890ce6b4105
32,541
import math


def round_gb_size_up(gb_size, dp=2):
    """Rounds a GB disk size (as a decimal float) up to suit the platform.

    Use this method to ensure that new vdisks, LUs, etc. are big enough,
    as the platform generally rounds inputs to the nearest [whatever].
    For example, a disk of size 4.321GB may wind up at 4.32GB after
    rounding, possibly leaving insufficient space for the image.

    :param gb_size: A decimal float representing the GB size to be rounded.
    :param dp: The number of decimal places to round (up) to.  May be zero
        (round to next highest integer) or negative (e.g. -1 will round to
        the next highest ten).
    :return: A new decimal float which is greater than or equal to the input.
    """
    scale = 10.0 ** dp
    return float(math.ceil(gb_size * scale)) / scale
3e65412f461e8ab2f7bb11ef19102879b9e5782b
32,542
def dict_to_str(dictionary: dict, level: int = 0, ) -> str:
    """A helper function to log dictionaries in a pretty way.

    Args:
        dictionary (dict): A general python dictionary.
        level (int): A recursion level counter, sets the visual indentation.

    Returns:
        str: A text representation for the dictionary.
    """
    indent = ' ' * level * 2
    lines = []
    for key, value in dictionary.items():
        if isinstance(value, dict):
            lines.append(indent + str(key) + ':\n' + dict_to_str(value, level + 1))
        else:
            lines.append(indent + str(key) + ': ' + str(value) + '\n')
    return ''.join(lines)
7af1d272d15174a13ad9aaf192e9cce6624e12ba
32,544
def _str(object, indent=4): """ Helper function: document data object by convert attributes listed in properties into a string. """ props = [a + "=" + str(getattr(object, a)) for a in object._fields] prefix = " "*indent return prefix+("\n"+prefix).join(props)
d17d6168ff08e3807218eb3b2d3a1f0bfbdc2afa
32,546
def string_test_xml_setup_multiple_failures():
    """This fixture represents two separate failures in two different teardown classes"""
    failure_body = """<failure type="testtools.testresult.real._StringException">Traceback (most recent call last):
testtools.testresult.real._StringException: Traceback (most recent call last):
  File "/openstack/venvs/tempest-r14.20.0/tempest/lib/common/rest_client.py", line 829, in _error_checker
    message=message)
tempest.lib.exceptions.ServerFault: Got server fault
Details: An unexpected error prevented the server from fulfilling your request.
</failure>"""
    junit_xml = """
<testsuite errors="0" failures="2" name="" tests="2" time="1956.921">
  <testcase classname="" name="setUpClass (tempest.api.compute.admin.test_agents.AgentsAdminTestJSON)" time="0.000">
    {failure}
  </testcase>
  <testcase classname="" name="setUpClass (tempest.api.compute.admin.test_aggregates.AggregatesAdminTestJSON)" time="0.000">
    {failure}
  </testcase>
</testsuite>
""".format(failure=failure_body)  # noqa
    return junit_xml
1913b73087bf7954cc57adfc3b853ddf1bbaee34
32,547
def tvbatch(t, v):
    """Pair x and y data for training and validation.

    Offsets each block by one timestep so that at every timestep t,
    y[t] = x[t + 1].  By convention x/y name training data and xx/yy
    validation data.

    Returns:
        x, y, xx, yy
    """
    train_x, train_y = t[:, :-1], t[:, 1:]
    val_x, val_y = v[:, :-1], v[:, 1:]
    return train_x, train_y, val_x, val_y
707acb9d516fac70db6fa049f2820ff554a7758b
32,548
import re


def extract_sentence_id(tag):
    """Extract the sentence ID of the current sentence.

    Args:
        tag (str): Sentence tag

    Returns:
        str: the sentence ID; "" when *tag* is not a sentence tag; None
        when no id attribute is found.
    """
    if "<s" not in tag:
        return ""
    matches = re.findall(r'id="[a-z0-9]+?"(?=\s)', tag)
    if not matches:
        return None
    return matches[0].replace('"', "").replace("id=", "")
99a24d332e21b5861c74b00fdcb334892eda4b7c
32,550
from typing import List def _make_citation_command_regex(commands: List[str]) -> str: """ A citation command typically has this structure: \\command[prenote][postnote]{keys}[punctuation] where prenote, postnote, and punctuation are all optional. Reference: https://ctan.math.illinois.edu/macros/latex/contrib/biblatex/doc/biblatex.pdf """ command_names = r"(?:" + "|".join([r"\\" + c for c in commands]) + ")" return command_names + r"(?:\[[^\]]*\]){0,2}{([^}]*?)}(?:\[[^\]]*\])?"
3545b58a4e8be5601184d2aa01e45aef53b50560
32,551
import warnings


def validate_record(record):
    """Check that `record` contains a key called "time".

    Args:
        record (dict): a dictionary representing a data record, where the
            keys name the "columns".

    Returns:
        True if there is a key called "time" (it actually checks for
        ``"time"`` (a string) and ``b"time"`` (a binary)).
        False if there is no key called "time".
    """
    if not any(k in record for k in ("time", b"time")):
        warnings.warn(
            'records should have "time" column to import records properly.',
            category=RuntimeWarning,
        )
        # BUG FIX: the docstring promises False when "time" is absent, but
        # the function unconditionally returned True.
        return False
    return True
5e995c438cf197449596622385b9d3aa47846cba
32,552
def _get_range(val, multiplier): """Get range of values to sweep over.""" range_min = max(0, val - val * multiplier / 100.) range_max = val + val * multiplier / 100. ranges = {'initial': val, 'minval': range_min, 'maxval': range_max} return ranges
b0deb1abf80365e3bf48aaca57cc1b1c093dbcd2
32,555
from typing import Callable
import inspect
import functools


def param_attrs(constructor: Callable) -> Callable:
    """
    Decorator for class constructors that sets parameter values as object
    attributes::

        >>> class Foo:
        ...     @param_attrs
        ...     def __init__(self, a, b=1, *, c=True):
        ...         pass
        >>> foo = Foo('a')
        >>> foo.a
        'a'
        >>> foo.b
        1
        >>> foo.c
        True

    :param constructor: the ``__init__`` method being decorated
    """
    params = inspect.signature(constructor).parameters
    positional_names = [name for name, p in params.items()
                        if p.kind == p.POSITIONAL_OR_KEYWORD]
    assert positional_names.pop(0) == 'self'

    @functools.wraps(constructor)
    def wrapper(self, *args, **kwargs):
        # Keyword arguments and declared defaults first ...
        for name, param in params.items():
            if name in kwargs:
                setattr(self, name, kwargs[name])
            elif param.default != param.empty:
                setattr(self, name, param.default)
        # ... then positionally-passed values override the defaults.
        for name, value in zip(positional_names, args):
            setattr(self, name, value)
        constructor(self, *args, **kwargs)

    return wrapper
2c078f22c87154039eba729347c79d17dcb46309
32,556
def invr():
    """Read one line of space-separated integers from stdin, as a list."""
    return [int(token) for token in input().split()]
ceb27dac7bea58e14268949c9ba896827db2957d
32,557
def do_duration(seconds):
    """Jinja template filter to format seconds to humanized duration.

    3600 becomes "1 hour".
    258732 becomes "2 days, 23 hours, 52 minutes, 12 seconds".
    Components equal to zero are omitted entirely.
    """
    m, s = divmod(seconds, 60)
    h, m = divmod(m, 60)
    d, h = divmod(h, 24)
    parts = []
    for value, unit in ((d, 'day'), (h, 'hour'), (m, 'minute'), (s, 'second')):
        if value > 1:
            parts.append('{} {}s'.format(value, unit))
        elif value:  # exactly one -> singular
            parts.append('{} {}'.format(value, unit))
    return ', '.join(parts)
4f995046250d64212e16d8a4f6faf85f6530a38d
32,558
def _DecodeCSVShape(op):
    """Shape function for the DecodeCSV op.

    Every output takes the shape of the records input; each default input
    must be a length-0 or length-1 vector.
    """
    input_shape = op.inputs[0].get_shape()
    # Optionally check that all of other inputs are scalar or empty.
    for default_input in op.inputs[1:]:
        if default_input.get_shape().with_rank(1)[0] > 1:
            raise ValueError(
                "Shape of a default must be a length-0 or length-1 vector.")
    return [input_shape] * len(op.outputs)
22bd949426a595106335a8b68387f4191ce9985b
32,559
def import_solver(SolverSettings):
    """Import a solver module named by ``SolverSettings.solver_type``.

    ``solver_type`` is expected to be the FILENAME of the solver to import.
    """
    return __import__(SolverSettings.solver_type)
f2056e025df2d6e94d6cc207089357140c8bc0fa
32,560
def sol(x):
    """
    Return 1 when the binary string ``x`` represents a multiple of 3, else 0.

    A binary number is divisible by 3 iff the difference between the counts
    of set bits at even and at odd positions is divisible by 3.
    """
    even_ones = sum(1 for i, bit in enumerate(x) if bit == '1' and i % 2 == 0)
    odd_ones = sum(1 for i, bit in enumerate(x) if bit == '1' and i % 2 == 1)
    return 1 if abs(even_ones - odd_ones) % 3 == 0 else 0
958430a97670cb823478ff04a593ee5f4545fc28
32,561
def locateNodes(nodeList, key, value, noNesting=1):
    """
    Find subnodes in the given node where the given attribute has the
    given value.  With ``noNesting`` truthy, children of a matching node
    are not searched; otherwise matches inside matches are collected too.
    """
    if not isinstance(nodeList, list):
        # A single node was passed in: search its children.
        return locateNodes(nodeList.childNodes, key, value, noNesting)
    found = []
    for child in nodeList:
        if not hasattr(child, 'getAttribute'):
            continue
        if str(child.getAttribute(key)) == value:
            found.append(child)
            if noNesting:
                continue
        found.extend(locateNodes(child, key, value, noNesting))
    return found
e5e5b888272a58829eb6f90765e33b3f25b286b4
32,562
def get_confusion_matrix(label, prediction, category):
    """
    Compute confusion-matrix counts for one category.

    :param label: ground-truth labels
    :param prediction: predicted labels (same length as ``label``)
    :param category: the category treated as "positive"
    :return: (tp, fn, fp, tn)
    """
    tp = fn = fp = tn = 0
    for truth, pred in zip(label, prediction):
        if truth == category:
            if pred == category:
                tp += 1
            else:
                fn += 1
        elif pred == category:
            fp += 1
        else:
            tn += 1
    return tp, fn, fp, tn
9cce92365bd06d75d281d8d1e95550016f9b006b
32,564
import torch


def compute_kernel(x, y):
    """
    RBF-style kernel matrix between two sets of rows.

    :param x: tensor of shape (n, k)
    :param y: tensor of shape (m, k)
    :return: tensor of shape (n, m)
    """
    k = float(x.size(1))
    # (n, 1, k) - (1, m, k) -> (n, m, k); mean over k, then scale by k again.
    scaled_sq_dist = (x[:, None, :] - y[None, :, :]).pow(2).mean(dim=2) / k
    return torch.exp(-scaled_sq_dist)
78849c493bf6aba35e5a48895f8b1e4e95d24e49
32,565
def prime(a):
    """Return True if a is prime."""
    if a < 2:
        return False
    if a % 2 == 0:
        return a == 2
    divisor = 3
    while divisor * divisor <= a:
        if a % divisor == 0:
            return False
        divisor += 2
    return True
c086eab2edc2fcb30eec3e2b8e992a266a4fd085
32,566
def generate_project(name='AILE', version='1.0', comment=''):
    """Generate an Slybot project file"""
    return dict(name=name, version=version, comment=comment)
fa73e224754f823177f37fdbe4ba5452ea4388c0
32,568
def linear_forward(current_set, parameter_w, parameter_b):
    """
    Linear step of forward propagation: Z = W.A + b.

    :param current_set: current activations A, numpy array
    :param parameter_w: weight matrix W, numpy array
    :param parameter_b: bias b, numpy array
    :return: (Z, cache) where cache = (A, W, b), kept for backpropagation
    """
    current_z = parameter_w @ current_set + parameter_b
    assert current_z.shape == (parameter_w.shape[0], current_set.shape[1])
    return current_z, (current_set, parameter_w, parameter_b)
e00004faa5a66fa7a1390e778edf584eaa63df85
32,570
def check_login(session):
    """
    Check whether the specified session has a logged-in user.

    :param session: current flask session (mapping)
    :return: True iff the session holds both a truthy google_token
        and a truthy user_id
    """
    has_token = bool(session.get('google_token'))
    has_user = bool(session.get('user_id'))
    return has_token and has_user
cd5651ce622ffd108ea7d0b8c1c4f70b1b4947ab
32,571
def serialize_enum_model(model):
    """Serialize an enum api model into a JSON-serializable object.

    :param model: enum member (anything exposing ``.value``) to serialize
    :return: the member's underlying value
    """
    return model.value
cb68a5346d4c804e545bf29db966ff93468e1a46
32,573
def sanitize_identifier(identifier):
    # type: (str) -> str
    """
    Get sanitized identifier name for identifiers which happen to be
    python keywords, by appending a trailing underscore.
    """
    keywords = frozenset((
        "and", "as", "assert", "break", "class", "continue", "def", "del",
        "elif", "else", "except", "exec", "finally", "for", "from",
        "global", "if", "import", "in", "is", "lambda", "not", "or",
        "pass", "print", "raise", "return", "try", "while", "with",
        "yield",
    ))
    if identifier in keywords:
        return "{}_".format(identifier)
    return identifier
ec6526bd23b1c6a4b09c87b9db7ca14fce44e28f
32,575
def load32(byte):
    """Decode the first four bytes of ``byte`` as a little-endian unsigned int."""
    return byte[0] | (byte[1] << 8) | (byte[2] << 16) | (byte[3] << 24)
9e33b13fcf1d58b27e6915a319c98db4d951ac69
32,576
import os
import base64
import requests


def get_authenticated_session():
    """
    Assuming we already have a refresh token, get a new access token and set
    the corresponding auth header in a new session object.
    """
    client_id = os.environ.get("SPOTIFY_CLIENT_ID")
    client_secret = os.environ.get("SPOTIFY_CLIENT_SECRET")
    refresh_token = os.environ.get("SPOTIFY_REFRESH_TOKEN")

    # Basic auth header for the token-refresh request.
    basic_auth = base64.b64encode(f"{client_id}:{client_secret}".encode()).decode()

    session = requests.Session()
    session.headers["Authorization"] = f"Basic {basic_auth}"
    token_response = session.post(
        "https://accounts.spotify.com/api/token",
        {"grant_type": "refresh_token", "refresh_token": refresh_token},
    )
    assert token_response.status_code == 200, token_response.text

    access_token = token_response.json()["access_token"]
    session.headers["Authorization"] = f"Bearer {access_token}"
    session.headers["Content-Type"] = "application/json"
    return session
f2af848885b6374352150b9a5ea96208c97842ba
32,577
def pf(x, A, B, C):
    """Power function A * x**B + C, used for fitting the CCS vs. m/z data."""
    return A * x ** B + C
6345634ea694564dc66df66c8a50bdaa7b88f0d7
32,578
def get_jpg_images(soup):
    """Extract the URLs of JPG images from the HTML of a reddit category.

    This method doesn't extract Flickr images.
    """
    images = []
    for anchor in soup.findAll('a', href=True):
        url = anchor['href']
        # keep only the first occurrence, preserving order
        if url.lower().endswith('jpg') and url not in images:
            images.append(url)
    return images
f8feec31958f9694b6bc9644944e4a13f44aa3cd
32,580
def create_report(prediction, answers):
    """
    Build the user-facing report string from the model's prediction.
    ``answers`` is accepted for interface compatibility but not used here.
    """
    header = (
        'На сегодняшний день ваша вероятность заболевания составляет '
        + str(prediction) + '%' + '\n\n Рекомендации: \n'
    )
    if prediction > 30:
        advice = 'Вероятность того, что вы инфецированы слишком высока. Что делать: \n 1. Сохранять спокойствие. 100% гарантии вам может дать только медицинский тест. \n 2. Связаться с врачом. \n 3. Исключить, на сколько это возможно, любые контакты с родственниками и людьми вокруг.'
    else:
        advice = 'Хоть бот и вычислил достаточно малую вероятность того, что вы заражены, но это не значит, что вы действительно полностью здоровы. Помните, что самым точным показателем того, что вы здоровы, является тест на Коронавирус'
    return header + advice
4bf41597561cf9a0b196b67566c854ca8129cd61
32,581
def _tiered(count, tiers, default):
    """Return the score of the first (limit, score) tier with count <= limit,
    or ``default`` when the count exceeds every limit."""
    for limit, score in tiers:
        if count <= limit:
            return score
    return default


def supply_score(pickups, pickupsfromcarepackages):
    """
    supply_score = 80% * supplies score + 20% * care packages score

    The five copy-pasted if/elif chains of the original are collapsed into
    one tier-lookup helper; all thresholds and scores are unchanged.

    :type pickups: dict mapping supply category name to pickup count
    :type pickupsfromcarepackages: int
    :rtype supply_score: int
    """
    attachment = pickups.get("Attachment", 0)
    use = pickups.get("Use", 0)
    ammunition = pickups.get("Ammunition", 0)
    equipment = pickups.get("Equipment", 0)
    weapon = pickups.get("Weapon", 0)

    care_package_score = 100 if pickupsfromcarepackages > 0 else 0
    attachment_score = _tiered(attachment, [(5, 50), (9, 75)], 100)
    use_score = _tiered(use, [(5, 70), (10, 85)], 100)
    equipment_score = _tiered(equipment, [(5, 75), (10, 90)], 100)
    weapon_score = _tiered(weapon, [(1, 75), (2, 90)], 100)
    ammunition_score = _tiered(ammunition, [(5, 50), (10, 75), (14, 90)], 100)

    supplies_score = (
        (equipment_score + use_score + weapon_score + ammunition_score) * 0.225
        + attachment_score * 0.1
    )
    return int(supplies_score * 0.8 + care_package_score * 0.2)
246ec22b7ff2c3367adccb755ee1a2d536d5e056
32,582
def build_repr(instance, fields):
    """
    Build the string representation for an instance.

    Args:
        instance: The instance to build the repr for.
        fields: A list of fields to include in the repr.

    Returns:
        A string describing the provided instance including representations
        of all specified fields.
    """
    parts = ", ".join(f"{name}={getattr(instance, name)!r}" for name in fields)
    return f"{type(instance).__name__}({parts})"
832b36b9dd93de7e7e22b71693d2b5dbac3749ad
32,584
import re
from typing import Counter


def index_of_coincidence(value, alphabet='ABCDEFGHIJKLMNOPQRSTUVWXYZ'):
    """
    Returns the index of coincidence for a given ciphertext string (a value
    used to determine whether a poly-alphabetic cipher was used or not).

    :param value: a ciphertext string
    :param alphabet: The letters to be taken into account for the index
    :return: The index of coincidence of the string
    """
    # Normalise case and strip everything outside the alphabet.
    cleaned = re.sub(f'[^{alphabet}]', '', value.upper())
    counts = Counter(cleaned)
    n = len(cleaned)
    denominator = n * (n - 1)
    return sum(counts[c] * (counts[c] - 1) for c in alphabet) / denominator
d49e53b179479790903f25b228efcd7299ca43e9
32,585
import re


def per_lvl(raw_text):
    """
    Replace ``~~value~~`` markers with per-level percentages.

    :param raw_text: string possibly containing ``~~value~~``
    :return: string with "(+X% за лвл)" substituted, or the input unchanged
        when no marker is present
    """
    marker = re.compile('~~.{3,5}~~')
    if not marker.search(raw_text):
        return raw_text
    # The numeric value is taken from the first marker; every marker is
    # replaced with the same formatted text.
    _, raw_value, _ = raw_text.split('~~', maxsplit=2)
    percent = float(raw_value) * 100
    return marker.sub('(+{}% за лвл)'.format(percent), raw_text)
7606ddac027f491f2bf954aee164205b604f40ef
32,586
import importlib


def stringToClass(cls_str):
    """
    Converts the string representation of a class to a class object.

    :param str cls_str: string representation of a class, as produced by
        ``str(cls)``, e.g. ``"<class 'pkg.mod.Cls'>"``
    :return: the class object

    Bug fix: the previous implementation relied on
    ``exec("this_class = %s" % cls)`` to rebind a local variable, but on
    Python 3 ``exec`` cannot rebind function locals, so the function always
    returned ``None``.  Importing via :mod:`importlib` fixes that and avoids
    executing generated code.
    """
    dotted_path = cls_str.split(" ")[1].replace("'", "").replace(">", "")
    module_path, _, class_name = dotted_path.rpartition(".")
    module = importlib.import_module(module_path)
    return getattr(module, class_name)
9e64e492a5f9fbd103b463fb14c33973e2d4e037
32,589
import torch


def get_entropy_loss(memory, args, i_agent):
    """Compute entropy loss for exploration.

    Args:
        memory (ReplayMemory): Class that includes trajectories
        args (argparse): Python argparse that contains arguments
        i_agent (int): Index of agent to compute entropy loss

    Returns:
        entropy_loss (torch.Tensor): Entropy loss for encouraging exploration
    """
    _, _, entropies, _, _ = memory.sample()
    entropy = torch.stack(entropies[i_agent], dim=1)
    expected_shape = (args.traj_batch_size, args.ep_horizon)
    assert entropy.shape == expected_shape, \
        "Shape must be: (batch, ep_horizon)"
    # Sum entropy over the horizon, average over trajectories, and weight
    # negatively so that maximizing entropy minimizes the loss.
    return -args.entropy_weight * entropy.sum(dim=1).mean()
0d031100d17b64402340f1c0626a04fc083be8a0
32,591
from typing import List
from typing import Tuple
import struct


def vox_dict(entries: List[Tuple[str, str]]) -> bytes:
    """Produces the binary representation of a dictionary for the .vox
    format.  Note that all keys and values are strings.

    Examples
    --------
    >>> vox_dict( [ ( '_t', '10 5 2' ) ] )

    This dictionary (from the 'nTRN' chunk) defines a translation.
    """
    out = bytearray(struct.pack('<I', len(entries)))
    for key, value in entries:
        # Each key and value is written as a little-endian length prefix
        # followed by the UTF-8 bytes.
        for text in (key, value):
            encoded = bytes(text, 'UTF-8')
            out += struct.pack('<I', len(encoded))
            out += encoded
    return bytes(out)
c8622ad47397fd4b93104ccab578a96ab00ca6dd
32,592
def find_short(strg):
    """Return length of shortest word in sentence.

    An empty/whitespace-only string yields ``float('inf')``, matching the
    behaviour of the loop-based implementation.
    """
    return min((len(word) for word in strg.split()), default=float('inf'))
87e99a5754ede74d74e76199c176f956d424fc44
32,593
def add_properties(objectclass, property_list):
    """Generate class properties for a model that provide read-only access
    to elements from the internal ._data data structure.

    :param objectclass: The class to which properties should be added
    :param property_list: A list of property name + data structure key +
        optional docstring tuples.  Property names then read from the given
        data structure keys.
    """
    for name, data_key, *doc in property_list:
        # Bind data_key as a default argument so that each property keeps
        # its own key (avoids the late-binding closure pitfall).
        def _getter(self, _key=data_key):
            return self._data[_key]
        if doc:
            _getter.__doc__ = doc[0]
        setattr(objectclass, name, property(_getter))
268b83bd1b794ede60b7ebce27586dc499004730
32,596
from pathlib import Path
import csv


def display_playlist_tracks(playlist):
    """Display playlist tracks from a saved csv file.

    Reads ``playlist_tracks_csv/<playlist>.csv`` (columns: name, artists,
    spotify_url), prints every track with its index, and returns the list
    of formatted track strings.

    Bug fix: the previous ``while i != len(tracks) - 1`` loop skipped the
    last track and looped past the end (IndexError) on an empty playlist.
    """
    fpath = (Path('playlist_tracks_csv/') / '{}'.format(playlist)).with_suffix('.csv')
    tracks = []
    line_count = 1
    with fpath.open(mode='r') as csv_file:
        for row in csv.DictReader(csv_file):
            tracks.append("{} - {}: spotify_url: {}".format(
                row['name'], row['artists'], row['spotify_url']))
            line_count += 1
    for i, track in enumerate(tracks):
        print("{}. {}".format(i, track))
    print(f'Processed {line_count} lines.')
    return tracks
20fd48e1d90ef1c86dd0542ce9552666da49742a
32,597
import argparse


def parse_args(msg) -> argparse.Namespace:
    """Custom argument parser.

    Args:
        msg (str): Description help message.

    Returns:
        argparse.Namespace: Namespace of the parsed command-line arguments.
    """
    parser = argparse.ArgumentParser(
        description=msg,
        formatter_class=argparse.RawDescriptionHelpFormatter,
    )
    parser.add_argument("-i", help="Input fasta file.")
    parser.add_argument(
        "-cv",
        help="Input csv file containing the patterns to look for"
             "with their corresponding id's and descriptions to use for renaming.",
    )
    parser.add_argument(
        "-o", default="output.fasta", type=str,
        help="Optional argument: Name of output fasta file. Default is output.fasta",
    )
    parser.add_argument(
        "-inf", default=False,
        help="Optional boolean argument: Display info of header renaming. Default is False.",
    )
    parser.add_argument(
        "-cnt", default=False,
        help="Optional boolean argument: Add a counter when naming the new headers. Default option is False.",
    )
    parser.add_argument(
        "-id", default=True,
        help="Optional boolean argument: Choose whether to rename header id's. Default is True.",
    )
    parser.add_argument(
        "-de", default=True,
        help="Optional boolean argument: Choose whether to rename header descriptions. Default is True.",
    )
    return parser.parse_args()
e47a986e3d037eb588ae80816764ef27481f32fd
32,598
def every_nd(fn, arr):
    """
    Returns bool, true if fn is true for all elements of arr

    Parameters
    ----------
    fn: callable applied to each scalar element
    arr: numpy.ndarray

    Returns
    -------
    bool
    """
    return all(fn(element) for element in arr.flatten())
f45ea57f23300fe3fa1478085516c1ef5f430cd4
32,600
import getpass


def get_user_name():
    """Return the current user name as reported by the operating system."""
    return getpass.getuser()
6c45e5ee9b15f2b70dfa76743689b4174b17f8d0
32,602
def turn_up_twice(value: int) -> int:
    """
    Turn up cells in the given region twice, i.e. increase brightness by 2.
    """
    return 2 + value
b9f119d5eeb7b1e79ce50befa2f4efc2fdb8396d
32,604
def get_corepy_output(code, inst):
    """Take an instruction, and return a hex string of its encoding, as
    encoded by CorePy.  (``code`` is unused but kept for interface
    compatibility.)"""
    return "".join("%02x" % byte_value for byte_value in inst.render())
26816cc8d424bfeb6db9e3a404f22a648c0d0d41
32,605
def cleanup_path(orig_path):
    """Escape shell-special characters in a path string with backslashes."""
    specials = ' ()"[]&,!;$' + "'"
    # One-pass translation; equivalent to chained str.replace calls since
    # each special character is escaped exactly once.
    return orig_path.translate({ord(ch): '\\' + ch for ch in specials})
48d8e75047e2abd1aec9f17a1c0aafccba231a69
32,607
def testFunction(x): """ I{For testing only.} """ return 2*x
355348e4a62eb7a99b9a1ba55fa865f33ffde2ad
32,608