content
stringlengths
35
416k
sha1
stringlengths
40
40
id
int64
0
710k
import re


def getencoding(path):
    """Detect the source-file encoding declared in the leading two lines.

    Follows PEP 263: a declaration looks like ``# -*- coding: utf-8 -*-``
    or ``# coding=utf-8`` and must appear on line 1 or 2.

    :param str path: pathname of the source file
    :rtype: str or None
    :return: the declared encoding name, or None if not found
    """
    # PEP 263 allows letters, digits, '-', '.' and '_' in encoding names.
    # BUG FIX: the original pattern r"coding[:=]\s*(\w)+" captured only the
    # last character, and group(0) returned the whole match including the
    # "coding:" prefix instead of just the encoding name.
    coding = re.compile(r"coding[:=]\s*([-\w.]+)")
    with open(path, encoding="ascii") as in_:
        for _ in (0, 1):
            try:
                mo = coding.search(in_.readline())
            except UnicodeDecodeError:
                # Non-ASCII byte on this line: skip it and try the next.
                continue
            if mo:
                return mo.group(1)
    return None
8976b094b54d9af60f6ecd29bd077bb4040cb2ab
16,215
import asyncio


def in_async_call(loop, default=False):
    """Whether this call is currently within an async call"""
    try:
        running = asyncio.get_running_loop()
    except RuntimeError:
        # No *running* loop in this thread.  If the event loop isn't
        # running anywhere, it _could_ still be started later in this
        # thread, so report the caller-supplied default.
        return default if not loop.asyncio_loop.is_running() else False
    return loop.asyncio_loop is running
9ed888e0d8ee27d18c843a184071d9596880c039
16,216
def list_format(items, fmt):
    """Apply ``fmt.format`` to every element of ``items`` and return the results."""
    return [fmt.format(item) for item in items]
bf4824ee8726457ace69bedf1532610f3efca2c1
16,217
def dictlist_lookup(dictlist, key, value):
    """
    From a list of dicts, retrieve those elements for which <key> is <value>.
    """
    matches = []
    for entry in dictlist:
        if entry.get(key) == value:
            matches.append(entry)
    return matches
94e90b29c8f4034357be2b8683115f725c24b374
16,218
import sys


def contentSplit(content, fragmentation_len=1000, offset_str="\n\n"):
    """Split alert text into fragments no longer than ``fragmentation_len``.

    The WeChat Work message API limits a single message to 2048 bytes, so
    long alert text has to be sent in several pieces.  To keep each piece
    readable, a fragment is cut back to the last complete alert (alerts
    are separated by ``offset_str``) instead of being cut mid-alert.

    :param content: the full alert text
    :param fragmentation_len: maximum fragment length, default 1000
    :param offset_str: separator between complete alerts
    :return: list of fragments
    """
    fragments = []
    # BUG FIX: the original measured sys.getsizeof(content), which counts
    # the Python str object (including ~49 bytes of overhead), not the
    # payload; we measure the encoded payload instead.
    while len(content.encode("utf-8")) > fragmentation_len:
        window = content[:fragmentation_len]
        cut = window.rfind(offset_str)
        if cut <= 0:
            # BUG FIX: the original used rindex(), which raises ValueError
            # when the window contains no separator.  Hard-split instead so
            # the loop always makes progress.
            cut = fragmentation_len
        fragments.append(content[:cut])
        content = content[cut:]
    # Drop an empty tail or a tail that is only the separator itself.
    if content and content != offset_str:
        fragments.append(content)
    return fragments
9bb3f865882758db1d0f7cca73373cb2793ee577
16,220
def df_to_geojson(df, geometry, coordinates, properties):
    """
    Convert a pandas DataFrame to a GeoJson dictionary.

    Parameters
    ----------
    df : pandas DataFrame
        DataFrame containing the geojson's informations.
    geometry : String
        The type of the geometry (Point, Polygon, etc.).
    coordinates : String
        The DataFrame column's name of the coordinates.
    properties : list of String elements.
        The DataFrame column's names of the properties attributes.

    Returns
    -------
    geojson : Dict
        GeoJson dictionary with the geometry, coordinates, and properties
        elements.
    """
    geojson = {'type': 'FeatureCollection', 'features': []}
    for _, row in df.iterrows():
        feature = {
            'type': 'Feature',
            'properties': {},
            # BUG FIX: use the row's coordinate value; the original stored
            # the column *name* (a constant string) in every feature.
            'geometry': {'type': geometry, 'coordinates': row[coordinates]},
        }
        for prop in properties:
            # e.g. "user.Name" -> "name": lower-case, keep last dotted part.
            normalize_prop = prop.lower().split('.')[-1]
            feature['properties'][normalize_prop] = row[prop]
        geojson['features'].append(feature)
    return geojson
75fa808dca18b89e2e259425b9b9e67bb8ac2415
16,221
def get_bytes(block_nums):
    """
    Takes an array of block integers and turns them back into a bytes
    object. Decodes using base 256.

    :param block_nums: Blocks (list of ints)
    :return: Original data (bytes)
    """
    pieces = []
    for num in block_nums:
        digits = []
        while num:
            num, byte = divmod(num, 256)
            digits.append(byte)
        # Digits come out least-significant first; flip to big-endian.
        pieces.append(bytes(reversed(digits)))
    return b''.join(pieces)
429325cd37a3f821b751237659626907eb19d4a9
16,222
def check_positive_int(string):
    """Convert a string to integer and check if it's positive.

    Raise an error if not. Return the int.
    """
    value = int(string)
    if value <= 0:
        raise RuntimeError("Integer must be positive.")
    return value
1e32994a2d9d58b1361a9228b78210398c19c94e
16,225
def clamp_value(my_value, min_value, max_value):
    """Limit value by min_value and max_value."""
    upper_bounded = min(my_value, max_value)
    return max(upper_bounded, min_value)
f3478b08f3e2e38ba3459d487adc9c462468c32e
16,226
import argparse


def usage():
    """
    Build and return the command-line parser for the Rfam fasta export
    tool.
    """
    parser = argparse.ArgumentParser(
        description="Rfam fasta export tool", epilog='')
    # Required arguments live in their own group so --help lists them
    # separately from the optional ones.
    required = parser.add_argument_group("required arguments")
    required.add_argument("--sql", type=str, required=True,
                          help="query to execute (string or .sql file)")
    parser.add_argument("--infile", type=str, default=None,
                        help="sequence database file (rfamseq11 by default)")
    parser.add_argument("--filename", type=str, default=None,
                        help="an output filename")
    required.add_argument("--out", type=str, required=True,
                          help="path to output directory")
    return parser
a3c749a3310beb0fc97ee900bbaa866e636b847a
16,227
def _buses_with_gens(gens): """ Return a list of buses with generators """ buses_with_gens = list() for gen_name, gen in gens.items(): if not gen['bus'] in buses_with_gens: buses_with_gens.append(gen['bus']) return buses_with_gens
5cf9e918a55e140053bceb538cd1f15b331c253d
16,229
import hashlib


def get_key(model_params, mw_range, bins):
    """
    Generate a hash key based on the model parameters used and the fited
    mw_range and number of bins.
    """
    hasher = hashlib.md5()
    for part in (model_params, mw_range, bins):
        hasher.update(str(part).encode('ASCII'))
    return hasher.hexdigest()
50fe5d13bf9f897c76ee144c576212ec483e1948
16,230
def list_merge_values(d, d2, in_place=True):
    """Merge dict-of-lists ``d2`` into ``d``, concatenating list values.

    Parameters
    ----------
    d : dict
        Mapping of keys to lists (the merge target).
    d2 : dict
        Mapping of keys to lists to merge in.
    in_place : bool, optional (default is True)
        Do the update in-place.  When False, ``d`` is left untouched and
        a new dict is returned.

    Examples
    --------
    >>> list_merge_values({1:[2]}, {1:[3]})
    {1: [2, 3]}

    Returns
    -------
    dict
        The merged mapping (``d`` itself when ``in_place`` is True).
    """
    if in_place:
        d3 = d
    else:
        # BUG FIX: a plain d.copy() is shallow, so extending a value list
        # below still mutated the lists owned by ``d``.  Copy each list so
        # the caller's dict really is untouched.
        d3 = {key: list(val) for key, val in d.items()}
    for key, val in d2.items():
        if key not in d3:
            d3[key] = val
        else:
            d3[key].extend(val)
    return d3
2474d00e61ce3590b9a28590b436ef5e7c7cc470
16,231
from typing import Any
import hashlib


def md5(item: Any) -> str:
    """Compute the MD5 hash of an object.

    Args:
        item: The object to compute the MD5 hash on.

    Returns:
        The MD5 hash of the object.
    """
    digest = hashlib.md5(str(item).encode())
    return digest.hexdigest()
5589eb0a38b9f099a7f4b418d719b9c089110d5a
16,237
from typing import List
from typing import Tuple
import os
import logging


def path_renaming(path_lst: List[str], search_value: str, new_value: str,
                  renaming: bool = False) -> Tuple[int, int, List[str]]:
    """
    Compute (and optionally perform) renames for the given paths.

    Each path's basename has ``search_value`` replaced by ``new_value``;
    when ``renaming`` is True the rename is executed on disk.

    :returns Tuples containing the number of directories, files and the
        names of them after renaming
    """
    renamed_paths = []
    dircount, filecount = 0, 0
    for old_path in path_lst:
        parent, basename = os.path.split(old_path)
        new_path = os.path.join(parent, basename.replace(search_value, new_value))
        renamed_paths.append(new_path)
        if renaming:
            os.rename(old_path, new_path)
        # NOTE(review): counting inspects the *new* path, so with
        # renaming=False the counters stay 0 unless the target already
        # exists — confirm that dry-run counts are intentional.
        if os.path.isdir(new_path):
            dircount += 1
        elif os.path.isfile(new_path):
            filecount += 1
        logging.info(f" working dir: {os.getcwd()!r} | naming: {old_path!r} --> {new_path!r}",)
    return (filecount, dircount, renamed_paths)
15e2026260daed5cd631cec3ef49c88263f3ca3d
16,240
import shlex


def parse_value(s: str) -> str:
    """
    >>> parse_value('"a"')
    'a'
    >>> parse_value('"CentOS Linux"')
    'CentOS Linux'
    >>> parse_value('Hello World')
    'Hello World'
    """
    lexer = shlex.shlex(s, posix=True)
    tokens = list(lexer)
    # A single shell token means the input was one (possibly quoted)
    # value; otherwise return the string untouched.
    if len(tokens) != 1:
        return s
    return tokens[0]
0e3c0e84ed3fa71dceafab85405f8eda8ce0a0ac
16,241
import os


def get_label_name(image_path):
    """Get label name from image path"""
    # The label is the name of the directory directly containing the file.
    parts = image_path.split(os.path.sep)
    if len(parts) >= 2:
        return parts[-2]
    raise ValueError("invalid image path:{}".format(image_path))
8be4f52f377f2c58e4dd84f3067a5c389443c426
16,243
def SortLists(sortbylist, otherlists, reverse=False):
    """This function sorts lists similar to each list being a column of
    data in a spreadsheet program and choosing one column to sort by.

    The sortbylist is the column or list that you wish to sort by and
    otherlists is a list of the other lists or columns to sort.  Reverse
    is passed to the underlying sort.

    Returns a tuple: the sorted sortbylist followed by each reordered
    list of otherlists.
    """
    # BUG FIX: sort a permutation of the indices instead of looking each
    # sorted value up with list.index(), which mapped every duplicate key
    # to its first occurrence and therefore duplicated/garbled rows.
    order = sorted(range(len(sortbylist)), key=sortbylist.__getitem__,
                   reverse=reverse)
    newlist = [sortbylist[i] for i in order]
    bigoutlist = [[other[i] for i in order] for other in otherlists]
    return (newlist,) + tuple(bigoutlist)
132678fa35f008e4f987ffca57e729840438ca72
16,244
def P_bremsstrahlung(k, Te, ne):
    """Bremsstrahlung power coefficient, in W m^3.

    NOTE(review): ``ne`` is accepted but unused by the formula — confirm
    against the original reference.
    """
    prefactor = 1.53e-38
    return prefactor * (k + 1) ** 2 * Te ** 0.5
11959d1616bdc25bea26f4f4329447d8e570f162
16,246
from typing import Dict


def gen_tag_list(db_config: Dict):
    """
    Generate tag list from tag string
    """
    try:
        tag_string = db_config['tags']
    except KeyError:
        return []
    return tag_string.split(',')
f612f5cc85e0f61041d65c6782d3a427b6538a95
16,247
from typing import Any


def is_json_string(value: Any) -> bool:
    """Check if the provided string looks like json."""
    # NOTE: we do not use json.loads here as it is not strict enough
    if not isinstance(value, str):
        return False
    return value.startswith("{") and value.endswith("}")
16418c37fc1d348e4d1e5805c485f181e514d537
16,249
def flux2zhr(flux, pop_index=2.0):
    """Eqn 41 from (Koschak 1990b)

    Parameters
    ----------
    flux : float
        [1000^-1 km^-2 h^-2]
    pop_index : float, optional
        Population index r (default 2.0).
    """
    r = pop_index
    scaled_flux = flux / 1000.0
    denominator = (13.1 * r - 16.45) * (r - 1.3) ** 0.748
    return scaled_flux * 37200.0 / denominator
945f49d3429989e5914b5753e2d2fa3610172ec6
16,253
def get_filediff_encodings(filediff, encoding_list=None):
    """Return a list of encodings to try for a FileDiff's source text.

    If the FileDiff already has a known encoding stored, then it will
    take priority. The provided encoding list, or the repository's list
    of configured encodings, will be provided as fallbacks.

    Args:
        filediff (reviewboard.diffviewer.models.filediff.FileDiff):
            The FileDiff to return encodings for.

        encoding_list (list of unicode, optional):
            An explicit list of encodings to try. If not provided, the
            repository's list of encodings will be used instead (which is
            generally preferred).

    Returns:
        list of unicode:
        The list of encodings to try for the source file.
    """
    if encoding_list is None:
        encoding_list = filediff.get_repository().get_encoding_list()

    preferred = filediff.encoding

    if not preferred:
        return list(encoding_list)

    # Stored encoding first, then the configured fallbacks, skipping any
    # duplicate of the stored encoding.
    return [preferred] + [enc for enc in encoding_list if enc != preferred]
5c951bbc6266daa058de58bdeb1382ea6dc93ada
16,254
def get_group_command_url(environment: str, enterprise_id: str, group_id: str) -> str:
    """
    Build and return pipeline url for scapi endpoint
    :param environment:
    :param enterprise_id:
    :param group_id:
    :return: Url
    """
    return (
        f'https://{environment}-api.esper.cloud/api/enterprise/'
        f'{enterprise_id}/devicegroup/{group_id}/command/'
    )
34402dbe263ddd5cd4b04b247beb09701f2b2cc6
16,256
def mutate_query_params(request, mutations):
    """
    Return a mutated version of the query string of the request.

    The values of the `mutations` dict are interpreted thus:

    * `None`, `False`: Remove the key.
    * Any other value: Replace with this value.

    :param request: A HTTP request.
    :type request: django.http.HttpRequest
    :param mutations: A mutation dict.
    :type mutations: dict[str, object]
    :return: query string
    """
    query = request.GET.copy()
    for key, value in mutations.items():
        # Note: membership uses ==, so 0 also matches False and removes
        # the key (matching the historical behavior).
        if value in (None, False):
            query.pop(key, None)
        else:
            query[key] = value
    return query.urlencode()
215a79ce82859eb415454876c1c80cdfdb34d7f6
16,257
import math
import random


def Move(Location, xT):
    """Return the new location (as a tuple) of a particle after it has
    isotropically scattered.

    Location: (x, y, z) of the particle before scattering
    xT:       total cross section of the medium
    """
    # Sample the distance to collision from an exponential distribution.
    # BUG FIX: log(random()) is negative since random() < 1, so the
    # original produced a negative "distance"; the standard inverse-CDF
    # sampling is -ln(u)/xT.
    Distance = -(1 / xT) * math.log(random.random())

    # Sample a new direction uniformly over the unit sphere.
    phi = 2 * math.pi * random.random()
    u = 2 * random.random() - 1
    v = math.sqrt(1 - u * u) * math.cos(phi)
    w = math.sqrt(1 - u * u) * math.sin(phi)

    # Advance the particle along the sampled direction.
    x = Location[0] + Distance * u
    y = Location[1] + Distance * v
    z = Location[2] + Distance * w
    return (x, y, z)
5902de845db37e5848fbf64954f971fcdce60c89
16,258
def get_color(col, color):
    """Support the deprecated ``col`` argument alongside ``color``.

    Behavior (as implemented):

    * both None             -> default 'C0'
    * ``col`` is None       -> return ``color``
    * ``col`` is an int     -> return 'C<col>' (even if ``color`` is set)
    * ``col`` anything else -> ValueError

    NOTE(review): the original docstring claimed ``color`` always wins,
    but the code gives ``col`` priority when both are provided — confirm
    the intended precedence.
    """
    if col is None:
        return color if color is not None else 'C0'
    if isinstance(col, int):
        return 'C{}'.format(col)
    raise ValueError("`col` must be an integer. Consider using `color` instead.")
b4e7cbeacca0e730cb2fa5f320318da83cabfc1a
16,261
import copy


def remove_hidden_options(config, whitelist):
    """ Remove any hidden options not whitelisted

    An option is "hidden" when its name starts with an underscore.  An
    entry is removed when it contains at least one hidden option that is
    not in ``whitelist``.

    :param config: list of mappings (mutated in place)
    :param whitelist: hidden option names allowed to stay
    :return: the (mutated) config list
    """
    for entry in copy.copy(config):
        # BUG FIX: the original called config.remove(entry) once per
        # offending key, raising ValueError when an entry contained two
        # or more non-whitelisted hidden options.
        if any(func.startswith("_") and func not in whitelist
               for func in entry):
            config.remove(entry)
    return config
73435c65559c7982c591e23d46b58048ac101d8c
16,262
from datetime import datetime


def played_at_to_date(s):
    """Parse a played-at timestamp such as '2022-01-09T19:17:33.720Z'
    into a (naive) ``datetime``.
    """
    fmt = "%Y-%m-%dT%H:%M:%S.%fZ"
    return datetime.strptime(s, fmt)
2db8b82cc875275b8a40fd7e6ab752a7b03e221d
16,263
import re


def GetAge(description):
    """
    :param description: A description possibly containing the borrower's age.
    :return: The borrower's age as an int, if not found, returns None.
    """
    # Each entry: (pattern, index of the age group, True when the group
    # holds a birth year rather than an age).  Patterns are tried in the
    # same order as the original nested lookups.
    patterns = [
        (r"([1-9][0-9])( |\-)(years old|years of age|year old|year\-old)", 0, False),
        (r" (is|aged?) ([1-9][0-9])", 1, False),
        (r"(, )([1-9][0-9])(, )", 1, False),
        (r"(born in) (19[0-9][0-9])", 1, True),
    ]
    for pattern, group, is_birth_year in patterns:
        found = re.findall(pattern, description)
        if found:
            try:
                number = int(found[0][group])
            except Exception:
                return None
            return 2016 - number if is_birth_year else number
    return None
46609ee32c5145e3492c6f314d6324a252a3845b
16,264
def arg_is_natural_num(arg):
    """Return whether the string arg contains a natural number.

    >>> arg_is_natural_num('123')
    True
    >>> arg_is_natural_num('0')
    True
    >>> arg_is_natural_num('-1')
    False
    >>> arg_is_natural_num('1.5')
    False
    >>> arg_is_natural_num('foo2')
    False
    >>> arg_is_natural_num('2foo')
    False
    """
    # Non-empty and every character a digit — equivalent to str.isdigit().
    return bool(arg) and all(ch.isdigit() for ch in arg)
545a127121c168be54f140f42cfbbd6b5ef7eef3
16,265
def _eigenvals(mat,roundto):
    """
    Returns the eigenvalues using QR algorithm

    :param mat: a square matrix object (project type) exposing at least
        ``isSquare``, ``d0``, ``det``, ``matrix``, ``Q``, ``t``,
        ``QR_ITERS``, ``PRECISION``, ``isSymmetric``, ``diags``,
        ``trace`` and ``_cMat`` — presumably the library's own Matrix
        class; TODO confirm the exact contract.
    :param roundto: rounding helper called as roundto(value, precision,
        True); semantics assumed from usage — verify against its
        definition.
    :return: list of eigenvalues (floats and/or complex numbers)
    """
    assert mat.isSquare and mat.d0>=2, "Can't find eigenvalues for non-square matrices"
    # 2x2 case: solve the characteristic polynomial directly from the
    # trace and determinant (set() collapses a repeated eigenvalue).
    if mat.d0==2:
        d=mat.det
        tr=mat.matrix[0][0]+mat.matrix[1][1]
        return list(set([(tr+(tr**2 - 4*d)**(1/2))/2,(tr-(tr**2 - 4*d)**(1/2))/2]))
    eigens = []
    # Initial similarity transform with the Q factor, then repeated QR
    # iterations drive a1 toward (quasi-)triangular form.
    q=mat.Q
    a1=q.t@mat@q
    for i in range(mat.QR_ITERS):#Iterations start
        qq=a1.Q
        a1=qq.t@a1@qq
    #Determine which values are real and which are complex eigenvalues
    if mat.isSymmetric:#Symmetrical matrices always have real eigenvalues
        return a1.diags
    #Wheter or not dimensions are odd
    isOdd=(a1.d0%2)
    precision = mat.PRECISION
    #Decide wheter or not to skip the bottom right 2x2 matrix
    # If the subdiagonal entry next to the bottom-right corner is
    # (numerically) zero, that corner is already an eigenvalue on its own.
    if a1._cMat:
        neighbor = a1[-1,-2]
        if round(neighbor.real,precision)==0 and round(neighbor.imag,precision):
            eigens.append(a1[-1,-1])
    else:
        if round(a1[-1,-2],precision)==0:
            eigens.append(a1[-1,-1])
    #Create rest of the eigenvalues from 2x2 matrices
    ind=0
    while ind<a1.d0-1:
        # Take the next 2x2 block on the diagonal.
        mat = a1[ind:ind+2,ind:ind+2]
        ind+=1+isOdd
        #Decide wheter or not to skip the top right corner 2x2 matrix
        done=0
        # A zero subdiagonal entry means the block is already triangular,
        # so its top-left element is a real eigenvalue by itself.
        if a1._cMat:
            if round(mat[1,0].real,precision)==0 and round(mat[1,0].imag,precision):
                eigens.append(mat[0,0])
                ind-=isOdd
                done=1
        elif round(mat[1,0],precision)==0:
            eigens.append(mat[0,0])
            ind-=isOdd
            done=1
        #2x2 matrices in the middle
        if not done:
            ind+=1-isOdd
            # Conjugate eigenvalue pair of the 2x2 block:
            # lambda = trace/2 +- sqrt(det - (trace/2)^2) * i
            r = mat.trace/2
            v = (mat.det - r**2)**(1/2)
            r = complex(complex(roundto(r.real,precision,True)),complex(roundto(r.imag,precision,True)))
            v = complex(complex(roundto(v.real,precision,True)),complex(roundto(v.imag,precision,True)))
            c1 = complex(r,v)
            c2 = complex(r,v*(-1))
            # Collapse to a plain float when the imaginary part vanished.
            if c1.imag==0:
                c1 = c1.real
            if c2.imag==0:
                c2 = c2.real
            eigens.append(c1)
            eigens.append(c2)
    return eigens
e1f448602625ff144337b8997a2fefbe2d74ab16
16,266
def stations_by_river(stations):
    """Creates a dictionary of stations that are located on the same river.
    The river is the key for this dictionary.

    :param stations: iterable of objects with ``river`` and ``name``
    :return: dict mapping river name -> list of station names, in input order
    """
    # Single pass instead of the original O(n^2) nested loops: group the
    # station names by river; within a river the input order is kept,
    # which matches the original result exactly.
    river_dictionary = {}
    for station in stations:
        river_dictionary.setdefault(station.river, []).append(station.name)
    return river_dictionary
cf6d324c10ecb756dfa4c506adbbd16316a0d500
16,267
def _shuffle_leaky(all_ratings_df, n_train): """Shuffles and splits the ratings allowing overlap in the ref sentences.""" all_ratings_df = all_ratings_df.sample(frac=1, random_state=555) all_ratings_df = all_ratings_df.reset_index(drop=True) train_ratings_df = all_ratings_df.iloc[:n_train].copy() dev_ratings_df = all_ratings_df.iloc[n_train:].copy() assert len(train_ratings_df) + len(dev_ratings_df) == len(all_ratings_df) return train_ratings_df, dev_ratings_df
a7db1e2f8730ce6501824fa0aaf775667a9665e6
16,269
def merge_response_func(func, key):
    """
    Use this decorator to set a new merging response function to HTTP
    endpoints

    candidate function must have the following signature and be childs
    of BaseApi:
    ```
    def merge_some_function(self, response, rison_args):
    ```

    :param func: Name of the merge function where the key is allowed
    :param key: The key name for rison selection
    :return: None
    """
    def wrap(f):
        # Lazily attach the mapping on first use, then record this key.
        mappings = getattr(f, "_response_key_func_mappings", None)
        if mappings is None:
            mappings = dict()
            f._response_key_func_mappings = mappings
        mappings[key] = func
        return f
    return wrap
277d7a5b477d3d52aac7a8d93e5cc5468d36279c
16,271
def Mimir_header_handler(header):
    """Makes some small modifications to the header as its read in"""
    # Work on a copy so the caller's header stays untouched.
    outHeader = header.copy()

    # Make the ADU units lower case so that astropy.units can handle them.
    outHeader['BUNIT'] = header['BUNIT'].strip().lower()

    # Gain and read noise were not included in the raw header; values from
    # http://people.bu.edu/clemens/mimir/capabilties.html (July 29, 2017).
    for key, value in (('AGAIN_01', 8.21), ('ARDNS_01', 17.8)):
        outHeader[key] = value

    return outHeader
cecbd339b366fbd67243719bf161a9b9fc370b51
16,272
def real_letter(character, key):
    """Shift an alphabetic character back by ``key`` positions (Caesar
    decode), wrapping below 'a' around to 'z'.  Non-alphabetic
    characters are returned unchanged.

    NOTE(review): uppercase input also enters the shift branch but wraps
    into the lowercase range — confirm whether uppercase is expected.
    """
    if not character.isalpha():
        return character
    code = ord(character) - key
    if code < ord('a'):
        code = ord('z') - abs(ord('a') - code) + 1
    return chr(code)
24290f3e7c970f8643034f3c88d06b4369695b16
16,273
def revert_gr(admin_mc, request):
    """Ensures gr was reverted to previous state, regardless of test
    results
    """
    def _cleanup(old_gr):
        def revert():
            # Push the saved fields back through the API...
            restored = admin_mc.client.update_by_id_global_role(
                id=old_gr.id,
                displayName=old_gr.name,
                description=old_gr.description,
                rules=old_gr.rules,
                newUserDefault=old_gr.newUserDefault,
                builtin=old_gr.builtin)
            # ...and verify every field really round-tripped.
            assert restored.name == old_gr.name
            assert restored.get("description") == old_gr.description
            assert restored.rules[0].data_dict() == old_gr.rules[0].data_dict()
            assert restored.get("builtin") is old_gr.builtin
            assert restored.newUserDefault is old_gr.newUserDefault

        request.addfinalizer(revert)

    return _cleanup
2e109bee2796d1ee8dfe5a7fe60e2748ae70b084
16,274
def get_margin(length):
    """Add enough tabs to align in two columns.

    :param length: width of the text already on the line
    :return: between one and four tab characters
    """
    # The original also maintained an unused ``chars`` counter; removed.
    if length > 23:
        return "\t"
    if length > 15:
        return "\t\t"
    if length > 7:
        return "\t\t\t"
    return "\t\t\t\t"
c50b1253a737d787f696216a22250fb2268216ab
16,275
def unique_list(it):
    """
    Create a list from an iterable with only unique element and where
    the order is preserved.

    Parameters
    ----------
    it : iterable
        Items should be hashable and comparable

    Returns : list
        All items in the list is unique and the order of the iterable
        is preserved.
    """
    # dicts preserve insertion order (3.7+), so this dedupes while
    # keeping the first occurrence of each item in place.
    return list(dict.fromkeys(it))
5122b3fdb7a489df856ad6ff4a17da58a99a8160
16,277
def f4(x):
    """Evaluate the estimate x**4+x**3+x**2+x (Horner form)."""
    return x * (1 + x * (1 + x * (1 + x)))
6a5e258d77992e8c15a6dddb04627a7d5c8467d9
16,278
import os


def get_files(directory, exts=('.torrent',)):
    """Get the shallowest set of files with valid extensions in a
    directory structure.

    If no valid files are found in a directory, search all
    subdirectories. If a valid file is found in a directory, no
    subdirectories will be searched.

    Parameters
        str - path of directory
        (str,) - tuple of valid file extensions

    Returns
        {str: [(float, int), 0]} - dictionary, keyed by file path, of
            (mtime, length) pairs and an uninitialized hash value
        {str: str} - dictionary, keyed by file path, of file extension
    """
    files = {}
    file_type = {}
    subdirs = []

    # Ignore '.' files and directories
    for entry in os.listdir(directory):
        if entry.startswith('.'):
            continue
        loc = os.path.join(directory, entry)
        if os.path.isdir(loc):
            subdirs.append(loc)
            continue
        # Record the first matching extension (without its leading dot).
        for ext in exts:
            if entry.endswith(ext):
                files[loc] = [(int(os.path.getmtime(loc)),
                               os.path.getsize(loc)), 0]
                file_type[loc] = ext[1:]
                break

    # Recurse only when this level produced no valid files.
    if not files and subdirs:
        for subdir in subdirs:
            subfiles, subfile_type = get_files(subdir, exts)
            files.update(subfiles)
            file_type.update(subfile_type)

    return files, file_type
5a704db10242e64996a6bc1783c3e62aca39214b
16,279
import re def _is_private_name(name): """ Return true if the given variable name is considered private. Parameters ---------- name : str Variable name to check """ # e.g. __name__ is considered public. is_reserved_public_name = re.match(r"__[a-zA-Z0-9_]+__$", name) is not None return name.startswith("_") and not is_reserved_public_name
1232f6a52ffc9d07d7ed286771c3c75bc80db8b7
16,280
import os


def find_file_in_dirs(path, dirs):
    """
    Search for `path` in the list of directories `dirs`.
    Return the first expansion that matches an existing file.
    """
    # Absolute paths are returned as-is, without searching.
    if os.path.isabs(path):
        return path
    for directory in dirs:
        if directory == '.':
            candidate = path
        else:
            candidate = os.path.join(os.path.expanduser(directory), path)
        if os.path.exists(candidate):
            return candidate
    # Nothing matched: fall back to the original path.
    return path
a7314850fdb68382a370a4fd193c92ff3af1fc48
16,281
import shlex


def shell_join(argv, delim=" "):
    """Join strings together in a way that is an inverse of `shlex`
    shell parsing into `argv`.

    Basically, if the resulting string is passed as a command line
    argument then `sys.argv` will equal `argv`.

    Parameters
    ----------
    argv : list(str)
        List of arguments to collect into command line string. It will
        be escaped accordingly.
    delim : str
        Whitespace delimiter to join the strings.

    Returns
    -------
    cmd : str
        Properly escaped and joined command line string.
    """
    cmd = delim.join(shlex.quote(arg) for arg in argv)
    # Round-trip check: parsing the result must reproduce argv exactly.
    assert shlex.split(cmd) == list(argv)
    return cmd
d58fb2d899bc1f72adf0dd22bdc55ddc9ffeecfb
16,282
def inverted_index_add(inverted, doc_id, doc_index):
    """
    Add Invertd-Index doc_index of the document doc_id to the
    Multi-Document Inverted-Index (inverted), using doc_id as document
    identifier.

        {word: {doc_id: [locations]}}
    """
    for word, locations in doc_index.items():
        if word not in inverted:
            inverted[word] = {}
        inverted[word][doc_id] = locations
    return inverted
4ef0cad9892a09dfaeb730487f413836ba72b7d3
16,283
import numpy


def draw_mask_over_image(image: numpy.ndarray, mask: numpy.ndarray, color=None):
    """
    Draws the mask over the image.

    All the pixels marked as True will be overwritten in the original
    image wih a certain colour.

    Args:
        image: A numpy array, representing the RGB image in [0, 1] range.
        mask: A two dimensional array, representing the mask of the
            staples. It must be white with black background; that is, the
            objects must be masked as True.
        color: An RGB colour, representing the color that will be used to
            draw the objects in the image.

    Returns:
        A numpy array, representing the RGB image in [0, 1] range with
        the drawn objects.
    """
    # Default drawing colour (green-ish).
    fill = [70, 253, 52] if color is None else color
    result = image.copy()
    result[mask[:, :] == True] = fill
    return result
6a2db79dcd47559be685e832e851018edf09c8c9
16,287
def append(xs, ys):
    """
    Adds all the elements of xs and ys and returns a combined list.
    """
    combined = xs + ys
    return combined
e629ece09214a88465d780bb82f2b6f1a82af18a
16,289
from json import dumps


def json_formatter(data):
    """Method returns parsing result in json format.
    """
    return dumps(
        data,
        sort_keys=True,
        indent=4,
        separators=(',', ': '),
    )
08614e5d49be595642d751bf48171be3b2a5b62b
16,290
def get_fert_weight(npk, required_nitrogen):
    """
    Calculate the required amount of product for an application.

    :param npk: mapping with the product's 'N' percentage
    :param required_nitrogen: required lbs of nitrogen for the application
    :return: the amount of product required for an application
    """
    nitrogen_fraction = npk['N'] / 100.0
    return required_nitrogen / nitrogen_fraction
ac174ec16fe1db68be192bc621a9b6e21b3fddba
16,291
def get_keyfunc(cols, schema, nulls_are_smaller=False):
    """
    Return a function that maps a row to a tuple of some of its columns
    values
    """
    def key(row):
        """
        Returns a tuple designed for comparisons based on a row

        Each requested column contributes two entries:

        - a flag that orders None relative to non-None values
        - the column value itself

        This prevents comparisons between None and non-None values while
        letting ``nulls_are_smaller`` decide where None sorts.
        """
        parts = []
        for col in cols:
            value = col.eval(row, schema)
            parts.append((value is None) != nulls_are_smaller)
            parts.append(value)
        return tuple(parts)
    return key
35d722aaf0eec26f9a17e4b29dbcbd2db5095dc0
16,293
import re


def get_all_text(elem):
    """Returns all texts in the subtree of the lxml.etree._Element,
    which is returned by Document.cssselect() etc.
    """
    joined = ''.join(elem.itertext())
    # Collapse every whitespace run to a single space and trim the ends.
    return re.sub(r'\s+', ' ', joined).strip()
1c8e2e8743147fd3b09488445d8578a89f5346d7
16,294
def count_offspring(cycle: int, days: int) -> int:
    """Counts how many offspring will be spawn over a given period.

    :param cycle: days until this fish next spawns
    :param days: length of the simulated period
    :return: total population descended from (and including) this fish

    The original plain recursion was exponential in ``days``; the result
    only depends on (cycle, days), so memoize the subproblems.  The
    public interface and results are unchanged.
    """
    memo = {}

    def _count(c, d):
        if c >= d:
            return 1
        if (c, d) not in memo:
            if c == 0:
                # Spawning: the parent resets to 6, the child starts at 8.
                memo[(c, d)] = _count(8, d - 1) + _count(6, d - 1)
            else:
                memo[(c, d)] = _count(c - 1, d - 1)
        return memo[(c, d)]

    return _count(cycle, days)
4cc57ab978289828d074c728bfbfdd8d51ecf5d9
16,295
def hashable(obj):
    """ Convert obj to a hashable obj.

    We use the value of some fields from Elasticsearch as keys for
    dictionaries. This means that whatever Elasticsearch returns must be
    hashable, and it sometimes returns a list or dict."""
    # Unhashable types (list, dict, ...) have __hash__ set to None.
    return obj if obj.__hash__ else str(obj)
bf59d420da20f6df5121fb25a1ce8c5c7ce210c7
16,296
def _collect_mexp_symbols(value): """Get symbols in a math expression. :param value: The math expression. :rtype : list of str :return: A list of symbols. """ # Get symbols. fsm = value.free_symbols # Initialize symbol list. ret = [] # Pop symbols. while len(fsm) != 0: ret.append(fsm.pop().name) return ret
c031212e3af5bd485dc572c4cf9bb31cfc2be3db
16,297
def _inBoundaries(value, bounds): """ Tell if an integer value is within a range. Parameters ---------- value : int A value to be tested. bounds : int | tuple(int, int) Low boundary of the range, or a tuple of the form (low, high+1). Returns ------- boolean Whether or not the value fell within the range """ if type(bounds) is tuple: assert 2 == len(bounds) return bounds[0] <= value < bounds[1] else: assert type(bounds) is int return bounds <= value
dcc9fcf993ee7b25bb0f6b8cb8acea7b0474fad5
16,298
def has_neighbor(prop1: str, prop2: str) -> str:
    """
    Builds the statements required for:
    The person who `prop1` has a neighbor who `prop2`.

    For example, "The Norwegian lives next to the Blue house."

    Emits a disjunction over every pair of adjacent house positions
    (1..5); because each property holds at exactly one position (stated
    elsewhere), no deduplicating conjunction is needed:

    ```ignore
    (assert (or
     (and (norwegian 1) (blue 2))
     (and (norwegian 2) (blue 1))
     ...
    ))
    ```
    """
    clauses = [
        f' (and ({prop1} {i}) ({prop2} {j}))'
        for i in range(1, 6)
        for j in range(1, 6)
        if abs(i - j) == 1
    ]
    return '(assert (or\n' + '\n'.join(clauses) + '))\n'
d63dd6e09ce822cdcd87970dd205ee34857ad8f4
16,300
import numpy


def isInsideContour(p, xc, yc):
    """
    Check if a point is inside a closed, anticlockwise contour by
    requiring the point to lie strictly to the left of every edge
    (positive cross product).

    @param p point (2d array)
    @param xc array of x points, anticlockwise and must close
    @param yc array of y points, anticlockwise and must close
    @return True if p is inside, False otherwise
    """
    inside = True
    for start in range(len(xc) - 1):
        end = start + 1
        a = numpy.array([xc[start], yc[start]]) - p[:2]
        b = numpy.array([xc[end], yc[end]]) - p[:2]
        # Cross product must exceed a small tolerance for the point to be
        # on the inner side of this edge.
        inside &= (a[0] * b[1] - a[1] * b[0] > 1.e-10)
    return inside
d86813df1abf3d446b11c1e2a1a55b996888b5b5
16,302
from typing import Counter


def build_vocab(data, min_token_instances, verbose=True):
    """ Builds a set that contains the vocab. Filters infrequent tokens. """
    token_counter = Counter()
    for img in data:
        for region in img['regions']:
            if region['tokens'] is not None:
                token_counter.update(region['tokens'])

    # Keep only tokens seen at least min_token_instances times.
    kept = {token for token, count in token_counter.items()
            if count >= min_token_instances}
    if verbose:
        print('Keeping {} / {} tokens with enough instances'.format(
            len(kept), len(token_counter)))

    # Most frequent tokens first.
    vocab = sorted(kept, key=lambda token: token_counter[token], reverse=True)

    # Reserve the special tokens; <unk> is only needed when some tokens
    # were filtered out.
    if len(vocab) < len(token_counter):
        vocab = ['<pad>', '<bos>', '<eos>', '<unk>'] + vocab
        if verbose:
            print('adding special <pad> <bos> <eos> <unk> token.')
    else:
        vocab = ['<pad>', '<bos>', '<eos>'] + vocab
        if verbose:
            print('adding special <pad> <bos> <eos> token.')

    return vocab
20f2ee90abb9e2fd370e434752736a1a02d80399
16,303
def convert_command_results(header, msg, flag):
    """Format an admin-command result set for display.

    (Original docstring was Chinese: "convert admin command result set".)

    :param header: title line printed before the result
    :param msg: on error (flag truthy): (errno, message);
        otherwise: (affected_rows, headers, columns) — presumably
        columns is a list of row sequences; TODO confirm against callers
    :param flag: truthy when msg describes an error
    :return: formatted multi-line string
    """
    if flag:
        # Error path: just echo the errno and message.
        return '%s:\n' \
               'Errno: %s\n' \
               '%s\n\n' % (header, msg[0], msg[1])
    else:
        res = '%s:\n' % header
        affect_rows, headers, columns = msg[0], msg[1], msg[2]
        # Stringify every cell, rendering falsy cells as 'NULL'.
        # NOTE(review): `if _x:` also maps 0 and '' to 'NULL' — confirm
        # that is intended rather than only None.
        _c, _r = list(), list()
        for _u in columns:
            for _x in _u:
                if _x:
                    _r.append(str(_x))
                else:
                    _r.append('NULL')
            _c.append(_r)
            _r = list()
        columns = _c
        if msg[1]:
            # Column widths: start from the header widths, then widen to
            # fit the longest cell in each column.
            max_length, line = list(), '+'
            for item in headers:
                max_length.append(len(item))
            for column in columns:
                for index, item in enumerate(column):
                    if len(item) > max_length[index]:
                        max_length[index] = len(item)
            # Horizontal rule sized to the final column widths.
            for item in max_length:
                line += '+'.rjust(item + 3, '-')
            res += '%s\n' % line
            # Header row.
            for index, item in enumerate(headers):
                res += '| ' + item.ljust(max_length[index] + 1, ' ')
            res += '|\n%s\n' % line
            # Data rows.
            for column in columns:
                for index, item in enumerate(column):
                    res += '| ' + item.ljust(max_length[index] + 1, ' ')
                res += '|\n'
            res += '%s\n' % line
            res += '%s rows in set\n\n' % affect_rows
        else:
            # No result columns: a write-style statement summary.
            res += 'Query OK, %s row affected\n\n' % affect_rows
        return res
6ce65100b212ceda2113115187b41d73e3b7be8f
16,305
import os


def addAssets(proj,name,typ,component):
    """Register an asset with csi3.

    Registers the asset as either an "assembly" or a "component"
    according to ``component``; any other value only builds the summary
    string without registering.  (Original docstring was Korean: "register
    an asset, according to its component/assembly info".)

    :param proj: project name
    :param name: asset name
    :param typ: asset type
    :param component: "assembly" or "component"
    :return: human-readable summary of what was requested
    """
    # SECURITY(review): the arguments are interpolated directly into a
    # shell command run via os.system — untrusted input could inject
    # shell commands.  Consider subprocess.run([...], shell=False).
    if component == "assembly":
        os.system("/lustre/INHouse/CentOS/bin/csi3 -add item -project %s -name %s -type asset -assettype %s -assettags %s,assembly" % (proj, name, typ, typ))
    elif component == "component":
        os.system("/lustre/INHouse/CentOS/bin/csi3 -add item -project %s -name %s -type asset -assettype %s -assettags %s,component" % (proj, name, typ, typ))
    result = "AssetName : %s\nAssetType : %s\nComponent : %s\n"%(name,typ,component)
    return result
f1fa6ee2156b38105ddae950a13cefb7782b2f2e
16,306
def bl_little_mock(url, request):
    """
    Mock for bundle lookup, small output.

    httmock-style handler: ignores *url* and *request* and always returns
    a canned availableBundlesResponse XML payload with HTTP 200, listing a
    single 10.3.1.2726 bundle.
    """
    # Canned XML body captured from a real bundle-lookup response.
    littlebody = b'<?xml version="1.0" encoding="UTF-8"?><availableBundlesResponse version="1.0.0" sessionId="7762de1a-909c-4afb-88fe-57acb69e9049"><data authEchoTS="1366644680359"><status code="0"><friendlyMessage>Success</friendlyMessage><technicalMessage>Success</technicalMessage></status><content><bundle version="10.3.1.2726"><type>system:os</type><type>system:radio</type><type>application</type></bundle></content></data><signature><root><cipher>EC521R1</cipher><shaType>SHA512</shaType><sigR>AN80t5quXy/WiTy0Lw0llAIGmRVogdfRttDOCbWh6uUquvAvAt2YAN1OOCLJbOOFn5SppytUJi34wXOxiopv2RjX</sigR><sigS>APPbsvblhDzEhpef8wQBgxCrTJ851e/BeVBLUzlGG7ovy220QdQHeG8ahk9bMeoTmnIkWc6f/kCs+h7hGkel+OYT</sigS></root><chain ordinal="1"><cipher>EC521R1</cipher><shaType>SHA512</shaType><publicKey notValidUntil="1434252700605" notValidAfter="1434684700605">BAHbALLG0oyfF7ZvmxOjz1NFODaTEd9gvdqaqgSwuTi39Jv87q0ZWfY7kVSxWAyuumQYfIpy+9ruTd4z7cNvXJ0u8ACJvuo3xzNpgCah74Wpqcvr+EuNGkVe0IAZDXZSGPeZC739vkPitYiqJDP8joOuFTIdpUop/qJj+YckijR9RujdUw==</publicKey><sigR>Aaou0PAOGD7njwYckvvAGlepOmDSbRV2clsaskubz01+sQ9YlLhwctCAPS9n9kpdnnYbg2TDvh6XN3lUFgmfGvJl</sigR><sigS>AN34KjWeSGVIBz4KTlHzMGMpKfDKsOQPT5UVsy+tczBhKdAeDYGaU5Yc/YFaAOz7RuxjIbHkohuHESqDcXCnvif6</sigS></chain></signature></availableBundlesResponse>'
    return {'status_code': 200, 'content': littlebody}
0b35d1e30c32b89351341103ba9013ca95c0ba2e
16,307
from typing import Optional def _transform_op(op: Optional[str]) -> str: """Transforms an operator for MongoDB compatibility. This method takes an operator (as specified in a keyword argument) and transforms it to a ``$`` prefixed camel case operator for MongoDB. """ if op is None: return "$eq" camel = op[0].lower() + op.title()[1:].replace("_", "") return f"${camel}"
6168e84387ade258b2907445e5d3536228053521
16,309
def generate_title(input_file):
    """Build the markdown title fragment from a parsed input-file dict.

    Expects 'title' and 'subtitle' keys; returns '# <title>' followed by
    the italicised subtitle and a trailing '<br>'.
    """
    heading = input_file['title']
    sub = input_file['subtitle']
    return f'# {heading}\n*{sub}*<br>'
64150f960a0ee560f6bec600c8a632970afb45f1
16,311
import os


def is_git_repo(directory):
    """Return True if *directory* contains a '.git' entry.

    Note: this only checks for the presence of a '.git' entry (directory
    or file); it never invokes git itself.  The original docstring's claim
    about parsing 'git status' output was inaccurate and has been removed.
    """
    return '.git' in os.listdir(directory)
9e33250083e7691d2c930e56819ec7bf3df39212
16,312
import requests


def get_bitstamp_ticker(symbol='btcusd'):
    """Return the current (bid, ask) for *symbol* from Bitstamp's public ticker.

    doc: https://www.bitstamp.net/api/
    limit: 600 / 10 min

    :param symbol: Bitstamp currency-pair code, e.g. 'btcusd'.
    :return: tuple of (bid, ask) as returned by the API (string values).
    """
    main_path = 'https://www.bitstamp.net'
    info_path = '/api/v2/ticker/{0}/'.format(symbol)
    r = requests.get(main_path + info_path)
    data_json = r.json()
    # NOTE(review): no HTTP-status handling — a non-JSON error page raises
    # from r.json(), and missing keys raise KeyError.  Confirm callers
    # expect exceptions rather than a sentinel on failure.
    return data_json['bid'], data_json['ask']
f8af2dfbf39a0a0eb6d98d0891647a6cc749ca9e
16,314
import sys


def install(cls):
    """Class decorator that registers *cls* as a finder on sys.meta_path.

    Returns the class unchanged so it can be used as ``@install``.
    """
    sys.meta_path.append(cls)  # global side effect on the import machinery
    return cls
02771d8ca6a87f149264523a82abd5b64d696395
16,317
def stop_server(proc):
    """
    Stop server process.

    proc: ShellProc
        Process of server to stop.

    Delegates to ``proc.terminate(timeout=10)`` — waits up to 10 seconds
    for the process to exit — and returns whatever terminate() returns.
    """
    return proc.terminate(timeout=10)
c126ca840b56407f1eea8af943f4602532df13df
16,318
import functools


def replace_exception(raised, to_raise):
    """Decorator factory that swaps exception types.

    Any exception of class (or tuple of classes) *raised* escaping the
    wrapped callable is converted into *to_raise*, constructed with the
    caught exception as its sole argument.
    """
    def _decorate(func):
        @functools.wraps(func)
        def _guarded(*args, **kwargs):
            try:
                return func(*args, **kwargs)
            except raised as exc:
                raise to_raise(exc)
        return _guarded
    return _decorate
9cd63635a8b0185ec382f26ab5c75f12a2287390
16,319
def _v3_to_v2_catalog(catalog): """Convert a catalog to v2 format. X_SERVICE_CATALOG must be specified in v2 format. If you get a token that is in v3 convert it. """ v2_services = [] for v3_service in catalog: # first copy over the entries we allow for the service v2_service = {'type': v3_service['type']} try: v2_service['name'] = v3_service['name'] except KeyError: pass # now convert the endpoints. Because in v3 we specify region per # URL not per group we have to collect all the entries of the same # region together before adding it to the new service. regions = {} for v3_endpoint in v3_service.get('endpoints', []): region_name = v3_endpoint.get('region') try: region = regions[region_name] except KeyError: region = {'region': region_name} if region_name else {} regions[region_name] = region interface_name = v3_endpoint['interface'].lower() + 'URL' region[interface_name] = v3_endpoint['url'] v2_service['endpoints'] = list(regions.values()) v2_services.append(v2_service) return v2_services
a7cb008306e7a8c0bdd9ce7dec2c733c50087839
16,320
def batched_input(func):
    """Flag *func* so ray-serve treats its input as batched.

    NOTE(review): the attribute name says "batched input", while the
    original comment claimed the opposite ("accepting only a single
    input") — confirm against the consumer of this flag in the serve
    framework.
    """
    func.ray_serve_batched_input = True
    return func
e97c2df57c4eb9cfe003545224cc8c942f27d845
16,321
def compose(left, right):
    """Functional form of composition: compose(left, right) --> left.compose(right).

    Thin delegation; *left* must expose a ``compose`` method.
    """
    return left.compose(right)
a279b3d30a1bde94f181d3f94e349c34e49473bc
16,322
def create_subnet_raw(conn, subnet_name, parent_network_id, subnet_cidr, gateway):
    """Create an IPv4 subnet without checking for existing subnets.

    :param conn: openstacksdk-style connection exposing ``conn.network``.
    :param subnet_name: display name for the new subnet.
    :param parent_network_id: id of the network the subnet attaches to.
    :param subnet_cidr: CIDR string, e.g. '10.0.0.0/24'.
    :param gateway: gateway IP within the CIDR.
    :return: the created subnet object as returned by the SDK.
    """
    os_subnet = conn.network.create_subnet(
        name=subnet_name,
        network_id=parent_network_id,
        ip_version='4',
        cidr=subnet_cidr,
        gateway_ip=gateway)
    return os_subnet
f88fd35401ec84af39f5256ea117a4c00f22574c
16,323
def TMTOND(N, LX, X):
    """Reshape a flat trace-mode vector *X* into an (N, LX) nd array.

    :param N: number of traces (rows).
    :param LX: samples per trace (columns).
    :param X: array-like with ``reshape``; must hold N * LX elements.
    """
    return X.reshape(N, LX)
df79cd727919ba628862c93225aef7a23c85a6ca
16,324
def next_node_i(edgeweights, node, next_node):
    """Return the index of *next_node* within edgeweights[node].

    edgeweights[node] is a sequence of (neighbor, ...) entries; the index
    is needed so the edge cost can be looked up.

    :return: position of the entry whose first element equals *next_node*,
        or None when next_node is not a neighbor (matching the original's
        implicit fall-through, now made explicit).
    """
    # enumerate() instead of range(len(...)) — same behavior, idiomatic.
    for i, edge in enumerate(edgeweights[node]):
        if edge[0] == next_node:
            return i
    return None
4a0486e7f862c9036d51ac5ed2107a45ff9af664
16,325
def get_oct(value):
    """Return ``oct(value)`` for integer-like input; pass anything else through.

    The original used a bare ``except``, which also swallowed
    KeyboardInterrupt/SystemExit; ``oct()`` raises TypeError on
    non-integers, so the handler is narrowed to that.
    """
    try:
        return oct(value)
    except TypeError:
        return value
0e07b02f7a727c5942c1f6f1d7d1ca12506328f4
16,327
import torch


def compute_distance_histograms(a, dx, b, dy):
    """Squared distance between the distance-histograms of two mm-spaces.

    Each measure is normalised to a probability and used to average the
    rows of its metric, giving a histogram h[i] = sum_j d[i, j] * p[j];
    the result is the pairwise squared difference (h_x[i] - h_y[j])**2,
    expanded to keep the original floating-point evaluation order.

    Parameters
    ----------
    a: torch.Tensor, shape [size_X] — measure of the first mm-space.
    dx: torch.Tensor, shape [size_X, size_X] — metric of the first space.
    b: torch.Tensor, shape [size_Y] — measure of the second mm-space.
    dy: torch.Tensor, shape [size_Y, size_Y] — metric of the second space.

    Returns
    -------
    torch.Tensor of shape [size_X, size_Y].

    NOTE(review): the original docstring advertised batched inputs, but
    the contraction only supports 2-D metrics / 1-D measures — confirm.
    """
    h_x = torch.mv(dx, a / a.sum())
    h_y = torch.mv(dy, b / b.sum())
    lcost = (h_x ** 2)[:, None] + (h_y ** 2)[None, :]
    return lcost - 2 * h_x[:, None] * h_y[None, :]
7758c15cea89d9eabba2fbf95decd6252bec532d
16,329
def gather_3rd(params, indices):
    """Special case of tf.gather_nd where indices.shape[-1] == 3.

    See https://www.tensorflow.org/versions/r1.15/api_docs/python/tf/gather_nd
    for the general contract.

    Args:
        params: Tensor, shaped [B, d_1, ..., d_i], i >= 2.
        indices: LongTensor, shaped [B, i_1, ..., i_{k-1}, 3].

    Returns:
        Tensor of shape indices.shape[:-1] + params.shape[indices.shape[-1]:].
    """
    # Split the last axis into its three index components, then use
    # advanced indexing — equivalent to indexing with indices[..., k].
    i0, i1, i2 = indices.unbind(-1)
    return params[i0, i1, i2]
948acdbb83b283cc0bef743d679e7d30fbaa0ad6
16,330
def apply_date_format(in_data): """ This routine adds the default UTC zone format to the input date time string If a timezone (strting with + or -) is found, all the following chars are replaced by +00, otherwise +00 is added. Note: if the input zone is +02:00 no date conversion is done, at the time being this routine expects UTC date time values. Examples: 2018-05-28 16:56:55 ==> 2018-05-28 16:56:55.000000+00 2018-05-28 13:42:28.84 ==> 2018-05-28 13:42:28.840000+00 2018-03-22 17:17:17.166347 ==> 2018-03-22 17:17:17.166347+00 2018-03-22 17:17:17.166347+00:00 ==> 2018-03-22 17:17:17.166347+00 2018-03-22 17:17:17.166347+00 ==> 2018-03-22 17:17:17.166347+00 2018-03-22 17:17:17.166347+02:00 ==> 2018-03-22 17:17:17.166347+00 Args: the date time string to format Returns: the newly formatted datetime string """ # Look for timezone start with '-' a the end of the date (-XY:WZ) zone_index = in_data.rfind("-") # If index is less than 10 we don't have the trailing zone with - if (zone_index < 10): # Look for timezone start with '+' (+XY:ZW) zone_index = in_data.rfind("+") if zone_index == -1: if in_data.rfind(".") == -1: # there are no milliseconds in the date in_data += ".000000" # Pads with 0 if needed in_data = in_data.ljust(26, '0') # Just add +00 timestamp = in_data + "+00" else: # Remove everything after - or + and add +00 timestamp = in_data[:zone_index] + "+00" return timestamp
2a9cd63f1b0a341c3c859d80b538af5ecf56d21d
16,332
def find_base_match(char, matrix):
    """Return every (row, column) coordinate where *char* occurs in *matrix*.

    Args:
        char (str): single character to look for.
        matrix (list): list of strings, one per row.

    Returns:
        list: (row_index, column_index) tuples in row-major order.
    """
    coordinates = []
    for row_index, row in enumerate(matrix):
        for column_index, cell in enumerate(row):
            if cell == char:
                coordinates.append((row_index, column_index))
    return coordinates
836a84b672365908cf81f848f1111840f613dbea
16,335
def startswith(text, starts):
    """
    Template filter: True when *text* is a str beginning with *starts*.

    {% load startswith %}
    {{ var | startswith: "foo" }}

    Non-string input (e.g. ints, None) yields False rather than raising.
    """
    if not isinstance(text, str):
        return False
    return text.startswith(starts)
088437bc630ceb310e85b3995d24dc51aa65860c
16,337
def near(array, value):
    """Return the index of the element of *array* closest to *value*.

    Ties resolve to the first (lowest-index) minimum, per argmin().
    """
    deltas = abs(array - value)
    return deltas.argmin()
0e683e8a94a54cd4e53f6d147ca8d4d907ec84c2
16,338
import time


def bench_from_sample( distribution, sample, n=1000 ):
    """Time *n* calls of distribution.summarize(sample).

    Returns the elapsed wall-clock seconds for the whole loop.
    """
    start = time.time()
    for _ in range(n):
        distribution.summarize(sample)
    return time.time() - start
c9d97badda06a9b5340dc360e1ba7d043a6e2c29
16,339
def dominant_clade(idx, prev_cutoff=1):
    """Which is the dominant clade?

    Sorts the keys of *idx* descending by their first element and checks
    only the top-ranked one: returns that key if idx[key][0] meets
    prev_cutoff, else None.

    NOTE(review): the loop body returns on its very first iteration, so
    only the highest-sorting key is ever examined — presumably intentional
    ("is the top clade dominant enough?"), but confirm; a plain
    max()/guard would express that without the loop.
    Also returns None implicitly when *idx* is empty.
    """
    for k in sorted(idx, key=lambda item: item[0], reverse=True):
        if idx[k][0] < prev_cutoff:
            return None
        else:
            return k
3961f1bf35522a0226da5bf4b52b612a7c44a5bb
16,340
def packal_username(author):
    """Format an author name as a Packal username: lower-case, spaces -> hyphens."""
    return author.lower().replace(" ", "-")
c8f9c5ab67deb95775a787a5cb8cceab67ea73f1
16,341
def map_args(args):
    """Filter CLI arguments down to the keyword args accepted by make_map.

    Scalar options are copied through when present.  The layer-selection
    options are gathered into a nested 'graph_attrs' dict; those are only
    kept when present AND truthy, matching the original behavior.
    """
    scalar_keys = ('starting_year', 'ending_year', 'ranking_algorithm',
                   'similarity_algorithm', 'filtering_algorithm',
                   'number_of_terms', 'include_svg_dimensions',
                   'file_format', 'only_terms', 'sample_size',
                   'evaluation_output_path', 'n_layers')
    graph_keys = ('layerselect',)

    pass_args = {key: args[key] for key in scalar_keys if key in args}
    pass_args['graph_attrs'] = {
        key: args[key] for key in graph_keys if key in args and args[key]}
    return pass_args
df5bc2599a732c9f75db6e833dc672f135f9a012
16,343
def _get_all_nearest_neighbors(method, structure):
    """Get the nearest neighbor list of a structure

    Thin delegation to the analysis method's own implementation —
    presumably factored out so the call site can be memoized/cached at
    module level; confirm against the caller.

    Args:
        method (NearNeighbor) - Method used to compute nearest neighbors
        structure (IStructure) - Structure to study
    Returns:
        Output of `method.get_all_nn_info(structure)`
    """
    return method.get_all_nn_info(structure)
5e1e33c7b06951933d8603a75006c6895b742293
16,345
def mul_inv(a):
    """Multiplicative inverse modulo the prime 0x10001 (65537).

    In IDEA-style arithmetic the value 0 stands for 0x10000, so it is
    substituted before inverting.  Uses Fermat's little theorem:
    a**(p - 2) mod p.

    :type a: int
    """
    modulus = 0x10001
    operand = 0x10000 if a == 0 else a
    return pow(operand, modulus - 2, modulus)
63c1e617600fb93cff3a601668aab1a3ce5d3be6
16,350
import zlib def _compute_crc(file_obj): """To minimize memory use, compute the CRC in chunks.""" crc = 31415 # can initialize to any value while True: content = file_obj.read(1048576) # 1M at a time if not content: break crc = zlib.crc32(content, crc) return crc
3963c560819160636f947f9b5834b69eab666816
16,352
def rolling_average(n, data):
    """Average the most recent *n* days of covid stats.

    @param n: number of points to average into the past
    @param data: list of dicts with 'death', 'deathIncrease', 'positive'
        and 'positiveIncrease' keys, most recent first
    @return: (avg deaths, avg death increase, avg infected,
        avg infected increase)

    Raises IndexError when data holds fewer than n entries (same as the
    original index loop).
    """
    window = [data[i] for i in range(n)]
    deaths = sum(day['death'] for day in window)
    death_inc = sum(day['deathIncrease'] for day in window)
    infected = sum(day['positive'] for day in window)
    infected_inc = sum(day['positiveIncrease'] for day in window)
    return deaths / n, death_inc / n, infected / n, infected_inc / n
ba02b006176d8b37ed53e5f2cd46c64a92b7dcc2
16,353
def compute_avg_negation_difference(sent_and_contexts1, sent_and_contexts2):
    """Average absolute difference in negation counts between documents.

    Every context of document 1 is compared with every context of
    document 2; each pair contributes abs(#negations1 - #negations2).
    The mean over all pairs is returned, or 0.0 when either document has
    no contexts.

    Note (from the original): this may want weighting by Word Mover
    Distance — a negation difference matters more between sentences that
    are otherwise close in meaning.

    :param sent_and_contexts1: SentenceAndContexts object for document 1
    :param sent_and_contexts2: SentenceAndContexts object for document 2
    :return: float average negation difference
    """
    diffs = [
        abs(c1.get_number_of_negations() - c2.get_number_of_negations())
        for c1 in sent_and_contexts1.contexts_and_entities
        for c2 in sent_and_contexts2.contexts_and_entities
    ]
    if not diffs:
        return 0.0
    return sum(diffs) / len(diffs)
9aecc7a42126da2dde770e218105b88d00a98c3b
16,354
def checkPW(md5str: str, USER_PASSWD: str) -> bool:
    """Check whether the computed password hash matches the stored one.

    :param md5str: computed password hash (string)
    :param USER_PASSWD: password stored in the local database (string)
    :return: bool

    NOTE(review): plain '==' comparison; if this guards authentication,
    consider hmac.compare_digest to avoid timing leaks.
    """
    return md5str == USER_PASSWD
2bbe3864493700ff907b00ff9f82c45723e1a2d7
16,355
def get_list_of_existing_certificates(ct_collection):
    """
    We don't want to re-download data that we already have. Therefore, we
    get the list of known crt_sh_ids from the database.

    Returns the distinct crt_sh_min_id values in first-seen order.
    De-duplication now uses a set (O(1) membership) instead of the
    original list scan, which was O(n) per document.
    """
    existing_ids = []
    seen = set()
    results = ct_collection.find({'crt_sh_min_id': {"$exists": True}},
                                 {'crt_sh_min_id': 1})
    for result in results:
        cert_id = result['crt_sh_min_id']
        if cert_id not in seen:
            seen.add(cert_id)
            existing_ids.append(cert_id)
    return existing_ids
9ada67bba79db41cdbe64d708f6adce2589b5cf9
16,356
def count_letters(word, find):
    """Return the number of occurrences of character *find* in *word*.

    Bug fix: the original counted ``word.find(find, i) != 0`` for every
    index i, which over-counts badly (e.g. it returned 10 for
    ('abracadabra', 'a')).  The docstring examples pin the intended
    behavior; str.count implements it directly.

    Parameters
    ----------
    word : str
        The string to search.
    find : str
        The single character to count.

    Returns
    -------
    int
        Number of occurrences of *find* in *word*.

    Examples
    --------
    >>> count_letters('abracadabra', 'a')
    5
    >>> count_letters('momomotus', 'u')
    1
    """
    return word.count(find)
327e6b4fd99d03b27473b9620d6147909d0cf6e4
16,357
import socket


def ip2num(ipStr):
    """Convert a dotted-quad IPv4 string to its 32-bit integer value.

    inet_aton packs the address in network byte order (big-endian), so
    decoding those 4 bytes big-endian yields the numeric address.
    """
    packed = socket.inet_aton(ipStr)
    return int.from_bytes(packed, 'big')
e11e713a14cf6d285e0659efbbe3b6567084aea7
16,359
def one_space(value):
    """Collapse whitespace runs in *value* to single spaces and trim the ends.

    Tabs, carriage returns and newlines are treated as spaces; non-string
    input yields ''.

    Example:
        >>> from phanterpwa.tools import one_space
        >>> one_space("  My long \r\n text. \tTabulation, spaces, spaces.  ")
        'My long text. Tabulation, spaces, spaces.'
    """
    if not isinstance(value, str):
        return ""
    cleaned = value.strip()
    for ws in ("\n", "\t", "\r"):
        cleaned = cleaned.replace(ws, " ")
    return " ".join(part for part in cleaned.split(" ") if part)
ff3ce941437186c734bd2815fff9d23ba581bec7
16,360
def small_push_dir(tmpdir):
    """Create a small pg data directory-alike

    Builds a 'push-from' directory under *tmpdir* (a pytest py.path.local)
    containing one ~260 KB text file, a dangling pg_xlog symlink, and an
    empty file, to exercise the tar/push code paths.
    """
    contents = 'abcdefghijlmnopqrstuvwxyz\n' * 10000

    push_dir = tmpdir.join('push-from').ensure(dir=True)
    push_dir.join('arbitrary-file').write(contents)

    # Construct a symlink a non-existent path.  This provoked a crash
    # at one time.
    push_dir.join('pg_xlog').mksymlinkto('/tmp/wal-e-test-must-not-exist')

    # Holy crap, the tar segmentation code relies on the directory
    # containing files without a common prefix...the first character
    # of two files must be distinct!
    push_dir.join('holy-smokes').ensure()

    return push_dir
6f3fa0f6b291f4d18a8f33624ad55045c45b7261
16,361
def _matching_award(cursor, load_object): """ Try to find an award for this transaction to belong to by unique_award_key""" find_matching_award_sql = "select id from awards where generated_unique_award_id = '{}'".format( load_object["transaction_fpds"]["unique_award_key"] ) cursor.execute(find_matching_award_sql) results = cursor.fetchall() return results[0][0] if results else None
5bd95f293a7683b8f9ec53ecccc43fb8055c5486
16,362
import os


def check_wand_binaries_exist(package_path: str) -> bool:
    """Check if the binaries necessary to run the DeepSparse Engine are present.

    Looks for 'arch.bin' under *package_path*, logging the probe to stdout.
    """
    arch_path = os.path.join(package_path, "arch.bin")
    present = os.path.exists(arch_path)
    print("Checking to see if", arch_path, "exists..", present)
    return present
53d4e13c371f71aedfae7b17bf95a3646fad9505
16,363
def create_cmnd(
    analysis, sim_start_date, sim_end_date, use_sim_manager, attributes, no_close
):
    """Assemble the '/process=' command string for automatic processing.

    :param analysis: 'eplus', 'sbem', 'dsm', or 'none' (model update only).
    :param sim_start_date: optional (day, month) pair, or falsy to omit.
    :param sim_end_date: optional (day, month) pair, or falsy to omit.
    :param use_sim_manager: prepend the UseSimManager flag when truthy.
    :param attributes: optional iterable of (attribute, value) pairs.
    :param no_close: append the NoClose flag when truthy.
    :raises KeyError: for an unrecognised analysis type.
    :return: the full '/process=...' command string (also printed).
    """
    args = []
    if use_sim_manager:
        args.append("UseSimManager")
    if sim_start_date:
        args.append(f"SimStartDate {sim_start_date[0]} {sim_start_date[1]}")
    if sim_end_date:
        args.append(f"SimEndDate {sim_end_date[0]} {sim_end_date[1]}")
    if attributes:
        for attr, val in attributes:
            args.append(f"ChangeAttributeValue {attr} {val}")
    if no_close:
        args.append("NoClose")

    # Command that triggers each analysis type.
    analysis_cmds = {
        "eplus": "miGSS",
        "sbem": "miGCalculate",
        "dsm": "miGCalculate",  # not working
    }
    # 'none' is used when only a batch of models should be updated.
    if analysis != "none":
        if analysis not in analysis_cmds:
            raise KeyError("Incorrect analysis type: '{}'.".format(analysis))
        args.append(analysis_cmds[analysis])

    args.append("miTUpdate")

    cmnd = "/process=" + (args[0] if len(args) == 1 else ", ".join(args))
    print(f"Running batch using '{cmnd}' command args. ")
    return cmnd
b95fc466a9c61a125bf09d6fc5dd77bf4575756d
16,365