content
stringlengths
35
416k
sha1
stringlengths
40
40
id
int64
0
710k
def gen_closure():
    """Return the closing tag lines for a Nessus SQL audit file.

    Returns:
        A list of strings that close out the audit file.
    """
    return [
        ' ',
        ' </group_policy>',
        '</check_type>',
        ' ',
    ]
84b3cf190b7365c14326694d24ebde6c1873c325
32,057
import sys
from io import StringIO


def __get_exec__(commands):
    """
    Execute a command, trick Python into writing to our stream instead of
    STDOUT, and return the contents of our stream.
    """
    # Save the real stdout so it can always be restored afterwards.
    stdout_stream = sys.stdout
    string_stream = StringIO()
    try:
        sys.stdout = string_stream
        # SECURITY NOTE(review): exec() runs arbitrary code — `commands`
        # must be trusted input.
        exec(commands)  # Execute any function that might print to STDOUT/STDERR.
    except Exception as error:
        # stdout is still redirected here, so the error text also lands in
        # the StringIO buffer and becomes part of the returned output.
        print(str(error))  # Print to our StringIO
    finally:
        # Restore the original stdout even if exec raised.
        sys.stdout = stdout_stream
    return string_stream.getvalue()
a77c55a168b6f761af325cbaa389d6dc0ba63e9f
32,059
def field_isomorphism_factor(a, b):
    """Construct field isomorphism via factorization.

    Factors the minimal polynomial of ``a`` over the domain of ``b`` and
    searches for a linear factor whose root corresponds to ``a``'s extension
    element, checked numerically.

    NOTE(review): assumes sympy AlgebraicField-style objects for ``a`` and
    ``b`` (attributes ``minpoly``, ``ext``, ``to_expr``) — confirm against
    callers.  Implicitly returns None when no linear factor matches.
    """
    # Minimal polynomial of `a`, re-expressed over b's domain.
    p = a.minpoly.set_domain(b)
    _, factors = p.factor_list()
    for f, _ in factors:
        if f.degree() == 1:
            # Root of the linear factor c1*x + c0 is -c0/c1.
            root = -f.rep[(0,)]/f.rep[(1,)]
            # Numeric verification that this root really maps a.ext into b;
            # chop=True discards tiny imaginary/real noise.
            if (a.ext - b.to_expr(root)).evalf(chop=True) == 0:
                return root.rep.all_coeffs()
ae04232763d2bbce98e62952b38121446a5b40c7
32,060
def numerifyId(string):
    """Replace every non-digit character of *string* with a digit.

    Characters outside '0'..'9' (e.g. hex letters) are mapped to
    ``ord(char) % 10`` so the result contains only decimal digits.
    """
    digits = []
    for char in string:
        if "0" <= char <= "9":
            digits.append(char)
        else:
            digits.append(str(ord(char) % 10))
    return "".join(digits)
25407581b9f60f260c01cc7339ed670ce6edc412
32,066
import csv


def load_state_manifest(state_manifest_in):
    """Load the list of Roadmap ChromHMM state codes from a TSV manifest.

    Each row "E<num>\t<name>" becomes the code "<num>_<name>".
    """
    with open(state_manifest_in) as infile:
        return [
            '_'.join([state.replace('E', ''), name])
            for state, name in csv.reader(infile, delimiter='\t')
        ]
23b287c5cfd5611bb9b3c8374c3d5dff6d6487fe
32,068
def electionsWinners(votes, k):
    """Count the candidates who can still win the election.

    Args:
        votes: list of vote counts per candidate so far.
        k: number of voters who have not voted yet.

    Returns:
        Number of candidates that can still finish strictly ahead of
        everyone else (with k == 0, only a unique current leader wins).
    """
    top = max(votes)
    if k == 0:
        return int(votes.count(top) == 1)
    return sum(1 for v in votes if v + k > top)
8daa21263b9098db47204e9842e4169b7e3ede4d
32,069
import math


def round_up_to_multiple(x, base):
    """Round *x* up to the nearest multiple of *base*."""
    multiples = math.ceil(float(x) / float(base))
    return multiples * base
09ef5da40f94bd713bfab3cd5e610f952bf0bf92
32,070
def get(self, url, **kwargs):
    r"""Send a GET request and return the :class:`Response` object.

    :param url: URL for the new :class:`Request` object.
    :param \*\*kwargs: Optional arguments that ``request`` takes.
    """
    # GET requests follow redirects unless the caller opts out.
    if 'allow_redirects' not in kwargs:
        kwargs['allow_redirects'] = True
    return self.request('GET', url, **kwargs)
8b63a8e62d8185048448f93e6511b8069bc295c6
32,071
def rotate(items: list, k: int) -> list:
    """Return *items* rotated left by *k* positions (negative k rotates right)."""
    return [*items[k:], *items[:k]]
c649f79d4ce3d501a9042288e6f83369a7899a84
32,073
import math


def f(t):
    """Return the value of the assigned variant function at *t*: ln(t) - 1.

    :param t: argument of the function (must be > 0)
    :return: ln(t) - 1
    """
    value = math.log(t)
    return value - 1
624337e49085fc8680e22832bab535c68a9cadc9
32,074
def _resolve_combined_names(predecessors): """Creates a unique name from the list of :class:`UmiBase` objects (predecessors) Args: predecessors (MetaData): """ # all_names = [obj.Name for obj in predecessors] class_ = list(set([obj.__class__.__name__ for obj in predecessors]))[0] return "Combined_%s_%s" % ( class_, str(hash((pre.Name for pre in predecessors))).strip("-"), )
e92401909a372ec0d6359575b64091fef76a90a2
32,075
def get_lines_len(word_sizes):
    """Return the widths of the lines encoded in *word_sizes*.

    ``word_sizes`` is a sequence where ``0`` marks a line break and every
    other entry is a size tuple whose first element is a word width.
    Widths after the last ``0`` are not reported.
    """
    line_lens = []
    width = 0
    for dim in word_sizes:
        if dim == 0:
            line_lens.append(width)
            width = 0
        else:
            width += dim[0]
    return line_lens
7afa1f4109b77932f6125cf50d5e51cd37db2011
32,076
import os
import subprocess
from datetime import datetime


def get_commit_time():
    """Get the timestamp of the last commit on the project.

    Returns:
        str: the last git commit time formatted with '%Y%m%d%S', or the
        int ``0`` when git is unavailable or the command fails.
    """
    try:
        # Ask git for the unix commit time (%ct) of the newest commit that
        # touched this module's directory.
        cmd = ["git", "log", "-1", "--format=format:%ct", os.path.dirname(__file__)]
        proc = subprocess.check_output(cmd)
        # NOTE(review): '%Y%m%d%S' skips hours and minutes — possibly meant
        # to be '%Y%m%d%H%M%S'; confirm before relying on the exact format.
        time_str = datetime.fromtimestamp(float(proc)).strftime(
            '%Y%m%d%S')
        return time_str
    except Exception:
        # NOTE(review): failure returns int 0 while success returns str;
        # callers must tolerate both types.
        return 0
3667acf4c23f8d4f75b8bb9e7a7ffbd1988b64f3
32,077
from typing import Dict from typing import Any def _get_fp_len(fp_params: Dict[str, Any]) -> int: """ Return the length of the fingerprint with the given parameters. Parameters ---------- fp_params : Dict[str, Any] Parameters to get the fingerprint length from Returns ------- int The fingerprint length belonging to the given fingerprint parameters """ return fp_params['nBits'] if 'nBits' in fp_params else fp_params['fpSize'] if 'fpSize' in fp_params else 166
944e952ad07fa0fa5ea11d5bff8e46b98c1ab87e
32,078
import requests


def get_trades():
    """Retrieve the latest set of trades from the thetagang.com public API.

    Returns:
        list: the ``data.trades`` payload of the JSON response.
    """
    # NOTE(review): no timeout or status check — a hung or failed request
    # propagates as-is; consider timeout= and resp.raise_for_status().
    resp = requests.get("https://api.thetagang.com/trades")
    return resp.json()['data']['trades']
    # Offline alternative kept for local testing:
    # with open('trades', 'rb') as fileh:
    #     return json.load(fileh)['data']['trades']
5e25ea7170181bed34a4e7d3a260cbed9d2ba161
32,079
def flatesteam_feed(Q_feed, r_steam):
    """
    Calculate the steam flow rate of the feed heat exchanger.

    Parameters
    ----------
    Q_feed : float
        The heat load of the feed heat exchanger, [W] [J/s]
    r_steam : float
        The heat of vaporization of the distillate, [J/kg]

    Returns
    -------
    flatesteam_feed : float
        The steam flow rate of the feed heat exchanger, [kg/s]

    References
    ----------
    Dytnersky, eq. 2.3, p. 45
    """
    flow_rate = Q_feed / r_steam
    return flow_rate
ad4a0aa995c9333d70b8fbd003bb03f8bb231018
32,081
def make_hyperlink(text, target):
    """
    Build an OSC-8 terminal hyperlink: *text* is displayed, *target* is the URL.
    https://stackoverflow.com/questions/44078888/clickable-html-links-in-python-3-6-shell
    """
    start = f"\u001b]8;;{target}\u001b\\"
    end = "\u001b]8;;\u001b\\"
    return f"{start}{text}{end}"
4e9b3f69e5d6c48afed5261f7cf70fbee785f8ab
32,084
def load_constants():
    """Return a dict of constants frequently used in this work.

    Returns:
        dict: model parameters; ``gamma_max`` is derived from ``vtl_max``
        and ``m_Rb`` (AA/s -> AA/hr divided by ribosome mass).
    """
    params = {'vtl_max': 20,  # Max translation speed in AA/s
              'm_Rb': 7459,  # Proteinaceous mass of ribosome in AA
              'Kd_cpc': 0.03,  # precursor dissociation constant in abundance units
              'Kd_cnt': 5E-4,  # Nutrient monod constant in M
              'Y': 2.95E19,  # Yield coefficient in precursor mass per nutrient mass nutrient per L
              'OD_conv': 1.5E17,  # Conversion factor from OD to AA mass.
              'Kd_TAA': 3E-5,  # uncharged tRNA dissociation constant in abundance units
              'Kd_TAA_star': 3E-5,  # Charged tRNA dissociation constant in abundance units
              'kappa_max': (64 * 5 * 3600) / 1E9,  # Maximum tRNA synthesis rate in abundance units per unit time
              'tau': 1,  # ppGpp threshold parameter for charged/uncharged tRNA balance
              'phi_O': 0.55,  # Fraction of proteome devoted to `other` proteins for E. coli.
              }
    # Max translation rate per ribosome, converted to per-hour units.
    params['gamma_max'] = params['vtl_max'] * 3600 / params['m_Rb']
    return params
cc5a25875c95676d2cb89a14ff11a8291af0e586
32,085
def task_wrapper(pid, function, batch, queue, *args, **kwargs):
    """Apply *function* to each example in *batch*, reporting progress.

    An ``update<pid>`` message is put on *queue* so a consumer can drive a
    progress bar.

    Returns:
        list: the per-example results, in input order.
    """
    result = []
    for example in batch:
        result.append(function(example, *args, **kwargs))
        queue.put(f'update{pid}')
    return result
8191ea4875f642c172a3a5d22742f66187067298
32,086
from typing import List
import os


def get_all_json_paths(dir_path: str) -> List[str]:
    """Return paths of all regular ``.json`` files directly inside *dir_path*."""
    result = []
    for entry in os.listdir(dir_path):
        full = os.path.join(dir_path, entry)
        if full.endswith(".json") and os.path.isfile(full):
            result.append(full)
    return result
ca3c2e1152f39d8db233448f39c532ab99ad3b1c
32,087
def convert_to_ids(dataset, vocabulary):
    """Convert tokens to integers.

    :param dataset: a 2-d array, contains sequences of tokens
    :param vocabulary: a map from tokens to unique ids
    :returns: a 2-d array, contains sequences of unique ids (integers)
    """
    converted = []
    for sample in dataset:
        converted.append([vocabulary[token] for token in sample])
    return converted
153094a0fcc57880193a441fde0927010b583d19
32,088
def getColumnLocations(columnNumber):
    """
    Return a list of all nine (row, column) locations in a Sudoku column.

    :param int columnNumber: Column index
    :return: List of tuples
    :rtype: list
    """
    locations = []
    for row in range(9):
        locations.append((row, columnNumber))
    return locations
8a7876284327c52badc15ba26a28856018790341
32,089
def transform_date(date):
    """Encode a 'YYYY-MM-DD HH:MM' date-time string for use in a URL.

    :param date: Date and time.
    :type date: str
    :return: encoded date (space -> 'T', seconds suffix appended,
        ':' and '+' percent-encoded).
    :rtype: str
    """
    index = date.find(' ')
    stamped = date[:index] + 'T' + date[index + 1:] + ":00.12345+00:00"
    return stamped.replace(':', '%3A').replace('+', '%2B')
03de08bdb7e15bf74c8ae61eef90ca861e0aef0e
32,090
from typing import Dict
import torch


def clone_tensors(tensors: Dict[int, torch.Tensor]) -> Dict[int, torch.Tensor]:
    """
    Clone every tensor in the dictionary.

    Args:
        tensors (dict): Dictionary mapping keys to tensors.

    Returns:
        New dictionary with the same keys and cloned tensors.
    """
    cloned: Dict[int, torch.Tensor] = {}
    for idx, tensor in tensors.items():
        cloned[idx] = tensor.clone()
    return cloned
af5ae8f368c450d34ec412bd769abe03d77fd257
32,091
def lstripw(string, chars):
    """Strip matching leading characters from each whitespace-separated word."""
    stripped = [word.lstrip(chars) for word in string.split()]
    return " ".join(stripped)
17b7bffd3e6f5e02cc184c1976eeedd93ebb4f3e
32,092
import logging
import os
import json


def load_featured(filename):
    """Load featured themes from a previously saved featured.json.

    Args:
        filename (str): path to the JSON file produced by a previous run.

    Returns:
        dict: the parsed JSON document, or an empty dict when *filename*
        does not exist.
    """
    log = logging.getLogger('load_featured')
    log.info('Started load_featured, opening %s' % filename)
    data = {}
    if os.path.isfile(filename):
        with open(filename, "r") as f:
            data = json.load(f)
    theme_count = 0
    # BUG FIX: the original indexed data['themes'] unconditionally, raising
    # KeyError when the file was missing or lacked a 'themes' key.
    for theme in data.get('themes', []):
        log.info("%s: Updated %s" % (theme['name'], theme['last_updated']))
        theme_count += 1
    log.info('Loaded from %s, %s themes total.' % (filename, theme_count))
    return data
1e332c3c8f0bf9470608ff2db4b360e816a32d2a
32,093
def is_folder_url(url_parts):
    """
    Determine if the given URL points to a folder or a file:

    Folder examples: "www.site.com/", "www.site.com"
    File examples:   "www.site.com/index.php",
                     "www.site.com/index.php?id=1&name=bb",
                     "www.site.com/index.php/id=1&name=bb"

    :param url_parts: Parsed URL to test.
    :type url_parts: ParsedURL

    :return: True if it's a folder, False otherwise.
    :rtype: bool
    """
    has_trailing_slash = url_parts.path.endswith('/')
    return has_trailing_slash and url_parts.query_char != '/'
c5ba46005e6c82cbbcb2ef914947b5a154bdd3b0
32,094
def formatear_camino(pila):
    """Join a list of cities into a single "A -> B -> C" string."""
    return " -> ".join(str(ciudad) for ciudad in pila)
3b85be818e8202e1b3626ce2020d91dd687e5383
32,095
import re


def check_word(word, string):
    """Return True if *word* occurs in *string* as a whole word.

    Word boundaries (``\\b``) are used so partial matches ("cat" inside
    "concat") do not count.

    Args:
        word: the word to search for (matched literally).
        string: the text to search in.
    """
    # BUG FIX: escape the word so regex metacharacters in it (e.g. '.', '+')
    # are matched literally instead of being interpreted as patterns.
    regexStr = re.search(r'(\b%s\b)' % re.escape(word), string)
    if regexStr is not None:
        return True
    return False
da0f559b714bff6ec7a41a892e4c313a4eef13c0
32,096
def GetSettingTemplate(setting):
    """Create the template that will resolve to a setting from xcom.

    Args:
        setting: (string) The name of the setting.

    Returns:
        A templated string that resolves to a setting from xcom.
    """
    template = ('{{ task_instance.xcom_pull(task_ids="generate_workflow_args"'
                ').%s }}')
    return template % setting
8d8c1c7b58d91b1d0a9561fa504887e725416fae
32,097
import subprocess
import json


def nix_prefetch_git(url, rev):
    """Prefetches the requested Git revision (incl. submodules) of the given
    repository URL.

    Args:
        url: repository URL passed to ``nix-prefetch-git``.
        rev: Git revision (commit hash, tag, ...) to prefetch.

    Returns:
        str: the Nix sha256 hash reported for the fetched tree.
    """
    print(f'nix-prefetch-git {url} {rev}')
    # nix-prefetch-git emits a JSON document on stdout; --quiet suppresses
    # progress noise so the output stays parseable.
    out = subprocess.check_output(['nix-prefetch-git', '--quiet', '--url', url, '--rev', rev, '--fetch-submodules'])
    return json.loads(out)['sha256']
57624f63317df2541fa1e16ed3f8332612a6bd30
32,099
def get_nombre_articles(page, dico_balise) -> int:
    """
    Count the articles per page in the "surlignage" category.

    :param page: parsed HTML page of the "surlignage" category.
    :param dico_balise: JSON mapping holding the CSS classes and XPaths.
    :return: the number of articles present on the page.
    """
    css_class = dico_balise['class']['container_article']
    return len(page.find_all(class_=css_class))
540a86c6ec84affee72a06e8e75b60ee23b49b6d
32,100
import ast


def evalTF(string):
    """Evaluate "true"/"false" (case insensitive) to a Python bool.

    @throws Exception if the string is not a literal truth value
    @param string: string to check if true or false
    @return True for "true", False for "false" (case insensitive)
    """
    normalized = string.title()
    return ast.literal_eval(normalized)
3dadb12d40814d2c7f62776a6a9497ee701d31d1
32,101
import traceback


def __eval_all_locators(input_list, return_exec=False, return_exec_name="evaluated_locators"):
    """
    :param input_list: :type list of namedtuple(locator,key,value). An example of this is the ValueFinder tuple
    :param return_exec: :type boolean: flag for whether to return a code string that can be run through exec(*)
    :return: If return_executable is false, returns a list of all the locators run. This often returns the actual
    object that the string was found in. if return_executable is true, this function runs nothing and just returns
    a string of code that can be run as an arg to the exec function. After running the exec function on this arg,
    a variable called evaluated_locators will be referenceable through the locals dictionary using return_exec_name's
    actual value as the key i.e. by default, locals()['evaluated_locators']
    """
    # Build a small program that eval()s every locator string in turn and
    # collects the results into a list named `return_exec_name`.
    # SECURITY NOTE(review): eval/exec on locator strings executes arbitrary
    # code — input_list must come from a trusted source.
    executable_code = return_exec_name + " = []\n" \
        "for x in " + repr(input_list) + ":\n" \
        " " + return_exec_name + ".append(eval(x.locator))"
    try:
        if not return_exec:
            # Run the generated code here; its result list lands in this
            # function's locals() under `return_exec_name`.
            exec(executable_code)
            return locals()[return_exec_name]
    except KeyError:
        # exec() did not create the variable in this scope (CPython does not
        # guarantee exec writes into function locals) — advise the caller.
        traceback.print_last()
        print("Key not found in this scope. "
              "Consider using this function with the return_exec flag instead to run the function in the proper scope.")
    else:
        # return_exec was True: hand back the code string for the caller to exec.
        return executable_code
0947478f84c8e10220eff6522b4804e269d850df
32,102
import inspect
import re


def getargspec(fn):
    """
    Similar to Python 2's :py:func:`inspect.getargspec` but:
    - In Python 3 uses ``getfullargspec`` to avoid ``DeprecationWarning``.
    - For builtin functions like ``torch.matmul`` or ``numpy.matmul``, falls back to
      attempting to parse the function docstring, assuming torch-style or numpy-style.

    Returns:
        tuple: (args, vargs, kwargs, defaults) mirroring the old getargspec
        shape; the docstring-parsed fallback always reports no *args/**kwargs
        and empty defaults.
    """
    assert callable(fn)
    try:
        args, vargs, kwargs, defaults, _, _, _ = inspect.getfullargspec(fn)
    except TypeError:
        # Fall back to attempting to parse a PyTorch/NumPy-style docstring,
        # e.g. "matmul(input, other, *, out=None)" on its first line.
        match = re.match(r"\s*{}\(([^)]*)\)".format(fn.__name__), fn.__doc__)
        if match is None:
            raise
        # Strip optional-argument brackets "[...]" before splitting.
        parts = re.sub(r"[\[\]]", "", match.group(1)).split(", ")
        # Keep only parameter names, dropping "/" and "*" markers and defaults.
        args = [a.split("=")[0] for a in parts if a not in ["/", "*"]]
        # Bail out unless every parsed name is a valid Python identifier.
        if not all(re.match(r"^[^\d\W]\w*\Z", arg) for arg in args):
            raise
        vargs = None
        kwargs = None
        defaults = ()  # Ignore defaults.
    return args, vargs, kwargs, defaults
3aa76a3915e42e9f90f0326c37b94d434eed588a
32,103
import platform


def format_build_command(command):
    """Format a command string so that it runs in the Android build environment.

    Args:
        command: Command to format.

    Returns:
        Command modified to run in the Android build environment.
    """
    steps = []
    if platform.system() == 'Darwin':
        steps.append('export BUILD_MAC_SDK_EXPERIMENTAL=1')
    steps.append('source ./build/envsetup.sh')
    steps.append(command)
    return ' && '.join(steps)
4a0eb92d85f99c01c14a94b8df4fd996d9c23ba2
32,104
import os


def document_function_ace(function):
    """Return the markdown documentation for *function* in the ACE wiki style.

    Builds the title, description, parameters table, return-value table,
    example block and author list from the attributes of the given function
    object (``component``, ``path``, ``description``, ``arguments``,
    ``return_value``, ``example``, ``authors``).
    """
    str_list = []
    # Title: "ace_<component>_fnc_<name>"; the basename slice [4:-4] strips
    # the "fnc_" prefix and ".sqf" extension from the file name.
    str_list.append("\n## ace_{}_fnc_{}\n".format(function.component, os.path.basename(function.path)[4:-4]))
    # Description
    str_list.append("__Description__\n\n" + '\n'.join(function.description) + '\n')
    # Arguments: indexed table when the first argument carries an index,
    # otherwise a single-row unindexed table.
    if function.arguments:
        if function.arguments[0][0]:
            str_list.append("__Parameters__\n\nIndex | Description | Datatype(s) | Default Value\n--- | --- | --- | ---\n")
            for argument in function.arguments:
                str_list.append("{} | {} | {} | {}\n".format(*argument))
            str_list.append("\n")
        else:
            str_list.append("__Parameters__\n\nDescription | Datatype(s) | Default value\n--- | --- | ---\n{} | {} | {} \n\n".format(
                function.arguments[0][1], function.arguments[0][2], function.arguments[0][3]))
    else:
        str_list.append("__Parameters__\n\nNone\n\n")
    # Return Value: same indexed/unindexed split as the arguments.
    if function.return_value:
        if function.return_value[0][0]:
            str_list.append("__Return Value__\n\nIndex | Description | Datatype(s) | Default Value\n--- | --- | --- | ---\n")
            for argument in function.return_value:
                str_list.append("{} | {} | {} | {}\n".format(*argument))
            str_list.append("\n")
        else:
            str_list.append("__Return Value__\n\nDescription | Datatype(s)\n--- | ---\n{} | {} \n\n".format(
                function.return_value[0][1], function.return_value[0][2]))
    else:
        str_list.append("__Return Value__\n\nNone\n\n")
    # Example
    str_list.append("__Example__\n\n```sqf\n{}\n```\n\n".format(function.example))
    # Authors
    str_list.append("\n__Authors__\n\n")
    for author in function.authors:
        str_list.append("- {}\n".format(author))
    # Horizontal rule
    str_list.append("\n---\n")
    return ''.join(str_list)
c289a73c6de505782bf3fc411723366d9f769660
32,105
def create_additive_function(increment):
    """
    Return the addition of a fixed value as a function.

    :param increment: float value that the returned function increments by
    :return: function that adds `increment` to its argument when called
    """
    def add(value):
        return value + increment
    return add
b7432eaa11dcea49bb98ec2c6d3e0cc9dd979145
32,108
def insertionSort(array):
    """Sort *array* of integers in place using insertion sort.

    Returns the same (now sorted) list.
    """
    for idx in range(1, len(array)):
        key = array[idx]
        pos = idx
        # Shift larger elements right until key's slot is found.
        while pos > 0 and array[pos - 1] > key:
            array[pos] = array[pos - 1]
            pos -= 1
        array[pos] = key
    return array
ab7d76a0f03c4f78e8673082d95599ffdf0909a5
32,109
import random


def split_unseen(data, rand=False, prop_dev=0.2, rnd_sd=1489215):
    """
    Split data into completely separate sets (i.e. non-overlap of headlines and bodies)

    Args:
        data: FNCData object
        rand: bool, True: random split and False: constant split
        prop_dev: float, target proportion of data for dev set
        rnd_sd: int, random seed to use for split

    Returns:
        train: list, of dict per instance
        dev: list, of dict per instance
    """
    # Initialise
    n = len(data.instances)
    n_dev = round(n * prop_dev)
    dev_ind = {}  # indices selected for the dev set (used as a set)
    r = random.Random()
    if rand is False:
        # Constant split: fixed seed makes the sample reproducible.
        r.seed(rnd_sd)
    train = []
    dev = []
    # Identify instances for dev set: repeatedly pick a random related
    # instance, then pull in the transitive closure of everything that
    # shares a headline or body with it, so no headline/body crosses the
    # train/dev boundary.
    while len(dev_ind) < n_dev:
        rand_ind = r.randrange(n)
        # Only 'related' stances seed the closure.
        if not data.instances[rand_ind]['Stance'] in ['agree', 'disagree', 'discuss']:
            continue
        if rand_ind not in dev_ind:
            rand_head = data.instances[rand_ind]['Headline']
            rand_body_id = data.instances[rand_ind]['Body ID']
            dev_ind[rand_ind] = 1
            track_heads = {}
            track_bodies = {}
            track_heads[rand_head] = 1
            track_bodies[rand_body_id] = 1
            pre_len_heads = len(track_heads)
            pre_len_bodies = len(track_bodies)
            post_len_heads = 0
            post_len_bodies = 0
            # Fixed-point loop: keep expanding the head/body sets until a
            # full pass adds nothing new.
            while pre_len_heads != post_len_heads and pre_len_bodies != post_len_bodies:
                pre_len_heads = len(track_heads)
                pre_len_bodies = len(track_bodies)
                for i, stance in enumerate(data.instances):
                    if not data.instances[i]['Stance'] in ['agree', 'disagree', 'discuss']:
                        continue
                    if i != rand_ind and (
                            stance['Headline'] in track_heads or stance[
                                'Body ID'] in track_bodies):
                        track_heads[stance['Headline']] = 1
                        track_bodies[stance['Body ID']] = 1
                post_len_heads = len(track_heads)
                post_len_bodies = len(track_bodies)
            # Mark every related instance touching the closure as dev.
            for k, stance in enumerate(data.instances):
                if k != rand_ind and (
                        stance['Headline'] in track_heads or stance[
                            'Body ID'] in track_bodies) and (
                        stance['Stance'] in ['agree', 'disagree', 'discuss']):
                    dev_ind[k] = 1
    # Generate train and dev sets
    for k, stance in enumerate(data.instances):
        if k in dev_ind:
            dev.append(stance)
        else:
            train.append(stance)
    return train, dev
9d77b11c0f77de5ead90fb6674e9f8a54f362156
32,110
def get_dense_network_shapes(n_layers, hidden_size, n_features, n_outputs):
    """
    Generate the input/output shapes for the layers of a densely connected network.

    :param n_layers: Number of hidden layers in the network
    :param hidden_size: How many hidden neurons to use
    :param n_features: Number of features in the original input
    :param n_outputs: Output size/number of target variables
    :return: dict with 'input', 'hidden' (list of per-layer shapes) and
        'output' shape tuples; each layer also receives all previous outputs
        plus the raw features (dense connectivity).
    """
    hidden = [
        (hidden_size * (i + 1) + n_features, hidden_size)
        for i in range(n_layers)
    ]
    return {
        'input': (n_features, hidden_size),
        'hidden': hidden,
        'output': (hidden_size * (n_layers + 1) + n_features, n_outputs),
    }
ea5e74fcdc3fe0b923f1377e202284f0576bff87
32,112
def list_type_check(lst, data_type):
    """
    Check that each element of *lst* has exactly the given type.

    :param lst: List = list of objects
    :param data_type: type = expected type of the objects (exact match,
        subclasses do not count)
    :return: bool = True if all objects in the list have type data_type
    """
    for element in lst:
        if type(element) != data_type:
            return False
    return True
e0c774ddf09a843e5f2f52f7cbf1e332f3f862ad
32,113
import collections


def characters(info, error, otext, tFormats, *tFeats):
    """Computes character data.

    For each text format, a frequency list of the characters in that format
    is made.

    Parameters
    ----------
    info: function
        Method to write informational messages to the console.
    error: function
        Method to write error messages to the console.
    otext: iterable
        The data of the *otext* feature.
    tFormats:
        Dictionary keyed by text format and valued by the tuple of features
        used in that format.
    tFeats: iterable
        Each tFeat is the name and the data of a text feature.
        i.e. a feature used in text formats.

    Returns
    -------
    dict
        Keyed by format valued by a frequency dict, which is itself keyed by
        single characters and valued by the frequency of that character in the
        whole corpus when rendered with that format.

    Notes
    -----
    NOTE(review): ``info``, ``error`` and ``otext`` are not used in this
    body — presumably kept for interface compatibility; confirm with callers.
    """
    # Per-feature character frequency tables.
    charFreqsByFeature = {}
    for (tFeat, data) in tFeats:
        # Count how often each distinct feature *value* occurs ...
        freqList = collections.Counter()
        for v in data.values():
            freqList[v] += 1
        # ... then expand value counts into per-character counts.
        charFreq = collections.defaultdict(lambda: 0)
        for (v, freq) in freqList.items():
            for c in str(v):
                charFreq[c] += freq
        charFreqsByFeature[tFeat] = charFreq
    # Combine the per-feature tables for each text format.
    charFreqsByFmt = {}
    for (fmt, tFeatures) in sorted(tFormats.items()):
        charFreq = collections.defaultdict(lambda: 0)
        for tFeat in tFeatures:
            thisCharFreq = charFreqsByFeature[tFeat]
            for (c, freq) in thisCharFreq.items():
                charFreq[c] += freq
        # Result per format is a sorted list of (char, frequency) pairs.
        charFreqsByFmt[fmt] = sorted(x for x in charFreq.items())
    return charFreqsByFmt
d8e4cf16a3df05c18394483fc008fb453b6ab352
32,116
def initialize_from_function_name(state_name, env):
    """Build a handler dict for *state_name* from names defined in *env*.

    Optional handlers ('pre_func', 'post_func', 'enter_func') are included
    only when ``<kind>_<state_name>`` exists in *env*; the mandatory 'func'
    entry raises KeyError when ``func_<state_name>`` is missing.
    """
    handlers = {}
    for kind in ('pre_func', 'post_func', 'enter_func'):
        value = env.get(f'{kind}_{state_name}')
        if value is not None:
            handlers[kind] = value
    handlers['func'] = env[f'func_{state_name}']
    return handlers
83237b5dcb4fbd77ab53d731b651251e9e375b24
32,117
import os


def get_cmplog_build_directory(target_directory):
    """Return the path of the CmpLog build directory under *target_directory*."""
    subdir = 'cmplog'
    return os.path.join(target_directory, subdir)
2bf2922d0ca11621043971a27bf5e0dd0d931aab
32,120
def get_cournot_problem(alpha, beta, q):
    """Evaluate the Cournot oligopoly first-order condition residuals.

    With inverse demand P = Q^(-alpha) (Q being total output) and marginal
    cost beta*q, returns P + (P' - beta) * q for each firm's quantity q.
    """
    total = q.sum()
    price = total ** (-alpha)
    price_slope = -alpha * total ** (-alpha - 1)
    return price + (price_slope - beta) * q
56fac2f38968242897d7c90027657f4e9a3df88e
32,121
from typing import List
import pkg_resources


def read_csv(path: str, keep_headers: bool = False) -> List:
    """
    Reads a csv file by splitting by "\n" and then "," -- creating a 2d list

    :param path: package-relative path of the csv resource.
    :param keep_headers: when False (default) the first row is dropped.
    :return: list of rows, each a list of string fields.
    """
    # Resolve the path relative to this package's installed data files.
    path = pkg_resources.resource_filename(__name__, path)
    with open(path, "r") as f:
        data = f.read()
    data = data.split("\n")
    if not keep_headers:
        # Drop the header row.
        data = data[1:]
    # NOTE(review): a trailing newline yields a final [''] row; callers
    # appear to tolerate this — confirm before changing.
    data = [x.split(",") for x in data]
    return data
69aa799910eb3cd52b970472df0a18033086105f
32,122
import xml.dom.minidom


def format_xml(xml_str: str, exceptions: bool=False):
    """
    Format an XML document as human-readable plain text.

    :param xml_str: str (Input XML str)
    :param exceptions: Raise exceptions on error instead of returning the
        input unchanged
    :return: str (Formatted XML str, or the original string on parse failure)
    """
    try:
        return xml.dom.minidom.parseString(xml_str).toprettyxml()
    except Exception:
        if not exceptions:
            return xml_str
        raise
517dcd73dfeebaeb4828be2e57e3ab02042001fd
32,126
def make_tree(table):
    """Build Huffman's binary tree from an analysis table.

    Each entry is a tuple whose first element is a weight.  The two lightest
    entries are merged into ``(w1 + w2, entry1, entry2)`` repeatedly (keeping
    the table sorted by weight) until a single root remains, which is
    returned.
    """
    while len(table) > 1:
        merged = (table[0][0] + table[1][0], table[0], table[1])
        table = sorted([merged] + table[2:], key=lambda entry: entry[0])
    return table[0]
5352fd3d02e59ed3cb3e68cb52d285d505186132
32,127
def read_index(fhandle):
    """Read an already open index file and return a list of tuples
    (groupname, list of lines).

    Group headers look like "[name]"; subsequent non-empty lines belong to
    that group.  Raises IOError when no group header is found.
    """
    groups = []
    label = None
    lines = []
    for raw in fhandle:
        stripped = raw.strip()
        if stripped.startswith('[') and stripped.endswith(']'):
            # New group: flush the previous one first.
            if label:
                groups.append((label, lines))
                lines = []
            label = stripped[1:-1]
        elif stripped:
            lines.append(stripped)
    if not label:
        raise IOError('File {0} not readble: missing [group]'.format(fhandle.name))
    groups.append((label, lines))
    return groups
a43f2571a5e581afbe5edfa8feef643c8bd13118
32,128
def sub(x, y):
    """
    Return the difference x - y.

    Parameter x: The value to subtract from
    Precondition: x is a number

    Parameter y: The value to subtract
    Precondition: y is a number
    """
    difference = x - y
    return difference
9c7d9fcef236dff3e5d4b9840c082cbeacc9c7e5
32,129
def excel_column_label(n):
    """
    Excel's column counting convention, counting from A at n=1.

    Returns "" for n <= 0; e.g. 1 -> "A", 26 -> "Z", 27 -> "AA".
    """
    letters = []
    while n > 0:
        # Bijective base-26: shift to 0-based before each divmod.
        n, rem = divmod(n - 1, 26)
        letters.append(chr(ord("A") + rem))
    return "".join(reversed(letters))
1555dcc33420d107c9aa74ce4d7f0395ae6b3029
32,130
def lectureArbre():
    """Read "fichiers/arbre.txt" and return the list encoding the tree it
    describes.

    Returns:
        list: the tree evaluated from the file's first line, or [] when the
        line references an unknown name (NameError).
    """
    fichier = open("fichiers/arbre.txt", "r")  # open the file for reading
    arbre = []
    try:
        # SECURITY NOTE(review): eval() on file contents executes arbitrary
        # expressions — the file must be trusted.
        arbre = eval((fichier.readline()).strip("\n"))
    except NameError:
        print("Erreur dans le fichier arbre.txt : poids inconnu présent.")
    fichier.close()  # close the file
    return arbre
67d42f5f94ea6c4d96ee3f50e1289f7635a8b457
32,133
def find_data_source_url(a_name, url_prefs):
    """Return the url prefix for data source name, or None."""
    return next((row[1] for row in url_prefs if row[0] == a_name), None)
d91960040d4e572ff4c882a53f6ce66460253d9c
32,135
def hourglass(my_arr):
    """
    Collect every hourglass (3 top cells, 1 middle cell, 3 bottom cells)
    from a square grid and return them as a list of lists.
    """
    glasses = []
    span = len(my_arr) - 2
    for i in range(span):
        for j in range(span):
            shape = list(my_arr[i][j:j + 3])
            shape.append(my_arr[i + 1][j + 1])
            shape.extend(my_arr[i + 2][j:j + 3])
            glasses.append(shape)
    return glasses
05e7d48261a991e5d030c8c156583057c935ffc0
32,136
import torch


def predict(img_data, model, device, topk):
    """Classify an image and return the top-k class probabilities and labels.

    Args:
        img_data (torch.Tensor): single preprocessed image tensor — assumed
            (C, H, W); TODO confirm expected shape against callers.
        model: trained torch model exposing an ``idx_to_class`` mapping.
        device: torch device to run inference on.
        topk (int): number of top classes to return.

    Returns:
        tuple: (probs, classes) — top-k probabilities and their class labels.
    """
    model.to(device)
    model.eval()
    # Add the batch dimension the model expects.
    inputs = img_data.unsqueeze(0)
    inputs = inputs.to(device)
    output = model(inputs)
    # exp() assumes the model outputs log-probabilities (e.g. LogSoftmax
    # head) — TODO confirm against the model definition.
    ps = torch.exp(output).data
    ps_top = ps.topk(topk)
    idx_class = model.idx_to_class
    # [0] drops the batch dimension from the top-k results.
    probs = ps_top[0].tolist()[0]
    classes = []
    for i in ps_top[1].tolist()[0]:
        classes.append(idx_class[i])
    return probs, classes
64b492e1222638fa4b0662c035524f2ec8d30a7f
32,137
def tile(tensor, dim, repeat):
    """Repeat each element `repeat` times along dimension `dim`.

    E.g. tile([1, 2], 0, 3) -> [1, 1, 1, 2, 2, 2].
    """
    # Insert a new axis after `dim`, torch.repeat along it, then flatten it
    # back into `dim`.
    repeat_dims = [1] * len(tensor.size())
    repeat_dims.insert(dim + 1, repeat)
    new_dims = list(tensor.size())
    # BUG FIX: the expanded dimension holds `repeat` copies of each element,
    # so its new length is size(dim) * repeat.  The original hard-coded
    # 2 * size(dim), which only worked for repeat == 2 and made view() fail
    # for any other repeat count.
    new_dims[dim] = tensor.size(dim) * repeat
    # Now unsqueeze, repeat and reshape
    return tensor.unsqueeze(dim + 1).repeat(*repeat_dims).view(*new_dims)
a8386c5ed8d6f89f226d64271a8fbddbf0ead543
32,138
def get_lowest_bits(n, number_of_bits):
    """Return the value of the lowest ``number_of_bits`` bits of ``n``."""
    # Equivalent to n mod 2**number_of_bits, computed with a bit mask.
    return n & ((1 << number_of_bits) - 1)
086a48a359984bf950e44e49648bfcac05382c84
32,140
def search_step(f, x_k, alf, p_k):
    """
    Perform one optimization step given a step length and step direction.

    INPUTS:
        f   < function > : objective function f(x) -> f
        x_k < tensor >   : current best guess for the f(x) minimum
        alf < float >    : step length
        p_k < tensor >   : step direction

    OUTPUTS:
        x_(k+1) < tensor > : new best guess for the f(x) minimum
        f_(k+1) < tensor > : function evaluated at the new best guess
    """
    x_next = x_k + alf * p_k
    return x_next, f(x_next)
51d634ef8a6196a884a0c2ec855fb785acf65db5
32,141
def split_str(string):
    """Split *string* in half by word count and return the two halves."""
    words = string.split(' ')
    half = len(words) // 2
    return ' '.join(words[:half]), ' '.join(words[half:])
01268b6c47a4181c7a2e04cacf7651a8c0c81c50
32,143
from typing import Tuple


def get_default_span_details(scope: dict) -> Tuple[str, dict]:
    """Default implementation for get_default_span_details

    Args:
        scope: the asgi scope dictionary

    Returns:
        a tuple of the span name (the request path, or "HTTP <method>" when
        no path is present), and any attributes to attach to the span.
    """
    path = scope.get("path", "").strip()
    if path:
        span_name = path
    else:
        span_name = f"HTTP {scope.get('method', '').strip()}"
    return span_name, {}
6177c4f32c5837752cce9c8b346350b480bfdcd2
32,144
import numpy


def lpc2spec(lpcas, nout=17):
    """
    Convert LPC coeffs back into spectra
    nout is number of freq channels, default 17 (i.e. for 8 kHz)

    :param lpcas: 2-D array of LPC coefficient frames (one frame per row;
        column 0 is the gain term) — assumed shape (frames, order+1);
        TODO confirm against callers.
    :param nout: number of frequency channels to evaluate.
    :return: (features, F, M) — power spectra per frame, plus per-frame pole
        frequencies (F) and magnitudes (M) for the positive-frequency roots.
    """
    [cols, rows] = lpcas.shape
    order = rows - 1
    # Gain term per frame; normalize coefficients by it.
    gg = lpcas[:, 0]
    aa = lpcas / numpy.tile(gg, (rows, 1)).T
    # Calculate the actual z-plane polyvals: nout points around unit circle
    zz = numpy.exp((-1j * numpy.pi / (nout - 1)) * numpy.outer(numpy.arange(nout).T, numpy.arange(order + 1)))
    # Actual polyvals, in power (mag^2)
    features = (1./numpy.abs(aa.dot(zz.T))**2) / numpy.tile(gg, (nout, 1)).T
    F = numpy.zeros((cols, rows-1))
    M = numpy.zeros((cols, rows-1))
    for c in range(cols):
        aaa = aa[c, :]
        # Poles of the all-pole filter for this frame.
        rr = numpy.roots(aaa)
        ff = numpy.angle(rr.T)
        # Evaluate the polynomial at each pole angle on the unit circle.
        zz = numpy.exp(1j * numpy.outer(ff, numpy.arange(len(aaa))))
        mags = numpy.sqrt(((1./numpy.abs(zz.dot(aaa)))**2)/gg[c])
        # Keep only positive-frequency poles, sorted by frequency.
        ix = numpy.argsort(ff)
        keep = ff[ix] > 0
        ix = ix[keep]
        F[c, numpy.arange(len(ix))] = ff[ix]
        M[c, numpy.arange(len(ix))] = mags[ix]
    # Drop all-zero columns (unused pole slots).
    F = F[:, F.sum(axis=0) != 0]
    M = M[:, M.sum(axis=0) != 0]
    return features, F, M
92e7dcb63a3d48a0275450debd594154a224718b
32,148
def process_name_strings(language_data, df):
    """
    Returns a dictionary of names for each of the different items specified in the DataFrame (df).
    The key denotes the row_number of each item. A nested dictionary is the value, with each language code
    as the key, and the value the name. If a language does not have a name/translation, None is provided
    in its place. The default language (typically EN) is specified as 'default'. This should be used as a
    fallback.

    NOTE(review): assumes *df* has columns named "name <language_code>" and
    that *language_data* maps language codes to dicts with a 'DEFAULT' flag —
    confirm against callers.
    """
    return_object = {}
    included_languages = []

    def map_to_dict(return_object, language_code, string_dict):
        """
        Nested function that takes data from the DataFrame and puts it into the return_object.
        """
        for row_number, name in string_dict.items():
            if row_number not in return_object.keys():
                return_object[row_number] = {}
            name = name.strip()
            # Empty cells become None (no translation available).
            if name == '':
                return_object[row_number][language_code] = None
            else:
                return_object[row_number][language_code] = name

    # Iterate over each of the columns, extracting the "name " fields.
    # Determines what languages we have available in the DataFrame.
    for column in df.columns:
        if column.startswith('name'):
            included_languages.append(column.lower().split()[-1])
    # Build up the dictionary to return.
    for language_code in language_data:
        try:
            string_name = df[f'name {language_code}']
            map_to_dict(return_object, language_code, string_name.to_dict())
            # The default language is additionally stored under 'default'.
            if language_data[language_code]['DEFAULT']:
                map_to_dict(return_object, 'default', string_name.to_dict())
        except KeyError:
            # Language not present as a column in this DataFrame.
            continue
    # Fill in the blanks - check each language we should be providing support for is mentioned.
    # If not, specify None in its place.
    for row_number in return_object.keys():
        for language_code in language_data:
            if language_code not in return_object[row_number].keys():
                return_object[row_number][language_code] = None
    return return_object
f77fd8a83c524f0bca8b0c8c15c2216437492e1f
32,150
def convert_list_type(x, type=int):
    """Convert each element of *x* to the given type and return a new list."""
    return [type(element) for element in x]
36957a24aaeff11cedd2dcb0715c757b2c627083
32,151
def getExperimentAccuracy(list_of_results):
    """Return the fraction of Result objects whose isCorrect() is truthy.

    Args:
        list_of_results: iterable of Result objects exposing isCorrect().

    Returns:
        float: num_correct / num_total, or 0.0 for an empty input (the
        original raised ZeroDivisionError on an empty list).
    """
    num_correct, num_total = 0, 0
    for result in list_of_results:
        num_total += 1
        if result.isCorrect():
            num_correct += 1
    # Guard against an empty experiment instead of dividing by zero.
    return num_correct / num_total if num_total else 0.0
8276e06a41a1105700232ed1ccfb38bd2b3d5063
32,152
import json


def parser(chunks):
    """
    Parse each data chunk as JSON. A chunk that fails to parse is wrapped
    in a fallback dictionary under the 'unparsed' key instead of raising.
    """
    def _parse_one(chunk):
        try:
            return json.loads(chunk)
        except ValueError:
            return {'unparsed': chunk}

    return [_parse_one(chunk) for chunk in chunks]
385b73026c079b635b6e33b35bfd8f5ebb453f64
32,154
def torch_diff(tensor, n=1, dim=-1):
    """
    Compute the n-th discrete difference of ``tensor`` along ``dim``.

    tensor : input Tensor
    n : int, optional
        Number of times values are differenced. If zero, the input is
        returned as-is.
    dim : int, optional
        The axis along which the difference is taken; default is the
        last axis.
    """
    rank = tensor.dim()
    upper = [slice(None)] * rank
    lower = [slice(None)] * rank
    upper[dim] = slice(1, None)    # elements 1..end along dim
    lower[dim] = slice(None, -1)   # elements 0..end-1 along dim
    upper = tuple(upper)
    lower = tuple(lower)
    for _ in range(n):
        tensor = tensor[upper] - tensor[lower]
    return tensor
02277b8d881ec3bc2806f6ddd06acef8d26f83ac
32,155
import time
import random


def get_data():
    """
    Build the sample telemetry data to send.

    :return: dictionary with the data of the variables
    :rtype: dict
    """
    now = time.time()

    def _record(request, device, var_name):
        """Build one MODBUS time-series record (one randint call each)."""
        return {
            "timestamp": now + random.randint(1, 10),
            "var_name": f"{request}__{device}__{var_name}",
            "value": now % 100,
            "plugin": "MODBUS",
            "context": {
                "request": request,
                "var_name": var_name,
                "device": f"{device}",
            },
        }

    data_list = []
    for inverter in range(2):
        for var in range(3):
            data_list.append(
                _record("NombreConsulta", inverter, f"NombreVariable{var}")
            )
    for inverter in range(2):
        data_list.append(
            _record("NombreConsulta", inverter, "OtroNombreVariable")
        )
    for inverter in range(3):
        data_list.append(
            _record("OtroNombreConsulta", inverter, "NombreVariable")
        )
    return {"TIMESERIES": data_list}
da9ee0fef6c55e1af02dca223fc7e978c340f3b6
32,156
from datetime import datetime
import time


def get_date():
    """
    Return the current local date as a string.

    Format: "Month day, year", e.g. "January 15, 2020". The day is not
    zero-padded, which is why strftime's %d is not used here.
    """
    MONTHS = ("January", "February", "March", "April", "May", "June",
              "July", "August", "September", "October", "November",
              "December")
    # datetime.now() is equivalent to (and simpler than) the old
    # datetime.fromtimestamp(time.time()) round-trip.
    dt = datetime.now()
    return f"{MONTHS[dt.month - 1]} {dt.day}, {dt.year}"
71848adf618f2e339f7e3f0cc56d629a78e947f8
32,158
def filter_deinterlace():
    """Return the ffmpeg yadif deinterlacing filter expression."""
    args = ("0", "-1", "0")
    return "yadif=" + ":".join(args)
06d4e7f20ed4c6af2cb0f2525847184484301423
32,160
def update_dict(d, e, copy=True):
    """
    Return ``d`` updated with the entries of ``e``.

    When ``copy`` is True (the default), ``d`` itself is left untouched:
    a shallow copy is updated and returned. Otherwise ``d`` is mutated in
    place and returned.

    Examples
    --------
    Consider a dictionary `d` which we want to update:

    >>> d = {0: 'a', 1: 'b'}

    Now consider the dictionary for update:

    >>> e = {1: 'c', 2: 'd'}

    We can update the `d` as follows:

    >>> f = update_dict(d, e)
    >>> f
    {0: 'a', 1: 'c', 2: 'd'}

    Note that the previous dictionary is unchanged:

    >>> d
    {0: 'a', 1: 'b'}
    """
    target = d.copy() if copy else d
    target.update(e)
    return target
4cd3f53c651be577b45a35aaacfef658b852faf3
32,161
from pathlib import Path
import re


def rglob(self: Path, regex=".*"):
    """Yield the direct children of *self* whose string path matches *regex*.

    Like path.glob, but filters with ``re.match`` against the full path
    string instead of a glob pattern.
    """
    for child in self.glob("*"):
        if re.match(regex, str(child)):
            yield child
f16d8d0c5bb990d8faac0e7ad56c5b9157c4c9a4
32,162
def vlan_range_expander(all_vlans):
    """
    Expand a trunk-interface VLAN range into the full list of VLAN numbers.

    Example: `1-4096` -> range(1, 4097). Can be used when trying to figure
    out whether a certain VLAN is allowed or not. Reverse function is
    ``vlan_range_shortener``.

    :param all_vlans: Either list (`["1-10", "15", "20-30"]`), string
        (`"1-10,15,20-30"`) or int VLAN range.
    :return: List of VLAN numbers (integers).
    """
    if isinstance(all_vlans, str):
        parts = all_vlans.split(",")
    elif isinstance(all_vlans, int):
        parts = [str(all_vlans)]
    else:
        parts = all_vlans

    expanded = []
    for part in parts:
        if "-" in part:
            bounds = part.split("-")
            expanded.extend(range(int(bounds[0]), int(bounds[1]) + 1))
        else:
            expanded.append(int(part))
    return expanded
5224436c8bf10509df6d8ad52321e7dd9214792a
32,163
import torch


def to_cuda(data):
    """
    Move input data onto the CUDA device.

    Args:
        data: a ``torch.Tensor``, or a tuple/list of tensors.

    Returns:
        The tensor on CUDA, or a list of CUDA tensors for sequence input.
        Note: tuple input comes back as a list, matching the original
        contract; list input (a backward-compatible generalization) too.

    Raises:
        RuntimeError: if ``data`` is neither a tensor nor a tuple/list.
    """
    if isinstance(data, (tuple, list)):
        return [d.cuda() for d in data]
    if isinstance(data, torch.Tensor):
        return data.cuda()
    # Give callers an actionable message instead of a bare RuntimeError.
    raise RuntimeError(f"to_cuda: unsupported input type {type(data)!r}")
af9820bccbce3369357bf7c5b853efe3e88e052a
32,164
def sliding_point_cloud(df, width):
    """ Returns a sliding window point cloud from a list (or dataframe) of points.
    width (int or np.timedelta64): if int, window goes by iloc.
                                   if timedelta, window goes by time
                                   (assumes the index holds timestamps
                                   comparable with ``width`` -- TODO confirm).

    Note: the two branches return different element types -- row tuples in
    the int branch, raw index values in the timedelta branch.
    """
    if type(width) == int:
        ind = list(df.index)
        # Map each index label to a tuple of that row's column values.
        dfdict = df.T.apply(tuple).to_dict()
        # NOTE(review): range stops at len(ind), so the last window ends at
        # ind[len(ind)-1] *exclusive* -- the final row never terminates a
        # window. Possible off-by-one; confirm intended.
        return [[dfdict[d] for d in ind[i - width:i]] for i in range(width, len(ind))]
    else:
        # Time-based window: all index values within [dt - width, dt],
        # inclusive at both ends. O(n^2) scan over the index per window.
        return [[x for x in df.index if dt - width <= x <= dt] for dt in df.index]
1446f37eff8722bb8732c8854260c1587040dfac
32,165
from typing import Optional
import ast


def get_version_using_ast(contents: bytes) -> Optional[str]:
    """Extract the version from the given file, using the Python AST.

    Args:
        contents: raw source of a Python module.

    Returns:
        The string assigned to ``__version__`` at the top level, or None
        when no simple ``__version__ = "..."`` assignment is found.
    """
    tree = ast.parse(contents)
    # Only need to check the top-level nodes, and not recurse deeper.
    version: Optional[str] = None
    for child in tree.body:
        # Look for a simple string assignment to __version__.
        # ast.Constant replaces ast.Str, which was deprecated in 3.8 and
        # removed in Python 3.12.
        if (
            isinstance(child, ast.Assign)
            and len(child.targets) == 1
            and isinstance(child.targets[0], ast.Name)
            and child.targets[0].id == "__version__"
            and isinstance(child.value, ast.Constant)
            and isinstance(child.value.value, str)
        ):
            version = child.value.value
            break
    return version
690d4d04fd17263b90fa51a982519f685f9622a4
32,167
def _arg_scope_func_key(op): """Returns a key that can be used to index arg_scope dictionary.""" return getattr(op, '_key_op', str(op))
b713202ef1d53650996041bd15655b58f348423a
32,168
def load_html(html_file: str) -> str:
    """Load an html file from the template dir and return it as a string.

    Args:
        html_file: path to the html file.

    Returns:
        The file contents, decoded as UTF-8 (explicit encoding avoids the
        locale-dependent behaviour of the platform default).
    """
    with open(html_file, "r", encoding="utf-8") as f:
        return f.read()
99b5c2d51172cac33bab32454a67c84eefe9021d
32,169
def complement(sequence):
    """
    Params:
    * *sequence(str) *: DNA sequence, non ATGC nucleotide will be returned unaltered
    Returns:
    * *(str)*: complement of input sequence
    """
    # A<->T, C<->G (case preserved); N maps to itself; any other
    # character passes through unchanged.
    table = str.maketrans('ACGTNacgtn', 'TGCANtgcan')
    return sequence.translate(table)
2e0419c865968e0e24a8bae87c020158fc14768c
32,170
def fallback_key_exists(verbose, fallback_language, obj_id, key, object_json):
    """
    Checks if there was source language to be translated from
    """
    if fallback_language not in object_json:
        if verbose:
            print(f"No {fallback_language} reference for {obj_id} string '{key}'"
                  f" in dump file -- probably shouldn't exist; skipping")
        return False
    return True
29bfa4bc3cf39de127d5c07a4e70abd152b7ec28
32,171
def ptoc(width, height, x, y, zxoff, zyoff, zoom):
    """
    Converts actual pixel coordinates to complex space coordinates
    (zxoff, zyoff are always the complex offsets).
    """
    half_w = width / 2
    half_h = height / 2
    aspect = width / height
    # Real axis is scaled by the aspect ratio; imaginary axis is flipped
    # because pixel y grows downward.
    real = zxoff + aspect * (x - half_w) / (zoom * half_w)
    imag = zyoff + -1 * (y - half_h) / (zoom * half_h)
    return real, imag
a0d49a0180b620f08b478a0b5ee9a313e1da468e
32,172
def calc_standard_yield(crop):
    """Standard yield per year, in kg/m2/year.

    Taken from table from Shao Economic Estimation Tool (2017).

    Raises:
        RuntimeError: for crops not present in the table.
    """
    standard_yields = {'lettuce': 78.5}  # kg/m2/year
    try:
        return standard_yields[crop]
    except KeyError:
        raise RuntimeError("Unknown crop: {}".format(crop)) from None
1aa1edb085e29f1c217e4acc5a0c9f98b8aa0629
32,173
def _maybe_convert_to_int(value): """Returns the int representation contained by string |value| if it contains one. Otherwise returns |value|.""" try: return int(value) except ValueError: return value
2e67c4a8f6aa3ef5a0f982c85127f37b60f979ad
32,174
def try_helper(f, arg, exc=AttributeError, default=''):
    """Helper for easy nullable access: call ``f(arg)`` and return
    *default* when *exc* is raised."""
    try:
        result = f(arg)
    except exc:
        result = default
    return result
5f1d97a1d138981831ee00b1f71b97125ff40370
32,175
def get_centroid_idx(centroids, c):
    """
    Returns the index of a given centroid c. Assumes that centroids is
    the ndarray of shape (k, d) where k is a number of centroids and d
    is a number od dimensions.
    """
    # Compare as plain Python lists so exact (element-wise) equality is used.
    as_lists = centroids.tolist()
    return as_lists.index(c.tolist())
08eae6aaa3ac7933c5f8bca08a9c1c75da26daf0
32,176
import os


def expand(filePath, fileName=None):
    """Combine a directory and file name and expand env variables and ~.

    A full path can be input in filePath. Or a directory can be input in
    filePath and a file name input in fileName.
    """
    if fileName:
        filePath = os.path.join(filePath, fileName)
    path = str(filePath)
    # Only call the expanders when the markers are actually present.
    if "$" in path:
        path = os.path.expandvars(path)
    if "~" in path:
        path = os.path.expanduser(path)
    return path
033b439127719500ff140602900f3fa50b106d6c
32,177
def undo_pad(data, pad_size):
    """Remove padding from edges of images

    Parameters
    ----------
    data : array-like
        padded image
    pad_size : array-like
        amount of padding in every direction of the image, shape (ndim, 2)

    Returns
    -------
    data : array-like
        unpadded image

    Raises
    ------
    ValueError
        when pad_size does not describe one (before, after) pair per
        axis of ``data``.
    """
    if pad_size.ndim == 1 and data.ndim != 1:
        raise ValueError("Dimensions do not match")
    if data.ndim != pad_size.shape[0]:
        raise ValueError("Dimensions do not match")
    # Per-axis window: [pad_before, axis_length - pad_after).
    lo = pad_size[:, 0].astype(int)
    hi = (data.shape - pad_size[:, 1]).astype(int)
    window = tuple(slice(a, b) for a, b in zip(lo, hi))
    return data[window]
873feb3cf4daaf6153dfe87662ba10b531ba222f
32,178
import glob
import os


def get_csv_names(output_url, suffix):
    """
    Recursively collect the names of all csvs under a folder whose
    filenames end with the given suffix.

    Input:
        output_url - the file directory to look in
        suffix - the suffix of the csv name
    Output:
        csv_names - a list of csv folder/filenames
    """
    pattern = os.path.join(output_url, '**/*{}.csv'.format(suffix))
    return glob.glob(pattern, recursive=True)
05920446bfa8c0f9f88416b8a623a50445c02aac
32,180
import warnings


def null_observation_model(arg):
    """
    A callable that returns ``arg`` directly. It works as an identity
    function when observation models need to be disabled for a
    particular experiment.
    """
    message = (
        "`null_observation_model` is deprecated. "
        "Use `<MeasurementType>.observation_model(backend='null')` instead"
    )
    warnings.warn(message, DeprecationWarning)
    return arg
05edc6846617400fca9ab5dc1f0614237372bc8e
32,181
def df_to_vega(df):
    """
    Convert a Pandas dataframe to the list-of-records format Vega-Lite
    expects (the index becomes an ordinary column).
    """
    flattened = df.reset_index()
    return [series.to_dict() for _, series in flattened.iterrows()]
6ecbddde38cfc1420370c70a48161e21efd79980
32,183
import torch


def angle(x, eps=1e-11):
    """
    Computes the phase of a complex-valued input tensor (x).

    Args:
        x: tensor whose last dimension is 2 -- (real, imaginary) parts.
        eps: retained for backward compatibility; no longer needed
            because atan2 handles a zero real part directly.

    Returns:
        Tensor of phases in (-pi, pi], one per complex element.
    """
    assert x.size(-1) == 2
    # atan2 picks the correct quadrant; the old atan(im / (re + eps)) was
    # wrong for negative real parts and only dodged division by zero.
    return torch.atan2(x[..., 1], x[..., 0])
8cfbf6c9aefddfcb7de5af3d1fca89f7fb3dfd32
32,184
def progressive_step_function_maker(start_time, end_time, average_value, scaling_time_fraction=0.2):
    """
    Make a step_function with linear increasing and decreasing slopes to
    simulate more progressive changes

    :param average_value: targeted average value (auc)
    :param scaling_time_fraction: fraction of (end_time - start_time) used
        for scaling up the value (classic step function obtained with
        scaling_time_fraction=0, triangle function obtained with
        scaling_time_fraction=.5)
    :return: function
    """
    assert scaling_time_fraction <= 0.5, "scaling_time_fraction must be <=.5"

    duration = end_time - start_time
    ramp = scaling_time_fraction * duration  # length of each sloped segment
    # Plateau is raised so the area under the curve equals average_value.
    plateau_height = average_value / (1.0 - scaling_time_fraction)

    def my_function(time):
        if time <= start_time or time >= end_time:
            return 0
        slope = plateau_height / ramp
        if time < start_time + ramp:
            # rising edge
            return slope * (time - start_time)
        if time > end_time - ramp:
            # falling edge
            return slope * (end_time - time)
        return plateau_height

    return my_function
066ae4415c942248511c04b6732f98587f2f524f
32,186
import re


def ratio_caps(text: str, ratio: float) -> bool:
    """
    Return True when the capital-letter-to-word ratio of *text* is below
    *ratio*.

    ##TO DO: better way to clean. placeholder for removing
    DISTRIBUTION STATEMENTS/jargon fragments

    A text with no words (empty or whitespace-only) has no capitals
    either, so it passes the filter instead of raising
    ZeroDivisionError as the original did.
    """
    words = text.split()
    if not words:
        return True
    return len(re.findall(r"[A-Z]", text)) / len(words) < ratio
b8f6a140ab51a188134bafb58d3eb06eecffd213
32,188
import os def _get_exec_path(exec_name): """ If the HOTKNOTS environment variable is set, use that as the directory of the hotknots executables. Otherwise, have Python search the PATH directly. """ if 'HOTKNOTS' in os.environ: return os.environ['HOTKNOTS'] + '/bin/' + exec_name else: return exec_name
0caf332f85195f4e2d2aaa4ce9581f8360ea5899
32,189
def win_for_player(board, player_token):
    """
    Four in a row, column or a diagonal

    :param board: 6x7 grid of cell tokens
    :param player_token: 'r' / 'y'
    :return: True when player_token has four connected cells
    """
    # (row step, col step): vertical, horizontal, down-diagonal, up-diagonal.
    directions = ((1, 0), (0, 1), (1, 1), (-1, 1))
    for row in range(6):
        for col in range(7):
            if board[row][col] != player_token:
                continue
            for dr, dc in directions:
                end_r, end_c = row + 3 * dr, col + 3 * dc
                if not (0 <= end_r < 6 and 0 <= end_c < 7):
                    continue
                if all(board[row + k * dr][col + k * dc] == player_token
                       for k in range(1, 4)):
                    return True
    return False
9fae699021fdc0d7169d30e0edec18161bd2ae8c
32,192
import argparse
import os


def recources_exist(
        argv: argparse.Namespace,
        resources: str
) -> bool:
    """Check that the files and folders required for operation exist.

    A missing resources folder or station database aborts (returns
    False). A missing map-chunks folder only switches the program into
    lightweight mode (``argv.light = True``) and still returns True.
    """
    exists = os.path.exists

    if not exists(resources):
        print("Не найдена папка ресурсов!")
        print("Заканчиваю работу")
        return False

    if not exists(f"{resources}stations.json"):
        print("Не найдена база станций!")
        print("Заканчиваю работу!")
        return False

    if exists(f"{resources}chunks"):
        return True

    # Map chunks are optional: fall back to lightweight mode.
    print("Не найдена папка с частями карты!")
    print("Запускаю программу в легковесном режиме!")
    argv.light = True
    return True
c7b1e7780e6f4c36d0fafb4ec6ccfb1d64d6e37d
32,193
def _GetVocabulary(vocab_filepath): """Maps the first word in each line of the given file to its line number.""" vocab = {} with open(vocab_filepath, 'r') as vocab_file: for i, line in enumerate(vocab_file): word = line.strip('\r\n ').split(' ')[0] if word: vocab[word] = i return vocab
2db9fd70180e9fc2c64e604609fc007a533f2aa9
32,194
import argparse


def prepare_options():
    """
    Prepare the option parser.
    """
    option_parser = argparse.ArgumentParser(description=__doc__)
    option_parser.add_argument("filename", nargs="+")
    return option_parser
99957b4f235f62702f528a823bb1625d6f4d8acb
32,196
def filter_to_region(node, contig=None, coords=None):
    """Return True iff a node is within a given region (and region is specified).

    ``node`` is ``((seq, coord), miss)``; an unspecified contig or coords
    constraint is treated as always satisfied.
    """
    (seq, coord), _miss = node
    if contig and seq != contig:
        return False
    if coords:
        if coord < coords[0] or coord > coords[1]:
            return False
    return True
bbbde3a35d464883de4e92c62f2a574eda11ff2f
32,197
def is_setuptools_enabled(pkginfo):
    """Function responsible to inspect if skeleton requires setuptools

    :param dict pkginfo: Dict which holds the package information
    :return Bool: Return True if it is enabled or False otherwise
    """
    entry_points = pkginfo.get("entry_points")
    if not isinstance(entry_points, dict):
        return False
    # Script entry points are handled natively; any other group means
    # the package needs setuptools at run-time.
    script_groups = {'console_scripts', 'gui_scripts'}
    return any(group not in script_groups for group in entry_points)
1faf21c804aa0b0a5b681d09ca4577d15a264ae7
32,198