content
stringlengths
35
416k
sha1
stringlengths
40
40
id
int64
0
710k
def compute_time_difference_between_tweets(tweet_times):
    """Compute the time intervals between successive tweet times.

    :param tweet_times: successive tweet times as datetime-like objects
        exposing ``.timestamp()`` (note: ``timestamp()`` yields seconds,
        not the milliseconds the original docstring claimed).
    :return: list of intervals in seconds, same length as the input
        (a leading 0 pads the first entry).
    """
    # Pad with a single 0 so input and output arrays are the same length.
    intervals = [0]
    for idx in range(len(tweet_times) - 1):
        # BUG FIX: the original computed earlier - later, which yields
        # negative intervals for chronologically ordered tweets.
        intervals.append(tweet_times[idx + 1].timestamp()
                         - tweet_times[idx].timestamp())
    return intervals
14cb59d575d08de3160aa644e7b4cd369cd132f3
50,065
def currency_codes() -> dict:
    """Retrieve currency names and symbols keyed by ISO 4217 code.

    Returns:
        dict: mapping of currency code -> (name, symbol).
    """
    # NOTE(review): values mix literal symbols and HTML entities; the
    # ANG entity was missing its terminating ';' ("&#402") and is fixed.
    codes = {
        "AED": ("United Arab Emirates Dirham", "\u062f."),
        "ANG": ("Netherlands Antilles Guilder", "&#402;"),
        "EUR": ("Euro Member Countries", "€"),
        "GBP": ("United Kingdom Pound", "£"),
        "USD": ("United States Dollar", "$"),
    }
    return codes
e69bcf6fd3a2a259aeae82a243249446d37e586b
50,066
def create_salt() -> bytes:
    """Generate a random binary salt for deriving a key from a string.

    Create the salt once; reuse the same salt whenever the key is
    derived from that string again.

    Returns:
        bytes: 16 cryptographically secure random bytes.
    """
    salt = os.urandom(16)
    return salt
f63295387358a62812af2594e0d00d853e10a993
50,067
def parse_html(html, issn):
    """Parse an HTML string with BeautifulSoup's XML parser.

    Returns the parsed soup together with the (unchanged) ISSN.
    """
    soup = BeautifulSoup(html, 'xml')
    return soup, issn
bebe084acb86b70a250e327c915ec9301eb28949
50,068
def epoch_to_timestamp(time_raw):
    """Convert a raw time (days since year 0) to a Unix timestamp.

    :param time_raw: days elapsed since 0000-01-01.
    :return: seconds since 1970-01-01 (the Unix epoch).
    """
    DAYS_FROM_YEAR_ZERO_TO_EPOCH = 719529  # offset between 1970-1-1 and 0000-1-1
    SECONDS_PER_DAY = 86400
    return (time_raw - DAYS_FROM_YEAR_ZERO_TO_EPOCH) * SECONDS_PER_DAY
761bc5ba55e0f603e35879a8676890b25e1d049a
50,069
def parse_config(config, value):
    """Parse config for string interpolation.

    Stores ``value`` under the ``"tmp_value"`` key as a side effect
    (callers may rely on this), then reads it back.
    """
    config.update(tmp_value=value)
    return config["tmp_value"]
e357bb022b24fb0e01f8d085f025a0e8b659dcac
50,070
import time def ctime(val): """ Convert time in milliseconds since the epoch to a formatted string """ return time.ctime(int(val) // 1000)
0512a79ebaccc19422c797cb00e509494d0debf2
50,071
def repackage_hidden(h, init0):
    """Wrap hidden states in new tensors detached from their history.

    If ``init0`` is 1 the returned state is zeroed (via the
    ``(1 - init0)`` factor); otherwise values are kept but gradients no
    longer flow back. Tuples are handled recursively.
    """
    if isinstance(h, torch.Tensor):
        detached = h.detach()
        return (1 - init0) * detached
    return tuple(repackage_hidden(item, init0) for item in h)
56535d1f615a82962f0cd439724ac709e3b7e0d5
50,072
def natural_sort(string_):
    """Return a key for human-expected alphanumeric ("natural") sorting:
    digit runs become ints, everything else stays a string."""
    parts = re.split(r'(\d+)', string_)
    return [int(part) if part.isdecimal() else part for part in parts]
89e5aaa2a010585ce2d54fca0d779e4afcdef69d
50,074
async def py_normal(title: str):
    """Normal exposed function

    Enter -> async sleep 10sec -> Exit

    Respond to calls from other clients, even if the call is from one
    client.

    Parameters
    ----------
    title: str
        Characters to print out on the server

    Returns
    ----------
    True
    """
    print(f'Enter py_normal: {title}')
    # Non-blocking sleep: the event loop keeps serving other coroutines
    # for the full 10 seconds.
    await asyncio.sleep(10)
    print(f'Exit py_normal: {title}')
    return True
27c58d8906a344374f66ed7046385cf1c614063e
50,075
def name_predicate(self, obj, request):
    """match name argument with request.view_name.

    Predicate for :meth:`morepath.App.view`.
    """
    view_name = request.view_name
    return view_name
fee9afdac4f44aabb7ee03083c2ced7bca851512
50,077
def to_rgba_bytes(v: int) -> bytes:
    """ Converts an RGBA color int to raw bytes. """
    # Extract the four channel bytes from most- to least-significant.
    shifts = (24, 16, 8, 0)
    return bytes((v >> shift) & 0xFF for shift in shifts)
242dbe2505c83513a32d1de34be0d5dfe34eabfa
50,079
def prefixCombiner(prefix, itemlist, glue=''):
    """Returns a list of items where each element is prepended by the
    given prefix, joined with ``glue``."""
    return [prefix + glue + item for item in itemlist]
be17cd03d246c03abadc932b296726e59c4ef667
50,082
def add_source_init(module):
    """Look for a function named ``source_init`` in ``module``; if
    found, return its result as a string, otherwise the empty string.
    """
    try:
        source_init = module.source_init
    except AttributeError:
        return ""
    return source_init()
deec406ec3ff1c91557514de6eebad0090977665
50,083
def build_list(comments, p_id):
    """Takes a query set of comments and a parent id and returns a list
    of comments ordered depth-first: each comment is immediately
    followed by its own children (first top-level comment, then its
    first child, then that child's children, and so on).
    """
    ordered = []
    for comment in comments.filter(nparent_id=p_id):
        ordered.append(comment)
        # Recurse into replies; contributes nothing for leaf comments.
        if comments.filter(nparent_id=comment.id):
            ordered.extend(build_list(comments, comment.id))
    return ordered
8c8e2cfc2168dc27a070cc6b2d3b8086ad9cd352
50,084
def config_section_data():
    """Produce the default configuration section for app.config,
    when called by `resilient-circuits config [-c|-u]`
    """
    # The <PLACEHOLDER> values must be filled in by the deployer.
    config_data = u"""[fn_elasticsearch]
es_datastore_url = <ELASTICSEARCH_URL>
es_datastore_scheme = <https OR http>
es_auth_username = <ELASTICSEARCH_USERNAME>
es_use_http = <True OR False>
es_auth_password = <ELASTICSEARCH_PASSWORD>
es_cafile = <CA_FILE_TO_BE_USED>
"""
    return config_data
1c97b7220a31f058932660c7bcb20a00c0f3e929
50,086
def textfile_to_list(filename: str) -> List[str]:
    """Read a text file and return a list of its non-empty lines.

    Args:
        filename: name of the text file

    Returns:
        list of the non-empty lines, stripped of surrounding whitespace.
    """
    with Path(filename).open() as fhandle:
        return [line.strip() for line in fhandle if line.strip()]
3d26740706cbb91c5d9c9c8b60b6224b187c7899
50,087
def has_tokens(node):
    """Has the node any tokens?"""
    # get_tokens() is a generator; the node has tokens iff it yields at
    # least one item (token objects are always truthy).
    for _ in node.get_tokens():
        return True
    return False
85abb999cf3641b36120f696e8517b7bf07ac52d
50,088
def find_version(*file_paths):
    """Read out the software version from the provided path segments.

    The segments are joined with '/' and the resulting file is searched
    for a line of the form ``__version__ = '<version>'``.

    :return: the version string.
    :raises RuntimeError: if no version assignment is found.
    """
    # Use a context manager so the file handle is closed — the original
    # open(...).read() leaked it.
    with open("/".join(file_paths), 'r') as fh:
        version_file = fh.read()
    lookup = re.search(r"^__version__ = ['\"]([^'\"]*)['\"]",
                       version_file, re.M)
    if lookup:
        return lookup.group(1)
    raise RuntimeError("Unable to find version string.")
466472107748c7920cbca46c2e78210cbdecb2f4
50,089
def get_bright_thresh(medianvals):
    """Get the brightness threshold for SUSAN (75% of each median)."""
    FACTOR = 0.75
    return [FACTOR * median for median in medianvals]
53251094d223cb64ce4a780ed2ff4f59cd6c4a73
50,090
def get_jwks_path() -> Path:
    """Get the canonical location for a JWKS file.

    Raises KeyError if the ``JWKS_PATH`` environment variable is unset.
    """
    jwks_location = os.environ["JWKS_PATH"]
    return Path(jwks_location)
552f30df9eadece0307145a00c81d33f1c15df00
50,092
def __extract_nested(child):
    """Helper to recursively convert an XML element into nested dicts,
    mapping the literal texts 'True'/'False' to booleans."""
    if len(child) == 0:
        # Leaf element: interpret boolean-looking text, else return as-is.
        text = child.text
        if text == 'True':
            return True
        if text == 'False':
            return False
        return text
    return {sub.tag: __extract_nested(sub) for sub in child}
5ca7a7253ae7e47a33d68a390608a851bb06ce9e
50,093
def get_arguments():
    """Parse command line arguments.

    Defines four options: -s/--start_url, -u/--url_substring,
    -t/--title_keywords (all required) and -o/--output_path (optional).

    Returns:
        argparse.Namespace: the parsed arguments.
    """
    parser = argparse.ArgumentParser(
        description='Wikipedia episode summary spider.',
        formatter_class=argparse.ArgumentDefaultsHelpFormatter
    )
    # Note: adjacent string literals below are concatenated implicitly.
    parser.add_argument('-s', '--start_url', type=str, required=True,
                        help='start URL for the spider.'
                             'Should be: '
                             'https://en.wikipedia.org/wiki/<Show_Title_With_Underscores_And_Capitalized_Words>. '
                             'Example: https://en.wikipedia.org/wiki/Star_Trek')
    parser.add_argument('-u', '--url_substring', type=str, required=True,
                        help='Wikipedia urls must include this substring otherwise the spider will not enter the URL.'
                             'Ideally, it should be something like: '
                             '<Show_Title_With_Underscores_And_Capitalized_Words>. Example: "Star_Trek"')
    parser.add_argument('-t', '--title_keywords', nargs='*', required=True,
                        help='The title of the Wikipedia page must include these keywords, '
                             'otherwise the spider will not extract anything from the page. '
                             'Good practice: use the lowercase version of the words from the title of the show. '
                             'Example: star trek')
    parser.add_argument('-o', '--output_path', type=str, required=False,
                        default='wiki_episode_summaries.json',
                        help='Path to the output JSON file. If the file already exists, it will be overwritten.')
    args = parser.parse_args()
    return args
5e6c4153f490d72720f7b016666472c702d0de01
50,094
def oauth_token():
    """Return a valid oauth token for resuming an existing OAuth client."""
    now = time.time()
    return {
        "access_token": "access_H35x4awgfxPKPuHRjpKMAkP2bOgUs",
        "expires_in": 3600,
        "token_type": "bearer",
        "scope": ["profiles.read"],
        "refresh_token": "refresh_asfqqJCxj9TjU8B544r44Tsu9bOgUs",
        # token is valid for another 5 minutes
        "expires_at": now + 300,
    }
2381d692c59755c90c7288ecb68833487f2935bc
50,095
def selection_sort(lyst):
    """Sort ``lyst`` in place in ascending order using selection sort.

    The original compared and swapped every (i, j) pair — it sorted, but
    was not selection sort and performed far more swaps than needed.
    This is the real algorithm: each pass selects the minimum of the
    unsorted tail and swaps it into position (at most n-1 swaps).

    :param lyst: mutable sequence of comparable items (mutated in place).
    :return: the same list object, sorted ascending.
    """
    n = len(lyst)
    for i in range(n):
        min_idx = i
        for j in range(i + 1, n):
            if lyst[j] < lyst[min_idx]:
                min_idx = j
        if min_idx != i:
            lyst[i], lyst[min_idx] = lyst[min_idx], lyst[i]
    return lyst
35243badb02b43a1df0dd03b8840fae7647fa6b3
50,096
def _create_dscfg_dict(cfg, dataset):
    """ creates a dataset configuration dictionary

    Parameters
    ----------
    cfg : dict
        config dictionary
    dataset : str
        name of the dataset

    Returns
    -------
    dscfg : dict
        dataset config dictionary
    """
    # NOTE(review): this aliases cfg[dataset] — every update below also
    # mutates the caller's cfg. Presumably intentional; confirm.
    dscfg = cfg[dataset]
    # Path related parameters
    dscfg.update({'configpath': cfg['configpath']})
    dscfg.update({'basepath': cfg['saveimgbasepath']})
    dscfg.update({'path_convention': cfg['path_convention']})
    dscfg.update({'procname': cfg['name']})
    dscfg.update({'dsname': dataset})
    dscfg.update({'solarfluxpath': cfg['solarfluxpath']})
    dscfg.update({'colocgatespath': cfg['colocgatespath']})
    dscfg.update({'excessgatespath': cfg['excessgatespath']})
    dscfg.update({'dempath': cfg['dempath']})
    dscfg.update({'cosmopath': cfg['cosmopath']})
    dscfg.update({'CosmoRunFreq': cfg['CosmoRunFreq']})
    dscfg.update({'CosmoForecasted': cfg['CosmoForecasted']})
    dscfg.update({'metranet_read_lib': cfg['metranet_read_lib']})
    dscfg.update({'lastStateFile': cfg['lastStateFile']})
    dscfg.update({'timeinfo': None})
    # Instrument parameters
    dscfg.update({'RadarName': cfg['RadarName']})
    dscfg.update({'ScanPeriod': cfg['ScanPeriod']})
    dscfg.update({'lrxh': cfg['lrxh']})
    dscfg.update({'lrxv': cfg['lrxv']})
    dscfg.update({'ltxh': cfg['ltxh']})
    dscfg.update({'ltxv': cfg['ltxv']})
    dscfg.update({'lradomeh': cfg['lradomeh']})
    dscfg.update({'lradomev': cfg['lradomev']})
    # PAR and ASR variable (optional keys, copied only when present)
    if 'par_azimuth_antenna' in cfg:
        dscfg.update({'par_azimuth_antenna': cfg['par_azimuth_antenna']})
    if 'par_elevation_antenna' in cfg:
        dscfg.update({'par_elevation_antenna': cfg['par_elevation_antenna']})
    if 'asr_highbeam_antenna' in cfg:
        dscfg.update({'asr_highbeam_antenna': cfg['asr_highbeam_antenna']})
    if 'asr_lowbeam_antenna' in cfg:
        dscfg.update({'asr_lowbeam_antenna': cfg['asr_lowbeam_antenna']})
    if 'target_radar_pos' in cfg:
        dscfg.update({'target_radar_pos': cfg['target_radar_pos']})
    # indicates the dataset has been initialized and aux data is available
    dscfg.update({'initialized': False})
    dscfg.update({'global_data': None})
    # Convert the following strings to string arrays
    strarr_list = ['datatype', 'FIELDS_TO_REMOVE']
    for param in strarr_list:
        if param in dscfg:
            if isinstance(dscfg[param], str):
                dscfg[param] = [dscfg[param]]
    # variables to make the data set available in the next level
    if 'MAKE_GLOBAL' not in dscfg:
        dscfg.update({'MAKE_GLOBAL': 0})
    if 'SUBSTITUTE_OBJECT' not in dscfg:
        dscfg.update({'SUBSTITUTE_OBJECT': 0})
    if 'FIELDS_TO_REMOVE' not in dscfg:
        dscfg.update({'FIELDS_TO_REMOVE': None})
    return dscfg
dccf2fb826a66a5704ff6fc092b3d9bd7e0982f8
50,098
def get_avatar_upload_dir(instance: Any, filename: str) -> str:
    """Determine the upload dir for avatar image files:
    'avatars/<username>/<filename>'."""
    parts = ['avatars', instance.username, filename]
    return '/'.join(parts)
366e3891b038f9d6a2f8fd413151f38f90423291
50,099
def bit_length(n):
    """Return the bit length of a non-negative integer.

    https://stackoverflow.com/questions/2654149/bit-length-of-a-positive-integer-in-python
    """
    count = 0
    remaining = n
    # Shift right until every set bit has been consumed.
    while remaining:
        remaining >>= 1
        count += 1
    return count
a71b093490d4a0f97fd4dcfe023163f447fa339d
50,101
def short_path(path, cwd=None):
    """ Return relative or absolute path name, whichever is shortest. """
    if not isinstance(path, str):
        # Pass non-string values (e.g. None) through untouched.
        return path
    base = os.getcwd() if cwd is None else cwd
    absolute = os.path.abspath(path)
    relative = os.path.relpath(path, base)
    # Prefer the absolute form on ties, as the original did.
    return absolute if len(absolute) <= len(relative) else relative
191fc17c9d6df681fd8010ce7fbe018c39c503df
50,102
def get_set_fields_command(editor, *, field_overrides=None):
    """Build the JavaScript command that sets the note fields in the UI.

    Based on editor.loadNote, but only sets the fields rather than
    everything else. ``field_overrides`` replaces the current value for
    a field, ignoring whatever value the (possibly stale) note holds —
    the UI has the most up-to-date field value, which may not yet be
    persisted in the note.
    """
    overrides = field_overrides or {}
    escape = editor.mw.col.media.escape_media_filenames
    data = [
        (field, escape(overrides.get(field, value)))
        for field, value in editor.note.items()
    ]
    return "setFields({});".format(json.dumps(data))
c0e0fe0db499a0b0b048baba58b92f23ae09f1c1
50,103
def create_pid_pname_from_path(pid, pname):
    """Build a 'PID.<pid>.PNAME.<PNAME>' identifier string.

    :param pid: process id (anything convertible with str()).
    :param pname: process name; upper-cased in the result.
    :return: PID.xxxx.PNAME....
    """
    return f'PID.{pid}.PNAME.{pname.upper()}'
e67b5322c6ec34561285ee5e4a3a319df709f351
50,104
def parse_input(input_str):
    """Parse the two wires' paths from a newline-separated string of
    comma-separated moves."""
    first, second = input_str.strip().split("\n")
    return first.split(","), second.split(",")
205d22bf2354747246c5203431e418103f30d615
50,105
def sieve_of_eratosthenes(n):
    """Find the prime numbers below ``n`` using the sieve of
    Eratosthenes.

    :param n: upper limit; note that ``n`` itself is NOT included even
        when prime (the original collected range(2, n)).
    :return: list of primes p with 2 <= p < n.
    """
    is_prime = [True] * (n + 1)
    candidate = 2  # smallest prime
    while candidate * candidate <= n:
        if is_prime[candidate]:
            # Mark every multiple of the prime as composite.
            for multiple in range(candidate * 2, n + 1, candidate):
                is_prime[multiple] = False
        candidate += 1
    return [p for p in range(2, n) if is_prime[p]]
5f108c7264ac1cf7c32abe718b4bc148879c19cb
50,107
def get_realization_created_nodes(realization_created_nodes, node_id):
    """Return the (original node id, realization) pairs whose original
    node id matches ``node_id``."""
    return [
        (org_id, realization)
        for org_id, realization in realization_created_nodes
        if org_id == node_id
    ]
0b1204de94404ac4ccc779d8285168c332323023
50,108
def create_argparser():
    """Create the argparser.

    Returns
    -------
    argparse.ArgParser
    """
    parser = argparse.ArgumentParser()
    # (short flag, long flag, required, help text)
    option_specs = [
        ('-a', '--account', False, 'Add file to which account'),
        ('-c', '--config', False, 'Custom YAML config file to use'),
        ('-f', '--file', True, 'File to load'),
    ]
    for short, long_, required, help_text in option_specs:
        parser.add_argument(short, long_, required=required, help=help_text)
    return parser
b8bfcb67222c56d4c9b8234ce2d61d3e95579c2d
50,110
def list_usages(client, resource_group_name, account_name):
    """ List usages for Azure Cognitive Services account. """
    usages = client.get_usages(resource_group_name, account_name)
    return usages.value
1ad73a4f1926895afc768f8f63341ff1a652ebf0
50,111
def urly(tin):
    """ make urls refs """
    url_pattern = r'(https?://[-a-zA-Z0-9:/_%()?=+.,#]+)'
    anchor = r'<a href="\1" target=song>\1</a>'
    return re.sub(url_pattern, anchor, tin)
806a85c3fe7b649588c07513ec00cd4724f25e86
50,112
def argument_sink(f):
    """sink the ``callbacks`` and ``login_context`` arguments passed to scripts"""
    # Both injected arguments are deliberately discarded.
    return lambda callbacks, login_context: f()
03dfa84fb78d882d020d5b02584b356551dd8460
50,113
def SplitString(value):
    """simple method that puts in spaces every 10 characters

    Builds a LilyPond ``\\markup { \\column { \\line {...} } }`` block
    from ``value``, splitting either every 10 characters (long strings)
    or on embedded newlines/carriage returns.
    """
    string_length = len(value)
    chunks = int(string_length / 10)
    string_list = list(value)
    lstring = ""
    if chunks > 1:
        lstring = "\\markup { \n\r \column { "
        for i in range(int(chunks)):
            lstring += "\n\r\r \\line { \""
            index = i * 10
            # NOTE(review): this inner loop reuses ``i`` and copies
            # characters 0..index-1, so each \line holds a growing
            # PREFIX of the string (and the first \line is empty) rather
            # than the i-th 10-character slice — looks like a bug;
            # confirm intended output before changing.
            for i in range(index):
                lstring += string_list[i]
            lstring += "\" \r\r}"
        lstring += "\n\r } \n }"
    if lstring == "":
        # Short string: split on embedded newline/carriage-return marks.
        indexes = [
            i for i in range(
                len(string_list)) if string_list[i] == "\r" or string_list[i] == "\n"]
        lstring = "\\markup { \n\r \column { "
        if len(indexes) == 0:
            # No line breaks: emit the whole string as a single \line.
            lstring += "\n\r\r \\line { \"" + \
                "".join(string_list) + "\" \n\r\r } \n\r } \n }"
        else:
            # Break into rows at each newline position (the break
            # character itself starts the following row).
            rows = []
            row_1 = string_list[:indexes[0]]
            rows.append(row_1)
            for i in range(len(indexes)):
                start = indexes[i]
                if i != len(indexes) - 1:
                    end = indexes[i + 1]
                else:
                    end = len(string_list)
                row = string_list[start:end]
                rows.append(row)
            for row in rows:
                lstring += "\n\r\r \\line { \""
                lstring += "".join(row)
                lstring += "\" \r\r}"
            lstring += "\n\r } \n }"
    return lstring
6d6dac529644ab4b7f627ba5d2826dae3158d8e9
50,114
def query_filters(composed):
    """ Given a composed query, return list of all filters; for
    migration purposes, we only really need first. """
    return [rfilter for group in composed for rfilter in group]
3f62b9a5f9745330832926bc1b72b3daf66239ef
50,115
def dict_char_value(data, raw=False):
    """Simplify characteristic values given in dict format.

    :param data: mapping of name -> {"Value": ..., "Symbol": ...};
        the "Symbol" key may be absent.
    :param raw: if True, return a dict per entry instead of a
        formatted "value symbol" string.
    :return: mapping of name -> simplified value.
    """
    # The original tried an all-entries-have-Symbol fast path inside a
    # bare ``except Exception`` whose fallback produced the same result
    # for every input; handling the optional "Symbol" key directly is
    # equivalent and drops the broad exception handler (and unused `e`).
    values = {}
    for key, entry in data.items():
        has_symbol = "Symbol" in entry
        if raw:
            if has_symbol:
                values[key] = {"Value": entry["Value"], "Symbol": entry["Symbol"]}
            else:
                values[key] = {"Value": entry["Value"]}
        else:
            if has_symbol:
                values[key] = "{} {}".format(entry["Value"], entry["Symbol"])
            else:
                values[key] = entry["Value"]
    return values
1ffa43bcac9f9ecd3289966f2e159d177ef8d582
50,117
def parse_float(float_str, default=0):
    """Parses the float_str and returns the value if valid.

    Args:
        float_str: Value to parse as float.
        default: Value to return if float_str is not valid.

    Returns:
        Parsed float value if valid or default.
    """
    try:
        return float(float_str)
    except (ValueError, TypeError):
        # TypeError covers non-string/non-number input such as None,
        # which the original let propagate instead of defaulting.
        return default
156f1e12c3e74f3be0452c5f4122c142252d5026
50,118
def get_all_run_results(client, run_id, sort_mode=None, filters=None):
    """Fetch every result for a run, paging through the server API.

    The server caps how many results one call may return
    (``client.max_query_size``), so results are accumulated page by
    page until an empty page signals the end.
    """
    page_size = client.max_query_size
    sort = sort_mode if sort_mode else []
    collected = []
    offset = 0
    while True:
        page = client.getRunResults([run_id], page_size, offset, sort,
                                    filters, None, False)
        if not page:
            break
        collected.extend(page)
        offset += len(page)
    return collected
6532035cd35c28b13048211784957fb2c1ab4af5
50,119
def log_to_axis_angle(w):
    """Convert a rotation log vector to (axis, angle).

    :param w: 3-vector whose norm is the rotation angle.
    :return: (unit axis, angle); the axis is the zero vector when the
        angle is zero.
    """
    angle = numpy.linalg.norm(w)
    axis = w / angle if angle != 0.0 else numpy.zeros((3,))
    return (axis, angle)
9449a99ab9f880f08a497451668371755116ba8b
50,120
def euclidean_heuristic(pos, problem):
    """
    The Euclidean distance heuristic for a PositionSearchProblem
    ((int, int), PositionSearchProblem) -> float
    """
    dx = pos[0] - problem.goal_pos[0]
    dy = pos[1] - problem.goal_pos[1]
    return (dx * dx + dy * dy) ** 0.5
5e46c1b66a040e5dcdc03ea972e99926ab678e2c
50,121
def get_projectname() -> str:
    """ Acquire project name

    Blocks on stdin via input() until the user enters a name; returns
    it verbatim (may be empty).
    """
    project_name = input("Project name: ")
    return project_name
3f28d71ca78bd8e3ef39d11d4bd0c74d390e8f1f
50,122
def plus_one(x):
    """Adds unity to any number"""
    incremented = x + 1
    return incremented
366f11329e8353af2751e58d0dce873606313d46
50,123
def pixels_to_EMU(value):
    """Convert pixels to EMUs (1 pixel = 9525 EMUs), truncating to int."""
    EMU_PER_PIXEL = 9525
    return int(value * EMU_PER_PIXEL)
c9be7deacae47819ab30d5589dbae555124d6409
50,124
def _get_all_files(directory):
    """Return the set of all files under ``directory`` (recursively).

    Args:
        directory (str): the directory

    Returns:
        set: relative paths (with original separators) of every file.
    """
    found = set()
    for path, _subdirs, files in os.walk(directory):
        for name in files:
            full = path + '/' + name
            # Strip only the LEADING directory prefix. The original used
            # str.replace, which also removed any later occurrence of
            # the directory name inside the path.
            rel = full[len(directory):] if full.startswith(directory) else full
            if rel and rel[0] in ('\\', '/'):
                rel = rel[1:]
            found.add(rel)
    return found
61bd711950f4a94b71bed1ca4099809d679ba8cf
50,127
def decimal_normalize(value):
    """Normalize a Decimal value, e.g. trimming trailing zeros."""
    normalized = value.normalize()
    return normalized
e9737bcb3d0b09a247ec89c3db257ca62550d5c6
50,129
def set_user_defined_floats(fha: Dict[str, Any], floats: List[float]) -> Dict[str, Any]:
    """Set the user-defined float values for the user-defined calculations.

    The first three keys of ``fha`` receive the corresponding entries of
    ``floats``; keys without a matching float are set to 0.0.

    :param fha: the functional hazard assessment dict.
    :param floats: the list of float values.
    :return: fha with updated float values.
    :rtype: dict
    """
    # Iterate only over keys that actually exist. The original indexed
    # list(fha.keys())[idx] blindly and, when fha had fewer than three
    # keys, its IndexError handler wrote to a stale (or empty-string)
    # key, corrupting the dict.
    for idx, key in enumerate(list(fha.keys())[:3]):
        try:
            fha[key] = float(floats[idx])
        except IndexError:
            # Fewer floats than keys: zero the remaining keys.
            fha[key] = 0.0
    return fha
9677337a413eb3f79b7745a0c2546d781c22ee43
50,132
def join_workdir_to_lm_paths(lm_params, workdir):
    """Set paths in LM parameter dictionary under workdir"""
    updated = copy.deepcopy(lm_params)
    join = os.path.join
    if updated.get('filename'):
        updated['filename'] = join(workdir, updated['filename'])
    if updated.get('interpolate'):
        updated['interpolate'] = [
            [join(workdir, pair[0]), pair[1]]
            for pair in updated['interpolate']
        ]
    segmentation = updated.get('segmentation', {})
    if segmentation.get('model'):
        updated['segmentation']['model'] = join(workdir, segmentation['model'])
    return updated
ee0b7b9bc3c0d5c86826c7a7d0f2a86994f80d86
50,133
def create_dirs(dirs):
    """Create each directory in ``dirs`` if it does not already exist.

    :param dirs: list of directory paths.
    :return exit_code: 0:success -1:failed

    NOTE: Input should be a list.

    The original called exit(-1) on failure, terminating the whole
    process despite documenting a -1 return code; it now returns -1.
    """
    if not isinstance(dirs, list):
        print('Input is not a list')
        return -1
    try:
        for dir_ in dirs:
            os.makedirs(dir_, exist_ok=True)
        return 0
    except Exception as err:
        print("Creating directories error: {0}".format(err))
        return -1
55398abb6b69651d781a4b32b65192803684d1d0
50,134
def Lydia(message, log):
    """String*String -> String

    Append ``message`` to ``log`` and return it unchanged.  (The
    original French docstring claimed the result carries a date, but no
    date is ever added.)
    """
    log.append(message)
    return message
be730af0695fc2b5817c3795397c3656fd20e8a3
50,135
def get_pydtf_dir():
    """Return the location of the dtf dist-packages directory (the
    grandparent directory of this module file)."""
    module_path = os.path.abspath(__file__)
    package_dir, _tail = os.path.split(module_path)
    return os.path.dirname(package_dir)
843d73f58018bb5c8bdbd33b6246924be930050b
50,136
def explode_tokens(tokenlist):
    """
    Turn a list of (token, text) tuples into another list where each
    string is exactly one character.

    :param tokenlist: List of (token, text) tuples.
    """
    return [(token, ch) for token, text in tokenlist for ch in text]
123d7788bfb8e47c1ae9618142d9785690f833fe
50,137
def models_report(results):
    """Build a performance report partitioned by label (or level).

    Args:
        results (dict): mapping metric name -> {label: value}.

    Returns:
        dict: mapping label -> {metric name: value}.
    """
    report = {}
    for metric_name, per_label in results.items():
        for label, value in per_label.items():
            report.setdefault(label, {})[metric_name] = value
    return report
043f97e4206eeaeaf2d1228fa79905af6e404558
50,138
def _is_option_provided(config, option, empty_value_is_valid=False):
    """Check whether ``option`` is provided in the config.

    An option that exists but holds the empty string counts as not
    provided unless ``empty_value_is_valid`` is True.
    """
    if not config.has_configuration_option(option):
        return False
    if config.get_configuration_option(option) == '' and not empty_value_is_valid:
        return False
    return True
eacc6132470cc97fd167b6b97ebd022b1742e0f7
50,139
def strip(s: str):
    """strips outer html tags"""
    # Content starts after the first '>' (or at 0 when absent) and ends
    # before the last '<' (or at the end when absent).
    content_start = s.find(">") + 1
    content_end = s.rfind("<")
    if content_end == -1:
        content_end = len(s)
    return s[content_start:content_end]
4c9161a710cdcf800c92db7c89c65f14b517a04c
50,141
def strip_var_name(var_name):
    """Strips variable name of sub-strings blocking variable name matching.

    Removes sub-strings that should be ignored when matching
    checkpointed variable names to variable names in the training graph:
    - trailing colon + number, e.g. "W:0" --> "W"
    - partitioning info, e.g. "/a/part_12/b" --> "a/b" (checkpointed
      variables carry no partitioning info in their names, while model
      variables do).

    Args:
        var_name: str, variable name.

    Returns:
        stripped variable name.
    """
    without_slot = re.sub(r':\d+$', '', var_name)
    return re.sub(r'/part_\d+', '', without_slot)
c7e9ee2a1eae8ff1c5e1bff45cd5aa2c8dcf6012
50,142
def extractdata(line):
    """Extract the x, y and reference values from one CSV data line.

    Lines with 8 fields carry an explicit reference value in field 7;
    lines with 6 fields get a default reference of 1.  Anything else is
    reported as bad data (returns None).

    (In the original, the 8-field condition was commented out, so the
    first branch ran unconditionally and the 6-field branch plus the
    error path were unreachable.)
    """
    fields = line.split(',')
    if len(fields) == 8:
        # convert the strings to floats
        return float(fields[3]), float(fields[5]), float(fields[7])
    if len(fields) == 6:
        # no reference column: default the reference value to 1
        return float(fields[3]), float(fields[5]), 1
    print("Houston, we have a problem, This line does not appear to be data!:")
    print(line)
c7cb553bedef333cf9950588757906c7f114d535
50,143
def validate_ints(data):
    """
    Method to validate data of type integer
    :params: data
    :response: True, False
    """
    # NOTE(review): bool is a subclass of int, so True/False also pass —
    # confirm that is acceptable to callers.
    return isinstance(data, int)
67279db3b592bf415ec5cf3522a404a246d7a655
50,144
def extract_task_info(luigi_task):
    """Extract task name and generate a routine id from a luigi task,
    from the date and test fields.

    Args:
        luigi_task (luigi.Task): Task to extract test and date
            parameters from.

    Returns:
        {test, routine_id} (tuple): Test flag, and routine ID for this
            task.
    """
    if 'test' in luigi_task.__dict__:
        test = luigi_task.test
    else:
        test = not luigi_task.production
    routine_id = '{}-{}-{}'.format(type(luigi_task).__name__,
                                   luigi_task.date, test)
    return test, routine_id
134e740c7d228c5a078cfe6a5c7a493ad394c0e0
50,145
def split_evr(version):
    """ Return a tuple of (epoch, version, release) from a version
    string of the form ``epoch~version-release``; epoch defaults to
    '0' and release to None when absent. """
    if '~' in version:
        epoch, remainder = version.split('~', 1)
    else:
        epoch, remainder = '0', version
    if '-' in remainder:
        ver, release = remainder.rsplit('-', 1)
    else:
        ver, release = remainder, None
    return epoch, ver, release
9e437c473b3fb0275f62b5fbf9dad64058d56b50
50,146
def get_set_of_list_and_keep_sequence(list):
    """ Returns the set of the specified list but keeps the sequence of
    the items."""
    # NOTE(review): the parameter shadows the builtin ``list``; kept to
    # preserve the public signature.
    seen = set()
    unique = []
    for item in list:
        if item not in seen:
            seen.add(item)
            unique.append(item)
    return unique
2730a866d76318f0a4cee1fec6c19c8d562bb2fb
50,147
def split_features(x: torch.Tensor) -> Tuple[torch.Tensor, Optional[torch.Tensor]]:
    """Split complete point cloud into xyz coordinates and features.

    The first 3 channels (dim 1) are the xyz coordinates, returned
    transposed to channel-last; any remaining channels are returned as
    features, or None when there are none.
    """
    xyz = x[:, :3, :].transpose(1, 2).contiguous()
    if x.size(1) > 3:
        features = x[:, 3:, :].contiguous()
    else:
        features = None
    return xyz, features
2a60afd8fdb1c7b2be0fe1a7a6bac2fbe5ce5069
50,148
def unzip(zipped_list, n):
    """Return n sequences with the elements of ``zipped_list`` un-zipped.

    The general case could be solved with zip(*zipped_list), but here we
    are also dealing with:
    - un-zipping an empty list to n *independent* empty lists (the
      original returned ``tuple([[]] * n)`` — n references to one shared
      list, so mutating one mutated them all),
    - ensuring that all zipped items have length n, raising ValueError
      if not.
    """
    if not zipped_list:
        # One fresh list per slot; [[]] * n aliased a single list.
        return tuple([] for _ in range(n))
    if not all(isinstance(item, tuple) and len(item) == n
               for item in zipped_list):
        raise ValueError
    return zip(*zipped_list)
73e9774ca196dd358d5f6a5fbd78ad60ef3ed1ff
50,150
def _from_r_alias(obj, r, lat_and_inv=None):
    """ Alias for instance method that allows the method to be called
    in a multiprocessing pool """
    bound = obj._from_r
    return bound(r, lat_and_inv=lat_and_inv)
6f84cb13d97fbf7c1d87f149c691625ef6b9f562
50,151
def get_without_builtins():
    """
    Get all installed XBlocks but try to omit built-in XBlocks, else
    the output is less helpful
    """
    # NOTE(review): pkg_resources is deprecated in modern setuptools;
    # importlib.metadata.entry_points is the replacement — confirm
    # target environments before migrating.
    xblocks = [
        entry_point.name
        for entry_point in pkg_resources.iter_entry_points('xblock.v1')
        # Built-in XBlocks live in the 'xmodule' package; skip those.
        if not entry_point.module_name.startswith('xmodule')
    ]
    xblocks = sorted(xblocks)
    return xblocks
ef79b167e813c15cbf48439ac2364c2976faed27
50,152
def splitdigits(i, base=10):
    """
    >>> splitdigits(0)
    []
    >>> splitdigits(13)
    [1, 3]
    """
    digits = []
    while i:
        i, digit = divmod(i, base)
        digits.append(digit)
    return digits[::-1]
e5789037ef345b602860fb9a1e3a0a1eee32d6f7
50,153
def colWidth(collection, columnNum):
    """Compute the required width of a column in a collection of
    row-tuples.

    Returns MIN_PADDING for an empty collection (the original raised
    ValueError from max() on an empty generator).
    """
    MIN_PADDING = 5
    return MIN_PADDING + max((len(row[columnNum]) for row in collection),
                             default=0)
9a300106cf57fa6a78af37caa3f6b2a74c3e5b2c
50,155
def divisores_xy(x, y):
    """Compare two numbers and report the larger one.

    Args: two real numbers.
    Output: message (in Spanish, preserved verbatim) naming the larger
    of the two, or noting that they are equal.
    """
    if x == y:
        return ('** Los dos números ingresados son iguales.\n')
    mayor = x if x > y else y
    return (f'\nEntre los dos números ingresados {x} y {y}; el número mayor es el: {mayor}.')
e6deea1e43000353b44696c6371c7f61a275b4f3
50,156
def generate_uuid(basedata=None):
    """Provides a random UUID with no input, or a UUID4-format MD5
    checksum of any input data provided.

    :param str basedata: provided data to calculate a uuid
    """
    if basedata is None:
        return str(uuid.uuid4())
    if isinstance(basedata, str):
        digest = hashlib.md5(basedata.encode()).hexdigest()
        return str(uuid.UUID(digest))
    raise TypeError("The 'basedata' must be string or None")
60d4f696c796f8ffcc0353e4b71f0e108a13986d
50,157
def make_terminal(word):
    """returns a terminal tree node with label word

    Terminal nodes are represented by the bare word itself.
    """
    return word
3ece500a79d9e42b0cd59d44c2c3eda6d1ca8adf
50,160
def max(*args, **kwargs):
    """
    Add support for 'default' kwarg.

    >>> max([], default='res')
    'res'
    >>> max(default='res')
    Traceback (most recent call last):
    ...
    TypeError: ...
    >>> max('a', 'b', default='other')
    'b'
    """
    _MISSING = object()
    default_value = kwargs.pop('default', _MISSING)
    try:
        result = builtins.max(*args, **kwargs)
    except ValueError as exc:
        if default_value is not _MISSING and 'empty sequence' in str(exc):
            return default_value
        raise
    return result
d46610fd653c416f9084e2ae3f5f8cd02424cb9b
50,161
def unwrap_args(args_ns):
    """
    "Unwrap" the given `argparse` namespace into a dict; list values
    are collapsed to their first element.

    :type args_ns: argparse.Namespace
    :rtype: dict
    """
    return {
        key: value[0] if isinstance(value, list) else value
        for key, value in vars(args_ns).items()
    }
017708d7d8695a5920586c416083e8b99e791a33
50,163
def is_start_state(state):
    """ Checks if the given state is a start state (position 0, facing
    'N'). """
    at_origin = state.g_pos.value == 0
    facing_north = state.theta.value == 'N'
    return at_origin and facing_north
56e7db462a3e971fd4f894f70d2efd66188f3405
50,165
async def utm(websocket, team):
    """Uses a specified team to battle.

    Sends the '|/utm <team>' command over the websocket and returns
    whatever websocket.send() returns.
    """
    return await websocket.send(f'|/utm {team}')
f46a1197eb4ff9bb5279d8ec66397fd092e8452a
50,166
def update_node_codebase():
    """Updates the codebase using git pull, note if any local changes
    are present this might fail horribly

    Returns the captured stdout of ``git pull``; raises
    subprocess.CalledProcessError on a non-zero exit status.
    """
    update_log = subprocess.check_output(["git", "pull"])
    return update_log
169a5d1a83239c00feb2cc35adf22dac850051ef
50,167
def being_declared(string):
    """ Helper method used to see if the function or subroutine is
    being defined rather than called.

    :param string: the string being checked against the forbidden words
    :return: a boolean indicating if it is being declared or called
    """
    declaration_keywords = ('write', 'function', 'subroutine',
                            'character', 'if', 'result')
    return any(keyword in string for keyword in declaration_keywords)
a2346eebcd3832c5db640a05bc6435a99c9127ea
50,168
def _find_pool(ip_addr, ipv4_pools): """ Find the pool containing the given IP. :param ip_addr: IP address to find. :param ipv4_pools: iterable containing IPPools. :return: The pool, or None if not found """ for pool in ipv4_pools: if ip_addr in pool.cidr: return pool else: return None
f12d4ee4d1f73ff054b0194d31744d31c1f58ad2
50,169
def word_len_filter(sent, max_sgl_word_len=40, max_avg_word_len=20, min_avg_word_len=2):
    """Filter sentences by word-length statistics.

    Rejects a sentence when it contains a single over-long word, or when the
    average word length falls outside [min_avg_word_len, max_avg_word_len].

    :param sent: the sentence to check (whitespace-tokenised)
    :param max_sgl_word_len: longest single word allowed
    :param max_avg_word_len: highest allowed average word length
    :param min_avg_word_len: lowest allowed average word length
    :return: True when the sentence passes all checks, else False
    """
    words = sent.strip().split()
    # Bug fix: the original called max() on an empty list for blank /
    # whitespace-only input, raising ValueError.  An empty sentence cannot
    # satisfy the minimum-average constraint, so it is rejected outright.
    if not words:
        return False
    word_lens = [len(word) for word in words]
    # epsilon kept from the original to match its exact average computation
    avg_word_len = sum(word_lens) / (len(words) + 1e-7)
    if max(word_lens) > max_sgl_word_len:
        return False
    if avg_word_len > max_avg_word_len or avg_word_len < min_avg_word_len:
        return False
    return True
6fae4c5e34a08aefc4d0533feff091064395154a
50,170
def VolumetricFlow(self):
    """Volumetric flow (m^3/hr).

    Unpacks ``self.data`` as (stream, mol, phase); when the molar flow is
    empty/zero the flow is 0.  Otherwise the compound held in ``self.name``
    is synchronised to the stream's T/P and the given phase before its
    molar volume is scaled to a volumetric flow.
    """
    stream, mol, phase = self.data
    if not mol:
        return 0.
    compound = self.name  # the compound whose state we update in place
    compound.T = stream.T
    compound.P = stream.P
    compound.phase = phase
    # Vm [m^3/mol] * kmol/hr * 1000 mol/kmol -> m^3/hr (units assumed — TODO confirm)
    return compound.Vm * mol[0] * 1000.
e4a15301a99d43c346df9be465f8bc1a45abe3d7
50,171
def event_happened(**kwargs):
    """Return True when the configured price "event" has been triggered.

    The current rule fires when the symbol drops below $8 today after
    closing at or above $8 yesterday.  Prices arrive as keyword arguments
    (``price_today``, ``price_yest``, ...), so the rule can be swapped for
    any other expression over those values.
    """
    today = kwargs['price_today']
    yesterday = kwargs['price_yest']
    # chained comparison: today < 8.0 and yesterday >= 8.0
    return bool(today < 8.0 <= yesterday)
0eea69418b78aca6b7a0d95aeb6ffc2fd0fe713e
50,172
import re


def substr(requestContext, seriesList, start=0, stop=0):
    """
    Takes one metric or a wildcard seriesList followed by 1 or 2 integers.
    Assume that the metric name is a list or array, with each element
    separated by dots.  Prints n - length elements of the array (if only one
    integer n is passed) or n - m elements of the array (if two integers n
    and m are passed).  The list starts with element 0 and ends with element
    (length - 1).

    Example:

    .. code-block:: none

      &target=substr(carbon.agents.hostname.avgUpdateTime,2,4)

    The label would be printed as "hostname.avgUpdateTime".
    """
    for series in seriesList:
        # strip an enclosing function call, keeping only the innermost name
        open_idx = series.name.rfind('(') + 1
        close_idx = series.name.find(')')
        if close_idx < 0:
            close_idx = len(series.name) + 1
        inner = series.name[open_idx:close_idx]
        nodes = inner.split('.')
        if int(stop) == 0:
            series.name = '.'.join(nodes[int(start):])
        else:
            series.name = '.'.join(nodes[int(start):int(stop)])
        # substr(func(a.b,'c'),1) becomes b instead of b,'c'
        series.name = re.sub(',.*$', '', series.name)
    return seriesList
7c4d3b425fc608ec7b1e2651ec39719b6c8a1510
50,173
def maf_binary(sorted_array, value):
    """Binary-search *sorted_array* for *value*.

    :param sorted_array: ascending-sorted sequence to search
    :param value: element to locate
    :return: the index of *value*, or None when it is absent

    Fixes over the original: the dead ``err`` variable is removed, the
    loop bounds no longer shadow the builtins ``min``/``max``, and the
    not-found case returns None explicitly instead of falling off the end.
    """
    lo, hi = 0, len(sorted_array) - 1
    while lo <= hi:
        mid = (lo + hi) // 2
        if sorted_array[mid] > value:
            hi = mid - 1
        elif sorted_array[mid] < value:
            lo = mid + 1
        else:
            return mid
    return None
9cdd6932084feab1af4795056cb4a139a3f3d996
50,174
def make_pairs(left_list, right_list, exclude_doubles=False):
    """Form every [left, right] pair between the two word lists.

    :param left_list: words supplying the first element of each pair
    :param right_list: words supplying the second element of each pair
    :param exclude_doubles: when True, drop pairs whose two words are equal
    :return: list of two-element lists, in left-major order
    """
    pairs = []
    for lhs in left_list:
        for rhs in right_list:
            if exclude_doubles and lhs == rhs:
                continue
            pairs.append([lhs, rhs])
    return pairs
19bc84b82d59512c75eb78042740f6ad2f094c05
50,175
def list2dict(L):
    """Build a nested dict from a 3-line Concordance TSV summary.

    Line 0 is the header row; lines 1 and 2 are data rows.  The result maps
    the header's first cell to a dict keyed by each data row's first cell,
    whose values pair the remaining header cells with that row's cells.

    :param L: list of tab-separated lines from a Concordance summary file
    :return: the nested dictionary described above
    """
    # split every line once; keyed by line index like the original helper dict
    rows = {idx: line.split('\t') for idx, line in enumerate(L)}
    header = rows[0]
    return {
        header[0]: {
            rows[1][0]: dict(zip(header[1:], rows[1][1:])),
            rows[2][0]: dict(zip(header[1:], rows[2][1:])),
        }
    }
cf304a9411887a472105ab1bf7264a90a006128c
50,176
import os


def bearer_token():
    """Return the bearer token from the DF_BEARER_TOKEN environment
    variable, or None when it is not set."""
    return os.environ.get('DF_BEARER_TOKEN')
ecc8a05a06f58d82a9b1d79f937ab6736ab1656b
50,177
def choices_to_dict(t):
    """Convert a ChoiceField two-tuple sequence into a dict (for JSON).

    Keys are the stringified first elements; assumes t[i][0] is unique,
    otherwise later entries overwrite earlier ones.
    """
    return {str(item[0]): item[1] for item in t}
38e77f3a8389d03943cf29d120818a234573afc5
50,178
def get_colab_github_url(relative_path: str, repository: str, branch: str) -> str:
    """Build the Google Colab URL for a notebook hosted on GitHub.

    :param relative_path: path of the file within the repository
    :param repository: "owner/name" GitHub repository slug
    :param branch: branch the file lives on
    :return: the colab.research.google.com URL for that file
    """
    base = "https://colab.research.google.com/github"
    return "/".join((base, repository, "blob", branch, relative_path))
3a1bc73294e1e4f77a6631b71fdd0d05d1ba6400
50,180
import requests
import pickle


def get_server_weights(master_url='0.0.0.0:5000'):
    """Fetch model weights from the parameter server.

    Performs a blocking HTTP GET against ``http://<master_url>/parameters``
    and unpickles the response body.

    :param master_url: host:port of the parameter server
    :return: the unpickled weights object

    .. warning::
        SECURITY: ``pickle.loads`` on bytes received over the network can
        execute arbitrary code.  Only use against a fully trusted server,
        or switch the wire format to something safe (e.g. numpy ``.npz``).
    """
    r = requests.get('http://{0}/parameters'.format(master_url))
    # NOTE(review): no r.raise_for_status() — a 4xx/5xx body would be fed
    # straight into pickle; confirm the server never returns errors here.
    weights = pickle.loads(r.content)
    return weights
1c66bf09e453414eb0050c251f0401791f90d3b5
50,181
def rise_and_fall_times(
        logger_data,
        pin,
        start_time=0.01,
        end_time=float("Inf"),
        stop_function=None,
        initialized=False,
        pulse_direction=True):
    """Extract pulse edge timestamps for one GPIO pin from a logger trace.

    Iterates ``logger_data.gpio`` (an iterable of ``(timestamp, pin_values)``
    samples) and collects the timestamps at which the selected pin starts a
    pulse ("rise") and ends it ("fall").  With ``pulse_direction=True`` a
    pulse is active-high (rise = low->high, fall = high->low); with
    ``pulse_direction=False`` the roles are inverted, so "rise" marks the
    high->low transition that starts an active-low pulse.

    Arguments:
        logger_data {object} -- trace with a ``.gpio`` iterable of
            ``(timestamp, pin_values)`` samples
        pin {int} -- index of the pin to watch within ``pin_values``

    Keyword Arguments:
        start_time {float} -- ignore samples at or before this time
            (default: {0.01})
        end_time {float} -- ignore samples after this time
            (default: {float("Inf")})
        stop_function {callable or None} -- called with each ``pin_values``;
            a truthy return aborts the scan early (default: {None})
        initialized {bool} -- set True to skip the warm-up phase below
            (default: {False})
        pulse_direction {bool} -- True for active-high pulses, False for
            active-low (default: {True})

    Returns:
        tuple -- ``(rise_times, fall_times)`` lists of timestamps
    """
    # Tracked pulse state: True while we are inside an active pulse.
    pin_value = False
    rise_times = []
    fall_times = []
    # Loop over all gpio samples
    for timestamp, pin_values in logger_data.gpio:
        # Warm-up: skip samples while every pin still reads high (the
        # logger's initial idle state); the first sample with any low pin
        # marks the start of real data.
        if not initialized:
            if all(pin_values):
                continue
            else:
                initialized = True
        if stop_function is not None and stop_function(pin_values):
            break
        # Only consider samples inside the (start_time, end_time] window.
        if timestamp > start_time and timestamp <= end_time:
            # Pulse start: ``level ^ (not pulse_direction)`` is the pin
            # level when active-high, or its inverse when active-low.
            if not pin_value and (pin_values[pin] ^ (not pulse_direction)):
                pin_value = pulse_direction
                rise_times.append(timestamp)
            # Pulse end: the complementary transition.  Note this is NOT an
            # elif — when pulse_direction is False both branches can fire on
            # the same sample, recording a zero-width pulse.
            if pin_value and (pin_values[pin] ^ pulse_direction):
                pin_value = False
                fall_times.append(timestamp)
    return rise_times, fall_times
8476ec9c407f733d85fd6481c9466af64db0aa99
50,182
import logging


def basic_logger(name=''):
    """Return the application's base 'cat' logger, or a child of it.

    :param name: optional suffix; when given, the logger 'cat.<name>' is
        returned instead of the root 'cat' logger
    """
    full_name = 'cat' if not name else 'cat.' + name
    return logging.getLogger(full_name)
f3ec34d0f473c8b5f2b5023128a559c788e5e142
50,183
import tempfile
import os


def is_file_system_case_sensitive() -> bool:
    """Check whether the file system is case-sensitive.

    Creates a temp file whose name contains upper-case letters; if the
    lower-cased path does not resolve to the same file, the file system
    distinguishes case.
    """
    with tempfile.NamedTemporaryFile(prefix="TmP") as probe:
        lowered = probe.name.lower()
        return not os.path.exists(lowered)
52b33323183c076d85dcdc4325600f53a5491667
50,184
import os


def file_exists(fpath, specific_permission=None):
    """Check that a path exists and is readable (optionally more).

    Args:
        fpath (str): The path to check.
        specific_permission (int): Optional extra ``os.access`` mode
            (e.g. ``os.W_OK``) that must also be satisfied.

    Returns:
        bool: True when the path exists, is readable, and — if requested —
        grants the extra permission.
    """
    # Guard clauses replace the original nested ifs.
    if not os.path.exists(fpath):
        return False
    if not os.access(fpath, os.R_OK):
        return False
    if specific_permission is None:
        return True
    return bool(os.access(fpath, specific_permission))
a77b599c2b17b47b3a477a942827cdb5e6d926bc
50,186
def poissons_ratio(vp, vs):
    """
    Calculate Poisson's Ratio based on the definition given in the Specfem3D
    source code.

    :type vp: float or np.array
    :param vp: P-wave velocity
    :type vs: float or np.array
    :param vs: S-wave velocity
    :rtype: float or np.array
    :return: Poissons ratio
    """
    vp_sq = vp * vp
    vs_sq = vs * vs
    return 0.5 * (vp_sq - 2 * vs_sq) / (vp_sq - vs_sq)
1f90fe04bb326c1117a38fd46c48347c6d5577bc
50,187
import os
import torch


def reloadModel(model, config):
    """Load checkpoint of the model using torch.load().

    Only parameters whose names exist in the current model are restored, so
    a checkpoint from a slightly different architecture loads its shared
    layers without error.

    Args:
        model: which pytorch model to load the parameters.
        config: dict-like; the checkpoint path is
            config['contPath']/config['opt'].resume_file.

    Returns:
        model: the model with parameters loaded (also mutated in place).

    .. warning::
        SECURITY: ``torch.load`` unpickles the file — only load checkpoints
        from trusted sources.
    """
    # NOTE: 'checkpoint' first holds the path, then is rebound to the
    # loaded state below.
    checkpoint = os.path.join(config['contPath'], config['opt'].resume_file)
    print("=> Reloading checkpoint '{}': model".format(checkpoint))
    # map_location forces everything onto CPU so GPU-saved checkpoints
    # load on CPU-only machines.
    checkpoint = torch.load(checkpoint, map_location=lambda storage, loc: storage)
    # model.load_state_dict(self.checkpoint['state_dict'])
    model_dict = model.state_dict()
    # 1. filter out unnecessary keys (parameters absent from this model)
    pretrained_dict = {}
    for k, v in checkpoint['state_dict'].items():
        if(k in model_dict):
            pretrained_dict[k] = v
    # 2. overwrite entries in the existing state dict
    model_dict.update(pretrained_dict)
    # 3. load the new state dict
    model.load_state_dict(model_dict)
    return model
1fc12ef8d1cca588cdeebf9ac1e7194f43b66516
50,188
def _discover(netdisco, zeroconf_instance): """Discover devices.""" results = [] try: netdisco.scan(zeroconf_instance=zeroconf_instance) for disc in netdisco.discover(): for service in netdisco.get_info(disc): results.append((disc, service)) finally: netdisco.stop() return results
85dc1cb32b4d926908bea268a8fadc1b09cbb21f
50,189
import torch
import math


def angle(input_, deg=False):
    """Wrapper of `torch.angle`.

    Parameters
    ----------
    input_ : DTensor
        Input dense tensor.
    deg : bool, optional
        If true, result is in degrees; otherwise radians. By default False.
    """
    radians = torch.angle(input_)
    if not deg:
        return radians
    return radians * 180 / math.pi
8c2ae8b274bff1442350185ffcd66dbb2c49ec71
50,190