Columns: content — string (lengths 35 to 416k) · sha1 — string (length 40) · id — int64 (0 to 710k)
import os

def user_owns_file(path, uid=None):
    """
    Returns whether a file located at *path* is owned by the user with *uid*.
    When *uid* is *None*, the user id of the current process is used.
    """
    if uid is None:
        uid = os.getuid()
    path = os.path.expandvars(os.path.expanduser(path))
    return os.stat(path).st_uid == uid
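A minimal usage sketch, added for illustration (the path is hypothetical, and os.getuid is only available on POSIX systems):

if user_owns_file('~/.profile'):
    print('file is owned by the current user')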
c1c3f24bce287f38d6acb104ab01bc58f5ab7cfe
19,366
def _isascii(string):
    """Tests if a given string is pure ASCII; works for both Python 2 and 3"""
    try:
        return len(string) == len(string.encode())
    except UnicodeDecodeError:
        return False
    except UnicodeEncodeError:
        return False
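Quick sanity checks, added for illustration and assuming Python 3 with a UTF-8 default encoding:

assert _isascii('hello')
assert not _isascii('héllo')  # 5 characters but 6 UTF-8 bytes, so not ASCII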
1ef5c22b15b4953b8b9bba8ad9f5d2da0c68ee03
19,368
def result_contains_unexpected_pct(result):
    """
    :param result: a validation result dict
    :return: True if the result reports an unexpected-value percentage and its
        expectation type is not 'expect_column_values_to_be_in_set'
    """
    return 'unexpected_percent' in result['result'] \
        and result['expectation_config']['expectation_type'] != 'expect_column_values_to_be_in_set'
e038707d09f6afe25fec257db7915fca0a0ca026
19,369
def get_high_cardinality_features(X, threshold=50):
    """Get features with more unique values than the specified threshold."""
    high_cardinality_features = []
    for c in X.columns:
        if X[c].nunique() > threshold:
            high_cardinality_features.append(c)
    return high_cardinality_features
4ab222c12a68c2ce259b9de41998d649d80b30ef
19,370
import argparse
import ast

def setup_parser():
    """Set up input arguments."""
    parser = argparse.ArgumentParser(
        description="Generate a bunch of magnitudes with comparisons against "
                    "MCAT values for random points on the sky within given "
                    "legal ranges. Write it to a CSV.")
    parser.add_argument("-a", "--aperture", action="store", dest="radius",
                        help="Aperture radius in decimal degrees", type=float)
    parser.add_argument("-i", "--inner", action="store", dest="annulus1",
                        help="Inner annulus radius for background subtraction",
                        type=float)
    parser.add_argument("-o", "--outer", action="store", dest="annulus2",
                        help="Outer annulus radius for background subtraction",
                        type=float)
    parser.add_argument("--annulus", action="store", type=ast.literal_eval,
                        dest="annulus",
                        help="Annulus inner and outer radius definition as "
                             "'[inner,outer]'")
    parser.add_argument("-f", "--file", action="store", type=str, dest="file",
                        default=None, required=True,
                        help="File name (full path) for the output CSV.")
    parser.add_argument("-b", "--band", action="store", type=str.upper,
                        dest="band", help="Band of NUV or FUV.", default='FUV',
                        required=True, choices=["NUV", "FUV", "nuv", "fuv"])
    parser.add_argument("-n", "--nsamples", action="store", type=int,
                        dest="nsamples", default=10,
                        help="Number of random locations to draw from sky.")
    parser.add_argument("--seed", action="store", type=int, dest="seed",
                        default=323,
                        help="Seed for the random number generator -- for "
                             "reproducibility.")
    parser.add_argument("--rarange", action="store", dest="rarange",
                        type=ast.literal_eval, default=[0., 360.],
                        help="Two element list denoting valid ra range.")
    parser.add_argument("--decrange", action="store", dest="decrange",
                        type=ast.literal_eval, default=[-90., 90.],
                        help="Two element list denoting valid dec range.")
    parser.add_argument("--exprange", action="store", dest="exprange",
                        type=ast.literal_eval, default=[0., 5000.],
                        help="Two element list denoting valid observation "
                             "depths (in seconds).")
    parser.add_argument("--maglimit", action="store", type=float, default=24.,
                        dest="maglimit",
                        help="Lower limit of MCAT magnitudes to use.")
    parser.add_argument("-v", "--verbose", action="store", type=int, default=0,
                        dest="verbose", help="Level of verbosity.",
                        choices=[0, 1, 2])
    return parser
2c2eb0700df45055d86cf76b72f253f084bf0a90
19,371
def _make_grpc_auth_func(auth_func):
    """Creates the auth func expected by the grpc callback."""

    def grpc_auth(dummy_context, callback):
        """The auth signature required by grpc."""
        callback(auth_func(), None)

    return grpc_auth
b65e72222193448d80385eaa4ca543c49494f1e3
19,372
from typing import Dict
import base64
import json

def encoded_to_json(encoded_string: str) -> Dict:
    """
    Transform your encoded string to dict.

    Parameters
    ----------
    encoded_string: str
        your string base64 encoded.

    Returns
    -------
    Dict
        your string cast to a dict.
    """
    decode = base64.b64decode(
        encoded_string + "=" * (-len(encoded_string) % 4),
    )
    return json.loads(decode)
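A round-trip sketch, added for illustration, showing why the padding correction matters — base64 strings often arrive with their trailing '=' stripped:

import base64
import json

payload = base64.b64encode(json.dumps({"a": 1}).encode()).decode().rstrip("=")
print(encoded_to_json(payload))  # {'a': 1}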
37ab5fe52db9f25926f30d0e763177c8f399ff4b
19,374
def prod_client(production_gateway, application, request):
    """Prepares the application and service for production use and creates a
    new production client.

    Parameters:
        app (Application): Application for which to create the client.
        promote (bool): If true, this method also promotes the proxy
            configuration to production.
        version (int): Proxy configuration version of the service to promote.
        redeploy (bool): If true, the production gateway will be reloaded.

    Returns:
        api_client (HttpClient): Api client for the application.
    """
    def _prod_client(app=application, promote: bool = True, version: int = -1,
                     redeploy: bool = True):
        if promote:
            if version == -1:
                version = app.service.proxy.list().configs.latest()['version']
            app.service.proxy.list().promote(version=version)
        if redeploy:
            production_gateway.reload()
        client = app.api_client(endpoint="endpoint")
        request.addfinalizer(client.close)
        return client

    return _prod_client
29dd152a3d15fde1976eda3959922bc7776937e1
19,376
def count_dollar(sentence):
    """Count occurrences of the '$' character in *sentence*."""
    count = 0
    for w in sentence:
        if w == '$':
            count += 1
    return count
c07fe8d9fd227dee4792d442cec85af223bbe094
19,377
def is_ground_area_almost_the_same(city, min_house_area, nodelist_buildings,
                                   variance=2):
    """
    Get the buildings with nearly the same ground area within a city.

    :param city: str, name of city-object
    :param min_house_area: int, default: 35
        minimal area for a considered building
    :param variance: int, default: 2
        variance of the ground area in m^2
    :return: same_ground_areas
        Dict with the area as the key and buildings with the same area
        appended.
    """
    area = []
    for i in nodelist_buildings:
        x = round(city.nodes[i]["area"], 0)
        if x not in area:  # was 'if i not in area', which never deduplicated
            area.append(x)
    same_ground_areas = {key: [] for key in area}
    for i in nodelist_buildings:
        for areas in area:
            if (areas - variance) < city.nodes[i]["area"] < (areas + variance):
                if i not in same_ground_areas[areas]:
                    same_ground_areas[areas].append(i)
    return same_ground_areas
7c4d7c213fe8fb23ca783a2a5dabac4d5a92ed4b
19,378
def createBatchSystem(config, batchSystemClass, kwargs):
    """
    Returns an instance of the given batch system class, or, if a big batch
    system is configured, a batch system instance that combines the given
    class with the configured big batch system.

    :param config: the current configuration
    :param batchSystemClass: the class to be instantiated
    :param kwargs: a dict of keyword arguments to be passed to the given
        class's constructor
    """
    batchSystem = batchSystemClass(**kwargs)
    return batchSystem
afff2df95a353c3138b137ccbfe5e16f22843589
19,379
def sort_in_wave(array):
    """
    Given an unsorted array of integers, sort the array into a wave-like
    array. An array arr[0..n-1] is sorted in wave form if
    arr[0] >= arr[1] <= arr[2] >= arr[3] <= arr[4] >= ...
    """
    n = len(array)
    # The original loop only compared even indices with their right
    # neighbour, which fails on inputs like [4, 3, 2, 1]; every even index
    # must be a local maximum, so check both neighbours.
    for i in range(0, n, 2):
        if i > 0 and array[i - 1] > array[i]:
            array[i], array[i - 1] = array[i - 1], array[i]
        if i < n - 1 and array[i + 1] > array[i]:
            array[i], array[i + 1] = array[i + 1], array[i]
    return array
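A quick check of the wave property on two inputs, added for illustration:

print(sort_in_wave([4, 3, 2, 1]))  # [4, 2, 3, 1] -> 4 >= 2 <= 3 >= 1
print(sort_in_wave([1, 2, 3, 4]))  # [2, 1, 4, 3] -> 2 >= 1 <= 4 >= 3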
1904a8715d2b6a8b8cbd29ed7a129ca45b1d57d0
19,380
def interpret_config_value(value_str):
    """
    Determine the data type from the input string and convert.

    Conversions:
        'True', 'Yes'                          --> True
        'False', 'No'                          --> False
        Starting and ending with ' or "        --> string
        Convertible to int, float or complex   --> converted numeric value
        Starting and ending with []            --> list
        If all the above fails, keep as string.
    """
    if value_str in ('True', 'Yes'):
        return True
    if value_str in ('False', 'No'):
        return False
    if ((value_str[0] == "'") and (value_str[-1] == "'")
            or (value_str[0] == '"') and (value_str[-1] == '"')):
        return value_str[1:-1]
    convert_types = [int, float, complex]
    if (value_str[0] == '[') and (value_str[-1] == ']'):
        values = value_str[1:-1].split(',')
        values_list = []
        for val in values:
            if val in ('True', 'Yes'):
                values_list.append(True)
                continue
            if val in ('False', 'No'):
                values_list.append(False)  # was appending True for False/No
                continue
            if ((val[0] == "'") and (val[-1] == "'")
                    or (val[0] == '"') and (val[-1] == '"')):
                values_list.append(val[1:-1])  # was val[2:-1], dropping a char
                continue
            for t in convert_types:
                try:
                    value = t(val)
                    values_list.append(value)
                    break
                except ValueError:
                    pass
            else:
                values_list.append(val)
        return values_list
    for t in convert_types:
        try:
            value = t(value_str)
            return value
        except ValueError:
            pass
    return value_str
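Illustrative conversions, added for illustration:

print(interpret_config_value("42"))           # 42
print(interpret_config_value("'quoted'"))     # quoted
print(interpret_config_value("Yes"))          # True
print(interpret_config_value("[1,2.5,'a']"))  # [1, 2.5, 'a']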
10994b48647b102032fcb02b5cd4ce401c1044b8
19,381
import os

def isDS(path):
    """Returns a boolean that indicates whether a '.DS_Store' file is in the
    indicated path.

    isDS(path)
    """
    try:
        return '.DS_Store' in os.listdir(path)
    except OSError:
        # os.listdir raises OSError (not KeyError) for an unreadable path
        raise ValueError("Not any '.DS_Store' file on the folder")
61c5d63a7ff0350cab14795589301d054bca7e74
19,382
import glob
import os

def get_cur_ali_num(alignmentdir):
    """Gets number of alignment with largest number in its filename."""
    file_paths = glob.glob(os.path.join(alignmentdir, '*.nex'))
    if len(file_paths) == 0:
        return 1
    else:
        return max([int(os.path.basename(x).split('.')[0]) + 1
                    for x in file_paths])
be9b4ef76ce4513d3615036faeda9c3140cb4dc2
19,383
import re

def any_match(expressions, value):
    """Return True if any expression matches the value."""
    for exp in expressions:
        if re.match(exp, str(value)):
            return True
    return False
34e7aac238be92769965390a702f9a61741f9289
19,385
def getKFold(X):
    """
    param1: pandas.DataFrame
    return: integer

    Returns the number of folds to use for cross-validation, chosen on the
    basis of the dataset's row count.
    """
    k = 2
    rows = X.shape[0]
    if rows > 300 and rows <= 500:
        k = 2
    elif rows > 500 and rows <= 5000:
        k = 5
    elif rows > 5000:
        k = 10
    return k
220df5cb8b262085db25bfca10128c373c64ef3e
19,386
import argparse

def parse_args():
    """Parse command line args."""
    parser = argparse.ArgumentParser(
        description='Pass number of days back to start from')
    parser.add_argument('-d', '--days_back', dest='days_back', type=int,
                        default=1, help='Number of days back')
    return parser.parse_args()
a429aa78735f08cd5fa5dc52ae82ae42b16e70e6
19,388
def subtract(a, b):
    """Subtraction applied to two numbers.

    Args:
        a (numeric): number to be subtracted from
        b (numeric): subtrahend

    Raises:
        ValueError: Raised when inputs are not numeric
    """
    try:
        return a - b
    except TypeError:  # a bare except would also swallow KeyboardInterrupt
        raise ValueError("inputs should be numeric")
7cfa9607145fb1713309fcb2e543a3a528aa26f1
19,389
def retr_smaxkepl(peri, masstotl):
    """
    Get the semi-major axis of a Keplerian orbit (in AU) from the orbital
    period (in days) and total mass (in Solar masses).

    Arguments
        peri: orbital period [days]
        masstotl: total mass of the system [Solar Masses]

    Returns
        smax: the semi-major axis of a Keplerian orbit [AU]
    """
    smax = (7.496e-6 * masstotl * peri**2)**(1. / 3.)  # [AU]
    return smax
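A sanity check against Kepler's third law, added for illustration: the Earth-Sun system (one-year period, one Solar mass) should come out at roughly 1 AU:

print(retr_smaxkepl(365.25, 1.))  # ~1.0 (AU)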
3b5833151888490cd632b8d9a76d5bfcb268a42c
19,390
import csv

def slang_translator(filepath, text):
    """
    Translate slang to normal text.

    :param filepath: path to the .txt file containing the slang translations
    :param text: text to translate
    """
    with open(filepath, "r") as csv_file:
        # Read the file as CSV with "=" as delimiter, so that abbreviations
        # are stored in row[0] and phrases in row[1].
        data_from_file = csv.reader(csv_file, delimiter="=")
        mapping = {row[0]: row[1] for row in data_from_file}
    translated = []
    for word in text.split():
        translation = mapping.get(word)
        if translation:
            translated.append(translation)
        else:
            translated.append(word)
    return ' '.join(translated)
7c70a3ae6612e4c322400ecbc20873566a7e53e5
19,392
def cal_Phi_div_Phiast_conv(phiw, phi_bulk, F1_Z, F2_Z):
    """
    [Auxiliary function]
    Calculate Phi(z)/Phi_ast using the definitions of F1_Z and F2_Z in
    cal_int_Fz, and Eqs. (49), (50), (D1) in [1].

    The original definition of Phi(z) in Eq. (50) in [1] is divided by
    Phi_ast = pi*R^2*phi_bulk*u_ast in accordance with the caption of
    Fig. 11 in [1].

    By definition of T_z[phi] in Eq. (D1) in [1],
        Phi(z)/Phi_ast = (2/phi_bulk)*T_z[phi].
    By definition of the matched asymptotic phi in Eq. (49) in [1],
        phi = (phiw - phi_bulk)*exp(-s_bar) + phi_bulk*(1 - s_bar*exp(-s_bar)).
    Therefore, we have
        Phi(z)/Phi_ast = (2/phi_bulk)*((phiw - phi_bulk)*F1_Z + phi_bulk*F2_Z),
    where F1_Z and F2_Z are defined in the cal_int_Fz function.
    """
    return (2. / phi_bulk) * ((phiw - phi_bulk) * F1_Z + phi_bulk * F2_Z)
06fa1eac90de6ffe1f898c1c80136dd238f29541
19,393
def title_case(sentence):
    """
    Convert a string to title case. Title case means that the first
    character of every word is capitalized.

    Parameters
    ----------
    sentence: string
        string to be converted to title case

    Returns
    -------
    ret: string
        Input string in title case

    Examples
    --------
    >>> title_case("ThIs iS a STring to be ConverteD")
    'This Is A String To Be Converted'
    """
    # Check that input is string
    if not isinstance(sentence, str):
        raise TypeError("Invalid input, must be type string.")
    if len(sentence) == 0:
        raise ValueError("Cannot apply title function to empty string")
    return sentence.title()
b4e2b58c2270af6022aaa82bbae5099ed7d07ea8
19,394
import configparser
import click

def get_token(ctx, param, config_auth):
    """
    Extract token from auth config and do the checks.

    config_auth: ConfigParser with loaded configuration of auth
    """
    try:
        cfg_auth = configparser.ConfigParser()
        cfg_auth.read_file(config_auth)
        return cfg_auth.get('github', 'token')
    except Exception:
        raise click.BadParameter('incorrect configuration format')
f0e51627ef27a998671605a45109ab8866d1bc07
19,395
def get_count_df(tokenized_df, delimiter=None):
    """Returns the count of each token in each document."""
    count_df = (
        tokenized_df[["doc_id", "token"]]
        .groupby(["doc_id", "token"])
        .size()
        .reset_index()
    )
    # rename() defaults to the index axis; the column mapping must be explicit
    count_df = count_df.rename(columns={0: "value"})
    return count_df
f5d0c44a94469ab95de6839da9b54c81c2470c1c
19,397
def set_spat_info(location=None, primary_spatial_reference="string",
                  resolution="string", srid=0):
    """
    Function to create a dictionary with typical values of spatial
    information for the dataset.

    :param location:
    :param primary_spatial_reference:
    :param resolution:
    :param srid:
    :return: returns dictionary of spatial information
    :rtype: dict
    """
    if location is None:
        location = {}
    spat_info = {
        "location": location,
        "primarySpatialReference": primary_spatial_reference,
        "resolution": resolution,
        "srid": srid
    }
    return spat_info
4690bbf16de136df194a517d800192a60298fcd1
19,398
def is_duckument_type(obj):
    """Internal mapping type checker.

    Instead of using `isinstance(obj, MutableMapping)`, duck type checking
    is much cheaper and works in most common use cases.

    If an object has these attributes, it is a document: `__len__`, `keys`,
    `values`.
    """
    doc_attrs = ("__len__", "keys", "values")
    return all(hasattr(obj, attr) for attr in doc_attrs)
31aa4066d5810bb088720f96c889b36e6015583e
19,400
import re

def getPrefix(name):
    """Get common prefix from source image file name."""
    result = re.search(r'([0-9]{2}[A-Z]{3}[0-9]{8})[-_]', name)
    return None if not result else result.group(1)
633223a5ff29005f815a29dfcaed5900c8449b83
19,401
def mod_powers(pwr, div):
    """Returns the set of all possible residues of i**pwr mod ``div``."""
    res = set()
    for i in range(div):
        res.add(pow(i, pwr, div))
    return res
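For example, the quadratic residues modulo 7, added for illustration:

print(mod_powers(2, 7))  # {0, 1, 2, 4}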
24400bc8cc70dc04fb44f463230291516c9a8cd2
19,403
def search_parenthesis(str_, start=0, end=None):
    """
    Find the start and end of a parenthesis pair, ignoring any that are
    children or descendants of the original parenthesis: for "(())" it
    returns [0, 3].

    Example:
        >>> search_parenthesis("hola(como ((estas)) )")
        [4, 20]

    Note: it checks whether the first parenthesis was left open, and in
    that case returns -1.

    Example:
        >>> search_parenthesis("hola(como ((estas) )")
        -1

    It does not, however, check whether more parentheses are closed than
    opened.
    """
    LEN = len(str_)  # cached for speed
    # Make sure a sensible end value was given.
    if not isinstance(end, int):
        end = LEN
    elif end > LEN or end < 0:
        end = LEN
    del LEN  # no longer needed from here on
    i_parenthesis = 0
    init_end = [0, 0]
    for i in range(start, end):
        if str_[i] == '(' or str_[i] == ')':
            if str_[i] == '(':
                if i_parenthesis == 0:
                    # This is the opening: record the start position.
                    init_end[0] = i
                i_parenthesis += 1
            else:
                i_parenthesis -= 1
                if i_parenthesis == 0:
                    # This is the close: record it and return the coordinates.
                    init_end[1] = i
                    return init_end
    return -1
19efe04b051e0715dbb1c0bda5238a9b2e3c0de3
19,404
def field_to_attr(field_name):
    """Convert a field name to an attribute name.

    Make the field all lowercase and replace ' ' with '_'
    (replace space with underscore).
    """
    result = field_name.lower()
    if result[0:1].isdigit():
        result = "n_" + result
    result = result.replace(' ', '_')
    result = result.replace('/', '_')
    result = result.replace('-', '_')
    result = result.replace('.', '_')
    result = result.replace('+', '_plus')
    return result
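A couple of illustrative conversions, added for illustration (the field names are hypothetical):

print(field_to_attr("Max Speed"))       # max_speed
print(field_to_attr("2nd Place/Time"))  # n_2nd_place_time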
b6aa8a55f660648e3d259a6b6df0f560550c7d8a
19,406
import requests

def create_messages(name='', content='', fans_id=''):
    """
    Create a message.

    :param name:
    :param content: message content
    :param fans_id:
    :return:
    """
    url = 'http://ahasmarter.com/api/v1/ycy/messages/create'
    data = {
        'name': name,
        'fans_id': fans_id,
        'context': content,
    }
    r = requests.post(url, json=data)
    status = r.json()['status']
    return status
1db0871b605c13f45142c59e0460b5162d7ff921
19,408
def _CreateSampleCoverageSummaryMetric():
    """Returns a sample coverage summary metric for testing purpose.

    Note: only use this method if the exact values don't matter.
    """
    return [{
        'covered': 1,
        'total': 2,
        'name': 'region'
    }, {
        'covered': 1,
        'total': 2,
        'name': 'function'
    }, {
        'covered': 1,
        'total': 2,
        'name': 'line'
    }]
d05de65e9ee3a7e092823b8967adde852a613dbd
19,409
def dec_to_bin(n):
    """
    :param n: decimal number
    :return: decimal converted to binary
    """
    return bin(int(n)).replace("0b", "")
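For instance, added for illustration:

print(dec_to_bin(10))  # 1010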
ba5371f560c5456024852030c2ec650dd9002fee
19,412
def get_words_list() -> list[str]:
    """Returns a list of words from the file
    wordle-answers-alphabetical.txt."""
    with open("wordle-answers-alphabetical.txt", "r",
              encoding="utf-8") as file:
        words = file.read().splitlines()
        return words
fa14e3ac576f93d92e6453df36010f1b51b2082b
19,414
def find_idx(words, idx):
    """
    Looks for the named index in the list.

    Tests for the index name surrounded by quotes first; if that is not
    present, falls back to the index name without quotes.
    """
    try:
        return words.index('"' + idx + '"')
    except ValueError:
        return words.index(idx)
4c76fcd752de0545cea77def672d1555c071815f
19,415
def is_unicode_list(value):
    """Checks if a value's type is a list of Unicode strings."""
    if value and isinstance(value, list):
        return isinstance(value[0], str)
    return False
c6c88770be2d1e75f800b3c22ec52a9a17384d73
19,416
def bdev_wait_for_examine(client):
    """Report when all bdevs have been examined."""
    return client.call('bdev_wait_for_examine')
b60c54be4db9756302ba1005a839e9a3e0999750
19,417
import binascii

def hexarray_to_str(hexa):
    """Convert hex array into byte string."""
    hstr = ''.join(["{:02x}".format(h) for h in hexa])
    return binascii.unhexlify(hstr)
59af9dad6833bb417cb9c29f60c498b8452951d7
19,418
def filter_resources(resource, filter_name, filter_value):
    """Filter AWS resources.

    Args:
        resource (object): EC2 resource
        filter_name (string): Filter name
        filter_value (string/list): Filter value(s)

    Returns:
        List: Filtered AWS resources
    """
    values = [filter_value]
    if type(filter_value) is list:
        values = filter_value
    filters = [{
        "Name": filter_name,
        "Values": values
    }]
    return list(resource.filter(Filters=filters))
d72f091af78ec73a81a50dc02cbeefde94de58d9
19,419
def isprime(n):
    """Trial-division primality test, checking divisors up to sqrt(n).

    (The original docstring claimed memoization, but none is used.)
    """
    if n < 2:  # guard added: 0 and 1 are not prime
        return False
    for p in range(2, int(abs(n) ** 0.5) + 1):
        if n % p == 0:
            return False
    return True
5d7be1cd8019fb2f31b319ba9225a74f81001fba
19,420
def parent(groups, ID):
    """Given a groups dictionary and an ID, return its actual parent ID."""
    if ID in groups.keys():
        return ID  # already a parent
    for actualParent in groups.keys():
        if ID in groups[actualParent]:
            return actualParent  # found the actual parent
    return None
7aadaecb901f08a81d60bd1c7920f4b8498e8c6f
19,422
def get_size_class(earlength):
    """Determine the size class of earlength based on Dr. Granger's
    specification."""
    # The original if/else always overwrote size_class and returned
    # earlength instead of the class; the 8-10 band for 'medium' below
    # follows the apparent intent of the thresholds.
    if earlength > 15:
        size_class = 'extralarge'
    elif earlength > 10:
        size_class = 'large'
    elif earlength > 8:
        size_class = 'medium'
    else:
        size_class = 'small'
    return size_class
3d0a16639250b45dd9f273244cdba44efd87e880
19,423
def toggle_manual_match_form(method):
    """Hide/show field based on other values."""
    # This one is for manually picking years.
    # 1 = override (manual match). 0 = auto match.
    if method == 1:
        return "visible"
    return "hidden"
9ba3b63898f4a3c4a0bba19abe77ae218ae23275
19,424
def _detect_repeating_header(header):
    """Input: header (list of str)
    Return: n (int) or None, labelindices []
    """
    # sanity check
    if header is None or len(header) == 0:
        return None, []
    # find periodicity
    label1 = header[0]
    if label1 == '':
        return None, [0]
    labelindices = []
    n = None
    for a in range(0, len(header)):
        if header[a] == label1:
            labelindices.append(a)
            if a > 0 and n is None:
                n = a
    # special case: if the first header == '.' but it's not otherwise
    # periodic, then the period is 1
    if n is None and label1 == '.':
        n = 1
    return n, labelindices
80829a47d5831acba503a349e3520368cd2530f8
19,425
import os

def filename(fullname: str) -> str:
    """Extract the filename from a filename with extension."""
    return os.path.splitext(fullname)[0]
3934e62c148b275483b6fce40d02ea17081bbb10
19,426
def calculate_stimulation_freq(flash_time: float) -> float:
    """Calculate Stimulation Frequency.

    In an RSVP paradigm, the inquiry itself will produce an SSVEP response
    to the stimulation. Here we calculate what that frequency should be
    based on the presentation time.

    PARAMETERS
    ----------
    :param: flash_time: time in seconds to present RSVP inquiry letters
    :returns: frequency: stimulation frequency of the inquiry
    """
    # We want to know how many stimuli will present in a second
    return 1 / flash_time
069165f62416a79a0ba59893c9a61d8c99d3c903
19,431
from datetime import datetime

def current_date():
    """Return the current date."""
    return datetime.now().date()
566de85bfbaac89d983dadf3680e96c279dc2ee6
19,432
from typing import Set
import re

def parse_story_from_pr_body(body: str) -> Set[str]:
    """
    Parse keywords (e.g. "Fixes [ch-xxx]") from a PR body.

    :param body: the body of the pull request
    :return: set of stories
    """
    candidates = []
    stories: Set[str] = set()
    regexp = re.compile(
        r"(fix|fixes|fixed|resolve|resolved|resolves|close|closed|closes)"
        r"\s+"
        r"(\[ch-\d+\]|\[sc-\d+\])",
        re.IGNORECASE,
    )
    for match in regexp.finditer(body):
        match_string = match.group()
        print("matched :", match_string)
        candidates.append(match_string)
    if not candidates:
        print("no matching stories")
        return stories
    for candidate in candidates:
        story = candidate.split()[-1][4:-1]
        print("story :", story)
        stories.add(story)
    return stories
340b9d939389d1b7035fee1d81f6d6fb4ab0eb41
19,433
def parse_ants(antstr):
    """Split apart command-line antennas into a list of baselines."""
    rv = [s.split('_') for s in antstr.split(',')]
    rv = [(int(i[:-1]), int(j[:-1]), i[-1] + j[-1]) for i, j in rv]
    return rv
438abdf11acaebdc471d6d9e6f007e4d27b3f293
19,434
def subtest2_weight(sample, beta):
    """Provides a weight function to integrate on the sampled phase space."""
    z = sample[:, 0]
    theta = sample[:, 1]
    # for subtest1: linweight = 1. * np.ones(len(sample))
    c = z * theta**beta
    linweight = (c / z)**(1. / beta) / (beta * c)
    return linweight
0acb5a544d378429c591ebf166045175e2c17538
19,435
def get_prop_range_def(rng_defs):
    """
    Returns a unique range definition for the current instance.

    args:
        rng_defs: the output from 'get_prop_range_defs'
    """
    if len(rng_defs) > 1:
        pass  # ! write function to merge range defs
    try:
        return rng_defs[0]
    except IndexError:
        return {}
541220421a00359820048e8aefc4af5212f5da0f
19,436
def luhn_check_generator_10(N):
    """
    Luhn mod 10 check digit generator.

    Generates the check digit of a numeric sequence using the Luhn
    algorithm.

    :return: Generated check digit
    :rtype: int
    """
    # Digital sums of the doubled digits 0, 2, 4, ..., 18
    digital = {0: 0, 2: 2, 4: 4, 6: 6, 8: 8, 10: 1, 12: 3, 14: 5, 16: 7,
               18: 9}
    # Storing the sum
    s = 0
    for i in range(len(N)):
        if i & 1:
            # Double the odd index numbers, 0-index notation
            s += digital[int(N[i]) * 2]
        else:
            # Keep the even index numbers as is, 0-index notation
            s += int(N[i])
    s *= 9
    # Mod 10 check
    check = s % 10
    return check
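A check against the textbook Luhn example, added for illustration: the payload 7992739871 should yield check digit 3 (full number 79927398713). Note the odd-index doubling here assumes an even-length payload, so that the rightmost digit is among the doubled ones:

print(luhn_check_generator_10("7992739871"))  # 3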
d124d69181fd525d27459b1eb5f81e7414ebe860
19,437
from datetime import datetime

def get_utcnow():
    """Task version of `datetime.utcnow`."""
    return datetime.utcnow()
42851e1ca822ddb76912605eceeb8d164a8f8723
19,438
def clean_text(x):
    """Helper function to clean a string."""
    x = str(x)
    x = x.lower()
    x = x.strip()
    x = " ".join(x.split())  # removes extra whitespace between words
    return x
74dbb5e07c42c668cdbe380670e87e81f4678407
19,439
def getIncomparable(rs1, rs2):
    """
    Return the set of problem IDs which are only solved by rs1.

    :param rs1: result set of the 1st approach
    :param rs2: result set of the 2nd approach
    :return: a set {pid, stim}
    """
    inc = set()
    for pid in rs1.keys():
        r2 = rs2.get(pid)  # rs2[pid] would raise KeyError for missing IDs
        if r2 is None:
            inc.add(pid)
    return inc
a8b4baa5fc43b2d9f136c63f5aabe624149a9d47
19,441
def get_n_overlapping_chunks(data_size, chunk_size, chunk_overlap):
    """Compute the number of overlapping chunks.

    Args:
        data_size (int)
        chunk_size (int)
        chunk_overlap (float): overlap as a fraction of chunk_size

    Returns:
        The number of chunks.
    """
    hop_size = chunk_size * (1 - chunk_overlap)
    return int((data_size - chunk_size) / hop_size + 1)
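A worked example, added for illustration: 10 samples cut into chunks of 4 with 50% overlap gives a hop size of 2 and therefore (10 - 4)/2 + 1 = 4 chunks:

print(get_n_overlapping_chunks(10, 4, 0.5))  # 4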
8ada086145d136638e5ba7703cc2095ee696fbd0
19,443
def _dynamic_outputs_creation(dynamic_io_settings):
    """Creates a list of output names, supplied to the Task class."""
    parameters = dynamic_io_settings["TaskSettings"]["Parameters"]
    return ["TopicData({},{})".format(*parameters.values())]
cc4a36c602da398e40d3fdd099c4655c3cf3462d
19,444
import os

def find_visual_studio():
    """
    Attempt to find Visual Studio (VC versions 13.0, 12.0 or 11.0).

    :return: (vcdir, vcvars)
    """
    vcvers = ["13.0", "12.0", "11.0"]
    for vc in vcvers:
        vcdir = os.path.join("c:\\", "Program Files (x86)",
                             "Microsoft Visual Studio {}".format(vc),
                             "VC", "bin")
        vcvars = os.path.join(vcdir, "vcvars32.bat")
        if os.path.exists(vcvars):
            return vcdir, vcvars
    raise Exception("cannot find visual studio!")
65a06bf82b31d30eb2cb190756ed68bfa6450442
19,445
def add_filter_to_request(data, additional_fieldname, exclusive_operator,
                          operator, value):
    """
    Add an additional filter to an API request.

    :param data: prepared request
    :param additional_fieldname: field name used to create the additional
        filter.
    :param exclusive_operator: exclusive operator used to create the
        additional filter.
    :param operator: operator used to create the additional filter.
    :param value: value used to create the additional filter.
    :return: dict
    """
    filters = data.get('filters', [])
    filtr = {'field': additional_fieldname,
             'exclusive': exclusive_operator,
             'operator': operator,
             'value': value}
    filters.append(filtr)
    data['filters'] = filters
    return data
48fa136b6c27fdf4e5c271e5cab8ca191dae443b
19,446
import re

def get_peptides(seq, amino_acid):
    """In silico digest of a sequence, given the site at which the enzyme
    cuts.

    Parameters
    ----------
    seq : str
        sequence of amino acids
    amino_acid : str
        one-letter code for the site at which the enzyme cleaves

    Returns
    -------
    r_peptides : list of strings
        list of peptides resulting from the in-silico digest
    """
    r_indeces = [m.end() - 1 for m in re.finditer(amino_acid, seq)]
    if amino_acid == 'R':
        if r_indeces[-1] < (len(seq) - 1):
            r_indeces = [r for r in r_indeces if seq[r + 1] != 'P']
        else:
            r_indeces[:-1] = [r for r in r_indeces[:-1] if seq[r + 1] != 'P']
    r_indeces.append(len(seq))
    start = 0
    r_peptides = []
    for ind in r_indeces:
        r_peptides.append(seq[start:ind + 1])
        start = ind + 1
    return r_peptides
d876fb401c2df3890932182a9c39c0768a11806e
19,447
def maybe_map(apply_fn, maybe_list):
    """
    Applies `apply_fn` to all elements of `maybe_list` if it is a list,
    else applies `apply_fn` to `maybe_list`. The result is always a list;
    an empty list if `maybe_list` is None.
    """
    if maybe_list is None:
        return []
    elif isinstance(maybe_list, list):
        return [apply_fn(item) for item in maybe_list]
    else:
        return [apply_fn(maybe_list)]
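Behaviour at a glance, added for illustration:

print(maybe_map(str.upper, ['a', 'b']))  # ['A', 'B']
print(maybe_map(str.upper, 'a'))         # ['A']
print(maybe_map(str.upper, None))        # []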
13f61f0d7c6592dc4ea3cce091635f5119f1a70c
19,448
import os

def get_result(db, result_id):
    """
    :param db: a :class:`openquake.server.dbapi.Db` instance
    :param result_id: a result ID
    :returns: (job_id, job_status, user_name, datadir, datastore_key)
    """
    job = db('SELECT job.*, ds_key FROM job, output WHERE '
             'oq_job_id=job.id AND output.id=?x', result_id, one=True)
    return (job.id, job.status, job.user_name,
            os.path.dirname(job.ds_calc_dir), job.ds_key)
586d2f1d1fa39aa23b96aa5afd190079abb72469
19,449
from datetime import datetime

def format_timestamp(dt_object):
    """Formats a datetime object into a Joplin timestamp (milliseconds)."""
    return int(datetime.timestamp(dt_object)) * 1000
e728a4da2c5148a4e815af1485fc77029ef03fd0
19,450
from typing import List

def _find_index(distance_along_lane: float, lengths: List[float]) -> int:
    """
    Helper function for finding the index of the path along the lane
    corresponding to distance_along_lane.

    :param distance_along_lane: Distance along the lane (in meters).
    :param lengths: Cumulative distance at each end point along the paths
        in the lane.
    :return: Index of path.
    """
    if len(lengths) == 1:
        return 0
    else:
        return min(index for index, length in enumerate(lengths)
                   if distance_along_lane <= length)
7a36cd603d266653155e089eb0e72099210601f5
19,452
def handle_frame(image_frame):
    """
    Process an image frame.

    :param image_frame: image frame
    :return:
    """
    image_frame_result = image_frame * 1.2
    # If a colour value exceeds 255, clip it to 255
    image_frame_result[image_frame_result > 255] = 255
    return image_frame_result
9255a99a66aff0c29e1fb86d9900459930a0dd7c
19,454
def sum_multiples_of_3_or_5_below_n(n):
    """
    If we list all the natural numbers below 10 that are multiples of 3
    or 5, we get 3, 5, 6 and 9. The sum of these multiples is 23.
    """
    return sum(x for x in range(n) if (x % 3 == 0 or x % 5 == 0))
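The docstring's example plus the classic Project Euler #1 input, added for illustration:

print(sum_multiples_of_3_or_5_below_n(10))    # 23
print(sum_multiples_of_3_or_5_below_n(1000))  # 233168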
4e3f78190c658690bd4a66b67f9f4c07593cea1e
19,456
import os

def dicom_file_list(indir):
    """Returns a list of the names of all dicom files in a dicom series,
    given the directory in which they are stored. The list is not in any
    particular order."""
    files = []
    if os.path.isdir(indir):
        for rt, dr, fl in os.walk(indir):
            files.extend(fl)
            break
    else:
        raise IOError("{0} was not a directory.".format(indir))
    dicomfiles = [os.path.join(indir, f) for f in files if f[-4:] == ".dcm"]
    return dicomfiles
19b51e4e4ce6269b58cc1c21ef9dbf32b091a832
19,463
def string_alternative(some_str: str) -> str:
    """
    :param some_str:
    :return: every other letter of the string passed
    """
    return some_str[::2]
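For example, added for illustration:

print(string_alternative("Hello World"))  # HloWrd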
79702d70098ad65ae5e229634c8dded8aa18e8d2
19,466
import subprocess

def check_output(*popenargs, **kwargs):
    """Run command with arguments and return its output as a string."""
    return subprocess.check_output(*popenargs, **kwargs, encoding='utf-8')
fafbbb7a56d57474f7e48d860794f4f0c9d6944d
19,467
def find_submission_body_end(submission_body: str) -> int:
    """Finds the end of the story's body by stripping edits and links at
    the bottom.

    Returns:
        the index at which the story's body ends.
    """
    markers = [
        '-Secrets',
        'EDIT:',
        'Edit:',
        'Continued in [part',
        'Concluded in [Part',
        '[Part'
    ]
    # only match the bottom half of the story so we don't
    # match things at the start of the story
    offset = int(len(submission_body) / 2)
    # find the end of the story, marked by one of the markers,
    # or none, and then we don't cut off anything at the end
    story_end_offset = None
    for end_marker in markers:
        try:
            story_end_offset = submission_body.index(end_marker, offset)
        except Exception:
            continue
        break
    # no marker has been found, take the entire story
    if story_end_offset is None:
        story_end_offset = len(submission_body)
    return story_end_offset
d4c533ebf322956304e38eced409a9a1c043e37c
19,468
def valuefy(strings, type_cast=None):
    """
    Return a list of values from an underscore-separated string, cast by
    the type_cast list.

    Return type: list of values.
    By default, the type cast is int.
    """
    vals_string = strings.split('_')
    if type_cast is None:
        type_cast = [int] * len(vals_string)
    return [t(e) for e, t in zip(vals_string, type_cast)]
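For instance, added for illustration:

print(valuefy("1_2_3"))                # [1, 2, 3]
print(valuefy("1_2.5", [int, float]))  # [1, 2.5]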
36bc5a6ff64826877bd4f434a0e540f72c4aeb9e
19,469
import subprocess
import six

def grep(path, expr):
    """
    Call out to native grep rather than feeding massive log files through
    python line by line.
    """
    p = subprocess.Popen(["grep", expr, path], stdout=subprocess.PIPE)
    # communicate() both waits and drains the pipe; calling p.wait() first
    # can deadlock once the output exceeds the pipe buffer.
    out, err = p.communicate()
    if p.returncode == 0:
        return six.ensure_str(out).split("\n")
    else:
        return []
121108104fe3e2527c476346dc1cb0e2ee418100
19,470
def table_fields(Fields, Table, Columns, selection):
    """
    This function returns input fields for the specified table.

    Arguments:
        Table - a table string as returned by validate_fields.
        Columns - the primary and secondary column lists for tables as
            returned by validate_fields.
        Fields - a dictionary of input fields as returned by
            validate_fields.
    """
    output_fields = {}
    if selection == 'insert':
        for field in Columns[Table][0]:
            if field in Fields:
                output_fields[field] = Fields[field]
    if selection == 'insert' or selection == 'update':
        for field in Columns[Table][1]:
            if field in Fields:
                output_fields[field] = Fields[field]
    return output_fields
53991b881d34a03b1076372896af66e1255bad47
19,472
import pickle

def read_pickle(file):
    """Read a pickle file."""
    with open(file, "rb") as handle:
        return pickle.load(handle)
d074df20a4b035137efb8c1fc52f6e9fcdda7add
19,474
import os

def gcs_default_bucket():
    """Returns GCS_BUCKET if the env var is set."""
    return os.environ.get('GCS_BUCKET', '')
e8ed1fc4a569106bd7751fbe45fb19167ccbb95c
19,475
def get_attached_ous(request):
    """Method to get attached OUs."""
    try:
        ous = []
        if 'ou_attached' in request.session:
            attached_ous = request.session['ou_attached']
            if attached_ous:
                ous = [int(ou) for ou in attached_ous.split(',')]
    except Exception as e:
        print('error getting attached ous - {}'.format(str(e)))
        return []
    else:
        return ous
dc161b66297615b8853ad91972b3af33e61fefde
19,476
import json

async def supd(ctx, query: str, *, data: str):
    """Update a document."""
    try:
        await ctx.send(f'`{repr(query)}`')
        await ctx.send(f'`{repr(data)}`')
        query = json.loads(query)
        data = json.loads(data)
    except Exception:  # a bare except would also swallow CancelledError
        return await ctx.send('Failed to parse json')
    await ctx.send(await ctx.bot.storcord.update_one(query, data))
9c5fc5b3126cd9e506698def8f78618414b62a4a
19,477
def fib(num):
    """
    Recursively finds the nth number in the Fibonacci sequence
    1, 1, 2, 3, 5, 8, ...

    @param {number} num
    @return {number}
    """
    if num <= 2:
        return 1
    return fib(num - 1) + fib(num - 2)
db758bc3fff2d01b9ca2d46f0d68bba346b00a49
19,479
import re

def remove_tags(sent: str) -> str:
    """
    Replace every tag with "_".

    Parameters
    ----------
    sent: str
        Input string in CoNLL-U format.

    Returns
    -------
    str:
        Processed string.
    """
    return re.sub(
        r"(^\d+(?:\-\d+)?\t*(?:[^\t]*)\t(?:[^\t]*)\t)(\w+)",
        r"\1_",
        sent,
        flags=re.MULTILINE,
    )
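A single-row sketch of the substitution, added for illustration (the line below is a hypothetical CoNLL-U row with ID, FORM, LEMMA, UPOS and one placeholder column):

row = "1\tDogs\tdog\tNOUN\t_"
print(remove_tags(row))  # 1\tDogs\tdog\t_\t_  (the UPOS tag is blanked)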
9966ab60bcf1b9db19da23f48e7cc52b6dfdf580
19,480
def twoSum(numbers, target):
    """
    :type numbers: List[int]
    :type target: int
    :rtype: List[int]
    """
    def search_another(arr, lo, target2):
        # Binary search for target2 in arr[lo:]
        hi = len(arr) - 1
        while lo <= hi:
            mid = (lo + hi) // 2
            if target2 > arr[mid]:
                lo = mid + 1
            elif target2 < arr[mid]:
                hi = mid - 1
            else:
                return mid
        return -1

    for i in range(len(numbers)):
        if numbers[i] <= target:
            key = search_another(numbers, i + 1, target - numbers[i])
            if key == -1:
                continue
            else:
                return [i + 1, key + 1]
b4393c51dbf9e98e28d78fdde44d472ef5adfb11
19,481
def non_negative_int(argument):
    """
    Converts the argument into an integer. Raises ValueError for negative
    or non-integer values.
    """
    value = int(argument)
    if value >= 0:
        return value
    else:
        raise ValueError('negative value defined; must be non-negative')
b5c447055305141144934a484efc28ee07a7c6fe
19,482
def _process_action_seq(sequence, length=15):
    """
    A utility function for encoding the historical actions. We encode 15
    actions; if there are fewer than 15, we pad with empty lists.
    """
    sequence = sequence[-length:].copy()
    if len(sequence) < length:
        empty_sequence = [[] for _ in range(length - len(sequence))]
        empty_sequence.extend(sequence)
        sequence = empty_sequence
    return sequence
cb6f3409c3f44ff6194ccc262a274c426ecac0af
19,483
import os
import re

def log_kv(filepath: str, phase: str, keys: list):
    """Parse key/value pairs from a log file. All input is
    case-insensitive; the phase and keys should be on the same line.

    Args:
        filepath: the path to the log file.
        phase: [TRN], [TST] or [VAL].
        keys: a list like ['loss', 'mae', 'rmse', 'error']

    Returns:
        data: a dict:
            data['iter']: a list
            data[key]: a list
    """
    if not os.path.exists(filepath):
        raise ValueError('File could not be found in %s' % filepath)
    # transfer to lower case
    phase = phase.lower()
    # return data
    data = {}
    data['iter'] = []
    for key in keys:
        key = key.lower()
        data[key] = []
    # parse
    with open(filepath, 'r') as fp:
        for line in fp:
            line = line.lower()
            if line.find(phase) < 0:
                continue
            # record iteration
            r_iter = re.findall('iter:(.*?),', line)
            data['iter'].append(int(r_iter[0]))
            # find each matched key
            for key in keys:
                key = key.lower()
                r_key = re.findall(key + ':(.*?),', line)
                if not r_key:
                    r_key = re.findall(key + ':(.*).', line)
                if r_iter and r_key:
                    data[key].append(float(r_key[0]))
    # check equal
    for key in keys:
        assert len(data['iter']) == len(data[key])
    return data
9092b22063eaa87f4e71f7277724dea5b2db9ea5
19,485
def parse_inpaint_params(**kwargs):
    """
    Function to parse the inpainting params.

    :param kwargs:
    :return:
    """
    sigma_colour = 75
    max_level = 4
    patch_size = {
        "x": 5,
        "y": 5,
        "t": 5
    }
    texture_feature_activated = 1
    return [max_level, patch_size, texture_feature_activated, sigma_colour,
            "file"]
099b45d27b1d29727c6c9e907aa43c2d1d5e5253
19,487
def trim_words(word_set, data_sets, num):
    """
    Trim the word set down to the *num* most frequent words.

    Args:
        word_set: word set
        data_sets: data set list
        num: trim number
    """
    word_dict = {}
    for data in data_sets:
        for word_list, _ in data:
            for word in word_list:
                if word not in word_set:
                    continue
                if word in word_dict:
                    word_dict[word] += 1
                else:
                    word_dict[word] = 1
    sorted_list = sorted(word_dict.keys(), key=lambda w: word_dict[w],
                         reverse=True)
    result_set = set()
    result_set.update(sorted_list[:num])
    return result_set
a2b497a9296b07cc0d8467a604b66587c0a45671
19,488
import os

def stratis_link(pool_name, fs_name=None):
    """
    Generate the stratis symlink for the pool and optionally for FS.

    :param pool_name:
    :param fs_name:
    :return: Full path and name to symlink
    """
    fs_path = os.path.join(os.path.sep + "stratis", pool_name)
    if fs_name:
        fs_path = os.path.join(fs_path, fs_name)
    return fs_path
30645de44fa3209b737c69f075f86bfae46e3db7
19,489
from typing import Any
from unittest.mock import Mock

def mock_response(status: int = 200, content: str = "CONTENT",
                  data: Any = None) -> Mock:
    """A helper function to mock a response with the given arguments.

    Arguments:
        status: Status code of the response.
        content: Content value of the response.
        data: The return value of response.json().

    Returns:
        Mock of the response.
    """
    response = Mock()
    response.status_code = status
    response.content = content
    if data:
        response.json = Mock(return_value=data)
    return response
64fdf0c44e964a5a74047bd84653fca40bd133f9
19,490
import requests

def get_keywords(text):
    """Get keywords that relate to this article (from the NLP service).

    Args:
        text (string): text to extract keywords from

    Returns:
        [list]: list of extracted keywords
    """
    extracted_keywords = []
    request = {'text': text}
    nlp_output = requests.post('http://localhost:8081/', json=request)
    json_output = nlp_output.json()
    if 'error' in json_output:
        raise Exception(json_output['error']['message'])
    for keyword in json_output["tokens"]:
        extracted_keywords.append(keyword["lemma"])
    return extracted_keywords
3130034d2418a61f236b9b8f42de4656106d50d5
19,491
from collections import Counter  # was 'from typing import Counter'
import os

def train_count(path):
    """Extracts training data from corresponding files."""
    vocab = set()
    legit_terms = Counter()     # counter for number of each term in legit docs
    spam_terms = Counter()      # counter for number of each term in spam docs
    legit_doc_freq = Counter()  # document frequencies among legit docs
    spam_doc_freq = Counter()   # document frequencies among spam docs
    legit_docs = 0              # number of legit documents
    spam_docs = 0               # number of spam documents

    path_legit = path + "/training/legitimate"
    for root, dirs, filenames in os.walk(path_legit):
        for filename in filenames:
            if filename.endswith('.txt'):
                filepath = path_legit + "/" + filename
                with open(filepath, "r") as file:
                    text = file.read().split()   # read and split the email
                legit_doc_freq.update(set(text)) # update document frequency of each word
                legit_terms.update(text)         # update number of each term
                vocab = vocab | set(text)        # update the vocabulary
                legit_docs += 1

    path_spam = path + "/training/spam"
    for root, dirs, filenames in os.walk(path_spam):
        for filename in filenames:
            if filename.endswith('.txt'):
                filepath = path_spam + "/" + filename
                with open(filepath, "r") as file:
                    text = file.read().split()  # read and split the email
                spam_doc_freq.update(set(text)) # update document frequency of each word
                spam_terms.update(text)         # update number of each term
                vocab = vocab | set(text)       # update the vocabulary
                spam_docs += 1

    return (legit_terms, spam_terms, legit_doc_freq, spam_doc_freq,
            legit_docs, spam_docs, vocab)
3f08afd6c7ad213649188c057d7147868303a48a
19,492
def _reverse_input_einsum_eq(equation):
    """
    Reverse the input order of the einsum equation.
    e.g.:
        input   : "nchw,nwhu->nchu"
        returns : "nwhu,nchw->nchu"
    """
    input_output_strings = equation.split('->')
    assert len(input_output_strings) == 2, "invalid equation"
    input_strings = input_output_strings[0].split(',')
    assert len(input_strings) == 2, "invalid equation"
    equation = (input_strings[1] + ',' + input_strings[0] + '->'
                + input_output_strings[1])
    return equation
f8c2900b6592f04fdc72b85c5ffdaba4b3b34952
19,493
def instantiate(f):
    """
    Decorator that replaces a type returned by the decorated function with
    an instance of that type, and leaves the result as-is if it is not a
    type.
    """
    def decorated(*args, **kwargs):
        r = f(*args, **kwargs)
        return r() if isinstance(r, type) else r

    return decorated
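A small illustration, added for illustration: the decorated function returns the dict class, but callers receive a fresh instance:

@instantiate
def default_container():
    return dict  # a type, so the decorator calls it

print(default_container())  # {} -- an instance, not the class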
68213a110d385412e7c57349c86f04e3e96005db
19,494
def select_regions(localqc_table, genomeinfo, resolution, elongation=150,
                   maxdisp=2.5):
    """
    Select regions to display based on their dispersion and their read
    count intensity.

    :param localqc_table: LocalQC regions file
    :param genomeinfo: Dictionary of chromosomes' size
    :param resolution: Total size of the desired regions (bp)
    :param elongation: Length (bp) to stretch the region in both directions
    :param maxdisp: Dispersion filtering (the lower the better)
    :return: A list of regions and the total number of reads in all
        selected regions
    """
    regions = {}
    total_reads = []
    with open(localqc_table) as f:
        for line in f:
            cols = line.rstrip().split('\t')
            chrm = cols[0]
            # Dispersion 10% for 50% sampling
            # Use the new localqc table format (10 columns instead of 13)!
            disp = abs(float(cols[9]))
            if disp <= maxdisp and not chrm.lower().startswith(
                    ('chrx', 'chry', 'chrm')):
                total_reads.append(int(cols[3]))
                bin_start = int(cols[1])
                start = bin_start - resolution / 2 - elongation
                end = int(cols[2]) + resolution / 2 + elongation
                # Keep the region only if it lies entirely in the chromosome
                if 0 <= start < end <= genomeinfo[chrm]:
                    if chrm not in regions:
                        regions[chrm] = []
                    # Store start, end, reads, bin_start
                    regions[chrm].append([start, end, int(cols[3]),
                                          bin_start])
    return regions, total_reads
adc1fd762165fd7d6170605ac11be92f370baeb1
19,496
def make_word_list():
    """Read the words in words.txt and return a list that contains the
    words."""
    with open('words.txt') as fin:
        word_list = []
        for line in fin:
            word = line.strip().lower()
            word_list.append(word)
    return word_list
c47353440e92f83fe10d70060a7076e47e5265b6
19,497
def halosInMassRange(massColumn, minMass, maxMass, VERBOSE=True):
    """
    Returns the selection array which has 'True' values for halos with
    masses in the range 'minMass' to 'maxMass'. The masses are given by
    the argument 'massColumn'.
    """
    if VERBOSE:
        print(
            "Selecting the halos with masses in the interval [%e,%e] ... "
            % (minMass, maxMass)
        )
    return (massColumn >= minMass) * (massColumn <= maxMass)
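A quick sketch with a NumPy array, added for illustration (the element-wise product of the two boolean masks acts as a logical AND):

import numpy as np

masses = np.array([1e11, 5e12, 2e14])
print(halosInMassRange(masses, 1e12, 1e15, VERBOSE=False))
# [False  True  True]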
361e7faffaf2133e222859fdbb8431704af52099
19,498
from typing import Optional
import os

def parse_name(path: str, root: 'Optional[str]' = None) -> str:
    """Generate a name for a python module based on the given path.

    :param path: The path to parse into a name
    :param root: A root path for this file
    :return: A name for a python module at the given path
    """
    name = os.path.splitext(os.path.normpath(os.path.relpath(path, root)))[0]
    return name.replace(os.sep, ".")
fbb1481ff1e3029fab37e547287c0809a91efb86
19,500
def set_attributes_to_descend(meta, traverse_limit):
    """Set which attributes should have values inferred from ancestral
    taxa."""
    desc_attrs = set()
    desc_attr_limits = {}
    for key, value in meta.items():
        if "traverse" in value and value["traverse"]:
            if "traverse_direction" not in value or value[
                    "traverse_direction"] in ("down", "both"):
                desc_attrs.add(key)
                if "traverse_limit" in value:
                    desc_attr_limits.update({key: value["traverse_limit"]})
                else:
                    desc_attr_limits.update({key: traverse_limit})
    return desc_attrs, desc_attr_limits
545d9e5489441bb5990bb8966aa7c42cd9f26b91
19,501
def normalize_team_names(column, dataset, fname):
    """
    Team names need to be converted from labels to numbers to make them
    work for sklearn (LabelEncoder is not possible since the normalization
    needs to be done against different naming conventions - e.g. Man United
    in the csv file vs Manchester United used by UnibetAPI causes issues).
    In contrast to the method in main.py, this method requires normalizing
    team names in the dataset.
    """
    filename = "teams/" + fname + ".txt"
    for row in range(len(dataset[column])):
        line_num = 1
        with open(filename) as uniform_team_names:
            for line in uniform_team_names:
                if dataset.iloc[row, dataset.columns.get_loc(column)] in line:
                    break
                line_num += 1
        dataset.iloc[row, dataset.columns.get_loc(column)] = line_num
    return dataset
a9cceb07a0cc6d2fab4a3ce33f4d2d4a47644403
19,502
def memory_upscale(df):
    """
    Converts float32 and float16 to float64 to make transforming possible.
    Some python functions do not work with float16.
    """
    float_columns = list(df.select_dtypes(
        include=['float64', 'float32', 'float16']).columns)
    for column in float_columns:
        df[column] = df[column].astype('float64')
    int_columns = list(df.select_dtypes(
        include=['int64', 'int32', 'int16']).columns)
    for column in int_columns:
        df[column] = df[column].astype('int64')
    return df
1b3794a438bc6cf7016e937d880c92572c931035
19,503
def get_P_Elc_audio_microsystem_with_md_listening(
        P_Elc_audio_microsystem_with_md_rtd):
    """Calculate the power consumption while listening.

    Parameters
    ----------
    P_Elc_audio_microsystem_with_md_rtd : float
        Rated power consumption, W

    Returns
    ----------
    P_Elc_audio_microsystem_with_md_listening : float
        Power consumption while listening, W
    """
    P_Elc_audio_microsystem_with_md_listening = \
        0.4 * P_Elc_audio_microsystem_with_md_rtd
    return P_Elc_audio_microsystem_with_md_listening
20e2c58ef3c019236e131d637e19f0a50acc55de
19,505