content
stringlengths
35
416k
sha1
stringlengths
40
40
id
int64
0
710k
def extract_media(request):
    """Return the full request body of a flask Request.

    Chunked uploads must be read directly from the WSGI input stream
    before any other I/O mutates application state, hence this helper.

    :param request: flask.Request, the HTTP request.
    :return: the complete media of the request.
    :rtype: str
    """
    transfer_encoding = request.environ.get("HTTP_TRANSFER_ENCODING", "")
    if transfer_encoding == "chunked":
        return request.environ.get("wsgi.input").read()
    return request.data
298bb7c5e4dddf02e12c398d55b6d388354a279e
11,769
from datetime import datetime


def time_string_one(float_time=None, fmt=None):
    """Transform a single float daytime value to a string.

    Parameters
    ----------
    float_time : float, optional
        Seconds since the epoch, interpreted as UTC. The default is None,
        which returns the time now.
    fmt : str, optional
        strftime format. The default is None, which uses
        '%Y-%m-%d %H:%M:%S.%f'.

    Returns
    -------
    str
        Datetime as string.
    """
    if fmt is None:
        fmt = '%Y-%m-%d %H:%M:%S.%f'
    if float_time is None:
        moment = datetime.now()
    else:
        moment = datetime.utcfromtimestamp(float_time)
    return moment.strftime(fmt)
aa74d63300100ae6a6d1225c36269d4029b5b26d
11,770
def _indent(level): """Returns leading whitespace corresponding to the given indentation `level`. """ indent_per_level = 4 return ' ' * (indent_per_level * level)
4391c308b59db321ef3f810c73b66e35d44566fa
11,771
import os
import fnmatch


def find_matches(sdir, pattern):
    """Walk `sdir` and find all files and dirs that match `pattern`.

    Args:
        sdir: Root directory to walk.
        pattern: fnmatch-style glob pattern.

    Returns:
        list: paths (directories and files) matching the pattern.
    """
    matches = []
    for root, dirs, files in os.walk(sdir):
        # Match against both dir and file names. The original collected
        # them into a variable named `all`, shadowing the builtin.
        for item in fnmatch.filter(dirs + files, pattern):
            matches.append(os.path.join(root, item))
    return matches
ffffcc906043606618e5ca7c2f1bee2e8243a916
11,772
def get_lr(optimizer):
    """Return the learning rate of the optimizer's first param group.

    Assumes the optimizer uses a single learning rate.
    """
    return optimizer.param_groups[0]["lr"]
84a78839e5dfb37214ac5f785ae60813eb6bae23
11,774
def decode_dram(site):
    """Decode the mode of each LUT in the slice based on the set features.

    Returns a dictionary mapping LUT position (e.g. 'A') to LUT mode.
    """
    lut_ram = {}
    lut_small = {}
    for lut in 'ABCD':
        lut_ram[lut] = site.has_feature('{}LUT.RAM'.format(lut))
        lut_small[lut] = site.has_feature('{}LUT.SMALL'.format(lut))

    di = {}
    for lut in 'ABC':
        di[lut] = site.has_feature('{}LUT.DI1MUX.{}I'.format(lut, lut))

    lut_modes = {}

    if site.has_feature('WA8USED'):
        # A 256-deep single-port RAM spans all four LUTs.
        assert site.has_feature('WA7USED')
        assert lut_ram['A']
        assert lut_ram['B']
        assert lut_ram['C']
        assert lut_ram['D']
        for lut in 'ABCD':
            lut_modes[lut] = 'RAM256X1S'
        return lut_modes

    if site.has_feature('WA7USED'):
        if not lut_ram['A']:
            # Only the upper half is RAM; A and B stay plain LUTs.
            assert not lut_ram['B']
            assert lut_ram['C']
            assert lut_ram['D']
            lut_modes['A'] = 'LUT'
            lut_modes['B'] = 'LUT'
            lut_modes['C'] = 'RAM128X1S'
            lut_modes['D'] = 'RAM128X1S'
            return lut_modes

        assert lut_ram['B']
        if di['B']:
            for lut in 'ABCD':
                lut_modes[lut] = 'RAM128X1S'
        else:
            assert lut_ram['B']
            assert lut_ram['C']
            assert lut_ram['D']
            for lut in 'ABCD':
                lut_modes[lut] = 'RAM128X1D'
        return lut_modes

    all_ram = all(lut_ram[lut] for lut in 'ABCD')
    all_small = all(lut_small[lut] for lut in 'ABCD')
    if all_ram and not all_small:
        return {'D': 'RAM64M'}
    if all_ram and all_small:
        return {'D': 'RAM32M'}

    # Remaining modes: RAM32X1S, RAM32X1D, RAM64X1S, RAM64X1D.
    remaining = set('ABCD')
    for lut in 'AC':
        if lut_ram[lut] and di[lut]:
            remaining.remove(lut)
            lut_modes[lut] = 'RAM32X1S' if lut_small[lut] else 'RAM64X1S'

    for lut in 'BD':
        if not lut_ram[lut]:
            continue
        minus_one = chr(ord(lut) - 1)
        if minus_one in remaining and lut_ram[minus_one]:
            # Pair with the neighbouring LUT as a dual-port RAM.
            remaining.remove(lut)
            remaining.remove(minus_one)
            mode = 'RAM32X1D' if lut_small[lut] else 'RAM64X1D'
            lut_modes[lut] = mode
            lut_modes[minus_one] = mode
        if lut in remaining:
            remaining.remove(lut)
            lut_modes[lut] = 'RAM32X1S' if lut_small[lut] else 'RAM64X1S'

    for lut in remaining:
        lut_modes[lut] = 'LUT'

    return lut_modes
ea1b61b1e924c6637cc7a4fe5d523939d19edb5c
11,775
import ast


def add(*arguments):
    """Build an :class:`ast.Add` node, forwarding any arguments."""
    return ast.Add(*arguments)
6e48ee2cd5a3caf34fdc9b8f6f72313ed85f93c6
11,777
def perpendicular_vector(v):
    """Compute a vector perpendicular to the 3-vector ``v``.

    Raises:
        ValueError: if ``v`` is the zero vector (x = y = z = 0 is not an
            acceptable solution).
    """
    x, y, z = v[0], v[1], v[2]
    if x == 0 and y == 0 and z == 0:
        raise ValueError("zero-vector")
    if z == 0:
        # Rotate (x, y) by 90 degrees in the XY plane.
        return [-y, x, 0]
    # Pick u = (1, 1, w) and solve v . u = 0 for w.
    return [1.0, 1.0, -1.0 * (x + y) / z]
4920eda33090a509617058c9cf392a732726d517
11,778
def get_hover_data(df):
    """Create and format the hover strings shown over the map data points.

    :param df: Pandas dataframe.
    :return: list: per-row information strings formatted for hover.
    """
    details_labels = ["needothers", "detailmed", "detailrescue"]
    hover_string_list = []
    for _, row in df.iterrows():
        info = row['location'] + "<br>" + "Phone:" + row['requestee_phone'] + "<br>"
        details = [
            label + ":" + str(row[label]).strip()
            for label in details_labels
            if row[label]
        ]
        hover_string_list.append(info + "<br>".join(details))
    return hover_string_list
cab8f2a08a3b16e254c8ebfa76196fcdcff06170
11,781
def join_with_oxford_comma(template, items, separator=','):
    """Format `template` with `items` joined by `separator` plus a space.

    NOTE(review): despite the name, no special-casing of the final item
    ("and") is performed — items are simply separator-joined.
    """
    joined = f'{separator} '.join(items)
    return template.format(joined)
ef6f8fa3eaea899db36f5ca21b8e2771550d2ca0
11,782
def clip(threshold=0.5):
    """Build the config dict describing the tensor clipping operation.

    Args:
        threshold: clipping threshold (default 0.5).

    Returns:
        dict: operation name and keyword arguments.
    """
    return {
        'name': 'scale_range',
        'kwargs': {'threshold': threshold},
    }
5b2ccc5d543173e44c8a48eec054391e7332ca35
11,783
def find_stim_channel(raw):
    """Find the appropriate stim channel from raw.

    Heuristically looks for the usual trigger channel names in priority
    order (STI101 variants first, then STI014 variants).

    Parameters
    ----------
    raw : mne.io.Raw
        The raw object.

    Returns
    -------
    str or None
        Channel name of the stimulus channel, or None when absent.
    """
    channels = raw.info.get('ch_names')
    for candidate in ('STI101', 'STI 101', 'STI 014', 'STI014'):
        if candidate in channels:
            return candidate
1a4014a09ef7050e90f29151b1ff56ac4d5abe89
11,784
def parse_chunk_header_file_range(file_range):
    """Parse a diff chunk header file range.

    Diff chunk headers have the form ``@@ -<file-range> +<file-range> @@``
    where a file range is ``<start line number>,<number of lines changed>``.

    Args:
        file_range: A chunk header file range.

    Returns:
        A tuple (range_start, range_end), adjusted so that iterating over
        [range_start, range_end) gives the changed indices.
    """
    if ',' not in file_range:
        start = int(file_range)
        return (start - 1, start)
    start_str, amount_str = file_range.split(',')
    start = int(start_str)
    amount = int(amount_str)
    if amount == 0:
        return (start, start)
    return (start - 1, start + amount - 1)
1af8c8750707c29171373ff4ed03de155d8a1cf1
11,785
from typing import Any


def raise_if_incorrect_type(obj: object, expected_type: Any):
    """Check the type of `obj`, raising when it is incorrect.

    Parameters
    ----------
    obj: object
        Any object.
    expected_type: Any
        Any class or type.

    Returns
    -------
    bool
        True, only when the type matched.

    Raises
    ------
    TypeError
        When `obj` is not an instance of `expected_type`.
    """
    if not isinstance(obj, expected_type):
        raise TypeError(f"obj {str(obj)} is not an expected type {str(expected_type)}")
    return True
923636a3ef1c3fdefbe29bbf12d522ce84acedaf
11,787
def addOneContraints(d, Ca):
    """Append to `Ca` one Robinson condition not yet satisfied by `d`.

    Scans the dissimilarity `d` and, at the first violated Robinson
    inequality whose constraint is not already recorded, appends the
    constraint and returns immediately.

    Returns:
        bool: True when a constraint was added.
    """
    addedConstraints = False
    precision = 0.0001
    for i in range(d.n - 2):
        for j in range(i + 2, d.n):
            if d[i, j] < d[i, j - 1] - precision and [i, j, i, j - 1] not in Ca:
                Ca.append([i, j, i, j - 1])
                addedConstraints = True
                return addedConstraints
            if d[i, j] < d[i + 1, j] - precision and [i, j, i + 1, j] not in Ca:
                Ca.append([i, j, i + 1, j])
                addedConstraints = True
                return addedConstraints
    return addedConstraints
d5f3f5ed9d214bac19c288f62a1ec478b02200f7
11,789
import re


def remove_puncatuation(review_str: str) -> str:
    """Strip punctuation (anything that is not a word character or
    whitespace) from a string.
    """
    return re.sub(r'[^\w\s]', '', review_str)
99381b5b6573f0a20466c5de21d7a3b3c65f6ef8
11,792
from typing import Dict
from typing import Any
from typing import Optional
from typing import Callable


def try_get_from_dict(data: Dict[str, Any], key: str, original_value: Any,
                      conversion: Optional[Callable[[Any], Any]] = None) -> Any:
    """Try to get a value from a dict, with fallback and optional conversion.

    Returns None when the key is missing, `original_value` when the stored
    value is None, otherwise the stored value (passed through `conversion`
    when one is given).
    """
    if key not in data:
        return None
    value = data[key]
    if value is None:
        return original_value
    return value if conversion is None else conversion(value)
354be2365af69dd5169aa3d74bf45cf8d095ed4d
11,794
import pathlib


def get_root_dir():
    """Return the root directory, as a :class:`pathlib.Path`.

    Smart-detects the best location to host PythonUp: prefers
    ``~/Library`` (which likely only exists on Macs); when it does not
    exist, falls back to the Linux standard ``~/.local/share``.
    """
    home = pathlib.Path.home()
    library = home / 'Library'
    if library.exists():
        return library / 'PythonUp'
    return home / '.local' / 'share' / 'pythonup'
583bd479367aabdc89eabd084fac41b39751a55e
11,796
def default_trans_model():
    """Return the default transaction model."""
    addresses = {
        'SingleLocation': {
            'city': 'Irvine',
            'country': 'US',
            'line1': '123 Main Street',
            'postalCode': '92615',
            'region': 'CA',
        }
    }
    lines = [{
        'amount': 100,
        'description': 'Yarn',
        'itemCode': 'Y0001',
        'number': '1',
        'quantity': 1,
        'taxCode': 'PS081282',
    }]
    return {
        'addresses': addresses,
        'commit': False,
        'companyCode': 'DEFAULT',
        'currencyCode': 'USD',
        'customerCode': 'ABC',
        'date': '2017-04-12',
        'description': 'Yarn',
        'lines': lines,
        'purchaseOrderNo': '2017-04-12-001',
        'type': 'SalesInvoice',
    }
e55c453be31a4309f304f29081dfb109932ec28c
11,797
from typing import Optional import re def split_str(source, sep: Optional[str] = None): """ warning: does not yet work if sep is a lookahead like `(?=b)` usage: >> splitStr('.......A...b...c....', sep='...') <generator object splitStr.<locals>.<genexpr> at 0x7fe8530fb5e8> >> list(splitStr('A,b,c.', sep=',')) ['A', 'b', 'c.'] >> list(splitStr(',,A,b,c.,', sep=',')) ['', '', 'A', 'b', 'c.', ''] >> list(splitStr('.......A...b...c....', '\.\.\.')) ['', '', '.A', 'b', 'c', '.'] >> list(splitStr(' A b c. ')) ['', 'A', 'b', 'c.', ''] """ sep = sep or "\s+" if sep == '': return iter(source) # return (_.group(1) for _ in re.finditer(f'(?:^|{sep})((?:(?!{sep}).)*)', source)) # alternatively, more verbosely: regex = f'(?:^|{sep})((?:(?!{sep}).)*)' for match in re.finditer(regex, source): yield match.group(1)
b82d1693cd1fc14bec2c566408b100fda6a1a27c
11,799
def memoize(obj):
    """Memoize function parameters and their output.

    The cache key includes keyword arguments; the original keyed on
    positional args only, returning stale results when calls differed
    solely in kwargs. All arguments must be hashable.
    """
    cache = {}

    def memoizer(*args, **kwargs):
        key = (args, frozenset(kwargs.items()))
        if key not in cache:
            cache[key] = obj(*args, **kwargs)
        return cache[key]
    return memoizer
2960010d1cc3f2851e28056ed57694a63ce33dcc
11,800
def as_float(s):
    """Return a float parsed from a string; falsy input yields 0.0."""
    return float(s) if s else 0.0
de5e2074b19f723b36c676f20904fa53a4878aa5
11,801
from typing import Callable
from typing import Iterable
from typing import Dict


def count_valid_passports(
    validation_func: Callable, passports: Iterable[Dict[str, str]]
) -> int:
    """Count the valid passports (those accepted by `validation_func`)."""
    total = 0
    for passport in passports:
        total += validation_func(passport)
    return total
230fd67e9e3bc2b999a5b44141d51f7507de4062
11,802
import os


def get_file_path_from_import_definition(import_definition: str) -> str:
    """Determine the file path of an imported file from a Python import
    definition.

    :param import_definition: The line which contains the Python import
        definition, e.g. ``"import pkg.sub.mod"``.
    :returns: The file path of the imported file, e.g. ``"pkg/sub/mod.py"``.
    """
    dotted = import_definition.split(" ")[1]
    path = os.path.dirname(dotted.replace(".", "/"))
    if path == "/":
        path = ""
    # BUG FIX: the module filename was computed but then overwritten with
    # a placeholder that the return value also hard-coded; build the real
    # "<module>.py" name and use it.
    filename = f"{dotted.split('.')[-1]}.py"
    return f"{path}/{filename}"
9da0b336ab29e61462fc2a8c9fdcb22475656e36
11,803
import json


def json_object_to_str(json_obj):
    """Serialize a JSON object into a JSON string.

    :param json_obj: JSON-serializable object.
    :return: the serialized JSON string (non-ASCII characters kept as-is).
    """
    return json.dumps(json_obj, ensure_ascii=False)
935ebc10d412c803d52493cd648cc0c010adb07d
11,804
def power_of(x):
    """Return a closure that raises `x` to a given exponent."""
    def raise_to(exponent):
        return x ** exponent
    return raise_to
69900b32c50c73365cf6c492f7c0e3b2f5641e01
11,806
def get_positions(console):
    """Compute button display positions for the EV3 LCD console.

    Parameter:
    - `console` (Console): an instance of the EV3 Console() class.

    Returns a dictionary keyed by button name with horizontal alignment
    and column/row location to show each choice on the LCD.
    """
    mid_row = 1 + console.rows // 2
    mid_col = 1 + console.columns // 2
    # (horiz_alignment, col, row)
    return {
        "up": ("C", mid_col, 1),
        "right": ("R", console.columns, mid_row),
        "down": ("C", mid_col, console.rows),
        "left": ("L", 1, mid_row),
        "enter": ("C", mid_col, mid_row),
    }
ba9df0c2ea189e146d5cf123354c283dd6e2b280
11,807
def generate_patch_transfer_job_message(messages, job):
    """Generate the Apitools patch message for a transfer job.

    Moves the project ID onto the request and clears fields that the API
    rejects on re-submission.
    """
    project_id = job.projectId
    job.projectId = None

    if job.schedule == messages.Schedule():
        # Jobs returned by the API are populated with their user-set
        # schedule or an empty schedule; empty schedules cannot be
        # re-submitted to the API.
        job.schedule = None

    update_mask = (
        'description,notification_config,schedule,status,transfer_spec')
    return messages.StoragetransferTransferJobsPatchRequest(
        jobName=job.name,
        updateTransferJobRequest=messages.UpdateTransferJobRequest(
            projectId=project_id,
            transferJob=job,
            updateTransferJobFieldMask=update_mask,
        ))
5330f3e248ad57b69045b54f1c46c32219d1dde5
11,810
import logging


def parse_requirements(file_):
    """Parse a requirements formatted file.

    Traverse a string until a delimiter is detected, then split at said
    delimiter, get module name by element index, create a dict consisting
    of module:version, and add dict to list of parsed modules.

    Args:
        file_: File to parse.

    Raises:
        OSError: If there's any issue accessing the file.

    Returns:
        list: dicts of module name/version, excluding comments.
    """
    modules = []
    # For the dependency identifier specification, see
    # https://www.python.org/dev/peps/pep-0508/#complete-grammar
    delim = ["<", ">", "=", "!", "~"]

    try:
        # `with` guarantees the handle is closed (replaces manual close).
        with open(file_, "r") as f:
            data = [line.strip() for line in f.readlines() if line != "\n"]
    except OSError:
        logging.error("Failed on file: {}".format(file_))
        raise

    # Keep only lines starting with a module name. The extra emptiness
    # check fixes an IndexError on whitespace-only lines, which strip to
    # "" and made `line[0]` blow up.
    data = [line for line in data if line and line[0].isalpha()]

    for line in data:
        # Modules without a version specifier.
        if not any(d in line for d in delim):
            modules.append({"name": line, "version": None})
            continue
        for ch in line:
            if ch in delim:
                parts = line.split(ch)
                module = {"name": parts[0],
                          "version": parts[-1].replace("=", "")}
                if module not in modules:
                    modules.append(module)
                break
    return modules
958ff91957667e3783e9971b2f2d0c04e4cd2a61
11,811
def duplicate(somelist):
    """Return the first element of `somelist` that repeats an earlier one.

    Uses a set of seen elements for O(1) membership tests; the original
    scanned a growing list, making the whole pass O(n^2).

    :param somelist: iterable of hashable items.
    :return: the first duplicated item, or None when all are unique.
    """
    seen = set()
    for item in somelist:
        if item in seen:
            return item
        seen.add(item)
    return None
aad005e7a2b2fe2f4874673dd93ad6eaf982b13a
11,813
def get_handler_filename(handler):
    """Shortcut to get the filename from the handler string.

    :param str handler: A dot delimited string representing the
        `<module>.<function name>`; the module part may itself be dotted
        (e.g. ``pkg.module.func``).
    """
    # rsplit keeps dotted module paths intact; the original split('.')
    # raised ValueError whenever the string held more than one dot.
    module_name, _ = handler.rsplit('.', 1)
    return '{0}.py'.format(module_name)
4e6d464e83e4a6557d03ebabbb4f275fc4784d8f
11,814
def parseEPSGCode(string, parsers):
    """Parse an EPSG code using the provided sequence of EPSG parsers.

    Returns the first non-None parser result, or None when none match.
    """
    results = (parse(string) for parse in parsers)
    return next((epsg for epsg in results if epsg is not None), None)
b85e2d69952cc16d7f5f3b9e22009d14432a014f
11,816
import re


def get_value_for_key(text_buffer, key):
    """Parse the value from the first `key=value` line in `text_buffer`.

    Returns "" when the key is not present.
    """
    match = re.search("%s=(?P<value>.*)" % key, text_buffer)
    if match is None:
        return ""
    return match.group('value')
86865f806adc3fa5f78671d8a686389cc5f0f353
11,817
import pathlib def _is_valid_doc_version(folder: pathlib.Path) -> bool: """ Test if a version folder contains valid documentation. A version folder contains documentation if: - is a directory - contains an `index.html` file """ if not folder.is_dir(): return False if not (folder / "index.html").exists(): return False return True
5dadea657b717373e8e84360f1f3a9e8b8adc1f8
11,819
def gen_explicit_map_one_delta(params_pt, args_pt, k_ms, k_pt):
    """Generate explicit_map for the mapping relationship `1 - k_ms = k_pt`.

    Args:
        params_pt (dict): Params for APIPt.
        args_pt (dict): Args for APIPt.
        k_ms: Key on the MindSpore side.
        k_pt: Key on the PyTorch side.

    Returns:
        dict, map between frames.
    """
    raw = args_pt[k_pt] if k_pt in args_pt else params_pt[k_pt]
    value = raw.strip()

    def _is_number(text):
        try:
            float(text)
        except ValueError:
            return False
        return True

    if _is_number(value):
        return {k_ms: str(1 - float(value))}
    return {k_ms: "1.0 - " + value}
841d81c0095dadf7e62c32fca5a3d9e6007b6032
11,820
def strip_comments(l: str) -> str:
    """Strip any ``#`` comments from a line.

    :param str l: A string line, which may contain ``#`` comments.
    :return str clean_line: The line ``l`` - stripped of any comments and
        excess whitespace.
    """
    code, _sep, _comment = l.partition('#')
    return code.strip()
0587cb5e2a986c9d0bb4c610c289186b8c184942
11,821
def shengbte_code(aiida_local_code_factory):
    """Get a shengbte code (backed by the `diff` executable)."""
    return aiida_local_code_factory(executable='diff', entry_point='shengbte')
de9f6bba763c4bfe7f6a1153927f73542a83dc6a
11,822
import mimetypes


def get_content_type(content, name):
    """Return the content type, guessing from `name` when unset.

    Uses the file object's already-set `content_type` when present;
    otherwise falls back to a mimetypes guess based on the filename.
    """
    if hasattr(content.file, "content_type"):
        return content.file.content_type
    guessed_type, _encoding = mimetypes.guess_type(name)
    return guessed_type
838d11c2660e0d76c813961498ab0ebcc25c1f3c
11,823
def in_or_none(x, L):
    """Check if `x` is in list `L`, or `L` is None."""
    if L is None:
        return True
    return x in L
bc3e4ef5a8daf7669e7430940e361d4c7ec1a240
11,825
def _clean_timeformat(text): """returns an ISO date format from a funny calibre date format""" if text.endswith("+00:00"): text = text[:-6] + "+0000" if text[10] == " ": text = text.replace(" ", "T", 1) return text
03f908e8cc85485496d627248e9dfce2e2ab08f2
11,827
from io import BytesIO


def image2str(img):
    """Converts PIL Image object to binary data.

    @param img: PIL Image object
    @return: binary data (bytes)
    """
    # BUG FIX: JPEG encoding writes bytes; the original StringIO (a text
    # buffer) raises TypeError under Python 3 — use BytesIO.
    buffer = BytesIO()
    img.save(buffer, "JPEG")
    return buffer.getvalue()
0ba3958475018f30ccc25a23d80f384dd75552c7
11,828
def apply(R, point):
    """Apply the rotation R (a flat, 9-element matrix) to a 3-point."""
    x, y, z = point[0], point[1], point[2]
    return (
        R[0] * x + R[3] * y + R[6] * z,
        R[1] * x + R[4] * y + R[7] * z,
        R[2] * x + R[5] * y + R[8] * z,
    )
f6f4e5431e92dac9711f80da9ca0dc75bae793fb
11,829
def auto_delete_file_on_change(sender, instance, **kwargs):
    """Signal handler intended to delete the old file from the filesystem
    when the corresponding `MediaFile` object is updated with a new file.

    Returns False for unsaved instances (no pk, so nothing to compare).
    NOTE(review): the deletion logic itself is absent here — looks
    truncated; confirm against the original handler.
    """
    if not instance.pk:
        return False
903224f8070f61055e4068843eed0bb3360c94b6
11,830
def update_sub(orig_df, pivot_col=None, pivot_val=None, new_col=None, new_val=None):
    """Set `new_col` to `new_val` on rows where `pivot_col` == `pivot_val`.

    Operates on (and returns) a copy of `orig_df`; the input is untouched.
    """
    result = orig_df.copy()
    mask = result[pivot_col] == pivot_val
    result.loc[mask, new_col] = new_val
    return result
ee52c8bbc9c25e5198093e9f8435725acfb98170
11,831
def buffer(ft):
    """Buffer an input feature by 10km and return its bounds."""
    buffered = ft.buffer(10000)
    return buffered.bounds(5)
2eb4eeadbbd47b2527c9ac63f64e21ae3593560c
11,832
def alpha_jacobian(**opts):
    """The jacobian of the change of variables between virtuality and the
    alpha parameter of the rescaling collinear mapping.
    """
    Q, pC, qC = opts['Q'], opts['pC'], opts['qC']
    return Q.dot(qC) / Q.dot(pC)
ce83140c2266dcfdacc9753dde6d9aca5fae53f2
11,833
def has_new_triggerword(predictions, chunk_duration, feed_duration, threshold=0.5):
    """Detect a new trigger word in the latest chunk of input audio.

    Looks for a rising edge of the thresholded predictions within the
    portion of the prediction window covering the last chunk.

    Argument:
    predictions -- predicted labels from model
    chunk_duration -- time in second of a chunk
    feed_duration -- time in second of the input to model
    threshold -- threshold for probability above a certain to be
        considered positive

    Returns:
    True if new trigger word detected in the latest chunk
    """
    flags = predictions > threshold
    n_chunk_samples = int(len(flags) * chunk_duration / feed_duration)
    chunk_flags = flags[-n_chunk_samples:]
    previous = chunk_flags[0]
    for current in chunk_flags:
        if current > previous:
            return True
        previous = current
    return False
4a3d7b82e94fac7ee26b18dd848da4ef79378d30
11,834
def _password_repr(val: str) -> str: """Change representation of password to hide its content.""" del val return "'********'"
f7e5d653e874b023888d2f13502fc2ff9d894161
11,835
def beaufort(wind):
    """Classify a wind speed (m/s) on the Beaufort scale.

    Thresholds and names per
    https://en.wikipedia.org/wiki/Beaufort_scale

    Fixes the original table: force 9's bound duplicated 28.5 (making it
    unreachable, it should be 20.8 "strong gale"), force 10 is "storm"
    (not "violent storm"), and "modere" -> "moderate".

    :param wind: wind speed in metres per second.
    :return: "<force number> <description>" string.
    """
    # (lower bound in m/s, label), strongest first.
    scale = (
        (28.5, "11 violent storm"),
        (24.5, "10 storm"),
        (20.8, "9 strong gale"),
        (17.2, "8 gale"),
        (13.9, "7 high wind"),
        (10.8, "6 strong breeze"),
        (8.0, "5 fresh breeze"),
        (5.5, "4 moderate breeze"),
        (3.4, "3 gentle breeze"),
        (1.6, "2 light breeze"),
        (0.3, "1 light air"),
    )
    if wind >= 32.7:
        return "12 hurricane"
    for bound, label in scale:
        if wind > bound:
            return label
    return "0 calm"
4fdbe110ea6b94bdeccbc5c1a4ad39950d35c479
11,836
import pandas


def stackDF(df, column_names=["column1", "column2", "values"], symmetric=True):
    """Stack a similarity dataframe into a (column1, column2, values) frame.

    Args:
        df: pandas data frame.
        column_names: names of the returned columns
            (default ["column1","column2","values"]).
        symmetric (boolean): true if symmetric, in which case mirrored
            duplicates are removed (default = True).

    Returns:
        pandas data frame: column1, column2, values df.
    """
    n_rows, n_cols = df.shape

    col1, col2, vals = [], [], []
    for name in df.columns:
        col1 += n_rows * [name]
        col2 += list(df.index)
        vals += list(df[name])
    assert len(vals) == n_rows * n_cols

    stacked = pandas.DataFrame(
        dict(zip(column_names, [col1, col2, vals])), columns=column_names)

    if symmetric:
        # Drop mirrored pairs: key each row by its sorted (col1, col2)
        # pair, built from a "@"-joined scratch column.
        pair_key = stacked[column_names[0]] + "@" + stacked[column_names[1]]
        stacked["dup_column"] = pair_key.map(lambda s: str(sorted(s.split("@"))))
        stacked.drop_duplicates(['dup_column'], inplace=True)
        stacked.drop('dup_column', axis=1, inplace=True)
        # Sanity check: kept exactly one triangle plus the diagonal.
        assert (len(vals) - len(df.columns)) / 2 + len(df.columns) == stacked.shape[0]
    return stacked
d328a77f9bcf411a5ed1dbdd7f7328dcc675c23c
11,837
import subprocess


def rsync_file(infile, outdir):
    """Rsyncs a file from the correlator machines to dsastorage.

    Parameters
    ----------
    infile : str
        The sourcefile string, e.g.
        'corr01.sas.pvt:/home/user/data/fl_out.1.5618974'
    outdir : str
        The destination string, e.g. '/home/user/data/'

    Returns
    -------
    str
        The full path to the rsynced file in its destination.
    """
    command = (
        '. ~/.keychain/lxd110h23-sh ; '
        'rsync -avvP --inplace {0} {1}'.format(infile, outdir)
    )
    process = subprocess.Popen(
        command,
        stdout=subprocess.PIPE,
        stderr=subprocess.STDOUT,
        shell=True
    )
    proc_stdout = str(process.communicate()[0].strip())
    print(proc_stdout)
    fname = infile.split('/')[-1]
    return '{0}{1}'.format(outdir, fname)
6d631685b34077c15ba1aa4ace1c1ed277cbbdbc
11,838
def format_cpus(n):
    """Format `n` as a number of CPUs, with one decimal place."""
    return f'{n:.1f}'
f28f8d24826341aab70347679920dc68f0b394b0
11,839
def from_str_to_bool(string):
    """Convert a string to a boolean value.

    # Arguments
    - string: string, either one of these values: "true", "false"
      (case-insensitive; anything other than "true" yields False).
    """
    return string.lower() == 'true'
fea84bdebf9363d754d61755c7a8589a0b2b4950
11,841
def callback(f, *a):
    """Return a zero-argument closure that calls `f` with the given args.

    Handles any number of parameters.
    Usage: callback(func, arg1, arg2, ..., argn)
    """
    def invoke():
        return f(*a)
    return invoke
a08188ce01bf887fe7c8c58c300c0ca63ea6fc82
11,842
def _intersect_items(baselist, comparelist): """Return matching items in both lists.""" return list(set(baselist) & set(comparelist))
393e31d13dc63656167505068bcb6fee199d4c06
11,843
def bin_to_int(bin_list, inverse=True):
    """Given an arbitrary-length list of binary values, calculate the
    integer value.

    Parameters
    ----------
    bin_list : list of int
        List of binary values for calculating integer value.
    inverse : bool, optional
        If true (default) the first element is taken as the LSB;
        if false, the first element is taken as the MSB.

    Returns
    -------
    int
        Integer value calculated from bin_list.
    """
    ordered = list(reversed(bin_list)) if inverse else list(bin_list)
    result = 0
    for bit in ordered:
        result = (result << 1) | bit
    return int(result)
0ab74bafe4d045081732f10733704d7bbeb4a9ea
11,847
def get_chem_names(particles):
    """Create a list of chemical names for the dispersed phase particles.

    Reads the composition attribute of each particle in a `particles` list
    and compiles a unique list of chemical names across soluble particles.

    Parameters
    ----------
    particles : list of `Particle` objects
        List of `SingleParticle`, `PlumeParticle`, or
        `bent_plume_model.Particle` objects describing each dispersed
        phase in the simulation.

    Returns
    -------
    chem_names : str list
        List of the chemical composition of particles undergoing
        dissolution in the `particles` list.
    """
    chem_names = []
    for particle in particles:
        # Only soluble particles contribute to dissolution chemistry.
        if not particle.particle.issoluble:
            continue
        for chem in particle.composition:
            if chem not in chem_names:
                chem_names.append(chem)
    return chem_names
7b1a462732b6bdd389fb0d9c2b80aab5a5f385d5
11,853
import requests


def get_APOD(date):
    """Request NASA's Astronomy Picture of the Day API for `date`.

    :param date: date string accepted by the APOD API.
    :return: decoded JSON response.
    """
    url = 'https://api.nasa.gov/planetary/apod'
    response = requests.get(url, params={'api_key': 'DEMO_KEY', 'date': date})
    return response.json()
45cdbadd7c67a71033e506fdc8a2358e8f7d8c09
11,854
import re


def replace_with_dict(s, d):
    """Replace whole-word occurrences of the dict keys with their values.

    Parameters
    ----------
    s: string
    d: dict

    Returns
    -------
    string (or None when `s` is None)

    Examples
    --------
    >>> replace_with_dict("asdf qwer", {"asdf": "foo"})
    'foo qwer'
    """
    if s is None:
        return None
    alternatives = '|'.join(d.keys())
    pattern = re.compile(r'\b(' + alternatives + r')\b')
    return pattern.sub(lambda m: d[m.group()], s)
c7e185860eb3e13f65cdf614301c1a5afed894f9
11,855
import numpy as np


def mape(predictions, actuals):
    """Calculate Mean Absolute Percentage Error (MAPE).

    Reported as a percentage.

    Parameters
    ----------
    predictions: numpy array of predicted values
    actuals: numpy array of actual values

    Returns
    -------
    float of calculated MAPE
    """
    relative_errors = np.abs((actuals - predictions) / actuals)
    return np.mean(relative_errors) * 100
560b5960cfdf870b2d9954419140d85d3d8a5a27
11,856
def env_cfg_path() -> str:
    """Get an L5 environment config file from artefacts.

    Note: the scope of this fixture is "session" -> one per test session.

    :return: path to the L5Kit gym-compatible environment config yaml.
    """
    return "./l5kit/tests/artefacts/gym_config.yaml"
d762e1ee39a54fab6cd3f00961a552801eeddcc6
11,857
def to_timestamp(value):
    """Convert datetime to a timestamp string MediaWiki would understand.

    datetime.isoformat() wouldn't work because it sometimes produces
    "+00:00", which MW does not support. Also sanity-checks that the
    value is a UTC time.

    :type value: datetime
    :rtype: str
    :raises ValueError: when `value` carries a non-UTC timezone.
    """
    offset = None if value.tzinfo is None else value.tzinfo.utcoffset(value)
    if offset:
        raise ValueError('datetime value has a non-UTC timezone')
    return value.strftime('%Y-%m-%dT%H:%M:%SZ')
4acb92d65060ada4e0d783c1eef5b108bd81f88f
11,858
def provide_help():
    """Send an api syntax error status and show the correct usage of the
    api.

    Return: a dictionary with keys 'syntax', 'status', 'description'
    specifying error & help.
    """
    syntax_str = 'HOST:PORT/geocode?address=ADDRESS'
    description_str = (
        'Use the correct syntax in which, '
        ' HOST is the host server ip address - '
        ' PORT is the port to use - '
        ' ADDRESS is the address to be geocoded.'
    )
    return {
        'syntax': syntax_str,
        'status': 'api syntax error',
        'description': description_str,
    }
cf76a136d6b7e64a06f8a740972a391d8945b03d
11,860
def rmask_byte(num_mask_bits, value):
    """Apply a right-justified mask of `num_mask_bits` bits to an unsigned
    integer representing a single byte value.

    >>> rmask_byte(3,list(b'\\xff')[0])
    7
    >>> bin(7)
    '0b111'
    """
    mask = (1 << num_mask_bits) - 1
    return mask & value
156fe1797590c5418a28db13b1064dfdb4e7bc4c
11,861
def verify_token(token: str) -> str:
    """Mock google and fb auth: the token is simply the (stripped) email.

    :param token: email
    :return: email
    """
    return token.strip()
806a6e5cb65bf6919afda7cc13bc5fa8e521a750
11,862
def data_topology_get():  # noqa: E501
    """data_topology_get

    returns topology.Topology # noqa: E501

    :rtype: TopologyTopology
    """
    # Auto-generated stub: not implemented yet.
    return 'do some magic!'
3fecb83fcea17cc8c963d3c47dd69416a0455838
11,863
def printif(string: str, pre_sep: str = " ", post_sep: str = " ") -> str:
    """Return `string` wrapped in separators, or "" when it is null/empty."""
    if string is None or string == "":
        return ""
    return f"{pre_sep}{string}{post_sep}"
cfd7272ae106fda7c0f4e6705875a3656d90d52c
11,864
import argparse


def sliding_tokenize(article: str, tokenizer, args: argparse.Namespace, task=None) -> tuple:
    """Tokenize `article` with a sliding window (overflowing tokens kept).

    Returns the tuple (input_ids, attention_mask).
    """
    encoded = tokenizer(
        article,
        max_length=args.max_seq_length,
        truncation=True,
        stride=args.stride,
        return_overflowing_tokens=True,
        padding=True,
    )
    return encoded["input_ids"], encoded["attention_mask"]
425947c0a44f64c3bde0d3c725b015d636354df6
11,865
from io import StringIO


def GetLines(data, strip=False):
    """Returns a list of all lines in data.

    Args:
        strip: If True, each line is stripped.
    """
    lines = StringIO(data).readlines()
    if strip:
        return [line.strip() for line in lines]
    return lines
8ce3a3e829eed590ba0978a659ff1d3b063dc50d
11,866
def has_an_update(page_data: dict, tracker_data: dict) -> bool:
    """Checks if there was an update comparing two story mappings of the
    same story.

    Arguments:
        page_data {dict} -- Requested story mapping, from `get_story_data`.
        tracker_data {dict} -- Story mapping from the tracked list.

    Returns:
        bool -- Whether or not there was an update.
    """
    watched_keys = ("words", "chapter-amt", "last-update-timestamp")
    return any(page_data[key] > tracker_data[key] for key in watched_keys)
a7374a167c368f85bdab6ec68dbea044c801ef90
11,869
def meaning(of=None):
    """The meaning of life?"""
    return 42 if of == "life" else None
ee85e2cd0caa1c0780e11eb3759b8e0d74031209
11,870
import codecs import json from typing import OrderedDict def _load_json_result(result_file): """ Load the result file as utf-8 JSON and strip test_dir prefix from locations. Sort the results by location. """ with codecs.open(result_file, encoding='utf-8') as res: scan_result = json.load(res, object_pairs_hook=OrderedDict) if scan_result.get('scancode_version'): del scan_result['scancode_version'] scan_result['files'].sort(key=lambda x: x['path']) return scan_result
c313a2bbf000cb3e3b48ea9314f48188243ecf9b
11,871
from typing import Callable
from typing import Any
from typing import OrderedDict


def multimap(f: Callable, *xs: Any) -> Any:
    """Apply `f` leaf-wise across identically-structured trees.

    Each x in xs is a tree of the same dict structure; `f` receives the
    corresponding leaf from every tree, and its return value becomes the
    leaf of the resulting tree (keys are emitted in sorted order).
    """
    def _is_mapping(node: Any) -> bool:
        return isinstance(node, dict) or isinstance(node, OrderedDict)

    head = xs[0]
    if not _is_mapping(head):
        return f(*xs)
    assert all(_is_mapping(tree) for tree in xs)
    assert all(tree.keys() == head.keys() for tree in xs)
    return {key: multimap(f, *(tree[key] for tree in xs)) for key in sorted(head.keys())}
7c81deab0875b2396a2127b504a4ce8773ab356c
11,873
def get_atom_target_index(target_index, input_index, resindices):
    """Get target indices of input atoms.

    Arguments:
        target_index {array} -- target (native) residue index
        input_index {array} -- input residue index
        resindices {array} -- target residue index of input atoms

    Returns:
        list -- target indices of input atoms
    """
    input_to_target = dict(zip(input_index, target_index))
    return [input_to_target[residue] for residue in resindices]
5fd414cc147e1fe5c6c51b6c6e655a11b8bec8db
11,875
def array_from_where(process, where):
    """Get the array of scannables selected by `where`.

    'all' or 'pages' -> process pages; 'modules' -> all modules; any other
    non-None value selects modules whose name matches case-insensitively;
    None falls back to the pages.
    """
    if where in ('all', 'pages'):
        return process.pages
    if where == 'modules':
        return process.modules
    # PEP 8: `is not None` instead of `not ... is None`; lowercase once.
    if where is not None:
        wanted = where.lower()
        return [module for module in process.modules
                if module.get_name().lower() == wanted]
    return process.pages
9e312cbd2b890ec976ba42f524ee57d501c30efa
11,876
def inside_string(i, j, string_ranges):
    """Return True if the range [i, j] is contained within some
    (start, end) range in `string_ranges`."""
    return any(start <= i and j <= end for start, end in string_ranges)
aea2ac8a777914c2e86aea40cf43f4d01b2ef8b0
11,877
def cast_ext(ext: str) -> str:
    """Normalize a file extension: lowercase, with 'jpeg' unified to 'jpg'."""
    lowered = ext.lower()
    return 'jpg' if lowered == 'jpeg' else lowered
7b4894e035a3b785b017c9794e494ff7b78e7c1b
11,879
def filter_reply_msg(top_indices, top_cosine, cosine_cut_off, count_cutoff):
    """
    Gate the candidate reply by cosine similarity.

    Actual behavior: when `top_cosine` is above `cosine_cut_off` the
    candidates are DROPPED (empty result, count 0); otherwise `top_indices`
    is kept verbatim with `top_cosine` and count 1.

    NOTE(review): this looks inverted relative to the original intent of
    "filter out responses with cosine similarity < cutoff" — confirm with
    the caller whether low or high similarity should be kept.
    NOTE(review): `count_cutoff` is accepted but never used.

    :type top_indices: sequence of candidate indices
    :type float: top_cosine
    :type float: cosine_cut_off
    :type int: count_cutoff
    :rtype list: filtered_msg
    :rtype list: filtered_msg_cosine
    :rtype int: count
    """
    filtered_msg = []
    filtered_msg_cosine = []
    count = 0
    if top_cosine > cosine_cut_off:
        # redundant: count is already 0 at this point
        count = 0
    else:
        filtered_msg = top_indices
        filtered_msg_cosine.append(top_cosine)
        count += 1
    return filtered_msg, filtered_msg_cosine, count
4915c71f64cc2a676a3f7da458a73ff8a165aee0
11,881
def low_frequency_accuracy(sent_test_tags, sent_pred_tags, counter):
    """
    Compute entity-prediction accuracy bucketed by entity frequency.

    sent_test_tags: per-sentence gold entities, e.g.
        [[{'start_idx': 7, 'end_idx': 12, 'text': '東芝キヤリア'}, ...], ...]
    sent_pred_tags: per-sentence predicted entities in the same format.
    counter: {'1': [texts seen once], '2': [texts seen twice]}; anything
        else counts as "more".

    Totals are counted from the gold tags; a prediction is correct when its
    start_idx, end_idx and text all match a gold tag in the same sentence.
    Returns a formatted summary string of the three bucketed accuracies.
    """
    # NOTE(review): this dict is dead — `accuracy` is rebound to a list below.
    correct = {'once': 0, 'twice': 0, 'more': 0}
    total = {'once': 0, 'twice': 0, 'more': 0}
    accuracy = {'once': 0, 'twice': 0, 'more': 0}
    # Count gold entities per frequency bucket.
    for test_tags in sent_test_tags:
        for test_tag in test_tags:
            if test_tag['text'] in counter['1']:
                total['once'] += 1
            elif test_tag['text'] in counter['2']:
                total['twice'] += 1
            else:
                total['more'] += 1
    # Count exact-match predictions per frequency bucket.
    for test_tags, pred_tags in zip(sent_test_tags, sent_pred_tags):
        for pred_tag in pred_tags:
            for test_tag in test_tags:
                if pred_tag['start_idx'] == test_tag['start_idx'] and pred_tag['end_idx'] == test_tag['end_idx'] and pred_tag['text'] == test_tag['text']:
                    if pred_tag['text'] in counter['1']:
                        correct['once'] += 1
                    elif pred_tag['text'] in counter['2']:
                        correct['twice'] += 1
                    else:
                        correct['more'] += 1
    # NOTE(review): raises ZeroDivisionError when a bucket has no gold
    # entities — confirm inputs always populate every bucket.
    accuracy = [correct[key] / total[key] for key in total.keys()]
    result = "once accuracy: {:.4f}, twice accuracy: {:.4f}, more times: {:.4f}".format(accuracy[0], accuracy[1], accuracy[2])
    return result
49d40fd41ace2a25be35d0a2d764bd23b4cad0b8
11,883
from typing import List
from typing import Tuple


def count_chunks(alignment: List[Tuple[int, int]]) -> int:
    """
    Find the minimum number of chunks the alignment can be grouped into.

    A new chunk starts whenever a (hypothesis, reference) pair is not
    exactly adjacent (offset of 1 in both coordinates) to the previous pair.
    """
    chunks = 0
    prev_h, prev_r = -2, -2
    for h, r in sorted(alignment):
        adjacent = abs(prev_h - h) == 1 and abs(prev_r - r) == 1
        if not adjacent:
            chunks += 1
        prev_h, prev_r = h, r
    return chunks
41429e2e725b32edadc066ce4a2748cc3933cc50
11,884
from typing import Any
from typing import List
from typing import Optional


def verify_trim(trims: Any) -> List[Optional[int]]:
    """Coerce a trim spec to a list (mainly to satisfy mypy).

    A tuple is converted to a list; anything else yields [None, None].
    """
    if isinstance(trims, tuple):
        return list(trims)
    return [None, None]
1c3f738fd0b77388999c5de34d71ead90d39543e
11,885
def get_reference_output_files(reference_files_dict: dict, file_type: str) -> list:
    """
    Returns the output files matching `file_type` from reference files.

    Args:
        reference_files_dict: A validated dict model from reference
        file_type: a file type string, e.g. vcf, fasta

    Returns:
        list of 'output_file' values whose entry matches `file_type`
    """
    return [
        entry['output_file']
        for entry in reference_files_dict.values()
        if entry['file_type'] == file_type
    ]
7050c39a8116f8874dbc09bcf7ff2908dcd13ff8
11,886
def close_noscript(tag):
    """Return the closing tags needed to escape an enclosing noscript.

    Walks up from `tag`, emitting one closing tag per ancestor until (and
    including) the nearest noscript; returns "" when there is none.
    """
    if not tag.findParent("noscript"):
        return ""
    parts = []
    node = tag.parent
    while True:
        parts.append("</{0}>".format(node.name))
        if node.name == "noscript":
            break
        node = node.parent
    return "".join(parts)
8082f609ff4c98059d728eeed8fb1f87a5fda48f
11,887
import hashlib def testfunc(res, i): """Return a hash of the index plus the other results.""" m = hashlib.md5() m.update(str(i).encode('utf-8')) for r in res: m.update(r.encode('utf-8')) return m.hexdigest()
f19a38d99bf50c33614134dec0d8184f35b27d60
11,889
def clasificar(a1: float, a2: float, a3: float) -> str:
    """Classify a triangle by its sides.

    Returns 'Equilatero' when all three sides are equal, 'Isóceles' when
    exactly two are, and 'Escaleno' otherwise.
    """
    if a1 == a2 == a3:
        return "Equilatero"
    if a1 == a2 or a1 == a3 or a2 == a3:
        return "Isóceles"
    return "Escaleno"
e44104b58ab5fa24ab37831920e4d1c855eb25f3
11,890
def pretty_print_prime_rules(primes):
    """Prints a pyboolnet prime dictionary as Boolean rules.

    The output format is of the form: A* = B & C | !D. Constant rules
    print as 'A* = 0' / 'A* = 1'. Returns "" when `primes` is None.

    Parameters
    ----------
    primes : pyboolnet primes dictionary
        Update rules to print.
    """
    if primes is None:
        return ""
    for node, entry in primes.items():
        if entry[1] == []:
            print(node + "* = 0")
            continue
        if entry[1] == [{}]:
            print(node + "* = 1")
            continue
        clauses = []
        for term in entry[1]:
            literals = [name if value else '!' + name
                        for name, value in term.items()]
            if literals:
                clauses.append(' & '.join(literals))
        line = node + "* = "
        if clauses:
            line += ' | '.join(clauses)
        print(line)
f72f5b9a2c9b3c90c6682e253dffd1d22f78fa0c
11,891
import os


def createfolders(foldername_list):
    """Create the folder structure specified by the list.

    Each non-'/' entry extends a cumulative path (with a trailing '/') that
    is created as we go. Already-existing prefixes are reported and skipped;
    any other creation failure is reported and re-raised. Returns the full
    path that was built.
    """
    path = ''
    for name in foldername_list:
        if name == '/':
            continue
        path += name + '/'
        try:
            os.makedirs(path)
        except OSError as err:
            if os.path.exists(path):
                print('Path "{}" already exists.'.format(path))
            else:
                print('Unable to create path "{}".'.format(path))
                raise err
    return path
9b9ebf3c29dce52b05a885019f7a3e2092856605
11,892
def is_trivial_pair(p):
    """Return True when both components of the critical pair are equal."""
    left, right = p
    return left == right
ee4b42a9f1345c7260c3992db4c2445b32273a01
11,893
def normalize_meaning(source_string):
    """Strip surrounding whitespace and remove every '<...>' tag found
    in the source string."""
    tag_start = 0
    tags = []
    # Collect every span from the most recent '<' up to each '>'.
    for position, char in enumerate(source_string):
        if char == '<':
            tag_start = position
        elif char == '>':
            tags.append(source_string[tag_start:position + 1])
    result = source_string.strip()
    for tag in tags:
        result = result.replace(tag, '')
    return result
be22cbcbf09c4d5705f5d854e4b562d570d3f216
11,894
def get_heart_rate_units_str():
    """Return the unit label used when displaying heart rate."""
    units = "bpm"
    return units
f7a17f7d6d71124e53a72cfdc8192c78b7beba10
11,895
def cut_list(list, length):
    """
    Cut a list into consecutive parts of a certain length.

    Args :
        list : list of the images path of the whole database
        length (int): the length of the parts

    Returns :
        list of slices of `list`, each of size `length` (the last one may
        be shorter); an empty input yields an empty list.
    """
    # NOTE(review): parameter shadows builtin `list`; name kept so keyword
    # callers are unaffected.
    # range() stepping by `length` fixes the old `<=` off-by-one that
    # appended a trailing empty chunk when len(list) was a multiple of
    # `length` (and returned [[]] for empty input).
    return [list[start:start + length] for start in range(0, len(list), length)]
210a1ad7db3058396ad32493a91b5c70176fb77c
11,896
import json def _convert_vars_to_json(env_dict): """Converts 'variables' in the given environment dict into string.""" if ('variables' in env_dict and isinstance(env_dict.get('variables'), dict)): env_dict['variables'] = json.dumps(env_dict['variables']) return env_dict
8dbf0456fd29833ff42570e011a45b368aa2ac0c
11,897
def getprimarymode(image_list, bid):
    """Determine the primary instrument and mode for a block.

    Rows of `image_list` matching the block id are inspected; the
    instrument is chosen by priority RSS > HRS > SCAM, then the primary
    mode is derived from the collected observation/detector modes.

    Returns (instr, primary_mode).
    """
    primary_mode=None
    instr=[]
    obsmode=[]
    detmode=[]
    # NOTE(review): rows appear to be positional records — presumably
    # img[5]=instrument, img[6]=obsmode, img[7]=detmode, img[10]=block id;
    # confirm against the producer of image_list.
    for img in image_list:
        if img[10]==bid:
            instr.append(img[5])
            obsmode.append(img[6])
            detmode.append(img[7])
    # set the instrument (instr is rebound from list to string here)
    if 'RSS' in instr:
        instr='RSS'
    elif 'HRS' in instr:
        instr='HRS'
    else:
        instr='SCAM'
    # set the mode from the instrument
    if instr=='RSS':
        if 'SPECTROSCOPY' in obsmode:
            primary_mode='SPECTROSCOPY'
        elif 'FABRY-PEROT' in obsmode:
            primary_mode='FABRY-PEROT'
        else:
            primary_mode='IMAGING'
    elif instr=='HRS':
        # NOTE(review): raises IndexError when no row matched bid and the
        # default instrument path is not taken — confirm inputs.
        primary_mode = obsmode[0]
    elif instr=='SCAM':
        if 'SLOTMODE' in detmode:
            primary_mode='SLOTMODE'
        else:
            primary_mode='NORMAL'
    return instr, primary_mode
f50f585fb9e9888be0a478c230767f8ee7907fac
11,898
def flatten_probas(preds, targets, ignore=None):
    """Flatten batched predictions to per-pixel rows.

    (B, C, H, W) predictions become (B*H*W, C) and targets become a flat
    vector; when `ignore` is given, pixels whose target equals it are
    dropped from both.
    """
    batch, channels, height, width = preds.size()
    flat_preds = preds.permute(0, 2, 3, 1).contiguous().view(-1, channels)
    flat_targets = targets.view(-1)
    if ignore is None:
        return flat_preds, flat_targets
    keep = flat_targets != ignore
    vprobas = flat_preds[keep.nonzero().squeeze()]
    vlabels = flat_targets[keep]
    return vprobas, vlabels
4765c2b22ab526e762fa5877ca30299b4de851b7
11,900
def to_float(num):
    """Convert `num` to float when it is a str; otherwise return it unchanged."""
    if type(num) is str:
        return float(num)
    return num
b2692edb3037359853bcd22fa5059efa20976a70
11,903
from functools import reduce


def merge(s, p):
    """Merge union-find `s` with permutation `p` (given as cycles), in place.

    All elements of each cycle end up in the same set; afterwards every
    entry of `s` points directly at its root. Returns `s`.
    """
    def root(a):
        # Chase parent pointers to the representative.
        while s[a] != a:
            a = s[a]
        return a

    def join(a, b):
        ra = root(a)
        s[root(b)] = ra
        return ra

    for cycle in p:
        reduce(join, cycle)
    # Path-compress every entry to point straight at its root.
    for idx in range(len(s)):
        s[idx] = root(idx)
    return s
b65e1151c678fc9627840799770499b5fe91f71e
11,905
def pluralize(string):
    """Naively pluralize a singular noun.

    '-y' becomes '-ies', '-us' becomes '-i', anything else gets '-s'.
    """
    if string.endswith('us'):
        return string[:-2] + 'i'
    if string.endswith('y'):
        return string[:-1] + 'ies'
    return string + 's'
7179c02f71f0044735c6573d64784456fcabc6cc
11,906
import csv


def get_sampleID_and_sex(args):
    """
    Retrieve tumor sample IDs and patient sex from the title file.

    :param args: namespace exposing `title_file_path`, a tab-separated
        title file with either Sex/Sample or SEX/CMO_SAMPLE_ID columns.
    :return: dict mapping sample ID -> 'Female'/'Male' for rows whose
        Class column is 'Tumor'.
    """
    sample2sex = {}
    # Mode "rU" was removed in Python 3.11; the csv module documents
    # newline="" as the correct way to open input files.
    with open(args.title_file_path, newline="") as titleFile:
        tfDict = csv.DictReader(titleFile, delimiter="\t")
        for row in tfDict:
            if row["Class"] != "Tumor":
                continue
            if "Sex" in row:
                # Values are already spelled out ('Female'/'Male'); the old
                # replace("Female", "Female") calls were no-ops.
                sex = row["Sex"]
            else:
                sex = row["SEX"].replace("F", "Female").replace("M", "Male")
            key = row["Sample"] if "Sample" in row else row["CMO_SAMPLE_ID"]
            sample2sex[key] = sex
    return sample2sex
a3cc0419bfcb0eb658700f4d49981887ccc7a69f
11,907
def reverse_key_value(orig_dict):
    """
    DESCRIPTION
    -----------
    Reverse the key/value pairs of a dictionary object.

    PARAMETERS
    ----------
    orig_dict : dict
        A dictionary object.

    RETURNS
    -------
    dict
        A new dictionary whose keys are the original values and whose
        values are the original keys.
    """
    return {value: key for key, value in orig_dict.items()}
92599452e511193ce34c4421b13029023fb9c762
11,910
import numpy


def make_labels(X):
    """Build a single 1D label array suitable for logistic regression.

    Parameters
    ==========
    X : iterable of 2D arrays
        One entry per class; each row is one example of that class.

    Returns
    =======
    labels : numpy.ndarray
        1D int array containing label k repeated len(X[k]) times.
    """
    return numpy.hstack(
        [numpy.full(len(cls_data), cls_idx, dtype=int)
         for cls_idx, cls_data in enumerate(X)]
    )
d10ac3b9213bd6a2d43d24059295ff2779b1cd39
11,911