content: string (35 to 416k chars)
sha1: string (40 chars)
id: int64 (0 to 710k)
def ringfilter(motif, bond, pivot1, pivot2):
    """
    For a given linear sequence of atoms, tests whether they form a ring,
    i.e., whether atoms pivot1 and pivot2 are connected.
    """
    if bond[motif[pivot1]][motif[pivot2]] > 0:
        return 1
    else:
        return 0
2fea90a0965d8a1504f5b82610a90ab8611287b9
18,246
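A quick sanity check for the ringfilter entry above — a minimal sketch using a hypothetical 3-atom motif and a hand-built bond (adjacency) matrix:

# Hypothetical triangle: atoms 0-1-2 form a chain whose ends are also bonded.
bond = [[0, 1, 1],
        [1, 0, 1],
        [1, 1, 0]]
motif = [0, 1, 2]
assert ringfilter(motif, bond, 0, 2) == 1  # atoms 0 and 2 are connected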
import os


def read_data():
    """
    Function: read_data
    Parameters: None
    Functionality: read the data and convert it to a meaningful form
    """
    parent_dir = os.getcwd()
    dir_name = 'analysis_output_files'
    file_name = 'scenario_analysis.log'
    file_path = os.path.join(parent_dir, dir_name, file_name)
    lines = []
    with open(file_path, 'r') as file_pointer:
        lines = file_pointer.readlines()
    data = lines[3:-3]
    graph_data_lst = []
    prev = 0
    for idx in range(0, len(data) + 1, 6):
        block_dict = {}
        block = data[prev:idx]
        if len(block) > 1:
            block_dict['Area'] = block[0].split(':')[1].split('X')[0]
            block_dict['UAV'] = round(float(block[1].split(':')[1]), 2)
            block_dict['user'] = round(float(block[2].split(':')[1]), 2)
            block_dict['similarity'] = round(float(block[3].split(':')[1]), 2)
            block_dict['std'] = round(float(block[4].split(':')[1]), 2)
            block_dict['comm_th'] = round(float(block[5].split(':')[1]), 2)
            graph_data_lst.append(block_dict)
        prev = idx
    return graph_data_lst
b462f0345b0bb0f1f13defaa72772c84c29c2cbf
18,248
def FindNonTrivialOrbit(generators):
    """
    Given a generating set <generators> (a Python list containing
    permutations), this function returns an element <el> with a nontrivial
    orbit in the group generated by <generators>, or <None> if no such
    element exists. (Useful for order computation / membership testing.)
    """
    if generators == []:
        return None
    n = generators[0].n
    for P in generators:
        for el in range(n):
            if P[el] != el:
                return el
ddd3f92523a0b0f8a7cb08146d4d1014a5adedf1
18,249
def city_functions(city, country, population=''):
    """Generate a neatly formatted city."""
    if population:
        return (city.title() + ', ' + country.title()
                + ' - population ' + str(population) + '.')
    return city.title() + ', ' + country.title() + '.'
1fde2b9bd910e02c67cf7a5c46c13b88d381ce03
18,253
def create_pivot_df(df_snp):
    """Constructs a pivot DataFrame grouping data by the duplex variant."""
    context_data = df_snp.groupby('ref_duplex').sum()
    context_data.drop(['start_pos', 'stop_pos'], inplace=True, axis=1)
    return context_data
691f067155c12a58522d48f83789db2fa4b5d584
18,254
def key_(arg):
    """Get an argument's destination key."""
    _, kwargs = arg
    return kwargs['dest']
51b310edda31a1973b62e4ae3b6a24b1fcd5f35b
18,255
import torch


def smooth_dice_loss(pred: torch.Tensor, target: torch.Tensor,
                     smooth: float = 1., eps: float = 1e-6) -> torch.Tensor:
    """
    Smoothed dice loss.

    :param pred: (torch.Tensor) predictions, logits
    :param target: (torch.Tensor) target, logits or binary
    :param smooth: (float) smoothing value
    :param eps: (float) epsilon for numerical stability
    :returns dice_loss: (torch.Tensor) the dice loss
    """
    pred = torch.sigmoid(pred)
    target = (target > 0).float()
    intersection = (pred.reshape(-1) * target.reshape(-1)).sum()
    return 1 - ((2. * intersection + smooth)
                / (pred.sum() + target.sum() + smooth + eps))
30cda9c2789661d997254e43febb4412ecc76b15
18,256
import argparse


def parse_args():
    """Use argparse to get command line arguments.

    task: segmentation
    gt: path to ground truth
    result: path to results to be evaluated
    num_classes: dataset classes, e.g. 19 for cityscapes
    ignore_label: label to be ignored during evaluation, e.g. 255 for cityscapes
    result_suffix: prediction result file suffix, e.g. leftImg8bit.png
    gt_suffix: ground truth file suffix, e.g. gtFine_trainIds.png
    """
    parser = argparse.ArgumentParser()
    parser.add_argument('--task', default='segmentation', help='evaluation task name')
    parser.add_argument('--gt', help='path to ground truth')
    parser.add_argument('--result', help='path to results to be evaluated')
    parser.add_argument('--result_suffix', type=str, default='leftImg8bit.png',
                        help='prediction result file suffix')
    parser.add_argument('--gt_suffix', type=str, default='gtFine_trainids.png',
                        help='ground truth file suffix')
    parser.add_argument('--num_classes', type=int, default=20, help='dataset classes')
    parser.add_argument('--ignore_label', type=int, default=19,
                        help='label to be ignored during evaluation')
    parser.add_argument('--result_file', type=str, default='accuracy.txt',
                        help='save accuracy to file')
    args = parser.parse_args()
    return args
d94a12b76251415a42c7262e2163ce6eaf347aa0
18,257
def climbing_stairs_three_recur(steps: int) -> int:
    """Staircase by top-down recursion.

    Time complexity: O(3^n).
    Space complexity: O(n).
    """
    if steps < 0:
        return 0
    if steps == 0:
        return 1
    return (climbing_stairs_three_recur(steps - 1)
            + climbing_stairs_three_recur(steps - 2)
            + climbing_stairs_three_recur(steps - 3))
5ecf0ee4a627b22d4aac65e488d62c86ca958ac3
18,258
def get_seat_id(ticket: str) -> int:
    """Get seat id based on boarding ticket (e.g. 'BBFFBBFRLL')."""
    rows = range(128)
    cols = range(8)
    for letter in ticket:
        if letter in "FB":
            midpoint = len(rows) // 2
            rows = rows[:midpoint] if letter == "F" else rows[midpoint:]
        else:
            midpoint = len(cols) // 2
            cols = cols[:midpoint] if letter == "L" else cols[midpoint:]
    return rows[0] * 8 + cols[0]
5f901755192c93dc275afa5392c2472db7e60108
18,262
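A usage sketch for get_seat_id above, using the well-known Advent of Code 2020 day 5 example ticket (the expected id comes from that puzzle statement):

assert get_seat_id("FBFBBFFRLR") == 357  # row 44, column 5 -> 44 * 8 + 5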
def _create_object_from_type_and_dict(cls, obj_dict):
    """Creates an object, bypassing the constructor.

    Creates an object of type `cls`, whose `__dict__` is updated to contain
    `obj_dict`.

    Args:
      cls: The type of the new object.
      obj_dict: A `Mapping` that should be used to initialize the new
        object's `__dict__`.

    Returns:
      An object of type `cls`.
    """
    value = object.__new__(cls)
    value.__dict__.update(obj_dict)
    return value
540c11639f724aeacc745cc7b334abb556783eee
18,263
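To illustrate the constructor bypass above, a small sketch with a hypothetical class whose __init__ would otherwise run:

class Point:
    def __init__(self, x, y):
        raise RuntimeError("never reached when bypassing the constructor")

p = _create_object_from_type_and_dict(Point, {"x": 1, "y": 2})
assert (p.x, p.y) == (1, 2)  # attributes come straight from obj_dict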
import copy


def populate_target_env_cfg(target_cfg, target_env):
    """
    Read out context from target config, then merge it with the global magic
    context. Every key in target config that starts with `_` is considered
    magic context and will be merged into each target_env config.
    """
    # We need to do a deepcopy here because the yaml extend operation is not
    # a deepcopy, and we will be injecting new keys in the following loop.
    target_env_cfg = copy.deepcopy(target_cfg[target_env])
    for dkey, dval in target_cfg.items():
        if dkey.startswith('_') and dkey not in target_env_cfg:
            target_env_cfg[dkey] = dval
    return target_env_cfg
b252b87faa50a949b3c1a51ed8da5dac8256a2ac
18,264
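A minimal sketch of the magic-context merge above, with a hypothetical config (the `_region` key is the invented magic-context entry):

target_cfg = {
    "_region": "eu-west-1",                       # shared magic context
    "prod": {"replicas": 3},
    "dev": {"replicas": 1, "_region": "local"},   # env-level value wins
}
assert populate_target_env_cfg(target_cfg, "prod") == {"replicas": 3, "_region": "eu-west-1"}
assert populate_target_env_cfg(target_cfg, "dev") == {"replicas": 1, "_region": "local"}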
import re


def valid_email(email):
    """Check the syntactic validity of an email address.

    Args:
        email (str): email address to validate

    Returns:
        bool: True if the email is valid
    """
    EMAIL_REGEX = r"[a-z0-9!#$%&'*+/=?^_`{|}~-]+(?:\.[a-z0-9!#$%&'*+/=?^_`{|}~-]+)*@(?:[a-z0-9](?:[a-z0-9-]*[a-z0-9])?\.)+[a-z0-9](?:[a-z0-9-]*[a-z0-9])?"
    pattern = re.compile(EMAIL_REGEX)
    return bool(pattern.match(email))
8335f4fe0fcdfaaf5d053104c838a039c2555041
18,265
from typing import Union
from typing import Callable
import importlib


def validate_task(task: Union[str, Callable]) -> str:
    """
    Helper function for dealing with task inputs, which may be either a
    callable or a path to a callable given as a string.

    If a string is provided, this function checks whether the import path
    leads to a valid callable. Otherwise, the callable is converted back into
    a string (as the :class:`carrot.objects.Message` requires a string input).

    This function is used by the following other utility functions:
        - :func:`.create_scheduled_task`
        - :func:`.create_message`
    """
    mod, fname = (None,) * 2
    if isinstance(task, str):
        try:
            fname = task.split('.')[-1]
            mod = '.'.join(task.split('.')[:-1])
            module = importlib.import_module(mod)
            getattr(module, fname)
        except ImportError as err:
            raise ImportError('Unable to find the module: %s' % err)
        except AttributeError as err:
            raise AttributeError('Unable to find a function called %s in the module %s: %s'
                                 % (fname, mod, err))
    else:
        # noinspection PyUnresolvedReferences
        task = '%s.%s' % (task.__module__, task.__name__)
    return task
f5938029aea67c711ed3c5363c6682fa4d6389bb
18,266
def SetSingleTrack(center_row, height):
    """
    Set the single track parameters. The parameters are validated in the
    following order: center row, then track height.

    Parameters:
        int center_row: center row of the track
        int height: height of the track

    Valid values:
        center_row: valid range 1 to number of vertical pixels
                    (1 in conventional/extended NIR mode on the Clara)
        height: valid range > 1 (maximum value depends on the center row
                and the number of vertical pixels)
    """
    return None
9e086cbffa046763200d0ddb4eb21261a9cfe0ee
18,267
def check_msg_size(msg):
    """Check if the commit message is too long.

    Parameters
    ----------
    msg : str

    Returns
    -------
    bool
    """
    if len(msg) > 72:
        print(
            "ERROR: Commit message is too long. It should be 72 characters or"
            " less."
        )
        return False
    return True
4e3a985bcaccf0546d51a1fb67990c100ae9ab6e
18,268
import os


def config(key):
    """
    Read a configuration value from the environment (key-value pairs
    typically loaded from a .env file).
    """
    return os.environ.get(key)
3a2e09c656c7caf2ba265023ec08c19e7e2c02e4
18,269
def get_option_value(option):
    """
    An option in a Checkboxes or CheckboxTree question is a dict, but we need
    to treat their contents in consistent ways, e.g. when getting the value
    to be persisted in the API.

    :param option: dict from a Question's list of options
    :return: string value to be persisted
    """
    return option.get('value') or option['label']
98e9119c77a1fbc05f8e988bc7cf9a0e3ef9385e
18,270
def bytes2str(data):
    """
    Convert bytes to string

    >>> bytes2str(b'Pwning')
    'Pwning'
    >>>
    """
    data = "".join(map(chr, data))
    return data
cd2e7fd59628c7b4eb8fdc918148f960f1226d6f
18,271
from io import StringIO


def svg(plot, close=True):
    """
    Creates SVG code and closes the plot.

    Args:
        plot: matplotlib.pyplot
            Plot from which the SVG should be made.

    Returns:
        str
            SVG code.
    """
    # make SVG
    svg_file = StringIO()
    plot.savefig(svg_file, format='svg')
    svg = svg_file.getvalue()
    svg_file.close()

    # close plot
    if close:
        plot.close()

    return svg
4acdd6f346af2de672e538415795e0e1181ee4e4
18,272
def docselect(posts, category):
    """
    Receives the category of devices and returns the relevant documents.
    """
    search_filter = {'Ancestors': {"$in": [category]},
                     "$where": 'this.Toolbox.length>0'}
    postsample = posts.find(search_filter, no_cursor_timeout=True)
    print('{} documents are selected'.format(postsample.count()), flush=True)
    return postsample
dcee4a1fe6223307a16114ca7fe9b3cae58fbe44
18,273
def mocked_events_ics(source: str = "uqcs") -> bytes:
    """
    Returns a locally stored .ics file that imitates the UQCS Calendar on
    Google Calendar.
    """
    with open("test/test_events_events.ics", "rb") as events_file:
        return events_file.read()
8388d826648c9b274c991453eaf9e3885794e980
18,275
def extract_elements_from_nested_list(nested_list):
    """Extract elements from a nested list."""
    new_list = []
    # For each item in the nested list
    for item in nested_list:
        # Detangle nested items recursively
        if type(item) != str:
            new_item = extract_elements_from_nested_list(item)
            new_list += new_item
        else:
            new_list.append(item)
    return new_list
4aa3ea7c6d07b73383f54a26ce073d85db383047
18,276
import re


def split_psalm(reference):
    """Splits psalms when multiple are included together."""
    # Example input: 'Ps. 33,108:1-6,7-13'
    # Example input: 'Ps. 63:1-8,9-11,Ps. 98'
    if len(re.findall(r'Ps\.', reference)) > 1:
        psalms = reference.split(',Ps. ')
        psalms[0] = re.sub(r'Ps\. ', '', psalms[0])
    else:
        reference = re.sub(r'Ps\. ', '', reference)
        if re.search(r',\d+,\d+:', reference):
            psalms = reference.split(',', 2)
        elif re.search(r'^[^,]*\d+,\d+:', reference):
            psalms = reference.split(',', 1)
        else:
            psalms = reference.split(',')
    return psalms
d104e6a4ab718a544ef8d5970959ccb549e72704
18,277
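Tracing the two docstring examples of split_psalm above through its regex branches gives (a quick check, worked out by hand):

assert split_psalm('Ps. 63:1-8,9-11,Ps. 98') == ['63:1-8,9-11', '98']
assert split_psalm('Ps. 33,108:1-6,7-13') == ['33', '108:1-6,7-13']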
def remove_upper_channel(lo, hi):
    """
    Take the difference between two channels.

    rule:
        lo ) 0 0 1 1
        up ) 0 1 0 1
         -> 0 0 1 0
    """
    lo = lo.astype(bool)
    hi = hi.astype(bool)
    return (lo ^ (lo & hi).astype(bool)).astype(bool)
b3b6a86af53a6529b873a0aefb642589c58f27bd
18,278
def _dictify(value):
    """
    Converts a non-dictionary value to a dictionary with a single
    empty-string key mapping to the given value, or returns the value itself
    if it's already a dictionary. This is useful to map values to a row's
    columns.
    """
    return value if isinstance(value, dict) else {'': value}
241ead832384a1459666f70829634aa8f3fdc2ae
18,279
def find_opposite_neighbor(node, element, elements):
    """
    Returns the neighbor of 'element' that doesn't contain the node 'node'.

    NOTE: Valid only for triangular elements.
    """
    neighbors = elements[element.neighbors - 1]
    for neighbor in neighbors:
        if node not in neighbor.nodes:
            return neighbor
1a38deabf1e9e89ba0e3bab3b88398c0c0d71357
18,280
import random


def random_row(categories=None):
    """
    Make a random row.

    Returns rows like (category, a, b):
    - category is a categorical variable,
    - a is a randomly selected cluster value (with two clusters)
    - b is uniform on [0, 1]

    Returns:
        (category, a, b) - the random row
    """
    # Category
    categories = categories or 'xyz'
    category = random.choice(categories)

    # Make a from clusters
    a_mean = random.choice([25, 75])
    a_stddev = 20
    a = random.normalvariate(a_mean, a_stddev)

    # b is just random uniform
    b = random.uniform(0, 1)

    return (category, a, b)
dc4db2db758454898a92a7953cb0f186e03a5ba8
18,281
import re


def _generate_remote_name(remote_url: str) -> str:
    """Generate a kind-of human readable name based on a url."""
    filtered = (
        re.sub(r"[./\\@:\^]", "-", remote_url).replace("https", "").replace("http", "")
    )
    return re.sub(r"[-]{2,}", "-", filtered).strip("-")
d3ed64cfbe9ad222b12620254f709cccfe101352
18,283
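Tracing _generate_remote_name above on a typical clone URL: punctuation becomes hyphens first, then the scheme word is dropped and hyphen runs are collapsed.

assert _generate_remote_name("https://github.com/user/repo.git") == "github-com-user-repo-git"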
def connect_device_success(self, *args, **kwargs):
    """Return `self`, which will result in the ADB connection being
    interpreted as available."""
    return self
55b8238f4406804609f08318b00ebfe2b4997690
18,284
import logging


def prepare_data(df):
    """Separate the dependent variables and independent features.

    Args:
        df: dataset object (e.g. a tf.keras.datasets module) whose
            load_data() returns the train/test split

    Returns:
        tuple: the train, validation and test splits of features and labels
    """
    logging.info("Preparing the data by segregating the independent and dependent variables")
    (X_train_full, y_train_full), (X_test, y_test) = df.load_data()
    X_valid, X_train = X_train_full[:5000] / 255., X_train_full[5000:] / 255.
    y_valid, y_train = y_train_full[:5000], y_train_full[5000:]
    X_test = X_test / 255.
    return X_train, y_train, X_valid, y_valid, X_test, y_test
4891e6d858c1f56a1d9b1dd634668bf92f56015e
18,286
def count_vowels(sentence: str) -> dict:
    """
    Find the vowel counts in a sentence.

    Note that we do not initialize vowel_cnt with each vowel set to 0;
    setdefault() adds a vowel the first time it is seen.
    """
    vowels = ['a', 'e', 'i', 'o', 'u']
    vowel_cnt = {}
    for char in sentence:
        if char in vowels:
            vowel_cnt.setdefault(char, 0)
            vowel_cnt[char] += 1
    return vowel_cnt
9730910d359564bc2be5721a69b41cb57ca59b27
18,287
def parseExitStatus(exitStatus):
    """Parse the exit status of command line commands."""
    # 0 means executed correctly
    return not bool(exitStatus)
460e93275e519848837a9251adf21f8a57ab90de
18,290
import re


def delete_links(string):
    """Delete links from the input string.

    Args:
        string (str): string to delete links from

    Returns:
        str: string without links
    """
    return re.sub(r'http\S+', '', string)
c452e25f245b7b791800cff8ca31ce75598e06c9
18,291
def validate_entrypoints(entrypoints):
    """Check that the loaded entrypoints are valid.

    Expects a dict of dicts, e.g.::

        {'console_scripts': {'flit': 'flit:main'}}
    """
    def _is_identifier_attr(s):
        return all(n.isidentifier() for n in s.split('.'))

    problems = []
    for groupname, group in entrypoints.items():
        for k, v in group.items():
            if ':' in v:
                mod, obj = v.split(':', 1)
                valid = _is_identifier_attr(mod) and _is_identifier_attr(obj)
            else:
                valid = _is_identifier_attr(v)

            if not valid:
                problems.append('Invalid entry point in group {}: '
                                '{} = {}'.format(groupname, k, v))
    return problems
3785b9739a0d6aa281f9d6b448dd0581f3c44e04
18,293
import six


def prepare_call(parameters):
    """Dispatches messages to a different pipeline.

    Sends the message through another pipeline. Any changes to the message
    will be visible in the remainder of this pipeline. Note that there is a
    hardcoded limit of 10 pipeline changes per message (includes jumps made
    by ``jump`` and ``fork``).

    Example:

    .. code:: yaml

        call: my_pipeline
    """
    pipeline_name = (parameters
                     if isinstance(parameters, six.string_types)
                     else parameters.get("pipeline"))
    if not pipeline_name:
        raise ValueError("parameter pipeline required")

    def handle_call(message, context):
        context.pipeline_manager.process(message, pipeline_name)

    return handle_call
e98620474c39deae79289a85154d1a1d127faf7f
18,294
import random


def random_pick(i_list: list, n) -> list:
    """
    Select n numbers from the source list.

    :param i_list: The source list
    :param n: The number of elements to pick
    :return: The output list
    """
    _list_copy = i_list.copy()
    _shallow_list = []
    assert n <= len(i_list)
    iter_idx = 1
    while n > 0:
        _shallow_list.append(
            _list_copy.pop(random.randint(0, len(i_list) - iter_idx))
        )
        iter_idx += 1
        n -= 1
    return _shallow_list
a2d1c7fd986e9d46ce1cdf7831c3bf91ae806295
18,296
def parse_ffmpeg(input_dir, input_file_name, video_scale_bitrates, video_keyint):
    """Parse ffmpeg commands."""
    ffmpeg_cmd = []
    ffmpeg_files = []

    # Audio processing
    audio_file = input_dir + '\\' + 'video_audio.mp4'
    audio_cmd = ['ffmpeg', '-i', input_file_name, '-c:a', 'copy', '-vn', audio_file]
    ffmpeg_cmd.append(audio_cmd)
    ffmpeg_files.append(audio_file)

    # Video processing
    for scale in video_scale_bitrates:
        for bitrate in video_scale_bitrates[scale]:
            output_file = input_dir + '\\' + 'video_' + bitrate + '.mp4'
            keyint = 'keyint=%d:min-keyint=%d:no-scenecut' % (video_keyint, video_keyint)
            cmd = ['ffmpeg', '-i', input_file_name, '-y', '-c:v', 'libx264',
                   '-x264opts', keyint,
                   '-b:v', bitrate, '-maxrate', bitrate,
                   '-bufsize', str(int(bitrate[:-1]) // 2) + 'k',
                   '-vf', 'scale=%s' % (scale), output_file]
            ffmpeg_files.append(output_file)
            ffmpeg_cmd.append(cmd)
    print(ffmpeg_cmd)
    return ffmpeg_files, ffmpeg_cmd
26158819802a48c608a4b26cb97f88c96507eb04
18,297
def strip_uri(repo):
    """
    Remove the trailing slash from the URI in a repo definition.
    """
    splits = repo.split()
    for idx, val in enumerate(splits):
        if any(val.startswith(x) for x in ("http://", "https://", "ftp://")):
            splits[idx] = val.rstrip("/")
    return " ".join(splits)
284d100f96c5d6912f75c4b7ae45310a69a6dc6c
18,298
def aoi_status(objects, status):
    """
    Returns the AOIs that have the given status.
    """
    return objects.filter(status=status)
0fc8542ec8e92e65f58e10e86c8ae5566008245d
18,301
import logging
import sys


def parser_cvs(filename, fields_jsons):
    """Parses a CSV file into a list of dictionaries with the configuration."""
    try:
        with open(filename, "r") as conf_file:
            readfilecsv = conf_file.readlines()
            info_load_config = "Loaded file: {}"
            logging.info(info_load_config.format(filename))
    except Exception:
        error_load_config = "Could not load the file: {}"
        logging.error(error_load_config.format(filename))
        sys.exit(0)

    diccionario_json = []
    # Iterate over the CSV lines
    for read_line in readfilecsv:
        # Skip comment lines
        if read_line.startswith("#"):
            continue
        # Skip empty lines
        if read_line.strip() == "":
            continue
        # Split the fields on commas
        split_read_lines = read_line.split(",")
        # Initialize the dictionary that will receive the key/value pairs
        row_json = {}
        # Walk the JSON attributes
        indice = 0
        for value_field in split_read_lines:
            if value_field.strip() == '""':
                value_field = ""
            row_json[fields_jsons[indice]] = value_field.strip()
            indice = indice + 1
        if row_json:
            diccionario_json.append(row_json)
    return diccionario_json
e3c24c04bf731b3351bad5484a85ab2777e3186c
18,302
def one_text_header(response, header_name):
    """
    Retrieve one text header from the given HTTP response.
    """
    return response.headers.getRawHeaders(header_name)[0].decode("utf-8")
afc4b3bba143a092173a935ede77ff8ecec6d7e3
18,303
import numpy as np


def vectorised_exceed_ns_icpm(cln_min, cln_max, cls_min, cls_max, dep_n, dep_s):
    """Vectorised version of exceed_ns_icpm(). Calculates exceedances based
    on the methodology outlined by Max Posch in the ICP Mapping manual
    (section VII.4):

    https://www.umweltbundesamt.de/sites/default/files/medien/4292/dokumente/ch7-mapman-2016-04-26.pdf

    NB: All units should be in meq/l.

    Args:
        cln_min: Float array. Parameter to define "critical load function" (see PDF)
        cln_max: Float array. Parameter to define "critical load function" (see PDF)
        cls_min: Float array. Parameter to define "critical load function" (see PDF)
        cls_max: Float array. Parameter to define "critical load function" (see PDF)
        dep_n:   Float array. Total N deposition
        dep_s:   Float array. Total (non-marine) S deposition

    Returns:
        Tuple of arrays (ex_n, ex_s, reg_id)

        ex_n and ex_s are the exceedances for N and S depositions dep_n and
        dep_s and the CLF defined by (cln_min, cls_max) and (cln_max,
        cls_min). The overall exceedance is (ex_n + ex_s).

        reg_id is an integer array of region IDs, as defined in Figure VII.3
        of the PDF.
    """
    # Create NaN arrays for output with correct dimensions
    ex_n = np.full(shape=dep_s.shape, fill_value=np.nan)
    ex_s = np.full(shape=dep_s.shape, fill_value=np.nan)
    reg_id = np.full(shape=dep_s.shape, fill_value=np.nan)

    # Handle edge cases
    # CLF pars < 0
    mask = (cln_min < 0) | (cln_max < 0) | (cls_min < 0) | (cls_max < 0)
    # Updated 07.11.2020. Values < 0 do not make sense, so were originally
    # set to NaN. This change is equivalent to setting values less than zero
    # back to zero:
    #     ex_n[mask] = np.nan
    #     ex_s[mask] = np.nan
    ex_n[mask] = dep_n[mask]
    ex_s[mask] = dep_s[mask]
    reg_id[mask] = -1
    edited = mask.copy()
    # Keep track of edited cells so we don't change them again. This is
    # analogous to the original 'if' statement in exceed_ns_icpm(), which
    # requires the logic to be implemented in a specific order, i.e. once a
    # cell has been edited we do not want to change it again (just like once
    # the 'if' evaluates to True, we don't proceed any further).

    # CL = 0
    mask = (cls_max == 0) & (cln_max == 0) & (edited == 0)
    ex_n[mask] = dep_n[mask]
    ex_s[mask] = dep_s[mask]
    reg_id[mask] = 9
    edited += mask

    # Otherwise, we're somewhere on Fig. VII.3
    dn = cln_min - cln_max
    ds = cls_max - cls_min

    # Non-exceedance
    mask = (
        (dep_s <= cls_max)
        & (dep_n <= cln_max)
        & ((dep_n - cln_max) * ds <= (dep_s - cls_min) * dn)
        & (edited == 0)
    )
    ex_n[mask] = 0
    ex_s[mask] = 0
    reg_id[mask] = 0
    edited += mask

    # Region 1
    mask = (dep_s <= cls_min) & (edited == 0)
    ex_n[mask] = dep_n[mask] - cln_max[mask]
    ex_s[mask] = 0
    reg_id[mask] = 1
    edited += mask

    # Region 5
    mask = (dep_n <= cln_min) & (edited == 0)
    ex_s[mask] = dep_s[mask] - cls_max[mask]
    ex_n[mask] = 0
    reg_id[mask] = 5
    edited += mask

    # Region 2
    mask = (-(dep_n - cln_max) * dn >= (dep_s - cls_min) * ds) & (edited == 0)
    ex_n[mask] = dep_n[mask] - cln_max[mask]
    ex_s[mask] = dep_s[mask] - cls_min[mask]
    reg_id[mask] = 2
    edited += mask

    # Region 4
    mask = (-(dep_n - cln_min) * dn <= (dep_s - cls_max) * ds) & (edited == 0)
    ex_n[mask] = dep_n[mask] - cln_min[mask]
    ex_s[mask] = dep_s[mask] - cls_max[mask]
    reg_id[mask] = 4
    edited += mask

    # Region 3 (anything not already edited)
    dd = dn ** 2 + ds ** 2
    s = dep_n * dn + dep_s * ds
    v = cln_max * ds - cls_min * dn
    xf = (dn * s + ds * v) / dd
    yf = (ds * s - dn * v) / dd
    ex_n[~edited] = dep_n[~edited] - xf[~edited]
    ex_s[~edited] = dep_s[~edited] - yf[~edited]
    reg_id[~edited] = 3

    del mask, edited, dd, s, v, xf, yf, dn, ds

    return (ex_n, ex_s, reg_id)
2c79709ca15e6ade5a66a8135a4446d31afd3ed4
18,305
from typing import Optional
from typing import Dict
from typing import List
from typing import Tuple
import base64


def as_metadata(
    credentials: Optional[Dict[str, str]] = None,
    requires_leader: Optional[bool] = None
) -> Optional[List[Optional[Tuple[str, str]]]]:
    """Returns a valid grpc "metadata" object."""
    metadata = []
    if credentials is not None:
        # Both a username and a password must be present before building the token.
        if all(key in credentials for key in ("username", "password")):
            token = f"{credentials['username']}:{credentials['password']}"
            token = base64.b64encode(token.encode("ascii")).decode("ascii")
            auth = ("authorization", f"Bearer {token}")
            metadata.append(auth)
    if requires_leader is not None:
        req_leader = ("requires-leader", str(requires_leader).lower())
        metadata.append(req_leader)
    return metadata if metadata else None
3377aed0298de597edfb5edb6141024a65c39fc5
18,306
def node_tuple():
    """
    :returns: The tuple that defines a node
    :rtype: :class:`tuple_(dict, dict)`
    """
    return {}, {}
fe3793124eb4d56fdf7537f403d1c9d11ec5e343
18,307
def split_string(string: str, indices: list) -> list:
    """Splits string between indices.

    Notes:
        It is possible to skip characters from the beginning and end of the
        string but not from the middle.

    Examples:
        >>> s = 'abcde'
        >>> indices = [1, 2, 4]
        >>> split_string(s, indices)
        ['b', 'cd']
    """
    return [string[n:m] for n, m in zip(indices[:-1], indices[1:])]
0850c4d0f18b70cbd75790e34580ea0567ddea05
18,308
import subprocess


def check_vault_connection():
    """Check whether the vault server is reachable by reading the token
    lookup endpoint."""
    try:
        subprocess.check_output(['vault', 'read', '--format', 'json',
                                 '/auth/token/lookup-self'])
    except subprocess.CalledProcessError:
        return False
    else:
        return True
f6f5bf1a2b26bfbb2891693d56ea0d416081db70
18,310
def preproc(line):
    """
    Split a single instruction line into the instruction name and a sequence
    of arguments. Arguments are separated by commas and stored in args in
    order. Arguments of the form d(Rb) are stored in args as d, then Rb.
    """
    head, tail = "", ""
    for i in range(len(line)):
        if line[i] == " ":
            tail = line[i + 1:]
            break
        head += line[i]
    cmd = head.upper()
    tmp = [s.strip() for s in tail.split(",") if not s == ""]
    args = []
    for i in range(len(tmp)):
        if "(" in tmp[i] and ")" in tmp[i]:
            a = tmp[i][:tmp[i].find("(")].strip()
            b = tmp[i][tmp[i].find("(") + 1:tmp[i].find(")")].strip()
            try:
                args.append(int(a))
                args.append(int(b))
            except Exception:
                raise ValueError
        else:
            try:
                args.append(int(tmp[i]))
            except Exception:
                raise ValueError
    return cmd, args
bf884ea59599c0c1026310ac364317687349151c
18,311
def attributes(attrs):
    """Returns an attribute list, constructed from the dictionary attrs."""
    attrs = attrs or {}
    ident = attrs.get("id", "")
    classes = attrs.get("classes", [])
    keyvals = [[x, attrs[x]] for x in attrs if (x != "classes" and x != "id")]
    return [ident, classes, keyvals]
92d48f897bd0a825819dc1b3a8f1ffd18686bd05
18,312
def fact_iter(n):
    """
    1. number of times around loop is n
    2. number of operations inside loop is a constant
    3. overall just O(n)

    >>> fact_iter(5)
    120
    >>> fact_iter(12)
    479001600
    >>> fact_iter(10)
    3628800
    >>> fact_iter(16)
    20922789888000
    >>> fact_iter(4)
    24
    """
    prod = 1
    for i in range(1, n + 1):
        prod *= i
    return prod
063abee825c8407b276f839543eb6985a90ac653
18,316
def char_sum(word: str) -> int:
    """Sum letter indices of an uppercase word ('A' counts as 0)."""
    total = 0
    for letter in word:
        total += ord(letter) - 65
    return total
58ac7b372a8dcbee3beed6b8132295cdc677095c
18,319
def _new_base(dests: str) -> str:
    """
    Create New Base State

    :param dests: Dests
    :return: New Base
    """
    bat, run1, run2, run3 = (int(dests[:1]), int(dests[1:2]),
                             int(dests[2:3]), int(dests[3:4]))
    nrunner1, nrunner2, nrunner3 = '0', '0', '0'

    if run1 == 1 or bat == 1:
        nrunner1 = '1'
    if run1 == 2 or run2 == 2 or bat == 2:
        nrunner2 = '1'
    if run1 == 3 or run2 == 3 or run3 == 3 or bat == 3:
        nrunner3 = '1'

    return f"{nrunner1}{nrunner2}{nrunner3}"
047b648b24d3aa20a3e964583d39eea151b99f42
18,320
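Reading _new_base above as "batter destination followed by the three runners' destinations", a worked example (a hypothetical play where the batter reaches second and the runner from first takes third):

assert _new_base("2300") == "011"  # 2nd and 3rd occupied, 1st empty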
def execute(command, document):
    """Call command on document recursively while the result is callable."""
    while callable(command):
        command = command(document)
    return command
46f34e8eb04a21aee303f3b13088d7bc4be1c16e
18,322
def r(a, b, C):
    """Defines the compatibility coefficients."""
    return C[a, b]
f1f8f109742feabb4119e20b137d7ae30a635db3
18,323
def _model_field_values(model_instance) -> dict:
    """Return model fields values (with the proper type) as a dictionary."""
    return model_instance.schema().dump(model_instance)
aeffb9cfc9304dec5672be6169c0909f2afbb3cb
18,324
import re


def is_in_file(path, search_string):
    """
    Determine whether a string is contained in a file. Like ``grep``.

    Parameters
    ----------
    path : str
        The path to a file.
    search_string : str
        The string to be located in the file.
    """
    with open(path, 'r') as filep:
        for line in filep:
            if re.search(search_string, line):
                return True
    return False
9912db6a81551e6b930bbf9b3c11168a34b91fe5
18,325
def rc_one_hot_encoding(encoded_seq):
    """Reverse complements one hot encoding for one sequence."""
    return encoded_seq[::-1, ::-1]
7dbd65c384cdd89dbb76698fe792fa85b0ff4206
18,327
def safe_get(obj, key, def_val=None):
    """
    Try to return the key'd value from either a class or a dict (or return
    the raw value if we were handed a native type).
    """
    ret_val = def_val
    try:
        ret_val = getattr(obj, key)
    except Exception:
        try:
            ret_val = obj[key]
        except Exception:
            if isinstance(obj, (float, int, str)):
                ret_val = obj
    return ret_val
8fd58128801f5338d7ddbc8fbb1b78830b644b4b
18,329
def _genotypes_str(sample_genotypes, genotypes_str_cache):
    """
    Returns
        - string representations of sample genotypes,
        - string representations of various marginal probabilities in form
          (0??, 1??, ..., ?0?, ...).
    """
    n_copies = len(sample_genotypes[0])
    sample_cn = sum(sample_genotypes[0])
    if sample_cn in genotypes_str_cache:
        return genotypes_str_cache[sample_cn]

    sample_genotypes_str = [','.join(map(str, gt)) for gt in sample_genotypes]
    marginal_str = []
    if n_copies > 2:
        gt_str = ['?'] * n_copies
        for copy in range(n_copies):
            for curr_copy_cn in range(sample_cn + 1):
                gt_str[copy] = str(curr_copy_cn)
                marginal_str.append(''.join(gt_str))
            gt_str[copy] = '?'

    res = (sample_genotypes_str, marginal_str)
    genotypes_str_cache[sample_cn] = res
    return res
195ec10ed2690ad114e80f7156fed172095dc6cc
18,330
import uuid


def format_jsonrpc_msg(method, params=None, *, notification=False):
    """
    Returns a dictionary that contains a JSON RPC message.

    Parameters
    ----------
    method: str
        Method name
    params: dict or list, optional
        List of args or dictionary of kwargs.
    notification: boolean
        If the message is a notification, no response will be expected.
    """
    msg = {"method": method, "jsonrpc": "2.0"}
    if params is not None:
        msg["params"] = params
    if not notification:
        msg["id"] = str(uuid.uuid4())
    return msg
23797940fb64efd7cc39da6871a377268ace57a7
18,331
def day2yr(day):
    """day -> yr"""
    return day / 365
4b7cd48f432df6ddf27096624a89f5380982a5f2
18,332
import os


def generate_indexed_filename(name_format, idx_start=0, folder=""):
    """
    Generate an unused indexed filename in `folder`.

    The name has `name_format` (using standard Python :func:`format` rules),
    with the index starting at `idx_start`.
    """
    while True:
        name = name_format.format(idx_start)
        if not os.path.exists(os.path.join(folder, name)):
            return name
        idx_start = idx_start + 1
031a9b889b5cb6cd2c1b8e67a9d70cb65cd39225
18,333
import gzip
import base64


def decompress(data):
    """
    Decodes a Base64 bytes (or string) input and decompresses it using Gzip.

    :param data: Base64 (bytes) data to be decoded
    :return: Decompressed and decoded bytes
    """
    if isinstance(data, bytes):
        source = data
    elif isinstance(data, str):
        source = bytes(data, encoding='utf-8')
    else:
        raise RuntimeError("Compression is only supported for strings and bytes")
    return gzip.decompress(base64.b64decode(source))
30dff2aad4facbece190a2b2719fb96b840fd6ee
18,334
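A round-trip check for decompress above (gzip-compress, then Base64-encode, then feed the result back in as both bytes and str):

import gzip, base64

payload = base64.b64encode(gzip.compress(b"hello world"))
assert decompress(payload) == b"hello world"
assert decompress(payload.decode("ascii")) == b"hello world"  # str input also accepted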
from typing import Dict


def largest_valued_key(dic: Dict[str, set]) -> str:
    """Find the key with the largest value."""
    biggest_size = -1
    biggest_key = None
    for key, value in dic.items():
        length = len(value)
        if length > biggest_size:
            biggest_size = length
            biggest_key = key
    assert isinstance(biggest_key, str)
    return biggest_key
2d4312217b93560514fb717251028baaeee7a6fe
18,335
def validate_entangler_map(entangler_map, num_qubits, allow_double_entanglement=False):
    """Validate a user supplied entangler map and convert entries to ints.

    Args:
        entangler_map (list[list]): An entangler map; keys are source qubit
            index (int), value is array of target qubit index(es) (int)
        num_qubits (int): Number of qubits
        allow_double_entanglement (bool): Whether two qubits may be entangled
            with each other in both directions

    Returns:
        list: Validated/converted map

    Raises:
        TypeError: entangler map is not list type or list of list
        ValueError: the index of entangler map is out of range
        ValueError: the qubits are cross-entangled.
    """
    if isinstance(entangler_map, dict):
        raise TypeError("The type of entangler map is changed to list of list.")
    if not isinstance(entangler_map, list):
        raise TypeError("Entangler map type 'list' expected")
    for src_to_targ in entangler_map:
        if not isinstance(src_to_targ, list):
            raise TypeError('Entangle index list expected but got {}'.format(type(src_to_targ)))

    ret_map = [[int(src), int(targ)] for src, targ in entangler_map]

    for src, targ in ret_map:
        if src < 0 or src >= num_qubits:
            raise ValueError(
                'Qubit entangle source value {} invalid for {} qubits'.format(src, num_qubits))
        if targ < 0 or targ >= num_qubits:
            raise ValueError(
                'Qubit entangle target value {} invalid for {} qubits'.format(targ, num_qubits))
        if not allow_double_entanglement and [targ, src] in ret_map:
            raise ValueError('Qubit {} and {} cross-entangled.'.format(src, targ))
    return ret_map
5c329eadd1aa1775e0df403ad5c0fe728e8b2750
18,336
def keyName(*args):
    """Sort values in order and separate by hyphen - used as a unique key."""
    a = [int(i) for i in args]       # make sure all are ints
    a = [str(i) for i in sorted(a)]  # sort ints and then return strings
    return '-'.join(a)
4eea1461cc308e8e4491f9c970ee23473e48c5a7
18,339
def get_declarative_base(model):
    """
    Returns the declarative base for given model class.

    :param model: SQLAlchemy declarative model
    """
    for parent in model.__bases__:
        try:
            parent.metadata
            return get_declarative_base(parent)
        except AttributeError:
            pass
    return model
35beb1d7a75f30ddf5c4246a0c659c2caac9b10a
18,340
def get_supported_versions():
    """Return the list of supported versions."""
    return ['RAS500', 'RAS41']
fe7906fec549f144cb8c9064df91792145a210fb
18,342
def one_quarter_right_rotation_escalators(escalator):
    """
    Return the escalator coordinates rotated by 1/4.

    This assumes the escalator is defined for a 4×4 board.
    """
    return ((escalator[0][1], 4 - (escalator[0][0] + 1)),
            (escalator[1][1], 4 - (escalator[1][0] + 1)))
22e4dfe3fc63f3cc1e81450b921e5c64c00e841e
18,343
def run_caffe_net_forward(net, names):
    """
    :return: The network output, a list of predicted bounding boxes
    """
    outs = net.forward(names)
    return outs
4cbe747d2214e1688326b05500558dbb95150d33
18,344
def _decorate_tree(t, series):
    """
    Attaches some default values on the tree for plotting.

    Parameters
    ----------
    t: skbio.TreeNode
        Input tree
    series: pd.Series
        Input pandas series
    """
    for i, n in enumerate(t.postorder()):
        n.size = 30
        if n.is_root():
            n.size = 50
        elif n.name == n.parent.children[0].name:
            n.color = '#00FF00'  # left child is green
        else:
            n.color = '#FF0000'  # right child is red
        if not n.is_tip():
            # set the branch length on the node being decorated
            n.length = series.loc[n.name]
    return t
2e7a14c19882938a6b6132c8a09764ab641f2be2
18,346
from functools import reduce


def AddRelativeAndCumulativeDistributions(h):
    """Adds relative and cumulative percents to a histogram."""
    if len(h) == 0:
        return []

    new_histogram = []
    total = float(reduce(lambda x, y: x + y, map(lambda x: x[1], h)))
    cumul_down = int(total)
    cumul_up = 0

    for bin, val in h:
        percent = float(val) / total
        cumul_up += val
        percent_cumul_up = float(cumul_up) / total
        percent_cumul_down = float(cumul_down) / total
        new_histogram.append(
            (bin, (val, percent,
                   cumul_up, percent_cumul_up,
                   cumul_down, percent_cumul_down)))
        cumul_down -= val

    return new_histogram
81f5868ba7b03fe34d7257850f91065070eaa400
18,348
import os


def rootname(filename):
    """Return basename root without extensions."""
    name = os.path.basename(filename)
    root, ext = os.path.splitext(name)
    while ext:
        root, ext = os.path.splitext(root)
    return root
628908dfe2ff4f64d97c5f87447e599795a2f885
18,349
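For instance, rootname above strips every stacked extension, not just the last one:

assert rootname("/tmp/archive.tar.gz") == "archive"
assert rootname("notes.txt") == "notes"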
def format_response(resp, body):
    """Format an http.client.HTTPResponse for logging."""
    s = f'{resp.status} {resp.reason}\n'
    s = s + '\n'.join(f'{name}: {value}' for name, value in resp.getheaders())
    s = s + '\n\n' + body
    return s
5866723df307a66a710bb105dc31ed9662780d78
18,350
def public(func):
    """Do not require authentication on given view method."""
    msg = "*Does not require authentication*\n"
    doc = func.__doc__
    prefix = doc[:len(doc) - len(doc.lstrip())]
    func.__doc__ = prefix + msg + doc
    func.needs_auth = False
    return func
bc93d9c250d8a285439904355a7083e62b877b8e
18,351
import struct


def read_entry(f):
    """
    Read a single entry of trace file f.
    """
    kinds = {0: 'loads', 1: 'stores', 2: 'atomics'}
    mark = f.read(1)

    # kernel start mark
    if mark == b'\x00':
        buf = f.read(2)
        size, = struct.unpack('H', buf)
        kernelname = f.read(size).decode('utf-8')
        return 'kernel', {
            'name': kernelname,
        }
    # single access mark
    elif mark == b'\xff':
        buf = f.read(24)
        f1, f2, f3 = struct.unpack('QQQ', buf)
        smid = (f1 >> 32)
        type = (f1 & 0xf0000000) >> 28
        size = (f1 & 0x0fffffff)
        addr = f2
        ctax = (f3 >> 32) & 0xffffffff
        ctay = (f3 >> 16) & 0x0000ffff
        ctaz = (f3) & 0x0000ffff
        return 'record', {
            'smid': smid,
            'size': size,
            'type': type,
            'kind': kinds[type],
            'addr': addr,
            'cta': (ctax, ctay, ctaz),
            'count': 1,
        }
    # run-length encoded access mark
    elif mark == b'\xfe':
        buf = f.read(24 + 2)
        f1, f2, f3, count = struct.unpack('QQQH', buf)
        smid = (f1 >> 32)
        type = (f1 & 0xf0000000) >> 28
        size = (f1 & 0x0fffffff)
        addr = f2
        ctax = (f3 >> 32) & 0xffffffff
        ctay = (f3 >> 16) & 0x0000ffff
        ctaz = (f3) & 0x0000ffff
        return 'record', {
            'smid': smid,
            'size': size,
            'type': type,
            'kind': kinds[type],
            'addr': addr,
            'cta': (ctax, ctay, ctaz),
            'count': count,
        }
    # eof reached
    elif mark == b'':
        return 'eof', None
    else:
        return 'invalid', {
            'byte': mark,
            'pos': f.tell()
        }
195d53ed045fd062dce2c6b4257573731d5acd30
18,353
def roh_air(dem, tempka):
    """
    Calculates the Atmospheric Air Density. This is found in Bastiaanssen
    (1995).

    /* Atmospheric Air Density
     * Requires Air Temperature and DEM */
    """
    b = (tempka - (0.00627 * dem)) / tempka
    result = 349.467 * pow(b, 5.26) / tempka
    if result > 1.5:
        result = -999.99
    elif result < 0.0:
        result = -999.99
    return result
b837baf3a0123af27a75587aed7552f6d58642ad
18,354
from typing import Tuple


def _should_include(key: str, split_range: Tuple[float, float]) -> bool:
    """
    Hashes key to a decimal between 0 and 1 and returns whether it falls
    within the supplied range.
    """
    max_precision_order = 10000
    decimal_hash = (hash(key) % max_precision_order) / max_precision_order
    return split_range[0] < decimal_hash <= split_range[1]
28cb494f5ca4681d04d3568b0c5e74a1e4d28ee3
18,355
def extendedEuclideanAlgorimth(a, b):
    """
    Compute the coefficients of Bézout's identity.

    Bézout's identity: for integers a and b there exist integers x and y
    such that a * x + b * y = d, where d = gcd(a, b).

    Parameters
    ----------
    a: int
        One of the numbers used to compute the coefficients of Bézout's identity
    b: int
        One of the numbers used to compute the coefficients of Bézout's identity

    Returns
    -------
    out: tuple
        The coefficients of Bézout's identity
    """
    r0, r1 = a, b
    s0, s1 = 1, 0
    t0, t1 = 0, 1
    while r1 != 0:
        quotient = r0 // r1
        r0, r1 = r1, r0 - quotient * r1
        s0, s1 = s1, s0 - quotient * s1
        t0, t1 = t1, t0 - quotient * t1
    return s0, t0
37f5cd45254e358bc0c0c267d2b1d77351d04711
18,356
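A quick check of the Bézout coefficients returned by the entry above, using the classic (240, 46) pair where gcd = 2:

x, y = extendedEuclideanAlgorimth(240, 46)
assert 240 * x + 46 * y == 2  # == gcd(240, 46)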
import torch


def get_knots(start, end, n_bases=5, spline_order=3):
    """
    Compute the knot positions for a spline basis on [start, end].

    Arguments:
        start, end: floats delimiting the range of the basis
        n_bases: number of basis functions
        spline_order: order of the spline
    """
    x_range = end - start
    start = start - x_range * 0.001
    end = end + x_range * 0.001

    # mgcv annotation
    m = spline_order - 1
    nk = n_bases - m  # number of interior knots

    dknots = (end - start) / (nk - 1)
    knots = torch.linspace(
        start=start - dknots * (m + 1),
        end=end + dknots * (m + 1),
        steps=nk + 2 * m + 2
    )
    return knots.float()
824cac0f71a1790698c14956db074a484417de9a
18,357
def make_vlan_name(parent, vlan_id):
    """
    Create a VLAN name.

    Parameters
    ----------
    parent : str
        The parent interface.
    vlan_id :
        The vlan id.

    Returns
    -------
    str
        The VLAN name.
    """
    return '{}.{}'.format(parent, vlan_id)
52793977737792726066de674d3da854bd3cf129
18,358
def index_to_coord(index):
    """Returns relative chunk coordinates (x, y, z) given a chunk index.

    Args:
        index (int): Index of a chunk location.
    """
    y = index // 256
    z = (index - y * 256) // 16
    x = index - y * 256 - z * 16
    return x, y, z
35c3aa7efdd9d820c1e8ca1e269d74a4b7d082ca
18,359
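The decoding in index_to_coord above assumes a 16×16 chunk layer (x fastest, then z, then y); for example:

assert index_to_coord(0) == (0, 0, 0)
assert index_to_coord(273) == (1, 1, 1)  # 1 + 1 * 16 + 1 * 256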
import math


def diagonal(n: int, p: int) -> int:
    """
    We want to calculate the sum of the binomial coefficients on a given
    diagonal. The sum on diagonal 0 is 8 (we'll write it S(7, 0); 7 is the
    number of the line where we start, 0 is the number of the diagonal). In
    the same way, S(7, 1) is 28 and S(7, 2) is 56.

    :param n: n is the line where we start
    :param p: p is the number of the diagonal
    :return: the sum of the binomial coefficients on a given diagonal
    """
    if n < 0:
        raise ValueError('ERROR: invalid n ({}) value. n must be >= 0'.format(n))
    if p < 0:
        raise ValueError('ERROR: invalid p ({}) value. p must be >= 0'.format(p))
    # The sum along the diagonal collapses to a single binomial coefficient
    # (the hockey-stick identity): S(n, p) = C(n + 1, p + 1).
    return math.factorial(n + 1) // (math.factorial(p + 1) * math.factorial(n - p))
2a8545851c407cbee0eb839bc5c8507d52722c33
18,360
import torch


def orthogonal_random_matrix_(rows, columns, device):
    """Generate a random matrix whose columns are orthogonal to each other
    (in groups of size `rows`) and whose norms are drawn from the chi
    distribution with `rows` degrees of freedom (namely the norm of a
    `rows`-dimensional vector distributed as N(0, I)).
    """
    w = torch.zeros([rows, columns], device=device)
    start = 0
    while start < columns:
        end = min(start + rows, columns)
        block = torch.randn(rows, rows, device=device)
        norms = torch.sqrt(torch.einsum("ab,ab->a", block, block))
        Q, _ = torch.qr(block)  # Q is orthonormal
        w[:, start:end] = (
            Q[:, :end - start] * norms[None, :end - start]
        )
        start += rows
    return w
bfd40861fb78ecebe7cfceeaf5ab32044cf1a8dc
18,361
def parse_pairs(pairs):
    """Parse lines like X=5,Y=56, and return a dict."""
    # e.g. ENST00000002501=0.1028238844578573,ENST00000006053=0.16846186988367085,
    # the last elem is always "" (trailing comma)
    data = {x.split("=")[0]: x.split("=")[1] for x in pairs.split(",")[:-1]}
    return data
f39c329e74ec0e749754386cbffbea186ba43a3c
18,363
def to_snake_case(camel_str):
    """
    From https://stackoverflow.com/questions/1175208/elegant-python-function-to-convert-camelcase-to-snake-case

    Note that this function doesn't cover all cases in general, but it
    covers ours.
    """
    return ''.join(['_' + c.lower() if c.isupper() else c
                    for c in camel_str]).lstrip('_')
43737481fc871051dc816c969d51174f84c29e3d
18,364
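For example, the conversion above handles plain camelCase, while acronyms show the limitation its docstring warns about:

assert to_snake_case("camelCase") == "camel_case"
assert to_snake_case("HTTPServer") == "h_t_t_p_server"  # acronyms are split letter by letter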
def symbol_constructor(loader, node):
    """YAML constructor for Ruby symbols.

    This constructor may be registered with the '!ruby/sym' tag in order to
    support Ruby symbol serialization (you can use
    :py:meth:`register_constructors` for that), so it just needs to return
    the string scalar representation of the key.
    """
    return loader.construct_scalar(node)
5539c9f8043da419eebb35a41744338ccf7c6636
18,365
def drop_peaks(dataframe, data, cutoff):
    """
    Filters out the peaks larger than a cut-off value in a data series.

    Parameters
    ----------
    dataframe : pd.DataFrame
        dataframe from which the peaks need to be removed
    data : str
        the name of the column to use for the removal of peak values
    cutoff : int
        cut-off value to use for removing peaks; values with an absolute
        value larger than this cut-off will be removed from the data

    Returns
    -------
    pd.DataFrame
        dataframe with the peaks removed
    """
    dataframe = dataframe.drop(dataframe[abs(dataframe[data]) > cutoff].index)
    return dataframe
d645b8519a7da9d4dff588ccc1bbf50ecc801d34
18,366
import argparse


def _parser():
    """Take care of all the argparse stuff.

    :returns: the args
    """
    parser = argparse.ArgumentParser(description='Xml generator for tapas submission')
    parser.add_argument("fname", help='Input file name', type=str)
    parser.add_argument("-o", "--output_file", help='Output file', type=str,
                        default="tapas_request.xml")
    parser.add_argument("-l", "--listspectra", action="store_true",
                        help="Was filename a DRACS list of nod spectra for the "
                             "observation (without fits extensions)")
    parser.add_argument("-r", "--resolvpower", default=False,
                        help="Specify instrument resolution power, defined as "
                             "λ/FWHM for convolution")
    parser.add_argument("-s", "--sampling", type=int, default=10,
                        help="Sampling ratio - the number of points per FWHM interval "
                             "on which the convolved transmission will be sampled.")
    parser.add_argument("-f", "--instrument_function",
                        help="Instrument function - gaussian or none", type=str,
                        default="gaussian", choices=["none", "gaussian"])
    parser.add_argument("-u", "--unit", help="Spectra unit",
                        choices=["air", "vacuum", "wavenumber"], type=str,
                        default="vacuum")
    parser.add_argument("-b", "--berv",
                        help="Have BERV RV correction applied to the Tapas spectra",
                        action="store_true")
    parser.add_argument("-t", "--tapas_format", help="Tapas file format", type=str,
                        default="ascii", choices=["ascii", "fits", "netcdf", "vo"])
    parser.add_argument("-c", "--constituents",
                        help="Atmospheric constituents for spectra", type=str,
                        default="all",
                        choices=["all", "ray", "h2o", "o2", "o3", "co2", "ch4",
                                 "n2o", "not_h2o"])
    parser.add_argument("-i", "--request_id", type=int, default=0,
                        help="Request ID number, useful to change per species "
                             "identification. E.g. 10=all, 11=ray, 12=h2o etc.")
    parser.add_argument("--wl_min", help="Minimum wavelength", default=False)
    parser.add_argument("--wl_max", help="Maximum wavelength", default=False)
    parser.add_argument("-n", "--request_number",
                        help="Tapas request number. Iterate on previous request number.",
                        type=int)
    parser.add_argument("-v", "--verbose", action="store_true", help="Turn on verbosity.")
    args = parser.parse_args()
    return args
37c1183fd77235944bd06c5ae5e630d50edccccb
18,367
def decap(df, neck, end_func=None):
    """Separate the head from the body of a dataframe."""
    if end_func is None:
        end_func = lambda h, b: len(b) - 1
    head = df.iloc[:neck]
    body = df.iloc[neck:]
    end = end_func(head, body)
    body = body[:end]
    return head, body
06da0736ce37308a2794722ddc502789fd9a0a5e
18,368
import imghdr


def what(pic_filename):
    """
    Get the real type of an image.

    :param pic_filename: image file name
    :return: the detected image type
    """
    return imghdr.what(pic_filename)
ab57fb251457abc692c829b7eb34fef7dbeb0ac9
18,372
import os


def get_branch(default="master"):
    """
    Derive the selected branch.

    We first look to the environment variable INPUT_BRANCH, meaning that the
    user set the branch variable. If that is unset we parse GITHUB_REF. If
    both of those are unset, we fall back to `default` (master).

    Returns:
        (str) the branch found in the environment, otherwise master.
    """
    # First check goes to the user setting in the action
    branch = os.getenv("INPUT_BRANCH")
    if branch:
        return branch

    # Second check is for GITHUB_REF
    branch = os.getenv("GITHUB_REF")
    if branch:
        branch = branch.replace("refs/heads/", "")
        return branch

    return default
6f85a83476500ce927b4f580d91d61564606485a
18,373
def get_maximal_alignment(address):
    """
    Calculate the maximal alignment of the provided memory location.
    """
    alignment = 1
    # Double the alignment while the next power of two still divides the
    # address (capped at 256), so we return the largest divisor, not the
    # first non-divisor.
    while address % (alignment * 2) == 0 and alignment < 256:
        alignment *= 2
    return alignment
efd24a7030b968d9f7630390077b3a0642fd30a2
18,374
import logging


def create_logger(log_name, log_file):
    """
    Generate a logger for the whole program.

    :param log_name: The name of the logger
    :param log_file: The output log file to save logging
    :return: logger
    """
    logger = logging.getLogger(log_name)
    logger.setLevel(level=logging.INFO)
    formatter = logging.Formatter('%(asctime)s - %(name)s - %(levelname)s - %(message)s')

    stream_handler = logging.StreamHandler()  # also print to the console
    stream_handler.setFormatter(formatter)
    logger.addHandler(stream_handler)

    file_handler = logging.FileHandler(log_file)
    file_handler.setLevel(logging.INFO)
    file_handler.setFormatter(formatter)
    logger.addHandler(file_handler)
    return logger
461cd0f186d129cf011387e0d5dd70689dc1409e
18,375
def intersection(iterableA, iterableB, key=lambda x: x):
    """Return the intersection of two iterables with respect to `key` function.

    Used to compare two sets of strings case insensitively.
    """
    def unify(iterable):
        d = {}
        for item in iterable:
            d.setdefault(key(item), []).append(item)
        return d

    A, B = unify(iterableA), unify(iterableB)
    return [(A[k], B[k]) for k in A if k in B]
1e38162dc7508e5621618b089d64be8ae4b57669
18,378
def get_location_from_action_name(action):
    """Return the location from the name of the action."""
    if action.startswith('at_'):
        location_name = action[len('at_'):]
    elif action.startswith('go_to_'):
        location_name = action[len('go_to_'):]
    elif action.startswith('look_at_'):
        location_name = action[len('look_at_'):]
    else:
        location_name = None
    return location_name
c918630b03c4b233d6b976ebb31fd295a94dee2b
18,380
import torch


def array_to_tensor(array):
    """
    Converts a numpy.ndarray (N x H x W x C) to a torch.FloatTensor of shape
    (N x C x H x W), OR converts a numpy.ndarray (H x W x C) to a
    torch.FloatTensor of shape (C x H x W).
    """
    if array.ndim == 4:  # NHWC
        tensor = torch.from_numpy(array).permute(0, 3, 1, 2).float()
    elif array.ndim == 3:  # HWC
        tensor = torch.from_numpy(array).permute(2, 0, 1).float()
    else:  # everything else
        tensor = torch.from_numpy(array).float()
    return tensor
8fe8bfcbad60c3b14fa7390af2051ebaf2d44021
18,381
import itertools


def subsequences(iterable, seq=2):
    """Return subsequences of an iterable.

    Each element in the generator will be a tuple of `seq` elements that
    were ordered in the iterable.
    """
    iters = itertools.tee(iterable, seq)
    for i, itr in enumerate(iters):
        for _ in range(i):
            next(itr, None)
    return zip(*iters)
da436028d37f74729a2b5c2dfb74716da61efb2d
18,382
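For instance, with the default window of 2, subsequences above yields the consecutive pairs of the input:

assert list(subsequences([1, 2, 3, 4])) == [(1, 2), (2, 3), (3, 4)]
assert list(subsequences("abc", seq=3)) == [('a', 'b', 'c')]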
def sign(x):
    """
    Return 1 if x is positive, -1 if it's negative, and 0 if it's zero.
    """
    if x > 0:
        return 1
    elif x < 0:
        return -1
    else:
        return 0
7625903a16419c8914b92c2c1273c34bd646d9d2
18,383
def copy_metadata(nb_data):
    """Copy the metadata of a notebook.

    Args:
        nb_data (JSON): JSON data loaded from a jupyter notebook

    Returns:
        dict: metadata copied from nb_data
    """
    metadata = dict()
    metadata["metadata"] = nb_data["metadata"]
    metadata["nbformat"] = nb_data["nbformat"]
    metadata["nbformat_minor"] = nb_data["nbformat_minor"]
    return metadata
bfd0c0e53097b4a47150b5a2d7a35fabbcc03098
18,387