Columns: content — string (lengths 39 to 9.28k) · sha1 — string (length 40) · id — int64 (values 8 to 710k)
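If the sha1 column is the hex SHA-1 digest of the content field (an assumption suggested by the fixed 40-character length, not stated by the schema), a row-level integrity check might look like:

import hashlib

def check_row(content: str, sha1: str) -> bool:
    # Hypothetical helper: assumes `sha1` is the hex SHA-1 digest of the
    # UTF-8 encoded `content` string.
    return hashlib.sha1(content.encode('utf-8')).hexdigest() == sha1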
def TestSuccessful(test_output, test_result):
    """Returns true if the return value of CompareResults is a test success."""
    if 'error' in test_result:
        return (False, test_result['error'])
    if 'image_file' not in test_output:
        return (False, 'No output image file produced.')
    if 'expected_image_file' not in test_result:
        return (False, 'No expected image results file found. For Steel, all '
                'tests must have associated "*-expected.png" files.')
    if 'image_diff_file' not in test_result:
        return (False, 'Could not diff expected/actual images because they had '
                'different dimensions or formats.')
    if test_result['diff_num_non_zero_pixels'] > 0:
        return (False, 'Image diff failed, there is at least one pixel difference')
    return (True, '')
dcbd38e4e1b025226e349ccfbd380ae35156dc0b
169,626
def get_dim_wind(self):
    """Get the first two dimensions of the winding matrix

    Parameters
    ----------
    self : Winding
        A Winding object

    Returns
    -------
    (Nrad, Ntan): tuple
        Number of layers in radial and tangential direction
    """
    return (1, 1)
7657697dd8e26d20fd1ce370f7aee7a0eeff6328
587,443
def normalize(tokens, case=False):
    """
    Lowercases tokens (unless ``case`` is True) and strips surrounding
    whitespace, dropping tokens that are empty after stripping.
    """
    tokens = [str(t).lower() if not case else str(t) for t in tokens]
    tokens = [t.strip() for t in tokens if len(t.strip()) > 0]
    return tokens
998cfb6632db2830700ff71698e2d303fdc22718
379,637
def get_vocabulary_info(feature_info, file_io):
    """
    Extract the vocabulary (encoding and values) from the stated
    vocabulary_file inside feature_info.

    Parameters
    ----------
    feature_info : dict
        Dictionary representing the configuration parameters for the
        specific feature from the FeatureConfig
    file_io : FileIO object
        FileIO handler object for reading and writing files

    Returns
    -------
    vocabulary_keys : list
        values of the vocabulary stated in the vocabulary_file.
    vocabulary_ids : list
        corresponding encoding ids (values of the vocabulary_keys).

    Notes
    -----
    Args under `feature_layer_info`:
        vocabulary_file : str
            path to the vocabulary CSV file for the input tensor containing
            the vocabulary to look up. Uses the column named "key" as the
            vocabulary, or the 1st column if no "key" column is present.
        max_length : int
            max number of rows to consider from the vocabulary file.
            If null, considers the entire file vocabulary.
        default_value : int
            default value stated in the config, used to replace missing
            data points.
    """
    args = feature_info.get("feature_layer_info")["args"]
    vocabulary_df = file_io.read_df(args["vocabulary_file"])
    if "key" in vocabulary_df.columns:
        vocabulary_keys = vocabulary_df["key"]
    else:
        vocabulary_keys = vocabulary_df.iloc[:, 0]
    if "max_length" in args:
        vocabulary_keys = vocabulary_keys[: args["max_length"]]
    if "default_value" in feature_info:
        vocabulary_keys = vocabulary_keys.fillna(feature_info["default_value"])
    vocabulary_keys = vocabulary_keys.values
    if "dropout_rate" in args:
        # NOTE: If a dropout_rate is specified, then reserve 0 as the OOV index
        vocabulary_ids = (
            vocabulary_df["id"].values
            if "id" in vocabulary_df
            else list(range(1, len(vocabulary_keys) + 1))
        )
        if 0 in vocabulary_ids:
            raise ValueError(
                "Can not use ID 0 with dropout. Use categorical_embedding_with_vocabulary_file instead."
            )
    else:
        vocabulary_ids = (
            vocabulary_df["id"].values
            if "id" in vocabulary_df
            else list(range(len(vocabulary_keys)))
        )
    return vocabulary_keys, vocabulary_ids
4e336995c8364b23da325c17e1a63fe2ea69dc27
578,004
import torch


def _get_intermediate_features(resnet, input_):
    """
    Receives a ResNet50 network and a valid input and extracts intermediate
    features. Extracts features right after each conv or fc layer (plus one
    right before the last fc) so 51 in total.

    Arguments:
        resnet (nn.Module): A ResNet50 from torchvision.models.
        input_ (torch.Tensor): Input to the Resnet (N x C x H x W)

    Returns:
        x (torch.Tensor): Output of the resnet (same as resnet(input_))
        features (list): A list with 51 feature representations. Each element
            is a (N x num_features).

    Note:
        To make sense of it, check the original Resnet50 forward. I
        essentially just copied everything here.
    """
    features = []
    x = resnet.conv1(input_)
    features.append(x.mean(dim=(-1, -2)).cpu().numpy())
    x = resnet.bn1(x)
    x = resnet.relu(x)
    x = resnet.maxpool(x)
    for layer in [resnet.layer1, resnet.layer2, resnet.layer3, resnet.layer4]:
        for bottleneck in layer:
            identity = x
            out = bottleneck.conv1(x)
            features.append(out.mean(dim=(-1, -2)).cpu().numpy())
            out = bottleneck.bn1(out)
            out = bottleneck.relu(out)
            out = bottleneck.conv2(out)
            features.append(out.mean(dim=(-1, -2)).cpu().numpy())
            out = bottleneck.bn2(out)
            out = bottleneck.relu(out)
            out = bottleneck.conv3(out)
            features.append(out.mean(dim=(-1, -2)).cpu().numpy())
            out = bottleneck.bn3(out)
            if bottleneck.downsample is not None:
                identity = bottleneck.downsample(x)
            out += identity
            out = bottleneck.relu(out)
            x = out
    x = resnet.avgpool(x)
    x = torch.flatten(x, 1)
    features.append(x.cpu().numpy())
    x = resnet.fc(x)
    features.append(x.cpu().numpy())
    return x, features
5c31e45e92bf1775d9c5fd20fd2157c293fb0297
359,015
def combin(n, k):
    """Number of combinations C(n,k)"""
    if k > n // 2:
        k = n - k
    x = 1
    y = 1
    i = n - k + 1
    while i <= n:
        x = (x * i) // y
        y += 1
        i += 1
    return x
7d7ce529652170dc8da14a5370c11e8b2ea0f721
251,725
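The running division by y keeps every intermediate value an exact integer (each partial product is itself a binomial coefficient), so no floating point is involved. A quick doctest-style check:

>>> combin(5, 2)
10
>>> combin(10, 3)
120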
from typing import Any
from typing import Iterable

import itertools


def prefix(val: Any, it: Iterable):
    """Add a value to the beginning of an iterable. Return an iterable.

    >>> tuple(prefix(1, (2, 3, 4)))
    (1, 2, 3, 4)
    """
    return itertools.chain((val,), it)
c43d232fea9e4d99f0d21b12b7d0cd56770e1692
397,007
def until(n, filter_func, v):
    """Build a list: list( filter( filter_func, range(n) ) )

    >>> list( filter( lambda x: x%3==0 or x%5==0, range(10) ) )
    [0, 3, 5, 6, 9]
    >>> until(10, lambda x: x%3==0 or x%5==0, 0)
    [0, 3, 5, 6, 9]
    """
    if v == n:
        return []
    if filter_func(v):
        return [v] + until(n, filter_func, v + 1)
    else:
        return until(n, filter_func, v + 1)
1996d5a27f964267122604937fa1163fa0da20cd
664,441
import six
import codecs


def normalise_charset_name(s):
    """Convert an encoding name to the form implied in the SGF spec.

    In particular, normalises to 'ISO-8859-1' and 'UTF-8'.

    Raises LookupError if the encoding name isn't known to Python.
    """
    if not isinstance(s, six.text_type):
        s = s.decode('ascii')
    return (codecs.lookup(s).name.replace("_", "-").upper()
            .replace("ISO8859", "ISO-8859"))
5aa0303376231577aed7af03f9224c899adf0c38
530,406
import torch


def CE_loss(x, y):
    """Computes the binary cross-entropy loss of the given prediction x and target y"""
    return torch.nn.functional.binary_cross_entropy(x, y)
25d1f5362f2e1ab86fe11410193ff3957c874299
406,951
def create_daily_schedule_details(api, api_exception, custom_interval, start_time):
    """ Creates a ScheduleDetails object for use with a scheduled task for
    daily execution using a custom interval.

    :param api: The Deep Security API modules.
    :param api_exception: The Deep Security API exception module.
    :param custom_interval: The interval between each run. For example, '2' will start a run every second day.
    :param start_time: The epoch time in milliseconds when the scheduled task first runs.
    :return: A ScheduleDetails object.
    """
    # Create a ScheduleDetails object and set the recurrence type
    daily_schedule = api.ScheduleDetails()
    daily_schedule.recurrence_type = "daily"

    # Specify when the task runs
    daily_schedule_parameters = api.DailyScheduleParameters()

    # Use a custom frequency type to run the task at daily intervals.
    # Every day and only weekdays are other available frequency types.
    daily_schedule_parameters.frequency_type = "custom"
    daily_schedule_parameters.custom_interval = custom_interval
    daily_schedule_parameters.start_time = start_time

    # Add the schedule parameters to the schedule details
    daily_schedule.daily_schedule_parameters = daily_schedule_parameters

    return daily_schedule
80dab4ccb9141bd6da53d907a4a55cdaf52c1950
385,321
def factorial(of: int, down_to: int = 0) -> int:
    """
    Returns the multiplication of all positive integers from ``of`` down to
    (but not including) ``down_to``.

    :param of: The greatest positive integer to include in the product.
    :param down_to: The greatest positive integer, less than ``of``, to
        exclude from the product.
    :return: The factorial of ``of`` down to ``down_to``. If ``of`` equals
        ``down_to``, the product is empty and the result is 1.
    """
    if down_to < 0:
        raise ArithmeticError(f"'down_to' cannot be less than 0, got {down_to}")
    if of < down_to:
        raise ArithmeticError(f"'of' must be at least 'down_to', got 'of' = {of}, 'down_to' = {down_to}")
    result = 1
    while of > down_to:
        result *= of
        of -= 1
    return result
72f50fb3359ecaf828991a29008ebded2f25c089
384,997
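A short doctest-style check of the falling-factorial semantics, matching the corrected docstring:

>>> factorial(5)      # 5 * 4 * 3 * 2 * 1
120
>>> factorial(5, 2)   # 5 * 4 * 3
60
>>> factorial(3, 3)   # empty product
1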
def remove_suffix(name):
    """
    Remove suffix from given name string
    @param name: string, given name string to process
    @return: string, name without suffix
    """
    # By convention, we split the names when we find a _ character
    edits = name.split('_')

    # If there is no _ it means that there is no suffix, so we return the full name
    if len(edits) < 2:
        return name

    # The suffix is _ + the last split string, and the name is the complete
    # name without the suffix
    suffix = '_' + edits[-1]
    name_no_suffix = name[:-len(suffix)]
    return name_no_suffix
c54a5902fae1d384a333e3610aac67900d388838
562,826
from typing import Optional


def try_parse_int(s: str) -> Optional[int]:
    """
    Convert `s` to an integer, if possible

    Parameters
    ----------
    s : string
        A string which may represent an integer

    Returns
    -------
    int_s : int
        An integer --- OR --- None if `s` cannot be parsed into an `int`.
    """
    try:
        return int(s)
    except ValueError:
        return None
eb851aa2fdaf85367b6695b367d33df9dd12f80c
211,879
import re
import logging


def get_device(device: str):
    """Get device (cuda and device order) from device name string.

    Args:
        device: Device name string.

    Returns:
        Tuple[bool, Optional[int]]: A tuple containing a flag for CUDA device
        and the CUDA device order. If the CUDA device flag is `False`, the
        CUDA device order is `None`.
    """
    # obtain device
    device_num = None
    if device == 'cpu':
        cuda = False
    else:
        # match something like cuda, cuda:0, cuda:1
        matched = re.match(r'^cuda(?::([0-9]+))?$', device)
        if matched is None:
            # load with CPU
            logging.warning('Wrong device specification, using `cpu`.')
            cuda = False
        else:
            # load with CUDA
            cuda = True
            group = matched.groups()[0]
            # a bare "cuda" has no index group; default to device 0
            device_num = int(group) if group is not None else 0
    return cuda, device_num
df60ac6a7a5f17c3d2454e67e02ef38804bed6c1
100,939
from typing import List

import ast


def _ast_for_fiddler(func_name: str, param_name: str,
                     body: List[ast.AST]) -> ast.FunctionDef:
    """Returns an `ast.FunctionDef` for the fiddler function.

    Args:
        func_name: The name of the fiddler function.
        param_name: The name of the fiddler function's parameter.
        body: The body of the fiddler function.
    """
    return ast.FunctionDef(
        name=func_name,
        args=ast.arguments(
            args=[ast.arg(arg=param_name)],
            posonlyargs=[],
            kwonlyargs=[],
            kw_defaults=[],
            defaults=[]),
        body=body,
        decorator_list=[])
8c531c9bb931564560cd4bdb9db9862c9d6e7523
557,825
from typing import List


def opt_filter_string(filter_string: str, id_list: List[int]):
    """
    Generate string for option-based filter.

    Generates a filter for filtering for specific rows based on the unique
    element ID that will be added to the main SQL query.

    Args:
        filter_string: Start of the filter string containing the column.
        id_list: List of element IDs that will be filtered for.

    Returns:
        filter_string: The formatted options filter string.
    """
    id_list_string = ", ".join([str(element_id) for element_id in id_list])
    return filter_string + f"({id_list_string})"
6ef5680d95bc2ac3e894002e599b11ba2c634366
645,620
def get_reasonable_repetitions(natoms):
    """
    Choose the number of repetitions according to the number of atoms in
    the system
    """
    if natoms < 4:
        return [3, 3, 3]
    if 4 <= natoms < 15:
        return [2, 2, 2]
    if 15 <= natoms < 50:
        return [2, 2, 1]
    return [1, 1, 1]
96d19519a86afd31f7771c4ac3c5e0e4104ba457
511,972
import inspect


def _get_fit_parameter_names(fn):
    """
    Get the name of all but the first parameter to the given function
    """
    return list(inspect.signature(fn).parameters.keys())[1:]
cf04daa19863746d06cf2c8dc38e70103662af27
361,529
from pathlib import Path

import re


def sanitize_path(path,
                  subchar='-',
                  invalid_chars=r'[^a-zA-Z0-9-_.]',
                  invalid_comps=('.', '..', '.git'),
                  force_suffix='.git'):
    """Sanitizes a path by making it relative and git safe.

    Any part coming in will be made relative first (by cutting leading
    slashes). Invalid characters (see ``invalid_chars``) are removed and
    replaced with ``subchar``. A suffix can be forced upon the path as well.

    :param path: Path to sanitize (string or Path).
    :param subchar: Character used to substitute illegal path components with.
    :param invalid_chars: A regular expression that matches invalid characters.
    :param invalid_comps: A collection of path components that are not allowed.
    :param force_suffix: The suffix to force onto each path.
    :return: A Path instance denoting a relative path.
    """
    unsafe = Path(path)

    # turn absolute path into a relative one by stripping the leading '/'
    if unsafe.is_absolute():
        unsafe = unsafe.relative_to(unsafe.anchor)

    # every component must be alphanumeric
    components = []
    for p in unsafe.parts:
        # remove invalid characters
        clean = re.sub(invalid_chars, subchar, p)

        # if the name is empty, ignore it. this usually shouldn't happen
        # with pathlib
        if not clean:
            continue

        # if the name is potentially dangerous, reject it
        if clean in invalid_comps:
            raise ValueError('{} is a reserved path component'.format(clean))

        components.append(clean)

    if not components:
        raise ValueError('Path too short')

    # append a final suffix if not present
    if force_suffix and not components[-1].endswith(force_suffix):
        components[-1] += force_suffix

    return Path(*components)
b2722acbde15588944dd395631b5dde7f917aedc
289,725
import torch


def any_nan(tensor: torch.Tensor) -> bool:
    """Returns true if the tensor contains a NaN

    Args:
        tensor (torch.Tensor): the input tensor

    Returns:
        bool: true if contains a NaN
    """
    return bool(torch.isnan(tensor).any().item())
ba14b4efac9930b5dd67f32a7e38ec1db049e714
499,619
def reSubObject(pattern, string, repl=None):
    """
    like re.sub, but replacements don't have to be text; returns an array of
    alternating unmatched text and match objects instead. If repl is
    specified, it's called with each match object, and the result then shows
    up in the array instead.
    """
    lastEnd = 0
    pieces = []
    for match in pattern.finditer(string):
        pieces.append(string[lastEnd:match.start()])
        if repl:
            pieces.append(repl(match))
        else:
            pieces.append(match)
        lastEnd = match.end()
    pieces.append(string[lastEnd:])
    return pieces
3e59d54a7a28f5793df71be53cbd39ddd3248548
72,721
def convert_value(var):
    """Convert the metric value from string into python type."""
    if var["type"] == "number":
        return float(var["value"])
    if var["type"] == "boolean":
        return var["value"] == "true"
    if var["type"] == "string":
        return var["value"]
    print("can't convert unknown type {} for var {}".format(var["type"], var["name"]))
    return None
52dc9ebe2f66de7638dc8f5ab8261c1e2f76e199
61,546
from typing import Union
from pathlib import Path


def pathing(path: Union[str, Path], new: bool = False) -> Path:
    """
    Guarantees correct expansion rules for pathing.

    :param Union[str, Path] path: path of folder or file you wish to expand.
    :param bool new: will check that the destination does not exist if new
        (will check the parent path regardless).
    :return: A pathlib.Path object.

    :example:
    >>> pathing('~/Desktop/folderofgoodstuffs/')
    /home/user/Desktop/folderofgoodstuffs
    """
    path = Path(path)
    # Expand tilde-shortened path or local path.
    if str(path)[0] == '~':
        path = path.expanduser()
    else:
        path = path.absolute()
    # Make sure new paths don't exist.
    # Make sure existing paths actually exist.
    if new:
        if not path.parent.exists():
            raise ValueError(f'ERROR ::: Parent directory of {path} does not exist.')
        if path.exists():
            raise ValueError(f'ERROR ::: {path} already exists!')
    else:
        if not path.exists():
            raise ValueError(f'ERROR ::: Path {path} does not exist.')
    return path
cc65980464cbb07c9fbea39d95e824ae2f1488ee
291,160
from collections import OrderedDict


def _remove_duplicates(input_list):
    """Remove duplicate entries from a list.

    This operation preserves the order of the list's elements.

    Args:
        input_list (list[Hashable]): The list whose duplicate entries shall
            be removed

    Returns:
        list[Hashable]: The input list without duplicate entries
    """
    return list(OrderedDict.fromkeys(input_list))
9613674ceb137eb1a46a394efda1617593203888
595,810
def unique_wrt(reqs, main_reqs):
    """Ensures reqs doesn't contain anything in main_reqs."""
    return list(set(reqs) - set(main_reqs))
48be213a6a8237b9b498ce8272603b2f734e0035
283,453
def blend(arr1, arr2, alpha=0.5):
    """Blend 2 arrays together, mixing with alpha.

    Parameters
    ----------
    arr1 : array
        Image 1.
    arr2 : array
        Image 2.
    alpha : float
        Higher alpha makes image more like image 1.

    Returns
    -------
    array
        Resulting image.
    """
    return alpha * arr1 + (1. - alpha) * arr2
ad94bde493d7f0bcb6e0207992a9b7106483de08
342,942
def wbi(b2, b4):
    """Water Body Index (Domenech and Mallet, 2014).

    .. math:: WBI = (b2 - b4) / (b2 + b4)

    :param b2: Blue.
    :type b2: numpy.ndarray or float
    :param b4: Red.
    :type b4: numpy.ndarray or float
    :returns WBI: Index value

    .. Tip::
        Domenech, E., Mallet, C. 2014. Change Detection in High resolution
        land use/land cover geodatabases (at object level). EuroSDR official
        publication, 64.
    """
    WBI = (b2 - b4) / (b2 + b4)
    return WBI
40b82a3011f151cbf32158b2a7354cdbb8498353
417,907
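Since the index works elementwise on scalars or arrays, a minimal doctest-style check (assuming NumPy inputs) might be:

>>> import numpy as np
>>> wbi(np.array([0.3, 0.2]), np.array([0.1, 0.2]))
array([0.5, 0. ])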
def perfect_inserter(t, keys):
    """Insert keys into tree t such that t is perfect.

    Args:
        t (BinaryTree): An empty tree.
        keys (list): A sorted list of keys.
    """
    def f(n):
        """find the point to partition n keys for a perfect tree"""
        # x = 1
        # while x <= n//2:
        #     x *= 2
        x = 1 << (n.bit_length() - 1)
        if x // 2 - 1 <= (n - x):
            return x - 1
        else:
            return n - x // 2

    n = len(keys)
    if n == 0:
        return
    else:
        x = f(n)
        t.insert(keys[x])
        perfect_inserter(t, keys[:x])
        perfect_inserter(t, keys[x + 1:])
aac46a29e52dc2dcf7ef20cd0580488d4312a906
385,750
def parse_modifiers(modifiers):
    """Parse modifiers (regex match object) into type sets then return them."""
    weaknesses, immunities = set(), set()
    sections = modifiers.group(1).split(';')
    for section in sections:
        split = [w.strip(',') for w in section.strip().split(' ')]
        for word in split[2:]:
            if split[0] == 'weak':
                weaknesses.add(word)
            else:
                immunities.add(word)
    return (weaknesses, immunities)
1a144fa6317e58a1de7d302fe17ca70aba61bd1b
94,059
def _rectify_column(df):
    """remove string spaces from the column names of the PNBOIA dataframe

    Parameters
    ----------
    df : pandas dataframe

    Returns
    -------
    Pandas dataframe
    """
    df1 = df.copy(deep=True)
    k = {i: i.replace(' ', '') for i in df.keys()}
    df1 = df1.rename(columns=k)
    return df1
2e3e66fe5f9d7746c17fc19a2a6747f484ea05c7
429,297
def cmpid(a, b):
    """Compare two objects by their Python id."""
    if id(a) > id(b):
        return 1
    if id(a) < id(b):
        return -1
    return 0
f698d8e757418d9a2a6baa6596d6643b02e2a336
126,475
def serialise_roles(Member):
    """Turns a list of roles into a comma-separated string."""
    return ",".join([Role.name for Role in Member.roles])
a4c98513af25d65f949339d27ae6383fa5e89d34
436,056
def std_average(four_month_std):
    """
    A function that computes the std specific average. This is just the
    average.

    :param: four_month_std
        A series with the historical % change std
    :return: float
        The average
    """
    return four_month_std.mean()
ae76aa1ef8400595c72a7e58e2e883f0fde2a7be
572,169
def check_file_extension(filename, extension_list):
    """
    Check if the file extension matches the authorized extension list

    :param string filename: Filename to check
    :param list extension_list: The list of extensions the file has to match
    :return: True if the extension is correct, False otherwise
    """
    if len(filename.split('.')) > 1:
        extension = filename.split('.')[-1]
        if extension in extension_list:
            return True
        else:
            return False
    else:
        return False
91f184fc940b8d71c77ebd56a5d229a864906421
634,639
import hashlib


def calculate_hash(file_path: str, block_size: int = 10240, hash_content=True):
    """
    Helper function that calculates the hash of a file/folder.

    If the given path is a folder: the hash of the absolute path string will
    be calculated.
    If the given path is a file: the hash of the binary content will be
    calculated.

    :param file_path: complete path to file with extension
    :param block_size: read size per block when hashing file content
    :param hash_content: Hash the file content (True) or the file_path string (False)
    :return:
    """
    hash_sum = hashlib.md5()
    if not hash_content:
        hash_sum.update(file_path.encode())
    else:
        with open(file_path, "rb") as f:
            for block in iter(lambda: f.read(block_size), b""):
                hash_sum.update(block)
    return hash_sum.hexdigest()
bc2d2fafe61ad5747ee89f36820964a175367cef
207,261
def weather_scale(wx_type):
    """Returns parallel lists of ratings (0-4, a relative severity scale) and
    descriptions for the given comma-separated weather type string. More
    documentation here:
    https://www.visualcrossing.com/resources/documentation/weather-api/weather-condition-fields/
    """
    wx = {
        'type_43': {'type': 43, 'description': 'Clear', 'rating': 0},
        'type_27': {'type': 27, 'description': 'Sky Coverage Decreasing', 'rating': 0},
        'type_28': {'type': 28, 'description': 'Sky Coverage Increasing', 'rating': 0},
        'type_29': {'type': 29, 'description': 'Sky Unchanged', 'rating': 0},
        'type_42': {'type': 42, 'description': 'Partially cloudy', 'rating': 0},
        'type_2': {'type': 2, 'description': 'Drizzle', 'rating': 1},
        'type_3': {'type': 3, 'description': 'Heavy Drizzle', 'rating': 1},
        'type_4': {'type': 4, 'description': 'Light Drizzle', 'rating': 1},
        'type_6': {'type': 6, 'description': 'Light Drizzle/Rain', 'rating': 1},
        'type_26': {'type': 26, 'description': 'Light Rain', 'rating': 1},
        'type_41': {'type': 41, 'description': 'Overcast', 'rating': 1},
        'type_1': {'type': 1, 'description': 'Blowing Or Drifting Snow', 'rating': 2},
        'type_5': {'type': 5, 'description': 'Heavy Drizzle/Rain', 'rating': 2},
        'type_8': {'type': 8, 'description': 'Fog', 'rating': 2},
        'type_9': {'type': 9, 'description': 'Freezing Drizzle/Freezing Rain', 'rating': 2},
        'type_11': {'type': 11, 'description': 'Light Freezing Drizzle/Freezing Rain', 'rating': 2},
        'type_12': {'type': 12, 'description': 'Freezing Fog', 'rating': 2},
        'type_14': {'type': 14, 'description': 'Light Freezing Rain', 'rating': 2},
        'type_17': {'type': 17, 'description': 'Ice', 'rating': 2},
        'type_18': {'type': 18, 'description': 'Lightning Without Thunder', 'rating': 2},
        'type_19': {'type': 19, 'description': 'Mist', 'rating': 2},
        'type_20': {'type': 20, 'description': 'Precipitation In Vicinity', 'rating': 2},
        'type_23': {'type': 23, 'description': 'Light Rain And Snow', 'rating': 2},
        'type_24': {'type': 24, 'description': 'Rain Showers', 'rating': 2},
        'type_31': {'type': 31, 'description': 'Snow', 'rating': 2},
        'type_32': {'type': 32, 'description': 'Snow And Rain Showers', 'rating': 2},
        'type_33': {'type': 33, 'description': 'Snow Showers', 'rating': 2},
        'type_35': {'type': 35, 'description': 'Light Snow', 'rating': 2},
        'type_36': {'type': 36, 'description': 'Squalls', 'rating': 2},
        'type_38': {'type': 38, 'description': 'Thunderstorm Without Precipitation', 'rating': 2},
        'type_10': {'type': 10, 'description': 'Heavy Freezing Drizzle/Freezing Rain', 'rating': 3},
        'type_13': {'type': 13, 'description': 'Heavy Freezing Rain', 'rating': 3},
        'type_16': {'type': 16, 'description': 'Hail Showers', 'rating': 3},
        'type_21': {'type': 21, 'description': 'Rain', 'rating': 3},
        'type_22': {'type': 22, 'description': 'Heavy Rain And Snow', 'rating': 3},
        'type_25': {'type': 25, 'description': 'Heavy Rain', 'rating': 3},
        'type_34': {'type': 34, 'description': 'Heavy Snow', 'rating': 3},
        'type_37': {'type': 37, 'description': 'Thunderstorm', 'rating': 3},
        'type_40': {'type': 40, 'description': 'Hail', 'rating': 3},
        'type_7': {'type': 7, 'description': 'Duststorm', 'rating': 4},
        'type_15': {'type': 15, 'description': 'Funnel Cloud/Tornado', 'rating': 4},
        'type_39': {'type': 39, 'description': 'Diamond Dust', 'rating': 4},
        'type_30': {'type': 30, 'description': 'Smoke Or Haze', 'rating': 4},
    }
    number = []
    description = []
    for i in wx_type.split(','):
        rating = next(val for key, val in wx.items() if i.strip() == key)
        number.append(rating['rating'])
        description.append(rating['description'])
    return number, description
3f1810bc7eb82dc074808b9f42a82b9cc03aadfb
291,308
def stringtogether(alist):
    """Return elements in a list as a string."""
    return ''.join(alist)
a71ddb73be255e1964bbfc3de40409986325eb2a
456,224
def calc_check_digits(number):
    """Calculate the check digits for the number."""
    d1 = (11 - sum(((3 - i) % 8 + 2) * int(n)
                   for i, n in enumerate(number[:12]))) % 11 % 10
    d2 = (11 - sum(((4 - i) % 8 + 2) * int(n)
                   for i, n in enumerate(number[:12]))
          - 2 * d1) % 11 % 10
    return '%d%d' % (d1, d2)
6f882773a4c84a628004d57f51499d16bb66772a
232,161
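The weight pattern ((3 - i) % 8 + 2) expands to 5, 4, 3, 2, 9, 8, 7, 6, 5, 4, 3, 2 over the first twelve digits, which matches the Brazilian CNPJ check-digit weights (an observation, not stated in the source). A doctest-style check against the well-known sample base 11.222.333/0001, whose check digits are 81:

>>> calc_check_digits('112223330001')
'81'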
def topological_sort(graph):
    """
    Repeatedly go through all of the nodes in the graph, moving each of the
    nodes that has all its edges resolved, onto a sequence that forms our
    sorted graph. A node has all of its edges resolved and can be moved once
    all the nodes its edges point to have been moved from the unsorted graph
    onto the sorted one.

    Parameters
    ----------
    graph : dict
        Dictionary that has graph structure.

    Raises
    ------
    RuntimeError
        If graph has cycles.

    Returns
    -------
    list
        List of nodes sorted in topological order.
    """
    sorted_nodes = []
    graph_unsorted = graph.copy()

    while graph_unsorted:
        acyclic = False
        for node, edges in list(graph_unsorted.items()):
            if all(edge not in graph_unsorted for edge in edges):
                acyclic = True
                del graph_unsorted[node]
                sorted_nodes.append(node)
        if not acyclic:
            raise RuntimeError("A cyclic dependency occurred")

    return sorted_nodes
d67113e456f7c7a6f366bc216235dbca8c818f91
334,187
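Edges here are read as dependencies: a node is appended once everything it points to has already been moved out of the unsorted graph. A doctest-style example:

>>> topological_sort({'a': [], 'b': ['a'], 'c': ['a', 'b']})
['a', 'b', 'c']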
def _direction_to_index(direction):
    """Map direction identifier to index."""
    directions = {-1: 0,
                  0: slice(None),
                  1: 1,
                  '<=': 0,
                  '<=>': slice(None),
                  '>=': 1}
    if direction not in directions:
        # use !r so both integer and string identifiers format correctly
        raise RuntimeError('Unknown direction {!r}.'.format(direction))
    return directions[direction]
1612c7107601a6166e478815b89725701f8dc3ff
675,621
def duration(df):
    """Finds time duration of ECG strip

    This function subtracts the minimum time value from the maximum time
    value and returns the duration of the ECG strip.

    :param df: a dataframe of floats of ecg data
    :returns: a float of the duration of the ECG strip in seconds
    """
    max_time = max(df['time'])
    min_time = min(df['time'])
    duration = max_time - min_time
    return duration
dab50a2aed4fb2ddc69cab75263c9c331bf1d437
122,009
def left_string(string, padding):
    """
    Formats a string with padding. Either adds spaces to the end or truncates

    Args
    ----
    string : str
        String for printing
    padding : int
        Width (in columns) for the final format

    Returns
    -------
    : str
        If ``string`` is longer than padding, it will be trimmed with ellipses

    Example
    -------
    ::

        left_string('A very long string', 10)
        # => 'A very ...'
        left_string('short', 10)
        # => 'short     '
    """
    if len(string) > padding:
        return '{}...'.format(string[0:padding - 3])
    if len(string) == padding:
        return string
    if len(string) < padding:
        return '{string}{padding}'.format(
            string=string,
            padding=' ' * (int(padding) - len(string))
        )
5406614cf30a0d194547140c7ba28a52711d3867
271,570
def check_variable_exclusion(variable_name, ex_variables):
    """Checks whether the variable has to be excluded.

    Excluded variables are reported by the user in the .cfg file.

    Arguments:
        variable_name {string} -- the variable name
        ex_variables {list} -- the list of excluded variables

    Returns:
        True -- if the variable is not excluded
        False -- if the variable is excluded in the .cfg file
    """
    if variable_name in ex_variables:
        return False
    else:
        return True
b1e95c665e14450189d6aa1204e733d8b19e2ece
207,527
def _ravel_dictionary(dictionary, conflict):
    """
    This function unravels a dictionary, un-nesting nested dictionaries into
    a single dictionary. If conflicts arise, then the conflict rule is used.
    The keys of dictionary entries that have dictionary values are discarded.

    Parameters
    ----------
    dictionary : dictionary
        The dictionary to be unraveled.
    conflict : string
        The conflict rule. It may be one of these:

        * 'raise'
            If a conflict is detected, a sparrowcore.DataError will be raised.
        * 'superior'
            If there is a conflict, the least nested dictionary takes
            precedence. Equal levels will prioritize via alphabetical.
        * 'inferior'
            If there is a conflict, the most nested dictionary takes
            precedence. Equal levels will prioritize via anti-alphabetical.

    Returns
    -------
    raveled_dictionary : dictionary
        The unraveled dictionary. Conflicts were replaced using the
        conflict rule.
    """
    # Reaffirm that this is a dictionary.
    if (not isinstance(dictionary, dict)):
        dictionary = dict(dictionary)
    else:
        # All good.
        pass

    # Ensure the conflict is a valid conflict type.
    conflict = str(conflict).lower()
    if (conflict not in ('raise', 'superior', 'inferior')):
        raise RuntimeError("The conflict parameter must be one of the "
                           "following: 'raise', 'superior', 'inferior'.")

    # The unraveled dictionary.
    raveled_dictionary = dict()

    # Sorted current dictionary. This sorting helps with priorities
    # prescribed by `conflict`.
    sorted_dictionary = dict(sorted(dictionary.items()))
    for keydex, itemdex in sorted_dictionary.items():
        # If this entry is a dictionary, then recursively go through it
        # like a tree search.
        if (isinstance(itemdex, dict)):
            temp_dict = _ravel_dictionary(dictionary=itemdex,
                                          conflict=conflict)
        else:
            # It is a spare item, create a dictionary.
            temp_dict = {keydex: itemdex}

        # Combine the dictionary, but, first, check for intersection
        # conflicts.
        if (len(raveled_dictionary.keys() & temp_dict.keys()) != 0):
            # There are intersections. Handle them based on `conflict`.
            if (conflict == 'raise'):
                raise RuntimeError("There are conflicts in these two "
                                   "dictionaries: \n"
                                   "Temp : {temp} \n Ravel : {ravel}"
                                   .format(temp=temp_dict,
                                           ravel=raveled_dictionary))
            elif (conflict == 'superior'):
                # Preserve the previous entries as they are either higher up
                # in the tree or are ahead alphabetically.
                raveled_dictionary = {**temp_dict, **raveled_dictionary}
            elif (conflict == 'inferior'):
                # Preserve the new entries as they are either lower in the
                # tree or are behind alphabetically.
                raveled_dictionary = {**raveled_dictionary, **temp_dict}
            else:
                # The code should not get here.
                raise RuntimeError("The input checking of conflict "
                                   "should have caught this.")
        else:
            # They can just be combined as normal. Taking superior
            # as the default.
            raveled_dictionary = {**temp_dict, **raveled_dictionary}

    # All done.
    return raveled_dictionary
162e2dea81e83009e3409e709516dbd47e51f5c3
281,304
def has_data(data: str) -> bool:
    """
    Checks if the input contains data. If there are any non-whitespace
    characters then return True, else return False.

    Parameters:
        data: (string) input to check whether it contains data

    Returns:
        Boolean True if input string (data) contains non-whitespace
        characters, otherwise False
    """
    return bool(data and not data.isspace())
d8eff13c3de2c933830e2acafc55c6ad39a671f8
320,691
def pad(data, blocksize=16):
    """
    Pads data to blocksize according to RFC 4303. The pad length field is
    included in the output.
    """
    padlen = blocksize - len(data) % blocksize
    return bytes(data + bytearray(range(1, padlen)) + bytearray((padlen - 1,)))
f4fa160ab8f37f3156191b80545bc0664ddd44dc
249,007
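For example, two payload bytes padded to an 8-byte block gain the monotonically increasing pad bytes 1 through 5 followed by the pad-length byte (a quick sanity check, not from the source):

>>> pad(b'\xaa\xbb', blocksize=8)
b'\xaa\xbb\x01\x02\x03\x04\x05\x05'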
def ipc_error_response(resp_data):
    """Make error response."""
    response = ("error", resp_data)
    return response
78191c47af640cb25874d349ee96326458c4c82e
288,175
def GetAllDictPaths(tree_dict):
    """Obtain list of paths to all leaves in dictionary.

    The last item in each list entry is the value at the leaf. For items in
    the dictionary that are a list of dictionaries, each list entry is
    indexed by a string representing its position in the list.
    Implementation inspired by https://stackoverflow.com/a/40555856.

    Args:
        tree_dict: Input dictionary.

    Returns:
        List of lists with all paths to leaf items in dictionary.

    Example:
        >> a = {'a': [1, 2, 3], 'b': [{'c': 10}, {'d': 20}]}
        >> print(GetAllDictPaths(a))
        [['a', [1, 2, 3]], ['b', '0', 'c', 10], ['b', '1', 'd', 20]]
    """
    if isinstance(tree_dict, list):
        if isinstance(tree_dict[0], dict):
            return [[str(i)] + path
                    for i, value in enumerate(tree_dict)
                    for path in GetAllDictPaths(value)]
        else:
            return [[tree_dict]]
    elif not isinstance(tree_dict, dict):
        return [[tree_dict]]
    return [[key] + path
            for key, value in tree_dict.items()
            for path in GetAllDictPaths(value)]
be291f58c7b2a3fe63fc4e8b8e9b2fd57de45b5e
66,734
def first_negative(l):
    """Returns the first negative element in a given list of numbers."""
    for x in l:
        if x < 0:
            return x
    return None
1e62ce772f7d38e3835e5d6635c33bf365f134e7
49,812
def determine_issues(project):
    """
    Get the list of issues of a project.

    :rtype: list
    """
    issues = project["Issue"]
    if not isinstance(issues, list):
        return [issues]
    return issues
7b8b670e4ad5a7ae49f3541c87026dd603406c9f
708,245
def time_str_to_seconds(time):
    """Convert a time interval specified as a string ``dd:hh:mm:ss`` into
    seconds. Accepts both '-' and ':' as separators."""
    intervals = [1, 60, 60 * 60, 60 * 60 * 24]
    return sum(iv * int(t)
               for iv, t in zip(intervals,
                                reversed(time.replace('-', ':').split(':'))))
f451f398108bea7c9d9050175c97d6698c2643f4
243,401
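A couple of doctest-style checks, including the '-' day separator that the code normalises to ':':

>>> time_str_to_seconds('01:30:00')    # 1 h 30 min
5400
>>> time_str_to_seconds('2-00:00:00')  # 2 days
172800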
def create_cubes(n):
    """returns list of cubes from 0 to n"""
    result = []
    for x in range(n):
        result.append(x ** 3)  # entire 'result' list in memory (inefficient)
    return result
59055269162ba33407ea0ede9747b77b74e504db
80,788
def channel_parser(channel):
    """Parse a channel returned from ipmitool's "sol info" command.

    Channel format is: "%d (%x)" % (channel, channel)
    """
    chan, xchan = channel.split(' (')
    return int(chan)
d2a5cb20b9e44bbbc6c617706e952b333db5b2b4
675,519
def member_to_beacon_proximity_smooth(m2b, window_size='5min', min_samples=1):
    """Smooths the given object using a 1-D median filter.

    Parameters
    ----------
    m2b : Member to beacon object

    window_size : str
        The size of the window used for smoothing. Defaults to '5min'.

    min_samples : int
        Minimum number of samples required for smoothing

    Returns
    -------
    pd.DataFrame :
        The member-to-beacon proximity data, after smoothing.
    """
    df = m2b.copy().reset_index()
    df = df.sort_values(by=['member', 'beacon', 'datetime'])
    df.set_index('datetime', inplace=True)

    df2 = df.groupby(['member', 'beacon'])[['rssi']] \
        .rolling(window=window_size, min_periods=min_samples) \
        .median()

    # For std, we put -1 when std was NaN. This handles the case when there
    # was only one record. If there were no records (median was not
    # calculated because of min_samples), the record will be dropped because
    # of the NaN in 'rssi'
    df2['rssi_std'] \
        = df.groupby(['member', 'beacon'])[['rssi']] \
        .rolling(window=window_size, min_periods=min_samples) \
        .std().fillna(-1)

    # number of records used for calculating the median
    df2['rssi_smooth_window_count'] \
        = df.groupby(['member', 'beacon'])[['rssi']] \
        .rolling(window=window_size, min_periods=min_samples) \
        .count()

    df2 = df2.reorder_levels(['datetime', 'member', 'beacon'], axis=0) \
        .dropna().sort_index()
    return df2
0825d167e4ae78a7b7204881e939c162aba77c3e
483,129
def dict_to_g6a(geometry_dict: dict, result_path: str) -> bool:
    """
    Turns a dictionary into a Delphin geometry file.

    :param geometry_dict: Dictionary holding the information for the geometry file
    :param result_path: Path to where the geometry file should be written
    :return: True
    """
    file_obj = open(result_path + '/' + geometry_dict['name'] + '.g6a', 'w')
    file_obj.write('D6GARLZ! ' + str(geometry_dict['D6GARLZ']) + '\n')

    file_obj.write('TABLE MATERIALS\n')
    for material in geometry_dict['materials']:
        file_obj.write(str(material[0]) + ' ' + str(material[1]) + ' '
                       + str(material[2] + '\n'))

    file_obj.write('\nTABLE GRID\n')
    for dimension in geometry_dict['grid']:
        elements = ' '.join([str(int(element_))
                             if element_ == int(element_)
                             else str(element_)
                             for element_ in geometry_dict['grid'][dimension]])
        if not dimension == 'z':
            file_obj.write(elements + ' \n')
        else:
            file_obj.write(elements + '\n')

    file_obj.write('\nTABLE ELEMENT_GEOMETRY\n')
    for element in geometry_dict['element_geometry']:
        space0 = ' ' * (9 - len(str(int(element[0]))))
        space1 = ' ' * max((10 - len(str(element[1]))), 1)
        space2 = ' ' * (29 - len(str(int(element[0])) + space0
                                 + str(element[1]) + space1 + str(element[2])))
        space3 = ' ' * (6 - len(str(int(element[3]))))
        space4 = ' ' * (6 - len(str(int(element[4]))))
        file_obj.write(str(int(element[0])) + space0 + str(element[1]) + space1
                       + str(element[2]) + space2 + '\t '
                       + str(int(element[3])) + space3
                       + str(int(element[4])) + space4
                       + str(int(element[5])) + '\n')

    file_obj.write('\nTABLE SIDES_GEOMETRY\n')
    for side in geometry_dict['sides_geometry']:
        if side[1] == int(side[1]):
            side[1] = int(side[1])
        space0 = ' ' * (9 - len(str(int(side[0]))))
        space1 = ' ' * max((10 - len(str(side[1]))), 1)
        space2 = ' ' * (29 - len(str(int(side[0])) + space0
                                 + str(side[1]) + space1 + str(side[2])))
        space3 = ' ' * (7 - len(str(int(side[3]))))
        space4 = ' ' * (7 - len(str(int(side[4]))))
        space5 = ' ' * 4
        file_obj.write(str(int(side[0])) + space0 + str(side[1]) + space1
                       + str(side[2]) + space2 + '\t '
                       + str(int(side[3])) + space3
                       + str(int(side[4])) + space4
                       + str(int(side[5])) + space5 + '\n')

    file_obj.write('\n')
    file_obj.close()
    return True
64cfe0f96fa434f0541ee205ceceadaf067cd419
133,654
def join(*paths):
    """
    Joins multiple paths into a single path.

    Arguments:
        *paths -- path components
    """
    path = ""
    for component in paths:
        path += ("/" if path and not path.endswith("/") else "") \
            + component.replace("\\", "/")
    return path
68b41d4efdf1979347e56b69dc7eb30fc77ae875
653,247
def always_roll(n):
    """
    Returns a strategy that always rolls N dice.

    >>> strategy = always_roll(5)
    >>> strategy(0, 0)
    5
    >>> strategy(99, 99)
    5
    """
    def strategy(score, opponent_score):
        return n
    return strategy
7f6489a64dcb525a1467763cb68521bb37fefb35
677,705
def get_distance(p1, p2):
    """It finds the minimum distance between two Points

    Parameters
    ----------
    p1 : shapely geometric object
        The first point
    p2 : shapely geometric object
        The second point

    Returns
    -------
    list
        Returns the minimum distance. The value follows the geometric
        object projection.
    """
    dist = 5000
    try:
        dist = min(dist, p1.distance(p2))
    except TypeError as err:
        print(f'{err}')
    return [dist]
2bcfdc62b25e286d1a1d46c27f058e8da3e722e9
40,305
def getParamIndx(fit, name, parameter):
    """Get index for a specific parameter for a specific source from model
    in UnbinnedAnalysis object fit"""
    ID = -1
    # Try to get the first component (if summed analysis)
    try:
        fit = fit.components[0]
    except Exception:  # not a summed analysis
        pass
    spec = fit[name].funcs['Spectrum']
    for indx, parName in zip(spec._parIds, spec.paramNames):
        if parName == parameter:
            ID = indx
    if ID == -1:
        print('Parameter %s not found for source %s in file %s.'
              % (parameter, name, fit.srcModel))
    return ID
6451d6494f55495802e67ab4b9185f7236dee0f2
394,608
import math


def distance(coords1, coords2):
    """
    Calculate the distance between two coordinates, as denoted by
    dist = sqrt((x2 - x1)^2 + (y2 - y1)^2 + (z2 - z1)^2)

    Parameters
        coords1: Coordinates of form [x, y, z]
        coords2: Coordinates of form [x, y, z]

    Returns
        dist: Distance between the two coordinates (float)
    """
    p = coords2[0] - coords1[0]
    q = coords2[1] - coords1[1]
    r = coords2[2] - coords1[2]
    dist = math.sqrt(p * p + q * q + r * r)
    return dist
6b4fc0a99f4f55fec463c453247d5f4466826587
382,125
def _slice_set(X, dates):
    """
    Returns a copy of the original data, sliced based on the specified dates.

    :param X: dataset with a MultiIndex consisting of date as pd.DatetimeIndex and category
    :param dates: list of dates for filtering
    :return: subset of X (copy)
    """
    new_samples = X.copy().loc[dates]
    new_samples.index = new_samples.index.remove_unused_levels()
    return new_samples
689ca235d30cd9e18191ded26a96a6785915d175
552,645
import struct


def pngxy(data):
    """read the width/height from a PNG header"""
    ihdr = data.index(b'IHDR')
    # next 8 bytes are width/height
    w4h4 = data[ihdr + 4:ihdr + 12]
    return struct.unpack('>ii', w4h4)
b0d3fe3930858d5b6f22075405339ce358c81dd1
232,770
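A doctest-style check with a handcrafted header (the 8-byte PNG signature, the IHDR chunk length, then the tag and packed dimensions):

>>> import struct
>>> header = b'\x89PNG\r\n\x1a\n' + b'\x00\x00\x00\rIHDR' + struct.pack('>ii', 640, 480)
>>> pngxy(header)
(640, 480)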
from typing import BinaryIO


def read_hex(stream: BinaryIO) -> bytes:
    """
    Read HEX line from stream and decode as bytes

    Args:
        stream: stream to read from

    Returns:
        decoded bytes
    """
    return bytes.fromhex(stream.readline().decode())
fd2f08c6fc280ee2a5e0ec08125b58c76278ef3a
512,401
def sanitize(s) -> str:
    """
    Returns string decoded from utf-8 with leading/trailing whitespace
    characters removed
    """
    return s.decode('utf-8').strip()
f3d2d520ae74a0bbfaa27ab749fa8d3c83e5dda7
256,177
from typing import Iterable


def flexible_str(obj):
    """
    A more flexible str function which intelligently handles stringifying
    strings, lists and other iterables. The results are lexicographically
    sorted to ensure generated responses are consistent when iterables such
    as Set are used.
    """
    if obj is None:
        return None
    elif not isinstance(obj, str) and isinstance(obj, Iterable):
        return ', '.join(str(item) for item in sorted(obj))
    else:
        return str(obj)
69b2210470c012848ab4c29d3d3dca7b24ba25a8
207,901
import logging


def getLoggers(dotted_module_name):
    """
    Returns two loggers. The first is in the namespace that's passed in.
    The second inserts ".math" as the second module, so ``cascade.core.log``
    becomes ``cascade.math.core.log``.

    Args:
        dotted_module_name (str): The name of the module, usually as a
            ``__name__`` variable at the top of the module after imports.

    Returns:
        logging.Logger: The logger to use for regular code logs.
        logging.Logger: The logger to use for messages about the statistics.
    """
    code_log = logging.getLogger(dotted_module_name)
    separated = code_log.name.split(".")
    math_log = logging.getLogger(".".join([separated[0], "math"] + separated[1:]))
    return code_log, math_log
e5a4d8deb029cd3326d4d8f773063c1a2e418805
488,618
import json


def get_json(file_name):
    """Load JSON data from a file."""
    with open(file_name) as json_data:
        return json.load(json_data)
79ecbc4859631aeed2c21eda8ae41b9eca3c6836
427,249
def join_prefix(values, num):
    """Produce a string joining the first `num` items in the list and
    indicate the total number of items.
    """
    if len(values) <= 1:
        return "".join(values)
    if len(values) <= num:
        return ", ".join(values[:-1]) + " and " + values[-1]
    return "%s and %d others" % (", ".join(values[:num]), len(values) - num)
82952d2f451019c4ea945cf6b7876fe6cff4f262
336,696
def hzip(x):
    """
    Zips the first and second half of `x`. If `x` has odd length, the last
    element will be ignored.

    >>> list(hzip([1, 2, 3, 4, 5, 6]))
    [(1, 4), (2, 5), (3, 6)]
    >>> list(hzip([1, 2, 3, 4, 5, 6, 7]))
    [(1, 4), (2, 5), (3, 6)]
    """
    N = int(len(x) // 2)
    return zip(x[:N], x[N:])
c99214a2f8e3110b0d070ed5fa3fdd428cb0dd6b
659,731
def generate_memory_region(region_descriptor):
    """
    Generates definition of memory region.

    Args:
        region_descriptor (dict): memory region description

    Returns:
        string: repl definition of the memory region
    """
    return """
{}: Memory.MappedMemory @ sysbus {}
    size: {}
""".format(region_descriptor['name'],
           region_descriptor['address'],
           region_descriptor['size'])
0e1ff3468cdeb5495d26c9acc080b2dda154d31b
142,692
def stripTag(chunk):
    """Strips the tag and returns the rest of the chunk. Only useful in
    password-protected uploads.

    Arguments:
        chunk {bytes} -- Everything inside the message field of an IOTA tx,
            except for the signature.

    Returns:
        [bytes] -- Chunk without the tag.
    """
    # tag = chunk[-16:]
    chunkAndNonce = chunk[:-16]
    return chunkAndNonce
3b56916a798b261b282ef35c1c5613f9404da327
605,088
def snake2title(s):
    """Convert 'snake_case' string to 'Title Case' string."""
    return ' '.join(s.split('_')).strip().title()
77d0e4589e1f5b9dc0e18a625acdd9e7e3dd8efb
466,681
def read_file(a_file):
    """
    Read the specified file and return its content as one string.
    """
    with open(a_file, 'r') as fp:
        content = fp.read()
    return content
0d61a52aa0678eaad52d243c4c79a538f7531665
515,787
def edgeos_action(object, on=None):
    """
    Return the appropriate EdgeOS configuration command to send with *object*.

    Unless *on* is specified, return "set" if *object* evaluates to ``True``
    or "delete" if *object* evaluates to ``False``.

    When *on* is specified, if *object* evaluates to ``True``, then return an
    EdgeOS configuration command to set the configuration node specified by
    *on* to *object*. If *object* evaluates to ``False``, then return an
    EdgeOS configuration command to delete the configuration node specified
    by *on*.
    """
    if on is None:
        return "set" if object else "delete"
    if not object:
        return f"delete {on}"
    return f"set {on} {object}"
47ed4a10c22f1e439ffe93105b21f584825e4040
610,430
import logging
import ftplib


def ftp_check_directory(ftp_connection, path):
    """
    Following convention with the rest of the code, return 0 if it is a
    directory, 1 if it is not or the check failed.
    """
    response = ftp_connection.pwd()
    if response == '':
        return 1
    original_directory = response

    # We are NOT scp, so we won't create a file when the filename is not
    # specified (mirrors input behaviour)
    try:
        ftp_connection.cwd(path)
        logging.error(
            'Path "%s" at "%s" already exists and is a folder. '
            'Please specify a target filename and retry',
            path, ftp_connection.host)
        is_directory = True
    except ftplib.error_perm:
        is_directory = False
    except (ftplib.error_reply, ftplib.error_temp):
        logging.exception('Could not check if path "%s" in "%s" is directory',
                          path, ftp_connection.host)
        return 1

    try:
        ftp_connection.cwd(original_directory)
    except (ftplib.error_reply, ftplib.error_perm, ftplib.error_temp):
        logging.exception(
            'Error when checking if "%s" in "%s" was a directory',
            path, ftp_connection.host)
        return 1

    return 0 if is_directory else 1
fba5ff58c84cf78d8c0c1fd72adb9c581d9888c2
217,275
def middle(t):
    """Takes a list, returns a new list with first and last elements removed"""
    return t[1:-1]
51da0fbefa9ee7c23c431dea9cfb0ff22139b778
606,753
def get_offer_facebook_description(html_parser):
    """
    This method returns the short standardized description used for the
    default facebook share message.

    :param html_parser: a BeautifulSoup object
    :rtype: string
    :return: The default facebook share message
    """
    fb_description = html_parser.find(attrs={'name': 'description'}).attrs['content']
    return fb_description
2df75f99a4f666dcd6cc6352ee9bea626e928418
491,768
import itertools


def countCombosSumEqual(sm: int, nums: list) -> int:
    """
    Count all possible combos of elements in nums[] such that
    sum(combo) == sm

    Args:
        sm (int): the sum to match.
        nums (list): list of positive integers.

    Returns:
        int: resulting count.

    If nums[0] == sm, start with count=1, then pop nums[0].
    For r from 2 to len(nums), iterate thru all combos C(len(nums), r),
    then count all such that sum(combo) == sm
    """
    count = 0
    if nums[0] == sm:
        count += 1
        nums.pop(0)
    return count + len([1 for r in range(2, len(nums) + 1)
                        for combo in itertools.combinations(nums, r)
                        if sum(combo) == sm])
6ab5af1a6b26d6043b677f607a56e989d6d45200
678,617
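Note that a successful nums[0] match pops the element, so the call mutates the list passed in. A doctest-style example:

>>> countCombosSumEqual(5, [5, 2, 3])   # [5] itself, plus (2, 3)
2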
import itertools


def list_flatten(_l):
    """Flatten a complex nested list of nested lists into a flat (lazy)
    iterator of items
    """
    return itertools.chain(*[list_flatten(j) if isinstance(j, list) else [j]
                             for j in _l])
05d1e4018accfe850c07504f123d85949a2ced60
698,340
import struct


def _decode_int(s):
    """
    Decodes a 4-byte string representing a 4-byte big-endian integer into
    an int.

    :param s: A 4-byte string representing an integer
    :return: The integer the passed string represents
    """
    return struct.unpack('>i', s)[0]
f98d1d1fa2365c74307fd68f3ddec273c68b012b
607,893
def _deep_get(instance, path):
    """
    Descend path to return a deep element in the JSON object instance.
    """
    for key in path:
        instance = instance[key]
    return instance
3dad3eed0115c244ee60e887049747200ed1838c
686,097
import itertools


def nth(iterable, n):
    """Returns the nth item from iterable."""
    try:
        return iterable[n]
    except TypeError:
        try:
            return next(itertools.islice(iterable, n, None))
        except StopIteration:
            raise IndexError('index out of range')
6ca5f1aa0f78607a9f0d31b43fe9ec5cd8a30623
82,167
import gzip
import bz2


def open_file(fname, mode):
    """
    A wrapper around `open()` that also supports gzip and bz2.

    Args:
        mode (str): should either be 'r' or 'w'. A 't' will be appended when
            opening a compressed file.
    """
    if fname.endswith('.gz'):
        return gzip.open(fname, mode + 't')
    elif fname.endswith('.bz2'):
        return bz2.open(fname, mode + 't')
    else:
        return open(fname, mode)
258555a025d9bb45bd26d2e59504144749d00c19
298,551
def create_dockerfile(ctx):
    """
    Creates a Dockerfile for this project.
    """
    return ctx.obj['docker'].create_dockerfile(
        project=ctx.obj['project_name']
    )
4530978e708bf727fd9c8e904972cf55da4aef18
625,820
def evaluate_by_syntax(what_is_being_parsed, tokens, syntax_table):
    """
    Given a tokenized form of what is being parsed, find the handler for it
    in a provided syntax table and invoke the handler. If no matching syntax
    is found in the syntax table, raise ValueError.

    :param what_is_being_parsed: string repr of what is being parsed, for use
        in exception messages
    :param tokens: sequence of type/value pairs as returned by parse()
    :param syntax_table: sequence of tuples with two elements:

        * type sequence
        * reference to handler function to call when the tokens sequence has
          the same type sequence

    :return: whatever the handlers return
    """
    token_types = [token[0] for token in tokens]
    for expected_types, handler in syntax_table:
        if list(expected_types) == token_types:
            return handler(tokens)
    raise ValueError('Time specification "%s" has unexpected syntax'
                     % what_is_being_parsed)
ca58326c696d857b2f0d175731219a15fbaa201b
202,542
import json


def read_vocab(vocab_path):
    """Read vocabulary"""
    with open(vocab_path + 'words_index.json', 'r', encoding='utf-8') as f:
        words_index = json.loads(f.read())
    return words_index
d86a813d270639835bb5937a2946042266729aa5
491,988
def is_config_or_edit_end(line, configs):
    """
    Does the line indicate that a 'config' or 'edit' section ends?

    - config <name> ... end
    - edit <name> ... next

    :param line: A str representing a line in configurations output
    :param configs: A stack (list) holding config node objects
    """
    if configs[-1].end_re.match(line):
        # config/edit section ends.
        return True
    return False
13047c503ed4bbff4019b25e5de939922cdbea48
148,594
def get_release_date(data):
    """Get release date."""
    date = data.get("physicalRelease")
    if not date:
        date = data.get("inCinemas")
    return date
65066b370c91b7b47853e1348e3022af9522108c
105,190
def as_bytes(value, length):
    """Turn an integer into a byte array"""
    return value.to_bytes(length, byteorder="big", signed=False)
ae6b0eeb1d15899cbdff23be79e2d43f1b92dee9
328,288
def load_data(word_path):
    """Read data from file."""
    with open(word_path, "r") as lines:
        return [line.strip() for line in lines]
05c85ae9480e60217477448ead0b2155d977e983
556,005
def safe_short_path(file_):
    """Like `File.short_path` but safe for use with files from external
    repositories.
    """
    # Note: "F" is "File", "FO" is "File.owner". (Lifted from genpkg.bzl.)
    # | File type | Repo     | `F.path`                                                 | `F.root.path`                | `F.short_path`          | `FO.workspace_name` | `FO.workspace_root` |
    # |-----------|----------|----------------------------------------------------------|------------------------------|-------------------------|---------------------|---------------------|
    # | Source    | Local    | `dirA/fooA`                                              |                              | `dirA/fooA`             |                     |                     |
    # | Generated | Local    | `bazel-out/k8-fastbuild/bin/dirA/gen.out`                | `bazel-out/k8-fastbuild/bin` | `dirA/gen.out`          |                     |                     |
    # | Source    | External | `external/repo2/dirA/fooA`                               |                              | `../repo2/dirA/fooA`    | `repo2`             | `external/repo2`    |
    # | Generated | External | `bazel-out/k8-fastbuild/bin/external/repo2/dirA/gen.out` | `bazel-out/k8-fastbuild/bin` | `../repo2/dirA/gen.out` | `repo2`             | `external/repo2`    |

    # Beginning with `file_.path`, remove optional `F.root.path`.
    working_path = file_.path
    if not file_.is_source:
        working_path = working_path[len(file_.root.path) + 1:]
    return working_path
603e01a006d15da21a7a7d3139496c2e26c8be96
614,870
def flatten(weights, start=0, stop=2):
    """This method reshapes all values in a dictionary.

    The indices from start to stop will be flattened into a single index.

    Args:
        weights: A dictionary mapping keys to numpy arrays.
        start: The starting index.
        stop: The ending index.
    """
    for key, val in weights.items():
        new_shape = val.shape[0:start] + (-1,) + val.shape[stop:]
        weights[key] = val.reshape(new_shape)
    return weights
f5d427e90b0774f072a4729df6e376e285c435cd
304,000
def electrokinetic2(row):
    """
    Notes:
        1) zeta potentials are in mV. If in V, remove the 1e3.
        2) the relative dielectric constant is for water; if this is not
           true, make a column and change the function.

    References:
        (1) You-Im Chang and Hsun-Chih Chan. "Correlation equation for
            predicting filter coefficient under unfavorable deposition
            conditions". AIChE Journal, 54(5):1235-1253, 2008.
        (2) Rajagopalan, R. & Kim, J. S. "Adsorption of brownian particles
            in the presence of potential barriers: effect of different modes
            of double-layer interaction". Journal of Colloid and Interface
            Science 83, 428-448 (1981).

    :param row:
    :return: 2nd electrokinetic parameter
    """
    zp = row.enm_zeta_potential / 1e3  # particle zeta potential
    zc = row.collector_zeta_potential / 1e3  # collector zeta potential
    numerator = 2 * (zp / zc)
    denominator = 1 + (zp / zc) ** 2
    return numerator / denominator
ee1fd24883b85163300986a7ab279d6522d130e0
77,706
def deduplicate(constraints):
    """
    Return a new ``constraints`` list with exact duplicate constraints
    removed.
    """
    seen = set()
    unique = []
    for c in constraints:
        if c not in seen:
            unique.append(c)
            seen.add(c)
    return unique
cdbd51d99280baf61603e78cf9aea66f9bc0f73e
96,241
import json


def is_valid_json(string):
    """
    Determines if a string is valid JSON

    :param string: The string to be tested
    :return: boolean representing if the string is valid JSON
    """
    try:
        json.loads(string)
    except (ValueError, TypeError):
        return False
    return True
ce97917e5c7407b68a1b6c7c9541242b664c3bef
74,450
def transpose(L):
    """Transpose an iterable of iterables."""
    return zip(*L)
fbdf1821de67b2381d098948c12e8dac7c3312de
425,870
import re


def stripcomments(lines):
    """
    Strips all C++ style block and line comments from a list of lines
    using RegEx
    """
    def replacer(match):
        s = match.group(0)
        if s.startswith('/'):
            return ""
        else:
            return s

    pattern = re.compile(
        r'//.*?$|/\*.*?\*/|\'(?:\\.|[^\\\'])*\'|"(?:\\.|[^\\"])*"',
        re.DOTALL | re.MULTILINE
    )

    out_lines = []
    for line in lines:
        line = re.sub(pattern, replacer, line).strip()
        out_lines.append(line)

    return out_lines
cd0871e7e4fa695a8c3bc2f69e553f22dfa94a30
181,482
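String literals are matched by the pattern and returned unchanged by replacer, so comment markers inside quotes survive. A doctest-style check:

>>> stripcomments(['int a = 1; // trailing comment',
...                'char *s = "// not a comment"; /* block */'])
['int a = 1;', 'char *s = "// not a comment";']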
def re_reference_column(df, column, index=None):
    """Re-reference a column by subtracting its value on input index."""
    if index is None:
        index = -1
    df = df.copy()
    return df[column] - df[column].iloc[index]
bf9d416e1192c1b7e8377589c520b0e4b3d8859f
544,524
def convert_to_DNA(sequence):
    """Converts RNA to DNA"""
    sequence = str(sequence)
    sequence = sequence.upper()
    return sequence.replace('U', 'T')
2a69a3102df8f5a16b2b049fb1d80dae720b10e3
696,351