Columns: content (string, lengths 39–14.9k) · sha1 (string, length 40) · id (int64, values 0–710k)
def _is_ctl(c):
    """Returns true iff c is in CTL as specified in HTTP RFC."""
    return ord(c) <= 31 or ord(c) == 127
8c2cb3bdb46dcea2c895d6503f7bfd71aa4af389
62,103
def partition(ls, ls_size):
    """Partitions list into a list of lists.

    Useful for dividing up a population into batches in order to process
    each batch in a different process.

    :param ls: list to be partitioned.
    :param ls_size: length of sublist.
    :return: partitioned list
    """
    # Ceiling division avoids appending a trailing empty sublist when
    # len(ls) is an exact multiple of ls_size.
    partition_num = (len(ls) + ls_size - 1) // ls_size
    return [ls[i * ls_size:(i + 1) * ls_size] for i in range(partition_num)]
f556a50ac7aba5fd61a8e4b186de71c97fad46fe
62,108
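A quick usage sketch for `partition` (sample values are illustrative), assuming the function above is in scope; note the even-split case no longer yields a trailing empty batch:

print(partition(list(range(10)), 3))  # [[0, 1, 2], [3, 4, 5], [6, 7, 8], [9]]
print(partition(list(range(6)), 3))   # [[0, 1, 2], [3, 4, 5]]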
def construct_frag_tuple(frag):
    """Build the fragment tuple given the fragment name.

    Args:
        frag (str): the fragment name

    Returns:
        tuple: the (chain,fragname,fragno) tuple name
    """
    ch, resnum = frag.split('-')
    res, num = resnum.split(':')
    return ch, res, int(num)
7ec5fea63bd3cef42e3d95aa7a3509274a735eba
62,111
import requests


def fetch_cloudnet_model_types() -> list:
    """Finds different model types."""
    url = "https://cloudnet.fmi.fi/api/models"
    data = requests.get(url=url).json()
    models = [model['id'] for model in data]
    model_types = [model.split('-')[0] for model in models]
    return list(set(model_types))
037fe48bfebb49365b1c04902ac3375a403c002d
62,114
def indent_code(code_str, indent):
    """Indent a snippet of code with indent number of spaces"""
    if code_str == "" or indent == 0:
        return code_str
    if code_str[0] != '\n':
        code_str = ' ' * indent + code_str
    idx = code_str.find('\n')
    while idx != -1 and idx < len(code_str) - 1:
        # Indent the next line unless it is blank.
        if code_str[idx + 1] not in ['\n']:
            code_str = code_str[:idx + 1] + ' ' * indent + code_str[idx + 1:]
        idx = code_str.find('\n', idx + 1)
    return code_str
b63d9fc420b3e165be04e32f931f154b737ca739
62,115
def extract_sequence(region, sequence_file, fai):
    """
    Get a sequence region from an indexed sequence file.

    :param region: A tuple of the genomic coordinates with elements: name, pos, end.
    :param sequence_file: Open file to extract sequences from.
    :param fai: Dictionary of FAI records keyed by sequence name (see `read_fai_file()`).

    :return: Sequence in `seqFile` specified by `region`.
    """
    # Check FAI for region
    if region[0] not in fai:
        raise ValueError('Sequence {} is missing in the index'.format(region[0]))

    # Get coordinates and lengths
    chr_start = int(fai[region[0]][1])
    seq_len = int(fai[region[0]][2])
    line_len = int(fai[region[0]][3])

    region_pos = int(region[1])
    region_end = int(region[2])

    # Calculate positions
    start_line = int(region_pos / seq_len)
    start_line_pos = region_pos % seq_len

    end_line = int(region_end / seq_len)
    end_line_pos = region_end % seq_len

    start_file_pos = chr_start + start_line * line_len + start_line_pos
    end_file_pos = chr_start + end_line * line_len + end_line_pos

    # Check file positions
    if start_file_pos < 0:
        raise ValueError('Region {0}:{1}-{2} attempts to seek before 0 in the sequence file'.format(*region))

    if end_file_pos < start_file_pos:
        raise ValueError(
            'Region {0}:{1}-{2} attempts to seek before the start position of its record in the '
            'sequence file'.format(*region)
        )

    # Read sequence
    sequence_file.seek(start_file_pos)
    return sequence_file.read(end_file_pos - start_file_pos).replace('\n', '')
67b17edb2a9e9ebb12fd0a0f7415d359026b5d27
62,118
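A minimal in-memory check of `extract_sequence`, assuming the function above is in scope; the FAI tuple layout (length, byte offset, bases per line, bytes per line) is inferred from how the fields are indexed in the function:

import io

# Hypothetical two-line FASTA record for "chr1"; the ">chr1\n" header is 6 bytes.
fasta = io.StringIO(">chr1\nACGTACGTAC\nGTACGTACGT\n")
fai = {"chr1": (20, 6, 10, 11)}

# Bases 5..14 (0-based, end-exclusive) span the line break.
print(extract_sequence(("chr1", 5, 15), fasta, fai))  # CGTACGTACG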
from typing import List
from typing import Dict


def limit_intent(raw_data: List[Dict[str, str]], intent: str, max_qty: int) -> List[Dict[str, str]]:
    """
    Limit the number of examples of the specified intent class in the
    dataset to the maximum quantity defined.

    Parameters
    -----------
    - **raw_data**: the dataset to be changed.
    - **intent**: the intent class to be limited
    - **max_qty**: the maximum number of examples of that class allowed.

    Return
    ----------
    A new dataset, but with the limited amount of examples for the
    specified intent class.
    """
    current = 0
    output = []
    for message in raw_data:
        if message["intent"] == intent:
            if current < max_qty:
                current += 1
                output.append(message)
        else:
            output.append(message)
    return output
f3e07337183d08b1365a07b4503c0af6b9766de0
62,123
import torch


def get_embeddings(tokens_tensor, segments_tensor, model):
    """Obtain embeddings for word, position, and sequence.

    adapted from: https://github.com/huggingface/pytorch-pretrained-BERT/blob/
    2a329c61868b20faee115a78bdcaf660ff74cf41/pytorch_pretrained_bert/
    modeling.py#L264-L277)

    Args:
        tokens_tensor: Tokens for which to get the tokens embedding.
        segments_tensor: Used to generate the segments embedding.
        model: The model used for inference.

    Returns:
        words_embeddings: Word embeddings for the given tokens.
        position_embeddings: Positional embeddings for the tokens.
        sentence_embeddings: Sentence embeddings for the tokens.
    """
    seq_length = tokens_tensor.size(1)
    position_ids = torch.arange(seq_length, dtype=torch.long, device=tokens_tensor.device)
    position_ids = position_ids.unsqueeze(0).expand_as(tokens_tensor)
    if segments_tensor is None:
        segments_tensor = torch.zeros_like(tokens_tensor)

    # Get the three token types (words, positions, and sentences) individually
    words_embeddings = model.embeddings.word_embeddings(tokens_tensor)
    position_embeddings = model.embeddings.position_embeddings(position_ids)
    sentence_embeddings = model.embeddings.token_type_embeddings(segments_tensor)
    return words_embeddings, position_embeddings, sentence_embeddings
479c144a805e91c5eda35a7c93c9ebcf0adb0bc1
62,124
def depth(d, level=1):
    """
    Get the maximum depth of a dictionary.

    Parameters
    ----------
    d : dict
        Dictionary to determine depth
    level : int
        Internal recursion counter; leave at its default of 1.

    Returns
    -------
    (int) The maximum depth of the dictionary (i.e., the most number of
    nested dictionaries).
    """
    if not isinstance(d, dict) or not d:
        return level
    return max(depth(d[k], level + 1) for k in d)
ac1930adbaad80221acfbe077890775b6b07a825
62,128
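A quick illustration of `depth` on nested dictionaries, assuming the function above is in scope; note the count includes the leaf level:

print(depth({"a": 1}))                # 2
print(depth({"a": {"b": {"c": 1}}}))  # 4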
def sample_var(sequence_of_values):
    """A function that computes the unbiased sample variance."""
    mean = float(sum(sequence_of_values)) / float(len(sequence_of_values))
    SSD = sum([(float(x) - mean) ** 2 for x in sequence_of_values])
    return SSD / (len(sequence_of_values) - 1)
f781dbc9754ba84eabc6362e36b14da9ac5b0a43
62,129
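As a sanity check, `sample_var` should agree with the standard library's unbiased estimator (assuming the function above is in scope):

import statistics

data = [2.0, 4.0, 4.0, 4.0, 5.0, 5.0, 7.0, 9.0]
assert abs(sample_var(data) - statistics.variance(data)) < 1e-12
print(sample_var(data))  # 4.571428...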
def get_parser_defaults(parser):
    """
    Given an argparse.ArgumentParser pre-configured with args/options via
    add_argument, return a dictionary of {dest: default}, containing the
    options attribute names and their default value.

    :param parser: A pre-configured parser instance.
    :type parser: argparse.ArgumentParser
    :return: A dictionary of {dest: default}, containing the options
        attribute names and their default value
    :rtype: dict
    """
    defaults = dict()
    for a in parser._actions:
        defaults[a.dest] = a.default
    if 'help' in defaults:
        del defaults['help']
    return defaults
7f81181cd84e2de323e4f88cbdee44c29b31dca7
62,130
def get_recursive(d, names):
    """
    Recursively get dictionary keys

    The ``names`` argument should be a list of keys from top level to bottom.

    Example::

        >>> get_recursive({'foo': 'bar', 'baz': {'fam': 12}}, ['baz', 'fam'])
        12
    """
    # Avoid list.pop(0) so the caller's key list is not mutated.
    n, rest = names[0], names[1:]
    if not rest:
        return d[n]
    return get_recursive(d[n], rest)
979c58a7223e04b87d237a97f74c6af8d6de9f75
62,131
def is_heading_style(style):
    """ True if this is a style used as a heading """
    # Equivalent to the original slice-and-contains check.
    return style.name.startswith('Heading')
2c37d16d9f262694f7c6facab74f4ed10da3c507
62,134
def get_age_breakdown(members):
    """This function will retrieve all the ages of the members, and return
    the number of adults, seniors, children, infants, and the total number
    of family members.
    """
    infants = 0
    children = 0
    adults = 0
    seniors = 0
    for member in members:
        if member.age < 2:
            infants = infants + 1
        elif member.age >= 2 and member.age < 18:
            children = children + 1
        elif member.age >= 18 and member.age < 65:
            adults = adults + 1
        else:
            seniors = seniors + 1
    total = infants + children + adults + seniors
    agegroups = {'infants': infants, 'children': children, 'adults': adults,
                 'seniors': seniors, 'total': total}
    return agegroups
102978468dbb4e0dde5f5a43ff4ce012338a0f15
62,140
def median(data):
    """Returns the median of data.

    Arguments:
        data (list): A list of numbers.

    Returns:
        float: Median of the provided data.
    """
    n = len(data)
    if n < 1:
        return None
    if n % 2 == 1:
        return sorted(data)[n // 2]
    else:
        return sum(sorted(data)[n // 2 - 1:n // 2 + 1]) / 2.0
63d564f294387ee8764d775584848a6e8eb17bed
62,144
def policy_validation_settings(**kwargs):
    """
    Set policy validation settings. This is used when policy based tasks
    are created and `validate_policy` is set to True. The following kwargs
    can be overridden in the create constructor.

    :param bool configuration_validation_for_alert_chain: default False
    :param bool duplicate_rule_check_settings: default False
    :param bool empty_rule_check_settings: default True
    :param bool empty_rule_check_settings_for_alert: default False
    :param bool general_check_settings: default True
    :param bool nat_modification_check_settings: default True
    :param bool non_supported_feature: default True
    :param bool routing_modification_check: default False
    :param bool unreachable_rule_check_settings: default False
    :param bool vpn_validation_check_settings: default True
    :return: dict of validation settings
    """
    validation_settings = {
        'configuration_validation_for_alert_chain': False,
        'duplicate_rule_check_settings': False,
        'empty_rule_check_settings': True,
        'empty_rule_check_settings_for_alert': False,
        'general_check_settings': True,
        'nat_modification_check_settings': True,
        'non_supported_feature': True,
        'routing_modification_check': False,
        'unreachable_rule_check_settings': False,
        'vpn_validation_check_settings': True}

    for key, value in kwargs.items():
        validation_settings[key] = value

    return {'validation_settings': validation_settings}
225394bd4ebeb6d3f23c83fac3a33c026ba305dd
62,152
import json


def read_json(file_name):
    """Read json file with test definition."""
    with open(file_name) as json_file:
        structure_dic = json.load(json_file)
    return structure_dic
7eafa27af46fc6e313b7ceb8cab2cc95b73f8adb
62,154
def is_valid_year(year):
    """
    Check if year's value is valid
    :param year: int
    :return: boolean
    """
    return 1970 < year <= 2030
ac40569b4d2cd21c8ba9b20b5af11e9404b245ff
62,155
def extract_name(module_name):
    """
    extracts the module name.
    :param module_name:
    :return: <str> the module name without the version.
    """
    return module_name.split('_v')[0]
b2b25532f65ce4717025f2203214797e601e87b0
62,161
from typing import Mapping
from typing import Any
import json


def load_channels_metadata(metadata_path: str) -> Mapping[str, Any]:
    """
    Load channel metadata from a JSON file.

    Args:
        metadata_path (str): Path to the JSON metadata file.

    Returns:
        Mapping[str, Any]: The parsed metadata.
    """
    with open(metadata_path, "r") as f:
        data = json.load(f)
    return data
bca0ef6004f72a1af581a9017df718b0d4949fe0
62,162
import collections


def _sort_packages(pkgs):
    """Sort the MSL packages by the name of the package.

    Parameters
    ----------
    pkgs : :class:`dict`
        The MSL packages.

    Returns
    -------
    :class:`collections.OrderedDict`
        The packages sorted by name.
    """
    return collections.OrderedDict([(u'{}'.format(k), pkgs[k]) for k in sorted(pkgs)])
7196747d262b1443123f6c7cc73f4128a5b3a8ca
62,163
def sequences_to_fasta(sequences):
    """Formats sequence dictionary as FASTA."""
    return "\n".join(
        f">{header}\n{sequence}" for header, sequence in sequences.items()
    )
bc0fed5f379bf5a964204ca0c30f31430296e7b6
62,165
import torch


def reparameterize(mu: torch.Tensor, logvar: torch.Tensor) -> torch.Tensor:
    """
    Applies reparametrization trick to obtain sample from latent space.

    Args:
        mu: the latent means of shape batch_size x latent_size.
        logvar: latent log variances, shape batch_size x latent_size.

    Returns:
        torch.Tensor: sampled Z from the latent distribution.
    """
    return torch.randn_like(mu).mul_(torch.exp(0.5 * logvar)).add_(mu)
233190d0bd02b5cbf0296d17732c11caa0a03130
62,166
def print_django(inst):
    """
    Produce textual version of django object's fields/values - can be
    printed to the screen.
    """
    fields = [f.name for f in inst._meta.fields]
    return "".join("{}: {}\n".format(k, getattr(inst, k)) for k in fields)
3aa930b453374675f265aafd3b729e2e1d07aad2
62,168
import pathlib


def read(fname: str) -> str:
    """Get the content of a file at the root of the project."""
    return pathlib.Path(pathlib.Path(__file__).parent, fname).read_text(encoding="utf-8").strip()
9e69843b703109ce847c1253ec768d89701fe53f
62,171
def autoscale(bp, optimal=6):
    """
    >>> autoscale(150000000)
    20000000
    >>> autoscale(97352632)
    10000000
    """
    slen = str(bp)
    tlen = slen[0:2] if len(slen) > 1 else slen[0]
    precision = len(slen) - 2  # how many zeros we need to pad?
    bp_len_scaled = int(tlen)  # scale bp_len to range (0, 100)
    tick_diffs = [(x, abs(bp_len_scaled / x - optimal)) for x in [1, 2, 5, 10]]
    best_stride, best_tick_diff = min(tick_diffs, key=lambda x: x[1])

    while precision > 0:
        best_stride *= 10
        precision -= 1

    return best_stride
4138ff37b3ee98fbe7e7ca3c961714327f274bb5
62,172
from typing import Any
from typing import List


def default_uncollate(batch: Any) -> List[Any]:
    """This function is used to uncollate a batch into samples. The following conditions are used.

    >>> import torch
    >>> from pprint import pprint
    >>> batch = {"input": torch.zeros([5, 3, 224, 224]), "target": torch.zeros([5, 3, 224, 224]),
    ...          "metadata": {
    ...              'size': [torch.tensor([266, 266, 266, 266, 266]), torch.tensor([266, 266, 266, 266, 266])],
    ...              'height': torch.tensor([266, 266, 266, 266, 266]),
    ...              'width': torch.tensor([266, 266, 266, 266, 266])
    ...          }}
    >>> bbatch = default_uncollate(batch)
    >>> len(bbatch)
    5
    >>> print(bbatch[0].keys())
    dict_keys(['input', 'target', 'metadata'])
    >>> print(bbatch[0]["input"].size(), bbatch[0]["target"].size())
    torch.Size([3, 224, 224]) torch.Size([3, 224, 224])
    >>> pprint(bbatch[0]["metadata"])
    {'height': tensor(266), 'size': (tensor(266), tensor(266)), 'width': tensor(266)}
    """
    if isinstance(batch, dict):
        elements = [default_uncollate(element) for element in batch.values()]
        return [dict(zip(batch.keys(), element)) for element in zip(*elements)]

    if isinstance(batch, (list, tuple)):
        return list(zip(*batch))

    return list(batch)
f851c81069355fe219245f1c8df99e6615e8134d
62,173
def count_lines(filename):
    """Open file, count newlines, then return (filename, line_count) tuple."""
    with open(filename, 'rb') as file_to_read:
        chunk_size = 8192
        line_count = 0
        while True:
            chunk = file_to_read.read(chunk_size)
            if chunk:
                line_count += chunk.count(b'\n')
            else:
                break
    return filename, line_count
afd7eb49345434403c51399bceb0f921affa1d37
62,179
def resource_id(source_id: str, checksum: str, output_format: str) -> str:
    """Get the resource ID for an endpoint."""
    return f"{source_id}/{checksum}/{output_format}"
0e14b52b7dd2d4da6a4027b639ed273ef1c35e20
62,180
def insert_school(mongo_collection, **kwargs):
    """insert a new document."""
    return mongo_collection.insert_one(kwargs).inserted_id
d4aef58c6486d51e60bfc51712057e73ccdd1a37
62,182
def _sam_readline(sock):
    """ read a line from a sam control socket """
    response = bytearray()
    while True:
        c = sock.recv(1)
        if c:
            if c == b'\n':
                break
            response += c
        else:
            break
    return response.decode('ascii')
ac35ce3bdd6a4e28eba0b25adae5e53b8b794aa6
62,186
def get_parent_id(an_id: str) -> str:
    """
    given an id - get the parent id.
    for most items - it will be the id before the last /
    for .json or .xml - it will be the id up to the .json/.xml
    """
    parent_id = an_id.rsplit("/", 1)[0]
    if an_id.find(".xml") > 0:
        parent_id = an_id[: an_id.find(".xml") + 4]
    elif an_id.find(".json") > 0:
        parent_id = an_id[: an_id.find(".json") + 5]
    return parent_id
340e113b082a3baacf682ba32ce1301f73928a83
62,194
from typing import Tuple


def _square_to_grid(square: str) -> Tuple[int, int]:
    """
    Converts the string name of the square into the grid location
    """
    row_val = square[0]
    col_val = square[1]
    col = ["8", "7", "6", "5", "4", "3", "2", "1"].index(col_val)
    row = ["a", "b", "c", "d", "e", "f", "g", "h"].index(row_val)
    return row, col
641c195ed483cf2a217210f111379618800b168f
62,196
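A small check of `_square_to_grid` on algebraic chess coordinates (assuming the function above is in scope); in this convention the file letter maps to `row` and the rank digit to `col`:

print(_square_to_grid("a8"))  # (0, 0)
print(_square_to_grid("h1"))  # (7, 7)
print(_square_to_grid("e4"))  # (4, 4)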
from datetime import datetime

import pytz


def get_run_time(creation_time):
    """
    Get the time difference between resource creation time and current
    time in hours
    """
    current_time = datetime.utcnow().replace(tzinfo=pytz.UTC)
    diff = current_time - creation_time
    days, seconds = diff.days, diff.seconds
    hours = days * 24 + seconds // 3600
    return hours
2b868f81f1ce21a1cc448def1a636bc9db21c0a8
62,197
from pathlib import Path
import tempfile
import shutil

import six


def create_temp_dir_with_constant_name(name):
    # type: (six.text_type) -> Path
    """
    Create temporary directory but with constant name.
    :param name: tmp directory name.
    :return: path object to new tmp directory.
    """
    tmp_dir_path = Path(tempfile.gettempdir()) / name
    if tmp_dir_path.exists():
        shutil.rmtree(six.text_type(tmp_dir_path), ignore_errors=True)
    tmp_dir_path.mkdir(exist_ok=True)
    return tmp_dir_path
f0d5e514d25b3438de5b4f6a750b35aa44fb1b74
62,201
def _reverse_search_options(search_options):
    """ Reverse the search_options to map a UID to its numeric version

    >>> search_options = {1: {'uid': 'Computer.name'}}
    >>> _reverse_search_options(search_options)
    {'Computer.name': 1, 'name': 1, 1: 1}
    """
    rev = {}
    for k, v in search_options.items():
        try:
            rev[v['uid']] = k
            rev[v['uid'].split('.', 1)[1]] = k
            rev[k] = k
        except (KeyError, TypeError):
            pass
    return rev
603f33338f25fdc0e88c7e4b4cfd0f5204a4cc8a
62,205
def format_line_stats(kmer_stats):
    """Create a parsable string for the calculated kmer stats."""
    fields = ("total_kmers", "unclassified_kmers", "ambiguous_kmers", "kmer_support")
    formatted_str = "\t".join(str(kmer_stats[x]) for x in fields[:3])
    formatted_str += "\t" + "\x1F".join(str(x) for x in kmer_stats["kmer_support"])
    return formatted_str
e60a6b6e7dee0851ddf7f85a5999bb5d9111f7d3
62,207
def _ensure_forecast_measurement_compatibility(forecast, measurement):
    """Checks that a forecast is compatible with the measurement
    (observation or aggregate) it is compared to.

    Criteria:

    * matching variable
    * interval length of measurement is less than or equal to that of
      the forecast.
    * observations are made at the same site as the forecast
    * the forecast is made for the aggregate.

    Parameters
    ----------
    forecast: dict
    measurement: dict

    Returns
    -------
    dict
        Dictionary mapping field names to error messages. Dict will be
        empty if no issues are found.
    """
    errors = {}
    if forecast['variable'] != measurement['variable']:
        errors['variable'] = 'Must match forecast variable.'
    if forecast['interval_length'] < measurement['interval_length']:
        errors['interval_length'] = ('Must be less than or equal to '
                                     'forecast interval_length.')
    if 'site_id' in measurement:
        if forecast['site_id'] != measurement['site_id']:
            errors['site_id'] = 'Must match forecast site_id.'
    else:
        if forecast['aggregate_id'] != measurement['aggregate_id']:
            errors['aggregate_id'] = 'Must match forecast aggregate_id.'
    return errors
94654077c0c6f229bdba62d2ca2735a03417376a
62,208
def should_deploy(branch_name, day_of_week, hour_of_day):
    """
    Returns true if the code can be deployed.
    :param branch_name: the name of the remote git branch
    :param day_of_week: the day of the week as an integer
    :param hour_of_day: the current hour
    :return: true if the deployment should continue

    >>> should_deploy("hot-fix", 0, 10)
    True
    >>> should_deploy("hot-fix", 4, 10)  # this branch can be deployed on Friday
    True
    >>> should_deploy("hot-fix", 5, 10)  # this branch can be deployed on Saturday
    True
    >>> should_deploy("hot-fix", 6, 10)  # this branch can be deployed on Sunday
    True
    >>> should_deploy("hot-fix", 0, 7)  # this branch can be deployed before 8am
    True
    >>> should_deploy("hot-fix", 0, 16)  # this branch can be deployed after 4pm
    True
    >>> should_deploy("master", 0, 10)
    True
    >>> should_deploy("master", 4, 10)  # master cannot be deployed on Friday
    False
    >>> should_deploy("master", 5, 10)  # master cannot be deployed on Saturday
    False
    >>> should_deploy("master", 6, 10)  # master cannot be deployed on Sunday
    False
    >>> should_deploy("master", 0, 7)  # master cannot be deployed before 8am
    False
    >>> should_deploy("master", 0, 16)  # master cannot be deployed after 4pm
    False
    """
    if branch_name == "master" and day_of_week >= 4:
        return False
    elif branch_name == "master" and (hour_of_day < 8 or hour_of_day >= 16):
        return False
    else:
        return True
8d8ba7657e9393d588c7019839acc859c56e63f9
62,210
def get_total_cost(meal_cost: float, tip_cost: float) -> float:
    """Calculate the total meal cost from the tip cost and meal cost."""
    return meal_cost + tip_cost
93a8285e54f4b9233105789c2a5d5ceedef25bb8
62,216
def search_person(lista, nombre):
    """
    Search for a person in a list of people.

    We look for a name in a list of instances of the Person class. If it
    is found we return its index, otherwise we return -1. Be careful with
    this value: -1 is a valid index in Python, but this function returns
    it as an indicator that the search found nothing.
    """
    if nombre is not None:
        for i in range(len(lista)):
            if lista[i].nombre == nombre:
                return i
    return -1
1f95d41b06c49402550855e01d10b25b3b40fa8a
62,219
def _list_to_matcher(matcher_list):
    """Converts a matcher as a list to a dict matcher."""
    return {"default": matcher_list}
df5fecf3e979699d537b9e5fe2176c79c79264fb
62,221
def common_ancestors(ind1, ind2):
    """
    Common ancestors of ind1 and ind2.

    Recursively searches ancestors for both individuals, and then performs
    a set intersection on each set of ancestors

    :param ind1: the first individual
    :param ind2: the second individual
    :type ind1: Individual
    :type ind2: Individual

    :returns: Common ancestors
    :rtype: set
    """
    return ind1.ancestors() & ind2.ancestors()
e0f7e8c8521e656053067a129dcaa0dfd14b7c29
62,224
from typing import Optional
import re


def _GetQueryId(filename: str) -> Optional[str]:
    """Extract query id from file name."""
    match = re.match(r'(.*/)?q?([0-9]+[ab]?)\.sql$', filename)
    if match:
        return match.group(2)
c0768cec58eb5dd6a8ede18ce4ae0d5976b8db9e
62,227
def similarity_to_pattern(similarity: int) -> str:
    """
    Inverse of pattern_to_similarity for 5 digits in base 3.
    :param similarity: int
    :return: pattern as a 5-character string of base-3 digits
    """
    pattern = ''
    for i in range(5):
        pattern += str(similarity % (3 ** (i + 1)) // 3 ** i)
    return pattern
8fe9e6d4b6e20bcc3acfd52c674b475054084008
62,235
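A quick base-3 decomposition check for `similarity_to_pattern` (assuming the function above is in scope); digits come out least-significant first:

print(similarity_to_pattern(0))    # 00000
print(similarity_to_pattern(5))    # 21000 (5 = 2*1 + 1*3)
print(similarity_to_pattern(242))  # 22222 (3**5 - 1)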
def _server_list_custom_func(
        client,
        resource_group_name=None):
    """
    List servers by resource group name or subscription
    """
    if resource_group_name:
        return client.list_by_resource_group(resource_group_name)
    return client.list()
8795a4475d2fb61259036e77c3d483d89a7a9d34
62,236
import re


def parse_shader_error(error):
    """Parses a single GLSL error and extracts the line number
    and error description.

    Line number and description are returned as a tuple.

    GLSL errors are not defined by the standard, as such,
    each driver provider prints their own error format.

    Nvidia print using the following format::

        0(7): error C1008: undefined variable "MV"

    Nouveau Linux driver using the following format::

        0:28(16): error: syntax error, unexpected ')', expecting '('

    ATi and Intel print using the following format::

        ERROR: 0:131: '{' : syntax error parse error
    """
    # Nvidia
    # 0(7): error C1008: undefined variable "MV"
    match = re.match(r'(\d+)\((\d+)\):\s(.*)', error)
    if match:
        return (
            int(match.group(2)),  # line number
            match.group(3)        # description
        )

    # ATI
    # Intel
    # ERROR: 0:131: '{' : syntax error parse error
    match = re.match(r'ERROR:\s(\d+):(\d+):\s(.*)', error)
    if match:
        return (
            int(match.group(2)),  # line number
            match.group(3)        # description
        )

    # Nouveau
    # 0:28(16): error: syntax error, unexpected ')', expecting '('
    match = re.match(r'(\d+):(\d+)\((\d+)\):\s(.*)', error)
    if match:
        return (
            int(match.group(2)),  # line number
            match.group(4)        # description
        )

    raise ValueError('Unknown GLSL error format')
7b299394d93acc5235ea5e4116c874f03fab6615
62,241
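Exercising `parse_shader_error` on the three vendor formats documented in its docstring (assuming the function above is in scope):

print(parse_shader_error('0(7): error C1008: undefined variable "MV"'))
# (7, 'error C1008: undefined variable "MV"')
print(parse_shader_error("ERROR: 0:131: '{' : syntax error parse error"))
# (131, "'{' : syntax error parse error")
print(parse_shader_error("0:28(16): error: syntax error, unexpected ')', expecting '('"))
# (28, "error: syntax error, unexpected ')', expecting '('")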
from typing import Final
from typing import get_origin


def is_final_type(tp: type) -> bool:
    """
    Test if the given type is a final type.
    """
    return tp is Final or get_origin(tp) is Final
410d1067ed377e1268cf812b16bd758cf64c6e52
62,245
import json


def pretty_response(response):
    """Return a pretty-formatted response."""
    return json.dumps(response, indent=2, sort_keys=True)
9f748567b3ac38746c2357a103dbb245a24c2a3e
62,246
def ConstantRank(j, state, decision, collision):
    """Constant rank no matter what."""
    return [state.memories[j]]
b8d5c4819229c59097ee760efcbbe0226cec492f
62,247
def _get_variable_read_op(variable, graph):
    """ Returns the /read operation for a variable """
    return graph.get_operation_by_name(variable.name.split(':')[0] + '/read')
d454415edb11f8a3f9bdc5790331db5b2bf94a95
62,251
def convert_exponent_alpha(exponent):
    """Convert a powerlaw exponent to the expected DFA alpha value.

    Parameters
    ----------
    exponent : float
        Aperiodic exponent value, representing a 1/f distribution.

    Returns
    -------
    alpha : float
        Predicted DFA alpha value for the given exponent value.

    References
    ----------
    .. [1] Schaefer, A., Brach, J. S., Perera, S., & Sejdić, E. (2014). A comparative analysis of
           spectral exponent estimation techniques for 1/fβ processes with applications to the
           analysis of stride interval time series. Journal of Neuroscience Methods, 222, 118–130.
           https://doi.org/10.1016/j.jneumeth.2013.10.017

    Examples
    --------
    Convert a powerlaw exponent for pink noise to the expected DFA value:

    >>> convert_exponent_alpha(-1)
    1.0

    Convert a powerlaw exponent for white noise to the expected DFA value:

    >>> convert_exponent_alpha(0)
    0.5
    """
    return (-exponent + 1) / 2.
2ebcf472f28dbb319c3cbbcb014cec7e2eeaba38
62,252
def truncate_series_top_n(series, n=24):
    """
    take a series which can be interpreted as a dict, index=key, this
    function sorts by the values and takes the top-n values, and returns
    a new series
    """
    return series.sort_values(ascending=False).head(n)
8b8d651fe58ec7d1024d9e09e6952837f451bf3f
62,253
def array_to_grader(array, epsilon=1e-4):
    """Utility function to help preparing Coursera grading conditions descriptions.

    Args:
        array: iterable of numbers, the correct answers
        epsilon: the generated expression will accept the answers with this
            absolute difference with provided values

    Returns:
        String. A Coursera grader expression that checks whether the user
        submission is in (array - epsilon, array + epsilon)"""
    res = []
    for element in array:
        if isinstance(element, int):
            res.append("[{0}, {0}]".format(element))
        else:
            res.append("({0}, {1})".format(element - epsilon, element + epsilon))
    return " ".join(res)
cf608271e821da48a62c67a932ce0413aac19466
62,259
def extract_gn_build_commands(build_ninja_file):
    """Extracts from a build.ninja the commands to run GN.

    The commands to run GN are the gn rule and build.ninja build step at the
    top of the build.ninja file. We want to keep these when deleting GN builds
    since we want to preserve the command-line flags to GN.

    On error, returns the empty string."""
    result = ""
    with open(build_ninja_file, 'r') as f:
        # Read until the second blank line. The first thing GN writes to the file
        # is the "rule gn" and the second is the section for "build build.ninja",
        # separated by blank lines.
        num_blank_lines = 0
        while num_blank_lines < 2:
            line = f.readline()
            if len(line) == 0:
                return ''  # Unexpected EOF.
            result += line
            if line[0] == '\n':
                num_blank_lines = num_blank_lines + 1
    return result
64b57222721df2809ce6ef9746cc703b24a99662
62,260
def findLowTempIndex(lowTempMassFrac, X, Z):
    """
    Find the index for the LA08 data which has the same mass fractions X and Z.
    """
    idx = 0
    for i in range(lowTempMassFrac.shape[0]):
        if lowTempMassFrac[i, 1] == X and lowTempMassFrac[i, 3] == Z:
            idx = i
            break

    if idx == 0:
        print("\nUh oh! Couldn't find default LA08 table for X = {} and Z = {}".format(X, Z))
        print("4D interpolation hasn't been implemented because I'm being lazy")
        exit(1)

    return idx
38334c6f9f1f13b47f0ee558442d4e68683dd59d
62,262
def unique_experiment_identifier(chosen_hyperparam_indices):
    """
    Returns a unique identifier to save the eval metrics.
    chosen_hyperparam_indices is a list of the indices of the hyperparameters
    in their respective lists that we are using in this configuration.
    """
    # Store output ID.
    output_id = ""

    # Output is just the indices of the chosen hyperparameters.
    for i, hyperparam_ix in enumerate(chosen_hyperparam_indices):
        # Do not add underscore if we are at the end of the string.
        output_id += str(hyperparam_ix) + ("_" if i != (len(chosen_hyperparam_indices) - 1) else "")

    # Return output ID.
    return output_id
4f3b8477b85bd3d5b3a3d595d02ed92677c93287
62,268
def rigids_from_tensor4x4(m):
    """Construct Rigids object from a 4x4 array.

    Here the 4x4 is representing the transformation in homogeneous
    coordinates.

    Args:
        m: Array representing transformations in homogeneous coordinates.
    Returns:
        Rigids object corresponding to transformations m
    """
    return m[..., 0, 0], m[..., 0, 1], m[..., 0, 2], \
        m[..., 1, 0], m[..., 1, 1], m[..., 1, 2], \
        m[..., 2, 0], m[..., 2, 1], m[..., 2, 2], \
        m[..., 0, 3], m[..., 1, 3], m[..., 2, 3]
b94db458eb8cb4fd9b3f1b57901c3b3fdadaf971
62,269
def render_template(env, filename, values=None):
    """
    Render a jinja template
    """
    if not values:
        values = {}
    tmpl = env.get_template(filename)
    return tmpl.render(values)
b072cc2cea557eff1546ff7a6a4e8a55fc358ab0
62,272
def toys(w):
    """Hackerrank Problem: https://www.hackerrank.com/challenges/priyanka-and-toys/problem

    Priyanka works for an international toy company that ships by container. Her task is to determine the
    lowest cost way to combine her orders for shipping. She has a list of item weights. The shipping company has a
    requirement that all items loaded in a container must weigh less than or equal to 4 units plus the weight of
    the minimum weight item. All items meeting that requirement will be shipped in one container.

    Solve:

    We sort the array, and then we iterate through the list seeing if each order fits within the current lowest
    order's weight. If it does, we can continue on, and if it doesn't, we then create a new "container" as this
    order no longer fits within the previous order limit, and continue on through the orders.

    Args:
        w (list): Array representing the weighted orders

    Returns:
        int: The minimum number of containers needed to ship the orders
    """
    containers = 1
    w.sort()
    cur_lowest = w[0]
    # Iterate through the sorted list, and add a container if the next weighted order doesn't fit within the
    # current lowest order's weight + 4
    for i in range(1, len(w)):
        if w[i] > cur_lowest + 4:
            cur_lowest = w[i]
            containers += 1
    return containers
6fde143e4760eefd3dc38b3ea1bdf38a9abe4129
62,273
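A quick run of `toys` on the HackerRank sample input (assuming the function above is in scope):

print(toys([1, 2, 3, 21, 7, 12, 14, 21]))  # 4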
def powop(a, b):
    """Method to raise a to power b using ** operator."""
    return a ** b
c997b5ca4b9c9a398e0ca2a5cfee9dd825e07a16
62,274
def _get_fields(events):
    """
    Returns a set of all fields in the given event set.

    @param events: The events as a list of dictionaries.
    @type events: list(dict(str: str))

    @return: All the fields.
    @rtype: set(str)
    """
    fields = set()
    for event in events:
        fields.update(list(event.keys()))
    return fields
1028e30ea761f4d8397a042863a243e71172539c
62,279
def cohens_d(mu_1, mu_2, std):
    """
    Compute the standardized effect size as difference between the two
    means divided by the standard deviation.

    Parameters
    ----------
    mu_1 : float
        Mean of the first sample.
    mu_2 : float
        Mean of the second sample.
    std : float > 0
        Pooled standard deviation. It assumes that the variance of each
        population is the same.

    Returns
    -------
    effect_size : float
        Effect size as cohen's d coefficient
    """
    return (mu_1 - mu_2) / std
1321194f1a5c1d4b93f9735423549ce57232cfdf
62,281
def str_remove(df, column_name: str, pattern: str = ''):
    """Remove string pattern from a column

    Wrapper around df.str.replace()

    Parameters
    -----------
    df: pd.Dataframe
        Input dataframe to be modified
    column_name: str
        Name of the column to be operated on
    pattern: str, default to ''
        String pattern to be removed

    Returns
    --------
    df: pd.Dataframe
    """
    df[column_name] = df[column_name].str.replace(pattern, '')
    return df
f27d956bacaeaa35568f79105acbdec89e236e0e
62,286
import json


def JsonObj(data):
    """Returns json object from data."""
    return json.loads(str(data))
5efca3a024b6c312d769caf7dfa642aeff929ec1
62,290
def is_simplified(G):
    """
    Determine if a graph has already had its topology simplified.

    If any of its edges have a geometry attribute, we know that it has
    previously been simplified.

    Parameters
    ----------
    G : graph

    Returns
    -------
    bool
    """
    edges_with_geometry = [d for u, v, k, d in G.edges(data=True, keys=True) if 'geometry' in d]
    return len(edges_with_geometry) > 0
29bcb5acb3147d341d0aac47bf6ee720e25b221e
62,291
def is_valid_ecl(ecl: str) -> bool:
    """
    (Eye Color) - exactly one of: amb blu brn gry grn hzl oth.

    :return: Status of field (true = valid).
    :rtype: bool
    """
    colours = ["amb", "blu", "brn", "gry", "grn", "hzl", "oth"]
    return ecl in colours
ad390b223b21003673fb1de8cda8b27cc047c7b0
62,292
def SizeScale(size):
    """
    Rescale the size (currently only convert to float).

    :param size: a string
    :return: a float
    """
    return float(size)
42127b3624388381cfc9293516fb30fd7cfbcd55
62,295
from typing import List
from typing import Tuple
from typing import Any
import configparser


def config_items(path: str) -> List[Tuple[Tuple[str, str], Any]]:
    """Return config file option, value pairs."""
    config = configparser.ConfigParser()
    config.read(path)
    res = []
    for section in config.sections():
        for opt, val in config.items(section):
            res.append(((section, opt), val))
    return res
0c7b8cc1b5bafe0aea4552b3f6a488dbe84bde2e
62,296
def inconsistent_typical_range_stations(stations):
    """A function which returns a list of stations that have inconsistent
    data (for typical range) from a given list of stations."""
    inconsistency_list = []
    for check in stations:
        inconsistency_check = check.typical_range_consistent()
        if not inconsistency_check:
            inconsistency_list.append(check.name)
    inconsistency_list.sort()
    return inconsistency_list
ee11815836e04d791376367827f2678ebc958a7b
62,297
import requests


def post_service3(url, rhythm_key_pair):
    """Using our existing user data from s1, we use this function to poll s3
    with a post request for a new note length.

    Keyword Arguments:
    url: The url of service 3.
    rhythm_key_pair: A key pair rhythm list dictionary.
    """
    service_3_response = requests.post(url, json=rhythm_key_pair)
    json_response_data = service_3_response.json()
    status_code_response = service_3_response.status_code

    print("\n ----------- Service 3 POST Response ----------- \n")
    print(f'Data: {json_response_data}')
    print(f'Response Code: {status_code_response}')
    print("\n ----------- End of Service 3 POST Response ----------- \n")

    return json_response_data
d95207fd4306d678a0048a2f5f78d19f91c1e42c
62,300
def is_apple(filename):
    """ check if the file is either a .app or a .ipa (macOS/iOS) file """
    with open(filename, "rb") as f:
        # magic bytes in .app files
        if f.read(4) == b"\xcf\xfa\xed\xfe":
            return True
        f.seek(0)
        data = f.read(200)
        # ".app" and the word "Payload" are always in an ipa file
        if b".app" in data and b"Payload" in data:
            return True
    return False
2b5383245bd9c47d04b4a681094c9c2ea5fa5aa9
62,304
import math


def calc_hist_length_per_net(batch_count, epoch_count, plot_step):
    """ Calculate length of history arrays based on batch_count,
    epoch_count and plot_step. """
    return math.floor((batch_count * epoch_count) / plot_step)
916b65439fa9ee3f10d90abc290ea8d4f1465465
62,312
def template_preferences(pulses_plugin, template_sequence):
    """Preferences corresponding to the template sequence.
    """
    infos = pulses_plugin.get_item_infos(template_sequence)
    return infos.metadata['template-config']
4e5cdd017e7444c7782f788563139c204850b64c
62,315
def check_value(value: bytearray, size_value: int) -> bool:
    """ Check the correctness of the variable.

    This function checks the type ('bytes' or 'bytearray') and whether the
    size of the 'value' variable matches the 'size_value' value.

    Args:
        value: The variable that you want to check.
        size_value: The required size of the variable.

    Returns:
        Check result.
    """
    result = True
    if (not isinstance(value, (bytes, bytearray))) or len(value) != size_value:
        result = False
    return result
4a80852f366cf96e7742cf68fcbf58ef51d86b40
62,323
from typing import Any


def listify(value: Any):
    """
    Encapsulate value in a list if it isn't already.
    """
    if isinstance(value, list):
        return value
    return [value]
8cdd5b0ca8fe31f2cc5da23b9a18cb316e6c06b9
62,331
import hashlib
import struct


def return_hash_of_GZ(fname, kwArgCheck=None, ignoreModificationTime=True):
    """
    This function returns the SHA512 hash of the passed GZ file as if the
    first "Modification Time" field is set to zero. Using this function it is
    possible to discover that the only binary difference between two different
    GZ files is the first "Modification Time" field.

    Note that a GZ file may contain multiple "members"; this function only
    finds the "Modification Time" field for the *first* "member".

    If the optional second argument is passed as False then this function will
    return the SHA512 identically to any other method.
    """

    # NOTE: The following websites have some very useful information on how to
    #       parse GZ files.
    #         * https://en.wikipedia.org/wiki/Gzip#File_format
    #         * https://tools.ietf.org/html/rfc1952.html#page-5

    # Check keyword arguments ...
    if kwArgCheck is not None:
        print(f"WARNING: \"{__name__}\" has been called with an extra positional argument")

    # Open GZ read-only ...
    with open(fname, "rb") as fobj:
        # Construct a hash object ...
        hobj = hashlib.sha512()

        # Attempt to read 2 bytes and pass them to the hash object ...
        src = fobj.read(2)
        hobj.update(src)

        # Check that this is a GZ file ...
        if src != b"\x1f\x8b":
            raise Exception(f"\"{fname}\" is not a GZ") from None

        # Pass 2 bytes to the hash object ...
        hobj.update(fobj.read(2))

        # Check what the user wants to do ...
        if ignoreModificationTime:
            # Skip the 4-byte MTIME field and pass 0 as a little-endian
            # un-signed 32-bit integer to the hash object (the GZ format
            # stores MTIME little-endian; for zero the bytes are the same
            # either way) ...
            fobj.read(4)
            hobj.update(struct.pack("<I", 0))
        else:
            # Pass 4 bytes to the hash object ...
            hobj.update(fobj.read(4))

        # Pass the rest of the file to the hash object ...
        hobj.update(fobj.read())

    # Return hash hexdigest ...
    return hobj.hexdigest()
61a5f5a16acf74319e1c770cf8d78c92aa8418b3
62,334
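A small self-check for `return_hash_of_GZ` (file names are illustrative, and the function above is assumed in scope): two gzip members that differ only in MTIME should hash identically when the field is ignored. Writing through io.BytesIO keeps gzip from embedding a file name in the header, so MTIME is the only difference.

import gzip
import io

def write_gz(path, mtime):
    # Compress into a buffer so no FNAME field is written, then dump to disk.
    buf = io.BytesIO()
    with gzip.GzipFile(fileobj=buf, mode="wb", mtime=mtime) as gz:
        gz.write(b"hello world")
    with open(path, "wb") as fobj:
        fobj.write(buf.getvalue())

write_gz("a.gz", 0)
write_gz("b.gz", 123456789)
print(return_hash_of_GZ("a.gz") == return_hash_of_GZ("b.gz"))  # True
print(return_hash_of_GZ("a.gz", ignoreModificationTime=False)
      == return_hash_of_GZ("b.gz", ignoreModificationTime=False))  # False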
import json


def template_response(data, templating_dict):
    """ Change data from the Grafana dashboard API response based on the
    dashboard templates

    :param data: Data from the Grafana dashboard API
    :param templating_dict: dictionary of {template_name, template_value}
    :return: panel_info with all templating values filled in
    """
    data = json.dumps(data)
    # Loop through all the templates and replace them if they're used in this panel
    for template in templating_dict:
        data = data.replace('${}'.format(template), templating_dict[template])
    return json.loads(data)
772e02283a9679fe5ea6b6a9f41ac5156c98a66a
62,340
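A round-trip example for `template_response` with a made-up dashboard fragment (assuming the function above is in scope):

panel = {"targets": [{"expr": "rate(http_requests{job=\"$job\"}[5m])"}]}
print(template_response(panel, {"job": "api-server"}))
# {'targets': [{'expr': 'rate(http_requests{job="api-server"}[5m])'}]}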
def _get_docstring_type_name(var_doc: str) -> str:
    """
    Get the string of argument or return value type's description
    from docstring.

    Parameters
    ----------
    var_doc : str
        Docstring's part of argument or return value.

    Returns
    -------
    type_name : str
        Argument or return value's type description.
    """
    type_name: str = var_doc.split('\n')[0]
    colon_exists: bool = ':' in type_name
    if not colon_exists:
        return ''
    type_name = type_name.split(':')[1]
    type_name = type_name.split(',')[0]
    type_name = type_name.strip()
    return type_name
fe07d6bb9869b4fcac404a3da6cdb0480880076b
62,345
def dblp_key_extract(entry):
    """Return the dblp key if the bibtex key is already a DBLP one,
    otherwise return None."""
    if entry["ID"].startswith("DBLP:"):
        return entry["ID"][5:]
    else:
        return None
0f2c9c03c9957306d35eb0156db632a4612c7b68
62,348
def STARTSWITH(x, y):
    """checks if 'x' startswith 'y'"""
    return x.startswith(y)
e810c4b5ebcfcc6b7849416b372ce4296de73d6c
62,349
import itertools


def cartesian_params(*paramList):
    """Make a test function or method parameterized by cartesian product
    of parameters

    .. code-block :: python

       import unittest

       from nose2.tools import cartesian_params


       @cartesian_params((1, 2, 3), ('a', 'b'))
       def test_nums(num, char):
           assert num < ord(char)


       class Test(unittest.TestCase):

           @cartesian_params((1, 2, 3), ('a', 'b'))
           def test_less_than(self, num, char):
               self.assertLess(num, ord(char))

    Parameters in the list must be defined as iterable objects such as
    tuple or list.
    """
    def decorator(func):
        func.paramList = itertools.product(*paramList)
        return func
    return decorator
61e9b4ef4bc93b90a4a4bb59e0a96146e1d644ba
62,350
import re


def strip(string, decimal='.'):
    """
    Strip non-decimal characters out of string.
    """
    # re.escape guards against decimal separators that are regex
    # metacharacters inside the character class.
    return re.sub(r'[^\d{}]+'.format(re.escape(decimal)), '', string)
40d62b2656f48046299c8d4c476e1ae55e41251f
62,351
import inspect


def classname(val):
    """
    Returns a qualified class name as string.

    The qualified class name consists of the module and the class name,
    separated by a dot. If an instance is passed to this function, the name
    of its class is returned.

    Parameters
    ----------
    val : instance or class
        The instance or a class of which the qualified class name is returned.

    Returns
    -------
    str :
        The qualified class name.
    """
    if inspect.isclass(val):
        return ".".join([val.__module__, val.__name__])

    return ".".join([val.__class__.__module__, val.__class__.__name__])
d7c94c256207d413a2ff7935df9dfebb513f955e
62,354
def mk_fit_dict(coeff, order, func, xmin=None, xmax=None, **kwargs):
    """Generate a dict that is formatted for using func_val.

    Parameters
    ----------
    coeff : array
        Coefficients of the fit
    order : :class:`int`
        The order of the function to be used in the fitting.
    func : :class:`str`
        Name of the fitting function: polynomial, legendre, chebyshev.
    xmin : :class:`float`
        Minimum value in the array (or the left limit for a
        legendre/chebyshev polynomial).
    xmax : :class:`float`
        Maximum value in the array (or the right limit for a
        legendre/chebyshev polynomial).

    Returns
    -------
    :class:`dict`
        The formatted dictionary.
    """
    fit_dict = dict(coeff=coeff, order=order, func=func,
                    xmin=xmin, xmax=xmax, **kwargs)
    return fit_dict
ef4d7a19c3b25c1353e18ff8f42d50e588cd2794
62,355
from typing import Sequence


def false_positive_rate(y_true: Sequence, y_pred: Sequence) -> float:
    """Calculates the false positive rate for binary classification results.

    Assumes that the negative class is -1.
    """
    assert set(y_true).issubset({1, -1})
    assert set(y_pred).issubset({1, -1})

    false_positives = 0
    true_negatives = 0
    for true_label, predicted_label in zip(y_true, y_pred):
        if true_label == -1 and predicted_label == 1:
            false_positives += 1
        elif true_label == predicted_label == -1:
            true_negatives += 1

    try:
        return false_positives / (false_positives + true_negatives)
    except ZeroDivisionError:
        return 0
8668d01c2628062efef67b04c912ef315d37fa13
62,357
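A hand-checkable call for `false_positive_rate` (assuming the function above is in scope): one false positive out of three actual negatives gives FPR 1/3.

y_true = [-1, -1, -1, 1, 1]
y_pred = [1, -1, -1, 1, -1]
print(false_positive_rate(y_true, y_pred))  # 0.3333...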
def rename_candidate_hugo(candidate, renamings):
    """Renames a candidate name according to a renaming map."""
    name_expr = candidate.split(".")
    base_name = name_expr[0]
    if base_name in renamings:
        base_name = renamings[base_name]
    name_expr[0] = base_name
    result = ".".join(name_expr)
    return result
eff58e01b4197ccf2a20d58a88f9c443d277a376
62,362
def award_types(row):
    """
    "Award Type" for FPDS transactions:

        if award <> IDV (`pulled_from` <> 'IDV'): use `contract_award_type`
        elif `idv_type` == B & `type_of_idc` is present: use "IDV_B_" + `type_of_idc`
        elif `idv_type` == B & ("case" for type_of_idc_description for specific IDC type): use IDV_B_*
        else: use "IDV_" + `idv_type`

    "Award Type Description" for FPDS transactions:

        if award <> IDV (`pulled_from` <> 'IDV'): use `contract_award_type_desc`
        elif `idv_type` == B & `type_of_idc_description` <> null/NAN: use `type_of_idc_description`
        elif `idv_type` == B: use "INDEFINITE DELIVERY CONTRACT"
        else: use `idv_type_description`
    """
    pulled_from = row.get("pulled_from", None)
    idv_type = row.get("idv_type", None)
    type_of_idc = row.get("type_of_idc", None)
    type_of_idc_description = row.get("type_of_idc_description", None)

    if pulled_from != "IDV":
        award_type = row.get("contract_award_type")
    elif idv_type == "B" and type_of_idc is not None:
        award_type = "IDV_B_{}".format(type_of_idc)
    elif idv_type == "B" and type_of_idc_description == "INDEFINITE DELIVERY / REQUIREMENTS":
        award_type = "IDV_B_A"
    elif idv_type == "B" and type_of_idc_description == "INDEFINITE DELIVERY / INDEFINITE QUANTITY":
        award_type = "IDV_B_B"
    elif idv_type == "B" and type_of_idc_description == "INDEFINITE DELIVERY / DEFINITE QUANTITY":
        award_type = "IDV_B_C"
    else:
        award_type = "IDV_{}".format(idv_type)

    if pulled_from != "IDV":
        award_type_desc = row.get("contract_award_type_desc")
    elif idv_type == "B" and type_of_idc_description not in (None, "NAN"):
        award_type_desc = type_of_idc_description
    elif idv_type == "B":
        award_type_desc = "INDEFINITE DELIVERY CONTRACT"
    else:
        award_type_desc = row.get("idv_type_description")

    return award_type, award_type_desc
1fb6a6b6408c5e2a31d72ed03767dc1158311aaf
62,363
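An illustrative row (field values are hypothetical) exercising the IDV_B description branch of `award_types`, assuming the function above is in scope:

row = {
    "pulled_from": "IDV",
    "idv_type": "B",
    "type_of_idc": None,
    "type_of_idc_description": "INDEFINITE DELIVERY / REQUIREMENTS",
}
print(award_types(row))  # ('IDV_B_A', 'INDEFINITE DELIVERY / REQUIREMENTS')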
def labelIndex2Name(label_index):
    """Convert label index into name of piece"""
    return ' KQRBNPkqrbnp'[label_index]
ebe9903b9e342d864f180f0c4d1c720bef7274a7
62,368
def midpointint(f, a, b, n):
    """
    Computes the Midpoint integration rule of f from a to b using a
    python for loop.
    """
    running_sum = 0
    for i in range(n):
        running_sum += f(a + ((b - a) / float(n)) * (i + .5))
    return running_sum * (b - a) / float(n)
c99ce4440b8173fcc9e52002c653cb04dd4c7d88
62,371
def auto_repeat(obj, n, force=False, check=False):
    """
    Automatically repeat the specified object n times.

    If the object is not iterable, a tuple with the specified size is returned.
    If the object is iterable, the object is left untouched.

    Args:
        obj: The object to operate with.
        n (int): The length of the output object.
        force (bool): Force the repetition, even if the object is iterable.
        check (bool): Ensure that the object has length n.

    Returns:
        val (tuple): Returns obj repeated n times.

    Raises:
        AssertionError: If check is True and the object does not have length n.

    Examples:
        >>> auto_repeat(1, 3)
        (1, 1, 1)
        >>> auto_repeat([1], 3)
        [1]
        >>> auto_repeat([1, 3], 2)
        [1, 3]
        >>> auto_repeat([1, 3], 2, True)
        ([1, 3], [1, 3])
        >>> auto_repeat([1, 2, 3], 2, True, True)
        ([1, 2, 3], [1, 2, 3])
        >>> auto_repeat([1, 2, 3], 2, False, True)
        Traceback (most recent call last):
            ...
        AssertionError
    """
    try:
        iter(obj)
    except TypeError:
        force = True
    finally:
        if force:
            obj = (obj,) * n
    if check:
        assert (len(obj) == n)
    return obj
e7d369a2f3bc4f3c745943451ba96304a9e00460
62,373
from datetime import datetime


def str2iso(input_string: str) -> datetime:
    """
    Convert a specific type of ISO string that is compliant with file
    pathing requirements to an ISO datetime.

    :return: The parsed datetime.
    """
    iso_datetime = datetime.strptime(input_string, "%Y-%m-%d %H:%M:%S")
    return iso_datetime
bd57ff70d8f8f377ad396a60b326f1dd2166aac8
62,380
def get_top_n_indexes(val_list: list, n: int = 3) -> list:
    """ returns the indexes of the highest values in the list

    Params:
    -------
    val_list : list of values from which the highest N are to be returned
        as index positions

    Returns:
    -------
    top_indexes : list of indexes containing the highest N values
    """
    # Sorting the indexes directly (rather than looking values back up with
    # list.index) keeps duplicate values from mapping to the same index.
    top_indexes = sorted(range(len(val_list)), key=lambda i: val_list[i], reverse=True)[:n]
    return top_indexes
f98b1198580247560230a653fe93eb46891c0cfa
62,382
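With the duplicate-safe version of `get_top_n_indexes` above in scope, repeated values yield distinct indexes:

print(get_top_n_indexes([10, 50, 50, 7], n=2))  # [1, 2]
print(get_top_n_indexes([3, 1, 4, 1, 5], n=3))  # [4, 2, 0]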
import hashlib


def get_sha1_truncated_id(s: str) -> str:
    """
    Return a truncated hash of any given string.

    Used on bbox and bbox sections to keep track of duplicate runs / work
    without having to store the entire lon,lat,lon,lat string every time.
    """
    return hashlib.sha1(s.encode("UTF-8")).hexdigest()[:10]
67e0fe08862a6bbafb45f0d0a35e7ffc78380fba
62,386
def bisection_method(f, left, right, value, tol=1e-6, max_iters=1e12):
    """
    Simple bisection solver, which works well for finding roots of
    monotonic functions
    """
    mid = (left + right) / 2
    i = 1
    # utilising the knowledge that the function is monotonically decreasing
    while abs(f(mid) - value) > tol and i < max_iters:
        if f(left) < value:
            right = left
            left *= 0.5
        elif f(right) > value:
            left = right
            right *= 2
        elif f(mid) > value:
            left = mid
        else:
            right = mid
        mid = (left + right) / 2
        i += 1
    return mid
96c9b224c4b0ed843f8833b615ca31e0d90422f7
62,389
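A quick solve with `bisection_method` (assuming the function above is in scope) on a monotonically decreasing function, per the in-code comment: find x where 1/x equals 0.25, starting from a bracket the solver expands on its own.

root = bisection_method(lambda x: 1.0 / x, left=1.0, right=2.0, value=0.25)
print(round(root, 4))  # 4.0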
def create_question_list(question_list):
    """ Function that creates a string for the question_list given."""
    string = "+".join([str(id) for id in question_list])
    return string
024694ee7d3b86dd9f2dd495d03031134619dcc6
62,390
import pytz


def datetime_as_utcdatetime(dt):
    """
    Return a timezone-aware datetime as a UTCdatetime as specified in
    https://www.openarchives.org/OAI/openarchivesprotocol.html#Dates, §3.3.

    Note that the OAI-PMH specification mandates that the "Z" (zulu)
    specifier be used for the timezone as opposed to the equally valid
    "+00.00".
    """
    # Convert to UTC
    utc_dt = dt.astimezone(pytz.utc)

    # Format appropriately
    return f'{utc_dt:%Y-%m-%dT%H:%M:%S}Z'
822f7b8f391dccd64a8e2d9386734fb693cb930f
62,391
def strip_comments(line):
    """Removes all text after a # in the passed-in string

    >>> strip_comments("Test string")
    'Test string'
    >>> strip_comments("Test #comment")
    'Test '
    >>> strip_comments("#hashtag")
    ''
    >>> strip_comments("Test#comment")
    'Test'
    """
    if "#" in line:
        return line[:line.find("#")]
    else:
        return line
c21948eae449eb6037d7abc40cce467d00edf426
62,393
def get_relationship(from_id, to_id, rel_name, properties):
    """reformats a NetworkX edge for `generate_data()`.

    :param from_id: the ID of a NetworkX source node
    :param to_id: the ID of a NetworkX target node
    :param rel_name: string that describes the relationship between the two nodes
    :param properties: a dictionary of edge attributes
    :rtype: a dictionary representing a Neo4j POST request
    """
    body = {"to": "{{{0}}}".format(to_id), "type": rel_name, "data": properties}
    return {"method": "POST", "to": "{{{0}}}/relationships".format(from_id), "body": body}
17516c972a1b6cf2893c7b4c7ea96d6ab9969f01
62,394
def _get_element_or_alt(data, element, alt):
    """
    if :element: is in dict :data: return it, otherwise return :alt:
    """
    if element in data:
        return data[element]
    else:
        return alt
a1a40105d6804c6bbd28af34b4216704c2bc498c
62,396