content
stringlengths
35
416k
sha1
stringlengths
40
40
id
int64
0
710k
from typing import Optional
from typing import Any


def safe_list_get(list_: list, index: int, default: Optional[Any] = None) -> Any:
    """Safely retrieve a list entry, returning *default* on a bad index.

    Parameters
    ----------
    list_ : list
        Sequence to index into.
    index : int
        Index to look up; negative indices follow normal Python semantics.
    default : Any, optional
        Value returned when *index* is out of range. Defaults to ``None``.
    """
    try:
        return list_[index]
    except IndexError:
        return default
6e1ed71fa7204412dbc20141543d29f1e0c5a9bc
31,521
import requests


def is_valid_token(token: str):
    """Check whether *token* is a valid Discord auth token.

    Performs a GET against the Discord "current user" endpoint with the
    token in the Authorization header; a 200 response means the token works.
    Any non-200 status (401 for bad tokens, etc.) counts as invalid.
    """
    url = "https://discord.com/api/v9/users/@me"
    headers = {
        "Authorization": token
    }
    response = requests.get(url, headers=headers)
    # Return the comparison directly instead of an if/else returning literals.
    return response.status_code == 200
665359e5f6f06c58a881f45f2458047d82b21774
31,522
import sys
import traceback


def parse_ping_results(results):
    """Parse (RIPE-Atlas-style) ping measurement results into rows.

    Parameters
    ----------
    results : list of dict
        JSON-decoded measurement results; each entry carries probe and
        measurement ids, target info, min/max RTT and a per-packet
        ``result`` list.

    Returns
    -------
    list of tuple
        ``(msm_id, prb_id, dst_addr, dst_name, min, max, rtt...)`` rows;
        a packet with an error contributes ``'x'`` in place of an RTT.
    """
    tab_list = []
    for result in results:
        try:
            probeid = result['prb_id']
            mid = result['msm_id']
            target_addr = result['dst_addr']
            target_name = result['dst_name']
            minvalue = result['min']
            maxvalue = result['max']
            rtts = []
            for measurement in result['result']:
                # Bug fix: dict.has_key() was removed in Python 3 —
                # use the `in` operator instead.
                if 'rtt' in measurement:
                    rtts.append(measurement['rtt'])
                elif 'error' in measurement:
                    rtts.append('x')
                elif 'x' in measurement:
                    rtts.append(measurement['x'])
                else:
                    sys.stderr.write('measurement: %d result has no field rtt and not field error\n' % mid)
            entry = (mid, probeid, target_addr, target_name,
                     minvalue, maxvalue) + tuple(rtts)
            tab_list.append(entry)
        except Exception:
            # Keep processing the remaining results; log this one's traceback.
            traceback.print_exc(file=sys.stderr)
    return tab_list
d47b7be7d5740332c4077e3febfd517f0944b621
31,524
import os


def reproject_tile(ori_tile):
    """Reproject a raster tile into EPSG:3035 with gdalwarp.

    Parameters
    ----------
    ori_tile : str
        Path to the input tile (.jp2 or .tif).

    Returns
    -------
    str
        Path of the reprojected GeoTIFF ('<name>_rprj_3035.tif').
    """
    import shlex  # local import: used only to shell-quote the file paths

    # Change the name of the reprojected tile
    if ori_tile.endswith('.jp2'):
        reproj_tile = ori_tile.replace('.jp2', '_rprj_3035.tif')
    else:
        reproj_tile = ori_tile.replace('.tif', '_rprj_3035.tif')
    # Reproject tile into epsg:3035 CRS.  Paths are quoted so names with
    # spaces/shell metacharacters cannot break (or inject into) the command.
    os.system(
        "gdalwarp -t_srs epsg:3035 -r bilinear -co BIGTIFF=YES -co COMPRESS=LZW %s %s"
        % (shlex.quote(ori_tile), shlex.quote(reproj_tile)))
    return reproj_tile
7cdb915241c9f7012bc135356e451921e437f34a
31,528
import json
import ast


def pystr2json(s: str) -> str:
    """Re-serialize a Python-repr string (single quotes) as a JSON string.

    Typical use-case: an upstream producer stored ``str(["a", "b"])`` —
    i.e. ``"['a', 'b']"`` — e.g. in a .csv column, and that part cannot be
    changed, yet downstream consumers need valid double-quoted JSON.

    The conversion relies on ``ast.literal_eval()``, so the resulting
    double-quoted string is NOT guaranteed to be valid JSON for every
    input: literal_eval also accepts Python literal structures with no
    JSON equivalent (tuples, sets, bytes, complex numbers), as its
    documentation states.

    >>> pystr2json("['red', 'green', 'blue']")
    '["red", "green", "blue"]'
    """
    obj = ast.literal_eval(s)
    return json.dumps(obj)
56bc063fa586f86a7bfaa8d263b1f6bb91fa1389
31,529
import re


def unescape(text):
    """Unescape a few HTML character entities back to literal characters.

    NOTE(review): the archived copy of this function had its entity
    patterns rendered as bare characters (``re.sub(">", r">", text)``),
    which makes both substitutions no-ops. The patterns below restore the
    evident intent — mapping ``&gt;`` -> ``>`` and ``&#39;`` -> ``'`` —
    confirm against the original source if available.
    """
    text = re.sub(r"&gt;", ">", text)
    text = re.sub(r"&#39;", "'", text)
    return text
ed92ebf78faceae979f1aad26e2e722dfc502346
31,530
def add_time(df, stim_dict, stimulus_timestamps):
    """
    add_time(df, stim_dict, stimulus_timestamps)

    Updates dataframe with time columns.

    Arguments:
        df (pandas): stimulus dataframe.
        stim_dict (dict): experiment stim dictionary, loaded from pickle;
            only the "fps" key is read here.
        stimulus_timestamps (1D array): time stamps for each stimulus frame.

    Returns:
        df (pandas): updated stimulus table with time columns
            ("start_time_sec", "stop_time_sec", "duration_sec").
    """
    # Work on a copy so the caller's dataframe is not mutated.
    df = df.copy()
    # NOTE(review): rows are ordered by the two-photon start frame, while
    # the time lookups below use the *stimulus* frame columns — presumably
    # the two orderings agree; confirm with upstream table construction.
    df = df.sort_values("start_frame_twop").reset_index(drop=True)
    df["start_time_sec"] = stimulus_timestamps[
        df["start_frame_stim"].values.astype(int)
    ]
    # All rows except the last: stop time comes straight from the timestamps.
    non_final = range(0, len(df) - 1)
    df.loc[non_final, "stop_time_sec"] = stimulus_timestamps[
        df["stop_frame_stim"].values[:-1].astype(int)
    ]
    # final line: its stop frame may fall past the recorded timestamps, so
    # derive the stop time from the frame span divided by the stim fps.
    final_row = len(df) - 1
    last_duration = (
        df.loc[final_row, "stop_frame_stim"]
        - df.loc[final_row, "start_frame_stim"]
    ) / stim_dict["fps"]
    df.loc[final_row, "stop_time_sec"] = \
        df.loc[final_row, "start_time_sec"] + last_duration
    df["duration_sec"] = df["stop_time_sec"] - df["start_time_sec"]
    return df
58c0ac6cf4544b04737317a08115a814a737d98c
31,531
from typing import Tuple
from typing import List
from typing import Any


def get_column_key(label: Tuple[str, ...], metrics: List[str]) -> Tuple[Any, ...]:
    """
    Build a sort key for combining metric columns.

    MultiIndex labels carry the metric name in the last tuple position;
    it is replaced by its position in ``metrics`` so columns sort in the
    order the metrics were passed.
    """
    *prefix, metric = label
    return tuple(prefix) + (metrics.index(metric),)
007b0979bfd6db653dd21f3e4827668183d79529
31,534
def _get_shlib_stem(target, source, env, for_signature): """ Get the basename for a library (so for libxyz.so, return xyz) :param target: :param source: :param env: :param for_signature: :return: """ target_name = str(target) shlibprefix = env.subst('$SHLIBPREFIX') shlibsuffix = env.subst("$_SHLIBSUFFIX") if target_name.startswith(shlibprefix): target_name = target_name[len(shlibprefix):] if target_name.endswith(shlibsuffix): target_name = target_name[:-len(shlibsuffix)] return target_name
83ddc6341c0773c13e8e457718a2fef9707716d2
31,535
def normalize_axes(df, sample_axis, feature_axis):
    """Tests and transposes DataFrame to sample * feature format.

    Checks that the axis arguments form a valid (0, 1) pairing and flips
    the DataFrame so samples are in the index and features in the columns.

    Arguments:
        df: dataset
        sample_axis: axis containing samples (0 or 1)
        feature_axis: axis containing features (0 or 1)

    Returns:
        DataFrame as samples * features

    Raises:
        Exception: if (sample_axis, feature_axis) is not (0, 1) or (1, 0).
    """
    if (sample_axis, feature_axis) == (1, 0):
        df = df.T
    elif (sample_axis, feature_axis) != (0, 1):
        # Bug fix: the original only rejected the case where BOTH axes were
        # wrong, silently accepting e.g. (0, 0) or (1, 1).
        raise Exception('Invalid axis! Should be 0 or 1')
    return df
6c0fe26b5d79dfba90a8e395edecd08eb15e4649
31,536
def MatchAll():
    """Build an Elasticsearch query body that retrieves all documents.

    There is a 10,000 size limit for Elasticsearch query results!
    The Scroll API can be used to retrieve more than 10,000.
    """
    query = {"match_all": {}}
    return query
f40e443259465145b7212ee5869d319b508cc044
31,538
def match_anat(fname, json_data):
    """Match anatomical images.

    Inspects the series description for a T1/T2 marker and flags
    normalized acquisitions via the ImageType field.

    Returns a ``(folder, suffix, attrs, md)`` tuple where folder is always
    "anat" and suffix is "T1w", "T2w" or None.
    """
    folder = "anat"
    attrs, md = {}, {}
    suffix = None
    desc = json_data["SeriesDescription"].lower()
    if "t1" in desc:
        suffix = "T1w"
    elif "t2" in desc:
        suffix = "T2w"
    if suffix and "NORM" in json_data["ImageType"]:
        attrs["acq"] = "NORM"
    return folder, suffix, attrs, md
413c54a0fd1bf4e7e09ff06e3b5889263e12cafc
31,540
def check_texts(dataset, texts, reporter):
    """A helper function to check the dataset.

    Filters *texts* down to those accepted by ``dataset.check_text`` and
    advances the (remote) progress reporter once per text examined.
    """
    accepted = []
    for candidate in texts:
        if dataset.check_text(candidate):
            accepted.append(candidate)
        reporter.update.remote()
    return accepted
9ffc0d84403995094983c8fe7826ab59e3ef0d4e
31,542
def drop(self, parameters, inplace=True):
    """
    Remove input parameters from WaterFrame.data.

    Parameters
    ----------
        parameters: str, list of str
            Parameters of WaterFrame.data; the matching '<parameter>_QC'
            columns are removed as well.
        inplace: bool
            If True, Drop in place and return 'True', If False,
            return a copy of the WaterFrame without the input parameters.

    Returns
    -------
        new_wf: WaterFrame (or True when inplace=True)
    """
    keys = []
    if isinstance(parameters, str):
        keys.append(parameters)
        keys.append(parameters + '_QC')
    elif isinstance(parameters, list):
        # Bug fix: the original tested isinstance(keys, list), which is
        # always True (keys is the accumulator) and only worked by accident.
        for parameter in parameters:
            keys.append(parameter)
            keys.append(parameter + '_QC')

    if inplace:
        self.data.drop(keys, axis=1, inplace=True)
        for key in keys:
            self.vocabulary.pop(key)
        return True
    else:
        new_wf = self.copy()
        new_wf.vocabulary = self.vocabulary.copy()
        new_wf.metadata = self.metadata.copy()
        new_wf.data = self.data.drop(keys, axis=1)
        for key in keys:
            new_wf.vocabulary.pop(key)
        return new_wf
6bd8e1c4414ca98d061cc661832746c40be6abdf
31,544
def GetDeprecatedTagWarning(models):
    """Returns a warning string iff any device model is marked deprecated."""
    any_deprecated = any(
        'deprecated' in tag for model in models for tag in model.tags)
    if any_deprecated:
        return ('Some devices are deprecated. Learn more at https://firebase.'
                'google.com/docs/test-lab/available-testing-devices#deprecated')
    return None
0c5cc18597d7835c6df9c2efd84bb0bb4518c9e9
31,546
import requests
import json


def send_message(port, msg):
    """
    Sends a JSON message to the node and returns its reply.

    :param port: local TCP port the node listens on.
    :param msg: JSON-serializable payload; serialized here and POSTed with
        a JSON content-type header.
    :return: the ``requests.Response`` object from the node.
    """
    return requests.post("http://localhost:{}/".format(port), json.dumps(msg),
                         headers={'content-type': 'application/json'})
b830e9da3d22092fc78fc8f8b3ef804982bc3343
31,547
import difflib


def strSim(txt1, txt2):
    """
    String similarity: returns a value between -1 and +1
    (difflib's 0..1 ratio rescaled), or None when either input is
    empty/None.
    """
    if not txt1 or not txt2:
        return None
    ratio = difflib.SequenceMatcher(None, txt1, txt2).ratio()
    return 2.0 * (ratio - 0.5)
d984ea87ac0e73defce2cd358f4221cdf577d167
31,551
import glob
import os


def lyrics_to_list(path: str):
    """Return the contents of every '*.text' file in *path* as a list of
    lyric strings."""
    lyric_files = glob.glob(os.path.join(path, '*.text'), recursive=False)
    contents = []
    for lyric_path in lyric_files:
        with open(lyric_path) as handle:
            contents.append(handle.read())
    return contents
d5ea43d7d880388a5a7b644f74167f785ba6cde6
31,552
def nunique(ls):
    """Return the number of unique values of a list.

    Fast path: a set gives O(n) for hashable values. Falls back to the
    original O(n^2) membership scan when the list contains unhashable
    values (e.g. nested lists), preserving behavior for those inputs.
    """
    try:
        return len(set(ls))
    except TypeError:
        unique = []
        for val in ls:
            if val not in unique:
                unique.append(val)
        return len(unique)
6f4a6ebdcc1b13f291b003be10da78bc403c3f81
31,553
def predict_class(prob_dry, prob_wet):
    """
    Predict class ('Wet' or 'Dry') based on the input prediction
    probabilities for each class.

    Args:
        prob_dry (dataframe): prediction probabilities for class 'Dry'.
        prob_wet (dataframe): prediction probabilities for class 'Wet'.

    Returns:
        str: 'Dry' when prob_dry is strictly greater, otherwise 'Wet'.
    """
    return 'Dry' if prob_dry > prob_wet else 'Wet'
614f8fa3cabb09009e37bb40a7402a2b4b7981b6
31,554
def get_retcode(ret):
    """
    Determine a retcode for a given return value.

    dicts: a non-zero "retcode" entry wins.
    bools: False maps to 1.
    Everything else maps to 0.
    """
    if isinstance(ret, dict):
        code = ret.get("retcode", 0)
        return code if code != 0 else 0
    if isinstance(ret, bool) and not ret:
        return 1
    return 0
d04f27231e2708f6e4d23ab81fa8c186649c767c
31,555
import torch


def focal_binary_cross_entropy(
    logits: torch.Tensor, targets: torch.Tensor, gamma: int = 2
) -> torch.Tensor:
    """
    Focal binary cross-entropy for multi-label targets.

    form https://www.kaggle.com/thedrcat/focal-multilabel-loss-in-pytorch-explained

    The mean per-element focal loss is scaled by the label count so the
    magnitude matches a per-sample sum over labels.
    """
    n_labels = targets.shape[-1]
    flat_logits = logits.reshape(-1)
    flat_targets = targets.reshape(-1)
    p = torch.sigmoid(flat_logits)
    # Probability assigned to the true class of each element.
    p_true = torch.where(flat_targets >= 0.5, p, 1.0 - p)
    log_p = -torch.log(torch.clamp(p_true, 1.0e-4, 1.0 - 1.0e-4))
    focal = log_p * ((1.0 - p_true) ** gamma)
    return n_labels * focal.mean()
f1286eb70d07a73451fd3021d09600c9f48db5d5
31,557
def get_app_fullname(app): """ Returns the full python name of an app - e.g. django.contrib.auth """ return app.__name__[:-11]
c90310bbdd082023c01c25cc0736984c60b60a79
31,559
def is_summary(number):
    """
    Takes a number as input.

    Identifies a sentence as part of the summary or not: any non-zero
    value yields 'yes', zero yields 'no'. (Docstring typo "summery"
    fixed.)
    """
    return 'yes' if number != 0 else 'no'
753e3d698e484976c7cb42a85c4a6966c5bcc9c8
31,560
import itertools


def complete_inds(n, d):
    """
    Return all combinations of powers in an n dimensional d degree
    complete polynomial. This will include a term for all 0th order
    variables (i.e. a constant).

    Parameters
    ----------
    n : int
        The number of parameters in the polynomial
    d : int
        The degree of the complete polynomials

    Returns
    -------
    inds : filter
        A lazy filter over all exponent tuples whose total degree is <= d
    """
    all_exponents = itertools.product(range(d + 1), repeat=n)
    return filter(lambda powers: sum(powers) <= d, all_exponents)
cfbc90ab7810fdbe6a8982b40adf5e38b8054208
31,561
def _get(entry): """A helper to get the value, be it a callable or callable with args, or value """ if isinstance(entry, (tuple, list)): func, args = entry return func(*args) elif callable(entry): return entry() else: return entry
40742e0f86ea1a89b05e0354912c64683a9b9160
31,562
def redshiftToLOS(redshift, H):
    """
    Convert redshifts to apparent line of sight distances, ignoring
    particle velocities.

    Input: redshifts and Hubble constant.
    Output: line of sight distances (Mpc).
    """
    speed_of_light = 3.0e5  # km/s
    return redshift * speed_of_light / H
09f6c287dbc43267dfd239ac835394e41760d242
31,563
def _triplelist2triples_(triple_list, config): """ >>> _triplelist2triples_([1,2,3, 2,5,0]) {(1,2,3),(2,5,0)} >>> _triplelist2triples_([1,2,3, 1,2,3, 2,5,0]) {(1,2,3),(2,5,0)} >>> _triplelist2triples_([1,2,3, 2,5,0].extend(config.NA_TRIPLE)) {(1,2,3),(2,5,0)} """ triple_list = list(triple_list) triples = set([tuple(triple_list[i:i + 3]) for i in range(0, len(triple_list), 3)]) if config.NA_TRIPLE in triples: triples.remove(config.NA_TRIPLE) return triples
4bedf4495646aa08e1ba80b0f70b257ba6e01644
31,564
import operator


def difference(vec1, vec2):
    """Return elementwise difference between given vectors.

    >>> v1 = [1, 2, 3, 4]
    >>> v2 = [5, 6, 7, 8]
    >>> v3 = [0, 0, 0, 0]
    >>> difference(v2, v1)
    [4, 4, 4, 4]
    >>> difference(v2, v3)
    [5, 6, 7, 8]
    """
    # Bug fix: map() is lazy on Python 3 — materialize the list so the
    # documented (doctested) list behavior actually holds.
    return list(map(operator.sub, vec1, vec2))
1dcf334f856232d1b5213ed835a34ab1ec20dc9f
31,565
def generate_list(function, amount):
    """Apply *function* to each index 1..amount (inclusive) and return
    the results as a list."""
    return [function(i) for i in range(1, amount + 1)]
39e807402eddbe6c88578e3bfc870430fb8eb84e
31,566
import argparse


def set_args():
    """Build and parse the command-line arguments used to train the model."""
    # NOTE(review): help texts are runtime strings and are kept verbatim
    # (in Chinese); the first one mentions "GPT2" while the default path
    # points at a T5-PEGASUS checkpoint — presumably copied from an
    # earlier script; confirm before relying on it.
    parser = argparse.ArgumentParser()
    parser.add_argument('--pretrained_model_path', default='./t5_pegasus_torch/', type=str,
                        help='预训练的GPT2模型的路径')  # path to the pretrained model
    parser.add_argument('--class_size', type=int, default=4, help='类别')  # number of classes
    parser.add_argument('--class_hidden_size', type=int, default=768)
    parser.add_argument('--span_layer', type=str, default="endpoint", help='span层的类型')  # span layer type
    parser.add_argument('--class_proj_dim', type=int, default=256, help='span层的映射大小')  # span projection size
    parser.add_argument('--use_proj', action='store_true', help='是否使用映射')  # whether to use a projection
    parser.add_argument('--generate_weight', type=int, default=1, help='生成模块权重')  # generation-loss weight
    parser.add_argument('--class_weight', type=int, default=1, help='分类模块权重')  # classification-loss weight
    return parser.parse_args()
a3656263dcb0383cf7a0dafb7a02b36d9ba2c096
31,567
import colorsys


def map_colour(x, centre, start_hue, end_hue, day):
    """Given an x coordinate and a centre point, a start and end hue
    (in degrees), and a Boolean for day or night (day is True, night
    False), calculate a colour hue representing the 'colour' of that
    time of day.

    Returns an ``(r, g, b)`` tuple of 0-255 ints.
    """
    start_hue = start_hue / 360  # Rescale to between 0 and 1
    end_hue = end_hue / 360
    sat = 1.0
    # Dim the brightness as you move from the centre to the edges
    val = 1 - (abs(centre - x) / (2 * centre))
    # Ramp up towards centre, then back down
    if x > centre:
        x = (2 * centre) - x
    # Calculate the hue: linear interpolation from start_hue to end_hue
    # as x moves toward the centre.
    hue = start_hue + ((x / centre) * (end_hue - start_hue))
    # At night, move towards purple/blue hues and reverse dimming
    if not day:
        hue = 1 - hue
        val = 1 - val
    r, g, b = [int(c * 255) for c in colorsys.hsv_to_rgb(hue, sat, val)]
    return (r, g, b)
4699b630f30a06a3421bb5d18155edbffa70362d
31,568
def gadgetMultipleFiles(rootName, fileIndex):
    """ Returns the name of gadget file 'fileIndex' when a snapshot is
    saved in multiple binary files.

    It takes 2 arguments: root name of the files and the file number
    whose name is requested (from 0 to GadgetHeader.num_files-1)."""
    suffix = "%i" % fileIndex
    return rootName + suffix
4dac2d23c6cba7600472cc84fa2384ef66f5f204
31,569
def normalize_index(index, length):
    """
    Normalizes an index per sequence indexing.

    >>> normalize_index(0, 10)
    0
    >>> normalize_index(9, 10)
    9
    >>> normalize_index(-2, 10)
    8
    """
    index = int(index)
    shifted = index + length if index < 0 else index
    if 0 <= shifted < length:
        return shifted
    # Report the caller's original index in the error message.
    raise IndexError("index out of range: {}".format(index))
bc9be3a3ef554ca95217f93d2d698934c2f1096f
31,570
def overlaps(s1, e1, s2, e2):
    """
    Check if two start/end ranges have overlap

    :param `s1`: range 1 start
    :type `s1`: int
    :param `e1`: range 1 end
    :type `e1`: int
    :param `s2`: range 2 start
    :type `s2`: int
    :param `e2`: range 2 end
    :type `e2`: int
    :return: True if ranges overlap
    :rtype: bool
    """
    latest_start = s1 if s1 > s2 else s2
    earliest_end = e1 if e1 < e2 else e2
    # Strict inequality: ranges that merely touch do not overlap.
    return latest_start < earliest_end
30656ce4bd685a64faf81efa59e4d1c694c299a0
31,571
def compareConfigs(name, c1, c2, shortcut=True, rtol=1E-8, atol=1E-8, output=None):
    """
    Helper function for Config.compare; used to compare two Configs for
    equality.

    If the Configs contain RegistryFields or ConfigChoiceFields, unselected
    Configs will not be compared.

    Floating point comparisons are performed by :func:`numpy.allclose`;
    refer to that for details.

    Parameters
    ----------
    name : Name to use when reporting differences
    c1 : LHS config to compare
    c2 : RHS config to compare
    shortcut : If True, return as soon as an inequality is found.
    rtol : Relative tolerance for floating point comparisons.
    atol : Absolute tolerance for floating point comparisons.
    output : If not None, a callable that takes a string, used (possibly
             repeatedly) to report inequalities.

    Returns
    -------
    bool : True when the two configs compare equal.
    """
    assert name is not None
    if c1 is None:
        if c2 is None:
            return True
        if output is not None:
            output("LHS is None for %s" % name)
        return False
    if c2 is None:
        if output is not None:
            output("RHS is None for %s" % name)
        return False
    # Bug fix: the original compared type(c1) != type(c1) — always False —
    # so mismatched config types were never detected.
    if type(c1) != type(c2):
        if output is not None:
            output("Config types do not match for %s: %s != %s" % (
                name, type(c1), type(c2)))
        return False
    equal = True
    for field in c1._fields.values():
        result = field._compare(c1, c2, shortcut=shortcut,
                                rtol=rtol, atol=atol, output=output)
        if not result and shortcut:
            return False
        equal = equal and result
    return equal
463c94f8912924a933d64c3f6eeeb38e8df5ed80
31,572
def pyAttrName2XmlAttrName(key):
    """
    The @pyAttrName2XmlAttrName@ converts the Python XML attribute name
    @key@ to an appropriate XML attribute identifier: 'cssClass' becomes
    'class', 'cssId' becomes 'id', and HTML5 *data_xxxx* attributes become
    *data-xxxx*.
    """
    special_cases = {'cssClass': 'class', 'cssId': 'id'}
    if key in special_cases:
        return special_cases[key]
    if key.startswith('data'):
        return key.replace('_', '-')
    return key
7bafbef748960b1e5e34ee27d14a39778bb8ec43
31,573
def get_images_helper(request, images):
    """
    Helper method for gathering an object's list of images and formatting
    them along with their corresponding types.

    Parameters:
        request : Request object from the serializer instance; supplies
            the scheme and host for building absolute URLs.
        images : Queryset of image objects connected to the Object

    Returns:
        List of Image dicts ("image_url", "image_type") ready for JSON.
    """
    host = request.get_host()
    return [
        {
            "image_url": f"{request.scheme}://{host}{img.image.url}",
            "image_type": img.type.type,
        }
        for img in images
    ]
97b798ca406e63dcc00b9eefdd27edc9fcd9aef9
31,574
def A000110(n: int) -> int:
    """Bell or exponential numbers.

    Number of ways to partition a set of n labeled elements, computed via
    the Bell triangle.
    """
    triangle = [[0] * (n + 1) for _ in range(n + 1)]
    triangle[0][0] = 1
    for row in range(1, n + 1):
        # Each row starts with the last element of the previous row.
        triangle[row][0] = triangle[row - 1][row - 1]
        for col in range(1, row + 1):
            triangle[row][col] = triangle[row - 1][col - 1] + triangle[row][col - 1]
    return triangle[n][0]
d430624c46688ae2f18b86270bf4fdcb3a28dca8
31,575
def find_authorization_in_db(user_id, users_collection):
    """Queries the db to find authorization of the given user.

    Returns False when the user is missing or not flagged as an organizer.
    """
    record = users_collection.find_one({'user_id': user_id})
    if record is None:
        # user not found
        return False
    return bool(record.get('is_organizer'))
8834e83abf638e8a98c87fcd3f86b03e943ab5ad
31,576
def celciusToFarenheit(celcius):
    """ Convert a temperature in Celsius to Fahrenheit.

    Returns None when the input is None. (The function and parameter
    names keep the original spelling for caller compatibility.)
    """
    if celcius is None:
        return None
    return float(celcius) * 1.8 + 32.0
ccf57c2d376de6c7b61b2ac48e6c22348dd83ee2
31,579
def decimal2dms(decimal_degrees):
    """ Converts a floating point number of degrees to the equivalent
    number of degrees, minutes, and seconds, which are returned
    as a 3-element list. If 'decimal_degrees' is negative,
    only degrees (1st element of returned list) will be negative,
    minutes (2nd element) and seconds (3rd element) will always be positive.

    NOTE(review): for inputs strictly between -1 and 0 the integer degree
    term is 0, so the sign of the input is lost entirely in the result —
    confirm whether callers can pass such values before relying on this.

    Example:
    >>> decimal2dms(121.135)
    [121, 8, 6.0000000000184173]
    >>> decimal2dms(-121.135)
    [-121, 8, 6.0000000000184173]
    """
    degrees = int(decimal_degrees)  # int() truncates toward zero
    decimal_minutes = abs(decimal_degrees - degrees) * 60
    minutes = int(decimal_minutes)
    seconds = (decimal_minutes - minutes) * 60
    return [degrees, minutes, seconds]
14586909f670e4dd4a8173925eea4dc330ed6adc
31,580
def frame_ranges_to_string(frames):
    """
    Take a list of numbers and make a string representation of the ranges.

    >>> frame_ranges_to_string([1, 2, 3, 6, 7, 8, 9, 13, 15])
    '[1-3, 6-9, 13, 15]'

    :param list frames: List of frame numbers (any order; NOT modified).
    :return: String of broken frame ranges (i.e '[10-14, 16, 20-25]').
    """
    if not frames:
        return '[]'
    # Bug fix: work on a sorted copy — the original sorted and popped the
    # caller's list in place, mutating it as a side effect.
    frames = sorted(frames)
    # Make list of lists for each consecutive range
    ranges = [[frames[0]]]
    for x in frames[1:]:
        if x - 1 == ranges[-1][-1]:
            ranges[-1].append(x)
        else:
            ranges.append([x])
    range_strings = []
    for r in ranges:
        if len(r) > 1:
            range_strings.append('-'.join([str(r[0]), str(r[-1])]))
        else:
            range_strings.append(str(r[0]))
    return '[' + ', '.join(range_strings) + ']'
9fdd3f05812a34144102f3727cfe49f37a4c0f60
31,581
import zlib


def bytes_to_zlib(bytes_data):
    """ Compress a bytes array with zlib (default compression level). """
    compressed = zlib.compress(bytes_data)
    return compressed
d9e9583a35d52ba1f69972bcc2fcaa295348a4ec
31,582
def get_short_uuid(uuid):
    """get the first block of a 4-word UUID to use as a short identifier"""
    head, _, _ = str(uuid).partition('-')
    return head
727847113068d7bd48356047d98b12d00d9c811a
31,583
def gen_bar_updater(pbar):
    """ Build a urlretrieve-style reporthook that drives a tqdm progress
    bar during download. """
    def bar_update(count, block_size, total_size):
        # Lazily set the bar total once the download size is known.
        if pbar.total is None and total_size:
            pbar.total = total_size
        downloaded = count * block_size
        pbar.update(downloaded - pbar.n)
    return bar_update
455a0f1a0c3f9353c5ef92b99e4a7d7669b24092
31,584
def is_go_source(view, point=None):
    """Return True if the given view contains Go source code.

    :param sublime.View view: View containing the code to be formatted.
    :returns: bool
    """
    if point is None:
        # Default to the caret: start of the first selection region.
        point = view.sel()[0].begin()
    score = view.score_selector(point, 'source.go')
    return score > 0
f666309988d754f1c352b4febf92e5e443f17384
31,586
def is_negatively_charged_oxygen(atom_name, resname):
    """
    Determine whether the oxygen atom of interest is either negatively
    charged (usually a carboxyl group or sulfate/phosphate), or has a lone
    pair (and no hydrogen atom) that would similarly repel anions.

    Parameters
    -----------
    atom_name : str
    resname : str

    Returns
    -------
    bool
    """
    # Side-chain carboxyl/amide oxygens of Glu/Asp/Gln/Asn.
    if (atom_name in ("OD1", "OD2", "OE1", "OE2")
            and resname in ("GLU", "ASP", "GLN", "ASN")):
        return True
    # Backbone carbonyl oxygen, excluding waters — sort of: the lone pair
    # acts this way.
    if atom_name == "O" and resname not in ("HOH", "WAT"):
        return True
    # Nucleotide-style phosphate oxygens named O1A/O2B/O3G etc.
    if (len(atom_name) == 3
            and atom_name[0:2] in ("O1", "O2", "O3")
            and atom_name[2] in ("A", "B", "G")):
        return True
    return resname in ("SO4", "PO4")
2e3e4b3aab87da8bf44708ca28ee11b3d8fb5e1e
31,588
import json
import errno


def write_config_file(path, config):
    """Writes the specified configuration to the specified file.

    Returns True on success, False when the path's directory does not
    exist (ENOENT); any other I/O failure is re-raised.
    """
    contents = json.dumps(config, indent=4, separators=(',', ': ')) + '\n'
    try:
        with open(path, 'w') as f:
            f.write(contents)
        return True
    except IOError as ex:
        # Bug fix: the original compared the exception OBJECT to
        # errno.ENOENT (`ex != errno.ENOENT`), which is always unequal,
        # so the missing-directory case was re-raised instead of returning
        # False. Compare the errno attribute.
        if ex.errno != errno.ENOENT:
            raise
        return False
f72720aa3c96830b55726cfc961c66b5d8975802
31,589
def generate_file_token(self, file):
    """Generate file token.

    Delegates to ``self.generate_random_token`` with the file name as the
    single seed element.

    Args:
        file (str): File name.

    Returns:
        str: New token, 64 characters.
    """
    return self.generate_random_token([file])
01deb31ae0e7a569fd89f8eb9ed221f7e24da41b
31,590
def create_reference_lookup_df(df):
    """Return a dataframe indexed by task_id for looking up the reference.

    Only rows tagged 'reference' are kept; the first row per task_id wins.
    """
    # Build the result as a method chain instead of calling inplace
    # mutators on df[mask] — the original pattern mutates a slice of the
    # caller's frame and triggers pandas' SettingWithCopyWarning.
    reference_rows = df[df['tag'] == 'reference']
    reference_rows = reference_rows.drop_duplicates(subset=['task_id'])
    return reference_rows.set_index('task_id', verify_integrity=True)
3acdef32286af7ed6f596e0b3668fc441b775a11
31,596
def action_traffic_permitted(env, state, action):
    """ Return True if action is permitted in terms of firewall traffic.

    Scans are always permitted; otherwise the action's service traffic
    must be allowed from at least one compromised subnet (or the
    internet, subnet 0) to the target's subnet.
    """
    if action.is_scan():
        return True
    service = action.service
    dest_subnet = action.target[0]
    # add 0 since 0 = internet
    source_subnets = {0}
    source_subnets.update(
        m[0] for m in env.address_space if state.machine_compromised(m))
    network = env.network
    return any(
        network.traffic_permitted(src, dest_subnet, service)
        for src in source_subnets)
dcce59e1b9d48bd0c35a65f7e016a0477d86e2f7
31,597
def func(i: int) -> int:
    """Double the input.

    (Bug fix: the original doctest claimed ``func(10)`` returns 21, which
    ``i * 2`` has never produced.)

    >>> func(10)
    20
    """
    return i * 2
61e3858dd247f9d324f5d17dea40f6a368c7af38
31,598
def find_multilines(config_text):
    """ Parses condor config file looking for multiline entries

    Args:
        config_text: contents of a condor configuration file.
            NOTE(review): although described as a string, the body splits
            each iterated item on whitespace — i.e. it treats the argument
            as an iterable of LINES (a list or open file). Passing one big
            string would iterate characters; confirm what callers pass.

    Returns:
        multi: dictionary. keys are first line of multi line config
               values are the rest of the multi line config keeping
               original formatting
               see parse_configs_for_multis() below for example multi dict
    """
    multi = {}
    tag = None
    dict_key = None
    for line in config_text:
        parts = line.split()
        if tag is None:
            # Outside a multiline block: look for the "@=" opener and
            # remember the identifier immediately before it as the key.
            for idx in range(len(parts)):
                if parts[idx].startswith("@="):
                    tag = parts[idx].replace("=", "").strip()
                    dict_key = parts[idx - 1].strip()
                    multi[dict_key] = "".join(parts[idx:]) + "\n"
        else:
            # Inside a block: accumulate non-comment lines verbatim until
            # the closing tag token shows up.
            if "#" not in line:
                multi[dict_key] += line
            for idx in range(len(parts)):
                if tag in parts[idx]:
                    tag = None
    return multi
551785ff3188b3194f5e3795308376c86ac42019
31,599
import subprocess


def GetCommitDescription(commit):
    """Get the output of `git describe`.

    Needs to be called from inside the git repository dir.

    Returns the output with trailing whitespace stripped; note that
    ``check_output`` yields ``bytes`` on Python 3, so the result is bytes.
    Raises ``subprocess.CalledProcessError`` if git exits non-zero.
    """
    return subprocess.check_output(
        ['git', 'describe', '--long', '--abbrev=8', commit]).rstrip()
95694ce9ab30fc0da83e160e464e7f99f8ade5b8
31,600
from typing import List


def get_effective_lindbladian_object_names() -> List[str]:
    """Return the list of valid effective-lindbladian-related object names."""
    return [
        "hamiltonian_vec",
        "hamiltonian_mat",
        "effective_lindbladian_mat",
        "effective_lindbladian",
    ]
7c5f63e2ab48fd9cb6e0f53fcab895261311e5b5
31,601
def verify_edges(G, check_connected=True):
    """Check graph wiring: each node input index must be fed by at most
    one edge, and (optionally) every node must be connected to something.

    Args:
        G: graph exposing iteration over node names plus in_edges,
           num_in_edges and num_out_edges.
        check_connected: also flag nodes with no in- or out-edges.

    Returns:
        list of str: one description per problem found (empty when OK).
    """
    problems = []
    for node_name in G:
        in_idxes = {}
        for edge in G.in_edges(node_name):
            if edge.to_idx in in_idxes:
                problems.append(
                    f"edge {in_idxes[edge.to_idx]} connects to the same input as edge {edge}")
            else:
                # Bug fix: the original never recorded edges in in_idxes,
                # so the duplicate-input check could never fire.
                in_idxes[edge.to_idx] = edge
        if check_connected and G.num_in_edges(node_name) == 0 and G.num_out_edges(node_name) == 0:
            problems.append(f"{node_name} isn't connected to any other")
    return problems
561c7510cd35223196ee66f6cf4374cf026501af
31,602
import re def _remove_special_characters(text: str) -> str: """removes some special characters""" p_text = re.sub(r'[,;?!$*(){}<=>^~+`\"%#]', ' ', text, flags=re.ASCII) p_text = re.sub(r'<([uU]\+.+)>', ' ', p_text) # remove pesky Unicodes like <U+A> return p_text
824c7b91d3c61a9e9e277f233b1b4554a5a3380e
31,603
def _(element, compiler, **kw):
    """
    Compile the expression by joining its compiled clauses with the SQL
    ``||`` concatenation operator.

    NOTE(review): the original docstring said "This actually returns the
    squared STD" — that text does not match this body, which emits a
    ``||``-joined concat; it was presumably copied from a neighbouring
    compiler stub and is corrected here.

    Raises:
        ValueError: if fewer than two clauses are supplied.
    """
    if len(element.clauses) < 2:
        raise ValueError("We need to concat at least two elements")
    concat = "||".join([compiler.process(elem, **kw) for elem in element.clauses])
    return concat
9c374ab84440e72140804bb2cc686c5c5737008d
31,604
import re


def tokenize(library):
    """
    Function that takes a string of text and tokenizes it: the text is
    lower-cased and split on runs of non-word characters; the token list
    is returned.
    """
    # Make sure all words are written with lower cases, then split on
    # non-alphanumeric runs.
    non_word = re.compile(r"\W+")
    return non_word.split(library.lower())
b638cda4945aee3ed377614968998314121735cf
31,605
import os


def get_sub_dir(current_dir):
    """Get a list of sub-directories of current_dir, excluding a few known
    tool directories and any name containing 'target_lesions'."""
    excluded_names = {'fplbp', 'MRICAD', 'slic'}
    subdirs = []
    for entry in os.listdir(current_dir):
        if not os.path.isdir(os.path.join(current_dir, entry)):
            continue
        if entry in excluded_names or 'target_lesions' in entry:
            continue
        subdirs.append(entry)
    return subdirs
93e2fe1806be69f2d1d3f08dc350709ffea39191
31,606
def unflatten_series(x, feature_length):
    """
    Unflatten a series that was flattened for dataset balancing.

    Splits *x* into consecutive chunks of *feature_length* elements; any
    trailing partial chunk is dropped (matching the integer-division
    chunk count).
    """
    # Bug fixes vs the original: use integer division (range() rejects a
    # float on Python 3) and slice by the loop index — the original sliced
    # at the constant num_series * feature_length every iteration, which
    # always produced the same (usually empty) slice. The stray debug
    # print of the first chunk's length is also removed.
    num_series = len(x) // feature_length
    return [
        x[i * feature_length:(i + 1) * feature_length]
        for i in range(num_series)
    ]
9e5ed380e5cbec535bd040849417d537b55672da
31,607
def decorate_to_utf8(func):
    """Encode str arguments to bytes before calling into t4.dll, and decode
    bytes results back to str for the API caller.

    (Translated from the original Chinese docstring: "whenever a parameter
    is str it must be encoded for t4.dll; whenever the return value is
    bytes it must be converted back for the API caller".)

    NOTE(review): arguments are encoded as UTF-8 while results are decoded
    as cp950 — the asymmetry follows the original comment, but confirm the
    DLL really expects that pairing.
    """
    def func_wrapper(*args):
        new_args = list(args)
        for idx, arg in enumerate(args):
            if isinstance(arg, str):
                new_args[idx] = args[idx].encode('utf-8')
        res = func(*new_args)
        if isinstance(res, bytes):
            return str(res, 'cp950')
        else:
            return res
    return func_wrapper
ee89ab532ecfe71a1924465f12e68515452fb66c
31,608
import argparse


def command_line():
    """ Manage the arguments from command line """
    parser = argparse.ArgumentParser()
    string_options = (
        ("-n", "--website_name", "add your website name here"),
        ("-i", "--index_page", "add your index page here"),
        ("-e", "--error_page", "add your error page here"),
        ("-r", "--region", "region where creating the bucket"),
    )
    for short_flag, long_flag, help_text in string_options:
        parser.add_argument(short_flag, long_flag, help=help_text)
    parser.add_argument("-d", "--delete", help="Delete bucket", action="store_true")
    return parser.parse_args()
1c5d5e457dd8996f432cd5a1b27fea02be9b26e1
31,609
def sign_string(value):
    """
    Return a string representing the sign of a numerical value, "+" for
    positive, "-" for negative, and "+/-" for 0.

    :param value: A numerical value.
    :return: The sign of that value as a string.
    """
    if value > 0:
        return "+"
    if value < 0:
        return "-"
    return "+/-"
f305cd9199e174520ea675c3a46f340904e410e6
31,610
import requests
from bs4 import BeautifulSoup


def url2bs(url: str):
    """
    HTTP GET response text with the url to a BeautifulSoup object parsed
    with lxml.

    :param url: URL.
    :return: BeautifulSoup object, or None when the request fails (the
        error is printed rather than raised).
    """
    try:
        response = requests.get(url)
        result = BeautifulSoup(response.text, 'lxml')
    except requests.exceptions.RequestException as e:
        print('Error in HTTP GET from the url:\n\t' + url + '\nERROR MESSAGE:')
        print(e)
        result = None
    return result
374a94bb3c9970a340fccce4b596ae15ad60022c
31,611
def agenda_width_scale(filter_categories, spacer_scale):
    """Compute the width scale for the agenda filter button table

    Button columns are spacer_scale times as wide as the spacer columns
    between categories. There is one fewer spacer column than categories.
    """
    category_count = len(filter_categories)
    column_count = sum(len(category) for category in filter_categories)
    scale = spacer_scale * column_count + category_count - 1
    # Refuse to return less than 1 to avoid width calculation problems.
    return scale if scale > 1 else 1
7c26fdb708ddf622593388af0377fa3957e3a753
31,612
from typing import List


def encode(obs: List[int], spaces: List[int]) -> int:
    """Encode an observation over a list of gym.Discrete spaces as one int.

    Works like a mixed-radix number: each component is weighted by the
    product of the sizes of all preceding spaces.

    :param obs: an observation, one value per discrete space.
    :param spaces: the sizes of the discrete spaces.
    :return: the encoded observation.
    """
    assert len(obs) == len(spaces)
    encoded = 0
    multiplier = 1
    for value, size in zip(obs, spaces):
        encoded += value * multiplier
        multiplier *= size
    return encoded
32e2f6c1b43e8567f113905d8fc2452dd7058750
31,614
def _transform_json_content(data, nodes: list): """ 处理a>b>c的json指示符 :param data: 字典数据 :param nodes: 指示符 :return: a>b>c返回的指示数据 """ results = [] def _get_data(inner_data, inner_nodes): # 如果路径没了,返回本身合并成的list try: if len(inner_nodes) == 1: if isinstance(inner_data, list): results.extend([inn.get(inner_nodes[0]) for inn in inner_data]) else: results.append(inner_data.get(inner_nodes[0])) else: node = inner_nodes[0] del inner_nodes[0] inner_nodes = inner_nodes temp_data = inner_data.get(node) if isinstance(temp_data, list): # 如果数据是list的形式,则单个继续递归 for da in temp_data: _get_data(da, inner_nodes) else: # 如果nodes还有个数,对于单个来说继续递归 _get_data(temp_data, inner_nodes) except Exception as e: raise e _get_data(data, nodes) return results
cfa987e0a922393e6456c71b3e80723184d787c8
31,615
def payscore_merchant_bill(self, bill_date, service_id, tar_type='GZIP', encryption_algorithm='AEAD_AES_256_GCM'):
    """Request a merchant reconciliation bill (WeChat pay-score).

    :param bill_date: bill date, format ``YYYY-MM-DD``; only roughly the
        last three months can be downloaded. Example: ``'2021-01-01'``.
    :param service_id: pay-score service ID.
        Example: ``'2002000000000558128851361561536'``.
    :param tar_type: bill compression type; ``'GZIP'`` returns a
        gzip-compressed file.
    :param encryption_algorithm: algorithm used to encrypt the bill,
        e.g. ``'AEAD_AES_256_GCM'``.
    """
    if not bill_date:
        raise Exception('bill_date is not assigned.')
    if not service_id:
        raise Exception('service_id is not assigned.')
    query = '/v3/payscore/merchant-bill?bill_date=%s' % bill_date
    query += '&service_id=%s' % service_id
    query += '&tar_type=%s' % (tar_type or 'GZIP')
    query += '&encryption_algorithm=%s' % (encryption_algorithm or 'AEAD_AES_256_GCM')
    return self._core.request(query)
12663b79311c1eb2aad00da49df0df3edfadcb2a
31,616
import re


def num_in_str(lst: list) -> list:
    """Filter *lst* down to the strings that contain at least one digit."""
    has_digit = re.compile(r"\d")
    return [item for item in lst if has_digit.search(item)]
a9c94acbf5812e1c5ecfc5bb6b08d325882d2806
31,617
import re


def clean_genre(df):
    """
    Clean genre by reducing multiple genres to a singular genre.

    Adds a ``GenreFix`` column to *df* (modified in place) and returns it.

    Parameters
    ----------
    df : pd.DataFrame
        A data frame with a ``Genre`` column of movie genres.

    Returns
    -------
    df : pd.DataFrame
        The same data frame with grouped genres in ``GenreFix``.
    """
    def _primary_genre(raw):
        # Treat "social" films as drama.
        g = re.sub("social", "drama", raw)
        # Keep only the first genre of "A, B", "A-B" and "A/B" composites.
        g = re.sub("(?<=[a-z]),\s*[a-z]+", "", g)
        g = re.sub("(?<=[a-z])\s*-\s*[a-z]+", "", g)
        g = re.sub("(?<=[a-z])\s*/\s*[a-z]+", "", g)
        # Collapse war-related labels, animations and crime movies.
        if g.find("ww") != -1 or g.find("world war") != -1 or g.find("war") != -1:
            g = "war"
        if g.find("anim") != -1:
            g = "animation"
        if g.find("crime") != -1:
            g = "crime"
        # Drop the word "film", keep the last word ("historical drama" -> "drama"),
        # then strip leftover commas/dots and surrounding whitespace.
        g = re.sub("film", " ", g)
        g = g.split()[-1] if g != " " else g
        return re.sub(",|\.", "", g.strip())

    df["GenreFix"] = df["Genre"].apply(_primary_genre)
    return df
31,618
import base64


def base64解码(data):
    """Decode a base64 string into raw ``bytes``."""
    return base64.b64decode(data)
6b52b555b29595b1cbe4a9510fd8b2c0902511ae
31,619
def trackSpeeds_to_bodyFixed(right_track_speed, left_track_speed, track_width):
    """Map individual skid-steer track speeds to body-fixed velocities.

    Arguments:
        right_track_speed - speed of the right track
        left_track_speed - speed of the left track
        track_width - distance between the two tracks

    Returns:
        (velocity, angular_velocity): forward speed is the mean of the two
        track speeds; angular velocity is their difference over the track
        width (positive when the right track is faster).
    """
    forward = (right_track_speed + left_track_speed) / 2
    turn_rate = (right_track_speed - left_track_speed) / track_width
    return forward, turn_rate
a2b720f100776ef696d3479a28f35811ff75966d
31,620
def chunk(lst, n_chunks):
    """Split *lst* into *n_chunks* contiguous parts of near-equal length.

    The first ``len(lst) % n_chunks`` parts get one extra element, as in
    https://stackoverflow.com/questions/2130016/

    Parameters
    ----------
    lst : list
    n_chunks : int

    Returns
    -------
    list
        chunked list
    """
    base, extra = divmod(len(lst), n_chunks)
    parts = []
    start = 0
    for i in range(n_chunks):
        end = start + base + (1 if i < extra else 0)
        parts.append(lst[start:end])
        start = end
    return parts
418fa1599676821bb897efd139d3c17c5facebb8
31,622
def convert_lon(lon):
    """Convert a single longitude value to a floating point number.

    Input longitude can be string or float, in -135, 135W, 225 or 225E
    format. Output lies in the range 0 <= lon <= 360 (degrees east).
    """
    text = str(lon)
    if 'W' in text:
        deg_east = 360 - float(text[:-1])
    elif 'E' in text:
        deg_east = float(text[:-1])
    else:
        value = float(text)
        deg_east = value + 360 if value < 0.0 else value
    assert 0 <= deg_east <= 360, "Longitude must lie between 0-360E"
    return deg_east
b86343766209d81957d74b8ba96ebc69f462289a
31,623
def parse_info_value(value):
    """Parse a textual INFO-style value.

    Values containing a dot become floats, plain digits become ints, and
    ``k=v,...`` strings become dicts (values parsed recursively).  Anything
    else is returned unchanged.

    :param value: raw string value.
    :return: int, float, dict or the original string.
    """
    try:
        return float(value) if '.' in value else int(value)
    except ValueError:
        pass
    if ',' in value or '=' in value:
        parsed = {}
        for pair in value.split(','):
            key, raw = pair.rsplit('=', 1)
            parsed[key] = parse_info_value(raw)
        return parsed
    return value
4c23915ef5184968f0a16ce7ec4f55595149c99f
31,624
def force_force_list(data):
    """Wrap data in a list.

    We need to define this awkwardly named method because DoJSON's
    ``force_list`` returns tuples or ``None`` instead of lists.  ``None``
    becomes ``[]``, tuples/sets are converted, lists pass through, and any
    other value is wrapped in a one-element list.
    """
    if data is None:
        return []
    if isinstance(data, list):
        return data
    if isinstance(data, (tuple, set)):
        return list(data)
    return [data]
5ba5e0f966294ea7aca38fc89378b4e09cf2eac9
31,625
def set_replay_speed_exponent(speed: int) -> None:
    """Set the replay speed (internal stub).

    The actual displayed speed is ``pow(2, speed)``; the real
    implementation is supplied by the engine, so this placeholder simply
    returns ``None``.
    """
    return None
92ef9d1f58a92241dcec8e7cd10d48c402bbcc48
31,626
def crop_from_quad(quad):
    """Return the bounding box (left, top, right, bottom) of 4 corners."""
    xs = [corner[0] for corner in quad]
    ys = [corner[1] for corner in quad]
    return (min(xs), min(ys), max(xs), max(ys))
eeb2a2041f52fd342d5f121ecf4df5cf42f07ac2
31,629
def get_admin_auth_token(session):
    """Get the admin token from a keystone session.

    Currently used for inspector, glance and swift clients.  Only the
    swift client does not actually support using sessions directly
    (LP #1518938); the others will be updated in ironic code.
    """
    token = session.get_token()
    return token
56efe66c24111cad0fda185280d8179007122c7c
31,630
import json


def load_jsonl_with_filtering(path, wordlist=None):
    """Load a large jsonl file line by line into one dict.

    When a non-empty *wordlist* is given, only keys present in it are
    kept; otherwise every key from every line is stored (later lines
    overwrite earlier duplicates).
    """
    res = {}
    with open(path, "r") as f:
        for raw_line in f:
            record = json.loads(raw_line)
            for key in record:
                # Falsy wordlist (None or empty) means "keep everything",
                # matching the original truthiness check.
                if not wordlist or key in wordlist:
                    res[key] = record[key]
    return res
f026c8084f1361f67084524db26d8188cf0a35b3
31,631
import ast


def get_name(a):
    """Return the identifier of an AST ``Name``, ``arg`` or ``FunctionDef``.

    Raises ``NotImplementedError`` for any other node type.
    """
    if isinstance(a, ast.Name):
        return a.id
    if isinstance(a, ast.arg):
        return a.arg
    if isinstance(a, ast.FunctionDef):
        return a.name
    raise NotImplementedError()
ed3d414edf664b7261cfb0eb001569818fdc38f7
31,632
import textwrap


def reflow(text, width, protect):
    """Reflow the given *text* to line width *width*.

    Returns *text* unchanged when *protect* is True.  Long words and
    hyphenated words are never broken across lines.
    """
    if protect:
        return text
    cleaned = textwrap.dedent(text.strip())
    return textwrap.fill(cleaned, width=width,
                         break_on_hyphens=False, break_long_words=False)
dedfb019188d4963cb0eaec185d46e60b6a9ab38
31,633
from datetime import datetime


def tstr2iso_nocolon(input_string: str) -> datetime:
    """Parse a colon-free, filesystem-safe ISO timestamp.

    The expected format is ``YYYY-MM-DDTHHMMSS`` (no colons in the time
    part, so the string can be used in file paths).

    :return: the corresponding ``datetime``.
    """
    return datetime.strptime(input_string, "%Y-%m-%dT%H%M%S")
f482fabfe897e27d389c8193298858f6e783a7dd
31,634
def array(fr):
    """Return a Numpy array for an input frame or Numpy array.

    Parameters
    ----------
    fr : Frame, or 2D ndarray
        Input frame or Numpy array

    Returns
    -------
    data : ndarray
        Data array
    """
    try:
        data = fr.get_data()
    except AttributeError:
        # Plain arrays (no get_data attribute) pass through unchanged.
        data = fr
    return data
4268494b205e484863c19e5bad72e69972ccb680
31,636
def makelabel(hmsdms, Hlab, Dlab, Mlab, Slab, prec, fmt, tex):
#-------------------------------------------------------------------------------
    """
    Build one axis-tick label in hours/minutes/seconds or
    degrees/minutes/seconds notation, as plain text or LaTeX.

    :param hmsdms: tuple ``(Ihours, Ideg, Imin, Isec, Fsec, sign)`` as
                   produced by :func:`wcsgrat.gethmsdms`.  ``Ihours`` is
                   ``None`` for a dms (degrees) value.
    :param Hlab:   if False, there is no need to print the hours.
    :param Dlab:   if False, there is no need to print the degrees.
    :param Mlab:   if False, there is no need to print the minutes.
    :param Slab:   if False, there is no need to print the seconds.
    :param prec:   number of decimals for the seconds field.
    :param fmt:    optional string built from ``H D M S`` (force a field
                   on), ``h d m s`` (force a field off) and a dot whose
                   following characters set the precision by their count,
                   e.g. ``"HMS.SSS"`` or ``"HMS.###"``.  Only applied when
                   it does not contain a ``%`` (otherwise it is a common
                   printf-style format handled elsewhere).  Suppressing
                   minutes is overruled when hours/degrees and seconds are
                   both printed, to avoid non-standard labels like
                   ``2h30s``.
    :param tex:    if True, format the label in LaTeX.

    :Returns:      *lab*, the label string in either hms or dms.

    :Examples:     ``setp_tick(wcsaxis=0, fmt="HmS.SSS")``,
                   ``setp_tick(wcsaxis=1, fmt="DM")``,
                   ``setp_tick(wcsaxis=0, fmt="Hms")``.
    """
#-------------------------------------------------------------------------------
    Ihours, Ideg, Imin, Isec, Fsec, sign = hmsdms
    # hms is True for a time value (hours present), False for a dms value.
    hms = False
    if Ihours is not None:
        hms = True
    if not (fmt is None):
        if fmt.find('%') == -1:
            # Not a common format, must be a HMS/DMS format.
            # Capitals force a field on, lower case forces it off.
            if fmt.find('H') != -1:
                Hlab = True
            if fmt.find('D') != -1:
                Dlab = True
            if fmt.find('M') != -1:
                Mlab = True
            if fmt.find('S') != -1:
                Slab = True
            if fmt.find('h') != -1:
                Hlab = False
            if fmt.find('d') != -1:
                Dlab = False
            if fmt.find('m') != -1:
                Mlab = False
            if fmt.find('s') != -1:
                Slab = False
            # Everything after a dot sets the seconds precision by its length.
            s2 = fmt.split('.')
            if len(s2) > 1:
                prec = len(s2[1])
    lab = ""
    # Minutes must be printed if hours and seconds are required
    # otherwise one can end up with a minimal label with hours/degs and
    # no minuts in between
    if hms:
        if Hlab:
            if tex:
                lab += r"%d^{\rm h}" % Ihours
            else:
                lab += "%dh" % Ihours
            if Slab:
                Mlab = True
    else:
        if Dlab:
            # Degrees carry the sign explicitly; a space keeps alignment
            # for non-negative values.
            si = ' '
            if sign == -1:
                si = '-'
            if tex:
                lab += r"%c%d^{\circ}" % (si, Ideg)
            else:
                lab += "%c%dd" % (si, Ideg)
            if Slab:
                Mlab = True
    if Mlab:
        if tex:
            if hms:
                lab += r"%.2d^{\rm m}" % Imin
            else:
                lab += r"%.2d^{\prime}" % Imin
        else:
            lab += "%.2dm" % Imin
    if Slab:
        lab += "%.2d" % Isec
        if prec > 0:
            # Round the fractional seconds to 'prec' digits, zero-padded.
            fsec = ".%*.*d" % (prec, prec, int(round(Fsec*10.0**prec, 0)))
            lab += fsec
        if not tex:
            lab += 's'
        else:
            if hms:
                lab += r"^{\rm s}"
            else:
                lab += r"^{\prime\prime}"
    # A negative dms value whose degrees field was suppressed still needs
    # its minus sign prefixed.
    if sign == -1 and not Dlab:
        lab = "-"+lab
    return lab
f2d597064c1b0d087d4e6c3661f3a2d999fdeb13
31,638
def cast_as_sql_python_type(field, data):
    """Cast *data* to the native python type expected by a SQL column.

    Args:
        field (SqlAlchemy field): SqlAlchemy field used to cast the data.
        data: a data field to be cast.

    Returns:
        The data cast to the field's python type; strings are truncated
        to the column's VARCHAR(n) length when one is declared.
    """
    caster = field.type.python_type
    value = caster(data)
    if caster is str:
        # Respect a VARCHAR(n) limit by truncating over-long strings.
        limit = field.type.length
        if limit < len(value):
            value = value[:limit]
    return value
aa531d9c3ba7d68fa807d3b1631e867a9b344e03
31,639
import base64
import binascii


def base64_decode(text, notice):
    """<string> -- Decode <string> with base64."""
    # The docstring above is kept verbatim: it doubles as the bot's
    # runtime help text for this command.
    try:
        decoded = base64.b64decode(text.encode())
    except binascii.Error:
        notice("Invalid base64 string '{}'".format(text))
        return None
    return decoded.decode()
05fa8a0a560d74d83a51b247a99007a5e8f75e57
31,640
import os


def is_local_file_path(s):
    """Return True for an existing './'-relative file inside the cwd.

    Paths above the current working directory are not considered valid.
    Note: falls through to an implicit ``None`` (never ``False``) when
    any check fails, matching the original contract.
    """
    if not isinstance(s, str) or not s.startswith('./'):
        return None
    if not (os.path.exists(s) and os.path.isfile(s)):
        return None
    if os.path.abspath(s).startswith(os.getcwd()):
        return True
eb53deea6306fc399e6068519323c8ae658632d8
31,641
import getopt
import warnings


def get_command_line_args(argv):
    """
    Parse the command line arguments.

    Returns
    -------
    pm_model (str) - The PowerModels.jl power model e.g. "DCPPowerModel"
    pm_solver (str) - The solver to use, e.g. "juniper", "gurobi"
    grid_name (str) - optional if you only want to calculate one grid, e.g. "rts"
    lc (str) - the loadcases to calculate. Either "ts" or "scaled"
    kind (str) - the optimizations to run, e.g. "tnep,ots,repl". Can only be
                 a part of these like "tnep,repl"
    """
    pm_model, pm_solver, grid_name = None, None, None
    kind = "tnep,repl,ots"
    lc = "scaled"
    try:
        opts, args = getopt.getopt(argv, ":m:s:g:k:",
                                   ["model=", "solver=", "grid=", "kind="])
    except getopt.GetoptError:
        raise getopt.GetoptError("error reading input")
    for opt, arg in opts:
        if opt in ("-m", "--model"):
            pm_model = arg
        if opt in ("-s", "--solver"):
            pm_solver = arg
        if opt in ("-g", "--grid"):
            grid_name = arg
        if opt in ("-k", "--kind"):
            kind = arg
    # BUG FIX: the original constructed UserWarning(...) objects without
    # raising or emitting them, so missing --solver/--model went completely
    # unnoticed.  Emit real warnings instead (still non-fatal, so callers
    # relying on the returned Nones keep working).
    if pm_solver is None:
        warnings.warn("pm_solver is None. You must specify a solver with '--solver='",
                      UserWarning)
    if pm_model is None:
        warnings.warn("pm_model is None. You must specify a model with '--model='",
                      UserWarning)
    return pm_model, pm_solver, grid_name, lc, kind
126de7ba0aa6267ef9052b70ca4273ef1af97601
31,642
import collections
import csv


def read_dataset(dataset_path):
    """
    Read a tab-separated lexical dataset.

    Returns a tuple of three dicts: gloss data as
    ``{gloss: [[lang, trans, segments, cog_class], ...]}`` (duplicates
    skipped), concepticon data as ``{gloss: global_id}``, and ISO codes
    as ``{lang: iso_code}``.
    """
    data = collections.defaultdict(list)
    gloss_ids = {}
    iso_codes = {}
    with open(dataset_path, newline='', encoding='utf-8') as f:
        for row in csv.DictReader(f, delimiter='\t'):
            gloss = row['gloss']
            entry = [row['language'], row['transcription'],
                     row['tokens'], row['cognate_class']]
            if entry not in data[gloss]:
                data[gloss].append(entry)
            gloss_ids[gloss] = row['global_id']
            iso_codes[row['language']] = row['iso_code']
    return dict(data), gloss_ids, iso_codes
136e0ad25f798f870d975cf44c69d3b96aacdd43
31,643
def parse_header(ifh):
    """Modify *ifh* by advancing past the first line and parsing the header.

    @return dict mapping string of field name to its index
    """
    # BUG FIX: the original split the raw line, so the last field name kept
    # its trailing newline ("field\n"); strip line endings before splitting.
    header = ifh.readline().rstrip("\r\n")
    # Ignore the leading "#" before splitting into tab-separated fields.
    return {field: i for i, field in enumerate(header[1:].split("\t"))}
f7547938f6368c737ad9457b7b1d2e625edbf241
31,644
def interpolate_colors(colors, flat=False, num_colors=256):
    """Expand a list of colors into *num_colors* interpolated colors.

    Linearly interpolates between successive ``(r, g, b)`` tuples.  When
    *flat* is True a single flat list of channel ints is returned instead
    of a list of ``(r, g, b)`` tuples.
    """
    palette = []
    last = len(colors) - 1
    for i in range(num_colors):
        # Fractional position of this palette entry along the color list.
        position = (i * last) / (num_colors - 1.0)
        idx = int(position)
        alpha = position - float(idx)
        if alpha > 0:
            lo, hi = colors[idx], colors[idx + 1]
            r = (1.0 - alpha) * lo[0] + alpha * hi[0]
            g = (1.0 - alpha) * lo[1] + alpha * hi[1]
            b = (1.0 - alpha) * lo[2] + alpha * hi[2]
        else:
            # Exactly on a stop: no blending with the next color needed.
            r = (1.0 - alpha) * colors[idx][0]
            g = (1.0 - alpha) * colors[idx][1]
            b = (1.0 - alpha) * colors[idx][2]
        rgb = (int(r), int(g), int(b))
        if flat:
            palette.extend(rgb)
        else:
            palette.append(rgb)
    return palette
6d8e87bfb727664f76a2b2db0c122b30200e175c
31,645
def polevl(x, coefs, n):
    """Evaluate a polynomial given by *coefs* (highest degree first) at *x*.

    :param x: polynomial variable
    :type x: float
    :param coefs: polynomial coefficients, highest power first
    :type coefs: list
    :param n: nominal degree; unused here (kept for interface parity with
        the cephes ``polevl`` signature)
    :type n: int
    :return: result as float
    """
    degree = len(coefs) - 1
    total = 0
    for k, coef in enumerate(coefs):
        total += coef * x ** (degree - k)
    return total
f5ffb93d252a780fd02ac791afe8c0a0f61fb9a4
31,647
def reset_key_secret(api, configuration, api_version, api_exception, key_id):
    """Reset the secret of an API key.

    :param api: The Deep Security API modules.
    :param configuration: Configuration object to pass to the api client.
    :param api_version: The version of the API to use.
    :param api_exception: The Deep Security API exception module.
    :param key_id: The ID of the key.
    :return: the API response containing the new secret, or a string
        describing the exception on failure.
    """
    try:
        keys_api = api.APIKeysApi(api.ApiClient(configuration))
        return keys_api.replace_api_secret_key(key_id, api_version)
    except api_exception as e:
        return "Exception: " + str(e)
99ca4d56e07a4f5f9fb8fa3b0fb2c16f2cdcac32
31,648
import requests


def url_ok(url):
    """
    Check that a given URL is reachable via an HTTP HEAD request.

    :param url: A URL
    :rtype: bool
    """
    response = requests.head(url)
    return response.ok
f4aa22e55a6c05948488fcb16098c4ed76f9c0d6
31,649
from typing import Any, Sequence, Union


def supports_iteration(value: Union[Any, Sequence[Any]]) -> bool:
    """Return ``True`` if *value* yields at least one item when iterated.

    Note: non-iterables AND empty iterables both return ``False`` —
    the check requires at least one element to succeed.
    """
    try:
        next(iter(value))
        return True
    except TypeError:
        return False
    except StopIteration:
        return False
b74b0ffc85fdfdabfdd1fb5f352c23632966eb97
31,650
import re


def getAllelesPop(vcffile, SampleNames):
    """Extract the alleles and their numerical code from a multiple-sample
    VCF file of a population with given sample names.

    Return the SNP number, the SNP position and the dictionary that
    specifies the reference and the observed alternative alleles for the
    SNP, as a list of tuples ``(snp_index, position, {ALLELE: code})``.
    Variants homozygous across the whole population are skipped.

    Raises ValueError when no sample names are given, when a data line
    precedes the header, or when the header's sample names do not match
    *SampleNames*.
    """
    # _s numbers only the variants we keep (polymorphic in this population).
    _s = -1
    alllist, SMnames = [], []
    _header = False
    map_dict = dict()
    if not SampleNames:
        raise ValueError('No sample names were given to getAllelesPop to extract their alleles!')
    # NOTE(review): 'rU' mode was removed in Python 3.11 — confirm the
    # supported interpreter version for this code.
    with open(vcffile, 'rU') as vfile:
        for _geno in vfile:
            if _geno[0] != '#':
                # Data line: must appear only after the #CHROM header line.
                if not _header:
                    raise ValueError('Unexpected header detected in the VCF file!')
                _alllst = _geno.split()
                #_s+=1
                _allalleles_at_s = ""  # Collect all of the alleles at s to throw away variants homozygous for all population members
                sample_alleles = [[] for _sample in SMnames]
                for _sample in SMnames:
                    # Genotype is the first colon-separated field of the
                    # sample column; split on '/' (unphased) or '|' (phased).
                    sample_alleles[map_dict[_sample]] = re.split('/|\|', _alllst[9:][map_dict[_sample]].split(':')[0])  # Collect the alleles for each member of the population
                    _allalleles_at_s += "".join(sample_alleles[map_dict[_sample]])
                if len(set(_allalleles_at_s) - set(["."])) > 1:  # Throw away variants homozygous in the population
                    _s += 1
                    # REF allele plus comma-separated ALT alleles.
                    alleles = [_alllst[3]] + re.split("\ *,\ *|,\ *|\ *,|,", _alllst[4])
                    codes = []
                    for _alleles in sample_alleles:
                        codes += _alleles
                    codes = sorted(set(codes) - set(['.', '-']), key=lambda x: int(x))  # '.', '-' corresponds to missing genotypes
                    # Ensure the reference code '0' is present even when no
                    # sample carries the reference allele.
                    if '0' not in codes:
                        codes = ['0'] + codes
                    alllist.append((_s, int(_alllst[1]), {_x.upper(): int(_y) for _x, _y in zip(alleles, codes)}))
            elif _geno[1] != '#':
                # Single-'#' line: the #CHROM column-header row.
                if set(['CHROM', 'POS', 'ID', 'REF', 'ALT', 'QUAL', 'FILTER', 'INFO', 'FORMAT']).issubset(set(_geno.lstrip('#').split())):
                    _header = True
                    SMnames = _geno.split()[9:]  # extract sample names in the VCF header
                    if len(SMnames) != len(SampleNames) or set(SMnames).symmetric_difference(set(SampleNames)):
                        raise ValueError('The given sample names do not match those in the VCF header!')
                    # Map each header sample name to its position in the
                    # caller-supplied SampleNames ordering.
                    map_dict = dict(zip(SMnames, [SampleNames.index(_sample) for _sample in SMnames]))
            else:
                # '##' meta-information lines are ignored.
                pass
    return alllist
f6179fdc88b83b95b37285ae9ee079a3c149d0e1
31,651
import requests


def fetch_idol(idx):
    """Fetch data for a single idol by entry index.

    Returns the decoded JSON payload when the lookup succeeds (``result``
    is ``'success'`` and a non-empty ``name`` is present), else ``None``.
    Raises on HTTP errors.
    """
    response = requests.post('https://www.produce101event.com/entry_info.php',
                             data={'idx': idx})
    response.raise_for_status()
    payload = response.json()
    if payload['result'] == 'success' and payload['name']:
        return payload
    return None
7378ba567336df0240116c4355c2fd8cf56e52d7
31,653