content (string, lengths 35 – 416k)
sha1 (string, length 40)
id (int64, 0 – 710k)
def spatial_binning(info_dict):
    """
    Compute the spatial binning, i.e. the number of pixels affected by
    the light in the spatial direction.

    Parameters
    ----------
    info_dict: dictionary
        Must contain 'radius_aperture_phot' and 'pixelScale_X'.

    Returns
    -------
    info_dict: dictionary
        Input dictionary with the 'spatial_binning' key added (in pixels).
    """
    # x2 to get the diameter, /pixel_scale to express it in pixels
    spabin = 2. * info_dict['radius_aperture_phot'] / info_dict['pixelScale_X']
    info_dict['spatial_binning'] = spabin
    return info_dict
a6af35e8e7afabc6dea2aa89937aecd096f265c2
697,640
def keep_alpha_numeric(input_text: str) -> str:
    """Remove any character except alphanumeric characters."""
    return ''.join(c for c in input_text if c.isalnum())
6775d7ba72ae06c294acde9bc2964ae51a954ce3
697,641
def reunion(set1, set2):
    """Given two sets, return their union."""
    return set1 | set2
3e7af2a784d570d2a28c6665905242f5b6e812de
697,642
def numberList(listItems):
    """Convert a list of number strings into a list of floats.

    @param listItems (list) list of number strings
    @return list of floats
    """
    return [float(node) for node in listItems]
df50b5ce22f63c22cfa67051422cc4b13718cb57
697,643
import six


def _safe_write(function):
    """Avoid issues related to unicode strings handling."""
    def _wrapper(message):
        # Unicode strings are not properly handled by the serial module
        if isinstance(message, six.text_type):
            function(message.encode("utf-8"))
        else:
            function(message)
    return _wrapper
b467c959e0833f8cb016b48fc031e0223fc2809e
697,644
def results_ok(api, cfg, utils, size, packets):
    """Returns true if stats are as expected, false otherwise."""
    port_results, flow_results = utils.get_all_stats(api)
    return packets == sum(
        [p.frames_tx for p in port_results if p.name == "raw_tx"]
    )
618106436b0de5403f9f325c61fb2066a9493b31
697,645
def are_brackets_balanced(expr):
    """Checks to see if there are parens or brackets that are unbalanced."""
    stack = []
    # Traversing the Expression
    for char in expr:
        if char in ["(", "{", "["]:
            # Push the element in the stack
            stack.append(char)
        elif char in [")", "}", "]"]:
            # If the current character is not an opening bracket, then it
            # must be closing. So the stack cannot be empty at this point.
            if not stack:
                return False
            current_char = stack.pop()
            if current_char == '(':
                if char != ")":
                    return False
            if current_char == '{':
                if char != "}":
                    return False
            if current_char == '[':
                if char != "]":
                    return False
    # Check Empty Stack
    if stack:
        return False
    return True
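A quick illustrative check (not part of the original record):

are_brackets_balanced("{[()]}")  # True
are_brackets_balanced("([)]")    # False: '[' is closed by ')' out of order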
3e548cb2999c404d6ed0587ef1a959767c11d479
697,646
def clean_rows(all_rows, original_rows):
    """Remove duplicated values and remove original rows."""
    all_set = set(all_rows)
    original_set = set(original_rows)
    return all_set - original_set
76de02aa72e8753481b58b4eddcabc4330a92e0b
697,647
import re


def parse_phmmer_output(result_path):
    """
    Read the fasta header of the sequence with highest homology
    from a phmmer result file.

    returns: string
    """
    with open(result_path, "r") as infile:
        results_str = infile.read()
    # Hacky regex solution to parsing results...
    try:
        hit = re.findall(r"!!!!![0-9]*", results_str)[0]
        return hit
    except IndexError:
        return None
4e7f40ff0f16316ee88096d8f3f4084706cef42b
697,648
from typing import Optional
import subprocess


def get_command_output(command: str, cwd: Optional[str] = None):
    """
    Executes the given "command" and returns the output of the command as string.

    The additional "cwd" option can be used to pass a string path description,
    which is supposed to be used as the current working directory for the
    command execution.

    :param command: The command which is to be executed
    :type command: str
    :param cwd: The path string of the current working directory to be assumed
        for the command execution
    :type cwd: Optional[str]
    """
    completed_process = subprocess.run(command, cwd=cwd, shell=True,
                                       stdout=subprocess.PIPE)
    byte_output = completed_process.stdout
    return byte_output.decode('utf-8')
7740f0a94212e2f47394cc55b55a13b127bc9c13
697,650
import sys


def is_linux():
    """Is the current platform Linux?"""
    return sys.platform.startswith('linux')
31368146cdfee5ee81627084a1d055d59c697262
697,651
from pathlib import Path

import requests
from bs4 import BeautifulSoup


def Soup(s, features='lxml', **kw):
    """
    A fake class, of what BeautifulSoup should have been.

    It accepts a url or a file, in addition to html/xml as usual.
    """
    if isinstance(s, Path):
        src = s.read_text()
    elif s.startswith('http'):
        src = requests.get(s).text
    elif Path(s).exists():
        src = open(s).read()
    else:
        src = s
    return BeautifulSoup(src, features, **kw)
71ac4ad935caf5a97c45710d98a9bf6980751a81
697,652
import sqlite3


def is_unique_failure(e: sqlite3.IntegrityError) -> bool:
    """
    Return whether the given error represents a UNIQUE constraint failure,
    where an insertion conflicted with a value already present in the DB.
    """
    return str(e).startswith("UNIQUE constraint")
1292002a1296cd9ced4b35b59b3f58e9040b59a7
697,653
import torch


def evaluate(model, dataset, labels):
    """
    Evaluate the dataset on the current model.

    :param model: Module
    :param dataset: 2D FloatTensor
    :param labels: 1D tensor of target labels
    :return: float
    """
    # Tell PyTorch you are evaluating the model.
    model.eval()
    total = float(dataset.shape[0])
    output = model(dataset)
    guess = torch.argmax(output, dim=1)
    correct_guess = torch.sum(guess == labels)
    return correct_guess / total
498b5c62a8921d698a31d8030b248ba391677536
697,654
def _get_p_y_z_halfspace(particles):
    """
    This function calculates the probabilities of the y and z half spaces
    for a given set of particles.

    Parameters
    ----------
    particles : list
        List of SourceParticle

    Returns
    -------
    p_y_halfspace : float
        The probability of the y half space
    p_z_halfspace : float
        The probability of the z half space
    """
    y_count, z_count = 0, 0
    for s in particles:
        if s.y < 0.5:
            y_count = y_count + 1
        if s.z < 0.5:
            z_count = z_count + 1
    p_y_halfspace = float(y_count) / len(particles)
    p_z_halfspace = float(z_count) / len(particles)
    return p_y_halfspace, p_z_halfspace
fb822d7b03b4fc387fff8723d5a8d7dafe8f9bfc
697,655
import torch


def get_data(generic_iterator, generic_loader):
    """Code to get minibatch from data iterator

    Inputs:
    - generic_iterator; iterator for dataset
    - generic_loader; loader for dataset

    Outputs:
    - data; minibatch of data from iterator
    - generic_iterator; iterator for dataset, reset if you've reached
      the end of the dataset
    """
    try:
        data = next(generic_iterator)[0]
    except StopIteration:
        generic_iterator = iter(generic_loader)
        data = next(generic_iterator)[0]

    if torch.cuda.is_available():
        data = data.cuda()
    return data, generic_iterator
e827ab7cea13c96953260d6b157a3e6ab370c6c9
697,656
import json


def split_cmd(cmdstr):
    """Interpret a JSON command string and return its method, params and rune."""
    cmd = json.loads(cmdstr)
    return cmd['method'], cmd.get('params', {}), cmd.get('rune')
cb580c244699fe9657e7e43b243ddc48e8f5d6f6
697,657
def get_name(schema_url):
    """
    Extract the item name from its URL.

    :param schema_url: the URL of the schema
    :return name: the name of the schema (eg: 'item_schema.json')
    """
    name = schema_url.split("/")[-1].replace("#", '')
    return name
e7632dc959a4503b51cc5c3f851a063056893507
697,658
def create_time(t):
    """Day, month, weekday and similar conditions to be added later."""
    return f"null null null null null {t}"
1e5ae6c5cac9b984c4f02b8c02e58a2da5b7972f
697,659
import torch
from typing import Callable


def encode_and_aggregate(input_tensor: torch.Tensor,
                         encoder: torch.nn.Module,
                         num_encoder_input_channels: int,
                         num_image_channels: int,
                         encode_channels_jointly: bool,
                         aggregation_layer: Callable) -> torch.Tensor:
    """
    Function that encodes a given input tensor either jointly using the encoder
    or separately for each channel in a sequential manner. Features obtained at
    the output encoder are then aggregated with the pooling function defined by
    `aggregation_layer`.
    """
    if encode_channels_jointly:
        input_tensor = encoder(input_tensor)
        input_tensor = aggregation_layer(input_tensor)
    else:
        shape = input_tensor.shape
        channel_shape = (shape[0], num_encoder_input_channels, shape[2], shape[3], shape[4])
        encode_and_aggregate = []
        # When using multiple encoders, it is more memory efficient to aggregate the individual
        # encoder outputs and then stack those smaller results, rather than stack huge outputs
        # and aggregate.
        for i in range(num_image_channels):
            start_index = i * num_encoder_input_channels
            end_index = start_index + num_encoder_input_channels
            encoder_output = encoder(input_tensor[:, start_index:end_index].view(channel_shape))
            aggregated = aggregation_layer(encoder_output)
            encode_and_aggregate.append(aggregated)
        input_tensor = torch.cat(encode_and_aggregate, dim=1)
    return input_tensor
f2d65e1c2c214cfddae40dd235fba86a61866277
697,661
def max_dot_product(a, b):
    """
    Given two sequences 𝑎1, 𝑎2, . . . , 𝑎𝑛 (𝑎𝑖 is the profit per click of the
    𝑖-th ad) and 𝑏1, 𝑏2, . . . , 𝑏𝑛 (𝑏𝑖 is the average number of clicks per day
    of the 𝑖-th slot), we need to partition them into 𝑛 pairs (𝑎𝑖, 𝑏𝑗) such
    that the sum of their products is maximized.
    """
    res = 0
    a.sort(reverse=True)
    b.sort(reverse=True)
    for i in range(len(a)):
        res += a[i] * b[i]
    return res
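Sorting both sequences in descending order pairs the largest profit with the most-clicked slot; a small illustrative example:

max_dot_product([1, 3, -5], [-2, 4, 1])  # 23: pairs (3,4), (1,1), (-5,-2) -> 12 + 1 + 10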
afdf565cf32c3bcc9e0a69a4ea565a3dcdd8f8a4
697,662
def apply_each(functions, *args, **kwargs):
    """Returns list containing result of applying each function to args."""
    return [f(*args, **kwargs) for f in functions]
15dc85cb155db030f4eaf2eccdbd40ed20585b82
697,663
def _model_delete_by_id_function_name(model):
    """Returns the name of the function to delete a model by id."""
    return '{}_delete_by_id'.format(model.get_table_name())
df3e1de727b585c1cab403dfcfea08eafe25aeec
697,664
def mark_all_tables_per_draw(draw, boards_data_dict):
    """
    Once a draw has been drawn, we need to mark the table with, say,
    a * at that point.

    :param draw: int [number drawn]
    :param boards_data_dict: dictionary of tables
    """
    for key in boards_data_dict:
        table = boards_data_dict[key]
        for col_index, col in enumerate(table.columns):
            column_data = [i for i in table[col]]
            for row_index, value in enumerate(column_data):
                # print(f'row: {row_index} column: {col_index} value: {value}')
                if value == draw:
                    table.iloc[row_index, col_index] = "*"
        boards_data_dict[key] = table
    return boards_data_dict
e1c3fc89e5082e6b365299be86b0ba5bb8ce46b5
697,665
import os
import click


def conf_file_exists(ctx, param, value):
    """Validate conf file exists."""
    conf_file = "configuration.py"
    if value[-len(conf_file):] != conf_file:
        value = os.path.join(value, conf_file)
    value = os.path.realpath(value)
    if os.path.isfile(value):
        return value
    else:
        raise click.BadParameter(
            "'configuration.py' not found at '{path}'".format(
                path=os.path.split(value)[0]
            )
        )
114b836d52f7fe1f7491a5e20ad8e994ce04399b
697,666
def Hex2Rgb(hex="#ffffff"):
    """
    Convert a hexadecimal color string to RGB format.
    https://blog.csdn.net/sinat_37967865/article/details/93203689
    """
    r = int(hex[1:3], 16)
    g = int(hex[3:5], 16)
    b = int(hex[5:7], 16)
    rgb = str(r) + "+" + str(g) + "+" + str(b)  # custom RGB separator
    return rgb
7a71fdac7907252e945774429a61ac60702b92a1
697,667
def subset_matrix(matrix, name_list_1, name_list_2):
    """Subsetting matrix into two symmetric matrices given two label lists.

    Parameters:
    ----------
    matrix : Pandas DataFrame
        Full similarity matrix
    name_list_1 : list
        list of names
    name_list_2 : list
        list of names

    Returns:
    --------
    list: list with subsets of matrix
    """
    sim_matrix_1 = matrix[name_list_1].loc[name_list_1]
    sim_matrix_2 = matrix[name_list_2].loc[name_list_2]
    sim_matrix = matrix[name_list_1].loc[name_list_2]
    return sim_matrix_1, sim_matrix_2, sim_matrix
1b36308c73c864fa446b0bc756240d4d2a8639a6
697,668
def sanitize_image(author_img_url):
    """Substitute a default profile image URL when the author image is missing."""
    if author_img_url is None:
        author_img_url = "https://www.pngkey.com/png/detail/230-2301779_best-classified-apps-default-user-profile.png"
    return author_img_url
e980126e589a19f0ae750dfebdcc4095c700081b
697,669
def capitalize_all(line):
    """Amos has a habit of capitalizing the first letter of all words in a statement."""
    words = line.split(" ")
    words = [word.capitalize() for word in words]
    return " ".join(words)
ab2b02bc8df4eb010fb040368724879ac129e5c6
697,670
import re


def is_hash(text):
    """
    >>> is_hash(sha256(b'blode'))
    True
    """
    return bool(re.match(r'^[0-9a-f]{64}$', text))
23d821357a794dadc86a9becad7d0da709c05b1e
697,671
import torch


def tokenize(
    seq,
    tokenizer,
    add_special_tokens=True,
    max_length=10,
    dynamic_padding=True,
    truncation=True,
):
    """
    :param seq: sequence of sequences of text
    :param tokenizer: bert_tokenizer
    :return: torch tensor padded up to length max_length of bert tokens
    """
    tokens = tokenizer.batch_encode_plus(
        seq,
        add_special_tokens=add_special_tokens,
        max_length=max_length,
        padding="longest" if dynamic_padding else "max_length",
        truncation=truncation,
    )["input_ids"]
    return torch.tensor(tokens, dtype=torch.long)
8573b19ab395c2957f8a95c80ca9e5514cdbea73
697,672
def my_sum(x_val: int, y_val: int) -> int:
    """Sum 2 integers.

    Args:
        x_val (int): integer to sum.
        y_val (int): integer to sum.

    Returns:
        int: result of the summation.
    """
    assert isinstance(x_val, int) and isinstance(
        y_val, int
    ), "Input parameters should be integers."
    return x_val + y_val
c10001f9ff720ce3d180aaa89af555ac1860ae33
697,673
def which_axis(df, series):
    """Returns the axis matching the series' index (0=rows, 1=columns)
    or None if no match. Not exact, but sufficient.
    """
    series_indices = set(series.index)
    if series_indices == set(df.index):
        return 0
    elif series_indices == set(df.columns):
        return 1
    else:
        return None
ebc28c2a5f0e796e1a1c95c0e0efdaf31783f76a
697,674
def get_beta(matrix_list, List=True):
    """Function to get a beta from a list of cov matrices."""
    if List:
        beta_list = []
        for matrix in matrix_list:
            beta = matrix[1][1] / matrix[0][1]
            beta_list.append(beta)
        return beta_list
    else:
        beta = matrix_list[1][1] / matrix_list[0][1]
        return beta
c43607162dd70439e04fea66eda8cc18b703673c
697,675
def cidr_to_mask(cidr):
    """
    Converts decimal CIDR notation to a quad dotted subnet mask.

    :param cidr: Decimal CIDR number
    :return: Quad dotted subnet mask as string
    """
    cidr = int(cidr)
    mask = (0xffffffff >> (32 - cidr)) << (32 - cidr)
    return (str((0xff000000 & mask) >> 24) + '.' +
            str((0x00ff0000 & mask) >> 16) + '.' +
            str((0x0000ff00 & mask) >> 8) + '.' +
            str(0x000000ff & mask))
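A brief sanity check of the bit arithmetic (illustrative only):

cidr_to_mask(24)  # '255.255.255.0'
cidr_to_mask(26)  # '255.255.255.192'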
037e1cacfb392fad4a06edf1d9c16c4f27c0468b
697,676
import random


def computer_choice() -> str:
    """
    This function takes no inputs and returns either "rock", "paper", or
    "scissors" randomly to simulate the computer's decision.
    """
    # 'random' is a library. A library is essentially a collection of code that
    # contains some kind of feature(s) that we want to use in our program. In
    # order to use these features, we need to bring the library into our program.
    # This is what is meant by 'importing' a library.

    # A function can 'return' a value, allowing it to directly represent some kind
    # of data. In this case, we are returning the output of the choice function
    # (from the random library). This choice function has 1 input (argument): a
    # tuple of 3 possible comma-separated choices that it may return.
    return random.choice(("rock", "paper", "scissors"))
1f8e064adfbd6242f2bb199caf98b77acf38c596
697,677
def filter_ps(ps):
    """
    ps -> List of paths

    Out of all the paths, select only the lowest weight paths that lead
    to the same end.
    """
    best_ps = {}
    for p in ps:
        w_4 = p[0][1]
        w_5 = p[1][1]
        x = (w_4 + w_5, w_4)
        state_5 = p[1][0]
        if (state_5 not in best_ps) or (x < best_ps[state_5][0]):
            best_ps[state_5] = (x, [p])
        elif x == best_ps[state_5][0]:
            best_ps[state_5][1].append(p)
    return [p for state_5 in best_ps for p in best_ps[state_5][1]]
d534d47a5faa04a5d207a8dff54c43c8ef966626
697,678
def convert_training_shape(args_training_shape):
    """Convert training shape."""
    training_shape = [int(args_training_shape), int(args_training_shape)]
    return training_shape
391a85eed34483461910be9bf0f94b936660abb0
697,679
def csm_value_repr(self):
    """
    String representation for the CSMValue namedtuple which excludes
    the "created" attribute that changes with each execution. Needed for
    consistency of ddt-generated test methods across pytest-xdist workers.
    """
    return f'<CSMValue exists={self.exists} raw_earned={self.raw_earned}>'
07917e16e6e3539916aac6e114375b598fad446b
697,680
def wants_child_support(responses, derived):
    """Return whether or not the user wants an order for child_support."""
    return 'Child support' in derived['orders_wanted']
3468f83f541eee7cba2e8ee3664c9efee1c0225a
697,681
def get_valid_config():
    """Helper function to return a valid config for the rule processor.

    This simulates what stream_alert.rule_processor.load_config will
    return in a very simplified format.

    Returns:
        dict: contents of a valid config file
    """
    return {
        'logs': {
            'json_log': {
                'schema': {
                    'name': 'string'
                },
                'parser': 'json'
            },
            'csv_log': {
                'schema': {
                    'data': 'string',
                    'uid': 'integer'
                },
                'parser': 'csv'
            }
        },
        'sources': {
            'kinesis': {
                'stream_1': {
                    'logs': [
                        'json_log',
                        'csv_log'
                    ]
                }
            }
        },
        'types': {
            'log_type1': {
                'command': ['cmdline', 'commandline']
            }
        },
        'global': {
            'account': {
                'aws_account_id': '123456123456'
            },
            'infrastructure': {
                'monitoring': {
                    'create_sns_topic': True
                }
            }
        }
    }
774e0d9b40b8b0ea25ce10907b0f9954d0b7f966
697,682
import os


def relpath(path, start='.'):
    """Create a relative path from the start directory.

    Adds './' if the file is in the start directory.
    """
    path = os.path.relpath(path, start)
    if not os.path.dirname(path):
        path = "./{0}".format(path)
    return path
14872032783f933d352f8dad36d54bb8d4f95e81
697,683
def add_missing_columns(df1, df2, fill_value=0):
    """Updates df1 to have all the columns of df2."""
    # Things that are in df2 but not df1
    for c in set(df2.columns) - set(df1.columns):
        df1[str(c)] = [fill_value] * len(df1)
    # Sort the columns to be the same for both
    df1 = df1[sorted(df1.columns)]
    return df1
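A minimal sketch of the intended behavior, assuming plain pandas DataFrames:

import pandas as pd

df1 = pd.DataFrame({'a': [1, 2]})
df2 = pd.DataFrame({'a': [0], 'b': [0]})
df1 = add_missing_columns(df1, df2)
# df1 now has columns ['a', 'b'], with 'b' filled with 0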
1d038aecb8df1149467d98a6816b42e0e3bae12f
697,684
def _serial_from_status(status):
    """Find the best serial value from the status."""
    serial = status.get("device.serial") or status.get("ups.serial")
    if serial and (serial.lower() == "unknown" or serial.count("0") == len(serial)):
        return None
    return serial
5075493926a946878d2263dc79879809a2af4fca
697,685
import os


def read_from_handle_truncated(file_handle, max_len):
    """Read from file handle, limiting output to |max_len| by removing output
    in the middle."""
    file_handle.seek(0, os.SEEK_END)
    file_size = file_handle.tell()
    file_handle.seek(0, os.SEEK_SET)

    if file_size <= max_len:
        return file_handle.read()

    # Read first and last |half_max_len| bytes.
    half_max_len = max_len // 2
    start = file_handle.read(half_max_len)
    file_handle.seek(file_size - half_max_len, os.SEEK_SET)
    end = file_handle.read(half_max_len)
    truncated_marker = '\n...truncated %d bytes...\n' % (file_size - max_len)
    return start + truncated_marker + end
446f09fa2bf13a1199a7b3edf133b3ff8bf181d1
697,686
def craft_sms(machine, check, message, time_check):
    """Create a valuable alert with less text."""
    # keep just the hour, i.e. strip year/month/day
    time_check = time_check[-5:]
    # remove the domain
    machine = machine.split('.')[0]
    alert = f'{time_check} {machine}!{check} {message}'
    return alert[0:156]
3e926b44fdfca29ba731bd29c15790c53f395c85
697,687
def outside(chart, psg, start_beta=1., start='S'):
    """
    Calculates the outside probabilities of the CKY entries and adds them
    into the chart.

    I have the starting prob for the top right equal to the proportion of
    the sentence probability that the parse contributes. We might not need
    this: we might be able to multiply this in later, when we do the
    expected counts.

    Arguments
        chart      : filled CKY chart
        psg        : operations PSG
        start_beta : starting prob for the top right S. Usually this is 1,
                     but we have multiple parses.
        start      : start category

    Returns
        CKY chart with outside probs added to cell entries under key 'beta'
    """
    # initialise all betas to 0
    for i in chart:
        for lhs in chart[i]['phrase']:
            chart[i]['phrase'][lhs]['beta'] = 0.
        for lhs in chart[i]['lex']:
            chart[i]['lex'][lhs]['beta'] = 0.
    # initialise top right start to proportion of sentence prob this parse represents
    chart[0]['phrase'][start]['beta'] = start_beta
    c = start

    def beta_cell(i, c):
        """row i, category c"""
        # print("\nfor",i,c)
        for bp in chart[i]['phrase'][c]['bp']:
            if len(bp) == 2:  # phrasal rule
                left, right = bp[0], bp[1]
                left_p = chart[i]['lex'][left]['p']
                right_p = chart[i+1]['phrase'][right]['p']
                # print("right: %s p: %.5f\nleft: %s p: %.5f"%(right,right_p,left,left_p))
                rules = psg[c]  # rule set for this LHS
                rule_p = 0.  # initialise in case we don't find it
                for (rhs, p) in rules:
                    if rhs == [left, right]:  # if it's the right rule
                        rule_p = p  # get this prob
                # print("rule p: %.5f"%rule_p)
                beta = chart[i]['phrase'][c]['beta']  # beta of mother
                # print("beta: %.5f"%beta)
                # for each daughter, calculate the outside prob = p(rule) x beta(mother) x p(sister)
                chart[i]['lex'][left]['beta'] += rule_p * beta * right_p  # put the outside prob in the chart
                chart[i+1]['phrase'][right]['beta'] += rule_p * beta * left_p  # put the outside prob in the chart

    for i in range(len(chart)):  # do it for the whole chart
        for c in chart[i]['phrase']:
            beta_cell(i, c)
    # print(chart2string_beta(chart))
    return chart
48c1a3c2695dae639e0b19baac2937c849aff438
697,690
def head(your_list, default=None):
    """Simple head function implementation."""
    return next(iter(your_list or []), default)
382e6f069b7aa15c710b41007bbd03be23d63bde
697,692
import collections


def dict_merge(xs):
    """
    >>> dict_merge([{1: 2}, {3: 4}, {3: 5}])
    {1: 2, 3: 4}
    """
    return dict(collections.ChainMap(*xs))
fff3a615c44df45b83b6bb8c9368c4f12ecd94bc
697,693
from typing import Any, Iterable


def is_empty(iterable: Iterable[Any]) -> bool:
    """
    Check whether provided iterable is empty.

    :param iterable: iterable whose emptiness is to be checked.
    :return: flag indicating if the provided iterable is empty.
    """
    return not any(map(lambda el: True, iterable))
cef865deec6a7fd4241e15abaac7fced9ac114b0
697,694
def gen_xacro_macro(name, links, joints):
    """
    Generates (as a string) the complete urdf element sequence for a simple
    ROS-Industrial xacro macro that defines geometry (links, joints). It takes
    a single argument ``prefix`` that should be used when instantiating the
    macro in a composite parent scene.

    Note that the ``links`` and ``joints`` sequences should already be
    strings. The ``gen_link(..)`` and ``gen_joint_fixed(..)`` macros may be
    used for that.

    :param name: Name of the macro, ``str``
    :param links: Sequence containing all the links that should be defined by macro, ``seq(str)``
    :param joints: Sequence containing all the joints that should be defined by the macro, ``seq(str)``
    :returns: urdf element sequence for a xacro macro, ``str``
    """
    links_str = ''.join(links)
    joints_str = ''.join(joints)
    return '<xacro:macro name="{name}" params="prefix">{links}{joints}</xacro:macro>'.format(
        name=name, links=links_str, joints=joints_str)
4be7f3353a9fba1f7127e81e29e3536150ed834e
697,695
def solution(A):  # O(N)
    """
    Given an integer array nums, find the contiguous subarray (containing at
    least one number) which has the largest sum and return its sum.

    >>> solution([-2, 1, -3, 4, -1, 2, 1, -5, 4])
    6
    >>> solution([-2, -1, -3, 1])
    1
    """
    summed_values = []  # O(1)
    summed_value = 0  # O(1)
    for value in A:  # O(N)
        summed_value += value  # O(1)
        summed_values.append(summed_value)  # O(1)

    min_point = float('inf')  # O(1)
    largest_sum = 0  # O(1)
    for value in summed_values:  # O(N)
        if value < min_point:  # O(1)
            min_point = value  # O(1)
        elif value - min_point > largest_sum:  # O(1)
            largest_sum = value - min_point  # O(1)
    return largest_sum  # O(1)
9c41707940d563ba10c97af53e72f8bbf30fd071
697,696
import re


def re_comp_num_pos_name():
    """
    Compiles the regex pattern that extracts the pattern
    (num) (position) (last), (first)

    Example:
        s = '21 C Stepan, Derek'
        reg = re_comp_num_pos_name()
        num, pos, last, first = reg.findall(s)[0]

    :return: compiled regex
    """
    return re.compile(r'(\d+)\s*(\w+)\s*([^\,]+)[\W]+(\w+)')
5994efcca453cef7201b92ca53863fc34791c0f1
697,697
def _index(key, sequence, testfn=None, keyfn=None):
    """Return the index of key within sequence, using testfn for comparison
    and transforming items of sequence by keyfn first.

    >>> _index('e', 'hello')
    1
    >>> _index('E', 'hello', testfn=_equalsIgnoreCase)
    1
    >>> _index('x', 'hello')
    """
    index = 0
    for element in sequence:
        value = element
        if keyfn:
            value = keyfn(value)
        if (not testfn and value == key) or (testfn and testfn(value, key)):
            return index
        index += 1
    return None
0595726f9e14e8f4a3fd4b925de04c801be317a3
697,698
import platform
import os


def default_license_dir():
    """Get license file location.

    Raises:
        Exception: Works only on Windows/Linux

    Returns:
        [str]: License file directory
    """
    if platform.system() == "Windows":
        return os.path.join(os.environ["LOCALAPPDATA"], "Cubemos",
                            "SkeletonTracking", "license")
    elif platform.system() == "Linux":
        return os.path.join(os.environ["HOME"], ".cubemos",
                            "skeleton_tracking", "license")
    else:
        raise Exception("{} is not supported".format(platform.system()))
baa547820801bd73a61e959941fa00a77de2b7c6
697,700
import six


def like_filter(query, cls, search_opts):
    """Add 'like' filters for specified columns.

    Add a sqlalchemy 'like' filter to the query for any entry in the
    'search_opts' dict where the key is the name of a column in 'cls'
    and the value is a string containing '%'.

    This allows the value of a column to be matched against simple sql
    string patterns using LIKE and the '%' wildcard.

    Return the modified query and any entries in search_opts whose keys
    do not match columns or whose values are not strings containing '%'.

    :param query: a non-null query object
    :param cls: the database model class the filters will apply to
    :param search_opts: a dictionary whose key/value entries are interpreted as
        column names and search patterns
    :returns: a tuple containing the modified query and a dictionary of
        unused search_opts
    """
    if not search_opts:
        return query, search_opts

    remaining = {}
    for k, v in six.iteritems(search_opts):
        if isinstance(v, six.string_types) and (
                '%' in v and k in cls.__table__.columns):
            col = cls.__table__.columns[k]
            query = query.filter(col.like(v))
        else:
            remaining[k] = v
    return query, remaining
c42318350a9c19715fc6a2e0c74d8bec423b4cfc
697,701
def CalculateTFD(torsions1, torsions2, weights=None):
    """
    Calculate the torsion fingerprint deviation (TFD) given two lists of
    torsion angles.

    Arguments:
    - torsions1: torsion angles of conformation 1
    - torsions2: torsion angles of conformation 2
    - weights: list of torsion weights (default: None)

    Return: TFD value (float)
    """
    if len(torsions1) != len(torsions2):
        raise ValueError("List of torsion angles must have the same size.")

    # calculate deviations and normalize (divide by max. possible deviation)
    deviations = []
    for t1, t2 in zip(torsions1, torsions2):
        diff = abs(t1[0] - t2[0])
        if (360.0 - diff) < diff:  # we do not care about direction
            diff = 360.0 - diff
        deviations.append(diff / t1[1])

    # do we use weights?
    if weights is not None:
        if len(weights) != len(torsions1):
            raise ValueError("List of torsion angles and weights must have the same size.")
        deviations = [d * w for d, w in zip(deviations, weights)]
        sum_weights = sum(weights)
    else:
        sum_weights = len(deviations)

    tfd = sum(deviations)
    if sum_weights != 0:  # avoid division by zero
        tfd /= sum_weights
    return tfd
621bdf9c51130b33721e48c52f72053d9a0b5f19
697,702
def compare_values(answer, submitted_answer):
    """Comparing values."""
    if answer["value_type"] == "number":
        if "comparison_type" in answer and answer["comparison_type"] == "absolute":
            return abs(answer["value"]) == abs(submitted_answer)
    if answer["value_type"] != "string":
        return answer["value"] == submitted_answer
    else:
        if not isinstance(submitted_answer, str):
            return False
        return answer["value"].lower() == submitted_answer.lower()
7f1e7a9a2b2eb5943fd15562fe3e1d376f30c8db
697,703
def read_file(filename):
    """
    Return the contents of the file with the given filename as a string

    >>> write_file('read_write_file.txt', 'Hello World')
    >>> read_file('read_write_file.txt')
    'Hello World'
    >>> os.unlink('read_write_file.txt')
    """
    with open(filename) as in_fh:
        return in_fh.read()
756187a755a54b2d6e96ad6d297ceb2472afbb6c
697,704
import warnings


def _hideWarnings(func):
    """
    Decorator function that hides annoying deprecation warnings in
    find_peaks_cwt.
    """
    def func_wrapper(*args, **kwargs):
        with warnings.catch_warnings():
            warnings.simplefilter("ignore")
            return func(*args, **kwargs)
    return func_wrapper
cfbd9af74bb08cbfe4d6429c3b4ba2511fa3aa43
697,705
def whatLName(fName, listOfStudents):
    """
    Return the last name of the first student in listOfStudents with first
    name fName, False if none found.

    >>> whatLName("FRED",[Student("MARY","KAY","MATH"), Student("FRED","CRUZ","HISTORY"), Student("CHRIS","GAUCHO","UNDEC")])
    'CRUZ'
    >>>
    """
    # step through every item in listOfStudents;
    # when you find a match, return that student's last name
    for i in range(len(listOfStudents)):
        if listOfStudents[i].fName == fName:
            return listOfStudents[i].lName
    # if you got all the way through the loop and didn't find
    # the name, return False
    return False
81b070be78083697c51abdfed5da2287440b2717
697,706
def last_in_array(array, operation=None):
    """Return the last element in an array.

    I think this is just [-1]
    """
    if len(array) == 0:
        return None
    return array[-1]
fb93f365368b31ed31ab9f4c2dde066b1add4425
697,707
from typing import Callable


def linear_schedule(initial_value: float) -> Callable[[float], float]:
    """
    Linear learning rate schedule.

    :param initial_value: Initial learning rate.
    :return: schedule that computes current learning rate depending on
        remaining progress
    """
    def func(progress_remaining: float) -> float:
        """
        Progress will decrease from 1 (beginning) to 0.

        :param progress_remaining:
        :return: current learning rate
        """
        return progress_remaining * initial_value

    return func
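Illustrative usage (the 3e-4 value is just an example):

schedule = linear_schedule(3e-4)
schedule(1.0)  # 0.0003 at the start of training
schedule(0.5)  # 0.00015 halfway through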
47a580788c745b7566c22daa97c3aecee0bb8ff2
697,708
def fieldnames(cursor):
    """
    List of field names from the most recently executed query.

    :param cursor:
    :return: list[str]
    """
    return [f[0] for f in cursor.description]
f369ce8e904464082c65d855071bc2e1ebc2a21d
697,709
from datetime import datetime


def is_datenum(datenum):
    """Return True if given str is a date in format %Y%m%d."""
    try:
        datetime.strptime(datenum, "%Y%m%d")
        return True
    except (ValueError, TypeError):
        return False
c61998ebf18a3fbdd4c87463a2ab0790864c62b4
697,710
import math


def special_case(sku_data, free_trigger, trigger_amount, free_sku, free_sku_value, free_deal=None):
    """free_deal can be tuple of amount and value"""
    free_items = 0
    total = 0
    if sku_data[free_trigger] / trigger_amount >= 1:
        # for every <trigger_amount> of <free_trigger> we get a free <free_sku>
        # how many free items are we eligible for
        free_items = math.floor(sku_data[free_trigger] / trigger_amount)

    if free_deal and sku_data[free_sku] / free_deal[0] >= 1:
        deal_amount = free_deal[0]
        deal_value = free_deal[1]
        # calculate which deal is best
        a_bundles = math.floor(sku_data[free_sku] / deal_amount)
        bundle_total = 0
        bundle_total += a_bundles * deal_value
        bundle_total += (sku_data[free_sku] % deal_amount) * free_sku_value
        bundle_mod = sku_data[free_sku] % deal_amount
        if free_items > bundle_mod:
            bundle_total -= free_sku_value * bundle_mod
        else:
            bundle_total -= free_sku_value * free_items

        paid_for_items = sku_data[free_sku] - free_items
        if paid_for_items < 0:
            paid_for_items = 0
        free_total = paid_for_items * free_sku_value

        if bundle_total < free_total:
            total += bundle_total
        else:
            total += free_total
    else:
        total += sku_data[free_sku] * free_sku_value
        if free_items > 0 and sku_data[free_sku] >= free_items:
            total -= (free_items * free_sku_value)
    return total
924c1c6f612d98f8b99f1622c2827d97481a7a30
697,711
def convert_crlf(value):
    """
    Replace carriage return and line feed characters by their javascript value.

    Makes it possible to include titles with those characters in the aloha links.
    """
    return value.replace('\r', '\\r').replace('\n', '\\n')
17210472fcccb27f02fc8dcf5ebbe654bfa58278
697,712
def _compute_position(input, index):
    """Compute line/column position given an index in a string."""
    line = 1
    col = 1
    eol = None  # last end of line character
    for c in input[:index]:
        if c == '\n' or c == '\r':
            if eol is None or eol == c:
                eol = c
                line += 1
                col = 1
            else:
                # ignore second of '\n\r' and '\r\n' sequences
                eol = None
        else:
            col += 1
    return (line, col)
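For example (illustrative):

_compute_position("ab\ncd", 4)  # (2, 2): index 4 is 'd', on line 2, column 2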
f08217651d11ed09c1e100368aa8cc869c37e386
697,713
import itertools


def flatten(sequence):
    """Get a flat list out of a list of lists."""
    return list(itertools.chain(*sequence))
1c3c7c41969c7e172083e73f2e2aa5731fb56ada
697,715
def equal_nan(request):
    """Fixture for whether to consider NaN values equal when comparing fields."""
    return request.config.getoption("equal_nan")
8fce969c2c84201822db735d46574644124c5c1a
697,716
def round2(number: float) -> float:
    """Rounds a number to the second decimal."""
    return round(number, 2)
5c788d01bce28831145391dcb261362b8a2208f3
697,717
def flipslash(value):
    """Convert all backslashes to forward slashes (for apache)."""
    return value.replace("\\", '/')
8fa5abe7c334e0b229aa7e9b2477c3c9aecf38e3
697,718
def merge_lists(a, b):
    """
    A function to zip together two linked lists, augmenting the first linked
    list passed in to have the second list's values in it, alternating.
    """
    if b.head is None:
        return a
    if a.head is None:
        return b
    if a._size >= b._size:
        temp1, temp2 = a.head, b.head
        while temp2 is not None:
            a.insert_before(temp1.val, temp2)
            temp1, temp2 = temp1._next, temp2._next
        return a
    else:
        temp1, temp2 = a.head, b.head
        while temp1 is not None:
            b.insert_before(temp2.val, temp1)
            temp1, temp2 = temp1._next, temp2._next
        return b
6a979d6f63c59531b202fcad6f0d3701c483a18b
697,719
import re


def clean_label_or_catalog_number(string):
    """Clean label or catalog ID: '[TMZ12006]', '[Kalahari Oyster Cult]'"""
    string = string.replace('【', '[')
    string = string.replace('】', ']')
    result = re.search(r'(\[.*\])', string)
    if result is not None:
        label_or_catalog_number = result.groups()[0]
        string = string.replace(label_or_catalog_number, '')
    return string
3f52df574bf3c1676b67f2affac5287488982c1c
697,720
def binary_search(items, desired_item, start=0, end=None):
    """Standard binary search.

    Parameters:
        items: a sorted list
        desired_item: the single value we are looking for a match on
        start: int value representing the index position where the search section begins
        end: end boundary of the search section; the search fails when end == start

    Returns:
        None: only returned if the desired_item is not found in items
        pos: the index position of desired_item if found
    """
    if end is None:
        end = len(items)
    if start == end:
        return None
        # raise ValueError("%s was not found in the list." % desired_item)
    pos = (end - start) // 2 + start
    if desired_item == items[pos]:
        return pos
    elif desired_item > items[pos]:
        return binary_search(items, desired_item, start=(pos + 1), end=end)
    else:  # desired_item < items[pos]
        return binary_search(items, desired_item, start=start, end=pos)
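Illustrative calls (not part of the record):

binary_search([1, 3, 5, 7, 9], 7)  # 3
binary_search([1, 3, 5, 7, 9], 4)  # None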
7adb942e73e5c3945190c7d88882763f9f7b4f08
697,721
def max_intensities(burst_list):
    """Removes all but the max intensity for each burst interval."""
    max_bursts = [{(j, k): i for i, j, k in x} for x in burst_list]
    return max_bursts
f29d7cb955904981fee31b6d6a98bf193dbc9d69
697,722
def probabilistic_mapping(codon, productions):
    """Probabilistic mapping, using a PCFG."""
    idx_selected_rule = len(productions) - 1
    prob_aux = 0.0
    for i in range(len(productions)):
        prob_aux += productions[i][1]
        if codon < prob_aux:
            idx_selected_rule = i
            break
    return idx_selected_rule
7dd3e5cd78cd6b32d59c1d62f761e8aeb1db1160
697,723
import importlib


def get_class(m):
    """Import class from string.

    :param m: string or class to be imported
    :type m: str or class
    :rtype: class

    >>> get_class('microtc.textmodel.TextModel')
    <class 'microtc.textmodel.TextModel'>
    """
    if isinstance(m, str):
        a = m.split('.')
        p = importlib.import_module('.'.join(a[:-1]))
        return getattr(p, a[-1])
    return m
9a8c55524a47224a4e916191d66428aad2bc08d5
697,724
import math


def vec3(a, b, norm=1.0):
    """
    x, y, z <- vec3(a, b, norm=1.0)

    Returns the vector from a to b, scaled to length norm.
    """
    dx = b[0] - a[0]
    dy = b[1] - a[1]
    dz = b[2] - a[2]
    l = norm / math.sqrt(dx*dx + dy*dy + dz*dz)
    return [dx*l, dy*l, dz*l]
db0f53cad9c472dd903cb18aabd1a6fad275b5bd
697,726
import math


def is_nan(value):
    """Identify "nan" values, including those coming from empty cells."""
    try:
        return math.isnan(float(value))
    # TypeError added here to also cover non-numeric types such as None,
    # which float() rejects with TypeError rather than ValueError
    except (ValueError, TypeError):
        return False
a76abc6c047461786109d84cb002867eeec77888
697,727
def path_to_url(path):
    """Convert a system path to a URL."""
    return '/'.join(path.split('\\'))
79bfd1715420002371fe4201863d736bf9e3b2bf
697,728
def _use_reasonable_speed(preset, frame_count):
    """Return a reasonable speed parameter for the given animation length."""
    return preset.settings.get("speed", 0.25) * (frame_count / 30.0)
9327131fbe8f55ba1ee5c1ccc932132ad3d3162a
697,729
def blend_color_dodge(cb: float, cs: float) -> float:
    """Blend mode 'dodge'."""
    if cb == 0:
        return 0
    elif cs == 1:
        return 1
    else:
        return min(1, cb / (1 - cs))
5a96383ce6f71aca42639c7ac4962ea74ecd02c6
697,730
def radix_sort(array):
    """
    Radix Sort

    Complexity: O((N + B) * log_B(max)), where B is the base used to represent
    numbers and max is the maximum element of the input array. We basically
    try to lower the complexity of counting sort. Even though it seems good,
    it's good in specific cases only.
    """
    n = len(array)
    max_value = max(array)
    step = 1

    while max_value:
        temp = [0] * 10
        sorted_array = [0] * n

        for i in range(n):
            index = array[i] // step
            temp[index % 10] += 1

        for i in range(1, 10):
            temp[i] += temp[i-1]

        # By finding the partial sums and passing through the
        # initial array from the end to the beginning we can know
        # each time how many values exist with the same or lower digit.
        # For example, if temp = [1, 1, 0, 1, 0, 1, 0, 0, 0, 0]
        # then it will become [1, 2, 2, 3, 3, 4, 4, 4, 4, 4].
        # If the array is [3, 11, 10, 5] then in the first pass
        # (first digit), when we start from number 5 we will
        # have 5 mod 10 = 5, temp[5] = 4, which means that there
        # were 3 numbers before the number 5 with a smaller value
        # for their first digit.
        # So, this number will go to the 4 - 1 = 3rd position
        for i in range(n-1, -1, -1):
            index = array[i] // step
            sorted_array[temp[index % 10] - 1] = array[i]
            temp[index % 10] -= 1

        for i in range(n):
            array[i] = sorted_array[i]

        max_value //= 10
        step *= 10
    return array
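A quick check with a common example input:

radix_sort([170, 45, 75, 90, 802, 24, 2, 66])  # [2, 24, 45, 66, 75, 90, 170, 802]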
2ca57406f0d0190ce8849c50e1ffcadf382db328
697,731
def sphere(x):
    """Minimum at 0."""
    return x.dot(x)
67c24f0b5fd711ec9618284f800b75d1db894938
697,732
def tens2rgb(t):
    """Tensor (HWC) to RGB, [0, 1] -> [0, 255]."""
    t = t.detach().cpu().numpy()
    return (t * 255).astype('uint8')
63b8977b69517ed3f5ab5aba006e7d121dc43d53
697,733
def create_imagelist(pd_dataframe, img_dir, ext):
    """
    Creates a list of image paths of the thumbnails.

    Keyword arguments:
    pd_dataframe -- pandas DataFrame object containing the stub info
    img_dir -- location of the image files
    ext -- File extension of the thumbnail
    """
    imgs = []  # list of image paths
    fields = pd_dataframe.Field.unique()
    # Loop over fields
    for field_no, field in enumerate(fields):
        # fetch the index of particles on the field
        particles = pd_dataframe[
            pd_dataframe.loc[:, 'Field'] == field
        ].Part.values
        # Loop over particles
        for particle_no, particle in enumerate(particles):
            imgs.append('thumbnails/'
                        + '{:0>4d}'.format(int(field))
                        + '{:0>4d}'.format(int(particle_no) + 1)
                        + ext)
    return imgs
1d41b1b02eab9123624c765b1529739b387e3e9c
697,734
import sys
import logging
import json


def read_json():
    """Function that reads json from stdin and returns json."""
    try:
        with sys.stdin as f:
            logging.info('\nReading input')
            json_file = json.loads(f.read())
            return json_file
    except AttributeError as e:
        logging.error('\nFailed to read the input')
        raise e
    except json.JSONDecodeError as e:
        logging.error('\nInvalid JSON')
        raise e
3c6ecb83a3f1d4491532bb7b27c12e6687de0514
697,735
def drop(n, xs):
    """
    drop :: Int -> [a] -> [a]

    drop(n, xs) returns the suffix of xs after the first n elements,
    or [] if n > length xs
    """
    return xs[n:]
e9261686022f5419edade3b47e82c68bd52b5cd8
697,737
def backwards_search(p, cl, rank, total_length):
    """Perform backwards search using pattern p, count lookup cl and rank
    structure rank.

    Reference: http://alexbowe.com/fm-index/
    """
    start = 0
    end = total_length - 1
    for i in range(len(p)-1, -1, -1):
        if end < start:
            break
        char = p[i]
        count_for_char = cl[char]
        start = count_for_char + rank.rank(start-1, char)
        end = count_for_char + rank.rank(end, char) - 1
    return start, end
f8e87dc65b3838d99b7162e1c2ff0eb12b815180
697,738
def formatIntervalHours(cHours):
    """Format an hours interval into a nice 1w 2d 1h string."""
    # Simple special cases.
    if cHours < 24:
        return '%sh' % (cHours,)

    # Generic and a bit slower.
    # Note: integer division (//) so the %s formatting doesn't emit floats,
    # and days are appended with += so they don't overwrite the weeks part.
    cWeeks = cHours // (7 * 24)
    cHours %= 7 * 24
    cDays = cHours // 24
    cHours %= 24
    sRet = ''
    if cWeeks > 0:
        sRet = '%sw ' % (cWeeks,)
    if cDays > 0:
        sRet += '%sd ' % (cDays,)
    if cHours > 0:
        sRet += '%sh ' % (cHours,)
    assert len(sRet) > 0
    assert sRet[-1] == ' '
    return sRet[:-1]
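With the integer-division fixes above, an illustrative run:

formatIntervalHours(5)    # '5h'
formatIntervalHours(193)  # '1w 1d 1h'  (168 + 24 + 1)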
d7c9be3110eb1ecbfb57ae51854d2e576519ced3
697,739
def swap_http_https(url):
    """Get the url with the other of http/https to start."""
    for (one, other) in [("https:", "http:"), ("http:", "https:")]:
        if url.startswith(one):
            return other + url[len(one):]
    raise ValueError("URL doesn't start with http: or https: ({0})".format(url))
b0af8770ef31f73afa5c95c6bc2a17488eb3d1e2
697,740
def list_to_dict(lst):
    """
    Takes a list and turns it into a dict, pairing each even-index element
    (key) with the element that follows it (value).

    :param lst: the list that will be turned into a dict
    """
    if len(lst) % 2 == 0:
        keys = []
        values = []
        for i in range(len(lst)):
            if i % 2 == 0:
                keys.append(lst[i])
            else:
                values.append(lst[i])
        final_dict = dict(zip(keys, values))
        return final_dict
    else:
        print("The list needs to have an even amount of elements")
2c84950f68cb2e8180511f010a558f370d3ac1ac
697,742
def Liquid_Enthalpy_Ref_Liquid(T, Cn_l, T_ref, H_ref):
    """Enthalpy (kJ/kmol) disregarding pressure and assuming the specified phase."""
    return H_ref + Cn_l.T_dependent_property_integral(T_ref, T)
5ac806600e59777ee64cc1564677d5d5f38d6540
697,743
import copy


def sortindexes(self):
    """Sorts a list of indexes in an ascending order with no regard to parity."""
    selfcopy = copy.deepcopy(self)
    alwaystrue = 1
    while alwaystrue:
        done = 1
        for nindexa in range(len(selfcopy)):
            if not done:
                break
            indexa = selfcopy[nindexa]
            for nindexb in range(len(selfcopy)):
                if not done:
                    break
                if nindexb <= nindexa:
                    continue
                indexb = selfcopy[nindexb]
                if indexa.isgreaterthan(indexb):
                    swap = copy.deepcopy(indexa)
                    selfcopy[nindexa] = copy.deepcopy(indexb)
                    selfcopy[nindexb] = swap
                    done = 0
        if done:
            return selfcopy
f791bfe39130645ad2e33ef54c3a8c25e5964998
697,744
def attachURI(metaDict, acc, con, obj):
    """Add URI to dict as `label`."""
    if obj != "" and obj is not None:
        uri = '/'.join(['', acc, con, obj])
    elif con != "" and con is not None:
        uri = '/'.join(['', acc, con])
    else:
        uri = '/' + acc
    return {uri: metaDict}
a4b584a041fc9c02b6bdfed9ba61eb3029cc34d2
697,745
import requests
import json


def get_records_from_imoox(endpoint: str) -> dict:
    """Get the response from imoox."""
    response = requests.get(endpoint)
    return json.loads(response.text.encode("utf-8"))
8ec1b2aa62341e2c126a9d42a212d2b592385a64
697,746
def get_both_filenames(filename):
    """Get a list of both filenames for FUV data.

    Regardless if rootname_corrtag_a.fits or rootname_corrtag_b.fits is
    passed in, both will be returned in a list.

    Parameters
    ----------
    filename : str
        full path to COS file

    Returns
    -------
    files : tuple
        rootname_corrtag_a.fits, rootname_corrtag_b.fits
    """
    if '_a.fits' in filename:
        other_filename = filename.replace('_a.fits', '_b.fits')
    elif '_b.fits' in filename:
        other_filename = filename.replace('_b.fits', '_a.fits')
    else:
        raise ValueError("filename {} doesn't match FUV convention".format(filename))

    filename_list = [filename, other_filename]
    filename_list.sort()

    return (filename_list[0], filename_list[1])
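Illustrative call, using a literal 'rootname' placeholder:

get_both_filenames('data/rootname_corrtag_b.fits')
# ('data/rootname_corrtag_a.fits', 'data/rootname_corrtag_b.fits')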
76449b6f2719d5c6b7ee6dd01f730b9193e368da
697,747
def get_data(data_creator):
    """
    Fetch the test data using the data_creator argument.

    Args:
        data_creator: data source, e.g. train() or test()

    Return:
        result: a python dict containing the test data (image) and labels (label)
    """
    data_image = []
    data_label = []
    for item in data_creator():
        data_image.append((item[0],))
        data_label.append(item[1])
    result = {
        "image": data_image,
        "label": data_label
    }
    return result
03023a6f90ede92da05efee12ff17601b96fb2be
697,748
def _get_greensf_group_name(hdffile):
    """
    Return the name of the group containing the Green's function elements.

    :param hdffile: h5py.File of the greensf.hdf file
    :returns: str of the group name containing the Green's Function elements
    """
    if '/GreensFunctionElements' in hdffile:
        return 'GreensFunctionElements'
    elif '/Hubbard1Elements' in hdffile:
        return 'Hubbard1Elements'
fa5a8c65cad63b3053d8b55af95c9c4547493793
697,749