content
stringlengths
35
416k
sha1
stringlengths
40
40
id
int64
0
710k
def exist_in_list(word_list, lst, from_begin):
    """Count occurrences of words from ``word_list`` among all contiguous
    substrings of the character list ``lst``.

    :param word_list: list of strings to look for
    :param lst: list of single characters
    :param from_begin: boolean; when False the character list is reversed first
    :return: dict mapping each found word to its occurrence count
    """
    # O(1) membership per probe instead of scanning word_list each time.
    words = set(word_list)
    if not from_begin:
        lst = lst[::-1]
    result = {}
    # Enumerate every contiguous substring lst[i..j] (inclusive).
    for i in range(len(lst)):
        for j in range(i, len(lst)):
            sub = ''.join(lst[i:j + 1])
            if sub in words:
                result[sub] = result.get(sub, 0) + 1
    return result
5a6fb9a5f2732c57ed2928d4f64f1e50076a29b2
39,750
def predict_step(model, inputs):
    """Run a single inference step: apply ``model`` to ``inputs`` in eval mode."""
    return model(inputs, train=False)
0ff4ec680bdd17eb11a88eee221c4d30a9f3e9a4
39,751
def _get_current_engagement(d, assignment): """ helper for WTAP solver Calculates the current engagement :param d: device :param assignment: class Graph. :return: """ if d in assignment: for d, t, v in assignment.edges(from_node=d): return t return None
18c9f310080a9ffe0d072590f516018442440b87
39,753
def strip_chr(chr):
    """Remove a leading 'chr' prefix from a chromosome name, if present.

    Args:
        chr (str): The chromosome.

    Returns:
        str: The chromosome without a 'chr' prefix.
    """
    if chr.startswith('chr'):
        return chr[3:]
    return chr
39a833dda595140a38226ce5b52dd3b8fd97337d
39,757
def recording_in_db(rec_id, con):
    """Return True when recording ``rec_id`` already exists in the database.

    Parameters
    ----------
    rec_id : int
        ID of the recording.
    con : SQLite3 connection
        Connection to an open DB with a ``scores`` table.

    Returns
    -------
    bool
        True if ``rec_id`` is already an ID in the database, False otherwise.
    """
    cursor = con.cursor()
    hit = cursor.execute(
        "SELECT id from scores where id = ?", (rec_id,)
    ).fetchone()
    return hit is not None
8954ec39899e0d68483933cba66a2923c4b6d4f0
39,759
def centre_table(t):
    """Centre cells in a Markdown table by rewriting its separator row."""
    rows = t.split("\n")
    n_cols = rows[0].count("|") - 1
    centred_separator = "|".join([":-:"] * n_cols)
    return t.replace(rows[1], centred_separator)
e75daec6ba10bb7a361bb1fcd588673b6ad52336
39,760
def _format_names(name: str) -> str: """Format dictionary key names to be human friendly. Args: name (str): The Unicode type name. Returns: str: The formatted Unicode type name. """ return name[0].upper() + name[1:].replace("_", " ")
5256bfad00079f658ea2a8a7d95f4fbea39cb6a1
39,762
import os
import subprocess


def default_branch(repository, remote='origin'):
    """Detect the default branch of a local git repository.

    Reads ``.git/refs/remotes/<remote>/HEAD`` (asking git to create it when
    missing); the ref has the form 'ref: refs/remotes/origin/main', so the
    branch name is the last '/'-separated part.
    """
    head_path = os.path.join(repository, f'.git/refs/remotes/{remote}/HEAD')
    if not os.path.exists(head_path):
        # Ask git to (re)create the symbolic HEAD reference.
        subprocess.run(
            f'git remote set-head {remote} --auto'.split(), cwd=repository)
    with open(head_path) as handle:
        content = handle.read()
    return content.strip().split('/')[-1]
643d0e71fda055c4ba711268989ed3de7f3be893
39,763
import torch


def check_torch_version_for_proj_in_lstm():
    """Report whether torch.nn.LSTM supports the ``proj_size`` parameter.

    The parameter exists from torch 1.8.0, so this returns True for
    torch >= 1.8 and False otherwise.
    """
    major, minor, _micro = torch.__version__.split(".")
    supported = False
    if int(major) > 1 or (int(major) == 1 and int(minor) >= 8):
        supported = True
    return supported
5bd4e4742c1d6aae69ed580e046d717bc43b1574
39,764
import requests


def requests_patch(**kwargs):
    """Patch for any requests method.

    Returns a fake request function; calling it with a URL produces a
    ``Response`` whose attributes come from ``kwargs``.  Each value is
    resolved in order: as an iterator (``next``), as a callable taking the
    URL, or used verbatim.
    """
    def req(url):
        if not url.startswith('http'):
            raise requests.exceptions.MissingSchema

        class Response(object):
            def __init__(self, **attrs):
                for key, value in attrs.items():
                    try:
                        value = next(value)
                    except TypeError:
                        try:
                            value = value(url)
                        except TypeError:
                            pass
                    setattr(self, key, value)

        return Response(**kwargs)

    return req
0d84688ab10b01d7475351dc2518789dca04e33e
39,766
import time


def yearMonthDay():
    """:returns: the current local date formatted as YYYYMMDD, e.g. 20200403"""
    now = time.localtime()
    return time.strftime("%Y%m%d", now)
cdd80f2c358bc2e1064c15fbbdfd25c386980238
39,768
def map_lbls_id(skills, label, id, lookup):
    """Map the skills labels to the skills ids.

    Args:
        skills (df): skills dataframe
        label (str): col to create, either 'class_lbl' or 'subclass_lbl'
        id (str): col to use for mapping, either 'class_id' or 'subclass_id'
        lookup (dict): to use for mapping - keys of ids, values of labels

    Returns:
        df: skills dataframe with additional class label column
    """
    skills[label] = skills[id].astype(str).map(lookup)
    # BUG FIX: the original compared ``label`` (e.g. 'class_lbl') against the
    # id column names, so these dtype normalisations could never run.
    if id == "class_id":
        skills[id] = skills[id].astype(int)
    if id == "subclass_id":
        skills[id] = skills[id].astype(float)
    return skills
14751efb08aa0cbee4844a25c0119fb8e29ffe2e
39,769
import math


def rad2deg(rad):
    """Convert an angle from radians to degrees."""
    return 180 * rad / math.pi
ee576c3c424e77690e40dc3c21623701737a8327
39,770
def num_or_string(v, d=None):
    """Loads a value from MO into either an int or string value.

    Tries int first, then float (treating ',' as a decimal point, with a
    float zero collapsed to int 0); falls back to ``d`` when truthy,
    otherwise the original value.
    """
    try:
        return int(str(v))
    except (ValueError, TypeError):
        pass
    try:
        number = float(str(v).replace(',', '.'))
        return 0 if number == 0 else number
    except (ValueError, TypeError):
        return d or v
5dff9592e81dcdaa59858407fe8426c8f606e338
39,771
import typing
import struct


def unpackMIMEFieldImplHeap(heap: bytes, start: int, unused_http: object) -> typing.List[int]:
    """Unpack a MIMEFieldImpl from the given raw byte heap.

    Returns a list of the values of the data members of the stored object.
    """
    fmt = "4xL4II4i?PIP" + "3PhH4s" * 16
    end = start + struct.calcsize(fmt)
    return list(struct.unpack(fmt, heap[start:end]))
61ec683ca5b112fb1028853adb0b8ecfcdd12f35
39,773
def unprefix(prefix, d, all=False):
    """Return a new dict with ``prefix`` stripped from matching keys.

    If ``all`` is ``False`` (default) keys without the prefix are dropped;
    otherwise they are kept unchanged alongside the stripped ones.
    """
    result = dict(d) if all else {}
    cut = len(prefix)
    for key, value in d.items():
        if key.startswith(prefix):
            result[key[cut:]] = value
    return result
81fc47898f9bde8c42b107b5838a3af6bfe3d7f5
39,774
def test_collision(snake_box, snake_pixels):
    """Function to detect box or snake collision.

    snake_box: presumably a curses window (has getmaxyx) — TODO confirm.
    snake_pixels: sequence of (x, y) pixels; the head is the last entry.
    Returns True on a wall hit or self-intersection, False otherwise.
    """
    # getmaxyx() returns (rows, cols) == (height, width).
    height, width = snake_box.getmaxyx()
    # If collision with wall, only head would collide.
    head = snake_pixels[-1]
    if (head[0] <= 0 or head[0] >= width):
        return True
    if (head[1] <= 0 or head[1] >= height):
        return True
    # Any duplicate pixel means the snake crossed itself.
    return len(snake_pixels) != len(set(snake_pixels))
f035b44b8ad4664bad977243ceab38ea058e6d3b
39,775
from pathlib import Path
from datetime import datetime


def get_recording_start(fname):
    """Parse the recording start date from the EMG filename.

    The stem is expected to look like '<subject> <Y-m-d_H-M-S>'; only the
    date part is used (the unused ``subject`` local was removed).
    """
    parts = Path(fname).stem.split(" ")
    return datetime.strptime(parts[1], "%Y-%m-%d_%H-%M-%S")
598c76228496e6e5b46888ea30b42eb908194220
39,779
def max_simple_divider(simple_numbers, divs):
    """Return the greatest prime divisor.

    :param simple_numbers: list of primes
    :param divs: list of divisors
    :return: int
    """
    prime_divs = [x for x in divs if x in simple_numbers]
    return max(prime_divs)
fc09cf95e5f699377168732338f0a46b4e6b55ce
39,780
import struct def _convert_filetime_to_timestamp(filetime): """ Windows returns times as 64-bit unsigned longs that are the number of hundreds of nanoseconds since Jan 1 1601. This converts it to a datetime object. :param filetime: A FILETIME struct object :return: An integer unix timestamp """ hundreds_nano_seconds = struct.unpack( b'>Q', struct.pack( b'>LL', filetime.dwHighDateTime, filetime.dwLowDateTime ) )[0] seconds_since_1601 = hundreds_nano_seconds / 10000000 return seconds_since_1601 - 11644473600
4e4f21b1f75ab367e66a136a58dc615c1d40cc5e
39,781
from typing import Any


def is_list(input_: Any) -> bool:
    """Return True when ``input_`` is a ``list``, else False.

    :param input_: Any arbitrary input
    :rtype: bool
    """
    return isinstance(input_, list)
12665e2e8bdc6c001860daa7b2513d52c1564b9c
39,782
async def redirect_to_project() -> str:
    """Redirects the root path to the project website."""
    url = 'https://ibm.github.io/arcade'
    return url
d318b5ecc4896a0ae1d990b354c1e11fa0c30a1e
39,783
import re


def acquire_star_rating(soup):
    """Take a BeautifulSoup content of a book page.

    Return the rating of the book: the second CSS class of the first
    element whose class attribute starts with 'star'.
    """
    element = soup.find(class_=re.compile("^star"))
    return element['class'][1]
f25ab2515a8c96957eb6b5154295cdcb811eae4f
39,784
def get_region(b):
    """Tries to get the bucket region from Location.LocationConstraint.

    Special cases:
        LocationConstraint EU defaults to eu-west-1
        LocationConstraint null defaults to us-east-1

    Args:
        b (object): A bucket object

    Returns:
        string: an aws region string
    """
    constraint = b.get('Location', {}).get('LocationConstraint')
    if constraint is None:
        return 'us-east-1'
    if constraint == 'EU':
        return 'eu-west-1'
    return constraint
8a773d20348b3fc01f7bd426765546069f79f9d8
39,785
def safeFilename(filename):
    """Return a filename with only file safe characters"""
    validChars = '-_.()abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789'
    keep = set(validChars)
    return ''.join(filter(keep.__contains__, filename))
450f392a1222741782c2b9c4d63d5757b534d6ed
39,786
def dist(graph, i, j):
    """Return the weight of the undirected edge between nodes i and j.

    Indices are taken modulo 8; the upper-triangular weights live in
    graph[min][max - min - 1], and dist(x, x) is 0.

    Paramters
    ---------
    i : int
    j : int
    graph : Dict
    """
    try:
        i, j = i % 8, j % 8
        if i == j:
            return 0
        lo, hi = (i, j) if i < j else (j, i)
        return graph[lo][hi - lo - 1]
    except:
        print(i, j)
        exit()
533ad111def2f0fcf8098e143ac4dd0976a82c08
39,787
def sorter(entries):
    """Order entries by descending date, ties broken alphanumerically by name.

    Relies on sort stability: the alphabetical pass runs first, then the
    (stable) date pass preserves name order within equal dates.
    """
    by_name = sorted(entries, key=lambda entry: entry["name"])
    return sorted(by_name, key=lambda entry: entry["date"], reverse=True)
697981b6b00f05e208fd2487c9ce56deef285d86
39,788
def tmpdirpath(tmpdir):
    """Convenience fixture to get the path to a temporary directory."""
    parent = tmpdir.dirpath()
    return str(parent)
4adf46e970fcdd00250af4d03db1b6b632bb3260
39,790
def shapeFromHdr(hdr, verbose=0):
    """Return a "smart" shape considering numTimes, numWavelength and
    hdr.ImgSequence.

    If ``verbose``, prints the axis letters, e.g. ``w,t,z,y,x`` or ``z,y,x``.

    FIX: the two zero-count warnings were Python 2 ``print`` statements,
    which are syntax errors under Python 3; they are now function calls.
    """
    zOrder = hdr.ImgSequence  # Image sequence. 0=ZTW, 1=WZT, 2=ZWT.
    nt, nw = hdr.NumTimes, hdr.NumWaves
    nx, ny, nsecs = hdr.Num
    if nt == 0:  # 20051213(ref."other's" MRC)
        print(" ** NumTimes is zero - I assume 1.")
        nt = 1
    if nw == 0:  # 20051213(ref."other's" MRC)
        print(" ** NumWaves is zero - I assume 1.")
        nw = 1
    nz = int(nsecs / nt / nw)
    if nt == nw == 1:
        shape = (nz, ny, nx)
        orderLetters = "zyx"
    elif nz == 1 == nw:
        shape = (nt, ny, nx)
        orderLetters = "tyx"
    elif nt == 1 or nw == 1:
        # Exactly one of nt/nw is > 1; pick it as the leading axis.
        if zOrder == 0 or zOrder == 2:
            nn = nt
            if nt == 1:
                nn = nw
                orderLetters = "wyx"
            else:
                orderLetters = "tyx"
            shape = (nn, nz, ny, nx)
        else:  # zOrder == 1
            if nt == 1:
                shape = (nz, nw, ny, nx)
                orderLetters = "zwyx"
            else:
                shape = (nt, nz, ny, nx)
                orderLetters = "tzyx"
    else:  # both nt and nw > 1
        if zOrder == 0:
            shape = (nw, nt, nz, ny, nx)
            orderLetters = "wtzyx"
        elif zOrder == 1:
            shape = (nt, nz, nw, ny, nx)
            orderLetters = "tzwyx"
        else:  # zOrder == 2
            shape = (nt, nw, nz, ny, nx)
            orderLetters = "twzyx"
    if verbose:
        print(",".join(orderLetters))
    return shape
b3a446e48da6c72c48489c09299b8e8d2a01be59
39,791
import logging


def dict_msg_processor(logger: logging.Logger, name: str, event_dict: dict):
    """Defer string rendering of the event dict to the stdlib logging Formatter.

    Useful when introducing structlog into an application where part of the
    logs are already written via the ``logging`` library.  This processor
    replaces structlog.dev.ConsoleRenderer / structlog.processors.JSONRenderer:
    it tells structlog the event data is fully formed and simply hands it on.
    It must ALWAYS be the LAST processor in the chain.

    (Docstring translated from Russian.)
    """
    return (event_dict,), {}
6050cb604e7289976d10a28869d26a18ff35709c
39,792
def bytes_to_oid(data):
    """Convert bytes to OID str"""
    # BUG FIX: under Python 3 iterating ``bytes`` already yields ints, so the
    # original ``ord(x)`` raised TypeError; accept both str and bytes input.
    values = [x if isinstance(x, int) else ord(x) for x in data]
    first_val = values.pop(0)
    res = []
    # The first byte encodes the first two arcs as 40 * arc1 + arc2.
    res += divmod(first_val, 40)
    while values:
        val = values.pop(0)
        if val > 0x7f:
            # Multi-byte (base-128) component: high bit set on all bytes
            # except the last.
            huges = [val]
            while True:
                next_val = values.pop(0)
                huges.append(next_val)
                if next_val < 0x80:
                    break
            huge = 0
            for i, huge_byte in enumerate(huges):
                huge += (huge_byte & 0x7f) << (7 * (len(huges) - i - 1))
            res.append(huge)
        else:
            res.append(val)
    return '.'.join(str(x) for x in res)
252c5d2fba14f883cb3d1faf55fa993241b0b043
39,793
def _compute_pmf(gen_graph_distr): """ Compute the probability mass function (PMF) of graphs. It can be seen as a normalization between 0 and 1, where each count is converted into a probability. :gen_graph_distr: The distribution of generated graphs. It is a dictionary where, for each entry (key, value), the key is a graph (in graph6 format) and the value is the count of this graph. Make sure to generate enough graphs to get good results. :return: The probability mass function (PMF) of graphs. It is a dictionary where, for each entry (key, value), the key is a graph (in graph6 format) and the value is probability to get this graph. """ # Compute the sum of all counts. count_sum = sum(gen_graph_distr.values()) # Compute the PMF. pmf = {graph: (count / count_sum) for graph, count in gen_graph_distr.items()} return pmf
d2ec2e50387464782910e52e4ea464752c20b9f1
39,794
def remove_duplicate_QUBO(QUBO_storage):
    """De-duplicate the QUBO storage list, keeping first occurrences in order.

    Parameters
    ----------
    QUBO_storage : list
        List of QUBOs (each QUBO is a dictionary).

    Returns
    -------
    list
        De-duplicated list.
    """
    unique = []
    for qubo in QUBO_storage:
        if qubo in unique:
            continue
        unique.append(qubo)
    return unique
7c0ece71be1def18de60fb1d72f69379dc0cf3a8
39,795
import os
import textwrap


def qsub_prep(script_path, cpus, mem, smiles_idx, run_nr, smiles, s_factor,
              time_ps, k_push, alp, random_seed, with_products=False):
    """Write a qsub/sbatch submission script for SLURM.

    Renders a shell template (job name, resources, the python command and
    result-copy steps) and writes it to '<smiles_idx>_run<run_nr>_qsub.tmp'
    in the current directory.

    Returns the name of the written file.
    """
    pwd = os.getcwd()
    # Template placeholders: 0=script_path 1=cpus 2=mem 3=smiles_idx 4=run_nr
    # 5=smiles 6=s_factor 7=time_ps 8=k_push 9=alp 10=pwd 11=random_seed
    # 12=with_products
    qsub_file = """\
#!/bin/sh
#SBATCH --job-name={3}_{4}
#SBATCH --nodes=1
#SBATCH --cpus-per-task={1}
#SBATCH --mem={2}
#SBATCH --error={10}/{3}/run{4}.stderr
#SBATCH --output={10}/{3}/run{4}.stdout
#SBATCH --ntasks=1
#SBATCH --time=8:00:00
#SBATCH --partition=kemi1
#SBATCH --no-requeue

#mkdir /scratch/$SLURM_JOB_ID
#cp ../initial_structures.xyz /scratch/$SLURM_JOB_ID
cd /scratch/$SLURM_JOB_ID

#run python code
({0} {3} {4} '{5}' {6} {7} {8} {9} {11} {12})

#cp output back
cp run{4}/dataframe.pkl {10}/{3}/run{4}.pkl
tar -zcvf structure_database.tar.gz run{4}/structure_database
tar -zcvf run{4}.tar.gz run{4}
cp run{4}.tar.gz {10}/{3}/run{4}.tar.gz
cp structure_database.tar.gz {10}/{3}/run{4}_database.tar.gz
rm {10}/{3}_run{4}_qsub.tmp
#rm -r /scratch/$SLURM_JOB_ID
""".format(script_path, cpus, mem, smiles_idx, run_nr, smiles, s_factor,
           time_ps, k_push, alp, pwd, random_seed, with_products)
    with open(str(smiles_idx) + '_run' + str(run_nr) + "_qsub.tmp", "w") as qsub:
        qsub.write(textwrap.dedent(qsub_file))
    return str(smiles_idx) + '_run' + str(run_nr) + "_qsub.tmp"
b22254d334a1671288d112fe8bbb128dbe33fc65
39,796
def drop_keys(task_run_df, keys):
    """Drop keys from the info fields of a task run dataframe.

    Collects every key present in the row-wise 'info' dicts, removes the
    ones listed in ``keys``, and returns the dataframe restricted to the
    remaining names as columns.
    """
    seen = set()
    for idx in range(len(task_run_df)):
        seen.update(task_run_df.iloc[idx]['info'].keys())
    kept = [key for key in seen if key not in keys]
    return task_run_df[kept]
be1c07e8abbfbd36a22800a13b1a88d1af775275
39,797
import os
import re


def get_dir_files(path: str, mode='F', absolute=False, filter_re=""):
    """List files/directories under ``path``.  (Docstring translated.)

    :param path: directory to scan
    :arg mode: 'F' files of the current directory, 'D' directories,
        'FD' files and directories; 'AF' files of the whole subtree,
        'AD' directories of the subtree, 'AFD' both
    :arg absolute: whether to return full paths (only applied to files)
    :arg filter_re: regular-expression filter (matched with ``re.match``)

    NOTE(review): for mode 'F' with absolute=False, ``aa += files`` still
    runs for every walked subdirectory — presumably only the top level was
    intended; confirm against callers.
    """
    if not os.path.exists(path):
        raise Exception("文件目录不存在:" + path)
    if not os.path.isdir(path):
        raise Exception("不是目录:" + path)
    aa = []
    if mode == "F" or mode == "D" or mode == "FD":
        # topdown=False still visits every subdirectory of ``path``.
        for root, dirs, files in os.walk(top=path, topdown=False):
            if mode == "F":
                if absolute:
                    ff = []
                    for f in files:
                        # Only top-level files pass the root == path check.
                        if root == path and re.match(filter_re, os.path.join(root, f)) is not None:
                            ff.append(os.path.join(root, f))
                    files = ff
                aa += files
            if mode == "D":
                if os.path.isdir(root) and re.match(filter_re, root) is not None:
                    aa.append(root)
            if mode == "FD" and re.match(filter_re, root) is not None:
                aa.append(root)
        return aa
    if mode == "AF" or mode == "AD" or mode == "AFD":
        for root, dirs, files in os.walk(top=path, topdown=True):
            if mode == "AF":
                if absolute:
                    ff = []
                    for f in files:
                        if re.match(filter_re, os.path.join(root, f)) is not None:
                            ff.append(os.path.join(root, f))
                    files = ff
                aa += files
            if mode == "AD":
                if os.path.isdir(root):
                    if re.match(filter_re, root) is not None:
                        aa.append(root)
            if mode == "AFD":
                if re.match(filter_re, root) is not None:
                    aa.append(root)
        return aa
dc20020a683b6acbb72415bd709c07686f83a060
39,798
def add_bins_col_to_rank_df(df_feature,
                            n_bins,
                            bin_no_col='bin_no',
                            item_rank_col='equity_rank',
                            max_rank_col='max_rank'
                            ):
    """Add a bin-number column computed from each row's rank.

    :param df_feature: pandas dataframe at feature/period level with ranks.
    :param n_bins: int, number of bins to split the equities into.
    :param bin_no_col: str, name of the bin number column to create.
    :param item_rank_col: str, individual item rank column name.
    :param max_rank_col: str, maximum possible rank column name.
    :return: pandas dataframe with the bin assignment column added.
    """
    ranks = df_feature[item_rank_col]
    top = df_feature[max_rank_col]
    # Integer bin index in 1..n_bins, proportional to rank position.
    df_feature[bin_no_col] = 1 + (n_bins * (ranks - 1) // top)
    return df_feature
380bec79d66d66cb3acd1e42de0edada76cc4024
39,799
import re import os def _parse_kvp_file(file_path, parent_test=None): """Parse details.txt and return True if successful""" test_info = None kvp = {} if parent_test is not None: test_info = parent_test.create_subtest('parse_kvp_file') line_format = re.compile("^([a-zA-Z0-9 ]+): +(.+)$") if not os.path.isfile(file_path): return kvp with open(file_path, "r") as file_handle: for line in file_handle: if len(line) <= 0: if test_info is not None: test_info.failure("Empty line in %s" % file_path) continue if line[0] == '#': # The line is a comment continue match = line_format.match(line) if match is None: if test_info is not None: test_info.failure("Invalid line: %s" % line) continue key = match.group(1) key = key.lower().replace(" ", "_") value = match.group(2) value = value.lower() value = value.strip() if key in kvp: if test_info is not None: test_info.failure("Duplicate key %s" % key) continue kvp[key] = value return kvp
40675f6bd12f06e2d309ada7f4a2b4ebafb7a871
39,800
def order(lst):
    """Return a new list with the original's data, sorted smallest to largest.

    FIX: the original selection sort mutated (and emptied) the caller's
    list, contradicting its own docstring, and ran in O(n^2); ``sorted``
    copies the data and sorts in O(n log n).
    """
    return sorted(lst)
10ff93076af242431dc0831b14fb68af5663b8a4
39,801
def get_concat_level_bits(i, n, mul):
    """Create a string combining the bits of the current mul.

    Combine the bits of the multiplication of the current variable (at mul)
    by the i-th index of the previous variable.

    Args:
        i: An integer, the index of the previous variable.
        n: An integer, the number of bits in the bitvectors.
        mul: An integer, the index of the nested mul we're at.

    Returns:
        The resulting concat string.
    """
    # Base term: the diagonal partial product, left-shifted i bits.
    if i > 0:
        acc = f"(concat m_{mul}_{i}_{i} #b{'0' * i})"
    else:
        acc = f"m_{mul}_0_0"
    # Fold the remaining partial products around the accumulator.
    for j in range(i + 1, n):
        acc = f"(concat m_{mul}_{i}_{j} {acc})"
    return acc
3e6516c570ea128a6c9d12bca82fb35d45a6686e
39,803
from typing import List
import tokenize


def fix_name(tokens: List[tokenize.TokenInfo]) -> List[tokenize.TokenInfo]:
    """
    Fix the name tokens in the given list of tokens.

    Merges runs of adjacent NAME/ERRORTOKEN tokens (no column gap between
    the previous token's end and the next token's start) into one NAME
    token, splicing the merged token over the run it replaces.
    """
    ignorable_types = [tokenize.COMMENT]
    i = 0
    while i < len(tokens):
        token = tokens[i]
        # NOTE(review): 62 is skipped alongside comments — presumably the
        # ENCODING token type on the targeted Python version; confirm.
        if token.type in ignorable_types or token.type == 62:
            i += 1
        elif token.type == tokenize.NAME:
            name = token.string
            start = i
            token_start = token.start
            i += 1
            token = tokens[i]
            # Absorb tokens while each touches the previous one
            # (previous end column == current start column).
            while (token.type == tokenize.NAME or token.type == tokenize.ERRORTOKEN) and tokens[i - 1].end[1] == token.start[1]:
                name += token.string
                i += 1
                token = tokens[i]
            new_token = tokenize.TokenInfo(tokenize.NAME, name, token_start, (token_start[0], token_start[1] + len(name)), token.line)
            # Splice: replace the whole absorbed run with the merged token.
            tokens = tokens[:start] + [new_token] + tokens[i:]
            i = start + 1
        else:
            i += 1
    return tokens
d880450869624b0d7526edd9562ab1fba7e0e87f
39,804
import re


def remove_tag_and_contents(s, tag=None, tags=None):
    """Strip the given HTML tag(s), including their contents, from ``s``.

    Examples:
    >>> remove_tag_and_contents('hi there')
    'hi there'
    >>> remove_tag_and_contents('<span class="foo">hi there</span>', tag='span')
    ''
    >>> remove_tag_and_contents('<p>Hi <br/>there</p>', tag='br')
    '<p>Hi there</p>'
    """
    if tag:
        tags = [tag]
    if not isinstance(tags, (list, tuple)):
        return s
    for t in tags:
        # Tries to match a normal tag structure
        s = re.sub(pattern=r'<{tag}.*?>.*?</{tag}>'.format(tag=t), repl='', string=s)
        # Match any hanging opening or closing versions
        s = re.sub(pattern=r'</{tag}[^>]*>'.format(tag=t), repl='', string=s)
        s = re.sub(pattern=r'<{tag}[^>]*/ *>'.format(tag=t), repl='', string=s)
    return s
9c6506b39ff6f926cf9b03f691bd1b4ecbed6c4a
39,805
def sprint_cmd_list(cmd_list):
    """Returns column of click-able commands from list."""
    try:
        lines = ['/' + cmd + '\n' for cmd in cmd_list]
        return ''.join(lines)
    except Exception as E:
        # Best-effort: report the problem and fall through to None.
        print(E)
2d029ee4e25c538982108756b7e3202d2aa481a8
39,806
import logging


def encoder_helper(df, category_lst, response=None):
    """Encode each categorical column as the churn proportion of its category
    (associated with cell 15 from the notebook).

    input:
        df: pandas dataframe with a numeric 'Churn' column
        category_lst: list of columns that contain categorical features
        response: string of response name [optional argument that could be
                  used for naming variables or index y column]

    output:
        df: pandas dataframe with the new proportion columns
        encoder_cols: list of the added column names
    """
    encoder_cols = []
    for col in category_lst:
        # FIX: select the 'Churn' column *before* aggregating; calling
        # .mean() on the whole groupby fails on non-numeric columns in
        # modern pandas and needlessly averages every column.
        temp_dict = df.groupby(col)["Churn"].mean().to_dict()
        new_col_name = col + "_Churn"
        if response:
            new_col_name = response
        df[new_col_name] = df[col].map(temp_dict)
        encoder_cols.append(new_col_name)
    logging.info("SUCCESS: Encoded categorical features into churn proportion columns")
    return df, encoder_cols
d74f120356e926b2af03f1316fe860870b64a0f5
39,807
def version():
    """Return server version.

    :return: server version
    :retype: str
    """
    server_version = '1.0'
    return server_version
d029be1e1b212ca5e4ccb9bde915957a08c0dcea
39,808
def not_empty(s):
    """Filter out empty strings: falsy values pass through unchanged,
    otherwise the stripped string is returned.  (Docstring translated.)
    """
    if not s:
        return s
    return s.strip()
044e0c86446beab808f649ffd4ae49f64c1ff63f
39,809
import copy


def prune_maps(net, subsample_indices, output_name):
    """
    In case not all the feature maps are used in feedforward connections, the
    unused ones can optionally be omitted from a model, if the model is not to
    include feedback or lateral connections.

    :param net:
    :param subsample_indices: result of subsample_maps; indices of maps that
        provide input to each connection
    :param output_name: name of layer used to feed output (not pruned)
    :return: deep copy of subsample_indices with indices condensed onto the
        kept (used) maps of each layer
    """
    # find which feature maps are used in output
    indices_by_layer = []
    for layer in net.layers:
        connections = net.find_outbounds(layer.name)
        all_indices_for_layer = []
        for connection in connections:
            conn_ind = net.find_connection_index(connection.pre.name, connection.post.name)
            all_indices_for_layer.extend(subsample_indices[conn_ind])
        # De-duplicate and sort so positions are stable for index() below.
        all_indices_for_layer = list(set(all_indices_for_layer))
        all_indices_for_layer.sort()
        indices_by_layer.append(all_indices_for_layer)
    new_subsample_indices = copy.deepcopy(subsample_indices)
    # discard unused maps and condense indices
    for i in range(len(net.layers)):
        layer = net.layers[i]
        if not layer.name == output_name and not layer.name == 'INPUT':
            connections = net.find_outbounds(layer.name)
            for connection in connections:
                ind = net.find_connection_index(connection.pre.name, connection.post.name)
                for j in range(len(new_subsample_indices[ind])):
                    # Remap each original map index to its position among the
                    # kept maps of this layer.
                    new_subsample_indices[ind][j] = indices_by_layer[i].index(subsample_indices[ind][j])
            # NOTE(review): old_m is assigned but never used afterwards.
            old_m = layer.m
            layer.m = len(indices_by_layer[i])
    return new_subsample_indices
67a0745a8d522c8ec157bfce906376d0edcc428e
39,810
def populateScoreDictionary(scoreLines):
    """Build a scoring dictionary from an imported BLOSUM62 matrix.

    Returns a dict keyed by (row_residue, col_residue) tuples holding the
    integer substitution score for each amino-acid pair.
    (Docstring translated from French.)
    """
    scoringDictionary = {}
    # First line holds the column headers (the amino-acid letters).
    seqList = scoreLines[0]
    seqList = seqList.split()
    i = 1
    j = 1
    # NOTE(review): both ranges stop at len(scoreLines) - 1 — presumably the
    # last line is a trailer/blank row; confirm against the input format.
    for i in range(1, len(scoreLines) - 1, 1):
        row = scoreLines[i]
        row = row.split()
        # row[0] is the row's residue letter; scores start at row[1].
        for matchValue in row[1:len(scoreLines) - 1]:
            scoringDictionary[seqList[i-1], seqList[j-1]] = int(matchValue)
            j += 1
        j = 1
    return scoringDictionary
357e8fa0d60f183603b2bced240a227eec5e7cdf
39,811
def integrate_euler_explicit(x_t, dx_dt, dt):
    """Explicit Euler integration step: x(t+1) = x(t) + dx/dt * dt.

    :param x_t: known value at timestep t
    :param dx_dt: derivative dx/dt
    :param dt: timestep
    :return: x(t+1); solution for the time t+1
    """
    return x_t + dx_dt * dt
862feb02512142da98929aedc97707853b41242a
39,812
import argparse


def construct_argparser():
    """Build the command-line parser (elimination ordering → tree decomposition)."""
    parser = argparse.ArgumentParser(
        description="Elimination ordering to tree decomposition")
    parser.add_argument("eo_filename", type=str,
                        help="Filename of input graph eo format (.eo)")
    parser.add_argument("cnf_filename", type=str,
                        help="Filename of the cnf graph (.cnf)")
    return parser
c259ddaefd7801ee73a31433d890cdec87738322
39,813
def kafka_consumer(kafka_consumer_factory):
    """Return a KafkaConsumer fixture"""
    consumer = kafka_consumer_factory()
    return consumer
e54d1013dd5020cc1b4459ed3b968ce02cd5523e
39,814
def has_attribute(t, key: str) -> bool:
    """
    Check if a callable has an attribute

    :param t: the callable
    :param key: the key, the attributes name
    :return: True if callable contains attribute, otherwise False
    """
    sentinel = object()
    return getattr(t, key, sentinel) is not sentinel
09e39d98bfdd5b2d24a8b7b71c74383bf33eb5b1
39,815
def assert_errors(result, expected_errors):
    """Assert that result errors match expected errors.

    Uses substring matching to correlate expected to actual errors.  Raises
    if any expected error is not matched or if any actual errors are found
    that were not matched by an expected error.

    This function has O(n**2) complexity on the number of errors.
    """
    actual_errors = list(result.errors)

    def claim(expected_error):
        # Remove the first actual error containing the expected substring.
        for index, actual_error in enumerate(actual_errors):
            if expected_error in actual_error:
                del actual_errors[index]
                return True
        return False

    missing_errors = [e for e in expected_errors if not claim(e)]
    report = []
    if missing_errors:
        report.append("missing expected errors:")
        report.extend(missing_errors)
    if actual_errors:
        if report:
            report.append("")
        report.append("unexpected errors:")
        report.extend(actual_errors)
    assert not report, "\n".join(report)
6923c4edbc27c0c81ef884aabcaaad06ff4e317c
39,817
def linear_search(list, value):
    """Linearly scan ``list`` for ``value``; return its index, or -1 if absent."""
    for index, item in enumerate(list):
        if item == value:
            return index
    return -1
39fdbbdaed7090275ac75e1df39592017494f5fb
39,818
def reformat_line_files(line_upgrades_df, line_cost_database):
    """Rename and retype line-upgrade columns to match the cost database.

    Parameters
    ----------
    line_upgrades_df
    line_cost_database

    Returns
    -------
    tuple of the two reformatted dataframes
    """
    line_upgrades_df.rename(
        columns={"normamps": "ampere_rating", "kV": "voltage_kV"}, inplace=True)
    # Normalise the shared columns identically on both frames.
    for frame in (line_upgrades_df, line_cost_database):
        frame["ampere_rating"] = frame["ampere_rating"].astype(float).round(2)
        frame["phases"] = frame["phases"].astype(int)
        frame["voltage_kV"] = frame["voltage_kV"].astype(float).round(2)
    # assign original equipment length to new equipment
    line_upgrades_df["length"] = (
        line_upgrades_df.groupby("original_equipment_name")["length"].transform("first"))
    line_cost_database["cost_per_m"] = line_cost_database["cost_per_m"].astype(float)
    return line_upgrades_df, line_cost_database
e24aeff35381c5b591df76af4c31bbd2632a8025
39,819
def _splitDataByRegex(string_data, regex) -> list: """ Function Description : _splitDataByRegex : provide a collection of splitted string by regex accept string_data as a string that would be split into several part. And regex value as splitter EXAMPLE ARGS : (string_data = "this_dataset", regex = "_") EXAMPLE PROSSIBLE RESULT : ["this", "dataset"] """ res = string_data.split(regex) return res
716344eb14a3e751bf0ee28fd76bacbc1f047562
39,820
def read_models(models_file):
    """
    Read the models file to get a list of filenames.

    Each non-empty line is '<name> <filename>'; the second field is kept.

    :param models_file: models file path
    :type models_file: str
    :return: list of filenames
    :rtype: [str]
    """
    with open(models_file, 'r') as handle:
        rows = [row.strip() for row in handle.readlines()]
    return [row.split(' ')[1] for row in rows if row != '']
bb0dbbcef77af7d3f04608a5a40fd8a8e94cf5a5
39,823
def create_ascii_file(ascii_file_name, args):
    """Convenience function for creating an output ascii file.

    Writes the current state of the input_flags arguments to the header of
    the file and returns the still-open Python file handle.  Over-writes any
    existing file with the same name, so use with caution.

    Args:
        ascii_file_name: string name of the file to write too
        args: argparse ArgumentParser.parse_args object from input_flags
    Returns:
        open Python file object
    """
    handle = open(ascii_file_name, 'w')
    handle.writelines('# input_flags:\n')
    for name, value in vars(args).items():
        handle.writelines('#\t%s : %s\n' % (name, value))
    return handle
b431dfe53c39bb025931eddc5e3a985cac581b55
39,825
def currencies():
    """Return the set of supported currency codes.

    :rtype: set[str]
    """
    supported = {"EUR", "CZK", "GBP", "USD"}
    return supported
e6a3e6e122f80b646e0792dd7b90abc750441853
39,826
def fib_recurse(n):
    """Compute the n-th Fibonacci number (iterative form of the recurrence).

    Values below 2 are returned unchanged, matching the recursive base case.
    """
    if n < 2:
        return n
    prev, curr = 0, 1
    for _ in range(n - 1):
        prev, curr = curr, prev + curr
    return curr
fda8576a0c1afb971d8f949fc2d14689bde185bd
39,827
import os
import pickle
import sys


def load_data(data_dir):
    """Load the word->index and index->word dictionaries from ``data_dir``.

    Expects 'word_idx.pkl' and 'idx_word.pkl' pickle caches; exits the
    process when either is missing.
    """
    word_idx_cache_path = os.path.join(data_dir, 'word_idx.pkl')
    idx_word_cache_path = os.path.join(data_dir, 'idx_word.pkl')
    both_exist = (os.path.exists(word_idx_cache_path)
                  and os.path.exists(idx_word_cache_path))
    if not both_exist:
        sys.exit('File containing the dictionary does not exist.')
    with open(word_idx_cache_path, mode='rb') as file:
        word_idx = pickle.load(file)
    with open(idx_word_cache_path, mode='rb') as file:
        idx_word = pickle.load(file)
    print("Dictionary loaded.")
    return word_idx, idx_word
ed9173c09bb9b8eaf94f62f1e4dec5e61babd7a6
39,828
def unpad(data: bytes) -> bytes:
    """Unpad a previously padded string: the last byte encodes how many
    trailing bytes to strip."""
    pad_len = ord(data[-1:])
    return data[:-pad_len]
1fec7f3c08599b139e2e525b6c5af31e6f3681f3
39,829
def purge_duplicates(list_in):
    """Remove duplicates from list while preserving order.

    Parameters
    ----------
    list_in: Iterable

    Returns
    -------
    list
        List of first occurrences in order
    """
    # Algorithm taken from Stack Overflow,
    # https://stackoverflow.com/questions/480214. Content by Georgy
    # Skorobogatov (https://stackoverflow.com/users/7851470/georgy) and
    # Markus Jarderot
    # (https://stackoverflow.com/users/22364/markus-jarderot), licensed
    # under CC-BY-SA 4.0.
    # https://creativecommons.org/licenses/by-sa/4.0/.
    seen = set()
    unique_items = []
    for item in list_in:
        if item not in seen:
            seen.add(item)
            unique_items.append(item)
    return unique_items
349211a9ad9b949fb7061e2b4ad21cb1ec3354f7
39,832
def _prefix_callable(bot, msg): """ prefix :param bot: :param msg: :return: """ user_id = bot.user.id base = [f'<@!{user_id}> ', f'<@{user_id}> '] if msg.guild is None: base.append('!') base.append('?') else: base.append('¥') return base
c581176424de6addcb189a901b11f7af4786ebbf
39,833
from typing import Any
from typing import Dict


def build_data_columns(hparams: Dict[str, Any]) -> Dict[str, Any]:
    """Build data columns from hyper-parameters.

    Args:
        hparams: hyper-parameters for the data columns.

    Returns:
        data columns: always contains "target"; contains "input" only when
        a truthy "input" entry is present in *hparams*.
    """
    # dict.get replaces the original try/except KeyError blocks: a missing
    # key yields None, exactly as before.
    input_columns = hparams.get("input")
    target_columns = hparams.get("target")

    # "input" is only included when truthy (so an empty list is dropped,
    # matching the original behavior).
    if input_columns:
        return {"input": input_columns, "target": target_columns}
    return {"target": target_columns}
21bfe8166234b7ed69aae7a3d49a0848c268611d
39,834
def normArray(data_array, maximum=1):
    """Scale *data_array* so that its largest value equals *maximum*.

    Returns the rescaled array.
    """
    peak = data_array.max()
    return (data_array / peak) * maximum
df0f91078a1460c995436b3038ea6cc82e870051
39,835
def get_mrca(pi, x, y):
    """
    Return the most recent common ancestor of nodes x and y in the
    oriented forest pi (pi[j] is the parent of node j; 0 denotes the root
    sentinel).

    Fix: when x is itself an ancestor of y (including x == y), the original
    returned 0 instead of the common node; the first common ancestor is now
    returned directly.
    """
    # Ancestors of x, in order from x up to the root sentinel.
    x_chain = [x]
    node = x
    while node != 0:
        node = pi[node]
        x_chain.append(node)
    # Ancestors of y, as a set for O(1) membership tests.
    y_ancestors = {y}
    node = y
    while node != 0:
        node = pi[node]
        y_ancestors.add(node)
    # The first of x's ancestors that is also an ancestor of y is the MRCA.
    # Both chains contain 0, so this always finds a node.
    return next(node for node in x_chain if node in y_ancestors)
faece8f1fabf09444f1d3f0d42c49ed6f510acd4
39,836
def stub_read(mote_id, chan_id, read_start_time):
    """Stub sensor read: ignores all arguments and always reports the
    sentinel "invalid" value, -1, so callers can easily detect it.
    """
    invalid_reading = -1
    return invalid_reading
b3d94d002f9d112540d62ae1f985bf595b3f2558
39,837
def convert_str_to_int(data):
    """Cast the 'employees' field of *data* to int, in place, and return
    the same mapping. (An example expansion.)
    """
    employees = data['employees']
    data['employees'] = int(employees)
    return data
b59bda9aac74c304f143f9fb5775216fc3c71e24
39,838
def status_str(status):
    """Translate a numeric status code into its display name.

    Unrecognized codes map to 'UNKNOWN'.
    """
    if status == 0:
        return 'MATCH'
    if status == 10:
        return 'OK'
    if status == 15:
        return 'SKIP'
    if status == 20:
        return 'FAIL'
    if status == 30:
        return 'CRASH'
    return 'UNKNOWN'
55fd5486a2c8360ceb13eb773dc957b799a8e831
39,839
import torch


def normalize_torch(img: torch.Tensor) -> torch.Tensor:
    """
    Standardize image tensor per channel.

    Args:
    -----------
        img (torch.Tensor): input image tensor. shape (C, H, W).

    Returns:
    -----------
        torch.Tensor. Standardized image tensor. Shape (C, H, W).

    Fix: ``Tensor.float()`` returns ``self`` when the input is already
    float32, so the original in-place ``sub_``/``div_`` silently mutated
    the caller's tensor. Out-of-place arithmetic avoids that side effect.
    """
    img = img.float()
    # keepdim=True lets the (C, 1, 1) statistics broadcast over (C, H, W).
    chl_means = torch.mean(img, dim=(1, 2), keepdim=True)
    chl_stds = torch.std(img, dim=(1, 2), keepdim=True)
    return (img - chl_means) / chl_stds
f6459f8ff465cdb56ace492f4de114eee2321855
39,840
def format_string(string: str) -> str:
    """Replace specific unicode characters with ASCII ones.

    Args:
        string: Unicode string.

    Returns:
        ASCII string.

    Fix: ``str.replace`` returns a new string; the original discarded the
    chained result and returned the input unchanged. The chain's result is
    now returned.
    """
    return (
        string
        .replace("\u2013", "-")
        .replace("\u00a0", " ")
        .replace("\u2018", "'")
        .replace("\u2019", "'")
        .replace("\u201c", '"')
        .replace("\u201d", '"')
        .replace("\u00ed", 'i')
    )
2a7efea0816096c549642b00bee6f50a29ede0a2
39,842
def run_tf_graph(sess, input_data, input_node, output_node):
    """Generic function to execute a TensorFlow graph.

    When *input_data*/*input_node* are lists they are paired positionally
    into the feed dict; otherwise a single-entry feed dict is used.
    """
    out_tensor = sess.graph.get_tensor_by_name(output_node)
    if isinstance(input_data, list):
        # Pair node names with their data positionally; indexing (rather
        # than zip) preserves the IndexError on mismatched lengths.
        feed_dict = {node: input_data[i] for i, node in enumerate(input_node)}
    else:
        feed_dict = {input_node: input_data}
    return sess.run(out_tensor, feed_dict)
843af35faba84808ad0a6e3428cb65f2685a346c
39,843
import torch


def to_tensor(x):
    """Convert *x* to a detached float32 tensor.

    Tensors are cloned and detached from the autograd graph; anything else
    is handed to ``torch.tensor``. Replaces the original bare ``except:``
    (which swallowed every error) with an explicit type check.
    """
    if isinstance(x, torch.Tensor):
        return x.clone().detach().float()
    return torch.tensor(x).float()
14df794b76b8d1d4b845e68b6af340522331dd82
39,844
def createNamespace(benchmarkInfo, benchmarkResult):
    """
    Creates a dictionary representing a namespace containing the member
    var/values on the benchmarkInfo and benchmarkResult passed in to
    eval/exec expressions in. This is usually used in place of locals() in
    calls to eval() or exec().

    Attributes on benchmarkResult win on name collisions.
    """
    merged = {}
    merged.update(benchmarkInfo.__dict__)
    merged.update(benchmarkResult.__dict__)
    return merged
79ff65e69e874c7d83085a7e5cc4c0df82ca572c
39,845
import torch


def npvec_to_tensorlist(vec, params):
    """
    Convert a numpy vector to a list of tensors with the same dimensions
    as params.

    Args:
        vec: a 1D numpy vector
        params: a list of parameters from net

    Returns:
        rval: a list of tensors with the same shape as params
    """
    tensors = []
    offset = 0
    for param in params:
        count = param.data.numel()
        chunk = vec[offset:offset + count]
        tensors.append(torch.from_numpy(chunk).view(param.data.shape).float())
        offset += count
    assert offset == vec.size, 'The vector has more elements than the net has parameters'
    return tensors
3cbed80b3896d6f0610a057903f09728ccae0a30
39,846
import fnmatch
import os


def fnmatch_any(filename, pattern):
    """Test whether `filename` or any of its parent directories match the
    glob pattern.
    """
    current = filename
    while current:
        if fnmatch.fnmatch(current, pattern):
            return True
        parent = os.path.dirname(current)
        # dirname is a fixed point at the top of the path; stop there.
        if parent == current:
            return False
        current = parent
    return False
e7008439f5fb5fa1f061e351943799d1cd3f7386
39,848
def list_certificate_issuer_admins(client, vault_base_url, issuer_name):
    """List admins for a specified certificate issuer."""
    issuer = client.get_certificate_issuer(vault_base_url, issuer_name)
    return issuer.organization_details.admin_details
37c8411b69c7bd3967d4ffe22c8b039236379625
39,850
def telnet_8742(command):
    """
    Decorator factory: assemble raw commands for the 8742 picomotor
    controller and interface to its telnet server.

    The wrapped method supplies the value portion of the command; the
    decorator handles connecting, sending the assembled command line,
    optionally reading a reply (for query commands), and closing the
    connection. On any failure it prints diagnostics and returns None.
    """
    def wrapped(func):
        def telnet_interface(self, *args, **kwargs):
            try:
                # The wrapped method's return value becomes the command's
                # value field (or nothing when it returns None).
                value = func(self, *args, **kwargs)
                self.connect_to_telnet()
                # Optional axis prefix taken from the keyword arguments.
                if "axis" in kwargs.keys():
                    axis = str(kwargs['axis'])
                else:
                    axis = ""
                if value is not None:
                    value = str(value)
                else:
                    value = ""
                # Wire format: "<axis><command><value>", encoded to bytes.
                line = (axis + command + value).encode()
                self.send_to_telnet(line)
                # Commands containing '?' are queries and produce a reply;
                # other commands have no response to read.
                if b"?" in line:
                    data = self.receive_from_telnet()
                else:
                    data = None
                self.close_connection()
                return data
            except Exception as error:
                # Best-effort error report: show the failure plus the wrapped
                # method's name and docstring, then signal failure with None.
                # NOTE(review): the connection is not closed on this path —
                # confirm whether close_connection() should run here too.
                print(error)
                print("\n More info: \n")
                print("Command: " + func.__name__ + "()")
                print(func.__doc__)
                return None
        return telnet_interface
    return wrapped
eb36b1b3698fd264f7cc5625e3471130e3bd80a5
39,851
def semiflatten(multi):
    """Convert a MultiDict into a regular dict. If there are more than one
    value for a key, the result will have a list of values for the key.
    Otherwise it will have the plain value.

    Falsy inputs (None, empty) are returned unchanged.
    """
    if not multi:
        return multi
    flattened = multi.to_dict(flat=False)
    for key, values in flattened.items():
        if len(values) == 1:
            flattened[key] = values[0]
    return flattened
75e2ea0a5ee05d53390e9469eb2239ef8a9d6d84
39,852
def _set_antecedent(self, descendants): """ Set antecedent property of descendents to current branch. Notes ----- We want the clusters to know who they are related to. An antecedent is the immediate parent of a cluster. So when branching set the antecedent property of all descendants in the current branch to the branch itself. Also set the antecedent value of the current branch to 'None' to indicate that it doens't have a parent. """ for descendant in descendants: descendant._antecedent = self return self._antecedent
a69a21954ae44548ea62a4fc290ca6df857bc891
39,853
import os


def append_stem(filename, word, delim="."):
    """
    returns a filename with word appended to the stem
    example: /path/to/test.run.sam -> /path/to/test.run.sorted.sam
    """
    head, base = os.path.split(filename)
    parts = base.split(delim)
    # Insert the new word just before the final (extension) component.
    parts = parts[:-1] + [word] + parts[-1:]
    return os.path.join(head, delim.join(parts))
cb18d68695613ab1cca691a26de163d08d8cc1b9
39,854
import sys


def humanize(num):
    """Format *num* as a human readable string with K/M/G suffixes
    (powers of 1000). Exits the program if *num* cannot be coerced to int.

    Fixes: the bare ``except:`` now catches only conversion errors, the
    "Unalbe" typo in the exit message is corrected, and floor division
    keeps *num* an int through the loop.
    """
    try:
        num = int(num)
    except (TypeError, ValueError):
        sys.exit("Unable to humanize input value.")
    for unit in ['', 'K', 'M']:
        # A non-zero remainder means num is not an exact multiple of 1000,
        # so this is the right magnitude to report.
        if num % 1000:
            return '%d%s' % (num, unit)
        num //= 1000
    return "%d%s" % (num, 'G')
9a01c4df0c2903525a26b458ecaf123c747fa7cb
39,856
import collections
import math


def prior(training_data, label_list):
    """
    return the prior probability of the label in the training set
    => frequency of DOCUMENTS

    Laplace smoothing (add-one) is applied so unseen labels get a
    non-zero probability; log-probabilities are returned.
    """
    smooth = 1  # smoothing factor
    label_counts = collections.Counter(dp['label'] for dp in training_data)
    denominator = len(training_data) + smooth * len(label_list)
    return {
        label: math.log((label_counts[label] + smooth) / denominator)
        for label in label_list
    }
983912ce805ae42e212c1af5504d53560af19c1b
39,857
def udfize_def_string(code: str) -> str:
    """Given an unindented code block that uses 'input' as a parameter, and
    output as a return value, returns a function as a string.

    Fix: continuation lines were joined with a single space, which produced
    an IndentationError for any multi-line body; they are now joined with
    the function-body indent (4 spaces) so every line lines up.
    """
    return """\
def udf(input):
    {}
    return output
""".format(
        "    ".join(line for line in code.splitlines(True))
    )
71084f68ff268eaaa2eec2f8f22394e963fdd894
39,858
def get_astronomical_twilight(times, events, value):
    """
    value = 0 for end of astronomical twilight
    value = 1 for the beginning of astronomical twilight (first occurrence)

    Returns the entry of *times* at the first index where *events* equals
    *value*, or None when no such event exists. The bare ``except:`` was
    narrowed to the two failures that can actually occur here: the value
    missing from events (ValueError) or an out-of-range index (IndexError).
    """
    try:
        zindex = events.tolist().index(value)
        at_time = times[zindex]
    except (ValueError, IndexError):
        at_time = None
    return at_time
ce8780a833e6356ad169430720f6b4bdb555e8ed
39,859
import pkg_resources
import os


def list_gene_names(gene_information_file=None):
    """Create a list of all known gene names and their aliases as listed on SGD
    (or as provided as an optional input file)

    Input is a standard file downloaded from https://www.uniprot.org/docs/yeast.
    Output is list of all genes, which also includes all the aliases (if they
    exist).

    Parameters
    ----------
    gene_information_file : str, optional
        Input is a standard file downloaded from
        https://www.uniprot.org/docs/yeast, by default None

    Returns
    -------
    list
        Output is list of all genes, which also includes all the aliases
        (if they exist).
    """
    # Fall back to the data file bundled with the transposonmapper package.
    if gene_information_file == None:
        default_path = pkg_resources.resource_filename("transposonmapper", "data_files/")
        gene_information_file = os.path.join(
            default_path, "Yeast_Protein_Names.txt"
        )

    gene_name_list = []  # includes all gene names and potential aliases
    gene_oln_list = []   # includes only the OLN naming convention
    gene_sgd_list = []   # includes the first designation name from the input list
    gene_counter = 0
    with open(gene_information_file) as f:
        lines = f.readlines()
    # The gene records start at line 58 and stop 6 lines before the end of
    # the file (fixed layout of the uniprot "yeast" listing).
    for i in range(
        58, len(lines) - 6
    ):
        n = 0
        l = lines[i]
        # Count how many times ';' occurs in a line. This is needed to get
        # the right columns, as aliases of genes are sometimes presented in
        # extra ';'-terminated columns before the OLN column.
        extra_columns = l.count(
            ";"
        )
        # Collapse runs of whitespace so the line splits into clean fields.
        l_short = " ".join(l.split())
        l_list = l_short.split(" ")

        gene_name_list.append(l_list[0].strip(";"))
        gene_sgd_list.append(l_list[0].strip(";"))
        gene_oln = l_list[1 + extra_columns].strip(";")  # the OLN name
        if (
            gene_oln == "GAG" or gene_oln == "POL"
        ):  # 'GAG'/'POL' marker: the real OLN lives one column further right.
            gene_name_list.append(l_list[2 + extra_columns].strip(";"))
            gene_oln_list.append(l_list[2 + extra_columns].strip(";"))
        else:
            gene_name_list.append(gene_oln)
            gene_oln_list.append(gene_oln)

        if (
            l_list[1 + extra_columns] == "GAG"
            or l_list[1 + extra_columns] == "POL"
        ):  # these marker tokens must be skipped when collecting aliases below.
            extra_columns = extra_columns + 1
        if extra_columns > 0:
            # Collect the alias columns, skipping the GAG/POL markers.
            for n in range(extra_columns):
                gene_name = l_list[1 + n].strip(";")
                if not gene_name == "GAG" and not gene_name == "POL":
                    gene_name_list.append(gene_name)

        gene_counter += 1

    # NOTE: a commented-out block that saved gene_oln_list / gene_sgd_list to
    # text files was removed here as dead code; see repository history.

    print("Number of genes found in file = ", gene_counter)

    return gene_name_list
437016f93a1e38851dbe0198921806981d1777be
39,860
def make_filename(traj):
    """
    Function to create generic filenames based on what has been explored.

    Builds '<name>_<value>__' pieces for every explored parameter, drops
    the trailing separator, and appends '.png'.
    """
    pieces = []
    for param in traj.f_get_explored_parameters().values():
        pieces.append('%s_%s__' % (param.v_name, str(param.f_get())))
    joined = ''.join(pieces)
    return joined[:-2] + '.png'
f118fb4c79356528773a2c054a8a0955b3e51242
39,861
import random


def run_quiz(population, num_questions, num_countries):
    """Run a quiz about the population of countries.

    population maps country name -> population count; each round samples
    num_countries options and asks the user to pick the most populous one.
    Returns the number of correct answers.
    """
    num_correct = 0
    for q_num in range(num_questions):
        print(f"\n\nQuestion {q_num + 1}:")
        # NOTE(review): random.sample on dict keys requires a sequence on
        # Python 3.11+; confirm list(population) conversion is not needed.
        countries = random.sample(population.keys(), num_countries)
        print("\n".join(f"{i}. {a}" for i, a in enumerate(countries, start=1)))
        # Get user input; re-prompt until a valid option number is given.
        while True:
            guess_str = input("\nWhich country has the largest population? ")
            try:
                guess_idx = int(guess_str) - 1
                # NOTE(review): an answer of 0 yields index -1 (the last
                # option) rather than a re-prompt — confirm intended.
                guess = countries[guess_idx]
            except (ValueError, IndexError):
                print(f"Please answer between 1 and {num_countries}")
            else:
                break
        # Check the answer against the option with the largest population.
        correct = max(countries, key=lambda k: population[k])
        if guess == correct:
            num_correct += 1
            print(f"Yes, {guess} is most populous ({population[guess]:,})")
        else:
            print(
                f"No, {correct} ({population[correct]:,}) is more populous "
                f"than {guess} ({population[guess]:,})"
            )
    return num_correct
3d02fb8036e50f7dbaf4d52f360129fb8cfb0fe1
39,863
import numpy


def _pfa_check_uvects(PFA, Position, Grid, SCP):
    """
    Check that the Grid row/column unit vectors agree with the values
    derived from the PFA (polar format algorithm) parameters, logging a
    validity error for each disagreement.

    Parameters
    ----------
    PFA : sarpy.io.complex.sicd_elements.PFA.PFAType
    Position : sarpy.io.complex.sicd_elements.Position.PositionType
    Grid : sarpy.io.complex.sicd_elements.Grid.GridType
    SCP : numpy.ndarray

    Returns
    -------
    bool
        True when the unit vectors are consistent (or the check cannot be
        performed); False when either vector disagrees.
    """
    # Without both plane normals the check cannot be performed; treat as OK.
    if PFA.IPN is None or PFA.FPN is None:
        return True

    cond = True
    ipn = PFA.IPN.get_array(dtype='float64')  # image plane normal
    fpn = PFA.FPN.get_array(dtype='float64')  # focus plane normal
    row_uvect = Grid.Row.UVectECF.get_array(dtype='float64')
    col_uvect = Grid.Col.UVectECF.get_array(dtype='float64')

    # ARP position at the polar angle reference time.
    pol_ref_point = Position.ARPPoly(PFA.PolarAngRefTime)
    # Project the reference point onto the image plane along the focus
    # plane normal, then form the slant range vector to the SCP.
    offset = (SCP - pol_ref_point).dot(ipn)/(fpn.dot(ipn))
    ref_position_ipn = pol_ref_point + offset*fpn
    slant_range = ref_position_ipn - SCP
    u_slant_range = slant_range/numpy.linalg.norm(slant_range)

    # The row unit vector is expected to oppose the unit slant range vector.
    derived_row_vector = -u_slant_range
    if numpy.linalg.norm(derived_row_vector - row_uvect) > 1e-3:
        PFA.log_validity_error(
            'the Grid.Row.UVectECF ({}) is not in good agreement with\n'
            'the expected value derived from PFA parameters ({})'.format(row_uvect, derived_row_vector))
        cond = False

    # The column unit vector completes the system: IPN x row.
    derived_col_vector = numpy.cross(ipn, derived_row_vector)
    if numpy.linalg.norm(derived_col_vector - col_uvect) > 1e-3:
        PFA.log_validity_error(
            'the Grid.Col.UVectECF ({}) is not in good agreement with\n'
            'the expected value derived from the PFA parameters ({})'.format(col_uvect, derived_col_vector))
        cond = False
    return cond
3f3d975824cb705aa8c5e534b4804adeab2260dd
39,865
def relu_loop(x):
    """
    ReLU

    out = reLu(dot(W, input) + b)
    relu = max(x, 0)

    Applies relu element-wise to a 2D array and returns a new array; the
    input is left untouched. The per-element Python double loop was
    replaced with a vectorized in-place clamp on the copy — same results
    and dtype, O(1) Python-level operations instead of O(rows*cols).
    """
    assert len(x.shape) == 2  # assert it is 2D matrix
    x = x.copy()  # deep copy so the caller's array is not modified
    # Boolean-mask assignment zeroes the negatives, equivalent to
    # max(value, 0.) applied element by element.
    x[x < 0] = 0.
    return x
a0f91be60bfec9a4d704a132f26108cd3a9d2c0d
39,866
import os


def get_read_count(in_bam_dir):
    """
    Extract read count from the STAR Log file. Required for downstream
    analysis (JunctionSeq).

    :param in_bam_dir: Result directory produced by STAR containing Log.final.out
    :return: Number of mapped reads

    Raises StopIteration when the "Number of input reads" line is absent
    (unchanged from the original). The file handle is now managed with a
    ``with`` block so it is closed even if the search fails.
    """
    in_log = os.path.join(in_bam_dir, "Log.final.out")
    token = "Number of input reads"
    with open(in_log) as fp:
        line = next(l for l in fp if token in l)
    # Line layout: "<label> |<tab><value>".
    return int(line.split("|")[1].strip())
36cedc575f4c71f666837219983ced29e5488a5c
39,868
def construct_mirror_name(volume):
    """Constructs MirrorView name for volume.

    Note: the original used ``six.text_type``; on Python 3 that is simply
    ``str``, so the six compatibility shim (and its import) is unnecessary.
    """
    return 'mirror_' + str(volume.id)
75ec30c8e5cf204f525301ea0fd988222c1d1cf5
39,870
def check_for_empty_string(input_data):
    """
    Checks if data presented by a user is empty.

    Returns an error message for blank/whitespace-only input, else None.
    """
    if not input_data.strip():
        return 'All fields are required'
    return None
dab37e5778d1746e3e3d5c0985d9b24c56184aa3
39,871
def _count_dollars_before_index(s, i): """Returns the number of '$' characters right in front of s[i].""" dollar_count = 0 dollar_index = i - 1 while dollar_index > 0 and s[dollar_index] == '$': dollar_count += 1 dollar_index -= 1 return dollar_count
4d140e63253ca0aee28f8bd6bb24e5a23e00a0f5
39,872
import bz2
import gzip


def smart_open(filename, mode):
    """Unified front end for opening plain files and compressed files.

    Dispatches on the filename suffix: .bz2 and .gz are opened through
    their respective modules, everything else through the builtin open.
    """
    if filename.endswith(".bz2"):
        return bz2.BZ2File(filename, mode)
    if filename.endswith(".gz"):
        return gzip.open(filename, mode)
    return open(filename, mode)
8dc8119c3d27d87d1cc7febe0202f727d925d7c3
39,873
def eglass_gl_input():
    """Table C3.8.1 - generica material data"""
    material = dict(
        name="e-glass",
        density=2540,
        modulus_x=73000000,
        modulus_y=73000000,
        modulus_xy=30000000,
        poisson=0.18,
    )
    return material
3bf1d84fbf6bae127049ea7e7010ea458695b6a5
39,874
import math


def we_calc(xPLL, vd, Kp_PLL):
    """Calculate inverter frequency from PLL."""
    nominal = 2 * math.pi * 60.0  # 60 Hz nominal grid frequency (rad/s)
    return Kp_PLL * vd + xPLL + nominal
b958909ce7481a46ae0fb0176d33f96926e3f607
39,876
def f(x):
    """A function for testing on: -(x + 2)^2 + 1, maximal (= 1) at x = -2."""
    shifted = x + 2.0
    return 1.0 - shifted ** 2
52d2c4a4dec5acaa34371a5cead727d94a36a478
39,878
def detectsubstitutions(args):
    """Detect substitutions specified on the commandline.

    This method will be called from the hooks within the applications.py
    module. This is where the application-specific code should be placed so
    that Longbow can handle substitutions.

    Scans for '-var NAME VALUE' / '-v NAME VALUE' triples, removes the
    consumed tokens from *args* in place, and returns the NAME -> VALUE
    mapping.
    """
    substitutions = {}
    consumed = []

    # First pass: record every substitution triple and the tokens it used.
    for position, token in enumerate(args):
        if token in ("-var", "-v"):
            substitutions[args[position + 1]] = args[position + 2]
            consumed.append(token)
            consumed.append(args[position + 1])
            consumed.append(args[position + 2])

    # Second pass: strip the consumed tokens from the argument list.
    for token in consumed:
        args.remove(token)

    return substitutions
eb8ff391948ee13bfa1e476a94ceb7b133b296c5
39,880