content
stringlengths
35
416k
sha1
stringlengths
40
40
id
int64
0
710k
def fn_HMS_to_S(hrs, mins, secs):
    """Convert hours, minutes and seconds to a total number of seconds.

    Date: 02 October 2016. originally in AstroFunctions.py
    """
    total_seconds = hrs * 3600 + (mins * 60.0) + secs
    return total_seconds
cc0407fb4824f5688a73762376e9177542c48b2b
695,965
async def fixture_players(lms, request):
    """Return list of players."""
    players = await lms.async_get_players()
    prefer = request.config.option.PREFER
    exclude = request.config.option.EXCLUDE if request.config.option.EXCLUDE else []

    selected = []
    if prefer:
        print(f"Preferring {prefer}")
        selected += [
            p for p in players
            if p.name in prefer or p.player_id in prefer
        ]
    # preferred players must not be picked up again by the catch-all pass
    exclude.extend(p.name for p in selected)
    print(f"Excluding {exclude}")
    selected += [
        p for p in players
        if p.name not in exclude and p.player_id not in exclude
    ]
    return selected
e4a1a51b3b7b5cc7a055b03645d7582b24459f2b
695,966
import os def _get_base_app_name(value): """Remove path and extension from an application name. Parameters ---------- value : str application name that may include a directory path and extension Returns ------- value : str application name base name with out directory path and extension """ value = os.path.basename(value) if ( value.endswith(".exe") or value.endswith(".dll") or value.endswith(".dylib") or value.endswith(".so") ): value = os.path.splitext(value)[0] return value
5002bba117645275dc328cae27e1c160c1df8173
695,967
import re


def read_cal(infile):
    """Read a two-column, tab-separated calibration file.

    Parameters
    ----------
    infile : str
        path to the calibration file (int column, float column)

    Returns
    -------
    tuple(list[int], list[float])
        the first and second columns of the file
    """
    ints = []
    floats = []
    # 'with' guarantees the handle is closed (the original leaked it)
    with open(infile, "r") as fh:
        for line in fh:
            fields = re.split(r"\t+", line.rstrip("\n"))
            ints.append(int(fields[0]))
            floats.append(float(fields[1]))
    return ints, floats
68cbb6c62c054d85c6d9a6ff8fdbdb06e7ba4360
695,968
import bz2


def serialize_dataframe(report, compressed=True):
    """Serialize a dataframe for storing.

    The dataframe is rendered as CSV text; when ``compressed`` is true the
    CSV is additionally bzip2-compressed and bytes are returned.
    """
    csv_text = report.to_csv(index=False)
    if not compressed:
        return csv_text
    return bz2.compress(csv_text.encode())
f879da2ebb6a9c4a1f5274efd6dfa506af585ce8
695,970
import random


def get_random_string(length=10):
    """Generate a random alphanumeric string of the given length.

    Parameters
    ----------
    length : int
        number of characters to generate (default 10)

    Returns
    -------
    str
        random string drawn from [a-zA-Z0-9]
    """
    # renamed from `string`, which shadowed the stdlib module name
    alphabet = "abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ1234567890"
    return ''.join(random.choices(alphabet, k=length))
043f7ef3b495c199825242ffd55fabf9e7c1cda7
695,971
def global_dataframe_resolver(obj, resolver):
    """Return a list of dataframes.

    Resolves every *local* partition of a distributed dataframe object and
    checks the partitions arrive in row-batch order.

    :param obj: object whose ``meta`` exposes 'partitions_-size' and
        per-partition members 'partitions_-<i>' (project-specific type)
    :param resolver: resolver whose ``run`` materialises one partition
    :return: list of resolved local dataframes
    :raises ValueError: if the local partitions are not in ascending
        row-batch order
    """
    meta = obj.meta
    num = int(meta['partitions_-size'])
    dataframes = []
    orders = []
    for i in range(num):
        df = meta.get_member('partitions_-%d' % i)
        # only partitions living on this node are resolved
        if df.meta.islocal:
            dataframes.append(resolver.run(df))
            orders.append(df.meta["row_batch_index_"])
    # local partitions must already be sorted by row batch index
    if orders != sorted(orders):
        raise ValueError("Bad dataframe orders:", orders)
    return dataframes
577c453ebff948fc8beaf99b63d8dc93e7be2dcc
695,972
def tpr(true_graph, est_graph):
    """Calculate the true positive rate of an estimated adjacency matrix.

    TPR = TP / (TP + FN) over the edges of ``true_graph``.
    """
    size = len(true_graph)
    true_pos = 0.0
    false_neg = 0.0
    for row in range(size):
        for col in range(size):
            if true_graph.item(row, col) != 1:
                continue
            if est_graph.item(row, col) == 1:
                true_pos += 1
            elif est_graph.item(row, col) == 0:
                false_neg += 1
    return true_pos / (true_pos + false_neg)
f2cb900989a311cddccca8a214d60f00c3174820
695,973
def _get_label_kwargs_lpt(plot, idx_key=None): """Function to get a dictionary for the label keyword arguments for formatting :param plot: (x, y, const_list, const_dict) :param idx_key: I do not even remember what the point of this argument is. """ return {'exp': plot[3]['exp'], 'N': plot[3]['N']}
7c821b7d960a8103598a33c1c488f44538ee7a73
695,974
import math


def convert_size(size_bytes):
    """Obtain a human readable file size.

    :param size_bytes: Size in bytes
    :return: e.g. "1.5 KB"
    """
    if size_bytes == 0:
        return "0B"
    units = ("B", "KB", "MB", "GB", "TB", "PB", "EB", "ZB", "YB")
    exponent = int(math.floor(math.log(size_bytes, 1024)))
    scaled = round(size_bytes / math.pow(1024, exponent), 2)
    return "{} {}".format(scaled, units[exponent])
962b41d64c1892584ff436799b643890a946d17b
695,975
def sperm_translator(cell):
    """If the cell has the DNA for food donation to its offspring, giving
    1/6th of its food in addition to the birthing partner, it is a sperm.

    Active DNA: x,B,(D/C),x,x,x

    :param cell: object with a comma-separated ``dna`` string attribute
    :return: True if gene 1 is 'B' and gene 2 is 'D' or 'C'
    """
    dna = cell.dna.split(',')
    # original ended with an unreachable `del dna[:]` after the returns
    return dna[1] == 'B' and dna[2] in ('D', 'C')
d5a8abb7bea9a6c53856ef069ba3890cb27750b9
695,976
def get_letter_grade_definitions(letter_grade: str) -> str:
    """Return the definition of the letter grade passed into the function.

    Grading Scale -
    89.50 - 100   = A
    88.50 - 89.49 = B+
    79.50 - 88.49 = B
    78.50 - 79.49 = C+
    69.50 - 78.49 = C
    68.50 - 69.49 = D+
    59.50 - 68.49 = D
    00.00 - 59.50 = F

    @param letter_grade:
    @return:
    """
    definitions = {
        "a": "A is between 89.50 - 100",
        "b+": "B+ is between 88.50 - 89.49",
        "b": "B is between 79.50 - 88.49",
        "c+": "C+ is between 78.50 - 79.49",
        "c": "C is between 69.50 - 78.49",
        "d+": "D+ is between 68.50 - 69.49",
        "d": "D is between 59.50 - 68.49",
    }
    # anything unrecognised falls through to F, matching the original chain
    return definitions.get(letter_grade.lower(), "F is anything lower than 59.50")
fd9697063aef5f9b56850a42d333d725e5bc1377
695,977
def assemble_api_url(domain, operators, protocol='https'):
    """Assemble the requests api url."""
    return f"{protocol}://{domain}{operators}"
105d541f2e9196583b2fd5eac1af75cf4c62669f
695,979
from datetime import datetime
import uuid


def generate_ami_name(prefix):
    """Generate AMI image name.

    Format: <prefix>-<YYYYmmddHHMMSS>-<10 hex chars>.
    """
    timestamp = datetime.now().strftime("%Y%m%d%H%M%S")
    unique_suffix = uuid.uuid4().hex[:10]
    return f"{prefix}-{timestamp}-{unique_suffix}"
50cf4e6f8ab55b252503319429594242cea9a77e
695,980
def _cap_str_to_mln_float(cap: str): """If cap = 'n/a' return 0, else: - strip off leading '$', - if 'M' in cap value, strip it off and return value as float, - if 'B', strip it off and multiple by 1,000 and return value as float""" if cap == "n/a": return 0 capital = cap.lstrip("$") if capital[-1] == "M": return float(capital.replace("M", "")) if capital[-1] == "B": return float(capital.replace("B", "")) * 1000
a4c984013ba7c1e06b3569f61d65fe69d98ae2da
695,981
def _get_groupby_and_reduce_dims(ds, frequency):
    """
    Get the groupby and reduction dimensions for performing operations
    like calculating anomalies and percentile thresholds.

    :param ds: dataset with either a "time" or an "init" dimension
        (presumably an xarray Dataset — confirm against callers)
    :param frequency: datetime accessor name (e.g. "month"), or None
    :return: (groupby string or None, reduce dim name or [name, "member"])
    :raises ValueError: if neither "time" nor "init" is a dimension
    """
    def _same_group_per_lead(time, frequency):
        # True when every init shares the same group value at each lead
        group_value = getattr(time.dt, frequency)
        return (group_value == group_value.isel(init=0)).all()

    if "time" in ds.dims:
        groupby = f"time.{frequency}" if (frequency is not None) else None
        reduce_dim = "time"
    elif "init" in ds.dims:
        if frequency is not None:
            # In the case of forecast data, if frequency is not None, all that
            # is done is to check that all the group values are the same for
            # each lead
            time = ds.time.compute()
            same_group_per_lead = (
                time.groupby("init.month")
                .map(_same_group_per_lead, frequency=frequency)
                .values
            )
            assert all(
                same_group_per_lead
            ), "All group values are not the same for each lead"
        # NOTE(review): groupby is always "init.month" on this branch even
        # when frequency is None — confirm that is intended
        groupby = f"init.month"
        reduce_dim = "init"
    else:
        raise ValueError("I can't work out how to apply groupby on this data")
    if "member" in ds.dims:
        # also collapse the ensemble member dimension when present
        reduce_dim = [reduce_dim, "member"]
    return groupby, reduce_dim
0edf7a0798afe5243aa4a8ee52a75c1b6cf36e5d
695,982
import re


def message_match(tpl, key, text):
    """True if text matches tpl for the key"""
    templates = tpl["subject_templates"].values()
    return any(re.fullmatch(template[key], text) for template in templates)
f81fe54ec60d592805a94bae700cf2e3a14464bd
695,983
def bed_get_region_id_scores(in_bed, no_float=False):
    """
    Read in .bed file, and store scores for each region in dictionary
    (unique column 4 ID and column 5 score have to be present).

    :param in_bed: path to the .bed file
    :param no_float: keep the score as a string instead of converting
    :return: dict mapping region ID -> region score
    """
    id2sc_dic = {}
    # the context manager closes the file; the original's trailing
    # `f.closed` was a no-op attribute access, now removed
    with open(in_bed) as f:
        for line in f:
            cols = line.strip().split("\t")
            site_id = cols[3]
            site_sc = cols[4] if no_float else float(cols[4])
            id2sc_dic[site_id] = site_sc
    assert id2sc_dic, "nothing read in for in_bed \"%s\"" % (in_bed)
    return id2sc_dic
cd4305388251ab9ff9d301ff4bf0409783d1bcfd
695,984
import json


def put_json(client, url, data, headers=None):
    """Send PUT request with JSON data to specified URL.

    :url: URL string
    :data: Data dict
    :headers: Optional headers argument (usually will be authorization)
    :returns: Flask response object

    The headers default was a mutable ``{}`` shared across calls; a None
    sentinel is used instead (passing ``{}`` explicitly still works).
    """
    if headers is None:
        headers = {}
    return client.put(url,
                      data=json.dumps(data),
                      content_type='application/json',
                      headers=headers)
e5fff7c1fdc9cf72e8b314854415c9426e5b261b
695,985
def sipi(b3, b4, b8):
    """Structure Intensive Pigment Index \
    (Peñuelas, Baret and Filella, 1995).

    .. math:: SIPI = b3/b8 - b4

    :param b3: Green.
    :type b3: numpy.ndarray or float
    :param b4: Red.
    :type b4: numpy.ndarray or float
    :param b8: NIR.
    :type b8: numpy.ndarray or float
    :returns SIPI: Index value

    .. Tip:: Peñuelas, J., Baret, F., Filella, I. 1995. Semi-empirical \
    indices to assess carotenoids/chlorophyll-a ratio from leaf \
    spectral reflectance. Photosynthetica 31, 221-230.
    """
    return b3 / b8 - b4
b0a7181970e9165d0e75ab7319646fd6bd1c6bbd
695,986
import random


def random_nucleotides(sample_size: int, seq_length: int, seed: int = 1789):
    """
    Return a random list of DNA nucleotides sequences.

    Args:
        sample_size: generate N random sequences.
        seq_length: set sequence length.
        seed: random seed for reproducibility.

    Returns:
        list of generated sequences
    """
    random.seed(seed)
    bases = list("TAGC")
    return [
        "".join(random.choices(bases, k=seq_length))
        for _ in range(sample_size)
    ]
ce283d43495fa53be4276cf0ab3c6793e053a723
695,987
import difflib


def get_hunks(fromlines, tolines, context=None):
    """Generator yielding grouped opcodes describing differences.

    See `get_filtered_hunks` for the parameter descriptions.
    """
    sm = difflib.SequenceMatcher(None, fromlines, tolines)
    if context is None:
        # a single "hunk" covering the whole diff
        return iter([sm.get_opcodes()])
    return sm.get_grouped_opcodes(context)
b2a2c36be17a6f481564fa9edb6f480ef7e0361c
695,988
def populate_game_starting_lineups(gl, games, player_id): """ Populates all the starting lineups in a game by updating the "teams" dictionary in each Game object, with key = team_id, value = [list of player_ids on that team] Every "teams" dictionary has two keys because two teams play in a game Each key in the "teams" dictionary has a list of 5 players (starting lineup) :param gl: Dataframe of GameLineupNBA.csv :param games: Dictionary mapping game_id's to Game objects :param player_id: the unique id string of a player :return: updated games dictionary which maps game_id's to Game objects """ # Filters the dataframe to find all players with the specified player_id and the starting # lineup denoted by period 1 df = gl[(gl["Person_id"] == player_id) & (gl["Period"] == 0)] # Loop through each row of the df for index, row in df.iterrows(): game_id = row["Game_id"] team_id = row["Team_id"] if team_id in games[game_id].teams: # If the team_id already exists in the "teams" dictionary # then just append the current player_id to the list of players # on the team games[game_id].teams[team_id]["players"].append(player_id) else: # If the team_id does not exist yet in the "teams" dictionary # then just create a new team_id key and set its value to be a new list with # the first player id on the team games[game_id].teams[team_id] = { "players": [player_id] } # Returns the updated dictionary of games return games
9de9be892765581a4c74d060979d086a0bf6031c
695,989
def clean_venue_name(venue_name: str) -> str:
    """Clean the venue name, by removing or replacing symbols that are not
    allowed in a file name.

    Args:
        venue_name: Original venue name.

    Returns:
        Cleaned venue name.
    """
    replacements = {"*": "", "/": "_", " ": "_"}
    for symbol, substitute in replacements.items():
        venue_name = venue_name.replace(symbol, substitute)
    return venue_name
90b6f8b3787af17750c548bb816383bf8a5b07a4
695,990
import os


def test_fixtures_dir():
    """the test fixture directory"""
    here = os.path.dirname(__file__)
    return os.path.join(here, "..", "fixtures")
16d27847103775120a109b6038b71e42b284522e
695,991
from typing import List


def merge(intervals: List[List[int]]) -> List[List[int]]:
    """
    Merge overlapping intervals.

    Sort the intervals by their starting points, then extend the last
    merged interval whenever the next one overlaps it.

    :param intervals: list of [start, end] intervals
    :return: merged intervals (a new list; input is no longer mutated —
        the original aliased the caller's inner lists and wrote into them)
    """
    if intervals is None or len(intervals) == 1:
        return intervals
    merged: List[List[int]] = []
    for start, end in sorted(intervals, key=lambda iv: iv[0]):
        if merged and merged[-1][1] >= start:
            merged[-1][1] = max(merged[-1][1], end)
        else:
            merged.append([start, end])  # fresh list, not a caller reference
    return merged
e30c89ca7aef8a2050b7f3b91baf8f049bcc49c2
695,992
def get_model_args(parser, args=[]): """ parameters determing the network architecture -> model_type: Which model should be applied? -> load_model: Use pretrained model? -> model_path: pretrained from which path? """ # Model: # ------------------------------------------------------------------------ parser.add_argument( "--model_type", type=str, default="diagnosis", help='Model: "introvae", "dcgan", "bigan", "vqvae", "diagnosis", "mocoae", "rnnvae", "simsiamae"', ) parser.add_argument( "--load_model", type=bool, default=False, help="Determine whether to load pretrained model", ) parser.add_argument( "--model_path", type=str, default="./data/src/_model", help="Path to the model parameters", ) parser.add_argument( "--log_view", type=bool, default=True, help="Display losses in log view", ) # ------------------------------------------------------------------------ return parser
73d7cf35b37192abb87c4458f95ef9e874d83e9a
695,994
def _extract_symbols(simple_elf, symbols, default=None): """Fill 'symbols' field with required flash algo symbols""" to_ret = {} for symbol in symbols: if symbol not in simple_elf.symbols: if default is not None: to_ret[symbol] = default continue raise Exception("Missing symbol %s" % symbol) to_ret[symbol] = simple_elf.symbols[symbol].value return to_ret
43e8c1a51d68aecfdb4e259bedfa9d518a7f7a37
695,995
def get_border(char, length):
    """Get a border consisting of a character repeated multiple times.

    :param char: The character to make up the border.
    :param length: The length of the border.
    :return: A string consisting of the character repeated for the given length.
    """
    # string repetition replaces the original quadratic concatenation loop
    return char * length
9cd73504dc450e1e31c75b398240a27184a130e4
695,996
import html


def unescape_html(cbot, statement):
    """Unescape HTML entities in the statement text,
    e.g. "&lt;b&gt;" becomes "<b>".
    """
    unescaped = html.unescape(statement.text)
    statement.text = unescaped
    return statement
3df2dd86d696356fa7ac349a8b1a7b50668fce3b
695,997
from datetime import datetime


def extract_data(table):
    """
    Extract the per-date counts from an HTML table.

    :param table: parsed table element exposing ``find_all`` (presumably a
        BeautifulSoup Tag — confirm against caller)
    :return: list of (iso-date, staff, students, other, total) tuples
    """
    rows = table.find_all("tr")
    # the table must have exactly 5 rows: dates, staff, students, other, total
    assert len(rows) == 5
    dates = rows[0].find_all("td")
    staff_counts = rows[1].find_all("td")
    student_counts = rows[2].find_all("td")
    other_counts = rows[3].find_all("td")
    totals = rows[4].find_all("td")
    # staff count row must match the dates row
    assert len(dates) == len(staff_counts)
    # student count row must match
    assert len(staff_counts) == len(student_counts)
    # "other people" count row must match
    assert len(student_counts) == len(other_counts)
    # totals row must match
    assert len(other_counts) == len(totals)
    data = []
    # cell 0 is presumably a row label, so start at 1
    for i in range(1, len(dates)):
        dt = datetime.strptime(dates[i].string, "%d %b %Y")
        data.append((dt.date().isoformat(),
                     staff_counts[i].string,
                     student_counts[i].string,
                     other_counts[i].string,
                     totals[i].string))
    return data
236e65cb79839bcab5b910c686637cfda9da4223
695,999
def orbit_transfers(orbits, obj_name_1, obj_name_2):
    """
    Count the orbital transfers between the bodies two objects orbit:
    the size of the symmetric difference of their ancestor chains.

    >>> orbit_transfers([["COM", "B"], ["B", "C"], ["C", "D"], ["D", "E"], ["E", "F"], ["B", "G"], ["G", "H"], ["D", "I"], ["E", "J"], ["J", "K"], ["K", "L"], ["K", "YOU"], ["I", "SAN"]], "YOU", "SAN")
    4
    """
    parent = {child: center for center, child in orbits}

    def ancestors(name):
        """Set of all bodies between *name* and the root."""
        chain = set()
        node = name
        while node in parent:
            chain.add(parent[node])
            node = parent[node]
        return chain

    return len(ancestors(obj_name_1) ^ ancestors(obj_name_2))
58be5a73493e75ff01f62aea4e30237463b2850e
696,000
def is_pass(record):
    """Return whether a PyVCF record passes all filters.

    From the VCF spec, that's if it's 'PASS' or '.'. But PyVCF uses '[]'
    for 'PASS' (older versions used 'None'). PyVCF uses None for "." now,
    presumably to be pythonic (not [] is true; not None is true)
    """
    filters = record.FILTER
    if not filters:
        return True
    return filters == '.'
839c6989fa4dae677da0d60adac78861cf81d18b
696,001
from typing import Iterable


def hash_from_dict(dictionary):
    """
    Creates a hashable string from a dictionary
    that maps values to their assignments.

    Ex:
    dictionary={"A": 1, "B": 5, "C": 1}
      => "A=1,B=5,C=1"
    """
    parts = []
    for key in sorted(dictionary):
        piece = str(key)
        value = dictionary[key]
        # iterable or None values contribute only the key (original quirk:
        # strings are iterable, so string values are also dropped)
        if value is not None and not isinstance(value, Iterable):
            piece += "=" + str(value)
        parts.append(piece)
    return ",".join(parts)
1e906f178a6353e9bdac7bed929be1e0f16ae060
696,002
def default_leader():
    """Default leader for multi-region instances."""
    leader_region = "us-east4"
    return leader_region
999051105a4f93552d920734bef0cca4cc40ba03
696,003
def normalize_intensity(inten, max_num=None):
    """
    Scale intensities so the maximum equals ``max_num`` (default 1).

    :param inten: dict of fragment -> intensity (scaled in place and
        returned) or list of intensities (a new scaled list is returned)
    :param max_num: target maximum; falsy values fall back to 1
    :raises TypeError: for unsupported input types (the original used a
        bare ``raise`` outside an except block, which produced
        ``RuntimeError: No active exception to re-raise``)
    """
    if not max_num:
        max_num = 1
    if isinstance(inten, dict):
        scale = max_num / max(inten.values())
        for fragment in list(inten.keys()):
            inten[fragment] *= scale
        return inten
    if isinstance(inten, list):
        scale = max_num / max(inten)
        return [value * scale for value in inten]
    raise TypeError(
        "inten must be a dict or a list, got %s" % type(inten).__name__)
fd258cfb19eb666afb7aceef594df857fb28f0ac
696,005
def almost_equal_floats(value_1: float, value_2: float, *, delta: float = 1e-8) -> bool:
    """
    Return True if two floats are almost equal
    """
    difference = value_1 - value_2
    return -delta <= difference <= delta
ff1c29c57434a169824fe76e451053f3edc6e519
696,006
def stationObjectsByRiver(stations, rivers):
    """Returns a list of Monitoring Station objects which are on the rivers input"""
    return [
        station
        for river in rivers
        for station in stations
        if station.river == river
    ]
882c42acf4ef1d9af2aec8a1c4090f8bca4322e7
696,007
from typing import Tuple def _yymmdd2ymd(yymmdd: int) -> Tuple[int, int, int]: """yymmdd -> (year, month, day) Examples: >>> _yymmdd2ymd(321123) (32, 11, 23) >>> _yymmdd2ymd(320323) (32, 3, 23) """ year, mmdd = divmod(yymmdd, 10000) month, day = divmod(mmdd, 100) return year, month, day
9e9d3fa20b4684b603a203c5cc8c8284a8f45dd7
696,008
def _flatten_result(result): """ Ensure we can serialize a celery result. """ if issubclass(type(result), Exception): return result.message else: return result
7fbd9507b8b91398a6a0f3a8e530074d92c0ed5a
696,009
import subprocess


def run_bash(cmd):
    """Run a subprocess

    Args:
        cmd (str): command

    Returns:
        result (bytes): stdout + stderr

    Note: shell=True executes the string through /bin/bash — only pass
    trusted command strings.
    """
    proc = subprocess.Popen(
        cmd,
        stdout=subprocess.PIPE,
        stderr=subprocess.PIPE,
        shell=True,
        executable='/bin/bash',
    )
    stdout, stderr = proc.communicate()
    return stdout + stderr
5fd6df3ebb003f0933d847f1dbf07e167e70d7a3
696,010
from datetime import datetime


def test_data():
    """Get a list of 8 records that follow the schema in test_table.

    Each record carries a compound key (id, id2) plus one attribute of
    every scalar type, all stamped with the same creation time.
    """
    # single timestamp shared by every record so rows are comparable
    now = datetime.now()
    return [
        {"id": 1, "id2": "a1", "str_attr": "sadf", "float_attr": 1.111_111_111_1, "int_attr": 11, "bool_attr": True, "created_at": now},
        {"id": 2, "id2": "b2", "str_attr": "qoiwensa", "float_attr": 2.222_222, "int_attr": 22, "bool_attr": False, "created_at": now},
        {"id": 3, "id2": "c3", "str_attr": "jkwqheoi", "float_attr": 3.333_333, "int_attr": 33, "bool_attr": True, "created_at": now},
        {"id": 4, "id2": "d4", "str_attr": "8q92qjkwlh", "float_attr": 4.4444, "int_attr": 44, "bool_attr": False, "created_at": now},
        {"id": 5, "id2": "e5", "str_attr": "aoca.,209jk", "float_attr": 5.555, "int_attr": 55, "bool_attr": True, "created_at": now},
        {"id": 6, "id2": "f6", "str_attr": "ma0s1-l,mf", "float_attr": 6.6666, "int_attr": 66, "bool_attr": False, "created_at": now},
        {"id": 7, "id2": "g7", "str_attr": "na*&#@d", "float_attr": 7.777_777_7, "int_attr": 77, "bool_attr": True, "created_at": now},
        {"id": 8, "id2": "h8", "str_attr": "m(*@&%l", "float_attr": 8.888_888_8, "int_attr": 88, "bool_attr": False, "created_at": now},
    ]
ccc053a50c3b4b33dd6b846bbd804d2e73283fd6
696,011
def parse_param(param, forced_device=None):
    """ Extract account representation
    string format is <account_name>@<domain_name>[@<device_name>]
    If @<device_name> is absent, <domain_name> is a global domain name
    Else <domain_name> is a local domain name of the forced_device

    if 'forced_device' is not None
    <device_name> should be empty or equal to forced_device
    Else <device_name> can be another device
    """
    pieces = param.rsplit("@", 2)
    if len(pieces) < 2:
        return None
    account_name, domain_name = pieces[0], pieces[1]
    device_name = None
    if len(pieces) == 3:
        explicit_device = pieces[2]
        # a non-empty explicit device must agree with the forced device
        if explicit_device and forced_device and explicit_device != forced_device:
            return None
        device_name = forced_device or explicit_device
    return account_name, domain_name, device_name
ac16ca7715d2596798576da23474f083a0ab9ee9
696,012
def sqliteRowToDict(sqliteRow):
    """
    Unpacks a single sqlite row as returned by fetchone
    into a simple dict.

    :param sqliteRow: single row returned from fetchone DB call
    :return: dictionary corresponding to this row
    """
    return {column: value for column, value in zip(sqliteRow.keys(), sqliteRow)}
979bb63142a797749937ee382d9b9eb1c26dd7bd
696,013
import os


def get_image_path(raw_input_dir: str) -> list:
    """
    get image path and id from root resource path.
    :return: a list contains all images' path.
    """
    paths = []
    for root, _dirs, files in os.walk(raw_input_dir):
        paths.extend(os.path.join(root, name) for name in files)
    return paths
879f49f931d73fad34a4ae28b9094f70427a0d46
696,014
def _get_average_score(concept, _keywords): """Get average score of words in `concept`. Parameters ---------- concept : str Input text. _keywords : dict Keywords as keys and its scores as values. Returns ------- float Average score. """ word_list = concept.split() word_counter = len(word_list) total = float(sum(_keywords[word] for word in word_list)) return total / word_counter
ce930ae5871dfc218ae5f057f7dc76d64671a7f6
696,015
from typing import Counter


def print_machine_config(config):
    """Printable version of config"""
    cpu = config['cpu']
    cores = cpu['physical_cores']
    freq_mhz = int(round(cpu['max_freq_MHz'], 0))
    ram_gb = int(round(cpu['total_mem_GB'], 0))
    cpu_message = f'{freq_mhz} MHz CPU with {cores} cores, {ram_gb} GB RAM'

    # group identical GPUs (same handle and rounded memory) together
    device_counts = Counter(
        (handle, int(round(mem, 0)))
        for handle, mem in zip(config['gpu']['devices'], config['gpu']['mem_GB'])
    )
    gpu_message = ''
    for (handle, _mem), count in device_counts.items():
        gpu_message += f'{count} x {handle} GPU(s)'
    return ', '.join([cpu_message, gpu_message])
406ff85abe548ea64d1de7f1a65af3c3c4ea4902
696,016
import sys
import pickle
import subprocess
import os


def outside(code_str, *import_modules):
    """Runs a code string in a separate process, pickles the result, and returns it

    :param code_str: Python expression evaluated in the child interpreter
    :param import_modules: module names imported in the child before eval
    :return: unpickled value of the expression
    :raises ValueError: if the child wrote anything to stderr
    """
    import_modules_str = 'import %s' % ', '.join(import_modules) if import_modules else ''
    # the child reads this process's pickled sys.path from stdin so the
    # expression sees the same module search path, then writes the pickled
    # result to stdout; py2 vs py3 differ in binary stdio access
    if sys.version_info.major < 3:
        command_string = 'import sys, pickle; sys.path = pickle.loads(sys.stdin.read()); %s; sys.stdout.write(pickle.dumps(%s))' % (import_modules_str, code_str)
    else:
        command_string = 'import sys, pickle; sys.path = pickle.loads(sys.stdin.buffer.read()); %s; sys.stdout.buffer.write(pickle.dumps(%s))' % (import_modules_str, code_str)
    pickle_path = pickle.dumps(sys.path)
    p = subprocess.Popen([sys.executable, "-c", command_string],
                         stdin=subprocess.PIPE,
                         stdout=subprocess.PIPE,
                         env=os.environ)
    # NOTE(review): stderr is not captured (no stderr=PIPE), so `errors`
    # here is always None and the ValueError branch cannot fire — confirm
    # whether stderr capture was intended.
    results, errors = p.communicate(pickle_path)
    if errors and errors.strip():
        raise ValueError(errors)
    return pickle.loads(results)
ad94b7694b5681fb16a5db589e54b933e280266e
696,017
def issilk(data: bytes):
    """Return True if the audio data is in SILK v3 format.

    Tencent-wrapped silk files carry a leading ``\\x02`` byte before the
    9-byte ``#!SILK_V3`` magic.  The original sliced ``data[1:11]`` —
    10 bytes — which could never equal the 9-byte magic unless the input
    happened to be truncated; the slice is corrected to ``data[1:10]``.
    """
    magic = data[1:10] if data.startswith(b'\x02') else data[:9]
    return magic == b"#!SILK_V3"
6a964960e820eab1689fc5c4a760025a987ad98a
696,018
from datetime import datetime


def get_start_next_month(date):
    """
    Parameters
    ----------
    date: datetime.datetime

    Returns
    -------
    datetime.datetime
        date of the start of the next month
    """
    if date.month == 12:
        return datetime(date.year + 1, 1, 1)
    return datetime(date.year, date.month + 1, 1)
737ace5854593007ff62d169c3b69f3118056be1
696,019
def computePoint(triangle):
    """
    Computes the last point D in an ABC square triangle
    where a and c are the diagonal
    - triangle = [a,b,c]

    D--- C
    |   /|
    |  / |
    | /  |
    A----B
    """
    (xa, ya), (xb, yb), (xc, yc) = triangle
    # D = A + C - B (sub-triangle congruence / parallelogram rule)
    xd = xc - (xb - xa)
    yd = (ya - yb) + yc
    return (xd, yd)
e7b37852440eaf43844d5d6e1cd2c2921bc9e6b3
696,020
def _funcIsMethod(stackFromFunc):
    """
    Determine whether a function being decorated is actually a method of a
    class, given the stack frames above the decorator invocation.

    :param stackFromFunc: list of frame records (as returned by
        ``inspect.stack``) starting at the decorated function's frame
    :return: True if the enclosing frame is module level and its source
        line starts a class whose name matches the function frame's scope
    """
    funcFrame = stackFromFunc[0]
    # frame record index 3 is the code context's function/scope name
    potentialClassName = funcFrame[3]
    nextFrame = stackFromFunc[1]
    # heuristic: a method's defining frame sits directly under a module
    # frame whose current source line is `class <name>...` — this relies
    # on source being available (index 4 is the code context lines) and
    # will not see decorated methods of nested/indented classes
    return nextFrame[3] == '<module>' and \
        nextFrame[4][0].startswith('class ' + potentialClassName)
ecb80a8a7565b33aeae72672665c1e21a76cf2c7
696,021
def decrypt(key, enc):
    """Simple decryption. Reverse of the encryption function

    Please note that this is quite weak security, and completely
    breakable with little or no effort. If you take issue with this
    method, don't hesitate to offer your own implementation. I know very
    little about security and would be happy for someone to school me on
    this.

    Keyword arguments:
    key -- the key to use for encryption. Supa-secret!
    msg -- the text to be decrypted.

    Returns enc in decrypted form.
    """
    key_length = len(key)
    decoded = []
    for index, char in enumerate(enc):
        shift = ord(key[index % key_length])
        decoded.append(chr((ord(char) - shift) % 127))
    return ''.join(decoded)
c277ccc6741bc3c471351429fd6f6a433a28ef40
696,022
def get_position_patterns(contours, hierarchy):
    """
    get qr code position pattern

    A contour is a position pattern candidate when its child chain
    (hierarchy[i][2] links) is at least 5 levels deep.

    :param contours:
    :param hierarchy:
    :return: found position pattern index
    """
    found = []
    for index in range(len(contours)):
        depth = 0
        node = index
        while hierarchy[node][2] != -1:
            node = hierarchy[node][2]
            depth += 1
        if depth >= 5:
            found.append(index)
    return found
c560b60838823b14ac58f3ef1a0829afabe7a588
696,023
def filter_data(imgs, dt, inds):
    """
    Picks out ind rows from various fields in dt.

    :params imgs: list of cv2 images (numpy arrays)
    :params dt: dictionary with data captured during calibration
    :params inds: numpy array of indices to sample from imgs and dt

    :return: list of images referred by inds, dictionary dt with data
             sampled using indices.
    :rtype: list, dict
    """
    sampled_imgs = [imgs[i] for i in inds]
    calibration_keys = ['rgb_tf_tree_quat', 'rgb_tf_tree_matrix', 'states',
                        'ar_quat', 'rgb_tf_tree_trans', 'ar_trans',
                        'arm_pose_ids', 'ar_tf_tree_quat',
                        'ar_tf_tree_trans', 'ar_img_loc']
    for key in calibration_keys:
        dt[key] = dt[key][inds, ...]
    return sampled_imgs, dt
a5ece4f9f415c6a72f1d3d5597199dfcf7df5c2d
696,024
def get_metric_scores(ground_truth, simulation, measurement, metric,
                      measurement_kwargs=None, metric_kwargs=None):
    """
    Function to combine measurement and metric computations

    :param ground_truth: pandas dataframe of ground truth
    :param simulation: pandas dataframe of simulation
    :param measurement: measurement function
    :param metric: metric function
    :param measurement_kwargs: extra kwargs for the measurement function
    :param metric_kwargs: extra kwargs for the metric function
    :return: metric computation for measurements calculated from gold and
        simulation

    The kwargs defaults were mutable ``{}`` dicts shared across calls;
    None sentinels are used instead (passing ``{}`` still works).
    """
    measurement_kwargs = {} if measurement_kwargs is None else measurement_kwargs
    metric_kwargs = {} if metric_kwargs is None else metric_kwargs
    print("Calculating {} for {}".format(metric.__name__, measurement.__name__))
    measurement_on_gt = measurement(ground_truth, **measurement_kwargs)
    measurement_on_sim = measurement(simulation, **measurement_kwargs)
    return (measurement_on_gt,
            measurement_on_sim,
            metric(measurement_on_gt, measurement_on_sim, **metric_kwargs))
4087e60ce0578d11756f449e776e01ee81b6e4ac
696,025
def solution(x: int, y: int, d: int) -> int:
    """
    >>> solution(1, 1, 1)
    0
    >>> solution(10, 85, 30)
    3
    >>> solution(10, 105, 30)
    4
    >>> solution(1, 1000000000, 3)
    333333333

    :param x: initial position
    :param y: final position
    :param d: pase
    :return: The amount of jumps needed to go from @x to @y with a pase of @d
    """
    distance = abs(x - y)
    # ceiling division without floats: -(-a // b) == ceil(a / b)
    return -(-distance // d)
b1b3651bf47ec476df71908ef6119f8143512b87
696,026
import os


def descfile(font):
    """Get the path of the DESCRIPTION file of a given font project."""
    if not font:
        return None
    candidate = os.path.join(os.path.dirname(font), "DESCRIPTION.en_us.html")
    return candidate if os.path.exists(candidate) else None
daaf649979305680fb4ff614cf4a2682ab7ff533
696,028
import os


def get_layers(loc):
    """
    explore a repository and get image file names containing layers

    :param loc: path to the repository containing files (str)
    :return: list of image file names to transform (without path);
             False if the directory is empty; [] if it cannot be listed

    The original caught bare ``Exception``; only OSError (bad path,
    permissions) is expected from ``os.listdir``, so the handler is
    narrowed — other errors now propagate instead of being swallowed.
    """
    # might be useful to add a test on whether the file is compatible with
    # image transformation (tiff, png, jpg...)
    try:
        entries = os.listdir(loc)
    except OSError as err:
        print(err)
        return []
    if not entries:
        return False
    layers = []
    for name in entries:
        if name == ".DS_Store":
            continue
        if os.path.isfile(os.path.join(loc, name)):
            layers.append(name)
    return layers
9604e61ebd3d3450c64d454e2b39af1e45eb631c
696,029
def parse_results(results, skip = 16): """ convert the print outs of a run to a csv file """ # analyze the results domain = [] variant = None total = [] halo = [] pack = [] wait = [] put = [] rows = [] counter = 0 with open(results, "r") as file: lines = file.readlines() for line in lines: if line.startswith(" - domain "): domain = [int(x) for x in line[len(" - domain "):].split(", ")] # activate this if you want to skip the first measurements counter = skip elif line.startswith(" - variant "): variant = str(line[len(" - variant "):]).rstrip() elif line.startswith(" - total time (min/median/max) [ms]: "): start = len(" - total time (min/median/max) [ms]: ") total = [float(x) for x in line[start:].split("/")] elif line.startswith(" - halo time (min/median/max) [ms]: "): start = len(" - halo time (min/median/max) [ms]: ") halo = [float(x) for x in line[start:].split("/")] # elif line.startswith(" - pack time (min/median/max) [ms]: "): # start = len(" - pack time (min/median/max) [ms]: ") # pack = [float(x) for x in line[start:].split("/")] # elif line.startswith(" - wait time (min/median/max) [ms]: "): # start = len(" - wait time (min/median/max) [ms]: ") # wait = [float(x) for x in line[start:].split("/")] # elif line.startswith(" - put time (min/median/max) [ms]: "): # start = len(" - put time (min/median/max) [ms]: ") # put = [float(x) for x in line[start:].split("/")] if counter == 0: row = [variant] row = row + [str(x) for x in domain] row = row + [str(x) for x in total] row = row + [str(x) for x in halo] row = row + [str(x) for x in pack] row = row + [str(x) for x in wait] row = row + [str(x) for x in put] rows.append(row) counter = max(0, counter - 1) return rows
cde628726ce436f2198058252db4828754295f14
696,030
def keywords2tag(tag):
    """Remove the trailing number from a tag, e.g. "span1" -> "span".

    Only digits at the *end* of the tag are removed, which is what the
    original docstring promised.  The previous implementation counted
    every digit anywhere in the string and chopped that many characters
    off the end, mangling tags such as "h1x" -> "h1".

    :param tag: tag name, possibly suffixed with an index number
    :return: tag with any trailing digits stripped
    """
    return tag.rstrip("0123456789")
2e31e195c0fb7ceedc71418cd0382bbee95f52b8
696,032
def install_plugins_models(installed_app, config):
    """Append every configured yablog plugin to INSTALLED_APPS.

    :param installed_app: the current INSTALLED_APPS sequence
    :param config: settings object exposing a ``Plugins`` iterable
    :return: installed_app extended with one entry per plugin
    """
    for plugin in config.Plugins:
        # single-element tuple: the trailing comma is required
        installed_app += ('yablog.plugins.%s' % plugin,)
    return installed_app
e8b7b1a4995004fbdebeda75447596fb70e9d6c6
696,033
import json


def util_json_get_value(s_json, key):
    """Return the value stored under ``key`` in a JSON document.

    :param s_json: JSON text to parse
    :param key: key to look up in the decoded document
    :return: the value for ``key``, or '' when the document is not valid
        JSON, the key is absent, or the document is not a JSON object

    Fix: the original only caught KeyError, so a document that decoded to
    a list or scalar raised TypeError instead of honoring the '' contract.
    """
    try:
        document = json.loads(s_json, strict=False)
    except ValueError:
        return ''
    try:
        return document[key]
    except (KeyError, TypeError, IndexError):
        # TypeError/IndexError: decoded document is not an object keyed
        # the way the caller expects
        return ''
773ae165ac58f4ac20772d5c344aca241c74786c
696,034
import torch


def classification_margin(output, true_label):
    """Classification margin: `probs_true_label - probs_best_second_class`.

    Parameters
    ----------
    output: torch.Tensor
        output vector (1 dimension)
    true_label: int
        true label for this node

    Returns
    -------
    float
        classification margin for this node
    """
    probs = torch.exp(output)
    true_prob = probs[true_label].clone()
    # zero out the true class so the max below finds the runner-up
    probs[true_label] = 0
    runner_up = probs[probs.argmax()]
    return (true_prob - runner_up).item()
db9e557d1ed52249554ff1beb96725616d2fa426
696,035
def format_builtin():
    """format: Formated representation of an object."""
    pct = format(1, '.0%')  # the format() builtin -> "100%"
    return pct + " ready"
6b39ec8e61908f3bcfba8bb34c18d2312b6e830e
696,036
import hashlib
import json


def treehash(var):
    """
    Returns the hash of any dict or list, by using a string conversion
    via the json library.  Keys are sorted so equal structures hash
    equally regardless of insertion order.
    """
    canonical = json.dumps(var, sort_keys=True).encode("utf-8")
    digest = hashlib.sha256(canonical)
    return digest.hexdigest()
e196a8d601b59a893bf05bc903aa7e3af4927cef
696,038
def format_kv(batched):
    """Format raw data into key-value format.

    Each event dict becomes one 'key="value" key="value" \\n' string;
    the list of those strings is returned.
    """
    aggregated_kv = []
    for event in batched:
        pairs = ''.join('%s="%s" ' % (key, value)
                        for key, value in event.items())
        aggregated_kv.append(pairs + "\n")
    return aggregated_kv
39df6dc9fe5ebc8efafec0460a98d0c3457f6334
696,040
def is_float(dtype):
    """Return True if datatype dtype is a float kind"""
    name = dtype.name
    return 'float' in name or name in ('single', 'double')
3c6301e6d89fb8d825ac4181ca02b5cf95028066
696,041
def _find_method(obj, string): """Find methods in object that starts with `string`. """ out = [] for key in dir(obj): if key.startswith(string): out.append(getattr(obj, key)) return out
1250a6dc39d2ac47ca4a5e561f55f4fb2b456c96
696,042
def index_value_append(spl, sp_num, sample_vec, l1=0, l2=0):
    """Build parallel Indices/Values lists from two sampling rows.

    Parameters
    ----------
    spl:
        Sample list of rows; rows ``l1`` and ``l2`` are paired to form
        2-D sampling indices.
    sp_num: int
        The sample number (how many entries to emit).
    sample_vec:
        Vector of sampled values, indexed 0..sp_num-1.
    l1, l2:
        Which rows of ``spl`` supply the first/second index coordinate.

    Returns
    -------
    indices:
        List of ``[spl[l1][i], spl[l2][i]]`` position pairs.
    values:
        List of the first ``sp_num`` entries of ``sample_vec``.
    """
    indices = [[spl[l1][i], spl[l2][i]] for i in range(sp_num)]
    values = [sample_vec[i] for i in range(sp_num)]
    return indices, values
fec835a4e933a3e6f75d6afa0ac7b693304ba37f
696,044
def offset_to_matlab(offset):
    """Swap the first two components of an offset to MATLAB order, in place.

    Bug fix: the original aliased ``new = offset`` and wrote ``new[1]``
    *before* reading ``offset[1]``, so both slots ended up holding the
    original ``offset[0]``.  Tuple assignment performs a true swap.

    :param offset: base offset (mutable sequence with at least 2 elements)
    :return: the same sequence with elements 0 and 1 swapped
    """
    offset[0], offset[1] = offset[1], offset[0]
    return offset
a7abede8dd9795aa26da88cb2bdd3ab199b8cf00
696,045
def split_list(ls, size):
    """
    Split list into chunks of at most `size` items.

    :param list ls: list
    :param int size: size
    :return list: result

    >>> split_list([1, 2, 3, 4], 3)
    [[1, 2, 3], [4]]
    """
    if size == 0:
        # size 0 would never advance; return the input unchanged
        return ls
    chunks = []
    for start in range(0, len(ls), size):
        chunks.append(ls[start:start + size])
    return chunks
de28981d576122f99be34a57c94d73457b63c04b
696,046
def getColumnsEndingAt(columns, zLevel):
    """Returns columns w/ endPoint at zLevel (within a 0.001 tolerance).

    Fix: ``dict.iteritems()`` is Python 2 only and raises AttributeError
    on Python 3; ``dict.items()`` behaves identically here.

    :param columns: container exposing an ``inventory`` dict of columns
    :param zLevel: elevation to compare against each column's endJoint.z
    :return: dict mapping ``uniqueName`` -> column for matching columns
    """
    columnGroup = {}
    for columnID, column in columns.inventory.items():
        diff = abs(zLevel - column.endJoint.z)
        if diff <= 0.001:
            columnGroup[column.uniqueName] = column
    return columnGroup
4f6b7aac922bd5985b6faeb509d26bf6aec98629
696,047
def available_phrases(fs, phrases):
    """
    return: set of phrase indexed tuple like
    {((1, "I"), (2, "am")), ((1, "I"),) ...}

    Every contiguous span of ``fs`` that appears (as a tuple) in
    ``phrases`` is added with 1-based positions attached.
    """
    found = set()
    n = len(fs)
    for start in range(n):
        for stop in range(start + 1, n + 1):
            span = tuple(fs[start:stop])
            if span in phrases:
                found.add(tuple(enumerate(span, start + 1)))
    return found
fee8e4f355786946532e64cc51d8cd1d4d15f12a
696,048
import inspect
import os


def is_blazing_query():
    """
    Method that returns true if caller of the utility is a blazing query,
    returns false otherwise

    Assumes that caller is 3 levels above the stack
    query_of_interest -> utils.push_to_google_sheet -> utils.build_payload -> utils.is_blazing_query

    Another potential solution is checking sys.modules.get("blazing") to check blazing is imported
    """
    # Frame 3 up the stack is assumed to be the query module itself; this
    # breaks silently if the call chain above ever changes depth.
    query_filename = inspect.stack()[3].filename
    # Heuristic: blazing queries keep "sql" in their file name — presumably
    # a project naming convention; verify against actual query files.
    return "sql" in os.path.basename(query_filename)
1f5cc26e059d0d4c7b6ed5917d7d9d19aefb027b
696,049
def hpo_job_describe(sm_client, tuning_job_name):
    """API call to describe a hyperparameter tuning job.

    :param sm_client: SageMaker client (boto3-style — assumed; confirm caller)
    :param tuning_job_name: name of the tuning job to look up
    :return: raw ``describe_hyper_parameter_tuning_job`` response
    :raises Exception: when the job does not exist; the client's
        ResourceNotFound is translated into a readable message
    """
    try:
        response = sm_client.describe_hyper_parameter_tuning_job(
            HyperParameterTuningJobName=tuning_job_name
        )
        return response
    except sm_client.exceptions.ResourceNotFound:
        raise Exception(f"Hyperparameter job not found: '{tuning_job_name}'")
98bb1ad03883e862a8730ec4740c3bb92b1a4830
696,050
import traceback


def tb_log_str(exception) -> str:
    """Render an exception, including its full traceback, as one string."""
    parts = traceback.format_exception(None, exception, exception.__traceback__)
    return "".join(parts)
6776a7416cb512bf23e6557833e3a95779172bd4
696,051
def addB(n1, n2):
    """Add two binary-digit strings without converting them to ints.

    Works recursively on the last bit of each operand, rippling a carry
    by recursively adding "1" to the remaining high bits.
    """
    # base cases: an empty operand contributes nothing
    if n1 == "":
        return n2
    if n2 == "":
        return n1
    a, b = n1[-1], n2[-1]
    high1, high2 = n1[:-1], n2[:-1]
    if a == "0" and b == "0":
        return addB(high1, high2) + '0'
    if (a == "0" and b == "1") or (a == "1" and b == "0"):
        return addB(high1, high2) + '1'
    if a == "1" and b == "1":
        # 1 + 1 = 10: emit 0, carry 1 into the high bits
        return addB(high1, addB("1", high2)) + '0'
    return addB(high1, addB("1", high2)) + '1'
0c43ece2c83063401ad78182134570a67b1bd657
696,052
import requests


def get_agol_token(username, password):
    """
    purpose:
        get a security token from ArcGIS Online
    arguments:
        username: string
        password: string
    return value:
        string token, None if error
    """
    try:
        url = "https://www.arcgis.com/sharing/rest/generateToken"
        params = {
            "username": username,
            "password": password,
            # generateToken requires a referer; the value is arbitrary here
            "referer": "something",
            "f": "json"}
        result = requests.post(url, params).json()
        # a response without a "token" key (e.g. bad credentials) yields None
        return result.get("token")
    except Exception:
        # network/JSON failures deliberately swallowed; caller checks for None
        return None
9086fbb7e199c7dd9410bfe4f89b65d34a2424be
696,055
from datetime import datetime


def check_date(date):
    """
    Validate a user-entered date string and re-prompt until it is valid.

    A date is accepted when it parses as 'YYYY.MM.DD' and is not in the
    future; otherwise the user is asked (prompts are in Russian) to type
    it again.

    :param date: date string expected in 'YYYY.MM.DD' format
    :return: the first accepted date string
    """
    # True when the date parses and is today or earlier
    get_date = lambda d: datetime.strptime(d, '%Y.%m.%d').date() <= datetime.today().date()
    ex = False
    while ex != True:
        try:
            if get_date(date) != True:
                # future date: tell the user it is wrong and re-prompt
                print(f"Вы неправильно ввели дату {date} Введите заново")
                date = input("Введите заново и нажмите ENTER: ")
            else:
                print("Ok ")
                ex = True
                return date
        except ValueError:
            # unparsable input: silently re-prompt
            date = input("Введите заново и нажмите ENTER: ")
fd651552148fa57c42c340ffe796689de5131496
696,056
def smallerNumbersThanCurrent(nums):
    """For each element, count how many elements are strictly smaller.

    Uses a sorted copy plus binary search (O(n log n)) instead of the
    original O(n^2) pairwise comparison; results are identical because
    ``bisect_left`` returns exactly the number of elements < x.

    :type nums: List[int]
    :rtype: List[int]
    """
    from bisect import bisect_left
    ordered = sorted(nums)
    return [bisect_left(ordered, x) for x in nums]
880ccbb19767c4643075d9a7947a2eb369063e2b
696,057
def no_annotation(num: int):
    """
    This function contains no return annotation

    >>> no_annotation(5)
    10
    """
    result = num + 5
    return result
e0bdfde9a58d96140978a45d627857f7f51a5667
696,058
def read_grammar(grammar_file):
    """
    Reads in the given grammar file and splits it into separate lists for
    each rule.

    :param grammar_file: the grammar file to read in.
    :return: list of rules; each rule is the line's tokens with "->" removed.
    """
    rules = []
    with open(grammar_file) as cfg:
        for line in cfg:
            rules.append(line.replace("->", "").split())
    return rules
c9320a4126ed6bc05a5df8b05c9997890c7f620a
696,059
from pathlib import Path


def list_files(dirpath: str, pattern: str = "*.csv") -> list:
    """List the entries in *dirpath* matching *pattern*, as Path objects."""
    return list(Path(dirpath).glob(pattern))
0c291d6818c38f6f9219f92b900e5fd8ed5960d6
696,060
def getNetworkParameters(channels: int, layers: int):
    """
    Returns the command line arguments for that specific network.

    The "--layers" value lists the hidden feature size once per hidden
    layer; the last layer is implicit, hence ``layers - 1`` entries.

    :param channels: feature size of every hidden layer
    :param layers: total layer count (last one implicit)
    :return: argv-style list ``["--layers", "c:c:...:c"]``
    """
    hidden = [str(channels)] * (layers - 1)
    return ["--layers", ":".join(hidden)]
b06fca71d4daee46a864d9a235361498de4d0442
696,061
def _plot(ax, coords, pos_columns, **plot_style):
    """
    This function wraps Axes.plot to make its call signature the same for
    2D and 3D plotting. The y axis is inverted for 2D plots, but not for
    3D plots.

    Parameters
    ----------
    ax : Axes object
        The axes object on which the plot will be called
    coords : DataFrame
        DataFrame of coordinates that will be plotted
    pos_columns : list of strings
        List of column names in x, y(, z) order.
    plot_style : keyword arguments
        Keyword arguments passed through to the `Axes.plot(...)` method

    Returns
    -------
    Axes object

    NOTE(review): implicitly returns None when len(pos_columns) is neither
    2 nor 3 — confirm callers never pass other lengths.
    """
    if len(pos_columns) == 3:
        # 3D: the z values go through the dedicated `zs` keyword
        return ax.plot(coords[pos_columns[0]], coords[pos_columns[1]],
                       zs=coords[pos_columns[2]], **plot_style)
    elif len(pos_columns) == 2:
        return ax.plot(coords[pos_columns[0]], coords[pos_columns[1]],
                       **plot_style)
80cadfff00f864b9d38e51768e68674dfc981a06
696,062
def inverse_names(iformat):
    """ Inverses an input format name to generate the config used.

    Expects names shaped like ``depth_<style>[seed][_<number>]`` where
    <style> is one of height/distance/xyz; returns ``<prefix>_<style>``
    or None when the name does not match that pattern.
    """
    iformat_split = iformat.split("_")
    # only "depth_*" names are invertible
    if "depth" != iformat_split[0]:
        return None
    # Remove _<number>
    iformat = iformat if len(iformat_split) < 3 else "_".join(iformat_split[:-1])
    # Remove random seeds if any
    for style in ["height", "distance", "xyz"]:
        if style in iformat:
            break
    else:
        return None
    ssplit = iformat.split(style)
    # a 6-character tail after the style token is treated as a random seed
    if len(ssplit)>2 and len(ssplit[-1])==6:
        iformat = iformat[:-6]
    sbase = iformat.split(style)[0]
    # glue prefix and style back together with exactly one underscore
    style = sbase+"_"+style if sbase[-1] != '_' else sbase+style
    return style
655a565a07f0510851fc46544c6fcbf901736308
696,063
def get_i_colour(axis_handle) -> int:
    """
    Get index appropriate to colour value to plot on a figure
    (will be 0 if brand new figure)
    """
    if axis_handle is None:
        return 0
    n_lines = len(axis_handle.lines)
    return n_lines - 1 if n_lines else 0
b5001da3325168e0f359596bfe65487708f59e3b
696,064
def read_all(port, chunk_size=200):
    """Read all characters on the serial port and return them.

    Reads in ``chunk_size`` pieces; each read waits up to the port's
    timeout, and a short chunk signals the device has stopped sending.
    """
    if not port.timeout:
        raise TypeError('Port needs to have a timeout set!')
    buffer = b''
    while True:
        chunk = port.read(size=chunk_size)
        buffer += chunk
        # a partial chunk means the read timed out: nothing more to fetch
        if len(chunk) != chunk_size:
            break
    return buffer
cf894c2449fa4eba763dc7bf4da86b0072a78a19
696,065
import base64
import pickle


def encode_store_data(store_data):
    """
    Encode store_data dict into a JSON serializable dict

    This is currently done by pickling store_data and converting to a
    base64 encoded string. If HoloViews supports JSON serialization in
    the future, this method could be updated to use this approach instead

    Args:
        store_data: dict potentially containing HoloViews objects

    Returns:
        dict that can be JSON serialized
    """
    raw = pickle.dumps(store_data)
    return {"pickled": base64.b64encode(raw).decode("utf-8")}
0a576a8146c0657610b508ebc6338d3ed6790b70
696,066
from typing import List
import os


def listImages(dir: str) -> List[str]:
    """
    List all images (where zoom accepts: jpeg jpg png) in a directory.
    """
    names = []
    for entry in os.listdir(dir):
        if not os.path.isfile(os.path.join(dir, entry)):
            continue
        if entry.endswith((".png", ".jpeg", ".jpg")):
            names.append(entry)
    return names
653ce307f9e2ebfd46dad9381ae2dedfdbc183dc
696,067
def dependabot_prs(repo):
    """Returns all open repository PRs created by Dependabot."""
    open_prs = repo.get_pulls(state="open")
    return [pr for pr in open_prs if "dependabot" in pr.user.login]
3963c6e39f99257c0e259abc7fe6f6fdffd00404
696,068
import warnings


def count_free_closing_brackets(input_string, start_index=0):
    """
    DEPRECATED - No more needed since eggPlant 20.1.0 as it uses new list
    format and quotes string value in returned lists.
    The function has been used to handle lists returned by eggplant which
    contain unpaired closing brackets.
    Examples of returned lists:
    - eggPlant before 20.1.0 - (xyz, (1234,he(llo), abc)))
    - eggPlant after 20.1.0 - ["xyz", [1234,"he(llo)", "abc)"]]
    Returns count of free list closing brackets in the string starting
    from specified index.

    :param input_string: eggPlant list text to inspect
    :param start_index: position from which counting starts
    :return: int, closing brackets minus opening brackets that look like
        list delimiters (heuristic — see comments below)
    """
    warnings.warn("The 'count_free_closing_brackets' function should not be needed since eggPlant 20.1.0"
                  " as it uses new list format and quotes string value in returned lists",
                  DeprecationWarning, 2)
    count_closing = 0
    count_starting = 0
    length = len(input_string)
    count_closing_all = 0
    count_starting_all = 0
    # remember the last bracket that did NOT look like a list delimiter;
    # used below to decide whether the fallback pass is needed
    i_remember_closing = 0
    i_remember_starting = 0
    for i in range(start_index, length):
        if input_string[i] == ')':
            # is this bracket a list closure?
            if i < length - 1:
                # don't count next brackets on the string end - they're definitely list end
                # we need only brackets which might close a list - and they must be followed by these characters
                if not input_string[i + 1] in [')', ',']:
                    i_remember_closing = i
                    continue
            # alright, this seems to be a list closure
            count_closing += 1
        # calculate list open brackets as well - we deduct them from all found closure brackets
        if input_string[i] == '(':
            if i > 0:
                # but don't check next brackets on the string beginning - they're definitely list start
                # we need only brackets which might start a list - and they must be followed by these characters
                if not input_string[i - 1] in ['(', ',']:
                    i_remember_starting = i
                    continue
            # alright, this seems to be a list start
            count_starting += 1
    if i_remember_closing + i_remember_starting == 0:
        return count_closing - count_starting
    # Error handling for lists in lists and strings with brackets
    # e.g. "(3163,(302,336),(300,270,1828,990),(280,318,488,990),S Spandau DB-Berlin Westkreuz (Stadtbahn))"
    # Second pass: scan the WHOLE string (not just from start_index) to see
    # whether delimiter-looking brackets are balanced overall.
    for i in range(0, length):
        if input_string[i] == ')':
            # check if this bracket is a list closure
            if i < length - 1:
                # but don't check next brackets on the string end - they're definitely list end
                # we need only brackets which might close a list - and they must be followed by these characters
                if not input_string[i + 1] in [')', ',']:
                    continue
            # alright, this seems to be a list closure
            count_closing_all += 1
        # calculate list open brackets as well - we deduct them from all found closure brackets
        if input_string[i] == '(':
            if i > 0:
                # but don't check next brackets on the string beginning - they're definitely list start
                # we need only brackets which might start a list - and they must be followed by these characters
                if not input_string[i - 1] in ['(', ',']:
                    continue
            # alright, this seems to be a list start
            count_starting_all += 1
    # alright, this seems to be a list start
    # When the delimiter counts disagree but raw '(' and ')' totals match,
    # one of the remembered "non-delimiter" brackets was actually a delimiter.
    if count_starting_all != count_closing_all and input_string.count('(') == input_string.count(')'):
        if count_starting_all < count_closing_all and i_remember_starting > 0:
            count_starting += 1
        elif i_remember_closing > 0:
            count_closing += 1
    return count_closing - count_starting
826ed3233a68e3fdc279d1d72305fff9768d737c
696,069
def find_feature_by_gi(gid, record, ftype):
    """
    Loops over the ftype features in the passed SeqRecord, checking
    db_xref qualifiers for a match to the passed gid.  Returns the first
    feature identified, or None if no feature found.
    """
    target = 'GI:%s' % gid
    for feature in record.features:
        if feature.type != ftype:
            continue
        try:
            xrefs = feature.qualifiers['db_xref']
        except KeyError:
            # feature has no db_xref qualifier at all — skip it
            continue
        if target in xrefs:
            return feature
    return None
050c5464a8d425f5db53440abd79c64b2938f81b
696,070
from datetime import datetime


def format_date(date_str):
    """ format experience summary api date to mm/dd/yyyy"""
    # first 10 chars are the YYYY-MM-DD portion; any time suffix is dropped
    day = datetime.strptime(date_str[:10], '%Y-%m-%d')
    return day.strftime("%m/%d/%Y")
2e6f0739fe269ff828bc19c0754909062d160254
696,071
def loglik_nats(model, x):
    """Compute the log-likelihood in nats (negative mean log-probability)."""
    mean_logprob = model.log_prob(x).mean()
    return -mean_logprob
f929be38cb70fe56b6bb1a0e5cc21cf02fead3b6
696,072
def _none_not_equal_scalar(x, y):
    """
    Determine if number not_equals none.

    Args:
        x: None.
        y: Number.

    Returns:
        bool, return True.
    """
    # None is never equal to a number, so "not equal" holds unconditionally.
    return True
9e29c770f0f3003a61199b4e340a2341c9df0e9f
696,073
def _get_item(node): """ Returns the item element of the specified node if [the node] is not null. :param node: The node to extract the item from. :return: A node's item. """ return node.item if node is not None else None
42dff5ef2e98a0dd78b822ee29a75c72d737e23f
696,075
def countWords(speech):
    """Count the occurrences of each word in *speech*.

    Uses the ``dict.get`` accumulation idiom instead of an explicit
    membership test — one lookup per word instead of two, same result.

    :param speech: iterable of words (any hashable items)
    :return: dict mapping word -> occurrence count
    """
    counts = {}
    for word in speech:
        counts[word] = counts.get(word, 0) + 1
    return counts
630685207d57098c18f1084e60a8f1dd21c92638
696,076
def change_action_status(action_type, new_status):
    """ This function changes the status of an action type.

    Replaces the last dot-separated segment of ``action_type`` with
    ``new_status``.
    """
    prefix = action_type.split('.')[:-1]
    return '.'.join(prefix) + '.' + new_status
1032486b1f5b32a36806d397a68f42f549b6228c
696,077