content stringlengths 35 416k | sha1 stringlengths 40 40 | id int64 0 710k |
|---|---|---|
def findFirstWorldFilename(worldsFilename):
    """Return the first world file name listed in *worldsFilename*.

    Args:
        worldsFilename: Path to a text file whose first line names a world file.

    Returns:
        The first line of the file with surrounding whitespace stripped.
    """
    # `with` guarantees the handle is closed even if readline raises
    # (the original leaked the handle on error).
    with open(worldsFilename) as world_file:
        return world_file.readline().strip()
def get_crc(msg, div='1100001100100110011111011', code='0'*24):
    """Compute a CRC checksum via mod-2 polynomial division.

    Based on https://gist.github.com/evansneath/4650991

    Args:
        msg: Bit string to protect (e.g. '1011...').
        div: Generator polynomial as a bit string (default: CRC-24Q).
        code: Zero padding appended to the message; its length equals the
            length of the returned CRC.

    Returns:
        The CRC bit string (the last ``len(code)`` bits of the remainder).
    """
    # Work on a mutable list of bits: message followed by the zero padding.
    bits = list(msg) + list(code)
    poly = list(div)
    # Slide the divisor across every original message bit position.
    for start in range(len(bits) - len(code)):
        if bits[start] != '1':
            continue
        # XOR (mod-2 subtraction) the divisor into the current window.
        for offset, poly_bit in enumerate(poly):
            bits[start + offset] = str((int(bits[start + offset]) + int(poly_bit)) % 2)
    # The remainder left in the padding area is the CRC.
    return ''.join(bits[-len(code):])
def getpopularnodes(rankednodes, n):
    """Return the first ``n`` ranked nodes and their size ratios.

    Args:
        rankednodes: Mapping of node id (int-like key) to rank value.
        n: Number of nodes to take; falsy values default to 30.

    Returns:
        Tuple ``(nodelist, nodesizeratios)``: node ids as ints and each
        rank value divided by 10 as a float.
    """
    if not n:
        n = 30
    limit = int(n)
    nodelist = []
    nodesizeratios = []
    for key in rankednodes:
        if len(nodelist) >= limit:
            break
        nodelist.append(int(key))
        nodesizeratios.append(float(rankednodes[key]) / 10)
    print("Got popular nodes")
    return nodelist, nodesizeratios
def templatize(names, claim):
    """Transform a claim into a regex-capable template.

    Replaces every known name with the placeholder "name", both roles
    ("knave"/"knight") with "k_id", and " am " with " is ", then
    lowercases the result.

    Args:
        names: Iterable of name strings to anonymize.
        claim: The claim sentence to templatize.

    Returns:
        The lowercased template string.
    """
    # str.replace already substitutes every occurrence, so one call per
    # token suffices; the original `while token in claim` loops could spin
    # forever when a replacement reintroduced its own search string
    # (e.g. a name that is a substring of "name").
    for name in names:
        claim = claim.replace(name, "name")
    claim = claim.replace("knave", "k_id")
    claim = claim.replace("knight", "k_id")
    claim = claim.replace(" am ", " is ")
    return claim.lower()
import re
def get_annual_bonus(string):
    """Find the annual bonuses offered by a particular card.

    Scans each line of *string* for savings/credit mentions, skips lines
    that compare cards against each other, and sums the largest dollar
    amount (>= $50) found on each qualifying line.
    """
    wanted = ('saving', 'credits')                      # words that signal a bonus line
    unwanted = ('higher', 'lower', 'worse', 'better')   # comparison lines are skipped
    total = 0
    for line in string.split('\n'):
        lowered = line.lower()
        if not any(word in lowered for word in wanted):
            continue
        # NOTE: matching the original behavior, comparison words are
        # checked case-sensitively against the raw line.
        if any(word in line for word in unwanted):
            continue
        amounts = [float(match[1:]) for match in re.findall(r'\$\d+', line)]
        big_amounts = [amount for amount in amounts if amount >= 50]
        if big_amounts:
            total += max(big_amounts)
    return total
import requests
import json
def createKolideLiveQuery(base_url, kolide_token, osquery_query, kolide_hosts=None, kolide_labels=None) -> int:
    """Start a Kolide live-query campaign and return its campaign ID.

    Args:
        base_url: Kolide base URL (no trailing slash).
        kolide_token: Kolide JWT used for Bearer authentication.
        osquery_query: The osquery SQL to run on the endpoints.
        kolide_hosts: Optional list of host names to target.
        kolide_labels: Optional list of label names to target.

    Note: kolide_hosts or kolide_labels must be specified.

    Returns:
        The Kolide query campaign ID.
    """
    # Avoid the mutable-default-argument pitfall; normalize None to [].
    kolide_hosts = [] if kolide_hosts is None else kolide_hosts
    kolide_labels = [] if kolide_labels is None else kolide_labels
    auth_header = {
        "Authorization": f"Bearer {kolide_token}"
    }
    json_payload = {
        "query": f"{osquery_query}",
        "selected": {
            "Labels": kolide_labels,
            "Hosts": kolide_hosts
        }
    }
    url = f"{base_url}/api/v1/kolide/queries/run_by_names"
    # SECURITY: verify=False disables TLS certificate validation; kept for
    # backward compatibility but worth revisiting.
    r = requests.post(url=url, data=json.dumps(json_payload), headers=auth_header, verify=False)
    return r.json()["campaign"]["id"]
import sys
def parse_sub_type(sub_type):
    """Map an ``mkvmerge -i`` subtitle type name to its file extension.

    Exits the program with status 1 when the type is unknown.
    """
    extensions = {
        "HDMV PGS": ".sup",
        "SubStationAlpha": ".ass",
        "SubRip/SRT": ".srt",
        # VobSub creates both a .sub and a .idx but only the idx is needed.
        "VobSub": ".idx",
    }
    if sub_type not in extensions:
        print("Error: Didn't get a known sub type - exiting")
        sys.exit(1)
    return extensions[sub_type]
def prompt_input(prompt, default=None, interrupt=None):
    """
    Prompt the user for [y]es/[n]o input, return a boolean accordingly.
    Parameters
    ----------
    prompt : str
        The prompt text presented on-screen to the user.
    default : {'y', 'yes', 'n', 'no'}, optional
        The default response if the user presses return without entering
        any text. if `None` (default), re-prompt the user until input is
        provided.
    interrupt : {'y', 'yes', 'n', 'no'}, optional
        The default response if a `KeyboardInterrupt` is raised
        (`CTRL + c` from the command line, "interrupt kernel/runtime"
        from a Jupyter/Colab notebook). If `None` (default), the
        exception is raised. Use with caution to avoid unexpected
        behavior and improperly silencing errors.
    Returns
    -------
    bool
        The boolean value corresponding to the user input (`True` for
        `'y'`/`'yes'`, `False` for `'n'`/`'no'`).
    Notes
    -----
    The `default` value is reflected in the casing of options displayed
    after the `prompt` text (e.g., "`[Y/n]`" if `default="yes"`)
    """
    # Map of accepted answers to booleans; '' is added below (when a
    # default is supplied) so that a bare <return> selects the default.
    response_values = {
        'yes': True,
        'y': True,
        'no': False,
        'n': False
    }
    if interrupt is not None:
        interrupt = interrupt.lower()
        if interrupt not in response_values.keys():
            raise ValueError(
                f"'interrupt' must be one of {tuple(response_values.keys())}"
            )
    if default is not None:
        default = default.lower()
        try:
            default_value = response_values[default]
        except KeyError as e:
            raise ValueError(
                f"'default' must be one of: {tuple(response_values.keys())}"
            ) from e
        # Pressing return with no text now maps to the default answer.
        response_values[''] = default_value
        # Capitalize the default option in the displayed choices.
        opts = '[Y/n]' if default_value else '[y/N]'
    else:
        opts = '[y/n]'
    while True:
        try:
            response = input(f"{prompt}\n{opts} ").lower()
            return response_values[response]
        except KeyboardInterrupt:
            if interrupt is not None:
                return response_values[interrupt]
            # No interrupt default configured: propagate the exception.
            raise
        except KeyError:
            # Unrecognized input: loop and re-prompt.
            pass
def get_image_url(date):
    """Return the URL of the scanned NYT front page for *date*.

    :param date: a `datetime.date` object.
    :returns: a `str`, the URL to the image of the frontpage of the NYT
        on the requested date.
    """
    # Month and day are zero-padded to two digits in the archive layout.
    return (
        "http://www.nytimes.com/images"
        f"/{date.year}/{date.month:02d}/{date.day:02d}/nytfrontpage/scan.jpg"
    )
import numpy as np
from math import sqrt
def getTriggerProbability(htsimfile, num_det=4, test=False):
    """Takes a single simFile (from bcSim.simFiles(config.yaml)) and
    returns the probability of hitting in each detector
    Parameters
    ----------
    htsimfile : simFile
        provides energy, ze, az, getHits() and logDict
    test : bool
        run a quick test over a limited number of events (20)
    Returns
    ----------
    prob_det_info : numpy array containing [energy, ze, az, relative
        statistical error, per-detector hit probabilities]; its length is
        4 + num_det (NOTE(review): original doc said "1x6" — confirm)
    """
    det_vol = np.zeros(num_det)
    energy = htsimfile.energy
    ze = htsimfile.ze
    az = htsimfile.az
    hits = htsimfile.getHits()
    # NOTE(review): `dotest` is computed but never used below, so the
    # `test` flag currently has no effect — confirm whether the logDict
    # loop was meant to be limited to `dotest` entries.
    if test:
        dotest = 20
    else:
        dotest = len(hits)
    print("analyzing", len(hits), "events")
    # NOTE(review): `stat_err` is assigned but unused; the statistical
    # error appended below is computed inline instead.
    stat_err = len(hits)
    for key, value in htsimfile.logDict.items():
        # value[1] presumably encodes which detector(s) were hit as a
        # string — TODO confirm the exact format against bcSim.
        for i in range(num_det):
            if str(i) in value[1]:
                det_vol[i] += 1
            elif i == 0:
                if '_' not in value[1]:
                    det_vol[0] += 1
    prob_det_info = [energy, ze, az]
    # sqrt(N)/N == 1/sqrt(N): relative statistical error on the hit count.
    prob_det_info = np.append(prob_det_info, sqrt(len(hits))/float(len(hits)))
    for i in range(num_det):
        prob_det_info = np.append(prob_det_info, det_vol[i]/float(len(hits)))
    return prob_det_info
import random
def partitionByState(ser, holdouts=1):
    """Randomly split instance indices into train and test sets per class.

    For every distinct class in *ser*, *holdouts* indices are sampled for
    the test set; all remaining indices form the training set.

    :param pd.Series ser: class label for each instance
    :param int holdouts: number of holdout instances per class for test
    :return list-object, list-object: train indices, test indices
    :raises ValueError: if a class has no more than *holdouts* members
    """
    test_idxs = []
    # Iterate classes in sorted order so sampling is reproducible under a seed.
    for cls in sorted(ser.unique().tolist()):
        members = ser[ser == cls]
        if len(members) <= holdouts:
            raise ValueError(
                "Class %s has fewer than %d holdouts" %
                (cls, holdouts))
        test_idxs.extend(random.sample(members.index.tolist(), holdouts))
    train_idxs = list(set(ser.index).difference(test_idxs))
    return train_idxs, test_idxs
def getRows(matrix, columns):
    """Return the row indices of *matrix* that do not cover *columns*.

    A row "covers" the column set when it has a nonzero entry in at least
    one of the given columns; rows whose restriction to *columns* sums to
    zero are returned.
    """
    n_rows = matrix.shape[0]
    return [row for row in range(n_rows) if sum(matrix[row, columns]) == 0]
def mkcorpus(sentences):
    """Tokenize a list of (source, target) sentence pairs.

    Each sentence is split on whitespace, so a pair of strings becomes a
    pair of token lists.

    >>> mkcorpus([("僕 は 男 です", "I am a man")])
    [(['僕', 'は', '男', 'です'], ['I', 'am', 'a', 'man'])]
    """
    corpus = []
    for source, target in sentences:
        corpus.append((source.split(), target.split()))
    return corpus
def compute_delay(receive_time_list, chunk_duration_list):
    """Compute playback delays from packet arrival times.

    Args:
        receive_time_list (list): Time each packet was received.
        chunk_duration_list (list): Audio duration carried by each packet.

    Returns:
        list: Delay incurred by each late packet (positive delays only).
    """
    assert (len(receive_time_list) == len(chunk_duration_list))
    delays = []
    # Playback of the first chunk ends here; later packets must arrive
    # before this rolling deadline to play without a gap.
    play_time = receive_time_list[0] + chunk_duration_list[0]
    for receive_time, duration in zip(receive_time_list[1:], chunk_duration_list[1:]):
        gap = receive_time - play_time
        if gap > 0:
            # Packet arrived late: playback stalls for `gap` seconds.
            play_time += gap + duration
            delays.append(gap)
        else:
            # Packet arrived in time: playback continues seamlessly.
            play_time += duration
    return delays
def get_times(ts_full, ts_system, len_state, sys_position, sys_length):
    """Build the ``ts`` and ``subsystems`` inputs for :func:`tmps.evolve`.

    Designed for TEDOPA systems: ``ts`` is the concatenation of
    ``ts_full`` and ``ts_system``, and ``subsystems`` indicates, for each
    time, whether the full state or only the reduced system state should
    be returned.

    Args:
        ts_full (list[float]): Times where the full state (including the
            environment chain) should be returned.
        ts_system (list[float]): Times where only the reduced density
            matrix of the system should be returned.
        len_state (int): The length of the state.
        sys_position (int): The position of the system (first site is 0).
        sys_length (int): Number of sites the system is comprised of.

    Returns:
        tuple(list[float], list[list[int]]): Times and subsystems in the
        form that has to be provided to :func:`tmps.evolve`.
    """
    full_range = [0, len_state]
    system_range = [sys_position, sys_position + sys_length]
    ts = list(ts_full) + list(ts_system)
    subsystems = [full_range] * len(ts_full) + [system_range] * len(ts_system)
    return ts, subsystems
def find_distance(a, b, c):
    """Check whether three ints are evenly spaced, regardless of order.

    Returns the string 'They are equally spaced' when the gaps between the
    sorted values match, otherwise ``None``.
    """
    low, mid, high = sorted((a, b, c))
    if mid - low == high - mid:
        return 'They are equally spaced'
    return None
def _get_value_by_key(headers, key1, key2=None):
"""Return value by two-level keys where the second key is optional
Example
-------
>>> headers
{'s0': {'legend': {'_val': 'Potential Energy (kJ/mol)'}},
'subtitle': {'_val': 'T = 310 (K) \\xl\\f{} state 38: (coul-lambda,
vdw-lambda) = (0.9500, 0.0000)'}}
>>> _get_value_by_key(header, 's0','legend')
'Potential Energy (kJ/mol)'
>>> _get_value_by_key(header, 'subtitle')
'T = 310 (K) \\xl\\f{} state 38: (coul-lambda, vdw-lambda) = (0.9500, 0.0000)'
"""
val = None
if key1 in headers:
if key2 is not None and key2 in headers[key1]:
val = headers[key1][key2]['_val']
else:
val = headers[key1]['_val']
return val | c43dd320b2ba61d1e776d8e018905e9bdf1e5456 | 33,881 |
def read_playlist(filename):
    """Read a playlist CSV file.

    Input: filename of CSV file listing (song,artist,genre) triples
    Output: List of [song, artist, genre] lists (fields whitespace-stripped)
    """
    playlist = []
    # `with` closes the file handle (the original left it open).
    with open(filename) as playlist_file:
        for line in playlist_file:
            playlist.append([field.strip() for field in line.split(',')])
    return playlist
import re
def clean_string(varStr):
    """Sanitize a string for use as a variable name.

    Non-word characters (and a position before a leading digit) become
    underscores, and runs of underscores are collapsed to one.
    """
    # Raw strings avoid invalid-escape-sequence warnings for \W and \d.
    return re.sub(r"_+", "_", re.sub(r"\W|^(?=\d)", "_", varStr))
def _parser_setup(parser_obj, value, reset_default=False):
"""Add argument to argparse object
Parameters
----------
parser_obj : object
argparse object
value : dict
argparse settings
reset_default : bool
boolean that defines if default values should be used
Returns
-------
parser_obj : object
updated argparse object
"""
if reset_default:
default = None
else:
default = value["default"]
if value["action"] is None:
parser_obj.add_argument(
*value["tag"],
help=value["help"],
default=default,
choices=value["choices"],
)
else:
parser_obj.add_argument(
*value["tag"],
help=value["help"],
default=default,
action=value["action"],
)
return parser_obj | 58d1ccde88a6ada8b7f4a09d50d083e3e86677bd | 33,887 |
def get_exports(pe):
    """Collect the PE file's exported symbols.

    @return: list of dicts with "address", "name" and "ordinal" keys;
        empty when the PE has no export directory.
    """
    exports = []
    if not hasattr(pe, "DIRECTORY_ENTRY_EXPORT"):
        return exports
    image_base = pe.OPTIONAL_HEADER.ImageBase
    for symbol in pe.DIRECTORY_ENTRY_EXPORT.symbols:
        if symbol.name is None:
            name = 'n/a'
        else:
            name = symbol.name.decode('utf-8', errors='ignore')
        exports.append({
            # Export RVAs are relative; rebase onto the image base.
            "address": hex(image_base + symbol.address),
            'name': name,
            "ordinal": symbol.ordinal})
    return exports
from typing import IO
def constraints_cmd(file: IO[str]):
    """Check constraints.

    Returns the command name paired with its argument mapping.
    """
    payload = {'file': file}
    return ('constraints', payload)
def _occ_and_virt(log):
    """
    Extract the number of occupied and empty orbitals from a logfile

    Returns a pair of tuples ``(occupied, virtual)`` with one entry per
    spin channel (one entry for averaged spin, two for collinear).
    """
    norb = log.log['Total Number of Orbitals']
    if log.log['Spin treatment'] == 'Averaged':
        # Virtual orbitals = total computed states minus occupied ones.
        # NOTE(review): assumes log.evals[0].info[0] is the number of
        # computed states in the (single) spin channel — confirm.
        norbv = log.evals[0].info[0]-norb
        return (norb,), (norbv,)
    elif log.log['Spin treatment'] == 'Collinear':
        # Split the occupied orbitals between up/down channels using the
        # magnetic polarization mpol.
        mpol = log.log['dft']['mpol']
        norbu = int((norb+mpol)/2)
        norbd = norb-norbu
        norbvu = log.evals[0].info[0]-norbu
        norbvd = log.evals[0].info[0]-norbd
        return (norbu, norbd), (norbvu, norbvd)
    else:
        raise ValueError('Information for the orbitals to be implemented')
def testAnalysis(data):
"""
Test Process to sum the data from the csv
"""
new_data = data.sum()
return new_data | 55a079b94b3819e2c1b2be3aac9027c922bfddba | 33,891 |
import argparse
def default_argument_parser():
    """
    Create a parser with some common arguments used by users.
    Returns:
        argparse.ArgumentParser: parser exposing --config_file and --version.
    """
    parser = argparse.ArgumentParser()
    parser.add_argument(
        '-f', "--config_file",
        default="",
        metavar="CONFIG_FILE",
        help="path to config file",
    )
    parser.add_argument(
        '-v', '--version',
        action="store_true",
        help='output version information',
    )
    return parser
def map_string_to_int(s: list) -> list:
    """Map each item of *s* to an int reflecting first-appearance order.

    The first distinct item maps to 1, the second to 2, and so on;
    repeated items reuse their first assignment.

    :param s: sequence of hashable items
    :rtype: list
    """
    # The original kept a redundant `counter > 0` guard and a convoluted
    # continue/else flow; a single membership check is equivalent.
    first_seen = {}
    for item in s:
        if item not in first_seen:
            # ids are 1-based, in order of first appearance
            first_seen[item] = len(first_seen) + 1
    return [first_seen[item] for item in s]
from typing import Any
import yaml
def parse_yaml_string(string: str) -> Any:
    """Parse a YAML string, raising ``ValueError`` when parsing fails.

    This wrapper exists because :meth:`prompt` relies on a ``ValueError``
    to repeat failed questions.
    """
    try:
        parsed = yaml.safe_load(string)
    except yaml.error.YAMLError as error:
        raise ValueError(str(error))
    return parsed
def full_email(service_account):
    """Generate the full email address from a service account."""
    return f"{service_account.name}@{service_account.project}.{service_account.suffix}"
def get_cpu_temp() -> float:
    """Get the CPU core temperature.

    Reads the millidegree value the kernel exposes at
    /sys/class/thermal/thermal_zone0/temp and converts it to Celsius.
    (Note: despite the original description, no shell script is run.)

    Raises:
        RuntimeError: if the sysfs value cannot be parsed.

    Returns:
        float: The core temperature in degrees Celsius.
    """
    with open('/sys/class/thermal/thermal_zone0/temp') as file:
        temp_str = file.read()
    try:
        # sysfs reports millidegrees Celsius as an integer string.
        return int(temp_str) / 1000
    except (IndexError, ValueError,) as e:  # NOTE(review): IndexError looks unreachable here
        raise RuntimeError('Could not parse temperature output.') from e
def format_parts_id(data):
    """Format the canonical representation of ATT&CK/MBC parts and ID."""
    joined_parts = "::".join(data["parts"])
    return "%s [%s]" % (joined_parts, data["id"])
import os
import sqlite3
def register_user(name, device1, number, path):
    """
    Register user information in the SQLite database.
    Parameters
    ----------
    name : str
        User name
    device1 : str
        idm (card identifier)
    number : str
        Student / faculty / staff ID-card number
    path : str
        Path up to the home directory
    Returns
    -------
    success : bool
        Whether registration succeeded (True) or failed (False)
    """
    path_db = os.path.join(path, 'data', 'list.db')
    # Reject registration when any required field is empty.
    if "" in [name, device1, number]:
        return False
    with sqlite3.connect(path_db) as conn:
        cur = conn.cursor()
        cur.execute('select max(id) from miyano')
        # max(id) is NULL on an empty table, so fall back to 0 for the first
        # registration.  NOTE(review): read-then-insert is racy if multiple
        # writers register concurrently — confirm single-writer usage.
        _id = cur.fetchall()[0][0] or 0
        try:
            cur.execute('insert into miyano (id, name, device1, number) values(?, ?, ?, ?)', (_id+1, name, device1, number))
            conn.commit()
            success = True
        except sqlite3.IntegrityError as e:
            # Duplicate key or constraint violation: report failure.
            success = False
        cur.close()
    return success
def _guess_max_plate_nesting(model_trace):
"""
Guesses max_plate_nesting by using model trace.
This optimistically assumes static model
structure.
"""
sites = [site for site in model_trace.values() if site["type"] == "sample"]
dims = [
frame.dim
for site in sites
for frame in site["cond_indep_stack"]
if frame.dim is not None
]
max_plate_nesting = -min(dims) if dims else 0
return max_plate_nesting | 2be50e37689e8eda9841fbbd43c395c08c369ddb | 33,904 |
def is_sum(n, power):
    """Return whether n equals the sum of its digits raised to *power*.

    1 is excluded by convention.
    """
    if n == 1:
        return False
    digit_power_sum = sum(int(digit) ** power for digit in str(n))
    return n == digit_power_sum
import argparse
def create_parser():
    """Build the command-line argument parser.

    All options are required: the modality, a list of kNN values, the
    number of subsampling rounds (>1) and a tag naming the input.
    """
    parser = argparse.ArgumentParser()
    parser.add_argument("-m", "--mod", required=True, help="modality")
    parser.add_argument("-ks", "--knns", required=True, nargs="+", help="a list of knns")
    parser.add_argument("-sn", "--subsample_times", required=True, type=int, help=">1")
    parser.add_argument("-tag", "--input_name_tag", required=True, help="input_name_tag")
    return parser
import inspect
def get_current_method_name():
    """Return the name of the calling function (helper to avoid repetition)."""
    # stack()[1] is the caller's frame record; index 3 is its function name.
    caller_record = inspect.stack()[1]
    return caller_record[3]
def medium_sized_content():
    """Return filler text; the rename algorithm doesn't work well on content that's too small."""
    line = 'qwertyuiopasdfghjklzxcvbnmqwerty\n'
    return line * 4
def ExtractWordsFromLines(lines):
    """Extract the set of all whitespace-separated words from *lines*."""
    return {word for line in lines for word in line.split()}
def jsonpath_to_variable(p):
    """Convert a JSON path starting with $. into a valid expression variable name."""
    # '$' becomes the 'JSON_' prefix and '.' separators become underscores.
    prefixed = p.replace('$', 'JSON_')
    return prefixed.replace('.', '_')
def _is_scrolled_into_view(driver, element, fully_in_view=True):
    """Returns True if the element is scrolled into view, False otherwise
    Currently, Selenium doesn't offer a means of getting an element's location
    relative to the viewport, so using JavaScript to determine whether the
    element is visible within the viewport.
    :param driver: Selenium WebDriver object
    :param element: WebElement for the element to check
    :param fully_in_view: (Default = True) If True, check that the element is
        fully in view and not cut off. If False, check that it's at least
        partially in view
    :return: True if the element is scrolled into view, False otherwise
    """
    # The JavaScript used to check if the element is in view;
    # `fully_in_view` is passed to it as the `strict` argument below.
    script_string = '''
    return function(el, strict) {
        var rect = el.getBoundingClientRect();
        var elemTop = rect.top;
        var elemBottom = rect.bottom;
        if (strict)
            var isVisible = (elemTop >= 0) && (elemBottom <= window.innerHeight);
        else
            isVisible = elemTop < window.innerHeight && elemBottom >= 0;
        return isVisible;
    }(arguments[0],arguments[1])
    '''
    return driver.execute_script(script_string, element, fully_in_view)
import logging
def drop_null_columns(df, threshold=0.01):
    """Drop columns whose null count reaches ``threshold`` of the total rows.

    :param df: input DataFrame
    :param threshold: fraction of rows; columns with at least this share
        of nulls are dropped
    :return: dataframe with the offending columns removed
    """
    null_counts = df.isna().sum()
    cutoff = df.shape[0] * threshold
    kept = df.loc[:, null_counts < cutoff]
    dropped_names = list(df.columns[null_counts >= cutoff])
    logging.info(f"{df.shape[1] - kept.shape[1]} null columns dropped: {dropped_names}")
    return kept
def district_str_form(name):
    """Normalize a district name to the capitalization used on the CEC site.

    Each whitespace-separated word — and each hyphen-separated part of a
    word — gets an initial capital with the rest lowercased; single
    characters are uppercased entirely. Returns the reformatted string.
    """
    def _cap(part):
        # Single characters (or empties) are uppercased, longer parts title-cased.
        if len(part) > 1:
            return part[0].upper() + part.lower()[1:]
        return part.upper()

    words = []
    for word in name.split():
        if len(word) > 1:
            words.append('-'.join(_cap(sub) for sub in word.split('-')))
        else:
            words.append(word.upper())
    return ' '.join(words)
import random
import string
def generate_job_id() -> str:
    """Return a job_id of the form 'raysubmit_XYZ'.

    Prefixed with 'raysubmit' to avoid confusion with Ray JobID (driver ID).
    Visually ambiguous characters (I, l, o, O, 0) are excluded.
    """
    rng = random.SystemRandom()
    alphabet = sorted(
        set(string.ascii_letters + string.digits) - {"I", "l", "o", "O", "0"}
    )
    suffix = "".join(rng.choices(alphabet, k=16))
    return f"raysubmit_{suffix}"
def quote_message(body: str, message):
    """Construct a body (with a signature) and a quoted reply.

    Returns a tuple ``(signed_body, quoted_reply)``.
    """
    conversation = message.conversation
    # Append a blank line plus a two-line CEO signature to the body.
    signed_lines = body.split("\n") + [
        "",
        conversation.sender_name,
        "CEO, %s" % conversation.domain.company_name,
    ]
    stamp = message.timestamp.strftime("%d/%m/%Y %H:%M %p")
    quoted_lines = ["On %s, %s wrote:" % (stamp, message.sender_name)]
    quoted_lines += ["> " + line for line in message.best_body.split("\n")]
    return "\n".join(signed_lines), "\n".join(quoted_lines)
def order_search(target, items):
    """Sequential (linear) search over *items*, O(n).

    :param target: value to look for
    :param items: sequence to scan
    :return: index of the first match, or the string '404 Not FOUND!:('
    """
    for index, value in enumerate(items):
        if value == target:
            return index
    return '404 Not FOUND!:('
def print_rangoli(size):
"""Prints a rangoli of variable length size.
Starts with an a in the middle, and then goes up a letter for each column
and row offset off the middle.
"""
rangoli = ""
width = 4 * size - 3
for row in range(1, size * 2):
line = ""
for column in range(1, size * 2):
if row == size and column == size:
line = line + "a-"
else:
row_offset = abs(row - size)
col_offset = abs(column - size)
total_offset = row_offset + col_offset
if total_offset < size:
line = line + chr(ord("a") + total_offset)
# Only add a dash between characters if not last column
if column != size * 2 - 1:
line = line + "-"
rangoli = rangoli + line.center(width, "-") + "\n"
return rangoli | fcdff78791bb9ebf554161edaa7acc531b0ce940 | 33,920 |
def extract_cytoplasm(sln):
    """Return a BW (boolean) mask of the cytoplasm from a segmentation solution.

    Any strictly positive label value is treated as cytoplasm.
    """
    mask = sln > 0.0
    return mask
import requests
def make_request(request):
    """
    HTTP Cloud Function that makes another HTTP request.
    Args:
        request (flask.Request): The request object (unused; kept for the
            Cloud Functions handler signature).
    Returns:
        The response text, or any set of values that can be turned into a
        Response object using `make_response`
        <http://flask.pocoo.org/docs/0.12/api/#flask.Flask.make_response>.
    """
    # The URL to send the request to (fixed example endpoint).
    url = 'http://example.com'
    # Process the request; raise_for_status turns HTTP error codes into
    # exceptions, so only a successful fetch reaches the return.
    response = requests.get(url)
    response.raise_for_status()
    return 'Success!'
import yaml
def get_yaml_tests(test_suites, setup=None):
    """Provide a yaml representation for the parameters obtained from an
    infrared test.yml file.

    :param test_suites: Suites to be run
    :type test_suites: list
    :param setup: Source of setup packages
    :type setup: str
    """
    test_dict = {
        'tests': [f"/path/to/suite/{suite}.yml" for suite in test_suites]
    }
    if setup:
        test_dict['setup'] = setup
    return yaml.dump({'test': test_dict})
import os
from re import VERBOSE
def parse_result_file(result_file: str) -> float:
    """Parse an evaluation result file and return the mIoU as a percentage.

    Returns the sentinel value 100000 when the file does not exist.
    """
    if not os.path.isfile(result_file):
        # NOTE(review): VERBOSE here is `re.VERBOSE` (see the imports above),
        # which is always truthy — it looks like a misimported config flag.
        if VERBOSE:
            print(result_file + ' does not exist!')
        return 100000
    with open(result_file, 'r') as handle:
        first_line = handle.readlines()[0]
    # Expected layout: ".../.../<label> <miou>" — third '/'-field, last token.
    miou_token = first_line.split('/')[2].split(' ')[-1]
    return float(miou_token) * 100
def transform_proteinid(df):
    """Recover the bare protein name (drop the trailing genome id).

    The ``protein_id`` column is rewritten in place, keeping only the
    text before the first '--'; the (mutated) DataFrame is returned.
    """
    df['protein_id'] = df['protein_id'].map(lambda pid: pid.split('--')[0])
    return df
import urllib.request, zipfile
import os
def fetch_dataset(dataset_id: str, output_dir='~/.ancp-bids/datasets'):
    """Downloads and extracts an ancpBIDS test dataset from Github.
    Parameters
    ----------
    dataset_id :
        The dataset ID of the ancp-bids-datasets github repository.
        See `https://github.com/ANCPLabOldenburg/ancp-bids-dataset` for more details.
    output_dir :
        The output directory to download and extract the dataset to.
        Default is to write to user's home directory at `~/.ancp-bids/datasets`
    Returns
    -------
    The path of the extracted dataset.
    """
    # Normalize '~' and relative segments to an absolute path.
    output_dir = os.path.expanduser(output_dir)
    output_dir = os.path.abspath(os.path.normpath(output_dir))
    output_path = os.path.join(output_dir, dataset_id)
    # Already extracted: nothing to do.
    if os.path.exists(output_path):
        return output_path
    os.makedirs(output_path)
    download_file = f'{dataset_id}-testdata.zip'
    download_path = os.path.join(output_dir, download_file)
    # NOTE(review): if the zip exists but was never extracted, this returns
    # the freshly created (empty) output directory — confirm intent.
    if os.path.exists(download_path):
        return output_path
    url = f'https://github.com/ANCPLabOldenburg/ancp-bids-dataset/raw/main/{download_file}'
    # Download the whole archive into memory, then write it to disk.
    with urllib.request.urlopen(url) as dl_file:
        with open(download_path, 'wb') as out_file:
            out_file.write(dl_file.read())
    z = zipfile.ZipFile(download_path)
    z.extractall(output_dir)
    return output_path
def checkLftToRht(l=[0, 1, 2, 3], spliceA=2, spliceB=9):
    """task 0.5.13

    Show what happens when the length of the left-hand-side list does not
    match the length of the right-hand side: returns ``l[spliceA:spliceB]``.
    """
    selected = l[spliceA:spliceB]
    return selected
def split_string(string, length):
    """Split *string* into chunks of *length*, counting from the end.

    The first chunk carries any remainder, so only it may be shorter.
    """
    chunks = []
    for end in range(len(string), 0, -length):
        chunks.append(string[max(end - length, 0):end])
    return chunks[::-1]
def read_coordinates(coordinate_file):
    """Build the coordinates list from an open file of "id x y" lines.

    The node id column is ignored; each line contributes an (x, y) tuple
    of floats.
    """
    coordinates = []
    for line in coordinate_file:
        _node_id, x, y = line.split(" ")
        coordinates.append((float(x), float(y)))
    return coordinates
def sformat(text, args):
    """Substitute "{key}" placeholders in *text* with values from *args*.

    A falsy *args* (e.g. None) is treated as an empty mapping.
    """
    for key in (args or {}):
        placeholder = "{%s}" % key
        text = text.replace(placeholder, "%s" % (args[key]))
    return text
import argparse
import json
def parse_args(args):
    """Parse arguments from the command line.

    Args:
        args: List of argument strings to parse (e.g. ``sys.argv[1:]``).

    Returns:
        argparse.Namespace with the parsed values.
    """
    DESCRIPTION = "Update the dependencies of a local Helm Chart in a project repository."  # noqa: E501
    parser = argparse.ArgumentParser(description=DESCRIPTION)
    subparsers = parser.add_subparsers()
    version_parser = subparsers.add_parser(  # noqa: F841
        "version", help="Print the version and exit"
    )  # noqa: E501
    run_parser = subparsers.add_parser(
        "run", help="Update the dependencies of a helm chart"
    )
    run_parser.add_argument(
        "chart", type=str, help="Name of the local Helm Chart to be updated."
    )
    run_parser.add_argument(
        "dependencies",
        type=json.loads,
        help="""A dictionary of Helm Chart dependencies and their host repo URLs.
        E.g. '{"nginx-ingress":
        "https://raw.githubusercontent.com/helm/charts/master/stable/nginx-ingress/Chart.yaml"}'
        """,
    )
    run_parser.add_argument(
        "--dry-run",
        action="store_true",
        help="Perform a dry run of the update. Don't write the changes to a file.",  # noqa: E501
    )
    run_parser.add_argument(
        "-v",
        "--verbose",
        action="store_true",
        help="Option to turn on logging.",  # noqa: E501
    )
    # BUGFIX: the original called parser.parse_args() with no arguments,
    # silently ignoring the `args` parameter and reading sys.argv instead.
    return parser.parse_args(args)
from typing import List
import re
def re_findall(raw_str: str) -> List[str]:
    """Find runs of 2+ vowels wrapped by consonants on both sides.

    Matching is case-insensitive; returns ['-1'] when nothing matches.

    >>> re_findall('rabcdeefgyYhFjkIoomnpOeorteeeeet')
    ['ee', 'Ioo', 'Oeo', 'eeeee']
    """
    consonants = '[qwrtypsdfghjklzxcvbnm]'
    vowel_runs = re.compile(
        '(?<=' + consonants + ')([aeiou]{2,})' + consonants,
        re.I)
    matches = vowel_runs.findall(raw_str)
    return matches if matches else ['-1']
import requests
def get_histohour(fsym='BTC', tsym='USD', e='CCCAGG', limit=1920, optional_params=None):
    """Get hourly pricing info for a given exchange from CryptoCompare.

    Args:
        Required:
            fsym (str): From symbol
            tsym (str): To symbol
            e (str): Name of exchange (e.g. CCCAGG, Kraken, Binance, Bitfinex,
                Bitstamp, Coinbase, Poloniex, HitBTC, Gemini, OKEX, ...)
            limit (int): default: 168, max: 2000 (new default: 1920 (80 days))
        Optional (via optional_params):
            extraParams (str): Name of app
            sign (bool): if true, server will sign requests
            tryConvention (bool): if false, get values without conversion
            aggregate (int):
            toTs (timestamp):
    Returns:
        history (str): hourly price history from fsym to tsym (raw body)
    """
    url = "https://min-api.cryptocompare.com/data/histohour"
    params = {'fsym': fsym, 'tsym': tsym, 'e': e, 'limit': limit}
    # Avoid the mutable-default-argument pitfall: None stands in for {}.
    params.update(optional_params or {})
    r = requests.get(url=url, params=params)
    return r.text
def last_early(my_str):
    """Return True if the last character of *my_str* also appears earlier
    in the string, ignoring case.

    (The previous docstring described an unrelated "absolute distance"
    check; this states the actual contract.)

    :param my_str: non-empty string to inspect
    :type my_str: str
    :return: True when the final character occurs before the last position
    :rtype: bool
    """
    # my_str[:-1] drops only the final character; compare case-insensitively.
    return my_str[-1].lower() in my_str[:-1].lower()
def get_ec_filter(args, raw_config_dict):
    """Retrieve EC numbers to restrict the scrape to.

    :param args: cmd-line arguments parser (reads ``args.ec``, a
        comma-separated string of EC numbers or None)
    :param raw_config_dict: dictionary of content from YAML config file
        (may be None, or lack an "ECs" key)

    Return set containing user-specified EC numbers with any "EC"/"ec"
    prefix stripped.
    """
    ecs = []
    if raw_config_dict is not None:
        try:
            if raw_config_dict["ECs"] is not None:
                ecs += raw_config_dict["ECs"]
        except KeyError:
            # Config file simply has no "ECs" section.
            pass
    if args.ec is not None:
        ecs += (args.ec).split(",")
    # Strip textual prefixes (removed the dead `i = 0` and the manual index
    # loop). Mixed-case forms like "Ec" are not handled, matching the
    # original behaviour; replace() also removes mid-string occurrences.
    ec_filter = {ec.replace("EC", "").replace("ec", "") for ec in ecs}
    return ec_filter
import subprocess
def run_command(command):
    """
    Execute *command* (a list, as `subprocess.Popen` expects) and return its
    captured stdout as bytes.  A non-zero exit status raises
    `subprocess.CalledProcessError`.
    """
    completed = subprocess.run(command, stdout=subprocess.PIPE, check=True)
    return completed.stdout
def TypeSetter(constructor=None):
    """Build a ``setter(obj, field, val)`` that performs ``obj.field = val``.

    When *constructor* is given (any callable returning an object), the value
    is passed through it before assignment.
    """
    if not constructor:
        def setter(target, field, val):
            setattr(target, field, val)
    else:
        def setter(target, field, val):
            setattr(target, field, constructor(val))
    return setter
def tool_key(tool_output):
    """Key for a handler: the first argument, assumed to be the invocation
    (e.g. python)."""
    invocation = tool_output.args[0]
    return invocation
def get_accuracy(predictions, real_values):
    """
    :param predictions: vector of predictions
    :param real_values: true values
    :return: fraction of positions where prediction equals the true value
    """
    correct = sum(1 for predicted, actual in zip(predictions, real_values)
                  if predicted == actual)
    return correct / len(predictions)
def num2str(num):
    """
    Render a big number in thousands: one trailing 'K' per factor of 1000
    (e.g. 1500 -> '1K', 2000000 -> '2KK').
    """
    kilo_suffixes = 0
    while num / 1000 >= 1:
        num = num / 1000
        kilo_suffixes += 1
    return str(int(num)) + 'K' * kilo_suffixes
def is_sorted(list_):
    """
    Return True iff list_ is in non-decreasing order.

    @param list list_: list to inspect
    @rtype bool:

    >>> is_sorted([1, 3, 5])
    True
    >>> is_sorted([3, 1, 5])
    False
    """
    # Compare each adjacent pair; vacuously True for empty/1-element lists.
    return all(earlier <= later for earlier, later in zip(list_, list_[1:]))
def get_app(app):
    """
    Wrap a plain-HTTP WSGI app so it reports an HTTPS URL scheme.
    """
    def wrapped(environ, start_response):
        """Force environ['wsgi.url_scheme'] to 'https', then delegate."""
        environ['wsgi.url_scheme'] = 'https'
        return app(environ, start_response)
    return wrapped
def monthdelta(date, delta):
    """
    Return *date* shifted by *delta* months, clamping the day to the last
    valid day of the target month.

    From this StackOverflow response:
    http://stackoverflow.com/questions/3424899/whats-the-simplest-way-to-subtract-a-month-from-a-date-in-python

    Arguments:
        date datetime: Date to be modified
        delta int: delta value (may be negative)

    Returns:
        datetime: The datetime with the month delta applied
    """
    m, y = (date.month + delta) % 12, date.year + (date.month + delta - 1) // 12
    if not m:
        m = 12
    # Gregorian leap-year rule: divisible by 4, except century years not
    # divisible by 400. (The previous `y % 4 == 0 and not y % 400 == 0`
    # wrongly marked 2000 as non-leap and 1900 as leap.)
    is_leap = y % 4 == 0 and (y % 100 != 0 or y % 400 == 0)
    days_in_month = [31, 29 if is_leap else 28,
                     31, 30, 31, 30, 31, 31, 30, 31, 30, 31][m - 1]
    d = min(date.day, days_in_month)
    return date.replace(day=d, month=m, year=y)
def is_string_ipv4(string):
    """Check whether *string* is a dotted-quad IPv4 address.

    :param string: input string (surrounding whitespace is ignored)
    :return: tuple of (bool, stripped ipv4 string or None)
    """
    candidate = string.strip()
    octets = candidate.split('.')
    if len(octets) != 4:
        return False, None
    try:
        for octet in octets:
            # isdigit() rejects signs and blanks, but int() can still fail
            # on exotic digit characters, hence the ValueError guard.
            if not (octet.isdigit() and 0 <= int(octet) <= 255):
                return False, None
    except ValueError:
        return False, None
    return True, candidate
def find_sum_pair(numbers, target):
    """Find a pair of numbers from a list that sum to the target value.

    Scans left to right and returns the first (x, target - x) pair whose two
    values occur at different positions, or None when no pair exists.
    """
    # Count occurrences once so each candidate is checked in O(1) instead of
    # rescanning the whole list (the original nested loop was O(n^2)).
    counts = {}
    for value in numbers:
        counts[value] = counts.get(value, 0) + 1
    for x in numbers:
        y = target - x
        # y must exist at a different index: any occurrence works unless
        # y == x, in which case at least two occurrences are required.
        if y in counts and (y != x or counts[x] > 1):
            return x, y
    return None
async def api_bookmark_add(info):
    """
    Batch-rename tags across the library.

    NOTE(review): despite being named "api_bookmark_add", this handler
    performs a batch tag rename (looks like a copy of the rename handler) --
    confirm the intended behaviour.

    Request data:
        path (optional): restrict the rename to tags underneath this path
            (including the directory itself).
        from: tag name to rename.
        to:   new tag name.

    Since there may be a lot of tags to rename, this renames a block of tags
    (at most 100 per call) and returns the IDs that were edited.  Call this
    repeatedly until no more tags are modified.
    """
    path = info.data.get('path', None)
    from_tag = info.data['from']
    to_tag = info.data['to']
    if path is not None:
        # Resolve and authorize the restricting path before using it.
        path = info.manager.resolve_path(path)
        info.manager.check_path(path, info.request, throw=True)
    # max_edits=100 bounds the work per request; the caller loops until done.
    media_ids = info.manager.library.batch_rename_tag(from_tag, to_tag, paths=[path] if path else None, max_edits=100)
    return { 'success': True, 'media_ids': media_ids }
import os
def reference_lists(find_lines):
    """Return [pathnames, sizes]: the pathnames whose text contains 'mp3' or
    'm4a', and the on-disk size in bytes of each file, in matching order.
    """
    pathnames = [line for line in find_lines if 'mp3' in line or 'm4a' in line]
    sizes = [os.path.getsize(path) for path in pathnames]
    return [pathnames, sizes]
def numDateToYmd(numDate):
    """Convert a numeric date (decimal year, e.g. 2015.5) to (year, month, day).

    The returned month is 0-based (0 = January .. 11 = December), while day
    is 1-based.

    NOTE(review): leap years are detected with ``year % 4 == 0`` only, which
    is wrong for century years such as 1900 and 2100 -- confirm the intended
    year range.
    """
    year = int(numDate)
    isLeapYear = 1 if (year % 4 == 0) else 0
    # Get rid of the year
    numDate -= year
    # Convert to Julian day
    daysInYear = 366 if isLeapYear else 365
    jDay = int(numDate * daysInYear) + 1
    # Walk down the cumulative day-of-year thresholds (Nov 30 = day 334,
    # Oct 31 = 304, ... Jan 31 = 31), shifted by one in leap years.
    if (jDay > 334 + isLeapYear):
        month, day = 11, (jDay - 334 - isLeapYear)
    elif (jDay > 304 + isLeapYear):
        month, day = 10, (jDay - 304 - isLeapYear)
    elif (jDay > 273 + isLeapYear):
        month, day = 9, (jDay - 273 - isLeapYear)
    elif (jDay > 243 + isLeapYear):
        month, day = 8, (jDay - 243 - isLeapYear)
    elif (jDay > 212 + isLeapYear):
        month, day = 7, (jDay - 212 - isLeapYear)
    elif (jDay > 181 + isLeapYear):
        month, day = 6, (jDay - 181 - isLeapYear)
    elif (jDay > 151 + isLeapYear):
        month, day = 5, (jDay - 151 - isLeapYear)
    elif (jDay > 120 + isLeapYear):
        month, day = 4, (jDay - 120 - isLeapYear)
    elif (jDay > 90 + isLeapYear):
        month, day = 3, (jDay - 90 - isLeapYear)
    elif (jDay > 59 + isLeapYear):
        month, day = 2, (jDay - 59 - isLeapYear)
    elif (jDay > 31):
        month, day = 1, (jDay - 31)
    else:
        month, day = 0, jDay
    return year, month, day
import os
def where():
    """
    Return the installation location of BrewData (the absolute path of the
    directory containing this module).
    """
    return os.path.abspath(os.path.dirname(__file__))
def file_available(file_cache, ip, path):
    """Delegate to the cache.

    Returns {token: <token>, ready: <bool>}.
    """
    availability = file_cache.file_available(ip, path)
    return availability
from argparse import ArgumentParser
from argparse import RawTextHelpFormatter
def get_parser():
    """Build the command-line parser for the first-level analysis."""
    p = ArgumentParser(description='First level analysis',
                       formatter_class=RawTextHelpFormatter)
    # Two required positionals, then optional tuning flags.
    p.add_argument('feat_root', action='store',
                   help='root folder where all feat folders sit')
    p.add_argument('output_dir', action='store',
                   help='derivatives folder')
    p.add_argument('-S', '--subject', action='store')
    p.add_argument('--whitelist', action='store',
                   help='white list file with subject ids')
    p.add_argument('--nprocs', action='store', type=int, default=16)
    return p
def covert_if_not_ascii(value):
    """Return *value* unchanged if it is an ASCII-encodable string,
    otherwise its str() representation."""
    try:
        value.encode("ascii")
    except (AttributeError, UnicodeEncodeError):
        # Not a str (no .encode) or not pure ASCII: stringify it.
        return str(value)
    return value
def get_timesteps(trajectory):
    """
    Determine valid timesteps for the associated hdf5 dump.

    Parameters
    ----------
    trajectory : decoded hdf5 file (e.g. ``h5py.File(infile, 'r')``) whose
        'id' group keys are timestep numbers as strings.

    Returns
    -------
    Tuple of (sorted list of all integer timesteps, first timestep,
    last timestep).  The last two are provided for convenience.
    """
    times = sorted(int(step) for step in trajectory['id'].keys())
    return (times, times[0], times[-1])
def is_range(obj):
    """Return True when *obj* is a dict holding float 'start', 'step' and
    'stop' entries (i.e. a valid "range" description)."""
    required = ('start', 'step', 'stop')
    if not isinstance(obj, dict):
        return False
    return all(key in obj and isinstance(obj[key], float) for key in required)
from dateutil import tz
from datetime import datetime
def utc_to_est(utc_string):
    """Convert a UTC time string ('%Y-%m-%dT%H:%MZ') to a 12-hour
    US/Eastern local time string such as '07:30 PM'."""
    utc_dt = datetime.strptime(utc_string, "%Y-%m-%dT%H:%MZ")
    utc_dt = utc_dt.replace(tzinfo=tz.gettz("UTC"))
    eastern = utc_dt.astimezone(tz.gettz("America/New_York"))
    return eastern.strftime("%I:%M %p")
import six
import os
def get_pid_tid():
    """Returns a string containing the current process and thread id in the
    format "(pid=%pid) (tid=%tid)".

    @return: The string containing the process and thread id.
    @rtype: six.text_type
    """
    try:
        return "(pid=%s) (tid=%s)" % (
            six.text_type(os.getpid()),
            six.text_type(six.moves._thread.get_ident()),
        )
    except Exception:
        # The previous bare `except:` also swallowed SystemExit and
        # KeyboardInterrupt; only genuine errors should fall back here.
        return "(pid=%s) (tid=Unknown)" % (six.text_type(os.getpid()))
import torch
def index2row(idx):
    """
    Flatten a (B, N, ...) index tensor to shape (B*N*...), offsetting each
    batch's entries by batch_index * N so they address a flattened buffer.

    Parameters
    ----------
    idx: (batch_size, num_points, ...) integer tensor

    Returns
    -------
    1-D tensor of length batch_size * num_points * ...
    """
    shape = idx.shape
    batch, per_batch = shape[0], shape[1]
    # One scalar offset per batch element, broadcast over remaining dims.
    offsets = torch.arange(batch, device=idx.device) * per_batch
    offsets = offsets.view(-1, *([1] * (len(shape) - 1)))
    return (idx + offsets).reshape(-1)
def on_delete_callbacks(myself, req, name):
    """Customizable hook to handle DELETE /callbacks.

    The default implementation declines to handle the request; replace or
    override it to perform actual processing.

    :return: True if the callback has been processed, False otherwise.
    """
    # return True if callback has been processed
    return False
def __find_team(teams, team_tricode):
    """
    Auxiliary helper: look up the team whose tricode matches.

    Args:
        teams: dict mapping full team names (e.g. 'Toronto Raptors') to
            team objects exposing ``get_tricode()``.
        team_tricode: the tricode of the wanted team.

    Returns:
        The full team name (the dict key) for the matching tricode, or
        None when no team matches.
    """
    for full_name, team_obj in teams.items():
        if team_obj.get_tricode() == team_tricode:
            # The key itself is the full team name.
            return full_name
def filter_req(req, extra):
    """Apply an extra using a requirement's markers; return True to keep it."""
    marker = req.marker
    # A requirement without a marker only applies to the base install.
    if extra and not marker:
        return False
    if not marker:
        return True
    # Evaluate with the (possibly absent) extra; falsy values count as none.
    return marker.evaluate({"extra": extra or None})
def ipv4_reassembly(frame):
    """Make data for IPv4 reassembly.
    Args:
        frame (pcapkit.protocols.pcap.frame.Frame): PCAP frame.
    Returns:
        Tuple[bool, Dict[str, Any]]: A tuple of data for IPv4 reassembly.
        * If the ``frame`` can be used for IPv4 reassembly. A frame can be reassembled
          if it contains IPv4 layer (:class:`pcapkit.protocols.internet.ipv4.IPv4`) and
          the **DF** (:attr:`IPv4.flags.df <pcapkit.protocols.internet.ipv4.DataType_IPv4_Flags.df>`)
          flag is :data:`False`.
        * If the ``frame`` can be reassembled, then the :obj:`dict` mapping of data for IPv4
          reassembly (c.f. :term:`ipv4.packet`) will be returned; otherwise, returns :data:`None`.
    See Also:
        :class:`~pcapkit.reassembly.ipv4.IPv4Reassembly`
    """
    if 'IPv4' in frame:
        ipv4 = frame['IPv4'].info
        if ipv4.flags.df:    # DF (don't fragment) set: never fragmented, nothing to reassemble
            return False, None
        data = dict(
            bufid=(
                ipv4.src,                           # source IP address
                ipv4.dst,                           # destination IP address
                ipv4.id,                            # identification
                ipv4.proto.name,                    # payload protocol type
            ),
            num=frame.info.number,                  # original packet range number
            fo=ipv4.frag_offset,                    # fragment offset
            ihl=ipv4.hdr_len,                       # internet header length
            mf=ipv4.flags.mf,                       # more fragment flag
            tl=ipv4.len,                            # total length, header includes
            header=bytearray(ipv4.packet.header),   # raw bytearray type header
            payload=bytearray(ipv4.packet.payload or b''),  # raw bytearray type payload
        )
        return True, data
    # No IPv4 layer at all: not eligible for reassembly.
    return False, None
def force_uppercase():
    """
    083
    Ask the user to type in a word in upper case. If they type it in lower
    case, ask them to try again.  Keep repeating this until they type in a
    message all in uppercase.

    Note: the validated input is discarded; this always returns ''.
    """
    user_input = input("Enter a word in uppercase: ")
    # str.isupper() is False when there are no cased characters, so empty or
    # digits-only input also triggers a re-prompt.
    while not user_input.isupper():
        user_input = input("Try again, enter a word in uppercase: ")
    return ""
import decimal
def json_handler(obj):
    """Fallback serializer for ``json.dumps(default=...)``: Decimal becomes
    float; anything else its str(), or its __dict__ when str() fails."""
    if isinstance(obj, decimal.Decimal):
        return float(obj)
    try:
        text = str(obj)
    except TypeError:
        return obj.__dict__
    return text
def _get_named_slices(y_true, logits,
                      section_name):
    """Returns the slices (given by name) of the true and predicted vectors.

    The predicted slice is gated by multiplying with ``is_entity`` (and, for
    'membership', also with ``is_group``); presumably this zeroes logits at
    positions that are not enrefs -- confirm broadcasting semantics upstream.

    Raises:
        ValueError: if *section_name* is not one of 'new_entity', 'entities',
            'properties' or 'membership'.
    """
    is_entity = y_true.enref_meta.is_enref()
    if section_name == 'new_entity':
        return (y_true.enref_meta.get_is_new_slice(),
                is_entity * logits.enref_meta.get_is_new_slice())
    elif section_name == 'entities':
        return (y_true.enref_id.slice(), is_entity * logits.enref_id.slice())
    elif section_name == 'properties':
        return (y_true.enref_properties.slice(),
                is_entity * logits.enref_properties.slice())
    elif section_name == 'membership':
        # Membership only applies to group enrefs, hence the extra gate.
        is_group = y_true.enref_properties.is_group()
        return (y_true.enref_membership.slice(),
                is_entity * is_group * logits.enref_membership.slice())
    else:
        raise ValueError('Unknown section name %s' % section_name)
def how_much_to_go(current_level):
    """Calculate how many points the user needs to advance to the next level.

    Args:
        current_level::int or float
            The user's current (possibly fractional) level.

    Returns:
        to_go_points::int or float
            Points needed to reach the next whole level; 0 once the user is
            at level 9 or above.
    """
    if current_level >= 9:
        return 0
    # Distance to the next whole level, scaled by 50 points per level.
    # (The 50 must match the divider used in check_points_treshold.)
    next_threshold = int(current_level) + 1
    return (next_threshold - current_level) * 50
import argparse
def parse_arguments():
    """Parse command-line options; see http://docs.python.org/library/argparse"""
    arg_parser = argparse.ArgumentParser(
        description='Convert string to or from epoch (as appropriate)')
    # Optional positional: absent means "use the current time".
    arg_parser.add_argument('timestr', help='string to convert',
                            nargs='?', default=None)
    arg_parser.add_argument('-b', '--base60time',
                            help='use YYMMDD and base60 time',
                            action="store_true")
    arg_parser.add_argument('-u', '--utc',
                            help='show UTC',
                            action="store_true")
    arg_parser.add_argument('-i', '--integer',
                            help='round off to nearest second',
                            action="store_true")
    return arg_parser.parse_args()
def get_copyright():
    """Return the copyright notice as an XML comment (German: the metadata
    rights belong to Bildungsserver Hessen and may only be used under the
    data-exchange agreements between the German education servers)."""
    return """ <!-- Alle Rechte am Datensatz liegen beim Bildungsserver Hessen.
    Alle in dieser Datei enthaltenen Metadaten sind urheberrechtlich
    geschuetzt und duerfen ausschliesslich im Rahmen der Vereinbarungen
    zum Datenaustausch zwischen den deutschen Bildungsservern verwendet
    werden. -->
    """
def element_count(atoms):
    """
    Count how many atoms of each chemical symbol appear in *atoms*.

    Returns a dict mapping element symbol -> occurrence count.
    """
    counts = {}
    for atom in atoms:
        symbol = atom.symbol
        counts[symbol] = counts.get(symbol, 0) + 1
    return counts
def get_bootstrap_styles():
    """
    Return a <style> block of Bootstrap-flavoured CSS used by the HTML
    report: inline-code styling plus the coloured label badges
    (high-cardinality, missing, zeros, warning, skew, duplicate-rows).
    """
    return """<style>
    code {
        padding: 2px 4px;
        font-size: 90%;
        color: #c7254e;
        background-color: #f9f2f4;
        border-radius: 4px;
        font-family: Menlo,Monaco,Consolas,"Courier New",monospace;
    }
    .label {
        display: inline;
        padding: .2em .6em .3em;
        font-weight: 700;
        line-height: 1;
        color: #fff;
        font-size: 85%;
        text-align: center;
        white-space: nowrap;
        vertical-align: baseline;
        border-radius: .25em;
    }
    .label-high-cardinality {
        background-color: #fe7c1b;
    }
    .label-missing {
        background-color: #214761;
    }
    .label-zeros {
        background-color: #333796;
    }
    .label-warning {
        background-color: #e2007e;
    }
    .label-skew {
        background-color: #ffdb58;
        color: black;
    }
    .label-duplicate-rows {
        background-color: #d90773;
    }
    </style>"""
def td(txt):
    """Wrap *txt* in an HTML table-data cell."""
    return "".join(("<td>", txt, "</td>"))
def get_provenance_record(caption, ancestor_files, **kwargs):
    """Create a provenance record describing the diagnostic data and plot.

    Extra keyword arguments are merged into the record and may override the
    default fields.
    """
    record = dict(
        caption=caption,
        authors=['schlund_manuel'],
        references=['acknow_project'],
        ancestors=ancestor_files,
    )
    record.update(kwargs)
    return record
def get_datetime_string(datetime):
    """
    Given a datetime object, return a human readable string
    (e.g. "05/21/2014 11:12 AM").  Returns None for None or for objects
    that cannot be formatted.
    """
    if datetime is None:
        return None
    try:
        return datetime.strftime("%m/%d/%Y %I:%M %p")
    except (AttributeError, ValueError):
        # Not a datetime-like object, or strftime rejected the value.
        # (The previous bare `except:` hid genuine bugs and even
        # KeyboardInterrupt.)
        return None
def merge(d1, d2):
    """Merge two dictionaries into a new one; keys in *d2* win on conflict.

    Neither input is modified.
    """
    combined = d1.copy()
    combined.update(d2)
    return combined
import random
def getsimulatedgenereadcounts(numgenes, numreads):
    """
    Computes number of simulated reads for all genes.

    input:
        numgenes: total number of genes for which reads are generated
        numreads: total number of reads generated
    output:
        a list of size numgenes with the integer read count per gene;
        counts are scaled so their sum is at most numreads.
    """
    # empirical parameters of the log10(reads) distribution
    lognormmu = 3.4
    lognormsigma = 0.95
    lornormrange = [0.1, 5.1]
    # Oversample, then keep only draws inside the allowed log-range.
    draws = [random.gauss(lognormmu, lognormsigma) for _ in range(2 * numgenes)]
    in_range = [d for d in draws if lornormrange[0] < d < lornormrange[1]]
    logreads = random.sample(in_range, numgenes)
    reads = [pow(10, d) for d in logreads]
    # Hoist the normalising sum out of the loop: the original recomputed
    # sum(reads) for every gene, making this step O(n^2).
    total = sum(reads)
    return [int(r * numreads / total) for r in reads]
def csc_norm(n, Ap, Ax):
    """
    Computes the 1-norm of a sparse CSC matrix: the largest column sum of
    absolute values, max_j sum(abs(A[:, j])).

    @param n: number of columns
    @param Ap: column pointer array (len n + 1)
    @param Ax: nonzero values array
    @return: the 1-norm (0 for an empty matrix)
    """
    norm = 0
    for col in range(n):
        # Entries of column `col` live in Ax[Ap[col]:Ap[col + 1]].
        column_sum = sum(abs(Ax[p]) for p in range(Ap[col], Ap[col + 1]))
        norm = max(norm, column_sum)
    return norm
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.