| content (string, 35–416k chars) | sha1 (string, 40 chars) | id (int64, 0–710k) |
|---|---|---|
def pagination_for(context, current_page, page_var="page", exclude_vars=""):
"""
Include the pagination template and data for persisting querystring
in pagination links. Can also contain a comma separated string of
var names in the current querystring to exclude from the pagination
links, via the ``exclude_vars`` arg.
"""
querystring = context["request"].GET.copy()
exclude_vars = [v for v in exclude_vars.split(",") if v] + [page_var]
for exclude_var in exclude_vars:
if exclude_var in querystring:
del querystring[exclude_var]
querystring = querystring.urlencode()
return {
"current_page": current_page,
"querystring": querystring,
"page_var": page_var,
}
|
77996effe11d35cff3b7e279533f331e1b2d05e0
| 20,459
|
def _place_caravanserai(new_map, size):
"""
Find a size x size region of desert near but not on the east or south edges.
"""
# find a space to fit it along the eastern edge,
# starting from the north
found_y = -1
rows = 0
for y in range(2, 19):
cols = 0
for x in range(19 - size, 19):
# print(x, y, new_map.region_terrain[x*20+y])
if new_map.region_terrain[x*20+y] != 'desert':
break
cols += 1
if cols < size:
rows = 0
continue
rows += 1
if rows == size:
found_y = y - size + 1
break
if found_y > 1:
# print('Can place size ' + str(size) + ' at y=' + str(found_y))
return (19 - size, found_y)
# find a space to fit it along the southern edge,
# starting from the west
found_x = -1
rows = 0
for x in range(2, 19):
cols = 0
for y in range(19 - size, 19):
# print(x, y, new_map.region_terrain[x*20+y])
if new_map.region_terrain[x*20+y] != 'desert':
break
cols += 1
if cols < size:
rows = 0
continue
rows += 1
if rows == size:
found_x = x - size + 1
break
if found_x > 1:
# print('Can place size ' + str(size) + ' at x=' + str(found_x))
return (found_x, 19 - size)
return (-1, -1)
|
bc35e27eff0ab564f3ea7d0eb8f5a3d1973797b0
| 20,460
|
import re
def suggest_name(ctx, wheel_name):
"""
Guess the Debian package name from a wheel name and a Python implementation.
:param ctx: context carrying the target Python version (ctx.python_version.major)
:param wheel_name: Name of the distribution wheel
:type wheel_name: str
:return: Package name
"""
prefix = {2: "python", 3: "python3"}[ctx.python_version.major]
basename = re.compile("[^A-Za-z0-9.]+").sub("-", wheel_name)
basename = basename.replace("python-", "")
basename = basename.replace("-python", "").lower()
return prefix + "-" + basename
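A quick usage sketch (the SimpleNamespace stand-in for ctx is an assumption; any object exposing python_version.major works):
from types import SimpleNamespace
ctx = SimpleNamespace(python_version=SimpleNamespace(major=3))  # hypothetical ctx stand-in
suggest_name(ctx, "Requests_OAuthlib")  # -> 'python3-requests-oauthlib'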
|
b64d190c8e9417af81808d4ec7b0b51661aaa3ca
| 20,461
|
import re
def _simplify_sql(sql):
"""
Simplify SQL statements to make them easier on the eye and shorter for the stats.
"""
sql = " ".join(sql.split("\n"))
sql = re.sub(r" +", " ", sql)
sql = re.sub(r"SELECT .*? FROM", "SELECT FROM", sql)
sql = re.sub(r"INSERT INTO (.*?) \(.*", r"INSERT INTO \1", sql)
sql = re.sub(r"SET .*? WHERE", "SET WHERE", sql)
sql = re.sub(r"IN \((?:%\(\w+\)\w(?:, *)?)+\)", "IN (?)", sql)
return re.sub(r"%\(\w+\)\w", "?", sql)
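For example (illustrative query, not from the source):
_simplify_sql("SELECT id, name\n FROM users WHERE id IN (%(id_0)s, %(id_1)s)")
# -> 'SELECT FROM users WHERE id IN (?)'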
|
9bd29bd0d455ffa7a35d8d46be8372e7a84c3964
| 20,462
|
def parse_gff_comment(comment):
"""Parse the comment."""
fields = {}
for fld in comment.split(";"):
tokens = fld.split("=", 1)
if len(tokens) == 2:
fields[tokens[0]] = tokens[1]
return fields
|
2300fa3cab8507597d0c6d9fd2033f6f4a364e1d
| 20,464
|
import hmac
def compute_signature(payload: bytes, secret: bytes, algo: str = 'sha256') -> str:
"""
Computes the HMAC signature of *payload* given the specified *secret* and the given hashing *algo*.
# Parameters
payload: The payload for which the signature should be computed.
secret: The secret that is used in conjunction with the payload to generate the signature.
algo: The hash algorithm to use, must be `sha1` or `sha256`.
"""
if algo not in ('sha1', 'sha256'):
raise ValueError(f'algo must be {{sha1, sha256}}, got {algo!r}')
return f'{algo}=' + hmac.new(secret, payload, algo).hexdigest()
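A minimal usage sketch (example values are illustrative):
compute_signature(b'{"event": "push"}', b'my-webhook-secret')
# -> 'sha256=<64 hex digits>'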
|
0f62326eaadddb569c661a59a60284062c348b2e
| 20,467
|
def serialize(instance):
"""Return instanceect data in easily serializeable format."""
title = instance.title
if not instance.title:
title = ''
firstname = instance.firstname
if not instance.firstname:
firstname = ''
lastname = instance.lastname
if not instance.lastname:
lastname = ''
if instance.abstr:
abstr = instance.abstr.replace('\n\n', '<br />')
else:
abstr = ''
email = instance.email
if email:
email = instance.email.strip()
sess = instance.sess
if sess:
sess = instance.sess.strip()
txt = instance.txt
if txt:
txt = instance.txt.strip()
return {
'crs_no': instance.crs_no.strip(),
'dept': instance.dept.strip(),
'disc': instance.disc.strip(),
'title': title.strip(),
'min_hrs': instance.min_hrs,
'max_hrs': instance.max_hrs,
'credits': '',
'abstr': abstr,
'fac_id': instance.fac_id,
'email': email,
'firstname': firstname.strip(),
'lastname': lastname.strip(),
'instructors': '',
'sess': sess,
'txt': txt,
'terms': '',
'core': instance.core.strip(),
}
|
f197143c115342997343aafb4724140bfe977e5a
| 20,468
|
def all_true(results):
"""
Return an iterator over the results whose value is truthy.
"""
return filter(lambda r: r.value, results)
|
f67b2e669c226cbedc4e62aebe43c5da35b8d43f
| 20,471
|
import re
def remove_emoji(text: str) -> str:
"""
A function to remove emojis using regex
:param text: An input text.
:return: A text with removed emojis
"""
emoji_pattern = re.compile(pattern="["
u"\U0001F600-\U0001F64F" # emoticons
u"\U0001F300-\U0001F5FF" # symbols & pictographs
u"\U0001F680-\U0001F6FF" # transport & map symbols
u"\U0001F1E0-\U0001F1FF" # flags (iOS)
u"\U00002702-\U000027B0"
u"\U000024C2-\U0001F251"
u"\U0001F300-\U0001F5FF"
u"\U0001F1E6-\U0001F1FF"
u"\U00002700-\U000027BF"
u"\U0001F900-\U0001F9FF"
u"\U0001F600-\U0001F64F"
u"\U0001F680-\U0001F6FF"
u"\U00002600-\U000026FF"
u'\u200d'
u'’'
u'£'
u'\u2060-\u2069'
u'í'
u'ó'
u'ú'
u'á'
u'–'
u'“”‘‘‘'
u'éàééàéééàéè'
u'üöççəəəəçä'
u'ışşƏıışşƏışêêñ'
u'İğğ~•'
u'⏯'
"]+", flags=re.UNICODE)
try: return str(emoji_pattern.sub(r'', text))
except Exception: return ''
|
d30f2ae6ec3be04d4665329e1cba0cef47509751
| 20,474
|
import requests
import sys
def find_record_id(account_num, domain_id, headers, url, target_domain):
""" Parse through all records under our domain, to find our A record's ID. """
all_records = requests.get(url + '/domains/{}/records'.format(domain_id), headers=headers)
for record in all_records.json()['records']:
if record['name'] == target_domain and record['type'] == 'A':
return record['id']
sys.exit("Couldn't find a matching A record for {}.".format(target_domain))
|
c0a93f9489c15dc127eac6ce2788297a189faac5
| 20,475
|
def format_data_for_ai(data):
"""EDITABLE. This function task is to prepare input data for AI model."""
formated_data = [[
data["pacman"]["x"],
data["pacman"]["y"],
data["ghost_1"]["x"],
data["ghost_1"]["y"],
data["ghost_2"]["x"],
data["ghost_2"]["y"],
data["ghost_3"]["x"],
data["ghost_3"]["y"],
data["ghost_4"]["x"],
data["ghost_4"]["y"],
]]
return formatted_data
|
72b50f2a96828c0a50df7f1bf13f5a70a60e45c6
| 20,476
|
from typing import Any
import jinja2
def _render_jinja2(filename: str, **kwargs: Any) -> str:
"""Returns a rendered template.
Args:
filename: Template filename.
**kwargs: Template environment.
"""
with open(filename, 'r') as fh:
template = jinja2.Template(fh.read(), autoescape=False)
return template.render(**kwargs)
|
1a48edc8fb829e9a12bfd678756c995afaf5c9bd
| 20,478
|
def min_value_node(node):
"""
Binary Search Tree min value node
Complexity: O(HEIGHT)
Find the node with the minimum value in a
binary search tree.
"""
while node.left is not None:
node = node.left
return node
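A minimal sketch, assuming a simple Node class with val/left/right attributes (not from the source):
class Node:
    def __init__(self, val, left=None, right=None):
        self.val, self.left, self.right = val, left, right

min_value_node(Node(8, left=Node(3, left=Node(1)))).val  # -> 1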
|
4d423183f0da3fd5d0bc66c84f26353543f8e406
| 20,479
|
from typing import List
def word_list(list_: str) -> List[str]:
"""Load word list."""
with open(list_, "r") as f:
return f.readlines()
|
7c7ec4824d81d9f1e9e64af62dbfdf2d7bc2eaa1
| 20,480
|
def parse_option_list_string(option_list, delimiter=None):
"""Convert the given string to a dictionary of options.
Each pair must be of the form 'k=v'; the delimiter separates the
pairs from each other, not the key from the value.
:param option_list: A string representation of key value pairs.
:type option_list: str
:param delimiter: Delimiter to use to separate each pair.
:type delimiter: str
:returns: A dictionary of settings.
:rtype: Dict
"""
settings = {}
if delimiter is None:
delimiter = ';'
for setting in option_list.split(delimiter):
if not setting:
continue
key, value = setting.split('=')
settings[key.strip()] = value.strip()
return settings
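For instance:
parse_option_list_string('retries=3; timeout=30')  # -> {'retries': '3', 'timeout': '30'}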
|
fe6a440e004552b418d151ac6f3de9a08fe3e787
| 20,481
|
def _read_exact(read, size):
"""
Read exactly size bytes with `read`, except on EOF
:Parameters:
- `read`: The reading function
- `size`: expected number of bytes
:Types:
- `read`: ``callable``
- `size`: ``int``
:return: The read bytes
:rtype: ``str``
"""
if size < 0:
return read(size)
vlen, buf = 0, []
push = buf.append
while vlen < size:
val = read(size - vlen)
if not val:
break
vlen += len(val)
push(val)
return "".join(buf)
|
4520c977812bd77365dd1b5a445697b3435d1d19
| 20,483
|
def filtering_results(results, book_type, number):
"""
results: list of dictionaries
Filter the results to get a specific type (trade / issue) and number,
based on the comic title; omnibus and compendium editions count as trades.
"""
assert book_type in ["trade", "issue", None], "Choose between 'trade' or 'issue' or leave blank (compendium and omnibus are trades)"
type_filtered_holder = []
number_filtered_holder = []# Will hold the new entries after filtering step
# FIRST GET EITHER ISSUE OR PAPERBACK ADD TYPE
paperback_signs = ["vol", "vol.", "volume", "tpb", 'pb',"tp", "paperback" ,"omnibus", "compendium", "hc", "hardcover", "graphic novel", "softcover"]
issue_signs = ["#"]
for book in results:
if any(x in book["title"].lower() for x in issue_signs):
book["type"] = "issue"
elif any(x in book["title"].lower() for x in paperback_signs):
book["type"] = "trade"
else:
book["type"] = "unknown (assume trade)"
if book_type: # NOT NONE
for book in results:
if book["type"] == book_type or book["type"] == "unknown (assume trade)":
type_filtered_holder.append(book)
else:
type_filtered_holder = results
if number:
for book in type_filtered_holder:
if "{}".format(number) in book["title"] or "0{}".format(number) in book["title"]:
number_filtered_holder.append(book)
else:
number_filtered_holder = type_filtered_holder
# PUT CHEAPER FIRST
number_filtered_holder = sorted(number_filtered_holder, key=lambda k: k['price'])
return number_filtered_holder
|
1d762f30703ce90ed750493889189e7ac3ae8872
| 20,485
|
def genericIntListValidator(values, validValues):
"""
Generic validator: check that values is a list or tuple of ints drawn from validValues. (Added at version 2.)
"""
if not isinstance(values, (list, tuple)):
return False
valuesSet = set(values)
validValuesSet = set(validValues)
if valuesSet - validValuesSet:
return False
for value in values:
if not isinstance(value, int):
return False
return True
|
5f20046435947ae811547646851b3964702ffeea
| 20,486
|
import math
def circular_easein(pos):
"""
Easing function for animations: Circular Ease In
"""
return 1 - math.sqrt(1 - (pos * pos))
|
89d69147f1f273740683597c9076319d75c080a5
| 20,487
|
import re
def char_length(character, letter_spacing=0):
"""Return the max width of a character by looking at its longest line.
:param character: The character array from the font face
:param letter_spacing: The user defined letter spacing
:returns: The length of a longest line in a character
"""
stripped = [re.sub(r"(<([^>]+)>)", "", char) for char in character]
char_width = max(map(len, stripped))
if char_width == 0 and letter_spacing > 0:
# Adding space to letter spacing
char_width = 1
return char_width
|
32650f0d21e597810225dad25513b0107d6551e1
| 20,488
|
def return_first_col_action(row, action_dict):
"""
Default action function for tables. This function returns the first data column value for the row of
data. Used by the **TABLE_RETURN_FIRST_VAL** action.
:param List row: the data associated with the selected row
:param Dict action_dict: the dictionary of values associated with the action - ignored in this function
:return: The first value from the list of data values for the selected row of the table.
"""
return row.values[0]
|
57e5975fd60cabff2bc135a6e0b88b34d3f70211
| 20,489
|
from typing import List
from typing import Dict
def aggregate_action_stats(action_stats: List[Dict[str, int]]) -> Dict[str, int]:
"""Aggregate statistics by returning largest value observed for each of the tweet reactions (reply, retweet, favorite)."""
action_stat = {}
for key in action_stats[0].keys():
counts = [count[key] for count in action_stats]
action_stat[key] = max(counts)
return action_stat
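For instance:
aggregate_action_stats([{'reply': 1, 'retweet': 5}, {'reply': 3, 'retweet': 2}])
# -> {'reply': 3, 'retweet': 5}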
|
2e43e186c6f6ba58ce7ba0ff883bdb61783c896b
| 20,490
|
import re
import logging
def append_to_csl_item_note(csl_item, text='', dictionary={}):
"""
Add information to the note field of a CSL Item.
In addition to accepting arbitrary text, the note field can be used to encode
additional values not defined by the CSL JSON schema, as per
https://github.com/Juris-M/citeproc-js-docs/blob/93d7991d42b4a96b74b7281f38e168e365847e40/csl-json/markup.rst#cheater-syntax-for-odd-fields
Use dictionary to specify variable-value pairs.
"""
if not isinstance(csl_item, dict):
raise ValueError(f'append_to_csl_item_note: csl_item must be a dict but was of type {type(csl_item)}')
if not isinstance(dictionary, dict):
raise ValueError(f'append_to_csl_item_note: dictionary must be a dict but was of type {type(dictionary)}')
if not isinstance(text, str):
raise ValueError(f'append_to_csl_item_note: text must be a str but was of type {type(text)}')
note = str(csl_item.get('note', ''))
if text:
if note and not note.endswith('\n'):
note += '\n'
note += text
for key, value in dictionary.items():
if not re.fullmatch(r'[A-Z]+|[-_a-z]+', key):
logging.warning(f'append_to_csl_item_note: skipping adding "{key}" because it does not conform to the variable_name syntax as per https://git.io/fjTzW.')
continue
if '\n' in value:
logging.warning(f'append_to_csl_item_note: skipping adding "{key}" because the value contains a newline: "{value}"')
continue
if note and not note.endswith('\n'):
note += '\n'
note += f'{key}: {value}'
if note:
csl_item['note'] = note
return csl_item
|
b3f43adacba1dca3749e048fe94b5cf0b2c15e3b
| 20,491
|
def GetTitle( text ):
"""Given a bit of text which has a form like this:
'\n\n Film Title\n \n (OmU)\n '
return just the film title.
"""
pp = text.splitlines()
pp2 = [p.strip() for p in pp if len(p.strip()) >= 1]
return pp2[0]
|
d152282610072fa88c7a45a3aca2991b6fedb79c
| 20,493
|
def get_time_in_seconds(timeval, unit):
"""
Convert a time from 'unit' to seconds
"""
if 'nyear' in unit:
dmult = 365 * 24 * 3600
elif 'nmonth' in unit:
dmult = 30 * 24 * 3600
elif 'nday' in unit:
dmult = 24 * 3600
elif 'nhour' in unit:
dmult = 3600
elif 'nminute' in unit:
dmult = 60
else:
dmult = 1
return dmult * timeval
|
4fbcbf16e7a51e046e267a0cafad090049357cae
| 20,494
|
def index():
""" render svg graph """
return 'Hello! Go to /table/ or /graph/.'
|
a2e5be8ca5a33078497315dcdf850df0d7c7a6fb
| 20,495
|
def get_columns(document):
"""
Return a list of tuples, each tuple containing column name and type
"""
# tags = document.tags.to_dict()
tags = document.tags
names = list(tags.keys())
types = list(tags.values())
columns = []
for field, value in zip(names, types):
try:
value = int(value) # Handle year better
except (TypeError, ValueError):
pass
if isinstance(value, str):
value = "str"
elif isinstance(value, int):
value = "int"
col = (field, value)
columns.append(col)
return columns
|
c227815fcf1d2b3815b951c90397a7e89c43cc1c
| 20,496
|
def drawPins(input_data, xpins, ypins, map):
"""
Draw pins on input_data
Inputs:
- input_data: np.array of size (C, H, W) (all zeros to start with)
- xpins & ypins: np.array of x-coordinates and y-coordinates for all pins
e.g., [x1, x2 ... xm] and [y1, y2, ... ym] for m pins
- map: layout layer map (dictionary from layer name to index)
Outputs:
- output_data: np.array of size (C, H, W) (with pixels corresponding to pins in layer 'pin' set to '1')
"""
output_data = input_data
for (x, y) in zip(xpins, ypins):
output_data[map['pin'], y, x] = 1
return output_data
|
1d5796cc2a009e8e5993cbc374525c46b08a1a61
| 20,497
|
def _get_tile_grid_dict(tile_grid, tile_reading):
"""Create Dictionary with row and column for each tile in a grid.
Args:
tile_grid (tuple): Rows and columns of tile grid in well.
tile_reading (str): Reading method of microscope: horizontal,
horizontal_serp, vertical, vertical_serp.
Returns:
(dict): Tile numbers and their respective positions in the grid.
"""
# extract rows and columns
assert len(tile_grid) == 2, "Grid should be a tuple with two integers for rows and columns of tiles."
tile_rows, tile_cols = tile_grid
# create a dictionary with ImageNumbers as keys and TileRow and TileCol as items
if tile_reading == "horizontal":
col_ls = list(range(1, tile_cols + 1)) * tile_rows
row_ls = [row for row in range(1, tile_rows + 1) for _ in range(tile_cols)]
elif tile_reading == "vertical":
row_ls = list(range(1, tile_rows + 1)) * tile_cols
col_ls = [col for col in range(1, tile_cols + 1) for _ in range(tile_rows)]
elif tile_reading == "horizontal_serp":
row_ls = [row for row in range(1, tile_rows + 1) for _ in range(tile_cols)]
col_ls = (list(range(1, tile_cols + 1)) + list(range(1, tile_cols + 1))[::-1]) * (tile_rows // 2)
if len(col_ls) == 0:
col_ls = list(range(1, tile_cols + 1))
elif (tile_rows % 2) != 0:
col_ls = col_ls + list(range(1, tile_cols + 1))
elif tile_reading == "vertical_serp":
col_ls = [col for col in range(1, tile_cols + 1) for _ in range(tile_rows)]
row_ls = (list(range(1, tile_rows + 1)) + list(range(1, tile_rows + 1))[::-1]) * (tile_cols // 2)
if len(row_ls) == 0:
row_ls = list(range(1, tile_rows + 1))
elif (tile_cols % 2) != 0: # an odd column count needs one extra forward pass
row_ls = row_ls + list(range(1, tile_rows + 1))
else:
reading_methods = ['horizontal', 'horizontal_serp', 'vertical', 'vertical_serp']
raise ValueError(f"{tile_reading} not in reading methods: {reading_methods}")
tiles = list(range(1, (tile_rows * tile_cols) + 1))
tile_grid_dict = dict(zip(tiles, list(zip(row_ls, col_ls))))
return tile_grid_dict
|
5eb2a9c3c6e058e23bbb4c36f6229c4289d2803f
| 20,498
|
import re
def slugify(input: str) -> str:
"""Converts Foo Bar into foo-bar, for use wih anchor links."""
input = re.sub(r"([() ]+)", "-", input.lower())
return re.sub(r"-$", "", input)
|
c08acb689783e382ce58ca886f2971aa4f42c763
| 20,500
|
def start_with_qu(string):
"""判断一个字符串是否含有'qu'
:type string: Str
"""
if len(string) < 2:
return False
bool1 = string[0] == 'Q' or string[0] == 'q'
bool2 = string[1] == 'U' or string[1] == 'u'
return bool1 and bool2
|
98774403ee984ffe7eb15ead9176acc45b16e763
| 20,501
|
def clean_music_name(music_name):
"""
DESCRIPTION: clean extra info from music name
INPUT: music_name (str)
OUTPUT: music_name (str)
"""
breaking_substring_list = [
' (feat.',
' (with',
]
for substring in breaking_substring_list:
if substring in music_name:
breaking_point = music_name.find(substring)
music_name = music_name[:breaking_point]
return music_name
|
20d3ce462f9f3a3fb08bc57ff707a30788c40a75
| 20,502
|
import six
def _changes(plays):
"""
Find changes in ansible return data
"""
changes = {}
for play in plays["plays"]:
task_changes = {}
for task in play["tasks"]:
host_changes = {}
for host, data in six.iteritems(task["hosts"]):
if data["changed"] is True:
host_changes[host] = data.get("diff", data.get("changes", {}))
if host_changes:
task_changes[task["task"]["name"]] = host_changes
if task_changes:
changes[play["play"]["name"]] = task_changes
return changes
|
3c7206054db159d417aaaf617da1a7c197448c62
| 20,503
|
def lengthOfLongestSubstring(self, s):
"""
:type s: str
:rtype: int
"""
cur_dt = {}
max_len = 0 # history max_len
inn_len = 0 # current round max_len
last_check_point = 0
for seq, ch in enumerate(s):
if ch not in cur_dt:
cur_dt[ch] = seq
inn_len += 1
else:
ch_begin = cur_dt[ch]
if ch_begin < last_check_point:
# fake repeat
cur_dt[ch] = seq
inn_len += 1
else:
# real repeat
max_len = max(max_len, inn_len)
inn_len = min(seq - ch_begin, inn_len)
cur_dt[ch] = seq
last_check_point = ch_begin + 1
max_len = max(max_len, inn_len)
return max_len
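A quick check of the classic case (self is unused here, so None can be passed when calling it standalone):
lengthOfLongestSubstring(None, "abcabcbb")  # -> 3 ("abc")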
|
95fa88cc5ef6fb55397f78cd59ba0556abc04ae8
| 20,504
|
def create_hash(ls):
"""
Examples:
>>> create_hash([1,2,3])
{1: None, 2: None, 3: None}
"""
return dict.fromkeys(ls)
|
9b27921f0058331119856d27ca7b3565bbfe9673
| 20,505
|
def enemies_flied(enemies) -> bool:
"""Проверяет, добрались ли enemy balls до нижнего края экрана."""
for enemy in enemies.sprites():
if enemy.cx <= enemy.radius:
return True
return False
|
f069f5df813acbb61b65b0010905351f24f97502
| 20,506
|
def check_input_stream_count(expected_number_of_streams):
"""
Decorator for Tool._execute that checks the number of input streams
:param expected_number_of_streams: The expected number of streams
:return: the decorator
"""
def stream_count_decorator(func):
def func_wrapper(*args, **kwargs):
self = args[0]
sources = kwargs['sources'] if 'sources' in kwargs else args[1]
if expected_number_of_streams == 0:
if sources:
raise ValueError("No input streams expected")
else:
given_number_of_streams = len(sources) if sources else 0
if given_number_of_streams != expected_number_of_streams:
raise ValueError("{} tool takes {} stream(s) as input ({} given)".format(
self.__class__.__name__, expected_number_of_streams, given_number_of_streams))
return func(*args, **kwargs)
return func_wrapper
return stream_count_decorator
|
96dfdc8f85d70dee1ac44f01f95dd07eb3725261
| 20,508
|
import math
def calculate_base_day_duration(game):
"""Calculate the base day length."""
base_day_length = math.sqrt(2 * game.nb_alive_players)
base_day_length = math.ceil(base_day_length) + 1
base_day_length = base_day_length * 60
return base_day_length
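Worked example (the SimpleNamespace stand-in for game is an assumption):
from types import SimpleNamespace
calculate_base_day_duration(SimpleNamespace(nb_alive_players=10))
# ceil(sqrt(20)) + 1 = 6 minutes -> 360 seconds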
|
00f49a75f7b9772bed5358fe05f7ddf9b35d2f93
| 20,510
|
def guess_shape_and_submatrix_shape(dic):
"""
Guess the data shape and the shape of the processed data submatrix.
"""
if 'procs' not in dic: # unknown dimensionality and shapes
return None, None
procs = dic['procs']
if 'SI' not in procs or 'XDIM' not in procs:
return None, None # cannot determine shape
si_0 = procs['SI']
xdim_0 = procs['XDIM']
if 'proc2s' not in dic: # 1D data
return (si_0, ), (xdim_0, )
proc2s = dic['proc2s']
if 'SI' not in proc2s or 'XDIM' not in proc2s:
return None, None # cannot determine shape
si_1 = proc2s['SI']
xdim_1 = proc2s['XDIM']
if 'proc3s' not in dic: # 2D data
return (si_1, si_0), (xdim_1, xdim_0)
proc3s = dic['proc3s']
if 'SI' not in proc3s or 'XDIM' not in proc3s:
return None, None # cannot determine shape
si_2 = proc3s['SI']
xdim_2 = proc3s['XDIM']
if 'proc4s' not in dic: # 3D data
return (si_2, si_1, si_0), (xdim_2, xdim_1, xdim_0)
proc4s = dic['proc4s']
if 'SI' not in proc4s or 'XDIM' not in proc4s:
return None, None # cannot determine shape
si_3 = proc4s['SI']
xdim_3 = proc4s['XDIM']
# assume 4D data
return (si_3, si_2, si_1, si_0), (xdim_3, xdim_2, xdim_1, xdim_0)
|
00cf28f558b8af1253abb90df4312ff5c594d8e3
| 20,511
|
def contains_tokens(pattern):
"""Test if pattern is a list of subpatterns."""
return type(pattern) is list and len(pattern) > 0
|
10436114b1eb1b9e3f5c85bf30ec0813d999d101
| 20,513
|
def _is_iter(lst):
"""Is ``lst`` an iterator?
"""
try:
return list(lst)
except: # pragma: nocover
return False
|
8296cb9741046e51db19368c93a9e901cef0b717
| 20,514
|
def remove_overlap_vector(original, to_remove, buff=0):
"""remove the to_remove CutVector from original CutVector"""
for row in to_remove:
original = original[(original[:, 1]<row[0]-buff) | (original[:, 0]>row[1]+buff)]
return original
|
63dc22963da5660874d372093dea15c1cf3003ef
| 20,515
|
def get_phase(answer: str) -> str:
"""Возвращает фазовое состояние на английском, как указано в GET запросе."""
while True:
if answer.lower() == 'газ':
return 'vapor'
elif answer.lower() == 'жидкость':
return 'liquid'
else:
answer = input('Неправильный ввод. Введите заново: ')
|
260d2eda627c0f788679af7af7a922cfb0a40ae4
| 20,516
|
import argparse
def parse_args() -> argparse.Namespace:
"""Parse user command line arguments."""
parser = argparse.ArgumentParser(
description='Merge given files on command-line.',
formatter_class=argparse.ArgumentDefaultsHelpFormatter)
parser.add_argument('infiles', metavar='file', type=str, nargs='+',
help='Files to be merged')
parser.add_argument('-o',
metavar='out.vcd',
type=str,
required=False,
default='out.vcd',
help='Optional output filename')
return parser.parse_args()
|
0b66396e75548b8552a3743d693b4bdbc248e8a0
| 20,519
|
def resolve_bibitem(bibitem, referenced_pubs):
"""Returns the publication object for the first referenced publication
whose bibliographic data matches to text in the bibitem.
.. note:: This is pretty hacky
Parameters
----------
bibitem : unicode
The bibitem string (e.g., \bibitem text)
referenced_pubs : object
List of publications referenced by the base document.
"""
for pub in referenced_pubs:
if len(pub.authors) > 3:
authors = pub.authors[:3]
else:
authors = pub.authors
for author in authors:
# author last names need to be in bibtem
if author[0] not in bibitem:
continue
return pub
|
19f6bde3936ec5c8ec534f119ca2c2f05e232ab4
| 20,520
|
def _patched_cast(self, db_type):
""" a workaround for icontains searches on LOB fields """
# see https://code.djangoproject.com/ticket/11580
if db_type and db_type.endswith('LOB'):
return "DBMS_LOB.SUBSTR(%s,2000,1)"
else:
return "%s"
|
75e283c9a68f793550a13b384f4478b1397dd594
| 20,521
|
def solve(equation, answer):
""" Solve equation and check if user is correct or incorrect """
splitted = equation.split('=')
left = splitted[0].strip().replace('?', str(answer)).replace('x', '*')
right = splitted[1].strip().replace('?', str(answer)).replace('x', '*')
try:
if right.isdigit():
return eval(left) == int(right)
else:
return int(left) == eval(right)
except ValueError:
return False
|
78f0eced818e681ed8d8c5e0e4667a101a7ffd4e
| 20,522
|
def replace_parameters(templatefile, outfile, replacements, delims='=', commentchar=None, endchars = '\n', warn=False):
""" looks for strings of the form:
PARAM delim VALUE, if delim is a single str
and if PARAM is in replacements, writes a new line:
PARAM delim replacements[PARAM], otherwise does the replacement:
PARAM delim[0] VALUE delim[1] -> PARAM delim[0] replacement[PARAM] delim[1]
if warn, a warning will be printed if a parameter is not in the file
if commentchar, endchars will be replaced by the comment (if the line has a comment)
Returns boolean signifying if parameter was replaced successfully
"""
t = open(templatefile, 'r')
f = open(outfile, 'w')
param_set = True
# count the number of replacements
count = {}
for key in replacements:
count[key] = 0
for line in t:
if(isinstance(delims,str)): # DEFAULT: ONLY ONE DELIMITER
#print("One delim only")
delim = delims
s = line.split(delim)
if len(s) == 1:
f.write(line)
else:
for key in replacements:
if s[0].strip() == key:
if commentchar:
comment=line.split(commentchar, 1)
if len(comment) > 1:
endchars = ' '+commentchar+comment[1]
count[key] += 1
line = key + delim + str(replacements[key]) + endchars
f.write(line)
param_set=True
else: # ADD OPTION FOR TWO DELIMITERS
s = line.split(delims[0])
if len(s) == 1:
#print(line)
f.write(line) # prints blank lines
else:
for key in replacements:
if s[0].strip() == key:
#print(line)
if commentchar:
line_split_on_comment=line.split(commentchar, 1)
assignment_str = line_split_on_comment[0]
if(len(line_split_on_comment)>1):
endchars_new = " " + commentchar + line_split_on_comment[1]
else:
endchars_new = endchars
else:
assignment_str = line
endchars_new = endchars
#print(assignment_str,endchars_new)
rtokens = assignment_str.split(delims[1])
#print(rtokens)
tokens=[]
for token in rtokens:
if(token!=""):
tokens.append(token)
if(delims[0]==delims[1]):
remainder_str = delims[1].join(tokens[2:])
else:
remainder_str = delims[1].join(tokens[1:])
#print(remainder_str+".")
if(len(remainder_str.split("\n")) > 1 and endchars=="\n"):
remainder_str = remainder_str.split("\n")[0]
line = key + delims[0] + str(replacements[key]) + delims[1] + remainder_str + endchars_new
#print(line)
count[key] += 1
f.write(line)
t.close()
f.close()
if warn:
for key in count:
c = count[key]
if c == 0:
print('Warning: '+templatefile+' : No replacements for '+key)
param_set=False
if c > 1:
print('Warning: '+templatefile+' : Multiple ('+str(c)+') replacements for '+key)
param_set = False
return param_set
|
cd5fc328adfdc1ea9aef9a320e7e7b9dab07f39d
| 20,526
|
import os
def _has_api_credentials():
"""
Test for API credentials
"""
client_id = os.environ.get('GOOGLE_OAUTH_CLIENT_ID')
client_secret = os.environ.get('GOOGLE_OAUTH_CONSUMER_SECRET')
salt = os.environ.get('AUTHOMATIC_SALT')
return bool(client_id and client_secret and salt)
|
097c785c39ccb19cbf02284016b1bd1e2a104bce
| 20,528
|
import re
def contig_to_vcf_chrom(contig_name):
"""
Annotations may reference more complex contig names than the VCF does in its CHROM column.
This function maps any given annotation's contig name to its corresponding VCF CHROM name.
"""
return re.sub(r'\W.+$', '', contig_name)
|
080437ff504fd695742917542072f11405606a83
| 20,529
|
import pwd
import os
def expanduser(path):
"""Expand ~ and ~user constructions. If user or $HOME is unknown,
do nothing."""
if not path.startswith('~'):
return path
i = path.find('/', 1)
if i < 0:
i = len(path)
if i == 1:
if 'HOME' not in os.environ:
userhome = pwd.getpwuid(os.getuid()).pw_dir
else:
userhome = os.environ['HOME']
else:
try:
pwent = pwd.getpwnam(path[1:i])
except KeyError:
return path
userhome = pwent.pw_dir
userhome = userhome.rstrip('/') or userhome
return userhome + path[i:]
|
f08a1b3a6f2075a66804a8890dc71fe5d98f94f5
| 20,530
|
import os
def list_files_in_dir(dir_path):
"""
Return all file names and absolute paths under the given directory:
[
{
"key": "file name",
"path": "abs path"
},
...
]
"""
lists = []
for f in os.listdir(dir_path):
f_path = os.path.join(dir_path, f)
if os.path.isfile(f_path):
lists.append({
'key': f,
'path': f_path
})
return lists
|
4d72c52f74a20de7b297e47da6e1d3e7bd19aa0f
| 20,531
|
import os
import shutil
def send_to_logbook(fileName, location='lcls'):
""" Copies an xml file into the logbook 'new entry' directory. """
path = os.path.join("/u1/", location, "physics/logbook/data")
try:
shutil.copy(fileName + ".png", path)
except IOError:
print("Copying thumbnail failed!")
try:
shutil.copy(fileName + ".ps", path)
except IOError:
print("Copying attachment failed!")
shutil.copy(fileName + ".xml", path)
return True
|
9b7f7e72be4070fec6d8917a690f7278235fcd78
| 20,532
|
def create_prefix(nick, user, host):
""" Builds a prefix based on the nick, user, and host provided. This is essentially the
opposite to parse_prefix().
>>> create_prefix("Jack", "~jack", "000-000-000-000.example.com")
'Jack!~jack@000-000-000-000.example.com'
"""
return "{0}!{1}@{2}".format(nick, user, host)
|
9ad4cba5ab057faee06d0cf284e00c2c4d563623
| 20,533
|
def getJoint(df, attrs): # P(attrs)
"""Calcule la distribution de probabilité jointe de plusieurs attributs.
Parameters
----------
attrs : list of str
Les noms de l'ensemble d'attributs en question.
Returns
-------
probas : pandas.Series
La distribution de probabilité jointe des attributs.
"""
freqs = df.groupby(attrs)[attrs[0]].count()
total = len(df)
# print(freqs, total, freqs / total)
return freqs / total
|
e2668e99cd7a1dd1568033f3fd0270828fd33f40
| 20,534
|
def burg(sample_list, coefficient_number):
"""
Computes Linear Prediction coefficients via Burg method from a list of samples.
"""
p = sum(sample ** 2 for sample in sample_list)
a0 = p / len(sample_list)
b1 = sample_list[:len(sample_list) - 1]
b2 = sample_list[1:]
aa = [0.0 for i in range(coefficient_number)]
coefficient_list = [0.0 for i in range(coefficient_number)]
for i in range(coefficient_number):
numerator = 0.0
denominator = 0.0
for j in range(len(sample_list) - i - 1):
numerator += b1[j] * b2[j]
denominator += b1[j] ** 2 + b2[j] ** 2
coefficient_list[i] = 2.0 * numerator / denominator
a0 *= 1.0 - coefficient_list[i] ** 2
for j in range(i):
coefficient_list[j] = aa[j] - coefficient_list[i] * aa[i - j - 1]
if i < coefficient_number:
for j in range(i + 1):
aa[j] = coefficient_list[j]
for j in range(len(sample_list) - i - 2):
b1[j] -= aa[i] * b2[j]
b2[j] = b2[j + 1] - aa[i] * b1[j + 1]
return a0, coefficient_list
|
3ccb83d43be659022fe85b3f10a73031030c21dd
| 20,535
|
import numpy as np
def sigmoid(values, gain, shift):
"""
Map values with sigmoid function to range [0,1].
Y(t) = 1/(1 + exp(-gain*(values - shift)))
"""
tiny = 0.000000001
# Make sure argument is a numpy array
if not isinstance(values, np.ndarray):
values = np.array(values)
return 1.0 / (1.0 + np.exp(-gain * (values - shift)) + tiny)
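For instance, inputs map into (0, 1):
sigmoid([0.0, 1.0], 1.0, 0.0)  # -> approximately array([0.5, 0.731])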
|
aadbdbf8a3f5d8657cc7c9b24056e1a47baeb4a6
| 20,536
|
def from_last_seen_data(last_branch, last_node):
"""
nodeID / num_nodes
nodeID / nodes_left
nodes_left, best_integer, best_bound, itCnt, gap, has_incumbent, num_nodes
objective / best_integer, best_bound / objective, best_bound / best_integer
open_nodes_len, open_nodes_max, open_nodes_min, open_nodes_avg
num_nodes_at_max / open_nodes_len, num_nodes_at_min / open_nodes_len
open_nodes_max / best_integer, open_nodes_min / best_integer, open_nodes_min / open_nodes_max
objective / open_nodes_max, open_nodes_min / objective
num_cuts
"""
last_data_list = []
first = last_branch['node'] / float(last_branch['num_nodes']) if last_branch['num_nodes'] != 0 \
else None
second = last_branch['node'] / float(last_branch['nodes_left']) if last_branch['nodes_left'] != 0 \
else None
last_data_list.append(first)
last_data_list.append(second)
last_data_list.append(last_branch['nodes_left'])
last_data_list.append(last_branch['best_integer'])
last_data_list.append(last_branch['best_bound'])
last_data_list.append(last_branch['itCnt'])
last_data_list.append(last_branch['gap'])
last_data_list.append(last_branch['has_incumbent'])
last_data_list.append(last_branch['num_nodes'])
third = last_branch['objective'] / last_branch['best_integer'] \
if last_branch['best_integer'] and last_branch['objective'] else None
fourth = last_branch['best_bound'] / last_branch['objective'] \
if last_branch['best_bound'] and last_branch['objective'] else None
fifth = last_branch['best_bound'] / last_branch['best_integer'] \
if last_branch['best_integer'] and last_branch['best_bound'] else None
last_data_list.append(third)
last_data_list.append(fourth)
last_data_list.append(fifth)
last_data_list.append(last_node['open_nodes_len'])
last_data_list.append(last_node['open_nodes_max'])
last_data_list.append(last_node['open_nodes_min'])
last_data_list.append(last_node['open_nodes_avg'])
last_data_list.append(last_node['num_nodes_at_max'] / float(last_node['open_nodes_len']))
last_data_list.append(last_node['num_nodes_at_min'] / float(last_node['open_nodes_len']))
# open_nodes_max/best_integer, open_nodes_min/best_integer, open_nodes_min/open_nodes_max
sixth = last_node['open_nodes_max'] / float(last_branch['best_integer']) \
if last_branch['best_integer'] and last_node['open_nodes_max'] else None
seventh = last_node['open_nodes_min'] / float(last_branch['best_integer']) \
if last_branch['best_integer'] and last_node['open_nodes_min'] else None
eighth = last_node['open_nodes_min'] / float(last_node['open_nodes_max']) \
if last_node['open_nodes_max'] and last_node['open_nodes_min'] else None
last_data_list.append(sixth)
last_data_list.append(seventh)
last_data_list.append(eighth)
# objective/open_nodes_max, open_nodes_min/objective
ninth = last_branch['objective'] / float(last_node['open_nodes_max']) \
if last_branch['objective'] and last_node['open_nodes_max'] else None
tenth = last_node['open_nodes_min'] / float(last_branch['objective']) \
if last_branch['objective'] and last_node['open_nodes_min'] else None
last_data_list.append(ninth)
last_data_list.append(tenth)
last_data_list.append(last_node['num_cuts'])
if len(last_data_list) != 24:
print("*** len(last_seen_data): {}".format(len(last_data_list)))
return last_data_list, len(last_data_list)
|
5cc58b21f1f6ef776d14154aea4b7f6c8693de99
| 20,539
|
def load_model_configurations(model):
"""
Arguments:
model: A SSD model with PriorBox layers that indicate the
parameters of the prior boxes to be created.
Returns:
model_configurations: A list of dictionaries, one per PriorBox layer, with its prior box parameters.
"""
model_configurations = []
for layer in model.layers:
layer_type = layer.__class__.__name__
if layer_type == 'PriorBox':
layer_data = {}
layer_data['layer_width'] = layer.input_shape[1]
layer_data['layer_height'] = layer.input_shape[2]
layer_data['min_size'] = layer.min_size
layer_data['max_size'] = layer.max_size
layer_data['aspect_ratios'] = layer.aspect_ratios
layer_data['num_prior'] = len(layer.aspect_ratios)
model_configurations.append(layer_data)
return model_configurations
|
34c6efbca820bd5461b2e5aeb2c7b30184afa250
| 20,540
|
from numpy.linalg import det, svd
import numpy
def is_mirror_image(X, Y):
"""
Check if two configurations X and Y are mirror images
(i.e. their optimal superposition involves a reflection).
@param X: n x 3 input vector
@type X: numpy array
@param Y: n x 3 input vector
@type Y: numpy array
@rtype: bool
"""
## center configurations
X = X - numpy.mean(X, 0)
Y = Y - numpy.mean(Y, 0)
## SVD of correlation matrix
V, L, U = svd(numpy.dot(numpy.transpose(X), Y)) #@UnusedVariable
R = numpy.dot(V, U)
return det(R) < 0
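A quick sanity check (tetrahedron coordinates chosen for illustration; negating x reflects across the yz-plane):
X = numpy.array([[0., 0., 0.], [1., 0., 0.], [0., 1., 0.], [0., 0., 1.]])
Y = X * numpy.array([-1., 1., 1.])
is_mirror_image(X, Y)  # -> True
is_mirror_image(X, X)  # -> False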
|
62a384fc8d0a70990d1b5da1da80c9d43a6290a2
| 20,541
|
def user_case(mocker, user, case, org):
"""Fake UserCase instance."""
instance = mocker.Mock()
instance.user = user
instance.case = case
instance.organisation = org
return instance
|
5d6de334a8ac690156204e81a6c2db53a71ea1d6
| 20,543
|
import re
def camel_to_snake(text):
"""
Will convert CamelCaseStrings to snake_case_strings. Examples:
>>> camel_to_snake('CamelCase')
'camel_case'
>>> camel_to_snake('CamelCamelCase')
'camel_camel_case'
>>> camel_to_snake('Camel2Camel2Case')
'camel2_camel2_case'
>>> camel_to_snake('getHTTPResponseCode')
'get_http_response_code'
>>> camel_to_snake('get2HTTPResponseCode')
'get2_http_response_code'
>>> camel_to_snake('HTTPResponseCode')
'http_response_code'
>>> camel_to_snake('HTTPResponseCodeXYZ')
'http_response_code_xyz'
>>> camel_to_snake('Double_Case')
'double_case'
>>> camel_to_snake('SW_Merch')
'sw_merch'
>>> camel_to_snake('Odd/Characters')
'odd_characters'
>>> camel_to_snake('With Spaces')
'with_spaces'
"""
# Load #
result = text
# Eliminate trailing spaces #
result = result.strip(' ')
# First step #
try:
result = re.sub('(.)([A-Z][a-z]+)', r'\1_\2', result)
except TypeError:
print("The text received was '%s'" % result)
raise
# Second step #
result = re.sub('([a-z0-9])([A-Z])', r'\1_\2', result)
# Lower case the rest #
result = result.lower()
# Eliminate remaining spaces #
result = result.replace(' ', '_')
# Eliminate quote characters #
result = result.replace('"', '')
result = result.replace("'", '')
# Eliminate parenthesis #
result = result.replace("(", '')
result = result.replace(")", '')
# Eliminate special characters #
result = result.replace('/', '_')
# Eliminate double underscore #
while '__' in result: result = result.replace('__', '_')
# Return #
return result
|
d7216ab1a35c189abf67bfb475486ce05dcba560
| 20,544
|
def get_hex(binary_str):
"""
Returns the hexadecimal string literal for the given binary string
literal input
:param str binary_str: Binary string to be converted to hex
"""
return "{0:#0{1}x}".format(int(binary_str, base=2), 4)
|
fe2784e58d61e577bcc66ed1bd3d2c02c1e7fda0
| 20,545
|
from typing import List
from typing import Any
def second_pass(tokens: List[Any]) -> List[Any]:
"""
Examples
--------
>>> second_pass([3, "[", "abc", "]", 4, "[", "ab", "]", "c"])
[3, ['abc'], 4, ['ab'], 'c']
>>> second_pass([10, '[', 'a', ']'])
[10, ['a']]
>>> second_pass([3, '[', 2, '[', 'a', ']'])
[3, [2, ['a']]]
"""
new_tokens = []
last_stack = new_tokens
stacks = [new_tokens]
for token in tokens:
if token == "[":
stack = []
stacks.append(stack)
last_stack.append(stack)
last_stack = stack
elif token == "]":
stacks.pop()
last_stack = stacks[-1]
else:
stacks[-1].append(token)
return new_tokens
|
478d3e5f5a8adb642110b50dd6268c1c43c254c8
| 20,546
|
def crossval_split_a2d2(imgs_paths: list, masks_paths: list, fold=5):
"""
Splits images and masks by two sets: train and validation by folds with a small stratification by categories 'uu', 'um' and 'umm'.
Possible value for 'fold' is: 1, 2, 3, 4, 5
params:
imgs_paths : list with source images (without validation area)
masks_paths : list with masks
fold : number of validation fold
"""
if len(imgs_paths) < 5:
raise RuntimeError("Length of imgs_paths less then 5.")
if fold not in range(1, 6):
raise ValueError("Invalid fold number: {}. 'fold' can be 1,2,3,4 or 5.".format(fold))
assert len(imgs_paths) == len(masks_paths), "Error: imgs_paths and masks_paths have different lengths."
imgs_per_fold = round(len(imgs_paths) / 5)
# train urban unmarked
if fold == 5:
valid_imgs_paths = imgs_paths[-(len(imgs_paths) - imgs_per_fold * 4):]
valid_masks_paths = masks_paths[-(len(imgs_paths) - imgs_per_fold * 4):]
train_imgs_paths = imgs_paths[:(imgs_per_fold * 4)]
train_masks_paths = masks_paths[:(imgs_per_fold * 4)]
else:
valid_imgs_paths = imgs_paths[(fold-1)*imgs_per_fold:fold * imgs_per_fold]
valid_masks_paths = masks_paths[(fold-1)*imgs_per_fold:fold * imgs_per_fold]
train_imgs_paths = imgs_paths[:(fold-1)*imgs_per_fold] + imgs_paths[fold*imgs_per_fold:]
train_masks_paths = masks_paths[:(fold-1)*imgs_per_fold] + masks_paths[fold*imgs_per_fold:]
return ((train_imgs_paths, train_masks_paths), (valid_imgs_paths, valid_masks_paths))
|
58d273d4093b8c56d9398c159bbcd06aae6c6742
| 20,547
|
def quickSortIntersection(dataList, keyList, discardList):
"""
quickSortIntersection recursively sorts the list of values using a
quick sort algorithm.
"""
if len(keyList) <= 1:
return keyList
else:
lessData = []
lessKey = []
moreData = []
moreKey = []
pivot = dataList[-1]
kpivot = keyList[-1]
for i in range(len(dataList) - 1):
if keyList[i] not in discardList:
if dataList[i] <= pivot:
lessData.append(dataList[i])
lessKey.append(keyList[i])
else:
moreData.append(dataList[i])
moreKey.append(keyList[i])
return quickSortIntersection(lessData, lessKey, discardList) + [kpivot] + quickSortIntersection(moreData, moreKey, discardList)
|
a92bdd1fc0a380c529fab435eb4c63a24aea2c24
| 20,548
|
import logging
def default_end_point(message):
"""
All Requests whose request type is not determined are routed to this method
Primary function is to log the request received
:param request: request received by the server
:type request: `Message`
:param response: response to send back
:type response: `Message`
:param router: router associated with the request
:type router: `Router`
:param kwargs: Dictionary of model-specific arguments.
:type kwargs: `dict`
"""
# more detailed logging should be done on request message
logging.info(message.__dict__)
message.set_data({'status': 'error'})
return message
|
03986710696281da3b51c26d6f477ce12016add5
| 20,550
|
import glob
def is_file_exists(file_name: str) -> bool:
"""
Checks if a file exists.
:param file_name: file name to check
:return: True, if the file exists, False otherwise
"""
return len(glob.glob(file_name)) == 1
|
7e8da1f544d40d53f9329e4453e198022330f01c
| 20,551
|
def fix_lon(lon):
"""
Fixes negative longitude values.
Parameters
----------
lon: scalar value
Input longitude value (for arrays, apply elementwise, e.g. via numpy.where)
"""
if lon < 0:
lon += 360.
return lon
|
ab11dca9279399179242537c86cf0af85eedb60e
| 20,552
|
def is_unique_chars_v3(str):
"""
If not allowed to use additional data structures, we can compare every character of the string to every other
character of the string.
This will take O(n ** 2) time and O(1) space.
"""
for char1 in str:
occurrence = 0
for char2 in str:
if char1 == char2:
occurrence += 1
if occurrence > 1:
return False
return True
|
5c524ffa29b7cdc9d43619da7b299ae0f90d443c
| 20,554
|
def split_canonical(canonical):
"""
Split a canonical into prefix and suffix based on value sign #
:param canonical: the canonical to split
:return: prefix and suffix
"""
if '#' not in canonical:
return canonical, ''
if canonical.startswith('#'):
return '', canonical[1:].strip()
prefix, suffix = canonical.split('#', 1)
return prefix.strip(), suffix.strip()
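For instance:
split_canonical('kg # per item')  # -> ('kg', 'per item')
split_canonical('# items')        # -> ('', 'items')
split_canonical('plain')          # -> ('plain', '')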
|
1a3f7e17cfcc9fa88d63d72fced5435c92f2ea10
| 20,555
|
def decode_content(content, encoding=None):
"""
Decode content into text.
:param content: binary content
:param encoding: encoding to try first
:return: decoded text
"""
try:
if encoding is not None:
return content.decode(encoding)
return content.decode('utf-8')
except Exception:
try:
return content.decode('utf-8')
except Exception:
try:
return content.decode('gbk')
except Exception as e:
raise e
|
b257a3c925cb9474ee4842a6fa063a92f72c6cd7
| 20,556
|
import argparse
def build_arg_parser():
"""
:return: argparse.ArgumentParser() filled with the standard arguments for a training session.
Might need to be enhanced for some models.
"""
parser = argparse.ArgumentParser()
parser.add_argument('--data', type=str, default='./data/multitask_dataset.pkl', help='Data path.')
parser.add_argument('--gpu', type=int, help='Id of the GPU')
parser.add_argument('--no-cuda', action='store_true', default=False, help='Disables CUDA training.')
parser.add_argument('--only_nodes', action='store_true', default=False, help='Evaluate only nodes labels.')
parser.add_argument('--only_graph', action='store_true', default=False, help='Evaluate only graph labels.')
parser.add_argument('--seed', type=int, default=42, help='Random seed.')
parser.add_argument('--epochs', type=int, default=3000, help='Number of epochs to train.')
parser.add_argument('--lr', type=float, default=0.001, help='Initial learning rate.')
parser.add_argument('--weight_decay', type=float, default=1e-6, help='Weight decay (L2 loss on parameters).')
parser.add_argument('--patience', type=int, default=1000, help='Patience')
parser.add_argument('--loss', type=str, default='mse', help='Loss function to use.')
parser.add_argument('--print_every', type=int, default=5, help='Print training results every')
return parser
|
eb9552b23407506ba95ed55c15337a8840013d83
| 20,559
|
import re
def regularize_number(item):
"""
transform the item into hexadecimal like "0xff"
:param item:
:return:
"""
item = "".join(filter(str.isalnum, item)) # remove the non-number and non-alpha
item = re.sub(r'[g-z]|[G-Z]', "", item) # remove the illegal alpha except for (a-f,A-F)
if 2 < len(item):
item = item[:2]
elif 0 == len(item):
item = "00"
else:
item = "0" + item
item = "0x" + item
return item
|
809fec6708f4e7c8e43fda2c533bfbe49bc4ac83
| 20,560
|
import six
def force_bytes(s):
"""Force to a bytes type (not unicode)"""
if issubclass(type(s), six.binary_type):
return s
if issubclass(type(s), six.text_type):
return s.encode("utf-8")
raise ValueError(s)
|
4dc6216a0078268e49557d79f083a18031273c45
| 20,561
|
from typing import Dict
def polymerize(template: str, rules: Dict[str, str]) -> str:
"""Polymerize a new polymer according to the given `template` and `rules`"""
new_polymer: str = ""
for i in range(len(template) - 1):
pair = template[i : i + 2]
insertion = rules[pair]
new_polymer += pair[0] + insertion
# The last character is missing at this point, so add it
new_polymer += template[-1]
return new_polymer
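For example, with the well-known "NNCB" template (rules abbreviated to the pairs needed):
polymerize("NNCB", {"NN": "C", "NC": "B", "CB": "H"})  # -> 'NCNBCHB'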
|
8cfbf233f2fc31f1326c0ab6e4676e5ad0a725ab
| 20,562
|
def _format_yaml_load(data):
"""
Reinsert '\n's that have been removed fom comments to make file more readable
:param data: string to format
:return: formatted string
"""
# ptr = 0
# cptr = data[ptr:].find('comment: ')
data = data.replace("\n", "\n\n")
return data
|
499cd37a75d9badb4ba3bc853a6152036bc2e66b
| 20,564
|
def TransformName(r, undefined=''):
"""Returns a resorce name from an URI.
Args:
r: JSON-serializable object.
undefined: Returns this value if the resource cannot be formatted.
Returns:
A project name for selfLink from r.
"""
if r:
try:
return r.split('/')[-1]
except AttributeError:
pass
return undefined
|
6dafaa2b3f0e187fc9bc9238e3a7a06a895675fd
| 20,565
|
def get_username(request):
"""Returns the username from request."""
# Retrieve the username either from a cookie (when logging out) or
# the authenticated user.
username = "not-login"
if hasattr(request, "user"):
username = request.user.username
if request.session.get('staff', False):
username = "%s(*)" % username
return username
|
cdd19d9715c20a4bc7f20be9b53926b87be671da
| 20,566
|
import hashlib
def get_methods():
"""
Returns a list of methods to use while hashing, along with
an instantiated hasher object for each algorithm
"""
return [
{
"algorithm": "md5",
"hasher": hashlib.md5()
}, {
"algorithm": "sha1",
"hasher": hashlib.sha1(),
}, {
"algorithm": "sha2_224",
"hasher": hashlib.sha224()
}, {
"algorithm": "sha2_256",
"hasher": hashlib.sha256()
}, {
"algorithm": "sha2_384",
"hasher": hashlib.sha384()
}, {
"algorithm": "sha2_512",
"hasher": hashlib.sha512()
}, {
"algorithm": "sha3_224",
"hasher": hashlib.sha3_224()
}, {
"algorithm": "sha3_256",
"hasher": hashlib.sha3_256()
}, {
"algorithm": "sha3_384",
"hasher": hashlib.sha3_384()
}, {
"algorithm": "sha3_512",
"hasher": hashlib.sha3_512()
}, {
"algorithm": "blake2b",
"hasher": hashlib.blake2b()
}, {
"algorithm": "blake2s",
"hasher": hashlib.blake2s()
}
]
|
df655768d4680d211512a95bfc0b27ac0114a82c
| 20,567
|
def extractTitles(df):
"""EXTRACT title column with Mr,Mrs,Miss,Master or rare
Note: this function was adapted from an idea posted online. Please see README file for reference [2]"""
df['Title'] = df.Name.str.extract(r' ([A-Za-z]+)\.', expand=False)
#Collapse less frequent titles into larger groups for easier comparison
df['Title'] = df['Title'].replace(
['Capt', 'Col', 'Countess', 'Lady', 'Don', 'Dona', 'Dr', 'Major', 'Jonkheer', 'Rev', 'Sir'], 'Rare')
df['Title'] = df['Title'].replace('Mlle', 'Miss')
df['Title'] = df['Title'].replace('Ms', 'Miss')
df['Title'] = df['Title'].replace('Mme', 'Mrs')
return df.drop(['Name'], axis=1)
|
24a121688d783733f969be10ea470159e3becd55
| 20,568
|
def west_valley(parcels):
"""
Dummy for presence in West Valley.
"""
in_wv = parcels['mpa'].isin([
'AV', 'BU', 'EL', 'GB', 'GL', 'GO', 'LP', 'PE', 'SU', 'TO', 'WI', 'YO'
])
return (parcels['is_MC'] & in_wv).astype(int)
|
fec326f82b21acb0cab670904b76096f84445e4d
| 20,569
|
import os
from pathlib import Path
def get_data_home():
"""
DATA_HOME is determined using environment variables.
The top priority is the environment variable $DICODILE_DATA_HOME which is
specific to this package.
Else, it falls back on XDG_DATA_HOME if it is set.
Finally, it defaults to $HOME/data.
The data will be put in a subfolder 'dicodile'
"""
data_home = os.environ.get(
'DICODILE_DATA_HOME', os.environ.get('XDG_DATA_HOME', None)
)
if data_home is None:
data_home = Path.home() / 'data'
return Path(data_home) / 'dicodile'
|
8339be48914e3fc23b688404e7c375159f526d82
| 20,571
|
import sys
def sanitized_argv(cli_args=None):
"""
Return a list of arguments where -- may have been inserted.
By default argparse gets confused with commands like the following:
$ remote foo --yay bar
The '--yay' argument should be sent to the "foo" command, but argparse
interprets it as a "remote" argument. So here we iterate over the
arguments, and insert "--" as soon as we encounter the first non "-"
argument.
"""
cli_args = sys.argv[1:] if not cli_args else cli_args # skip the exe name
if "--" in cli_args:
return cli_args
args = []
for arg in cli_args:
if "--" not in args and not arg.startswith("-"):
args.append("--")
args.append(arg)
return args
|
008924908784339e2149253b82deef898feedc05
| 20,573
|
from typing import Dict
def _run_compatibility_patches(json_data: Dict) -> Dict:
"""Patch the incoming JSON to make it compatible.
Over time the structure of the JSON information used to dump a workflow
has changed. These patches are to guarantee that an old workflow is
compliant with the new structure. The patches applied are:
1. Change action.target_url from None to ''
:param json_data: Json object to process
:return: Modified json_data
"""
# Target_url field in actions should be present and empty by default
for action_obj in json_data['actions']:
if action_obj.get('target_url') is None:
action_obj['target_url'] = ''
return json_data
|
e00776cfb89499fbac71cf867830fc00631ac600
| 20,575
|
def unsplit_to_tensors(tuples):
"""Get original tensor list from list of tensor tuples.
Args:
tuples: list of tensor tuples.
Returns:
original tensor list.
"""
return [t for tup in tuples for t in tup]
|
b5df94421ade286ef5ff9bb4c422c5201babe36f
| 20,577
|
def getLivenessPoints(liveness):
"""
histogram points for the liveness plot. It will be used for a plot like:
^
| *
| * * *
| ** ***
| *********
+-------------->
schedule index.
For example, if the livenesses are [3,5], the points will be,
[[0,3],[1,3],[1,5],[2,5]]
The points are connected alternatively with horizontal and vertical lines.
"""
xs = []
ys = []
for op in range(len(liveness)):
if op == 0:
xs.append(0)
else:
xs.append(xs[-1])
xs.append(xs[-1] + 1)
ys.append(liveness[op])
ys.append(liveness[op])
assert len(xs) == len(ys)
assert len(xs) == 2 * len(liveness)
return xs, ys
|
84dac2f0b29c957727354dd06863820c2e56a866
| 20,578
|
from urllib.parse import urlencode
def oauth_url(client_id, permissions=None, server=None, redirect_uri=None):
"""A helper function that returns the OAuth2 URL for inviting the bot
into servers.
Parameters
-----------
client_id : str
The client ID for your bot.
permissions : :class:`Permissions`
The permissions you're requesting. If not given then you won't be requesting any
permissions.
server : :class:`Server`
The server to pre-select in the authorization screen, if available.
redirect_uri : str
An optional valid redirect URI.
"""
url = 'https://discordapp.com/oauth2/authorize?client_id={}&scope=bot'.format(client_id)
if permissions is not None:
url = url + '&permissions=' + str(permissions.value)
if server is not None:
url = url + "&guild_id=" + server.id
if redirect_uri is not None:
url = url + "&response_type=code&" + urlencode({'redirect_uri': redirect_uri})
return url
|
bf7ca1957153ff938334927744804891010c0c26
| 20,579
|
def extract_locations_and_object_types(
object_info,
classification=False,
object_types_format="name"
):
"""
:param object_info: iterable of detected objects carrying location and class info
:param classification: if True, also return the object types
:param object_types_format: "name" or "id"
:return: locations and (optionally) object types
"""
locations = [obj.location for obj in object_info]
if classification:
if object_types_format == "id":
obj_types = [(obj.d_class_id, obj.c_class_id) for obj in object_info]
elif object_types_format == "name":
obj_types = [obj.class_name for obj in object_info]
else:
raise ValueError("Unsupported object type format")
else:
obj_types = None
return locations, obj_types
|
75d7baf7f7235c485732f5f2183719599e25004b
| 20,580
|
def git(orig): # pragma: no cover
""" most git commands play nicer without a TTY """
cmd = orig.bake(_tty_out=False)
return cmd
|
9c14f8ff4bd7a74112e2400af02657ac49ffa2bd
| 20,582
|
def get_menu_item(menu):
"""
Asks user to choose a menu item from one of the menu
dictionaries defined above
Args:
(dictionary) menu - dict of menu items
Returns:
(str) selection - key of menu item chosen
"""
while True:
print('------------')
print('Menu Options')
print('------------')
options = list(menu.keys())
options.sort()
for entry in options:
print( entry, menu[entry] )
selection = input("Please Select: ")
# in case X entered for exit
if selection.isupper():
selection = selection.lower()
if selection in options:
# print(menu[selection])
break
else:
print( "Unknown Option Selected!" )
return selection
|
603ed07bdd7b51d9539cb0251f42e0affc1001cf
| 20,584
|
def _sharded_checkpoint_pattern(process_index, process_count):
"""Returns the sharded checkpoint prefix."""
return f"shard-{process_index:05d}-of-{process_count:05d}_checkpoint_"
|
67e0b91b8b3ac9d5c69ec662f6c7931c90a79225
| 20,587
|
def size(self, dim=None):
"""
Size function
"""
if dim is not None:
return self.shape[dim]
else:
return self.shape
|
fac1bde25485088f2aab93a4b27e249f694762b1
| 20,588
|
def parse_arg(arg):
"""
Parses arguments for convenience. Argument can be a
csv list ('a,b,c'), a string, a list, a tuple.
Returns a list.
"""
# handle string input
if type(arg) == str:
arg = arg.strip()
# parse csv as tickers and create children
if ',' in arg:
arg = arg.split(',')
arg = [x.strip() for x in arg]
# assume single string - create single item list
else:
arg = [arg]
return arg
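For instance:
parse_arg('spy, agg , tlt')  # -> ['spy', 'agg', 'tlt']
parse_arg('spy')             # -> ['spy']
parse_arg(['spy', 'agg'])    # -> ['spy', 'agg'] (non-strings pass through unchanged)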
|
45154fbdd9b6ecfafebbee0ec47a6185799a773a
| 20,589
|
from typing import Dict
def tags_string(tags: Dict[str, str]) -> str:
"""
tags_string generates datadog format tags from dict.
"""
return ','.join(f'{k}:{v}' for k, v in tags.items())
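For instance:
tags_string({'env': 'prod', 'region': 'us-east-1'})  # -> 'env:prod,region:us-east-1'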
|
203243c2960bb4fb75a832bc014484aa5b9dec9c
| 20,590
|
def _process_cdf_array(cdf_array, process_type="long"):
"""Reshapes a CDF array for training/plotting.
Converts shape of cdf array between (n_eval, n_obs) (wide format)
and (n_eval*n_obs,) (long format).
Args:
cdf_array: (np.ndarray) Input cdf_array in long / wide format.
shape (n_eval, n_obs) if wide, or (n_eval*n_obs, ) if long.
process_type: (str) Should be `long`.
Returns:
(np.ndarray) cdf array in converted shape.
"""
if process_type == 'long':
return cdf_array.flatten()
raise ValueError(f"process_type must be 'long', got {process_type!r}")
|
2a2b9b5b038b23f4668c9ac25b3671aeb4f6d1c4
| 20,591
|
from typing import Optional
from typing import Set
def user_string(
prompt: str, allow_empty: bool = False, disallowed_values: Optional[Set[str]] = None
) -> str:
"""Get a string from the user"""
value = input(f"{prompt} ").strip()
while True:
if disallowed_values is not None and next(
(True for c in value if c in disallowed_values), False
):
print(f"Invalid input, must not contain: {repr(list(disallowed_values))}.")
elif not value and not allow_empty:
print(f"Input must not be empty.")
else:
return value
value = input(f"{prompt} ").strip()
|
11f303242500db99828e8c6edb1f9543baaee3f7
| 20,592
|
def if_elif_else(value, condition_function_pair):
"""
Apply logic if condition is True.
Parameters
----------
value : anything
The initial value
condition_function_pair : tuple
First element is the assertion function, second element is the
logic function to execute if assertion is true.
Returns
-------
The result of the first function for which assertion is true.
"""
for condition, func in condition_function_pair:
if condition(value):
return func(value)
|
ff2e5315e3bf7ad3e8fa23941e17d7052d6e3ebc
| 20,593
|
import functools
def compose(*functions):
"""Compose a list of function to one."""
return functools.reduce(lambda f, g: lambda x: f(g(x)), functions,
lambda x: x)
|
7173b623c587a08d43e097a79cabb3f861026a4d
| 20,594
|
from typing import List
def nest_list(inp: list, col_cnt: int) -> List[list]:
"""Make a list of list give the column count."""
nested = []
if len(inp) % col_cnt != 0:
raise ValueError("Missing value in matrix data")
for i in range(0, len(inp), col_cnt):
# slicing is safe: the length check above guarantees complete rows
nested.append(inp[i:i + col_cnt])
return nested
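For instance:
nest_list([1, 2, 3, 4, 5, 6], 3)  # -> [[1, 2, 3], [4, 5, 6]]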
|
6d3ca2e2d4cb61d68279ffad773831a7ba203eae
| 20,595
|