content stringlengths 35 416k | sha1 stringlengths 40 40 | id int64 0 710k |
|---|---|---|
def resource_name_for_asset_type(asset_type):
    """Extract the trailing resource name from a dotted asset type.

    Args:
        asset_type: dotted asset type like 'google.compute.Instance'

    Returns:
        The final dotted component, e.g. 'Instance'.
    """
    _, _, resource = asset_type.rpartition('.')
    return resource
def get_merged_gaps(gaps):
    """Get gaps merged across channels/streams
    Parameters
    ----------
    gaps: dictionary
        contains channel/gap array pairs
    Returns
    -------
    array_like
        an array of startime/endtime arrays representing gaps.
    Notes
    -----
    Takes an dictionary of gaps, and merges those gaps across channels,
    returning an array of the merged gaps.

    Each gap appears to be indexed as [start, end, next-data-start]
    (indices 0, 1 and 2) -- TODO confirm against the producer of ``gaps``.
    """
    # flatten all per-channel gap lists into a single list
    merged_gaps = []
    for key in gaps:
        merged_gaps.extend(gaps[key])
    # sort gaps so earlier gaps are before later gaps
    sorted_gaps = sorted(merged_gaps, key=lambda gap: gap[0])
    # merge gaps that overlap
    merged_gaps = []
    merged_gap = None
    for gap in sorted_gaps:
        if merged_gap is None:
            # start of gap
            merged_gap = gap
        elif gap[0] > merged_gap[2]:
            # next gap starts after current gap ends
            merged_gaps.append(merged_gap)
            merged_gap = gap
        elif gap[0] <= merged_gap[2]:
            # next gap starts at or before next data
            if gap[1] > merged_gap[1]:
                # next gap ends after current gap ends, extend current
                merged_gap[1] = gap[1]
                merged_gap[2] = gap[2]
    # flush the last open merged gap, if any
    if merged_gap is not None:
        merged_gaps.append(merged_gap)
    return merged_gaps
def get_lrmost(T, segs):
    """Return the leftmost and rightmost members of ``segs`` present in T.

    Preserves the iteration order of ``T``; returns (None, None) when no
    element of ``T`` belongs to ``segs``.
    """
    present = [seg for seg in list(T) if seg in segs]
    if not present:
        return None, None
    return present[0], present[-1]
def overall_dwelling_dimensions(
        area,
        average_storey_height
        ):
    """Calculates the overall dwelling dimensions, Section 1.

    :param area: A list of the areas of each floor.
        The first item is the basement, the second the ground floor etc.
        See (1a) to (1n).
    :type area: list (float)
    :param average_storey_height: A list of the average storey height of
        each floor, basement first.  See (2a) to (2n).
    :type average_storey_height: list (float)
    :return: A dictionary with keys (volume, total_floor_area,
        dwelling_volume): per-floor volumes (3a)-(3n), total floor
        area (4), and dwelling volume (5).
    :rtype: dict
    """
    # per-floor volume = floor area * its average storey height
    volume = [area[i] * average_storey_height[i] for i in range(len(area))]
    return dict(volume=volume,
                total_floor_area=sum(area),
                dwelling_volume=sum(volume))
def speedx(clip, factor = None, final_duration=None):
    """Return ``clip`` played back at ``factor`` times its normal speed.

    Instead of ``factor``, the desired ``final_duration`` of the clip may
    be given, and the factor is derived from the current duration.  The
    effect is also applied to the clip's audio and mask, if any.
    """
    if final_duration:
        factor = 1.0 * clip.duration / final_duration
    warped = clip.fl_time(lambda t: factor * t, apply_to=['mask', 'audio'])
    if clip.duration is None:
        return warped
    return warped.set_duration(1.0 * clip.duration / factor)
def check_groups(dashboard, request):
    """Return True when ``request.user`` is authorized to view ``dashboard``.

    Access is granted for public dashboards, for superusers, and for any
    user who shares at least one group with the dashboard.
    """
    if dashboard.public is True or request.user.is_superuser is True:
        return True
    user_groups = request.user.groups.all()
    return any(group in user_groups for group in dashboard.groups.all())
def binarize_worker(document):
    """Replace every term weight of a BOW document with 1.

    Parameters
    ----------
    document : list of (int, float)
        A document as (term_id, weight) pairs.

    Returns
    -------
    list of (int, float)
        The same terms, each with weight 1.
    """
    result = []
    for term_id, _weight in document:
        result.append((term_id, 1))
    return result
import networkx as nx
def split_graph(G):
    """Split ``G`` into one graph per edge ``interaction`` attribute.

    Returns a dict mapping each distinct interaction value to a
    MultiDiGraph containing only edges with that interaction; every split
    keeps all nodes of ``G`` (with their data).
    """
    key_attr = "interaction"
    interactions = {data[key_attr] for _, _, data in G.edges(data=True)}
    result = {}
    for interaction in interactions:
        edges = [(u, v, data) for u, v, data in G.edges(data=True)
                 if data[key_attr] == interaction]
        subgraph = nx.from_edgelist(edges, create_using=nx.MultiDiGraph())
        subgraph.add_nodes_from(G.nodes(data=True))
        result[interaction] = subgraph
    return result
def CheckForNewSkipExpectations(input_api, output_api):
    """Checks for and dissuades the addition of new Skip expectations.

    Scans changed lines of files under gpu_tests/test_expectations for a
    ``[ Skip ]`` tag and returns a presubmit prompt warning listing them
    (an empty list when none are found).
    """
    new_skips = []
    expectation_file_dir = input_api.os_path.join(input_api.PresubmitLocalPath(),
                                                  'gpu_tests',
                                                  'test_expectations')
    # only inspect files inside the expectations directory
    file_filter = lambda f: f.AbsoluteLocalPath().startswith(expectation_file_dir)
    for affected_file in input_api.AffectedFiles(file_filter=file_filter):
        for _, line in affected_file.ChangedContents():
            # expectation tags look like "[ Skip ]" with optional spaces
            if input_api.re.search(r'\[\s*Skip\s*\]', line):
                new_skips.append((affected_file, line))
    result = []
    if new_skips:
        warnings = []
        for affected_file, line in new_skips:
            warnings.append('  Line "%s" in file %s' %
                            (line, affected_file.LocalPath()))
        result.append(
            output_api.PresubmitPromptWarning(
                'Suspected new Skip expectations found:\n%s\nPlease only use such '
                'expectations when they are strictly necessary, e.g. the test is '
                'impacting other tests. Otherwise, opt for a '
                'Failure/RetryOnFailure expectation.' % '\n'.join(warnings)))
    return result
def test_trainable_parameters_changed(trainer):
    """Performs a training step and verifies that at least one parameter
    in every trainable layer has changed.

    Returns a (passed, msg) tuple instead of asserting, so callers can
    aggregate results; reads the pre-step parameters from
    ``trainer._saved_state["model"]``.
    """
    print("At least one parameter changed in the trainable layers", end="")
    passed, msg = True, ""
    trainer.fit(epochs=1, max_steps=1, max_eval_steps=0)
    for name, new_param in trainer.model.named_parameters():
        if new_param.requires_grad:
            old_param = trainer._saved_state["model"][name]
            # (new != old).any() is truthy when any element differs; no
            # difference at all means the step had no effect on this layer
            if not (new_param.data != old_param).any():
                msg += " expected changes in: %s\n" % name
                passed = False
    return passed, msg
def get_free_space_network(free_nets, used_nets):
    """Return the sorted list of subnets left after removing ``used_nets``.

    Args:
        free_nets: iterable of ``ipaddress`` network objects that are free.
        used_nets: iterable of network objects to carve out of ``free_nets``.

    Returns:
        Sorted list of network objects covering the remaining free space.
    """
    for excluded_net in used_nets:
        remaining = []
        for candidate in list(free_nets):
            try:
                # Split the candidate subnet around the excluded network.
                pieces = list(candidate.address_exclude(excluded_net))
            except ValueError:
                # excluded_net is not contained in candidate: keep it whole.
                pieces = [candidate]
            remaining.extend(pieces)
        # Fix: the original reused ``used_nets`` as a scratch variable here
        # (shadowing the parameter) and carried a dead ``pass`` statement.
        free_nets = remaining
    free_nets = list(free_nets)
    free_nets.sort()
    return free_nets
def get_game_name(filename: str):
    """Retrieve the game name embedded in a PSP EBOOT PBP file.

    :param filename: path to the PBP file
    :return: the game name string, or False when the file is not a
        recognizable PSX EBOOT or the name bytes cannot be decoded
    """
    gamename = b''
    with open(filename, 'rb') as eboot:
        # Offset 0x24 holds a little-endian pointer to the PSX data block.
        eboot.seek(0x24)
        pbp_verification_offset = eboot.read(4)
        eboot.seek(int.from_bytes(pbp_verification_offset, "little"))
        pbp_verification_string = eboot.read(8)
        # check the bytes for information that confirms the pbpfile is from
        # a psx game: PSISOIMG is for single disc games and PSTITLEI is for
        # multi-disc games (based on evertonstz's contributions)
        if pbp_verification_string in (b'PSISOIMG', b'PSTITLEI'):
            # The NUL-terminated title string lives at offset 0x358.
            eboot.seek(0x358)
            while True:
                current_byte = eboot.read(1)
                # stop at the terminator, or at EOF (read() returns b'');
                # the original looped forever on a truncated file
                if current_byte == b'\x00' or current_byte == b'':
                    break
                gamename += current_byte
        else:
            return False
    try:
        decoded = gamename.decode()
    except UnicodeDecodeError:
        # The original guarded the bytes concatenation (which can never
        # raise UnicodeDecodeError) and left this decode unguarded.
        return False
    if len(decoded) > 31:
        return decoded.replace(' ', '')[:21].replace('\x00', '').replace(':', '')
    else:
        return decoded.replace('\x00', '')
def sbt(bees):
    """Sanitize bees types: coerce every bee into a tuple."""
    return list(map(tuple, bees))
def i_simple(r, m):
    """Return the per-period interest rate.

    variables:
        r: nominal annual interest rate
        m: number of periods in a year
           (annually = 1, semiannually = 2, quarterly = 4)
    """
    periodic_rate = r / m
    return periodic_rate
def strategy(history, memory):
    """Tit for Tat, but it only defects once in a row."""
    opponent_defected_last = history.shape[1] >= 1 and history[1, -1] == 0
    we_remember_one = memory is not None and memory == 1
    if opponent_defected_last and we_remember_one:
        choice = 0
    else:
        choice = 1
    return choice, choice
def _pixel_to_coords(col, row, transform):
"""Returns the geographic coordinate pair (lon, lat) for the given col, row, and geotransform."""
lon = transform[0] + (col * transform[1]) + (row * transform[2])
lat = transform[3] + (col * transform[4]) + (row * transform[2])
return lon, lat | f610340b5eb2ea652774d753920076f59a6faccd | 35,684 |
def round_to_factor(num: float, base: int) -> int:
    """Round ``num`` to the nearest integer multiple of ``base``.

    E.g., for the floating number 90.1 and integer base 45, the result
    is 90.  Exact midpoints follow Python's banker's rounding, as with
    the builtin ``round``.

    # Attributes
    num : floating point number to be rounded.
    base: integer base
    """
    quotient = num / base
    return round(quotient) * base
def quaternary_spherical(rho, phi):
    """Zernike quaternary spherical.

    Evaluates the even-order polynomial in the radial coordinate ``rho``.
    ``phi`` is accepted for a uniform (rho, phi) interface but unused:
    this rotationally-symmetric term has no azimuthal dependence.
    """
    return 252 * rho**10 \
        - 630 * rho**8 \
        + 560 * rho**6 \
        - 210 * rho**4 \
        + 30 * rho**2 \
        - 1
def get_doc_content(content):
    """
    Return the doc fields from request (not auth-related fields).
    """
    doc_fields = ("title", "link", "tags", "authors", "year", "notes", "read")
    return {field: content.get(field) for field in doc_fields}
def join_strings(lst):
    """Join a list into a single comma-separated values string."""
    separator = ","
    return separator.join(lst)
def l_system(depth, axiom, **rules):
    """Expand the L-system ``axiom`` using ``rules``, ``depth`` times.

    ``axiom`` may be a string or a list; each rewriting pass produces a
    list, and the result of the final pass is returned unchanged (so a
    depth of 0 returns ``axiom`` as-is).
    """
    current = axiom
    for _ in range(depth):
        expanded = []
        for symbol in current:
            if symbol in rules:
                # a rule's right-hand side contributes all its symbols
                expanded.extend(rules[symbol])
            else:
                # symbols with no rule are copied through unchanged
                expanded.append(symbol)
        current = expanded
    return current
def call_with_ensured_size(method, max_size, arg):
    """Call ``method`` on ``arg``, chunking it when it is an oversized list.

    Args:
        method (function): the method to call
        max_size (int): the maximum number of arguments per single call
        arg (any | list<any>): the argument(s) to split up

    Returns:
        list<any> | dict<any>: the combined results of the calls on each
        chunk.  A failed chunk contributes ``None`` placeholders (list
        results) or is skipped (dict results); results of other types are
        returned from the first chunk only, as before.
    """
    if not isinstance(arg, list) or len(arg) <= max_size:
        return method(arg)
    # The first chunk is called unguarded, as in the original.
    results = method(arg[0:max_size])
    for i in range(max_size, len(arg), max_size):
        sublist = arg[i:i + max_size]
        if isinstance(results, list):
            try:
                results = results + method(sublist)
            except Exception:  # was a bare except; keep best-effort behavior
                results = results + ([None] * len(sublist))
        elif isinstance(results, dict):
            try:
                results.update(method(sublist))
            except Exception:  # best-effort: skip a failed chunk
                pass
    return results
import numpy
def gtsolh(a, b):
    """Starting solution.

    Runs five fixed Newton iterations from the initial guess c = 0.95*b.
    The residual has the form of a wave-speed secular equation in the
    ratios gamma = b/a and kappa = c/b -- presumably a Rayleigh-wave
    starting estimate; TODO confirm from the calling context.
    """
    c = 0.95 * b
    for _ in range(5):
        gamma = b / a
        kappa = c / b
        k2 = kappa ** 2
        gk2 = (gamma * kappa) ** 2
        fac1 = numpy.sqrt(1.0 - gk2)
        fac2 = numpy.sqrt(1.0 - k2)
        # residual f(c)
        fr = (2.0 - k2) ** 2 - 4.0 * fac1 * fac2
        # derivative df/dc, accumulated term by term
        frp = -4.0 * (2.0 - k2) * kappa
        frp += 4.0 * fac2 * gamma * gamma * kappa / fac1
        frp += 4.0 * fac1 * kappa / fac2
        frp /= b
        # Newton update
        c -= fr / frp
    return c
def safe_str(maybe_str):
    """To help with testing between python 2 and 3: decode ``maybe_str``
    as UTF-8 when it has a ``decode`` method, otherwise return it as-is.
    """
    decode = getattr(maybe_str, 'decode', None)
    if decode is None:
        return maybe_str
    return decode('utf-8')
import os
def _get_win_drives():
    """Returns a list of paths for all available drives e.g. ['C:\\']"""
    assert os.name == "nt"
    candidates = (letter + u":\\" for letter in u"CDEFGHIJKLMNOPQRSTUVWXYZ")
    return [drive for drive in candidates if os.path.isdir(drive)]
def args_priority(args, environ):
    """Resolve the Slack token, as_user flag and channel.

    priority of token:
        1) as argument: -t
        2) as environ variable SLACK_TOKEN
    priority of as_user:
        1) as argument: -a
        2) as environ variable SLACK_AS_USER
    """
    token = environ.get("SLACK_TOKEN")
    if args.token:
        token = args.token
    # slack as_user
    as_user = bool(environ.get("SLACK_AS_USER"))
    if args.as_user:
        as_user = True
    return token, as_user, args.channel
def get_snapshot(ec2, snapshot_name: str):
    """Return the EC2 snapshot tagged Name=``snapshot_name``.

    Returns an empty dict when no snapshot matches; raises ValueError
    when the name matches several snapshots.
    """
    response = ec2.describe_snapshots(Filters=[
        {'Name': 'tag:Name', 'Values': [snapshot_name]},
    ])
    snapshots = response['Snapshots']
    if len(snapshots) > 1:
        raise ValueError('Several snapshots with Name=%s found.' % snapshot_name)
    return snapshots[0] if snapshots else {}
import uuid
def get_file_name():
    """Return a random 32-character hexadecimal file name."""
    random_name = uuid.uuid4()
    return random_name.hex
from datetime import datetime
def get_datetime_from_timestamp(timestamp):
    """Return a naive local datetime for a unix ``timestamp``.

    Returns None when the value cannot be converted to a valid timestamp.
    """
    try:
        return datetime.fromtimestamp(int(timestamp))
    except (TypeError, ValueError, OverflowError, OSError):
        # Was a bare except: only conversion/range failures should map to
        # None; anything else (e.g. KeyboardInterrupt) must propagate.
        return None
def mock_athlete():
    """Return a fixed mock athlete record for tests."""
    athlete = dict(
        first_name="Test",
        last_name="USER",
        yearborn=1900,
    )
    return athlete
def cns_representer(dumper, data):
    """Represent a ConfigurationNameSpace as a plain dict for YAML output.

    Parameters
    ----------
    dumper :
        YAML dumper object
    data :
        ConfigurationNameSpace object

    Returns
    -------
    The dumper's dict representation of ``data``.
    """
    as_dict = dict(data)
    return dumper.represent_dict(as_dict)
import zlib
def compress(data):
    """Compress ``data`` with zlib.

    :param data: bytes
    :return: compressed bytes
    """
    compressed = zlib.compress(data)
    return compressed
def get_pubmed_id_for_doc(doc_id):
    """Return the PMID for a document.

    Because our doc_id currently IS the PMID (and we intend to KEEP it
    this way), this is the identity function.
    """
    return doc_id
def generate_onehot_dict(word_list):
    """Map each distinct word to its index in a one-hot encoding.

    Indices are assigned in order of first appearance in ``word_list``.
    """
    word_to_index = {}
    for word in word_list:
        word_to_index.setdefault(word, len(word_to_index))
    return word_to_index
def get_rp_dict(data, context=None):
    """Build a label/value dict from the request POST data.

    "All Responses" is a category base for free-text responses; those keep
    their raw value, while every other response is recorded under its
    chosen category.  With ``context="entrychanges"`` the result is
    reduced to {<change_category>: <new_value>}, plus patient_id when
    present.
    """
    all_dict = {}
    for key, item in data["results"].items():
        if item["category"] == "All Responses":
            all_dict[key] = item["value"]
        else:
            all_dict[key] = item["category"]
    if context != "entrychanges":
        return all_dict
    final_dict = {all_dict["change_category"]: all_dict["new_value"]}
    if "patient_id" in all_dict:
        final_dict["patient_id"] = all_dict["patient_id"]
    return final_dict
def get_sale_location(match):
    """Extract consignor city and state from a regex match that has
    named groups ``city`` and ``state``; returns a dict."""
    return {
        'consignor_city': match.group('city').strip(),
        'consignor_state': match.group('state'),
    }
def valid_file(path: str) -> bool:
    """
    Check if regressi file is valid
    :param path: path to the file to test
    :return: whether the file is valid or not

    NOTE(review): ``readline()`` keeps the trailing newline, so this
    comparison matches only when the header is the file's final,
    newline-less line; also the True/False branches look inverted
    relative to this docstring (a matching header returns False).
    Confirm the intended semantics before changing.
    """
    with open(path, 'r') as file:
        if file.readline() == "EVARISTE REGRESSI WINDOWS 1.0":
            return False
        else:
            return True
def get_short_name_with_ext(path):
    """Get the file's short name including its extension,
    for example: "c:/1.txt" will return "1.txt"."""
    _directory, short_name = os.path.split(path)
    return short_name
import time
def _date_to_posix(date):
""" date: datetime.datetime object """
posix = time.mktime(date.timetuple())
return str(int(posix)) | e41a7d10b3aa9d4ca35b7f72d09ae284cc5ca95e | 35,717 |
def owners_to_string(owners, queryset=False):
    """
    Args:
        owners: [{"user": {"username": "jack"}}, ...] OR
                [models.Profile] (if queryset=True)
    Returns:
        str: the sorted username list rendered as a string,
        e.g. "['jack', ...]"
    """
    if queryset:
        usernames = [profile.user.username for profile in owners]
    else:
        usernames = [entry['user']['username'] for entry in owners]
    return str(sorted(usernames))
import re
def alphanum_key(s):
    """Sort key that orders embedded digit runs by numerical value."""
    parts = re.split('([0-9]+)', s)
    return [int(part) if part.isdigit() else part for part in parts]
def get_something():
    """A session scope fixture."""
    message = "Bubu was here"
    return message
from typing import Any
import json
import yaml
def repr_arg_value(val: Any) -> str:
    """Return the value's string representation.

    The representation can be used by `libwampli.parse_arg_value` to
    retrieve the value.  Values JSON cannot encode fall back to YAML.
    """
    try:
        return json.dumps(val)
    except (TypeError, ValueError):
        # json.dumps signals unencodable values with TypeError (not
        # ValueError), so the YAML fallback was previously unreachable.
        return yaml.safe_dump(val)
def var_name_from_file_name(name):
    """var name is camelCase, file name is snake_case"""
    stem = name[:-3]  # drop the 3-character extension, e.g. ".py"
    if '_' not in stem:
        return stem
    first, *rest = stem.split('_')
    # empty segments (from doubled underscores) become literal underscores
    return first + ''.join(part.capitalize() or '_' for part in rest)
import os
def get_vcf_paths_hs37d5(phased, chroms = None, sample = None):
    """Get list of paths for HS37D5 vcfs.

    If not phased, a whole-genome vcf is returned.  If phased and
    chromosomes are provided, a list of per-chromosome phased vcfs is
    returned.  If sample is HG002, a single GIAB whole-genome phased vcf
    is returned no matter what.
    """
    if sample == 'HG002':
        return ['ftp://ftp-trace.ncbi.nlm.nih.gov/giab/ftp/release/AshkenazimTrio/HG002_NA24385_son/NISTv3.3.2/GRCh37/HG002_GRCh37_GIAB_highconf_CG-IllFB-IllGATKHC-Ion-10X-SOLID_CHROM1-22_v.3.3.2_highconf_triophased.vcf.gz']
    if not phased:
        return ['ftp://ftp.1000genomes.ebi.ac.uk/vol1/ftp/release/20130502/ALL.wgs.phase3_shapeit2_mvncall_integrated_v5b.20130502.sites.vcf.gz']
    assert chroms
    base = 'ftp://ftp.1000genomes.ebi.ac.uk/vol1/ftp/release/20130502/'
    out_vcfs = []
    for chrom in chroms:
        try:
            if int(chrom) in range(1, 23):
                out_vcfs.append(os.path.join(base, 'ALL.chr{}.phase3_shapeit2_mvncall_integrated_v5a.20130502.genotypes.vcf.gz'.format(chrom)))
        except ValueError:
            # was a bare except: only non-numeric chromosome names (X/Y)
            # should fall through to the checks below
            pass
        if chrom == 'X':
            out_vcfs.append(os.path.join(base, 'ALL.chrX.phase3_shapeit2_mvncall_integrated_v1b.20130502.genotypes.vcf.gz'))
        elif chrom == 'Y':
            out_vcfs.append(os.path.join(base, 'ALL.chrY.phase3_integrated_v2a.20130502.genotypes.vcf.gz'))
    # every requested chromosome must have produced exactly one path
    assert len(out_vcfs) == len(chroms)
    return out_vcfs
def handle_command(command, old_service):
    """
    1. identify the OLD from the command
    {'acked': True,
     'id': 'cbcfb97d-6f72-4851-997d-173560c6173d',
     'old_id': 'd9d5563d-a29a-46b3-85f2-6a3a1104f7fa'}

    Currently a stub: always returns None.
    """
    return None
def format_default_groups(default_groups: list) -> list:
    """
    Formats the default groups to execute in prowler: for each entry the
    token following the first underscore is kept.
    """
    return [group.rsplit("_")[1] for group in default_groups]
def change_ohlcv_time(df, period):
    """Resample cryptocurrency OHLCV data to a new time period.

    ``period`` is a pandas offset alias string such as '1T', '5T', '60T'.
    The frame must have a ``date`` column, which becomes the index of the
    returned resampled frame.
    """
    # Set date as the index. This is needed for resample to run.
    df = df.set_index(['date'])
    # How each column is aggregated within a resample bucket.
    ohlc_dict = {'open': 'first',
                 'high': 'max',
                 'low': 'min',
                 'close': 'last',
                 'base_volume': 'sum'}
    # Bug fix: resample(..., how=...) was deprecated and then removed from
    # pandas; the modern equivalent is .resample(...).agg(...).
    df = df.resample(period, closed='left', label='left').agg(ohlc_dict)
    return df
def value_or_default(value, default):
    """
    Returns the supplied value if it is non None, otherwise the supplied
    default.
    """
    if value is None:
        return default
    return value
import re
def charge(q_layer, p_layer):
    """Compute the net charge from InChI charge (q) and proton (p) layers.

    @param q_layer: q layer string
    @param p_layer: p layer string
    @return: total charge as an int
    """
    total = 0
    multiplier_pattern = re.compile('^(\d+)\*(.+)$')
    components = q_layer.split(';') if q_layer != "" else []
    for component in components:
        if component == "":
            continue
        m = multiplier_pattern.match(component)
        if m:
            # "N*c" means N occurrences of a component with charge c
            total += int(m.group(1)) * int(m.group(2))
        else:
            total += int(component)
    # Proton layers have never had multiple components, so this is only an
    # explicit warning; using the value directly would raise below.
    if ";" in p_layer:
        print("Warning: multiple components in mobile proton layer")
    # protons have positive charge
    if p_layer != '':
        total += int(p_layer)
    return total
import re
def parse_k(msg):
    """Parse a create-keypoint message and return the keypoint number,
    or None when no number is present."""
    if re.search(r"KEYPOINT NUMBER", msg):
        match = re.search(r"(KEYPOINT NUMBER =\s*)([0-9]+)", msg)
    else:
        match = re.search(r"(KEYPOINT\s*)([0-9]+)", msg)
    return int(match.group(2)) if match else None
def confirm(prompt=None, response=False):
    """Prompts for a yes or no response from the user
    Arguments
    ---------
    prompt : str, default=None
    response : bool, default=False
    Returns
    -------
    bool
        True for yes and False for no.
    Notes
    -----
    `response` should be set to the default value assumed by the caller when
    user simply types ENTER.
    Examples
    --------
    >>> confirm(prompt='Create Directory?', response=True)
    Create Directory? [y]|n:
    True
    >>> confirm(prompt='Create Directory?', response=False)
    Create Directory? [n]|y:
    False
    >>> confirm(prompt='Create Directory?', response=False)
    Create Directory? [n]|y: y
    True
    """
    if prompt is None:
        prompt = 'Confirm'
    # the bracketed option shows which answer ENTER defaults to
    if response:
        prompt = '{} [{}]|{}: '.format(prompt, 'y', 'n')
    else:
        prompt = '{} [{}]|{}: '.format(prompt, 'n', 'y')
    # loop until the user gives an empty line or a y/n answer
    while True:
        ans = input(prompt)
        if not ans:
            # plain ENTER: take the caller-supplied default
            return response
        if ans not in ['y', 'Y', 'n', 'N']:
            print('please enter y or n.')
            continue
        if ans in ['y', 'Y']:
            return True
        if ans in ['n', 'N']:
            return False
import random
def generate_colour():
    """Generate a random colour as a '#RRGGBB' hex string."""
    channels = [random.randint(0, 255) for _ in range(3)]
    return "#%02X%02X%02X" % tuple(channels)
def count_single_char(line):
    """
    Count the single-character words in ``line`` (a sequence of words).
    """
    return len([word for word in line if len(word) == 1])
def READ_RECORD(SFI: int, record: int) -> dict:
    """READ_RECORD(): generate the APDU for a READ RECORD command."""
    p1 = F"{record:02X}"
    # P2 encodes the SFI in bits 4-8 with '100' = record number in P1
    p2 = F"{SFI * 8 + 4:02X}"
    return {'CLA': '00', 'INS': 'B2', 'P1': p1, 'P2': p2, 'Le': '00'}
import os
def get_train_event_log(base_dir):
    """
    Args:
        base_dir: Path to model training directory
    Returns:
        (str) Path to train tensorboard data
    """
    return os.path.join(base_dir, 'tensorboard/train/')
import os
def folder_is_hidden(p):
    """
    Removes hidden folders from a list. Works on Linux, Mac and Windows
    :return: true if a folder is hidden in the OS
    """
    if os.name == "nt":
        # NOTE(review): win32api/win32con are not imported in this chunk;
        # on Windows this raises NameError unless pywin32 is imported
        # elsewhere in the file -- confirm.
        attribute = win32api.GetFileAttributes(p)
        return attribute & (win32con.FILE_ATTRIBUTE_HIDDEN | win32con.FILE_ATTRIBUTE_SYSTEM)
    else:
        # POSIX convention: names starting with a dot are hidden
        return p.startswith(".")
import random
def get_most_frequent_referent(seen_coref, ref_dict, tok_line):
    """Pick a referent for indexes that were instantiated but never referred to.

    Prefers a variable whose index occurs only once in the joined line
    (i.e. it was never used as a reference), choosing the referent with
    the highest ``ref_dict`` frequency; otherwise falls back to the most
    frequent referent overall, and finally to a random one.
    """
    line = " ".join(tok_line)  # put line back
    most_freq = ''
    score = -1
    for item in seen_coref:
        if line.count(item) == 1:  # index occurs once - never used as reference
            if seen_coref[item] in ref_dict:  # word is in the general dict
                if ref_dict[seen_coref[item]] > score:  # keep the most frequent
                    score = ref_dict[seen_coref[item]]
                    most_freq = seen_coref[item]
            else:
                # unknown word: remember it with a minimal positive score
                most_freq = seen_coref[item]
                score = 0
    if score > -1:  # found a never-referenced candidate
        return most_freq
    # else find the most frequent referent in general
    most_freq = ''
    score = -1
    for item in seen_coref:
        if seen_coref[item] in ref_dict:
            if ref_dict[seen_coref[item]] > score:
                score = ref_dict[seen_coref[item]]
                most_freq = seen_coref[item]
    if score > -1:
        return most_freq
    # Bug fix: random.choice needs a sequence; dict.keys() is a view in
    # Python 3, so materialize the keys before choosing.
    rand_key = random.choice(list(seen_coref))
    return seen_coref[rand_key]
from typing import List
import subprocess
def _call_pytest(files: List[str]) -> int:
"""
Args:
files (list(str)): The files on which to call pytest
Returns:
The maximum of return codes of calls to pytest on `files`
"""
result = 0
for file_ in files:
cmd = ["pytest", file_]
print(" ".join(cmd))
py_test_result = subprocess.run(cmd, check=False)
result = max(result, py_test_result.returncode)
return result | 7c5f5b6f3666455e315057bdc41cb0fbb047459d | 35,737 |
def is_capitalized(text: str):
    """Given a string (system output etc.), return True unless it is
    entirely lowercase."""
    if text.islower():
        return False
    return True
def get_trials():
    """Return trials for taskA and B (combined) in trial-time
    (equivalent to 3 TRs).

    The sequence is a fixed, hand-specified list of condition codes 0-6.
    """
    return [1,1,4,4,6,0,4,2,4,1,0,0,0,0,5,4,5,1,5,5,3,6,6,0,0,2,2,3,
            3,1,3,2,4,2,2,6,0,5,3,1,2,2,0,4,3,0,0,6,5,6,6,5,1,0,0,
            5,5,2,2,6,2,0,0,0,6,6,5,0,0,0,0,4,6,4,5,6,4,0,0,0,0,3,3,
            4,2,5,5,1,0,3,3,1,1,0,6,1,5,3,3,0,4,6,0,0,0,1,2,2,0,5,0,
            4,4,4,3,3,2,0,1,0,0,0,4,4,0,0,2,1,6,6,2,4,4,1,1,1,5,4,0,6,
            0,3,3,5,5,4,2,1,1,1,6,1,1,0,2,0,0,6,5,6,0,3,4,3,3,6,4,0,6,6,
            6,1,1,3,6,3,0,5,5,3,0,0,0,2,2,2,3,2,6,3,5,5,0,1,0,2,5,2,4,1,
            4,4,4,0,5,5,6,0,3,4,0,5,0,0,6,1,3,5,0,3,0,0,1,6,3,0,2,2,0,5,
            5,2,5,0,1,4,1,2,0,0,3,0,0,1,3,2,1,4,0,3,5,0,0,4,0,5,2,6,
            6,2,1,6,0,2,3,3,6,4,4,2]
def transform_vertex(u, phi):
    """
    Given a vertex id ``u`` and a set of partial isomorphisms ``phi``
    (pairs of (source, target) ids), return the transformed vertex id of
    the first pair whose source equals ``u``; raise when none matches.
    """
    for mapping in phi:
        if mapping[0] == u:
            return mapping[1]
    raise Exception('u couldn\' be found in the isomorphisms')
def write_normal(fname, triplets, na, angd, agr):
    """
    Write out ADF data in normal ADF format.

    Args:
        fname: output file path.
        triplets: sequence of 3-element species tuples labelling each
            distribution column.
        na: number of angular bins (rows written).
        angd: per-bin angle values, indexed angd[i].
        agr: per-triplet distributions, indexed agr[it, i] -- presumably
            a 2-D numpy array; TODO confirm with the caller.
    Returns:
        None
    """
    outfile = open(fname, 'w')
    # header: column 1 is theta, then one column per triplet
    outfile.write('# 1:theta[i], ')
    for it, t in enumerate(triplets):
        outfile.write(' {0:d}:{1:s}-{2:s}-{3:s},'.format(it + 2, *t))
    outfile.write('\n')
    # one row per angular bin: theta followed by each triplet's value
    for i in range(na):
        outfile.write(' {0:10.4f}'.format(angd[i]))
        for it, t in enumerate(triplets):
            outfile.write(' {0:11.3e}'.format(agr[it, i]))
        outfile.write('\n')
    outfile.close()
    return None
import six
def restruct_for_pack(obj):
    """Recursively walk object's hierarchy, rebuilding it from plain types.

    Text, numbers, bools and bytes pass through unchanged; dicts, lists,
    sets and tuples are rebuilt with converted members (sets become
    lists); objects with a ``__dict__`` are converted via their attribute
    dict; anything else becomes None.
    """
    if isinstance(obj, six.text_type):
        return obj
    if isinstance(obj, (bool, six.integer_types, float, six.binary_type)):
        return obj
    elif isinstance(obj, dict):
        # shallow-copy first so the caller's dict is not mutated
        obj = obj.copy()
        for key in obj:
            obj[key] = restruct_for_pack(obj[key])
        return obj
    elif isinstance(obj, list) or isinstance(obj, set):
        return [restruct_for_pack(item) for item in obj]
    elif isinstance(obj, tuple):
        return tuple(restruct_for_pack([item for item in obj]))
    elif hasattr(obj, '__dict__'):
        return restruct_for_pack(obj.__dict__)
    else:
        return None
import random
def bomber(length, width, bomb, m, n):
    """
    Place bombs randomly (the first-clicked cell and its surroundings
    cannot be bombs).

    :param length: length of the board
    :param width: width of the board
    :param bomb: number of bombs
    :param m: horizontal position of first click
    :param n: vertical position of first click
    :return: list of bomb positions
    """
    forbidden = {(m + dx, n + dy) for dx in (-1, 0, 1) for dy in (-1, 0, 1)}
    candidates = [(x, y) for x in range(length) for y in range(width)
                  if (x, y) not in forbidden]
    return random.sample(candidates, bomb)
import os
def get_violations_reports(report_path, violations_type):
    """
    Finds violations reports files by naming convention (e.g., all
    "pep8.report" files) under ``report_path``.
    """
    target = "{violations_type}.report".format(violations_type=violations_type)
    found = []
    for subdir, _dirs, files in os.walk(os.path.join(report_path)):
        found.extend(os.path.join(subdir, f) for f in files if f == target)
    return found
import os
def game_id():
    """Return the SteamAppId environment variable (None when unset)."""
    return os.environ.get('SteamAppId')
def format_example_name(example):
    """Formats an example command into a function or file name."""
    joined = '_'.join(example)
    normalized = joined.replace('-', '_').replace('.', '_').replace('/examples/', '')
    # drop the leading character, as the original did
    return normalized[1:]
def value(card):
    """Returns the numeric value of a card or card value as an integer 1..13"""
    prefix = card[:len(card) - 1]
    face_values = {'A': 1, 'J': 11, 'Q': 12, 'K': 13}
    if prefix in face_values:
        return face_values[prefix]
    return int(prefix)
def dirInfoGet(in_dir, mid_dir):
    """
    Build the fixed input/output directory layout.

    :param in_dir: input base directory
    :param mid_dir: intermediate/output base directory
    :return: [obs data dir, satellite nc data dir, retrieval data dir]
    """
    obs_dir = in_dir + r'/ObsData'
    sat_dir = in_dir + r'/SatNcData'
    ret_dir = mid_dir + r'/RetData'
    return [obs_dir, sat_dir, ret_dir]
from textwrap import dedent
def rs256_private_key():
    """
    This fixture provides a pre-generated private key for RS256 hashing.

    The key is for tests only and is deliberately committed; do not use
    it for anything security-sensitive.  Returned as UTF-8 bytes in PEM
    format.
    """
    return dedent(
        """
        -----BEGIN RSA PRIVATE KEY-----
        MIIEpAIBAAKCAQEAw408+QDZ10idz4ytJtwFQE4YgmrjvCoEXjtTUWQ3H4nWAAYQ
        +oE9xpr/gosNiFMuyRburvXT+Rkq8ry8tWoUzN2zViaarot+Tt9I71sVlnIsbtDZ
        +XrteMvBwjARn/MEAQEwDLvVzrBnAZrOTwrIkznyJttZh7STrt6y5X91i2MMm3xu
        9QK90kpu3rymAyT5V+AEIRzZai/ZT4YfLDutXulOVlWPQ55Xww1mbheGQ99fUMo5
        LmkxM5Jsz8ulIVvq/G/8guiKwAPJN/8S34NbkgL5GoeXT8uNDkbhtkLh5+o2T4EL
        9/ODKHqx46pHgUmBiC6wNv6uJXdH7qpaqhPR3QIDAQABAoIBABeyl/788Wk7bZRn
        UdxxsVk3nZTAa1S0Ks9YlSI56MwzofFiys/wtZHJ2sjxHPS2T+cilk4xkDyRpjjA
        UoYRku+4tjDsgLZCRU49lNMc0KLotyW+vYuUMA8BcjucI6akhomwoSgJ40Em83So
        U/QUNHZTAVtgHZtqcLMyXa+eIJqBcfsMHFkCgSF8LSD/XkRBMm1SREswDw6KqQQ0
        sZ/8TVF9sJTi3/OG8m5OfI+44AYDaMH5wKoOBcR3FBln+dEutB6JuRjmpnEjQpIT
        DggULc+Dzb/c75yhT1qZSEL3Z99JQTbytPm6boNGKmzUE9HCoY84wKfnhUDocFKW
        jnHMmKkCgYEA7/gRAtLjbJW1rbxw8xN3cyOZEJsMFt4mXMmne6nDTttVKb0wUuXJ
        H8prKAXDOzadAZgPeGJXVSNgGoNeNtkmEKDtysrRiZbWiTRxYPE36MrHFGOywjTn
        tP8qMJHmHYkxS16nqrOl0znUWv6Q6/qwd59Utuu4IJF/CxqP3Z6HPssCgYEA0J2M
        1gRgGj8NnGoIKS58gc3Aa5RdqWKoeiyXeN/zRDfMCKpPsVykvZJb4cLcEdcwe9kC
        3xpgIPaTZCPwhJ1rYiZ0/Xr7oIf0E66IeEKs/bchKcT9+sSaWgc5/zQ7aQ/XpwzU
        nKCTTeMGFUyulCIkoe2tLEQ+Mw1OphIXv17fNPcCgYEAjWgVxh81ivgRjiZ8PJEd
        E4lHmmRzVEpmOslN225nO+G9ppHolwD3ardiO7xhllQRYy4S97KjmfT1ncoJy7Jc
        XvImDhlELprnIwT3RtP+STys4ZP6c7yvSZYPa32eJ4t/s9U8YjfooLb0LwbRqW0Z
        bfRC/GOdJfv27Dkjy8muEs8CgYEAo+oHDOonMLg2U54kh2cVQVCPTngnF76DLmv3
        IGym0gUddfmL4Iowjxt+wma/T+1LFSSwUuiAe6YCrX5nr2uZQmeBKOIG8F2idAyB
        Ai0xi7Dmh9FW1kDAHtjqwxEhVS2zfnhgXij1VQ96aiX0TkR9kBYWKV/9l1NvZqF0
        s1MyAoUCgYA0wfJrCTXdWitkyfxApcmoTxt0ljqUwO6F5fhojf8PU1ouglgkRXtm
        1rSDGp7YUfODhWNSsN2P/eaDybcZo+TGtLQJ5Bai3Qxqh8xPaKCsSZbcPRLRP0w5
        CbTvFEyj6EBEH+TJL/Loa4hKFuAk7ErBAtzMCw6LchTjB/OF+dUusA==
        -----END RSA PRIVATE KEY-----
        """
    ).encode("utf-8")
import re
def make_command(name):
    """Convert a string into a space-less, lowercase command name.

    Non-word characters become separators, runs of separators collapse to
    a single hyphen, e.g. ``"Hello, World!" -> "hello-world"``. Underscores
    are word characters and are kept.
    """
    # Raw string: the original '[^\w]' relied on Python passing the unknown
    # escape through, which is a DeprecationWarning (SyntaxWarning on 3.12+).
    name = re.sub(r'\W', ' ', name)  # replace special characters with spaces
    name = name.strip().lower()
    # Collapse any run of spaces into a single hyphen in one pass
    # (replaces the original quadratic while-loop).
    return re.sub(r' +', '-', name)
def convertMappingDict(mdict):
    """Copy a mapping-proxy object into a plain mutable dict.

    Mapping proxies are read-only dicts; Transcrypt has no such concept
    yet, so we materialise an ordinary dict with the same entries.
    """
    return {key: mdict[key] for key in mdict.keys()}
import re
def _remap_by_name(stations):
"""Remaps channel numbers based on the callsign and uses subchannels to
for each additional. For example, if the first CBS channel got 5.1,
the next encountered one would get 5.2, and so on
"""
callsigns = []
callsignSubchannels = {}
for station in stations:
callsigns.append(re.sub(r'^[0-9. ]+', '', station['callSign']))
callsigns = list(set(callsigns))
callsigns.sort()
for item in callsigns:
callsignSubchannels[item] = 1
# remap each of the station's channel numbers and callsigns
for station in stations:
callsign = re.sub(r'^[0-9. ]+', '', station['callSign'])
channel = callsigns.index(callsign) + 1
subchannel = callsignSubchannels[callsign]
station['channel_remapped'] = "{}.{}".format(channel, subchannel)
station['callSign_remapped'] = "{}.{} {}".format(channel, subchannel, callsign)
callsignSubchannels[callsign] += 1
return stations | 94edc3483cfa4b4be7b369ef0e0a7c045bf192ef | 35,754 |
import subprocess
def upload_kaggle_titanic_submission_via_api(filename, message):
    """Upload CSV to Kaggle Titanic Competition via Kaggle API.

    Arguments:
        filename {str} -- CSV filename that you want to submit to Kaggle.
        message {str} -- Message to include in the Kaggle submission.
    Returns:
        stdout {bytes} -- Standard output captured from the subprocess.
        stderr {bytes} -- Standard error captured from the subprocess.
    """
    cmd = ['kaggle', 'competitions', 'submit', 'titanic',
           '-f', filename, '-m', message]
    proc = subprocess.Popen(cmd,
                            stdout=subprocess.PIPE,
                            stderr=subprocess.PIPE)
    stdout, stderr = proc.communicate()
    # Echo what was submitted and what the CLI reported.
    for item in (filename, message, stdout, stderr):
        print(item)
    return stdout, stderr
def _get_name(param) -> str:
"""Checks if signature.Parameter corresponds to *args or **kwargs type input."""
if param.kind in (param.VAR_POSITIONAL, param.VAR_KEYWORD) and param.default is param.empty:
return str(param)
else:
return param.name | 1a8c054d42e081ff3abdb53dd56cdeafea05e781 | 35,756 |
import subprocess
def on_cluster(cmds=("sbatch",)):
    """Check whether we appear to be on a cluster.

    Args:
        cmds: command names to probe for (default: slurm's ``sbatch``).
    Returns:
        True if any of the commands exists on this machine.
    """
    def cmd_exists(cmd):
        # `type` resolves executables as well as shell builtins/aliases.
        result = subprocess.call(
            "type " + cmd, shell=True, stdout=subprocess.PIPE, stderr=subprocess.PIPE
        )
        return result == 0

    for cmd in cmds:
        # Bug fix: the original probed the hard-coded "sbatch" on every
        # iteration instead of each entry of *cmds*.
        if cmd_exists(cmd):
            return True
    return False
def merge(left, right):
    """Merge step for merge sort.

    Combines two lists that are already sorted in descending order by
    their first element into one descending-order list. Ties take the
    right-hand element first.
    """
    merged = []
    i = j = 0
    while i < len(left) and j < len(right):
        # Descending order by item[0]; `>` (not `>=`) means equal keys
        # come from `right` first, matching the original behaviour.
        if left[i][0] > right[j][0]:
            merged.append(left[i])
            i += 1
        else:
            merged.append(right[j])
            j += 1
    # At most one side still holds items, so extending both is safe.
    merged.extend(left[i:])
    merged.extend(right[j:])
    return merged
import time
def now_sec():
    """Current Unix time (seconds since 1970), truncated to an int."""
    return int(time.time())
def add_prefix(name, prefix=None, split='.'):
    """Return *name* prefixed with *prefix* (joined by *split*), or
    *name* unchanged when no prefix is given."""
    if prefix is None:
        return name
    return '{}{}{}'.format(prefix, split, name)
import sys
def is_in_virtualenv():
    """
    Returns ``True`` if we are in a virtualenv.

    Legacy virtualenv (< 20) sets ``sys.real_prefix``; stdlib ``venv``
    (and modern virtualenv) instead makes ``sys.prefix`` differ from
    ``sys.base_prefix``.  Note that ``sys.base_prefix`` always exists on
    Python >= 3.3, so the original ``hasattr(sys, 'base_prefix')`` check
    made this function return True unconditionally.
    """
    return hasattr(sys, 'real_prefix') or sys.base_prefix != sys.prefix
def lpmerge(L, R):
    """In place left precedent dictionary merge.

    Keeps values from `L`, if the value in `R` is :const:`None`.
    Returns `L` (mutated in place).
    """
    # `iteritems` is Python 2 only and raises AttributeError on Python 3
    # dicts; use `items()`.  A plain loop replaces the side-effect-only
    # list comprehension.
    for key, value in R.items():
        if value is not None:
            L[key] = value
    return L
import torch
def f_get_raster_image(cfg,
                       images,
                       history_weight=0.9):
    """
    Creates single raster image from sequence of images from l5kit's AgentDataset
    Args:
        cfg {dict}: Dictionary config; must provide
            cfg['model_params']['history_num_frames'] and cfg['device'].
        images: (batch_size, 2*(history_num_frames+1)+3, H, H) - sequences of images after applying l5kit's rasterizer:
            There is (history_num_frames+1) ego-car images, (history_num_frames+1) agent-car's images + 3 scene RGB images
        history_weight {float}: Amount of history fading (for rendering); each
            older frame is damped by this factor before the next is added.
    Returns:
        RGB image of the scene and agents, shape (batch_size, 3, H, H),
        values clamped to [0, 1].
        Red color stays for EGO car.
        Yellow color stays for Agent's cars.
    """
    batch_size = images.shape[0]
    image_size = images.shape[-1]
    # get number of history steps
    hnf = cfg['model_params']['history_num_frames']
    # define ego-car's indices range in channels (images):
    # ind (0, hnf) correspond to all agents except ego car,
    # from hnf+1 to 2*hnf+1 correspond to ego car,
    # last 3 indices correspond to rgb scene
    ego_index = range(hnf+1, 2*hnf+2)
    # iterate through ego-car's frames and sum them according to history_weight (history fading) in single channel.
    ego_path_image = torch.zeros(size=(batch_size, image_size, image_size), device=cfg['device'])
    for im_id in reversed(ego_index):
        ego_path_image = (images[:, im_id, :, :] + ego_path_image * history_weight).clamp(0, 1)
    # define agent's range
    agents_index = range(0, hnf+1)
    # iterate through agent-car's frames and sum them according to history_weight in single channel
    agents_path_image = torch.zeros(size=(batch_size, image_size, image_size), device=cfg['device'])
    for im_id in reversed(agents_index):
        agents_path_image = (images[:, im_id, :, :] + agents_path_image*history_weight).clamp(0, 1)
    # RGB path for ego (red (255, 0, 0)); channels last
    ego_path_image_rgb = torch.zeros((ego_path_image.shape[0],
                                      ego_path_image.shape[1],
                                      ego_path_image.shape[2],
                                      3), device=cfg['device'])
    ego_path_image_rgb[:, :, :, 0] = ego_path_image
    # RGB paths for agents (yellow (255, 255, 0)); channels last
    agents_path_image_rgb = torch.zeros((agents_path_image.shape[0],
                                         agents_path_image.shape[1],
                                         agents_path_image.shape[2],
                                         3), device=cfg['device'])
    # yellow
    agents_path_image_rgb[:, :, :, 0] = agents_path_image
    agents_path_image_rgb[:, :, :, 1] = agents_path_image
    # generate full RGB image with all cars (ego + agents)
    all_vehicles_image = ego_path_image_rgb + agents_path_image_rgb  # (batch_size, H, H, 3), channels last
    # get RGB image for scene from rasterizer (3 last images); channels last
    scene_image_rgb = images[:, 2*hnf+2:, :, :].permute(0, 2, 3, 1)
    # Add mask to positions of cars (to merge all layers):
    # We need to take into account that the off-road is white, i.e. 1 in all channels
    # So, we have to prevent disappearing of off-road cars after clipping when we add images together.
    # For ex. (1, 1, 1) + (1, 0, 0) = (2, 1, 1) --> (1, 1, 1) = off-road car disappears.
    # In order to solve this, we cut the scene at the car's area.
    # NOTE(review): `scene_image_rgb` is a permuted *view* of `images`, so this
    # in-place zeroing also mutates the caller's input batch — confirm intended.
    scene_image_rgb[(all_vehicles_image > 0).any(dim=-1)] = 0.0
    # generate final raster map
    full_raster_image = (all_vehicles_image + scene_image_rgb).clamp(0, 1)
    # channels as a second dimension
    full_raster_image = full_raster_image.permute(0, 3, 1, 2)
    return full_raster_image
import json
def sub_string_to_json(pre_text):
    """
    Extract the JSON object embedded in *pre_text* and parse it.

    Handles JSONP-style payloads such as ``callback({...});``: takes the
    substring from the first ``{``, drops the trailing character, then a
    trailing ``)`` if present, and returns the parsed native ``dict``.
    """
    start = pre_text.find('{')
    body = pre_text[start:len(pre_text) - 1]
    if body.endswith(")"):
        body = body[:-1]
    # json.loads yields native Python types (dict/list/...)
    return json.loads(body)
def a_send(text, ctx):
    """Send a text line to the controller; always reports success (True)."""
    controller = ctx.ctrl
    controller.send(text)
    return True
def path_info_split(path_info):
    """
    Splits off the first segment of the path. Returns (first_part,
    rest_of_path). first_part can be None (if PATH_INFO is empty), ''
    (if PATH_INFO is '/'), or a name without any /'s. rest_of_path
    can be '' or a string starting with /.
    """
    if not path_info:
        return None, ''
    assert path_info.startswith('/'), (
        "PATH_INFO should start with /: %r" % path_info)
    stripped = path_info.lstrip('/')
    head, sep, tail = stripped.partition('/')
    if sep:
        return head, '/' + tail
    return stripped, ''
import os
def get_img_path(dir: str):
    """
    Return the sorted list of image file paths (.jpg/.png/.JPEG) found
    directly inside directory *dir*.
    """
    img_path_ls = []
    for entry in os.scandir(dir):
        # Bug fix: the original mixed `or`/`and`, so is_file() was only
        # checked for .JPEG entries; check it for every extension.
        if entry.is_file() and entry.path.endswith(('.jpg', '.png', '.JPEG')):
            img_path_ls.append(entry.path)
    img_path_ls.sort()
    return img_path_ls
def update_package_db(module, xbps_path):
    """Refresh the xbps package database.

    Returns True when the sync actually downloaded data (xbps prints an
    "avg rate" line in that case); fails the module run on error.
    """
    sync_cmd = "%s -S" % (xbps_path['install'])
    rc, stdout, stderr = module.run_command(sync_cmd, check_rc=False)
    if rc != 0:
        module.fail_json(msg="Could not update package db")
    return "avg rate" in stdout
def readline_mock(src):
    """Line reader for the given text.

    Meant to be used with Python's tokenizer: each call returns the next
    line as bytes with a newline appended; raises StopIteration once all
    lines have been consumed.
    """
    encoded = bytes(src, encoding="utf8")
    line_iter = iter([piece + b"\n" for piece in encoded.split(b"\n")])

    def readline():
        # next() raises StopIteration when exhausted, as the tokenizer expects
        return next(line_iter)
    return readline
def py_fn(f, args=None, kwargs=None):
    """
    Rem codes: py_fn f args kwargs
    python intepreted as: f(*args, **kwargs)

    Missing (or empty/falsy) args/kwargs default to no arguments.
    """
    return f(*(args or ()), **(kwargs or {}))
import time
def unixtime(dt_obj):
    """Format datetime object as unix timestamp
    :param dt_obj: datetime.datetime object (treated as UTC)
    :returns: float
    """
    import calendar
    # Bug fix: time.mktime() interprets its struct_time argument as LOCAL
    # time, so feeding it utctimetuple() produced results shifted by the
    # machine's timezone offset.  calendar.timegm() is the correct inverse
    # for a UTC struct_time.
    return float(calendar.timegm(dt_obj.utctimetuple()))
def star_wars(x, y, elem, neighbours):
    """Star Wars preset of the Generations family. (345/2/4)

    State 3 = alive, 2..1 = decaying, 0 = dead. *x* and *y* are unused
    (kept for the common rule-function signature).
    """
    alive_neighbours = sum(1 for cell in neighbours if cell == 3)
    if elem == 3:
        # Alive: survives with 3, 4 or 5 alive neighbours, else starts decaying.
        return 3 if alive_neighbours in (3, 4, 5) else 2
    if elem > 0:
        # Decaying cell fades one step per generation.
        return elem - 1
    # Dead cell is born with exactly 2 alive neighbours.
    return 3 if alive_neighbours == 2 else 0
def pwd():
    """Admin-panel password change endpoint (placeholder)."""
    return 'pwd'
import json
def construct_event_json(event, data):
    """
    Build the wire payload sent to clients: a UTF-8 encoded JSON array of
    ``[event, data]`` (non-ASCII characters are kept as-is, not escaped).
    """
    payload = [event, data]
    return json.dumps(payload, ensure_ascii=False).encode('utf8')
def number_unit(disc):
    """
    Return the number of units with the given discriminant.

    Only disc == -3 and disc == -4 have extra roots of unity; any other
    discriminant below -4 has just the two units ±1. Raises ValueError
    for anything above -3.
    """
    if disc == -3:
        return 6
    if disc == -4:
        return 4
    if disc < -4:
        return 2
    raise ValueError
def is_private(message) -> bool:
    """Whether message was sent in a one-on-one (private) chat."""
    # See "type" at https://core.telegram.org/bots/api#chat.
    chat_kind = message.chat.type
    return chat_kind == 'private'
import calendar
def to_timestamp(date):
    """Convert a datetime object into a POSIX timestamp.

    Parameters
    ----------
    date : datetime.datetime
        Datetime object, interpreted as UTC, e.g. datetime.datetime(2020, 10, 31, 0, 0)

    Returns
    -------
    int
        Seconds since the epoch corresponding to the input datetime
    """
    utc_struct = date.timetuple()
    return calendar.timegm(utc_struct)
def title_getter(node):
    """Return the ``'title'`` entry of the node mapping, or ``''`` when absent.

    Note: despite the original phrasing ("or None"), the default returned
    for a missing title is the empty string.
    """
    return node.get('title','')
import struct
import socket
def next_ip(ip):
    """Return the IP address that numerically follows the given one.

    The dotted-quad string is packed to a 32-bit integer, incremented,
    and unpacked back to a dotted-quad string.
    """
    as_int = struct.unpack('!I', socket.inet_aton(ip))[0]
    return socket.inet_ntoa(struct.pack('!I', as_int + 1))
def get_palette(num_classes):
    """
    Build a flat color palette (3 values per class) for segmentation
    visualisation, using the PASCAL-VOC bit-shuffling scheme: bits 0/1/2
    of the class label feed the R/G/B channels from the MSB downwards.
    """
    palette = [0] * (num_classes * 3)
    for cls in range(num_classes):
        red = green = blue = 0
        value = cls
        shift = 0
        while value > 0:
            red |= ((value >> 0) & 1) << (7 - shift)
            green |= ((value >> 1) & 1) << (7 - shift)
            blue |= ((value >> 2) & 1) << (7 - shift)
            shift += 1
            value >>= 3
        palette[cls * 3 + 0] = red
        palette[cls * 3 + 1] = green
        palette[cls * 3 + 2] = blue
    return palette
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.