content stringlengths 22 815k | id int64 0 4.91M |
|---|---|
def other_ops(request):
    """Render the Other Operations overview page.

    Builds per-status counts (status codes 0-5), the full queryset, and the
    active-tab marker, then renders the template.
    """
    status_names = ('pending', 'active', 'done', 'cancelled', 'passed', 'failed')
    args = {
        name: OtherOperation.objects.filter(status=code).count()
        for code, name in enumerate(status_names)
    }
    args['other_ops'] = OtherOperation.objects.all()
    args['a'] = 'other-ops'
    return render(request, 'operations/other-ops.html', args)
def verify_input_values(input_values, timeout='0'):
    """Verify input fields have given values.

    Accepts a .txt/.csv file or Robot FW dictionary as a parameter. If using a
    text file, the locator and the expected value should be separated with a
    comma on each row.

    :param input_values: dict of locator -> expected value, or a path to a
        .txt/.csv file with "locator,value" rows.
    :param timeout: timeout forwarded to ``verify_input_value``.
    :raises QWebValueError: if input is neither a dict nor a .txt/.csv path.

    Examples
    --------
    .. code-block:: robotframework

        VerifyInputValues          list_of_locators_and_values.txt
        VerifyInputValues          C:/Users/pace/Desktop/textfile.txt
        ${cool_dict}=    Create Dictionary    Name=Jane   Email=janedoe@iddqd.com
        ...   Phone=04049292243923   Address=Yellow street 33 C 44
        VerifyInputValues          ${cool_dict}
    """
    if isinstance(input_values, dict):
        for locator, expected in input_values.items():
            logger.info('Locator: {}, Expected value: {}'.format(locator, expected),
                        also_console=True)
            # Fix: forward the timeout here too, matching the file branch below.
            verify_input_value(locator, expected, timeout=timeout)
    elif input_values.endswith('.txt') or input_values.endswith('.csv'):
        file = download.get_path(input_values)
        with open(file, 'rb') as txt_file:
            params = [line.rstrip() for line in txt_file]
        for raw in params:
            # Lines are read as bytes; decode before splitting locator,value.
            parts = raw.decode('utf-8').split(',')
            locator, value = parts[0].strip(), parts[1].strip()
            logger.info('Locator: {}, Expected value: {}'.format(locator, value),
                        also_console=True)
            verify_input_value(locator, value, timeout=timeout)
    else:
        raise QWebValueError('Unknown input value. Text file or dictionary required.')
def distance_point_2_line(point, seg):
    """Finds the minimum distance and closest point between a point and a line.

    Args:
        point ([float, float]): (x,y) point to test
        seg ([[float, float], [float, float]]): two points defining the line

    Returns:
        A tuple of two items:
        * Distance between the point and the (infinite) line
        * The (x,y) value on the line that is the closest point
    """
    dseg = seg[1] - seg[0]
    dpt = point - seg[0]
    # Project the point vector onto the line direction.
    proj = (np.dot(dpt, dseg) / np.dot(dseg, dseg)) * dseg
    # Bug fix: the original called np.linalg.norm(dpt, proj), which passes
    # `proj` as the `ord` argument. The distance is the norm of the residual.
    dist = np.linalg.norm(dpt - proj)
    return dist, seg[0] + proj
def load_glove_from_file(glove_filepath):
    """Read GloVe embeddings from disk.

    Each line of the file is expected to look like ``word v1 v2 ... vn``.

    Args:
        glove_filepath (str): path to the GloVe embeddings file

    Returns:
        A ``(word_to_index, embeddings)`` pair: a dict mapping each word to
        its row index, and a 2-D numpy array of the stacked vectors.
    """
    word_to_index = {}
    vectors = []
    with open(glove_filepath, "r") as glove_file:
        for row_number, row in enumerate(glove_file):
            tokens = row.split(" ")
            word_to_index[tokens[0]] = row_number
            vectors.append(np.array([float(token) for token in tokens[1:]]))
    return word_to_index, np.stack(vectors)
def save_tabbed_lines(array, path, encode=True):
    """Write tab-joined lines from ``array`` to ``path``.

    array: assumed to be an array of arrays (each inner array is one row)
    path: path to write to
    encode: passed through to ``save_lines``
    """
    check_dir(os.path.dirname(path))
    concatenated_array = ["\t".join(row) for row in array]
    # Fix: the previous `with open(path, "w")` wrapper only truncated the
    # file; save_lines opens and writes the file itself, so delegate fully.
    return save_lines(concatenated_array, path, encode=encode)
def make_pd(space: gym.Space):
    """Build the matching `ProbabilityDistribution` for a gym action space.

    Discrete -> CategoricalPd, 1-D Box -> DiagGaussianPd,
    MultiBinary -> BernoulliPd; anything else raises TypeError.
    """
    if isinstance(space, gym.spaces.Discrete):
        return CategoricalPd(space.n)
    if isinstance(space, gym.spaces.Box):
        # Only flat (rank-1) continuous action spaces are supported.
        assert len(space.shape) == 1
        return DiagGaussianPd(space.shape[0])
    if isinstance(space, gym.spaces.MultiBinary):
        return BernoulliPd(space.n)
    raise TypeError(space)
def download_file_from_google_drive(
    gdrive_file_id: typing.AnyStr,
    destination: typing.AnyStr,
    chunk_size: int = 32768
) -> typing.AnyStr:
    """Download a file from Google Drive, bypassing the confirmation prompt.

    Args:
        gdrive_file_id: ID string of the file to download from google drive.
        destination: where to save the file.
        chunk_size: chunk size for gradual downloads.

    Returns:
        The path to the downloaded file.
    """
    # taken from this StackOverflow answer: https://stackoverflow.com/a/39225039
    base_url = "https://docs.google.com/uc?export=download"
    session = requests.Session()
    response = session.get(base_url, params={'id': gdrive_file_id}, stream=True)
    # Large files trigger a "can't scan for viruses" page; the confirmation
    # token arrives via a download_warning cookie.
    confirm_token = None
    for cookie_name, cookie_value in response.cookies.items():
        if cookie_name.startswith('download_warning'):
            confirm_token = cookie_value
    if confirm_token:
        response = session.get(
            base_url,
            params={'id': gdrive_file_id, 'confirm': confirm_token},
            stream=True)
    with open(destination, "wb") as output_file:
        for chunk in response.iter_content(chunk_size):
            # Skip keep-alive chunks, which arrive empty.
            if chunk:
                output_file.write(chunk)
    return destination
def attach(func, params):
    """
    Bind every entry of ``params`` whose name matches a parameter in the
    signature of ``func`` to that function, returning the partially-applied
    callable.
    """
    signature = inspect.signature(func)
    matching = Projection(signature.parameters.keys(), params)
    return functools.partial(func, **matching)
def question_route():
    """Render the question-bank page for the current user."""
    # Identify the logged-in user.
    uid = current_user.get_id()
    # Try to keep the session alive; if that fails, log the user out.
    if not keep_active(uid):
        logout_user()
    return question_page(uid)
async def test_config_file_passed_to_config_entry(hass):
    """Test that configuration file for a host are loaded via config entry."""
    # Patch out the config-entry machinery, pretend no host is configured yet,
    # and make load_json return a stored host config for import.
    with patch.object(hass, 'config_entries') as mock_config_entries, \
            patch.object(deconz, 'configured_hosts', return_value=[]), \
            patch.object(deconz, 'load_json',
                         return_value={'host': '1.2.3.4'}):
        assert await async_setup_component(hass, deconz.DOMAIN, {
            deconz.DOMAIN: {}
        }) is True
    # Import flow started
    assert len(mock_config_entries.flow.mock_calls) == 2
def generate_qrcode(url: str, should_cache: bool = True) -> str:
    """Build a QR code for ``url`` and return it as an SVG data URI.

    :param url: the url the QR code should reference
    :param should_cache: whether the generated data URI is cached and reused
    :return: a data URI containing a base64-encoded SVG image
    """
    if should_cache and url in qrcode_cache:
        return qrcode_cache[url]
    buffer = BytesIO()
    qrcode.make(url, image_factory=qrcode.image.svg.SvgPathFillImage).save(buffer)
    buffer.seek(0)
    encoded = base64.b64encode(buffer.read()).decode('utf-8')
    data_uri = 'data:image/svg+xml;base64,' + encoded
    if should_cache:
        qrcode_cache[url] = data_uri
    return data_uri
def print_green(msg: str = None) -> None:
    """Print message to STDOUT in green text.

    :param msg: {str} - the message to be printed
    :raises ValueError: if msg is not provided
    """
    if msg is None:
        # Fixed: docstring previously said "yellow"; use a specific exception
        # type (ValueError is a subclass of Exception, so existing handlers
        # still catch it).
        raise ValueError("msg was not defined")
    print(Fore.GREEN + msg)
    # Reset terminal colors so following output is unaffected.
    print(Style.RESET_ALL + "", end="")
def _process_environment_variables():
    """Process environment variables.

    Reads STRINGS_DEBUG, FLAVOUR / STRINGS_FLAVOUR and POSIXLY_CORRECT from
    the environment and updates the module-level ``parameters`` dict to match
    the selected command flavour. Exits the process on an unknown flavour.
    """
    # pylint: disable=C0103
    global parameters
    # pylint: enable=C0103
    if "STRINGS_DEBUG" in os.environ:
        logging.disable(logging.NOTSET)
    # STRINGS_FLAVOUR (checked second) overrides the generic FLAVOUR.
    if "FLAVOUR" in os.environ:
        parameters["Command flavour"] = os.environ["FLAVOUR"].lower()
    if "STRINGS_FLAVOUR" in os.environ:
        parameters["Command flavour"] = os.environ["STRINGS_FLAVOUR"].lower()
    # From "man environ":
    # POSIXLY_CORRECT
    # When set to any value, this environment variable
    # modifies the behaviour of certain commands to (mostly)
    # execute in a strictly POSIX-compliant manner.
    if "POSIXLY_CORRECT" in os.environ:
        parameters["Command flavour"] = "posix"
    # Command variants supported:
    if parameters["Command flavour"] == "posix":
        parameters["String termination"] = [0, ord('\n')]
    elif parameters["Command flavour"] in ("unix", "unix:v10"):
        parameters["Include backspaces"] = True
        parameters["String termination"] = [0, ord('\n')]
    elif parameters["Command flavour"] in ("plan9", "inferno"):
        parameters["Minimum length"] = 6
        parameters["Print offset"] = "decimal"
        parameters["Encoding"] = "u"
        parameters["Split long lines"] = 70
    elif parameters["Command flavour"] in ("PNU", "bsd", "bsd:freebsd", "gnu", "gnu:linux", "linux"):
        # These flavours use the built-in defaults unchanged.
        pass
    else:
        logging.critical("Unimplemented command FLAVOUR: %s", parameters["Command flavour"])
        sys.exit(1)
    logging.debug("_process_environment_variables(): parameters:")
    logging.debug(parameters)
def get_conflict_fks_versions(obj, version, revision, exclude=None):
    """
    Lookup for deleted FKs for obj, expects version to be obj
    version from the same revision.
    If exclude provided - excludes based on that from versions to check.
    Expects exclude to be a dict of filter string, value i.e {'pk': 1}.
    Returns versions for deleted fks.
    """
    # TODO: get all conflicts, return a tuple/dict with required and not.
    fk_relations = get_fk_models(obj)
    versions_to_check = []
    # Collect every sibling version in this revision whose content type
    # matches one of obj's FK target models (excluding obj's own version).
    for relation in fk_relations:
        found_versions = revision.version_set.exclude(
            pk=version.pk).filter(content_type=relation['content_type'])
        versions_to_check += list(found_versions.values_list('pk', flat=True))
    # convert to versions queryset instead of a list
    versions_to_check_qs = revision.version_set.filter(pk__in=versions_to_check)
    if exclude is not None:
        versions_to_check_qs = versions_to_check_qs.exclude(**exclude)
    # Narrow down to versions whose underlying objects no longer exist.
    conflict_fks_versions = get_deleted_objects_versions(
        versions_to_check_qs)
    return conflict_fks_versions
def create_tbls(conn, c):
    """Create all application tables and commit.

    Creates the glossary (dictionary), both practice tables (forward and
    backward), and the statistics table if they do not already exist.
    """
    ddl_statements = (
        # dictionary table
        """ CREATE TABLE IF NOT EXISTS glossary (
                                        id INTEGER PRIMARY KEY AUTOINCREMENT NOT NULL,
                                        verb TEXT UNIQUE,
                                        explanation TEXT,
                                        date_created TIMESTAMP DEFAULT CURRENT_TIMESTAMP NOT NULL
                                    );""",
        # forward practice table
        """ CREATE TABLE IF NOT EXISTS practice_forward (
                                        id INTEGER PRIMARY KEY AUTOINCREMENT NOT NULL,
                                        verb TEXT,
                                        verb_id INTEGER,
                                        tense_mood_idx INTEGER,
                                        person_idx INTEGER,
                                        correct_num INTEGER DEFAULT 0,
                                        expiration_date TIMESTAMP DEFAULT CURRENT_TIMESTAMP,
                                        has_conjug INTEGER DEFAULT 0
                                    );""",
        # backward practice table
        """ CREATE TABLE IF NOT EXISTS practice_backward (
                                        id INTEGER PRIMARY KEY AUTOINCREMENT NOT NULL,
                                        verb TEXT,
                                        verb_id INTEGER,
                                        tense_mood_idx INTEGER,
                                        person_idx INTEGER,
                                        correct_num INTEGER DEFAULT 0,
                                        expiration_date TIMESTAMP DEFAULT CURRENT_TIMESTAMP,
                                        has_conjug INTEGER DEFAULT 0
                                    );""",
        # statistics table
        """ CREATE TABLE IF NOT EXISTS statistics (
                                        log_date TIMESTAMP DEFAULT CURRENT_TIMESTAMP NOT NULL,
                                        n_total_forward INTEGER,
                                        n_correct_forward INTEGER,
                                        n_total_backward INTEGER,
                                        n_correct_backward INTEGER
                                    );""",
    )
    for ddl in ddl_statements:
        c.execute(ddl)
    conn.commit()
def hz2mel(f):
    """Map frequencies in Hz (scalar or numpy array) onto the mel scale."""
    ratio = f / 700
    return 1127.01048 * np.log(ratio + 1)
def fetchallpages():
    """Fetch every Wikipedia date page (one per month) and save it to disk."""
    print("Fetching Wiki Pages")
    for month in range(1, 13):
        fetchmonth(month)
def Logica(line, cell, run_query):
    """Running Logica predicates and storing results.

    Parses the cell as a Logica program, compiles the predicates listed on
    the magic line to SQL, optionally executes them on the configured engine
    (sqlite / psql / bigquery), and pushes both the SQL strings
    (``<predicate>_sql``) and result tables (``<predicate>``) into the
    IPython user namespace. Output is rendered into a tab bar per predicate.
    """
    predicates = ParseList(line)
    if not predicates:
        ShowError('No predicates to run.')
        return
    # Parse the program (with the optional shared PREAMBLE prepended).
    try:
        program = ';\n'.join(s for s in [PREAMBLE, cell] if s)
        parsed_rules = parse.ParseFile(program)['rule']
    except parse.ParsingException as e:
        e.ShowMessage()
        return
    # Build the logical program; functor expansion can fail independently.
    try:
        program = universe.LogicaProgram(parsed_rules)
    except functors.FunctorError as e:
        e.ShowMessage()
        return
    engine = program.annotations.Engine()
    if engine == 'bigquery' and not BQ_READY:
        ShowError(
            'BigQuery client and/or authentification is not installed. \n'
            'It is the easiest to run BigQuery requests from Google CoLab:\n'
            '  https://colab.research.google.com/.\n'
            'Note that running Logica on SQLite requires no installation.\n'
            'This could be a good fit for working with small data or learning Logica.\n'
            'Use {warning}@Engine("sqlite");{end} annotation in your program to use SQLite.')
        return
    # One tab per predicate plus a trailing "(Log)" tab for diagnostics.
    bar = TabBar(predicates + ['(Log)'])
    logs_idx = len(predicates)
    executions = []
    sub_bars = []
    ip = IPython.get_ipython()
    for idx, predicate in enumerate(predicates):
        with bar.output_to(logs_idx):
            try:
                sql = program.FormattedPredicateSql(predicate)
                executions.append(program.execution)
                # Expose the compiled SQL as <predicate>_sql in the notebook.
                ip.push({predicate + '_sql': sql})
            except rule_translate.RuleCompileException as e:
                print('Encountered error when compiling %s.' % predicate)
                e.ShowMessage()
                return
        # Publish output to Colab cell.
        with bar.output_to(idx):
            sub_bar = TabBar(['SQL', 'Result'])
            sub_bars.append(sub_bar)
            with sub_bar.output_to(0):
                if SHOW_FULL_QUERY:
                    print(
                        color.Format(
                            'The following query is stored at {warning}%s{end} '
                            'variable.' % (
                                predicate + '_sql')))
                    print(sql)
                else:
                    print('Query is stored at %s variable.' %
                          color.Warn(predicate + '_sql'))
    # Select the SQL runner matching the annotated engine.
    with bar.output_to(logs_idx):
        if engine == 'sqlite':
            sql_runner = SqliteRunner()
        elif engine == 'psql':
            sql_runner = PostgresRunner()
        elif engine == 'bigquery':
            EnsureAuthenticatedUser()
            sql_runner = RunSQL
        else:
            raise Exception('Logica only supports BigQuery, PostgreSQL and SQLite '
                            'for now.')
        result_map = concertina_lib.ExecuteLogicaProgram(
            executions, sql_runner=sql_runner, sql_engine=engine)
    for idx, predicate in enumerate(predicates):
        t = result_map[predicate]
        # Expose the result table as <predicate> in the notebook.
        ip.push({predicate: t})
        with bar.output_to(idx):
            with sub_bars[idx].output_to(1):
                if run_query:
                    print(
                        color.Format(
                            'The following table is stored at {warning}%s{end} '
                            'variable.' %
                            predicate))
                    display(t)
                else:
                    print('The query was not run.')
            print(' ')
def set_address_bitmap(manager_bitmap, address):
    """
    Sets the pvscan0 of the worker to the address we want to read/write later
    through the manager_bitmap.

    @param manager_bitmap: handle to the manager bitmap
    @param address: the address to be set in worker bitmap's pvscan0 pointer
    """
    # Pack the target address into an 8-byte buffer and write it via GDI.
    packed_address = c_ulonglong(address)
    gdi32.SetBitmapBits(manager_bitmap, sizeof(packed_address), addressof(packed_address))
def figure(figsize=None, logo="iem", title=None, subtitle=None, **kwargs):
    """Return an opinionated matplotlib figure.

    Parameters:
      figsize (width, height): in inches for the figure, defaults to something
        good for twitter.
      dpi (int): dots per inch
      logo (str): Currently, 'iem', 'dep' is supported. `None` disables.
      title (str): Title to place on the figure.
      subtitle (str): SubTitle to place on the figure.
    """
    if figsize is None:
        figsize = TWITTER_RESOLUTION_INCH
    fig = plt.figure(figsize=figsize, **kwargs)
    draw_logo(fig, logo)
    # Bounding box [x0, x1, y0, y1] reserved for the title text.
    titlebounds = [0.1, 0.9, 0.91, 0.98]
    if subtitle is not None:
        # Shrink the title box upward to leave room for the subtitle below.
        titlebounds[2] = 0.94
    fitbox(fig, title, *titlebounds)
    # NOTE(review): fitbox is also called when subtitle is None — assumes
    # fitbox tolerates a None text argument; confirm.
    fitbox(fig, subtitle, 0.1, 0.9, 0.91, 0.935)
    return fig
def run_epoch(session, model, eval_op=None, verbose=False):
    """Runs the model on the given data.

    Feeds the recurrent state forward between steps, accumulating cost and
    accuracy across the epoch.

    Returns:
        (total_cost, mean_accuracy, output_y) where output_y holds one
        0/1 label per example (1 when y_new[i, 0] == 0).
    """
    costs = 0.0
    iters = 0
    state = session.run(model.initial_state)
    fetches = {
        "cost": model.cost,
        "final_state": model.final_state,
        "accuracy": model.accuracy,
        "y_new": model.y_new,
        "y_target": model.y_target
    }
    accuracys = 0.0
    # Only run the training/eval op when one is supplied (inference otherwise).
    if eval_op is not None:
        fetches["eval_op"] = eval_op
    output_y = []
    for step in range(model.input.epoch_size):
        feed_dict = {}
        # Carry the RNN state across steps within the epoch.
        feed_dict[model.initial_state] = state
        vals = session.run(fetches, feed_dict)
        cost = vals["cost"]
        state = vals["final_state"]
        accuracy = vals["accuracy"]
        y_new = vals["y_new"]
        y_target = vals["y_target"]
        costs += cost
        accuracys += accuracy
        #iters += model.input.num_steps
        iters = iters + 1
        # Collapse the per-example prediction into a binary label;
        # note the inversion: y_new[i,0] == 0 maps to label 1.
        for i in range(model.input.batch_size):
            if y_new[i,0] == 0:
                output_y.append(1)
            else:
                output_y.append(0)
    return costs, accuracys / iters, output_y
def is_scoo(x: Any) -> bool:
    """Return True when ``x`` is a 4-element tuple or list, the shape of an
    `SCoo` (a SAX sparse S-matrix representation in COO-format)."""
    if not isinstance(x, (tuple, list)):
        return False
    return len(x) == 4
def createNonce():
    """Creates a new nonce and stores it in the session.

    Returns:
        The base64-encoded nonce (32 random bytes).
        NOTE(review): ``base64.b64encode`` returns ``bytes`` on Python 3 —
        confirm consumers expect bytes rather than str.
    """
    nonce = base64.b64encode(os.urandom(32))
    flask_session['nonce'] = nonce
    return nonce
def project_rename_folder(object_id, input_params={}, always_retry=False, **kwargs):
    """
    Invokes the /project-xxxx/renameFolder API method.

    For more info, see: https://documentation.dnanexus.com/developer/api/data-containers/folders-and-deletion#api-method-class-xxxx-renamefolder
    """
    route = '/%s/renameFolder' % object_id
    return DXHTTPRequest(route, input_params, always_retry=always_retry, **kwargs)
def full(shape, fill_value, dtype=None):
    """Returns a new array of given shape and dtype, filled with a given value.

    This function currently does not support ``order`` option.

    Args:
        shape (tuple of ints): Dimensionalities of the array.
        fill_value: A scalar value to fill a new array.
        dtype: Data type specifier.

    Returns:
        cupy.ndarray: An array filled with ``fill_value``.

    .. seealso:: :func:`numpy.full`
    """
    # TODO(beam2d): Support ordering option
    # Allocate uninitialized memory, then fill it in place — avoids zeroing.
    a = empty(shape, dtype)
    a.fill(fill_value)
    return a
def is_seq(a):
    """Return `True` if `a` is a Z3 sequence expression.

    Only the Python wrapper type is inspected; `a` may be any object.

    >>> print (is_seq(Unit(IntVal(0))))
    True
    >>> print (is_seq(StringVal("abc")))
    True
    """
    return isinstance(a, SeqRef)
def identity(__obj: T, /) -> T:
    """Return the argument unchanged (the identity function)."""
    result = __obj
    return result
def lines_in_file(filename: str) -> int:
    """
    Count the number of lines in a file.

    :param filename: A string containing the relative or absolute path to a file
    :returns: The number of lines in the file
    """
    # Stream line-by-line instead of materialising the whole file in memory
    # with readlines().
    with open(filename, "r") as f:
        return sum(1 for _ in f)
def readargs():
    """
    Read input arguments if run as separate program.

    Returns
    -------
    (infile, path_out, debug): the input file path, the output directory
    (created if necessary), and the debug flag.

    Raises
    ------
    ArgumentsError: when the input file is missing, the output path is not
        a directory, or the debug flag is not True/False.
    FatalError: when the output directory cannot be created.
    """
    parser = argparse.ArgumentParser(description=(
        'Convert data from WiPL format to binary SimRadar-compatible format.'
    ))
    parser.add_argument('input',
                        type=str,
                        help='[REQUIRED] File to be converted')
    parser.add_argument('--outdir', '-o',
                        type=str,
                        help='Output directory for rcs files, default as location of input file',
                        default='.')
    parser.add_argument('--debug', '-d',
                        type=str,
                        help=('[True/False] Flag for debug mode - writes file back out again in human readable'
                              + ' format after conversion to allow validation of converted data'),
                        default='False')
    args = parser.parse_args()
    infile = args.input
    if not path.isfile(infile):
        # Fixed: the "{}" placeholder was never filled in.
        raise ArgumentsError("Could not find file {}\n If file exists try again with absolute path rather than relative path".format(infile))
    path_out = args.outdir
    if path_out == '.':
        # Default the output directory to the input file's directory.
        inpath = path.split(infile)[0]
        if inpath != '':
            path_out = inpath
    if not path.exists(path_out):
        print('Directory to write rcs files to'
              + ' does not exist\nAttempting to create:')
        try:
            makedirs(path_out)
        except OSError:
            # Narrowed from a bare except: only filesystem failures expected.
            raise FatalError('Could not create directory '+ path_out +'\n')
        else:
            print ("Success!\n")
    if path_out and not path.isdir(path_out):
        raise ArgumentsError(path_out + ' exists but is not a directory\n')
    debug_str = args.debug
    if debug_str.lower() == "true" or debug_str.lower() == "t":
        debug = True
    elif debug_str.lower() == "false" or debug_str.lower() == "f":
        debug = False
    else:
        raise ArgumentsError("Value for debug should be True/False. Value read was {}".format(debug_str))
    return (infile, path_out, debug)
def make(tag):
    """Create a tag window, representing the given pre-existing tag."""
    # Build the base window and bring it to the front.
    construct()
    gui.cue_top()
    gui.title("Panthera: Tag: "+tag)
    # Populate the window from the stored records for this tag.
    rec_to_window(tagrecords.find(tag))
    # Make the tag text widget read-only.
    gui.cue("$top.tag_frame.widget")
    gui.text_ro()
async def test_gw_query_inst_cc(query_cc, gateway, org1_user, peer):
    """ Tests Gateway().query_instantiated_chaincodes """
    # First verify the call fails until the required options are set.
    await _assert_gw_required(
        gateway, lambda gw: gw.query_instantiated_chaincodes(),
        ['channel', 'requestor', 'endorsing_peers']
    )
    res = await gateway.query_instantiated_chaincodes()
    # The gateway should delegate to the client with its configured options.
    query_cc.assert_called_with(
        requestor=org1_user,
        channel=CHANNEL,
        peers=[peer]
    )
    assert res == query_cc.return_value
def validate_input_parameters(live_parameters, original_parameters):
    """Drop live parameters not declared in reana.yaml and return the rest.

    Unknown parameters are reported on stderr and removed in place from
    ``live_parameters``, which is also returned.
    """
    for name in list(live_parameters):
        if name in original_parameters:
            continue
        click.echo(
            click.style('Given parameter - {0}, is not in '
                        'reana.yaml'.format(name),
                        fg='red'),
            err=True)
        del live_parameters[name]
    return live_parameters
def _upper_zero_group(match: ty.Match, /) -> str:
    """
    Uppercase the captured `let` group of the match. Used when
    converting snake_case to camelCase.

    Arguments:
        match: Regex match object produced by `re.sub`

    Returns:
        The same letter from the group, but uppercased
    """
    return match.group("let").upper()
def _concatenate_shapes(shapes, axis):
    """Given array shapes, return the resulting shape and slices prefixes.

    These help in nested concatenation.

    Returns
    -------
    shape: tuple of int
        This tuple satisfies:
        ```
        shape, _ = _concatenate_shapes([arr.shape for shape in arrs], axis)
        shape == concatenate(arrs, axis).shape
        ```
    slice_prefixes: tuple of (slice(start, end), )
        For a list of arrays being concatenated, this returns the slice
        in the larger array at axis that needs to be sliced into.
        For example, the following holds:
        ```
        ret = concatenate([a, b, c], axis)
        _, (sl_a, sl_b, sl_c) = concatenate_slices([a, b, c], axis)
        ret[(slice(None),) * axis + sl_a] == a
        ret[(slice(None),) * axis + sl_b] == b
        ret[(slice(None),) * axis + sl_c] == c
        ```
        These are called slice prefixes since they are used in the recursive
        blocking algorithm to compute the left-most slices during the
        recursion. Therefore, they must be prepended to rest of the slice
        that was computed deeper in the recursion.
        These are returned as tuples to ensure that they can quickly be added
        to existing slice tuple without creating a new tuple every time.

    Raises
    ------
    ValueError
        If the shapes differ on any axis other than ``axis``.
    """
    # Cache a result that will be reused.
    shape_at_axis = [shape[axis] for shape in shapes]
    # Take a shape, any shape
    first_shape = shapes[0]
    first_shape_pre = first_shape[:axis]
    first_shape_post = first_shape[axis + 1 :]
    # All shapes must agree everywhere except along the concatenation axis.
    if any(
        shape[:axis] != first_shape_pre or shape[axis + 1 :] != first_shape_post
        for shape in shapes
    ):
        raise ValueError("Mismatched array shapes in block along axis {}.".format(axis))
    shape = first_shape_pre + (sum(shape_at_axis),) + first_shape[axis + 1 :]
    # Running offsets along `axis` give each input's [start, end) span.
    offsets_at_axis = _accumulate(shape_at_axis)
    slice_prefixes = [
        (slice(start, end),)
        for start, end in zip([0] + offsets_at_axis, offsets_at_axis)
    ]
    return shape, slice_prefixes
def md_changes(seq, md_tag):
    """Rebuild the reference sequence of an alignment, as far as the MD tag
    can represent it.

    Note:
        Used in conjunction with `cigar_changes` to recreate the
        complete reference sequence.

    Args:
        seq (str): aligned segment sequence
        md_tag (str): MD tag for associated sequence

    Returns:
        str: the aligned segment's reference sequence with the changes
        reflected in the MD tag applied

    Raises:
        ValueError: if MD tag is None

    Example:
        >>> md_changes('CTTATATTGGCCTT', '3C4AT4')
        'CTTCTATTATCCTT'
    """
    if md_tag is None:
        raise ValueError('No MD tag found or given for sequence')
    pieces = []
    cursor = 0
    # MD tokens: a run of matches (digits), a deletion (^ + bases), or a
    # single substituted reference base.
    for mo in re.finditer(r'(?P<matches>\d+)|(?P<del>\^\w+?(?=\d))|(?P<sub>\w)', md_tag):
        match_run, deletion, substitution = mo.group('matches', 'del', 'sub')
        if match_run is not None:
            count = int(match_run)
            pieces.append(seq[cursor:cursor + count])
            cursor += count
        elif deletion is not None:
            # Deleted reference bases follow the '^' marker.
            pieces.append(deletion[1:])
        elif substitution is not None:
            pieces.append(substitution)
            cursor += 1
    return ''.join(pieces)
def start_board(stop_area_id):
    """Update a departure board with departures at station STOP_AREA_ID.

    Use search to find the STOP_AREA_ID for a station. Runs the board client
    in a background thread and blocks until interrupted (Ctrl-C).
    """
    try:
        sncf.check_params(stop_area_id)
    except commons.SncfException as e:
        logger.error("error: %s", str(e))
        # NOTE(review): execution continues and the board is started even
        # after a failed parameter check — confirm a `return` is not missing.
    # start board
    board_client_instance = board_client.BoardClient()
    threading.Thread(
        target=board_client_instance.run,
        args=[
            view_model.ViewModelSncf_192_32_3_Rows_To_ProtocolBuffers,
            data_updater.DataUpdaterSncf,
            {
                "stop_area_id": stop_area_id,
            },
        ],
    ).start()
    # run until interrupt
    try:
        while True:
            time.sleep(ONE_DAY_IN_SECONDS)
    except KeyboardInterrupt:
        logger.info("received interrupt")
        # Signal the worker thread to stop its loop.
        board_client_instance.running = False
def _get_partitions(dev):
    """Return partition information (num, size, type) for a device.

    Runs ``parted --script --machine <dev> unit s print`` and parses its
    colon-separated machine-readable output.

    Returns:
        list of (num, start, size, fstype, name, flags) tuples, with
        num/start/size as ints (sector units).
    """
    dev_path = utils.make_dev_path(dev)
    out, _err = utils.execute('parted', '--script', '--machine',
                              dev_path, 'unit s', 'print',
                              run_as_root=True)
    lines = [line for line in out.split('\n') if line]
    partitions = []
    LOG.debug("Partitions:")
    # The first two lines of machine output are the header and the disk
    # summary; partition rows start at index 2.
    for line in lines[2:]:
        line = line.rstrip(';')
        num, start, end, size, fstype, name, flags = line.split(':')
        num = int(num)
        # Sector values carry a trailing 's' unit suffix.
        start = int(start.rstrip('s'))
        end = int(end.rstrip('s'))
        size = int(size.rstrip('s'))
        LOG.debug("  %(num)s: %(fstype)s %(size)d sectors",
                  {'num': num, 'fstype': fstype, 'size': size})
        partitions.append((num, start, size, fstype, name, flags))
    return partitions
def ping(target, mode="4"):
    """Run 10 pings against ``target`` and export the stats as metrics.

    ``mode`` selects the IP version ("4" or "6" -> ping -4 / -6).
    Parses output of the form:

    PING a.fi (193.166.4.1) 56(84) bytes of data.
    --- a.fi ping statistics ---
    10 packets transmitted, 10 received, 0% packet loss, time 8996ms
    rtt min/avg/max/mdev = 17.395/23.978/34.866/5.381 ms
    """
    p = subprocess.Popen(["ping", "-q", "-c", "10", "-%s" % mode, target], stdout=subprocess.PIPE)
    # stderr is not piped, so it is None here; only stdout is parsed.
    stdout, stderr = p.communicate()
    for row in stdout.decode("utf-8").splitlines():
        if "packet loss" in row:
            # Sixth whitespace field is e.g. "0%"; strip the percent sign.
            loss = int(row.split()[5][:-1].strip())
            ping_loss.labels(target=target, mode=mode).set(loss)
        elif 'min/avg/max/mdev' in row:
            # Fourth field is "min/avg/max/mdev" values joined by '/'.
            min_rtt,avg_rtt,max_rtt,mdev_rtt = [float(x) for x in row.split()[3].split("/")]
            ping_min.labels(target=target, mode=mode).set(min_rtt)
            ping_avg.labels(target=target, mode=mode).set(avg_rtt)
            ping_max.labels(target=target, mode=mode).set(max_rtt)
            ping_mdev.labels(target=target, mode=mode).set(mdev_rtt)
def sort_singlepulse(basename, directory=os.getcwd(), verbose=False):
    """
    Accepts the base name (usually Observation ID) and the directory where the relevant files are located.
    If no directory argument is given, assumes all files are in current working directory (via os.getcwd()).
    Creates a total singlepulse file from all singlepulse files with the given base name.
    Ensures unique entries only, and the file output is sorted in time (and therefore sample).

    Note: the merged <basename>.singlepulse file is written into the current
    working directory; the default for ``directory`` is evaluated at import time.
    """
    # grab all files with relevant basename in the target directory
    base_files = sorted([f for f in os.listdir(directory) if f.startswith(basename)])
    sp_files = [s for s in base_files if s.endswith('.singlepulse') and '_DM' in s]
    # create a list of single pulse events from the .singlepulse files
    sp_events = []
    for sp in sp_files:
        if verbose:
            print("loading data from: {0}".format(sp))
        # load data, unless the file is empty/unreadable, in which case skip it
        try:
            # Fix: read from `directory`, not the current working directory.
            data = np.genfromtxt(os.path.join(directory, sp), comments='#', skip_header=1)
        except Exception:
            if verbose:
                print("empty file. not appending to events list.")
            continue
        inf_name = sp.replace('.singlepulse', '.inf')
        if any(isinstance(d, np.ndarray) for d in data):
            # contains 2 or more lines
            for d in data:
                sp_events.append(np.append(d, inf_name).tolist())
        else:
            # single-line file. Fix: the original appended the stale loop
            # variable `d` here instead of `data`.
            sp_events.append(np.append(data, inf_name).tolist())
    # annoying but seemingly necessary type conversions for sorting
    for s in sp_events:
        s[0] = float(s[0])
        s[1] = float(s[1])
        s[2] = float(s[2])
        s[3] = int(float(s[3]))
        s[4] = int(float(s[4]))
    # keep only unique pulse events, sorted by time. Output is a list of tuples
    ordered = sorted(set(map(tuple, sp_events)), key=itemgetter(2))
    if verbose:
        print("writing {0}.singlepulse".format(basename))
    with open(basename + '.singlepulse', 'w') as f:
        f.write('{0},{1},{2},{3},{4},{5}\n'.format('DM', 'Sigma', 'Time(s)', 'Sample',
                                                   'Downfact', 'inf_file'))
        for event in ordered:
            f.write('{0},{1},{2},{3},{4},{5}\n'.format(*event))
def forwards_func(apps, schema_editor):
    """
    move relation shit from sponsorship->org to org->sponsorship
    to allow a many orgs to many sponsorship relation
    """
    # Use the historical (migration-time) models, not the current ones.
    Sponsorship = apps.get_model("peeringdb_server", "Sponsorship")
    SponsorshipOrganization = apps.get_model(
        "peeringdb_server", "SponsorshipOrganization"
    )
    # One through-row per existing sponsorship, copying url/logo across.
    for sponsorship in Sponsorship.objects.all():
        SponsorshipOrganization.objects.create(
            org=sponsorship.org,
            sponsorship=sponsorship,
            url=sponsorship.url,
            logo=sponsorship.logo,
        )
def test_get_all_subscriptions(controller: Controller, owner: str):
    """Check that Controller returned correct list of Subscription objects:
    - No _id specified in each subscription
    - type is not dict, but Subscription
    - start_date field type is not datetime, but date
    """
    result = controller.get_subscriptions_list(owner)
    # Compare against the expected fixture list of Subscription objects.
    assert result == subscriptions_obj_list
def interest_coverage_ratio():
    """Interest Coverage Ratio.

    Interactively prompts for net income, income tax expense and interest
    expense, then prints EBIT and the interest-coverage ratio
    (EBIT / interest expense). Python 2 code (print statements).
    """
    x = float(input("Please Enter Net Income Value: "))
    y = float(input("Please Enter Income Tax Expense Value: "))
    z = float(input("Please Enter Interest Expense Value: "))
    # EBIT = net income + tax expense + interest expense
    eb = float(x)+float(y)+float(z)
    # Ratio = EBIT / interest expense
    s = (float(x)+float(y)+float(z))/float(z)
    print ">> Your Earning Before Interest And Tax (EBIT) is",eb
    print ">> Your Interest-Coverage Ratio is",round(s,2)
def get_cs_token(accesskey="",secretkey="",identity_url="",tenant_id=""):
    """
    Pass our accesskey and secretkey to keystone for tokenization.

    Returns the token id string on success, or False when the HTTP request
    fails or the returned token belongs to a different tenant.
    Python 2 code (urllib2, old except syntax).
    """
    identity_request_json = json.dumps({
        'auth' : {
            'apiAccessKeyCredentials' : {
                'accessKey' : accesskey,
                'secretKey' : secretkey
            },
            "tenantId": tenant_id
        }
    })
    # POST the credentials to the keystone /tokens endpoint as JSON.
    identity_req = urllib2.Request(identity_url+"/tokens",
        identity_request_json, {'Content-type':'application/json'})
    try:
        response = urllib2.urlopen(identity_req).read()
    except urllib2.HTTPError, e:
        log("HTTP Error: "+str(e))
        return False
    response_json = json.loads(response)
    # Sanity-check that the issued token is scoped to the requested tenant.
    if response_json['access']['token']['tenant']['id'] == tenant_id:
        return response_json['access']['token']['id']
    return False
def get_auto_scaling_group(asg, asg_name: str):
    """Fetch a single boto3 Auto Scaling Group by name, raising if absent."""
    response = asg.describe_auto_scaling_groups(AutoScalingGroupNames=[asg_name])
    matched = response["AutoScalingGroups"]
    if matched:
        return matched[0]
    raise Exception("Auto Scaling Group {} not found".format(asg_name))
def guiraud_r(txt_len: int, vocab_size: int) -> np.float64:
    """
    Guiraud's R (root type-token ratio, 1960): the vocabulary size divided
    by the square root of the text length.
    """
    root_length = np.sqrt(txt_len)
    return vocab_size / root_length
def serialize_dagster_namedtuple(nt: tuple, **json_kwargs) -> str:
    """Serialize a whitelisted named tuple to a json encoded string.

    Args:
        nt: The namedtuple to serialize (validated via ``check.tuple_param``).
        **json_kwargs: Extra keyword arguments forwarded to the JSON encoder.

    Returns:
        The JSON-encoded string.
    """
    check.tuple_param(nt, "nt")
    return _serialize_dagster_namedtuple(nt, whitelist_map=_WHITELIST_MAP, **json_kwargs)
def test_footprint_antimeridian(benchmark):
    """
    When a polygon crosses the antimeridian, check that it's translated correctly.
    """
    overview = _create_overview()
    # benchmark (pytest-benchmark fixture) times the property evaluation.
    footprint_latlon = benchmark(lambda: overview.footprint_wgs84)
    assert_shapes_mostly_equal(footprint_latlon, EXPECTED_CLEAN_POLY, 0.1)
def join_epiweek(year, week):
    """Combine a (year, week) pair into a single YYYYWW epiweek integer."""
    shifted_year = year * 100
    return shifted_year + week
def add_data_associations(db, data_id, tree_identifier, folder_path, profile_id, user_group_id):
    """Creates associations to the active user profile
    and personal user group for the given data.

    Args:
        db (object): The db object
        data_id (int): The id of the data
        tree_identifier (str): The identifier of the tree
        folder_path (str): The folder path f.e. "/scripts/server1"
        profile_id (int): The id of profile
        user_group_id (int): The user group id

    Returns:
        None
    """
    # Locate (or lazily create) the root folders for both association targets.
    profile_root_folder_id = get_root_folder_id(db, tree_identifier, 'profile', profile_id)
    user_group_root_folder_id = get_root_folder_id(db, tree_identifier, 'group', user_group_id)
    if profile_root_folder_id is None:
        profile_root_folder_id = create_profile_data_tree(db, tree_identifier, profile_id)
    if user_group_root_folder_id is None:
        user_group_root_folder_id = create_user_group_data_tree(db, tree_identifier, user_group_id)
    if folder_path:
        # Resolve (or create) the sub-folder under each root.
        folder_profile_id = get_folder_id(db, profile_root_folder_id, folder_path)
        folder_user_group_id = get_folder_id(db, user_group_root_folder_id, folder_path)
        if folder_profile_id is None:
            folder_profile_id, _ = create_folder(db, folder_path, profile_root_folder_id)
        if folder_user_group_id is None:
            folder_user_group_id, _ = create_folder(db, folder_path, user_group_root_folder_id)
    else:
        # No sub-path given: associate directly under the roots.
        folder_profile_id = profile_root_folder_id
        folder_user_group_id = user_group_root_folder_id
    add_data_to_folder(db, data_id, folder_profile_id, read_only=0)
    add_data_to_folder(db, data_id, folder_user_group_id, read_only=0)
def get_or_create_api_key(datastore: data_store.DataStore,
                          project_id: str) -> str:
    """Return the API key of a project, creating the project if needed.

    If a project with `project_id` already exists its API key is returned;
    otherwise a new project with that ID (and a freshly generated key) is
    written to the datastore.

    Args:
        datastore: The datastore used for reading / writing the project.
        project_id: The ID of the project to get or write.

    Returns:
        The API key associated with the project.
    """
    try:
        existing = datastore.read_by_proto_ids(project_id=project_id)
    except data_store.NotFoundError:
        pass
    else:
        return existing.api_key
    # Project not found: create it with a newly generated key.
    new_key = unique_id.generate_base64_id()
    new_project = data_store_pb2.Project(
        project_id=project_id, name=project_id, api_key=new_key)
    datastore.write(new_project)
    return new_key
def map_cosh(process):
    """Translate a ``cosh`` process using the default 'apply' mapping."""
    mapped = map_default(process, 'cosh', 'apply')
    return mapped
def deploy_mydaemon():
    """Update uwsgi master config conf/pydaemon.service, then restart"""
    # Stop first (best-effort: the unit may not be running yet).
    sudo("systemctl stop pydaemon", warn_only=True)
    put("conf/pydaemon.service", "/etc/systemd/system/", use_sudo=True)
    # Enable the unit, pick up the new file, and bring it back up.
    for command in ("systemctl enable pydaemon",
                    "systemctl daemon-reload",
                    "systemctl start pydaemon"):
        sudo(command)
def fs(func):
    """
    This is the decorator which performs recursive AST substitution of
    functions, and optional JIT-compilation using `numba`_.
    This must only be used on functions with positional parameters
    defined; this must not be used on functions with keyword parameters.
    This decorator modifies the original function (and any nested
    function calls) by replacing any functions passed in using keyword
    arguments. It replaces them in the AST and returns a new function
    object with a new code object that calls the replacement functions
    instead.
    For example, a function hierarchy such as:
    >>> def calculate(x):
    ...     return x * x
    >>> def my_func(x):
    ...     a = calculate(x)
    ...     return a / 2
    will take the input variable `x`, square it, and then halve the
    result:
    >>> my_func(6)
    18.0
    Six squared is 36, divided by two is 18.
    If you wanted to replace the `calculate` function to
    return a different calculation, you could use this `@fs`
    decorator:
    >>> @fs
    ... def my_func(x):
    ...     a = calculate(x)
    ...     return a / 2
    Now the `my_func` callable is able to accept keyword arguments,
    which it will replace recursively throughout its hierarchy.
    If you wanted to change the `calculate` in this function to:
    >>> def cube(x):
    ...     return x * x * x
    then after applying the `@fs` decorator you can do this:
    >>> my_func(6, calculate=cube)
    108.0
    Six cubed is 216, divided by two is 108.0.
    This parametrisation can be decided at runtime - every time a
    new keyword argument is passed in, it generates a new function
    object with a new code object.
    To store the new function object instead of executing it, pass
    `return_callable=True` to the decorated function:
    >>> new_func = my_func(6, calculate=cube, return_callable=True)
    >>> # At this point the new function has **not** been called.
    >>> new_func(6)
    108.0
    """
    # Keep a reference to the undecorated function and a cache dict that is
    # shared across calls of the wrapper for the top-level substitution.
    _func = func
    replaced = {}
    @wraps(func)
    def fs_wrapper(*args, **kwargs):
        # `return_callable` is a control flag, not a substitution target.
        return_callable = kwargs.pop('return_callable', None)
        # This deliberately mutates the kwargs.
        # We don't want to have a fs-decorated function
        # as a kwarg to another, so we undecorate it first.
        for k, v in kwargs.items():
            if hasattr(v, 'undecorated'):
                kwargs[k] = v.undecorated
        # TODO : ensure jit function returned
        if not kwargs:
            # No substitutions requested: call the original directly.
            return _func(*args)
        # TODO : remove fastats keywords such as 'debug'
        # before passing into AstProcessor
        # First pass: process each replacement function itself, so that
        # nested calls inside replacements are also substituted and jitted.
        new_funcs = {}
        for v in kwargs.values():
            if isfunction(v) and v.__name__ not in kwargs:
                inner_replaced = {}
                processor = AstProcessor(v, kwargs, inner_replaced, new_funcs)
                proc = processor.process()
                new_funcs[v.__name__] = convert_to_jit(proc)
        # Point the kwargs at the processed (jitted) versions where available.
        new_kwargs = {}
        for k, v in kwargs.items():
            if new_funcs.get(v.__name__):
                new_kwargs[k] = new_funcs[v.__name__]
        kwargs.update(new_kwargs)
        # Second pass: rewrite the decorated function's AST with the
        # replacements and JIT-compile the result.
        processor = AstProcessor(_func, kwargs, replaced, new_funcs)
        proc = processor.process()
        if return_callable:
            return convert_to_jit(proc)
        return convert_to_jit(proc)(*args)
    # Expose the original so other fs-decorated callers can undecorate it.
    fs_wrapper.undecorated = _func
    return fs_wrapper
def cleanline(line):
    """Strip newline characters and surrounding ',' separators from a line
    of input data.
    """
    return line.strip('\n').strip(',')
def convert_contrib_box_nms(node, **kwargs):
    """Map MXNet's _contrib_box_nms operator to ONNX

    Builds an ONNX subgraph around NonMaxSuppression (opset >= 11) that
    reproduces MXNet's output layout: suppressed entries are padded with -1
    and surviving boxes are scattered back into their original positions
    before being sorted by candidate index.
    """
    from onnx.helper import make_node
    name, input_nodes, attrs = get_inputs(node, kwargs)
    input_dtypes = get_input_dtypes(node, kwargs)
    dtype = input_dtypes[0]
    #dtype_t = onnx.mapping.NP_TYPE_TO_TENSOR_TYPE[dtype]
    opset_version = kwargs['opset_version']
    if opset_version < 11:
        raise AttributeError('ONNX opset 11 or greater is required to export this operator')
    # Read the MXNet operator attributes, falling back to MXNet's defaults.
    overlap_thresh = float(attrs.get('overlap_thresh', '0.5'))
    valid_thresh = float(attrs.get('valid_thresh', '0'))
    topk = int(attrs.get('topk', '-1'))
    coord_start = int(attrs.get('coord_start', '2'))
    score_index = int(attrs.get('score_index', '1'))
    id_index = int(attrs.get('id_index', '-1'))
    force_suppress = attrs.get('force_suppress', 'True')
    background_id = int(attrs.get('background_id', '-1'))
    in_format = attrs.get('in_format', 'corner')
    out_format = attrs.get('out_format', 'corner')
    center_point_box = 0 if in_format == 'corner' else 1
    if topk == -1:
        # MXNet's -1 means "no limit"; ONNX needs an explicit large cap.
        topk = 2**31-1
    # Unsupported attribute combinations are rejected or warned about.
    if in_format != out_format:
        raise NotImplementedError('box_nms does not currently support in_fomat != out_format')
    if background_id != -1:
        raise NotImplementedError('box_nms does not currently support background_id != -1')
    if id_index != -1 or force_suppress == 'False':
        logging.warning('box_nms: id_idex != -1 or/and force_suppress == False detected. '
                        'However, due to ONNX limitations, boxes of different categories will NOT '
                        'be exempted from suppression. This might lead to different behavior than '
                        'native MXNet')
    # Constant initializer tensors used by the subgraph below.
    create_tensor([coord_start], name+'_cs', kwargs['initializer'])
    create_tensor([coord_start+4], name+'_cs_p4', kwargs['initializer'])
    create_tensor([score_index], name+'_si', kwargs['initializer'])
    create_tensor([score_index+1], name+'_si_p1', kwargs['initializer'])
    create_tensor([topk], name+'_topk', kwargs['initializer'])
    create_tensor([overlap_thresh], name+'_ot', kwargs['initializer'], dtype=np.float32)
    create_tensor([valid_thresh], name+'_vt', kwargs['initializer'], dtype=np.float32)
    create_tensor([-1], name+'_m1', kwargs['initializer'])
    create_tensor([-1], name+'_m1_f', kwargs['initializer'], dtype=dtype)
    create_tensor([0], name+'_0', kwargs['initializer'])
    create_tensor([1], name+'_1', kwargs['initializer'])
    create_tensor([2], name+'_2', kwargs['initializer'])
    create_tensor([3], name+'_3', kwargs['initializer'])
    create_tensor([0, 1, -1], name+'_scores_shape', kwargs['initializer'])
    create_tensor([0, 0, 1, 0], name+'_pad', kwargs['initializer'])
    create_tensor([0, -1], name+'_bat_spat_helper', kwargs['initializer'])
    create_const_scalar_node(name+"_0_s", np.int64(0), kwargs)
    create_const_scalar_node(name+"_1_s", np.int64(1), kwargs)
    nodes = [
        # Flatten leading batch dims: reshape input to (-1, spatial, width).
        make_node('Shape', [input_nodes[0]], [name+'_shape']),
        make_node('Shape', [name+'_shape'], [name+'_dim']),
        make_node('Sub', [name+'_dim', name+'_2'], [name+'_dim_m2']),
        make_node('Slice', [name+'_shape', name+'_dim_m2', name+'_dim'], [name+'_shape_last2']),
        make_node('Concat', [name+'_m1', name+'_shape_last2'], [name+'_shape_3d'], axis=0),
        make_node('Reshape', [input_nodes[0], name+'_shape_3d'], [name+'_data_3d']),
        # Split out the box coordinates and the scores column.
        make_node('Slice', [name+'_data_3d', name+'_cs', name+'_cs_p4', name+'_m1'],
                  [name+'_boxes']),
        make_node('Slice', [name+'_data_3d', name+'_si', name+'_si_p1', name+'_m1'],
                  [name+'_scores_raw']),
        make_node('Reshape', [name+'_scores_raw', name+'_scores_shape'], [name+'_scores']),
        make_node('Shape', [name+'_scores'], [name+'_scores_shape_actual']),
        # Run ONNX NMS to obtain the surviving (batch, class, box) triples.
        make_node('NonMaxSuppression',
                  [name+'_boxes', name+'_scores', name+'_topk', name+'_ot', name+'_vt'],
                  [name+'_nms'], center_point_box=center_point_box),
        make_node('Slice', [name+'_nms', name+'_0', name+'_3', name+'_m1', name+'_2'],
                  [name+'_nms_sliced']),
        make_node('GatherND', [name+'_data_3d', name+'_nms_sliced'], [name+'_candidates']),
        # Pad with a -1 row so suppressed slots can gather a sentinel entry.
        make_node('Pad', [name+'_candidates', name+'_pad', name+'_m1_f'], [name+'_cand_padded']),
        make_node('Shape', [name+'_nms'], [name+'_nms_shape']),
        make_node('Slice', [name+'_nms_shape', name+'_0', name+'_1'], [name+'_cand_cnt']),
        make_node('Squeeze', [name+'_cand_cnt'], [name+'_cc_s'], axes=[0]),
        make_node('Range', [name+'_0_s', name+'_cc_s', name+'_1_s'], [name+'_cand_indices']),
        make_node('Slice', [name+'_scores_shape_actual', name+'_0', name+'_3', name+'_m1',
                  name+'_2'], [name+'_shape_bat_spat']),
        make_node('Slice', [name+'_shape_bat_spat', name+'_1', name+'_2'], [name+'_spat_dim']),
        # Scatter candidate indices into a grid defaulting to the pad slot,
        # then sort ascending so survivors come first in original order.
        make_node('Expand', [name+'_cand_cnt', name+'_shape_bat_spat'], [name+'_base_indices']),
        make_node('ScatterND', [name+'_base_indices', name+'_nms_sliced', name+'_cand_indices'],
                  [name+'_indices']),
        make_node('TopK', [name+'_indices', name+'_spat_dim'], [name+'_indices_sorted', name+'__'],
                  largest=0, axis=-1, sorted=1),
        make_node('Gather', [name+'_cand_padded', name+'_indices_sorted'], [name+'_gather']),
        make_node('Reshape', [name+'_gather', name+'_shape'], [name+'0'])
    ]
    return nodes
def fill76(text):
    """Any text. Wraps the text to fit in 76 columns."""
    wrapped = fill(text, 76)
    return wrapped
def current_object(cursor_offset, line):
    """If in attribute completion, the object on which attribute should be
    looked up."""
    match = current_word(cursor_offset, line)
    if match is None:
        return None
    start, end, word = match
    # Collect the dotted name components that end before the cursor.
    pieces = []
    for m in current_object_re.finditer(word):
        if m.end(1) + start < cursor_offset:
            pieces.append(m.group(1))
    if not pieces:
        return None
    dotted = ".".join(pieces)
    return LinePart(start, start + len(dotted), dotted)
def scheduled_job(process=dash_app_process):
    """Schedule a job: kill the running Dash app process and start a new one."""
    global dash_app_process
    print('This job is run every weekday at 5pm.')
    print('Kill process of a Dash app')
    # Kill the whole process group the app was started in.
    os.killpg(os.getpgid(process.pid), signal.SIGTERM)
    subprocess.call("python3 itcfinally2.py", shell=True)
    print('Create a new process of a Dash app')
    # Start the replacement app in its own session so it can be killed as a group.
    dash_app_process = subprocess.Popen(
        "python3 app.py", stdout=subprocess.PIPE, shell=True, preexec_fn=os.setsid)
def download_zip(url: str) -> BytesIO:
    """Download data from url into an in-memory buffer."""
    logger.warning('start chromium download.\n'
                   'Download may take a few minutes.')
    # disable warnings so that we don't need a cert.
    # see https://urllib3.readthedocs.io/en/latest/advanced-usage.html for more
    urllib3.disable_warnings()
    buffer = BytesIO()
    with urllib3.PoolManager(cert_reqs='CERT_NONE') as http:
        # preload_content=False lets us stream the body chunk by chunk.
        response = http.request('GET', url, preload_content=False)
        try:
            total_length = int(response.headers['content-length'])
        except (KeyError, ValueError, AttributeError):
            total_length = 0
        progress = tqdm(
            total=total_length,
            file=os.devnull if NO_PROGRESS_BAR else None,
        )
        # Stream in 10 KiB chunks, updating the progress bar as we go.
        for chunk in response.stream(10240):
            buffer.write(chunk)
            progress.update(len(chunk))
        progress.close()
    logger.warning('\nchromium download done.')
    return buffer
def test_nested_condition() -> None:
    """Test a nested condition. """
    @argcomb(Or(And("a", "b"), And("c", "d")))
    def f(a: Any = None, b: Any = None, c: Any = None, d: Any = None) -> None:
        ...
    # combinations that satisfy the condition
    for valid in ({"a": 1, "b": 1},
                  {"c": 1, "d": 1},
                  {"a": 1, "b": 1, "c": 1, "d": 1}):
        f(**valid)
    # combinations that violate the condition
    for invalid in ({"a": 1}, {"a": 1, "c": 1}, {}):
        with pytest.raises(InvalidArgumentCombination):
            f(**invalid)
def test_handle_requiressl_in_priv_string(input_tuple, output_tuple):
    """Tests the handle_requiressl_in_priv_string function."""
    result = handle_requiressl_in_priv_string(MagicMock(), *input_tuple)
    assert result == output_tuple
def _is_match(option, useful_options, find_perfect_match):
"""
returns True if 'option' is between the useful_options
"""
for useful_option in useful_options:
if len(option) == sum([1 for o in option if o in useful_option]):
if not find_perfect_match or len(set(useful_option)) == len(set(option)):
return True
return False | 35,961 |
def pass_aligned_filtering(left_read, right_read, counter):
    """
    Test if the two reads pass the additional filters such as check for soft-clipped end next to the variant region,
    or overlapping region between the two reads.
    :param left_read: the left (or 5') most read
    :param right_read: the right (or 3') most read
    :param counter: Counter to report the number of reads filtered.
    :return: True or False
    """
    # In CIGAR tuples the operation is coded as an integer:
    # https://pysam.readthedocs.io/en/latest/api.html#pysam.AlignedSegment.cigartuples
    has_soft_clip = (left_read.cigartuples[-1][0] == pysam.CSOFT_CLIP
                     or right_read.cigartuples[0][0] == pysam.CSOFT_CLIP)
    if has_soft_clip:
        counter['Soft-clipped alignments'] += 1
        return False
    if left_read.reference_end > right_read.reference_start:
        counter['Overlapping alignment'] += 1
        return False
    if left_read.is_reverse != right_read.is_reverse:
        counter['Unexpected orientation'] += 1
        return False
    return True
def load_metaconfig(file_path):
    """ Loads a single metaconfig file and returns variable to expression dictionary """
    definitions = OrderedDict()
    if not os.path.isfile(file_path):
        return definitions
    with open(file_path) as f:
        raw_lines = f.readlines()
    for line in (raw.strip(os.linesep).strip() for raw in raw_lines):
        # skip comments and empty lines
        if not line or line.startswith("#"):
            continue
        # parse line
        try:
            var_name, var_expression = parse_metaconfig_line(line)
        except ValueError as e:
            future.utils.raise_from(
                ValueError("Cannot parse metaconfig; file=" + file_path), e)
        definitions[var_name] = var_expression
    return definitions
def detect_slow_oscillation(data: Dataset, algo: str = 'AASM/Massimini2004', start_offset: float = None) -> pd.DataFrame:
    """
    Detect slow waves (slow oscillations) per channel in the given dataset.

    :param data: dataset (wonambi-compatible) to run slow-wave detection on
    :param algo: which algorithm to use to detect slow waves. See wonambi methods: https://wonambi-python.github.io/gui/methods.html
    :param start_offset: offset between first epoch and recording - onset is measured from this
    :return: dataframe of slow-wave locations, with columns for chan, onset, duration and other
        properties, sorted by onset; None if no slow waves were detected
    """
    detection = DetectSlowWave(algo)
    sos_detected = detection(data)
    sos_df = pd.DataFrame(sos_detected.events, dtype=float)
    # Map detector event keys to output column names; a None value marks a
    # column to drop from the result ('end' and 'ptp').
    col_map = {'start': 'onset',
               'end': None,
               'trough_time': 'trough_time',
               'zero_time': 'zero_time',
               'peak_time': 'peak_time',
               'trough_val': 'trough_uV',
               'peak_val': 'peak_uV',
               'dur': 'duration',
               'ptp': None,
               'chan': 'chan'}
    cols_to_keep = set(sos_df.columns) - set([k for k, v in col_map.items() if v is None])
    sos_df = sos_df.loc[:, cols_to_keep]
    sos_df.columns = [col_map[k] for k in sos_df.columns]
    if sos_df.shape[0] == 0:
        return None #empty df
    # Express the characteristic time points relative to the event onset.
    sos_df['peak_time'] = sos_df['peak_time'] - sos_df['onset']
    sos_df['trough_time'] = sos_df['trough_time'] - sos_df['onset']
    sos_df['zero_time'] = sos_df['zero_time'] - sos_df['onset']
    sos_df['description'] = 'slow_osc'
    if start_offset is not None:
        # Re-reference onsets to the epoch start and drop events before it.
        sos_df['onset'] = sos_df['onset'] - start_offset
        sos_df = sos_df.loc[sos_df['onset']>=0,:]
    return sos_df.sort_values('onset')
def _map_channels_to_measurement_lists(snirf):
"""Returns a map of measurementList index to measurementList group name."""
prefix = "measurementList"
data_keys = snirf["nirs"]["data1"].keys()
mls = [k for k in data_keys if k.startswith(prefix)]
def _extract_channel_id(ml):
return int(ml[len(prefix) :])
return {_extract_channel_id(ml): ml for ml in mls} | 35,965 |
def is_not_applicable_for_questionnaire(
    value: QuestionGroup, responses: QuestionnaireResponses
) -> bool:
    """Returns true if the given group's questions are not answerable for the given responses.

    Thin wrapper: delegates to the group's own
    ``is_not_applicable_for_responses`` check against the provided
    questionnaire responses.
    """
    result = value.is_not_applicable_for_responses(responses)
    return result
def _chebnodes(a,b,n):
"""Chebyshev nodes of rank n on interal [a,b]."""
if not a < b:
raise ValueError('Lower bound must be less than upper bound.')
return np.array([1/2*((a+b)+(b-a)*np.cos((2*k-1)*np.pi/(2*n))) for k in range(1,n+1)]) | 35,967 |
def test_1(test_1_fixture):
    """First test case."""
    # store and remove the last item in dictionary.
    norm_q_2_true = test_1_fixture.popitem()
    p_measures = partial(mc_quantile_measures, **test_1_fixture)
    cases = zip(
        ["DLR", "DLR", "DLR", "DLR", "brute force"],
        [2 ** 6, 2 ** 9, 2 ** 10, 2 ** 13, 3000],
        [0, 1, 1, 2, 2],
    )
    for estimator, n_draws, decimal in cases:
        norm_q_2_solve = p_measures(estimator=estimator, n_draws=n_draws)
        assert_array_almost_equal(
            norm_q_2_solve.loc["Q_2"],
            norm_q_2_true[1],
            decimal=decimal,
        )
def delcolumn(particles, columns, metadata):
    """Return a copy of ``particles`` with the given columns removed.

    With dataframes, stating dataframe1 = dataframe2 only creates
    a reference.  Therefore, a copy is made so the original dataframe
    is left unmodified.

    Args:
        particles: input dataframe.
        columns: iterable of column names to delete; exits the program if a
            name is missing.
        metadata: metadata list whose fourth element (metadata[3]) is the
            list of column headers; mutated in place.

    Returns:
        tuple: (dataframe without the columns, updated metadata).
    """
    nocolparticles = particles.copy()
    # Loop through each passed column to delete them
    for c in columns:
        # Check if the column doesn't exist.
        # Consider doing the check in decisiontree.py
        if c not in nocolparticles:
            print("\n>> Error: the column \"" + c + "\" does not exist.\n")
            sys.exit()
        # Drop the column in place.  The keyword form is required because
        # pandas 2.0 removed the positional `axis` argument of drop().
        nocolparticles.drop(columns=c, inplace=True)
        # We need to remove that column header too; the headers are the
        # fourth metadata entry (i.e. metadata[3]).
        metadata[3].remove(c)
    return (nocolparticles, metadata)
def Rx_matrix(theta):
    """Rotation matrix around the X axis"""
    c = np.cos(theta)
    s = np.sin(theta)
    return np.array([
        [1, 0, 0],
        [0, c, -s],
        [0, s, c],
    ])
def spawn_actor(world: carla.World, blueprint: carla.ActorBlueprint, spawn_point: carla.Transform,
                attach_to: carla.Actor = None, attachment_type=carla.AttachmentType.Rigid) -> carla.Actor:
    """Tries to spawn an actor in a CARLA simulator.

    :param world: a carla.World instance.
    :param blueprint: specifies which actor has to be spawned.
    :param spawn_point: where to spawn the actor. A transform specifies the location and rotation.
    :param attach_to: whether the spawned actor has to be attached (linked) to another one.
    :param attachment_type: the kind of the attachment. Can be 'Rigid' or 'SpringArm'.
    :return: a carla.Actor instance.
    """
    spawned = world.try_spawn_actor(blueprint, spawn_point, attach_to, attachment_type)
    if spawned is not None:
        return spawned
    # try_spawn_actor returns None on failure (e.g. spawn point occupied).
    raise ValueError(f'Cannot spawn actor. Try changing the spawn_point ({spawn_point.location}) to something else.')
def partition(lst, size):
    """Partition @lst round-robin into @size sublists.

    Element i of @lst goes into sublist i % size, so the result is the
    @size interleaved strides of the input (NOT contiguous chunks of
    length @size, as the old docstring claimed).
    """
    return [lst[i::size] for i in range(size)]
def splitFile(file, chunk = 65536, count = 3, lanes = ('hi', 'lo')):
    """Split a ROM file from disk.

    See splitRom() arguments.

    Args:
        file: path of the ROM file to read; output files are written next
            to it as "{base}-{lane}{ext}".
    """
    base, ext = splitName(file)
    # Use context managers so file handles are closed promptly (the original
    # relied on the garbage collector to close them).
    with open(file, 'rb') as src:
        rom = splitRom(src.read(), chunk, count, lanes)
    for n in rom:
        with open(F"{base}-{n}{ext}", 'wb') as dst:
            dst.write(rom[n])
def form_errors_json(form=None):
    """It prints form errors as JSON."""
    if not form:
        return {}
    return mark_safe(dict(form.errors.items()))  # noqa: S703, S308
def test_text_handler(regexp, origin, postfix, expected):
    """Test text data handling."""
    pattern = re.compile(regexp)
    handler = get_text_handler(pattern, postfix)
    result = handler(origin)
    assert result == expected
def test_find_all_user_authorizations_for_empty(session):  # pylint:disable=unused-argument
    """Test with invalid user id and assert that auth is None."""
    member_user = factory_user_model()
    test_org = factory_org_model('TEST')
    factory_membership_model(member_user.id, test_org.id)
    found = Authorization.find_all_authorizations_for_user(str(member_user.keycloak_guid))
    assert found is not None
    assert len(found) == 0
def echo_bold(message):
    """Write a message in bold (if supported).

    Args:
        message (string): message to write in bold.
    """
    encoded = message.encode('utf8')
    _get_mfutil().mfutil_echo_bold(encoded)
def get_semantic_ocs_version_from_config():
    """
    Return the OCS semantic version taken from the config.

    Returns:
        semantic_version.base.Version: Object of semantic version for OCS.
    """
    ocs_version = config.ENV_DATA["ocs_version"]
    return get_semantic_version(ocs_version, True)
def test_pop(doubly_list):
    """Test that pop returns the value that was just pushed."""
    doubly_list.push(1)
    assert doubly_list.pop() == 1
def do_smooth(fileIn, fileOut, N=3, Wn=1e-3):
    """Smooth every column of a CSV file with a Butterworth filter.

    Args:
        fileIn (str): name of the input CSV file.
        fileOut (str): name of the output CSV file.
        N (int): filter order (accuracy).
        Wn (float): Nyquist frequency (half the signal sampling rate).
    """
    source = pd.read_csv(fileIn, index_col=0, dtype=np.double)
    result = pd.DataFrame(index=source.index)
    for column in source.keys():
        result[column] = smooth(source[column], N, Wn)[0]
    result.to_csv(fileOut)
def leanlauncher_launch_online(version, options):
    """Launch the specified version of the game in online mode.

    Parameters:
        version (str): the version of the game to launch
        options (Dictionary): used to specify login options for the game
    """
    command = minecraft_launcher_lib.command.get_minecraft_command(
        version, leanlauncher_download.DEFAULT_INSTALL_PATH, options)
    subprocess.call(command)
def train_rl(
    *,
    _run: sacred.run.Run,
    _seed: int,
    total_timesteps: int,
    normalize: bool,
    normalize_kwargs: dict,
    reward_type: Optional[str],
    reward_path: Optional[str],
    rollout_save_final: bool,
    rollout_save_n_timesteps: Optional[int],
    rollout_save_n_episodes: Optional[int],
    policy_save_interval: int,
    policy_save_final: bool,
) -> Mapping[str, float]:
    """Trains an expert policy from scratch and saves the rollouts and policy.
    Checkpoints:
      At applicable training steps `step` (where step is either an integer or
      "final"):
        - Policies are saved to `{log_dir}/policies/{step}/`.
        - Rollouts are saved to `{log_dir}/rollouts/{step}.pkl`.
    Args:
        total_timesteps: Number of training timesteps in `model.learn()`.
        normalize: If True, then rescale observations and reward.
        normalize_kwargs: kwargs for `VecNormalize`.
        reward_type: If provided, then load the serialized reward of this type,
            wrapping the environment in this reward. This is useful to test
            whether a reward model transfers. For more information, see
            `imitation.rewards.serialize.load_reward`.
        reward_path: A specifier, such as a path to a file on disk, used by
            reward_type to load the reward model. For more information, see
            `imitation.rewards.serialize.load_reward`.
        rollout_save_final: If True, then save rollouts right after training is
            finished.
        rollout_save_n_timesteps: The minimum number of timesteps saved in every
            file. Could be more than `rollout_save_n_timesteps` because
            trajectories are saved by episode rather than by transition.
            Must set exactly one of `rollout_save_n_timesteps`
            and `rollout_save_n_episodes`.
        rollout_save_n_episodes: The number of episodes saved in every
            file. Must set exactly one of `rollout_save_n_timesteps` and
            `rollout_save_n_episodes`.
        policy_save_interval: The number of training updates between in between
            intermediate rollout saves. If the argument is nonpositive, then
            don't save intermediate updates.
        policy_save_final: If True, then save the policy right after training is
            finished.
    Returns:
        The return value of `rollout_stats()` using the final policy.
    """
    custom_logger, log_dir = common.setup_logging()
    rollout_dir = osp.join(log_dir, "rollouts")
    policy_dir = osp.join(log_dir, "policies")
    os.makedirs(rollout_dir, exist_ok=True)
    os.makedirs(policy_dir, exist_ok=True)
    # Wrap each env so complete episode trajectories are recorded for rollouts.
    venv = common.make_venv(
        post_wrappers=[lambda env, idx: wrappers.RolloutInfoWrapper(env)],
    )
    callback_objs = []
    if reward_type is not None:
        # Replace the environment reward with a serialized (e.g. learned) one.
        reward_fn = load_reward(reward_type, reward_path, venv)
        venv = RewardVecEnvWrapper(venv, reward_fn)
        callback_objs.append(venv.make_log_callback())
        logging.info(f"Wrapped env in reward {reward_type} from {reward_path}.")
    vec_normalize = None
    if normalize:
        venv = vec_normalize = VecNormalize(venv, **normalize_kwargs)
    if policy_save_interval > 0:
        # Save intermediate policy checkpoints every `policy_save_interval`
        # timesteps; nonpositive disables intermediate saves.
        save_policy_callback = serialize.SavePolicyCallback(policy_dir, vec_normalize)
        save_policy_callback = callbacks.EveryNTimesteps(
            policy_save_interval,
            save_policy_callback,
        )
        callback_objs.append(save_policy_callback)
    callback = callbacks.CallbackList(callback_objs)
    rl_algo = rl.make_rl_algo(venv)
    rl_algo.set_logger(custom_logger)
    rl_algo.learn(total_timesteps, callback=callback)
    # Save final artifacts after training is complete.
    if rollout_save_final:
        save_path = osp.join(rollout_dir, "final.pkl")
        sample_until = rollout.make_sample_until(
            rollout_save_n_timesteps,
            rollout_save_n_episodes,
        )
        rollout.rollout_and_save(save_path, rl_algo, venv, sample_until)
    if policy_save_final:
        output_dir = os.path.join(policy_dir, "final")
        serialize.save_stable_model(output_dir, rl_algo, vec_normalize)
    # Final evaluation of expert policy.
    return train.eval_policy(rl_algo, venv)
def get_non_ntile_cols(frame: pd.DataFrame) -> List[str]:
    """
    :param frame: data frame to get columns of
    :return: all columns in the frame that dont contain 'Ntile'
    """
    kept = []
    for name in frame.columns:
        if 'Ntile' not in name:
            kept.append(name)
    return kept
def deserialize_date(value: Any) -> Optional[datetime.datetime]:
    """A flexible converter for str -> datetime.datetime"""
    if value is None:
        return None
    if isinstance(value, datetime.datetime):
        return value
    if not isinstance(value, str):
        raise TypeError(f'Invalid type {type(value)} for date property')
    # datetime.datetime.fromisoformat(...) can't parse Notion's dates,
    # and, anyway, this is faster
    return ciso8601.parse_datetime(value)
def watch_directory():
    """Watch directory by recursing into it every EVERY_SECONDS_WATCH.

    Compare size and mtimes between periods. Is responsible for converting on startup.
    Yields from run() whenever the fingerprint of the source directory
    changes, and sleeps in 1-second ticks so the SHUTDOWN signal is honored
    promptly.
    """
    log = logging.getLogger(__name__)
    previous_hash = None
    array = bytearray()
    # NOTE(review): range(EVERY_SECONDS_WATCH, 15, -35) yields at most one
    # value, so this "ramp up" only shortens the very first sleep — confirm
    # whether a longer ramp was intended.
    ramp_up = list(range(EVERY_SECONDS_WATCH, 15, -35))
    while True:
        sleep_for = ramp_up.pop() if ramp_up else EVERY_SECONDS_WATCH
        source_dir = GLOBAL_MUTABLE_CONFIG['--music-source']  # Keep in loop for when update_config() is called.
        # Fingerprint the directory: hash the sorted (path, size, mtime)
        # triples of every file under the source dir.
        for i in sorted((p, s.st_size, s.st_mtime) for p, s in ((p, os.stat(p)) for p in walk_source(source_dir))):
            array.extend(str(i).encode('utf-8'))
        current_hash = hashlib.md5(array).hexdigest()
        array.clear()
        if current_hash != previous_hash:
            log.debug('watch_directory() file system changed, calling run().')
            yield from run()
            previous_hash = current_hash
        else:
            log.debug('watch_directory() no change in file system, not calling run().')
        log.debug('watch_directory() sleeping %d seconds.', sleep_for)
        # Sleep one second at a time so a shutdown request is seen quickly.
        for _ in range(sleep_for):
            yield from asyncio.sleep(1)
            if SHUTDOWN.done():
                log.debug('watch_directory() saw shutdown signal.')
                return
        log.debug('watch_directory() waking up.')
def intersect(x1, x2, y1, y2, a1, a2, b1, b2):
    """ Return True if (x1,x2,y1,y2) rectangles intersect. """
    # Rectangles intersect iff their x-ranges and y-ranges both overlap.
    # NOTE(review): uses `&` rather than `and` — presumably so this works
    # elementwise when `overlap` returns array-like booleans; confirm.
    return overlap(x1, x2, a1, a2) & overlap(y1, y2, b1, b2)
def BestEffort(func):
    """Decorator to log and dismiss exceptions if one is already being handled.

    Largely a workaround for the lack of exception chaining in Python 2.7;
    no longer needed in Python 3.

    Typical usage is on |Close| or |Disconnect| style cleanup methods: when
    the current execution context is already handling an exception, any
    further exception raised by the decorated function is logged and
    dismissed, so the original (usually more important) exception is the one
    that propagates to the caller. For example:

        class Client(object):
            def Connect(self):
                # code to connect ...

            @exc_util.BestEffort
            def Disconnect(self):
                # code to disconnect ...

        client = Client()
        try:
            client.Connect()
        except:
            client.Disconnect()
            raise

    If Connect() raises and then Disconnect() also raises, the decorator logs
    the second exception and lets the original one be re-raised. When the
    decorated function is called while *no* exception is being handled, its
    exceptions propagate normally.

    When used in a `finally` cleanup, pair it with an except-raise clause so
    Python 2.7 can tell an exception is in flight:

        @exc_util.BestEffort
        def cleanup():
            # do cleanup things ...

        try:
            process(thing)
        except:
            raise  # Needed to let cleanup know if an exception is being handled.
        finally:
            cleanup()

    Omitting the except-raise block has the same effect as omitting the
    decorator: cleanup errors swallow any prior processing errors.
    """
    @functools.wraps(func)
    def Wrapper(*args, **kwargs):
        exc_type = sys.exc_info()[0]
        if exc_type is not None:
            # Already handling an exception: dismiss and log any cascading
            # errors; callers are responsible for the original exception.
            try:
                func(*args, **kwargs)
            except Exception:  # pylint: disable=broad-except
                logging.exception(
                    'While handling a %s, the following exception was also raised:',
                    exc_type.__name__)
        else:
            # No exception in flight: let errors raise as usual.
            func(*args, **kwargs)
    return Wrapper
def recording_to_chunks(fingerprints: np.ndarray,
                        samples_per_chunk: int) -> List[np.ndarray]:
    """Breaks fingerprints of a recording into fixed-length chunks.

    A trailing partial chunk (fewer than `samples_per_chunk` rows) is
    discarded.
    """
    total = len(fingerprints)
    return [
        fingerprints[start:start + samples_per_chunk]
        for start in range(0, total, samples_per_chunk)
        if start + samples_per_chunk <= total
    ]
def coset_enumeration_c(fp_grp, Y):
    """
    Coset enumeration using the coincidence/deduction based strategy.

    >>> from sympy.combinatorics.free_group import free_group
    >>> from sympy.combinatorics.fp_groups import FpGroup, coset_enumeration_c
    >>> F, x, y = free_group("x, y")
    >>> f = FpGroup(F, [x**3, y**3, x**-1*y**-1*x*y])
    >>> C = coset_enumeration_c(f, [x])
    >>> C.table
    [[0, 0, 1, 2], [1, 1, 2, 0], [2, 2, 0, 1]]
    """
    # Initialize a coset table C for < X|R >
    C = CosetTable(fp_grp, Y)
    X = fp_grp.generators
    R = fp_grp.relators()
    A = C.A
    # replace all the elements by cyclic reductions
    R_cyc_red = [rel.identity_cyclic_reduction() for rel in R]
    # Collect all cyclic conjugates of the relators and their inverses.
    R_c = list(chain.from_iterable((rel.cyclic_conjugates(), (rel**-1).cyclic_conjugates()) \
            for rel in R_cyc_red))
    R_set = set()
    for conjugate in R_c:
        R_set = R_set.union(conjugate)
    # a list of subsets of R_c whose words start with "x".
    R_c_list = []
    for x in C.A:
        r = set([word for word in R_set if word[0] == x])
        R_c_list.append(r)
        R_set.difference_update(r)
    # Scan the subgroup generators first, then process the deductions they
    # produce for every alphabet symbol.
    for w in Y:
        C.scan_and_fill_f(0, w)
    for x in A:
        C.process_deductions(R_c_list[C.A_dict[x]], R_c_list[C.A_dict_inv[x]])
    # Walk the live cosets, defining new cosets for empty table entries and
    # processing the resulting deductions until the table is complete.
    i = 0
    while i < len(C.omega):
        alpha = C.omega[i]
        i += 1
        for x in C.A:
            if C.table[alpha][C.A_dict[x]] is None:
                C.define_f(alpha, x)
                C.process_deductions(R_c_list[C.A_dict[x]], R_c_list[C.A_dict_inv[x]])
    return C
def slr_pulse(
        num=N, time_bw=TBW,
        ptype=PULSE_TYPE, ftype=FILTER_TYPE,
        d_1=PBR, d_2=SBR,
        root_flip=ROOT_FLIP,
        multi_band = MULTI_BAND,
        n_bands = N_BANDS,
        phs_type = PHS_TYPE,
        band_sep = BAND_SEP
        ):
    """Use Shinnar-Le Roux algorithm to generate pulse.

    Defaults come from the module-level configuration constants.

    Args:
        num: number of pulse samples.
        time_bw: time-bandwidth product.
        ptype: pulse type passed to the SLR designer.
        ftype: filter type passed to the SLR designer.
        d_1: passband ripple.
        d_2: stopband ripple.
        root_flip: if True, use the root-flipped design instead of dzrf.
        multi_band: if True, modulate the pulse into multiple bands.
        n_bands: number of bands for the multiband modulation.
        phs_type: phase schedule type for the multiband modulation.
        band_sep: band separation for the multiband modulation.

    Returns:
        Tuple (amp_arr, freq_arr, phs_arr, amp_arr_abs): raw amplitude,
        per-sample frequency increments, 0/180-degree phase array, and the
        non-negative amplitude array prepared for the instrument.
    """
    if root_flip is False:
        complex_pulse = rf.dzrf(n=num, tb=time_bw, ptype=ptype, ftype=ftype, d1=d_1, d2=d_2)
        amp_arr = complex_pulse
    else:
        amp_arr, b_rootflip = slr_rootflip(ROOT_FLIP_ANGLE)
    # Encode the sign of each sample as 0/180-degree phase so the amplitude
    # can later be made non-negative for the instrument.
    phs_arr = np.zeros(num)
    for idx in range(num):
        if amp_arr[idx] < 0:
            phs_arr[idx] = 180
        else:
            phs_arr[idx] = 0
    if multi_band is True:
        amp_arr = rf.multiband.mb_rf(amp_arr, n_bands, band_sep, phs_type)
    # prepare pulse for instrument, which takes absolute only
    # cast negative values to positive
    amp_arr_abs = np.abs(amp_arr)
    # shift amplitude such that the lowest value is 0
    amp_arr_abs = amp_arr_abs - amp_arr_abs.min()
    # fold back phase when it exceeds 360
    phs_arr = phs_arr % 360
    freq_arr = (np.diff(phs_arr)/num)/360
    return amp_arr, freq_arr, phs_arr, amp_arr_abs
def test_user_not_banned(fixture_message):
    """Check that the fixture-provided message is not flagged as banned."""
    assert fixture_message.banned is False
def policy_options(state, Q_omega, epsilon=0.1):
    """Epsilon-greedy selection over the option-value table ``Q_omega``.

    With probability ``epsilon`` a uniformly random option index is drawn;
    otherwise the greedy (argmax) option for ``state`` is returned.
    """
    n_options = Q_omega.shape[1]
    if np.random.uniform() >= epsilon:
        # Exploit: pick the highest-valued option for this state.
        return np.argmax(Q_omega[state])
    # Explore: pick an option uniformly at random.
    return np.random.choice(range(n_options))
def test_input2() -> None:
    """Run the program with two positional words and compare against the fixture."""
    words = ['Hello', 'there']
    run_test(words, './tests/expected/hello2.txt')
def test_auto_repr(sample):
    """
    Test that the symmetry group created with the automatic representation
    matrix matches a reference.
    """
    pos_In = (0, 0, 0)  # pylint: disable=invalid-name
    pos_As = (0.25, 0.25, 0.25)  # pylint: disable=invalid-name
    # Build the orbital basis: s + p orbitals on the In site and p orbitals
    # on the As site, for each spin direction.
    # BUG FIX: the As-site extend previously sat outside the spin loop and
    # used the leaked loop variable, so As p-orbitals were only created for
    # the last spin value; both sites must be populated once per spin.
    orbitals = []
    for spin in (sr.SPIN_UP, sr.SPIN_DOWN):
        orbitals.extend([
            sr.Orbital(position=pos_In, function_string=fct, spin=spin)
            for fct in sr.WANNIER_ORBITALS['s'] + sr.WANNIER_ORBITALS['p']
        ])
        orbitals.extend([
            sr.Orbital(position=pos_As, function_string=fct, spin=spin)
            for fct in sr.WANNIER_ORBITALS['p']
        ])
    # Symmetry operations of InAs in reduced and cartesian coordinates.
    symops, symops_cart = mg.loadfn(sample('InAs_symops.json'))
    # Construct the group, letting the representation matrices be derived
    # automatically (numeric=True) from the orbital basis.
    symmetry_group = sr.SymmetryGroup(
        symmetries=[
            sr.SymmetryOperation.from_orbitals(
                orbitals=orbitals,
                real_space_operator=sr.RealSpaceOperator.
                from_pymatgen(sym_reduced),
                rotation_matrix_cartesian=sym_cart.rotation_matrix,
                numeric=True
            ) for sym_reduced, sym_cart in zip(symops, symops_cart)
        ],
        full_group=True
    )
    reference = sr.io.load(sample('symmetries_InAs.hdf5'))
    # Compare against the stored reference, operation by operation.
    assert symmetry_group.full_group == reference.full_group
    for sym1, sym2 in zip(symmetry_group.symmetries, reference.symmetries):
        assert_allclose(
            sym1.real_space_operator.rotation_matrix,
            sym2.real_space_operator.rotation_matrix,
            atol=1e-12
        )
        assert_allclose(
            sym1.real_space_operator.translation_vector,
            sym2.real_space_operator.translation_vector,
            atol=1e-12
        )
        assert sym1.repr.has_cc == sym2.repr.has_cc
        assert_allclose(sym1.repr.matrix, sym2.repr.matrix, atol=1e-12)
def test_ps_s3_creation_triggers_on_master():
    """ test object creation s3 notifications in using put/copy/post on master"""
    if skip_push_tests:
        return SkipTest("PubSub push tests don't run in teuthology")
    hostname = get_ip()
    proc = init_rabbitmq()
    if proc is None:
        return SkipTest('end2end amqp tests require rabbitmq-server installed')
    zones, _ = init_env(require_ps=False)
    realm = get_realm()
    zonegroup = realm.master_zonegroup()
    # create bucket
    bucket_name = gen_bucket_name()
    bucket = zones[0].create_bucket(bucket_name)
    topic_name = bucket_name + TOPIC_SUFFIX
    # start amqp receiver
    exchange = 'ex1'
    task, receiver = create_amqp_receiver_thread(exchange, topic_name)
    task.start()
    # create s3 topic
    endpoint_address = 'amqp://' + hostname
    endpoint_args = 'push-endpoint='+endpoint_address+'&amqp-exchange=' + exchange +'&amqp-ack-level=broker'
    topic_conf = PSTopicS3(zones[0].conn, topic_name, zonegroup.name, endpoint_args=endpoint_args)
    topic_arn = topic_conf.set_config()
    # create s3 notification
    notification_name = bucket_name + NOTIFICATION_SUFFIX
    # NOTE(review): multipart completion emits s3:ObjectCreated:CompleteMultipartUpload,
    # which is not in this filter -- confirm the receiver is expected to see an
    # event for the 'multipart' key with only Put/Copy subscribed.
    topic_conf_list = [{'Id': notification_name,'TopicArn': topic_arn,
                        'Events': ['s3:ObjectCreated:Put', 's3:ObjectCreated:Copy']
                       }]
    s3_notification_conf = PSNotificationS3(zones[0].conn, bucket_name, topic_conf_list)
    response, status = s3_notification_conf.set_config()
    assert_equal(status/100, 2)
    # create objects in the bucket using PUT
    key = bucket.new_key('put')
    key.set_contents_from_string('bar')
    # create objects in the bucket using COPY
    bucket.copy_key('copy', bucket.name, key.name)
    # create objects in the bucket using multi-part upload
    # BUG FIX: the original wrote 'bar' into a TemporaryFile that was then
    # closed (and deleted) and uploaded a *different*, empty NamedTemporaryFile.
    # Write, flush and rewind a single file so the uploaded part has content.
    fp = tempfile.NamedTemporaryFile(mode='w+')
    fp.write('bar')
    fp.flush()
    fp.seek(0)
    uploader = bucket.initiate_multipart_upload('multipart')
    uploader.upload_part_from_file(fp, 1)
    uploader.complete_upload()
    fp.close()
    print('wait for 5sec for the messages...')
    time.sleep(5)
    # check amqp receiver saw exactly one event per created key
    keys = list(bucket.list())
    receiver.verify_s3_events(keys, exact_match=True)
    # cleanup
    stop_amqp_receiver(receiver, task)
    s3_notification_conf.del_config()
    topic_conf.del_config()
    for key in bucket.list():
        key.delete()
    # delete the bucket
    zones[0].delete_bucket(bucket_name)
    clean_rabbitmq(proc)
def weights_init(init_type='gaussian'):
    """Return a module initializer for use with ``nn.Module.apply``.

    from https://github.com/naoto0804/pytorch-inpainting-with-partial-conv/blob/master/net.py

    Args:
        init_type: one of 'gaussian', 'xavier', 'kaiming', 'orthogonal', or
            'default' (leave the module's built-in initialization untouched).

    Returns:
        A function ``init_fun(m)`` that re-initializes the weights of Conv*
        and Linear modules in place and zeroes their biases.

    Raises:
        ValueError: (from the returned function) if ``init_type`` is not one
            of the supported schemes.
    """
    def init_fun(m):
        classname = m.__class__.__name__
        # Only touch convolution / linear layers that actually carry weights.
        if (classname.startswith('Conv')
                or classname.startswith('Linear')) and hasattr(m, 'weight'):
            if init_type == 'gaussian':
                nn.init.normal_(m.weight, 0.0, 0.02)
            elif init_type == 'xavier':
                nn.init.xavier_normal_(m.weight, gain=math.sqrt(2))
            elif init_type == 'kaiming':
                nn.init.kaiming_normal_(m.weight, a=0, mode='fan_in')
            elif init_type == 'orthogonal':
                nn.init.orthogonal_(m.weight, gain=math.sqrt(2))
            elif init_type == 'default':
                pass
            else:
                # A bare `assert 0` would be stripped under `python -O`,
                # silently accepting bogus schemes; raise explicitly instead.
                raise ValueError(
                    "Unsupported initialization: {}".format(init_type))
            if hasattr(m, 'bias') and m.bias is not None:
                nn.init.constant_(m.bias, 0.0)
    return init_fun
def title(default=None, level="header"):
    """
    A decorator that adds optional ``title`` / ``header`` / ``subheader``
    keyword arguments to a component, rendering the heading before the
    wrapped function runs.
    """
    def decorator(fn):
        # Fall back to the function's own `where` default, then to streamlit.
        target = get_argument_default(fn, "where", None) or st
        @wraps(fn)
        def wrapped(*args, title=default, level=level, header=None,
                    subheader=None, where=target, **kwargs):
            # Render at most one heading, in priority order.
            if header:
                where.header(str(header))
            elif subheader:
                where.subheader(str(subheader))
            elif title:
                renderers = {
                    "header": where.header,
                    "subheader": where.subheader,
                    "bold": lambda text: where.markdown(f"**{text}**"),
                }
                if level not in renderers:
                    raise ValueError(f"invalid title level: {level!r}")
                renderers[level](str(title))
            kwargs["where"] = where
            return fn(*args, **kwargs)
        return wrapped
    return decorator
def get_ext(path):
    """
    Return the file extension of a path (including the leading dot,
    empty string if there is none).

    **Positional Arguments:**
        path: The file whose path we assess
    """
    _root, extension = os.path.splitext(path)
    return extension
def _rect_to_css(rect):
    """
    Convert a dlib 'rect' object to a plain tuple in (top, right, bottom, left) order
    :param rect: a dlib 'rect' object
    :return: a plain tuple representation of the rect in (top, right, bottom, left) order
    """
    return tuple(getattr(rect, side)()
                 for side in ("top", "right", "bottom", "left"))
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.