content stringlengths 22 815k | id int64 0 4.91M |
|---|---|
def get_keys():
    """Yield key-lookup callables for the CRAIGSLIST metadata class.

    Yields:
        A single callable that takes a ``selector`` and returns the
        matching keys via ``base.find_keys``.
    """
    # NOTE(review): yields exactly one lambda; presumably a framework
    # iterates such providers -- confirm against callers.
    yield lambda selector: base.find_keys(metadata.CRAIGSLIST, selector)
def With(prop, val):
    """The 'with <property> <value>' specifier.

    Builds a ``Specifier`` for the given property and value, declaring
    no dependencies.
    """
    specifier = Specifier(prop, val)
    return specifier
def bbox_artist(artist, renderer, props=None, fill=True):
    """
    Draw a debug rectangle around the bounding box reported by an
    artist's `.Artist.get_window_extent`, to check that the artist is
    returning the correct bbox.

    *props* is a dict of rectangle properties; the extra key 'pad'
    (default 4) sets the padding around the bbox, in points.
    """
    # Copy so the caller's dict is never mutated when we pop 'pad'.
    rect_props = {} if props is None else props.copy()
    pad_px = renderer.points_to_pixels(rect_props.pop('pad', 4))
    extent = artist.get_window_extent(renderer)
    debug_rect = Rectangle(
        xy=(extent.x0 - pad_px / 2, extent.y0 - pad_px / 2),
        width=extent.width + pad_px,
        height=extent.height + pad_px,
        fill=fill,
        transform=transforms.IdentityTransform(),
        clip_on=False,
    )
    debug_rect.update(rect_props)
    debug_rect.draw(renderer)
def pad_node_id(node_id: np.uint64) -> str:
    """Zero-pad a node id to a 20-digit decimal string.

    :param node_id: unsigned integer node identifier
    :return: 20-character, zero-padded decimal string
    """
    return f"{int(node_id):020d}"
def test_list_any_uri_enumeration_nistxml_sv_iv_list_any_uri_enumeration_1_1(mode, save_output, output_format):
    """
    Type list/anyURI is restricted by facet enumeration.

    Runs the NIST XSD/XML conformance pair below through
    ``assert_bindings`` in the parametrized mode/output format.
    """
    assert_bindings(
        schema="nistData/list/anyURI/Schema+Instance/NISTSchema-SV-IV-list-anyURI-enumeration-1.xsd",
        instance="nistData/list/anyURI/Schema+Instance/NISTXML-SV-IV-list-anyURI-enumeration-1-1.xml",
        class_name="NistschemaSvIvListAnyUriEnumeration1",
        version="1.1",
        mode=mode,
        save_output=save_output,
        output_format=output_format,
        structure_style="filenames",
    )
async def test_not_200_ok(status_code: int) -> None:
    """Responses that don't have status code 200 should not be cached."""
    cache = Cache("locmem://null")
    spy = CacheSpy(PlainTextResponse("Hello, world!", status_code=status_code))
    app = CacheMiddleware(spy, cache=cache)
    client = httpx.AsyncClient(app=app, base_url="http://testserver")
    async with cache, client:
        r = await client.get("/")
        assert r.status_code == status_code
        assert r.text == "Hello, world!"
        # No caching headers must be added to a non-200 response.
        assert "Expires" not in r.headers
        assert "Cache-Control" not in r.headers
        assert spy.misses == 1
        # A second identical request must hit the app again (another miss),
        # proving the response was never stored in the cache.
        r1 = await client.get("/")
        assert ComparableHTTPXResponse(r1) == r
        assert spy.misses == 2
def ballcurve(x: ArrayLike, xi: float) -> ArrayLike:
    """Curve generator for the nested structure.

    Evaluates ``1 - (1 - x**(1/xi))**xi``. The shape parameter ``xi``
    controls how stylised the curve is; ``xi == 1`` gives the identity
    (linear) curve.

    :param x: values in [0, 1] to evaluate (scalar or 1-D array)
    :param xi: shape parameter, >= 1
    :return: evaluated curve, same shape as ``x``, values in [0, 1]
    """
    inner = 1 - x ** (1.0 / xi)
    return 1 - inner ** xi
def plot_attention(src_words: Union[np.ndarray, Sequence[str]],
                   trg_words: Sequence[str],
                   attention_matrix: np.ndarray,
                   file_name: str,
                   size_x: numbers.Real = 8.0,
                   size_y: numbers.Real = 8.0) -> None:
    """This takes in source and target words and an attention matrix (in numpy format)
    and prints a visualization of this to a file.

    Args:
      src_words: a list of words in the source; alternatively, a numpy array containing speech features.
      trg_words: a list of target words
      attention_matrix: a two-dimensional numpy array of values between zero and one,
        where rows correspond to source words, and columns correspond to target words
      file_name: the name of the file to which we write the attention
      size_x: width of the main plot
      size_y: height of the plot
    """
    # unidecode maps non-ASCII words to ASCII so matplotlib can render them
    trg_words = [unidecode(w) for w in trg_words]
    src_is_speech = isinstance(src_words, np.ndarray)
    max_len = len(''.join(trg_words))
    if not src_is_speech:
        max_len = max(max_len, len(''.join(src_words)))
        src_words = [unidecode(w) for w in src_words]
    # Shrink fonts / raise dpi for long sentences so labels stay legible.
    if max_len>150: matplotlib.rc('font', size=5)
    elif max_len>50: matplotlib.rc('font', size=7)
    dpi = 100 if max_len <= 150 else 150
    # For speech input, an extra left-hand column holds the feature plot.
    fig, axs = plt.subplots(nrows=1, ncols=2 if src_is_speech else 1,
                            figsize=(size_x+(1.0 if src_is_speech else 0.0), size_y),
                            gridspec_kw = {'width_ratios':[1, size_x]} if src_is_speech else None)
    ax = axs[1] if src_is_speech else axs
    # put the major ticks at the middle of each cell
    ax.set_xticks(np.arange(attention_matrix.shape[1]) + 0.5, minor=False)
    ax.set_yticks(np.arange(attention_matrix.shape[0]) + 0.5, minor=False)
    ax.invert_yaxis()
    if src_is_speech: plt.yticks([], [])
    # label axes by words
    ax.set_xticklabels(trg_words, minor=False)
    if not src_is_speech: ax.set_yticklabels(src_words, minor=False)
    ax.xaxis.tick_top()
    # draw the heatmap
    plt.pcolor(attention_matrix, cmap=plt.cm.Blues, vmin=0, vmax=1)
    plt.colorbar()
    if src_is_speech:
        ax = axs[0]
        plot_speech_features(feature_matrix=src_words, ax=ax, dpi=dpi)
    fig.tight_layout()
    utils.make_parent_dir(file_name)
    plt.savefig(file_name, dpi=dpi)
    plt.close()
def event_log(time, name):
    """Print an event to stdout (for development/testing only)."""
    print(f'Event {name}, happened at {time}')
def accept_incoming_connections():
    """Sets up handling for incoming clients.

    Runs forever: accepts each new socket from the module-level SERVER,
    appends a line to the "connections_log" file, greets the client,
    records its address in the module-level `addresses` dict and spawns
    a Thread running `handle_client` for it.
    """
    while True:
        client, client_address = SERVER.accept()
        print("%s:%s has connected." % client_address)
        # NOTE(review): log file is re-opened per connection; an exception
        # between open() and close() would leak the handle -- consider `with`.
        fc = open("connections_log", "a+")
        fc.write("%s:%s has connected.\n" % client_address)
        fc.close()
        client.send(bytes("Hello! You are using Vchat.Please give your name below first and proceed..", "utf8"))
        addresses[client] = client_address
        Thread(target=handle_client, args=(client,)).start()
def center_vertices(vertices, faces, flip_y=True):
    """
    Centroid-align a mesh.

    Args:
        vertices (V x 3): Vertex tensor.
        faces (F x 3): Face index tensor.
        flip_y (bool): When True, negates the y coordinates (and reverses
            the face winding) to keep with image coordinates convention.

    Returns:
        (vertices, faces) with the vertex centroid moved to the origin.
    """
    centroid = vertices.mean(dim=0, keepdim=True)
    centered = vertices - centroid
    if flip_y:
        centered[:, 1] *= -1
        # Reverse winding order so face orientation stays consistent
        # after the y-axis flip.
        faces = faces[:, [2, 1, 0]]
    return centered, faces
def test_schema_selection(
    schema: dict,
    mask: singer.SelectionMask,
    stream_name: str,
):
    """Test that schema selection rules are correctly applied to SCHEMA messages.

    Builds the selected schema from the mask and compares its properties
    against the expected PropertiesList (col_a with two nested fields
    selected, plus col_d..col_f).
    """
    selected_schema = get_selected_schema(
        stream_name,
        schema,
        mask,
        logging.getLogger(),
    )
    # selected_schema["properties"]["required"] = []
    assert (
        selected_schema["properties"]
        == PropertiesList(
            Property(
                "col_a",
                ObjectType(
                    Property("col_a_1", StringType),
                    Property("col_a_3", StringType),
                ),
            ),
            Property("col_d", StringType),
            Property("col_e", StringType),
            Property("col_f", StringType),
        ).to_dict()["properties"]
    )
def start_client(page_to_load, PORT=8563):
    """
    Starts Python-Eel client and loads the page passed in
    'page_to_load', on port 'PORT'.

    Falls back in two steps when the default browser launch fails:
    retries with the 'get_chrome.html' page in chrome-app mode, then
    the same page in edge mode as a last resort.

    RETURNS: None
    """
    print("in start client")
    try:
        eel.start(page_to_load, port=PORT)
    except Exception:
        # Default mode failed (e.g. no suitable browser installed):
        # swap in the "get chrome" page and try alternative modes.
        page_to_load = 'get_chrome.html'
        try:
            eel.start(page_to_load, port=PORT, mode='chrome-app')
        except Exception:
            eel.start(page_to_load, port=PORT, mode='edge')
def test_environ(script, tmpdir):
    """$PYTHONWARNINGS was added in python2.7

    Deprecation warnings are routed to the pip deprecations logger by
    default; setting $PYTHONWARNINGS=ignore must suppress them entirely.
    """
    demo = tmpdir.join('warnings_demo.py')
    demo.write('''
from pip._internal.utils import deprecation
deprecation.install_warning_logger()
from logging import basicConfig
basicConfig()
from warnings import warn
warn("deprecated!", deprecation.PipDeprecationWarning)
''')
    result = script.run('python', demo, expect_stderr=True)
    assert result.stderr == \
        'ERROR:pip._internal.deprecations:DEPRECATION: deprecated!\n'
    # With warnings ignored, nothing should reach stderr.
    script.environ['PYTHONWARNINGS'] = 'ignore'
    result = script.run('python', demo)
    assert result.stderr == ''
def metadata_validator(form, value):
    """
    Validates that custom citations are correct.

    :param form: Metadata schemas/form to validate
    :param value: Appstruct/values passed in for validation.
    :return: None, raise a colander.Invalid exception if the validation fails.
    """
    error = False
    exc = colander.Invalid(form)  # Uncomment to add a block message: , 'At least 1 research theme or Not aligned needs to be selected')
    # popitem()/re-insert is only used to discover an arbitrary key so the
    # "<prefix>:" key prefix can be extracted; the dict is left unchanged.
    item = value.popitem()
    value[item[0]] = item[1]
    key = item[0][:item[0].rindex(":")+1]
    if '%scustom_citation' % key in value and value['%scustom_citation' % key]:
        citation = value["%scitation" % key]
        citation_key = "%scitation:" % key
        exc["%scitation" % key] = "Invalid custom citation."
        if citation['%scitation_title' % citation_key] is None or len(citation['%scitation_title' % citation_key]) == 0:
            exc.children[0]['%scitation_title' % citation_key] = "Required"
        if not isinstance(citation['%scitation_publish_date' % citation_key], (date, datetime)):
            exc.children[0]['%scitation_publish_date' % citation_key] = "Required"
        # BUG FIX: the emptiness check previously measured the length of
        # citation_title instead of citation_creators, so an empty creators
        # list slipped through whenever a title was present.
        if not isinstance(citation['%scitation_creators' % citation_key], list) or len(citation['%scitation_creators' % citation_key]) == 0:
            exc.children[0]['%scitation_creators' % citation_key] = "Required"
        if citation['%scitation_edition' % citation_key] is None or len(citation['%scitation_edition' % citation_key]) == 0:
            exc.children[0]['%scitation_edition' % citation_key] = "Required"
        if citation['%scitation_publisher' % citation_key] is None or len(citation['%scitation_publisher' % citation_key]) == 0:
            exc.children[0]['%scitation_publisher' % citation_key] = "Required"
        if citation['%scitation_place_of_publication' % citation_key] is None or len(citation['%scitation_place_of_publication' % citation_key]) == 0:
            exc.children[0]['%scitation_place_of_publication' % citation_key] = "Required"
        if citation['%scitation_url' % citation_key] is None or len(citation['%scitation_url' % citation_key]) == 0:
            exc.children[0]['%scitation_url' % citation_key] = "Required"
        if citation['%scitation_data_type' % citation_key] is None or len(citation['%scitation_data_type' % citation_key]) == 0:
            exc.children[0]['%scitation_data_type' % citation_key] = "Required"
        error = True
    if error:
        raise exc
def fingerprint_atompair(fpSize=2048, count=False):
    """Atom pair fingerprint (list of int).

    Args:
        fpSize: Size of the generated fingerprint (defaults to 2048).
        count: When False (the default) the fingerprint is returned as
            bits (0 or 1); when True the count of each fingerprint value
            is returned instead.
    """
    generator = rdFingerprintGenerator.GetAtomPairGenerator(fpSize=fpSize)
    make_fn = _fingerprint_fn_count if count else _fingerprint_fn_bits
    fingerprint_fn = make_fn(generator)
    # Embed the parameters in the name for logging/reporting purposes.
    fingerprint_fn.__name__ = f'fingerprint_atompair(fpSize={fpSize},count={count})'
    return fingerprint_fn
def bend_euler_s(**kwargs) -> Component:
    """S-bend built from two mirrored euler bends."""
    component = Component()
    bend = bend_euler(**kwargs)
    first = component.add_ref(bend)
    second = component.add_ref(bend)
    second.mirror()
    second.connect("o1", first.ports["o2"])
    component.add_port("o1", port=first.ports["o1"])
    component.add_port("o2", port=second.ports["o2"])
    return component
def main():
    """Function: main

    Description: Initializes program-wide used variables and processes command
        line arguments and values.

    Variables:
        dir_chk_list -> contains options which will be directories.
        func_dict -> dictionary list for the function calls or other options.
        opt_arg_list -> contains arguments to add to command line by default.
        opt_con_req_list -> contains the options that require other options.
        opt_req_list -> contains the options that are required for the program.
        opt_val_list -> contains options which require values.
        opt_valid_val -> contains list of types of values to be validated.
        opt_xor_dict -> contains options which are XOR with its values.

    Arguments:
        (input) argv -> Arguments from the command line.
    """
    cmdline = gen_libs.get_inst(sys)
    dir_chk_list = ["-d", "-p"]
    func_dict = {"-L": fetch_log_pos, "-D": fetch_log_entries, "-R": load_log}
    opt_arg_list = ["--force-read", "--read-from-remote-server"]
    opt_con_req_list = {"-R": ["-e"]}
    opt_req_list = ["-c", "-d"]
    opt_val_list = ["-c", "-e", "-d", "-f", "-g", "-p", "-s", "-t", "-y"]
    opt_valid_val = {"-s": gen_libs.validate_date,
                     "-t": gen_libs.validate_date}
    opt_xor_dict = {"-L": ["-D", "-R"], "-D": ["-L", "-R"], "-R": ["-D", "-L"]}
    # Process argument list from command line.
    args_array = arg_parser.arg_parse2(cmdline.argv, opt_val_list)
    # Run only when: no help requested, required args present, XOR options
    # consistent, directories exist, dates valid and conditional
    # requirements satisfied.
    if not gen_libs.help_func(args_array, __version__, help_message) \
            and not arg_parser.arg_require(args_array, opt_req_list) \
            and arg_parser.arg_xor_dict(args_array, opt_xor_dict) \
            and not arg_parser.arg_dir_chk_crt(args_array, dir_chk_list) \
            and arg_parser.arg_validate(args_array, opt_valid_val) \
            and arg_parser.arg_cond_req(args_array, opt_con_req_list):
        try:
            # Flavor id "-y" allows several differently-tagged instances.
            prog_lock = gen_class.ProgramLock(cmdline.argv,
                                              args_array.get("-y", ""))
            run_program(args_array, func_dict, opt_arg_list)
            del prog_lock
        except gen_class.SingleInstanceException:
            print("WARNING: lock in place for mysql_log_admin with id of: %s"
                  % (args_array.get("-y", "")))
def default(args):
    """Default task, called when no task is provided (default: init).

    Delegates to paver's built-in "help" task. (A previously unused inner
    `help_function` stub and the commented-out `paver.tasks.help` call
    were removed as dead code.)

    :param args: task arguments forwarded to the help task
    """
    call_task("help", args=args)
def phot_error(star_ADU, n_pix, n_b, sky_ADU, dark, read, gain=1.0):
    """
    CCD photometric error estimate.

    INPUT:
        star_ADU - stellar flux in ADU (total ADU counts within aperture)
        n_pix    - number of pixels in aperture
        n_b      - number of background pixels
        sky_ADU  - in ADU/pix
        dark     - in e/pix
        read     - in e^2/pix
        gain     - gain in e/ADU
    OUTPUT:
        Photometric error N in ADUs
    NOTES:
        This is not the normalized error. To normalize, have to do sigma_rel = N / star_ADU
        This does not include scintillation
    """
    # Per-pixel noise contributions (sky, dark, read noise, digitization).
    per_pixel = gain * sky_ADU + dark + read ** 2. + (gain * 0.289) ** 2.
    background_term = n_pix * ((1. + n_pix / n_b) * per_pixel)
    return np.sqrt(gain * star_ADU + background_term) / gain
def tan(x):
    """
    tan(x) -> number

    Return the tangent of x; x in radians.

    Tries the real (mpfr) path first; when x is not coercible to mpfr
    (TypeError), falls back to the complex (mpc) path.
    """
    try:
        res, x = _init_check_mpfr(x)
        gmp.mpfr_tan(res, x, gmp.MPFR_RNDN)
        return mpfr._from_c_mpfr(res)
    except TypeError:
        # Not a real value -- retry as a complex mpc number.
        res, x = _init_check_mpc(x)
        gmp.mpc_tan(res, x, gmp.MPC_RNDNN)
        return mpc._from_c_mpc(res)
async def test_get_id_from_scene_name() -> None:
    """Test looking up a scene id from the name.

    An unknown scene name must raise ValueError; a known name ("Ocean")
    must resolve to its numeric id.
    """
    with pytest.raises(ValueError):
        scenes.get_id_from_scene_name("non_exist")
    assert scenes.get_id_from_scene_name("Ocean") == 1
def test_wrapped_func():
    """
    Test uncertainty-aware functions obtained through wrapping.

    Covers: exact passthrough for certain values, correlation handling
    for uncertain arguments, and explicit/implicit derivatives mixed
    with non-numerical arguments.
    """
    ########################################
    # Function which can automatically handle numbers with
    # uncertainties:
    def f_auto_unc(angle, *list_var):
        return umath.cos(angle) + sum(list_var)
    def f(angle, *list_var):
        # We make sure that this function is only ever called with
        # numbers with no uncertainty (since it is wrapped):
        assert not isinstance(angle, uncert_core.UFloat)
        assert not any(isinstance(arg, uncert_core.UFloat)
                       for arg in list_var)
        return f_auto_unc(angle, *list_var)
    f_wrapped = uncert_core.wrap(f)
    my_list = [1, 2, 3]
    ########################################
    # Test of a wrapped function that only calls the original
    # function: it should obtain the exact same result:
    assert f_wrapped(0, *my_list) == f(0, *my_list)
    # 1 == 1 +/- 0, so the type must be checked too:
    assert type(f_wrapped(0, *my_list)) == type(f(0, *my_list))
    ########################################
    # Call with uncertainties:
    angle = uncert_core.ufloat(1, 0.1)
    list_value = uncert_core.ufloat(3, 0.2)
    # The random variables must be the same (full correlation):
    assert ufloats_close(f_wrapped(angle, *[1, angle]),
                         f_auto_unc(angle, *[1, angle]))
    assert ufloats_close(f_wrapped(angle, *[list_value, angle]),
                         f_auto_unc(angle, *[list_value, angle]))
    ########################################
    # Non-numerical arguments, and explicit and implicit derivatives:
    def f(x, y, z, t, u):
        return x+2*z+3*t+4*u
    f_wrapped = uncert_core.wrap(
        f, [lambda *args: 1, None, lambda *args:2, None])  # No deriv. for u
    assert f_wrapped(10, 'string argument', 1, 0, 0) == 12
    x = uncert_core.ufloat(10, 1)
    # Missing derivatives (y, t, u) must be obtained numerically:
    assert numbers_close(f_wrapped(x, 'string argument', x, x, x).std_dev,
                         (1+2+3+4)*x.std_dev)
def T2str_mag_simplified(K, TE, T2str, N):
    """Signal Model of T2str-weighted UTE GRE Magnitude Image

    S = K * [ exp(-TE/T2*) ] + N

    parameters:
        K :: constant (proportional to proton density)
        TE :: sequence echo time
        T2str :: relaxation due to spin-spin effects and dephasing
        N :: constant offset "noise" term
    @return expected (magnitude) signal
    """
    decay = np.exp(-1.0 * TE / T2str)
    return K * decay + N
def main():
    """The program's main entry point.

    Reads text either from the command-line arguments (joined, with
    literal "\\n" splitting lines) or from standard input, then prints
    it centered/aligned inside a box according to the module-level
    `parameters` dict, and exits with status 0.
    """
    program_name = os.path.basename(sys.argv[0])
    initialize_debugging(program_name)
    process_environment_variables()
    arguments = process_command_line()
    # Reading from standard input if there are no arguments:
    text = []
    longuest_line = 0  # NOTE(review): identifier typo ("longest"), kept as-is
    if len(arguments):
        arguments_line = " ".join(arguments)
        text = arguments_line.split("\\n")
        for line in text:
            if len(line) > longuest_line:
                longuest_line = len(line)
    else:
        for line in sys.stdin:
            line = line.strip()
            text.append(line)
            if len(line) > longuest_line:
                longuest_line = len(line)
    # Box width = longest line + surrounding spaces + two border chars.
    text_width_with_spaces = longuest_line + 2 * parameters["Surrounding spaces"]
    text_width_with_spaces_and_borders = text_width_with_spaces + 2
    if parameters["Alignment"] == "left":
        text_indent = 0
    elif parameters["Alignment"] == "right":
        text_indent = parameters["Columns"] - text_width_with_spaces_and_borders
    else:
        # Centered; clamp to 0 when the box is wider than the terminal.
        text_indent = (parameters["Columns"] - text_width_with_spaces_and_borders) // 2
    if text_indent < 0:
        text_indent = 0
    print_blank_lines(parameters["Leading lines"])
    print_upper_box_line(text_indent, text_width_with_spaces)
    print_inter_lines(text_indent, text_width_with_spaces)
    for line in text:
        print_text_line(text_indent, line, longuest_line)
    print_inter_lines(text_indent, text_width_with_spaces)
    print_lower_box_line(text_indent, text_width_with_spaces)
    print_blank_lines(parameters["Trailing lines"])
    sys.exit(0)
def tune(runner, kernel_options, device_options, tuning_options):
    """ Find the best performing kernel configuration in the parameter space

    Implements a simple genetic algorithm: a random population of
    configurations is evolved for a fixed number of generations using
    weighted selection, crossover and mutation, with measured kernel
    time as (inverse) fitness.

    :params runner: A runner from kernel_tuner.runners
    :type runner: kernel_tuner.runner

    :param kernel_options: A dictionary with all options for the kernel.
    :type kernel_options: kernel_tuner.interface.Options

    :param device_options: A dictionary with all options for the device
        on which the kernel should be tuned.
    :type device_options: kernel_tuner.interface.Options

    :param tuning_options: A dictionary with all options regarding the tuning
        process.
    :type tuning_options: kernel_tuner.interface.Options

    :returns: A list of dictionaries for executed kernel configurations and their
        execution times. And a dictionary that contains a information
        about the hardware/software environment on which the tuning took place.
    :rtype: list(dict()), dict()
    """
    dna_size = len(tuning_options.tune_params.keys())
    pop_size = 20
    generations = 100
    tuning_options["scaling"] = False
    tune_params = tuning_options.tune_params
    population = random_population(dna_size, pop_size, tune_params)
    best_time = 1e20
    all_results = []
    cache = {}  # memoizes cost evaluations across generations
    for generation in range(generations):
        if tuning_options.verbose:
            print("Generation %d, best_time %f" % (generation, best_time))
        #determine fitness of population members
        weighted_population = []
        for dna in population:
            time = _cost_func(dna, kernel_options, tuning_options, runner, all_results, cache)
            weighted_population.append((dna, time))
        population = []
        #'best_time' is used only for printing
        if tuning_options.verbose and all_results:
            best_time = min(all_results, key=lambda x: x["time"])["time"]
        #population is sorted such that better configs have higher chance of reproducing
        weighted_population.sort(key=lambda x: x[1])
        #crossover and mutate
        for _ in range(pop_size//2):
            ind1 = weighted_choice(weighted_population)
            ind2 = weighted_choice(weighted_population)
            ind1, ind2 = crossover(ind1, ind2)
            population.append(mutate(ind1, dna_size, tune_params))
            population.append(mutate(ind2, dna_size, tune_params))
    return all_results, runner.dev.get_environment()
def test_nvswitch_traffic_p2p(handle, switchIds):
    """
    Verifies that fabric can pass p2p read and write traffic successfully

    Watches the NvSwitch TX/RX bandwidth counters (ports 0), generates
    p2p read and write traffic, and asserts that all four counters
    increased.

    NOTE(review): the skip_test() call below makes the remainder of this
    test unreachable until the bandwidth fields are populated.
    """
    test_utils.skip_test("Bandwidth field not being updated yet")
    # TX_0 and RX_0 on port 0
    nvSwitchBandwidth0FieldIds = []
    for i in range(dcgm_fields.DCGM_FI_DEV_NVSWITCH_BANDWIDTH_TX_0_P00,
                   dcgm_fields.DCGM_FI_DEV_NVSWITCH_BANDWIDTH_RX_0_P00 + 1, 1):
        nvSwitchBandwidth0FieldIds.append(i)
    # TX_1 and RX_1 on port 0
    nvSwitchBandwidth1FieldIds = []
    for i in range(dcgm_fields.DCGM_FI_DEV_NVSWITCH_BANDWIDTH_TX_1_P00,
                   dcgm_fields.DCGM_FI_DEV_NVSWITCH_BANDWIDTH_RX_1_P00 + 1, 1):
        nvSwitchBandwidth1FieldIds.append(i)
    dcgmHandle = pydcgm.DcgmHandle(ipAddress="127.0.0.1")
    groupName = "test_nvswitches"
    allNvSwitchesGroup = pydcgm.DcgmGroup(dcgmHandle, groupName=groupName,
                                          groupType=dcgm_structs.DCGM_GROUP_DEFAULT_NVSWITCHES)
    fgName = "test_nvswitches_bandwidth0"
    nvSwitchBandwidth0FieldGroup = pydcgm.DcgmFieldGroup(dcgmHandle, name=fgName,
                                                         fieldIds=nvSwitchBandwidth0FieldIds)
    fgName = "test_nvswitches_bandwidth1"
    nvSwitchBandwidth1FieldGroup = pydcgm.DcgmFieldGroup(dcgmHandle, name=fgName,
                                                         fieldIds=nvSwitchBandwidth1FieldIds)
    updateFreq = int(20 / 2.0) * 1000000
    maxKeepAge = 600.0
    maxKeepSamples = 0
    nvSwitchBandwidth0Watcher = dcgm_field_helpers.DcgmFieldGroupEntityWatcher(
        dcgmHandle.handle, allNvSwitchesGroup.GetId(),
        nvSwitchBandwidth0FieldGroup, dcgm_structs.DCGM_OPERATION_MODE_AUTO,
        updateFreq, maxKeepAge, maxKeepSamples, 0)
    nvSwitchBandwidth1Watcher = dcgm_field_helpers.DcgmFieldGroupEntityWatcher(
        dcgmHandle.handle, allNvSwitchesGroup.GetId(),
        nvSwitchBandwidth1FieldGroup, dcgm_structs.DCGM_OPERATION_MODE_AUTO,
        updateFreq, maxKeepAge, maxKeepSamples, 0)
    # wait for FM reports and populates stats
    time.sleep(30)
    # read the counters before sending traffic
    # NOTE(review): the "Before" variables are rebound on every loop
    # iteration, so only the last entity's values survive for the final
    # comparison -- confirm that is intended.
    nvSwitchBandwidth0Watcher.GetMore()
    nvSwitchBandwidth1Watcher.GetMore()
    for entityGroupId in nvSwitchBandwidth0Watcher.values.keys():
        for entityId in nvSwitchBandwidth0Watcher.values[entityGroupId]:
            bandwidth0FieldId = dcgm_fields.DCGM_FI_DEV_NVSWITCH_BANDWIDTH_TX_0_P00
            bandwidth1FieldId = dcgm_fields.DCGM_FI_DEV_NVSWITCH_BANDWIDTH_TX_1_P00
            counter0TxBefore = nvSwitchBandwidth0Watcher.values[entityGroupId][entityId][bandwidth0FieldId].values[
                -1].value
            bandwidth0FieldId += 1
            counter0RxBefore = nvSwitchBandwidth0Watcher.values[entityGroupId][entityId][bandwidth0FieldId].values[
                -1].value
            counter1TxBefore = nvSwitchBandwidth1Watcher.values[entityGroupId][entityId][bandwidth1FieldId].values[
                -1].value
            bandwidth1FieldId += 1
            counter1RxBefore = nvSwitchBandwidth1Watcher.values[entityGroupId][entityId][bandwidth1FieldId].values[
                -1].value
    # Generate write traffic for the nvswitches
    test_utils.run_p2p_bandwidth_app(test_nvswitch_utils.MEMCPY_DTOD_WRITE_CE_BANDWIDTH)
    # Generate read traffic for the nvswitches
    test_utils.run_p2p_bandwidth_app(test_nvswitch_utils.MEMCPY_DTOD_READ_CE_BANDWIDTH)
    # read the counters again after sending traffic
    nvSwitchBandwidth0Watcher.GetMore()
    nvSwitchBandwidth1Watcher.GetMore()
    for entityGroupId in nvSwitchBandwidth0Watcher.values.keys():
        for entityId in nvSwitchBandwidth0Watcher.values[entityGroupId]:
            bandwidth0FieldId = dcgm_fields.DCGM_FI_DEV_NVSWITCH_BANDWIDTH_TX_0_P00
            bandwidth1FieldId = dcgm_fields.DCGM_FI_DEV_NVSWITCH_BANDWIDTH_TX_1_P00
            counter0TxAfter = nvSwitchBandwidth0Watcher.values[entityGroupId][entityId][bandwidth0FieldId].values[
                -1].value
            bandwidth0FieldId += 1
            counter0RxAfter = nvSwitchBandwidth0Watcher.values[entityGroupId][entityId][bandwidth0FieldId].values[
                -1].value
            counter1TxAfter = nvSwitchBandwidth1Watcher.values[entityGroupId][entityId][bandwidth1FieldId].values[
                -1].value
            bandwidth1FieldId += 1
            counter1RxAfter = nvSwitchBandwidth1Watcher.values[entityGroupId][entityId][bandwidth1FieldId].values[
                -1].value
    assert counter0TxAfter > counter0TxBefore, "Counter0Tx did not increase"
    assert counter0RxAfter > counter0RxBefore, "counter0Rx did not increase"
    assert counter1TxAfter > counter1TxBefore, "Counter1Tx did not increase"
    assert counter1RxAfter > counter1RxBefore, "counter1Rx did not increase"
def getTimeString(t, centi=True):
    """
    category: General Utility Functions

    Given a value in milliseconds, returns a Lstr with:
    (hours if > 0):minutes:seconds:centiseconds.

    WARNING: this Lstr value is somewhat large so don't use this to
    repeatedly update node values in a timer/etc. For that purpose you
    should use timeDisplay nodes and attribute connections.
    """
    if type(t) is not int: t = int(t)
    bits = []
    subs = []
    # Use floor division so the arithmetic stays integral under Python 3;
    # with true division h/m become small floats and the `!= 0` checks
    # would wrongly fire for every nonzero t.
    h = (t // 1000) // (60 * 60)
    if h != 0:
        bits.append('${H}')
        subs.append(('${H}', bs.Lstr(resource='timeSuffixHoursText',
                                     subs=[('${COUNT}', str(h))])))
    m = ((t // 1000) // 60) % 60
    if m != 0:
        bits.append('${M}')
        subs.append(('${M}', bs.Lstr(resource='timeSuffixMinutesText',
                                     subs=[('${COUNT}', str(m))])))
    # we add seconds if its non-zero *or* we havn't added anything else
    if centi:
        s = (t / 1000.0 % 60.0)
        if s >= 0.005 or not bits:
            bits.append('${S}')
            subs.append(('${S}', bs.Lstr(resource='timeSuffixSecondsText',
                                         subs=[('${COUNT}', ('%.2f' % s))])))
    else:
        s = (t // 1000 % 60)
        if s != 0 or not bits:
            bits.append('${S}')
            subs.append(('${S}', bs.Lstr(resource='timeSuffixSecondsText',
                                         subs=[('${COUNT}', str(s))])))
    return bs.Lstr(value=' '.join(bits), subs=subs)
def version(ctx):
    """Show CSE version"""
    cse_version = pkg_resources.require("container-service-extension")[0].version
    message = ('Container Service Extension for VMware vCloud Director, '
               'version %s' % cse_version)
    print(message)
def validation_supervised(model, input_tensor, y_true, loss_fn, multiclass=False, n_classes=1):
    """
    Return (average loss, accuracy) for one batch under a supervised model.

    :param model: callable model applied to the (float-cast) input batch
    :param input_tensor: input batch tensor
    :param y_true: ground-truth labels
    :param loss_fn: loss function taking (predictions, targets)
    :param multiclass: when True, predictions are argmax'd over dim 1
    :param n_classes: number of output columns used to reshape y_true
        in the non-multiclass path
    :return: (mean loss as a float, accuracy or None)
    """
    y_pred = model(input_tensor.float())
    if multiclass:
        loss = loss_fn(y_pred, y_true)
        y_hat = y_pred.argmax(dim=1)
        acc = accuracy(y_hat, y_true)
    else:
        targets = y_true.view(-1, n_classes).float()
        loss = loss_fn(y_pred, targets)
        try:
            # BUG FIX: this previously referenced the undefined name
            # `n_out` (instead of `n_classes`), so the bare `except`
            # always swallowed a NameError and acc was silently None.
            acc = accuracy(y_pred, targets)
        except Exception:
            acc = None
    return loss.mean().item(), acc
def configure_logger(app):
    """
    Attach log handlers to the application's logger based on configuration.

    Supports either a stdout/stderr StreamHandler pair (everything below
    ERROR to stdout, ERROR and above to stderr) or a RotatingFileHandler,
    selected via ``app.config['HANDLER']``.

    :param app: application whose logger gets configured
    :return: the configured logger
    """
    logger = app.logger
    logger.setLevel(logging.INFO)
    min_level = logging.DEBUG if app.debug else logging.INFO
    if app.config['HANDLER'] == "StreamHandler":
        class InfoFilter(logging.Filter):
            def filter(self, rec):
                # for stdout, everything below ERROR (warnings included)
                return rec.levelno < logging.ERROR
        stdout_handler = logging.StreamHandler(sys.stdout)
        stdout_handler.setLevel(min_level)
        stdout_handler.setFormatter(RequestFormatter())
        stdout_handler.addFilter(InfoFilter())
        logger.addHandler(stdout_handler)
        # only errors to stderr
        stderr_handler = logging.StreamHandler(sys.stderr)
        stderr_handler.setLevel(logging.ERROR)
        stderr_handler.setFormatter(RequestFormatter())
        logger.addHandler(stderr_handler)
    else:  # elif config.HANDLER == "RotatingFileHandler":
        file_handler = RotatingFileHandler(
            'access.log', maxBytes=10000, backupCount=1)
        file_handler.setFormatter(RequestFormatter())
        file_handler.setLevel(min_level)
        logger.addHandler(file_handler)
    return logger
def timing(func=None, *, name=None, is_stage=None):
    """
    Decorator to measure the time taken by the function to execute.

    :param func: Function (when used as a bare ``@timing`` decorator)
    :param name: Display name used in logs and accumulators; defaults to
        the wrapped function's ``__name__`` (previously a bare ``@timing``
        logged and accumulated under the name ``None``)
    :param is_stage: Identifier for mining stage; when truthy the elapsed
        time is accumulated in STAGE_WISE_TIME instead of
        ELAPSED_TIME_ON_FUNCTIONS

    Examples:
        >>>
        >>> @timing(name="foo")
        >>> def func():
        >>>     ...
        >>>
        >>> @timing
        >>> def func():
        >>>     ...
        >>>
    """
    if func is None:
        # Called with arguments: return the parameterized decorator.
        return partial(timing, name=name, is_stage=is_stage)
    if name is None:
        name = func.__name__
    @wraps(func)
    def wrapper(*args, **kwargs):
        start = timer()
        result = func(*args, **kwargs)
        total_time = timer() - start
        logger.info(f"Time taken to execute `{name}`: {total_time} sec")
        # Accumulate into the per-stage or per-function totals dict.
        bucket = STAGE_WISE_TIME if is_stage else ELAPSED_TIME_ON_FUNCTIONS
        bucket[name] = bucket.get(name, 0) + total_time
        return result
    return wrapper
def is_skip_file(filename):
    """ Should the given file be skipped over for testing

    :param filename: The file's name
    :type filename: String
    :return: True if the given file should be skipped, false otherwise
    :rtype: Boolean
    """
    # A file is skipped when its name ends with any registered suffix.
    return any(
        len(suffix) <= len(filename) and filename[-len(suffix):] == suffix
        for suffix in SKIP_FILES
    )
def lowercase_or_notify(x):
    """ Lowercases the input if it is valid, otherwise logs the error and sets a default value

    Args:
        String to lowercase

    Returns:
        Lowercased string if possible, else unmodified string or default value.
    """
    try:
        return x.lower()
    except Exception:
        pass
    # .lower() failed: either a non-string value (kept as-is) or a
    # null-ish value (replaced with the default marker).
    if x and not np.isnan(x):
        logger.info('Program activity of {} was unable to be lowercased. Entered as-is.'.format(x))
        return x
    logger.info('Null value found for program activity name. Entered default value.')  # should not happen
    return '(not provided)'
def is_not_null(node, eval_type, given_variables):
    """Process the is_not_null operator.

    :param node: Formula node
    :param eval_type: Type of evaluation (EVAL_EXP, EVAL_SQL, or text)
    :param given_variables: Dictionary of var/values
    :return: Boolean result, (SQL query, params) pair, or text result
    """
    if eval_type == EVAL_EXP:
        # Python evaluation: null-check the resolved value
        resolved = get_value(node, given_variables)
        return not value_is_null(resolved)
    if eval_type == EVAL_SQL:
        # SQL evaluation: parameter-free "is not null" clause
        query = sql.SQL('({0} is not null)').format(
            OnTaskDBIdentifier(node['field']),
        )
        return query, []
    # Text evaluation
    return '{0} is not null'.format(node['field'])
def search_for_rooms(filters, allow_admin=False, availability=None):
    """Search for a room, using the provided filters.

    :param filters: The filters, provided as a dictionary
    :param allow_admin: A boolean specifying whether admins have override privileges
    :param availability: A boolean specifying whether (un)available rooms should be provided,
                         or `None` in case all rooms should be returned.
    :return: a Room query (favorites first, then natural-sorted by name)
    """
    query = (Room.query
             .outerjoin(favorite_room_table, db.and_(favorite_room_table.c.user_id == session.user.id,
                                                     favorite_room_table.c.room_id == Room.id))
             .reset_joinpoint()  # otherwise filter_by() would apply to the favorite table
             .options(joinedload('owner').load_only('id'))
             .filter(~Room.is_deleted)
             .order_by(favorite_room_table.c.user_id.is_(None), db.func.indico.natsort(Room.full_name)))
    criteria = {}
    if 'capacity' in filters:
        query = query.filter(Room.capacity >= filters['capacity'])
    if 'building' in filters:
        criteria['building'] = filters['building']
    if 'division' in filters:
        criteria['division'] = filters['division']
    query = query.filter_by(**criteria)
    if 'text' in filters:
        text = ' '.join(filters['text'].strip().split())
        # "#123" searches by exact room id instead of full-text matching
        if text.startswith('#') and text[1:].isdigit():
            query = query.filter(Room.id == int(text[1:]))
        else:
            query = query.filter(_make_room_text_filter(text))
    if filters.get('equipment'):
        # Room must have ALL requested equipment: count matches and
        # compare against the number of requested items.
        subquery = (db.session.query(RoomEquipmentAssociation)
                    .with_entities(db.func.count(RoomEquipmentAssociation.c.room_id))
                    .filter(RoomEquipmentAssociation.c.room_id == Room.id,
                            EquipmentType.name.in_(filters['equipment']))
                    .join(EquipmentType, RoomEquipmentAssociation.c.equipment_id == EquipmentType.id)
                    .correlate(Room)
                    .as_scalar())
        query = query.filter(subquery == len(filters['equipment']))
    if filters.get('features'):
        for feature in filters['features']:
            query = query.filter(Room.available_equipment.any(EquipmentType.features.any(RoomFeature.name == feature)))
    if filters.get('favorite'):
        query = query.filter(favorite_room_table.c.user_id.isnot(None))
    if filters.get('mine'):
        ids = get_managed_room_ids(session.user)
        query = query.filter(Room.id.in_(ids))
    query = _filter_coordinates(query, filters)
    if availability is None:
        return query
    # Availability filtering: restrict to rooms (un)available in the
    # requested period/repetition.
    start_dt, end_dt = filters['start_dt'], filters['end_dt']
    repeatability = (filters['repeat_frequency'], filters['repeat_interval'])
    availability_filters = [Room.filter_available(start_dt, end_dt, repeatability, include_blockings=False,
                                                  include_pre_bookings=False)]
    if not (allow_admin and rb_is_admin(session.user)):
        # Non-admins are additionally bound by bookable hours, nonbookable
        # periods and per-room booking length limits -- unless they can
        # override for the specific room.
        selected_period_days = (filters['end_dt'] - filters['start_dt']).days
        booking_limit_days = db.func.coalesce(Room.booking_limit_days, rb_settings.get('booking_limit'))
        criterion = db.and_(Room.filter_bookable_hours(start_dt.time(), end_dt.time()),
                            Room.filter_nonbookable_periods(start_dt, end_dt),
                            db.or_(booking_limit_days.is_(None),
                                   selected_period_days <= booking_limit_days))
        unbookable_ids = [room.id
                          for room in query.filter(db.and_(*availability_filters), ~criterion)
                          if not room.can_override(session.user, allow_admin=False)]
        availability_filters.append(~Room.id.in_(unbookable_ids))
    availability_criterion = db.and_(*availability_filters)
    if availability is False:
        availability_criterion = ~availability_criterion
    return query.filter(availability_criterion)
def effect_inc_decr_bid_factors(strategy=2):
    """
    Experiment that checks the effect of every possible pair of increasing and
    decreasing bidding factors.

    Runs one auction per (increase, decrease) factor pair and plots a heat map
    of the average difference between the starting prices and the market price
    in the last round; that average difference is capped at 300.

    :param strategy: Strategy to use
    """
    i_range = 100
    d_range = 100
    differences = np.zeros((i_range, d_range))
    for inc_step in range(i_range):
        # a decreasing factor of exactly 0 is skipped, hence the range from 1
        for dec_step in range(1, d_range):
            auctioneer = create_auctioneer(strategy)
            auctioneer.increase_bidding_factor = [1 + inc_step / i_range] * n_buyers
            auctioneer.decrease_bidding_factor = [0 + dec_step / d_range] * n_buyers
            auctioneer.start_auction()
            last_market_price = auctioneer.market_price[auctioneer.r_rounds - 1]
            avg_diff = calculate_avg_difference(auctioneer.starting_prices,
                                                last_market_price)
            # cap at 300 so extreme outliers do not dominate the colour scale
            differences[inc_step, dec_step] = min(300, avg_diff)
    fig, ax = plt.subplots()
    i_factors = [1 + n / i_range for n in range(i_range)]
    d_factors = [0 + n / d_range for n in range(d_range)]
    im = ax.pcolormesh(d_factors, i_factors, differences[:, :])
    ax.set_xlabel("Decreasing factor")
    ax.set_ylabel("Increasing factor")
    # ax.set_title(
    #     "Increase/Decrease bidding factor effect for " + str(n_buyers) + " buyers and " + str(k_sellers) + " sellers")
    fig.colorbar(im)
    plt.show()
def get_scalar(obj):
    """obj can either be a value, or a type

    Returns the Stella type for the given object"""
    type_ = type(obj)
    # NOTE: type(int) is `type`, so this branch detects that `obj` is itself a
    # class (e.g. get_scalar(int)) rather than an instance, and uses it
    # directly as the lookup key.
    if type_ == type(int):
        type_ = obj
    elif type_ == PyWrapper:
        # unwrap the wrapped Python object and use its underlying type
        type_ = obj.py
    # HACK {
    # special cases that are not in the _pyscalars lookup table
    if type_ == type(None): # noqa
        return None_
    elif type_ == str:
        return Str
    # } HACK
    try:
        return _pyscalars[type_]
    except KeyError:
        raise exc.TypeError("Invalid scalar type `{0}'".format(type_))
def send_test_packets(
        mode=DEFAULT_SENDER_MODE,
        config=DEFAULT_CONFIG,
        publickey=DEFAULT_PUBLICKEY,
        address=DEFAULT_HOSTNAME,
        port=None,
        n=3,
        encrypt=True,
        raw_packet=None):
    """
    Send n (default 3) test packets to the DoseNet server.

    :param mode: sender mode forwarded to ServerSender
    :param config: path to the station config file (missing file tolerated)
    :param publickey: path to the server public key file
    :param address: server hostname
    :param port: server port (None lets ServerSender pick its default)
    :param n: number of packets to send
    :param encrypt: if True, encrypt the payload with the public key
    :param raw_packet: payload to send; when None, a default test message is
        built from the station ID
    """
    sleep_time = 2  # seconds to wait between consecutive packets
    try:
        config_obj = Config(config)
    except IOError:
        # file doesn't exist; fall back to no config (station ID unknown)
        config_obj = None
    key_obj = PublicKey(publickey)
    sender = ServerSender(
        mode=mode, address=address, port=port,
        config=config_obj, publickey=key_obj, verbosity=3)
    try:
        station_id = config_obj.ID
    except AttributeError:
        # config_obj is None (or has no ID); use a placeholder in the message
        station_id = '?'
    if raw_packet is None:
        raw_packet = 'Test packet from station {} by mode {}'.format(
            station_id, mode)
    if encrypt:
        packet_to_send = sender.encrypt_packet(raw_packet)
    else:
        packet_to_send = raw_packet
    # NOTE(review): xrange -- this module targets Python 2
    for _ in xrange(n):
        sender.send_data(packet_to_send)
        time.sleep(sleep_time)
def get_importable_subclasses(base_class, used_in_automl=True):
    """Get importable subclasses of a base class. Used to list all of our estimators, transformers, components and pipelines dynamically.

    Args:
        base_class (abc.ABCMeta): Base class to find all of the subclasses for.
        used_in_automl: Not all components/pipelines/estimators are used in automl search. If True,
            only include those subclasses that are used in the search. This would mean excluding classes related to
            ExtraTrees, ElasticNet, and Baseline estimators.

    Returns:
        List of subclasses.
    """
    importable = []
    for candidate in _get_subclasses(base_class):
        # only classes living in the blocktorch pipelines package count
        if "blocktorch.pipelines" not in candidate.__module__:
            continue
        try:
            # a subclass is importable when it can be constructed with defaults
            candidate()
        except (ImportError, MissingComponentError, TypeError):
            logger.debug(
                f"Could not import class {candidate.__name__} in get_importable_subclasses"
            )
        except EnsembleMissingPipelinesError:
            # ensembles are kept even though they need pipelines to build
            importable.append(candidate)
        else:
            importable.append(candidate)
    if used_in_automl:
        importable = [
            candidate
            for candidate in importable
            if candidate.__name__ not in _not_used_in_automl
        ]
    return importable
def sin(x, deg=None, **kwargs):
    """Computes the sine of x in either degrees or radians"""
    value = float(x)
    # degrees are used when explicitly requested, or when the global trigDeg
    # default is on and the caller did not specify a preference
    use_degrees = deg or (trigDeg and deg is None)
    if use_degrees:
        value = math.radians(value)
    return math.sin(value)
def Debug(message,
          print_init_shape=True,
          print_forward_shape=False,
          print_inverse_shape=False,
          compare_vals=False,
          name='unnamed'):
    # language=rst
    """
    Help debug shapes.  A pass-through flow layer that optionally prints
    shapes on init/forward/inverse and the reconstruction error.

    :param print_init_shape: Print the shapes
    :param print_forward_shape: Print the shapes
    :param print_inverse_shape: Print the shapes
    :param compare_vals: Print the difference between the value of the forward pass and the reconstructed
    """
    saved_val = None

    def init_fun(key, input_shape, condition_shape):
        if print_init_shape:
            print(message, 'input_shape', input_shape)
        return name, input_shape, (), ()

    def forward(params, state, log_px, x, condition, **kwargs):
        if print_forward_shape:
            if isinstance(x, (tuple, list)):
                print(message, 'x shapes', [_x.shape for _x in x], 'log_px shapes', [_x.shape for _x in log_px])
            else:
                print(message, 'x.shape', x.shape, 'log_px.shape', log_px.shape)
        if compare_vals:
            # remember the forward value so inverse() can report the error
            nonlocal saved_val
            saved_val = x
        return log_px, x, state

    def inverse(params, state, log_pz, z, condition, **kwargs):
        if print_inverse_shape:
            if isinstance(z, (tuple, list)):
                print(message, 'z shapes', [_z.shape for _z in z], 'log_pz shapes', [_z.shape for _z in log_pz])
            else:
                print(message, 'z.shape', z.shape, 'log_pz.shape', log_pz.shape)
        if compare_vals:
            if isinstance(z, (tuple, list)):
                print(message, 'jnp.linalg.norm(z - saved_val)', [jnp.linalg.norm(_z - _x) for _x, _z in zip(saved_val, z)])
            else:
                print(message, 'jnp.linalg.norm(z - saved_val)', jnp.linalg.norm(z - saved_val))
        return log_pz, z, state

    return init_fun, forward, inverse
def limit_movie_char_fields(sender, instance, *args, **kwargs):
    """
    Limits Movie's character fields based on maximum length.

    Signal-handler style: truncates ``instance.title`` to TITLE_MAX_LENGTH
    (appending an ellipsis when cut) and trims ``instance.description`` via
    utils.trim_text_by_sentence to DESCRIPTION_MAX_LENGTH.  Mutates
    ``instance`` in place.
    """
    # reserve one character for the ellipsis when the title is too long
    if len(instance.title) > Movie.TITLE_MAX_LENGTH:
        instance.title = instance.title[:Movie.TITLE_MAX_LENGTH-1] + '…'
    instance.description = utils.trim_text_by_sentence(instance.description, Movie.DESCRIPTION_MAX_LENGTH)
def bloated_nested_block(block_dets, *, repeat=False, **_kwargs):
    """
    Look for long indented blocks under conditionals, inside loops etc that are
    candidates for separating into functions to simplify the narrative of the
    main code.

    :param block_dets: block details; its ``.element`` is matched against the
        xpaths in OUTER_XPATHS to find long nested blocks.
    :param repeat: when True, only the title/summary is emitted (the strategy
        advice and demos are shown once only).
    :return: dict of message text keyed by conf.Level, or None when no long
        nested block is found.
    """
    # collect the labels ('if', 'for', ...) of outer constructs containing a
    # long indented block; 'if' is tracked separately because it enables the
    # short-circuit advice below
    bloated_outer_types = set()
    included_if = False
    for lbl, outer_xpath in OUTER_XPATHS.items():
        if has_long_block(block_dets.element, outer_xpath):
            bloated_outer_types.add(lbl)
            if lbl == 'if':
                included_if = True
    if not bloated_outer_types:
        return None
    title = layout("""\
    ### Possibility of avoiding excessively long nested blocks
    """)
    summary_bits = []
    for bloated_outer_type in bloated_outer_types:
        summary_bits.append(layout(f"""\
        The code has at least one long nested block under
        `{bloated_outer_type}:`
        """))
    summary = ''.join(summary_bits)
    # pre-build the advice fragments; which are used depends on repeat /
    # included_if below
    short_circuit_msg = layout("""\
    #### Short-circuit and exit early

    It may be possible to unnest the indented code block by exiting early if the
    condition in the `if` expression is not met.
    """)
    short_circuit_demo_msg = (
        layout("""
        For example, instead of:
        """)
        +
        layout("""\
        if tall_enough:
            ## add to basketball team
            line 1
            line 2
            line 3
            ...
            line 30
        logging.info("Finished!")
        """, is_code=True)
        +
        layout("""\
        we could possibly write:
        """)
        +
        layout('''\
        if not tall_enough:
            return
        ## add to basketball team
        line 1
        line 2
        line 3
        ...
        line 30
        logging.info("Finished!")
        ''', is_code=True)
    )
    move_to_func_msg = layout("""\
    #### Shift to function

    It may be possible to pull most of the nested code block into a function
    which can be called instead.
    """)
    move_to_func_demo_msg = (
        layout("""
        For example, instead of:
        """)
        +
        layout("""\
        for name in names:
            ## contact name
            line 1
            line 2
            line 3
            ...
            line 30
        logging.info("Finished!")
        """, is_code=True)
        +
        layout("""\
        we could possibly write:
        """)
        +
        layout('''\
        def contact(name):
            """
            Contact person ...
            """
            line 1
            line 2
            line 3
            ...
            line 30

        for name in names:
            contact(name)
        logging.info("Finished!")
        ''', is_code=True)
    )
    if not repeat:
        brief_strategy = layout("""\
        You might want to consider applying a strategy for avoiding
        excessively long indented blocks:
        """)
        # the short-circuit advice only makes sense for an `if` block
        if included_if:
            short_circuit = short_circuit_msg
            short_circuit_demo = short_circuit_demo_msg
        else:
            short_circuit = ''
            short_circuit_demo = ''
        move_to_func = move_to_func_msg
        move_to_func_demo = move_to_func_demo_msg
        human = layout("""\
        Computers can handle lots of nesting without malfunctioning. Human
        brains are not so fortunate. As it says in The Zen of Python:

        > "Flat is better than nested."
        """)
    else:
        brief_strategy = ''
        short_circuit = ''
        short_circuit_demo = ''
        move_to_func = ''
        move_to_func_demo = ''
        human = ''
    # assemble one message per verbosity level
    message = {
        conf.Level.BRIEF: (title + summary + brief_strategy + short_circuit
            + move_to_func),
        conf.Level.MAIN: (title + summary + brief_strategy + short_circuit
            + short_circuit_demo + move_to_func + move_to_func_demo),
        conf.Level.EXTRA: human,
    }
    return message
def _to_bytes(value: Any, type_str: str = "bytes32") -> bytes:
    """Convert a value to bytes.

    :param value: bytes, str or int to convert; bool is rejected explicitly
        because it is a subclass of int
    :param type_str: solidity-style bytes type, e.g. "bytes", "byte", "bytes32"
    :raises TypeError: on an unsupported input type
    :raises ValueError: on an invalid fixed-bytes size
    :raises OverflowError: when the value does not fit in the requested size
    """
    if isinstance(value, bool) or not isinstance(value, (bytes, str, int)):
        raise TypeError(f"Cannot convert {type(value).__name__} '{value}' to {type_str}")
    value = _to_hex(value)
    if type_str == "bytes":
        # dynamic bytes: no fixed size, convert the whole hex string
        return eth_utils.to_bytes(hexstr=value)
    if type_str == "byte":
        # "byte" is an alias for "bytes1"
        type_str = "bytes1"
    # str.strip removes the characters {'b','y','t','e','s'} from both ends,
    # leaving only the size digits (e.g. "bytes32" -> "32")
    size = int(type_str.strip("bytes"))
    if size < 1 or size > 32:
        raise ValueError(f"Invalid type: {type_str}")
    try:
        return int(value, 16).to_bytes(size, "big")
    except OverflowError:
        raise OverflowError(f"'{value}' exceeds maximum length for {type_str}")
def _plan_ftd1c(pa: str = "abc", pb: int = 50):
"""
This is plan description.
Multiline string.
Parameters
----------
pa
Description of the parameter pa
pb : int
Description
of the parameter pb
"""
yield from [pa, pb] | 34,645 |
def _map_sbs_sigs_back(df: pd.DataFrame) -> pd.Series:
    """
    Map Back Single-Base Substitution Signatures.
    -----------------------
    Args:
        * df: pandas.core.frame.DataFrame with index to be mapped

    Returns:
        * pandas.core.series.Series with matching indices to context96

    NOTE(review): mutates ``df`` in place when its index has no name.
    """
    def _check_to_flip(x, ref):
        # keep contexts already present in the reference; otherwise use the
        # reverse complement (compl) so every context maps into context96
        if x in ref:
            return x
        else:
            return compl(x)

    if df.index.name is None: df.index.name = 'index'
    df_idx = df.index.name

    if ">" in df.index[0]:
        # arrow format (contains '>'): run the annotation converter first
        # (presumably converts to the word format used by context96 -- confirm)
        context_s = df.reset_index()[df_idx].apply(sbs_annotation_converter)
    else:
        # already in word format -- use the index values as-is
        context_s = df.reset_index()[df_idx]

    return context_s.apply(lambda x: _check_to_flip(x, context96.keys()))
def test_pipeline_get_eta_metric_peak(
    peak: bool,
    expected: float,
    source_df: pd.DataFrame
) -> None:
    """
    Tests the calculation of the eta metric.

    Args:
        peak: Whether to use peak flux when calculating the eta.
        expected: Expected eta value.
        source_df: The dataframe containing the source data.

    Returns:
        None
    """
    # an expected eta of 0.0 doubles as the single-measurement case: keep only
    # the first row so the metric is computed over one measurement
    if expected == 0.0:
        source_df = source_df.drop(source_df.index[1:])
    eta = vtu.pipeline_get_eta_metric(source_df, peak=peak)
    # exact comparison is safe for the 0.0 sentinel; otherwise compare floats
    # approximately
    if expected == 0.0:
        assert eta == expected
    else:
        assert eta == pytest.approx(expected)
def rmsd(
    coords1: np.ndarray,
    coords2: np.ndarray,
    atomicn1: np.ndarray,
    atomicn2: np.ndarray,
    center: bool = False,
    minimize: bool = False,
    atol: float = 1e-9,
) -> float:
    """
    Compute RMSD

    Parameters
    ----------
    coords1: np.ndarray
        Coordinate of molecule 1
    coords2: np.ndarray
        Coordinates of molecule 2
    atomicn1: np.ndarray
        Atomic numbers for molecule 1
    atomicn2: np.ndarray
        Atomic numbers for molecule 2
    center: bool
        Center molecules at origin
    minimize: bool
        Compute minimum RMSD (with QCP method)
    atol: float
        Absolute tolerance parameter for QCP method (see :func:`qcp_rmsd`)

    Returns
    -------
    float
        RMSD

    Notes
    -----
    When `minimize=True`, the QCP method is used. [1]_ The molecules are
    centred at the origin according to the center of geometry and superimposed
    in order to minimize the RMSD.

    .. [1] D. L. Theobald, *Rapid calculation of RMSDs using a quaternion-based
       characteristic polynomial*, Acta Crys. A **61**, 478-480 (2005).
    """
    # both molecules must have the same atoms in the same order
    assert np.all(atomicn1 == atomicn2)
    assert coords1.shape == coords2.shape

    # minimization always works on centred coordinates
    must_center = center or minimize
    a = utils.center(coords1) if must_center else coords1
    b = utils.center(coords2) if must_center else coords2

    if minimize:
        return qcp.qcp_rmsd(a, b, atol)

    n_atoms = coords1.shape[0]
    return np.sqrt(np.sum((a - b) ** 2) / n_atoms)
def get_bridge_interfaces(yaml):
    """Returns a list of all interfaces that are bridgedomain members.

    :param yaml: parsed configuration mapping; may lack the 'bridgedomains' key
    :return: flat list of member interface names (empty when there are no
        bridgedomains or none of them declare interfaces)
    """
    # idiom fix: was `not "bridgedomains" in yaml`; also drop the unused
    # `_ifname` loop variable by iterating over values directly
    if "bridgedomains" not in yaml:
        return []
    return [
        ifname
        for iface in yaml["bridgedomains"].values()
        if "interfaces" in iface
        for ifname in iface["interfaces"]
    ]
def serialize_for_c(qr, border, message):
    """ generates a c-style representation of the qr code to stdout.

    :param qr: a populated qrcode.QRCode object
    :param border: border size in boxes, added on each side of the code
    :param message: original message text, echoed into the header comment
    """
    # size the output for an even fit to an 8-bit matrix
    # (a version-v QR code is 4*v + 17 modules across, plus border both sides)
    output_size = 8 * (4 * qr.version + 17 + 2 * border)
    # NB: trying to invert the colors here doesn't work with qrcode-6.1
    img = qr.make_image(fill_color='black', back_color='white')
    resized_img = img.resize((output_size, output_size), Image.NEAREST)
    width = resized_img.width
    height = resized_img.height
    # NOTE(review): assumes a 1-bit image so each raw byte packs 8 pixels --
    # confirm against the image mode produced by qr.make_image
    bytes_per_row = width // 8
    print('#pragma once\n')
    print('// Original QR code details')
    print('// * Version : {}'.format(qr.version))
    print('// * Error correction : {}'.format(qr.error_correction))
    print('// * Box size (pixels) : {}'.format(qr.box_size))
    print('// * Border size (boxes) : {}'.format(border))
    print('// * Message : {}\n'.format(message))
    print('#define qrcode_width {}'.format(width))
    print('#define qrcode_height {}\n'.format(height))
    print('static const uint8_t PROGMEM qrcode_data[] = {')
    content = bytes(resized_img.tobytes(encoder_name='raw'))
    for row in range(height):
        offset = row * bytes_per_row
        # invert the output so 1=black, 0=white
        row_hex = ['0x{:02x}'.format(~b & 0xff) for b in content[offset:offset+bytes_per_row]]
        # no trailing comma on the last row of the C array
        if row == height - 1:
            output = ' {}'
        else:
            output = ' {},'
        print(output.format(','.join(row_hex)))
    print('};')
def get_library_dirs():
    """
    Return lists of directories likely to contain Arrow C++ libraries for
    linking C or Cython extensions using pyarrow
    """
    import os
    import sys

    package_cwd = os.path.dirname(__file__)
    library_dirs = [package_cwd]

    if sys.platform == 'win32':
        # TODO(wesm): Is this necessary, or does setuptools within a conda
        # installation add Library\lib to the linker path for MSVC?
        site_packages = os.path.dirname(package_cwd)
        python_base_install = os.path.dirname(site_packages)
        library_dirs.append(
            os.path.join(python_base_install, 'Library', 'lib'))

    return library_dirs
def init_trunk_weights(model, branch=None):
    """ Initializes the trunk network weight layer weights.

    Conv2d layers get Kaiming-normal weights; BatchNorm2d layers get
    weight=1, bias=0.

    # Arguments

        branch: string indicating the specific branch to initialize. Default of None will initialize 'push-', 'grasp-' and 'place-'.
    """
    # Initialize network weights
    for module_name, module in model.named_modules():
        # BUG FIX: the original condition mixed `and`/`or` without
        # parentheses, so 'grasp-'/'place-' modules were re-initialized even
        # when a different specific branch was requested.  Group the
        # default-branch test correctly.
        if branch is None:
            selected = ('push-' in module_name or 'grasp-' in module_name
                        or 'place-' in module_name)
        else:
            selected = branch in module_name
        if not selected:
            continue
        if isinstance(module, nn.Conv2d):
            nn.init.kaiming_normal_(module.weight.data)
        elif isinstance(module, nn.BatchNorm2d):
            module.weight.data.fill_(1)
            module.bias.data.zero_()
def get_entity_matched_docs(doc_id_map: List[str], data: List[dict]):
    """Gets the documents where the document name is contained inside the claim

    Args:
        doc_id_map (List[str]): A list of document names
        data (List[dict]): One of the FEVEROUS datasets

    Returns:
        List[List[str]]: A list of lists of the related documents
    """
    related_docs = []
    for sample in tqdm(data):
        claim = sample["claim"]
        # a document matches when its name appears verbatim in the claim;
        # names of 3 characters or fewer are dropped as too ambiguous
        matches = [
            doc_id
            for doc_id in doc_id_map
            if doc_id in claim and len(doc_id) > 3
        ]
        related_docs.append(matches)
    return related_docs
def getTemplateKeys(k):
    """
    Prints out templates key for license or gitignore templates from github api

    Params: str
    Return: code (0 = success, 1 = HTTP error, 2 = invalid argument)
    """
    k = k.lower()
    if k == "license":
        r = requests.get(GITHUB_LICENSE_API)
        if r.status_code != 200:
            # BUG FIX: the JSON body used to be printed even after a failed
            # request, which can raise on a non-JSON error response
            return 1
        print("Github LICENSE template keys: ")
        for item in r.json():
            print(item["key"])
        return 0
    if k == "git":
        r = requests.get(GITHUB_GITIGNORE_API)
        if r.status_code != 200:
            return 1
        print("Github .gitignore template keys: ")
        for item in r.json():
            print(item)
        return 0
    print("Invalid argument for --get-template-keys! : options [git, license]")
    return 2
def test_field_extension_exclude_and_include(app_client, load_test_data):
    """Test POST search including/excluding same field (fields extension)"""
    # first create a test item so the search has something to return
    test_item = load_test_data("test_item.json")
    resp = app_client.post(
        f"/collections/{test_item['collection']}/items", json=test_item
    )
    assert resp.status_code == 200

    # request the very same property in both include and exclude
    body = {
        "fields": {
            "exclude": ["properties.eo:cloud_cover"],
            "include": ["properties.eo:cloud_cover"],
        }
    }
    resp = app_client.post("/search", json=body)
    resp_json = resp.json()
    # exclude is expected to win: the property must be absent from the result
    assert "eo:cloud_cover" not in resp_json["features"][0]["properties"]
def get_num_conv2d_layers(model, exclude_downsample=True, include_linear=True):
    """ Check the number of Conv2D layers. """
    count = 0
    for module_name, module in model.named_modules():
        # optionally skip shortcut/downsample branches
        if exclude_downsample and "downsample" in module_name:
            continue
        if is_conv2d(module) or (include_linear and isinstance(module, nn.Linear)):
            count += 1
    return count
def im_list_to_blob(ims, RGB, NIR, DEPTH):
    """Convert a list of images into a network input.

    Assumes images are already prepared (means subtracted, BGR order, ...).

    :param ims: list of HxWxC images; all must share the channel count
        implied by the flags below
    :param RGB, NIR, DEPTH: modality flags; all three -> 5 channels, any
        two -> 4 channels, otherwise 3 channels
    :return: float32 blob of shape (num_images, max_h, max_w, channels),
        zero-padded on the bottom/right for smaller images
    """
    # pad every image up to the largest height/width in the batch
    max_shape = np.array([im.shape for im in ims]).max(axis=0)
    num_images = len(ims)
    if RGB & NIR & DEPTH:
        # all three modalities stacked: 5 channels
        blob = np.zeros((num_images, max_shape[0], max_shape[1], 5),
                        dtype=np.float32)
    elif (RGB & NIR) | (RGB & DEPTH) | (NIR & DEPTH):
        # exactly two modalities: 4 channels
        blob = np.zeros((num_images, max_shape[0], max_shape[1], 4),
                        dtype=np.float32)
    else:
        # single modality: 3 channels
        blob = np.zeros((num_images, max_shape[0], max_shape[1], 3),
                        dtype=np.float32)
    # NOTE(review): xrange -- this module targets Python 2
    for i in xrange(num_images):
        im = ims[i]
        blob[i, 0:im.shape[0], 0:im.shape[1], :] = im
    return blob
async def async_setup_entry(hass, config_entry, async_add_devices):
    """Set up entry.

    Creates switch entities from the Loxone config: one LoxoneSwitch per
    Pushbutton/Switch control, one LoxoneTimedSwitch per TimedSwitch, and one
    LoxoneIntercomSubControl per Intercom sub-control.
    """
    miniserver = get_miniserver_from_config_entry(hass, config_entry)
    loxconfig = miniserver.lox_config.json
    devices = []

    for switch_entity in get_all_switch_entities(loxconfig):
        if switch_entity["type"] in ["Pushbutton", "Switch"]:
            # resolve room/category UUIDs to display names before constructing
            switch_entity.update(
                {
                    "room": get_room_name_from_room_uuid(
                        loxconfig, switch_entity.get("room", "")
                    ),
                    "cat": get_cat_name_from_cat_uuid(
                        loxconfig, switch_entity.get("cat", "")
                    ),
                }
            )
            new_push_button = LoxoneSwitch(**switch_entity)
            devices.append(new_push_button)
        elif switch_entity["type"] == "TimedSwitch":
            switch_entity.update(
                {
                    "room": get_room_name_from_room_uuid(
                        loxconfig, switch_entity.get("room", "")
                    ),
                    "cat": get_cat_name_from_cat_uuid(
                        loxconfig, switch_entity.get("cat", "")
                    ),
                }
            )
            new_push_button = LoxoneTimedSwitch(**switch_entity)
            devices.append(new_push_button)
        elif switch_entity["type"] == "Intercom":
            if "subControls" in switch_entity:
                # one entity per sub-control, named "<intercom> - <subcontrol>"
                for sub_name in switch_entity["subControls"]:
                    subcontol = switch_entity["subControls"][sub_name]
                    # NOTE(review): `_` aliases `subcontol`; the updates below
                    # mutate that dict in place
                    _ = subcontol
                    _.update(
                        {
                            "name": "{} - {}".format(
                                switch_entity["name"], subcontol["name"]
                            )
                        }
                    )
                    # room/cat come from the parent intercom control
                    _.update(
                        {
                            "room": get_room_name_from_room_uuid(
                                loxconfig, switch_entity.get("room", "")
                            )
                        }
                    )
                    _.update(
                        {
                            "cat": get_cat_name_from_cat_uuid(
                                loxconfig, switch_entity.get("cat", "")
                            )
                        }
                    )
                    new_push_button = LoxoneIntercomSubControl(**_)
                    devices.append(new_push_button)

    async_add_devices(devices, True)
    return True
def displayOutput(win, text):
    """Displays info to the user at the bottom left windows"""
    # one record per comma-separated entry, stacked vertically 25px apart
    records = text.split(",")
    x, y = 215, 485
    for record in records:
        label = Text(Point(x, y), record)
        label.setFill('grey2')
        label.setSize(11)
        label.draw(win)
        y += 25
def gradU_from_momenta(x, p, y, sigma):
    """
    strain F'(x) for momenta p defined at control points y
    a method "convolve_gradient" is doing a similar job but only compute (gradF . z)

    x (M, D)
    p (N, D)
    y (N, D)

    return
    gradU (M, D, D)

    :param sigma: Gaussian kernel width
    """
    # Gaussian kernel of width sigma from deformetrica's torch backend
    kern = deformetrica.support.kernels.factory("torch", gpu_mode=False, kernel_width=sigma)
    # move tensors with respect to gpu_mode
    t_x = torch.tensor(x, device="cpu")
    t_y = torch.tensor(y, device="cpu")
    t_p = torch.tensor(p, device="cpu")
    # A = exp(-(x_i - y_j)^2/(ker^2)).
    sq = kern._squared_distances(t_x, t_y)
    A = torch.exp(-sq / (sigma ** 2)) # M, N
    # B = -2/(ker^2) * (x_i - y_j)*exp(-(x_i - y_j)^2/(ker^2)).
    # NOTE(review): relies on deformetrica's private _differences returning a
    # (D, M, N) tensor -- confirm if the library version changes
    B = (-2/(sigma ** 2)) * kern._differences(t_x, t_y) * A # (D, M, N)
    res = torch.matmul(B, t_p) # (D, M, D)
    # transpose to (M, D, D) and convert back to numpy
    return np.array(res.transpose(0,1))
def testSingleSidedSinewaveBoxcar():
    """
    Tests the power spectrum of a sinewave with no hann window.
    """
    # 21 evenly spaced samples of sin(x) over [-5, 5]
    x = np.arange(-5, 5+0.5, 0.5)
    raw_data = np.sin(x)
    actual_spectrum = power_spectrum(raw_data, window='box',
                                     siding='single')
    # expected values are precomputed constants; non-DC bins carry a factor
    # of 2 (presumably the single-sided fold of negative frequencies)
    desired_spectrum = np.array([1.118000149122749e-34,
                                 2*0.022942929484678257,
                                 2*0.20704159581664763,
                                 2*0.018317774296044642,
                                 2*0.007597511788147477,
                                 2*0.004508971439847654,
                                 2*0.0031729976902471545,
                                 2*0.0024827702154015855,
                                 2*0.0020984476697393016,
                                 2*0.0018876552893358684,
                                 2*0.0017933402731461997])
    assert_allclose(actual_spectrum, desired_spectrum, atol=1e-15)
    # total power across all bins must match the precomputed constant exactly
    total_power = np.sum(desired_spectrum)
    assert_equal(total_power, 0.5436879879264717)
def rules_cuda_dependencies(with_rules_cc = True):
    """Loads rules_cuda dependencies. To be called from WORKSPACE file.

    Declares bazel_skylib and platforms (only if the workspace has not
    already defined them), sets up the local CUDA repository, and optionally
    loads/patches rules_cc.

    Args:
        with_rules_cc: whether to load and patch rules_cc repository.
    """
    # maybe() is a no-op when the repository already exists in the workspace
    maybe(
        name = "bazel_skylib",
        repo_rule = http_archive,
        sha256 = "97e70364e9249702246c0e9444bccdc4b847bed1eb03c5a3ece4f83dfe6abc44",
        urls = [
            "https://mirror.bazel.build/github.com/bazelbuild/bazel-skylib/releases/download/1.0.2/bazel-skylib-1.0.2.tar.gz",
            "https://github.com/bazelbuild/bazel-skylib/releases/download/1.0.2/bazel-skylib-1.0.2.tar.gz",
        ],
    )
    maybe(
        name = "platforms",
        repo_rule = http_archive,
        sha256 = "48a2d8d343863989c232843e01afc8a986eb8738766bfd8611420a7db8f6f0c3",
        urls = [
            "https://mirror.bazel.build/github.com/bazelbuild/platforms/releases/download/0.0.2/platforms-0.0.2.tar.gz",
            "https://github.com/bazelbuild/platforms/releases/download/0.0.2/platforms-0.0.2.tar.gz",
        ],
    )
    # local CUDA toolchain discovery repository
    _local_cuda(name = "local_cuda")
    if with_rules_cc:
        _rules_cc()
def summarize_gp(gp_model, max_num_points=None, weight_function=None):
    """Summarize the GP model with a fixed number of data points inplace.

    Parameters
    ----------
    gp_model : gpytorch.models.ExactGPModel
    max_num_points : int
        The maximum number of data points to use.
    weight_function: Callable[[torch.Tensor], torch.Tensor]
        weighing_function that computes the weight of each input.
    """
    inputs = gp_model.train_inputs[0]
    targets = gp_model.train_targets

    # Can add all data points directly
    if max_num_points is None or len(inputs) <= max_num_points:
        return

    # Remove all data points but one
    # NOTE(review): strict=False presumably permits replacing the cached
    # training data with a different shape -- confirm in the gpytorch docs
    gp_model.set_train_data(
        inputs[0].unsqueeze(0), targets[0].unsqueeze(-1), strict=False
    )
    gp_model.eval()

    # Greedily re-add the remaining max_num_points - 1 most informative points
    for _ in range(max_num_points - 1):
        with gpytorch.settings.fast_pred_var():
            # The set function to maximize is f_s = log det (I + \lambda^2 K_s).
            # Greedy selection resorts to sequentially selecting the index that solves
            # i^\star = \arg max_i log (1 + \lambda^2 K_(i|s))
            # This is equivalent to doing \arg max_i (1 + \lambda^2 K_(i|s)) and to
            # i^\star = \arg max_i K_(i|s).
            # Hence, the point with greater predictive variance is selected.
            # torch.log(1 + pred.variance)
            pred_var = gp_model(inputs).variance
            if weight_function is not None:
                pred_var = torch.log(1 + pred_var) * weight_function(inputs)
            index = torch.argmax(pred_var)
            new_input = inputs[index].unsqueeze(0)
            new_target = targets[index].unsqueeze(-1)

            # Once enabled use this
            # gp_model = gp_model.get_fantasy_model(new_input, new_target)
            add_data_to_gp(gp_model, new_input, new_target)

            # Remove data from input space
            idx = int(index.item())
            inputs = torch.cat((inputs[:idx], inputs[idx + 1 :]), dim=0)
            targets = torch.cat((targets[:idx], targets[idx + 1 :]), dim=-1)
def process(seed, K):
    """
    K is model order / number of zeros

    Builds K random dirac locations, forms the polynomial whose roots are the
    corresponding unit-circle zeros, re-estimates the roots numerically, and
    returns the estimation error in dB for (a) the raw estimated roots and
    (b) the roots projected back onto the unit circle.

    :param seed: RNG seed for reproducible dirac locations
    :param K: model order / number of zeros
    :return: (_err_roots, _err_unit) in dB
    """
    print(K, end=" ")
    # create the dirac locations with many, many points
    rng = np.random.RandomState(seed)
    tk = np.sort(rng.rand(K)*period)

    # true zeros (on the unit circle by construction: |exp(-j*theta)| = 1)
    uk = np.exp(-1j*2*np.pi*tk/period)
    coef_poly = poly.polyfromroots(uk) # more accurate than np.poly

    # estimate zeros (np.roots expects highest-degree coefficient first)
    uk_hat = np.roots(np.flipud(coef_poly))

    # place on unit circle?
    uk_hat_unit = uk_hat / np.abs(uk_hat)

    # compute error in dB relative to the norm of the true zeros
    # NOTE(review): distance() presumably returns the minimum deviation norm
    # as its first element -- confirm against its definition
    min_dev_norm = distance(uk, uk_hat)[0]
    _err_roots = 20*np.log10(np.linalg.norm(uk)/min_dev_norm)

    min_dev_norm = distance(uk, uk_hat_unit)[0]
    _err_unit = 20*np.log10(np.linalg.norm(uk)/min_dev_norm)

    return _err_roots, _err_unit
def convert_to_file(cgi_input, output_file, twobit_ref):
    """Convert a CGI var file and output VCF-formatted data to file.

    :param cgi_input: CGI var input, passed through to convert()
    :param output_file: open file object, or a path string that will be
        opened via auto_zip_open; closed on completion either way
    :param twobit_ref: reference genome in twobit format, passed to convert()
    """
    # NOTE(review): basestring -- this module targets Python 2
    if isinstance(output_file, basestring):
        output_file = auto_zip_open(output_file, 'wb')

    conversion = convert(cgi_input, twobit_ref)  # set up generator
    for line in conversion:
        output_file.write(line + "\n")
    output_file.close()
def _gen_sieve_array(M, factor_base):
"""Sieve Stage of the Quadratic Sieve. For every prime in the factor_base
that doesn't divide the coefficient `a` we add log_p over the sieve_array
such that ``-M <= soln1 + i*p <= M`` and ``-M <= soln2 + i*p <= M`` where `i`
is an integer. When p = 2 then log_p is only added using
``-M <= soln1 + i*p <= M``.
Parameters:
===========
M : sieve interval
factor_base : factor_base primes
"""
sieve_array = [0]*(2*M + 1)
for factor in factor_base:
if factor.soln1 is None: #The prime does not divides a
continue
for idx in range((M + factor.soln1) % factor.prime, 2*M, factor.prime):
sieve_array[idx] += factor.log_p
if factor.prime == 2:
continue
#if prime is 2 then sieve only with soln_1_p
for idx in range((M + factor.soln2) % factor.prime, 2*M, factor.prime):
sieve_array[idx] += factor.log_p
return sieve_array | 34,666 |
def yolo_eval_weighted_nms(yolo_outputs,
                           anchors,
                           num_classes,
                           image_shape,
                           score_threshold=.6):
    """ yolo evaluate

    Args:
        yolo_outputs: [batch, 13, 13, 3*85]
        anchors: [9, 2]
        num_classes: num of your own classes
        image_shape: the shape of original image
        score_threshold: when score > score threshold, the anchor is positive
    Returns:
        boxes_, scores_, classes_: per-class lists of boxes, scores and
        class-index tensors (one entry per class)
    """
    num_layers = len(yolo_outputs)
    # each output layer uses 3 of the 9 anchors (presumably the first layer
    # gets the largest anchors -- confirm against the anchor file)
    anchor_mask = [[6, 7, 8], [3, 4, 5], [0, 1, 2]]
    # network input resolution: first feature-map size times stride 32
    input_shape = K.shape(yolo_outputs[0])[1:3] * 32
    boxes = []
    box_scores = []
    # decode each layer's raw output into boxes + per-class scores
    for l in range(num_layers):
        _boxes, _box_scores = yolo_boxes_and_scores(yolo_outputs[l],
                                                    anchors[anchor_mask[l]],
                                                    num_classes,
                                                    input_shape,
                                                    image_shape,
                                                    l)
        boxes.append(_boxes)
        box_scores.append(_box_scores)
    boxes = K.concatenate(boxes, axis=0)
    box_scores = K.concatenate(box_scores, axis=0)

    mask = box_scores >= score_threshold
    boxes_ = []
    scores_ = []
    classes_ = []
    for c in range(num_classes):
        # get positive anchors by using box_scores >= score_threshold
        class_boxes = tf.boolean_mask(boxes, mask[:, c])
        class_box_scores = tf.boolean_mask(box_scores[:, c], mask[:, c])
        classes = K.ones_like(class_box_scores, 'int32') * c
        boxes_.append(class_boxes)
        scores_.append(class_box_scores)
        classes_.append(classes)
    return boxes_, scores_, classes_
def next_coach_id():
    """
    Generates the next id for newly added coaches, since their slugs (which combine the id and name fields)
    are added post-commit.

    Returns 1 when no coaches exist yet: the Max aggregate is None in that
    case, which previously raised ``TypeError: unsupported operand`` on the
    ``None + 1`` addition.
    """
    max_id = Coach.objects.aggregate(Max("id"))['id__max']
    return 1 if max_id is None else max_id + 1
def get_unsigned_short(data, index):
    """Return two bytes from data as an unsigned 16-bit value"""
    # little-endian: low byte comes first, high byte second
    low = data[index]
    high = data[index + 1]
    return (high << 8) + low
def getObjDetRoI(imgSize, imgPatchSize, objx1, objy1, objx2, objy2):
    """
    Get region of interest (ROI) for a given object detection with respect to image and image patch boundaries.

    :param imgSize: size of the image of interest, (height, width) (e.g., [1920x1080]).
    :param imgPatchSize: Patch size of the image patch of interest (e.g., 192).
    :param objx1: Upper left x coordinate of the object detection.
    :param objy1: Upper left y coordinate of the object detection.
    :param objx2: Lower right x coordinate of the object detection.
    :param objy2: Lower right y coordinate of the object detection.
    :return: (startX, startY, endX, endY) with the lower-right corner grown
        to the next patch-size multiple and clamped to the image bounds.
    """
    # Cast to float values for calculations
    startX = float(objx1)
    startY = float(objy1)
    endX = float(objx2)
    endY = float(objy2)

    # Grow the box so each side becomes a multiple of the patch size
    # (a side that is already a multiple is still grown by one full patch)
    endX += imgPatchSize - ((endX - startX) % imgPatchSize)
    endY += imgPatchSize - ((endY - startY) % imgPatchSize)

    # Clamp to the image boundaries; imgSize is (height, width)
    endX = min(endX, imgSize[1])
    endY = min(endY, imgSize[0])
    return startX, startY, endX, endY
def sigma_pp(b):
    """pair production cross section

    b is presumably the velocity parameter beta of the produced pair.
    """
    # keep the original evaluation order: sigma_T * 3/16 * (1 - b^2) * [...]
    prefactor = sigma_T * 3.0 / 16.0 * (1 - b ** 2)
    bracket = 2 * b * (b ** 2 - 2) + (3 - b ** 4) * np.log((1 + b) / (1 - b))
    return prefactor * bracket
def plot_shots_per_lens(subplot: matplotlib.axes.Axes, data: pd.DataFrame) -> None:
    """
    Barplot of the number of shots per lens used, on the provided subplot. Acts in place.

    Args:
        subplot: the subplot plt.axes on which to plot.
        data: the pandas DataFrame with your exif data.

    Returns:
        Nothing, plots in place.
    """
    logger.debug("Plotting shots per lens")
    # bars ordered by descending shot count; hue splits each lens by brand
    sns.countplot(
        y="Lens", hue="Brand", data=data, ax=subplot, order=data.Lens.value_counts().index
    )
    subplot.set_title("Number of Shots per Lens Model", fontsize=25)
    subplot.tick_params(axis="both", which="major", labelsize=13)
    subplot.set_xlabel("Number of Shots", fontsize=20)
    subplot.set_ylabel("Lens Model", fontsize=20)
    subplot.legend(loc="lower right", fontsize=18, title_fontsize=25)
def compute_radii_simple(distances):
    """
    Compute the radius for every hypersphere given the pairwise distances
    to satisfy Eq. 6 in the paper. Does not implement the heuristic described
    in section 3.5.
    """
    # the radius of each hypersphere is the median of its row of distances
    n_inputs = tf.shape(distances)[1]
    median_index = n_inputs // 2
    ordered = tf.sort(distances, direction="ASCENDING", axis=-1)
    return ordered[:, median_index]
def data_resolution_and_offset(data, fallback_resolution=None):
    """Compute resolution and offset from x/y axis data.

    Only uses first two coordinate values, assumes that data is regularly
    sampled.

    Returns
    =======
    (resolution: float, offset: float)
    """
    if data.size >= 2:
        # average spacing between the first and last sample
        res = (data[data.size - 1] - data[0]) / (data.size - 1.0)
        res = res.item()
    elif data.size == 1:
        if fallback_resolution is None:
            raise ValueError("Can't calculate resolution with data size < 2")
        res = fallback_resolution
    else:
        raise ValueError("Can't calculate resolution for empty data")

    # the offset is half a resolution step before the first sample
    off = data[0] - 0.5 * res
    return res, off.item()
def show_with_memory_profiler(times, cards_fn, suites, numbers,
                              print_results=False):
    """Show results and the memory profile of cards_fn.

    :param times: unused here -- NOTE(review): presumably kept for signature
        parity with a timing counterpart; confirm before removing
    :param cards_fn: function building cards from suites and numbers
    :param suites: suite values forwarded to cards_fn
    :param numbers: number values forwarded to cards_fn
    :param print_results: when True, also print the sorted cards
    """
    # run once under memory_profiler to emit the line-by-line memory report
    fun = memory_profiler.profile(cards_fn)
    fun(suites, numbers)
    # run again un-profiled to obtain the plain result for optional display
    cards = cards_fn(suites, numbers)
    if print_results:
        print(f'cards: {sorted(cards)!r}')
def get_changed_files_committed_and_workdir(
    repo: Git, commithash_to_compare: str
) -> List[str]:
    """Get changed files between given commit and the working copy"""
    # `git diff --name-only <commit>` prints one path per line
    diff_output = repo.repo.git.diff("--name-only", commithash_to_compare)
    return diff_output.split()
def load_document_by_string(
    string: str, uri: str, loadingOptions: Optional[LoadingOptions] = None
) -> Any:
    """Load a CWL object from a serialized YAML string."""
    # parse without timestamp conversion, then delegate to the YAML loader
    parsed = yaml_no_ts().load(string)
    return load_document_by_yaml(parsed, uri, loadingOptions)
def rename_symbol(symbol):
    """Rename the given symbol.

    If it is a C symbol, prepend FLAGS.rename_string to the symbol, but
    account for the symbol possibly having a prefix via split_symbol().
    If it is a C++ symbol, prepend FLAGS.rename_string to all instances of the
    given namespace.

    Args:
      symbol: C or C++ symbol to rename.

    Returns:
      Dictionary, keys = old symbols, values = renamed symbols.
    """
    new_renames = {}
    if is_cpp_symbol(symbol):
        # Scan through the symbol looking for the namespace name, then modify it.
        new_symbol = symbol
        if FLAGS.platform in ["linux", "android", "darwin", "ios"]:
            for ns in FLAGS.hide_cpp_namespaces:
                if symbol_includes_cpp_namespace(symbol, ns):
                    # Linux and Darwin: To rename "namespace" to "prefixnamespace",
                    # change all instances of "9namespace" to "15prefixnamespace".
                    # (the number is the length of the namespace name)
                    # See https://itanium-cxx-abi.github.io/cxx-abi/abi.html#mangling
                    new_ns = FLAGS.rename_string + ns
                    # (?<=[^0-9]) guards against matching inside a longer length
                    # prefix such as "19..."
                    new_symbol = re.sub("(?<=[^0-9])%d%s" % (len(ns), ns),
                                        "%d%s" % (len(new_ns), new_ns), new_symbol)
                    new_renames[symbol] = new_symbol
        elif FLAGS.platform == "windows":
            for ns in FLAGS.hide_cpp_namespaces:
                if symbol_includes_cpp_namespace(symbol, ns):
                    # Windows: To rename "namespace" to "prefixnamespace",
                    # change all instances of "@namespace@@" to "@prefixnamespace@@".
                    # See https://msdn.microsoft.com/en-us/library/56h2zst2.aspx
                    new_ns = FLAGS.rename_string + ns
                    new_symbol = re.sub("@%s@@" % ns, "@%s@@" % new_ns, new_symbol)
                    new_renames[symbol] = new_symbol
    else:
        if FLAGS.platform == "windows" and symbol.startswith("$LN"):
            # Don't rename $LN*, those are local symbols.
            return new_renames
        # C symbol. Just split, rename, and re-join.
        (prefix, remainder) = split_symbol(symbol)
        new_symbol = prefix + FLAGS.rename_string + remainder
        new_renames[symbol] = new_symbol
        # also register the rename for any platform-specific decorated variants
        # (e.g. extra leading prefixes) of this C symbol
        for added_prefix in _additional_symbol_prefixes.get(FLAGS.platform, []):
            new_renames[added_prefix + symbol] = new_renames[symbol]
    return new_renames
def py_time(data):
    """Parse an 'HH:MM:SS' or 'HH:MM:SS.ffffff' string into a datetime.time."""
    # The presence of a '.' decides whether fractional seconds are expected.
    fmt = '%H:%M:%S.%f' if '.' in data else '%H:%M:%S'
    return datetime.datetime.strptime(data, fmt).time()
def create_empty_copy(G, with_nodes=True):
    """Return a copy of the graph G with all of the edges removed.

    Parameters
    ----------
    G : graph
        A NetworkX graph
    with_nodes : bool (default=True)
        Include nodes.

    Notes
    -----
    Graph, node, and edge data is not propagated to the new graph.
    """
    # Instantiate the same graph class as G so the copy matches its type.
    empty = G.__class__()
    if with_nodes:
        empty.add_nodes_from(G)
    return empty
def trim_resize_frame(frame, resize_ratio, trim_factor):
    """
    Resize a frame according to the specified ratio while
    keeping the original aspect ratio, then center-trim
    the longer side of the frame according to the specified
    factor.

    Parameters
    ----------
    frame: np.array
        The input frame (assumed H x W x C -- TODO confirm with callers).
    resize_ratio: float
        Resize factor applied to both axes.
    trim_factor: float
        Trim factor for the longer side of the frame. Must
        be btw 0 and 1.

    Returns
    ----------
    np.array
        Resized and trimmed frame.
    """
    frame = cv2.resize(
        frame, dsize=(0, 0), fx=resize_ratio, fy=resize_ratio)
    # BUG FIX: the original used a single conditional expression
    #   (True, w if w > h else (False, h))
    # where the ternary bound only the second tuple element, so for portrait
    # frames the "length" variable became the tuple (False, h) and the later
    # int(length * trim_factor) raised TypeError. Use an explicit branch.
    if frame.shape[1] > frame.shape[0]:
        hor_longer, longer_len = True, frame.shape[1]
    else:
        hor_longer, longer_len = False, frame.shape[0]
    keep_len = int(longer_len * trim_factor)
    # Center the kept span within the longer side.
    start = int(max(longer_len - keep_len, 0) / 2)
    if hor_longer:
        frame = frame[:, start:start + keep_len, :]
    else:
        frame = frame[start:start + keep_len, :, :]
    return frame
def copy_doclist(doclist, no_copy=None):
    """
    Save & return a copy of the given doclist.
    Pass fields that are not to be copied in `no_copy`.

    Args:
        doclist: list of Documents; the first entry is the parent doc,
            the rest are its children.
        no_copy: optional iterable of fieldnames blanked out in every copy.

    Returns:
        list of newly saved Document copies (parent first, children
        re-parented to the new parent's generated name).
    """
    # Avoid the shared mutable-default-argument pitfall of `no_copy=[]`.
    if no_copy is None:
        no_copy = []
    cl = []
    # main doc
    c = Document(fielddata=doclist[0].fields.copy())
    # clear no_copy fields
    for f in no_copy:
        # `f in dict` works on Python 2 and 3; dict.has_key() was removed in 3.
        if f in c.fields:
            c.fields[f] = None
    c.name = None  # force the framework to assign a fresh name on save
    c.save(1)
    cl.append(c)
    # new parent name
    parent = c.name
    # children
    for d in doclist[1:]:
        c = Document(fielddata=d.fields.copy())
        c.name = None
        # clear no_copy fields
        for f in no_copy:
            if f in c.fields:
                c.fields[f] = None
        c.parent = parent
        c.save(1)
        cl.append(c)
    return cl
def set_from_tags(tags, title, description, all=True):
    """Create a Flickr photoset from the current user's photos matching tags.

    Args:
        tags: tags to search the logged-in user's photos for.
        title: title of the new set.
        description: description of the new set.
        all: all=True means include non-public photos.
            (Parameter name shadows the builtin `all`; kept for
            backward compatibility with existing callers.)

    Returns:
        The newly created flickr.Photoset.
    """
    user = flickr.test_login()
    photos = flickr.photos_search(user_id=user.id, auth=all, tags=tags)
    # Renamed from `set`, which shadowed the builtin set type.
    photo_set = flickr.Photoset.create(photos[0], title, description)
    photo_set.editPhotos(photos)
    return photo_set
def submit():
    """Upload local file.

    Needs to follow the station register template.

    Renders 'upload_file.html'; on a POST that carries an 'uploaded_file'
    form field, re-renders it with connect_to_reg=True.
    """
    spec = get_layout_active_spec('Upload')
    if request.method == 'POST':
        # Target path: <UPLOAD_FOLDER>/<yymmdd>/<submitted name>.
        # NOTE(review): request.form.get() returns None when the field is
        # missing, which would make os.path.join() raise TypeError -- confirm
        # the form always posts 'uploaded_file'. As written, the `if filename`
        # check below can never be falsy (os.path.join never returns '').
        filename = os.path.join(
            app.config['UPLOAD_FOLDER'],
            datetime.date.today().strftime('%y%m%d'),
            request.form.get('uploaded_file')
        )
        if filename:
            return render_template('upload_file.html',
                                   active_spec=spec,
                                   connect_to_reg=True)
    return render_template('upload_file.html', active_spec=spec)
def test_true():
    """True case test: both implementations locate the pair summing to 9."""
    for solver in (two_sum, two_sum_dict):
        assert solver([1, 2, 5, 6, 7], 9) == [1, 4]
def heg_kfermi(rs):
    """Magnitude of the Fermi k vector for the homogeneous electron gas (HEG).

    Args:
        rs (float): Wigner-Seitz radius
    Return:
        float: kf
    """
    # One electron per Wigner-Seitz sphere of radius rs.
    n_elec = (4*np.pi*rs**3/3)**(-1)
    # Free-fermion relation: kf = (3 pi^2 n)^(1/3).
    return (3*np.pi**2*n_elec)**(1./3)
def stock_em_jgdy_detail():
    """
    Eastmoney data center - featured data - institutional research details.
    http://data.eastmoney.com/jgdy/xx.html
    :return: institutional research detail table
    :rtype: pandas.DataFrame
    """
    url = "http://datainterface3.eastmoney.com/EM_DataCenter_V3/api/JGDYMX/GetJGDYMX"
    params = {
        "js": "datatable8174128",
        "tkn": "eastmoney",
        "secuCode": "",
        "dateTime": "",
        "sortfield": "0",
        "sortdirec": "1",
        "pageNum": "1",
        "pageSize": "5000",
        "cfg": "jgdymx",
        "_": "1605088363693",
    }
    response = requests.get(url, params=params)
    # The endpoint returns JSONP; strip the "callback(...)" wrapper before parsing.
    body = response.text
    data_json = json.loads(body[body.find("(") + 1:-1])
    block = data_json["Data"][0]
    # Rows are pipe-delimited strings; the trailing split column is padding.
    frame = pd.DataFrame([row.split("|") for row in block["Data"]])
    frame.columns = block["FieldName"].split(",") + ["_"]
    return frame.iloc[:, :-1]
def intersect_with_grid(int_coords, fill=False):
    """Intersect projected coordinates with the NDFD grid.

    Args:
        int_coords: projected coordinates to be used for intersection
        fill: whether to include the interior of the intersected cells, i.e.
            if box coords intersect cells (0,0) and (4,4), include the entire
            25-cell block

    Returns:
        GeoDataFrame with three columns:
        - x: x coordinate of NDFD grid (a higher x appears to move south)
        - y: y coordinate of NDFD grid (a higher y appears to move east)
        - geometry: geometry of the grid cell, reprojected back into WGS84
    """
    grid_path = create_grid()
    with rasterio.Env(), rasterio.open(grid_path) as src:
        # Map each coordinate to its raster cell index; a set removes duplicates.
        cells = {src.index(*coord) for coord in int_coords}
        if fill:
            cells = fill_cells(cells)
        # One box per cell, spanning its lower-left to upper-right corners.
        rows = [
            [x, y, box(*src.xy(x, y, 'll'), *src.xy(x, y, 'ur'))]
            for x, y in cells
        ]
        grid = gpd.GeoDataFrame(
            rows, columns=['x', 'y', 'geometry'], crs=constants.crs)
        return grid.to_crs(epsg=4326)
def safe_epsilon_softmax(epsilon, temperature):
    """Tolerantly handles the temperature=0 case.

    Builds a DiscreteDistribution that dispatches to the epsilon-softmax
    distribution when temperature > 0 and falls back to epsilon-greedy
    otherwise. The branch is taken inside jax.lax.cond so the result stays
    traceable/jittable.

    NOTE(review): these calls use the legacy multi-operand jax.lax.cond
    signature (pred, true_operand, true_fn, false_operand, false_fn), which
    recent jax releases removed -- confirm the pinned jax version supports it.
    """
    egreedy = epsilon_greedy(epsilon)
    unsafe = epsilon_softmax(epsilon, temperature)
    def sample_fn(key: Array, logits: Array):
        # Sample from the softmax branch only when it is numerically safe.
        return jax.lax.cond(temperature > 0,
                            (key, logits), lambda tup: unsafe.sample(*tup),
                            (key, logits), lambda tup: egreedy.sample(*tup))
    def probs_fn(logits: Array):
        return jax.lax.cond(temperature > 0,
                            logits, unsafe.probs,
                            logits, egreedy.probs)
    def log_prob_fn(sample: Array, logits: Array):
        return jax.lax.cond(temperature > 0,
                            (sample, logits), lambda tup: unsafe.logprob(*tup),
                            (sample, logits), lambda tup: egreedy.logprob(*tup))
    def entropy_fn(logits: Array):
        return jax.lax.cond(temperature > 0,
                            logits, unsafe.entropy,
                            logits, egreedy.entropy)
    def kl_fn(p_logits: Array, q_logits: Array):
        # KL is computed directly (not via cond); the helper takes temperature.
        return categorical_kl_divergence(p_logits, q_logits, temperature)
    return DiscreteDistribution(sample_fn, probs_fn, log_prob_fn, entropy_fn,
                                kl_fn)
def get_object_from_controller(object_type, object_name, controller_ip, username, password, tenant):
    """
    Fetch an object by name from the controller, raising on failure.

    :param object_type: type of the object to fetch (e.g. 'virtualservice')
    :param object_name: name of the object to fetch
    :param controller_ip: ip of controller
    :param username: username of controller
    :param password: password of controller
    :param tenant: tenant of controller
    :return: the object returned by the controller API
    :raises Exception: if the session lookup fails for any reason
    """
    # Create new session
    session = ApiSession.get_session(controller_ip, username,
                                     password=password, tenant=tenant)
    try:
        return session.get_object_by_name(object_type, object_name)
    except Exception as err:
        # Bug fix: the original called Exception("...", exc_info=True), which
        # is itself a TypeError (Exception takes no keyword arguments), and
        # used a bare `except:`. Chain the cause to preserve the traceback.
        raise Exception("Failed get %s" % object_name) from err
def dn2rdn_Hyspex(rdn_image_file, dn_image_file, radio_cali_file, acquisition_time):
    """ Do Hyspex radiometric calibration.

    Converts a Hyspex DN (digital-number) image into radiance using per-pixel
    gain/offset calibration coefficients, writing a float32 BIL image and a
    matching ENVI header. Returns None; all output goes to `rdn_image_file`.

    Arguments:
        rdn_image_file: str
            Radiance image filename.
        dn_image_file: str
            Hyspex DN image filename.
        radio_cali_file: str
            Hyspex radiometric calibration coefficients filename.
        acquisition_time: datetime object
            Acquisition time.
    """
    if os.path.exists(rdn_image_file):
        # Early exit: output already exists and is reused as-is.
        # NOTE(review): the log wording ("Write...") is misleading here.
        logger.info('Write the radiance image to %s.' %rdn_image_file)
        return
    from ENVI import empty_envi_header, read_envi_header, write_envi_header
    # Read calibration coefficients.
    # Band 0 holds the gain; bands 1 (and 2, when present) hold the offset(s).
    radio_cali_header = read_envi_header(os.path.splitext(radio_cali_file)[0]+'.hdr')
    radio_cali_coeff = np.memmap(radio_cali_file,
                                 dtype='float64',
                                 mode='r',
                                 shape=(radio_cali_header['bands'],
                                        radio_cali_header['lines'],
                                        radio_cali_header['samples']))
    wavelengths = np.array([float(v) for v in radio_cali_header['waves'].split(',')])
    fwhms = np.array([float(v) for v in radio_cali_header['fwhms'].split(',')])
    # Read DN image (memory-mapped; never fully loaded into RAM).
    dn_header = read_envi_header(os.path.splitext(dn_image_file)[0]+'.hdr')
    dn_image = np.memmap(dn_image_file,
                         dtype='uint16',
                         mode='r',
                         offset=dn_header['header offset'],
                         shape=(dn_header['lines'],
                                dn_header['bands'],
                                dn_header['samples']))
    # Get gain coefficients.
    gain = radio_cali_coeff[0,:,:] # shape=(bands, samples)
    # Do radiometric calibration, processing 500 lines at a time to bound memory.
    info = 'Line (max=%d): ' %dn_header['lines']
    fid = open(rdn_image_file, 'wb')
    for from_line in range(0, dn_header['lines'], 500):
        info += '%d, ' %(from_line+1)
        # Determine chunck size.
        to_line = min(from_line+500, dn_header['lines'])
        # Get offset coefficients.
        if radio_cali_header['bands']==2:
            # Single static background: same offset for every line.
            offset = radio_cali_coeff[1,:,:] # shape=(bands, samples)
        else:
            # Two backgrounds (start and end of acquisition): interpolate
            # linearly along the flight line.
            background = np.stack([radio_cali_coeff[1,:,:]]*(to_line-from_line))
            backgroundLast = np.stack([radio_cali_coeff[2,:,:]]*(to_line-from_line))
            factor = np.arange(from_line, to_line)/dn_header['lines']
            offset = background+(backgroundLast-background)*factor[:,np.newaxis, np.newaxis] # shape=(to_line-from_line, bands, samples)
            del background, backgroundLast, factor
        # Convert DN to radiance.
        rdn = (dn_image[from_line:to_line,:,:].astype('float32')-offset)*gain # shape=(to_line-from_line, bands, samples)
        # Write radiance to the file.
        # NOTE(review): ndarray.tostring() is a deprecated alias of tobytes()
        # (removed in NumPy 2.0) -- confirm the pinned NumPy version.
        fid.write(rdn.astype('float32').tostring())
        # Clear temporary data.
        del rdn, to_line, offset
    fid.close()
    info += '%d, Done!' %dn_header['lines']
    logger.info(info)
    # Clear data.
    dn_image.flush()
    radio_cali_coeff.flush()
    del gain, from_line
    del dn_image, radio_cali_coeff
    # Write header.
    rdn_header = empty_envi_header()
    rdn_header['description'] = 'Hyspex radiance in mW/(cm2*um*sr)'
    rdn_header['file type'] = 'ENVI Standard'
    rdn_header['samples'] = dn_header['samples']
    rdn_header['lines'] = dn_header['lines']
    rdn_header['bands'] = dn_header['bands']
    rdn_header['byte order'] = 0
    rdn_header['header offset'] = 0
    rdn_header['interleave'] = 'bil'
    rdn_header['data type'] = 4
    rdn_header['wavelength'] = list(wavelengths)
    rdn_header['fwhm'] = list(fwhms)
    rdn_header['wavelength units'] = 'nm'
    rdn_header['default bands'] = dn_header['default bands']
    rdn_header['acquisition time'] = acquisition_time.strftime('%Y-%m-%dT%H:%M:%S.%f')
    write_envi_header(os.path.splitext(rdn_image_file)[0]+'.hdr', rdn_header)
    del radio_cali_header, dn_header, rdn_header
    logger.info('Write the radiance image to %s.' %rdn_image_file)
def scale_reshaping(scale: np.ndarray,
                    op2d: common.BaseNode,
                    kernel_channel_mapping: DefaultDict,
                    in_channels: bool = True) -> np.ndarray:
    """
    Before scaling a kernel, the scale factor needs to be reshaped to the correct
    dimensions. This is a function of the layer that is scaled and whether its input channels or
    output channels should be scaled.
    The index of the correct kernel axis is obtained from kernel_channel_mapping.

    Args:
        scale: Scale factor to scale the kernel channels by.
        op2d: Node to scale its kernel.
        kernel_channel_mapping: Mapping from a layer type to a tuple of indices of its output/input kernel channels.
        in_channels: Whether to broadcast along the kernel's input-channels axis (True) or output-channels axis (False).

    Returns:
        The scale factor after reshaping it to the correct shape.
    """
    op_ndims = op2d.get_weights_by_keys(KERNEL).ndim
    # Bug fix: np.int was removed in NumPy 1.24; the builtin `int` is the
    # documented drop-in replacement and behaves identically here.
    reshape_target = np.ones(op_ndims, dtype=int)
    # int(in_channels) picks tuple index 0 (output axis) or 1 (input axis).
    reshape_target[kernel_channel_mapping.get(op2d.type)[int(in_channels)]] = -1
    return np.reshape(scale, reshape_target)
def process_metadata(split_name, caption_data, image_dir):
    """Process the captions and combine the data into a list of ImageMetadata.

    Args:
        split_name: A train/test/val split name.
        caption_data: caption file containing caption annotations.
        image_dir: Directory containing the image files.

    Returns:
        A list of ImageMetadata for the entries whose image file exists.
    """
    print("Processing image-text...")
    id_to_captions = {}
    image_metadata = []
    num_captions = 0
    count = 0
    for img in caption_data:
        count += 1
        label = img['id']
        filename = os.path.join(image_dir, img['file_path'])
        print(filename)
        # Skip entries whose image file is missing. (The original followed
        # this check with `assert os.path.exists(filename)`, which could
        # never fail and has been removed as dead code.)
        if not os.path.exists(filename):
            continue
        captions = img['processed_tokens']
        id_to_captions.setdefault(label, []).append(captions)
        split = img['split']
        assert split == split_name
        image_metadata.append(ImageMetadata(label, filename, captions, split))
        num_captions += len(captions)
        if len(captions) > 2:
            print("index %d with %d captions" % (count, len(captions)))
    num_examples = len(caption_data)
    num_classes = len(id_to_captions)
    print("Finished processing %d captions for %d images of %d identities in %s" %
          (num_captions, num_examples, num_classes, split_name))
    # Write out the data preparation information.
    output_file = '%s/%s_data_info.txt' % (FLAGS.output_dir, split_name)
    with tf.gfile.FastGFile(output_file, "w") as f:
        f.write("Finished processing %d captions for %d images of %d identities in %s." %
                (num_captions, num_examples, num_classes, split_name))
    return image_metadata
def test_circuits_update_interface(runner, circuit, interface):
    """ Test updating a circuit's Z side interface """
    with runner.isolated_filesystem():
        command = 'circuits update -i {0} -Z {1}'.format(
            circuit['name'], interface['id']
        )
        result = runner.run(command)
        assert_output(result, ['Updated circuit!'])
def add_new_exif(info):
    """
    Create an ExifInfo record (detail table) from a parsed EXIF tag mapping.

    :param info: mapping from EXIF tag name to value; missing tags become None
    :return: a new ExifInfo instance
    """
    # (constructor kwarg, EXIF tag name) pairs, in constructor order.
    field_tags = [
        ('make', 'Image Make'),
        ('model', 'Image Model'),
        ('orientation', 'Image Orientation'),
        ('date_original', 'EXIF DateTimeOriginal'),
        ('x_resolution', 'Image XResolution'),
        ('y_resolution', 'Image YResolution'),
        ('resolution_unit', 'Image ResolutionUnit'),
        ('artist', 'Image Artist'),
        ('copyright', 'Image Copyright'),
        ('software', 'Image Software'),
        ('img_length', 'EXIF ExifImageLength'),
        ('img_width', 'EXIF ExifImageWidth'),
        ('exposure_time', 'EXIF ExposureTime'),
        ('exposure_program', 'EXIF ExposureProgram'),
        ('exposure_bias', 'EXIF ExposureBiasValue'),
        ('exposure_mode', 'EXIF ExposureMode'),
        ('fnumber', 'EXIF FNumber'),
        ('sensitivity', 'EXIF ISOSpeedRatings'),
        ('metering_mode', 'EXIF MeteringMode'),
        ('flash', 'EXIF Flash'),
        ('focal_len', 'EXIF FocalLength'),
        ('white_balance', 'EXIF WhiteBalance'),
        ('gps_latitude_ref', 'GPS GPSLatitudeRef'),
        ('gps_latitude', 'GPS GPSLatitude'),
        ('gps_longitude_ref', 'GPS GPSLongitudeRef'),
        ('gps_longitude', 'GPS GPSLongitude'),
        ('gps_altitude', 'GPS GPSAltitude'),
        ('gps_datetime', 'GPS GPSDatetime'),
        # These two had empty lookup keys in the original; preserved as-is.
        ('gps_direction', ''),
        ('gps_pos_err', ''),
    ]
    return ExifInfo(**{field: info.get(tag) for field, tag in field_tags})
def test_bad_file():
    """ Dies on bad file """
    missing = random_string()
    status, output = getstatusoutput(f'{RUN} {missing}')
    assert status != 0
    assert re.search(f"No such file or directory: '{missing}'", output)
def prefetch_input_data(reader,
                        file_pattern,
                        is_training,
                        batch_size,
                        values_per_shard,
                        input_queue_capacity_factor=16,
                        num_reader_threads=1,
                        shard_queue_name="filename_queue",
                        value_queue_name="input_queue"):
    """Prefetches string values from disk into an input queue.

    In training the capacity of the queue is important because a larger queue
    means better mixing of training examples between shards. The minimum number of
    values kept in the queue is values_per_shard * input_queue_capacity_factor,
    where input_queue_memory factor should be chosen to trade-off better mixing
    with memory usage.

    Args:
        reader: Instance of tf.ReaderBase.
        file_pattern: Comma-separated list of file patterns (e.g.
            /tmp/train_data-?????-of-00100).
        is_training: Boolean; whether prefetching for training or eval.
        batch_size: Model batch size used to determine queue capacity.
        values_per_shard: Approximate number of values per shard.
        input_queue_capacity_factor: Minimum number of values to keep in the queue
            in multiples of values_per_shard. See comments above.
        num_reader_threads: Number of reader threads to fill the queue.
        shard_queue_name: Name for the shards filename queue.
        value_queue_name: Name for the values input queue.

    Returns:
        A Queue containing prefetched string values.
    """
    # Expand every comma-separated pattern into concrete file paths.
    data_files = []
    for pattern in file_pattern.split(","):
        data_files.extend(tf.gfile.Glob(pattern))
    if not data_files:
        # NOTE(review): tf.logging.fatal only logs at FATAL level; it does not
        # raise, so execution continues with an empty file list -- confirm
        # callers treat this as a hard error.
        tf.logging.fatal("Found no input files matching %s", file_pattern)
    else:
        tf.logging.info("Prefetching values from %d files matching %s",
                        len(data_files), file_pattern)
    if is_training:
        # Training: shuffle shards and use a RandomShuffleQueue so examples
        # from different shards are mixed before batching.
        filename_queue = tf.train.string_input_producer(
            data_files, shuffle=True, capacity=16, name=shard_queue_name)
        min_queue_examples = values_per_shard * input_queue_capacity_factor
        capacity = min_queue_examples + 100 * batch_size
        values_queue = tf.RandomShuffleQueue(
            capacity=capacity,
            min_after_dequeue=min_queue_examples,
            dtypes=[tf.string],
            name="random_" + value_queue_name)
    else:
        """
        num_epochs: If specified, string_input_producer produces each string
        from string_tensor num_epochs times before generating an OutOfRange error.
        If not specified, string_input_producer can cycle through the strings in
        string_tensor an unlimited number of times.
        """
        # Eval: deterministic order -- no shuffling, FIFO queue, capacity 1
        # filename queue so shards are consumed one at a time.
        filename_queue = tf.train.string_input_producer(
            data_files, num_epochs=None, shuffle=False, capacity=1, name=shard_queue_name)
        capacity = values_per_shard + 3 * batch_size
        values_queue = tf.FIFOQueue(
            capacity=capacity, dtypes=[tf.string], name="fifo_" + value_queue_name)
    # Spawn reader threads that each read records and enqueue them.
    enqueue_ops = []
    for _ in range(num_reader_threads):
        _, value = reader.read(filename_queue)
        enqueue_ops.append(values_queue.enqueue([value]))
    tf.train.queue_runner.add_queue_runner(tf.train.queue_runner.QueueRunner(
        values_queue, enqueue_ops))
    # Export queue fullness as a summary for monitoring input-pipeline health.
    tf.summary.scalar(
        "queue/%s/fraction_of_%d_full" % (values_queue.name, capacity),
        tf.cast(values_queue.size(), tf.float32) * (1. / capacity))
    return values_queue
def find_rst(
        aligntk_path,
        pairs,
        images,
        mask,
        output,
        rotation,
        max_res,
        scale,
        tx,
        ty,
        summary):
    """ Wrapper for find_rst in aligntk.

    Args:
        aligntk_path: Path to aligntk bins
        pairs: Path to a *.lst file with lines in form of "{src} {tgt} {src}_{tgt}"
        images: Path to a *.lst file with lines in form of image names
        mask: Dir with mask images
        output: Output dir
        rotation: Tuple of ints, specifying allowed rotation range
        max_res: Max resolution
        scale: Tuple of ints, specifying allowed scale range
        tx: X-translation allowed range
        ty: Y-translation allowed range
        summary: summary dir
    """
    # NOTE(review): arguments are interpolated into a shell command without
    # quoting/escaping -- callers must not pass untrusted values.
    command = (
        f'{aligntk_path}/find_rst -pairs {pairs} -tif -images {images} '
        f'-mask {mask} -output {output} -rotation {rotation} '
        f'-max_res {max_res} -scale {scale} -tx {tx} -ty {ty} '
        f'-summary {summary}'
    )
    os.system(command)
def save_result_subj_task(res, subject, task, resultdir=None):
    """
    Save partial (subj,task) results data from a classifier.

    inputs:
        res - results output of do_subj_classification
        subject - subject id - sid00[0-9]{4}
        task - name of task from tasks
        resultdir - directory for results (defaults to RESULTDIR)
    outputs:
        saves file in resultdir as "%s_%s_res_part.pickle" % (subject, task)
    """
    resultdir = RESULTDIR if resultdir is None else resultdir
    fname = "%s_%s_res_part.pickle"
    # Bug fix: pickle writes bytes, so the file must be opened in binary
    # mode -- open(..., "w") raises TypeError on Python 3.
    with open(opj(resultdir, fname % (subject, task)), "wb") as f:
        pickle.dump(res, f)
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.