| content (string, 22-815k chars) | id (int64, 0-4.91M) |
|---|---|
def filter_example(config, example, mode="train"):
"""
Whether filter a given example according to configure.
:param config: config contains parameters for filtering example
:param example: an example instance
:param mode: "train" or "test", they differs in filter restrictions
:return: boolean
"""
if mode == "train":
return (len(example["ans_sent_tokens"]) > config.sent_limit or
len(example["ques_tokens"]) > config.ques_limit or
(example["y2_in_sent"] - example["y1_in_sent"]) >
config.ans_limit)
elif mode == "test":
return (len(example["ans_sent_tokens"]) > config.sent_limit or
len(example["ques_tokens"]) > config.ques_limit)
    else:
        raise ValueError("mode must be 'train' or 'test'")
| 5,343,700
|
def _dict_flatten(data):
"""Return flattened dict of input dict <data>.
After https://codereview.stackexchange.com/revisions/21035/3
Parameters
----------
data : dict
Input dict to flatten
Returns
-------
fdata : dict
Flattened dict.
"""
def expand(key, value):
"""Expand list."""
if isinstance(value, dict):
return [(key+'>'+k, v) for k, v in _dict_flatten(value).items()]
else:
return [(key, value)]
return dict([item for k, v in data.items() for item in expand(k, v)])
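# Illustrative usage of _dict_flatten defined above (a sketch):
# nested keys are joined with '>' into a single flat level.
assert _dict_flatten({'a': 1, 'b': {'c': 2, 'd': {'e': 3}}}) == {'a': 1, 'b>c': 2, 'b>d>e': 3}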
| 5,343,701
|
def main1():
"""
    Solves the N-queens problem using a generator.
"""
n=int(input("How many queens?\n"))
for index,result in enumerate(findnextpos(n,())):
print(repr(index)+":")
resultprint(result)
print("\n")
| 5,343,702
|
def merkleroot(elements):
"""
Args:
elements (List[str]): List of hashes that make the merkletree.
Returns:
str: The root element of the merkle tree.
"""
return Merkletree(elements).merkleroot
| 5,343,703
|
def category_start(update, context):
"""Separate function for category selection to filter the options with inline keyboard."""
update.message.reply_text(
"Choose a Group",
reply_markup=create_category_inline(trx_categories.keys(), "group_sel"),
)
return CATEGORY_REPLY_CHOOSE_TRX_OPTS
| 5,343,704
|
def query_handler(query, data):
"""Handle UPDATE and INSERT queries in the admin panel."""
try:
db_conn = get_db_connection()
        with db_conn.cursor() as cur:
            cur.execute(query, data)
        db_conn.commit()  # persist the change (a no-op if the connection is in autocommit mode)
        flash('Operation successfully completed', 'success')
except psycopg2.Error as e:
db_conn.rollback()
flash('Error: {}'.format(e), 'error')
| 5,343,705
|
def betatest():
"""Main Function Definition"""
# Parse arguments
parser = argparse.ArgumentParser()
parser.add_argument('--host',\
required=True,\
help="site hostname")
parser.add_argument('--outputfile',\
'-o',\
required=True,\
help="write results to this file")
parser.add_argument('--ntests',\
'-n',\
default=1,\
type=int,\
help="# of requests per path")
parser.add_argument('--timeout',\
'-t',\
default=30,\
type=float,\
help="timeout (seconds)")
parser.add_argument('--delay',\
'-d',\
default=0,\
type=float,\
help="wait between requests (ms)")
parser.add_argument('--processes',\
'-p',\
default=32,\
type=int,\
help="# of parallel processes")
parser.add_argument('--addresses',\
'-a',\
nargs='+',\
help="addresses to use instead of DNS")
args = parser.parse_args()
# Request the urls in parallel
pool = Pool(args.processes)
try:
results = pool.map(functools.partial(process_request,\
timeout=args.timeout,\
delay=args.delay),
generate_requests(paths=PATHS,\
host=args.host,\
addresses=args.addresses,\
tests_per_path=args.ntests))
except KeyboardInterrupt:
pool.terminate()
sys.exit(1)
# Group results by everything, and count
groupby = collections.defaultdict(lambda: [0, 0.0, None])
for result, elapsed in results:
groupby[result][0] += 1
groupby[result][1] += elapsed
# Apply some heuristics to analyze each result
    for result, info in sorted(groupby.items()):
info[2] = analyze_result(result)
    # Write the results as csv to our destination file
    with open(args.outputfile, 'w', newline='') as file_pointer:
        writer = csv.writer(file_pointer, quoting=csv.QUOTE_ALL)
        for result, (count, elapsed, outcome) in sorted(groupby.items()):
row = list(result)
row.append(count)
row.append(elapsed / count)
row.append(outcome)
writer.writerow(row)
return "beta test completed"
| 5,343,706
|
def create_player(mode, race, char_name):
""" Create the player's character """
# Evil
if mode == 2:
if race == 1:
player = character.Goblin(char_name, 1, app)
elif race == 2:
player = character.Orc(char_name, 1, app)
elif race == 3:
player = character.Uruk(char_name, 1, app)
else:
player = character.Wizard(char_name, 1, app)
# Good
else:
if race == 1:
player = character.Human(char_name, 1, app)
elif race == 2:
player = character.Wizard(char_name, 1, app)
        elif race == 3:
            player = character.Warrior(char_name, 1, app)
        # elif race == 4:
        #     player = character.Hobbit(char_name, 1, app)
        # elif race == 6:
        #     player = character.Bishop(char_name, 1, app)
        else:
            # fallback so `player` is always bound, mirroring the evil branch
            player = character.Wizard(char_name, 1, app)
    return player
| 5,343,707
|
def _get_item(i, j, block):
"""
Returns a single item from the block. Coords must be in block space.
"""
return block[i, j]
| 5,343,708
|
def calculate_potentials_python(volume, mass, volume_material_mass, mass_material_mass):
""" Easy to read python function which calculates potentials using two Python loops
Still uses NumPy for the rote math.
"""
potentials = np.zeros(len(volume), dtype=np.float32)
for volume_i, volume_coord in enumerate(volume):
for mass_coord in mass:
potentials[volume_i] += (G * volume_material_mass * mass_material_mass) / np.sqrt(
np.square(volume_coord - mass_coord).sum())
return potentials
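# A vectorized equivalent of the loop version above (a sketch; it assumes
# `volume` and `mass` are (N, 3) / (M, 3) NumPy arrays and that `G` is the same
# module-level constant used by calculate_potentials_python):
def calculate_potentials_vectorized(volume, mass, volume_material_mass, mass_material_mass):
    """Broadcast the pairwise differences instead of looping in Python."""
    diff = volume[:, None, :] - mass[None, :, :]           # shape (N, M, 3)
    dist = np.sqrt(np.square(diff).sum(axis=-1))           # pairwise distances, shape (N, M)
    pair_potentials = (G * volume_material_mass * mass_material_mass) / dist
    return pair_potentials.sum(axis=1).astype(np.float32)  # one potential per volume point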
| 5,343,709
|
def snapshot(locks, source, destination):
"""Convert a possibly COW layered disk file into a snapshot."""
util_process.execute(
locks,
('qemu-img convert --force-share -o cluster_size=%s -O qcow2 -c %s %s'
% (constants.QCOW2_CLUSTER_SIZE, source, destination)),
iopriority=util_process.PRIORITY_LOW)
| 5,343,710
|
def who_is_in_lab(bot, msg):
"""Report on who is currently in the lab."""
staff = {session.user for session in staff_in_lab()}
total = users_in_lab_count()
if total != 1:
are_number_people = 'are {} people'.format(total)
else:
are_number_people = 'is 1 person'
if staff:
staff_list = ': {}'.format(', '.join(sorted(_prevent_ping(staffer) for staffer in staff)))
else:
staff_list = ''
msg.respond('there {} in the lab, including {} staff{}'.format(
are_number_people,
len(staff),
staff_list,
))
| 5,343,711
|
def test_bad_pct():
""" Dies on bad percent """
bad = random.randint(1, 10)
rv, out = getstatusoutput(f'{RUN} -p {bad} {N1K}')
assert rv != 0
assert re.match('usage:', out, re.I)
assert re.search(f'--percent "{float(bad)}" must be between 0 and 1', out)
| 5,343,712
|
def rect2sphericalcoord3D(
        v: list[Number]
        ) -> list[float]:
    """Does a 3D coordinate transform from the rectangular
    to the spherical coordinate system.
    p     = the length of the hypotenuse, i.e. the magnitude of the vector
    theta = the angle between the positive x-axis and p (azimuth)
    phi   = the angle between the positive z-axis and p (colatitude)
    Args:
        v: [x, y, z] rectangular coordinates
    Returns:
        [p: float, theta: float, phi: float] spherical coordinates
    """
p = vmag(v)
return [p, atan(v[1] / v[0]),
acos(v[2] / p)]
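# A quadrant-safe variant (a sketch, not part of the original module): math.atan2
# handles v[0] == 0 and points outside the first octant, unlike atan(v[1] / v[0]).
# It assumes the same vmag helper used above is available.
from math import atan2, acos

def rect2sphericalcoord3D_safe(v):
    p = vmag(v)
    return [p, atan2(v[1], v[0]), acos(v[2] / p) if p else 0.0]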
| 5,343,713
|
def GetAssignmentByKeyName(key_name):
"""Gets the assignment with the specified key name."""
return Assignment.get_by_key_name(key_name)
| 5,343,714
|
def fit_integer_type(n, is_signed=True):
"""Determine the minimal space needed to store integers of maximal value n
"""
if is_signed:
m = 1
types = [np.int8, np.int16, np.int32, np.int64]
else:
m = 0
types = [np.uint8, np.uint16, np.uint32, np.uint64]
if n < 2 ** (8 - m):
return types[0]
elif n < 2 ** (16 - m):
return types[1]
elif n < 2 ** (32 - m):
return types[2]
elif n < 2 ** (64 - m):
return types[3]
    else:
        raise ValueError('Values are too big to be represented by 64-bit integers!')
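# Illustrative checks of fit_integer_type (a sketch; assumes numpy imported as np):
assert fit_integer_type(200, is_signed=False) is np.uint8     # 200 < 2**8
assert fit_integer_type(200, is_signed=True) is np.int16      # 200 >= 2**7, so int8 is too small
assert fit_integer_type(70000, is_signed=False) is np.uint32  # 70000 >= 2**16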
| 5,343,715
|
def admin_userforms_order_by_field(user_id):
""" Set User's forms order_by preference
"""
if not g.is_admin:
return jsonify("Forbidden"), 403
    data = request.get_json(silent=True)
    if not data or 'order_by_field_name' not in data:
        return jsonify("Not Acceptable"), 406
    field_names = [field['name'] for field in default_admin_userforms_field_index]
    if data['order_by_field_name'] not in field_names:
        return jsonify("Not Acceptable"), 406
g.current_user.admin['userforms']['order_by'] = data['order_by_field_name']
flag_modified(g.current_user, 'admin')
g.current_user.save()
return jsonify(
{'order_by_field_name': g.current_user.admin['userforms']['order_by']}
), 200
| 5,343,716
|
def check_all_rows(A):
"""
Check if all rows in 2-dimensional matrix don't have more than one queen
"""
for row_inx in range(len(A)):
# compute sum of row row_inx
if sum(A[row_inx]) > 1:
return False
return True
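# Quick illustration of check_all_rows (a sketch): at most one queen per row passes.
assert check_all_rows([[0, 1, 0], [1, 0, 0], [0, 0, 1]]) is True
assert check_all_rows([[1, 1, 0], [0, 0, 0], [0, 0, 0]]) is False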
| 5,343,717
|
def import_silda_command_line():
"""
Do the SILDa to kapture import using the command line parameters provided by the user.
"""
parser = argparse.ArgumentParser(description='imports SILDa dataset to kapture format.')
####################################################################################################################
parser_verbosity = parser.add_mutually_exclusive_group()
parser_verbosity.add_argument(
'-v', '--verbose', nargs='?', default=logging.WARNING, const=logging.INFO,
action=kapture.utils.logging.VerbosityParser,
help='verbosity level (debug, info, warning, critical, ... or int value) [warning]')
parser_verbosity.add_argument(
'-q', '--silent', '--quiet', action='store_const', dest='verbose', const=logging.CRITICAL)
parser.add_argument('-f', '-y', '--force', action='store_true', default=False,
help='Force delete kapture if already exists.')
# import ###########################################################################################################
parser.add_argument('-i', '--input',
help='path to silda root directory.')
parser.add_argument('-o', '--output', required=True,
help='output directory where to save NLE files.')
    parser.add_argument('-s', '--split_cams', action='store_true', default=False,
                        help='reorganise image files into per-camera folders.')
parser.add_argument('--image_transfer', type=TransferAction, default=TransferAction.link_absolute,
help=f'How to import images [link_absolute], '
f'choose among: {", ".join(a.name for a in TransferAction)}')
    parser.add_argument('--corpus', choices=['mapping', 'query'],
                        help='restrict the import to mapping or query images only (default: both).')
parser.add_argument('--cam_model', choices=['OPENCV_FISHEYE', 'FOV'], default='FOV',
help='camera model to be used.')
parser.add_argument('--rig_collapse', action='store_true', default=False,
help='Replace camera poses with rig poses.')
####################################################################################################################
args = parser.parse_args()
logger.setLevel(args.verbose)
if args.verbose <= logging.DEBUG:
# also let kapture express its logs
kapture.utils.logging.getLogger().setLevel(args.verbose)
import_silda(args.input,
args.output,
fallback_cam_model=args.cam_model,
do_split_cams=args.split_cams,
corpus=args.corpus,
replace_pose_rig=args.rig_collapse,
force_overwrite_existing=args.force,
images_import_strategy=args.image_transfer)
| 5,343,718
|
def field_name_validator(field_name):
"""
Validates a field name for a document.
Note that this validator allows periods in the name. Dot notation
is permitted because it will be used to nest the field within the
document. E.g., a field name 'user.screen_name' will be saved as
the field 'screen_name' within the embedded document 'user'.)
"""
reserved_names = [
'_id',
_DISTILLERY_SETTINGS['LABEL_KEY'],
_DISTILLERY_SETTINGS['RAW_DATA_KEY']
]
if field_name in reserved_names:
raise ValidationError(_('%s is a reserved field name' % field_name))
elif re.match(r'^\$', field_name):
raise ValidationError(_('Field name cannot start with "$"'))
elif re.search(r'\s', field_name):
raise ValidationError(_('Field name cannot contain spaces'))
    elif re.search(r'\W', field_name.replace('$', '').replace('@', '').replace('.', '')):
        raise ValidationError(_('Field name cannot contain special characters '
                                'other than underscores, periods, @, and $.'))
| 5,343,719
|
def extract_largest_connected_region(vtk_im, label_id):
"""
    Extract the largest connected region of a vtk image for a given label.
    Args:
        vtk_im: vtk image
        label_id: id of the label
    Return:
        vtk_im: the input image with all other connected regions of the label cleared
"""
fltr = vtk.vtkImageConnectivityFilter()
fltr.SetScalarRange(label_id, label_id)
fltr.SetExtractionModeToLargestRegion()
fltr.SetInputData(vtk_im)
fltr.Update()
new_im = fltr.GetOutput()
from vtk.util.numpy_support import vtk_to_numpy, numpy_to_vtk
py_im = vtk_to_numpy(vtk_im.GetPointData().GetScalars())
py_mask = vtk_to_numpy(new_im.GetPointData().GetScalars())
mask = np.logical_and(py_im==label_id, py_mask==0)
py_im[mask] = 0
vtk_im.GetPointData().SetScalars(numpy_to_vtk(py_im))
return vtk_im
| 5,343,720
|
def _run_cli_cmd(cmd_list):
"""Run a shell command and return the error code.
:param cmd_list: A list of strings that make up the command to execute.
"""
try:
return subprocess.call(cmd_list)
except Exception as e:
print(str(e))
sys.exit(1)
| 5,343,721
|
def findStandards(elm, min=0.1, max=1.0, sim=False):
"""findStandards(elm, min=0.1, max=1.0, simulate=False):
Search the standard database for suitable materials for use as a standard for the specified element."""
elm = element(elm)
sdb = App.getStandardsDatabase()
stds = sdb.findStandards(elm, min , [])
for std in stds:
if std.weightFraction(elm, False) <= max:
print std.descriptiveString(False)
print "\tAvailable: %s" % ", ".join(("'%s'" % str(std) for std in sdb.find(std.getName())))
if sim:
display(simulate(std, d1, keV=25.0))
| 5,343,722
|
def group_set_array_data_ptr(d):
"""
call view%set_external_data_ptr
hide c_loc call and add target attribute
"""
# XXX - should this check the type/shape of value against the view?
# typename - part of function name
# nd - number of dimensions
# f_type - fortran type
# shape - :,:, to match nd
if d['rank'] == 0:
extents_decl = 'extents(1)'
extents_asgn = 'extents(1) = 1_SIDRE_IndexType'
else:
extents_decl = 'extents(%d)' % d['rank']
extents_asgn = 'extents = shape(value, kind=SIDRE_IndexType)'
return """
! Generated by genfsidresplicer.py
! This function does nothing if view name does not exist in group.
subroutine group_set_array_data_ptr_{typename}{nd}(grp, name, value)
use iso_c_binding
implicit none
class(SidreGroup), intent(IN) :: grp
character(len=*), intent(IN) :: name
{f_type}, target, intent(IN) :: value{shape}
integer(C_INT) :: lname
type(SIDRE_SHROUD_view_capsule) view
! integer(SIDRE_IndexType) :: {extents_decl}
! integer(C_INT), parameter :: type = {sidre_type}
type(C_PTR) addr, viewptr
lname = len_trim(name)
! {extents_asgn}
viewptr = c_group_get_view_from_name_bufferify(grp%cxxmem, name, lname, view)
if (c_associated(view%addr)) then
#ifdef USE_C_LOC_WITH_ASSUMED_SHAPE
addr = c_loc(value)
#else
call SIDRE_C_LOC(value{lower_bound}, addr)
#endif
call c_view_set_external_data_ptr_only(view, addr)
! call c_view_apply_type_shape(rv%cxxmem, type, {rank}, extents)
endif
end subroutine group_set_array_data_ptr_{typename}{nd}""".format(
extents_decl=extents_decl,
extents_asgn=extents_asgn, **d)
| 5,343,723
|
def releaseTagName(version: Version) -> str:
"""
Compute the name of the release tag for the given version.
"""
return cast(str, version.public())
| 5,343,724
|
def get_calibrated_values(timeout=10):
"""Return an instance of CalibratedValues containing the 6 spectral bands."""
t_start = time.time()
while _as7262.CONTROL.get_data_ready() == 0 and (time.time() - t_start) <= timeout:
pass
with _as7262.CALIBRATED_DATA as DATA:
return CalibratedValues(DATA.get_r(),
DATA.get_o(),
DATA.get_y(),
DATA.get_g(),
DATA.get_b(),
DATA.get_v())
| 5,343,725
|
def customizable_admin(cls):
"""
Returns a customizable admin class
"""
class CustomSearchableAdmin(BaseAdmin):
form = customizable_form(cls)
def __init__(self, *args, **kwargs):
super(CustomSearchableAdmin, self).__init__(*args, **kwargs)
# add the custom fields to the fieldsets (if present)
# @see customizable_form and ContentTypeCustomField
if self.fieldsets:
if isinstance(self.fieldsets, tuple):
self.fieldsets = list(self.fieldsets)
fieldset = ContentTypeCustomField.get_fieldset_for_model(self.form._meta.model)
if fieldset: self.fieldsets.append(fieldset)
def get_form(self, request, obj=None, **kwargs):
## modify visualization for certain users
#if not request.user.is_superuser:
# self.exclude.append('field_to_hide')
# self.inlines.remove(UserInline)
# pass
form = super(CustomSearchableAdmin, self).get_form(request, obj, **kwargs)
return form
def get_changelist(self, request, **kwargs):
return CustomChangeList
def queryset(self, request):
qs = super(CustomSearchableAdmin, self).queryset(request)
#qs = qs.filter(Q(is_staff=True) | Q(is_superuser=True))
return qs
def has_change_permission(self, request, obj=None):
has_permission = super(CustomSearchableAdmin, self).has_change_permission(request, obj)
#if obj is not None and not request.user.is_superuser and request.user.id != obj.user.id:
return has_permission
return CustomSearchableAdmin
| 5,343,726
|
def restoreIm(transformeddata, pca, origshape, datamean, datastd):
"""Given a PCA object and transformeddata that consists of projections onto
the PCs, return images by using the PCA's inverse transform and reshaping to
the provided origshape."""
if transformeddata.shape[0] < transformeddata.shape[1]:
transformeddata = np.transpose(transformeddata)
data = pca.inverse_transform(transformeddata)
# restore the shape and scale of the data before plotting
data = data*datastd
data = data + datamean
data = np.transpose(data)
return data.reshape(origshape)
| 5,343,727
|
def ga_multi(gene_info, ga_info):
"""Main loop which sets DEAP objects and calls a multi objective EA algorithm.
Parameters
-------
gene_info, GeneInfo class
See respective class documentation.
ga_info, GAInfo class
See respective class documentation.
Returns
-------
pop, DEAP object
stats, DEAP object
hof, DEAP object
See post_run function for examples of how to interpret results.
"""
random.seed(ga_info.seed)
creator.create("Fitness", base.Fitness, weights=(1.0, ))
creator.create("Individual", set, fitness=creator.Fitness)
toolbox = base.Toolbox()
toolbox.register("indices", indiv_builder, gene_info)
toolbox.register("individual", tools.initIterate, creator.Individual,
toolbox.indices)
toolbox.register("population", tools.initRepeat, list, toolbox.individual)
toolbox.register("evaluate", single_eval, gene_info)
if len(gene_info.obj_list) < 2:
print('Attempted to start multi objective GA with single objective.',
file=sys.stderr)
if ga_info.cross_meth == 'ops':
toolbox.register("mate", cx_OPS, gene_info)
elif ga_info.cross_meth == 'sdb':
toolbox.register("mate", cx_SDB, gene_info)
else:
raise AttributeError('Invalid crossover string specified')
toolbox.register("mutate", mut_flipper, gene_info)
toolbox.register("select", tools.selTournament, tournsize=ga_info.nk)
pop = toolbox.population(n=ga_info.pop)
hof = tools.HallOfFame(1)
# Empty, as SoR objects are special
stats = tools.Statistics()
eaSoR(ga_info, gene_info, pop, toolbox, ga_info.cxpb, ga_info.mutpb,
ga_info.gen, stats, halloffame=hof)
return pop, stats, hof
| 5,343,728
|
def sample(population, k=None):
"""Behaves like random.sample, but if k is omitted, it default to
randint(1, len(population)), so that a non-empty sample is returned."""
population = list(population)
if k is None:
k = randint(1, len(population))
return random_sample(population, k)
| 5,343,729
|
def test_all_unique_violation_codes(all_violations):
"""Ensures that all violations have unique violation codes."""
codes = []
for violation in all_violations:
codes.append(int(violation.code))
assert len(set(codes)) == len(all_violations)
| 5,343,730
|
def dense_to_one_hot(array, class_num, dtype_, axis=-1):
"""
this function offer a method to change the numpy array to one hot like base on axis
as we know array dims_size in axis should be 1
keep_shape
:param array: a numpy array, data type should be int
:param class_num: one hot class number
:param dtype_: numpy data type declaration
:param axis: broadcast dim
:return:
'
algorithm:
base on axis: base_point(the local of the dim we want to one hot)
we transpose the array to [...., base_point]
and than we make a zeros array [array_element_amount, class_num]
make an array np.arange(num_labels) * class_num for support the offset
which means the step to make sure the array.flat location which set to 1(dtype_)
'
"""
array_shape = array.shape
assert array_shape[axis] == 1, DenseToOneHotLogger.error(Fore.RED + 'dim {0} should be size: 1'.format(axis))
    if array.max() >= class_num:
        raise ValueError('class_num (a) should be bigger than the max of array (b), '
                         'but a vs. b = {0} vs. {1}'.format(class_num, array.max()))
base_point = axis % len(array_shape)
transpose_axes = []
back_transpose_axes = []
DenseToOneHotLogger.debug("start generate transpose_axes and back_transpose_axes")
    if base_point == len(array_shape) - 1:
        # the target axis is already last: identity permutation
        transpose_axes += list(range(len(array_shape)))
        back_transpose_axes += list(range(len(array_shape)))
    elif base_point == 0:
        transpose_axes += list(range(1, len(array_shape)))
        transpose_axes.append(0)
        back_transpose_axes += [len(array_shape) - 1] + list(range(0, len(array_shape) - 1))
    else:
        # move axis `base_point` to the end; back_transpose_axes undoes the move later
        transpose_axes += list(range(0, base_point))
        transpose_axes += list(range(base_point + 1, len(array_shape)))
        transpose_axes.append(base_point)
        back_transpose_axes += list(range(0, base_point))
        back_transpose_axes += [len(array_shape) - 1]
        back_transpose_axes += list(range(base_point, len(array_shape) - 1))
    DenseToOneHotLogger.debug('transpose')
    array = np.transpose(array, transpose_axes)  # np.transpose returns a new view; it must be assigned
    shape = list(array.shape)
    shape[-1] = class_num
    num_labels = array.size  # total number of labels, since the one-hot axis has size 1
    index_offset = np.arange(num_labels) * class_num
    label_one_hot = np.zeros(shape, dtype=dtype_)
    label_one_hot.flat[index_offset + array.ravel()] = 1
    DenseToOneHotLogger.debug("re transpose")
    label_one_hot = np.transpose(label_one_hot, back_transpose_axes)
    return label_one_hot
| 5,343,731
|
def add_momentum_ta(df, high, low, close, volume, fillna=False):
"""Add trend technical analysis features to dataframe.
Args:
df (pandas.core.frame.DataFrame): Dataframe base.
high (str): Name of 'high' column.
low (str): Name of 'low' column.
close (str): Name of 'close' column.
fillna(bool): if True, fill nan values.
Returns:
pandas.core.frame.DataFrame: Dataframe with new features.
"""
df['momentum1'] = rsi(df[close], n=14, fillna=fillna)
df['momentum2'] = money_flow_index(df[high], df[low], df[close],
df[volume], n=14, fillna=fillna)
df['momentum3'] = tsi(df[close], r=25, s=13, fillna=fillna)
return df
| 5,343,732
|
def get_experiment_type(filename):
"""
Get the experiment type from the filename.
The filename is assumed to be in the form of:
'<reliability>_<durability>_<history kind>_<topic>_<timestamp>'
:param filename: The filename to get the type.
    :return: A string where the timestamp is taken out from the filename.
"""
file_type = ''
filename = filename.split('/')[-1]
elements = filename.split('_')
for i in range(0, len(elements) - 3):
file_type += '{}_'.format(elements[i])
file_type = file_type[:-1]
return file_type
| 5,343,733
|
def async_check_significant_change(
hass: HomeAssistant,
old_state: str,
old_attrs: dict,
new_state: str,
new_attrs: dict,
**kwargs: Any,
) -> bool | None:
"""Test if state significantly changed."""
if old_state != new_state:
return True
if old_attrs.get(ATTR_EFFECT) != new_attrs.get(ATTR_EFFECT):
return True
old_color = old_attrs.get(ATTR_HS_COLOR)
new_color = new_attrs.get(ATTR_HS_COLOR)
if old_color and new_color:
# Range 0..360
if check_absolute_change(old_color[0], new_color[0], 5):
return True
# Range 0..100
if check_absolute_change(old_color[1], new_color[1], 3):
return True
if check_absolute_change(
old_attrs.get(ATTR_BRIGHTNESS), new_attrs.get(ATTR_BRIGHTNESS), 3
):
return True
if check_absolute_change(
# Default range 153..500
old_attrs.get(ATTR_COLOR_TEMP),
new_attrs.get(ATTR_COLOR_TEMP),
5,
):
return True
if check_absolute_change(
# Range 0..255
old_attrs.get(ATTR_WHITE_VALUE),
new_attrs.get(ATTR_WHITE_VALUE),
5,
):
return True
return False
| 5,343,734
|
def array2string(a, max_line_width=None, precision=None,
suppress_small=None, separator=' ', prefix="",
style=np._NoValue, formatter=None, threshold=None,
edgeitems=None, sign=None):
"""
Return a string representation of an array.
Parameters
----------
a : ndarray
Input array.
max_line_width : int, optional
        The maximum number of columns the string should span. Newline
        characters split the string appropriately after array elements.
precision : int, optional
Floating point precision. Default is the current printing
precision (usually 8), which can be altered using `set_printoptions`.
suppress_small : bool, optional
Represent very small numbers as zero. A number is "very small" if it
is smaller than the current printing precision.
separator : str, optional
Inserted between elements.
prefix : str, optional
An array is typically printed as::
'prefix(' + array2string(a) + ')'
The length of the prefix string is used to align the
output correctly.
style : _NoValue, optional
Has no effect, do not use.
.. deprecated:: 1.14.0
formatter : dict of callables, optional
If not None, the keys should indicate the type(s) that the respective
formatting function applies to. Callables should return a string.
Types that are not specified (by their corresponding keys) are handled
by the default formatters. Individual types for which a formatter
can be set are::
- 'bool'
- 'int'
- 'timedelta' : a `numpy.timedelta64`
- 'datetime' : a `numpy.datetime64`
- 'float'
- 'longfloat' : 128-bit floats
- 'complexfloat'
- 'longcomplexfloat' : composed of two 128-bit floats
- 'numpystr' : types `numpy.string_` and `numpy.unicode_`
- 'str' : all other strings
Other keys that can be used to set a group of types at once are::
- 'all' : sets all types
- 'int_kind' : sets 'int'
- 'float_kind' : sets 'float' and 'longfloat'
- 'complex_kind' : sets 'complexfloat' and 'longcomplexfloat'
- 'str_kind' : sets 'str' and 'numpystr'
threshold : int, optional
Total number of array elements which trigger summarization
rather than full repr.
edgeitems : int, optional
Number of array items in summary at beginning and end of
each dimension.
sign : string, either '-', '+', ' ' or 'legacy', optional
Controls printing of the sign of floating-point types. If '+', always
print the sign of positive values. If ' ', always prints a space
(whitespace character) in the sign position of positive values. If
'-', omit the sign character of positive values. If 'legacy', print a
space for positive values except in 0d arrays.
Returns
-------
array_str : str
String representation of the array.
Raises
------
TypeError
if a callable in `formatter` does not return a string.
See Also
--------
array_str, array_repr, set_printoptions, get_printoptions
Notes
-----
If a formatter is specified for a certain type, the `precision` keyword is
ignored for that type.
This is a very flexible function; `array_repr` and `array_str` are using
`array2string` internally so keywords with the same name should work
identically in all three functions.
Examples
--------
>>> x = np.array([1e-16,1,2,3])
>>> print(np.array2string(x, precision=2, separator=',',
... suppress_small=True))
[ 0., 1., 2., 3.]
>>> x = np.arange(3.)
>>> np.array2string(x, formatter={'float_kind':lambda x: "%.2f" % x})
'[0.00 1.00 2.00]'
>>> x = np.arange(3)
>>> np.array2string(x, formatter={'int':lambda x: hex(x)})
'[0x0L 0x1L 0x2L]'
"""
# Deprecation 05-16-2017 v1.14
if style is not np._NoValue:
warnings.warn("'style' argument is deprecated and no longer functional",
DeprecationWarning, stacklevel=3)
overrides = _make_options_dict(precision, threshold, edgeitems,
max_line_width, suppress_small, None, None,
sign, formatter)
options = _format_options.copy()
options.update(overrides)
if a.size == 0:
# treat as a null array if any of shape elements == 0
lst = "[]"
else:
lst = _array2string(a, options, separator, prefix)
return lst
| 5,343,735
|
def registered_types():
""" list of registered types """
return list(Registry.types.get_all().keys())
| 5,343,736
|
def is_retain_bg_files(config: Dict[str, ConfigVO] = None) -> bool:
    """
    Whether to keep the old wallpapers before fetching new ones.
    """
    key = const.Key.Task.RETAIN_BGS.value
    vo = config.get(key) if config else dao.get_config(key)
    return bool(vo and vo.value)
| 5,343,737
|
def getAwareTime(tt):
"""
Generates timezone aware timestamp from timezone unaware timestamp
PARAMETERS
------------
:param tt: datatime
timezome unaware timestamp
RETURNS
------------
:return: datatime
timezone aware timestamp
"""
timezone = pytz.timezone("Europe/Amsterdam")
return (timezone.localize(tt))
| 5,343,738
|
def test_get_teams_id(flask_cli, init_api_teams):
"""Tests the API endpoint ``GET /v1/teams/{id}``."""
team_id = init_api_teams[2]['id']
resp = flask_cli.get(f'/v1/teams/{team_id}')
data = json.loads(resp.data)
assert data == init_api_teams[2]
| 5,343,739
|
def xdfs(request, tmpdir, vol_name, dos_format):
"""return (xdf_file, xdf_size_spec, vol_name) for various disks"""
size = request.param
if size == "880K":
file_name = tmpdir / "disk.adf"
size = ""
else:
file_name = tmpdir / "disk-" + size + ".hdf"
size = "size=" + size
return XDFSpec(str(file_name), size, vol_name, dos_format)
| 5,343,740
|
def nfvi_get_networks(paging, callback):
"""
Get a list of networks
"""
cmd_id = _network_plugin.invoke_plugin('get_networks', paging,
callback=callback)
return cmd_id
| 5,343,741
|
def get_fasta(uniprot_id):
"""Get the protein sequence for a UniProt ID as a string.
Args:
uniprot_id: Valid UniProt ID
Returns:
str: String of the protein (amino acid) sequence
"""
# Silencing the "Will be moved to Biokit" message
with ssbio.utils.suppress_stdout():
return bsup.get_fasta_sequence(uniprot_id)
| 5,343,742
|
def split_path(path, minsegs=1, maxsegs=None, rest_with_last=False):
"""
Validate and split the given HTTP request path.
**Examples**::
['a'] = split_path('/a')
['a', None] = split_path('/a', 1, 2)
['a', 'c'] = split_path('/a/c', 1, 2)
['a', 'c', 'o/r'] = split_path('/a/c/o/r', 1, 3, True)
:param path: HTTP Request path to be split
:param minsegs: Minimum number of segments to be extracted
:param maxsegs: Maximum number of segments to be extracted
:param rest_with_last: If True, trailing data will be returned as part
of last segment. If False, and there is
trailing data, raises ValueError.
    :returns: list of segments with a length of maxsegs (non-existent
              segments will return as None)
:raises: ValueError if given an invalid path
"""
if not maxsegs:
maxsegs = minsegs
if minsegs > maxsegs:
raise ValueError('minsegs > maxsegs: %d > %d' % (minsegs, maxsegs))
if rest_with_last:
segs = path.split('/', maxsegs)
minsegs += 1
maxsegs += 1
count = len(segs)
if (segs[0] or count < minsegs or count > maxsegs or
'' in segs[1:minsegs]):
raise ValueError('Invalid path: %s' % quote(path))
else:
minsegs += 1
maxsegs += 1
segs = path.split('/', maxsegs)
count = len(segs)
if (segs[0] or count < minsegs or count > maxsegs + 1 or
'' in segs[1:minsegs] or
(count == maxsegs + 1 and segs[maxsegs])):
raise ValueError('Invalid path: %s' % quote(path))
segs = segs[1:maxsegs]
segs.extend([None] * (maxsegs - 1 - len(segs)))
return segs
| 5,343,743
|
def get_purchases_formset(n_forms=0):
"""
Helper method that returns a Django formset for a dynamic amount of Purchases. Initially `n_forms` empty
forms are shown.
"""
return modelformset_factory(Purchase, fields=('amount', 'fruit'), extra=n_forms)
| 5,343,744
|
async def git_pull():
"""
Pulls any changes down from github and returns the result of the command.
_> changed: str
"""
cmd = Popen(["git", "pull"], stdout=PIPE)
out, _ = cmd.communicate()
out = out.decode()
return out
| 5,343,745
|
def inverse_word_map(word_map):
""" Create an inverse word mapping.
:param word_map: word mapping
"""
return {v: k for k, v in word_map.items()}
| 5,343,746
|
def get_coalition_wins_sql_string_for_state(coalition_id,state_id):
"""
    :type coalition_id: integer
    :type state_id: integer
"""
str = """ select
lr.candidate_id,
c.fullname as winning_candidate,
lr.constituency_id,
cons.name as constituency,
lr.party_id,
lr.max_votes,
(lr.max_votes-sr.votes) as lead,
sr.candidate_id,
loosing_candidate.fullname as runner_up,
loosing_party.name as runner_up_party,
sr.party_id,
winning_party.name,
ltw.party_id
from latest_results lr
inner join
latest_runners_up as sr
on
sr.constituency_id = lr.constituency_id
inner join
candidate c
on
c.id = lr.candidate_id
inner join
constituency cons
on
cons.id = lr.constituency_id
inner join party winning_party
on
lr.party_id = winning_party.id
inner join party loosing_party
on
loosing_party.id = sr.party_id
inner join candidate loosing_candidate
on
loosing_candidate.id = sr.candidate_id
inner join last_time_winners ltw
on
ltw.constituency_id = lr.constituency_id
where
winning_party.coalition_id = %s
and
cons.state_id = %s
and
lr.status = 'DECLARED'
order by
lead DESC""" % (coalition_id,state_id)
return str;
| 5,343,747
|
def computeAPLSF(data):
"""
Compute the LSF kernel for each chip
"""
index = 2047
## define lsf range and pixel centers
xlsf = numpy.linspace(-7.,7.,43)
xcenter = numpy.arange(0,4096)
## compute LSF profiles for each chip as a function of pixel
raw_out2_a = raw(xlsf,xcenter,data.lsfcoeff[0])
raw_out2_b = raw(xlsf,xcenter,data.lsfcoeff[1])
raw_out2_c = raw(xlsf,xcenter,data.lsfcoeff[2])
## normalize
raw_out2_a_norm = raw_out2_a/numpy.tile(numpy.sum(raw_out2_a,axis=1),(len(xlsf),1)).T
raw_out2_b_norm = raw_out2_b/numpy.tile(numpy.sum(raw_out2_b,axis=1),(len(xlsf),1)).T
raw_out2_c_norm = raw_out2_c/numpy.tile(numpy.sum(raw_out2_c,axis=1),(len(xlsf),1)).T
return numpy.array([raw_out2_a_norm[index],raw_out2_b_norm[index],raw_out2_c_norm[index]])
| 5,343,748
|
def test_random_density_not_real():
"""Generate random non-real density matrix."""
mat = random_density_matrix(2)
np.testing.assert_equal(is_density(mat), True)
| 5,343,749
|
def login():
"""Log in a registered user by adding the user id to the session."""
if request.method == "POST":
username = request.form["username"]
password = request.form["password"]
error = None
user = User.query.filter_by(name=username).first()
if user is None:
error = "Incorrect username."
elif not user.check_password(password):
error = "Incorrect password."
        if error is None:
            # store the user id in the Flask session (flask.session, assumed imported),
            # not the SQLAlchemy session
            session.clear()
            session["user_id"] = user.id
            return redirect(url_for("mainpage"))
flash(error)
return render_template("auth/login.html")
| 5,343,750
|
def test_metabolite_annotation_overview(model, db):
"""
Expect all metabolites to have annotations from common databases.
Specific database cross-references are paramount to mapping information.
To provide references to as many databases as possible helps to make the
metabolic model more accessible to other researchers. This does not only
facilitate the use of a model in a broad array of computational pipelines,
it also promotes the metabolic model itself to become an organism-specific
knowledge base.
For this test to pass, each metabolite annotation should contain
cross-references to a number of databases. The currently selection is
listed in `annotation.py`, but an ongoing discussion can be found at
https://github.com/opencobra/memote/issues/332. For each database this
test checks for the presence of its corresponding namespace ID to comply
with the MIRIAM guidelines i.e. they have to match those defined on
https://identifiers.org/.
Since each database is quite different and some potentially incomplete, it
may not be feasible to achieve 100% coverage for each of them. Generally
it should be possible, however, to obtain cross-references to at least
one of the databases for all metabolites consistently.
Implementation:
Check if the keys of the annotation attribute of each cobra.Metabolite of
the model match with a selection of common biochemical databases. The
annotation attribute of cobrapy components is a dictionary of
key:value pairs.
"""
ann = test_metabolite_annotation_overview.annotation
ann["data"][db] = get_ids(
annotation.generate_component_annotation_overview(
model.metabolites, db))
ann["metric"][db] = len(ann["data"][db]) / len(model.metabolites)
ann["message"][db] = wrapper.fill(
"""The following {} metabolites ({:.2%}) lack annotation for {}:
{}""".format(len(ann["data"][db]), ann["metric"][db], db,
truncate(ann["data"][db])))
assert len(ann["data"][db]) == 0, ann["message"][db]
| 5,343,751
|
async def create_pr_review(
gh: GitHubAPI, *, pull_request: Mapping[str, Any], comments: list[dict[str, Any]]
) -> None:
"""Submit a comment review for the given pull request.
`comments` is a list of ``parser.record.ReviewComment`` as dictionary which
represents the pull request review comment.
"""
await gh.post(
pull_request["url"] + "/reviews",
data={
"commit_id": pull_request["head"]["sha"],
"body": PR_REVIEW_BODY,
"event": "COMMENT",
"comments": comments,
},
accept="application/vnd.github.comfort-fade-preview+json",
oauth_token=await gh.access_token,
)
| 5,343,752
|
def _timedeltaformat(value, include_ms=False):
"""Formats a timedelta in a sane way.
Ignores sub-second precision by default.
"""
if not value:
return NON_BREAKING_HYPHEN + NON_BREAKING_HYPHEN
total_seconds = value.total_seconds()
suffix = ''
if include_ms:
ms = int(round(total_seconds-int(total_seconds), 3) * 1000)
if ms:
suffix = '.%03d' % ms
hours, remainder = divmod(int(round(total_seconds)), 3600)
minutes, seconds = divmod(remainder, 60)
if hours:
return '%d:%02d:%02d%s' % (hours, minutes, seconds, suffix)
# Always prefix minutes, even if 0, otherwise this looks weird. Revisit this
# decision if bikeshedding is desired.
return '%d:%02d%s' % (minutes, seconds, suffix)
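# Illustrative outputs of _timedeltaformat (a sketch):
import datetime
assert _timedeltaformat(datetime.timedelta(hours=1, minutes=2, seconds=3)) == '1:02:03'
assert _timedeltaformat(datetime.timedelta(seconds=7.25), include_ms=True) == '0:07.250'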
| 5,343,753
|
def get_viame_src(url):
"""
Get image src from via.me API.
"""
END_POINT = 'http://via.me/api/v1/posts/'
tmp = url.split('/')
viame_id = tmp[-1][1:]
address = END_POINT + viame_id
result = httpget(address)['response']['post']
return result['thumb_300_url']
| 5,343,754
|
def Geom_BSplineCurve_MaxDegree(*args):
"""
* Returns the value of the maximum degree of the normalized B-spline basis functions in this package.
:rtype: int
"""
return _Geom.Geom_BSplineCurve_MaxDegree(*args)
| 5,343,755
|
def iou_score(pred_cls, true_cls, nclass, drop=(), mask=None):
"""
compute the intersection-over-union score
both inputs should be categorical (as opposed to one-hot)
"""
assert pred_cls.shape == true_cls.shape, 'Shape of predictions should match GT'
if mask is not None:
assert mask.dim() == true_cls.dim(), \
'Mask should have the same dimensions as inputs'
intersect_ = torch.zeros(nclass - len(drop), device=pred_cls.get_device())
union_ = torch.zeros(nclass - len(drop), device=pred_cls.get_device())
idx = 0
for i in range(nclass):
if i not in drop:
intersect = (pred_cls == i).byte() + (true_cls == i).byte()
if mask is not None:
intersect *= mask.byte()
intersect = intersect.eq(2).sum()
union = (pred_cls == i).byte() + (true_cls == i).byte()
if mask is not None:
union *= mask.byte()
union = union.ge(1).sum()
intersect_[idx] = intersect
union_[idx] = union
idx += 1
return intersect_, union_
| 5,343,756
|
def check_args(source_path, args):
"""Checks lengths of supplied args match or raise an error.
Lists can have only one element where they are automatically extended.
Args:
source_path(list(str)): List of source_paths supplied to turbiniactl.
args(list(list)): List of args (i.e. name, source, partitions, etc) and
their values supplied to turbiniactl.
Raises:
TurbiniaException: If length of args don't match.
Returns:
list(str): List of arg or None """
ret = list()
if not args[0]:
args[0] = source_path
for arg in args:
if not arg:
arg = [None]
if len(arg) > 1 and len(arg) != len(source_path):
raise TurbiniaException(
'Number of passed in args ({0:d}) must equal to one or '
'number of source_paths/disks ({1:d}).'.format(
len(arg), len(source_path)))
if len(arg) == 1:
arg = [arg[0] for _ in source_path]
ret.append(arg)
return ret
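# Illustrative behaviour (a sketch): single-element args are broadcast to match source_path.
assert check_args(['d1.img', 'd2.img'], [['case'], ['p1']]) == [['case', 'case'], ['p1', 'p1']]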
| 5,343,757
|
def futures_dce_position_rank(date: str = "20160104") -> dict:
    """
    Dalian Commodity Exchange daily position ranking per contract
    http://www.dce.com.cn/dalianshangpin/xqsj/tjsj26/rtj/rcjccpm/index.html
    :param date: the trading day; e.g., "20200511"
    :type date: str
    :return: position ranking data for the given date, one DataFrame per contract
    :rtype: dict
    """
    date = cons.convert_date(date) if date is not None else datetime.date.today()
    if date.strftime('%Y%m%d') not in calendar:
        warnings.warn('%s is not a trading day' % date.strftime('%Y%m%d'))
        return {}
url = "http://www.dce.com.cn/publicweb/quotesdata/exportMemberDealPosiQuotesBatchData.html"
headers = {
"Accept": "text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,image/apng,*/*;q=0.8,application/signed-exchange;v=b3;q=0.9",
"Accept-Encoding": "gzip, deflate",
"Accept-Language": "zh-CN,zh;q=0.9,en;q=0.8",
"Cache-Control": "no-cache",
"Connection": "keep-alive",
"Content-Length": "160",
"Content-Type": "application/x-www-form-urlencoded",
"Host": "www.dce.com.cn",
"Origin": "http://www.dce.com.cn",
"Pragma": "no-cache",
"Referer": "http://www.dce.com.cn/publicweb/quotesdata/memberDealPosiQuotes.html",
"Upgrade-Insecure-Requests": "1",
"User-Agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/81.0.4044.138 Safari/537.36",
}
payload = {
"memberDealPosiQuotes.variety": "a",
"memberDealPosiQuotes.trade_type": "0",
"contract.contract_id": "a2009",
"contract.variety_id": "a",
"year": date.year,
"month": date.month - 1,
"day": date.day,
"batchExportFlag": "batch",
}
r = requests.post(url, payload, headers=headers)
big_dict = dict()
with zipfile.ZipFile(BytesIO(r.content), "r") as z:
for i in z.namelist():
file_name = i.encode('cp437').decode('GBK')
try:
data = pd.read_table(z.open(i), header=None, sep="\t").iloc[:-6]
                if len(data) < 12:  # handle contracts with no active positions
big_dict[file_name.split("_")[1]] = pd.DataFrame()
continue
start_list = data[data.iloc[:, 0].str.find("名次") == 0].index.tolist()
data = data.iloc[start_list[0]:, data.columns[data.iloc[start_list[0], :].notnull()]]
data.reset_index(inplace=True, drop=True)
start_list = data[data.iloc[:, 0].str.find("名次") == 0].index.tolist()
end_list = data[data.iloc[:, 0].str.find("总计") == 0].index.tolist()
part_one = data[start_list[0]: end_list[0]].iloc[1:, :]
part_two = data[start_list[1]: end_list[1]].iloc[1:, :]
part_three = data[start_list[2]: end_list[2]].iloc[1:, :]
temp_df = pd.concat([part_one.reset_index(drop=True), part_two.reset_index(drop=True),
part_three.reset_index(drop=True)], axis=1, ignore_index=True)
temp_df.columns = ["名次", "会员简称", "成交量", "增减", "名次", "会员简称", "持买单量", "增减", "名次", "会员简称", "持卖单量", "增减"]
temp_df["rank"] = range(1, len(temp_df) + 1)
del temp_df["名次"]
temp_df.columns = ["vol_party_name", "vol", "vol_chg", "long_party_name", "long_open_interest",
"long_open_interest_chg", "short_party_name", "short_open_interest",
"short_open_interest_chg", "rank"]
temp_df["symbol"] = file_name.split("_")[1]
temp_df["variety"] = file_name.split("_")[1][:-4].upper()
temp_df = temp_df[["long_open_interest", "long_open_interest_chg", "long_party_name", "rank",
"short_open_interest", "short_open_interest_chg", "short_party_name",
"vol", "vol_chg", "vol_party_name", "symbol", "variety"]]
big_dict[file_name.split("_")[1]] = temp_df
except UnicodeDecodeError as e:
try:
data = pd.read_table(z.open(i), header=None, sep="\\s+", encoding="gb2312", skiprows=3)
except:
data = pd.read_table(z.open(i), header=None, sep="\\s+", encoding="gb2312", skiprows=4)
start_list = data[data.iloc[:, 0].str.find("名次") == 0].index.tolist()
end_list = data[data.iloc[:, 0].str.find("总计") == 0].index.tolist()
part_one = data[start_list[0]: end_list[0]].iloc[1:, :]
part_two = data[start_list[1]: end_list[1]].iloc[1:, :]
part_three = data[start_list[2]: end_list[2]].iloc[1:, :]
temp_df = pd.concat([part_one.reset_index(drop=True), part_two.reset_index(drop=True),
part_three.reset_index(drop=True)], axis=1, ignore_index=True)
temp_df.columns = ["名次", "会员简称", "成交量", "增减", "名次", "会员简称", "持买单量", "增减", "名次", "会员简称", "持卖单量", "增减"]
temp_df["rank"] = range(1, len(temp_df) + 1)
del temp_df["名次"]
temp_df.columns = ["vol_party_name", "vol", "vol_chg", "long_party_name", "long_open_interest",
"long_open_interest_chg", "short_party_name", "short_open_interest",
"short_open_interest_chg", "rank"]
temp_df["symbol"] = file_name.split("_")[1]
temp_df["variety"] = file_name.split("_")[1][:-4].upper()
temp_df = temp_df[["long_open_interest", "long_open_interest_chg", "long_party_name", "rank",
"short_open_interest", "short_open_interest_chg", "short_party_name",
"vol", "vol_chg", "vol_party_name", "symbol", "variety"]]
big_dict[file_name.split("_")[1]] = temp_df
return big_dict
| 5,343,758
|
def make_earray(file_name, arrays, atom, sizes):
"""
make_earray(file_name, arrays, atom, sizes)
General purpose algorithm to create an empty earray
Parameters
----------
file_name: str
Name of file
arrays: str, list
List of references for arrays in data table
atom: type
Type of data in earray
sizes: int, tuple
Shape of arrays in data set
"""
with tables.open_file(file_name, 'w') as outfile:
for i, array in enumerate(arrays):
outfile.create_earray(outfile.root, array, atom, sizes[i])
| 5,343,759
|
def gen_rsa():
"""
Generate an RSA Key Pair for digital signature
this is designed to be called once per user
TODO maybe this belongs in server-specific code since server will
need to know public and private keys
"""
pkey = PKey()
pkey.generate_key(TYPE_RSA, RSA_BITS)
pkey.check()
return pkey
| 5,343,760
|
def get_token_type_str(token):
"""根据传入的token对象的类型返回该token的类型的字符串表达
参数
----
token : Token
返回
----
str : str
"""
token_type = token.type
if token_type == TOKEN_TYPE_IF:
return '(keyword)if'
elif token_type == TOKEN_TYPE_ELIF:
return '(keyword)elif'
elif token_type == TOKEN_TYPE_ELSE:
return '(keyword)else'
elif token_type == TOKEN_TYPE_FOR:
return '(keyword)for'
elif token_type == TOKEN_TYPE_IN:
return '(keyword)in'
elif token_type == TOKEN_TYPE_WHILE:
return '(keyword)while'
elif token_type == TOKEN_TYPE_BREAK:
return '(keyword)break'
elif token_type == TOKEN_TYPE_NOT:
return '(keyword)not'
elif token_type == TOKEN_TYPE_AND:
return '(keyword)and'
elif token_type == TOKEN_TYPE_OR:
return '(keyword)or'
elif token_type == TOKEN_TYPE_RETURN:
return '(keyword)return'
elif token_type == TOKEN_TYPE_IMPORT:
return '(keyword)import'
elif token_type == TOKEN_TYPE_FUN:
return '(keyword)fun'
elif token_type == TOKEN_TYPE_CLASS:
return '(keyword)class'
elif token_type == TOKEN_TYPE_LET:
return '(keyword)let'
elif token_type == TOKEN_TYPE_GLOBAL:
return '(keyword)global'
elif token_type == TOKEN_TYPE_TRUE:
return '(keyword)True'
elif token_type == TOKEN_TYPE_FALSE:
return '(keyword)False'
elif token_type == TOKEN_TYPE_CONTINUE:
return '(keyword)continue'
elif token_type == TOKEN_TYPE_DEL:
return '(keyword)del'
elif token_type == TOKEN_TYPE_ADD:
return '(add)+'
elif token_type == TOKEN_TYPE_SUB:
return '(sub)-'
elif token_type == TOKEN_TYPE_MUL:
return '(mul)*'
elif token_type == TOKEN_TYPE_DIV:
return '(div)/'
elif token_type == TOKEN_TYPE_MOD:
return '(mod)%'
elif token_type == TOKEN_TYPE_POWER:
return '(power)**'
elif token_type == TOKEN_TYPE_EQU:
return '(equ)=='
elif token_type == TOKEN_TYPE_NEQU:
return '(nequ)!='
elif token_type == TOKEN_TYPE_GT:
return '(gt)>'
elif token_type == TOKEN_TYPE_LT:
return '(lt)<'
elif token_type == TOKEN_TYPE_GE:
return '(ge)>='
elif token_type == TOKEN_TYPE_LE:
return '(le)<='
elif token_type == TOKEN_TYPE_ASSIGN:
return '(assign)='
elif token_type == TOKEN_TYPE_LOGIC_AND:
return '(logic_and)&'
elif token_type == TOKEN_TYPE_LOGIC_OR:
return '(logic_or)|'
elif token_type == TOKEN_TYPE_LOGIC_NOT:
return '(logic_not)~'
elif token_type == TOKEN_TYPE_LOGIC_XOR:
return '(logic_xor)^'
elif token_type == TOKEN_TYPE_LOGIC_SHL:
return '(logic_shl)<<'
elif token_type == TOKEN_TYPE_LOGIC_SHR:
return '(logic_shr)>>'
elif token_type == TOKEN_TYPE_NUM:
return '(num)' + token.str
elif token_type == TOKEN_TYPE_STR:
return '(str)' + token.str
elif token_type == TOKEN_TYPE_COMMA:
return '(comma),'
elif token_type == TOKEN_TYPE_POINT:
return '(point).'
elif token_type == TOKEN_TYPE_COLON:
return '(colon):'
elif token_type == TOKEN_TYPE_SEMICOLON:
return '(semicolon);'
elif token_type == TOKEN_TYPE_LEFT_PARENT:
return '(left_parent)('
elif token_type == TOKEN_TYPE_RIGHT_PARENT:
return '(right_parent))'
elif token_type == TOKEN_TYPE_LEFT_BRACKET:
return '(left_bracket)['
elif token_type == TOKEN_TYPE_RIGHT_BRACKET:
return '(right_bracket)]'
elif token_type == TOKEN_TYPE_LEFT_BRACE:
return '(left_brace){'
elif token_type == TOKEN_TYPE_RIGHT_BRACE:
return '(right_brace)}'
elif token_type == TOKEN_TYPE_DOUBLE_QUOTATION:
return '(double_quotation)"'
elif token_type == TOKEN_TYPE_SINGLE_QUOTE:
return "(single_quote)'"
elif token_type == TOKEN_TYPE_ID:
return '(id)' + token.str
elif token_type == TOKEN_TYPE_STR_LINES:
return '(str_line)' + token.str
elif token_type == TOKEN_TYPE_SELF:
return '(keyword)this'
elif token_type == TOKEN_TYPE_UNKNOWN:
return '(unknown)UNKNOWN'
elif token_type == TOKEN_TYPE_EOF:
return '(eof)EOF'
print("Token '%s' doesn't exist!" % token.str)
sys.exit(1)
| 5,343,761
|
def _(
sklearn_model: ensemble.GradientBoostingRegressor,
path: os.PathLike,
) -> tf.keras.Model:
"""Converts a gradient boosting regression model into a TFDF model."""
if isinstance(sklearn_model.init_, dummy.DummyRegressor):
# If the initial estimator is a DummyRegressor, then it predicts a constant
# which can be passed to GradientBoostedTreeBuilder as a bias.
init_pytree = None
bias = sklearn_model.init_.constant_[0][0]
elif isinstance(sklearn_model.init_, tree.DecisionTreeRegressor):
# If the initial estimator is a DecisionTreeRegressor, we add it as the
# first tree in the ensemble and set the bias to zero. We could also support
# other tree-based initial estimators (e.g. RandomForest), but this seems
# like a niche enough use case that we don't for the moment.
init_pytree = convert_sklearn_tree_to_tfdf_pytree(sklearn_model.init_)
bias = 0.0
elif sklearn_model.init_ == "zero":
init_pytree = None
bias = 0.0
else:
raise ValueError("The initial estimator must be either a DummyRegressor"
"or a DecisionTreeRegressor, but got"
f"{type(sklearn_model.init_)}.")
gbt_builder = tfdf.builder.GradientBoostedTreeBuilder(
path=path,
objective=tfdf.py_tree.objective.RegressionObjective(label="label"),
bias=bias,
)
if init_pytree:
gbt_builder.add_tree(init_pytree)
for weak_learner in sklearn_model.estimators_.ravel():
gbt_builder.add_tree(convert_sklearn_tree_to_tfdf_pytree(
weak_learner,
weight=sklearn_model.learning_rate,
))
gbt_builder.close()
return tf.keras.models.load_model(path)
| 5,343,762
|
def task_checkqueue(storage):
"""
Task that watches a queue for messages and acts on them when received.
"""
# Get the queue object from the storage dictionary
thequeue = storage.get("queue")
try:
# Use a timeout so it blocks for at-most 0.5 seconds while waiting for a message. Smaller values can be used to
# increase the cycling of the task and responsiveness to Threadify control signals (like pause) if desired.
msg = thequeue.get(block=True, timeout=.5)
except queue.Empty:
print("_", end="")
else:
if msg == "QUIT":
return False
# Print received message
print("{:s}".format(msg), end="")
return True
| 5,343,763
|
def to_sparse(x):
""" converts dense tensor x to sparse format """
x_typename = torch.typename(x).split('.')[-1]
sparse_tensortype = getattr(torch.sparse, x_typename)
indices = torch.nonzero(x)
if len(indices.shape) == 0: # if all elements are zeros
return sparse_tensortype(*x.shape)
indices = indices.t()
values = x[tuple(indices[i] for i in range(indices.shape[0]))]
return sparse_tensortype(indices, values, x.size())
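# Illustrative round trip through to_sparse (a sketch; requires torch):
import torch
dense = torch.tensor([[0.0, 2.0], [0.0, 0.0]])
assert torch.equal(to_sparse(dense).to_dense(), dense)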
| 5,343,764
|
def get_top_article_categories():
"""
获取顶级文章分类列表
自定义模版标签
"""
return Category.objects.filter(level=1)
| 5,343,765
|
def filter_variants_top_k(log, k, parameters=None):
"""
Keeps the top-k variants of the log
Parameters
-------------
log
Event log
k
Number of variants that should be kept
parameters
Parameters
Returns
-------------
filtered_log
Filtered log
"""
if parameters is None:
parameters = {}
variants = variants_get.get_variants_count(log, parameters=parameters)
variant_count = []
for variant in variants:
variant_count.append([variant, variants[variant]])
variant_count = sorted(variant_count, key=lambda x: (x[1], x[0]), reverse=True)
variant_count = variant_count[:min(k, len(variant_count))]
variants_to_filter = [x[0] for x in variant_count]
return apply(log, variants_to_filter, parameters=parameters)
| 5,343,766
|
def create_classifier(GrokClassifier=None, XMLClassifier=None, JsonClassifier=None, CsvClassifier=None):
"""
Creates a classifier in the user\'s account. This can be a GrokClassifier , an XMLClassifier , a JsonClassifier , or a CsvClassifier , depending on which field of the request is present.
See also: AWS API Documentation
Exceptions
:example: response = client.create_classifier(
GrokClassifier={
'Classification': 'string',
'Name': 'string',
'GrokPattern': 'string',
'CustomPatterns': 'string'
},
XMLClassifier={
'Classification': 'string',
'Name': 'string',
'RowTag': 'string'
},
JsonClassifier={
'Name': 'string',
'JsonPath': 'string'
},
CsvClassifier={
'Name': 'string',
'Delimiter': 'string',
'QuoteSymbol': 'string',
'ContainsHeader': 'UNKNOWN'|'PRESENT'|'ABSENT',
'Header': [
'string',
],
'DisableValueTrimming': True|False,
'AllowSingleColumn': True|False
}
)
:type GrokClassifier: dict
:param GrokClassifier: A GrokClassifier object specifying the classifier to create.\n\nClassification (string) -- [REQUIRED]An identifier of the data format that the classifier matches, such as Twitter, JSON, Omniture logs, Amazon CloudWatch Logs, and so on.\n\nName (string) -- [REQUIRED]The name of the new classifier.\n\nGrokPattern (string) -- [REQUIRED]The grok pattern used by this classifier.\n\nCustomPatterns (string) --Optional custom grok patterns used by this classifier.\n\n\n
:type XMLClassifier: dict
:param XMLClassifier: An XMLClassifier object specifying the classifier to create.\n\nClassification (string) -- [REQUIRED]An identifier of the data format that the classifier matches.\n\nName (string) -- [REQUIRED]The name of the classifier.\n\nRowTag (string) --The XML tag designating the element that contains each record in an XML document being parsed. This can\'t identify a self-closing element (closed by /> ). An empty row element that contains only attributes can be parsed as long as it ends with a closing tag (for example, <row item_a='A' item_b='B'></row> is okay, but <row item_a='A' item_b='B' /> is not).\n\n\n
:type JsonClassifier: dict
:param JsonClassifier: A JsonClassifier object specifying the classifier to create.\n\nName (string) -- [REQUIRED]The name of the classifier.\n\nJsonPath (string) -- [REQUIRED]A JsonPath string defining the JSON data for the classifier to classify. AWS Glue supports a subset of JsonPath , as described in Writing JsonPath Custom Classifiers .\n\n\n
:type CsvClassifier: dict
:param CsvClassifier: A CsvClassifier object specifying the classifier to create.\n\nName (string) -- [REQUIRED]The name of the classifier.\n\nDelimiter (string) --A custom symbol to denote what separates each column entry in the row.\n\nQuoteSymbol (string) --A custom symbol to denote what combines content into a single column value. Must be different from the column delimiter.\n\nContainsHeader (string) --Indicates whether the CSV file contains a header.\n\nHeader (list) --A list of strings representing column names.\n\n(string) --\n\n\nDisableValueTrimming (boolean) --Specifies not to trim values before identifying the type of column values. The default value is true.\n\nAllowSingleColumn (boolean) --Enables the processing of files that contain only one column.\n\n\n
:rtype: dict
ReturnsResponse Syntax
{}
Response Structure
(dict) --
Exceptions
Glue.Client.exceptions.AlreadyExistsException
Glue.Client.exceptions.InvalidInputException
Glue.Client.exceptions.OperationTimeoutException
:return: {}
:returns:
(dict) --
"""
pass
| 5,343,767
|
def get_weighted_average(embedding, x, w):
"""
Compute the weighted average vectors
:param embedding: embedding[i,:] is the vector for word i
:param x: x[i, :] are the indices of the words in sentence i
:param w: w[i, :] are the weights for the words in sentence i
:return: emb[i, :] are the weighted average vector for sentence i
"""
n_samples = x.shape[0]
emb = np.zeros((n_samples, embedding.shape[1]))
for i in range(n_samples):
emb[i, :] = w[i, :].dot(embedding[x[i, :], :]) / np.count_nonzero(w[i, :])
return emb
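# Minimal illustrative call (a sketch): 2 sentences of 3 word indices each over a
# toy 5-word vocabulary with 4-dimensional embeddings and uniform weights.
embedding = np.random.rand(5, 4)
x = np.array([[0, 1, 2], [3, 4, 0]])
w = np.ones_like(x, dtype=float)
assert get_weighted_average(embedding, x, w).shape == (2, 4)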
| 5,343,768
|
async def startup_jobs_client(app: Application):
"""
An application `on_startup` callback that initializes a Virtool
:class:`virtool.job_manager.Manager` object and puts it in app state.
:param app: the app object
    :type app: :class:`aiohttp.web.Application`
"""
app["jobs"] = JobsClient(app)
| 5,343,769
|
def _build_gdef(ufo):
"""Build a table GDEF statement for ligature carets."""
from glyphsLib import glyphdata # Expensive import
bases, ligatures, marks, carets = set(), set(), set(), {}
category_key = GLYPHLIB_PREFIX + 'category'
subCategory_key = GLYPHLIB_PREFIX + 'subCategory'
for glyph in ufo:
has_attaching_anchor = False
for anchor in glyph.anchors:
name = anchor.name
if name and not name.startswith('_'):
has_attaching_anchor = True
if name and name.startswith('caret_') and 'x' in anchor:
carets.setdefault(glyph.name, []).append(round(anchor['x']))
lib = glyph.lib
glyphinfo = glyphdata.get_glyph(glyph.name)
# first check glyph.lib for category/subCategory overrides; else use
# global values from GlyphData
category = lib.get(category_key)
if category is None:
category = glyphinfo.category
subCategory = lib.get(subCategory_key)
if subCategory is None:
subCategory = glyphinfo.subCategory
# Glyphs.app assigns glyph classes like this:
#
# * Base: any glyph that has an attaching anchor
# (such as "top"; "_top" does not count) and is neither
# classified as Ligature nor Mark using the definitions below;
#
# * Ligature: if subCategory is "Ligature" and the glyph has
# at least one attaching anchor;
#
# * Mark: if category is "Mark" and subCategory is either
# "Nonspacing" or "Spacing Combining";
#
# * Compound: never assigned by Glyphs.app.
#
# https://github.com/googlei18n/glyphsLib/issues/85
# https://github.com/googlei18n/glyphsLib/pull/100#issuecomment-275430289
if subCategory == 'Ligature' and has_attaching_anchor:
ligatures.add(glyph.name)
elif category == 'Mark' and (subCategory == 'Nonspacing' or
subCategory == 'Spacing Combining'):
marks.add(glyph.name)
elif has_attaching_anchor:
bases.add(glyph.name)
if not any((bases, ligatures, marks, carets)):
return None
lines = ['table GDEF {', ' # automatic']
glyphOrder = ufo.lib[PUBLIC_PREFIX + 'glyphOrder']
glyphIndex = lambda glyph: glyphOrder.index(glyph)
fmt = lambda g: ('[%s]' % ' '.join(sorted(g, key=glyphIndex))) if g else ''
lines.extend([
' GlyphClassDef',
' %s, # Base' % fmt(bases),
' %s, # Liga' % fmt(ligatures),
' %s, # Mark' % fmt(marks),
' ;'])
for glyph, caretPos in sorted(carets.items()):
lines.append(' LigatureCaretByPos %s %s;' %
(glyph, ' '.join(unicode(p) for p in sorted(caretPos))))
lines.append('} GDEF;')
return '\n'.join(lines)
| 5,343,770
|
def winningRate2(r, s, X, Y):
"""
revised version, now we want to investigate how value of X and Y will affect.
r: int = remaining round of game
s: int = current score
X: int = points winning for X-head
Y: int = points wining for Y-head
(assuming X and Y are both fair, and we always assume Y > X)
"""
    if X > Y:
        X, Y = Y, X
    cache = {}  # memoization table: (remaining rounds, score) -> winning probability
    def rec(r, s):
if (r, s) not in cache:
if r < 1:
raise (TypeError("r can not be smaller than 1."))
if r == 1:
if s <= -Y: # only Y head for the win.
cache[(r, s)] = 0
return cache[(r, s)]
if s >= (-Y + 1) and s <= X: # play X or Y shall be the same
cache[(r, s)] = 0.5
return cache[(r, s)]
if s > X: # play X, guarenteed win
cache[(r, s)] = 1
return cache[(r, s)]
cache[(r, s)] = max(
(rec(r - 1, s + X) + rec(r - 1, s - X)) / 2,
(rec(r - 1, s + Y) + rec(r - 1, s - Y)) / 2,
)
return cache[(r, s)]
return rec(r, s)
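
# Quick sanity checks (illustrative values, not from the original source): with a single
# round left, a score above X is a guaranteed win and a small score is a coin flip.
assert winningRate2(1, 5, X=1, Y=3) == 1
assert winningRate2(1, 0, X=1, Y=3) == 0.5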
| 5,343,771
|
def handler(event, context):
"""Deletes all user content based on username provided in body,
only accessible from authenticated users with the custom:group=admin"""
logger.info(f"Received event: {json.dumps(event)}")
try:
if event["requestContext"]["authorizer"]["claims"]["custom:group"] != "admin":
logger.error("User does not have permissions to call this function")
retval = {
"body": "ERROR: User does not have permissions to call this function",
"headers": httpHeaders,
"statusCode": 200,
}
return retval
except KeyError:
logger.error("custom:group field not found in token")
retval = {
"body": "ERROR: custom:group field not found in token",
"headers": httpHeaders,
"statusCode": 200,
}
return retval
username = json.loads(event["body"])["username"]
user_pool_id = os.environ["USER_POOL_ID"]
table = ddb.Table(os.environ["USER_TABLE"])
# Query user and return contents of assets
response = table.query(KeyConditionExpression=Key("userName").eq(username))
if len(response["Items"]) == 1:
if response["Items"][0]["assets"] == None:
# User exists but no assets have been created. Only delete the Cognito user
AWS_delete.cognito_user(username, user_pool_id)
logger.info(f"INFO: User: {username} delete from Cognito, no other assets found")
else:
assets = response["Items"][0]["assets"]
# Remove dispenser from DispenserTable (and entry into to event table)
AWS_delete.clean_dispenser_tables(assets["iot"]["thingName"])
# Detach Cognito identity from IoT policy
AWS_delete.cognito_identity_iot_policy(
cognito_identity_id = assets["cognito"]["principalId"],
iot_policy=assets["cognito"]["iotPolicy"]
)
# Delete AWS thing, cert
AWS_delete.iot_thing_certificate(
assets["iot"]["certificateArn"], assets["iot"]["thingName"]
)
AWS_delete.cloud9(environment_id=assets["cloud9"]["environmentId"])
# Delete Cognito
AWS_delete.cognito_user(username, user_pool_id)
# Delete IAM user last
AWS_delete.iam_user(username)
try:
# Delete User's DynamoDB record
response = table.delete_item(Key={"userName": username})
except ClientError as e:
if e.response["Error"]["Code"] == "ConditionalCheckFailedException":
print(e.response["Error"]["Message"])
else:
raise
logger.info(f"INFO: User: {username} assets and entry deleted")
retval = {
"body": f"INFO: User: {username} assets and entry deleted",
"headers": httpHeaders,
"statusCode": 200,
}
else:
retval = {
"body": f"WARNING: User: {username} not found, no action taken",
"headers": httpHeaders,
"statusCode": 200,
}
return retval
| 5,343,772
|
def DayOfWeek(year,month,day):
"""DayOfWeek returns the day of week 1-7, 1 being Monday for the given year, month
and day"""
num=year*365
num=num+year//4+1
num=num-(year//100+1)
num=num+year//400+1
if month<3 and LeapYear(year):
num=num-1
return (num+MONTH_OFFSETS[month-1]+day+4)%7+1
| 5,343,773
|
def get_stand_exe() -> str:
"""Get the path to standexe
Returns:
Path to standexe
Raises:
ValueError: If STAND_EXE is not found in environment variables.
"""
    if os.environ.get('STAND_EXE'):
return os.environ['STAND_EXE']
else:
raise ValueError('STAND_EXE environment variable is not found.')
| 5,343,774
|
def chef_execute_cli_commands(configuration):
"""
API to generate sonic cli commands with the provided configuration
:param configuration:
:return:
"""
if not configuration:
return False
commands = ""
action_run = "action:run"
for module in configuration:
if module == "vlans":
member_commands = config_cmd = member_config_cmd = ""
for action in configuration[module]:
if action == "add":
module_action = "vlan_add"
member_action = "vlan_member_add"
elif action == "del":
module_action = "vlan_del"
member_action = "vlan_member_del"
commands += "execute '{}' do\n".format(module_action)
member_commands += "execute '{}' do\n".format(member_action)
for vlan_id in configuration[module][action]:
config_cmd += "config vlan {} {}".format(action, vlan_id) + " && "
if "members" in configuration[module][action][vlan_id]:
for member in configuration[module][action][vlan_id]["members"]:
untag = "" if member["tagged"] or member["tagged"] == "True" else "-u"
member_config_cmd += "config vlan member {} {} {} {}".format(action, vlan_id,
member["port"],
untag).strip() + " && "
else:
member_commands = ""
config_cmd = config_cmd.rstrip(" &")
member_config_cmd = member_config_cmd.rstrip(" &")
commands += " command '{}'\n".format(config_cmd)
member_commands += " command '{}'\n".format(member_config_cmd)
commands += " {}\n".format(action_run)
commands += "end\n\n"
if member_commands:
member_commands += " {}\n".format(action_run)
member_commands += "end\n\n"
commands += member_commands
if module == "fdbs":
for action in configuration[module]:
config_cmd = ""
if action == "add":
module_action = "fdb_add"
elif action == "del":
module_action = "fdb_del"
commands += "execute '{}' do\n".format(module_action)
for entry in configuration[module][action]:
mac = entry["mac"] if "mac" in entry else ""
vlan_id = entry["vlan_id"] if "vlan_id" in entry else ""
port = entry["port"] if "port" in entry else ""
if action == "del":
config_cmd += "config mac {} {} {}".format(action, mac, vlan_id)+" && "
else:
config_cmd += "config mac {} {} {} {}".format(action, mac, vlan_id, port)+" && "
config_cmd = config_cmd.rstrip(" && ")
commands += " command '{}'\n".format(config_cmd)
commands += " {}\n".format(action_run)
commands += "end\n\n"
if module == "lags":
member_commands = ""
for action in configuration[module]:
fallback = min_links = config_cmd = member_config_cmd = ""
if action == "add":
module_action = "lag_add"
member_action = "lag_member_add"
elif action == "del":
module_action = "lag_del"
member_action = "lag_member_del"
commands += "execute '{}' do\n".format(module_action)
member_commands += "execute '{}' do\n".format(member_action)
for portchannel in configuration[module][action]:
portchannel_config = configuration[module][action][portchannel]
if "fallback" in portchannel_config and (
portchannel_config["fallback"] or portchannel_config["fallback"] == "True"):
fallback = "--fallback true"
if "min-links" in portchannel_config:
min_links = "--min-links {}".format(portchannel_config["min-links"])
config_cmd += "config portchannel {} {} {} {}".format(action, portchannel, fallback,
min_links).strip() + " && "
if "links" in configuration[module][action][portchannel]:
for member in configuration[module][action][portchannel]["links"]:
member_config_cmd += "config portchannel member {} {} {}".format(action, portchannel,
member) + " && "
else:
member_commands = ""
config_cmd = config_cmd.rstrip(" && ")
member_config_cmd = member_config_cmd.rstrip(" && ")
member_commands += " command '{}'\n".format(member_config_cmd)
commands += " command '{}'\n".format(config_cmd)
commands += " {}\n".format(action_run)
commands += "end\n\n"
if member_commands:
member_commands += " {}\n".format(action_run)
member_commands += "end\n\n"
commands += member_commands
if module == "interfaces":
config_cmd = ""
commands += "execute 'interface' do\n"
for interface in configuration[module]:
if "admin_status" in configuration[module][interface]:
operation = "shutdown" if configuration[module][interface]["admin_status"] == "down" else "startup"
config_cmd += "config interface {} {}".format(operation, interface) + " && "
if "speed" in configuration[module][interface]:
config_cmd += "config interface {} speed {}".format(interface, configuration[module][interface][
"speed"]) + " && "
config_cmd = config_cmd.rstrip(" && ")
commands += " command '{}'\n".format(config_cmd)
commands += " {}\n".format(action_run)
commands += "end\n\n"
st.log("complete_command: \n{}".format(commands))
return commands
| 5,343,775
|
def _normalize(vector):
"""Returns a normalized version of a numpy vector."""
return vector/np.sqrt(np.dot(vector, vector));
| 5,343,776
|
def load_ext(ext_name, name, func=None, endpoint=None):
"""
Load an external module.
Example: ``load_ext("distkv_ext","owfs","model")`` loads …/distkv_ext/owfs/model.py
    and returns its global dict. When ``endpoint`` is given it returns the entry
    point.
Any additional keywords are added to the module dictionary.
TODO: This doesn't yet return a proper module.
Don't use this with modules that are also loaded the regular way.
"""
if ext_name not in _ext_cache:
_cache_ext(ext_name)
n = f"{ext_name}.{name}"
past = this_load.set(n)
try:
if endpoint is None:
return load_one(ext_name, name, endpoint=func)
else:
return load_one(n, func, endpoint=endpoint)
finally:
this_load.reset(past)
| 5,343,777
|
def get_chol_factor(lower_tri_vals):
"""
Args:
lower_tri_vals: numpy array, shaped as the number of lower triangular
elements, number of observations.
The values ordered according to np.tril_indices(p)
where p is the dimension of the multivariate normal distn
Returns:
Nxpxp numpy array, with the lower triangle filled in. The diagonal is exponentiated.
"""
lower_size, N = lower_tri_vals.shape
    # solve p*(p+1)/2 = lower_size to get the
# number of dimensions.
p = (-1 + (1 + 8 * lower_size) ** 0.5) / 2
p = int(p)
if not isinstance(lower_tri_vals, np.ndarray):
lower_tri_vals = np.array(lower_tri_vals)
L = np.zeros((N, p, p))
for par_ind, (k, l) in enumerate(zip(*np.tril_indices(p))):
if k == l:
# Add a small number to avoid singular matrices.
L[:, k, l] = np.exp(lower_tri_vals[par_ind, :]) + 1e-6
else:
L[:, k, l] = lower_tri_vals[par_ind, :]
return L
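
# Illustrative check (not part of the original source): for p = 2 the input has
# p*(p+1)/2 = 3 rows; diagonal entries are exponentiated, the off-diagonal is copied,
# so an all-zero input yields (approximately) the 2x2 identity for each observation.
import numpy as np

assert np.allclose(get_chol_factor(np.zeros((3, 1)))[0], np.eye(2), atol=1e-4)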
| 5,343,778
|
def debug(msg):
"""Send a debug message to the OTR debug buffer."""
debug_option = weechat.config_get(config_prefix('general.debug'))
global otr_debug_buffer
if weechat.config_boolean(debug_option):
if not otr_debug_buffer:
otr_debug_buffer = weechat.buffer_new("OTR Debug", "", "",
"debug_buffer_close_cb", "")
weechat.buffer_set(otr_debug_buffer, 'title', 'OTR Debug')
weechat.buffer_set(otr_debug_buffer, 'localvar_set_no_log', '1')
prnt(otr_debug_buffer, ('{script} debug\t{text}'.format(
script=SCRIPT_NAME,
text=PYVER.unicode(msg)
)))
| 5,343,779
|
def get_angles_gram_mask(gram, mask):
"""
Input: (gram) square numpy array, (mask) square numpy array where
1 = select, 0 = do not select
Output: (angles) numpy array or angles in mask in degrees
"""
angles = gram * mask
angles = angles[angles != 0]
angles = np.degrees(np.arccos(angles))
return angles
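
# Illustrative check (assumed toy values): a 2x2 gram matrix of unit vectors at 60
# degrees, with a mask selecting only the off-diagonal entries, yields those angles.
import numpy as np

_demo_gram = np.array([[1.0, 0.5], [0.5, 1.0]])   # cos(60 deg) = 0.5
_demo_mask = np.array([[0, 1], [1, 0]])
assert np.allclose(get_angles_gram_mask(_demo_gram, _demo_mask), [60.0, 60.0])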
| 5,343,780
|
def _expectation(p, kern1, feat1, kern2, feat2, nghp=None):
"""
Compute the expectation:
expectation[n] = <Ka_{Z1, x_n} Kb_{x_n, Z2}>_p(x_n)
- Ka_{.,.}, Kb_{.,.} :: Linear kernels
Ka and Kb as well as Z1 and Z2 can differ from each other, but this is supported
only if the Gaussian p is Diagonal (p.cov NxD) and Ka, Kb have disjoint active_dims
in which case the joint expectations simplify into a product of expectations
:return: NxMxM
"""
if kern1.on_separate_dims(kern2) and isinstance(p, DiagonalGaussian): # no joint expectations required
eKxz1 = expectation(p, (kern1, feat1))
eKxz2 = expectation(p, (kern2, feat2))
return eKxz1[:, :, None] * eKxz2[:, None, :]
if kern1 != kern2 or feat1 != feat2:
raise NotImplementedError("The expectation over two kernels has only an "
"analytical implementation if both kernels are equal.")
kern = kern1
feat = feat1
with params_as_tensors_for(kern, feat):
# use only active dimensions
Xcov = kern._slice_cov(tf.matrix_diag(p.cov) if isinstance(p, DiagonalGaussian) else p.cov)
Z, Xmu = kern._slice(feat.Z, p.mu)
N = tf.shape(Xmu)[0]
var_Z = kern.variance * Z
tiled_Z = tf.tile(tf.expand_dims(var_Z, 0), (N, 1, 1)) # NxMxD
XX = Xcov + tf.expand_dims(Xmu, 1) * tf.expand_dims(Xmu, 2) # NxDxD
return tf.matmul(tf.matmul(tiled_Z, XX), tiled_Z, transpose_b=True)
| 5,343,781
|
def rotX(angle):
"""
-----------------------------------------------------------------------
Purpose: Calculate the matrix that represents a 3d rotation
around the X axis.
Input: Rotation angle in degrees
Returns: A 3x3 matrix representing the rotation about angle around
X axis.
Reference: Diebel, J. 2006, Stanford University, Representing Attitude:
Euler angles, Unit Quaternions and Rotation Vectors.
http://ai.stanford.edu/~diebel/attitude.html
Notes: Return the rotation matrix for a rotation around the X axis.
This is a rotation in the YZ plane. Note that we construct
a new vector with: xnew = R1.x
In the literature, this rotation is usually called R1
-----------------------------------------------------------------------
"""
a = d2r(angle)
v = n.asmatrix(n.zeros((3,3), 'd'))
cosa = n.cos(a)
sina = n.sin(a)
v[0,0] = 1.0; v[0,1] = 0.0; v[0,2] = 0.0;
v[1,0] = 0.0; v[1,1] = cosa; v[1,2] = sina;
v[2,0] = 0.0; v[2,1] = -sina; v[2,2] = cosa;
return v
| 5,343,782
|
def extract_url(url):
"""
extract the real url from yahoo rss feed item
"""
_url = None
if '*' in url: # old style yahoo redirect link
_url = "http" + url.split("*http")[-1]
elif url.startswith("http://finance.yahoo.com/r/"): # new style yahoo redirect link
headers = {
"User-Agent": "Mozilla/5.0 (iPad; U; CPU OS 4_2_1 like Mac OS X; en-gb) AppleWebKit/533.17.9 (KHTML, like Gecko) Version/5.0.2 Mobile/8C148 Safari/6533.18.5",
"From": "http://finance.yahoo.com"
}
res = requests.get(url, headers=headers)
if res.status_code == 200:
page_source = res.text
if page_source.startswith("<script src="): # yahoo now uses javascript to make page redirection
_url = page_source.split("URL=\'")[-1].split("\'")[0]
else:
_url = url # TODO: is this correct?
else:
logging.warning("%sabnormal http status code [%s] url=%s%s", Back.RED, res.status_code, url, Style.RESET_ALL)
else:
_url = url
# if _url is not None:
# if "=yahoo" in _url: # ignore redirect tracking parameters
# _url = "{0}://{1}{2}".format(*urlparse.urlparse(_url))
return _url
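
# Quick sanity check (illustrative URL, not from the original source): old-style Yahoo
# redirect links embed the real URL after a '*'.
assert extract_url('http://us.rd.yahoo.com/finance/*http://example.com/story.html') == \
    'http://example.com/story.html'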
| 5,343,783
|
def LF_positive_MeshTerm(report):
"""
Looking for positive mesh terms
"""
for idx in range(1,len(categories)):
reg_pos = re.compile(categories[idx],re.IGNORECASE)
reg_neg = re.compile('(No|without|resolution)\\s([a-zA-Z0-9\-,_]*\\s){0,10}'+categories[idx],re.IGNORECASE)
for s in report.report_text.text.split("."):
if reg_pos.search(s) and (not reg_neg.search(s)) and (not reg_equivocation.search(s)):
return ABNORMAL_VAL
return ABSTAIN_VAL
| 5,343,784
|
def adopt(
objs: K8sObjects,
owner: Optional[bodies.Body] = None,
*,
nested: Optional[Iterable[dicts.FieldSpec]] = None,
) -> None:
"""
The children should be in the same namespace, named after their parent, and owned by it.
"""
real_owner = _guess_owner(owner)
append_owner_reference(objs, owner=real_owner)
harmonize_naming(objs, name=real_owner.get('metadata', {}).get('name', None))
adjust_namespace(objs, namespace=real_owner.get('metadata', {}).get('namespace', None))
label(objs, labels=real_owner.get('metadata', {}).get('labels', {}), nested=nested)
| 5,343,785
|
def user_ratio_shuffle_split_with_targets(X,
train_ratio=0.8,
n_valid_users=1000,
n_test_users=1000,
minimum_interaction=3,
rand_state=None):
""" Split given test / valid user records into subsets
User records are splitted proportionally per user
as same as `user_ratio_shuffle_split`.
However, split is only made for randomly selected test / valid user population.
Inputs:
X (scipy.sparse.csr_matrix): user-item matrix
train_ratio (float): ratio of training records per user
n_valid_users (int): number of validation users
n_test_users (int): number of testing users
minimum_interaction (int): minimum interaction of user to be considered.
if it's smaller than this,
put all records to the training set
rand_state (bool or int): random state seed number or None
Returns:
        scipy.sparse.csr_matrix: training matrix
        scipy.sparse.csr_matrix: held-out (validation/test) records matrix
        tuple: (train_users, valid_users, test_users) index arrays
"""
# first draw valid / test users
rnd_idx = np.random.permutation(X.shape[0])
valid_users = rnd_idx[:n_valid_users]
test_users = rnd_idx[n_valid_users:n_valid_users + n_test_users]
train_users = rnd_idx[n_valid_users + n_test_users:]
# split records for valid / test users
Xvl, Xvl_vl, Xvl_ts = user_ratio_shuffle_split(X[valid_users],
train_ratio,
0.5, # valid_ratio
minimum_interaction,
rand_state)
# merge them, as this scheme does not need within user validation set
Xvl_ts = Xvl_vl + Xvl_ts
Xts, Xts_vl, Xts_ts = user_ratio_shuffle_split(X[test_users],
train_ratio,
0.5, # valid ratio
minimum_interaction,
rand_state)
Xts_ts = Xts_vl + Xts_ts # merge
# assign them back to the original data
Xtr = X[train_users]
Xtr_ = sp.vstack([Xvl, Xts, Xtr])
Xts_ = sp.vstack([Xvl_ts, Xts_ts, Xtr])
# un-shuffle
reverse_idx = {j:i for i, j in enumerate(rnd_idx)}
reverse_idx = [reverse_idx[i] for i in range(X.shape[0])]
Xtr_ = Xtr_[reverse_idx]
Xts_ = Xts_[reverse_idx]
return Xtr_, Xts_, (train_users, valid_users, test_users)
| 5,343,786
|
def slice_core(core_tensor, inputs):
"""
Get matrix slices by indexing or contracting inputs, depending on input dtype
"""
assert isinstance(core_tensor, torch.Tensor)
assert isinstance(inputs, torch.Tensor)
if is_int_type(inputs):
return core_tensor[:, inputs, :]
else:
return torch.einsum("jak,ba->jbk", core_tensor, inputs)
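
# Illustrative sketch (assumes torch and the is_int_type helper from the surrounding
# module): integer inputs index the core's input axis directly, while float inputs
# (e.g. one-hot rows) are contracted against it, which produces the same slices.
import torch

_demo_core = torch.randn(2, 5, 2)        # (left bond, input dim, right bond)
_demo_ints = torch.tensor([0, 3])
_demo_onehot = torch.eye(5)[_demo_ints]  # one-hot rows selecting the same indices
# slice_core(_demo_core, _demo_ints) and slice_core(_demo_core, _demo_onehot)
# are both of shape (2, 2, 2) and numerically equal.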
| 5,343,787
|
def start_with_strategy(args, strategy, ants):
"""Reads command-line arguments and starts a game with those options."""
import argparse
parser = argparse.ArgumentParser(description="Play Ants vs. SomeBees")
parser.add_argument('-d', type=str, metavar='DIFFICULTY',
help='sets difficulty of game (test/easy/normal/hard/extra-hard)')
parser.add_argument('-w', '--water', action='store_true',
help='loads a full layout with water')
parser.add_argument('--food', type=int,
help='number of food to start with when testing', default=2)
args = parser.parse_args()
assault_plan = make_normal_assault_plan(ants)
layout = ants.dry_layout
tunnel_length = 10
num_tunnels = 3
food = args.food
if args.water:
layout = ants.wet_layout
if args.d in ['t', 'test']:
assault_plan = make_test_assault_plan(ants)
num_tunnels = 1
elif args.d in ['e', 'easy']:
assault_plan = make_easy_assault_plan(ants)
num_tunnels = 2
elif args.d in ['n', 'normal']:
assault_plan = make_normal_assault_plan(ants)
num_tunnels = 3
elif args.d in ['h', 'hard']:
assault_plan = make_hard_assault_plan(ants)
num_tunnels = 4
elif args.d in ['i', 'extra-hard']:
assault_plan = make_extra_hard_assault_plan(ants)
num_tunnels = 4
beehive = ants.Hive(assault_plan)
dimensions = (num_tunnels, tunnel_length)
return ants.GameState(strategy, beehive, ants.ant_types(), layout, dimensions, food).simulate()
| 5,343,788
|
def is_trivially_equal(lhs, rhs):
"""
    Assert that lhs and rhs are trivially equal, i.e. lhs - rhs is trivially zero.
Use this for comparison of Sage expressions. Otherwise you
may start the whole proof machinery which may not exist at
the time of testing.
"""
assert (lhs - rhs).is_trivial_zero()
| 5,343,789
|
async def test_discovery_by_firstbeat(
socket_push: wizlight, caplog: pytest.LogCaptureFixture
) -> None:
"""Test discovery from first beat."""
bulb_type = await socket_push.get_bulbtype()
assert bulb_type == BulbType(
features=Features(
color=False,
color_tmp=False,
effect=False,
brightness=False,
dual_head=False,
),
name="ESP10_SOCKET_06",
kelvin_range=KelvinRange(max=2700, min=2700),
bulb_type=BulbClass.SOCKET,
fw_version="1.25.0",
white_channels=2,
white_to_color_ratio=20,
)
last_discovery: Optional[DiscoveredBulb] = None
discovery_event = asyncio.Event()
def _on_discovery(discovery: DiscoveredBulb) -> None:
nonlocal last_discovery
last_discovery = discovery
discovery_event.set()
with patch("pywizlight.push_manager.LISTEN_PORT", 0):
assert await socket_push.start_push(lambda data: None) is True
assert socket_push.mac is not None
socket_push.set_discovery_callback(_on_discovery)
push_manager = PushManager().get()
push_port = push_manager.push_transport.get_extra_info("sockname")[1]
push_in_transport_proto = await asyncio.get_event_loop().create_datagram_endpoint(
lambda: WizProtocol(on_response=lambda resp, addr: None),
remote_addr=("127.0.0.1", push_port),
)
push_transport = cast(asyncio.DatagramTransport, push_in_transport_proto[0])
push_transport.sendto(
b"test",
("127.0.0.1", push_port),
)
push_transport.sendto(
b"GARBAGE",
("127.0.0.1", push_port),
)
push_transport.sendto(
to_wiz_json(
{
"method": "firstBeat",
"env": "pro",
"params": {"mac": socket_push.mac},
}
).encode(),
("127.0.0.1", push_port),
)
await asyncio.wait_for(discovery_event.wait(), timeout=1)
assert last_discovery is not None
assert last_discovery == DiscoveredBulb("127.0.0.1", socket_push.mac)
push_transport.close()
assert "GARBAGE" in caplog.text
| 5,343,790
|
def assert_object_equals_dict(attributemap, obj, d):
"""Assert that the attributes of obj equal the dict values
:param attributemap: a dict to map attributes to dict keys
:type attributemap: :class:`dict`
:param obj: the object with attributes
:param d: the dictionary
:type d: :class:`dict`
:returns: None
:rtype: None
:raises: :class:`AssertionError`
"""
for attr, key in attributemap.items():
assert getattr(obj, attr) == d.get(key)
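
# Usage sketch (illustrative names, not taken from the original source): the attribute
# map ties object attributes to the dict keys they must match.
class _DemoUser(object):
    def __init__(self, name, age):
        self.name = name
        self.age = age

assert_object_equals_dict({'name': 'username', 'age': 'age'},
                          _DemoUser('alice', 30),
                          {'username': 'alice', 'age': 30})  # passes silently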
| 5,343,791
|
def replace_text(name, old, new, recurse=False, ext=''):
"""Replaces `name` with binary string params `old` and `new`"""
from clay.files.core import _rt_helper
if recurse:
sure = eval(input('Replace all "{1}" in "{0}" with "{2}" (True/False)? '.format(name, old, new)))
if sure:
fp = None
for root, dirs, files in _os.walk(name):
for f in files:
try:
fp = open(_os.path.join(root, f), 'rb')
if old in fp.read() and f.endswith(ext):
_rt_helper(_os.path.join(root, f), old, new)
except Exception as e:
raise e
finally:
if fp: fp.close()
print('Done')
else:
print('Aborted')
else:
_rt_helper(name, old, new)
| 5,343,792
|
def diff_tools(preferred_tool='auto'):
# type: (str) -> Iterator[Tuple[str, str, List[str]]]
"""Yields a number of installed and working diff tools that we can use for this program.
We compare a "Hello, World!" program against itself with two different modifications and
check if a diff tool returns the expected results.
The diff tool must treat any kind of change in whitespace as a difference.
Also it must be able to function with a bit of binary data.
"""
source = '\n'.join(HELLOWORLD.splitlines()) + '\n'
# Change leading whitespace.
mod1 = source.replace(' printf', ' printf')
# Insert two blank lines and trailing whitespace.
tmp = mod1.replace(' return 0;', '\n\n return 0; ')
# Replace last line feed with two CRLFs with a zero byte in between.
mod2 = tmp[:-1] + '\r\n\000\r\n'
expected_distances = [17, 4, 0]
fd, tmpfilename = tempfile.mkstemp(suffix='.c', prefix='whatstyle_hello_')
os.write(fd, bytestr(source))
os.close(fd)
try:
for difftool in DIFF_SPECS:
if preferred_tool != 'auto' and preferred_tool != difftool[0]:
continue
works = True
try:
for content2, expdist in zip([mod2, mod1, source], expected_distances):
[dist] = distances_from_diffs(difftool, [(tmpfilename, content2)])
if dist != expdist:
works = False
break
except OSError as exc:
if exc.errno in [errno.ENOENT, None]:
# The diff tool was not found
continue
else:
raise
if works:
yield difftool
finally:
os.remove(tmpfilename)
| 5,343,793
|
def dep_fetch_devices(app: Flask, dep: DEP, dep_account_id: int):
"""Perform fetch or sync of devices.
TODO: If default DEP Profile is nominated, it is queued for assignment here. But may want to check `profile_status`
to see whether only devices with the `removed` status are considered unassigned.
See:
https://docs.sqlalchemy.org/en/latest/orm/contextual.html
"""
thread_session = db.create_scoped_session()
dep_account: DEPAccount = thread_session.query(DEPAccount).one()
if dep_account.cursor is not None:
app.logger.info('Syncing using previous cursor: %s', dep_account.cursor)
else:
app.logger.info('No DEP cursor found, performing a full fetch')
# TODO: if fetched_until is quite recent, there's no reason to fetch again
for device_page in dep.devices(dep_account.cursor):
print(device_page)
for device in device_page['devices']:
if 'op_type' in device: # its a sync, not a fetch
optype = DEPOperationType(device['op_type'])
if optype == DEPOperationType.Added:
app.logger.debug('DEP Added: %s', device['serial_number'])
elif optype == DEPOperationType.Modified:
app.logger.debug('DEP Modified: %s', device['serial_number'])
elif optype == DEPOperationType.Deleted:
app.logger.debug('DEP Deleted: %s', device['serial_number'])
else:
app.logger.error('DEP op_type not recognised (%s), skipping', device['op_type'])
continue
else:
pass
try:
d: Device = thread_session.query(Device).filter(Device.serial_number == device['serial_number']).one()
d.description = device['description']
d.model = device['model']
d.os = device['os']
d.device_family = device['device_family']
d.color = device['color']
d.profile_status = device['profile_status']
if device['profile_status'] != 'empty':
d.profile_uuid = device.get('profile_uuid', None) # Only exists in DEP Sync not Fetch?
d.profile_assign_time = dateutil.parser.parse(device['profile_assign_time'])
d.device_assigned_by = device['device_assigned_by']
d.device_assigned_date = dateutil.parser.parse(device['device_assigned_date'])
d.is_dep = True
except sqlalchemy.orm.exc.NoResultFound:
app.logger.debug('No existing device record for serial: %s', device['serial_number'])
if device['profile_status'] != 'empty':
device['profile_assign_time'] = dateutil.parser.parse(device['profile_assign_time'])
device['device_assigned_date'] = dateutil.parser.parse(device['device_assigned_date'])
if 'op_type' in device:
del device['op_type']
del device['op_date']
del device['profile_assign_time']
del device['device_assigned_date']
d = Device(**device)
d.is_dep = True
thread_session.add(d)
except sqlalchemy.exc.StatementError as e:
app.logger.error('Got a statement error trying to insert a DEP device: {}'.format(e))
app.logger.debug('Last DEP Cursor was: %s', device_page['cursor'])
dep_account.cursor = device_page.get('cursor', None)
dep_account.more_to_follow = device_page.get('more_to_follow', None)
dep_account.fetched_until = dateutil.parser.parse(device_page['fetched_until'])
thread_session.commit()
| 5,343,794
|
def geo2xy(ff_lat_pto, ff_lng_pto, ff_lat_ref=cdf.M_REF_LAT, ff_lng_ref=cdf.M_REF_LNG):
"""
transforma coordenadas geográficas em coordenadas cartesianas
:param ff_lat_pto: latitude em graus
:param ff_lng_pto: longitude em graus
:param ff_lat_ref: latitude do ponto de referência
:param ff_lng_ref: longitude do ponto de referência
:returns: coordenadas polares do ponto (azimute, distância em NM)
"""
# logger
M_LOG.info(">> geo2xy")
# check input
assert -90. <= ff_lat_pto <= 90.
assert -180. <= ff_lng_pto <= 180.
assert -90. <= ff_lat_ref <= 90.
assert -180. <= ff_lng_ref <= 180.
# converte de geográfica para polar
lf_azim, lf_dist = geo2pol(ff_lat_pto, ff_lng_pto, ff_lat_ref, ff_lng_ref)
# converte de polar para cartesiana
lf_x = lf_dist * math.sin(math.radians(lf_azim))
lf_y = lf_dist * math.cos(math.radians(lf_azim))
# correção das coordenadas X e Y devido ao efeito da declinação magnetica
# lf_x, lf_y = decl_xyz(lf_x, lf_y, lf_z, f_ref.f_dcl_mag)
# return
return lf_x, lf_y
| 5,343,795
|
def size(e):
"""
:rtype: Column
"""
return col(Size(parse(e)))
| 5,343,796
|
def b64pad(b64data):
"""Pad base64 string with '=' to achieve a length that is a multiple of 4
"""
    return b64data + '=' * ((4 - len(b64data) % 4) % 4)
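
# Quick checks (illustrative): an unpadded value gets '=' appended up to the next
# multiple of 4, while an already-aligned value is returned unchanged.
assert b64pad('aGk') == 'aGk='
assert b64pad('aGVsbA==') == 'aGVsbA=='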
| 5,343,797
|
def get_peak_electric_demand(points_on_line):
"""
Initialize Power Demand
:param points_on_line: information about every node in study case
:type points_on_line: GeoDataFrame
:returns:
- **dict_peak_el**: Value is the ELECTRIC peak demand depending on thermally connected or disconnected.
:rtype: dict[node index][thermally connected bool]
"""
dict_peak_el = {}
dict_peak_el['thermally_conn_peak_el'] = {}
dict_peak_el['thermally_disconn_peak_el'] = {}
for idx_node, node in points_on_line.iterrows():
if not np.isnan(node['GRID0_kW']):
thermally_conn_peak_el = (node['Eal0_kW']
+ node['Edata0_kW']
+ node['Epro0_kW']
+ node['Eaux0_kW']
+ node['E_ww0_kW'])
thermally_disconn_peak_el = (thermally_conn_peak_el
+ node['E_hs0_kW']
+ node['E_cs0_kW'])
dict_peak_el['thermally_conn_peak_el'][idx_node] = thermally_conn_peak_el / (S_BASE * 10 ** 3) # kW/MW
dict_peak_el['thermally_disconn_peak_el'][idx_node] = thermally_disconn_peak_el / (
S_BASE * 10 ** 3) # kW / MW
return dict_peak_el
| 5,343,798
|
def replace_obfuscatables(module, tokens, obfunc, replace, name_generator, table=None):
"""
Iterates over *tokens*, which must be an equivalent output to what
tokenize.generate_tokens() produces, replacing the given identifier name
(*replace*) by calling *obfunc* on each token with the following parameters:
- **module:** The name of the script we're currently obfuscating.
- **tokens:** The current list of all tokens.
- **index:** The current position.
- **replace:** The token string that we're replacing.
- **replacement:** A randomly generated, unique value that will be used to replace, *replace*.
- **right_of_equal:** A True or False value representing whether or not the token is to the right of an equal sign. **Note:** This gets reset to False if a comma or open paren are encountered.
- **inside_parens:** An integer that is incremented whenever an open paren is encountered and decremented when a close paren is encountered.
- **inside_function:** If not False, the name of the function definition we're inside of (used in conjunction with *keyword_args* to determine if a safe replacement can be made).
*obfunc* is expected to return the token string if that token can be safely
obfuscated **or** one of the following optional values which will instruct
find_obfuscatables() how to proceed:
- **'__open_paren__'** Increment the inside_parens value
- **'__close_paren__'** Decrement the inside_parens value
- **'__comma__'** Reset the right_of_equal value to False
- **'__right_of_equal__'** Sets the right_of_equal value to True
**Note:** The right_of_equal and the inside_parens values are reset whenever a NEWLINE is encountered.
When obfuscating a list of files, *table* is used to keep track of which
obfuscatable identifiers are which inside each resulting file. It must be
an empty dictionary that will be populated like so::
{orig_name: obfuscated_name}
This *table* of "what is what" will be used to ensure that references from
one script/module that call another are kept in sync when they are replaced
with obfuscated values.
"""
# Pretend the first line is '#\n':
skip_line = False
skip_next = False
right_of_equal = False
inside_parens = 0
inside_function = None
indent = 0
function_indent = 0
replacement = next(name_generator)
for index, tok in enumerate(tokens):
token_type = tok[0]
token_string = tok[1]
# if token_string == 'chairperson':
# print ('-------------\n')
if token_type == tokenize.NEWLINE:
skip_line = False
right_of_equal = False
inside_parens = 0
if token_type != tokenize.NAME:
continue
# elif token_type == tokenize.INDENT:
# indent += 1
# elif token_type == tokenize.DEDENT:
# indent -= 1
# if inside_function and function_indent == indent:
# function_indent = 0
# inside_function = False
if token_string == 'function':
# function_indent = indent
function_name = tokens[index+1][1]
inside_function = function_name
elif inside_function is not None and token_string == '}':
inside_function = None
result = obfunc(tokens, index, replace, replacement, right_of_equal, inside_parens, inside_function)
if result:
if skip_next:
skip_next = False
elif skip_line:
pass
elif result == '__skipline__':
skip_line = True
elif result == '__skipnext__':
skip_next = True
elif result == '__open_paren__':
right_of_equal = False
inside_parens += 1
elif result == '__close_paren__':
inside_parens -= 1
elif result == '__comma__':
right_of_equal = False
elif result == '__right_of_equal__':
# We only care if we're right of the equal sign outside of
# parens (which indicates arguments)
if not inside_parens:
right_of_equal = True
else:
if table: # Save it for later use in other files
combined_name = "%s.%s" % (module, token_string)
try: # Attempt to use an existing value
tokens[index][1] = table[0][combined_name]
except KeyError: # Doesn't exist, add it to table
table[0].update({combined_name: result})
tokens[index][1] = result
else:
tokens[index][1] = result
| 5,343,799
|