| content | id |
|---|---|
def getCommandLine() -> argparse.Namespace:
    """Parse the command-line arguments.
    Returns:
        argparse.Namespace: the parsed arguments
    """
    parser = argparse.ArgumentParser(description='Convert tests from TFS to Testrail/TestIT XML format')
    action = parser.add_subparsers(dest='action', required=True)
    subparser = action.add_parser('to-xml', help='Download tests from TFS and convert them to XML', add_help=False)
    subparser.add_argument('-i', '--id', type=int, required=True, help='Test plan ID')
    subparser.add_argument('-h', '--help', action='help', default=argparse.SUPPRESS,
                           help='Show the options for converting to XML')
    subparser.add_argument('--project', '-pj', required=True, type=str, help='Project name within the collection')
    subparser.add_argument('--collection', '-ct', required=True, type=str, help='Collection name')
    subparser.add_argument('--cert', '-c', required=False, type=Path, help='Certificate for TFS access', default=None)
    return parser.parse_args()
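A minimal usage sketch, assuming this parser is the program entry point; the commented-out converter call is hypothetical and only illustrates the attribute names defined above.

args = getCommandLine()
if args.action == 'to-xml':
    print(f"Exporting test plan {args.id} from {args.collection}/{args.project}")
    # convert_to_xml(args.id, args.project, args.collection, cert=args.cert)  # hypothetical downstream call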
| 18,300
|
def listall(context, uri=None):
"""
*musicpd.org, music database section:*
``listall [URI]``
Lists all songs and directories in ``URI``.
"""
result = []
root_path = translator.normalize_path(uri)
# TODO: doesn't the dispatcher._call_handler have enough info to catch
# the error this can produce, set the command and then 'raise'?
try:
uri = context.directory_path_to_uri(root_path)
except MpdNoExistError as e:
e.command = 'listall'
e.message = 'Not found'
raise
browse_futures = [(root_path, context.core.library.browse(uri))]
while browse_futures:
base_path, future = browse_futures.pop()
for ref in future.get():
if ref.type == Ref.DIRECTORY:
path = '/'.join([base_path, ref.name.replace('/', '')])
result.append(('directory', path))
browse_futures.append(
(path, context.core.library.browse(ref.uri)))
elif ref.type == Ref.TRACK:
result.append(('file', ref.uri))
if not result:
raise MpdNoExistError('Not found')
return [('directory', root_path)] + result
| 18,301
|
def send_task_to_executor(task_tuple: TaskInstanceInCelery) \
-> Tuple[TaskInstanceKey, CommandType, Union[AsyncResult, ExceptionWithTraceback]]:
"""Sends task to executor."""
key, _, command, queue, task_to_run = task_tuple
try:
with timeout(seconds=OPERATION_TIMEOUT):
result = task_to_run.apply_async(args=[command], queue=queue)
except Exception as e: # pylint: disable=broad-except
exception_traceback = "Celery Task ID: {}\n{}".format(key, traceback.format_exc())
result = ExceptionWithTraceback(e, exception_traceback)
return key, command, result
| 18,302
|
def createPyarchFilePath(filePath):
"""
This method translates from an ESRF "visitor" path to a "pyarch" path:
/data/visitor/mx415/id14eh1/20100209 -> /data/pyarch/2010/id14eh1/mx415/20100209
"""
pyarchFilePath = None
if isinstance(filePath, str):
filePath = pathlib.Path(filePath)
listOfDirectories = filePath.parts
if UtilsConfig.isEMBL():
if 'p13' in listOfDirectories[0:3] or 'P13' in listOfDirectories[0:3]:
pyarchFilePath = os.path.join('/data/ispyb/p13',
*listOfDirectories[4:])
else:
pyarchFilePath = os.path.join('/data/ispyb/p14',
*listOfDirectories[4:])
return pyarchFilePath
listBeamlines = ['bm30a', 'id14eh1', 'id14eh2', 'id14eh3', 'id14eh4',
'id23eh1', 'id23eh2', 'id29', 'id30a1',
'id30a2', 'id30a3', 'id30b']
    # Check that we have at least five levels of directories:
if len(listOfDirectories) > 5:
dataDirectory = listOfDirectories[1]
secondDirectory = listOfDirectories[2]
thirdDirectory = listOfDirectories[3]
fourthDirectory = listOfDirectories[4]
fifthDirectory = listOfDirectories[5]
year = fifthDirectory[0:4]
proposal = None
beamline = None
if dataDirectory == 'data' and secondDirectory == 'gz':
if thirdDirectory == 'visitor':
proposal = fourthDirectory
beamline = fifthDirectory
elif fourthDirectory == 'inhouse':
proposal = fifthDirectory
beamline = thirdDirectory
else:
raise RuntimeError(
'Illegal path for UtilsPath.createPyarchFilePath: ' +
'{0}'.format(filePath))
listOfRemainingDirectories = listOfDirectories[6:]
elif dataDirectory == 'data' and secondDirectory == 'visitor':
proposal = listOfDirectories[3]
beamline = listOfDirectories[4]
listOfRemainingDirectories = listOfDirectories[5:]
elif dataDirectory == 'data' and secondDirectory in listBeamlines:
beamline = secondDirectory
proposal = listOfDirectories[4]
listOfRemainingDirectories = listOfDirectories[5:]
if proposal is not None and beamline is not None:
pyarchFilePath = pathlib.Path('/data/pyarch') / year / beamline
pyarchFilePath = pyarchFilePath / proposal
for directory in listOfRemainingDirectories:
pyarchFilePath = pyarchFilePath / directory
if pyarchFilePath is None:
logger.warning(
'UtilsPath.createPyarchFilePath: path not converted for' +
' pyarch: %s ' % filePath)
else:
pyarchFilePath = pyarchFilePath.as_posix()
return pyarchFilePath
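A hedged illustration of the mapping described in the docstring, assuming `UtilsConfig.isEMBL()` returns False for this deployment:

print(createPyarchFilePath('/data/visitor/mx415/id14eh1/20100209'))
# -> /data/pyarch/2010/id14eh1/mx415/20100209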
| 18,303
|
def test_pre_create_undeploy_for_blue_green(m_stop, mock_filter_units,
mock_undeploy):
"""
    Should undeploy all versions for mode: blue-green
"""
# Given: Deployment parameters
deployment = _create_test_deployment_with_defaults_applied()
deployment['deployment']['mode'] = DEPLOYMENT_MODE_BLUEGREEN
# Mock implementation for filter_units
mock_filter_units.return_value = []
# When: I undeploy in pre-create phase
result = _pre_create_undeploy.s(deployment, mock_callback.si())\
.apply_async()
result.get(timeout=1).result
# Then: All versions of application are un-deployed.
mock_undeploy.assert_called_with(ANY, deployment['deployment']['name'],
deployment['deployment']['version'],
exclude_version=None)
m_stop.assert_called_with(
ANY, deployment['deployment']['name'],
version=deployment['deployment']['version'], exclude_version=None)
| 18,304
|
def get_embed(input_data, vocab_size, embed_dim):
"""
Create embedding for <input_data>.
:param input_data: TF placeholder for text input.
:param vocab_size: Number of words in vocabulary.
:param embed_dim: Number of embedding dimensions
:return: Embedded input.
"""
# TODO: Implement Function
embedding = tf.Variable(tf.random_uniform((vocab_size, embed_dim), -1, 1))
embed = tf.nn.embedding_lookup(embedding, input_data)
return embed
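A small sketch of wiring `get_embed` into a graph, assuming TensorFlow 1.x (consistent with the `tf.random_uniform` call above); the placeholder name and sizes are illustrative.

input_text = tf.placeholder(tf.int32, shape=[None, None], name='input')
embedded = get_embed(input_text, vocab_size=10000, embed_dim=300)
# embedded has shape [batch_size, sequence_length, 300]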
| 18,305
|
def geometries_from_bbox(north, south, east, west, tags):
"""
Create a GeoDataFrame of OSM entities within a N, S, E, W bounding box.
Parameters
----------
north : float
northern latitude of bounding box
south : float
southern latitude of bounding box
east : float
eastern longitude of bounding box
west : float
western longitude of bounding box
tags : dict
Dict of tags used for finding objects in the selected area. Results
returned are the union, not intersection of each individual tag.
Each result matches at least one given tag. The dict keys should be
OSM tags, (e.g., `building`, `landuse`, `highway`, etc) and the dict
values should be either `True` to retrieve all items with the given
tag, or a string to get a single tag-value combination, or a list of
strings to get multiple values for the given tag. For example,
`tags = {'building': True}` would return all building footprints in
the area. `tags = {'amenity':True, 'landuse':['retail','commercial'],
'highway':'bus_stop'}` would return all amenities, landuse=retail,
landuse=commercial, and highway=bus_stop.
Returns
-------
gdf : geopandas.GeoDataFrame
Notes
-----
You can configure the Overpass server timeout, memory allocation, and
other custom settings via ox.config().
"""
# convert bounding box to a polygon
polygon = utils_geo.bbox_to_poly(north, south, east, west)
# create GeoDataFrame of geometries within this polygon
gdf = geometries_from_polygon(polygon, tags)
return gdf
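An illustrative call mirroring the docstring's tag examples; the coordinates are arbitrary and the surrounding OSMnx-style module context of the snippet is assumed.

tags = {'building': True, 'highway': 'bus_stop'}
gdf = geometries_from_bbox(north=37.80, south=37.78, east=-122.40, west=-122.42, tags=tags)
print(gdf.shape)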
| 18,306
|
def test():
    """For manual testing"""
    api = TestHlp()
    # Read the configuration file
    print(api.loadConfig("Hsconfig.ini"))
    # Initialize
    print(api.init())
    # Connect to the server
    print(api.connectServer())
| 18,307
|
def dBzdtAnalCircT(a, t, sigma):
"""
Hz component of analytic solution for half-space (Circular-loop source)
Src and Rx are on the surface and receiver is located at the center of the loop.
Src waveform here is step-off.
.. math::
\\frac{\partial h_z}{\partial t} = -\\frac{I}{\mu_0\sigma a^3} \
\left( 3erf(\\theta a) - \\frac{2}{\sqrt{\pi}}\\theta a (3+2\\theta^2 a^2) e^{-\\theta^2a^2}\\right)
.. math::
\\theta = \sqrt{\\frac{\sigma\mu}{4t}}
"""
theta = np.sqrt((sigma*mu_0)/(4*t))
const = -1/(mu_0*sigma*a**3)
ta = theta*a
eta = erf(ta)
t1 = 3*eta
t2 = -2/(np.pi**0.5)*ta*(3+2*ta**2)*np.exp(-ta**2)
dhzdt = const*(t1+t2)
return mu_0*dhzdt
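A rough numerical check of the decay curve, assuming the module-level imports used above (`numpy` as `np`, `scipy.constants.mu_0`, `scipy.special.erf`); the loop radius and conductivity are arbitrary.

times = np.logspace(-5, -2, 4)                      # seconds after step-off
print(dBzdtAnalCircT(a=13.0, t=times, sigma=0.01))  # 13 m loop over a 0.01 S/m half-space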
| 18,308
|
def download_files(dir_path: str, urls: List[str]) -> None:
"""
    Asynchronously download and decompress files.
    :param dir_path: directory to download and extract into
    :param urls: list of file URLs to fetch
    :return: None
"""
os.makedirs(dir_path, exist_ok=True)
sema = asyncio.BoundedSemaphore(5)
async def fetch_file(url):
fname = url.split("/")[-1]
        # The website serves the 2007 zip files without the '07' prefix; dates for other years are still YYMMDD.
if fname.startswith('07'):
fname = fname[2:]
downloaded_date = os.path.basename(fname).split('.')[0]
async with sema, aiohttp.ClientSession() as session:
logger.info("Download %s", url)
async with session.get(url, ssl=False) as resp:
assert resp.status == 200
data = await resp.read()
async with aiofiles.open(str(Path(dir_path, fname)), "wb") as outfile:
await outfile.write(data)
zip_dir_path = str(Path(dir_path, downloaded_date))
if os.path.isdir(zip_dir_path):
shutil.rmtree(zip_dir_path)
os.makedirs(zip_dir_path)
with BytesIO(data) as fin:
with zipfile.ZipFile(fin) as zipfin:
logger.info("Extracting file %s", fname)
for zipinfo in zipfin.infolist():
if zipinfo.filename.endswith('.txt'):
date_filename = os.path.basename(zipinfo.filename)
async with aiofiles.open(str(Path(zip_dir_path, date_filename)), "wb") as outfile:
logger.info("Extracting file %s", date_filename)
with zipfin.open(zipinfo) as file_data:
await outfile.write(file_data.read())
loop = asyncio.get_event_loop()
tasks = [loop.create_task(fetch_file(url)) for url in urls]
loop.run_until_complete(asyncio.wait(tasks))
rename_directories(dir_path)
| 18,309
|
def count_increasing(ratings, n):
"""
Only considering the increasing case
"""
arr = [1] * n
cnt = 1
for i in range(1, n):
cnt = cnt + 1 if ratings[i - 1] < ratings[i] else 1
arr[i] = cnt
return arr
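A worked example: the counter restarts at 1 whenever the ratings stop strictly increasing.

ratings = [1, 2, 2, 3, 4, 1]
print(count_increasing(ratings, len(ratings)))  # [1, 2, 1, 2, 3, 1]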
| 18,310
|
def xxxputInHTMLdir(filename = 'test.py'):
    """ Create a directory named after the python file,
    then copy the python file into it.
    """
    verbose = False
    HTMLdir = filename.replace('.py', '')
    # make sure the target directory exists before copying (assumes `os` is imported at module level)
    os.makedirs(HTMLdir, exist_ok=True)
    shutil.copyfile(filename, HTMLdir + '/' + filename)
    if verbose: print('putInHTMLdir\t', filename, 'copied into', HTMLdir)
| 18,311
|
def load_train_data_frame(train_small, target, keras_options, model_options, verbose=0):
"""
### CAUTION: TF2.4 Still cannot load a DataFrame with Nulls in string or categoricals!
############################################################################
#### TF 2.4 still cannot load tensor_slices into ds if an object or string column
#### that has nulls in it! So we need to find other ways to load tensor_slices by
#### first filling dataframe with pandas fillna() function!
#############################################################################
"""
train_small = copy.deepcopy(train_small)
DS_LEN = model_options['DS_LEN']
#### do this for dataframes ##################
try:
batch_size = keras_options["batchsize"]
if isinstance(keras_options["batchsize"], str):
batch_size = find_batch_size(DS_LEN)
except:
#### If it is not given find it here ####
batch_size = find_batch_size(DS_LEN)
######### Modify or Convert column names to fit tensorflow rules of no space in names!
sel_preds = ["_".join(x.split(" ")) for x in list(train_small) ]
#### This can also be a problem with other special characters ###
sel_preds = ["_".join(x.split("(")) for x in sel_preds ]
sel_preds = ["_".join(x.split(")")) for x in sel_preds ]
sel_preds = ["_".join(x.split("/")) for x in sel_preds ]
sel_preds = ["_".join(x.split("\\")) for x in sel_preds ]
sel_preds = ["_".join(x.split("?")) for x in sel_preds ]
sel_preds = [x.lower() for x in sel_preds ]
if isinstance(target, str):
target = "_".join(target.split(" "))
target = "_".join(target.split("("))
target = "_".join(target.split(")"))
target = "_".join(target.split("/"))
target = "_".join(target.split("\\"))
target = "_".join(target.split("?"))
target = target.lower()
model_label = 'Single_Label'
else:
target = ["_".join(x.split(" ")) for x in target ]
target = ["_".join(x.split("(")) for x in target ]
target = ["_".join(x.split(")")) for x in target ]
target = ["_".join(x.split("/")) for x in target ]
target = ["_".join(x.split("\\")) for x in target ]
target = ["_".join(x.split("?")) for x in target ]
target = [x.lower() for x in target ]
model_label = 'Multi_Label'
train_small.columns = sel_preds
print('Alert! Modified column names to satisfy rules for column names in Tensorflow...')
#### if target is changed you must send that modified target back to other processes ######
### usecols is basically target in a list format. Very handy to know when target is a list.
try:
modeltype = model_options["modeltype"]
if model_options["modeltype"] == '':
### usecols is basically target in a list format. Very handy to know when target is a list.
modeltype, model_label, usecols = find_problem_type(train_small, target, model_options, verbose)
else:
if isinstance(target, str):
usecols = [target]
else:
usecols = copy.deepcopy(target)
except:
### if modeltype is given, then do not find the model type using this function
modeltype, model_label, usecols = find_problem_type(train_small, target, model_options, verbose)
### Cat_Vocab_Dict contains all info about vocabulary in each variable and their size
print(' Classifying variables using data sample in pandas...')
train_small, var_df, cat_vocab_dict = classify_features_using_pandas(train_small, target, model_options, verbose=verbose)
########## Just transfer all the values from var_df to cat_vocab_dict ##################################
for each_key in var_df:
cat_vocab_dict[each_key] = var_df[each_key]
############################################################################################################
model_options['modeltype'] = modeltype
model_options['model_label'] = model_label
cat_vocab_dict['target_variables'] = usecols
cat_vocab_dict['modeltype'] = modeltype
model_options['batch_size'] = batch_size
########## Find small details about the data to help create the right model ###
target_transformed = False
if modeltype != 'Regression':
if isinstance(target, str):
#### This is for Single Label Problems ######
if train_small[target].dtype == 'object' or str(train_small[target].dtype).lower() == 'category':
target_transformed = True
target_vocab = train_small[target].unique()
num_classes = len(target_vocab)
else:
if 0 not in np.unique(train_small[target]):
target_transformed = True ### label encoding must be done since no zero class!
target_vocab = train_small[target].unique()
num_classes = len(train_small[target].value_counts())
elif isinstance(target, list):
#### This is for Multi-Label Problems #######
copy_target = copy.deepcopy(target)
num_classes = []
for each_target in copy_target:
                if train_small[each_target].dtype == 'object' or str(train_small[each_target].dtype).lower() == 'category':
                    target_transformed = True
                    target_vocab = train_small[each_target].unique().tolist()
                    num_classes_each = len(target_vocab)
                else:
                    if 0 not in np.unique(train_small[each_target]):
                        target_transformed = True  ### label encoding must be done since no zero class!
                        target_vocab = train_small[each_target].unique()
                    num_classes_each = train_small[each_target].nunique()
num_classes.append(int(num_classes_each))
else:
num_classes = 1
target_vocab = []
########### find the number of labels in data ####
if isinstance(target, str):
num_labels = 1
elif isinstance(target, list):
if len(target) == 1:
num_labels = 1
else:
num_labels = len(target)
#### This is where we set the model_options for num_classes and num_labels #########
model_options['num_labels'] = num_labels
model_options['num_classes'] = num_classes
cat_vocab_dict['num_labels'] = num_labels
cat_vocab_dict['num_classes'] = num_classes
cat_vocab_dict["target_transformed"] = target_transformed
#### fill missing values using this function ##############
train_small = fill_missing_values_for_TF2(train_small, cat_vocab_dict)
##### Do the deletion of cols after filling with missing values since otherwise fill errors!
drop_cols = var_df['cols_delete']
cat_vocab_dict['columns_deleted'] = drop_cols
if len(drop_cols) > 0: ### drop cols that have been identified for deletion ###
print(' Dropping %s columns marked for deletion...' %drop_cols)
train_small.drop(drop_cols,axis=1,inplace=True)
######### Now load the train Dataframe into a tf.data.dataset #############
if target_transformed:
####################### T R A N S F O R M I N G T A R G E T ########################
train_small[target], cat_vocab_dict = transform_train_target(train_small, target, modeltype,
model_label, cat_vocab_dict)
if isinstance(target, str):
#### For single label do this: labels can be without names since there is only one label
if target != '':
labels = train_small[target]
features = train_small.drop(target, axis=1)
ds = tf.data.Dataset.from_tensor_slices((dict(features), labels))
else:
print('target variable is blank - please fix input and try again')
return
elif isinstance(target, list):
#### For multi label do this: labels must be dict and hence with names since there are many targets
labels = train_small[target]
features = train_small.drop(target, axis=1)
ds = tf.data.Dataset.from_tensor_slices((dict(features), dict(labels)))
else:
ds = tf.data.Dataset.from_tensor_slices(dict(train_small))
###### Now save some defaults in cat_vocab_dict ##########################
try:
keras_options["batchsize"] = batch_size
cat_vocab_dict['batch_size'] = batch_size
except:
batch_size = find_batch_size(DS_LEN)
keras_options["batchsize"] = batch_size
cat_vocab_dict['batch_size'] = batch_size
##########################################################################
#### C H E C K F O R I N F I N I T E V A L U E S H E R E ##########
##########################################################################
cols_with_infinity = find_columns_with_infinity(train_small)
if cols_with_infinity:
train_small = drop_rows_with_infinity(train_small, cols_with_infinity, fill_value=True)
return train_small, ds, var_df, cat_vocab_dict, keras_options, model_options
| 18,312
|
def stat_float_times(space, newval=-1):
"""stat_float_times([newval]) -> oldval
Determine whether os.[lf]stat represents time stamps as float objects.
If newval is True, future calls to stat() return floats, if it is False,
future calls return ints.
If newval is omitted, return the current setting.
"""
state = space.fromcache(StatState)
if newval == -1:
return space.newbool(state.stat_float_times)
else:
state.stat_float_times = (newval != 0)
| 18,313
|
def decorate(rvecs):
"""Output range vectors into some desired string format"""
return ', '.join(['{%s}' % ','.join([str(x) for x in rvec]) for rvec in rvecs])
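For example, each range vector becomes a brace-delimited, comma-separated group:

print(decorate([[1, 2, 3], [10, 20]]))  # {1,2,3}, {10,20}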
| 18,314
|
def test_atomic_g_year_max_exclusive_4_nistxml_sv_iv_atomic_g_year_max_exclusive_5_5(mode, save_output, output_format):
"""
Type atomic/gYear is restricted by facet maxExclusive with value 2030.
"""
assert_bindings(
schema="nistData/atomic/gYear/Schema+Instance/NISTSchema-SV-IV-atomic-gYear-maxExclusive-5.xsd",
instance="nistData/atomic/gYear/Schema+Instance/NISTXML-SV-IV-atomic-gYear-maxExclusive-5-5.xml",
class_name="NistschemaSvIvAtomicGYearMaxExclusive5",
version="1.1",
mode=mode,
save_output=save_output,
output_format=output_format,
structure_style="filenames",
)
| 18,315
|
def update_topic_collection_items(request_ctx, collection_item_id, topic_id, **request_kwargs):
"""
Accepts the same parameters as create
:param request_ctx: The request context
:type request_ctx: :class:RequestContext
:param collection_item_id: (required) ID
:type collection_item_id: string
:param topic_id: (required) ID
:type topic_id: string
:return: Update a topic
:rtype: requests.Response (with void data)
"""
path = '/v1/collection_items/{collection_item_id}/discussion_topics/{topic_id}'
url = request_ctx.base_api_url + path.format(collection_item_id=collection_item_id, topic_id=topic_id)
response = client.put(request_ctx, url, **request_kwargs)
return response
| 18,316
|
def test_graph_difference_more_non_isolated_relations_source(stub_graph_set, stub_relation_set):
""" Tests graph_difference returns graph with expected nodes if source graph has non-isolated
nodes which are not present in target catalog """
_, vals = stub_graph_set
common_relation = Relation(name=rand_string(10), **stub_relation_set.rand_relation_helper())
source_catalog = [common_relation,
vals.downstream_relation,
vals.upstream_relation,
vals.birelation_right]
target_catalog = {common_relation,
Relation(name=rand_string(10), **stub_relation_set.rand_relation_helper()),
Relation(name=rand_string(10), **stub_relation_set.rand_relation_helper()),
vals.birelation_left,
vals.birelation_right}
config_dict = copy.deepcopy(BASIC_CONFIGURATION)
config_dict["source"]["specified_relations"] = [
{
"database": vals.downstream_relation.database,
"schema": vals.downstream_relation.schema,
"relation": vals.downstream_relation.name,
"relationships": {
"directional": [
{
"local_attribute": vals.directional_key,
"database": ".*",
"schema": ".*",
"relation": ".*relation.*$",
"remote_attribute": vals.directional_key
}
]
}
}
]
config = ConfigurationParser().from_file_or_path(StringIO(yaml.dump(config_dict)))
shgraph = SnowShuGraph()
with mock.MagicMock() as adapter_mock:
adapter_mock.build_catalog.return_value = source_catalog
config.source_profile.adapter = adapter_mock
shgraph.build_graph(config)
expected_nodes = source_catalog[1:] # all non-isolated
actual = SnowShuGraph.catalog_difference(shgraph, target_catalog)
assert list(actual.nodes) == expected_nodes
| 18,317
|
def initialize_client(conn):
"""
    Called when a new client connects. If the newly connected client is not the
    first connected client, send it the current initial weights.
    :param conn: connection to the newly connected client
"""
if detailed_output:
print("connected clients: ", len(connectedclients))
if len(connectedclients) == 1:
msg = 0
else:
if client_weights_available:
initial_weights = client_weights
#initial_weights = client.state_dict()
msg = initial_weights
print("init_weights")
else:
msg = 0
send_request(conn, 0, msg)
| 18,318
|
def upsample(inputs, factor=(2, 2), interpolation='nearest'):
"""
Upsampling layer by factor
Parameters
----------
inputs: Input tensor
factor: The upsampling factors for (height, width). One integer or tuple of
two integers
interpolation: A string, one of [`nearest`, `bilinear`, 'bicubic', 'area'].
"""
# get new_size
_, height, width, _ = inputs.get_shape().as_list()
factor = _make_pair(factor)
new_height = height * factor[0]
new_width = width * factor[1]
new_size = (new_height, new_width)
# get interpolation type
interp_types = {
'nearest': tf.image.ResizeMethod.NEAREST_NEIGHBOR,
'bilinear': tf.image.ResizeMethod.BILINEAR,
'bicubic': tf.image.ResizeMethod.BICUBIC,
'area': tf.image.ResizeMethod.AREA,
}
if interpolation not in interp_types.keys():
raise ValueError("interpolation must be one of "
"['nearest', 'bilinear', 'bicubic', 'area']")
interp_type = interp_types.get(interpolation)
return tf.image.resize_images(inputs, size=new_size, method=interp_type)
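A usage sketch assuming TensorFlow 1.x (consistent with `tf.image.resize_images` above), an NHWC input with static spatial dimensions, and the snippet's `_make_pair` helper being available.

x = tf.placeholder(tf.float32, shape=[None, 32, 32, 3])
y = upsample(x, factor=(2, 2), interpolation='bilinear')  # shape [None, 64, 64, 3]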
| 18,319
|
def write_cmake_var_file(base_path: Path, file_paths: list[str]):
"""Write CMake script of source files.
Args:
base_dir (Path): Base directory.
file_paths (list[str]): Paths of source files.
"""
source_list_cmake_path = base_path / SOURCE_LIST_CMAKE_SUFFIX
with open(
str(source_list_cmake_path), mode="w", encoding="ascii", newline="\n"
) as file:
file.write("set(SOURCE_FILES\n")
for source_file in file_paths:
file.write(f" {source_file}\n")
file.write(")\n")
| 18,320
|
def root_mean_squared_error(*args, **kwargs):
"""
Returns the square-root of ``scikit-learn``'s ``mean_squared_error`` metric.
All arguments are forwarded to that function.
"""
return np.sqrt(mean_squared_error(*args, **kwargs))
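A quick numeric check, assuming `numpy` as `np` and `sklearn.metrics.mean_squared_error` are imported as the function requires.

print(root_mean_squared_error([0.0, 1.0], [0.0, 3.0]))  # sqrt(2.0) ~= 1.414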
| 18,321
|
def launch_process(simulation, episode, epsilon, mode, return_dict):
"""
Method to launch the simulation depending on the simulation, episode, epsilon and mode.
"""
simulation.run(episode, epsilon)
return_dict[mode] = simulation.stop()
| 18,322
|
def idwt2(Wimg, level=4):
""" inverse 2d wavelet transform
:param Wimg: 2d array
wavelet coefficients
:param level: int
level of wavelet transform - image shape has to be multiples of 2**level
:return: 2d array
image
"""
coeffs = _from_img_to_coeffs(Wimg, levels=level)
return pywt.waverec2(coeffs, wavelet='db4', mode='per')
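A shape-level usage sketch, assuming the snippet's module (including its `_from_img_to_coeffs` helper, `pywt`, and `numpy` as `np`) is importable; the zero array only demonstrates the size requirement.

Wimg = np.zeros((256, 256))   # both dimensions are multiples of 2**4
img = idwt2(Wimg, level=4)
print(img.shape)              # expected (256, 256)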
| 18,323
|
def is_volume_encryption_enabled(audit_options):
"""Validate volume encryption is enabled in Cinder.
Security Guide Check Name: Check-Block-09
:param audit_options: Dictionary of options for audit configuration
:type audit_options: Dict
:raises: AssertionError if the assertion fails.
"""
key_manager = audit_options['cinder-conf']['key_manager']
assert key_manager.get('backend') is not None, \
"key_manager.backend should be set"
| 18,324
|
def get_color(card):
"""Returns the card's color
Args:
card (webelement): a visible card
Returns:
str: card's color
"""
color = card.find_element_by_xpath(".//div/*[name()='svg']/*[name()='use'][2]").get_attribute("stroke")
# both light and dark theme
if (color == "#ff0101" or color == "#ffb047"):
color = "red"
elif (color == "#800080" or color == "#ff47ff"):
color = "purple"
else:
color = "green"
return color
| 18,325
|
def test_plugin_manager_duplicate_priorities():
""" Make sure that the plugin manager raises an exception when multiple
different plugins define the same operator
"""
# Note, the plugin manager does not currently compare to make sure that the
# two definitions are different before raising an error, so we can just
# pass two instances of the same plugin in order to induce the error
mgr = PluginManager(
plugins=[PluginForTesting(), PluginForTesting()],
load_package_plugins=False)
with pytest.raises(Exception):
print(mgr.operators({
'type': 'java',
'name': 'my-operator',
'properties': {'main_class': 'org.testing.MyTestClass'},
}).operator_class)
| 18,326
|
def setup_box():
"""
Will install python and all libs needed to set up this box to run the
examjam code. Eventually this needs to be more RPM based
"""
#place_pub_key()
install_box_libraries()
install_python()
install_easy_install()
install_python_dependancies()
| 18,327
|
def is_valid_path(parameters: Dict[str, Any]) -> bool:
"""Single "." chars and empty strings "" are excluded from path by urllib3.
A path containing to "/" or "%2F" will lead to ambiguous path resolution in
many frameworks and libraries, such behaviour have been observed in both
WSGI and ASGI applications.
In this case one variable in the path template will be empty, which will lead to 404 in most of the cases.
Because of it this case doesn't bring much value and might lead to false positives results of Schemathesis runs.
"""
path_parameter_blacklist = (".", SLASH, "")
return not any(
(value in path_parameter_blacklist or is_illegal_surrogate(value) or isinstance(value, str) and SLASH in value)
for value in parameters.values()
)
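A hedged illustration of the filtering rule, assuming SLASH == "/" and the module's `is_illegal_surrogate` helper, as in the snippet's context.

print(is_valid_path({"item_id": "abc"}))  # True
print(is_valid_path({"item_id": "."}))    # False: a lone "." is dropped by urllib3
print(is_valid_path({"item_id": "a/b"}))  # False: an embedded slash makes resolution ambiguous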
| 18,328
|
def to_tensor(x):
"""
Arguments:
x: an instance of PIL image.
Returns:
        a float tensor with shape [1, 3, h, w];
        it represents an RGB image with
        pixel values in the [0, 1] range.
"""
x = np.array(x)
x = torch.FloatTensor(x)
return x.permute(2, 0, 1).unsqueeze(0).div(255.0)
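A usage sketch with a synthetic PIL image; note the extra leading batch dimension added by `unsqueeze(0)`.

from PIL import Image

image = Image.new('RGB', (64, 48), color=(255, 0, 0))
batch = to_tensor(image)
print(batch.shape)  # torch.Size([1, 3, 48, 64])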
| 18,329
|
def load_messier_catalog_images(path, img_size=None, disable_tqdm=False):
"""
Data loader for Messier catalog images. The images are available
in `messier-catalog-images` repository of MiraPy organisation.
:param path: String. Directory path.
:param img_size: Final dimensions of the image.
:param disable_tqdm: Boolean. Set True to disable progress bar.
:return: Array of images.
"""
images = []
for filename in tqdm(os.listdir(path), disable=disable_tqdm):
filepath = os.path.join(path, filename)
img = cv2.imread(filepath, cv2.IMREAD_GRAYSCALE)
img = img/img.max()
img = img * 255.
if img_size:
img = cv2.resize(img, img_size)
images.append(np.array(img))
return np.array(images)
| 18,330
|
def load_stl10(data_dir, flatten=False, one_hot=True, normalize_range=False,
whiten_pixels=True, border_pad_size=0):
"""
    Large parts of this loader and the associated functions were inspired by
https://github.com/mttk/STL10/blob/master/stl10_input.py
"""
# path to the binary train file with image data
train_img_path = os.path.join(data_dir,'stl10_binary','train_X.bin')
# path to the binary train file with labels
train_label_path = os.path.join(data_dir,'stl10_binary','train_y.bin')
# path to the binary test file with image data
test_img_path = os.path.join(data_dir,'stl10_binary','test_X.bin')
# path to the binary test file with labels
test_label_path = os.path.join(data_dir,'stl10_binary','test_y.bin')
download_and_extract(data_dir)
# test to check if the whole dataset is read correctly
images_train = read_all_images(train_img_path)
print("Training images",images_train.shape)
labels_train = read_labels(train_label_path)
print("Training labels",labels_train.shape)
images_test = read_all_images(test_img_path)
print("Test images",images_test.shape)
labels_test = read_labels(test_label_path)
print("Test labels",labels_test.shape)
Xtrain = images_train.astype(np.float32) / 255.0
ytrain = labels_train
for i in range(len(ytrain)) :
ytrain[i] -= 1
split = int(np.floor(0.9 * Xtrain.shape[0]))
Xval = Xtrain[split:Xtrain.shape[0]]
yval = ytrain[split:Xtrain.shape[0]]
Xtrain = Xtrain[:split]
ytrain = ytrain[:split]
Xtest, ytest = images_test.astype(np.float32) / 255.0, labels_test
for i in range(len(ytest)) :
ytest[i] -= 1
if flatten :
print("Flatten Not Supported")
if normalize_range :
print("Normalize Range Not Supported")
if one_hot:
print("Train Shapes before one hot encoding ",Xtrain.shape, ytrain.shape)
ytest = idx_to_onehot(ytest, 10).astype(np.float32)
ytrain = idx_to_onehot(ytrain, 10).astype(np.float32)
yval = idx_to_onehot(yval, 10).astype(np.float32)
print("Train Shapes after one hot encoding",Xtrain.shape, ytrain.shape)
if whiten_pixels:
mean = Xtrain.mean(axis=0)[None, :]
std = Xtrain.std(axis=0)[None, :]
print("Other mean/std", mean.shape, std.shape)
Xtrain = (Xtrain - mean) / std
Xval = (Xval - mean) / std
Xtest = (Xtest - mean) / std
# NOTE: the zero padding is done after the potential whitening
if border_pad_size > 0:
Xtrain = zero_pad_border(Xtrain, border_pad_size)
Xval = zero_pad_border(Xval, border_pad_size)
Xtest = zero_pad_border(Xtest, border_pad_size)
return (Xtrain, ytrain, Xval, yval, Xtest, ytest)
| 18,331
|
def fetch_data(fold_path):
"""Fetch data saving in fold path.
Convert data into suitable format, using csv files in fold path.
:param fold_path: String. The fold in which data files are saved.
:return:
training_data: Dataframe. Combined dataframe to create training data.
testing_data: Dataframe. Combined dataframe to create testing data.
"""
# Read all the data from target fold path.
pokemon = pd.read_csv(fold_path+'/pokemon.csv')
combats = pd.read_csv(fold_path+'/combats.csv')
test_data = pd.read_csv(fold_path+'/tests.csv')
# Convert data into suitable format for training and testing.
training_data = convert_data(combats, pokemon, win_column='Winner')
testing_data = convert_data(test_data, pokemon)
return training_data, testing_data
| 18,332
|
def add_chr_prefix(band):
"""
Return the band string with chr prefixed
"""
return ''.join(['chr', band])
| 18,333
|
def group_by_distance(iterable, distance=1):
"""group integers into non-overlapping intervals that
are at most *distance* apart.
>>> list( group_by_distance( (1,1,2,4,5,7) ) )
[(1, 3), (4, 6), (7, 8)]
>>> list( group_by_distance( [] ) )
[]
>>> list( group_by_distance( [3] ) )
[(3, 4)]
>>> list( group_by_distance( [3,2] ) )
Traceback (most recent call last):
...
ValueError: iterable is not sorted: 2 < 3
.. note::
This snippet was downloaded from an unknown source.
"""
    i = iter(iterable)
    try:
        start = end = cur = next(i)
    except StopIteration:
        return
for cur in i:
if cur < end:
raise ValueError("iterable is not sorted: %i < %i" % (cur, end))
if cur - end > distance:
yield (start, end + 1)
start = cur
end = cur
yield (start, end + 1)
| 18,334
|
def disable_text_recog_aug_test(cfg, set_types=None):
"""Remove aug_test from test pipeline of text recognition.
Args:
cfg (mmcv.Config): Input config.
set_types (list[str]): Type of dataset source. Should be
None or sublist of ['test', 'val']
Returns:
cfg (mmcv.Config): Output config removing
`MultiRotateAugOCR` in test pipeline.
"""
assert set_types is None or isinstance(set_types, list)
if set_types is None:
set_types = ['val', 'test']
for set_type in set_types:
if cfg.data[set_type].pipeline[1].type == 'MultiRotateAugOCR':
cfg.data[set_type].pipeline = [
cfg.data[set_type].pipeline[0],
*cfg.data[set_type].pipeline[1].transforms
]
return cfg
| 18,335
|
def layer_svg(svg_bottom, svg_top, offset: list = [0.0, 0.0]):
"""
Adds one SVG over another. Modifies the bottom SVG in place.
    :param svg_bottom: The bottom SVG, in xml.etree.ElementTree form
    :param svg_top: The top SVG, in xml.etree.ElementTree form
:param offset: How far to offset the top SVG elements
"""
if svg_top is None:
return
# print(svg_top.tag)
for child in list(svg_top):
apply_offset(child, offset, offset_children=True)
svg_bottom.append(child)
return svg_bottom
| 18,336
|
def get_data_home(data_home=None):
"""Return a path to the cache directory for example datasets.
This directory is then used by :func:`load_dataset`.
If the ``data_home`` argument is not specified, it tries to read from the
``CF_DATA`` environment variable and defaults to ``~/cf-data``.
"""
if data_home is None:
data_home = os.environ.get("CF_DATA", os.path.join("~", "cf-data"))
data_home = os.path.expanduser(data_home)
if not os.path.exists(data_home):
os.makedirs(data_home)
return data_home
| 18,337
|
def main(username,password):
""" Opens FacebookClient and chats with random person """
# Setup
fb = FacebookClient(username,password)
chatbot = cleverbot3.Session()
# choose random "friend" to chat with
chat_list = fb.browser.find_element(By.CSS_SELECTOR,"ul.fbChatOrderedList.clearfix")
items = chat_list.find_elements(By.TAG_NAME,"li")
n = random.randint(0,len(items)-1)
items[n].find_element(By.TAG_NAME,"a").click()
# initiate coversation
chat_tabs_pagelet = fb.browser.find_element(By.ID,"ChatTabsPagelet")
textarea = chat_tabs_pagelet.find_element(By.TAG_NAME,"textarea")
textarea.send_keys("Hi!")
textarea.send_keys(Keys.RETURN)
# track conversation for 5 minutes
conversation_div = fb.browser.find_element(By.CSS_SELECTOR,"div.conversation")
conversation = conversation_div.find_element(By.CSS_SELECTOR,"div").text
current = conversation.split('\n')
start = time.time()
# trolling
while (time.time() - start) < 300:
conversation_div = fb.browser.find_element(By.CSS_SELECTOR,"div.conversation")
conversation = conversation_div.find_element(By.CSS_SELECTOR,"div").text
lines = conversation.split('\n')
# check for changes in conversation
if len(lines) != len(current):
new_lines = lines[len(current):len(lines)-1]
question = ""
for new_line in new_lines:
question += new_line + " "
print(question)
answer = chatbot.Ask(question)
textarea.send_keys(answer)
textarea.send_keys(Keys.RETURN)
conversation_div = fb.browser.find_element(By.CSS_SELECTOR,"div.conversation")
conversation = conversation_div.find_element(By.CSS_SELECTOR,"div").text
current = conversation.split('\n')
time.sleep(10)
# end conversation
textarea.send_keys("I g2g! Peace out g fowg!")
textarea.send_keys(Keys.RETURN)
# close browser
fb.destroy()
| 18,338
|
def utxo_cmd(ctx, dry_run):
"""Get the node's current UTxO with the option of filtering by address(es)"""
try:
CardanoCli.execute(cmd=["cardano-cli", "query", "utxo"], dry_run=dry_run, include_network=True)
except CardanoPyError as cpe:
ctx.fail(cpe.message)
return cpe.return_code
| 18,339
|
def intronator(exons):
"""
    Builds introns from pairs of Exon objects in >exons<.
    Exons are sorted in coding direction before pairing.
"""
def _window(seq):
"""
Taken from https://docs.python.org/release/2.3.5/lib/itertools-example.html
Returns a sliding window (of width n) over data from the iterable
s -> (s0,s1,...s[n-1]), (s1,s2,...,sn), ...
"""
n = 2
it = iter(seq)
result = tuple(islice(it, n))
if len(result) == n:
yield result
for elem in it:
result = result[1:] + (elem,)
yield result
exons = sorted_in_coding_direction(exons)
# accumulate lengths of children to calculate intron phases
# and relative positions at each index
exon_lengths = [e.length for e in exons]
child_cumsum = np.array(exon_lengths)[:-1].cumsum()
frac_positions = ((child_cumsum / sum(exon_lengths)) * 100).round(3)
for index, pair in enumerate(_window(exons)):
# edge case where exons might overlap in same transcript/gene;
# causes problems downstream but parsing later requires more
# work and it's rare enough that warning user should suffice
pair_coords = [(p.start, p.stop) for p in pair]
if overlap_check(*pair_coords):
overlap_log(pair)
# don't create intron from overlapping features
continue
new_intron = Intron.from_exon_pair(*pair)
# Record up- and downstream exon names in case needed
# for annotation file modification later
us_ex, ds_ex = [ex.name for ex in pair]
new_intron.upstream_exon = us_ex
new_intron.downstream_exon = ds_ex
new_intron.fractional_position = frac_positions[index]
yield new_intron
| 18,340
|
def parse_file(ifile, ofile, sra_data, out_dir, verbose=False):
"""
Parse the file and create a new metadata file.
Writes everything to the directory SRA_Submission, including a set of fasta files,
one per biosample.
"""
cols = {'bioproject_accession' : 0, 'biosample_accession' : 1, 'library_ID' : 2,
'title' : 3, 'library_strategy' : 4, 'library_source' : 5, 'library_selection' : 6,
'library_layout' : 7, 'platform' : 8, 'instrument_model' : 9, 'design_description' : 10,
'filetype' : 11, 'filename' : 12, 'filename2' : 13,
'filename3' : 14, 'filename4' : 15, 'assembly' : 16}
if os.path.exists(out_dir):
sys.stderr.write(f"{bcolors.FAIL}ERROR: {out_dir} exists. Not overwriting\n")
sys.exit(-1)
os.mkdir(out_dir)
volume = {}
linecount = 1
filecount = 0
if ".tsv" in ofile:
ofile = ofile.replace(".tsv", "")
out = open(f"{out_dir}/{ofile}.{filecount}.tsv", 'w')
out.write("bioproject_accession\tbiosample_accession\tlibrary_ID\ttitle\tlibrary_strategy\tlibrary_source\tlibrary_selection\tlibrary_layout\tplatform\tinstrument_model\tdesign_description\tfiletype\tfilename\tfilename2\tfilename3\tfilename4\tassembly\n")
os.mkdir(os.path.join(out_dir, str(filecount)))
increment_filecount = False
with open(ifile, 'r') as f:
for l in f:
if l.startswith("BioProject_Accession"):
continue
# this is to ensure that we write the fasta sequences to the correct subdir
# for sequences that we are not processing further
if increment_filecount:
filecount+=1
increment_filecount = False
out.close()
out = open(f"{out_dir}/{ofile}.{filecount}.tsv", 'w')
out.write("bioproject_accession\tbiosample_accession\tlibrary_ID\ttitle\tlibrary_strategy\tlibrary_source\tlibrary_selection\tlibrary_layout\tplatform\tinstrument_model\tdesign_description\tfiletype\tfilename\tfilename2\tfilename3\tfilename4\tassembly\n")
os.mkdir(os.path.join(out_dir, str(filecount)))
p = l.rstrip("\n").split("\t")
# important columns:
# bioproject_accession: 0
# biosample_accession: 1
# sequence_ID: 2
# sample name: 3
# title: 188
# src: 200
# sra_id: 199
# sequence: 234
# write the sequence out
# note that we do this before we process the line because we need
# all sequences, but we don't process all of them if we have already
# seen the biosample ID before
subdir = str(filecount)
if p[1] in volume:
subdir = str(volume[p[1]])
fao = open(os.path.join(out_dir, subdir, f"{p[1]}.fasta"), 'a')
fao.write(f">{p[2]}\n{p[234]}\n")
fao.close()
if p[1] in volume:
continue
volume[p[1]] = filecount
linecount+=1
if linecount > 999:
linecount = 1
increment_filecount = True
row = [p[0], p[1], p[3], p[188], "", "", "", "", "", "", "", "fastq", f"{p[1]}.fasta", "", "", "", ""]
if 'SRA' == p[200]:
if not p[199]:
sys.stderr.write(f"{bcolors.FAIL}FATAL: for {p[2]} src is SRA but there is no SRA ID{bcolors.ENDC}\n")
sys.exit(-1)
if p[199] not in sra_data:
sys.stderr.write(f"{bcolors.FAIL}FATAL: for {p[2]} SRA ID {p[199]} is not in the sra data{bcolors.ENDC}\n")
sys.exit(-1)
for k in ['library_strategy', 'library_source', 'library_selection', 'library_layout', 'platform', 'instrument_model']:
if k not in sra_data[p[199]]:
sys.stderr.write(f"{bcolors.FAIL}FATAL: {k} not in SRA data for {p[199]}\n")
continue
row[cols[k]] = sra_data[p[199]][k]
row[cols['design_description']] = f"Extracted from SRA run {p[199]} using Gretel version 0.0.8 (https://github.com/SamStudio8/gretel)"
else:
for k in our_data:
row[cols[k]] = our_data[k]
row[cols['design_description']] = f"PCR amplified from a raw environmental sample and seqeunced using Sanger sequencing. "
out.write("{}\n".format("\t".join(row)))
| 18,341
|
def delta_in_ms(delta):
"""
Convert a timedelta object to milliseconds.
"""
    return delta.total_seconds() * 1000.0
| 18,342
|
def test_dbus_getall_oom(dev, apdev):
"""D-Bus GetAll wpa_config_get_all() OOM"""
(bus,wpas_obj,path,if_obj) = prepare_dbus(dev[0])
id = dev[0].add_network()
dev[0].set_network(id, "disabled", "0")
dev[0].set_network_quoted(id, "ssid", "test")
res = if_obj.Get(WPAS_DBUS_IFACE, 'Networks',
dbus_interface=dbus.PROPERTIES_IFACE)
if len(res) != 1:
raise Exception("Missing Networks entry: " + str(res))
net_obj = bus.get_object(WPAS_DBUS_SERVICE, res[0])
for i in range(1, 50):
with alloc_fail(dev[0], i, "wpa_config_get_all"):
try:
props = net_obj.GetAll(WPAS_DBUS_NETWORK,
dbus_interface=dbus.PROPERTIES_IFACE)
except dbus.exceptions.DBusException as e:
pass
| 18,343
|
def getNodes(pattern="*", scene=None, useLists=False):
"""Return a dictionary of nodes where the name or id matches the ``pattern``.
By default, ``pattern`` is a wildcard and it returns all nodes associated
with ``slicer.mrmlScene``.
If multiple node share the same name, using ``useLists=False`` (default behavior)
returns only the last node with that name. If ``useLists=True``, it returns
a dictionary of lists of nodes.
"""
import slicer, collections, fnmatch
nodes = collections.OrderedDict()
if scene is None:
scene = slicer.mrmlScene
count = scene.GetNumberOfNodes()
for idx in range(count):
node = scene.GetNthNode(idx)
name = node.GetName()
id = node.GetID()
if (fnmatch.fnmatchcase(name, pattern) or
fnmatch.fnmatchcase(id, pattern)):
if useLists:
nodes.setdefault(node.GetName(), []).append(node)
else:
nodes[node.GetName()] = node
return nodes
| 18,344
|
def get_cls(
query: Union[None, str, Type[X]],
base: Type[X],
lookup_dict: Mapping[str, Type[X]],
lookup_dict_synonyms: Optional[Mapping[str, Type[X]]] = None,
default: Optional[Type[X]] = None,
suffix: Optional[str] = None,
) -> Type[X]:
"""Get a class by string, default, or implementation."""
if query is None:
if default is None:
raise ValueError(f'No default {base.__name__} set')
return default
elif not isinstance(query, (str, type)):
raise TypeError(f'Invalid {base.__name__} type: {type(query)} - {query}')
elif isinstance(query, str):
key = normalize_string(query, suffix=suffix)
if key in lookup_dict:
return lookup_dict[key]
if lookup_dict_synonyms is not None and key in lookup_dict_synonyms:
return lookup_dict_synonyms[key]
raise ValueError(f'Invalid {base.__name__} name: {query}')
elif issubclass(query, base):
return query
raise TypeError(f'Not subclass of {base.__name__}: {query}')
| 18,345
|
def selection_sort(data):
"""Sort a list of unique numbers in ascending order using selection sort. O(n^2).
The process includes repeatedly iterating through a list, finding the smallest element, and sorting that element.
Args:
data: data to sort (list of int)
Returns:
sorted list
"""
sorted_data = data[:]
for i, value in enumerate(sorted_data):
# find smallest value in unsorted subset
min_value = min(sorted_data[i:])
index_min = sorted_data.index(min_value)
# place smallest value at start of unsorted subset
sorted_data[i], sorted_data[index_min] = min_value, value
return sorted_data
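An example run; the input list is left untouched because of the initial copy.

data = [5, 1, 4, 2]
print(selection_sort(data))  # [1, 2, 4, 5]
print(data)                  # [5, 1, 4, 2]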
| 18,346
|
def _conv(args, filter_size, num_features, bias, reuse, w_init=None, b_init=0.0, scope='_conv'):
"""convolution:
Args:
args: a Tensor or a list of Tensors of dimension 3D, 4D or 5D
batch x n, Tensors.
filter_size: int tuple of filter height and width.
reuse: None/True, whether to reuse variables
w_init: weights initializer object
b_init: a `int`, bias initializer value
num_features: int, number of features.
bias_start: starting value to initialize the bias; 0 by default.
Returns:
A 3D, 4D, or 5D Tensor with shape [batch ... num_features]
Raises:
ValueError: if some of the arguments has unspecified or wrong shape.
"""
# Calculate the total size of arguments on dimension 1.
total_arg_size_depth = 0
shapes = [a.get_shape().as_list() for a in args]
shape_length = len(shapes[0])
for shape in shapes:
if len(shape) not in [3, 4, 5]:
raise ValueError("Conv Linear expects 3D, 4D or 5D arguments: %s" % str(shapes))
if len(shape) != len(shapes[0]):
raise ValueError("Conv Linear expects all args to be of same Dimensiton: %s" % str(shapes))
else:
total_arg_size_depth += shape[-1]
dtype = [a.dtype for a in args][0]
# determine correct conv operation
if shape_length == 3:
conv_op = tf.nn.conv1d
strides = 1
elif shape_length == 4:
conv_op = tf.nn.conv2d
strides = shape_length * [1]
elif shape_length == 5:
conv_op = tf.nn.conv3d
strides = shape_length * [1]
# Now the computation.
with tf.variable_scope(scope, reuse=reuse):
kernel = tf.get_variable(
"W", filter_size + [total_arg_size_depth, num_features], dtype=dtype, initializer=w_init)
if len(args) == 1:
res = conv_op(args[0], kernel, strides, padding='SAME')
else:
res = conv_op(tf.concat(axis=shape_length - 1, values=args), kernel, strides, padding='SAME')
if not bias:
return res
bias_term = tf.get_variable(
"biases", [num_features],
dtype=dtype,
initializer=tf.constant_initializer(b_init, dtype=dtype))
return res + bias_term
| 18,347
|
def make_bright_star_mask_in_hp(nside, pixnum, verbose=True, gaiaepoch=2015.5,
maglim=12., matchrad=1., maskepoch=2023.0):
"""Make a bright star mask in a HEALPixel using Tycho, Gaia and URAT.
Parameters
----------
nside : :class:`int`
(NESTED) HEALPixel nside.
pixnum : :class:`int`
A single HEALPixel number.
verbose : :class:`bool`
If ``True`` then log informational messages.
Returns
-------
:class:`recarray`
The bright star mask in the form of `maskdatamodel.dtype`.
Notes
-----
    - Runs in a minute or so for a typical nside=4 pixel.
- See :func:`~desitarget.brightmask.make_bright_star_mask` for
descriptions of the output mask and the other input parameters.
"""
# ADM start the clock.
t0 = time()
# ADM read in the Tycho files.
tychofns = find_tycho_files_hp(nside, pixnum, neighbors=False)
tychoobjs = []
for fn in tychofns:
tychoobjs.append(fitsio.read(fn, ext='TYCHOHPX'))
tychoobjs = np.concatenate(tychoobjs)
# ADM create the Tycho reference magnitude, which is VT then HP
# ADM then BT in order of preference.
tychomag = tychoobjs["MAG_VT"].copy()
tychomag[tychomag == 0] = tychoobjs["MAG_HP"][tychomag == 0]
tychomag[tychomag == 0] = tychoobjs["MAG_BT"][tychomag == 0]
# ADM discard any Tycho objects below the input magnitude limit
# ADM and outside of the HEALPixels of interest.
theta, phi = np.radians(90-tychoobjs["DEC"]), np.radians(tychoobjs["RA"])
tychohpx = hp.ang2pix(nside, theta, phi, nest=True)
ii = (tychohpx == pixnum) & (tychomag < maglim)
tychomag, tychoobjs = tychomag[ii], tychoobjs[ii]
if verbose:
log.info('Read {} (mag < {}) Tycho objects (pix={})...t={:.1f} mins'.
format(np.sum(ii), maglim, pixnum, (time()-t0)/60))
# ADM read in the associated Gaia files. Also grab
# ADM neighboring pixels to prevent edge effects.
gaiafns = find_gaia_files(tychoobjs, neighbors=True)
gaiaobjs = []
cols = 'SOURCE_ID', 'RA', 'DEC', 'PHOT_G_MEAN_MAG', 'PMRA', 'PMDEC'
for fn in gaiafns:
if os.path.exists(fn):
gaiaobjs.append(fitsio.read(fn, ext='GAIAHPX', columns=cols))
gaiaobjs = np.concatenate(gaiaobjs)
gaiaobjs = rfn.rename_fields(gaiaobjs, {"SOURCE_ID": "REF_ID"})
# ADM limit Gaia objects to 3 magnitudes fainter than the passed
# ADM limit. This leaves some (!) leeway when matching to Tycho.
gaiaobjs = gaiaobjs[gaiaobjs['PHOT_G_MEAN_MAG'] < maglim + 3]
if verbose:
log.info('Read {} (G < {}) Gaia sources (pix={})...t={:.1f} mins'.format(
len(gaiaobjs), maglim+3, pixnum, (time()-t0)/60))
# ADM substitute URAT where Gaia proper motions don't exist.
ii = ((np.isnan(gaiaobjs["PMRA"]) | (gaiaobjs["PMRA"] == 0)) &
(np.isnan(gaiaobjs["PMDEC"]) | (gaiaobjs["PMDEC"] == 0)))
if verbose:
log.info('Add URAT for {} Gaia objs with no PMs (pix={})...t={:.1f} mins'
.format(np.sum(ii), pixnum, (time()-t0)/60))
urat = add_urat_pms(gaiaobjs[ii], numproc=1)
if verbose:
log.info('Found an additional {} URAT objects (pix={})...t={:.1f} mins'
.format(np.sum(urat["URAT_ID"] != -1), pixnum, (time()-t0)/60))
for col in "PMRA", "PMDEC":
gaiaobjs[col][ii] = urat[col]
# ADM need to track the URATID to track which objects have
# ADM substituted proper motions.
uratid = np.zeros_like(gaiaobjs["REF_ID"])-1
uratid[ii] = urat["URAT_ID"]
# ADM match to remove Tycho objects already in Gaia. Prefer the more
# ADM accurate Gaia proper motions. Note, however, that Tycho epochs
    # ADM can differ from the mean (1991.5) by as much as 0.86 years,
# ADM so a star with a proper motion as large as Barnard's Star
# ADM (10.3 arcsec) can be off by a significant margin (~10").
margin = 10.
ra, dec = rewind_coords(gaiaobjs["RA"], gaiaobjs["DEC"],
gaiaobjs["PMRA"], gaiaobjs["PMDEC"],
epochnow=gaiaepoch)
# ADM match Gaia to Tycho with a suitable margin.
if verbose:
log.info('Match Gaia to Tycho with margin={}" (pix={})...t={:.1f} mins'
.format(margin, pixnum, (time()-t0)/60))
igaia, itycho = radec_match_to([ra, dec],
[tychoobjs["RA"], tychoobjs["DEC"]],
sep=margin, radec=True)
if verbose:
log.info('{} matches. Refining at 1" (pix={})...t={:.1f} mins'.format(
len(itycho), pixnum, (time()-t0)/60))
# ADM match Gaia to Tycho at the more exact reference epoch.
epoch_ra = tychoobjs[itycho]["EPOCH_RA"]
epoch_dec = tychoobjs[itycho]["EPOCH_DEC"]
# ADM some of the Tycho epochs aren't populated.
epoch_ra[epoch_ra == 0], epoch_dec[epoch_dec == 0] = 1991.5, 1991.5
ra, dec = rewind_coords(gaiaobjs["RA"][igaia], gaiaobjs["DEC"][igaia],
gaiaobjs["PMRA"][igaia], gaiaobjs["PMDEC"][igaia],
epochnow=gaiaepoch,
epochpast=epoch_ra, epochpastdec=epoch_dec)
# ADM catch the corner case where there are no initial matches.
if ra.size > 0:
_, refined = radec_match_to([ra, dec], [tychoobjs["RA"][itycho],
tychoobjs["DEC"][itycho]], radec=True)
else:
refined = np.array([], dtype='int')
# ADM retain Tycho objects that DON'T match Gaia.
keep = np.ones(len(tychoobjs), dtype='bool')
keep[itycho[refined]] = False
tychokeep, tychomag = tychoobjs[keep], tychomag[keep]
if verbose:
log.info('Kept {} Tychos with no Gaia match (pix={})...t={:.1f} mins'
.format(len(tychokeep), pixnum, (time()-t0)/60))
# ADM now we're done matching to Gaia, limit Gaia to the passed
# ADM magnitude limit and to the HEALPixel boundary of interest.
theta, phi = np.radians(90-gaiaobjs["DEC"]), np.radians(gaiaobjs["RA"])
gaiahpx = hp.ang2pix(nside, theta, phi, nest=True)
ii = (gaiahpx == pixnum) & (gaiaobjs['PHOT_G_MEAN_MAG'] < maglim)
gaiakeep, uratid = gaiaobjs[ii], uratid[ii]
if verbose:
log.info('Mask also comprises {} Gaia sources (pix={})...t={:.1f} mins'
.format(len(gaiakeep), pixnum, (time()-t0)/60))
# ADM move the coordinates forwards to the input mask epoch.
epoch_ra, epoch_dec = tychokeep["EPOCH_RA"], tychokeep["EPOCH_DEC"]
# ADM some of the Tycho epochs aren't populated.
epoch_ra[epoch_ra == 0], epoch_dec[epoch_dec == 0] = 1991.5, 1991.5
ra, dec = rewind_coords(
tychokeep["RA"], tychokeep["DEC"], tychokeep["PM_RA"], tychokeep["PM_DEC"],
epochnow=epoch_ra, epochnowdec=epoch_dec, epochpast=maskepoch)
tychokeep["RA"], tychokeep["DEC"] = ra, dec
ra, dec = rewind_coords(
gaiakeep["RA"], gaiakeep["DEC"], gaiakeep["PMRA"], gaiakeep["PMDEC"],
epochnow=gaiaepoch, epochpast=maskepoch)
gaiakeep["RA"], gaiakeep["DEC"] = ra, dec
# ADM finally, format according to the mask data model...
gaiamask = np.zeros(len(gaiakeep), dtype=maskdatamodel.dtype)
tychomask = np.zeros(len(tychokeep), dtype=maskdatamodel.dtype)
for col in "RA", "DEC":
gaiamask[col] = gaiakeep[col]
gaiamask["PM"+col] = gaiakeep["PM"+col]
tychomask[col] = tychokeep[col]
tychomask["PM"+col] = tychokeep["PM_"+col]
gaiamask["REF_ID"] = gaiakeep["REF_ID"]
# ADM take care to rigorously convert to int64 for Tycho.
tychomask["REF_ID"] = tychokeep["TYC1"].astype('int64')*int(1e6) + \
tychokeep["TYC2"].astype('int64')*10 + tychokeep["TYC3"]
gaiamask["REF_CAT"], tychomask["REF_CAT"] = 'G2', 'T2'
gaiamask["REF_MAG"] = gaiakeep['PHOT_G_MEAN_MAG']
tychomask["REF_MAG"] = tychomag
gaiamask["URAT_ID"], tychomask["URAT_ID"] = uratid, -1
gaiamask["TYPE"], tychomask["TYPE"] = 'PSF', 'PSF'
mask = np.concatenate([gaiamask, tychomask])
# ADM ...and add the mask radii.
mask["IN_RADIUS"], mask["NEAR_RADIUS"] = radii(mask["REF_MAG"])
if verbose:
log.info("Done making mask...(pix={})...t={:.1f} mins".format(
pixnum, (time()-t0)/60.))
return mask
| 18,348
|
def _feature_normalization(features, method, feature_type):
"""Normalize the given feature vector `y`, with the stated normalization `method`.
Args:
features (np.ndarray): The signal array
method (str): Normalization method.
'global': Uses global mean and standard deviation values from `train.txt`.
The normalization is being applied element wise.
([sample] - [mean]^T) / [std]^T
Where brackets denote matrices or vectors.
'local': Use local (in sample) mean and standard deviation values, and apply the
normalization element wise, like in `global`.
'local_scalar': Uses only the mean and standard deviation of the current sample.
The normalization is being applied by ([sample] - mean_scalar) / std_scalar
'none': No normalization is being applied.
feature_type (str): Feature type, see `load_sample` for details.
Returns:
np.ndarray: The normalized feature vector.
"""
if method == 'none':
return features
elif method == 'global':
# Option 'global' is applied element wise.
if feature_type == 'mel':
global_mean = __global_mean_mel
global_std = __global_std_mel
elif feature_type == 'mfcc':
global_mean = __global_mean_mfcc
global_std = __global_std_mfcc
else:
raise ValueError('Unsupported global feature type: {}'.format(feature_type))
return (features - global_mean) / global_std
elif method == 'local':
return (features - np.mean(features, axis=0)) / np.std(features, axis=0)
elif method == 'local_scalar':
# Option 'local' uses scalar values.
return (features - np.mean(features)) / np.std(features)
else:
raise ValueError('Invalid normalization method: {}'.format(method))
| 18,349
|
def get_constraint(name):
    """
    Lookup table of default weight constraint functions.
    Parameters
    ----------
    name : Constraint, None, str
        Constraint to look up. Must be one of:
        - 'unit' : Unit norm constraint.
        - 'maxnorm' : Maximum norm constraint.
        - 'minmax' : Min-max norm constraint.
        - Constraint : A custom implementation.
        - None : Return None.
        Custom Constraint must implement `constrain`
        function.
    Returns
    -------
    constraint : Constraint or None
        The constraint function.
    """
    if name == 'unit' : return UnitNorm
    elif name == 'maxnorm' : return MaxNorm
    elif name == 'minmax' : return MinMaxNorm
    elif name is None or isinstance(name, Constraint) : return name
    else : raise ValueError("Invalid constraint")
| 18,350
|
def add_org_hooks(mock, values):
"""Add hooks for an organization
The values passed is an array of dictionaries as generated by the factory
"""
url = os.path.join("orgs", ORG_NAME, "hooks")
register_uri(mock, "GET", url=url, json=values)
| 18,351
|
def player_count(conn, team_id):
"""Returns the number of players associated with a particular team"""
c = conn.cursor()
c.execute("SELECT id FROM players WHERE team_id=?", (team_id,))
return len(c.fetchall())
| 18,352
|
def _RedisClient(address):
"""
Return a connection object connected to the socket given by `address`
"""
h1, h2 = get_handle_pair(conn_type=REDIS_LIST_CONN)
c = _RedisConnection(h1)
#redis_client = util.get_redis_client()
redis_client = util.get_cache_client()
ip, port = address
chan = '{}:{}'.format(ip, port)
redis_client.publish(chan, bytes(h2, 'utf-8'))
ack = c.recv()
assert ack == 'OK'
return c
| 18,353
|
def format_timedelta(value,
time_format="{days} days, {hours2}:{minutes2}:{seconds2}"):
"""Format a datetie.timedelta. See """
if hasattr(value, 'seconds'):
seconds = value.seconds + value.days * 24 * 3600
else:
seconds = int(value)
seconds_total = seconds
minutes = int(math.floor(seconds / 60))
minutes_total = minutes
seconds -= minutes * 60
hours = int(math.floor(minutes / 60))
hours_total = hours
minutes -= hours * 60
days = int(math.floor(hours / 24))
days_total = days
hours -= days * 24
years = int(math.floor(days / 365))
years_total = years
days -= years * 365
return time_format.format(
**{
'seconds': seconds,
'seconds2': str(seconds).zfill(2),
'minutes': minutes,
'minutes2': str(minutes).zfill(2),
'hours': hours,
'hours2': str(hours).zfill(2),
'days': days,
'years': years,
'seconds_total': seconds_total,
'minutes_total': minutes_total,
'hours_total': hours_total,
'days_total': days_total,
'years_total': years_total,
})
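
# Quick sanity check of the format keys built above (a usage sketch, not part
# of the original snippet):
from datetime import timedelta

print(format_timedelta(timedelta(days=1, hours=2, minutes=3, seconds=4)))
# -> '1 days, 02:03:04'
print(format_timedelta(3723, time_format="{hours_total}h {minutes}m {seconds}s"))
# -> '1h 2m 3s'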
| 18,354
|
def random_account_user(account):
"""Get a random user for an account."""
account_user = AccountUser.objects.filter(account=account).order_by("?").first()
return account_user.user if account_user else None
| 18,355
|
def pytest_collection_modifyitems(session, config, items):
"""Called by pytest after collecting tests.
The collected tests and the order in which they will be called are in ``items``,
which can be manipulated in place.
"""
if not session.config.getoption("--skip"):
return
passed_set = set(session.config.cache.get(D1_SKIP_LIST, []))
new_item_list = []
for item in items:
if item.nodeid not in passed_set:
new_item_list.append(item)
prev_skip_count = session.config.cache.get(D1_SKIP_COUNT, 0)
cur_skip_count = len(items) - len(new_item_list)
if prev_skip_count == cur_skip_count:
logger.info("No tests were run (--skip). Restarting with complete test set")
_clear_skip_list(session)
else:
session.config.cache.set(D1_SKIP_COUNT, cur_skip_count)
logger.info(
"Skipping {} previously passed tests (--skip)".format(cur_skip_count)
)
items[:] = new_item_list
| 18,356
|
def overwrite(main_config_obj, args):
"""
Overwrites parameters with input flags
Args:
main_config_obj (ConfigClass): config instance
args (dict): arguments used to overwrite
Returns:
ConfigClass: config instance
"""
# Sort on nested level to override shallow items first
args = dict(sorted(args.items(), key=lambda item: item[0].count('.')))
for argument_key, val in args.items():
    # Separate nested keys into outer and inner
outer_keys = argument_key.split('.')
inner_key = outer_keys.pop(-1)
base_err_msg = f"Can't set '{argument_key} = {val}'"
# Check that the nested config has the attribute and is a config class
config_obj = main_config_obj
config_class = type(config_obj).__name__
for key_idx, key_part in enumerate(argument_key.split('.')):
err_msg = f"{base_err_msg}. '{key_part}' isn't an attribute in '{config_class}'"
assert hasattr(config_obj, key_part), err_msg
# Check if the config allows the argument
figutils.check_allowed_input_argument(config_obj, key_part, argument_key)
# Check if the outer attributes are config classes
if key_idx < len(outer_keys):
config_obj = getattr(config_obj, key_part)
config_class = type(config_obj).__name__
err_msg = f"{base_err_msg}. '{'.'.join(outer_keys)}' isn't a registered Anyfig config class"
assert figutils.is_config_class(config_obj), err_msg
value_class = type(getattr(config_obj, inner_key))
base_err_msg = f"Input argument '{argument_key}' with value {val} can't create an object of the expected type"
# Create new anyfig class object
if figutils.is_config_class(value_class):
value_obj = create_config(val)
# Create new object that follows the InterfaceField's rules
elif issubclass(value_class, fields.InterfaceField):
field = getattr(config_obj, inner_key)
      if isinstance(field, fields.InputField):  # check the field instance, not its type
value_class = field.type_pattern
else:
value_class = type(field.value)
try:
val = value_class(val)
except Exception as e:
err_msg = f"{base_err_msg} {field.type_pattern}. {e}"
raise RuntimeError(err_msg) from None
field = field.update_value(inner_key, val, config_class)
value_obj = field.finish_wrapping_phase(inner_key, config_class)
# Create new object of previous value type with new value
else:
try:
if isinstance(val, dict): # Keyword specified cli-arguments
value_obj = value_class(**val)
else:
value_obj = value_class(val)
except Exception as e:
err_msg = f"{base_err_msg} {value_class}. {e}"
raise RuntimeError(err_msg) from None
# Overwrite old value
setattr(config_obj, inner_key, value_obj)
return main_config_obj
| 18,357
|
def initInterpreter(model_path):
"""Initializes the tflite interpreter with the given tflite model"""
global interpreter
interpreter = tf.lite.Interpreter(model_path=model_path)
interpreter.allocate_tensors()
global input_details
input_details = interpreter.get_input_details()
global output_details
output_details = interpreter.get_output_details()
| 18,358
|
def max_delta(model, new_model):
"""Return the largest difference between any two corresponding
values in the models"""
return max( [(abs(model[i] - new_model[i])).max() for i in range(len(model))] )
| 18,359
|
def wiener_khinchin_transform(power_spectrum, frequency, time):
"""
A function to transform the power spectrum to a correlation function by the Wiener Khinchin transformation
** Input:**
* **power_spectrum** (`list or numpy.array`):
The power spectrum of the signal.
* **frequency** (`list or numpy.array`):
The frequency discretizations of the power spectrum.
* **time** (`list or numpy.array`):
The time discretizations of the signal.
**Output/Returns:**
* **correlation_function** (`list or numpy.array`):
The correlation function of the signal.
"""
frequency_interval = frequency[1] - frequency[0]
fac = np.ones(len(frequency))
fac[1: len(frequency) - 1: 2] = 4
fac[2: len(frequency) - 2: 2] = 2
fac = fac * frequency_interval / 3
correlation_function = np.zeros(len(time))
for i in range(len(time)):
correlation_function[i] = 2 * np.dot(fac, power_spectrum * np.cos(frequency * time[i]))
return correlation_function
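
# Usage sketch with an arbitrary one-sided spectrum: the weights `fac` above
# implement Simpson's rule, so at time zero the result is twice the integrated
# power spectrum, i.e. the variance of the process.
import numpy as np

frequency = np.linspace(0, 10, 129)
time = np.linspace(0, 5, 64)
power_spectrum = np.exp(-frequency)

r = wiener_khinchin_transform(power_spectrum, frequency, time)
print(r.shape)                                             # (64,)
print(np.isclose(r[0], 2 * (1 - np.exp(-10)), rtol=1e-2))  # analytic integral check -> True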
| 18,360
|
def child_is_flat(children, level=1):
"""
Check if all children in section is in same level.
children - list of section children.
level - integer, current level of depth.
Returns True if all children in the same level, False otherwise.
"""
return all(
len(child) <= level + 1 or child[(level + 1) :][0].isalpha()
for child in children
)
| 18,361
|
def load_dataset(name, other_paths=[]):
"""Load a dataset with given (file) name."""
if isinstance(name, Dataset):
return name
path = Path(name)
# First, try if you have passed a fully formed dataset path
if path.is_file():
return _from_npy(name, classes=classes)
# Go through the dataset paths, return the first dataset found
all_paths = dataset_path + other_paths
for p in all_paths:
try:
file = p / path
return _from_npy(file, classes=classes)
except FileNotFoundError:
pass
raise FileNotFoundError(
"Could not find dataset {} in paths {}".format(name, all_paths)
)
| 18,362
|
def isqrtcovresnet101b(**kwargs):
"""
iSQRT-COV-ResNet-101 model with stride at the second convolution in bottleneck block from 'Towards Faster Training
of Global Covariance Pooling Networks by Iterative Matrix Square Root Normalization,'
https://arxiv.org/abs/1712.01034.
Parameters:
----------
pretrained : bool, default False
Whether to load the pretrained weights for model.
ctx : Context, default CPU
The context in which to load the pretrained weights.
root : str, default '~/.mxnet/models'
Location for keeping the model parameters.
"""
return get_isqrtcovresnet(blocks=101, conv1_stride=False, model_name="isqrtcovresnet101b", **kwargs)
| 18,363
|
def get_data(name: str, level: int, max_level: int) -> str:
"""從維基頁面爬取資料
參數:
name: 程式或節點名稱
level: 欲查詢的等級
回傳:
爬到的資料
"""
reply_msg = []
for dataframe in read_html(generate_url(name)):
if (max_level < dataframe.shape[0] < max_level + 3 and
dataframe.iloc[level, 0].isdigit() and
level == int(dataframe.iloc[level, 0])):
reply_msg.append(zip(*dataframe.iloc[[0, level], 1:].values))
return '\n'.join(':'.join(pair) for data in reply_msg for pair in data)
| 18,364
|
def is_processable(path: str, should_match_extension: str):
"""
    Return True if `path` is a regular file whose extension matches
    `should_match_extension` (case-insensitively), False otherwise.
"""
if not os.path.isfile(path):
return False
filename = os.path.basename(path)
_, extension = os.path.splitext(filename)
if extension.lower() != should_match_extension.lower():
return False
return True
| 18,365
|
def insert_data(context, data_dict):
"""
:raises InvalidDataError: if there is an invalid value in the given data
"""
data_dict['method'] = _INSERT
result = upsert_data(context, data_dict)
return result
| 18,366
|
def timestamp():
"""Get the unix timestamp now and retuen it.
Attention: It's a floating point number."""
import time
timestamp = time.time()
return timestamp
| 18,367
|
def _n64_to_datetime(n64):
"""Convert Numpy 64 bit timestamps to datetime objects. Units in seconds"""
return datetime.utcfromtimestamp(n64.tolist() / 1e9)
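
# Usage sketch, assuming nanosecond-resolution timestamps as the 1e9 divisor implies:
import numpy as np

ts = np.datetime64('2020-01-01T00:00:00', 'ns')
print(_n64_to_datetime(ts))  # 2020-01-01 00:00:00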
| 18,368
|
def cycle(iterable):
"""Make an iterator returning elements from the iterable and saving a copy of each.
When the iterable is exhausted, return elements from the saved copy. Repeats indefinitely.
This function uses single dispatch.
.. seealso:: :func:`itertools.cycle`
"""
return itertools.cycle(iterable)
| 18,369
|
def get_availability_zone(name=None,state=None,zone_id=None,opts=None):
"""
`.getAvailabilityZone` provides details about a specific availability zone (AZ)
in the current region.
This can be used both to validate an availability zone given in a variable
and to split the AZ name into its component parts of an AWS region and an
AZ identifier letter. The latter may be useful e.g. for implementing a
consistent subnet numbering scheme across several regions by mapping both
the region and the subnet letter to network numbers.
This is different from the `.getAvailabilityZones` (plural) data source,
which provides a list of the available zones.
:param str name: The full name of the availability zone to select.
:param str state: A specific availability zone state to require. May
be any of `"available"`, `"information"` or `"impaired"`.
:param str zone_id: The zone ID of the availability zone to select.
> This content is derived from https://github.com/terraform-providers/terraform-provider-aws/blob/master/website/docs/d/availability_zone.html.markdown.
"""
__args__ = dict()
__args__['name'] = name
__args__['state'] = state
__args__['zoneId'] = zone_id
if opts is None:
opts = pulumi.InvokeOptions()
if opts.version is None:
opts.version = utilities.get_version()
__ret__ = pulumi.runtime.invoke('aws:index/getAvailabilityZone:getAvailabilityZone', __args__, opts=opts).value
return AwaitableGetAvailabilityZoneResult(
name=__ret__.get('name'),
name_suffix=__ret__.get('nameSuffix'),
region=__ret__.get('region'),
state=__ret__.get('state'),
zone_id=__ret__.get('zoneId'),
id=__ret__.get('id'))
| 18,370
|
def _recs_on_solr(recommendations, solr_index_backoff, solr_index_restore):
"""sees if we have auto soft commit increases and restores and makes recommendations accordingly"""
    if len(solr_index_backoff) > 0:
        for core in solr_index_backoff.keys():
            data = solr_index_backoff[core]
            dates_str = ", ".join(
                list(map(lambda a: a.strftime("%Y-%m-%d %H:%M:%S"), data["dates"]))
            )
            if core in solr_index_restore:
                recommendations.append(
                    {
                        "issue": "There were %i incidents of indexing not being able to keep up for core %s at the following times: %s"
                        % (data["count"], core, dates_str),
                        "rec": "Consider raising auto soft commit to 60000 for core '%s' to avoid dropped mutations and timeouts"
                        % core,
                    }
                )
            else:
                recommendations.append(
                    {
                        "issue": "There were %i incidents of indexing not being able to keep up for core %s at the following times: %s. There is nothing in the log indicating a restore to the configured auto soft commit."
                        % (data["count"], core, dates_str),
                        "rec": "Strongly consider raising auto soft commit for core '%s' to avoid dropped mutations and timeouts. This core never restores to the configured value, so there is no benefit to keeping it where it is at"
                        % core,
                    }
                )
| 18,371
|
def create_collection(CollectionId=None):
"""
Creates a collection in an AWS Region. You can add faces to the collection using the operation.
For example, you might create collections, one for each of your application users. A user can then index faces using the IndexFaces operation and persist results in a specific collection. Then, a user can search the collection for faces in the user-specific container.
For an example, see example1 .
This operation requires permissions to perform the rekognition:CreateCollection action.
See also: AWS API Documentation
Examples
This operation creates a Rekognition collection for storing image data.
Expected Output:
:example: response = client.create_collection(
CollectionId='string'
)
:type CollectionId: string
:param CollectionId: [REQUIRED]
ID for the collection that you are creating.
:rtype: dict
:return: {
'StatusCode': 123,
'CollectionArn': 'string'
}
"""
pass
| 18,372
|
def main():
"""
main function for both public leaderboard evaluation and private leaderboard prediction
"""
print("Get ground truth data for 376 listed companies")
true_df = make_true_df(
DATA_PATH,
start_date=None,
cut_off_date=CUT_OFF_DATE,
new_eval_period=BOOL_NEW_EVAL_PERIOD,
)
# get blank dataframe for predictions
blank_submission = make_blank_submission(
DATA_PATH, new_eval_period=BOOL_NEW_EVAL_PERIOD
)
# sample number of companies for prediction
if NUMBER_OF_PREDICTIONS:
model = ElasticNet() # purpose of speeding up the process
model_name = "ElasticNet"
blank_submission = blank_submission.sample(
NUMBER_OF_PREDICTIONS, axis=1, random_state=616
)
else:
model = ElasticNetCV(
max_iter=1000000
) # estimated time for training: 3500+ seconds (1 hour)
model_name = "ElasticNetCV"
print("make predictions")
pred_df = elasticnet_cv_predict(
pred_df_input=blank_submission,
model=model,
bool_public=BOOL_PUBLIC,
start_date=None,
recent_known_date=CUT_OFF_DATE,
scaler=StandardScaler(),
)
if BOOL_PUBLIC: # evaluation only
print(
f"{model_name} is evaluated with {NUMBER_OF_PREDICTIONS} sampled companies"
)
if not BOOL_NEW_EVAL_PERIOD:
print(
f"Public Evaluation period: {blank_submission.iloc[0].name} ~ {blank_submission.iloc[4].name}"
)
elif BOOL_NEW_EVAL_PERIOD:
print(
f"Arbitrary Evaluation period: {blank_submission.iloc[0].name} ~ {blank_submission.iloc[4].name}"
)
print(nmae(true_df, pred_df))
elif not BOOL_PUBLIC: # save file without evaluation
file_name = os.path.join(PREDICT_PATH, f"{model_name}_private.csv")
pred_df.to_csv(file_name, index=False)
print("Private prediction results are saved at {file_name}")
| 18,373
|
def copy_wcs(wcs_source_file, wcs_target_file):
""" Copy the WCS header keywords from the source file into the
target file.
"""
hdr_src = fits.getheader(wcs_source_file)
wcs_src = WCS(hdr_src)
im = fits.open(wcs_target_file, 'update')
im[0].header.update(wcs_src.to_header())
im.flush(output_verify='fix+warn')
im.close()
return
| 18,374
|
def _add_u_eq(blk, uex=0.8):
"""Add heat transfer coefficent adjustment for feed water flow rate.
This is based on knowing the heat transfer coefficent at a particular flow
and assuming the heat transfer coefficent is porportial to feed water
flow rate raised to certain power (typically 0.8)
Args:
blk: Heat exchanger block to add correlation to
uex: Correlation parameter value (defalut 0.8)
Returns:
None
"""
ti = blk.flowsheet().time
blk.U0 = pyo.Var(ti)
blk.f0 = pyo.Var(ti)
blk.uex = pyo.Var(ti, initialize=uex)
for t in ti:
blk.U0[t].value = blk.overall_heat_transfer_coefficient[t].value
blk.f0[t].value = blk.tube.properties_in[t].flow_mol.value
blk.overall_heat_transfer_coefficient.unfix()
blk.U0.fix()
blk.uex.fix()
blk.f0.fix()
@blk.Constraint(ti)
def U_eq(b, t):
return (
b.overall_heat_transfer_coefficient[t] ==
b.U0[t]*(b.tube.properties_in[t].flow_mol/b.f0[t])**b.uex[t]
)
| 18,375
|
def test_parse_cpe_name_wfn(cpe, cpe_ret):
"""
    Parse correct CPE_NAME data in WFN format.
:return:
"""
ret = core._parse_cpe_name(cpe)
for key, value in cpe_ret.items():
assert key in ret
assert ret[key] == value
| 18,376
|
def find_process_in_list( proclist, pid ):
"""
    Searches for the given 'pid' in 'proclist' (which should be the output
    from get_process_list()). If not found, None is returned. Otherwise the
    matching entry
        [ user, pid, ppid ]
    is returned.
    """
for L in proclist:
if pid == L[1]:
return L
return None
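
# Illustrative call with hypothetical process rows ([user, pid, ppid], per the docstring):
proclist = [
    ["alice", 1234, 1],
    ["bob", 5678, 1234],
]
print(find_process_in_list(proclist, 5678))  # ['bob', 5678, 1234]
print(find_process_in_list(proclist, 9999))  # None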
| 18,377
|
def solution(s, start_pos, end_pos):
"""
Find the minimal nucleotide from a range of sequence DNA.
:param s: String consisting of the letters A, C, G and T,
which correspond to the types of successive nucleotides in the sequence
:param start_pos: array with the start indexes for the intervals to check
:param end_pos: array with the end indexes for the intervals to check
:return: a list with the minimal nucleotide for each interval defined by start_pos and end_pos
"""
highest_class = 'T'
highest_class_value = 4
# The array below must be in ascending order regarding the value assigned to the classes in the challenge description
# (not necessarily in alphabetic order)
other_classes = ['A', 'C', 'G']
other_classes_values = [1, 2, 3]
# We create a prefix_sum list for each class, so we can identify when a range has that specific class
prefix_sums = __class_based_prefix_sums(s, other_classes)
result = []
for i in range(len(start_pos)):
# We don't need to create a prefix_sum list for the class with highest value,
# because we can always use it as a fallback
current_result = highest_class_value
for j in range(len(other_classes)):
if __class_is_present(prefix_sums, j, start_pos[i], end_pos[i]):
current_result = other_classes_values[j]
break
result.append(current_result)
return result
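
# Hedged usage sketch for the classic genomic range query, assuming the private
# helpers __class_based_prefix_sums / __class_is_present behave as described in
# the comments above (they are not shown in this snippet):
s = "CAGCCTA"
start_pos = [2, 5, 0]
end_pos = [4, 5, 6]
print(solution(s, start_pos, end_pos))  # expected [2, 4, 1]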
| 18,378
|
def ExtremeSoundHandlerProcess(ExtremeQueue):
""" Display text based on recieved Sound object """
y = Sound() # Dummy Sound object, see bottom of loop
y.dB = ""
while True:
x = queue_get(ExtremeQueue)
if x is not None and x.dB != y.dB:
# If we got a sound and it's not the same
# volume as the last sound
print(prefix() + x.RMS + " RMS - " + x.dB + " dB - Extreme - " + x.visual_string)
y = x
| 18,379
|
def validate_address(value: str, context: dict = {}) -> str:
"""
    Default address validator function. Can be overridden by providing a
    dotted path to a function in the ``SALESMAN_ADDRESS_VALIDATOR`` setting.
Args:
value (str): Address text to be validated
context (dict, optional): Validator context data.
Raises:
ValidationError: In case address is not valid
Returns:
str: Validated value
"""
if not value:
raise ValidationError(_("Address is required."))
return value
| 18,380
|
def select_images(img_dir, sample_size=150, random_seed=42):
"""Selects a random sample of image paths."""
img_paths = []
for file in os.listdir(img_dir):
if file.lower().endswith('.jpeg'):
img_paths.append(os.path.join(img_dir, file))
if sample_size is not None:
if random_seed is not None:
random.seed(a=random_seed)
img_paths = random.sample(img_paths, sample_size)
return img_paths
| 18,381
|
def load_period_data(period):
""" Load period data JSON file
If the file does not exist and empty dictionary is returned.
"""
filename = os.path.join(PROTOCOL_DIR, PERIOD_FILE_TEMPLATE % period)
if not os.path.exists(filename):
return {}
with open(filename, 'r', encoding='utf-8') as f:
return json.load(f)
| 18,382
|
def _get_hg_repo(path_dir):
"""Parse `hg paths` command to find remote path."""
if path_dir == "":
return ""
hgrc = Path(path_dir) / ".hg" / "hgrc"
if hgrc.exists():
config = ConfigParser()
config.read(str(hgrc))
if "paths" in config:
return config["paths"].get("default", "hgrc: no default path?")
else:
return "hgrc: no [paths] section?"
else:
return "not a hg repo"
| 18,383
|
def clip_grad_norm(parameters, max_norm, norm_type=2):
"""Clips gradient norm of an iterable of parameters.
The norm is computed over all gradients together, as if they were
concatenated into a single vector. Gradients are modified in-place.
Arguments:
parameters (Iterable[Variable]): an iterable of Variables that will have
gradients normalized
max_norm (float or int): max norm of the gradients
norm_type (float or int): type of the used p-norm. Can be ``'inf'`` for infinity norm.
"""
parameters = list(parameters)
max_norm = float(max_norm)
norm_type = float(norm_type)
if norm_type == float('inf'):
total_norm = max(p.grad.data.abs().max() for p in parameters)
else:
total_norm = 0
for p in parameters:
param_norm = p.grad.data.norm(norm_type)
total_norm += param_norm ** norm_type
total_norm = total_norm ** (1. / norm_type)
clip_coef = max_norm / (total_norm + 1e-6)
if clip_coef >= 1:
return
for p in parameters:
p.grad.data.mul_(clip_coef)
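
# Minimal usage sketch (assumes PyTorch; gradients come from a toy loss):
import torch

w = torch.nn.Parameter(torch.randn(3, 3))
b = torch.nn.Parameter(torch.randn(3))
loss = (w.sum() + b.sum()) ** 2
loss.backward()

clip_grad_norm([w, b], max_norm=1.0)
total = (w.grad.norm(2) ** 2 + b.grad.norm(2) ** 2) ** 0.5
print(total <= 1.0 + 1e-4)  # gradients were rescaled in place if they exceeded max_norm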
| 18,384
|
def array_to_string(array,
col_delim=' ',
row_delim='\n',
digits=8,
value_format='{}'):
"""
Convert a 1 or 2D array into a string with a specified number
of digits and delimiter. The reason this exists is that the
basic numpy array to string conversions are surprisingly bad.
Parameters
------------
array : (n,) or (n, d) float or int
Data to be converted
If shape is (n,) only column delimiter will be used
col_delim : str
What string should separate values in a column
row_delim : str
What string should separate values in a row
digits : int
How many digits should floating point numbers include
value_format : str
Format string for each value or sequence of values
If multiple values per value_format it must divide
into array evenly.
Returns
----------
formatted : str
String representation of original array
"""
# convert inputs to correct types
array = np.asanyarray(array)
digits = int(digits)
row_delim = str(row_delim)
col_delim = str(col_delim)
value_format = str(value_format)
# abort for non-flat arrays
if len(array.shape) > 2:
raise ValueError('conversion only works on 1D/2D arrays not %s!',
str(array.shape))
# allow a value to be repeated in a value format
repeats = value_format.count('{}')
if array.dtype.kind == 'i':
# integer types don't need a specified precision
format_str = value_format + col_delim
elif array.dtype.kind == 'f':
# add the digits formatting to floats
format_str = value_format.replace(
'{}', '{:.' + str(digits) + 'f}') + col_delim
else:
raise ValueError('dtype %s not convertible!',
array.dtype.name)
# length of extra delimiters at the end
end_junk = len(col_delim)
# if we have a 2D array add a row delimiter
if len(array.shape) == 2:
format_str *= array.shape[1]
# cut off the last column delimiter and add a row delimiter
format_str = format_str[:-len(col_delim)] + row_delim
end_junk = len(row_delim)
# expand format string to whole array
format_str *= len(array)
# if an array is repeated in the value format
# do the shaping here so we don't need to specify indexes
shaped = np.tile(array.reshape((-1, 1)),
(1, repeats)).reshape(-1)
# run the format operation and remove the extra delimiters
formatted = format_str.format(*shaped)[:-end_junk]
return formatted
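
# Quick checks of the integer (2D) and float (1D) paths (usage sketch):
import numpy as np

print(array_to_string(np.arange(6).reshape((3, 2))))
# 0 1
# 2 3
# 4 5
print(array_to_string([1.0, 2.5], digits=3))
# 1.000 2.500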
| 18,385
|
def get_dp_logs(logs):
"""Get only the list of data point logs, filter out the rest."""
filtered = []
compute_bias_for_types = [
"mouseout",
"add_to_list_via_card_click",
"add_to_list_via_scatterplot_click",
"select_from_list",
"remove_from_list",
]
for log in logs:
if log["type"] in compute_bias_for_types:
filtered.append(log)
return filtered
| 18,386
|
def assign_reports_to_watchlist(cb: CbThreatHunterAPI, watchlist_id: str, reports: List[Dict]) -> Dict:
"""Set a watchlist report IDs attribute to the passed reports.
Args:
cb: Cb PSC object
watchlist_id: The Watchlist ID to update.
reports: The Intel Reports.
Returns:
The Watchlist in dict form.
"""
watchlist_data = get_watchlist(cb, watchlist_id)
if not watchlist_data:
return None
watchlist_data["report_ids"] = [r["id"] for r in reports]
watchlist_data = update_watchlist(cb, watchlist_data)
if not watchlist_data:
LOGGER.error(f"unexpected problem updating watchlist with report IDs.")
return False
return watchlist_data
| 18,387
|
def visualize_filter(
image,
model,
layer,
filter_index,
optimization_parameters,
transformation=None,
regularization=None,
threshold=None,
):
"""Create a feature visualization for a filter in a layer of the model.
Args:
image (array): the image to be modified by the feature vis process.
model (object): the model to be used for the feature visualization.
layer (string): the name of the layer to be used in the visualization.
filter_index (number): the index of the filter to be visualized.
optimization_parameters (OptimizationParameters): the optimizer class to be applied.
        transformation (function): a function defining the transformations to be performed.
regularization (function): customized regularizers to be applied. Defaults to None.
threshold (list): Intermediate steps for visualization. Defaults to None.
Returns:
tuple: activation and result image for the process.
"""
image = tf.Variable(image)
feature_extractor = get_feature_extractor(model, layer)
_threshold_figures = figure(figsize=(15, 10), dpi=200)
print("Starting Feature Vis Process")
for iteration in range(optimization_parameters.iterations):
pctg = int(iteration / optimization_parameters.iterations * 100)
if transformation:
if not callable(transformation):
raise ValueError("The transformations need to be a function.")
image = transformation(image)
else:
image = trans.standard_transformation(image)
activation, image = gradient_ascent_step(
image, feature_extractor, filter_index, regularization,
optimization_parameters
)
print('>>', pctg, '%', end="\r", flush=True)
# Routine for creating a threshold image for Jupyter Notebooks
if isinstance(threshold, list) and (iteration in threshold):
threshold_image = _threshold_figures.add_subplot(
1, len(threshold), threshold.index(iteration) + 1
)
threshold_image.title.set_text(f"Step {iteration}")
threshold_view(image)
print('>> 100 %')
if image.shape[1] < 299 or image.shape[2] < 299:
image = tf.image.resize(image, [299, 299])
# Decode the resulting input image
image = imgs.deprocess_image(image[0].numpy())
return activation, image
| 18,388
|
def porcentaje(cadena1, cadena2):
    """
    (str), (str) -> (int)
    Return the percentage of positions at which the two DNA strings carry
    complementary bases (A-T and C-G pairs).
    >>> porcentaje("CG","GA")
    50
    >>> porcentaje("AT","GT")
    0
    >>> porcentaje ("AAAT","GCGA")
    25
    :param cadena1: first input string
    :param cadena2: second input string
    :return: percentage of complementarity between the strings
    """
    # Body inferred from the doctests above: count the positions where the
    # bases are Watson-Crick complements and express that as a percentage.
    complementos = {"A": "T", "T": "A", "C": "G", "G": "C"}
    coincidencias = sum(
        1 for b1, b2 in zip(cadena1, cadena2) if complementos.get(b1) == b2
    )
    return 100 * coincidencias // len(cadena1)
| 18,389
|
def subtraction(x, y):
"""
Subtraction x and y
>>> subtraction(-20, 80)
-100
"""
assert isinstance(x, (int, float)), "The x value must be an int or float"
assert isinstance(y, (int, float)), "The y value must be an int or float"
return x - y
| 18,390
|
async def paste(text: str) -> str:
    """Return an online bin of given text."""
    # Use the session as a context manager so the connection pool is closed.
    async with aiohttp.ClientSession() as session:
        async with session.post("https://hasteb.in/documents", data=text) as post:
            if post.status == 200:
                response = await post.text()
                return f"https://hasteb.in/{response[8:-2]}"
        post = await session.post("https://bin.drlazor.be", data={"val": text})
        if post.status == 200:
            return post.url
| 18,391
|
def lr_step(base_lr, curr_iter, decay_iters, warmup_iter=0):
"""Stepwise exponential-decay learning rate policy.
Args:
        base_lr: A scalar indicating the initial learning rate.
        curr_iter: A scalar indicating the current iteration.
        decay_iters: A list of scalars indicating the iterations at
            which the learning rate is decayed.
        warmup_iter: A scalar indicating the number of iterations during
            which the learning rate is linearly warmed up from base_lr / 10.
Return:
A scalar indicates the current adjusted learning rate.
"""
if curr_iter < warmup_iter:
alpha = curr_iter / warmup_iter
return base_lr * (1 / 10.0 * (1 - alpha) + alpha)
else:
return base_lr * (0.1 ** get_step_index(curr_iter, decay_iters))
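
# Usage sketch: warm up for 500 iterations, decay at 30k and 60k. The helper
# get_step_index is not shown in this snippet; the stand-in below assumes it
# returns the number of milestones in decay_iters that curr_iter has passed.
import bisect

def get_step_index(curr_iter, decay_iters):
    return bisect.bisect_right(decay_iters, curr_iter)

for it in [100, 500, 10000, 40000, 70000]:
    print(it, lr_step(0.1, it, [30000, 60000], warmup_iter=500))
# 100   -> ~0.028 (linear warmup between base_lr / 10 and base_lr)
# 500   -> 0.1
# 10000 -> 0.1
# 40000 -> 0.01
# 70000 -> 0.001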
| 18,392
|
def apo(coalg):
"""
Extending an anamorphism with the ability to halt.
In this version, a boolean is paired with the value that indicates halting.
"""
def run(a):
stop, fa = coalg(a)
return fa if stop else fa.map(run)
return run
| 18,393
|
def cat_sample(ps):
"""
sample from categorical distribution
ps is a 2D array whose rows are vectors of probabilities
"""
r = nr.rand(len(ps))
out = np.zeros(len(ps),dtype='i4')
cumsums = np.cumsum(ps, axis=1)
for (irow,csrow) in enumerate(cumsums):
for (icol, csel) in enumerate(csrow):
if csel > r[irow]:
out[irow] = icol
break
return out
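
# Usage sketch (the function expects numpy.random to be imported as `nr` in its
# module, as the body implies):
import numpy as np
import numpy.random as nr

nr.seed(0)
ps = np.array([[1.0, 0.0, 0.0],
               [0.0, 0.0, 1.0],
               [0.2, 0.3, 0.5]])
print(cat_sample(ps))
# first entry is always 0, second is always 2, third is 0/1/2 with p = 0.2/0.3/0.5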
| 18,394
|
def download_network(region, network_type):
"""Download network from OSM representing the region.
Arguments:
region {string} -- Location. E.g., "Manhattan Island, New York City, New York, USA"
network_type {string} -- Options: drive, drive_service, walk, bike, all, all_private
Returns:
networkx -- downloaded networkx
"""
# Download graph
G = ox.graph_from_place(region, network_type=network_type)
return G
| 18,395
|
def extract_text(text):
""" """
l = []
res = []
i = 0
while i < len(text) - 2:
h, i, _ = next_token(text, i)
obj = text[h:i]
l.append(obj)
for j, tok in enumerate(l):
if tok == b'Tf':
font = l[j-2]
fsize = float(l[j-1])
elif tok == b'Td':
x = float(l[j-2])
y = float(l[j-1])
elif tok == b'Tj':
text = l[j-1]
res.append((x, y, font, fsize, text[1:-1]))
return res
| 18,396
|
async def test_step_import(opp):
"""Test that the import step works."""
conf = {
CONF_USERNAME: "user@host.com",
CONF_PASSWORD: "123abc",
}
with patch(
"openpeerpower.components.tile.async_setup_entry", return_value=True
), patch("openpeerpower.components.tile.config_flow.async_login"):
result = await opp.config_entries.flow.async_init(
DOMAIN, context={"source": SOURCE_IMPORT}, data=conf
)
assert result["type"] == data_entry_flow.RESULT_TYPE_CREATE_ENTRY
assert result["title"] == "user@host.com"
assert result["data"] == {
CONF_USERNAME: "user@host.com",
CONF_PASSWORD: "123abc",
}
| 18,397
|
def add_pruning_arguments_to_parser(parser):
"""Add pruning arguments to existing argparse parser"""
parser.add_argument('--do_prune', action='store_true',
help="Perform pruning when training a model")
parser.add_argument('--pruning_config', type=str,
default='', help="Path to a pruning config")
parser.add_argument('--pruning_override', type=str, nargs='*', action=ConcatenateStringAction,
default='', help="JSON string to override pruning configuration file")
return parser
| 18,398
|
def invokeRule(priorAnswers,
bodyLiteralIterator,
sip,
otherargs,
priorBooleanGoalSuccess=False,
step=None,
debug=False,
buildProof=False):
"""
    Continue invocation of the rule using (given) prior answers and the list of
remaining body literals (& rule sip). If prior answers is a list,
computation is split disjunctively
[..] By combining the answers to all these subqueries, we generate
answers for the original query involving the rule head
Can also takes a PML step and updates it as it navigates the
top-down proof tree (passing it on and updating it where necessary)
"""
assert not buildProof or step is not None
proofLevel, memoizeMemory, sipCollection, \
factGraph, derivedPreds, processedRules = otherargs
remainingBodyList = [i for i in bodyLiteralIterator]
lazyGenerator = lazyGeneratorPeek(priorAnswers, 2)
if lazyGenerator.successful:
# There are multiple answers in this step, we need to call invokeRule
# recursively for each answer, returning the first positive attempt
success = False
rt = None
_step = None
ansNo = 0
for priorAns in lazyGenerator:
ansNo += 1
try:
if buildProof:
newStep = InferenceStep(step.parent,
step.rule,
source=step.source)
newStep.antecedents = [ant for ant in step.antecedents]
else:
newStep = None
for rt, _step in\
invokeRule([priorAns],
iter([i for i in remainingBodyList]),
sip,
otherargs,
priorBooleanGoalSuccess,
newStep,
debug=debug,
buildProof=buildProof):
                    if rt:
                        success = True
                        yield rt, _step
except RuleFailure:
pass
if not success:
# None of prior answers were successful
# indicate termination of rule processing
raise RuleFailure(
"Unable to solve either of %s against remainder of rule: %s" % (
ansNo, remainingBodyList))
# yield False, _InferenceStep(step.parent, step.rule, source=step.source)
else:
lazyGenerator = lazyGeneratorPeek(lazyGenerator)
projectedBindings = lazyGenerator.successful and first(lazyGenerator) or {}
# First we check if we can combine a large group of subsequent body literals
# into a single query
# if we have a template map then we use it to further
# distinguish which builtins can be solved via
# cumulative SPARQl query - else we solve
# builtins one at a time
def sparqlResolvable(literal):
if isinstance(literal, Uniterm):
return not literal.naf and GetOp(literal) not in derivedPreds
else:
return isinstance(literal, N3Builtin) and \
literal.uri in factGraph.templateMap
def sparqlResolvableNoTemplates(literal):
if isinstance(literal, Uniterm):
return not literal.naf and GetOp(literal) not in derivedPreds
else:
return False
conjGroundLiterals = list(
itertools.takewhile(
hasattr(factGraph, 'templateMap') and sparqlResolvable or \
sparqlResolvableNoTemplates,
remainingBodyList))
bodyLiteralIterator = iter(remainingBodyList)
if len(conjGroundLiterals) > 1:
# If there are literals to combine *and* a mapping from rule
# builtins to SPARQL FILTER templates ..
basePredicateVars = set(
reduce(lambda x, y: x + y,
[list(GetVariables(arg, secondOrder=True)) for arg in conjGroundLiterals]))
if projectedBindings:
openVars = basePredicateVars.intersection(projectedBindings)
else:
# We don't have any given bindings, so we need to treat
# the body as an open query
openVars = basePredicateVars
queryConj = EDBQuery([copy.deepcopy(lit) for lit in conjGroundLiterals],
factGraph,
openVars,
projectedBindings)
query, answers = queryConj.evaluate(debug)
if isinstance(answers, bool):
combinedAnswers = {}
rtCheck = answers
else:
if projectedBindings:
combinedAnswers = (mergeMappings1To2(ans,
projectedBindings,
makeImmutable=True) for ans in answers)
else:
combinedAnswers = (MakeImmutableDict(ans) for ans in answers)
combinedAnsLazyGenerator = lazyGeneratorPeek(combinedAnswers)
rtCheck = combinedAnsLazyGenerator.successful
if not rtCheck:
raise RuleFailure("No answers for combined SPARQL query: %s" % query)
else:
# We have solved the previous N body literals with a single
# conjunctive query, now we need to make each of the literals
# an antecedent to a 'query' step.
if buildProof:
queryStep = InferenceStep(None, source='some RDF graph')
queryStep.groundQuery = subquery
queryStep.bindings = {} # combinedAnswers[-1]
queryHash = URIRef(
"tag:info@fuxi.googlecode.com:Queries#" + \
makeMD5Digest(subquery))
queryStep.identifier = queryHash
for subGoal in conjGroundLiterals:
subNs = NodeSet(subGoal.toRDFTuple(),
identifier=BNode())
subNs.steps.append(queryStep)
step.antecedents.append(subNs)
queryStep.parent = subNs
for rt, _step in invokeRule(
isinstance(answers, bool) and [projectedBindings] or combinedAnsLazyGenerator,
iter(remainingBodyList[len(conjGroundLiterals):]),
sip,
otherargs,
isinstance(answers, bool),
step,
debug=debug,
buildProof=buildProof):
yield rt, _step
else:
# Continue processing rule body condition
# one literal at a time
try:
bodyLiteral = next(bodyLiteralIterator) if py3compat.PY3 else bodyLiteralIterator.next()
# if a N3 builtin, execute it using given bindings for boolean answer
# builtins are moved to end of rule when evaluating rules via sip
if isinstance(bodyLiteral, N3Builtin):
lhs = bodyLiteral.argument
rhs = bodyLiteral.result
lhs = isinstance(lhs, Variable) and projectedBindings[lhs] or lhs
rhs = isinstance(rhs, Variable) and projectedBindings[rhs] or rhs
assert lhs is not None and rhs is not None
if bodyLiteral.func(lhs, rhs):
if debug:
print("Invoked %s(%s, %s) -> True" % (
bodyLiteral.uri, lhs, rhs))
# positive answer means we can continue processing the rule body
if buildProof:
ns = NodeSet(bodyLiteral.toRDFTuple(),
identifier=BNode())
step.antecedents.append(ns)
                    # NOTE: positional arguments must follow the signature:
                    # priorBooleanGoalSuccess comes before step.
                    for rt, _step in invokeRule(
                        [projectedBindings],
                        bodyLiteralIterator,
                        sip,
                        otherargs,
                        priorBooleanGoalSuccess,
                        step,
                        debug=debug,
                        buildProof=buildProof):
yield rt, _step
else:
if debug:
print("Successfully invoked %s(%s, %s) -> False" % (
bodyLiteral.uri, lhs, rhs))
raise RuleFailure("Failed builtin invokation %s(%s, %s)" %
(bodyLiteral.uri, lhs, rhs))
else:
# For every body literal, subqueries are generated according
# to the sip
sipArcPred = URIRef(GetOp(bodyLiteral) + \
'_' + '_'.join(GetArgs(bodyLiteral)))
assert len(list(IncomingSIPArcs(sip, sipArcPred))) < 2
subquery = copy.deepcopy(bodyLiteral)
subquery.ground(projectedBindings)
for N, x in IncomingSIPArcs(sip, sipArcPred):
#That is, each subquery contains values for the bound arguments
#that are passed through the sip arcs entering the node
#corresponding to that literal
#Create query out of body literal and apply sip-provided bindings
subquery = copy.deepcopy(bodyLiteral)
subquery.ground(projectedBindings)
if literalIsGround(subquery):
#subquery is ground, so there will only be boolean answers
#we return the conjunction of the answers for the current
#subquery
answer = False
ns = None
answers = first(
itertools.dropwhile(
lambda item: not item[0],
SipStrategy(
subquery.toRDFTuple(),
sipCollection,
factGraph,
derivedPreds,
MakeImmutableDict(projectedBindings),
processedRules,
network=step is not None and \
step.parent.network or None,
debug=debug,
buildProof=buildProof,
memoizeMemory=memoizeMemory,
proofLevel=proofLevel)))
if answers:
answer, ns = answers
if not answer and not bodyLiteral.naf or \
(answer and bodyLiteral.naf):
#negative answer means the invokation of the rule fails
#either because we have a positive literal and there
#is no answer for the subgoal or the literal is
#negative and there is an answer for the subgoal
raise RuleFailure(
"No solutions solving ground query %s" % subquery)
else:
if buildProof:
if not answer and bodyLiteral.naf:
ns.naf = True
step.antecedents.append(ns)
#positive answer means we can continue processing the rule body
#either because we have a positive literal and answers
#for subgoal or a negative literal and no answers for the
#the goal
for rt, _step in invokeRule(
[projectedBindings],
bodyLiteralIterator,
sip,
otherargs,
True,
step,
debug=debug):
yield rt, _step
else:
_answers = \
SipStrategy(subquery.toRDFTuple(),
sipCollection,
factGraph,
derivedPreds,
MakeImmutableDict(projectedBindings),
processedRules,
network=step is not None and \
step.parent.network or None,
debug=debug,
buildProof=buildProof,
memoizeMemory=memoizeMemory,
proofLevel=proofLevel)
# solve (non-ground) subgoal
def collectAnswers(_ans):
for ans, ns in _ans:
if isinstance(ans, dict):
try:
map = mergeMappings1To2(
ans, projectedBindings,
makeImmutable=True)
yield map
except:
pass
combinedAnswers = collectAnswers(_answers)
answers = lazyGeneratorPeek(combinedAnswers)
if not answers.successful \
and not bodyLiteral.naf \
or (bodyLiteral.naf and answers.successful):
raise RuleFailure(
"No solutions solving ground query %s" % subquery)
else:
# Either we have a positive subgoal and answers
# or a negative subgoal and no answers
if buildProof:
if answers.successful:
goals = set([g for a, g in answers])
assert len(goals) == 1
step.antecedents.append(goals.pop())
else:
newNs = NodeSet(
bodyLiteral.toRDFTuple(),
network=step.parent.network,
identifier=BNode(),
naf=True)
step.antecedents.append(newNs)
for rt, _step in invokeRule(
answers,
bodyLiteralIterator,
sip,
otherargs,
priorBooleanGoalSuccess,
step,
debug=debug,
buildProof=buildProof):
yield rt, _step
except StopIteration:
#Finished processing rule
if priorBooleanGoalSuccess:
yield projectedBindings and projectedBindings or True, step
elif projectedBindings:
#Return the most recent (cumulative) answers and the given step
yield projectedBindings, step
else:
raise RuleFailure("Finished processing rule unsuccessfully")
| 18,399
|