content stringlengths 22 815k | id int64 0 4.91M |
|---|---|
def drdpgr(body, lon, lat, alt, re, f):
    """
    Compute the Jacobian matrix of the transformation from
    planetographic to rectangular coordinates.

    http://naif.jpl.nasa.gov/pub/naif/toolkit_docs/C/cspice/drdpgr_c.html

    :param body: Body with which coordinate system is associated.
    :type body: str
    :param lon: Planetographic longitude of a point (radians).
    :type lon: float
    :param lat: Planetographic latitude of a point (radians).
    :type lat: float
    :param alt: Altitude of a point above reference spheroid.
    :type alt: float
    :param re: Equatorial radius of the reference spheroid.
    :type re: float
    :param f: Flattening coefficient.
    :type f: float
    :return: Matrix of partial derivatives.
    :rtype: 3x3-Element Array of floats
    """
    # Convert Python values to the ctypes forms expected by CSPICE.
    c_body = stypes.stringToCharP(body)
    c_lon = ctypes.c_double(lon)
    c_lat = ctypes.c_double(lat)
    c_alt = ctypes.c_double(alt)
    c_re = ctypes.c_double(re)
    c_f = ctypes.c_double(f)
    # Output buffer filled in-place by the C routine.
    jacobi = stypes.emptyDoubleMatrix()
    libspice.drdpgr_c(c_body, c_lon, c_lat, c_alt, c_re, c_f, jacobi)
    return stypes.cMatrixToNumpy(jacobi)
def data_context_topology_context_topologyuuid_nodenode_uuid_node_rule_groupnode_rule_group_uuid_inter_rule_groupinter_rule_group_uuid_cost_characteristiccost_name_get(uuid, node_uuid, node_rule_group_uuid, inter_rule_group_uuid, cost_name):  # noqa: E501
    """data_context_topology_context_topologyuuid_nodenode_uuid_node_rule_groupnode_rule_group_uuid_inter_rule_groupinter_rule_group_uuid_cost_characteristiccost_name_get

    returns tapi.topology.CostCharacteristic # noqa: E501

    :param uuid: Id of topology
    :type uuid: str
    :param node_uuid: Id of node
    :type node_uuid: str
    :param node_rule_group_uuid: Id of node-rule-group
    :type node_rule_group_uuid: str
    :param inter_rule_group_uuid: Id of inter-rule-group
    :type inter_rule_group_uuid: str
    :param cost_name: Id of cost-characteristic
    :type cost_name: str

    :rtype: TapiTopologyCostCharacteristic
    """
    # Swagger-generated stub: real controller logic not implemented yet.
    placeholder = 'do some magic!'
    return placeholder
def remember(event):
    """
    Memorize the patterns (translated from Russian: 'Memorizing of images').

    Reads the three bipolar input vectors from the UI controls c1/c2/c3,
    mapping 0 -> -1, then stores them in the network.
    """
    patterns = ([], [], [])
    controls = (c1, c2, c3)
    for i in range(len_vector):
        for pattern, widgets in zip(patterns, controls):
            value = widgets[i].get()
            # Bipolar encoding: a zero component becomes -1.
            pattern.append(-1 if value == 0 else value)
    image1, image2, image3 = patterns
    print("X1: ", image1)
    print("X2: ", image2)
    print("X3: ", image3)
    network.remember(image1, image2, image3)
async def joinUserChannel(cls:"PhaazebotTwitch", Message:twitch_irc.Message, Context:TwitchCommandContext) -> None:
    """
    Lets a user (or an admin on behalf of another user) add Phaaze to a Twitch channel.

    Admins may pass a target channel name as the first command argument;
    regular users can only add Phaaze to their own channel.
    Always replies in chat: already joined, user not found, or success.
    """
    alternative_target:str = ""
    UserPerm:TwitchPermission = TwitchPermission(Message, None)
    if UserPerm.rank >= TwitchConst.REQUIRE_ADMIN:
        # admins or higher may target any channel without the owner's consent
        if len(Context.parts) >= 2:
            alternative_target = Context.part(1)
    # Check whether Phaaze already manages the requested channel.
    if alternative_target:
        # Admin path: look the channel up by user name via the name table.
        alternative_sql:str = """
            SELECT COUNT(*) AS `I`
            FROM `twitch_user_name`
            LEFT JOIN `twitch_channel`
                ON `twitch_channel`.`channel_id` = `twitch_user_name`.`user_id`
            WHERE `twitch_channel`.`managed` = 1
                AND `twitch_user_name`.`user_name` = %s"""
        res:List[dict] = cls.BASE.PhaazeDB.selectQuery(alternative_sql, (alternative_target,))
    else:
        # Normal path: check the requesting user's own channel by id.
        check_sql:str = """
            SELECT COUNT(*) AS `I`
            FROM `twitch_channel`
            WHERE `twitch_channel`.`managed` = 1
                AND `twitch_channel`.`channel_id` = %s"""
        res:List[dict] = cls.BASE.PhaazeDB.selectQuery(check_sql, (Message.user_id,))
    if res[0]['I']:
        return_content:str = f"@{Message.display_name} > Phaaze already is in your channel"
        if alternative_target: return_content = f"@{Message.display_name} > Phaaze already is in {alternative_target}'s channel"
        return await Message.Channel.sendMessage(cls, return_content)
    # after this point, we have a user or a admin input how want to add phaaze
    if alternative_target:
        # Resolve the requested login against the Twitch API.
        user_search:List[TwitchUser] = await getTwitchUsers(cls.BASE, alternative_target, item_type="login", limit=1)
        if not user_search:
            return_content:str = f"@{Message.display_name} > Phaaze could not find a user named {alternative_target} in the Twitch-API"
            return await Message.Channel.sendMessage(cls, return_content)
        else:
            NewEntry:TwitchUser = user_search.pop(0)
        # insert or update managed status
        cls.BASE.PhaazeDB.insertQuery(
            update_on_duplicate=True,
            table="twitch_channel",
            content={
                "channel_id": NewEntry.user_id,
                "managed": 1
            },
        )
        # insert or update the name table entry
        cls.BASE.PhaazeDB.insertQuery(
            update_on_duplicate=True,
            table="twitch_user_name",
            content={
                "user_id": NewEntry.user_id,
                "user_name": NewEntry.login,
                "user_display_name": NewEntry.display_name
            },
        )
    else:
        # insert or update managed status
        cls.BASE.PhaazeDB.insertQuery(
            update_on_duplicate=True,
            table="twitch_channel",
            content={
                "channel_id": Message.user_id,
                "managed": 1
            },
        )
        # insert or update the name table entry
        cls.BASE.PhaazeDB.insertQuery(
            update_on_duplicate=True,
            table="twitch_user_name",
            content={
                "user_id": Message.user_id,
                "user_name": Message.user_name,
                "user_display_name": Message.display_name
            },
        )
    # Perform the actual IRC join and confirm in chat.
    if alternative_target:
        await cls.joinChannel(alternative_target)
        return_content:str = f"@{Message.display_name} > Phaaze successful joined {alternative_target}'s channel"
    else:
        await cls.joinChannel(Message.user_name)
        return_content:str = f"@{Message.display_name} > Phaaze successful joined your channel"
    return await Message.Channel.sendMessage(cls, return_content)
def download_manager(main_keywords, extra_keywords=None, download_dir=None, total=None, download=True):
    """Delegator that runs download_images once per main keyword.

    All arguments other than ``main_keywords`` are forwarded unchanged to
    ``download_images``; see that function for their meaning.
    """
    for keyword in main_keywords:
        download_images(keyword,
                        extra_keywords=extra_keywords,
                        download_dir=download_dir,
                        total=total,
                        download=download)
def budget_balanced_ascending_auction(
        market:Market, ps_recipes: list)->TradeWithMultipleRecipes:
    """
    Calculate the trade and prices using generalized-ascending-auction.
    Allows multiple recipes, but only of the following kind:
    [ [1,0,0,x], [0,1,0,y], [0,0,1,z] ]
    (i.e., there are n-1 buyer categories and 1 seller category.
    One agent of category 1 buys x units; of category 2 buys y units; of category 3 buys z units; etc.)
    :param market: contains a list of k categories, each containing several agents.
    :param ps_recipes: a list of lists of integers, one integer per category.
    Each integer i represents the number of agents of category i
    that should be in each procurement-set.
    :return: Trade object, representing the trade and prices.
    >>> # ONE BUYER, ONE SELLER
    >>> market = Market([AgentCategory("buyer", [9.]), AgentCategory("seller", [-4.])])
    >>> print(market); print(budget_balanced_ascending_auction(market, [[1,1]]))
    Traders: [buyer: [9.0], seller: [-4.0]]
    No trade
    >>> market = Market([AgentCategory("buyer", [9.,8.]), AgentCategory("seller", [-4.])])
    >>> print(market); print(budget_balanced_ascending_auction(market, [[1,1]]))
    Traders: [buyer: [9.0, 8.0], seller: [-4.0]]
    No trade
    >>> market = Market([AgentCategory("buyer", [9.]), AgentCategory("seller", [-4.,-3.])])
    >>> print(market); print(budget_balanced_ascending_auction(market, [[1,1]]))
    Traders: [buyer: [9.0], seller: [-3.0, -4.0]]
    seller: [-3.0]: all 1 agents trade and pay -4.0
    buyer: [9.0]: all 1 agents trade and pay 4.0
    >>> market = Market([AgentCategory("buyer", [9.,8.]), AgentCategory("seller", [-4.,-3.])])
    >>> print(market); print(budget_balanced_ascending_auction(market, [[1,1]]))
    Traders: [buyer: [9.0, 8.0], seller: [-3.0, -4.0]]
    seller: [-3.0, -4.0]: random 1 out of 2 agents trade and pay -8.0
    buyer: [9.0]: all 1 agents trade and pay 8.0
    >>> # ONE BUYER, TWO SELLERS
    >>> market = Market([AgentCategory("buyer", [9.]), AgentCategory("seller", [-4.,-3.])])
    >>> print(market); print(budget_balanced_ascending_auction(market, [[1,2]]))
    Traders: [buyer: [9.0], seller: [-3.0, -4.0]]
    No trade
    >>> market = Market([AgentCategory("buyer", [9., 8., 7., 6.]), AgentCategory("seller", [-6., -5., -4.,-3.,-2.,-1.])])
    >>> print(market); print(budget_balanced_ascending_auction(market, [[1,2]]))
    Traders: [buyer: [9.0, 8.0, 7.0, 6.0], seller: [-1.0, -2.0, -3.0, -4.0, -5.0, -6.0]]
    seller: [-1.0, -2.0, -3.0, -4.0]: random 2 out of 4 agents trade and pay -4.0
    buyer: [9.0]: all 1 agents trade and pay 8.0
    """
    logger.info("\n#### Budget-Balanced Ascending Auction with Multiple Recipes - n-1 buyer categories\n")
    logger.info(market)
    logger.info("Procurement-set recipes: %s", ps_recipes)
    # How many seller units each buyer category consumes per buying agent.
    map_buyer_category_to_seller_count = _convert_recipes_to_seller_counts(ps_recipes, market.num_categories)
    logger.info("Map buyer category index to seller count: %s", map_buyer_category_to_seller_count)
    # NOTE: Calculating the optimal trade cannot be done greedily -
    # it requires solving a restricted instance of Knapsack.
    # optimal_trade = market.optimal_trade(ps_recipe, max_iterations=max_iterations)[0]
    # logger.info("For comparison, the optimal trade is: %s\n", optimal_trade)
    remaining_market = market.clone()
    # By convention the last category is the single seller category.
    buyer_categories = remaining_market.categories[:-1]
    num_buyer_categories = market.num_categories-1
    seller_category = remaining_market.categories[-1]
    prices = AscendingPriceVector([1, 1], -MAX_VALUE)
    buyer_price_index = 0
    seller_price_index = 1
    # prices[0] represents the price for all buyer-categories per single unit.
    # prices[1] represents the price for all sellers.
    try:
        num_units_offered = len(seller_category)
        num_units_demanded = sum([len(buyer_categories[i])*map_buyer_category_to_seller_count[i] for i in range(num_buyer_categories)])
        target_unit_count = min(num_units_demanded, num_units_offered)
        logger.info("%d units demanded by buyers, %d units offered by sellers, minimum is %d",
                    num_units_demanded, num_units_offered, target_unit_count)
        # Main ascending-price loop: alternately trim buyers and sellers until
        # one side empties (EmptyCategoryException ends the auction).
        while True:
            logger.info("Prices: %s, Target unit count: %d", prices, target_unit_count)
            price_index = buyer_price_index
            # Phase 1: raise the buyer price, removing the lowest-value-per-unit
            # buyer, until demand does not exceed the target unit count.
            while True:
                num_units_demanded = sum([len(buyer_categories[i]) * map_buyer_category_to_seller_count[i] for i in range(num_buyer_categories)])
                logger.info(" Buyers demand %d units", num_units_demanded)
                if num_units_demanded == 0: raise EmptyCategoryException()
                if num_units_demanded <= target_unit_count: break
                map_buyer_category_to_lowest_value = [category.lowest_agent_value() for category in buyer_categories]
                logger.debug("  map_buyer_category_to_lowest_value=%s", map_buyer_category_to_lowest_value)
                # Normalize each category's lowest value by its per-PS unit count
                # so categories with different recipes are comparable.
                map_buyer_category_to_lowest_value_per_unit = [value / count for value,count in zip(map_buyer_category_to_lowest_value,map_buyer_category_to_seller_count)]
                logger.debug("  map_buyer_category_to_lowest_value_per_unit=%s", map_buyer_category_to_lowest_value_per_unit)
                category_index_with_lowest_value_per_unit = min(range(num_buyer_categories), key=lambda i:map_buyer_category_to_lowest_value_per_unit[i])
                category_with_lowest_value_per_unit = buyer_categories[category_index_with_lowest_value_per_unit]
                lowest_value_per_unit = map_buyer_category_to_lowest_value_per_unit[category_index_with_lowest_value_per_unit]
                logger.info("  lowest value per unit is %f, of category %d (%s)", lowest_value_per_unit, category_index_with_lowest_value_per_unit, category_with_lowest_value_per_unit.name)
                prices.increase_price_up_to_balance(price_index, category_with_lowest_value_per_unit.lowest_agent_value()/map_buyer_category_to_seller_count[category_index_with_lowest_value_per_unit], category_with_lowest_value_per_unit.name)
                category_with_lowest_value_per_unit.remove_lowest_agent()
            category = seller_category
            # logger.info("\n### Step 1a: balancing the sellers (%s)", category.name)
            price_index = seller_price_index
            # Phase 2: raise the seller price, removing the lowest-value seller,
            # until supply does not exceed the target unit count.
            while True:
                num_units_offered = len(category)
                logger.info(" Sellers offer %d units", num_units_offered)
                if num_units_offered == 0: raise EmptyCategoryException()
                if num_units_offered <= target_unit_count: break
                prices.increase_price_up_to_balance(price_index, category.lowest_agent_value(), category.name)
                category.remove_lowest_agent()
            # Tighten the target and repeat.
            target_unit_count -= 1
    except EmptyCategoryException:
        logger.info("\nOne of the categories became empty. No trade!")
        logger.info("  Final price-per-unit vector: %s", prices)
    # Construct the final price-vector: buyers pay per-unit price times the
    # number of units their category consumes; sellers get the seller price.
    buyer_price_per_unit = prices[buyer_price_index]
    seller_price_per_unit = prices[seller_price_index]
    final_prices = \
        [buyer_price_per_unit * unit_count for unit_count in map_buyer_category_to_seller_count] + \
        [seller_price_per_unit]
    logger.info("  %s", remaining_market)
    return TradeWithMultipleRecipes(remaining_market.categories, map_buyer_category_to_seller_count, final_prices)
def test_streamrewriter_in_to_out_no_out_path(temp_file_creator):
    """Stream Rewriter in place edit with path object."""
    # Prepare a source file and a rewriter that wraps content via the
    # arbitrary formatter.
    path_in = temp_file_creator()
    path_in.write_text('yyy')
    rewriter = filesystem.StreamRewriter(get_arb_formatted_iter)

    with patch_logger(
            'pypyr.utils.filesystem', logging.DEBUG) as mock_logger_debug:
        rewriter.in_to_out(path_in, None)

    expected_calls = [
        call(f"opening source file: {path_in}"),
        call("opening temp file for writing..."),
        call(f"moving temp file to: {path_in}"),
    ]
    assert mock_logger_debug.mock_calls == expected_calls
    # The file must still exist and contain the rewritten content.
    assert path_in.is_file()
    assert path_in.read_text() == 'XyyyX'
def copy_inputs(paths,
                file_list):
    """.. Create copies to inputs from list of files containing copying instructions.

    Create copies using instructions contained in files of list ``file_list``. Instructions are `string formatted <https://docs.python.org/3.4/library/string.html#format-string-syntax>`__ using paths dictionary ``paths``. Copies are written in directory ``input_dir``. Status messages are appended to file ``make log``.

    Instruction files on how to create copies (destinations) from targets (sources) should be formatted in the following way.

    .. code-block:: md

        # Each line of instruction should contain a destination and source delimited by a `|`
        # Lines beginning with # are ignored
        destination | source

    .. Note::
        Instruction files can be specified with the * shell pattern (see `here <https://www.gnu.org/software/findutils/manual/html_node/find_html/Shell-Pattern-Matching.html>`__). Destinations and their sources can also be specified with the * shell pattern. The number of wildcards must be the same for both destinations and sources.

    Parameters
    ----------
    paths : dict
        Dictionary of paths. Dictionary should contain values for all keys listed below. Dictionary additionally used to string format copying instructions.
    file_list : str, list
        File or list of files containing copying instructions.

    Path Keys
    ---------
    input_dir : str
        Directory to write copies.
    makelog : str
        Path of makelog.

    Returns
    -------
    source_map : list
        List of (source, destination) for each copy created.

    Example
    -------
    Suppose you call the following function.

    .. code-block:: python

        copy_inputs(paths, ['file1'])

    Suppose ``paths`` contained the following values.

    .. code-block:: md

        paths = {'root': '/User/root/',
                 'makelog': 'make.log',
                 'input_dir': 'input'}

    Now suppose instruction file ``file1`` contained the following text.

    .. code-block:: md

        destination1 | {root}/source1

    The ``{root}`` in the instruction file would be string formatted using ``paths``. Therefore, the function would parse the instruction as:

    .. code-block:: md

        destination1 | /User/root/source1

    Example
    -------
    The following code would use instruction files ``file1`` and ``file2`` to create copies.

    .. code-block:: python

        copy_inputs(paths, ['file1', 'file2'])

    Suppose instruction file ``file1`` contained the following text.

    .. code-block:: md

        destination1 | source1
        destination2 | source2

    Copies ``destination1`` and ``destination1`` would be created in directory ``paths['input_dir']``. Their targets would be ``source1`` and ``source2``, respectively.

    Example
    -------
    Suppose you have the following targets.

    .. code-block:: md

        source1
        source2
        source3

    Specifying ``destination* | source*`` in one of your instruction files would create the following copies in ``paths['input_dir']``.

    .. code-block:: md

        destination1
        destination2
        destination3
    """
    try:
        # _create_copies reads the destination directory from 'move_dir'.
        paths['move_dir'] = get_path(paths, 'input_dir')
        source_map = _create_copies(paths, file_list)

        message = 'Input copies successfully created!'
        write_to_makelog(paths, message)
        print(colored(message, metadata.color_success))
        return source_map
    # Catch Exception (not bare except) so SystemExit/KeyboardInterrupt
    # still propagate; the error is logged to the makelog and re-raised
    # as a ColoredError with the original traceback attached.
    except Exception:
        error_message = 'An error was encountered with `copy_inputs`. Traceback can be found below.'
        error_message = format_message(error_message)
        write_to_makelog(paths, error_message + '\n\n' + traceback.format_exc())
        raise_from(ColoredError(error_message, traceback.format_exc()), None)
def validate_api_key_id(api_key_id):
    """
    >>> validate_api_key_id('3ddaeeb10ca690df3fee5e3bd1c329fa')
    >>> validate_api_key_id('3ddaeeb10ca690df3f') # doctest: +ELLIPSIS +IGNORE_EXCEPTION_DETAIL
    Traceback (most recent call last):
    ...
    VeracodeCredentialsError: API key ... is 18 characters,
    which is not long enough. The API key should be at least 32 characters
    >>> validate_api_key_id('0123456789abcdef'*128) # doctest: +ELLIPSIS +IGNORE_EXCEPTION_DETAIL
    Traceback (most recent call last):
    ...
    VeracodeCredentialsError: API key ... is 2048 characters,
    which is too long. The API key should not be more than 128 characters
    >>> validate_api_key_id('ZXHQddaeeb10ca690df3fee5e3bd1c329f') # doctest: +ELLIPSIS +IGNORE_EXCEPTION_DETAIL
    Traceback (most recent call last):
    ...
    VeracodeCredentialsError: API key ... does not seem to be hexadecimal
    """
    minimum_length = 32
    maximum_length = 128
    key_length = len(api_key_id)

    # Length checks first, then a hex sanity check.
    if key_length < minimum_length:
        raise VeracodeCredentialsError(
            'API key {key} is {key_length} characters, which is not '
            'long enough. The API key should be at least {minimum_length} '
            'characters'.format(key=api_key_id, key_length=key_length,
                                minimum_length=minimum_length))
    if key_length > maximum_length:
        raise VeracodeCredentialsError(
            'API key {key} is {key_length} characters, which is too '
            'long. The API key should not be more than {maximum_length} '
            'characters'.format(key=api_key_id, key_length=key_length,
                                maximum_length=maximum_length))
    if not validate_hex(api_key_id):
        raise VeracodeCredentialsError(
            'API key {} does not seem to be hexadecimal'.format(api_key_id))
def agaricus_lepiota() -> Tuple[np.ndarray, np.ndarray, np.ndarray, List[ALFeature]]:
    """
    Source:
    https://archive.ics.uci.edu/ml/datasets/Mushroom
    The function requires the file 'agaricus-lepiota.data' to be in the current
    folder.
    Returns the loaded dataset as a tuple of NumPy arrays, where the first contains
    (for each sample) the concatenation of all the features (which are one-hot encoded),
    the second contains (for each sample) the raw features as integer numbers and the
    the third contains (for each sample) the labels (0 <=> edible, 1 <=> poisonous).
    Also returns a list which contains a small description of each feature as a
    `ALFeature` object.
    """
    # Human-readable names for the 22 categorical features, in file order.
    FEATURES_NAMES = [
        'cap-shape',
        'cap-surface',
        'cap-color',
        'bruises?',
        'odor',
        'gill-attachment',
        'gill-spacing',
        'gill-size',
        'gill-color',
        'stalk-shape',
        'stalk-root',
        'stalk-surface-above-ring',
        'stalk-surface-below-ring',
        'stalk-color-above-ring',
        'stalk-color-below-ring',
        'veil-type',
        'veil-color',
        'ring-number',
        'ring-type',
        'spore-print-color',
        'population',
        'habitat'
    ]
    MISSING_FEATURE_LETTER = '?'
    # Valid category letters per feature, as defined by the UCI dataset docs.
    FEATURES_LETTERS = [
        ['b', 'c', 'x', 'f', 'k', 's'],
        ['f', 'g', 'y', 's'],
        ['n', 'b', 'c', 'g', 'r', 'p', 'u', 'e', 'w', 'y'],
        ['t', 'f'],
        ['a', 'l', 'c', 'y', 'f', 'm', 'n', 'p', 's'],
        ['a', 'd', 'f', 'n'],
        ['c', 'w', 'd'],
        ['b', 'n'],
        ['k', 'n', 'b', 'h', 'g', 'r', 'o', 'p', 'u', 'e', 'w', 'y'],
        ['e', 't'],
        ['b', 'c', 'u', 'e', 'z', 'r'],
        ['f', 'y', 'k', 's'],
        ['f', 'y', 'k', 's'],
        ['n', 'b', 'c', 'g', 'o', 'p', 'e', 'w', 'y'],
        ['n', 'b', 'c', 'g', 'o', 'p', 'e', 'w', 'y'],
        ['p', 'u'],
        ['n', 'o', 'w', 'y'],
        ['n', 'o', 't'],
        ['c', 'e', 'f', 'l', 'n', 'p', 's', 'z'],
        ['k', 'n', 'b', 'h', 'r', 'o', 'u', 'w', 'y'],
        ['a', 'c', 'n', 's', 'v', 'y'],
        ['g', 'l', 'm', 'p', 'u', 'w', 'd'],
    ]
    LABEL_LETTERS = ['e', 'p']
    assert len(FEATURES_LETTERS) == 22
    assert len(LABEL_LETTERS) == 2
    # compute number of features after one-hot encoding
    Xn = sum([len(c) for c in FEATURES_LETTERS])
    X = []   # one-hot encoded feature vectors
    Xr = []  # raw feature vectors
    Y = []   # labels
    with open("agaricus-lepiota.data") as f:
        for line in f:
            # Each line is "<label>,<f1>,<f2>,...,<f22>".
            label_letter, *features_letters = line.removesuffix('\n').split(',')
            assert type(label_letter) == str
            assert type(features_letters) == list
            assert label_letter in LABEL_LETTERS
            y = LABEL_LETTERS.index(label_letter)
            x = np.zeros(Xn, dtype=float)
            xr = np.zeros(22, dtype=int)
            idx = 0  # track the starting index of the current feature in the one-hot encoded vector
            for i, feature_letter in enumerate(features_letters):
                assert i < 22
                assert feature_letter in FEATURES_LETTERS[i] or feature_letter == MISSING_FEATURE_LETTER
                if feature_letter != MISSING_FEATURE_LETTER:
                    feature_value = FEATURES_LETTERS[i].index(feature_letter)
                    # one-hot encode the feature value into the feature vector X only if it is not
                    # missing (if it is missing, the one-hot vector associated to this feature is left
                    # at zero in all of its components)
                    x[idx + feature_value] = 1.0
                    # store the raw feature
                    xr[i] = feature_value
                else:
                    # if the feature is missing, the one-hot encoding is correct (i.e. all components
                    # are left as zero), but the raw encoding must be handled separately: here we set
                    # the value to -1 to represent the fact that the feature is missing with another
                    # category
                    xr[i] = -1
                idx += len(FEATURES_LETTERS[i])
            X.append(x)
            Xr.append(xr)
            Y.append(y)
    assert len(X) == len(Y)
    assert len(Xr) == len(Y)
    # One descriptor per feature: name plus number of categories.
    fds = [ALFeature(name, len(fl)) for name, fl in zip(FEATURES_NAMES, FEATURES_LETTERS)]
    return np.array(X, dtype=float), np.array(Xr, dtype=int), np.array(Y), fds
def receiveVoteFromLowLevel():
    """
    input -> params which is received from lower level in the hierarchy
    return -> 200, 400
    params = {
        string: int
        "candidate_id_1": num_votes,
        "candidate_id_2": num_votes,
        "candidate_id_3": num_votes,
        "batch_id": unique_int,
        "cluster_id": cluster_id_int,
        "level_number": level_number_int,
        ...
    }
    """
    params = request.get_json()
    # Forward the vote data to a randomly chosen orderer.
    rand_ord_ip = random.choice(orderer_ip_list)
    orderer_url = "http://" + rand_ord_ip + ":" + str(orderer_port) + "/api/orderer/receiveFromBCNode"
    res = requests.post(orderer_url, json=params)
    if res.status_code == 200:
        return make_response("vote successfully forwarded to orderer", 200)
    logging.error("Vote data forwarding to random orderer failed!")
    return make_response("vote error occurred", 400)
def test_cli_run_as_entrypoint():
    """Test the entrypoint script can be called.

    Allows verification that CLI tool is correctly installed? (via setup.py)
    """
    # Note - the script tool name is set in the entry_points section of setup.py
    # TODO add a documentation cross ref here
    completed = subprocess.run([TOOL, "--help"], check=True)  # nosec
    # check=True already raises on failure; the assert documents intent.
    assert completed.returncode == 0
def patch286() -> PatchDiscriminator:
    """
    Patch Discriminator from pix2pix
    """
    channel_widths = [64, 128, 256, 512, 512, 512]
    return PatchDiscriminator(channel_widths)
def best_exons(fp_in,fp_out,rank_by='log2_fold_change',
               probeset_col=0,gene_symbol_col=1,log2_fold_change_col=12,
               p_value_col=13):
    """Read exon data from file, find 'best' exons & output averaged data

    This function performs the 'best_exons' procedure: it reads exon
    data from a file, finds the 'best' exons for each gene symbol, and
    outputs averaged data for each gene symbol to a second file.

    Assumuptions are that the input file consists of tab separated values
    (with the first line optionally being a header line), and that the
    following column positions correspond to the data items below:

    Column 0: probeset name
    Column 1: gene symbol
    Column 12: log2 fold change
    Column 13: p-value

    (Columns numbered from 0.) These defaults can be changed using the
    appropriate function arguments.

    The final column of the output file is a flag indicating gene symbols
    for which there are only 4 exons or less in the input file.

    'rank_by' selects the criterion used to rank the exons. Possible
    values are any that are recognised by the 'best_exons' method of the
    ExonList class (currently only 'log2_fold_change' and 'p_value').

    Arguments:
      fp_in: file object for input file (must be opened for reading)
      fp_out: file object for output file (must be opened for writing)
      rank_by: (optional) criterion used to rank the exons
    """
    # Dictionary to store gene symbols (insertion-ordered)
    gene_symbols = OrderedDictionary()
    # Report lookup for specific columns
    print("Column assignments (numbered from zero):")
    print("* Probe set       : column %2d (%s column)" % (probeset_col,
                                                          ordinal(probeset_col+1)))
    print("* Gene symbol     : column %2d (%s column)" % (gene_symbol_col,
                                                          ordinal(gene_symbol_col+1)))
    print("* Log2 fold change: column %2d (%s column)" % (log2_fold_change_col,
                                                          ordinal(log2_fold_change_col+1)))
    print("* P-value         : column %2d (%s column)" % (p_value_col,
                                                          ordinal(p_value_col+1)))
    # Test if first line of file is a header line: if the first line cannot
    # be parsed into an Exon (numeric conversion fails), treat it as a header.
    first_line_is_header = False
    header_line = None
    for line in TabFileIterator(fp=fp_in):
        break
    try:
        # Try to populate an Exon object as a test
        Exon(line[probeset_col],
             line[gene_symbol_col],
             log2_fold_change=line[log2_fold_change_col],
             p_value=line[p_value_col])
    except ValueError:
        first_line_is_header = True
        header_line = str(line)
    # Read data from file
    # NOTE(review): this second TabFileIterator over the same fp_in continues
    # from the file's current position (one line already consumed above) —
    # confirm TabFileIterator does not buffer/rewind, otherwise the
    # "skip first line" logic below may skip a data line.
    for line in TabFileIterator(fp=fp_in):
        ##print("%s" % line)
        if first_line_is_header:
            # Skip first line
            first_line_is_header = False
            continue
        # Process data: group exons under their gene symbol
        gene_symbol = line[gene_symbol_col]
        if gene_symbol not in gene_symbols:
            logging.debug("Gene symbol: %s" % gene_symbol)
            gene_symbols[gene_symbol] = ExonList(gene_symbol)
        gene_symbols[gene_symbol].add_exon(
            Exon(line[probeset_col],
                 gene_symbol,
                 log2_fold_change=line[log2_fold_change_col],
                 p_value=line[p_value_col],
                 data=[x for x in line])
        )
    # Write output header line (original header minus the probeset column,
    # plus the "less than 4 exons" flag column)
    if header_line is not None:
        header_line = header_line.split('\t')
        header_line.append("Less than 4 exons")
        del(header_line[probeset_col])
        fp_out.write("%s\n" % tsv_line(header_line))
    # Iterate through gene symbols and find 'best' exons
    for gene_symbol in gene_symbols:
        # Sort by log2FoldChange
        logging.debug("*** Processing %s ***" % gene_symbol)
        exon_list = gene_symbols[gene_symbol]
        # Fetch best exons (i.e. 'top' three) based on log2 fold change
        best_exons = exon_list.best_exons(rank_by,n=3)
        logging.debug("Top exons ranked by %s" % rank_by)
        for exon in best_exons:
            logging.debug("%s" % exon)
        # Average data values and write to file; flag gene symbols with
        # fewer than 4 exons by appending '*' to the output row
        line = best_exons.average()
        if len(exon_list) < 4:
            logging.warning("Less than 4 exons for gene symbol '%s'" % gene_symbol)
            line.append('*')
        logging.debug("%s" % tsv_line(best_exons.average()))
        line[gene_symbol_col] = exon_list.gene_symbol
        del(line[probeset_col])
        logging.debug("%s" % tsv_line(line))
        fp_out.write("%s\n" % tsv_line(line))
def prefix(m):
    """Given a NFA `m`, construct a new NFA that accepts all prefixes of
    strings accepted by `m`.
    """
    if not m.is_finite():
        raise ValueError('m must be a finite automaton')
    # Fixed-point computation: a state is "co-accessible" if some accept
    # state is reachable from it. Propagate acceptance backwards along
    # transitions until nothing changes.
    accepting = set(m.get_accept_states())
    changed = True
    while changed:
        changed = False
        for t in m.get_transitions():
            [[q], [a]], [[r]] = t.lhs, t.rhs
            if r in accepting and q not in accepting:
                accepting.add(q)
                changed = True
    # Build the prefix automaton: same transitions and start state, but
    # every co-accessible state becomes accepting.
    result = machines.FiniteAutomaton()
    result.set_start_state(m.get_start_state())
    for t in m.get_transitions():
        result.add_transition(t)
    result.add_accept_states(accepting)
    return result
def _predict_k_neighbors(estimator, X):
    """Predict using a k-nearest neighbors estimator."""
    # Re-validate X against the fitted estimator without resetting its state.
    X = estimator._validate_data(X, reset=False)
    # Distances to and indices of each query point's k nearest training samples.
    neigh_dist, neigh_ind = estimator.kneighbors(X)
    # Stored training targets of those neighbors.
    neigh_Y = estimator._y[neigh_ind]
    # NOTE(review): scikit-learn's private _get_weights takes (dist, weights);
    # this call passes neigh_Y first in a 3-argument form — confirm it matches
    # this project's own _get_weights signature.
    neigh_weights = _get_weights(neigh_Y, neigh_dist, estimator.weights)
    return _aggregate_neighbors(estimator, neigh_Y, neigh_weights)
def _loop_over(var):
""" Checks if a variable is in the form of an iterable (list/tuple)
and if not, returns it as a list. Useful for allowing argument
inputs to be either lists (e.g. [1, 3, 4]) or single-valued (e.g. 3).
Parameters
----------
var : int or float or list
Variable to check for iterability.
Returns
-------
var : list
Variable converted to list if single-valued input.
"""
if hasattr(var,"__iter__"):
return var
else:
return [var] | 5,324,616 |
def _get_servings_rest():
    """
    Makes a REST request to Hopsworks to get a list of all servings in the current project

    Returns:
        JSON response parsed as a python dict

    Raises:
        :RestAPIError: if there was an error with the REST call to Hopsworks
    """
    method = constants.HTTP_CONFIG.HTTP_GET
    # Build the resource path: /<api>/<project>/<project_id>/<serving>/
    resource_url = (constants.DELIMITERS.SLASH_DELIMITER +
                    constants.REST_CONFIG.HOPSWORKS_REST_RESOURCE + constants.DELIMITERS.SLASH_DELIMITER +
                    constants.REST_CONFIG.HOPSWORKS_PROJECT_RESOURCE + constants.DELIMITERS.SLASH_DELIMITER +
                    hdfs.project_id() + constants.DELIMITERS.SLASH_DELIMITER +
                    constants.REST_CONFIG.HOPSWORKS_SERVING_RESOURCE + constants.DELIMITERS.SLASH_DELIMITER)
    response = util.send_request(method, resource_url)
    response_object = response.json()
    # Surface non-200 responses as a RestAPIError with full server context.
    if response.status_code != 200:
        error_code, error_msg, user_msg = util._parse_rest_error(response_object)
        raise exceptions.RestAPIError("Could not fetch list of servings from Hopsworks REST API (url: {}), "
                                      "server response: \n "
                                      "HTTP code: {}, HTTP reason: {}, error code: {}, "
                                      "error msg: {}, user msg: {}".format(resource_url, response.status_code, response.reason,
                                                                           error_code, error_msg, user_msg))
    return response_object
def start():
    """
    Starts ser2sock

    Launches the ser2sock daemon in the background.

    Raises:
        NotFound: if the ser2sock executable is not on the PATH.
    """
    try:
        sh.ser2sock('-d', _bg=True)
    # `except X, err:` is Python-2-only syntax and a SyntaxError on
    # Python 3; `except X:` is valid on both (the bound err was unused).
    except sh.CommandNotFound:
        raise NotFound('Could not locate ser2sock.')
def transform_database_account_resources(
    account_id: Any, name: Any, resource_group: Any, resources: List[Dict],
) -> List[Dict]:
    """
    Transform the SQL Database/Cassandra Keyspace/MongoDB Database/Table Resource response for neo4j ingestion.

    Annotates every resource dict in place with its parent database
    account's name/id and resource group, then returns the same list.
    """
    for res in resources:
        res.update(
            database_account_name=name,
            database_account_id=account_id,
            resource_group_name=resource_group,
        )
    return resources
def test_mypy_passes(cookies, context_override):
    """Generated project should pass mypy."""
    # The `cookies` and `context_override` fixtures are required for their
    # side effects (generating the project under test) even though they are
    # not referenced directly here. sh.make raises if `make mypy` fails,
    # which fails the test.
    sh.make("mypy")
def jacsim(doc1, doc2, docsAsShingleSets, sign_matrix):
    """Jaccard Similarity estimated from the signature matrix.

    :param doc1: First doc (column index) to be compared
    :param doc2: Second doc (column index) to be compared
    :param docsAsShingleSets: Document wise shingles (unused here; kept for
        interface compatibility)
    :param sign_matrix: The Signature matrix
    :return: The jaccard similarity value
    """
    column_a = sign_matrix[:, doc1]
    column_b = sign_matrix[:, doc2]
    # Count positions where both signatures are non-zero.
    overlap = int(np.count_nonzero(np.logical_and(column_a, column_b)))
    return overlap / len(column_a)
def train(model, loss_function, optimizer, train_loader, val_loader=None, max_epochs=100, verbose=False, debug=False):
    """
    Helper method for training a model.

    Args:
        model: PyTorch model.
        loss_function: Loss function.
        optimizer: Loss function.
        train_loader: pytorch DataLoader for the training set.
        val_loader: pytorch DataLoader for the validation set.
        max_epochs: maximum number of iterations over the training set.
        verbose: print training information
        debug: print debugging information, if verbose is True

    Returns:
        list: List of losses per epoch.
    """
    # Stores the loss at the end of each epoch
    losses = []
    # If we have validation data, use it for the initial (pre-training) loss
    if val_loader is not None:
        losses.append(validate(model=model, loss_function=loss_function, validation_loader=val_loader))
    # Else use an EWMA of the batch losses
    loss_eav = 0
    eav_lambda = 0.2
    for epoch in range(max_epochs):
        for batch_num, sample_batch in enumerate(train_loader):
            batch_x, batch_target = sample_batch
            output = model(batch_x)
            # unsqueeze(1) matches target shape (N,) to output shape (N, 1)
            loss = loss_function(output, batch_target.unsqueeze(1))
            # Calculate the eav of batch loss
            # NOTE(review): loss_eav is built from graph-attached tensors and is
            # never detached, so it may retain autograd history across batches —
            # confirm whether .detach() was intended here.
            loss_eav += eav_lambda * loss - eav_lambda * loss_eav
            # zero the gradient buffers
            optimizer.zero_grad()
            # Store the gradient of loss function on the leaf nodes
            loss.backward()
            # Does the update
            optimizer.step()
        # Epoch loss: validation loss if available, else the EWMA of batch losses
        if val_loader is not None:
            epoch_loss = validate(model=model, loss_function=loss_function, validation_loader=val_loader)
        else:
            epoch_loss = loss_eav.clone()
        losses.append(epoch_loss)
        # epoch_print = DEBUG and (not (epoch % int(num_epochs / 10)))
        if verbose:
            print('\n')
            print('Epoch {}'.format(epoch))
            print('Loss: {}'.format(epoch_loss))
        if verbose and debug:
            # Inspect the weights/gradients of the model's fc3 layer
            layer = model.fc3
            print('Weight:')
            print(layer.weight)
            # w = layer.weight.detach().numpy()
            # print(w.shape)
            # print(numpy.sum(w, axis=1))
            # plt.hist(w.flatten())
            # plt.show()
            print(layer.bias.data)
            print('Gradient:')
            print(layer.weight.grad)
            print(layer.bias.grad)
    # Training summary
    if verbose:
        print('\n')
        print('Initial loss: {:.3f}'.format(losses[0]))
        print('Final loss: {:.3f}'.format(losses[-1]))
    return losses
def recent_stream(model_name=None, filter=None):
    """Return a dict keyed by short model name with the most recent stream.

    Each value is a dict with keys ``stream``, ``date``, ``stage`` and
    ``job_list`` describing the latest stream found for that model.

    model_name : str | None
        the full name of the model, e.g. P302_8years/uw8000. If None, use
        the current folder path (last two path components).
    filter : callable | None
        optional predicate on the short model name; models for which it
        returns False are skipped.
    """
    if model_name is None: model_name='/'.join(os.getcwd().split('/')[-2:])
    sinfo = StreamInfo(model_name)
    sdf = pd.DataFrame(sinfo).T
    # select last one for each model
    recent = dict()
    for model, s in zip(sdf.model, sdf.index):
        m = model.split('/')[-1]
        if filter is not None and not filter(m): continue
        # BUG FIX: DataFrame.ix was removed in pandas 1.0; .loc is the
        # label-based equivalent.
        date = sdf.loc[s].date
        stage = sdf.loc[s].stage
        job_list = sdf.loc[s].job_list
        if m not in recent: recent[m]=dict(stream=s, date=date, stage=stage, job_list=job_list)
        else: recent[m].update(stream=s, date=date, stage=stage, job_list=job_list)
    return recent
def batch_tile_redshifts(tileid, exptable, group, spectrographs=None,
                         submit=False, queue='realtime', reservation=None,
                         dependency=None, system_name=None, run_zmtl=False,
                         noafterburners=False):
    """
    Generate batch script for spectra+coadd+redshifts for a tile
    Args:
        tileid (int): Tile ID
        exptable (Table): has columns NIGHT EXPID to use; ignores other columns.
            Doesn't need to be full pipeline exposures table (but could be)
        group (str): cumulative, pernight, perexp, or a custom name
    Options:
        spectrographs (list of int): spectrographs to include
        submit (bool): also submit batch script to queue
        queue (str): batch queue name
        reservation (str): batch reservation name
        dependency (str): passed to sbatch --dependency upon submit
        system_name (str): batch system name, e.g. cori-haswell, perlmutter-gpu
        run_zmtl (bool): if True, also run make_zmtl_files
        noafterburners (bool): if True, do not run QSO afterburners
    Returns tuple (scriptpath, error):
        scriptpath (str): full path to generated script
        err (int): return code from submitting job (0 if submit=False)
    By default this generates the script but don't submit it
    """
    log = get_logger()
    if spectrographs is None:
        spectrographs = (0,1,2,3,4,5,6,7,8,9)
    # Input validation: perexp needs exactly one exposure row, pernight needs
    # a single night, and every row must belong to the requested tile.
    if (group == 'perexp') and len(exptable)>1:
        msg = f'group=perexp requires 1 exptable row, not {len(exptable)}'
        log.error(msg)
        raise ValueError(msg)
    nights = np.unique(np.asarray(exptable['NIGHT']))
    if (group in ['pernight', 'pernight-v0']) and len(nights)>1:
        msg = f'group=pernight requires all exptable rows to be same night, not {nights}'
        log.error(msg)
        raise ValueError(msg)
    tileids = np.unique(np.asarray(exptable['TILEID']))
    if len(tileids)>1:
        msg = f'batch_tile_redshifts requires all exptable rows to be same tileid, not {tileids}'
        log.error(msg)
        raise ValueError(msg)
    elif len(tileids) == 1 and tileids[0] != tileid:
        msg = f'Specified tileid={tileid} didnt match tileid given in exptable, {tileids}'
        log.error(msg)
        raise ValueError(msg)
    spectro_string = ' '.join([str(sp) for sp in spectrographs])
    # one batch node per spectrograph
    num_nodes = len(spectrographs)
    # $SPECTRO is left literal on purpose -- it is expanded by the generated
    # shell script, not by Python.
    frame_glob = list()
    for night, expid in zip(exptable['NIGHT'], exptable['EXPID']):
        frame_glob.append(f'exposures/{night}/{expid:08d}/cframe-[brz]$SPECTRO-{expid:08d}.fits')
    #- Be explicit about naming. Night should be the most recent Night.
    #- Expid only used for labeling perexp, for which there is only one row here anyway
    night = np.max(exptable['NIGHT'])
    expid = np.min(exptable['EXPID'])
    frame_glob = ' '.join(frame_glob)
    batchscript = get_tile_redshift_script_pathname(
        tileid, group, night=night, expid=expid)
    # %j is expanded to the job id by Slurm.
    # NOTE(review): batchlog is computed but never used below -- confirm
    # whether write_redshift_script should receive it.
    batchlog = batchscript.replace('.slurm', r'-%j.log')
    scriptdir = os.path.split(batchscript)[0]
    os.makedirs(scriptdir, exist_ok=True)
    outdir = get_tile_redshift_relpath(tileid, group, night=night, expid=expid)
    suffix = get_tile_redshift_script_suffix(
        tileid, group, night=night, expid=expid)
    jobname = f'redrock-{suffix}'
    write_redshift_script(batchscript, outdir,
                          jobname=jobname,
                          num_nodes=num_nodes,
                          group=group,
                          spectro_string=spectro_string, suffix=suffix,
                          frame_glob=frame_glob,
                          queue=queue, system_name=system_name,
                          onetile=True, tileid=tileid, night=night,
                          run_zmtl=run_zmtl, noafterburners=noafterburners)
    err = 0
    if submit:
        cmd = ['sbatch' ,]
        if reservation:
            cmd.extend(['--reservation', reservation])
        if dependency:
            cmd.extend(['--dependency', dependency])
        # - sbatch requires the script to be last, after all options
        cmd.append(batchscript)
        err = subprocess.call(cmd)
        basename = os.path.basename(batchscript)
        if err == 0:
            log.info(f'submitted {basename}')
        else:
            log.error(f'Error {err} submitting {basename}')
    return batchscript, err | 5,324,624 |
def symplot(b,
            max_m = 20,
            max_n = 20,
            ymin = None,
            sqrts = False,
            log = True,
            B0 = True,
            helical_detail = False,
            legend_args = {"loc":"best"},
            **kwargs):
    """
    Plot the radial variation of all the Fourier modes of :math:`|B|`
    in Boozer coordinates. Color is used to group modes with
    :math:`m=0` and/or :math:`n=0`.
    Args:
        b (Booz_xform, str): The Booz_xform instance to plot,
          or a filename of a boozmn_*.nc file.
        max_m (int): Maximum poloidal mode number to include in the plot.
        max_n (int): Maximum toroidal mode number (divided by nfp) to include in the plot.
        ymin (float): Lower limit for the y-axis. Only used if ``log==True``.
        sqrts (bool): If true, the x axis will be sqrt(toroidal flux) instead of toroidal flux.
        log (bool): Whether to use a logarithmic y axis.
        B0 (bool): Whether to include the m=n=0 mode in the figure.
        helical_detail (bool): Whether to show modes with ``n = nfp * m`` and
          ``n = -nfp * m`` in a separate color.
        legend_args (dict): Any arguments to pass to ``plt.legend()``.
          Useful for setting the legend font size and location.
        kwargs: Any additional key-value pairs to pass to matplotlib's ``plot`` command.
    This function can generate figures like this:
    .. image:: symplot1.png
       :width: 400
    .. image:: symplot2.png
       :width: 400
    """
    # NOTE(review): legend_args is a mutable default shared across calls; it
    # is only read here, never mutated.
    b = handle_b_input(b)
    # Colors group modes by symmetry class (background / QA / mirror / helical)
    background_color = 'b'
    QA_color = [0, 0.7, 0]
    mirror_color = [0.7, 0.5, 0]
    helical_color = [1, 0, 1]
    helical_plus_color = 'gray'
    helical_minus_color = 'c'
    # If ymin is not specified, pick a default value such that the
    # plot mostly shows the largest modes, not all the modes down to
    # machine precision.
    if ymin is None:
        ymin = np.max(b.bmnc_b) * 1e-4
    mnmax = len(b.xm_b)
    if sqrts:
        rad = np.sqrt(b.s_b)
    else:
        rad = b.s_b
    def my_abs(x):
        # On a log axis negative amplitudes would vanish; plot magnitudes.
        if log:
            return np.abs(x)
        else:
            return x
    # Draw a reference line at 0.
    if not log:
        plt.plot([0, 1], [0, 0], ':k')
    # First, plot just the 1st mode of each type, so the legend looks nice.
    if B0:
        for imode in range(mnmax):
            if b.xn_b[imode] == 0 and b.xm_b[imode] == 0:
                plt.plot(rad, my_abs(b.bmnc_b[imode, :]), color=background_color,
                         label='m = 0, n = 0 (Background)', **kwargs)
                break
    for imode in range(mnmax):
        if b.xn_b[imode] == 0 and b.xm_b[imode] != 0:
            plt.plot(rad, my_abs(b.bmnc_b[imode, :]), color=QA_color,
                     label=r'm $\ne$ 0, n = 0 (Quasiaxisymmetric)', **kwargs)
            break
    for imode in range(mnmax):
        if b.xn_b[imode] != 0 and b.xm_b[imode] == 0:
            plt.plot(rad, my_abs(b.bmnc_b[imode, :]), color=mirror_color,
                     label=r'm = 0, n $\ne$ 0 (Mirror)', **kwargs)
            break
    if helical_detail:
        # Separate legend entries for the n = +/- nfp*m helical families
        for imode in range(mnmax):
            if b.xn_b[imode] == b.xm_b[imode] * b.nfp and b.xm_b[imode] != 0:
                plt.plot(rad, my_abs(b.bmnc_b[imode, :]), color=helical_plus_color,
                         label=r'n = n$_{fp}$ m (Helical)', **kwargs)
                break
        for imode in range(mnmax):
            if b.xn_b[imode] == -b.xm_b[imode] * b.nfp and b.xm_b[imode] != 0:
                plt.plot(rad, my_abs(b.bmnc_b[imode, :]), color=helical_minus_color,
                         label=r'n = -n$_{fp}$ m (Helical)', **kwargs)
                break
        for imode in range(mnmax):
            if b.xn_b[imode] != 0 and b.xm_b[imode] != 0:
                plt.plot(rad, my_abs(b.bmnc_b[imode, :]), color=helical_color,
                         label=r'Other helical', **kwargs)
                break
    else:
        for imode in range(mnmax):
            if b.xn_b[imode] != 0 and b.xm_b[imode] != 0 \
               and b.xn_b[imode] != b.xm_b[imode] * b.nfp and b.xn_b[imode] != -b.xm_b[imode] * b.nfp:
                plt.plot(rad, my_abs(b.bmnc_b[imode, :]), color=helical_color,
                         label=r'm $\ne$ 0, n $\ne$ 0 (Helical)', **kwargs)
                break
    plt.legend(**legend_args)
    # Now that the legend is made, plot all modes
    for imode in range(mnmax):
        if np.abs(b.xm_b[imode]) > max_m:
            continue
        if np.abs(b.xn_b[imode]) > max_n * b.nfp:
            continue
        if b.xn_b[imode] == 0:
            if b.xm_b[imode] == 0:
                mycolor = background_color
                if not B0:
                    continue
            else:
                mycolor = QA_color
        else:
            if b.xm_b[imode] == 0:
                mycolor = mirror_color
            else:
                # The mode is helical
                if helical_detail:
                    if b.xn_b[imode] == b.xm_b[imode] * b.nfp:
                        mycolor = helical_plus_color
                    elif b.xn_b[imode] == -b.xm_b[imode] * b.nfp:
                        mycolor = helical_minus_color
                    else:
                        mycolor = helical_color
                else:
                    mycolor = helical_color
        plt.plot(rad, my_abs(b.bmnc_b[imode, :]), color=mycolor, **kwargs)
    if sqrts:
        plt.xlabel('$r/a$ = sqrt(Normalized toroidal flux)')
    else:
        plt.xlabel('$s$ = Normalized toroidal flux')
    plt.title('Fourier harmonics of |B| in Boozer coordinates [Tesla]')
    plt.xlim([0, 1])
    if log:
        plt.yscale("log")
        plt.gca().set_ylim(bottom=ymin) | 5,324,625 |
def get_holistic_keypoints(frames, holistic=None):
    """
    Run MediaPipe Holistic over a sequence of frames and return stacked
    keypoints and per-landmark confidences.

    For videos, it's optimal to create with `static_image_mode=False` for each video.
    https://google.github.io/mediapipe/solutions/holistic.html#static_image_mode

    Args:
        frames: iterable of image arrays, one per video frame.
        holistic: optional pre-built ``mp_holistic.Holistic`` instance. If
            None, a fresh one is created (and closed) for this call.
            BUG FIX: the previous default built a single instance at import
            time and closed it at the end of the call, so every call after
            the first one using the default failed.

    Returns:
        (keypoints, confs): stacked numpy arrays, first axis = frame index.
    """
    if holistic is None:
        holistic = mp_holistic.Holistic(static_image_mode=False, model_complexity=2)
    keypoints = []
    confs = []
    for frame in frames:
        results = holistic.process(frame)
        body_data, body_conf = process_body_landmarks(
            results.pose_landmarks, N_BODY_LANDMARKS
        )
        face_data, face_conf = process_other_landmarks(
            results.face_landmarks, N_FACE_LANDMARKS
        )
        lh_data, lh_conf = process_other_landmarks(
            results.left_hand_landmarks, N_HAND_LANDMARKS
        )
        rh_data, rh_conf = process_other_landmarks(
            results.right_hand_landmarks, N_HAND_LANDMARKS
        )
        data = np.concatenate([body_data, face_data, lh_data, rh_data])
        conf = np.concatenate([body_conf, face_conf, lh_conf, rh_conf])
        keypoints.append(data)
        confs.append(conf)
    # TODO: Reuse the same object when this issue is fixed: https://github.com/google/mediapipe/issues/2152
    holistic.close()
    del holistic
    gc.collect()
    keypoints = np.stack(keypoints)
    confs = np.stack(confs)
    return keypoints, confs
def get_arp_info(pkt):
    """
    Decode an ARP packet into its header fields and addresses.

    Raises ARPError when the buffer is shorter than the fixed header or
    shorter than the length implied by the hardware/protocol sizes.
    """
    if len(pkt) < 8:
        raise ARPError("ARP header too short")
    ar_hrd, ar_pro, ar_hln, ar_pln, ar_op = struct.unpack("!HHBBH", pkt[0:8])
    # Variable part: sender hw addr, sender proto addr, target hw/proto addrs.
    pkt_len = 8 + (2 * ar_hln) + (2 * ar_pln)
    if len(pkt) < pkt_len:
        raise ARPError("ARP packet too short")
    fields = []
    pos = 8
    for width in (ar_hln, ar_pln, ar_hln, ar_pln):
        fields.append(pkt[pos:pos + width])
        pos += width
    ar_sha, ar_spa, ar_tha, ar_tpa = fields
    return (ar_hrd, ar_pro, ar_hln, ar_pln,
            ar_op, ar_sha, ar_spa, ar_tha, ar_tpa)
def update_ref_point(ref_point, fy):
    """
    Update the reference point with an offspring's fitness values.

    parameter
    ----------
    ref_point: 1D-Array
        the position of the original reference point
    fy: 1D-Array
        the fitness values of the offspring

    return
    ----------
    1D-Array
        the elementwise minimum of the reference point and the offspring
    """
    # Elementwise minimum is equivalent to stacking and reducing along axis 0.
    return np.minimum(ref_point, fy)
def parse(grm: util.PathLike,
          datum: str,
          **kwargs: Any) -> interface.Response:
    """
    Parse a single sentence *datum* with ACE using grammar *grm*.

    Args:
        grm (str): path to a compiled grammar image
        datum (str): the sentence to parse
        **kwargs: additional keyword arguments to pass to the ACEParser
    Returns:
        :class:`~delphin.interface.Response`
    Example:
        >>> response = ace.parse('erg.dat', 'Dogs bark.')
        NOTE: parsed 1 / 1 sentences, avg 797k, time 0.00707s
    """
    # Delegate to the iterable form with a one-element batch and take
    # the single response it yields.
    responses = parse_from_iterable(grm, [datum], **kwargs)
    return next(responses)
def EVLAPolCal(uv, InsCals, err, InsCalPoln=None, \
               doCalib=2, gainUse=0, doBand=1, BPVer=0, flagVer=-1, \
               solType=" ", fixPoln=False, avgIF=False, \
               solInt=0.0, refAnt=0, ChInc=1, ChWid=1, \
               doFitRL=False, doFitOri=True,
               check=False, debug = False, \
               nThreads=1, noScrat=[], logfile = ""):
    """
    Instrumental Polarization
    Do Instrumental
    Instrumental cal uses PCal
    Returns task error code, 0=OK, else failed

    * uv       = UV data object to calibrate
    * InsCals  = Instrumental poln calibrators, name or list of names
      If None no instrumental cal
    * err      = Obit error/message stack
    * InsCalPoln if non None then the list of source parameters as
      tuples in the order of calibrators in InsCals,
      (PPol, RLPhase, RM)
      PPol    = fractional poln, <0 => fit
      RLPhase = R-L phase difference in deg
      RM      = Rotation measure
    * doCalib  = Apply prior calibration table, positive=>calibrate
    * gainUse  = CL/SN table to apply
    * doBand   = >0 => apply bandpass calibration
    * BPVer    = AIPS BP table to apply
    * flagVer  = Input Flagging table version
    * solType  = solution type, " ", "LM "
    * fixPoln  = if True, don't solve for source polarization in ins. cal
      assumed 0, ignored if InsCalPoln given
    * avgIF    = NYI if True, average IFs in ins. cal.
    * solInt   = instrumental solution interval (min)
    * refAnt   = Reference antenna
    * ChInc    = channel increment for solutions
    * ChWid    = number of channels to average for solution.
    * doFitRL  = Fit R-L (or X-Y) gain phase
    * doFitOri = Fit (linear feed) orientations?
    * nThreads = Number of threads to use in imaging
    * check    = Only check script, don't execute tasks
    * debug    = Run tasks debug, show input
    * noScrat  = list of disks to avoid for scratch files
    * logfile  = Log file for task
    """
    ################################################################
    # Don't bother if not full polarization
    d = uv.Desc.Dict
    nstoke = int(d["inaxes"][d["jlocs"]])
    if nstoke<4:
        mess = "Skip Instrumental polarization corrections - not full stokes"
        printMess(mess, logfile)
        return 0
    mess = "Instrumental polarization calibration "
    printMess(mess, logfile)
    # Instrumental calibration
    if InsCals!=None:
        pcal = ObitTask.ObitTask("PCal")
        try:
            # This sometimes gets lost
            pcal.userno = OSystem.PGetAIPSuser()
        except Exception as exception:
            pass
        pcal.logFile = logfile
        if not check:
            setname(uv,pcal)
        if type(InsCals)==list:
            pcal.Sources = InsCals
            pcal.doFitI[0] = True
        else:
            pcal.Sources = [InsCals]
            # NOTE(review): when InsCals is a single string this loop iterates
            # its characters; harmless in practice because every doFitI entry
            # is set True again below, but confirm the intended indentation.
            i = 0
            for s in InsCals:
                pcal.doFitI[i] = True
                i += 1
        # Polarization fixed?
        if InsCalPoln:
            if type(InsCals)==list:
                n = len(InsCals)
            else:
                n = 1
            for i in range(0,n):
                if InsCalPoln[i][0]>=0.0:
                    # PPol >= 0 => use the given source polarization
                    pcal.doFitPol[i] = False
                    pcal.PPol[i]    = InsCalPoln[i][0]
                    pcal.RLPhase[i] = InsCalPoln[i][1]
                    pcal.RM[i]      = InsCalPoln[i][2]
                else:
                    # PPol < 0 => fit the source polarization
                    pcal.doFitPol[i] = True
        elif fixPoln:
            if type(InsCals)==list:
                i = 0
                for s in InsCals:
                    pcal.doFitPol[i] = False
                    i += 1
            else:
                pcal.doFitPol[0] = False
        pcal.doCalib = doCalib
        pcal.gainUse = gainUse
        pcal.doBand = doBand
        pcal.BPVer = BPVer
        pcal.flagVer = flagVer
        pcal.solnType = solType
        pcal.solInt = solInt
        pcal.ChInc = ChInc
        pcal.ChWid = ChWid
        pcal.refAnt = refAnt
        pcal.doFitRL = doFitRL
        pcal.doFitOri = doFitOri
        pcal.prtLv = 2
        pcal.PDSoln = 1
        pcal.CPSoln = 1
        pcal.nThreads = nThreads
        # Always fit Stokes I for every entry
        for i in range(0,len(pcal.doFitI)):
            pcal.doFitI[i] = True
        pcal.taskLog = logfile
        # NOTE(review): this fill starts at index 1, leaving noScrat[0]
        # untouched -- confirm whether this is a deliberate (1-based AIPS)
        # convention or an off-by-one.
        i = 1;
        for d in noScrat:
            pcal.noScrat[i] = d
            i += 1
        if debug:
            # .i prints the task inputs
            pcal.i
            pcal.debug = debug
        # Trap failure
        try:
            if not check:
                # .g runs the task
                pcal.g
        except Exception as exception:
            print(exception)
            mess = "PCal Failed retCode="+str(pcal.retCode)
            printMess(mess, logfile)
            return 1
        else:
            pass
    # end instrumental poln cal
    return 0
    # End EVLAPolCal | 5,324,630 |
def harvest(ctx, mode, reset=False, no_promote=False, version=None):
    """
    Starts a harvest tasks on the AWS container cluster or localhost
    """
    command = ["python", "manage.py", "run_harvest"]
    # Translate boolean options into CLI flags, preserving flag order.
    for flag, enabled in (("--reset", reset), ("--no-promote", no_promote)):
        if enabled:
            command.append(flag)
    # A reset run spins up extra workers to reprocess everything.
    run_harvester_task(ctx, mode, command, version=version, extra_workers=reset)
def check_and_store(codechecker_cfg, test_project_name, test_project_path,
                    clean_project=True):
    """
    Check a test project and store the results into the database.

    :checkers parameter should be a list of enabled or disabled checkers
    Example: ['-d', 'deadcode.DeadStores']
    """
    output_dir = codechecker_cfg['reportdir'] \
        if 'reportdir' in codechecker_cfg \
        else os.path.join(codechecker_cfg['workspace'], 'reports')
    build_cmd = project.get_build_cmd(test_project_path)
    if clean_project:
        ret = project.clean(test_project_path)
        if ret:
            return ret
    check_cmd = ['CodeChecker', 'check',
                 '-o', output_dir,
                 '-b', build_cmd,
                 '--quiet']
    # Optional configuration is appended to the command only when present.
    suppress_file = codechecker_cfg.get('suppress_file')
    if suppress_file:
        check_cmd.extend(['--suppress', suppress_file])
    skip_file = codechecker_cfg.get('skip_file')
    if skip_file:
        check_cmd.extend(['--skip', skip_file])
    clean = codechecker_cfg.get('clean')
    if clean:
        check_cmd.extend(['--clean'])
    analyzer_config = codechecker_cfg.get('analyzer_config')
    if analyzer_config:
        check_cmd.append('--analyzer-config')
        check_cmd.extend(analyzer_config)
    check_cmd.extend(codechecker_cfg['checkers'])
    try:
        print("RUNNING CHECK")
        print(check_cmd)
        # NOTE(review): subprocess.call() never raises CalledProcessError
        # (only check_call / run(check=True) do), so the handler below is
        # unreachable and a nonzero exit here is silently ignored. This may
        # be intentional ('check' exits nonzero when it finds reports) --
        # confirm before "fixing".
        subprocess.call(
            check_cmd,
            cwd=test_project_path,
            env=codechecker_cfg['check_env'],
            encoding="utf-8",
            errors="ignore")
    except CalledProcessError as cerr:
        print("Failed to call:\n" + ' '.join(cerr.cmd))
        return cerr.returncode
    store_cmd = ['CodeChecker', 'store', '-n', test_project_name,
                 output_dir,
                 '--url', env.parts_to_url(codechecker_cfg)]
    tag = codechecker_cfg.get('tag')
    if tag:
        store_cmd.extend(['--tag', tag])
    force = codechecker_cfg.get('force')
    if force:
        store_cmd.extend(['--force'])
    description = codechecker_cfg.get('description')
    if description:
        # Quotes keep a multi-word description as one shlex token below.
        store_cmd.extend(['--description', "'" + description + "'"])
    try:
        print('STORE' + ' '.join(store_cmd))
        # NOTE(review): same dead-handler issue as above; the function
        # returns 0 regardless of the store subprocess's exit status.
        subprocess.call(
            shlex.split(
                ' '.join(store_cmd)),
            cwd=test_project_path,
            env=codechecker_cfg['check_env'],
            encoding="utf-8",
            errors="ignore")
        return 0
    except CalledProcessError as cerr:
        print("Failed to call:\n" + ' '.join(cerr.cmd))
        return cerr.returncode | 5,324,632 |
def ansyArticle(data):
    """Analyze an error log file: collect the article id from every 5th line
    (lines 1, 6, 11, ...), matching 'view_<id>.aspx' in each such line."""
    pattern = re.compile(r'view_(\d+)\.aspx')
    ids = []
    with open(data, 'r') as fp:
        for lineno, line in enumerate(fp, start=1):
            # Records are 5 lines long; the URL is on the first line of each.
            if lineno % 5 == 1:
                ids.append(pattern.search(line).group(1))
    return ids
def climb_stairs(n):
    """ Number of distinct ways to climb n stairs when each move climbs
    either 1 or 2 steps; this count is exactly the n-th Fibonacci number.

    Args:
        n integer
    Returns:
        integer
    Preconditions:
        n >= 0
    """
    # The recurrence ways(n) = ways(n-1) + ways(n-2) is Fibonacci's.
    paths = fib(n)
    return paths
def randomInt(bit_length, seed):
    """Return a pseudo-random integer with at most ``bit_length`` bits,
    derived deterministically from ``seed``.

    BUG FIX: the hex-digit count must be an integer; Python 3's true
    division (``/``) produced a float, so use floor division instead.
    """
    # ceil(bit_length / 4) hex digits are enough to cover bit_length bits
    s = randomHexString((bit_length + 3) // 4, seed)
    return int(s, 16) % (1 << bit_length)
def test_logout(mock_log_out, client, session):
    """
    GET /logout should log the user out and redirect to the home page.
    """
    response = client.get('/logout', follow_redirects=True)
    mock_log_out.assert_called_once()
    assert response.status_code == 200
    assert request.endpoint == 'main.home'
def config_init():
    """Register this script's configuration options with weechat."""
    global KEEP_ALIVE_TIMEOUT
    config = {
        "hide_inactive": ("off", "Hide inactive buffers"),
        "hide_private": ("off", "Hide private buffers"),
        "unhide_low": ("off",
                       "Unhide a buffer when a low priority message (like JOIN, PART, etc.) has been received"),
        "exemptions": ("", "An enumeration of buffers that should not get hidden"),
        "keep_open": ("off", "Keep a buffer open for a short amount of time"),
        "keep_open_timeout": ("60 * 1000", "Timeout in milliseconds for how long a selected buffer should be kept around"),
    }
    for option, (default, description) in config.items():
        # Only seed the default when the option has never been set.
        if weechat.config_get_plugin(option) == "":
            weechat.config_set_plugin(option, default)
        weechat.config_set_desc_plugin(
            option, '{} (default: "{}")'.format(description, default))
    weechat.hook_config("plugins.var.python.buffer_autohide.keep_open_timeout", "timeout_config_changed_cb", "")
    if weechat.config_is_set_plugin("keep_open_timeout"):
        KEEP_ALIVE_TIMEOUT = eval_expr(weechat.config_get_plugin("keep_open_timeout"))
def best_fit_distribution(data, bins=200, ax=None):
    """Find the best fitting distribution to the data.

    Fits every candidate scipy.stats distribution to *data* and scores each
    by the sum of squared errors between the fitted PDF and the data's
    histogram; the lowest SSE wins.

    Args:
        data: 1-D array-like of samples.
        bins: number of histogram bins used for the comparison.
        ax: optional matplotlib axes; when given, each candidate PDF is
            plotted on it as a labelled line.

    Returns:
        (name, params): the winning distribution's name and its fitted
        parameter tuple (shape args..., loc, scale).
    """
    # Get histogram of original data
    y, x = np.histogram(data, bins=bins, density=True)
    # Convert bin edges to bin centers
    x = (x + np.roll(x, -1))[:-1] / 2.0
    # Distributions to check
    DISTRIBUTIONS = [
        st.alpha,
        st.anglit,
        st.arcsine,
        st.argus,
        st.beta,
        st.betaprime,
        st.bradford,
        st.burr,
        st.burr12,
        st.cauchy,
        st.chi,
        st.chi2,
        st.cosine,
        st.crystalball,
        st.dgamma,
        st.dweibull,
        st.erlang,
        st.expon,
        st.exponnorm,
        st.exponweib,
        st.exponpow,
        st.f,
        st.fatiguelife,
        st.fisk,
        st.foldcauchy,
        st.foldnorm,
        st.genlogistic,
        st.gennorm,
        st.genpareto,
        st.genexpon,
        st.genextreme,
        st.gausshyper,
        st.gamma,
        st.gengamma,
        st.genhalflogistic,
        st.geninvgauss,
        st.gilbrat,
        st.gompertz,
        st.gumbel_r,
        st.gumbel_l,
        st.halfcauchy,
        st.halflogistic,
        st.halfnorm,
        st.halfgennorm,
        st.hypsecant,
        st.invgamma,
        st.invgauss,
        st.invweibull,
        st.johnsonsb,
        st.johnsonsu,
        st.kappa4,
        st.kappa3,
        st.ksone,
        st.kstwo,
        st.kstwobign,
        st.laplace,
        st.laplace_asymmetric,
        st.levy,
        st.levy_l,
        # st.levy_stable,  # unstable in v1.6.0
        st.logistic,
        st.loggamma,
        st.loglaplace,
        st.lognorm,
        st.loguniform,
        st.lomax,
        st.maxwell,
        st.mielke,
        st.moyal,
        st.nakagami,
        st.ncx2,
        st.ncf,
        st.nct,
        st.norm,
        st.norminvgauss,
        st.pareto,
        st.pearson3,
        st.powerlaw,
        st.powerlognorm,
        st.powernorm,
        st.rdist,
        st.rayleigh,
        st.rice,
        st.recipinvgauss,
        st.semicircular,
        st.skewnorm,
        st.t,
        st.trapezoid,
        st.triang,
        st.truncexpon,
        st.truncnorm,
        st.tukeylambda,
        st.uniform,
        # st.vonmises,  # does not work in v1.6.0
        st.vonmises_line,
        st.wald,
        st.weibull_min,
        st.weibull_max,
        st.wrapcauchy,
    ]
    # Best holders
    best_distribution = st.norm
    best_params = (0.0, 1.0)
    best_sse = np.inf
    # Estimate distribution parameters from data
    for distribution in tqdm(DISTRIBUTIONS):
        # Try to fit the distribution; any distribution that cannot be fit
        # to this data is deliberately skipped (best-effort search).
        try:
            # Ignore warnings from data that can't be fit
            with warnings.catch_warnings():
                warnings.filterwarnings("ignore")
                # fit dist to data
                params = distribution.fit(data)
                # Separate parts of parameters
                arg = params[:-2]
                loc = params[-2]
                scale = params[-1]
                # Calculate fitted PDF and error with fit in distribution
                pdf = distribution.pdf(x, loc=loc, scale=scale, *arg)
                sse = np.sum(np.power(y - pdf, 2.0))
                # if ax is passed, add to plot
                try:
                    if ax:
                        pd.Series(pdf, x).plot(
                            label=distribution.name, legend=True, ax=ax
                        )
                except Exception:
                    pass
                # identify if this distribution is better
                if best_sse > sse > 0:
                    best_distribution = distribution
                    best_params = params
                    best_sse = sse
        except Exception:
            pass
    return best_distribution.name, best_params | 5,324,638 |
def display_last_contracts(
    past_transaction_days: int = 2,
    num: int = 20,
    sum_contracts: bool = False,
    export: str = "",
    external_axes: Optional[List[plt.Axes]] = None,
):
    """Last government contracts [Source: quiverquant.com]

    Parameters
    ----------
    past_transaction_days: int
        Number of days to look back
    num: int
        Number of contracts to show
    sum_contracts: bool
        Flag to show total amount of contracts given out.
    export: str
        Format to export data
    external_axes : Optional[List[plt.Axes]], optional
        External axes (1 axis is expected in the list), by default None
    """
    df_contracts = quiverquant_model.get_government_trading("contracts")
    if df_contracts.empty:
        console.print("No government contracts found\n")
        return
    # BUG FIX: sort_values() returns a new DataFrame; the original call
    # discarded the result, so the table was never actually sorted.
    df_contracts = df_contracts.sort_values("Date", ascending=False)
    df_contracts["Date"] = pd.to_datetime(df_contracts["Date"])
    df_contracts.drop_duplicates(inplace=True)
    df = df_contracts.copy()
    # keep only the most recent `past_transaction_days` distinct dates
    df_contracts = df_contracts[
        df_contracts["Date"].isin(df_contracts["Date"].unique()[:past_transaction_days])
    ]
    df_contracts = df_contracts[["Date", "Ticker", "Amount", "Description", "Agency"]][
        :num
    ]
    # wrap long descriptions so the printed table stays readable
    df_contracts["Description"] = df_contracts["Description"].apply(
        lambda x: "\n".join(textwrap.wrap(x, 50))
    )
    print_rich_table(
        df_contracts,
        headers=list(df_contracts.columns),
        show_index=False,
        title="Last Government Contracts",
    )
    if sum_contracts:
        # This plot has 1 axis
        if not external_axes:
            _, ax = plt.subplots(figsize=plot_autoscale(), dpi=PLOT_DPI)
        elif is_valid_axes_count(external_axes, 1):
            (ax,) = external_axes
        else:
            return
        df["Date"] = pd.to_datetime(df["Date"]).dt.date
        df.groupby("Date").sum().div(1000).plot(kind="bar", rot=0, ax=ax)
        ax.set_ylabel("Amount ($1k)")
        ax.set_title("Total amount of government contracts given")
        theme.style_primary_axis(ax)
        if not external_axes:
            theme.visualize_output()
    export_data(export, os.path.dirname(os.path.abspath(__file__)), "lastcontracts", df)
def cumulative_completion_rate(completion_times, inc, top):
    """
    Gets the cumulative completion rate data from an array of completion times.

    Starting from zero, time is incremented by `inc` until `top` is reached
    (inclusive) and the number of timestamps in `completion_times` strictly
    under the current time is recorded. The timestamps can be obtained from
    `completions()`.

    Parameters
    ----------
    completion_times : array
        List of completion times (one per trial).
    inc : float
        Amount of time increment per check.
    top : float
        Largest time at which to check the completion rate.

    Returns
    -------
    cutoffs : ndarray
        Array of cutoff times at which the completion rate is checked.
    counts : ndarray
        The number of trials completed faster than the corresponding cutoff.
    """
    ts = np.sort(np.asarray(completion_times))
    cutoffs = np.arange(0, top + inc, inc)
    # searchsorted(side="left") gives, for each cutoff, the number of sorted
    # times strictly below it: O((n+m) log n) instead of the original O(n*m)
    # per-cutoff scan. Cast to float to keep the original dtype (np.zeros).
    counts = np.searchsorted(ts, cutoffs, side="left").astype(float)
    return cutoffs, counts
def _bfs_relational(adj, roots, max_nodes_per_hop=None):
    """
    BFS for graphs with multiple edge types. Yields one level set per hop;
    each entry corresponds to the relations specified by adj.
    Modified from dgl.contrib.data.knowledge_graph to accomodate node sampling.

    Args:
        adj: adjacency structure consumed by _get_neighbors.
        roots: iterable of starting nodes (hop 0, not yielded).
        max_nodes_per_hop: optional cap; each level is randomly subsampled
            down to this many nodes.
    """
    visited = set()
    current_lvl = set(roots)
    while current_lvl:
        visited.update(current_lvl)
        next_lvl = _get_neighbors(adj, current_lvl)
        next_lvl -= visited  # set difference: drop already-seen nodes
        if max_nodes_per_hop and max_nodes_per_hop < len(next_lvl):
            # BUG FIX: random.sample() rejects sets on Python 3.11+
            # (deprecated since 3.9); sample from a list instead.
            next_lvl = set(random.sample(list(next_lvl), max_nodes_per_hop))
        yield next_lvl
        current_lvl = set(next_lvl)
def test_get_range_count_positive(loaded_store: DynamoStore, key_type_and_group) -> None:
    """
    A range get must exclude blocks lying exactly on the boundary.
    """
    key_type, group = key_type_and_group
    key = Key(key_type, 'user1', group)
    range_start = datetime(2018, 3, 7, 19, 35, 31, 0, timezone.utc)
    blocks = loaded_store.get_range(key, range_start, None, 2)
    assert len(blocks) == 2
    expected = datetime(2018, 3, 7, 20, 35, 35, 0, timezone.utc).isoformat()
    assert blocks[0][1]['_start_time'] == expected
def fitPeak(stack, slices, width, startingfit, **kwargs):
    """
    Method to fit a peak through the stack.

    The method will track the peak through the stack, assuming that moves
    are relatively small from one slice to the next

    Parameters
    ----------
    stack : array-like
        3D image stack indexed as (slice, y, x).
    slices : iterator
        an iterator which dictates which slices to fit, should yeild
        integers only
    width : integer
        width of fitting window
    startingfit : dict
        fit coefficients
    **kwargs
        forwarded to Gauss2D.optimize_params

    Returns
    -------
    list : list of dicts
        A list of dictionaries containing the best fits. Easy to turn into
        a DataFrame
    """
    # set up our variable to return
    toreturn = []
    # grab the starting fit parameters
    popt_d = startingfit.copy()
    y0 = int(round(popt_d["y0"]))
    x0 = int(round(popt_d["x0"]))
    # the number of coefficients (halved because each has an uncertainty
    # entry) determines which Gauss2D model variant to use
    if len(popt_d) == 6 * 2:
        modeltype = "norot"
    elif len(popt_d) == 5 * 2:
        modeltype = "sym"
    elif len(popt_d) == 7 * 2:
        modeltype = "full"
    else:
        raise ValueError("Dictionary is too big {}".format(popt_d))
    for s in slices:
        # make the slice
        try:
            myslice = slice_maker((y0, x0), width)
        except RuntimeError as e:
            logger.warning("Fit window moved to edge of ROI")
            break
        else:
            # pull the starting values from it
            ystart = myslice[0].start
            xstart = myslice[1].start
            # insert the z-slice number
            myslice = (s,) + myslice
            # set up the fit and perform it using last best params
            sub_stack = stack[myslice]
            if sub_stack.size == 0:
                # the fit window has moved to the edge, break
                logger.warning("Fit window moved to edge of ROI")
                break
            fit = Gauss2D(sub_stack)
            # move our guess coefs back into the window
            popt_d["x0"] -= xstart
            popt_d["y0"] -= ystart
            # leave this in for now for easier debugging in future.
            try:
                fit.optimize_params(popt_d, **kwargs)
            except TypeError as e:
                print(repr(myslice))
                raise e
            # if there was an error performing the fit, try again without
            # a guess
            if fit.error:
                fit.optimize_params(modeltype=modeltype, **kwargs)
            # if there's not an error update center of fitting window and
            # move on to the next fit
            if not fit.error:
                popt_d = fit.all_params_dict()
                # translate window-relative center back to stack coordinates
                popt_d["x0"] += xstart
                popt_d["y0"] += ystart
                popt_d["slice"] = s
                # calculate the apparent noise as the standard deviation
                # of what's the residuals of the fit
                popt_d["noise"] = (sub_stack - fit.fit_model).std()
                toreturn.append(popt_d.copy())
                y0 = int(round(popt_d["y0"]))
                x0 = int(round(popt_d["x0"]))
            else:
                # if the fit fails, make sure to _not_ update positions.
                bad_fit = fit.all_params_dict()
                bad_fit["slice"] = s
                # noise of a failed fit is not really useful
                popt_d["noise"] = np.nan
                toreturn.append(bad_fit.copy())
    return toreturn | 5,324,643 |
def sbol_empty_space (ax, type, num, start, end, prev_end, scale, linewidth, opts):
    """ Built-in empty space renderer: draws nothing, just advances the
    rendering cursor by ``x_extent`` (optionally writing a centered label).
    """
    # Defaults, overridable through opts
    zorder_add = 0.0
    x_extent = 12.0
    if opts is not None:
        if 'zorder_add' in opts:
            zorder_add = opts['zorder_add']
        if 'x_extent' in opts:
            x_extent = opts['x_extent']
    # The empty region starts where the previous glyph ended.
    final_start = prev_end
    final_end = final_start + x_extent
    if opts is not None and 'label' in opts:
        # Center the label regardless of drawing direction.
        lo, hi = sorted((final_start, final_end))
        write_label(ax, opts['label'], lo + ((hi - lo) / 2.0), opts=opts)
    return prev_end, max(final_start, final_end)
def find_max_1(array: list) -> int:
    """ Brute-force maximum: an element is the answer when no other element
    exceeds it. Deliberately O(n^2) (pairwise comparison variant).

    :param array: non-empty list of integers
    :return: the largest integer in the list
    """
    best = array[0]
    for candidate in array:
        # candidate is a maximum iff nothing in the list is larger
        if all(other <= candidate for other in array):
            best = candidate
    return best
def make_element(builder, tag, content):
    """Append a complete <tag>content</tag> element to *builder*
    (open tag, text data, close tag)."""
    attributes = {}
    builder.start(tag, attributes)
    builder.data(content)  # Must be UTF-8 encoded
    builder.end(tag)
def test_get_profile_returns_none_with_wrong_profile(db_session, dummy_request, profile):
    """get_profile yields None when no stored profile matches the username."""
    from pyramid_todo.views.main import get_profile
    result = get_profile(dummy_request, 'nhuntwalker')
    assert result is None
def radec2xy(hdr, ra, dec):
    """Transforms sky coordinates (RA and Dec) to pixel coordinates (x and y).

    Input:
     - hdr: FITS image header
     - ra <float> : Right ascension value in degrees
     - dec <float>: Declination value in degrees

    Output:
     - (x,y) <tuple>: pixel coordinates
    """
    # BUG FIX: the original wrote `wcs = wcs.WCS(hdr)`, which makes `wcs` a
    # local name and raises UnboundLocalError before the module can be read;
    # bind the instance to a distinct name instead.
    w = wcs.WCS(hdr)
    skycrd = np.array([[ra, dec]])
    # NOTE(review): wcs_sky2pix is the pywcs-era API name; astropy renamed
    # it to wcs_world2pix -- confirm which wcs package this module imports.
    pixcrd = w.wcs_sky2pix(skycrd, 1)
    x = pixcrd[0][0]
    y = pixcrd[0][1]
    return (x, y)
def getStepsBySerialNoAndProcessID(SerialNo, ProcessID):
    """
    Fetch all steps of a form, given its workflow serial number and form id.

    :param SerialNo: workflow serial number
    :param ProcessID: form id
    :return: list of {"name": step name, "value": step id, "state": step finished-state} dicts
    """
    raw = Raw_sql()
    # SECURITY NOTE(review): SerialNo and ProcessID are interpolated directly
    # into the SQL text; if either can carry user input this query is
    # SQL-injectable. Prefer a parameterized query if Raw_sql supports one.
    raw.sql = "SELECT a.StepID as value, b.StepName as name, Finished as state FROM RMI_TASK_PROCESS_STEP a WITH(NOLOCK) JOIN RMI_STEP b WITH(NOLOCK) "\
        " ON a.StepID = b.StepID WHERE SerialNo = '%s' "\
        " AND ProcessID = '%s'" % (SerialNo, ProcessID)
    res, columns = raw.query_all(needColumnName=True)
    return translateQueryResIntoDict(columns, res) | 5,324,649 |
def async_process_queue_worker(func, q_in, q_out, *args, **kwargs):
    """Entry point for a worker sub-process: run *func* to completion on a
    fresh event loop, with both queues rebound to this process."""
    # A forked/spawned process must not reuse the parent's event loop.
    loop = asyncio.new_event_loop()
    asyncio.set_event_loop(loop)
    # Rebind the queues' executor so they operate correctly in this process.
    pool = ThreadPoolExecutor()
    for queue in (q_in, q_out):
        queue.set_process_variables(pool)
    loop.run_until_complete(func(q_in, q_out, *args, **kwargs))
def _split_train_dataset(y, tx, jet_num_idx=22):
"""Split the given training dataset into three distinct datasets.
Datasets are split depending on the value of the 'PRI_jet_num' column,
since this column dictates the -999 values for all the columns containing
the latter (except for the 'DER_mass_MMC' column at index 0).
"""
jet0_tx = tx[tx[:, jet_num_idx] == 0]
jet0_tx = np.delete(jet0_tx, [jet_num_idx, 4, 5, 6, 12, 23, 24, 25, 26, 27, 28, 29], axis=1)
jet0_y = y[tx[:, jet_num_idx] == 0]
jet1_tx = tx[tx[:, jet_num_idx] == 1]
jet1_tx = np.delete(jet1_tx, [jet_num_idx, 4, 5, 6, 12, 26, 27, 28], axis=1)
jet1_y = y[tx[:, jet_num_idx] == 1]
jetR_tx = tx[tx[:, jet_num_idx] >= 2]
jetR_y = y[tx[:, jet_num_idx] >= 2]
return jet0_y, jet1_y, jetR_y, jet0_tx, jet1_tx, jetR_tx | 5,324,651 |
def bear(
        transitions=None,
        # Common settings
        discount_factor=0.99,
        # Adam optimizer settings
        lr_q=1e-3,
        lr_pi=1e-3,
        lr_enc=1e-3,
        lr_dec=1e-3,
        # Training settings
        minibatch_size=100,
        polyak_rate=0.005,
        # BEAR settings
        num_qs=2,
        kernel_type="laplacian",
):
    """
    Bootstrapping error accumulation reduction (BEAR) control preset
    Args:
        transitions:
            dictionary of transitions generated by cpprb.ReplayBuffer.get_all_transitions()
        discount_factor (float): Discount factor for future rewards.
        lr_q (float): Learning rate for the Q network.
        lr_pi (float): Learning rate for the policy network.
        lr_enc (float): Learning rate for the encoder.
        lr_dec (float): Learning rate for the decoder.
        minibatch_size (int): Number of experiences to sample in each training update.
        polyak_rate (float): Speed with which to update the target network towards the online network.
        num_qs (int): Number of q functions for ensemble.
        kernel_type (str): Kernel used by BEAR (e.g. "laplacian"); forwarded to the agent.
    Returns:
        A factory function mapping an environment to a configured BEAR agent.
    """
    def _bear(env):
        # Offline preset: learning happens purely from the stored transitions.
        disable_on_policy_mode()
        device = get_device()
        # Ensemble of Q networks sharing one optimizer, with a Polyak target.
        q_models = nn.ModuleList([fc_q(env) for _ in range(num_qs)]).to(device)
        qs_optimizer = Adam(q_models.parameters(), lr=lr_q)
        qs = EnsembleQContinuous(
            q_models,
            qs_optimizer,
            target=PolyakTarget(polyak_rate),
            name='qs'
        )
        policy_model = fc_soft_policy(env).to(device)
        policy_optimizer = Adam(policy_model.parameters(), lr=lr_pi)
        policy = SoftDeterministicPolicy(
            policy_model,
            policy_optimizer,
            env.action_space,
            target=PolyakTarget(polyak_rate),
        )
        # VAE (encoder/decoder) models the behavior policy, as in BCQ.
        latent_dim = env.action_space.shape[0] * 2
        encoder_model = fc_bcq_encoder(env, latent_dim=latent_dim).to(device)
        encoder_optimizer = Adam(encoder_model.parameters(), lr=lr_enc)
        encoder = BcqEncoder(
            model=encoder_model,
            latent_dim=latent_dim,
            optimizer=encoder_optimizer,
            name="encoder",
        )
        decoder_model = fc_bcq_decoder(env, latent_dim=latent_dim).to(device)
        decoder_optimizer = Adam(decoder_model.parameters(), lr=lr_dec)
        decoder = BcqDecoder(
            model=decoder_model,
            latent_dim=latent_dim,
            space=env.action_space,
            optimizer=decoder_optimizer,
            name="decoder",
        )
        # Pre-load the fixed offline dataset into the replay buffer.
        replay_buffer = ExperienceReplayBuffer(1e7, env)
        if transitions is not None:
            samples = replay_buffer.samples_from_cpprb(
                transitions, device="cpu")
            replay_buffer.store(samples)
        set_replay_buffer(replay_buffer)
        return BEAR(
            qs=qs,
            encoder=encoder,
            decoder=decoder,
            policy=policy,
            kernel_type=kernel_type,
            discount_factor=discount_factor,
            minibatch_size=minibatch_size,
        )
    return _bear
def benchmark(setup=None, number=10, repeat=3, warmup=5):
    """A parametrized decorator to benchmark the test.
    Setting up the bench can happen in the normal setUp,
    which is applied to all benches identically, and additionally
    the setup parameter, which is bench-specific.
    Parameters
    ----------
    setup : function
        A function to call once to set up the test.
    number : int
        The number of loops of repeat repeats to run.
    repeat : int
        The number of repeats in each loop.
    warmup : int
        The number of warmup runs of the function.
    """
    def real_decorator(func):
        @wraps(func)
        def wrapper(self, *args, **kwargs):
            if setup is not None:
                setup(self)
            # Warm caches / lazy initialization before timing anything.
            for i in range(warmup):
                func(self, *args, **kwargs)
            clock_time_starts = np.zeros((number, repeat))
            clock_time_ends = np.zeros((number, repeat))
            for i in range(number):
                for j in range(repeat):
                    clock_time_starts[i, j] = perf_counter()
                    func(self, *args, **kwargs)
                    clock_time_ends[i, j] = perf_counter()
            # Best (min) wall-clock time among each loop's repeats, following
            # timeit's convention of reporting the least-noisy measurement.
            clock_times = (clock_time_ends - clock_time_starts).min(axis=1)
            print(
                "[{}] with {} loops, best of {}:".format(
                    _get_bench_name(func), number, repeat
                )
            )
            print(
                "\tmin {:4s} per loop".format(
                    _timeitlike_time_format(clock_times.min())
                )
            )
            print(
                "\tavg {:4s} per loop".format(
                    _timeitlike_time_format(clock_times.mean())
                )
            )
            # NOTE(review): the wrapped function's return value is discarded;
            # confirm callers never rely on it.
        return wrapper
    return real_decorator
def make_dummy_protein_sequence(
        n_supporting_variant_reads,
        n_supporting_variant_sequences,
        n_supporting_reference_transcripts,
        n_total_variant_sequences=None,
        n_total_variant_reads=None,
        n_total_reference_transcripts=None,
        gene=["TP53"],
        amino_acids="MKHW",  # ATG=M|AAA=K|CAC=H|TGG=W
        cdna_sequence="CCCATGAAACACTGGTAG",
        variant_cdna_interval_start=8,  # assuming variant was AAC>AAA
        variant_cdna_interval_end=9,
        variant_aa_interval_start=1,
        variant_aa_interval_end=2,
        number_mismatches=1):
    """
    Creates ProteinSequence object with None filled in for most fields.

    The n_total_* counts default to their n_supporting_* counterparts when
    not given; supporting counts must never exceed the totals.
    """
    if n_total_variant_reads is None:
        n_total_variant_reads = n_supporting_variant_reads
    if n_total_variant_sequences is None:
        n_total_variant_sequences = n_supporting_variant_sequences
    if n_total_reference_transcripts is None:
        # BUGFIX: was a self-assignment (`= n_total_reference_transcripts`),
        # which left the value as None and broke the assertion below.
        n_total_reference_transcripts = n_supporting_reference_transcripts
    assert n_supporting_variant_sequences <= n_supporting_variant_reads
    assert n_supporting_variant_sequences <= n_total_variant_sequences
    assert n_supporting_reference_transcripts <= n_total_reference_transcripts
    # One translation per (reference transcript, variant sequence) pair.
    n_translations = n_total_reference_transcripts * n_total_variant_sequences
    translation = make_dummy_translation()
    return ProteinSequence(
        translations=[translation] * n_translations,
        overlapping_reads=[None] * n_total_variant_reads,
        ref_reads=[],
        alt_reads=[None] * n_total_variant_reads,
        alt_reads_supporting_protein_sequence=[None] * n_supporting_variant_reads,
        transcripts_supporting_protein_sequence=[None] * n_supporting_reference_transcripts,
        transcripts_overlapping_variant=[None] * n_supporting_reference_transcripts,
        gene=gene,
        amino_acids=amino_acids,
        variant_aa_interval_start=variant_aa_interval_start,
        variant_aa_interval_end=variant_aa_interval_end,
        ends_with_stop_codon=translation.ends_with_stop_codon,
        frameshift=translation.frameshift)
def parse_file(file):
    """Parse a YAML file containing the bingo board configuration.

    Args:
        file: path of the YAML file to read (UTF-8).

    Returns:
        The parsed configuration object.

    Exits the process with an error message if the YAML is malformed.
    """
    with open(file=file, mode="r", encoding='UTF-8') as stream:
        try:
            config = yaml.safe_load(stream)
        except YAMLError as err:
            # BUGFIX: `"..." + err` raised TypeError (str + exception);
            # format the exception into the message instead.
            sys.exit(f"Failed to parse YAML: {err}")
    return config
def process_dataset(path):
    """Maps entities and relations to ids and saves corresponding pickle arrays.
    Args:
        path: Path to dataset directory.
    Returns:
        examples: Dictionary mapping splits to with Numpy array contatining
            corresponding KG triples.
        filters: Dictionary containing filters for lhs and rhs predictions.
    """
    lhs_skip = collections.defaultdict(set)
    rhs_skip = collections.defaultdict(set)
    # BUGFIX: previously called get_idx(dataset_path), referencing an
    # undefined name instead of the `path` parameter.
    ent2idx, rel2idx = get_idx(path)
    examples = {}
    for split in ['train', 'valid', 'test']:
        dataset_file = os.path.join(path, split)
        examples[split] = to_np_array(dataset_file, ent2idx, rel2idx)
        lhs_filters, rhs_filters = get_filters(examples[split], len(rel2idx))
        # NOTE(review): dict.update replaces a key's filter set when the same
        # key appears in a later split rather than merging them -- confirm
        # that is the intended semantics.
        lhs_skip.update(lhs_filters)
        rhs_skip.update(rhs_filters)
    filters = {'lhs': lhs_skip, 'rhs': rhs_skip}
    return examples, filters
def prepare_data():
    """Data processing and data partitioning.

    Reads every annotated document under ``Config.annotation_data_dir``,
    shuffles the documents, splits them 80/10/10 into train/valid/test,
    and writes each split under ``Config.ner_data_dir``.

    Returns:
        dict: per-entity sample counts collected while reading annotations.
    """
    prapare_ner = PrepareNer()
    # entity samples_statistics (filled in by get_annoteted_data)
    samples_statistics = defaultdict(int)
    dataset = []
    # Each .ann annotation file is paired with the .txt of the same name.
    for file_ann in Path(Config.annotation_data_dir).rglob("*.ann"):
        file_txt = str(file_ann.with_suffix('.txt'))
        sents, tags = prapare_ner.get_annoteted_data(file_txt, file_ann, samples_statistics)
        dataset.append((sents, tags))
    all_case_num = len(dataset)
    # 80% train / 10% valid / remainder test, split at document granularity.
    train_count = int(all_case_num * 0.8)
    valid_count = int(all_case_num * 0.1)
    test_count = all_case_num - train_count - valid_count
    # NOTE(review): random.shuffle is unseeded, so splits differ across runs.
    order = list(range(all_case_num))
    random.shuffle(order)
    train_dataset = [dataset[idx] for idx in order[:train_count]]
    valid_dataset = [dataset[idx] for idx in order[train_count:train_count + valid_count]]
    test_dataset = [dataset[idx] for idx in order[train_count + valid_count:]]
    train_samples_count = write_to_file(os.path.join(Config.ner_data_dir, 'train'), train_dataset)
    valid_samples_count = write_to_file(os.path.join(Config.ner_data_dir, 'valid'), valid_dataset)
    test_samples_count = write_to_file(os.path.join(Config.ner_data_dir, 'test'), test_dataset)
    print('\nall cases num: {}'.format(all_case_num))
    print("train cases: {}, samples: {}".format(train_count, train_samples_count))
    print("valid cases: {}, samples: {}".format(valid_count, valid_samples_count))
    print("test cases: {}, samples: {}".format(test_count, test_samples_count))
    return dict(samples_statistics)
def InlineEditor(item: Item, view, pos: Optional[Tuple[int, int]] = None) -> bool:
    """Show a small editor popup in the diagram. Makes for easy editing without
    resorting to the Element editor.
    In case of a mouse press event, the mouse position (relative to the
    element) are also provided.

    This default implementation is a no-op: it declines to handle the edit
    (returns False) so callers fall back to other editors.
    """
    return False
def getMetrics(trueLabels, predictedLabels):
    """Compute regression metrics from true labels and predictions.

    Note: despite the original wording, no confidence scores are used --
    only labels and point predictions.

    Returns:
        tuple: (MSE, MAE, MAPE, RMSE, PearsonR)
    """
    MSE = sklearn.metrics.mean_squared_error(trueLabels, predictedLabels, squared = True)
    MAE = sklearn.metrics.mean_absolute_error(trueLabels, predictedLabels)
    MAPE = sklearn.metrics.mean_absolute_percentage_error(trueLabels, predictedLabels)
    # squared=False yields the root of the mean squared error.
    RMSE = sklearn.metrics.mean_squared_error(trueLabels, predictedLabels, squared = False)
    # `correlation` is a module-level helper defined elsewhere in this file.
    PearsonR = correlation(true = trueLabels,
                           pred = predictedLabels)
    return MSE, MAE, MAPE, RMSE, PearsonR
def test_cartesian_to_zero_one():
    """
    Test to ensure values are mapped to [0, 1]
    """
    points = np.random.randn(2, 1000)
    mapped, _, _ = legacy.cartesian_to_zero_one(*points)
    assert ((mapped >= 0) & (mapped <= 1)).all()
def create_data(feed_slug):
    """Post Data
    Post a data point to a feed
    ---
    tags:
      - "Data Points"
    parameters:
      - name: feed_slug
        in: path
        type: string
        required: true
      - name: value
        in: body
        schema:
          type: object
          required:
            - value
          properties:
            value:
              type: stringboolnumber
              description: value of data to post. must be the same data type as feed kind
    responses:
      200:
        description: Success
        schema:
          type: object
          properties:
            message:
              type: string
            data:
              $ref: '#/definitions/Data'
      400:
        $ref: '#/responses/Error'
    """
    # Only the authenticated user's own feeds are addressable.
    feed = Feed.query.filter_by(slug=feed_slug, owner=current_user).first()
    if not feed:
        return jsonify(error="Feed doesn't exist!"), 400
    value = request.json.get("value", None)
    if value is None:
        return jsonify(error="Value is required."), 400
    # Reject values whose Python type does not match the feed's declared kind
    # ('number' -> int/float, 'boolean' -> bool, 'image' -> URL string).
    if (
        (
            feed.kind == "number"
            and not (isinstance(value, int) or isinstance(value, float))
        )
        or (feed.kind == "boolean" and not isinstance(value, bool))
        or (feed.kind == "image" and not validators.url(value))
    ):
        return (
            jsonify(
                error=f"Invalid value. Type '{feed.kind}' was expected but got '{value}'."
            ),
            400,
        )
    # Values are stored as their string representation with a UTC timestamp.
    data = Data(value=str(value), created=datetime.datetime.utcnow(), feed=feed)
    db.session.add(data)
    db.session.commit()
    return jsonify(message="Data posted!", data=data.to_dict()), 200
def save_vqc(model, path):
    """
    Saves the trained VQC model to a certain path (serialized with joblib).
    @model :: model object to persist.
    @path :: String of full path to save the model in.
    """
    joblib.dump(model, path)
    print("Trained model saved in: " + path)
async def get_hitokoto(*, c: Optional[str] = None) -> Result.TextResult:
    """Fetch a random 'hitokoto' (one-line quote) from v1.hitokoto.cn.

    :param c: optional category code forwarded as the API's ``c`` parameter.
    :return: TextResult whose result is the quote plus its source/author.
    """
    url = 'https://v1.hitokoto.cn'
    params = {
        'encode': 'json',
        'charset': 'utf-8'
    }
    if c is not None:
        params.update({'c': c})
    # BUGFIX: dict.update() returns None, so the previous
    # `headers = DEFAULT_HEADERS.update(...)` always produced headers=None.
    # Copy first so the shared default headers are not mutated either.
    headers = dict(HttpFetcher.DEFAULT_HEADERS)
    headers.update({'accept': 'application/json'})
    hitokoto_result = await HttpFetcher(flag='sign_hitokoto', headers=headers).get_json(url=url, params=params)
    if hitokoto_result.error:
        return Result.TextResult(error=True, info=hitokoto_result.info, result='')
    text = f'{hitokoto_result.result.get("hitokoto")}\n——《{hitokoto_result.result.get("from")}》'
    if hitokoto_result.result.get("from_who"):
        text += f' {hitokoto_result.result.get("from_who")}'
    return Result.TextResult(error=False, info='Success', result=text)
def img_unnormalize(src):
    """
    Undo normalization of an RGB image.

    :param src: normalized image array, RGB channel order.
    :return: unnormalized image as uint8.
    """
    restored = src.copy()
    # Reverse the (x - mean) / variance normalization in place on the copy.
    restored *= NORMALIZE_VARIANCE
    restored += NORMALIZE_MEAN
    return restored.astype(np.uint8)
def upilab5_9_6():
    """5.9.6. Exercise UpyLaB 5.26 -- antisymmetric matrices.

    A matrix M = {m_ij} of size n x n is antisymmetric when, for every pair
    of indices i, j, m_ij == -m_ji.

    Defines a boolean function ``antisymetrique(M)`` and checks it against
    known cases, e.g.:
        antisymetrique([[0, 1, 1], [-1, 0, 1], [-1, -1, 0]]) -> True
        antisymetrique([[0, 1], [1, 0]])                     -> False
        antisymetrique([[1, -2], [2, 1]])                    -> False
        antisymetrique([])                                   -> True
    """
    def antisymetrique(M):
        """Return True iff matrix M is antisymmetric (empty M counts as True)."""
        for li, ligne in enumerate(M):
            for co, val in enumerate(ligne):
                if val != -M[co][li]:
                    # Early exit: one violating pair settles the answer.
                    return False
        return True

    test = [([[0, 1, 1], [-1, 0, 1], [-1, -1, 0]], True),
            ([[0, 1], [1, 0]], False),
            ([[1, -2], [2, 1]], False),
            ([], True),
            ([[0, -1, -9, -1, -3], [1, 0, -9, -1, -4], [9, 9, 0, -5, -7], [1, 1, 5, 0, -9], [3, 4, 7, 9, 0]], True)
            ]
    rep = ["non ", ""]
    for M, r in test:
        print("La matrice : ")
        printMatrice(M)
        # BUGFIX: the original had a stray unary '+' across the line
        # continuation ("+ + rep[...]"), raising TypeError at runtime;
        # a space after "trouve" is also needed for readable output.
        print(" devrait être : " + rep[int(r)] + " antisymétrique et la fonction la trouve "
              + rep[int(antisymetrique(M))] + "antisymétrique")
def update(statement):
    """ Runs an execute command. It should be an UPDATE statement.
    Unlike `_insert` and `_query`, this function is public because
    there is no clean and efficient way to create a general purpose
    update function. This requires raw SQL to be written outside of
    `database.py`, for this case only.

    :param statement: callable taking a cursor and executing the UPDATE.
    """
    def update_action(conn, c):
        # Run the caller-provided statement, then commit in the same
        # connection so the change is persisted atomically.
        statement(c)
        conn.commit()
    _db_connect(update_action)
def test_backspace_cmd(vim_bot, text, cmd_list, cursor_pos):
    """Test backspace command.

    Feeds each entry of cmd_list into the vim command line -- strings are
    typed as text, non-strings are sent as key presses (e.g. backspace) --
    then checks the command line ended up empty and the editor cursor
    landed at cursor_pos.
    """
    _, _, editor, vim, qtbot = vim_bot
    editor.set_text(text)
    cmd_line = vim.get_focus_widget()
    for cmd in cmd_list:
        if isinstance(cmd, str):
            qtbot.keyClicks(cmd_line, cmd)
        else:
            qtbot.keyPress(cmd_line, cmd)
    assert cmd_line.text() == ""
    assert editor.textCursor().position() == cursor_pos
def info(request):
    """provide readable information for *request*.

    Builds "PATH_INFO?QUERY_STRING [additional info] " -- the query string
    and bracketed info are included only when non-empty.
    """
    qs = request.get('QUERY_STRING')
    # Adapt the request to IAdditionalInfo if an adapter is registered.
    aia = IAdditionalInfo(request, None)
    ai = aia and str(aia)
    return (request.get('PATH_INFO', '')
            + (qs and '?' + qs or '')
            + (ai and (' [%s] ' % ai) or '')
            )
def put(entity_pb, **options):
    """Store an entity in datastore.
    The entity can be a new entity to be saved for the first time or an
    existing entity that has been updated.
    Args:
        entity_pb (datastore_v1.types.Entity): The entity to be stored.
        options (Dict[str, Any]): Options for this request.
    Returns:
        tasklets.Future: Result will be completed datastore key
            (entity_pb2.Key) for the entity.
    """
    _check_unsupported_options(options)
    # Puts are coalesced into a shared commit batch for efficiency.
    batch = _get_batch(_CommitBatch, options)
    return batch.put(entity_pb)
def get_country_flag(country):
    """Returns the corresponding flag of a provided country.

    The lookup tolerates spacing/underscore and capitalization variants of
    the country name. Raises ValueError when no variant matches.
    """
    with nation_flag_info as flag_path_info:
        # BUGFIX: validation previously accepted title-cased variants but the
        # lookup used the raw input, so e.g. "south korea" passed validation
        # and then raised KeyError. Look up with whichever variant exists.
        candidates = (
            country,
            country.replace(' ', '_'),
            country.title().replace(' ', '_'),
            country.title().replace('_', ' '),
        )
        for key in candidates:
            if key in flag_path_info:
                # Read the flag image path and return the flag image in RGB.
                return cv2.cvtColor(cv2.imread(flag_path_info[key]), cv2.COLOR_BGR2RGB)
        raise ValueError(f"Received invalid nation {country}, try another one.")
def add_time_columns(df):
    """
    Derive numeric Month/Day/Hour columns from the string columns
    'YYYYMMDD' and 'hhmmss'. The frame is modified in place.

    @param df: the dataframe you want to add the columns
    @return: the same dataframe as input, but with new columns
    """
    df['Month'] = df['YYYYMMDD'].str[4:6].astype(np.float64)
    df['Day'] = df['YYYYMMDD'].str[6:8].astype(np.float64)
    df['Hour'] = df['hhmmss'].str[0:2].astype(np.float64)
    # BUGFIX: the docstring promised the dataframe back, but the function
    # returned None; returning df keeps in-place callers working too.
    return df
def create_graph_to_decode_and_normalize_image():
    """See file docstring.
    Returns:
        input: The placeholder to feed the raw bytes of an encoded image.
        y: A Tensor (the decoded, normalized image) to be fed to the graph.
    """
    # Scalar string placeholder: one encoded image's raw bytes per run.
    image = tf.placeholder(tf.string, shape=(), name='encoded_image_bytes')
    with tf.name_scope("preprocess"):
        y = tf.image.decode_image(image, channels=3)
        y = tf.cast(y, tf.float32)
        # Add a batch dimension, resize to the fixed model input size,
        # then standardize with the module-level MEAN/SCALE constants.
        y = tf.expand_dims(y, axis=0)
        y = tf.image.resize_bilinear(y, (IMAGE_HEIGHT, IMAGE_WIDTH))
        y = (y - MEAN) / SCALE
    return (image, y)
def stackbar(x, y, colors=None, **kwargs):
    """
    Given an array of vectors in y, draw a bar chart for each one stacked on
    the prior.

    ``y`` is indexed as ``y[series, position]``; extra kwargs are forwarded
    to ``plt.bar``. Positive and negative values are stacked separately so
    bars of opposite sign do not overlap.
    """
    x = np.asarray(x)
    if colors is None:
        colors = ["" for i in range(0, y.shape[0])]
    # Stack positive and negative separately
    for op in ("__ge__", "__lt__"):
        d = getattr(y, op)(0)  # boolean mask: entries with this sign
        s = y[0, :] * 0        # running stack heights, initialized to zero
        l = np.where(d[0, :])[0]
        # NOTE(review): `np.any(l)` on an *index* array is False when the only
        # matching index is 0, silently skipping that bar -- `len(l)` may have
        # been intended; confirm before relying on position 0.
        if np.any(l):
            plt.bar(x[l], y[0, l], color=colors[0], **kwargs)
            s[l] = y[0, l]
        for i in range(1, y.shape[0]):
            l = np.where(d[i, :])[0]
            if np.any(l):
                # Each subsequent series sits on the accumulated heights.
                plt.bar(x[l], y[i, l], color=colors[i], bottom=s[l], **kwargs)
                s[l] += y[i, l]
def bilinear_upsample(x, scale=2):
    """Bilinear upsample.
    Caffe bilinear upsample forked from
    https://github.com/ppwwyyxx/tensorpack
    Deterministic bilinearly-upsample the input images.
    Args:
        x (tf.Tensor): a NHWC tensor
        scale (int): the upsample factor
    Returns:
        tf.Tensor: a NHWC tensor.
    """
    def bilinear_conv_filler(s):
        # Caffe-style bilinear interpolation kernel of size s x s.
        # NOTE: the loop variables shadow the outer `x` tensor; harmless
        # here because the helper never touches the outer scope.
        f = np.ceil(float(s) / 2)
        c = float(2 * f - 1 - f % 2) / (2 * f)
        ret = np.zeros((s, s), dtype='float32')
        for x in range(s):
            for y in range(s):
                ret[x, y] = (1 - abs(x / f - c)) * (1 - abs(y / f - c))
        return ret
    inp_shape = x.get_shape().as_list()
    ch = inp_shape[3]
    assert ch is not None
    # Build a depthwise-style (filter, filter, ch, ch) constant kernel.
    filter_shape = 2 * scale
    w = bilinear_conv_filler(filter_shape)
    w = np.repeat(w, ch * ch).reshape((filter_shape, filter_shape, ch, ch))
    weight_var = tf.constant(w, tf.float32,
                             shape=(filter_shape, filter_shape, ch, ch),
                             name='bilinear_upsample_filter')
    # Symmetric padding avoids border artifacts of the transposed conv.
    pad = min(scale - 1, inp_shape[1])
    x = tf.pad(x, [[0, 0], [pad, pad], [pad, pad], [0, 0]], mode='SYMMETRIC')
    if inp_shape[1] < scale:
        # may cause problem?
        pad = scale - 1 - inp_shape[1]
        x = tf.pad(x, [[0, 0], [pad, pad], [pad, pad], [0, 0]],
                   mode='CONSTANT')
    out_shape = tf.shape(x) * tf.constant([1, scale, scale, 1], tf.int32)
    deconv = tf.nn.conv2d_transpose(x, weight_var, out_shape,
                                    [1, scale, scale, 1], 'SAME')
    # Crop off the upsampled padding added above.
    edge = scale * (scale - 1)
    deconv = deconv[:, edge:-edge, edge:-edge, :]
    # Restore static shape information lost through tf.shape arithmetic.
    if inp_shape[1]:
        inp_shape[1] *= scale
    if inp_shape[2]:
        inp_shape[2] *= scale
    deconv.set_shape(inp_shape)
    return deconv
def verbose_print(msg: str):
    """
    Verbose Printer.
    Print called message if verbose mode is on.

    Writes to stderr; the number of prefixed lines grows with
    CONF.debug.verbose_level.
    """
    if CONF.debug.verbose and CONF.debug.verbose_level:
        verbose_level = CONF.debug.verbose_level
    else:
        return
    # NOTE(review): level 1 prints nothing (both branches require > 1), and
    # levels above 2 print the message twice -- confirm this is intended.
    if verbose_level > 1:
        print('> ' + msg, file=sys.stderr)
    if verbose_level > 2:
        print('>> ' + msg, file=sys.stderr)
def _generate_simplifiers_and_detailers():
    """Generate simplifiers, forced full simplifiers and detailers.

    Returns:
        tuple: (simplifiers, forced_full_simplifiers, detailers) where the
        first two map a type to (index into detailers, simplifier callable),
        and detailers is the shared, deduplicated list of detail callables.
    """
    simplifiers = OrderedDict()
    forced_full_simplifiers = OrderedDict()
    detailers = []
    def _add_simplifier_and_detailer(curr_type, simplifier, detailer, forced=False):
        # Deduplicate detailers: reuse the existing index when already known.
        if detailer in detailers:
            curr_index = detailers.index(detailer)
        else:
            curr_index = len(detailers)
            detailers.append(detailer)
        if forced:
            forced_full_simplifiers[curr_type] = (curr_index, simplifier)
        else:
            simplifiers[curr_type] = (curr_index, simplifier)
    # Register native and torch types
    for curr_type in MAP_TO_SIMPLIFIERS_AND_DETAILERS:
        simplifier, detailer = MAP_TO_SIMPLIFIERS_AND_DETAILERS[curr_type]
        _add_simplifier_and_detailer(curr_type, simplifier, detailer)
    # Register syft objects with custom simplify and detail methods
    for syft_type in OBJ_SIMPLIFIER_AND_DETAILERS + EXCEPTION_SIMPLIFIER_AND_DETAILERS:
        simplifier, detailer = syft_type.simplify, syft_type.detail
        _add_simplifier_and_detailer(syft_type, simplifier, detailer)
    # Register syft objects with custom force_simplify and force_detail methods
    for syft_type in OBJ_FORCE_FULL_SIMPLIFIER_AND_DETAILERS:
        force_simplifier, force_detailer = syft_type.force_simplify, syft_type.force_detail
        _add_simplifier_and_detailer(syft_type, force_simplifier, force_detailer, forced=True)
    return simplifiers, forced_full_simplifiers, detailers
def read_machine_def():
    """
    Reads the machine definition file.

    Delegates to read_yaml_file on the module-level machine_def_file path
    and returns the parsed structure.
    """
    return read_yaml_file(machine_def_file)
def zoo_start(args = '-net_type=mpi -sync=true'):
    """Start the Zoo, all the Actors will be registered.

    :param args: flag string passed through to the native HPPS_ZooStart call.
    """
    # check_call raises if the native library reports a non-zero status.
    check_call(LIB.HPPS_ZooStart(c_str(args)))
def read_libsvm_format(file_path: str) -> 'tuple[list[list[int]], sparse.csr_matrix]':
    """Read multi-label LIBSVM-format data.

    Args:
        file_path (str): Path to file.

    Returns:
        tuple[list[list[int]], sparse.csr_matrix]: A tuple of labels and features.
    """
    import numpy as np  # local import so the fix needs no module-level change

    def as_ints(text):
        """Parse a comma-separated label field such as '1,3' into ints."""
        return [int(s) for s in text.split(',')]

    prob_y = []                # per-instance label lists
    prob_x = array('d')        # non-zero feature values
    row_ptr = array('l', [0])  # CSR row pointer
    col_idx = array('l')       # CSR column indices
    # `with` ensures the file handle is closed (previously it leaked).
    with open(file_path) as f:
        for line in f:
            parts = line.split(None, 1)
            # In case an instance with all zero features
            if len(parts) == 1:
                parts += ['']
            label, features = parts
            prob_y.append(as_ints(label))
            nz = 0
            for e in features.split():
                ind, val = e.split(':')
                val = float(val)
                if val != 0:
                    col_idx.append(int(ind) - 1)  # LIBSVM indices are 1-based
                    prob_x.append(val)
                    nz += 1
            row_ptr.append(row_ptr[-1] + nz)
    # BUGFIX: scipy.frombuffer was removed from SciPy; np.frombuffer is the
    # supported equivalent and likewise shares the arrays' memory.
    prob_x = np.frombuffer(prob_x, dtype='d')
    col_idx = np.frombuffer(col_idx, dtype='l')
    row_ptr = np.frombuffer(row_ptr, dtype='l')
    prob_x = sparse.csr_matrix((prob_x, col_idx, row_ptr))
    return (prob_y, prob_x)
def cristal_load_motor(datafile, root, actuator_name, field_name):
    """
    Try to load the CRISTAL dataset at the defined entry and returns it.
    Patterns keep changing at CRISTAL.
    :param datafile: h5py File object of CRISTAL .nxs scan file
    :param root: string, path of the data up to the last subfolder (not included).
        This part is expected to not change over time
    :param actuator_name: string, name of the actuator (e.g. 'I06-C-C07-EX-DIF-KPHI').
        Lowercase and uppercase will be tested when trying to load the data.
    :param field_name: name of the field under the actuator name (e.g. 'position')
    :return: the dataset if found or 0
    """
    # check input arguments
    valid.valid_container(
        root, container_types=str, min_length=1, name="cristal_load_motor"
    )
    if not root.startswith("/"):
        root = "/" + root
    valid.valid_container(
        actuator_name, container_types=str, min_length=1, name="cristal_load_motor"
    )
    # check if there is an entry for the actuator
    # (case is inconsistent across CRISTAL files, so try as-is, lower, upper)
    if actuator_name not in datafile[root].keys():
        actuator_name = actuator_name.lower()
        if actuator_name not in datafile[root].keys():
            actuator_name = actuator_name.upper()
            if actuator_name not in datafile[root].keys():
                print(f"\nCould not find the entry for the actuator'{actuator_name}'")
                print(f"list of available actuators: {list(datafile[root].keys())}\n")
                return 0
    # check if the field is a valid entry for the actuator
    try:
        dataset = datafile[root + "/" + actuator_name + "/" + field_name][:]
    except KeyError:  # try lowercase
        try:
            dataset = datafile[root + "/" + actuator_name + "/" + field_name.lower()][:]
        except KeyError:  # try uppercase
            try:
                dataset = datafile[
                    root + "/" + actuator_name + "/" + field_name.upper()
                ][:]
            except KeyError:  # nothing else that we can do
                print(
                    f"\nCould not find the field '{field_name}' "
                    f"in the actuator'{actuator_name}'"
                )
                print(
                    "list of available fields: "
                    f"{list(datafile[root + '/' + actuator_name].keys())}\n"
                )
                return 0
    return dataset
def trackPlot(mat, fig=None, groups=None, ratios=None, labels=None, cmap=None, norm=None, is2D=False, xticks=False):
    """
    This function takes a matrix and generates a track figure with several panel according to a group structure that groups
    several rows/cols of the matrix into one panel. This can be done for rows only or for columns and rows. So if the input
    is a 10x10 matrix and we have a grouping of 2,4,3,1, then the final figure will have 4 panels, splitting the matrix into
    the respective groups. When option is2D is true, the same grouping is also applied to the columns. There is obviously room
    for extension ...
    Input:
    mat     - data matrix containing the values
    fig     - figure object to place the panels into
    groups  - grouping vector (is all ones per default, a single panel per row)
    ratios  - the relative proportion each panel takes in the full plot (default to group values)
    labels  - row labels for the matrix (needs to have as many entries as there are rows in the matrix)
    cmap    - color map to apply to the single groups
    is2D    - apply grouping to both columns and rows (rows only is default)
    xticks  - set xticks
    Output:
    Returns a 2 tuple containing the figure object and an array with the axes objects corresponding to the single groups.
    fig     - figure
    ax      - axes
    """
    # Fill in defaults and validate that per-group arguments line up.
    if fig is None:
        fig = plt.figure(figsize=(10, 10), dpi=200)
    if groups is None:
        groups = np.ones((mat.shape[0],), dtype='int')
    if ratios is None:
        ratios = groups
    if labels is not None:
        assert(labels.shape[0] == mat.shape[0])
    if cmap is None:
        cmap = np.array([plt.get_cmap('Blues')] * groups.shape[0], dtype='object')
    else:
        assert(cmap.shape[0] == groups.shape[0])
    if norm is None:
        norm = np.array([plt.Normalize(-1.0, 1.0)] * groups.shape[0], dtype='object')
    else:
        assert(norm.shape[0] == groups.shape[0])
    if is2D:
        # One panel per (row group, column group); widths/heights follow ratios.
        gs = gridspec.GridSpec(groups.shape[0], groups.shape[0], height_ratios=ratios, hspace=0.05, width_ratios=ratios, wspace=0.05)
        last_col = 0
        axes = np.zeros((groups.shape[0], groups.shape[0]), dtype='object')
        for col in range(groups.shape[0]):
            last_row = 0
            for row in range(groups.shape[0]):
                axes[row, col] = fig.add_subplot(gs[row, col])
                # Slice the sub-matrix belonging to this (row, col) group pair.
                axes[row, col].imshow(mat[last_row:last_row+groups[row], :][:, last_col:last_col+groups[col]], aspect='auto', origin='upper', interpolation='nearest', cmap=cmap[row], norm=norm[row])
                if xticks and row == 0:
                    # Only the top panels carry x tick labels.
                    axes[row, col].set_xticks(np.arange(groups[col]))
                    axes[row, col].xaxis.tick_top()
                    if labels is not None:
                        axes[row, col].set_xticklabels(labels[last_col:last_col+groups[col]], rotation=90)
                else:
                    axes[row, col].set_xticks([])
                if col == 0:
                    # Only the leftmost panels carry y tick labels.
                    axes[row, col].set_yticks(np.arange(groups[row]))
                    if labels is not None:
                        axes[row, col].set_yticklabels(labels[last_row:last_row+groups[row]])
                else:
                    axes[row, col].set_yticks([])
                last_row += groups[row]
            last_col += groups[col]
    else:
        # Row-only grouping: one panel per group, stacked vertically.
        axes = np.zeros((groups.shape[0], ), dtype='object')
        gs = gridspec.GridSpec(groups.shape[0], 1, height_ratios=ratios, hspace=0.05)
        last_row = 0
        for row in range(groups.shape[0]):
            axes[row] = fig.add_subplot(gs[row, 0])
            # if density is not None and row in density:
            # ax.fill_between(np.arange(mat.shape[1]),
            # else:
            axes[row].imshow(mat[last_row:last_row+groups[row], :], aspect='auto', origin='lower', interpolation='nearest', cmap=cmap[row], norm=norm[row])
            axes[row].set_xticks([])
            axes[row].set_yticks(np.arange(groups[row]))
            if labels is not None:
                axes[row].set_yticklabels(labels[last_row:last_row+groups[row]])
            last_row += groups[row]
    return (fig, axes)
def concatenate(
    *,
    target_list: List[str],
    is_colored: bool = False,
    number_x: Optional[int] = None,
):
    """api to concatenate movie/picture (note: keyword-only argument)

    Args:
        target_list (List[str]): list of movies, pictures or directories where pictures are stored.
        is_colored (bool, optional): flag to output in color. Defaults to False.
        number_x (int, optional): number of targets concatenated in x direction. max is 5. if this variable is None, this will be selected using GUI window

    Returns:
        return (List[str], optional): list of processed pictures, directories where pictures are stored, and movies. if no process is executed, None is returned
    """
    if not target_list:
        sys.exit("no target is given!")
    # Partition targets by type; each type is handled by its own processor.
    m_list, p_list, d_list = process.sort_target_type(target_list)
    return_list: List[str] = []
    if not m_list and not p_list and not d_list:
        sys.exit("no movie, picture, directory is given!")
    if m_list:
        r = process.ConcatenatingMovie(target_list=m_list,
                                       is_colored=is_colored,
                                       number_x=number_x).execute()
        if r is not None:
            return_list.extend(r)
    if p_list:
        r = process.ConcatenatingPicture(target_list=p_list,
                                         is_colored=is_colored,
                                         number_x=number_x).execute()
        if r is not None:
            return_list.extend(r)
    if d_list:
        r = process.ConcatenatingPictureDirectory(target_list=d_list,
                                                  is_colored=is_colored,
                                                  number_x=number_x).execute()
        if r is not None:
            return_list.extend(r)
    # None signals "nothing was processed" to the caller.
    return return_list if return_list else None
def split_at(n, coll):
    """
    Returns a tuple of ``(take(n, coll), drop(n coll))``.

    The first element is a list of the first ``n`` items; the second is the
    remainder of ``coll`` (produced by the module-level ``_iter`` helper).
    ``n <= 0`` returns ``([], coll)`` unchanged; ``None`` input yields two
    empty lists.
    """
    if n <= 0:
        return [], coll
    if coll is None:
        return [], []
    # Unfortunately we must consume all elements for the first case because
    # unlike Clojure's lazy lists, Python's generators yield their elements
    # only once.
    taken = []
    for i, e in enumerate(coll):
        taken.append(e)
        if i+1 >= n:
            break
    return taken, _iter(coll, n)
def clip(x: ArrayLike, lo: ArrayLike = None, up: ArrayLike = None) -> ShapeletsArray:
    """
    Element-wise, limits the values in an array
    Parameters
    ----------
    x: ArrayLike
        Input array expression
    lo: Optional ArrayLike (defaults: None)
        Low values
    up: Optional ArrayLike (defaults: None)
        High values
    Returns
    -------
    ShapeletsArray
        A new array with the result of the element-wise operation.
    Notes
    -----
    The first parameter must resolve to a dimensional array. Broadcasting
    rules will be applied to the rest of the parameters.
    Examples
    --------
    >>> import shapelets.compute as sc
    >>> a = sc.array([0,1,2,3]).T
    >>> sc.clip(a, 1, 2)
        [1 4 1 1]
            0          1          2          3
    >>> up_vals = sc.array([0,0,1,1]).T
    >>> sc.clip(a, up = up_vals)
        [1 4 1 1]
            0          0          1          1
    """
    # Thin wrapper: all clamping logic lives in the native _pygauss extension.
    return _pygauss.clip(x, lo, up)
def determineNewest(uid, homeType):
    """
    Construct a query to determine the modification time of the newest object
    in a given home.
    @param uid: the UID of the home to scan.
    @type uid: C{str}
    @param homeType: The type of home to scan; C{ECALENDARTYPE},
        C{ENOTIFICATIONTYPE}, or C{EADDRESSBOOKTYPE}.
    @type homeType: C{int}
    @return: A select query that will return a single row containing a single
        column which is the maximum value.
    @rtype: L{Select}
    """
    # Notification homes have a flat structure (home -> notification rows).
    if homeType == ENOTIFICATIONTYPE:
        return Select(
            [Max(schema.NOTIFICATION.MODIFIED)],
            From=schema.NOTIFICATION_HOME.join(
                schema.NOTIFICATION,
                on=schema.NOTIFICATION_HOME.RESOURCE_ID ==
                schema.NOTIFICATION.NOTIFICATION_HOME_RESOURCE_ID),
            Where=schema.NOTIFICATION_HOME.OWNER_UID == uid
        )
    # Calendar/addressbook homes share a home -> bind -> child -> object
    # shape; resolve the matching schema tables by name.
    homeTypeName = {ECALENDARTYPE: "CALENDAR",
                    EADDRESSBOOKTYPE: "ADDRESSBOOK"}[homeType]
    home = getattr(schema, homeTypeName + "_HOME")
    bind = getattr(schema, homeTypeName + "_BIND")
    child = getattr(schema, homeTypeName)
    obj = getattr(schema, homeTypeName + "_OBJECT")
    return Select(
        [Max(obj.MODIFIED)],
        From=home.join(bind, on=bind.HOME_RESOURCE_ID == home.RESOURCE_ID).join(
            child, on=child.RESOURCE_ID == bind.RESOURCE_ID).join(
            obj, on=obj.PARENT_RESOURCE_ID == child.RESOURCE_ID),
        # BIND_MODE == 0 restricts to owned (not shared-in) collections.
        Where=(bind.BIND_MODE == 0).And(home.OWNER_UID == uid)
    )
def get_data(limit = None, filename = "C:/Users/Marcel/OneDrive/Python Courses/Machine Learning/train.csv"):
    """
    Read the MNIST CSV (first column = label, rest = pixels), shuffle the
    rows, and return pixel values scaled to [0, 1] plus the labels.

    An optional `limit` caps the number of returned samples.
    """
    print("Reading in and transforming data...")
    raw = pd.read_csv(filename).values
    np.random.shuffle(raw)
    # Column 0 holds the digit label; the rest are 0-255 pixel intensities.
    features, labels = raw[:, 1:] / 255, raw[:, 0]
    if limit is not None:
        features, labels = features[:limit], labels[:limit]
    print("Done reading in data...", len(labels))
    return features, labels
def vech(A):
    """
    Half-vectorization (vech) operator.

    Stacks the on-and-below-diagonal elements of the square matrix ``A``
    column by column into a 1-D array of length n*(n+1)/2.

    Returns
    -------
    vechvec: vector of all elements on and below diagonal
    """
    n = A.shape[1]
    # The lower triangle read column-by-column equals the upper triangle of
    # A.T read row-by-row, which is exactly the order triu_indices yields --
    # this replaces the original O(n^2) Python double loop.
    rows, cols = np.triu_indices(n)
    return np.asarray(A.T[rows, cols])
def file_size(value, fmt="{value:.1f} {suffix}", si=False):
    """
    Takes a raw number of bytes and returns a humanized filesize.

    Args:
        value: size in bytes.
        fmt: format string receiving ``value`` and ``suffix``.
        si: use decimal (base-1000) units instead of binary (base-1024).
    """
    if si:
        base, suffixes = 1000, ("B", "KB", "MB", "GB", "TB", "PB", "EB", "ZB", "YB")
    else:
        base, suffixes = 1024, ("B", "KiB", "MiB", "GiB", "TiB", "PiB", "EiB", "ZiB", "YiB")
    last = len(suffixes)
    for exponent, suffix in enumerate(suffixes, start=1):
        threshold = base ** exponent
        # Emit once the value fits under this unit, or we ran out of units.
        if value < threshold or exponent == last:
            return fmt.format(value=base * value / threshold, suffix=suffix)
def login():
    """
    Log a user in from the login form.

    On POST: looks up the user by email, requires a confirmed email
    address, and verifies the password hash before starting the session.
    ---
    Args: None
    Returns: Redirects to the account page on success; redirects back to
        the login page when the email is unconfirmed; otherwise re-renders
        the login page with a flashed error.
    """
    if request.method == 'POST':
        email = request.form.get('email')
        password = request.form.get('password')
        # Email is the lookup key; .first() returns None when no row matches.
        user = User.query.filter_by(email=email).first()
        if user:
            if user.confirmation == True:  # NOTE(review): `if user.confirmation:` is the idiomatic form
                if check_password_hash(user.password, password): # check if hashes are the same
                    login_user(user, remember=False)
                    return redirect(url_for('views.account'))
                else:
                    # NOTE(review): unlike the unconfirmed branch below, this path
                    # falls through to render_template instead of redirecting —
                    # presumably intentional, but worth confirming.
                    flash('Incorrect password', category = 'error')
            else:
                flash('Please confirm your email address first.', category = 'error')
                return redirect(url_for('auth.login'))
        else:
            flash('Account does not exist!', category = 'error')
    # GET requests (and failed POSTs that did not redirect) land here.
    return render_template("login.html", user=current_user)
def index_select_op_tensor(input, dim, index):
    """
    Tensor-method form of index_select:
    input.index_select(dim, index) -> Tensor

    Delegates to :func:`index_select_op`; see :func:`oneflow.index_select`.
    """
    selected = index_select_op(input, dim, index)
    return selected
def cull(dsk, keys):
    """ Return new dask with only the tasks required to calculate keys.
    In other words, remove unnecessary tasks from dask.
    ``keys`` may be a single key or list of keys.
    Examples
    --------
    >>> d = {'x': 1, 'y': (inc, 'x'), 'out': (add, 'x', 10)}
    >>> dsk, dependencies = cull(d, 'out')  # doctest: +SKIP
    >>> dsk  # doctest: +SKIP
    {'x': 1, 'out': (add, 'x', 10)}
    >>> dependencies  # doctest: +SKIP
    {'x': set(), 'out': set(['x'])}
    Returns
    -------
    dsk: culled dask graph
    dependencies: Dict mapping {key: [deps]}. Useful side effect to accelerate
    other optimizations, notably fuse.
    """
    if not isinstance(keys, (list, set)):
        keys = [keys]
    seen = set()
    dependencies = {}
    reachable = []
    # Breadth-first walk from the requested keys through their dependencies.
    frontier = list(set(flatten(keys)))
    while frontier:
        reachable += frontier
        next_frontier = []
        for key in frontier:
            deps = get_dependencies(dsk, key, as_list=True)  # fuse needs lists
            dependencies[key] = deps
            for dep in deps:
                if dep not in seen:
                    seen.add(dep)
                    next_frontier.append(dep)
        frontier = next_frontier
    culled = {key: dsk[key] for key in reachable}
    return culled, dependencies
def validate_coot(config, model,
                  val_loader,
                  epoch,
                  constrastive_loss,
                  cmc_loss,
                  writer,
                  logger,
                  use_cuda=True):
    """Validate COOT model
    Args:
        config: experiment configuration (loss weights etc.)
        model: COOT model
        val_loader: validation dataloader
        epoch: current epoch number
        constrastive_loss: MaxMarginRanking loss
        cmc_loss: Cross-modal cycle-consistency loss
        writer: tensorboard writer
        logger: logger instance
        use_cuda (bool): use GPU
    Returns:
        Retrieval performance at video/paragraph and clip/sentence level
    """
    model.eval()
    max_step = len(val_loader)
    # collect embeddings
    vid_emb_list = []
    par_emb_list = []
    clip_emb_list = []
    sent_emb_list = []
    for step, data_dict in enumerate(val_loader):
        (vid_id, vid_frames, vid_frames_mask, vid_frames_len, par_cap_vectors,
         par_cap_mask, par_cap_len, clip_num, clip_frames, clip_frames_len,
         clip_frames_mask, sent_num, sent_cap_vectors, sent_cap_mask,
         sent_cap_len) = unpack_data(data_dict, use_cuda)
        if step == 0:
            print(f"ids {vid_id[:4]}...")
        # forward pass
        (vid_emb, clip_emb, vid_context, clip_emb_reshape, clip_emb_mask,
         clip_emb_lens) = model.encode_video(vid_frames, vid_frames_mask,
                                             vid_frames_len, clip_num,
                                             clip_frames, clip_frames_len,
                                             clip_frames_mask)
        (par_emb, sent_emb, par_context, sent_emb_reshape, sent_emb_mask,
         sent_emb_lens) = model.encode_paragraph(par_cap_vectors, par_cap_mask,
                                                 par_cap_len, sent_num,
                                                 sent_cap_vectors,
                                                 sent_cap_mask, sent_cap_len)
        # contrastive alignment loss plus cross-modal cycle-consistency loss
        loss = compute_constrastive_loss(config, constrastive_loss, vid_emb, par_emb,
                                         clip_emb, sent_emb, vid_context,
                                         par_context)
        loss += compute_cmc_loss(cmc_loss, config.CONFIG.TRAIN.LOSS_CYCLE_CONS_W, clip_emb_reshape,
                                 clip_emb_mask, clip_emb_lens,
                                 sent_emb_reshape, sent_emb_mask,
                                 sent_emb_lens)
        # collect embeddings
        vid_emb_list.extend(vid_emb.detach().cpu())
        par_emb_list.extend(par_emb.detach().cpu())
        #clip-sentence embeddings
        clip_emb_list.extend(clip_emb.detach().cpu())
        sent_emb_list.extend(sent_emb.detach().cpu())
        # logging
        if step % 10 == 0:
            logger.info(f"Val [{step}/{max_step}] Loss {loss.item():.4f}")
    vid_emb_list = torch.stack(vid_emb_list, 0)
    par_emb_list = torch.stack(par_emb_list, 0)
    clip_emb_list = torch.stack(clip_emb_list, 0)
    sent_emb_list = torch.stack(sent_emb_list, 0)
    # video text retrieval (L2-normalize so cosine similarity = dot product)
    vid_emb_list = F.normalize(vid_emb_list).numpy()
    par_emb_list = F.normalize(par_emb_list).numpy()
    v2p_res, _ = coot_utils.compute_retr_vid_to_par(vid_emb_list, par_emb_list)
    p2v_res, _ = coot_utils.compute_retr_par_to_vid(vid_emb_list, par_emb_list)
    sum_at_1 = v2p_res["r1"] + p2v_res["r1"]
    logger.info(coot_utils.EVALHEADER)
    logger.info(coot_utils.retrieval_results_to_str(p2v_res, "Par2Vid"))
    logger.info(coot_utils.retrieval_results_to_str(v2p_res, "Vid2Par"))
    # clip sentence retrieval
    clip_emb_list = F.normalize(clip_emb_list).numpy()
    sent_emb_list = F.normalize(sent_emb_list).numpy()
    c2s_res, _ = coot_utils.compute_retr_vid_to_par(clip_emb_list,
                                                    sent_emb_list)
    s2c_res, _ = coot_utils.compute_retr_par_to_vid(clip_emb_list,
                                                    sent_emb_list)
    c2s_sum_at_1 = c2s_res["r1"] + s2c_res["r1"]
    logger.info(coot_utils.EVALHEADER)
    logger.info(coot_utils.retrieval_results_to_str(s2c_res, "Sen2Shot"))
    logger.info(coot_utils.retrieval_results_to_str(c2s_res, "Shot2Sen"))
    # NOTE(review): `loss` here is only the LAST batch's loss, and the
    # 'val_loss_epoch' scalar is written twice (here and below) — confirm
    # whether an epoch-averaged loss / single write was intended.
    writer.add_scalar('val_loss_epoch', loss, epoch)
    writer.add_scalar('val_R1_Sentence2Clip_epoch', s2c_res["r1"], epoch)
    writer.add_scalar('val_R5_Sentence2Clip_acc_epoch', s2c_res["r5"], epoch)
    writer.add_scalar('val_R10_Sentence2Clip_acc_epoch', s2c_res["r10"], epoch)
    writer.add_scalar('val_loss_epoch', loss, epoch)
    writer.add_scalar('val_R1_Clip2Sentence_epoch', c2s_res["r1"], epoch)
    writer.add_scalar('val_R5_Clip2Sentence_acc_epoch', c2s_res["r5"], epoch)
    writer.add_scalar('val_R10_Clip2Sentence_acc_epoch', c2s_res["r10"], epoch)
    return ((v2p_res, p2v_res, sum_at_1), (c2s_res, s2c_res, c2s_sum_at_1))
def iniStressProfile(z_and_dz, z_range, ma):
    """Initial acoustic stress profile (Beer-Lambert absorption).

    The original signature used Python-2-only tuple parameter unpacking
    (``def iniStressProfile((z,dz),(zMin,zMax),ma)``), removed by PEP 3113;
    the tuples are now unpacked inside the body, so existing call sites
    (which pass two tuples positionally) are unchanged.

    \\param[in] z_and_dz (z, dz): z-axis array and axial increment
    \\param[in] z_range  (zMin, zMax): start/end of the absorbing tissue layer
    \\param[in] ma absorption coefficient within the layer
    \\param[out] p0 initial stress profile (mu(z) * exp(-integral of mu dz))
    """
    z, dz = z_and_dz
    zMin, zMax = z_range
    mu_z = np.zeros(z.size)
    # Absorption coefficient is ma inside [zMin, zMax), zero elsewhere.
    mu_z[int((zMin - z[0]) / dz):int((zMax - z[0]) / dz)] = ma
    # Beer-Lambert attenuation of the deposited stress along z.
    return mu_z * np.exp(-np.cumsum(mu_z * dz))
def OpenCredentials(cred_path: str):
    """
    Parse an AWS credentials file into a dict.

    The file is expected to contain exactly two comma-separated lines:
    the key names, then the matching values.

    :param cred_path: Path to the file containing the credentials
    :return: A dict containing the credentials
    """
    with open(cred_path) as handle:
        # Unpacking enforces the exactly-two-lines expectation.
        keys, values = [line.strip().split(',') for line in handle]
    return dict(zip(keys, values))
def content_loop_rate(best, n,file_path,file_id, loop_time=20,height=100):
    """Run the nesting GA for a fixed number of generations.
    Args:
        best (dict): best individual found so far
        n (NEST): the packer / nesting driver
        file_path (str): output directory for the result drawing
        file_id (str): output file id
        loop_time (int, optional): number of generations to run. Defaults to 20.
        height (int, optional): reference bin height. Defaults to 100.
    """
    print("STOP_GENERATION",settings.STOP_GENERATION)
    res = best
    run_time = loop_time
    loops=1
    if settings.DEBUG:
        import time
        current_time=time.time()
        last_time=current_time
        generation_time=[]
        best_fitness_for_all_generation=[]
        best_fitness_for_current_generation=[]
        square_like_for_all_generation=[]
        square_like_for_current_generation=[]
    while run_time:
        #print("content_loop_rate",loops)
        loops=loops+1
        n.run()
        best = n.best
        #print (best['fitness'])
        # Keep the best individual seen across all generations.
        if best['fitness'] <= res['fitness']:
            res = best
            #print ('change', res['fitness'])
        ################# per-generation evaluation results ############################
        #self.results [{'placements': all_placements, 'fitness': fitness,'min_width':min_width, 'paths': paths, 'area': bin_area}]
        # elite: self.best
        if settings.DEBUG:
            current_time=time.time()
            generation_time.append(10*(current_time-last_time))
            last_time=current_time
            best_fitness_for_all_generation.append(res['fitness'])
            best_fitness_for_current_generation.append(best['fitness'])
            square_like_for_all_generation.append(res['min_width']/height)
            square_like_for_current_generation.append(best['min_width']/height)
        ####################################################
        run_time -= 1
        # TODO: revisit this early-stop criterion
        # Stop early once utilization exceeds the configured expectation.
        if n.shapes_total_area/(best['min_width']*settings.BIN_NORMAL[2][1])>settings.SMALLCASE_EXPECTATION:
            #print("***",n.shapes_total_area/(best['min_width']*settings.BIN_NORMAL[2][1]))
            #print(n.shapes_total_area,best['min_width'],settings.BIN_NORMAL[2][1])
            run_time=False
    if settings.DEBUG:
        print("best_fitness_for_all_generation",best_fitness_for_all_generation)
        if settings.DRAWPIC:
            from matplotlib import pyplot as plt
            from matplotlib.pyplot import MultipleLocator
            x = range(1,len(best_fitness_for_all_generation)+1)
            plt.grid(axis='x',color='0.95')
            plt.step(x,best_fitness_for_all_generation, label="best_fitness_for_all_generation",color='red',marker='^')
            plt.step(x,best_fitness_for_current_generation, label="best_fitness_for_current_generation",color='blue')
            plt.xlabel('generation')
            plt.ylabel('fitness',color='b')
            #ax.legend()
            #x_major_locator=MultipleLocator(1)
            #ax.xaxis.set_major_locator(x_major_locator) # set x-axis tick scale
            plt.figure(2)
            plt.step(x,square_like_for_all_generation, label="square_like_for_all_generation",linestyle="--",color='red',marker='^')
            plt.step(x,square_like_for_current_generation, label="square_like_for_current_generation",linestyle="--",color='blue')
            plt.xlabel('generation')
            plt.ylabel('square_like',color='r')
            plt.title('Sample Run')
            plt.figure(3)
            plt.bar(x, generation_time, color='rgb', tick_label=x)
    draw_result(res['placements'], n.shapes,n.originalshapes, n.container, n.container_bounds,file_path,file_id)
def get_follow_users():
    """
    Return the list of followed users stored in the "follow" cookie.

    Yields an empty list when the cookie is absent.
    """
    if "follow" not in request.cookies:
        return []
    return request.cookies["follow"].split(delim)
def generate_user_agent(os=None, navigator=None, device_type=None):
    """
    Generate a randomized HTTP User-Agent header string.

    :param os: restrict the oses used for generation; one of
        "win", "linux", "mac", "android", "ios", "all"
    :type os: string or list/tuple or None
    :param navigator: restrict the browser engines used for generation;
        one of "chrome", "firefox", "ie", "edge", "safari", "opera", "all"
    :type navigator: string or list/tuple or None
    :param device_type: restrict possible oses by device type;
        possible values: "desktop", "smartphone", "all"
    :type device_type: list/tuple or None
    :return: User-Agent string
    :rtype: string
    :raises InvalidOption: if no user-agent can be generated for any
        combination of the allowed oses and navigators, or if any passed
        option is invalid
    """
    # Resolve the (possibly None / "all") options into concrete ids.
    dev_type, os_key, nav_key = pick_config_ids(device_type, os, navigator)
    system_parts = build_system_components(os_key, nav_key)
    app_parts = build_app_components(os_key, nav_key)
    template = choose_ua_template(os_key, nav_key, app_parts)
    return template.format(system=system_parts, app=app_parts)
def _make_builders(
    args: argparse.Namespace,
    ds_to_build: str,
) -> Iterator[tfds.core.DatasetBuilder]:
    """Yields the dataset builder(s) requested on the command line."""
    builder_cls, builder_kwargs = _get_builder_cls(ds_to_build)
    # `--experimental_latest_version` conflicts with an explicit version suffix.
    if args.experimental_latest_version:
        if 'version' in builder_kwargs:
            raise ValueError(
                'Can\'t have both `--experimental_latest` and version set (`:1.0.0`)'
            )
        builder_kwargs['version'] = 'experimental_latest'
    # Resolve the config, possibly overriding the one parsed from the name.
    builder_kwargs['config'] = _get_config_name(
        builder_cls=builder_cls,
        config_kwarg=builder_kwargs.get('config'),
        config_name=args.config,
        config_idx=args.config_idx,
    )
    make_builder = functools.partial(
        _make_builder,
        builder_cls,
        overwrite=args.overwrite,
        data_dir=args.data_dir,
        **builder_kwargs,
    )
    if builder_cls.BUILDER_CONFIGS and builder_kwargs['config'] is None:
        # No explicit config requested: generate every declared config.
        for cfg in builder_cls.BUILDER_CONFIGS:
            yield make_builder(config=cfg.name)
    else:
        # Generate only the single requested dataset.
        yield make_builder()
def usage_demo(state_machine_name, resources):
    """
    Creates and runs a Step Functions state machine that calls a Lambda function to
    retrieve message records from a DynamoDB table and record them as sent.
    The state machine is then updated to also send messages to an Amazon SQS
    queue and the state machine is run again.

    :param state_machine_name: Name of the state machine to find or create.
    :param resources: Dict of deployed resource identifiers used here:
        'MessageTableName', 'SendQueueUrl', and 'StepRoleArn'.
    """
    state_machine = StepFunctionsStateMachine(boto3.client('stepfunctions'))
    table = boto3.resource('dynamodb').Table(resources['MessageTableName'])
    queue = boto3.resource('sqs').Queue(resources['SendQueueUrl'])
    # Reuse an existing state machine if one with this name already exists.
    state_machine_arn = state_machine.find(state_machine_name)
    if state_machine_arn is None:
        print("Create a message pump state machine.")
        definition = make_definition(resources, False)
        state_machine.create(state_machine_name, definition, resources['StepRoleArn'])
    print("Put three messages in the message table.")
    for user_name, message in [
            ('wills', 'Brevity is the soul of wit.'),
            ('janea', 'Let us never underestimate the power of a well-written letter.'),
            ('lewisc', "I have proved by actual trial that a letter, that takes an "
                       "hour to write, takes only about 3 minutes to read!")]:
        table.put_item(Item={
            'user_name': user_name, 'message': message,
            'message_id': str(time.time_ns()), 'sent': False})
    print("Start the state machine.")
    run_arn = state_machine.start_run(f"run-without-sqs-{time.time_ns()}")
    # The sleeps below are demo pacing: give AWS time to process asynchronously.
    print("Wait a few seconds for the state machine to run...")
    time.sleep(10)
    print("Verify that the messages in DynamoDB are marked as sent.")
    messages = table.scan()['Items']
    pprint(messages)
    print("Stop the state machine.")
    state_machine.stop_run(run_arn, "Stop to update for demo.")
    # Poll until no runs remain RUNNING before updating the definition.
    runs = state_machine.list_runs('RUNNING')
    while runs:
        time.sleep(5)
        runs = state_machine.list_runs('RUNNING')
    print("Update the state machine so it sends messages to Amazon SQS.")
    definition = make_definition(resources, True)
    state_machine.update(definition)
    time.sleep(5)
    print("Reset the messages in the DynamoDB table to not sent.")
    for msg in table.scan()['Items']:
        table.update_item(
            Key={'user_name': msg['user_name'], 'message_id': msg['message_id']},
            UpdateExpression='SET sent=:s',
            ExpressionAttributeValues={':s': False})
    print("Restart the state machine.")
    run_arn = state_machine.start_run(f"run-with-sqs-{time.time_ns()}")
    print("Wait for state machine to process messages...")
    time.sleep(15)
    print("Retrieve messages from Amazon SQS.")
    poll_for_messages(queue)
    print("Put another message in the table.")
    table.put_item(
        Item={'user_name': 'wills', 'message': 'Action is eloquence.',
              'message_id': str(time.time_ns()), 'sent': False})
    print("Give the state machine time to find and process the message.")
    time.sleep(15)
    print("Get messages from Amazon SQS.")
    poll_for_messages(queue)
    print("Stop the run.")
    state_machine.stop_run(run_arn, "Done with demo.")
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.