content stringlengths 22 815k | id int64 0 4.91M |
|---|---|
def process_scene_ard(config_file, sensor, scene_id):
    """
    A function which runs the process of converting the specified scene to an ARD product.

    :param config_file: The EODataDown configuration file path.
    :param sensor: the string name of the sensor
    :param scene_id: identifier of the scene to be converted to ARD.
    :return: None
    :raises EODataDownException: if no sensor object matches ``sensor``.
    """
    # Create the System 'Main' object and parse the configuration file.
    sys_main_obj = eodatadown.eodatadownsystemmain.EODataDownSystemMain()
    sys_main_obj.parse_config(config_file)
    logger.debug("Parsed the system configuration.")
    # Record the start of the operation in the usage database.
    edd_usage_db = sys_main_obj.get_usage_db_obj()
    edd_usage_db.add_entry("Started: Converting Specified Scene to ARD ({0}: {1}).".format(sensor, scene_id), start_block=True)
    # Find the sensor object whose name matches the requested sensor.
    sensor_objs = sys_main_obj.get_sensors()
    sensor_obj_to_process = None
    for sensor_obj in sensor_objs:
        if sensor_obj.get_sensor_name() == sensor:
            sensor_obj_to_process = sensor_obj
            break
    if sensor_obj_to_process is None:
        logger.error("Error occurred could not find sensor object for '{}'".format(sensor))
        raise EODataDownException("Could not find sensor object for '{}'".format(sensor))
    try:
        sensor_obj_to_process.scn2ard(scene_id)
    except Exception as e:
        # NOTE(review): the exception is logged but not re-raised, so a failed
        # conversion still records the "Finished" usage entry below -- confirm
        # this best-effort behaviour is intended.
        logger.error("Error occurred while converting scene ({0}) to ARD for sensor: ({1})".format(
            scene_id, sensor_obj_to_process.get_sensor_name()))
        logger.debug(e.__str__(), exc_info=True)
    edd_usage_db.add_entry("Finished: Converting Specified Scene to ARD ({0}: {1}).".format(sensor, scene_id), end_block=True)
def save_associations(resource, form, descriptors, resource_existed):
    """Save associations from the forms received by 'create' and 'edit' route
    handlers to the database.

    :param resource: resource row the associations belong to.
    :param form: submitted form with one field per descriptor name.
    :param descriptors: iterable of descriptor rows to persist values for.
    :param resource_existed: True when editing an existing resource; its
        previous associations are deleted first (handles the "empty" case).
    """
    # first delete all the associations for this resource if it already
    # existed (to handle the "empty" case)
    if resource_existed:
        options = OptionAssociation.query.filter_by(
            resource_id=resource.id).all()
        texts = TextAssociation.query.filter_by(resource_id=resource.id).all()
        for association in options + texts:
            db.session.delete(association)
        try:
            db.session.commit()
        except IntegrityError:
            db.session.rollback()
            flash('Error: failed to save edits. Please try again.',
                  'form-error')
    for descriptor in descriptors:
        if descriptor.is_option_descriptor:
            AssociationClass = OptionAssociation
            keyword = 'option'
            if descriptor.name != 'supercategories':
                values = [int(i) for i in form[descriptor.name].data]
            else:
                # Supercategory values are derived from the selected
                # categories rather than taken directly from the form.
                # BUG FIX: filter() returns an iterator in Python 3 and is not
                # subscriptable; use next() to take the first match.
                categories_descriptor = next(
                    d for d in descriptors if d.name == 'categories')
                categories_values = categories_descriptor.values
                categories_options = [
                    int(i) for i in form[categories_descriptor.name].data
                ]
                categories_values = [
                    categories_values[category_option]
                    for category_option in categories_options
                ]
                supercategories_descriptor = next(
                    d for d in descriptors if d.name == 'supercategories')
                supercategories_values = [
                    category_to_supercategory[category_value]
                    for category_value in categories_values
                    if category_value in category_to_supercategory
                ]
                values = [
                    supercategories_descriptor.values.index(
                        supercategory_value)
                    for supercategory_value in supercategories_values
                ]
        else:
            # BUG FIX: 'report count' descriptors must not create any
            # association; previously this branch fell through and silently
            # reused AssociationClass/values from the preceding descriptor.
            if descriptor.name == 'report count':
                continue
            AssociationClass = TextAssociation
            values = [form[descriptor.name].data]
            keyword = 'text'
        for value in values:
            arguments = {
                'resource_id': resource.id,
                'descriptor_id': descriptor.id,
                keyword: value,
                'resource': resource,
                'descriptor': descriptor
            }
            db.session.add(AssociationClass(**arguments))
def game_get_state():
    """The ``/game/state`` endpoint requires authentication and expects no
    other arguments.

    It can be reached at ``/game/state?secret=<API_SECRET>``.
    It is used to retrieve the current state of the game.

    The JSON response looks like::

        {
            "state_id": int,
            "game_id": int,
            "services": [List of {"service_id": int,
                                  "service_name": string,
                                  "port": int}],
            "scripts": [List of {"script_id": int,
                                 "upload_id": int,
                                 "type": ("exploit", "benign", "getflag",
                                          "setflag"),
                                 "script_name": string,
                                 "service_id": int}]
            "run_scripts": [{"team_id": int (team_id to run scripts against),
                             "run_list": [Ordered list of int script_ids]}],
            "state_expire": int (approximate remaining seconds in this tick),
        }

    :return: a JSON dictionary providing information on the current state.
    """
    cursor = mysql.cursor()
    # Get basic information about the game, like tick info and services
    to_return = {}
    current_tick, tick_start, seconds_to_next_tick, _ = get_current_tick(cursor)
    to_return["state_id"] = current_tick
    to_return["state_expire"] = seconds_to_next_tick
    cursor.execute("SELECT id FROM game LIMIT 1")
    game_cursor = cursor.fetchone()
    if game_cursor is None:
        # No game row: report the well-known "no game running" payload.
        to_return["num"] = "621"
        to_return["msg"] = "No game is currently running..."
        return json.dumps(to_return)
    # NOTE(review): rows are accessed by column name, so the connection is
    # assumed to use a dict-style cursor -- confirm against the mysql setup.
    to_return["game_id"] = game_cursor["id"]
    cursor.execute("""SELECT services.id AS service_id,
                             services.name as service_name,
                             services.port as port,
                             current_state as state
                      FROM services""")
    to_return["services"] = cursor.fetchall()
    # Determine which scripts exists and which should be run
    cursor.execute("""SELECT id AS script_id, upload_id, filename AS script_name,
                             type, service_id,
                             current_state as state
                      FROM scripts""")
    to_return["scripts"] = cursor.fetchall()
    # Scripts scheduled against each team for the current tick; the script id
    # order is stored as a JSON list in the database.
    cursor.execute("""SELECT team_id, json_list_of_scripts_to_run AS json_list
                      FROM team_scripts_run_status
                      WHERE team_scripts_run_status.tick_id = %s""",
                   (current_tick,))
    run_scripts = []
    for team_scripts_to_run in cursor.fetchall():
        team_id = team_scripts_to_run["team_id"]
        run_list = json.loads(team_scripts_to_run["json_list"])
        run_scripts.append({"team_id": team_id,
                            "run_list": run_list})
    to_return["run_scripts"] = run_scripts
    return json.dumps(to_return)
def new(key, mode, iv=None):
    """Return a `Cipher` object that can perform ARIA encryption and
    decryption.

    ARIA is a block cipher designed in 2003 by a large group of South
    Korean researchers. In 2004, the Korean Agency for Technology and
    Standards selected it as a standard cryptographic technique.

    Parameters:
        key (bytes): The key to encrypt/decrypt with.
        mode (int): The mode of operation of the cipher.
        iv (bytes or None): The initialization vector (IV). The IV is
            required for every mode but ECB and CTR where it is ignored.
            If not set, the IV is initialized to all 0, which should not
            be used for encryption.

    Raises:
        TLSError: If the mode is unsupported or the key is not
            16, 24, or 32 bytes long.
    """
    mode = _cipher.Mode(mode)
    if mode in {
        _cipher.Mode.ECB,
        _cipher.Mode.CBC,
        # _cipher.Mode.CFB128,
        _cipher.Mode.CTR,
        _cipher.Mode.GCM,
    }:
        if len(key) * 8 not in {128, 192, 256}:
            # BUG FIX: error message was missing the verb ("key size must 16").
            raise TLSError(
                msg="key size must be 16, 24, or 32 bytes, got %i" % len(key)
            )
    else:
        raise TLSError(msg="unsupported mode %r" % mode)
    name = ("ARIA-%i-%s" % (len(key) * 8, mode.name)).encode("ascii")
    return _cipher.Cipher(name, key, mode, iv)
def keggapi_info(database, verbose=True, force_download=False, return_format=None, return_url=False):
    """KEGG REST API interface for INFO command

    Displays information on a given database

    for further info read https://www.kegg.jp/kegg/rest/keggapi.html

    Parameters
    ----------
    database : str
        database of which you want to obtain infos on
    verbose : bool
        if set to False displays only the first 4 lines of text (default is True)
    force_download : bool
        forces overwriting on previous cached files (default is False)
    return_format : str
        optional, specify a return format to return, str | dict (default is None)
    return_url : bool
        if True, return the REST url instead of performing the request
        (default is False)

    Returns
    -------
    info_str : str
        optional, plain text response of API INFO command
    info_dict : dict
        optional, parsed response of API INFO as a dictionary
    """
    valid_return_formats = (None, "str", "dict")
    if return_format not in valid_return_formats:
        raise ValueError("invalid {} format for keggapi_info return".format(return_format))
    # Validate the database name against known categories and organism codes.
    org = get_organism_codes()
    if database not in db_categories + org:
        raise KEGGKeyError(
            database, msg="source database {} is not a valid database".format(database)
        )
    url = "http://rest.kegg.jp/info/{}".format(database)
    if return_url:
        return url
    filename = database + "_info"
    infos = download_textfile(url, filename, verbose=False, force_download=force_download)
    if verbose:
        logging.info("Infos on %s from KEGG:\n", database)
    if return_format is None:
        # Print-only mode: show a short preview or the full text.
        if not verbose:
            print("\n".join(infos.splitlines()[1:4]))
        else:
            print(infos)
    elif return_format == "str":
        return infos
    elif return_format == "dict":
        processed_dict = process_request_text(infos, mode="columns")
        return processed_dict
def round_to_thirty(str_time):
    """STR_TIME is a time in the format HHMM. This function rounds down to the nearest half hour."""
    hours = str_time[:2]
    # Minutes in [30, 59] land on the half hour; [0, 29] land on the hour.
    half_hour_passed = int(str_time[2:]) // 30 == 1
    return hours + ("30" if half_hour_passed else "00")
def mkdir(path, reset=False):
    """Ensure that *path* exists as a directory, creating it if needed.

    Parameters
    ----------
    reset: erase the content of the directory if exists

    Returns
    -------
    the path
    """
    if reset and os.path.exists(path):
        # Wipe the existing tree so the directory comes back empty.
        shutil.rmtree(path)
    try:
        os.makedirs(path)
    except FileExistsError:
        pass  # already present: nothing to do
    return path
def write_jsonl(filepath, values):
    """Writes List[Dict] data to jsonlines file.

    Args:
        filepath: file to write to
        values: list of dictionary data to write
    """
    # One JSON document per line, newline-terminated (jsonlines format).
    with open(filepath, "w") as sink:
        sink.writelines(ujson.dumps(record) + "\n" for record in values)
def remove_uoms(words):
    """
    Remove uoms in the form of e.g. 1000m 1543m3

    Parameters
    ----------
    words: list of words to process

    Returns
    -------
    A list of words where possible uom have been removed
    """
    kept = []
    for raw in words:
        # Strip at most one decimal point and one thousands separator so
        # "12.5m" is treated like "125m".
        cleaned = raw.replace('.', '', 1).replace(',', '', 1)
        stem_one = cleaned[:len(cleaned) - 1]   # matches e.g. "1543m"
        stem_two = cleaned[:len(cleaned) - 2]   # matches e.g. "1543m3"
        looks_like_uom = (stem_one.isnumeric() or stem_one.isdecimal()
                          or stem_two.isnumeric() or stem_two.isdecimal())
        if not looks_like_uom:
            kept.append(cleaned)
    return kept
def raise_if_not_datetime_ta(candidate: Any) -> None:
    """Ensure the given value is a timezone-aware datetime.

    Raises ``PeriodError`` if the value is not a datetime at all, or if it
    is a "naive" datetime (one without an attached timezone).
    """
    if isinstance(candidate, datetime):
        if candidate.tzinfo is not None:
            return  # timezone-aware: all good
        raise PeriodError('Given datetime is "naive" (no timezone is attached to it)')
    raise PeriodError("Given value is not strictly a datetime")
def parse_region(reg: str) -> tuple:
    """
    Return a pair of slices (slice1, slice2) corresponding
    to the region give as input in numpy slice string format
    If the region can't be parsed sys.exit() is called
    """
    try:
        parsed = str_to_slices(reg)
    except ValueError as err:
        logging.error("ValueError: %s", err)
        logging.error("Bad region spec: %s", reg)
        sys.exit(1)
    # A region must describe exactly two dimensions.
    if len(parsed) != 2:
        logging.error("Bad region spec: %s", reg)
        sys.exit(1)
    return parsed
async def create_wall_connector_entry(
    hass: HomeAssistant, side_effect=None
) -> MockConfigEntry:
    """Create a wall connector entry in hass.

    :param hass: Home Assistant test instance to register the entry with.
    :param side_effect: optional exception/side effect applied to all three
        patched WallConnector calls, to simulate API failures.
    :return: the MockConfigEntry after setup has been attempted.
    """
    entry = MockConfigEntry(
        domain=DOMAIN,
        data={CONF_HOST: "1.2.3.4"},
        options={CONF_SCAN_INTERVAL: 30},
    )
    entry.add_to_hass(hass)
    # We need to return vitals with a contactor_closed attribute
    # Since that is used to determine the update scan interval
    fake_vitals = tesla_wall_connector.wall_connector.Vitals(
        {
            "contactor_closed": "false",
        }
    )
    # Patch all three WallConnector API calls so setup never touches the
    # network; the same side_effect (if any) applies to each.
    with patch(
        "tesla_wall_connector.WallConnector.async_get_version",
        return_value=get_default_version_data(),
        side_effect=side_effect,
    ), patch(
        "tesla_wall_connector.WallConnector.async_get_vitals",
        return_value=fake_vitals,
        side_effect=side_effect,
    ), patch(
        "tesla_wall_connector.WallConnector.async_get_lifetime",
        return_value=None,
        side_effect=side_effect,
    ):
        await hass.config_entries.async_setup(entry.entry_id)
        await hass.async_block_till_done()
    return entry
def _normalize(data):
"""
Normalizes the data (z-score)
:param data: Data to be normalized
:return: Nomralized data
"""
mean = np.mean(data, axis=0)
sd = np.std(data, axis=0)
# If Std Dev is 0
if not sd:
sd = 1e-7
return (data - mean) / sd | 5,330,112 |
def test_xml_filters_change_bars():
    """Test the use a xml filter"""
    series_a = [60, 75, 80, 78, 83, 90]
    series_b = [92, 87, 81, 73, 68, 55]
    plot = Bar(legend_at_bottom=True, explicit_size=True,
               width=800, height=600)
    plot.add("A", series_a)
    plot.add("B", series_b)
    plot.add_xml_filter(ChangeBarsXMLFilter(series_a, series_b))
    tree = plot.render_tree()
    groups = tree.findall("g")
    # The filter should collapse the chart into exactly two <g> elements,
    # the second carrying the rescaling transform.
    assert len(groups) == 2
    assert groups[1].attrib["transform"] == "translate(0,150), scale(1,0.75)"
def test_file_corrupt(datagram_small, tmpdir):
    """This tests DatagramInputFile's handling of a corrupt size header."""
    dg, verify = datagram_small
    p = tmpdir.join('datagram.bin')
    filename = core.Filename.from_os_specific(str(p))
    # Write one datagram to disk.
    dof = core.DatagramOutputFile()
    dof.open(filename)
    dof.put_datagram(dg)
    dof.close()
    # Corrupt the size header to 4GB.
    # BUG FIX: open with 'r+b' so the header is patched in place; the previous
    # mode='wb' truncated the file and discarded the datagram payload entirely.
    with p.open(mode='r+b') as f:
        f.seek(0)
        f.write(b'\xFF\xFF\xFF\xFF')
    # Reading back should fail gracefully rather than crash.
    dg2 = core.Datagram()
    dif = core.DatagramInputFile()
    dif.open(filename)
    assert not dif.get_datagram(dg2)
    dif.close()
    # Should we test that dg2 is unmodified?
def showResultOnImage(result, img):
    """
    Display obtained results onto input image

    :param result: OCR response dict; expects result['recognitionResult']['lines'],
        each line holding 'words' whose 'boundingBox' is an 8-value polygon
        (x1,y1,...,x4,y4) plus a 'text' field.
    :param img: image array; channels are reordered from BGR to RGB for display.
    """
    img = img[:, :, (2, 1, 0)]  # BGR -> RGB for matplotlib
    fig, ax = plt.subplots(figsize=(12,12))
    ax.imshow(img, aspect='equal')
    lines = result['recognitionResult']['lines'] # assign the 'lines' value at lv 1
    for i in range(len(lines)):
        words = lines[i]['words'] # assign words val at lv2
        for j in range(len(words)):
            # Four corners of the word's bounding polygon.
            tl = (words[j]['boundingBox'][0], words[j]['boundingBox'][1])
            tr = (words[j]['boundingBox'][2], words[j]['boundingBox'][3])
            br = (words[j]['boundingBox'][4], words[j]['boundingBox'][5])
            bl = (words[j]['boundingBox'][6], words[j]['boundingBox'][7])
            # extract text of each word
            text = words[j]['text']
            # NOTE(review): corners are duplicated so the polyline traces each
            # edge of the quadrilateral; the box is left open at the starting
            # corner -- confirm this rendering is intended.
            x = [tl[0], tr[0], tr[0], br[0], br[0], bl[0], bl[0], tl[0]]
            y = [tl[1], tr[1], tr[1], br[1], br[1], bl[1], bl[1], tl[1]]
            # define a box around the word
            line = Line2D(x, y, linewidth=3.5, color='red')
            # Draw box on image
            ax.add_line(line)
            # Write text on image
            ax.text(tl[0], tl[1] - 2, '{:s}'.format(text),
                    bbox=dict(facecolor='blue', alpha=0.5),
                    fontsize=14, color='white')
    plt.axis('off')
    plt.tight_layout()
    plt.draw()
    plt.show()
def addi(registers, a, b, c):
    """(add immediate) stores into register C the result of adding register A and value B."""
    total = registers[a] + b
    registers[c] = total
def also_provides(context):
    """ add settings to control panel"""
    # Only act when this is our add-on's install profile (marker file present).
    if context.readDataFile('medialog.subskins.install.txt') is not None:
        alsoProvides(IGooglefontsSettings, IMedialogControlpanelSettingsProvider)
def test_transform_disabled(doctree):
    """
    Test that no reference is inserted if transforming is disabled.
    """
    found = doctree.next_node(pending_xref)
    assert not found
def test_config_duplicate_daily_avg_no_int_min_seen_days():
    """Verify that the config parser rejects a non-integer 'min_seen_days'
    parameter for the duplicate_daily_avg dimension.
    """
    # 'min_seen_days' is deliberately the non-numeric string 'ABC'.
    conditions_config = [{'label': 'duplicate_mk1',
                          'reason': 'Duplicate IMEI detected',
                          'dimensions': [{'module': 'duplicate_daily_avg',
                                          'parameters': {'min_seen_days': 'ABC', 'threshold': '4.0',
                                                         'period_days': '30'}}]}]
    cfg = {'conditions': conditions_config, 'region': {'name': 'Country1', 'country_codes': '22'}}
    msg = 'Could not create dimension \'duplicate_daily_avg\' with supplied parameters. ' \
          'Cause: \'min_seen_days\' parameter must be an integer, got \'ABC\' instead'
    _expect_app_config_failure(config=cfg, expected_message=msg)
def merge_target_airport_configs(
    weather_flight_features: pd.DataFrame,
    configs: pd.DataFrame,
    parameters: Dict[str, Any],
) -> pd.DataFrame:
    """
    This function merges actual airport configuration values to the main data frame. Multiple future configuration values
    are added as defined by the prediction_lookahead and prediction_delta (prediction_lookahead/prediction_delta columns).
    The current configuration is also added to the input data

    :param weather_flight_features: main frame, joined on 'timestamp'.
    :param configs: configuration history with 'start_time' and
        'airport_configuration_name' columns.
    :param parameters: expects keys 'prediction_delta',
        'prediction_lookahead' and 'stale_airport_config_th' (hours).
    :return: weather_flight_features inner-joined with current and future
        airport configuration columns.
    """
    configs = configs.rename(columns={'start_time': 'timestamp'})
    configs = configs[['timestamp', 'airport_configuration_name']]
    # Keep the original config start time so staleness can be measured later.
    configs = configs.assign(timestamp_config=configs['timestamp'])
    configs = configs.sort_values('timestamp_config', ascending=True)
    # Sample configuration data
    start_datetime = configs['timestamp_config'].min().ceil("H")
    end_datetime = configs['timestamp_config'].max().floor("H")
    time_df = sampling_times(parameters['prediction_delta'], start_datetime, end_datetime)
    # Backward as-of merge: each sample gets the latest config at or before it.
    configs_sampled = pd.merge_asof(time_df, configs, on="timestamp", direction="backward")
    # TODO: for CLT 93.6% of the data is kept after removing stale configs. For other airports the current logic
    # could lead to removing too many rows. Need to keep an eye on the logged value below to see if additional logic
    # is needed
    # Remove stale configs data
    is_stale = (configs_sampled['timestamp'] - configs_sampled['timestamp_config']) \
        / np.timedelta64(1, 'h') > parameters['stale_airport_config_th']
    # Log rows with stale configs
    log = logging.getLogger(__name__)
    log.info('Kept {:.1f}% of rows when removing stale airport configuration'.format(
        100 * (1 - (sum(is_stale) / configs_sampled.shape[0]))))
    configs_sampled.loc[is_stale, 'airport_configuration_name'] = None
    configs_sampled.drop(columns=['timestamp_config'], inplace=True)
    # Restructure data, add future values
    configs_wide = future_values_reshape(configs_sampled,
                                         parameters['prediction_lookahead'],
                                         parameters['prediction_delta'],
                                         'timestamp')
    # Add current configuration
    configs_wide = pd.merge(configs_wide, configs_sampled, on='timestamp')
    # Remove NAs, only removing NAs in current config and first future config
    fields_remove_na = ['airport_configuration_name', 'airport_configuration_name' + '_' + str(parameters['prediction_delta'])]
    is_na = configs_wide[fields_remove_na].isna().any(axis=1)
    configs_wide = configs_wide[is_na == False]
    # All future airport configuration columns are stored in a single columns
    configs_wide = lookahead_cols_to_single_col(configs_wide, 'airport_configuration_name_')
    configs_wide = configs_wide.rename(columns={'airport_configuration_name': 'airport_configuration_name_current'})
    # Merge target configuration data
    data = pd.merge(weather_flight_features, configs_wide, on='timestamp', how='inner')
    return data
def add_macro(config: str, macros: str, params: Union[List[str], str], macro: str = "params"):
    """Create new params macro, infer new references in config.

    Look for keys in dictionaries of both macros and config that are in
    params, and for each param, store the existing value, replace it by
    a macro reference "$macro:param", and finally add the new macro
    parameter to the macros.

    NOTE(review): earlier documentation said results were written to a "new"
    subdirectory, but the code below writes back to the original `config` and
    `macros` paths -- confirm which behaviour is intended.

    WARNING: This function is performing a lot of magic by automatically
    replacing values in both macros and config. It is highly recommended
    to manually inspect the resulting config.

    Parameters
    ----------
    config : str
        Path to config.json
    macros : str
        Path to macros.json
    params : Union[List[str], str]
        List of new parameters (or a comma-separated string)
    macro : str, optional
        Name of the new macro

    Raises
    ------
    ValueError
        If any param in params has no match in either config and macros.
    """
    # Load config and macros
    params = params.split(",") if isinstance(params, str) else params
    config_dict = read_json(config)
    macros_dict = read_json(macros)
    config_and_macros = {"config": config_dict, "macros": macros_dict}
    # Retrieve existing values, prefer values from macros
    LOGGER.info("Automatically retrieving existing values for new parameters.")
    params_values = {**find_values(config_dict, params), **find_values(macros_dict, params)}
    # Add new macro params in config and macros
    LOGGER.info("Automatically adding new macro params in config and macros.")
    config_and_macros = add_macro_params(config_and_macros, macro=macro, params=params)
    new_config_dict = config_and_macros["config"]
    new_macros_dict = config_and_macros["macros"]
    # Update macros with the new macro
    LOGGER.info(f"Building new macro {macro} with parameters :")
    new_macros_dict[macro] = new_macros_dict.get(macro, {})
    for param in params:
        # Fall back to a self-referencing macro string when no value is found.
        value = params_values.get(param, f"${macro}:{param}")
        new_macros_dict[macro][param] = value
        if ismacro(value):
            LOGGER.warning(f"- {param}: {value} IS NOT SET (manual fix required if not using `ParamsTuner`).")
        else:
            LOGGER.info(f"- {param}: {value}")
    # Write to new
    write_json(new_config_dict, config)
    write_json(new_macros_dict, macros)
def test_2d_freq():
    """ reading 2D freq domain files """
    # read the text, binary, xreim, and rawbin data
    text_dic, text_data = simpson.read(os.path.join(DD_2D, '2d_text.spe'))
    bin_dic, bin_data = simpson.read(os.path.join(DD_2D, '2d.spe'))
    xyreim_units, xyreim_data = simpson.read(
        os.path.join(DD_2D, '2d_ftext.spe'))
    rd, rawbin_data = simpson.read(
        os.path.join(DD_2D, '2d_raw.spe'), ndim=2, NP=256, NI=512, spe=True)
    # check data in text file: spot-check a few points against known values
    assert text_data.shape == (512, 256)
    assert text_data.dtype == 'complex64'
    assert np.abs(text_data[4, 150].real - 0.29) <= 0.01
    assert np.abs(text_data[4, 150].imag - 0.34) <= 0.01
    assert np.abs(text_data[4, 151].real - 0.13) <= 0.01
    assert np.abs(text_data[4, 151].imag - 0.16) <= 0.01
    assert np.abs(text_data[5, 150].real - 0.41) <= 0.01
    assert np.abs(text_data[5, 150].imag - 0.14) <= 0.01
    # data in text, bin and xyreim files should all be close
    assert np.allclose(text_data, bin_data)
    assert np.allclose(text_data, xyreim_data)
    # rawbin should be close except for first point along each vector
    assert np.allclose(rawbin_data[:, 1:], text_data[:, 1:])
def chunks(chunkable, n):
    """Return a list of successive n-sized chunks from *chunkable*.

    :param chunkable: sliceable sequence to split.
    :param n: positive chunk size; the final chunk may be shorter.
    :return: list of n-sized slices of *chunkable*.
    """
    # BUG FIX: xrange does not exist in Python 3; use range.
    return [chunkable[i:i + n] for i in range(0, len(chunkable), n)]
def handle_withdraw(exporter, elem, txinfo, index=0):
    """ withdraw nft or sell proceeds from randomearth.io

    :param exporter: exporter collecting the produced CSV rows
    :param elem: raw transaction element from the node response
    :param txinfo: parsed transaction info (provides wallet_address)
    :param index: position of the withdraw message within the tx (default 0)
    """
    wallet_address = txinfo.wallet_address
    execute_msg = util_terra._execute_msg(elem)
    # Check if wallet is sender (can be receiver of later transfer_nft msg)
    sender = elem["tx"]["value"]["msg"][index]["value"]["sender"]
    if sender == wallet_address:
        # Parse the withdrawn asset into an (amount, currency) pair.
        asset = execute_msg["withdraw"]["asset"]
        received_amount, received_currency = _parse_asset(asset)
        row = make_nft_withdraw(txinfo, received_amount, received_currency)
        exporter.ingest_row(row)
def test_switch_config_leaf_bmc():
    """Test that the `canu generate switch config` command runs and returns valid leaf-bmc config."""
    leaf_bmc = "sw-leaf-bmc-001"
    with runner.isolated_filesystem():
        # Write the SLS fixture to disk so the CLI can consume it.
        with open(sls_file, "w") as f:
            json.dump(sls_input, f)
        result = runner.invoke(
            cli,
            [
                "--cache",
                cache_minutes,
                "generate",
                "switch",
                "config",
                "--csm",
                csm,
                "--architecture",
                architecture,
                "--shcd",
                test_file,
                "--tabs",
                tabs,
                "--corners",
                corners,
                "--sls-file",
                sls_file,
                "--name",
                leaf_bmc,
            ],
        )
        assert result.exit_code == 0
        print(result.output)
        # Basic identity/bootstrap section.
        assert (
            "ip name-server 10.92.100.225\n"
            + "hostname sw-leaf-bmc-001\n"
            + "rest api restconf\n"
        ) in str(result.output)
        print(result.output)
        # VLAN interfaces (MTL / NMN / HMN) with ACLs and OSPF.
        assert (
            "interface vlan1\n"
            + " description MTL\n"
            + " no shutdown\n"
            + " mtu 9216\n"
            + " ip address 192.168.1.12/16\n"
            + "interface vlan2\n"
            + " description RIVER_NMN\n"
            + " no shutdown\n"
            + " mtu 9216\n"
            + " ip address 192.168.3.12/17\n"
            + " ip access-group nmn-hmn in\n"
            + " ip access-group nmn-hmn out\n"
            + " ip ospf 1 area 0.0.0.0\n"
            + "interface vlan4\n"
            + " description RIVER_HMN\n"
            + " no shutdown\n"
            + " mtu 9216\n"
            + " ip address 192.168.0.12/17\n"
            + " ip access-group nmn-hmn in\n"
            + " ip access-group nmn-hmn out\n"
            + " ip ospf 1 area 0.0.0.0\n"
        ) in str(result.output)
        print(result.output)
        # Uplink port-channel to the spine.
        assert (
            "interface port-channel101\n"
            + " description sw-spine-001:26<==sw-leaf-bmc-001\n"
            + " no shutdown\n"
            + " switchport mode trunk\n"
            + " switchport access vlan 1\n"
            + " switchport trunk allowed vlan 2,4\n"
            + " mtu 9216\n"
        ) in str(result.output)
        print(result.output)
        # Loopback, management and per-BMC edge ports.
        assert (
            "interface loopback0\n"
            + " no shutdown\n"
            + " mtu 9216\n"
            + " ip address 10.2.0.12/32\n"
            + " ip ospf 1 area 0.0.0.0\n"
            + "interface mgmt1/1/1\n"
            + " shutdown\n"
            + " ip address dhcp\n"
            + " ipv6 address autoconfig\n"
            + "interface ethernet1/1/51\n"
            + " no shutdown\n"
            + " channel-group 101 mode active\n"
            + " no switchport\n"
            + " mtu 9216\n"
            + " speed 10000\n"
            + " flowcontrol receive off\n"
            + " flowcontrol transmit off\n"
            + "interface ethernet1/1/52\n"
            + " no shutdown\n"
            + " channel-group 101 mode active\n"
            + " no switchport\n"
            + " mtu 9216\n"
            + " speed 10000\n"
            + " flowcontrol receive off\n"
            + " flowcontrol transmit off\n"
            + "interface ethernet1/1/1\n"
            + " description ncn-m001:bmc:1<==sw-leaf-bmc-001\n"
            + " no shutdown\n"
            + " switchport access vlan 4\n"
            + " mtu 9216\n"
            + " flowcontrol receive off\n"
            + " flowcontrol transmit off\n"
            + " spanning-tree bpduguard enable\n"
            + " spanning-tree port type edge\n"
            + "interface ethernet1/1/2\n"
            + " description ncn-m002:bmc:1<==sw-leaf-bmc-001\n"
            + " no shutdown\n"
            + " switchport access vlan 4\n"
            + " mtu 9216\n"
            + " flowcontrol receive off\n"
            + " flowcontrol transmit off\n"
            + " spanning-tree bpduguard enable\n"
            + " spanning-tree port type edge\n"
            + "interface ethernet1/1/3\n"
            + " description ncn-m003:bmc:1<==sw-leaf-bmc-001\n"
            + " no shutdown\n"
            + " switchport access vlan 4\n"
            + " mtu 9216\n"
            + " flowcontrol receive off\n"
            + " flowcontrol transmit off\n"
            + " spanning-tree bpduguard enable\n"
            + " spanning-tree port type edge\n"
            + "interface ethernet1/1/4\n"
            + " description ncn-w001:bmc:1<==sw-leaf-bmc-001\n"
            + " no shutdown\n"
            + " switchport access vlan 4\n"
            + " mtu 9216\n"
            + " flowcontrol receive off\n"
            + " flowcontrol transmit off\n"
            + " spanning-tree bpduguard enable\n"
            + " spanning-tree port type edge\n"
            + "interface ethernet1/1/5\n"
            + " description ncn-w002:bmc:1<==sw-leaf-bmc-001\n"
            + " no shutdown\n"
            + " switchport access vlan 4\n"
            + " mtu 9216\n"
            + " flowcontrol receive off\n"
            + " flowcontrol transmit off\n"
            + " spanning-tree bpduguard enable\n"
            + " spanning-tree port type edge\n"
            + "interface ethernet1/1/6\n"
            + " description ncn-w003:bmc:1<==sw-leaf-bmc-001\n"
            + " no shutdown\n"
            + " switchport access vlan 4\n"
            + " mtu 9216\n"
            + " flowcontrol receive off\n"
            + " flowcontrol transmit off\n"
            + " spanning-tree bpduguard enable\n"
            + " spanning-tree port type edge\n"
            + "interface ethernet1/1/7\n"
            + " description ncn-s001:bmc:1<==sw-leaf-bmc-001\n"
            + " no shutdown\n"
            + " switchport access vlan 4\n"
            + " mtu 9216\n"
            + " flowcontrol receive off\n"
            + " flowcontrol transmit off\n"
            + " spanning-tree bpduguard enable\n"
            + " spanning-tree port type edge\n"
            + "interface ethernet1/1/8\n"
            + " description ncn-s002:bmc:1<==sw-leaf-bmc-001\n"
            + " no shutdown\n"
            + " switchport access vlan 4\n"
            + " mtu 9216\n"
            + " flowcontrol receive off\n"
            + " flowcontrol transmit off\n"
            + " spanning-tree bpduguard enable\n"
            + " spanning-tree port type edge\n"
            + "interface ethernet1/1/9\n"
            + " description ncn-s003:bmc:1<==sw-leaf-bmc-001\n"
            + " no shutdown\n"
            + " switchport access vlan 4\n"
            + " mtu 9216\n"
            + " flowcontrol receive off\n"
            + " flowcontrol transmit off\n"
            + " spanning-tree bpduguard enable\n"
            + " spanning-tree port type edge\n"
            + "interface ethernet1/1/10\n"
            + " description uan001:bmc:1<==sw-leaf-bmc-001\n"
            + " no shutdown\n"
            + " switchport access vlan 4\n"
            + " mtu 9216\n"
            + " flowcontrol receive off\n"
            + " flowcontrol transmit off\n"
            + " spanning-tree bpduguard enable\n"
            + " spanning-tree port type edge\n"
        ) in str(result.output)
        print(result.output)
        # NMN/HMN isolation access list.
        assert (
            "ip access-list nmn-hmn\n"
            + " seq 10 deny ip 192.168.3.0/17 192.168.0.0/17\n"
            + " seq 20 deny ip 192.168.0.0/17 192.168.3.0/17\n"
            + " seq 30 deny ip 192.168.3.0/17 192.168.200.0/17\n"
            + " seq 40 deny ip 192.168.0.0/17 192.168.100.0/17\n"
            + " seq 50 deny ip 192.168.100.0/17 192.168.0.0/17\n"
            + " seq 60 deny ip 192.168.100.0/17 192.168.200.0/17\n"
            + " seq 70 deny ip 192.168.200.0/17 192.168.3.0/17\n"
            + " seq 80 deny ip 192.168.200.0/17 192.168.100.0/17\n"
            + " seq 90 permit ip any any\n"
        ) in str(result.output)
        print(result.output)
        # OSPF, spanning-tree and NTP configuration.
        assert (
            "router ospf 1\n"
            + " router-id 10.2.0.12\n"
            + "spanning-tree mode mst\n"
            + "spanning-tree mst configuration\n"
            + " name cray\n"
            + " revision 2\n"
            + "ntp server 192.168.4.4\n"
            + "ntp server 192.168.4.5\n"
            + "ntp server 192.168.4.6\n"
        ) in str(result.output)
        print(result.output)
def pass_generate():
    """
    Entry point of the password generator: build three password blocks
    joined by dashes.
    """
    block_kwargs = {'min_len': 4, 'max_len': 6}
    blocks = [generate_pass_block(**block_kwargs) for _ in range(3)]
    return "-".join(blocks)
def lineSpectrum(pos, image, data, width, scale=1, spacing=3, mode="dual"):
    """
    Draw spectrum bars.

    :param pos: (x, y) - position of spectrum bars on image
    :param image: PIL.Image - image to draw
    :param data: 1D array - sound data
    :param width: int - width of spectrum on image
    :param scale: number - scaling of bars length
    :param spacing: int - spacing between bars
    :param mode: dual | bottom | up - direction of bars
    """
    from . import analyzer as anl
    # One bar per `spacing` pixels across the requested width.
    bar_count = int(width // spacing)
    bar_heights = anl.fft(data, bar_count)
    return line(pos, image, bar_heights, scale, spacing, mode)
def _unique_arXiv(record, extra_data):
    """Check if the arXiv ID is unique (does not already exist in Scoap3)

    Returns a ``(passed, messages, None)`` triple as expected by the
    checks framework, where ``messages`` is a tuple of strings.
    """
    arxiv_id = get_first_arxiv(record)
    # search through ES to find if it exists already
    if arxiv_id:
        result = current_search_client.search(
            'scoap3-records-record',
            q='arxiv_eprints.value="{}"'.format(arxiv_id)
        )['hits']
        if result['total'] == 0:
            return True, ('ArXiv ID not found. Unique ID.', ), None
        # return all the control numbers in order to check the error
        # str() guards against non-string control numbers breaking join().
        record_control_numbers = ', '.join(
            str(hit['_source']['control_number'])
            for hit in result['hits']
        )
        # BUG FIX: trailing comma added so this branch returns a 1-tuple of
        # messages, consistent with every other return of this check.
        return False, ('ArXiv ID already exists. Please check {}'.format(
            record_control_numbers), ), None
    return True, ('No arXiv id: Out of the scope of this check', ), None
def profile(username):
    """
    User profile page. Allows changing the login and password.
    Eventually intended to also store the user's favourite links.
    """
    # Users may only view their own profile; anyone else is bounced home.
    if username != current_user.nickname:
        return redirect(url_for('index'))
    types = Types.manager.get_by('', dictionary=True)
    return render_template('auth/profile.html', types=types)
def had_cell_edge(strmfunc, cell="north", edge="north", frac_thresh=0.1,
                  cos_factor=False, lat_str=LAT_STR, lev_str=LEV_STR):
    """Latitude of poleward edge of either the NH or SH Hadley cell.

    :param strmfunc: meridional mass streamfunction (lat x lev).
    :param cell: which cell to use, 'north' or 'south'.
    :param edge: which edge of that cell to find, 'north' or 'south'.
    :param frac_thresh: fraction of the cell maximum defining the edge.
    :param cos_factor: apply the Singh 2019 cosine weighting if True.
    :param lat_str, lev_str: names of the latitude and level coordinates.
    :raises ValueError: if `cell` or `edge` is not 'north'/'south'.
    """
    hc_strengths = had_cells_strength(strmfunc, lat_str=lat_str,
                                      lev_str=lev_str)
    if cell == "north":
        label = "had_cell_nh"
    elif cell == "south":
        label = "had_cell_sh"
    else:
        raise ValueError("`cell` must be either 'north' or 'south'; "
                         f"got {cell}.")
    # Restrict to streamfunction at level of the specified cell's maximum.
    cell_max = hc_strengths.sel(cell=label)
    lat_max = cell_max[lat_str]
    lev_max = cell_max[lev_str]
    sf_at_max = strmfunc.sel(**{lev_str: float(lev_max), "method": "nearest"})
    # Restrict to the latitudes north or south of the max, as specified.
    lat = strmfunc[lat_str]
    if edge == "north":
        which_zero = 0
        lat_compar = lat >= lat_max
    elif edge == "south":
        which_zero = -1
        lat_compar = lat <= lat_max
    else:
        # BUG FIX: the message previously reported `cell` instead of `edge`.
        raise ValueError("`edge` must be either 'north' or 'south'; "
                         f"got {edge}.")
    sf_one_side = sf_at_max.where(lat_compar, drop=True)
    # Restrict to the latitudes from the max to the nearest point with
    # opposite-signed value.
    # Apply cubic interpolation in latitude to a refined mesh. Otherwise, the
    # cell edge can (unphysically) vary non-monotonically with `frac_thresh`.
    lats_interp = np.arange(sf_one_side[lat_str].min(),
                            sf_one_side[lat_str].max() - 0.01, 0.05)
    sf_one_side_interp = sf_one_side.interp(**{lat_str: lats_interp},
                                            method="cubic")
    # Explicitly make the last value equal to the original, as otherwise the
    # interp step can overwrite it with nan for some reason.
    sf_one_side_interp = xr.concat([sf_one_side_interp, sf_one_side[-1]],
                                   dim=lat_str)
    # Find where the streamfunction crosses the specified fractional threshold,
    # using the Singh 2019 cosine weighting if specified.
    if cos_factor:
        sf_norm = ((sf_one_side_interp / cosdeg(sf_one_side_interp[lat_str])) /
                   (cell_max / cosdeg(lat_max)))
    else:
        sf_norm = sf_one_side_interp / cell_max
    sf_thresh_diff = sf_norm - frac_thresh
    sf_edge_bounds = zero_cross_bounds(sf_thresh_diff, lat_str, which_zero)
    # Interpolate between the bounding points to the crossing.
    return interpolate(sf_edge_bounds, sf_edge_bounds[lat_str], 0,
                       lat_str)[lat_str]
def make_score_fn(data):
  """Returns a groupwise score fn to build `EstimatorSpec`.

  Args:
    data: object exposing ``create_feature_columns()``, which returns the
      ``(context_feature_columns, example_feature_columns)`` pair of dicts.

  Returns:
    A ``_score_fn(context_features, group_features, mode, params, config)``
    callable that scores a group of documents with a feed-forward network.
  """
  context_feature_columns, example_feature_columns = data.create_feature_columns()
  def _score_fn(context_features, group_features, mode, unused_params,
                unused_config):
    """Defines the network to score a group of documents."""
    with tf.name_scope("input_layer"):
      # Flatten each per-example feature; sorted() gives a deterministic
      # column order across runs.
      group_input = [
          tf.layers.flatten(group_features[name])
          for name in sorted(example_feature_columns)
      ]
      # NOTE(review): debug prints left in place intentionally; consider
      # tf.logging if this runs in production.
      print(group_input[0].shape)
      print(group_input[0].dtype)
      context_input = [
          tf.layers.flatten(context_features[name])
          for name in sorted(context_feature_columns)
      ]
      print(context_input[0].shape)
      print(context_input[0].dtype)
      # Context features first, then the group (example) features.
      final_input = context_input + group_input
      input_layer = tf.concat(final_input, 1)
      # Input diagnostics for TensorBoard.
      tf.summary.scalar("input_sparsity", tf.nn.zero_fraction(input_layer))
      tf.summary.scalar("input_max", tf.reduce_max(input_layer))
      tf.summary.scalar("input_min", tf.reduce_min(input_layer))
    is_training = (mode == tf.estimator.ModeKeys.TRAIN)
    # Batch-norm -> (dense -> batch-norm -> relu -> dropout) per hidden layer.
    cur_layer = tf.layers.batch_normalization(input_layer, training=is_training)
    for i, layer_width in enumerate(int(d) for d in FLAGS.hidden_layer_dims):
      cur_layer = tf.layers.dense(cur_layer, units=layer_width)
      cur_layer = tf.layers.batch_normalization(cur_layer, training=is_training)
      cur_layer = tf.nn.relu(cur_layer)
      tf.summary.scalar("fully_connected_{}_sparsity".format(i),
                        tf.nn.zero_fraction(cur_layer))
    cur_layer = tf.layers.dropout(
        cur_layer, rate=FLAGS.dropout_rate, training=is_training)
    # One logit per document in the group.
    logits = tf.layers.dense(cur_layer, units=FLAGS.group_size)
    return logits
  return _score_fn
def getSummaryHtml():
    """ Gives a HTML summary of the sent emails
    (calls the getSummary method). It uses the
    shelve-file given as parameter 'logpath'.

    Reads the CGI parameter ``logpath``, builds an HTML table from the
    EmailLogger summary (first summary row is the header row) and prints
    the page to stdout.
    """
    import cgi, cgitb
    cgitb.enable()
    # print("Content-type: text/html\n\n")  # presumably emitted by the
    # server/wrapper -- confirm before re-enabling.
    f = cgi.FieldStorage()
    logPath = f["logpath"].value
    logger = EmailLogger(logPath)
    summary = logger.getLogSummary(secondsBack=None)  # None => whole log
    # Build the page in a list and join once; the original concatenated
    # strings in a loop (quadratic) and used a Python-2 print statement,
    # which is a SyntaxError on Python 3.
    parts = ["<html><head></head><body><table border='1'><tr><th>%s</th><th>%s</th><th>%s</th></tr>"
             % (summary[0][0], summary[0][1], summary[0][2])]
    for row in summary[1:]:
        parts.append("<tr><td>%s</td><td>%s</td><td>%s</td></tr>" % (row[0], row[1], row[2]))
    parts.append("</table></body></html>")
    print("".join(parts))
def unicode2str(obj):
    """ Recursively convert an object and members to str objects
    instead of unicode objects, if possible.

    This only exists because of the incoming world of unicode_literals.

    :param object obj: object to recurse
    :return: object with converted values
    :rtype: object
    """
    if isinstance(obj, dict):
        # Convert both the keys and the values.
        return {unicode2str(k): unicode2str(v) for k, v in
                obj.items()}
    elif isinstance(obj, list):
        return [unicode2str(i) for i in obj]
    elif isinstance(obj, unicode_type()):
        # NOTE(review): unicode_type() is defined elsewhere; presumably it
        # returns `unicode` on py2 (and `str` on py3) -- confirm, since
        # .encode() on py3 str yields bytes, not str.
        return obj.encode("utf-8")
    else:
        # Ints, tuples, None, etc. are returned untouched.
        return obj
def test_elim_cast_same_dtype(tag):
    """ test_elim_cast_same_dtype

    Returns the graph registered under *tag*: ``fp32_cast_fp32`` is the
    pattern before optimisation (an explicit Cast), ``after`` is the
    expected graph once a same-dtype cast has been eliminated.
    """
    fns = FnDict()
    cast = P.Cast()
    @fns
    def fp32_cast_fp32(x, y):
        return cast(x, y)
    @fns
    def after(x, y):
        # A cast to the identical dtype is a no-op, so only x survives.
        return x
    return fns[tag]
def bdd_to_coco(src_path,
                dst_path,
                img_dir,
                include,
                ignore_occluded=False,
                time_of_day=None):
    """Convert BDD100K det format to MS COCO format.

    Parameters
    ----------
    src_path : str
        The path to the BDD100K det JSON file.
    dst_path : str
        The destination path to save the MS COCO
        annotations.
    img_dir : str
        Path to the BDD100K images directory.
    include : str
        Which category to include.
    ignore_occluded : bool
        Boolean flag whether to ignore occluded individuals.
    time_of_day : str
        Time of day to include. None if include all.

    Returns
    -------
    None
        The converted annotations are written to ``dst_path``.
    """
    # Context manager so the source file handle is closed even on error;
    # the original `json.load(open(src_path))` leaked it.
    with open(src_path) as src_file:
        src = json.load(src_file)
    include = {include}  # downstream helpers expect a set of category names
    bdd100k_cocostyle = {}
    bdd100k_cocostyle['categories'] = make_coco_categories()
    bdd100k_cocostyle['images'], filenames = make_coco_images(
        src, img_dir, include, time_of_day)
    bdd100k_cocostyle['annotations'] = make_coco_annotations(
        src, include, time_of_day, ignore_occluded, filenames)
    with open(dst_path, 'w') as json_file:
        json.dump(bdd100k_cocostyle, json_file)
def getRepository():
    """Return the SVN repository URL for the current working directory.

    Runs ``svn info`` and extracts the value of the ``URL:`` line.
    Raises a generic Exception when no such line is found.
    """
    proc = Ptyopen2('svn info')
    lines, status = proc.readlinesAndWait()
    for info_line in lines:
        # The URL line looks like "URL: https://host/repo"; skip the
        # "URL: " prefix (5 chars) and trim the trailing newline.
        if info_line.startswith('URL') and len(info_line) > 3:
            return info_line[5:].rstrip()
    raise Exception('Could not determine SVN repository')
def replaceInternalLinks(text):
    """
    Replaces internal links of the form:
    [[title |...|label]]trail

    with title concatenated with trail, when present, e.g. 's' for plural.

    See https://www.mediawiki.org/wiki/Help:Links#Internal_links
    """
    # call this after removal of external links, so we need not worry about
    # triple closing ]]].
    # Raw string: '\w' is a regex escape, and a non-raw '\w' raises a
    # SyntaxWarning on Python 3.12+.
    tailRE = re.compile(r'\w+')
    cur = 0
    res = ''
    for s, e in findBalanced(text):
        # Word characters directly after ']]' become the trail (e.g. the
        # plural 's' in "[[apple]]s").
        m = tailRE.match(text, e)
        if m:
            trail = m.group(0)
            end = m.end()
        else:
            trail = ''
            end = e
        inner = text[s + 2:e - 2]  # strip the surrounding [[ ]]
        # find first |
        pipe = inner.find('|')
        if pipe < 0:
            # No pipe: the title doubles as the label.
            title = inner
            label = title
        else:
            title = inner[:pipe].rstrip()
            # find last | that is not inside a nested balanced span
            curp = pipe + 1
            for s1, e1 in findBalanced(inner):
                last = inner.rfind('|', curp, s1)
                if last >= 0:
                    pipe = last  # advance
                curp = e1
            label = inner[pipe + 1:].strip()
        res += text[cur:s] + makeInternalLink(title, label) + trail
        cur = end
    # Append whatever follows the last link.
    return res + text[cur:]
def make_table(rows: List[List[Any]], labels: Optional[List[Any]] = None, centered: bool = False) -> str:
    """
    Render *rows* (and an optional header row) as a box-drawing table.

    :param rows: 2D list containing objects that have a single-line representation (via `str`).
        All rows must be of the same length.
    :param labels: List containing the column labels. If present, the length must equal to that of each row.
    :param centered: If the items should be aligned to the center, else they are left aligned.
    :return: A table representing the rows passed in.
    """
    n_cols = len(rows[0])
    # Column width = widest cell in that column, header included (if any).
    # Using the column position (not labels.index) keeps duplicate labels
    # aligned correctly, which the original implementation broke.
    widths = [len(str(labels[col])) if labels is not None else 0
              for col in range(n_cols)]
    for row in rows:
        for col, cell in enumerate(row):
            widths[col] = max(widths[col], len(str(cell)))

    def fmt(cell: Any, width: int) -> str:
        # Pad one cell to `width`; centering puts the extra space on the
        # right, matching the original floor/ceil split (do NOT use
        # str.center: its odd-width rounding differs).
        text = str(cell)
        if centered:
            pad = width - len(text)
            left = pad // 2
            return " " * left + text + " " * (pad - left)
        return text.ljust(width)

    def rule(left: str, mid: str, right: str) -> str:
        # A horizontal separator line, e.g. "┌────┬───┐".
        return left + mid.join("─" * (w + 2) for w in widths) + right + "\n"

    def render(cells: List[Any]) -> str:
        # One content line: "│ c1 │ c2 │".
        return "│ " + " │ ".join(fmt(c, w) for c, w in zip(cells, widths)) + " │\n"

    out = rule("┌", "┬", "┐")
    if labels is not None:
        out += render(labels) + rule("├", "┼", "┤")
    out += "".join(render(row) for row in rows)
    return out + rule("└", "┴", "┘")
def mock_connection(
    aioclient_mock: AiohttpClientMocker,
    host: str = HOST,
    port: int = PORT,
    ssl: bool = False,
    base_path: str = BASE_PATH,
    conn_error: bool = False,
    conn_upgrade_error: bool = False,
    ipp_error: bool = False,
    no_unique_id: bool = False,
    parse_error: bool = False,
    version_not_supported: bool = False,
):
    """Mock the IPP connection.

    Registers either an exception or a canned binary response for the
    printer's IPP endpoint on ``aioclient_mock``.  The ``*_error`` flags
    are checked in order (ipp_error, conn_error, conn_upgrade_error) and
    short-circuit; the fixture flags then pick the response payload.
    """
    scheme = "https" if ssl else "http"
    ipp_url = f"{scheme}://{host}:{port}"
    # Exception cases: register the exception on POST and stop.
    if ipp_error:
        aioclient_mock.post(f"{ipp_url}{base_path}", exc=IPPError)
        return
    if conn_error:
        aioclient_mock.post(f"{ipp_url}{base_path}", exc=aiohttp.ClientError)
        return
    if conn_upgrade_error:
        aioclient_mock.post(f"{ipp_url}{base_path}", exc=IPPConnectionUpgradeRequired)
        return
    # Success-ish cases: pick the fixture that matches the requested flaw.
    fixture = "ipp/get-printer-attributes.bin"
    if no_unique_id:
        fixture = "ipp/get-printer-attributes-success-nodata.bin"
    elif version_not_supported:
        fixture = "ipp/get-printer-attributes-error-0x0503.bin"
    # parse_error overrides the fixture with non-IPP content.
    if parse_error:
        content = "BAD"
    else:
        content = load_fixture_binary(fixture)
    aioclient_mock.post(
        f"{ipp_url}{base_path}",
        content=content,
        headers={"Content-Type": "application/ipp"},
    )
def format_value(v):
    """
    Formats a value to be included in a string.

    Strings come back single-quoted with embedded single quotes escaped;
    any other value is rendered through ``format``.

    @param v a string
    @return a string
    """
    if isinstance(v, str):
        escaped = v.replace("'", "\\'")
        return "'{0}'".format(escaped)
    return "{0}".format(v)
def worst_solvents(delta_d, delta_p, delta_h, filter_params):
    """Search solvents on the basis of RED (sorted descending) with given Hansen parameters, and with
    a formatted string indicating filter parameters. See the function parse_filter_params
    for details of filter parameters string.

    :return: flask JSON response with at most the five worst-matching
        solvents (largest 'RED' distance); empty list on bad input or when
        the filters eliminate every solvent.
    """
    results_list = []
    filter_dict = parse_filter_params(filter_params)
    try:
        delta_d = float(delta_d)
        delta_p = float(delta_p)
        delta_h = float(delta_h)
    except ValueError:
        # Non-numeric Hansen parameters: answer with an empty result set.
        return jsonify(results_list)
    #Since we need most of the info and the solvents table is not big, we'll just read it
    #straight to a DataFrame
    solvent_df = pd.read_sql('solvents',db.engine)
    # Apply each min/max window filter in turn.
    for code_key, min_max in filter_dict.items():
        solvent_df = solvent_df[(solvent_df[cols_from_codes[code_key]] >= min_max[0]) & \
                                (solvent_df[cols_from_codes[code_key]] <= min_max[1])]
    if len(solvent_df) == 0:
        return jsonify(results_list)
    # Squared Hansen distance: 4*dD^2 + dP^2 + dH^2 (dispersion term carries
    # the conventional factor of 4); stored as RED2, sqrt'd into RED.
    solvent_df['RED2'] = 4 * (solvent_df['delta_d'] - delta_d) * (solvent_df['delta_d'] - delta_d) + \
                         (solvent_df['delta_p'] - delta_p) * (solvent_df['delta_p'] - delta_p) + \
                         (solvent_df['delta_h'] - delta_h) * (solvent_df['delta_h'] - delta_h)
    solvent_df['RED'] = solvent_df['RED2'].apply(np.sqrt)
    solvent_df = solvent_df.sort_values(by='RED', ascending=False)
    # Limit output to top 5
    results_list = solvent_df.head().fillna('').to_dict(orient='records')
    return jsonify(results_list)
def log(msg, *args, dialog=False, error=False, **kwargs):
    """
    Generate a message to the console and optionally as either a message or
    error dialog. The message will be formatted and dedented before being
    displayed, and will be prefixed with its origin.

    :param msg: format string, expanded via str.format with *args/**kwargs
    :param dialog: when True, also show the message in a message dialog
    :param error: when True, show an error dialog instead; note only the
        bare "remote_build:" prefix reaches the console in this branch
    """
    msg = textwrap.dedent(msg.format(*args, **kwargs)).strip()
    if error:
        print("remote_build:")
        return sublime.error_message(msg)
    # Prefix every line so multi-line messages stay attributable.
    for line in msg.splitlines():
        print("remote_build: {msg}".format(msg=line))
    if dialog:
        sublime.message_dialog(msg)
def write2file(filename, NPCorpsList, NPCorps, names):
    """Write the loyalty-point (LP) exchange results to a file.

    Value-per-LP formulas:
        (jita price * quantity - isk cost) / lp cost
    and, for offers that also consume input items:
        (jita price * quantity - isk cost - sum(input price_i * qty_i)) / lp cost

    Output columns (tab separated):
    corp name - item - isk cost - lp cost - quantity - jita buy - jita sell -
    required inputs - inputs isk total - buy value/LP - sell value/LP -
    net value/LP after buying inputs

    Args:
        filename: output file name
        NPCorpsList: list of NPC corporation ids whose LP stores to dump
        NPCorps: dict of NPC corporation info, keyed by corporation id
        names: dict of item info (name + jita prices), keyed by type id
    Returns:
        None
    """
    with open(filename, "w+") as f:
        for NPCorpID in NPCorpsList:
            CorpInfo = NPCorps['{}'.format(NPCorpID)]["info"]
            LPStore = NPCorps['{}'.format(NPCorpID)]["lp_store"]
            for i, item in enumerate(LPStore):
                # Cache the localized item name on the offer entry.
                LPStore[i]["CHName"] =\
                    names["{}".format(item["type_id"])]["name"]
                f.write("{}\t{}\t{}\t{}\t{}\t".format(
                    CorpInfo["corporation_name"],
                    LPStore[i]["CHName"],
                    item["isk_cost"],
                    item["lp_cost"],
                    item["quantity"]
                ))
                jitaValue = names["{}".format(item["type_id"])]["jita"]
                f.write("{}\t{}\t".format(
                    jitaValue["buy"]["max"],
                    jitaValue["sell"]["min"]
                ))
                # Price the required input items at Jita sell.
                required_items = item["required_items"]
                required_items_name_list = []
                total_isk = 0
                for required_item in required_items:
                    rid = required_item["type_id"]
                    rqu = required_item["quantity"]
                    ritem = names["{}".format(rid)]
                    required_items_name_list.append(
                        "{}*{}".format(ritem["name"], rqu))
                    isk_cost = rqu * ritem["jita"]["sell"]["min"]
                    # Missing/zero market price: treat the input as
                    # prohibitively expensive so the offer sorts last.
                    if 0 >= isk_cost:
                        isk_cost = 9999999999999
                    total_isk += isk_cost
                # Guard against free offers (lp_cost == 0) to avoid
                # a ZeroDivisionError.
                if item["lp_cost"] > 0:
                    buyLP = \
                        ((jitaValue["buy"]["max"] * item["quantity"])
                         - item["isk_cost"]) / item["lp_cost"]
                    sellLP = \
                        ((jitaValue["sell"]["min"] * item["quantity"])
                         - item["isk_cost"]) / item["lp_cost"]
                    realLP = \
                        ((jitaValue["sell"]["min"] * item["quantity"])
                         - item["isk_cost"] - total_isk) / item["lp_cost"]
                else:
                    buyLP = 0
                    sellLP = 0
                    realLP = 0
                f.write("{}\t{}\t".format(
                    str(required_items_name_list), total_isk))
                f.write("{}\t{}\t{}\n".format(buyLP, sellLP, realLP))
async def async_test_matrix_inverse(testcase: Tuple[Matrix, Any]) -> None:
    """
    Test that checks whether the secure application of the matrix inverse returns the same result
    as the regular matrix inverse up to a certain margin.

    :param testcase: tuple of a matrix and its correct inverse
    """
    matrix, correct_matrix_inverse = testcase
    # Fixed-point precision for the MPC computation.
    bit_length = 32
    frac_length = 16
    await mpc.start()
    secfxp: Type[SecureFixedPoint] = mpc.SecFxp(l=bit_length, f=frac_length)
    # Secret-share the matrix, with party 0 providing the input.
    secure_matrix: SecureFixedPointMatrix = [[secfxp(x) for x in row] for row in matrix]
    secure_matrix = [mpc.input(row, 0) for row in secure_matrix]
    # noinspection PyTypeChecker
    secure_inverse: SecureFixedPointMatrix = matrix_inverse(secure_matrix)
    inverse: List[List[float]] = [await mpc.output(row) for row in secure_inverse]
    # X * Xinv should be close to the identity; computed for diagnostics.
    secure_checker = mpc.matrix_prod(secure_matrix, secure_inverse)
    checker: List[List[float]] = [await mpc.output(row) for row in secure_checker]
    # Relative error only where the reference entry is nonzero.
    diff = array(correct_matrix_inverse) - array(inverse)
    rel_diff = divide(
        diff,
        array(correct_matrix_inverse),
        out=zeros_like(diff),
        where=array(correct_matrix_inverse) != 0,
    )
    await mpc.shutdown()
    max_abs_diff = abs_(diff).max()
    max_rel_diff = abs_(rel_diff).max()
    print(f"X = \n{array(matrix)}\n")
    print(f"Xinv = \n{array(correct_matrix_inverse)}\n")
    print(f"Xinv_mpc = \n{array(inverse)}\n")
    print(f"X * Xinv_mpc = \n{array(checker)}\n")
    print(f"max absolute diff = {max_abs_diff}")
    print(f"max relative diff (nonzero entries) = {max_rel_diff}")
    # Loose tolerances: fixed-point truncation error accumulates.
    assert max_abs_diff < 0.05 and max_rel_diff < 0.5
def test_individual_file_convenience():
    """Accessing individual file via om.read works"""
    # A known key returns a non-empty payload...
    data = om.read(path=persist, channel='testing', key='file1')
    assert_false(data == {})
    # ...and a missing key yields None instead of raising.
    data = om.read(persist, 'testing', 'NON_EXISTANT')
    assert_equals(data, None)
def build_frustum_lineset(K, l, t, r, b):
    """Build an open3d.geometry.LineSet representing a camera frustum.

    Args:
        K: 3x3 camera intrinsics used by `unproject`.
        l, t, r, b: left/top/right/bottom pixel bounds of the image region
            (r and b are exclusive: the corners use r - 1 and b - 1).

    Returns:
        line_set (open3d.geometry.LineSet): four green line segments from
        the camera origin through the region corners, extended to length 100.
    """
    # Corner pixels of the region, clockwise from top-left.
    corners = np.asarray([(l, t), (r - 1, t), (r - 1, b - 1), (l, b - 1)], dtype=np.float32)
    rays = unproject(K, corners)
    # Normalize so scaling by 100 gives rays of equal length.
    rays /= np.linalg.norm(rays, axis=1)[:, None]
    # Each segment uses its own pair of points: (2i, 2i+1).
    line_idx = [[i * 2 + 0, i * 2 + 1] for i in range(4)]
    line_pts = []
    for ray in rays:
        line_pts.extend([[0, 0, 0], (ray * 100).tolist()])
    line_set = o3d.geometry.LineSet()
    line_set.points = o3d.utility.Vector3dVector(line_pts)
    # Solid green for all 8 points.
    colors = np.zeros((8, 3), dtype=np.uint8)
    colors[:, 1] = 255
    line_set.colors = o3d.utility.Vector3dVector(colors)
    line_set.lines = o3d.utility.Vector2iVector(line_idx)
    return line_set
def _config_file_exists():
    """
    Checks if the configuration file exists.

    :return: Returns True if the configuration file exists and False otherwise.
    :rtype: bool
    """
    # isfile already yields the boolean we need; no if/else required.
    return os.path.isfile(DEFAULT_CONFIG_FILE)
def postagsget(sent):
    """Return the POS tags of a sentence as a space-separated string.

    :param sent: Sentence as string
    :return: each token's tag followed by a space (trailing space kept for
        backward compatibility); "" for an empty sentence
    """
    # pos_tag yields (word, tag) pairs; join once instead of the original
    # quadratic `string +=` loop.  sent.split() already returns a list.
    tagged = pos_tag(sent.split())
    return "".join("{} ".format(tag) for _, tag in tagged)
def try_convert_to_list_of_numbers(transform_params):
    """
    Args:
        transform_params: a dict mapping transform parameter names to values

    This function tries to convert each parameter value to a list of numbers.
    If that fails, then it tries to convert the value to a number.
    For example, if transform_params = {'scale':'0.16 1', size='256'}, this will become
    {'scale':[0.16, 1], 'size': 256}.

    Note: the dict is modified in place and also returned.
    """
    for k, v in transform_params.items():
        try:
            # Assume a whitespace-separated string of numbers...
            v = [string_to_num(x) for x in v.split(" ")]
            if len(v) == 1:
                # A single entry is unwrapped to a scalar.
                v = v[0]
        except AttributeError:
            # ...otherwise v has no .split(), i.e. it is already a scalar;
            # convert it directly.
            v = string_to_num(v)
        transform_params[k] = v
    return transform_params
def from_tensorflow(graph):
    """Convert a tensorflow GraphDef (python graph object) into an nnvm graph.

    Companion parameters are extracted automatically.

    Parameters
    ----------
    graph : GraphDef object
        Tensorflow GraphDef

    Returns
    -------
    sym : nnvm.Symbol
        Compatible nnvm symbol
    params : dict of str to tvm.ndarray
        Dict of converted parameters stored in tvm.ndarray format
    """
    converter = GraphProto()
    # GraphProto.from_tensorflow already returns the (sym, params) pair.
    return converter.from_tensorflow(graph)
def isequal(q1, q2, tol=100, unitq=False):
    """
    Test if quaternions are equal

    :param q1: quaternion
    :type q1: array_like(4)
    :param q2: quaternion
    :type q2: array_like(4)
    :param unitq: quaternions are unit quaternions
    :type unitq: bool
    :param tol: tolerance in units of eps, default 100
    :type tol: float
    :return: whether quaternions are equal
    :rtype: bool

    Tests if two quaternions are equal.

    For unit-quaternions ``unitq=True`` the double mapping is taken into account,
    that is ``q`` and ``-q`` represent the same orientation and ``isequal(q, -q, unitq=True)`` will
    return ``True``.

    .. runblock:: pycon

        >>> from spatialmath.base import isequal
        >>> q1 = [1, 2, 3, 4]
        >>> q2 = [-1, -2, -3, -4]
        >>> isequal(q1, q2)
        >>> isequal(q1, q2, unitq=True)
    """
    # Validate/coerce both arguments to 4-vectors.
    q1 = base.getvector(q1, 4)
    q2 = base.getvector(q2, 4)
    if unitq:
        # q and -q encode the same rotation, so accept either sign
        # (compare against both q2 and -q2 within tolerance).
        return (np.sum(np.abs(q1 - q2)) < tol * _eps) or (np.sum(np.abs(q1 + q2)) < tol * _eps)
    else:
        # L1 distance within tol machine epsilons.
        return np.sum(np.abs(q1 - q2)) < tol * _eps
def init_tables():
    """Init the data tables.

    Ensures ``base_config.data.table_dir`` exists, creating intermediate
    directories as needed.
    """
    # exist_ok avoids the check-then-create race of the original
    # isdir() + makedirs() pair.
    os.makedirs(base_config.data.table_dir, exist_ok=True)
def MMOE(dnn_feature_columns, num_tasks, task_types, task_names, num_experts=4,
         expert_dnn_units=[32,32], gate_dnn_units=[16,16], tower_dnn_units_lists=[[16,8],[16,8]],
         l2_reg_embedding=1e-5, l2_reg_dnn=0, seed=1024, dnn_dropout=0, dnn_activation='relu', dnn_use_bn=False):
    """Instantiates the Multi-gate Mixture-of-Experts multi-task learning architecture.

    NOTE(review): the list defaults are mutable default arguments; they are
    not mutated here, but callers should not rely on their identity.

    :param dnn_feature_columns: An iterable containing all the features used by deep part of the model.
    :param num_tasks: integer, number of tasks, equal to number of outputs, must be greater than 1.
    :param task_types: list of str, indicating the loss of each tasks, ``"binary"`` for binary logloss, ``"regression"`` for regression loss. e.g. ['binary', 'regression']
    :param task_names: list of str, indicating the predict target of each tasks
    :param num_experts: integer, number of experts.
    :param expert_dnn_units: list, list of positive integer, its length must be greater than 1, the layer number and units in each layer of expert DNN
    :param gate_dnn_units: list, list of positive integer, its length must be greater than 1, the layer number and units in each layer of gate DNN
    :param tower_dnn_units_lists: list, list of positive integer list, its length must be euqal to num_tasks, the layer number and units in each layer of task-specific DNN
    :param l2_reg_embedding: float. L2 regularizer strength applied to embedding vector
    :param l2_reg_dnn: float. L2 regularizer strength applied to DNN
    :param seed: integer ,to use as random seed.
    :param dnn_dropout: float in [0,1), the probability we will drop out a given DNN coordinate.
    :param dnn_activation: Activation function to use in DNN
    :param dnn_use_bn: bool. Whether use BatchNormalization before activation or not in DNN
    :return: a Keras model instance
    """
    # ---- Argument validation -------------------------------------------
    if num_tasks <= 1:
        raise ValueError("num_tasks must be greater than 1")
    if len(task_types) != num_tasks:
        raise ValueError("num_tasks must be equal to the length of task_types")
    for task_type in task_types:
        if task_type not in ['binary', 'regression']:
            raise ValueError("task must be binary or regression, {} is illegal".format(task_type))
    if num_tasks != len(tower_dnn_units_lists):
        raise ValueError("the length of tower_dnn_units_lists must be euqal to num_tasks")
    # ---- Shared input layer --------------------------------------------
    features = build_input_features(dnn_feature_columns)
    inputs_list = list(features.values())
    sparse_embedding_list, dense_value_list = input_from_feature_columns(features, dnn_feature_columns,
                                                                        l2_reg_embedding, seed)
    dnn_input = combined_dnn_input(sparse_embedding_list, dense_value_list)
    #build expert layer
    expert_outs = []
    for i in range(num_experts):
        expert_network = DNN(expert_dnn_units, dnn_activation, l2_reg_dnn, dnn_dropout, dnn_use_bn, seed=seed, name='expert_'+str(i))(dnn_input)
        expert_outs.append(expert_network)
    expert_concat = tf.keras.layers.concatenate(expert_outs, axis=1, name='expert_concat')
    expert_concat = tf.keras.layers.Reshape([num_experts, expert_dnn_units[-1]], name='expert_reshape')(expert_concat) #(num_experts, output dim of expert_network)
    mmoe_outs = []
    for i in range(num_tasks): #one mmoe layer: nums_tasks = num_gates
        #build gate layers: a softmax over experts, one gate per task
        gate_network = DNN(gate_dnn_units, dnn_activation, l2_reg_dnn, dnn_dropout, dnn_use_bn, seed=seed, name='gate_'+task_names[i])(dnn_input)
        gate_out = tf.keras.layers.Dense(num_experts, use_bias=False, activation='softmax', name='gate_softmax_'+task_names[i])(gate_network)
        gate_out = tf.tile(tf.expand_dims(gate_out, axis=-1), [1, 1, expert_dnn_units[-1]]) #let the shape of gate_out be (num_experts, output dim of expert_network)
        #gate multiply the expert
        gate_mul_expert = tf.keras.layers.Multiply(name='gate_mul_expert_'+task_names[i])([expert_concat, gate_out])
        gate_mul_expert = tf.math.reduce_sum(gate_mul_expert, axis=1) #sum pooling in the expert ndim
        mmoe_outs.append(gate_mul_expert)
    # ---- Per-task towers and prediction heads --------------------------
    task_outs = []
    for task_type, task_name, tower_dnn, mmoe_out in zip(task_types, task_names, tower_dnn_units_lists, mmoe_outs):
        #build tower layer
        tower_output = DNN(tower_dnn, dnn_activation, l2_reg_dnn, dnn_dropout, dnn_use_bn, seed=seed, name='tower_'+task_name)(mmoe_out)
        logit = tf.keras.layers.Dense(1, use_bias=False, activation=None)(tower_output)
        output = PredictionLayer(task_type, name=task_name)(logit)
        task_outs.append(output)
    model = tf.keras.models.Model(inputs=inputs_list, outputs=task_outs)
    return model
def greater(self, other):
    """ Equivalent to the > operator.

    :param other: the value or property to compare this one against
    :return: a `_PropOpB` comparison node built from ``numpy.greater``
        with ``numpy.uint8`` as the result dtype
    """
    return _PropOpB(self, other, numpy.greater, numpy.uint8)
def load_translation_data(dataset, src_lang='en', tgt_lang='vi'):
    """Load translation dataset

    Processed splits are cached on disk under a prefix keyed by the
    language pair and the ``src_max_len``/``tgt_max_len`` CLI args, and
    reused on subsequent calls.

    Parameters
    ----------
    dataset : str
        Dataset name; only 'IWSLT2015' is supported.
    src_lang : str, default 'en'
    tgt_lang : str, default 'vi'

    Returns
    -------
    tuple
        (train, val, test) processed datasets, raw target sentences of the
        val and test splits, and the source/target vocabularies.
    """
    common_prefix = 'IWSLT2015_{}_{}_{}_{}'.format(src_lang, tgt_lang,
                                                   args.src_max_len, args.tgt_max_len)
    if dataset == 'IWSLT2015':
        data_train = IWSLT2015('train', src_lang=src_lang, tgt_lang=tgt_lang)
        data_val = IWSLT2015('val', src_lang=src_lang, tgt_lang=tgt_lang)
        data_test = IWSLT2015('test', src_lang=src_lang, tgt_lang=tgt_lang)
    else:
        raise NotImplementedError
    src_vocab, tgt_vocab = data_train.src_vocab, data_train.tgt_vocab
    # Only the training split is clipped to the max lengths; val/test keep
    # full-length sentences.
    data_train_processed = load_cached_dataset(common_prefix + '_train')
    if not data_train_processed:
        data_train_processed = process_dataset(data_train, src_vocab, tgt_vocab,
                                               args.src_max_len, args.tgt_max_len)
        cache_dataset(data_train_processed, common_prefix + '_train')
    data_val_processed = load_cached_dataset(common_prefix + '_val')
    if not data_val_processed:
        data_val_processed = process_dataset(data_val, src_vocab, tgt_vocab)
        cache_dataset(data_val_processed, common_prefix + '_val')
    data_test_processed = load_cached_dataset(common_prefix + '_test')
    if not data_test_processed:
        data_test_processed = process_dataset(data_test, src_vocab, tgt_vocab)
        cache_dataset(data_test_processed, common_prefix + '_test')
    # Raw (tokenized) target sentences, used as BLEU references.
    fetch_tgt_sentence = lambda src, tgt: tgt.split()
    val_tgt_sentences = list(data_val.transform(fetch_tgt_sentence))
    test_tgt_sentences = list(data_test.transform(fetch_tgt_sentence))
    return data_train_processed, data_val_processed, data_test_processed, \
           val_tgt_sentences, test_tgt_sentences, src_vocab, tgt_vocab
def objective_function(decision_variables, root_model, mode="by_age", country=Region.UNITED_KINGDOM, config=0,
                       calibrated_params={}):
    """
    :param decision_variables: dictionary containing
        - mixing multipliers by age as a list if mode == "by_age" OR
        - location multipliers as a list if mode == "by_location"
    :param root_model: integrated model supposed to model the past epidemic
    :param mode: either "by_age" or "by_location"
    :param country: the country name
    :param config: the id of the configuration being considered
    :param calibrated_params: a dictionary containing a set of calibrated parameters
    :return: tuple (herd_immunity, total_nb_deaths, years_of_life_lost,
        prop_immune, models) where models is [root_model, scenario_1.model]
    """
    # NOTE(review): `calibrated_params={}` is a shared mutable default; it
    # is only read here, but callers should not mutate the returned dict.
    running_model = RegionApp(country)
    build_model = running_model.build_model
    params = copy.deepcopy(running_model.params)
    # reformat decision vars if locations
    if mode == "by_location":
        new_decision_variables = {
            "other_locations": decision_variables[0],
            "school": decision_variables[1],
            "work": decision_variables[2]
        }
        decision_variables = new_decision_variables
    # Define scenario-1-specific params
    sc_1_params_update = build_params_for_phases_2_and_3(decision_variables, config, mode)
    # Rebuild the default parameters
    params["default"].update(opti_params["default"])
    params["default"] = update_params(params['default'], calibrated_params)
    params['scenario_start_time'] = PHASE_2_START_TIME - 1
    # Create scenario 1
    sc_1_params = update_params(params['default'], sc_1_params_update)
    params["scenarios"][1] = sc_1_params
    scenario_1 = Scenario(build_model, idx=1, params=params)
    # Run scenario 1, continuing from the already-integrated root model.
    scenario_1.run(base_model=root_model)
    models = [root_model, scenario_1.model]
    #____________________________ Perform diagnostics ______________________
    # How many deaths and years of life lost during Phase 2 and 3
    start_phase2_index = models[1].derived_outputs["times"].index(PHASE_2_START_TIME)
    end_phase2_index = models[1].derived_outputs["times"].index(phase_2_end[config])
    total_nb_deaths = sum(models[1].derived_outputs["infection_deathsXall"][start_phase2_index:])
    years_of_life_lost = sum(models[1].derived_outputs["years_of_life_lost"][start_phase2_index:])
    # What proportion immune at end of Phase 2
    recovered_indices = [
        i
        for i in range(len(models[1].compartment_names))
        if "recovered" in models[1].compartment_names[i]
    ]
    nb_reco = sum([models[1].outputs[end_phase2_index, i] for i in recovered_indices])
    total_pop = sum([models[1].outputs[end_phase2_index, i] for i in range(len(models[1].compartment_names))])
    prop_immune = nb_reco / total_pop
    # Has herd immunity been reached?
    herd_immunity = has_immunity_been_reached(models[1], end_phase2_index)
    return herd_immunity, total_nb_deaths, years_of_life_lost, prop_immune, models
def kick(code, input):
    """ kick <user> [reason] - Kicks a user from the current channel, with a reason if supplied. """
    words = input.group(2).split()
    if len(words) > 1:
        target, reason = words[0], ' '.join(words[1:])
    else:
        # Only a nick was given: no explicit reason.
        target, reason = input.group(2), False
    if not reason:
        reason = kick_reason()
    if target == code.nick:
        # Refuse to kick ourselves.
        return code.say('...')
    return code.write(['KICK', input.sender, target], reason)
def check_invalid(string, *invalids, defaults=True):
    """Check whether *string* matches an invalid-data marker.

    :param string: the value to test
    :param invalids: extra caller-supplied invalid values
    :param defaults: when truthy, also test against the built-in markers
        ('INC', 'inc', 'incomplete', 'NaN', 'nan', 'N/A', 'n/a', 'missing')
    :return: True if *string* equals any invalid value, else False
    """
    # Membership tests replace the original manual equality loops.
    if string in invalids:
        return True
    if defaults:
        default_invalids = ('INC', 'inc', 'incomplete', 'NaN', 'nan',
                            'N/A', 'n/a', 'missing')
        return string in default_invalids
    # Valid string.
    return False
def drawline(betaset,df1_distinct,j,x_axis,y_axis,linecolor):
    """Draw a line between points from different beta sets that share the same ori-dest coordination

    :param betaset: beta-set id used in the '<axis>_set<betaset>' column names
    :param df1_distinct: frame with '<axis>_base' and '<axis>_set<n>' columns
    :param j: row index of the origin-destination pair to connect
    :param x_axis: column-name prefix for the x axis
    :param y_axis: column-name prefix for the y axis
    :param linecolor: matplotlib color for the dashed connector
    """
    # Dashed segment from the base-scenario point to the beta-set point
    # (tiny marker size effectively hides the endpoint markers).
    plt.plot([df1_distinct['{}_base'.format(x_axis)][j],df1_distinct['{}_set{}'.format(x_axis,betaset)][j]],
             [df1_distinct['{}_base'.format(y_axis)][j],df1_distinct['{}_set{}'.format(y_axis,betaset)][j]]
             ,color=linecolor,linestyle='--',linewidth=0.4,marker='.',markersize=0.000001)
    return
def sse_content(response, handler, **sse_kwargs):
    """
    Callback to collect the Server-Sent Events content of a response. Callbacks
    passed will receive event data.

    :param response:
        The response from the SSE request.
    :param handler:
        The handler for the SSE protocol.
    :return: the first element of ``_sse_content_with_protocol``'s result —
        presumably a deferred that fires when the stream finishes; confirm
        against that helper's definition.
    """
    # An SSE response must be 200/OK and have content-type 'text/event-stream'
    raise_for_not_ok_status(response)
    raise_for_header(response, 'Content-Type', 'text/event-stream')
    finished, _ = _sse_content_with_protocol(response, handler, **sse_kwargs)
    return finished
def test_module(client: Client) -> str:
    """Tests API connectivity and authentication'

    Returning 'ok' indicates that the integration works like it is supposed to.
    Connection to the service is successful.
    Raises exceptions if something goes wrong.

    :type client: ``Client``
    :param client: client to use

    :return: 'ok' if test passed, anything else will fail the test.
    :rtype: ``str``
    """
    # Any connectivity/authentication failure propagates as an exception.
    client.check_auth()
    return "ok"
def get_more_spec_pos(tokens):
    """Return frequencies for more specific POS (STTS tagset).

    Also marks possessive and reflexive pronoun tokens in place by
    appending a label to ``token.pos_color``.

    :param tokens: iterable of token objects with ``full_pos`` (STTS tag),
        ``function`` (syntactic function label) and ``pos_color`` (list)
    :return: 28-tuple of counts, in the original column order
    """
    # adverbs and preps, particles
    adverbs = [t for t in tokens if t.full_pos == 'ADV']
    apprart = [t for t in tokens if t.full_pos == 'APPRART']
    postpos = [t for t in tokens if t.full_pos == 'APPO']
    circum_pos = [t for t in tokens if t.full_pos == 'APZR']
    compare_conj = [t for t in tokens if t.full_pos == 'KOKOM']
    # foreign words, interjections
    fremds = [t for t in tokens if t.full_pos == 'FM']
    interj = [t for t in tokens if t.full_pos == 'ITJ']
    # proper names and adjectives
    prop_name = [t for t in tokens if t.full_pos == 'NE']
    adja = [t for t in tokens if t.full_pos.startswith('ADJA')]
    # BUGFIX: previously tested 'ADJA' again, so predicative adjectives
    # (ADJD) were counted as attributive ones.
    adjd = [t for t in tokens if t.full_pos.startswith('ADJD')]
    # pronouns
    dem_pro_s = [t for t in tokens if t.full_pos == 'PDS']
    dem_pro_a = [t for t in tokens if t.full_pos == 'PDAT']
    ind_pro_s = [t for t in tokens if t.full_pos == 'PIS']
    ind_pro_a = [t for t in tokens if t.full_pos in ['PIAT','PIDAT']]
    pers_pron = [t for t in tokens if t.full_pos == 'PPER']
    poss_s = [t for t in tokens if t.full_pos == 'PPOSS']
    poss_a = [t for t in tokens if t.full_pos == 'PPOSAT']
    refl_pron = [t for t in tokens if t.full_pos == 'PRF']
    # BUGFIX: previously compared full_pos to a list with '==' (always
    # False), so interrogative pronouns were never counted.
    inter_pron = [t for t in tokens if t.full_pos in ['PWS','PWAT','PWAV']]
    all_prons = dem_pro_s+dem_pro_a+ind_pro_s+ind_pro_a+poss_s+poss_a+refl_pron+inter_pron
    # compartives, punctuation
    comp = [t for t in tokens if t.full_pos == 'TRUNC']
    sent_int_interpunct = [t for t in tokens if t.full_pos == '$(']
    # pronom adverbs and others
    pro_adv = [t for t in tokens if t.full_pos == 'PROAV' and t.function == 'pp']
    part_kvz = [t for t in tokens if t.full_pos == 'PTKVZ' and t.function == 'avz']
    # NOTE(review): 'VVIZU' is an STTS POS tag, not a function label; this
    # condition looks suspicious but is kept as-is -- confirm intent.
    inf_with_zu = [t for t in tokens if t.full_pos == 'PTKVZ' and t.function == 'VVIZU']
    # Side effect: colour-tag possessive and reflexive pronouns.
    for t in poss_s+poss_a:
        t.pos_color.append('Poss pronouns')
    for t in refl_pron:
        t.pos_color.append('Refl pronouns')
    return (len(adverbs), len(apprart), len(postpos), len(circum_pos), len(fremds), len(interj), \
            len(prop_name), len(adja), len(adjd),
            len(dem_pro_s), len(dem_pro_a), len(dem_pro_s)+len(dem_pro_a), len(ind_pro_s), len(ind_pro_a), \
            len(ind_pro_s)+len(ind_pro_a),
            len(pers_pron), len(poss_s), len(poss_a), len(poss_s)+len(poss_a), len(refl_pron), \
            len(inter_pron), len(comp),
            len(sent_int_interpunct), len(pro_adv), len(part_kvz), len(compare_conj), \
            len(inf_with_zu), len(all_prons))
def build_first_db():
    """
    Drop and recreate all tables, then populate the database with demo data.

    Seeds: five roles, two users, four IPs, four apps, four projects,
    two devices and two external-platform credential records, committed
    in a single transaction at the end.

    WARNING: this destroys all existing data (``db.drop_all()``).
    Intended for development/demo bootstrapping only.
    """
    # Recreate the schema from scratch.
    db.drop_all()
    db.create_all()
    # --- Roles -----------------------------------------------------------
    anonymous = Role(name = u'Anonymous', description = u'匿名用户')
    admin = Role(name = u'Admin', description = u'管理员')
    develop = Role(name = 'Develop', description = u'开发人员')
    test = Role(name = 'Test', description = u'测试人员')
    ops = Role(name = 'Ops', description = u'运维人员')
    # --- Users -----------------------------------------------------------
    # NOTE(review): demo credentials only — password equals login name.
    admin_user = User(real_name = u'admin',
                 email = u'admin@13322.com',
                 login=u"admin",
                 password=generate_password_hash(u"admin"),
                 roles=[admin]
                 )
    anonymous_user = User(real_name = u'anonymous',
                 email = u'anonymous@13322.com',
                 login=u"anonymous",
                 password=generate_password_hash(u"anonymous"),
                 roles=[anonymous]
                 )
    # --- IP addresses ----------------------------------------------------
    ip1 = Ip(isp = u'电信',
                 use = u'在用',
                 ip=u"1.1.1.1",
                 mask=(u"255.255.255.0"),
                 mac=(u"44a8-422a-20ff"),
                 route=(u"1.1.1.254"),
                 switch_port=(u"5F-U09 G1/0/32"),
                 )
    ip2 = Ip(isp = u'电信',
                 use = u'在用',
                 ip=u"1.1.1.2",
                 mask=(u"255.255.255.0"),
                 mac=(u"44a8-422a-20ff"),
                 route=(u"1.1.1.254"),
                 switch_port=(u"5F-U09 G1/0/32"),
                 )
    ip3 = Ip(isp = u'内网',
                 use = u'在用',
                 ip=u"1.1.1.3",
                 mask=(u"255.255.255.0"),
                 mac=(u"44a8-422a-20ff"),
                 route=(u"1.1.1.254"),
                 switch_port=(u"5F-U09 G1/0/32"),
                 )
    ip4 = Ip(isp = u'联通',
                 use = u'在用',
                 ip=u"1.1.1.4",
                 mask=(u"255.255.255.0"),
                 mac=(u"44a8-422a-20ff"),
                 route=(u"1.1.1.254"),
                 switch_port=(u"5F-U09 G1/0/32"),
                 )
    # --- Applications ----------------------------------------------------
    app1 = App(app = u'kf_scsa',
                 description=u"客服我也不知道",
                 ps=(u"没什么事"),
                 )
    app2 = App(app = u'gamemanager',
                 description=u"游戏我也不知道",
                 ps=(u"没什么事"),
                 )
    app3 = App(app = u'webPlatform',
                 description=u"公共我也不知道",
                 ps=(u"没什么事"),
                 )
    app4 = App(app = u'wechat-server2',
                 description=u"wx我也不知道",
                 ps=(u"没什么事"),
                 )
    # --- Projects (each linked to one app) -------------------------------
    project1 = Project(project = u'体彩项目', apps = [app1])
    project2 = Project(project = u'福彩项目', apps = [app2])
    project3 = Project(project = u'公共平台项目', apps = [app3])
    project4 = Project(project = u'客服系统项目', apps = [app4])
    # --- Devices (linked to IPs and apps) --------------------------------
    device1 = Device(device_num = u'02-1331',
                 device_name = u'5F-U10',
                 idc=u"东莞",
                 location=(u"5F-U10"),
                 hardware_type=(u"DELL-2U"),
                 brand=(u"DELL"),
                 fast_repair_code=(u"没什么事"),
                 cpu=(u"没什么事"),
                 memory=(u"没什么事"),
                 disk=(u"没什么事"),
                 ips=[ip1],
                 apps = [app1],
                 )
    device2 = Device(device_num = u'02-1331',
                 device_name = u'5F-U12',
                 idc=u"东莞",
                 location=(u"5F-U10"),
                 hardware_type=(u"DELL-2U"),
                 brand=(u"DELL"),
                 fast_repair_code=(u"没什么事"),
                 cpu=(u"没什么事"),
                 memory=(u"没什么事"),
                 disk=(u"没什么事"),
                 ips=[ip2],
                 apps = [app2],
                 )
    # --- External platform credentials -----------------------------------
    platforms_info1 = Platforms_info(platform = u'阿里云管理控制台',
                         description = u'申请云服务器及域名解析',
                         url=u"http://www.aliyun.com/",
                         username=u"hhlyadmin",
                         password=(u"hhlyadmin"),
                         ps=(u"登陆进入后,依次点击:\
                                订单管理-我的租用-最后面详细\
                                下方图标-进入之后\
                                点击IP即可查看流量图"
                            ),
                         )
    platforms_info2 = Platforms_info(platform = u'DNS盾',
                         description = u'13322.com域名A记录解析网站',
                         url=u"http://www.dnsdun.com",
                         username=u"hhlyadmin@13322.com",
                         password=(u"hhlyadmin@13322.com"),
                         ps=(u"登陆进入后"
                            ),
                         )
    # Stage everything and commit once.
    db.session.add(anonymous)
    db.session.add(admin)
    db.session.add(develop)
    db.session.add(test)
    db.session.add(ops)
    db.session.add(admin_user)
    db.session.add(anonymous_user)
    db.session.add(ip1)
    db.session.add(ip2)
    db.session.add(ip3)
    db.session.add(ip4)
    db.session.add(app1)
    db.session.add(app2)
    db.session.add(app3)
    db.session.add(app4)
    db.session.add(project1)
    db.session.add(project2)
    db.session.add(project3)
    db.session.add(project4)
    db.session.add(device1)
    db.session.add(device2)
    db.session.add(platforms_info1)
    db.session.add(platforms_info2)
    db.session.commit()
    return
def reconcile_suggest_property(prefix: str = ""):
    """Given a search prefix, return all the type/schema properties which match
    the given text. This is used to auto-complete property selection for detail
    filters in OpenRefine.

    Only properties of schemata derived from the base schema are considered;
    hidden properties and entity-typed properties are excluded.
    """
    matches = []
    for prop in model.properties:
        if not prop.schema.is_a(settings.BASE_SCHEMA):
            continue
        # Fixed duplicated chained comparison (`prop.type == prop.type == ...`);
        # behavior is unchanged, the redundant term was a typo.
        if prop.hidden or prop.type == registry.entity:
            continue
        if match_prefix(prefix, prop.name, prop.label):
            matches.append(get_freebase_property(prop))
    return {
        "code": "/api/status/ok",
        "status": "200 OK",
        "prefix": prefix,
        "result": matches,
    }
def make_boxes(df_data, category, size_factor, x, y, height, width, pad=(1, 1), main_cat=None):
    """Generate squarified treemap box coordinates for each value of *category*.

    Args:
        df_data: Input DataFrame.
        category: Column to group by; one box is produced per group.
        size_factor: Column whose per-group sum determines each box's area.
        x, y: Origin of the layout rectangle.
        height, width: Dimensions of the layout rectangle.
        pad: Two-element padding passed through to ``pad_rect`` for every box.
            Default changed from the mutable list ``[1, 1]`` to a tuple to
            avoid the shared-mutable-default pitfall; callers may still pass
            a list.
        main_cat: Optional parent-category label stored in a 'cat' column.

    Returns:
        DataFrame indexed by category, sorted by size descending, with
        'value', 'norm' and padded 'rect' columns (plus 'cat' if *main_cat*).
    """
    totals = df_data[size_factor].groupby(df_data[category]).sum()
    box_list = totals.sort_values(ascending=False).to_frame()
    box_list.columns = ['value']
    if main_cat:
        box_list['cat'] = main_cat
    box_list['norm'] = sq.normalize_sizes(box_list.value, width, height)
    box_list['rect'] = sq.squarify(box_list.norm, x, y, width, height)
    box_list['rect'] = box_list.apply(lambda row: pad_rect(row['rect'], pad), axis=1)
    return box_list
def hash_codeobj(code):
    """Return a stable joblib hash of a code object.

    Nested code objects found in ``co_consts`` are hashed recursively, so
    the result depends only on bytecode and (recursively hashed) constants.
    """
    hashed_consts = [
        hash_codeobj(const) if isinstance(const, types.CodeType) else const
        for const in code.co_consts
    ]
    return joblib.hash((code.co_code, hashed_consts))
def us_census():
    """Data source for the US census.

    Arguments:
        None
    Returns:
        pandas.DataFrame: the connector output run through the formatter.
    """
    raw = us_census_connector()
    return us_census_formatter(raw)
def get_notebook_path(same_config_path, same_config_file_contents) -> str:
    """Return the notebook path resolved relative to the SAME config file's directory."""
    config_dir = Path(same_config_path).parent
    notebook_rel_path = same_config_file_contents["notebook"]["path"]
    return str(config_dir / notebook_rel_path)
def get_path_from_dependency(
        recipe_dependency_value: str,
        recipe_base_folder_path: str
) -> str:
    """ Searches the base folder for a file, that corresponds to the dependency passed.
    :param recipe_dependency_value:     Value of the "From:" section from a
                                        recipe file, used by singularity
                                        to find the base image.
    :param recipe_base_folder_path:     Full path of the base folder,
                                        containing all recipes.
    :returns:   Full path to the parent recipe or
                an empty string '' if it is not a local
                dependency.
    """
    if not is_own_dependency(recipe_dependency_value):
        return ''
    _dependency_value_regex = re.compile(
        r'^(?:.*?\/)?'              # Match possible host address and ignore it
        r'(?P<collection>.+?)\/'    # Match collection
        r'(?P<container>.+?)'       # Match container/image name
        r'(?::(?P<version>.*?))?$'  # Match possible version Tag
    )
    _filename_components = re.search(_dependency_value_regex, recipe_dependency_value)
    _glob_dict = {'basepath': recipe_base_folder_path}
    _glob_dict.update(_filename_components.groupdict())
    # BUGFIX: the unmatched "version" group comes back as None from
    # groupdict(), and the old code compared the whole dict against the
    # string 'latest' (always True), so a tag-less dependency would glob
    # for "container.None.recipe" and never match. Treat both "latest"
    # and a missing tag as the version-less filename.
    if _glob_dict.get('version') == 'latest':
        # The 'latest' tag translates to a filename without a version part.
        _glob_dict.pop('version')
    if _glob_dict.get('version') is not None:
        _glob_string = (
            '{basepath}/**/{collection}/{container}.{version}.recipe'.format(
                **_glob_dict)
        )
    else:
        _glob_string = (
            '{basepath}/**/{collection}/{container}.recipe'.format(
                **_glob_dict)
        )
    # Find corresponding Files
    _glob_results = glob.glob(
        _glob_string,
        recursive=True
    )
    if len(_glob_results) > 1:
        raise RuntimeError(
            (
                "The naming schema of recipe {} clashes with. "
                "They cannot both exist in one sregistry."
            ).format(', '.join(_glob_results))
        )
    if not _glob_results:
        raise RuntimeError(
            "Unresolved dependency on {}".format(
                recipe_dependency_value
            )
        )
    return _glob_results[0]
def about_us():
    """Render the static "about us" page."""
    return render_template("basic/about_us.html")
def sfb1d_atrous(lo, hi, g0, g1, mode='periodization', dim=-1, dilation=1,
                 pad1=None, pad=None):
    """ 1D synthesis filter bank of an image tensor with no upsampling. Used for
    the stationary wavelet transform.

    Args:
        lo: lowpass coefficient tensor (NCHW layout assumed -- TODO confirm).
        hi: highpass coefficient tensor, same shape as ``lo``.
        g0, g1: lowpass/highpass synthesis filters; arrays are converted to
            tensors on ``lo``'s device.
        mode: padding mode forwarded to ``mypad``.
        dim: dimension along which to filter (taken modulo 4).
        dilation: filter dilation (zeros inserted between taps).
        pad1, pad: NOTE(review): both parameters are effectively unused --
            ``pad`` is overwritten below and ``pad1`` only appears in
            commented-out code. Kept for interface compatibility.
    """
    C = lo.shape[1]  # channel count; filters are replicated per channel
    d = dim % 4
    # If g0, g1 are not tensors, make them. If they are, then assume that they
    # are in the right order
    if not isinstance(g0, torch.Tensor):
        g0 = torch.tensor(np.copy(np.array(g0).ravel()),
                          dtype=torch.float, device=lo.device)
    if not isinstance(g1, torch.Tensor):
        g1 = torch.tensor(np.copy(np.array(g1).ravel()),
                          dtype=torch.float, device=lo.device)
    L = g0.numel()
    # Reshape 1D filters to 4D so they convolve along dimension `d` only.
    shape = [1,1,1,1]
    shape[d] = L
    # If g aren't in the right shape, make them so
    if g0.shape != tuple(shape):
        g0 = g0.reshape(*shape)
    if g1.shape != tuple(shape):
        g1 = g1.reshape(*shape)
    # One filter copy per channel for grouped (depthwise) convolution.
    g0 = torch.cat([g0]*C,dim=0)
    g1 = torch.cat([g1]*C,dim=0)
    # Calculate the padding size.
    # With dilation, zeros are inserted between the filter taps but not after.
    # that means a filter that is [a b c d] becomes [a 0 b 0 c 0 d].
    centre = L / 2
    fsz = (L-1)*dilation + 1  # effective (dilated) filter size
    newcentre = fsz / 2
    before = newcentre - dilation*centre
    # NOTE(review): `before`, `short_offset` and `centre_offset` below are
    # computed but never used -- they look like leftovers from an earlier
    # padding scheme (see the commented-out code at the end).
    # When conv_transpose2d is done, a filter with k taps expands an input with
    # N samples to be N + k - 1 samples. The 'padding' is really the opposite of
    # that, and is how many samples on the edges you want to cut out.
    # In addition to this, we want the input to be extended before convolving.
    # This means the final output size without the padding option will be
    #   N + k - 1 + k - 1
    # The final thing to worry about is making sure that the output is centred.
    short_offset = dilation - 1
    centre_offset = fsz % 2
    a = fsz//2
    b = fsz//2 + (fsz + 1) % 2
    # a = 0
    # b = 0
    # Symmetrically extend the inputs before the transposed convolution;
    # this clobbers the `pad` parameter (see NOTE in the docstring).
    pad = (0, 0, a, b) if d == 2 else (a, b, 0, 0)
    lo = mypad(lo, pad=pad, mode=mode)
    hi = mypad(hi, pad=pad, mode=mode)
    # NOTE(review): the first `unpad` value is immediately overwritten with
    # (0, 0) -- the dead assignment looks like an abandoned experiment.
    unpad = (fsz - 1, 0) if d == 2 else (0, fsz - 1)
    unpad = (0, 0)
    # Depthwise transposed convolution with both synthesis filters, summed.
    y = F.conv_transpose2d(lo, g0, padding=unpad, groups=C, dilation=dilation) + \
        F.conv_transpose2d(hi, g1, padding=unpad, groups=C, dilation=dilation)
    #     pad = (L-1, 0) if d == 2 else (0, L-1)
    #     y = F.conv_transpose2d(lo, g0, padding=pad, groups=C, dilation=dilation) + \
    #         F.conv_transpose2d(hi, g1, padding=pad, groups=C, dilation=dilation)
    #
    #
    # Calculate the pad size
    #     L2 = (L * dilation)//2
    #     # pad = (0, 0, L2, L2+dilation) if d == 2 else (L2, L2+dilation, 0, 0)
    #     a = dilation*2
    #     b = dilation*(L-2)
    #     if pad1 is None:
    #         pad1 = (0, 0, a, b) if d == 2 else (a, b, 0, 0)
    #     print(pad1)
    #     lo = mypad(lo, pad=pad1, mode=mode)
    #     hi = mypad(hi, pad=pad1, mode=mode)
    #     if pad is None:
    #         p = (a + b + (L - 1)*dilation)//2
    #         pad = (p, 0) if d == 2 else (0, p)
    #     print(pad)
    # Normalize by 2*dilation to preserve amplitude across the synthesis step.
    return y/(2*dilation)
def flatten(iterable):
    """Lazily flatten an arbitrarily nested iterable, depth-first.

    Strings are treated as atomic leaf items (never iterated
    character-by-character). Any non-iterable item is yielded as-is.

    Yields:
        The leaf items of *iterable* in order.
    """
    iterable = iter(iterable)
    while True:
        try:
            item = next(iterable)
        except StopIteration:
            break
        if isinstance(item, str):
            yield item
            continue
        try:
            data = iter(item)
        except TypeError:
            # Narrowed from a bare `except:`: only "not iterable" means leaf;
            # any other exception should propagate instead of being swallowed.
            yield item
        else:
            # Push the sub-iterable's items in front of the remaining work.
            iterable = itertools.chain(data, iterable)
def preprocess(image, size):
    """Pre-process an OpenCV-style HWC image into a 1xCxHxW torch tensor.

    The image is bilinearly resized to (size, size), mean-subtracted
    (module-level ``mean_pixel``), transposed to channels-first and given
    a leading batch dimension.
    """
    arr = np.array(image)
    height, width, _ = arr.shape
    arr = nd.zoom(arr.astype('float32'), (size / height, size / width, 1.0), order=1)
    arr = arr - mean_pixel
    arr = arr.transpose([2, 0, 1])
    batched = np.expand_dims(arr, axis=0)
    return torch.from_numpy(batched)
def test_get_route_info_input():
    """
    get_route_info must raise for invalid URLs: None, gibberish,
    non-Yandex pages, and stop (rather than route) URLs.
    """
    transport_proxy = YandexTransportProxy(SERVER_HOST, SERVER_PORT)
    bad_urls = [
        None,                                    # URL is None
        '52086gfdgfd86534',                      # URL is gibberish
        'https://en.wikipedia.org/wiki/Taiwan',  # URL unrelated to Yandex
        # URL is for a stop, not a route (Остановка Туберкулёзный диспансер № 18)
        'https://yandex.ru/maps/213/moscow/?ll=37.583033%2C55.815337&masstransit%5BstopId%5D=stop__9642178&mode=stop&z=17',
    ]
    for url in bad_urls:
        with pytest.raises(Exception):
            transport_proxy.get_route_info(url)
    wait_random_time()
def delete_notebook(notebook_id: str) -> tuple[dict, int]:
    """Delete one of the requesting user's notebooks, including all its notes.

    Requires a fresh access token in the request header:
    "Authorization: Bearer fresh_access_token". A user may only delete
    their own notebooks.

    Request parameters:
        - notebook_id (string): Notebook ID.

    Response status codes:
        - 200 (Success)
        - 401 (Unauthorized)
        - 403 (Forbidden)
        - 422 (Unprocessable Entity)

    Response data (JSON string):
        - message (string): Message.
        - message_type (string): Message type.

    :param notebook_id: Notebook ID.
    :return: Tuple containing the response data and the response status code.
    """
    # Invalid IDs raise marshmallow.ValidationError, which maps to a 400.
    notebook_id = id_schema.load({"id": notebook_id})["id"]
    # Identify the caller from the JWT payload.
    requester_id = get_jwt()["user_id"]
    db = get_db()
    notebook = db.notebooks.get_by_id(notebook_id)
    # Reject missing notebooks and notebooks owned by someone else.
    if notebook is None or notebook["user_id"] != requester_id:
        return get_response_data(USER_UNAUTHORIZED, ERROR_UNAUTHORIZED_USER), 403
    # Remove the notebook itself, then every note that belonged to it.
    db.notebooks.delete(notebook_id)
    for note in db.notes.get_by_filter(notebook_id):
        db.notes.delete(note["id"])
    return get_response_data(DELETED, OK), 200
def log_time(logger):
    """
    Decorator factory: log the wall-clock execution time of the wrapped
    function through *logger* (via the module-level ``_log_time`` helper).
    """
    def decorator(func):
        @wraps(func)
        def wrapper(*args, **kwargs):
            started = time.time()
            outcome = func(*args, **kwargs)
            finished = time.time()
            _log_time(logger, func.__name__, started, finished)
            return outcome
        return wrapper
    return decorator
def assert_weights(w1, w2):
    """Assert that two tuple-to-weight dictionaries are equal after conversion."""
    left = convert_dict_of_weights(w1)
    right = convert_dict_of_weights(w2)
    assert left == right
def compute_avg_merge_candidate(catavg, v, intersection_idx):
    """
    Build the average merge candidate from the deltas that catavg and v
    share. Anchoring on a single common category would let an outlier
    distort the merged vector, so we form one candidate per common
    category (shifting v so that category matches catavg) and average
    them element-wise, ignoring NaNs. With noiseless y every candidate is
    identical, so averaging is free; with noise it spreads the error
    across categories.
    """
    candidates = [v - v[idx] + catavg[idx] for idx in intersection_idx]
    with warnings.catch_warnings():
        # np.nanmean warns "Mean of empty slice" when a column is all-NaN,
        # which is expected here.
        warnings.simplefilter("ignore", category=RuntimeWarning)
        return np.nanmean(candidates, axis=0)
def plot_annotations(img, bbox, labels, scores, confidence_threshold,
                     save_fig_path='predicted_img.jpeg', show=False, save_fig=True):
    """
    Plot bounding boxes with class labels over an image and optionally save it.

    Args:
        img: Image array displayable by matplotlib.
        bbox: Iterable of boxes as (x1, y1, x2, y2).
        labels: Per-box integer class ids (1=Vehicle, 2=Person, 3=Cyclist).
        scores: Per-box confidence scores.
        confidence_threshold: Only boxes with score strictly above this are drawn.
        save_fig_path: Where to write the annotated image.
        show: If True, display the figure interactively.
        save_fig: If True, save the figure to ``save_fig_path``.
            (Previously this flag was accepted but ignored.)

    Returns:
        The ``save_fig_path`` (whether or not saving was requested).
    """
    # Default colors and label names per class id.
    colors_map = {'1': '#5E81AC', '2': '#A3BE8C', '3': '#B48EAD'}
    labels_map = {'1': 'Vehicle', '2': 'Person', '3': 'Cyclist'}
    # Create figure and axes, and display the image.
    fig, ax = plt.subplots(figsize=(200, 200))
    ax.imshow(img)
    # Indices of detections above the confidence threshold (set for O(1) lookup).
    keep = {idx for idx, score in enumerate(scores) if score > confidence_threshold}
    for idx, entry in enumerate(bbox):
        if idx not in keep:
            continue
        h = entry[2] - entry[0]
        w = entry[3] - entry[1]
        # Draw the bounding box.
        rect = patches.Rectangle((entry[0], entry[1]), h, w,
                                 linewidth=60,
                                 edgecolor=colors_map[str(labels[idx])],
                                 facecolor='none')
        # Add the classification category label.
        plt.text(entry[0], entry[1], s=labels_map[str(labels[idx])],
                 color='white', verticalalignment='top',
                 bbox={'color': colors_map[str(labels[idx])], 'pad': 0},
                 font={'size': 500})
        ax.add_patch(rect)
    # Save before showing: with interactive backends, show() can leave the
    # figure empty for a subsequent savefig call.
    if save_fig:
        plt.savefig(save_fig_path,
                    bbox_inches='tight',
                    pad_inches=0,
                    dpi=5)
    if show == True:
        plt.show()
    return save_fig_path
def main():
    """Demo of the base 64 encoding utility functions.

    Walks through encode/decode round trips, then decodes to a file and
    re-encodes from it, printing each step.
    """
    print("\n----------------------------------------------------")
    print("--Base 64 Encoding/decoding utility functions demo--")
    print("----------------------------------------------------")
    print("\n------------Encode data to base 64------------------")
    data = "Normal string"
    encoded_data = encodeDataToBase64(data)
    print("Base 64 encoded string of \""+data+"\": \""+encoded_data+"\"")
    print("\n------------Decode data from base 64----------------")
    decoded_data = decodeDataFromBase64(encoded_data)
    print("Base 64 decoded string of \""+encoded_data+"\": \""+decoded_data+"\"")
    print("\n-------Decode data from base 64 to file-------------")
    dummy_file = "dummy_file.txt"
    print("Convert \""+encoded_data+"\" encoded string to file "+dummy_file)
    convertBase64ToFile(encoded_data, dummy_file)
    with open(dummy_file, "rb") as _file:
        # BUGFIX: the file is opened in binary mode, so read() returns bytes;
        # concatenating bytes with str raises TypeError in Python 3. Decode
        # the contents before building the message.
        file_contents = _file.read().decode()
        print("Data contained in file "+dummy_file+": \""+file_contents+"\"")
    print("\n-------Encode data from file to base 64-------------")
    file_encoded_data = convertFileToBase64(dummy_file)
    print("Encoded data from file "+dummy_file+": \""+file_encoded_data+"\"")
    print("\n----------------------------------------------------")
    print("-------------------End of demo----------------------")
    print("----------------------------------------------------\n")
def test_get_dataframe_from_xml():
    """The XML fixture should yield a 5-row table with 2 distinct Limit values."""
    xml_data = read_xml(get_test_data_stream("input.xml"))
    populated = get_populated_table(xml_data)
    assert len(populated) == 5
    assert len(populated.Limit.unique()) == 2
def record_object(obj1):
    """Persist a Seen marker for this object version so later runs skip it."""
    marker = Seen()
    marker.obj = obj_signature(obj1)
    marker.save()
def get_registry_image_tag(app_name: str, image_tag: str, registry: dict) -> str:
    """Build the fully-qualified image name ``<organization>/<app>:<tag>``."""
    organization = registry['organization']
    return "{}/{}:{}".format(organization, app_name, image_tag)
def save_wind_generated_waves_to_subdirectory(args):
    """ Copy the wave height and wave period rasters into the intermediate
        subdirectory.

        Inputs:
            args['intermediate_directory']: base intermediate directory
            args['subdirectory']: subdirectory name under the base directory
            args['wave_heights']: list of wave height file URIs to copy
            args['wave_periods']: list of wave period file URIs to copy

        Outputs:
            data_uri: dictionary of saved-data URIs.
                NOTE(review): nothing is ever inserted, so this is always
                empty -- kept as-is for interface compatibility.
    """
    destination = os.path.join(args['intermediate_directory'], args['subdirectory'])
    data_uri = {}
    for source_uri in args['wave_heights']:
        shutil.copy(source_uri, destination)
    for source_uri in args['wave_periods']:
        shutil.copy(source_uri, destination)
    return data_uri
def process_file(filename):
    """
    Handle a single .fits file, returning the count of checksum and compliance
    errors.

    Checksum verification always runs; compliance verification only when
    OPTIONS.compliance is set. The file may be rewritten in place via
    update() depending on OPTIONS.write_file / OPTIONS.force.
    Any exception is logged and counted as a single error.
    """
    try:
        checksum_errors = verify_checksums(filename)
        if OPTIONS.compliance:
            compliance_errors = verify_compliance(filename)
        else:
            compliance_errors = 0
        # NOTE(review): Python precedence parses this condition as
        # `(OPTIONS.write_file and checksum_errors == 0) or OPTIONS.force`,
        # i.e. --force alone triggers update() even without --write-file.
        # Confirm that this is the intended behavior; if update should always
        # require write_file, parenthesize as
        # `write_file and (checksum_errors == 0 or force)`.
        if OPTIONS.write_file and checksum_errors == 0 or OPTIONS.force:
            update(filename)
        return checksum_errors + compliance_errors
    except Exception as e:
        # Count any unexpected failure as one error rather than aborting the run.
        log.error('EXCEPTION {!r} .. {}'.format(filename, e))
        return 1
def test_markersizes_allclose(axis):
    """A tiny markersize error passes allclose at the default tolerance but
    fails the exact check and a stricter rtol."""
    err = 1e-12
    markersize = 1
    axis.plot([1, 2.17, 3.3, 4], [2.5, 3.25, 4.4, 5], markersize=markersize + err)
    checker = LinePlotChecker(axis)
    with pytest.raises(AssertionError):
        checker.assert_markersizes_equal([markersize])
    with pytest.raises(AssertionError):
        checker.assert_markersizes_allclose([markersize], rtol=1e-13)
    checker.assert_markersizes_allclose([markersize])
def draw_status_bar(stdscr):
    """
    Render the one-line status bar at the bottom of the screen:
    keyboard shortcuts on the left, current time on the right.
    """
    total_rows, total_cols = stdscr.getmaxyx()
    # subwin shares memory with its parent window, so no extra repaint needed
    bar = stdscr.subwin(1, total_cols, total_rows - 1, 0)
    bar.attron(curses.color_pair(2))
    shortcuts = ' arrow keys: Navigation, enter: Type digit, q: Quit'
    clock = time.strftime('%H:%M:%S', time.localtime())
    # shortcuts on the left
    bar.insstr(0, 0, shortcuts)
    # fill the gap between shortcuts and clock
    filler_width = total_cols - (len(shortcuts) + len(clock))
    bar.insstr(0, len(shortcuts), ' ' * filler_width)
    # clock on the right
    bar.insstr(0, total_cols - len(clock), clock)
def numeric_summary(tensor):
  """Get a text summary of a numeric tensor.

  This summary is only available for numeric (int*, float*, complex*) and
  Boolean tensors.

  Args:
    tensor: (`numpy.ndarray`) the tensor value object to be summarized.

  Returns:
    The summary text as a `RichTextLines` object. If the type of `tensor` is not
    numeric or Boolean, a single-line `RichTextLines` object containing a
    warning message will reflect that.
  """

  def _counts_summary(counts, skip_zeros=True, total_count=None):
    """Format values as a two-row table."""
    if skip_zeros:
      counts = [(count_key, count_val) for count_key, count_val in counts
                if count_val]
    max_common_len = 0
    for count_key, count_val in counts:
      count_val_str = str(count_val)
      common_len = max(len(count_key) + 1, len(count_val_str) + 1)
      max_common_len = max(common_len, max_common_len)

    key_line = debugger_cli_common.RichLine("|")
    val_line = debugger_cli_common.RichLine("|")
    for count_key, count_val in counts:
      count_val_str = str(count_val)
      key_line += _pad_string_to_length(count_key, max_common_len)
      val_line += _pad_string_to_length(count_val_str, max_common_len)
    key_line += " |"
    val_line += " |"

    if total_count is not None:
      total_key_str = "total"
      total_val_str = str(total_count)
      max_common_len = max(len(total_key_str) + 1, len(total_val_str))
      total_key_str = _pad_string_to_length(total_key_str, max_common_len)
      total_val_str = _pad_string_to_length(total_val_str, max_common_len)
      key_line += total_key_str + " |"
      val_line += total_val_str + " |"

    return debugger_cli_common.rich_text_lines_from_rich_line_list(
        [key_line, val_line])

  if not isinstance(tensor, np.ndarray) or not np.size(tensor):
    return debugger_cli_common.RichTextLines([
        "No numeric summary available due to empty tensor."])
  # BUGFIX: replaced the removed NumPy aliases np.float / np.complex /
  # np.bool (deprecated in NumPy 1.20, removed in 1.24) with the abstract
  # scalar types np.floating / np.complexfloating and np.bool_. Behavior is
  # identical on older NumPy as well.
  elif (np.issubdtype(tensor.dtype, np.floating) or
        np.issubdtype(tensor.dtype, np.complexfloating) or
        np.issubdtype(tensor.dtype, np.integer)):
    counts = [
        ("nan", np.sum(np.isnan(tensor))),
        ("-inf", np.sum(np.isneginf(tensor))),
        ("-", np.sum(np.logical_and(
            tensor < 0.0, np.logical_not(np.isneginf(tensor))))),
        ("0", np.sum(tensor == 0.0)),
        ("+", np.sum(np.logical_and(
            tensor > 0.0, np.logical_not(np.isposinf(tensor))))),
        ("+inf", np.sum(np.isposinf(tensor)))]
    output = _counts_summary(counts, total_count=np.size(tensor))

    # Stats are computed over finite, non-NaN entries only.
    valid_array = tensor[
        np.logical_not(np.logical_or(np.isinf(tensor), np.isnan(tensor)))]
    if np.size(valid_array):
      stats = [
          ("min", np.min(valid_array)),
          ("max", np.max(valid_array)),
          ("mean", np.mean(valid_array)),
          ("std", np.std(valid_array))]
      output.extend(_counts_summary(stats, skip_zeros=False))
    return output
  elif tensor.dtype == np.bool_:
    counts = [
        ("False", np.sum(tensor == 0)),
        ("True", np.sum(tensor > 0)),]
    return _counts_summary(counts, total_count=np.size(tensor))
  else:
    return debugger_cli_common.RichTextLines([
        "No numeric summary available due to tensor dtype: %s." % tensor.dtype])
def solveq(K, f, bcPrescr, bcVal=None):
    """
    Solve the static FE equation system K a = f with prescribed dofs.

    Parameters:

        K           global stiffness matrix, dim(K)= nd x nd
        f           global load vector, dim(f)= nd x 1

        bcPrescr    1-dim integer array of prescribed dofs (1-based).
        bcVal       1-dim float array of prescribed values;
                    zeros are assumed when omitted.

    Returns:

        a           solution vector including the prescribed boundary values
        Q           reaction force vector

        dim(a)=dim(Q)= nd x 1, nd : number of dof's
    """
    n_dofs = K.shape[0]
    n_prescribed = bcPrescr.shape[0]

    if bcVal is None:
        bcVal = np.zeros([n_prescribed], 'd')

    # Boolean mask of free (non-prescribed) dofs; bcPrescr is 1-based.
    free_mask = np.ones(n_dofs, 'bool')
    free_mask[np.ix_(bcPrescr - 1)] = False
    free_dofs = np.arange(n_dofs)[free_mask]

    prescribed_vals = np.asmatrix(bcVal).reshape(n_prescribed, 1)

    # Reduced system: move the prescribed-dof contributions to the rhs.
    fsys = f[free_dofs] - K[np.ix_((free_dofs), (bcPrescr - 1))] * prescribed_vals
    asys = np.linalg.solve(K[np.ix_((free_dofs), (free_dofs))], fsys)

    # Assemble the full solution and compute reactions.
    a = np.zeros([n_dofs, 1])
    a[np.ix_(bcPrescr - 1)] = prescribed_vals
    a[np.ix_(free_dofs)] = asys

    Q = K * np.asmatrix(a) - f

    return (np.asmatrix(a), Q)
def get_output_names(hf):
    """
    get_output_names(hf)

    Return the sorted output variable names stored under '/output/data'.

    Args:
        hf: An open HDF5 filehandle (or any mapping with the same
            interface) to read from.

    Returns:
        A sorted list of the output variable names as strings.
    """
    names = hf['/output/data'].keys()
    return sorted(str(name) for name in names)
def gene_trends(
    adata: AnnData,
    model: _input_model_type,
    genes: Union[str, Sequence[str]],
    lineages: Optional[Union[str, Sequence[str]]] = None,
    backward: bool = False,
    data_key: str = "X",
    time_key: str = "latent_time",
    time_range: Optional[Union[_time_range_type, List[_time_range_type]]] = None,
    transpose: bool = False,
    callback: _callback_type = None,
    conf_int: Union[bool, float] = True,
    same_plot: bool = False,
    hide_cells: bool = False,
    perc: Optional[Union[Tuple[float, float], Sequence[Tuple[float, float]]]] = None,
    lineage_cmap: Optional[matplotlib.colors.ListedColormap] = None,
    abs_prob_cmap: matplotlib.colors.ListedColormap = cm.viridis,
    cell_color: Optional[str] = None,
    cell_alpha: float = 0.6,
    lineage_alpha: float = 0.2,
    size: float = 15,
    lw: float = 2,
    cbar: bool = True,
    margins: float = 0.015,
    sharex: Optional[Union[str, bool]] = None,
    sharey: Optional[Union[str, bool]] = None,
    gene_as_title: Optional[bool] = None,
    legend_loc: Optional[str] = "best",
    obs_legend_loc: Optional[str] = "best",
    ncols: int = 2,
    suptitle: Optional[str] = None,
    return_models: bool = False,
    n_jobs: Optional[int] = 1,
    backend: Backend_t = _DEFAULT_BACKEND,
    show_progress_bar: bool = True,
    figsize: Optional[Tuple[float, float]] = None,
    dpi: Optional[int] = None,
    save: Optional[Union[str, Path]] = None,
    plot_kwargs: Mapping[str, Any] = MappingProxyType({}),
    **kwargs: Any,
) -> Optional[_return_model_type]:
    """
    Plot gene expression trends along lineages.

    Each lineage is defined via it's lineage weights which we compute using :func:`cellrank.tl.lineages`. This
    function accepts any model based off :class:`cellrank.ul.models.BaseModel` to fit gene expression,
    where we take the lineage weights into account in the loss function.

    Parameters
    ----------
    %(adata)s
    %(model)s
    %(genes)s
    lineages
        Names of the lineages to plot. If `None`, plot all lineages.
    %(backward)s
    data_key
        Key in :attr:`anndata.AnnData.layers` or `'X'` for :attr:`anndata.AnnData.X` where the data is stored.
    time_key
        Key in :attr:`anndata.AnnData.obs` where the pseudotime is stored.
    %(time_range)s

        This can also be specified on per-lineage basis.
    %(gene_symbols)s
    transpose
        If ``same_plot = True``, group the trends by ``lineages`` instead of ``genes``.
        This forces ``hide_cells = True``.
        If ``same_plot = False``, show ``lineages`` in rows and ``genes`` in columns.
    %(model_callback)s
    conf_int
        Whether to compute and show confidence interval. If the ``model`` is :class:`cellrank.ul.models.GAMR`,
        it can also specify the confidence level, the default is `0.95`.
    same_plot
        Whether to plot all lineages for each gene in the same plot.
    hide_cells
        If `True`, hide all cells.
    perc
        Percentile for colors. Valid values are in interval `[0, 100]`.
        This can improve visualization. Can be specified individually for each lineage.
    lineage_cmap
        Categorical colormap to use when coloring in the lineages. If `None` and ``same_plot``,
        use the corresponding colors in :attr:`anndata.AnnData.uns`, otherwise use `'black'`.
    abs_prob_cmap
        Continuous colormap to use when visualizing the absorption probabilities for each lineage.
        Only used when ``same_plot = False``.
    cell_color
        Key in :attr:`anndata.AnnData.obs` or :attr:`anndata.AnnData.var_names` used for coloring the cells.
    cell_alpha
        Alpha channel for cells.
    lineage_alpha
        Alpha channel for lineage confidence intervals.
    size
        Size of the points.
    lw
        Line width of the smoothed values.
    cbar
        Whether to show colorbar. Always shown when percentiles for lineages differ.
        Only used when ``same_plot = False``.
    margins
        Margins around the plot.
    sharex
        Whether to share x-axis. Valid options are `'row'`, `'col'` or `'none'`.
    sharey
        Whether to share y-axis. Valid options are `'row'`, `'col'` or `'none'`.
    gene_as_title
        Whether to show gene names as titles instead on y-axis.
    legend_loc
        Location of the legend displaying lineages. Only used when `same_plot = True`.
    obs_legend_loc
        Location of the legend when ``cell_color`` corresponds to a categorical variable.
    ncols
        Number of columns of the plot when plotting multiple genes. Only used when ``same_plot = True``.
    suptitle
        Suptitle of the figure.
    %(return_models)s
    %(parallel)s
    %(plotting)s
    plot_kwargs
        Keyword arguments for :meth:`cellrank.ul.models.BaseModel.plot`.
    kwargs
        Keyword arguments for :meth:`cellrank.ul.models.BaseModel.prepare`.

    Returns
    -------
    %(plots_or_returns_models)s
    """
    # --- Normalize and validate the requested genes -----------------------
    if isinstance(genes, str):
        genes = [genes]
    genes = _unique_order_preserving(genes)
    _check_collection(
        adata,
        genes,
        "obs" if data_key == "obs" else "var_names",
        use_raw=kwargs.get("use_raw", False),
    )

    # --- Resolve the lineages to plot from the absorption probabilities ---
    lineage_key = Key.obsm.abs_probs(backward)
    if lineage_key not in adata.obsm:
        raise KeyError(f"Lineages key `{lineage_key!r}` not found in `adata.obsm`.")

    if lineages is None:
        lineages = adata.obsm[lineage_key].names
    elif isinstance(lineages, str):
        lineages = [lineages]
    elif all(ln is None for ln in lineages):  # no lineage, all the weights are 1
        lineages = [None]
        cbar = False
        logg.debug("All lineages are `None`, setting the weights to `1`")
    lineages = _unique_order_preserving(lineages)

    # --- One time range per lineage ---------------------------------------
    if isinstance(time_range, (tuple, float, int, type(None))):
        time_range = [time_range] * len(lineages)
    elif len(time_range) != len(lineages):
        raise ValueError(
            f"Expected time ranges to be of length `{len(lineages)}`, found `{len(time_range)}`."
        )

    kwargs["time_key"] = time_key
    kwargs["data_key"] = data_key
    kwargs["backward"] = backward
    kwargs["conf_int"] = conf_int  # `prepare` does not take or need this
    models = _create_models(model, genes, lineages)

    # --- Fit one model per (gene, lineage), possibly in parallel -----------
    all_models, models, genes, lineages = _fit_bulk(
        models,
        _create_callbacks(adata, callback, genes, lineages, **kwargs),
        genes,
        lineages,
        time_range,
        return_models=True,
        filter_all_failed=False,
        parallel_kwargs={
            "show_progress_bar": show_progress_bar,
            "n_jobs": _get_n_cores(n_jobs, len(genes)),
            "backend": _get_backend(models, backend),
        },
        **kwargs,
    )

    lineages = sorted(lineages)
    tmp = adata.obsm[lineage_key][lineages].colors
    if lineage_cmap is None and not transpose:
        lineage_cmap = tmp

    plot_kwargs = dict(plot_kwargs)
    plot_kwargs["obs_legend_loc"] = obs_legend_loc
    # --- Optionally swap genes/lineages axes of the fitted-model grid ------
    if transpose:
        all_models = pd.DataFrame(all_models).T.to_dict()
        models = pd.DataFrame(models).T.to_dict()
        genes, lineages = lineages, genes
        hide_cells = same_plot or hide_cells
    else:
        # information overload otherwise
        plot_kwargs["lineage_probability"] = False
        plot_kwargs["lineage_probability_conf_int"] = False

    # First/last successfully fitted row per column; used to decide where to
    # draw lineage titles and x tick labels in the non-same_plot layout.
    tmp = pd.DataFrame(models).T.astype(bool)
    start_rows = np.argmax(tmp.values, axis=0)
    end_rows = tmp.shape[0] - np.argmax(tmp[::-1].values, axis=0) - 1

    # --- Work out the subplot grid layout ----------------------------------
    if same_plot:
        gene_as_title = True if gene_as_title is None else gene_as_title
        sharex = "all" if sharex is None else sharex
        if sharey is None:
            sharey = "row" if plot_kwargs.get("lineage_probability", False) else "none"
        ncols = len(genes) if ncols >= len(genes) else ncols
        nrows = int(np.ceil(len(genes) / ncols))
    else:
        gene_as_title = False if gene_as_title is None else gene_as_title
        sharex = "col" if sharex is None else sharex
        if sharey is None:
            sharey = (
                "row"
                if not hide_cells or plot_kwargs.get("lineage_probability", False)
                else "none"
            )
        nrows = len(genes)
        ncols = len(lineages)

    plot_kwargs = dict(plot_kwargs)
    if plot_kwargs.get("xlabel", None) is None:
        plot_kwargs["xlabel"] = time_key

    fig, axes = plt.subplots(
        nrows=nrows,
        ncols=ncols,
        sharex=sharex,
        sharey=sharey,
        figsize=(6 * ncols, 4 * nrows) if figsize is None else figsize,
        tight_layout=True,
        dpi=dpi,
    )
    axes = np.reshape(axes, (nrows, ncols))

    cnt = 0
    plot_kwargs["obs_legend_loc"] = None if same_plot else obs_legend_loc

    # --- Main plotting loop: one gene per cell (or per row) ----------------
    logg.info("Plotting trends")
    for row in range(len(axes)):
        for col in range(len(axes[row])):
            if cnt >= len(genes):
                break
            gene = genes[cnt]
            if (
                same_plot
                and plot_kwargs.get("lineage_probability", False)
                and transpose
            ):
                lpc = adata.obsm[lineage_key][gene].colors[0]
            else:
                lpc = None

            if same_plot:
                plot_kwargs["obs_legend_loc"] = (
                    obs_legend_loc if row == 0 and col == len(axes[0]) - 1 else None
                )

            _trends_helper(
                models,
                gene=gene,
                lineage_names=lineages,
                transpose=transpose,
                same_plot=same_plot,
                hide_cells=hide_cells,
                perc=perc,
                lineage_cmap=lineage_cmap,
                abs_prob_cmap=abs_prob_cmap,
                lineage_probability_color=lpc,
                cell_color=cell_color,
                alpha=cell_alpha,
                lineage_alpha=lineage_alpha,
                size=size,
                lw=lw,
                cbar=cbar,
                margins=margins,
                sharey=sharey,
                gene_as_title=gene_as_title,
                legend_loc=legend_loc,
                figsize=figsize,
                fig=fig,
                axes=axes[row, col] if same_plot else axes[cnt],
                show_ylabel=col == 0,
                show_lineage=same_plot or (cnt == start_rows),
                show_xticks_and_label=((row + 1) * ncols + col >= len(genes))
                if same_plot
                else (cnt == end_rows),
                **plot_kwargs,
            )
            # plot legend on the 1st plot
            cnt += 1

            if not same_plot:
                plot_kwargs["obs_legend_loc"] = None

    # Remove unused axes in a partially-filled same_plot grid.
    if same_plot and (col != ncols):
        for ax in np.ravel(axes)[cnt:]:
            ax.remove()

    fig.suptitle(suptitle, y=1.05)

    if save is not None:
        save_fig(fig, save)

    if return_models:
        return all_models
def create_hostclass_snapshot_dict(snapshots):
    """
    Group snapshots by hostclass and environment.

    Builds a mapping from '<hostclass>_<env>' to the list of snapshots
    carrying those tags; snapshots missing either tag are ignored. The
    resulting dict is used (e.g. by the --keep-num option) to know how many
    snapshots exist for each hostclass/environment pair.

    :param list[Snapshot] snapshots:
    :return dict[str, list[Snapshot]]:
    """
    grouped = {}
    for snapshot in snapshots:
        tags = snapshot.tags
        # Only snapshots tagged with both 'hostclass' and 'env' can be keyed.
        if not tags:
            continue
        hostclass = tags.get('hostclass')
        env = tags.get('env')
        if hostclass and env:
            grouped.setdefault(hostclass + '_' + env, []).append(snapshot)
    return grouped
def refund_order(id):
    """
    Refund the PayPal payment for an order and restock its items.

    Looks up the order's PayPal payment, issues a refund for the full sale
    amount, then marks the order as refunded/cancelled and returns the
    ordered quantities to product inventory.

    :param id: primary key of the Order to refund.
    :return: a redirect back to the referring page.
    """
    check_admin()
    order = Order.query.filter_by(id=id).first()
    if order is None:
        # Guard added: previously a bad id crashed with AttributeError below.
        flash("Order Not Found", "danger")
        return redirect(redirect_url())
    payment_id = order.payment_id
    try:
        payment = Payment.find(payment_id)
    except ResourceNotFound:
        flash("Payment Not Found", "danger")
        return redirect(redirect_url())
    except ServerError:
        flash("There was a problem with PayPal. Please try again later.", "warning")
        return redirect(redirect_url())
    sale_id = payment.transactions[0].related_resources[0].sale.id
    # Refund the full amount of the original sale.
    sale_amount = {
        'amount': {
            'currency': payment.transactions[0].related_resources[0].sale.amount.currency,
            'total': payment.transactions[0].related_resources[0].sale.amount.total}
    }
    sale = Sale.find(sale_id)
    refund = sale.refund(sale_amount)  # refund full amount
    if refund.success():
        flash("Refund [%s] Success" % (refund.id), 'info')
        status = OrderStatus.query.filter_by(name='Refunded').first()
        order.status_id = status.id
        order.cancelled = True
        order.updated_at = datetime.now()
        order_items = OrderItem.query.filter_by(order_id=order.id).all()
        try:
            for item in order_items:
                # Return the purchased quantity to inventory.
                product = item.product
                product.quantity += item.quantity
                db.session.merge(item)
            db.session.merge(order)
            db.session.flush()
            activity = Activity(verb='update', object=order)
            db.session.add(activity)
            db.session.commit()
        except Exception:
            # Was a bare `except:`; narrowed so SystemExit/KeyboardInterrupt
            # are not swallowed, and roll back so the session is left clean.
            db.session.rollback()
            flash('Items could not be returned to inventory', "warning")
            # XXX: the PayPal refund has already gone through at this point,
            # so a failure here leaves the refund registered but the order /
            # inventory state not updated.
    else:
        flash(refund.error['message'], 'warning')
        print(refund.error)
    return redirect(redirect_url())
def process_sign_in_or_up(firebase_id_token: str, **kwargs):
    """ Processes the sign in or sign up request.
    :param firebase_id_token: The firebase id token of the user.
    :param kwargs:
        if User Sign in/up:
            {sso_token: str, sso_provider: str}
        elif Store Sign up:
            {pos_number: int, business_registration_number: str, iso4217: str}
    :raise JsonParseError: if kwargs matches neither request shape, or if
        any argument has the wrong type.
    """
    if 'sso_token' in kwargs and 'sso_provider' in kwargs:
        # User Sign in/up
        args = (kwargs['sso_token'], kwargs['sso_provider']), (str, str)
        method = User.sign_in_or_up
    elif 'iso4217' in kwargs and 'business_registration_number' in kwargs and 'pos_number' in kwargs:
        # Store Sign up
        args = (kwargs['pos_number'], kwargs['business_registration_number'], kwargs['iso4217']), (int, str, str)
        method = Store.sign_up
    else:
        raise JsonParseError("Invalid arguments.")
    # BUG FIX: the original condition was inverted — it raised when the list
    # of *failing* arguments was empty, i.e. exactly when all arguments were
    # of the correct type. Raise when any argument fails its type check.
    if any(not isinstance(arg, T) for arg, T in zip(*args)):
        raise JsonParseError("Invalid argument type.")
    method(firebase_id_token, *args[0])
def test_univariate_robust_scale():
    """
    Check Sn, a scale estimator which is robust to outliers.
    Reference values come from R [R version 3.6.0 (2019-04-26)]:
    > library(robustbase)
    # All code has the default argument: "finite.corr=TRUE"
    > robustbase::Sn(c(0, 1)) # 0.8861018: FAILS in our implementation
    > robustbase::Sn(c(0, 1, 2)) # 2.207503
    > robustbase::Sn(c(0, 1, 2, 3)) # 1.13774: FAILS in our implementation
    > robustbase::Sn(c(0, 1, 2, 3, 4)) # 1.611203
    > robustbase::Sn(c(0, 1, 2, 3, 4, 5)) # 2.368504: FAILS in our implementation
    > robustbase::Sn(c(0, 1, 2, 3, 4, 5, 6)) # 2.85747
    > robustbase::Sn(c(0, 1, 2, 3, 4, 50, 6)) # 2.85747
    > robustbase::Sn(c(0, 1, 20, 3, 4, 50, 6)) # 5.714939: FAILS in our implementation
    > robustbase::Sn(c(0, 10, 20, 3, 4, 50, 6)) # 8.572409
    > robustbase::Sn(seq(1, 10)) # 3.5778: FAILS in our implementation
    > robustbase::Sn(seq(1, 11)) # 3.896614
    > robustbase::Sn(seq(1, 19)) # 6.259503
    > robustbase::Sn(seq(1, 1500)) # 447.225
    TODO: found this weird sequence that gives Sn of zero, even though there is variability:
    99, 95, 95, 100, 100, 100, 100, 95, 100, 100, 100, 100, 105, 105, 100, 95, 105, 100, 95, 100
    How to make it robust to this weird situation?
    """
    # Tests with an even number of samples, and small sample sizes do not agree with R.
    # This is because the R implementation aims for efficiency of calculation, and does not
    # follow the formula presented in the original paper.
    # Since we aim to be using this on medium/larger data sets, it should not matter.
    cases = [
        (list(range(3)), 2.207503, 1e-6),
        (list(range(5)), 1.611203, 1e-6),
        (list(range(7)), 2.85747, 1e-6),
        ([0, 10, 20, 3, 4, 50, 6], 8.572409, 1e-7),
        (list(range(1, 12)), 3.896614, 1e-6),
        (list(range(1, 19)), 5.3667, 1e-6),
        (list(range(1, 20)), 6.259503, 1e-6),
    ]
    for data, expected, rel_tol in cases:
        assert univariate.Sn(data) == approx(expected, rel=rel_tol)
    # Corner cases: no data yields NaN; a single sample has zero spread.
    assert np.isnan(univariate.Sn([]))
    assert univariate.Sn([13]) == 0.0
def get_dprime_from_regions(*regions):
    """Get the full normalized linkage disequilibrium (D') matrix for n
    regions.

    This is a wrapper which dispatches to the correct normalized linkage
    function based on the number of regions after preparation. Only
    two-dimensional normalized linkage matrices are currently supported;
    anything else raises NotImplementedError.

    :param list regions: List of :ref:`regions <regions>`.
    :returns: :ref:`proximity matrix <proximity_matrices>` giving the normalized linkage \
            disequilibrium of all possible combinations of windows within the different regions.
    """
    prepared = prepare_regions(regions)
    # Guard clause: only the 2-D case is implemented.
    if len(prepared) != 2:
        raise NotImplementedError(
            'There is currently no implementation of normalized linkage '
            'disequilibrium for more than 2 dimensions')
    return dprime_2d(*prepared)
def get_nav_class_state(url, request, partial=False):
    """ Helper that returns 'active' or 'inactive' for a nav link.

    A link is active when its url equals the current request path, equals the
    current route's url, or — with partial=True — merely starts with the
    controller's base url (the action segment is ignored in that case).
    """
    routes = request.environ['pylons.routes_dict']
    base_url = url_for(
        controller=routes['controller'],
        # partial matching compares against the controller url only
        action=None if partial else routes['action'],
        id=None
    )
    is_active = (
        url == request.path_info
        or url == base_url
        or (partial and url.startswith(base_url))
    )
    return 'active' if is_active else 'inactive'
def needs_htcondor(test_item):
    """
    Use a decorator before test classes or methods to only run them if the HTCondor Python bindings are installed.
    """
    marked = _mark_test('htcondor', test_item)
    skip_reason = None
    try:
        import htcondor
        # A trivially-false query probes that a collector is reachable.
        htcondor.Collector(os.getenv('TOIL_HTCONDOR_COLLECTOR')).query(constraint='False')
    except ImportError:
        skip_reason = "Install the HTCondor Python bindings to include this test."
    except IOError:
        skip_reason = "HTCondor must be running to include this test."
    except RuntimeError:
        skip_reason = "HTCondor must be installed and configured to include this test."
    if skip_reason is not None:
        return unittest.skip(skip_reason)(marked)
    return marked
def tile_image(
    image: Image.Image, tile_size: Tuple[int, int], overlap: int
) -> Tuple[torch.Tensor, List[Tuple[int, int]]]:
    """Take in an image and tile it into smaller tiles for inference.
    Args:
        image: The input image to tile.
        tile_size: The (width, height) of the tiles.
        overlap: The overlap between adjacent tiles (height, width).
    Returns:
        A tensor of the tiles and a list of the (x, y) offset for the tiles.
        The offets are needed to keep track of which tiles have targets.
    Examples::
        >>> tiles, coords = tile_image(Image.new("RGB", (1000, 1000)), (512, 512), 50)
        >>> tiles.shape[0]
        9
        >>> len(coords)
        9
        >>> tiles.shape[-2:]
        torch.Size([512, 512])
    """
    width, height = image.size
    tile_w, tile_h = tile_size

    def _offsets(extent: int, tile_len: int) -> List[int]:
        # Step by (tile - overlap), unless one tile already covers the extent.
        step = extent if extent == tile_len else tile_len - overlap
        positions = []
        for pos in range(0, extent - overlap, step):
            # Shift the last tile back so it stays fully inside the image.
            if pos + tile_len >= extent and pos != 0:
                pos = extent - tile_len
            positions.append(pos)
        return positions

    tiles, coords = [], []
    for x in _offsets(width, tile_w):
        for y in _offsets(height, tile_h):
            crop = np.array(image.crop((x, y, x + tile_w, y + tile_h)))
            tiles.append(torch.Tensor(normalize(crop)))
            coords.append((x, y))
    # Transpose the images from BHWC -> BCHW
    return torch.stack(tiles).permute(0, 3, 1, 2), coords
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.