content stringlengths 35 762k | sha1 stringlengths 40 40 | id int64 0 3.66M |
|---|---|---|
def test_3(df, seed=0):
"""training: unbalanced; test: unbalanced
training: 80k (16k 1, 64k 0)
test: 20k (4k 1, 16k 0)
"""
df_ones = df[df['label'] == 1]
df_zeros = df[df['label'] == 0]
df_ones = df_ones.sample(frac=1, random_state=seed).reset_index(drop=True)
df_zeros = df_zeros.sample(frac=1, random_state=seed).reset_index(drop=True)
df_ones_training = df_ones.loc[:16000]
df_zeros_training = df_zeros.loc[:64000]
df_ones_test = df_ones.loc[16000:20000]
df_zeros_test = df_zeros.loc[64000:80000]
df_training = pd.concat([df_ones_training, df_zeros_training])
df_training = df_training.sample(frac=1).reset_index(drop=True)
df_test = pd.concat([df_ones_test, df_zeros_test])
df_test = df_test.sample(frac=1).reset_index(drop=True)
sentences_train = df_training['comment'].tolist()
sentences_test = df_test['comment'].tolist()
labels_train = df_training['label'].tolist()
labels_test = df_test['label'].tolist()
return sentences_train, sentences_test, labels_train, labels_test | 9211d7b9ca027aaf1ddb8372cafa0a541269b6ac | 3,627,600 |
def getStores(customer):
    """
    Return the stores linked to the given customer.

    Token validation is assumed to have been done before this call.

    Returns:
        dict mapping each city to a list of store addresses, plus a
        ``c.CITIES`` key holding the set of all cities. On failure returns
        ``DB_ERRORS[1]`` (object not found) or ``DB_ERRORS[2]`` (DB error).
    """
    try:
        stores = {}
        storeList = Stores.objects.filter(customer=customer).values(c.CITY, c.ADDRESS)
        for store in storeList:
            # Group addresses by city; setdefault creates the list on first hit.
            stores.setdefault(store[c.CITY], []).append(store[c.ADDRESS])
        # The keys collected so far are exactly the distinct cities, so no
        # separate accumulator list is needed.
        stores[c.CITIES] = set(stores)
        return stores
    except (Stores.DoesNotExist, Error) as dbe:
        dblogger.exception(dbe)
        # isinstance (not `type(...) ==`) so subclasses of DoesNotExist are
        # classified correctly.
        return DB_ERRORS[1] if isinstance(dbe, Stores.DoesNotExist) else DB_ERRORS[2]
def create_header(multiobj_bool, constr_func):
    """Build the comma-separated header line used when saving data.

    Args:
        multiobj_bool (:obj:`bool`): True if a multiobjective function is used.
        constr_func (:obj:`list`): Names of the constraint functions applied.

    Returns:
        str: Header line.
    """
    base = "iter,f0val,fvirg,f0val2,fvirg2" if multiobj_bool else "iter,f0val,fvirg"
    return ",".join([base] + list(constr_func))
import logging
def authenticate_password(dir_cli):
    """
    Authenticate the SSO password by issuing a directory services query.

    NOTE(review): this reads ``password`` from enclosing/global scope — it is
    not a parameter here; confirm it is defined before this is called.

    :param dir_cli: directory CLI client exposing ``get_services_list()``
    :return: True if the password is non-empty and the query succeeds,
        False otherwise.
    """
    try:
        if password.strip() == '':
            logging.info('password should not be empty')
            return False
        dir_cli.get_services_list()
        return True
    except Exception:
        # Log the failure instead of silently swallowing it, but keep the
        # False-return contract for callers.
        logging.exception('SSO password authentication failed')
        return False
def colour_by_year(year, train_thresh, update1_thresh, update2_thresh, colours=None):
    """
    Assign/return a colour depending on the year the data point was published.

    Parameters
    ----------
    year :
        publication year of data point
    train_thresh :
        Last year threshold to assign to training set
    update1_thresh :
        Last year threshold to assign to update1 set
    update2_thresh :
        Last year threshold to assign to update2 set
    colours :
        List of colours for training, update1, update2 and test set

    Returns
    -------
    Colour based on the publication year, or None for years after 2020.
    """
    if colours is None:
        colours = ["navy", "plum", "mediumaquamarine", "green"]
    # Walk the thresholds in order; the first one the year falls under wins.
    thresholds = (train_thresh, update1_thresh, update2_thresh, 2020)
    for cutoff, colour in zip(thresholds, colours):
        if year <= cutoff:
            return colour
    return None
import http.client as http_client
import httplib as http_client
import shutil
import pip
import subprocess
import py_compile
from tzlocal import get_localzone
import subprocess
import dateutil.relativedelta
import dateutil.relativedelta
import py_compile
import shutil
import traceback
def main(argv=None):
"""Command line options."""
global TAR
global _bot_logger
global _https_proxy
global _servermode
global playback_current_timestamp
global playback_timer_timestamp
global playback_user_info
global playback_variables
if argv is None:
argv = sys.argv
else:
sys.argv.extend(argv)
_servermode = '--servermode' in argv
if _servermode:
return execute_on_server(argv)
if sys.version_info < (2, 7) or sys.version_info >= (3, 0):
print('Python version info: ' + str(sys.version_info))
raise RuntimeError('This BotEngine is designed for Python 2.7 for cloud compatibility')
try:
importlib.import_module('requests')
except ImportError:
sys.stderr.write("Missing the 'requests' module!\n")
sys.stderr.write("Please install this module by running 'pip install requests'\n")
return 1
else:
try:
importlib.import_module('dateutil')
except ImportError:
sys.stderr.write("Missing the 'python-dateutil' module!\n")
sys.stderr.write("Please install this module by running 'pip install python-dateutil'\n")
return 1
else:
try:
importlib.import_module('dill')
except ImportError:
sys.stderr.write("Missing the 'dill' module!\n")
sys.stderr.write("Please install this module by running 'pip install dill'\n")
return 1
try:
importlib.import_module('tzlocal')
except ImportError:
sys.stderr.write("Missing the 'tzlocal' module!\n")
sys.stderr.write("Please install this module by running 'pip install tzlocal'\n")
return 1
program_version = 'v%s' % __version__
program_build_date = str(__updated__)
program_version_message = '%%(prog)s %s (%s)' % (program_version, program_build_date)
program_shortdesc = __import__('__main__').__doc__.split('\n')[1]
program_license = '%s\n\n Created by David Moss\n\n Copyright 2019 People Power Company. All rights reserved.\n\n Distributed on an "AS IS" basis without warranties\n or conditions of any kind, either express or implied.\n\nUSAGE\n' % program_shortdesc
try:
parser = ArgumentParser(description=program_license, formatter_class=RawDescriptionHelpFormatter, add_help=False)
developer_group = parser.add_argument_group(Color.BOLD + 'BOT LAB - Create and manage your own Bot services' + Color.END)
developer_group.add_argument('--commit', dest='commit_bundle_id', help='Commit the given bot bundle to the server')
developer_group.add_argument('--my_developer_bots', dest='listapps', action='store_true', help='Get a list of the bots you created')
developer_group.add_argument('--publish', dest='publish_bundle_id', help='Submit this bot for review to become publicly available')
developer_group.add_argument('--makeitso', dest='make_it_so', help='Commit, publish, review, and approve - for senior admin-level bot developers only')
developer_group.add_argument('--approve', dest='approve_bundle_id', help='Used by administrators to approve a bot for publishing')
developer_group.add_argument('--botinfo', dest='info_bundle_id', help='Get the details of your given bot bundle')
developer_group.add_argument('--stats', dest='stats_bundle_id', help='Get the statistics of your given bot bundle')
developer_group.add_argument('--errors', dest='errors_bundle_id', help='Get the errors from your given bot bundle executing on the cloud across all users')
developer_group.add_argument('--logs', dest='logs_bundle_id', help='Get the logs from your given bot bundle executing on the cloud across all users')
developer_group.add_argument('--reject_bot_under_review', dest='reject_bundle_id', help='Reject the given bot from being reviewed or published publicly. Used after you --publish and need to make changes.')
developer_group.add_argument('--add_organization', dest='bundle_for_organization', help='Allow the given Bundle ID to be purchased by the given --organization')
developer_group.add_argument('--remove_organization', dest='remove_organization', help='Bundle ID to prevent from being purchased by the --organization')
developer_group.add_argument('--organization_development_mode', dest='organization_development_mode', action='store_true', help='Allow the organization to use the bot in developer mode instead of only purchasing publicly available bots. This is always used in conjunction with --add_organization')
beta_group = parser.add_argument_group(Color.BOLD + 'BOT BETA TESTING - Privately beta test your bots' + Color.END)
beta_group.add_argument('--beta_test_bot', dest='beta_bundle_id', help='Specify a bot bundle ID to configure for beta testing. Typically used in conjunction with --beta_add_user and --beta_delete_user. If used alone, it returns a list of existing beta tester user IDs.')
beta_group.add_argument('--beta_add_user', dest='beta_add_user_id', help='Specify a user ID to add as a beta tester for the given --beta_test_bot, or used with --beta_purchase_bot for an admin to purchase the bot into the given account.')
beta_group.add_argument('--beta_delete_user', dest='beta_delete_user_id', help='Specify a user ID to remove as a beta tester for the given --beta_test_bot')
beta_group.add_argument('--beta_purchase_bot', dest='beta_purchase_bot', help='As an admin, purchase the given bot bundle ID into a user account to begin running it. Use this in conjunction with --beta_add_user to specify the recipient user ID.')
run_group = parser.add_argument_group(Color.BOLD + 'BOT ENGINE - Execute Bots' + Color.END)
run_group.add_argument('-r', '--run', dest='run_bundle_id', help="Run a bot. Pass in the bundle identifier of the bot, which must also be the name of the bot's directory below your current working directory")
run_group.add_argument('-i', '--instance', dest='run_instance_id', help='Run the specific bot instance ID')
run_group.add_argument('-j', '--json', dest='json', help='The JSON that would be passed to the bot over the command line, in the format \'{"hello": "world"}\'')
run_group.add_argument('-a', '--apikey', dest='user_key', help="User's API key, instead of a username / password")
run_group.add_argument('-l', '--location', dest='location_id', help='Location ID')
run_group.add_argument('--servermode', dest='servermode', action='store_true', help='Run this bot on server environment')
run_group.add_argument('--https_proxy', dest='https_proxy', help='If your corporate network requires a proxy, type in the full HTTPS proxy address here (i.e. http://10.10.1.10:1080)')
appstore_group = parser.add_argument_group(Color.BOLD + 'BOT SHOP - Browse the Bot Shop and manage purchased bots.' + Color.END)
appstore_group.add_argument('--search', dest='search', action='store_true', help='Search the bot store for all available bots')
appstore_group.add_argument('--searchfor', dest='search_criteria', help='Search the bot store for bots matching the given search criteria')
appstore_group.add_argument('--lookat', dest='view_bundle_id', help='View the details of an bot on the bot store')
appstore_group.add_argument('--purchase', dest='purchase_bundle_id', help='Obtain or purchase access to an bot on the bot store')
appstore_group.add_argument('--configure', dest='configure_bot_instance_id', help='Grant permission for an bot instance to access devices and communications')
appstore_group.add_argument('--my_purchased_bots', dest='my_purchased_bots', action='store_true', help='Get a list of the bots you have obtained or purchased')
appstore_group.add_argument('--pause', dest='pause_bot_instance_id', help='Stop the given bot instance from executing on your account')
appstore_group.add_argument('--play', dest='play_bot_instance_id', help='Resume execution of the given bot instance on your account')
appstore_group.add_argument('--delete', dest='delete_bot_instance_id', help='Delete the given bot instance ID or bundle ID out of my account')
appstore_group.add_argument('--permissions', dest='permissions_bot_instance_id', help='Discover what your purchased bot has permission to access')
appstore_group.add_argument('--questions', dest='questions_bot_instance_id', help='Answer questions asked by the given bot instance ID or bundle ID')
optional_group = parser.add_argument_group(Color.BOLD + 'Optional Arguments' + Color.END)
optional_group.add_argument('-o', '--organization_id', dest='organization_id', help="Add in the organization ID we're talking about, used in conjunction with --purchase, --add_organization, --remove_organization")
optional_group.add_argument('-h', '--help', dest='help', action='store_true', help='Show this help message and exit')
optional_group.add_argument('-u', '--username', dest='username', help='Username')
optional_group.add_argument('-p', '--password', dest='password', help='Password')
optional_group.add_argument('--admin_username', dest='admin_username', help='Administrative username')
optional_group.add_argument('--admin_password', dest='admin_password', help='Administrative password')
optional_group.add_argument('-b', '--brand', dest='brand', help="Brand name partner to interact with the correct servers: 'myplace', 'origin', 'presence', etc.")
optional_group.add_argument('-s', '--server', dest='server', help='Base server URL (default is ' + DEFAULT_BASE_SERVER_URL + ')')
optional_group.add_argument('-c', '--challenge', dest='challenge_id', help='Challenge ID')
optional_group.add_argument('--loglevel', dest='loglevel', choices=['debug', 'info', 'warn', 'error'], default='info', help='The logging level, default is debug')
optional_group.add_argument('--httpdebug', dest='httpdebug', action='store_true', help='HTTP debug logger output')
optional_group.add_argument('--logfile', dest='logfile', help='Append the debug output to the given filename')
optional_group.add_argument('--zip', dest='zip', action='store_true', help='Commit the bot using the .zip (old) method of bot generation, instead of .tar (new) method.')
tools_group = parser.add_argument_group(Color.BOLD + 'Handy Developer Tools' + Color.END)
tools_group.add_argument('--my_devices', dest='list_devices', action='store_true', help='Get a list of your devices')
tools_group.add_argument('--my_locations', dest='list_locations', action='store_true', help='Get a list of the locations your account has access to')
tools_group.add_argument('--user_id', dest='user_id', action='store_true', help='Get your user ID')
tools_group.add_argument('--device_types', dest='device_types', action='store_true', help='Get a list of available device types on this server')
tools_group.add_argument('--model', dest='device_type_model', help='Get a list of parameters to be expected for a given device type')
tools_group.add_argument('--parameter', dest='parameter', help='Get a description of a specific parameter name')
tools_group.add_argument('--download_device', dest='download_device_id', help='Download data from a specific device ID in CSV format')
tools_group.add_argument('--download_type', dest='download_device_type', help='Download data from all devices of a specific device type in CSV format')
# SLR addition
tools_group.add_argument('--download_devices', dest='download_devices', action='store_true', help='Download data from all your devices in CSV format')
tools_group.add_argument('--record', dest='record', action='store_true', help='Record all device and mode data from your account for rapid playback and bot testing')
tools_group.add_argument('--playback', dest='playback', help='Specify a recorded .json filename to playback. Use the --run command to specify the bot.')
tools_group.add_argument('--generate', dest='generate_bot_bundle_id', help='Generate the bot locally for analysis, without installing dependencies or uploading.')
settings_group = parser.add_argument_group(Color.BOLD + 'Version Control' + Color.END)
settings_group.add_argument('--version', action='version', version=program_version_message)
settings_group.add_argument('--update', dest='update', action='store_true', help='Update this BotEngine framework from the server')
args = parser.parse_args()
sys.argv = []
_bot_logger = _create_logger('bot', LOGGING_LEVEL_DICT[args.loglevel], True, args.logfile)
if args.help:
parser.print_help()
return 0
username = args.username
password = args.password
botname = args.run_bundle_id
instance = args.run_instance_id
server = args.server
challenge_id = args.challenge_id
device_server = None
commit = args.commit_bundle_id
update = args.update
publish = args.publish_bundle_id
reject = args.reject_bundle_id
httpdebug = args.httpdebug
listdevices = args.list_devices
listapps = args.listapps
botinfo = args.info_bundle_id
botstats = args.stats_bundle_id
boterrors = args.errors_bundle_id
botlogs = args.logs_bundle_id
forever = args.json is None
download_device_id = args.download_device_id
download_device_type = args.download_device_type
# SLR addition
download_devices = args.download_devices
# end SLR addition
search_criteria = args.search_criteria
search = args.search or search_criteria is not None
appstore_view_bundle_id = args.view_bundle_id
my_purchased_bots = args.my_purchased_bots
delete_bot_instance_id = args.delete_bot_instance_id
purchase_bundle_id = args.purchase_bundle_id
configure_bot_instance_id = args.configure_bot_instance_id
pause_bot_instance_id = args.pause_bot_instance_id
play_bot_instance_id = args.play_bot_instance_id
organization_id = args.organization_id
organization_development_mode = args.organization_development_mode
add_organization = args.bundle_for_organization
remove_organization = args.remove_organization
permissions_bot_instance_id = args.permissions_bot_instance_id
devicetypes = args.device_types
parameter = args.parameter
device_type_model = args.device_type_model
user_key = args.user_key
record = args.record
playback = args.playback
beta_add_user_id = args.beta_add_user_id
beta_purchase_bot = args.beta_purchase_bot
beta_delete_user_id = args.beta_delete_user_id
beta_test_bot = args.beta_bundle_id
generate = args.generate_bot_bundle_id
user_id = args.user_id
brand = args.brand
approve = args.approve_bundle_id
admin_username = args.admin_username
admin_password = args.admin_password
location_id = args.location_id
list_locations = args.list_locations
if brand is not None:
brand = brand.lower()
if brand == 'presence':
print(Color.BOLD + '\nPresence by People Power' + Color.END)
server = 'app.presencepro.com'
elif brand == 'myplace':
print(Color.BOLD + '\nMyPlace - Smart. Simple. Secure.' + Color.END)
server = 'iot.peoplepowerco.com'
elif brand == 'origin':
print(Color.BOLD + '\nOrigin Home HQ' + Color.END)
server = 'app.originhomehq.com.au'
elif brand == 'innogy':
print(Color.BOLD + '\ninnogy SmartHome' + Color.END)
server = 'innogy.presencepro.com'
else:
sys.stderr.write('This brand does not exist: ' + str(brand) + '\n\n')
return 1
if location_id is not None:
print(Color.BOLD + ('Location ID: {}').format(location_id) + Color.END)
location_id = int(location_id)
_https_proxy = None
if args.https_proxy is not None:
_https_proxy = {'https': args.https_proxy}
if args.zip is not None:
if args.zip:
TAR = False
make_it_so = args.make_it_so
if make_it_so is not None:
commit = make_it_so
publish = make_it_so
if approve:
publish = approve
if httpdebug:
try:
except ImportError:
http_client.HTTPConnection.debuglevel = 1
logging.basicConfig()
logging.getLogger().setLevel(logging.DEBUG)
requests_log = logging.getLogger('requests.packages.urllib3')
requests_log.setLevel(logging.DEBUG)
requests_log.propagate = True
user_info = None
botengine_key = None
inputs = None
if args.json is not None:
json_input = args.json
if json_input.startswith("'"):
json_input = json_input[1:]
if json_input.endswith("'"):
json_input = json_input[:-1]
try:
inputs = json.loads(str(json_input))
botengine_key = inputs['apiKey']
server = inputs['apiHost']
except:
sys.stderr.write("Couldn't parse the JSON input data\n")
sys.stderr.write(json_input)
sys.stderr.write('\n\n')
if _servermode:
_bot_logger.error("Couldn't parse the JSON input data, date=" + str(json_input))
return 1
if not server:
server = DEFAULT_BASE_SERVER_URL
if 'http' not in server:
server = 'https://' + server
print('Bot Server: ' + server)
if listdevices:
if user_key is None:
user_key = _login(server, username, password)
_summarize_devices(server, user_key, location_id)
return 0
if list_locations:
if user_key is None:
user_key = _login(server, username, password)
user_info = _get_user_info(server, user_key)
if 'locations' not in user_info:
print(Color.RED + 'This user account does not have access to any locations.\n\n' + Color.END)
else:
print(Color.BOLD + 'Locations' + Color.END)
print('-' * 50)
for location_object in user_info['locations']:
print(('\t{}{}{}: {}').format(Color.BOLD, str(location_object['id']), Color.END, location_object['name'].encode('utf-8')))
print()
return 0
if devicetypes:
if user_key is None:
user_key = _login(server, username, password)
_summarize_device_types(server, user_key)
return 0
if device_type_model:
if user_key is None:
user_key = _login(server, username, password)
_print_model(server, user_key, device_type_model)
print('\n' + the_bot() + 'Done!')
return 0
if parameter:
if user_key is None:
user_key = _login(server, username, password)
p = _get_parameters(server, user_key, parameter)
if 'deviceParams' not in p:
print(Color.RED + 'This parameter is not defined on this server.' + Color.END)
else:
p = p['deviceParams'][0]
print(Color.BOLD + Color.GREEN + parameter + Color.END)
if 'numeric' in p:
if p['numeric']:
print(Color.BOLD + 'Type: ' + Color.END + 'Numeric values')
else:
print(Color.BOLD + 'Type: ' + Color.END + 'Non-numeric values')
if 'systemUnit' in p:
print(Color.BOLD + 'Units: ' + Color.END + p['systemUnit'])
if 'scale' in p:
print(Color.BOLD + 'Accuracy: ' + Color.END + 'Store up to ' + str(p['scale']) + ' digits after the decimal')
profiled = False
configured = False
historical = None
if 'profiled' in p:
profiled = p['profiled']
if 'configured' in p:
configured = p['configured']
if 'historical' in p:
historical = p['historical']
if profiled and configured:
print(Color.BOLD + 'Usage: ' + Color.END + 'Measurement & Command')
elif profiled and not configured:
print(Color.BOLD + 'Usage: ' + Color.END + 'Measurement only')
elif configured and not profiled:
print(Color.BOLD + 'Usage: ' + Color.END + 'Command only')
if historical is not None:
if historical == 0:
print(Color.BOLD + 'Storage: ' + Color.END + 'Current state only, no database history')
elif historical == 1:
print(Color.BOLD + 'Storage: ' + Color.END + 'Update database history every 15 minutes or when the value changes more than 25%')
elif historical == 2:
print(Color.BOLD + 'Storage: ' + Color.END + 'Update database history on every change')
print('\n' + the_bot() + 'Done!')
return 0
if listapps:
if user_key is None:
user_key = _login(server, username, password)
_summarize_apps(server, user_key)
return 0
if beta_purchase_bot:
if user_key is None:
user_key = _login(server, username, password)
bundle = beta_purchase_bot
bundle = bundle.replace('/', '')
j = _beta_purchase_bot(server, user_key, bundle, beta_add_user_id)
print('Result: ' + json.dumps(j, indent=2, sort_keys=True))
print()
return 0
if beta_add_user_id or beta_delete_user_id:
if beta_test_bot is None:
sys.stderr.write('Need to also specify a --beta_test_bot bundle ID.\n')
return 1
if user_key is None:
user_key = _login(server, username, password)
if beta_add_user_id:
print('Adding user ID ' + str(beta_add_user_id) + ' to beta test ' + beta_test_bot + '...')
_add_bot_beta_testers(server, user_key, beta_test_bot, beta_add_user_id)
else:
_delete_bot_beta_testers(server, user_key, beta_test_bot, beta_delete_user_id)
if beta_test_bot:
if user_key is None:
user_key = _login(server, username, password)
j = _get_bot_beta_testers(server, user_key, beta_test_bot)
if 'betaTesters' in j:
if len(j['betaTesters']) == 0:
print('There are no beta testers for this bot.')
else:
if len(j['betaTesters']) == 1:
print('There is one beta tester for this bot.')
else:
print('There are ' + str(j['betaTesters']) + ' beta testers for this bot.')
for tester in j['betaTesters']:
print('\t* User ID: ' + str(tester['userId']))
print()
return 0
if user_id:
if user_key is None:
user_key = _login(server, username, password)
user_info = _get_user_info(server, user_key)
print(('User ID: {}{}{}\n').format(Color.BOLD, user_info['user']['id'], Color.END))
return 0
if botinfo:
if user_key is None:
user_key = _login(server, username, password)
bundle = botinfo
bundle = bundle.replace('/', '')
j = _get_apps(server, user_key, bundle)
bots = j.get('bots')
if bots is None:
sys.stderr.write("The bot '" + bundle + "' does not belong to you, or you haven't created it yet.\n")
return 1
sys.stderr.write(Color.BOLD + '\nAPP MARKETING INFO ON THE SERVER\n' + Color.END)
sys.stderr.write(json.dumps(bots[0], indent=2, sort_keys=True))
sys.stderr.write('\n\n')
j = _get_versions(server, user_key, bundle)
versions = j.get('versions')
if versions is None:
sys.stderr.write(Color.RED + "The bot '" + bundle + "' does not have any version!" + Color.END)
sys.stderr.write(Color.RED + "\nUse 'botengine --commit " + bundle + "' to commit your first version.\n\n" + Color.END)
return 1
sys.stderr.write(Color.BOLD + '\nAPP VERSION INFO ON THE SERVER\n' + Color.END)
sys.stderr.write(json.dumps(versions, indent=2, sort_keys=True))
sys.stderr.write('\n\n')
return 0
if botstats:
if user_key is None:
user_key = _login(server, username, password)
bundle = botstats
bundle = bundle.replace('/', '')
stats = _get_app_statistics(server, user_key, bundle)
print(Color.BOLD + 'RATINGS' + Color.END)
print('Average rating across all versions: ' + str(stats['rating']['average']))
stars_dict = stats['rating']
TOTAL_STARS = 20
ordered_keys = ['star5', 'star4', 'star3', 'star2', 'star1']
if stars_dict['total'] > 0:
for key in ordered_keys:
stars = int(float(stars_dict[key]) / float(stars_dict['total']) * TOTAL_STARS)
title = Color.BOLD + key.replace('star', '') + '-stars' + Color.END
print(title + ': [' + '*' * stars + ' ' * (TOTAL_STARS - stars) + ']')
versions_dict = {}
for v in stats['versions']:
versions_dict[int(v['creationDateMs'])] = v
first = True
i = 0
for key in sorted(versions_dict.keys(), reverse=False):
i += 1
print('-' * 50)
if i == len(versions_dict.keys()):
print(Color.GREEN + Color.BOLD + 'NEWEST VERSION' + Color.END)
print(Color.BOLD + 'VERSION ' + str(versions_dict[key]['version']) + Color.END)
print('\t' + Color.BOLD + 'Creation Date: ' + Color.END + str(versions_dict[key]['creationDate']))
print('\t' + Color.BOLD + 'Average Execution Time: ' + Color.END + Color.PURPLE + str(versions_dict[key]['averageExecutionTime']) + ' [ms]' + Color.END)
print('\t' + Color.BOLD + 'Total Executions: ' + Color.END + str(versions_dict[key]['totalExecutions']))
print('\t' + Color.BOLD + 'Failed Executions: ' + Color.END + str(versions_dict[key]['failedExecutions']))
if float(versions_dict[key]['totalExecutions']) > 0:
print('\t' + Color.BOLD + 'Failure Rate: ' + Color.END + '%.2f' % (100 * (float(versions_dict[key]['failedExecutions']) / float(versions_dict[key]['totalExecutions']))) + '%')
print('\t' + Color.BOLD + 'Current Status: ' + Color.END + str(VERSION_STATUS_DICT[versions_dict[key]['status']]))
print('\t' + Color.BOLD + 'Status updated on: ' + Color.END + str(versions_dict[key]['statusChangeDate']))
print()
stars_dict = versions_dict[key]['rating']
if stars_dict['total'] > 0:
for key in ordered_keys:
stars = int(float(stars_dict[key]) / float(stars_dict['total']) * TOTAL_STARS)
title = key.replace('star', '') + '-stars'
print('\t' + title + ': [' + '*' * stars + ' ' * (TOTAL_STARS - stars) + ']')
else:
print('\tNo ratings.')
print('\n')
print(Color.GREEN + Color.BOLD + 'TOTAL STATISTICS' + Color.END)
print(Color.BOLD + 'Total current users: ' + Color.END + str(stats['totalCurrentUsers']))
print(Color.BOLD + 'Total current bot instances: ' + Color.END + str(stats['totalCurrentInstances']))
print(Color.BOLD + 'Total executions: ' + Color.END + str(stats['totalExecutions']))
print(Color.BOLD + 'Total execution time: ' + Color.END + str(float(stats['totalExecutionTime']) / 1000.0) + ' [sec]; %.2f' % (float(stats['totalExecutionTime']) / 1000.0 / 60.0 / 60.0) + ' [hours]; %.2f' % (float(stats['totalExecutionTime']) / 1000.0 / 60.0 / 60.0 / 24.0) + ' [days]; %.2f' % (float(stats['totalExecutionTime']) / 1000.0 / 60.0 / 60.0 / 24.0 / 30.0) + ' [months]')
print()
return 0
if boterrors:
if user_key is None:
user_key = _login(server, username, password)
bundle = boterrors
bundle = bundle.replace('/', '')
exists = False
try:
j = _get_bot_errors(server, user_key, bundle, developer=False)
print(Color.BOLD + '\n\nPUBLIC VERSION' + Color.END)
print(json.dumps(j, indent=2, sort_keys=True))
exists = True
except:
pass
else:
try:
j = _get_bot_errors(server, user_key, bundle, developer=True)
print(Color.BOLD + '\n\nDEVELOPER VERSION' + Color.END)
print(json.dumps(j, indent=2, sort_keys=True))
exists = True
except:
pass
if not exists:
print(Color.BOLD + Color.RED + '\n\nThat bot does not exist.' + Color.END)
return 0
if botlogs:
if user_key is None:
user_key = _login(server, username, password)
bundle = botlogs
bundle = bundle.replace('/', '')
j = _get_bot_errors(server, user_key, bundle, errors_only=False)
print(json.dumps(j, indent=2, sort_keys=True))
return 0
if search:
if user_key is None:
user_key = _login(server, username, password)
search_results = _botstore_search(server, user_key, searchBy=search_criteria)
for bot in search_results:
if bot['compatible']:
compatibility = Color.GREEN + '(Compatible)' + Color.END
else:
compatibility = Color.RED + '(Incompatible)' + Color.END
print(Color.BOLD + '+ ' + bot['name'] + ' - by ' + bot['author'] + ' ' + compatibility + Color.END)
print('\t' + Color.UNDERLINE + bot['bundle'] + Color.END)
try:
print('\t' + bot['description'].replace('\n', '\n\t\t'))
except:
pass
else:
print('\n')
return 0
if appstore_view_bundle_id:
if user_key is None:
user_key = _login(server, username, password)
appstore_view_bundle_id = appstore_view_bundle_id.replace('/', '')
app_info = _botstore_botinfo(server, user_key, appstore_view_bundle_id)
sys.stderr.write(json.dumps(app_info, indent=2, sort_keys=True))
sys.stderr.write('\n\n')
return 0
if my_purchased_bots:
if user_key is None:
user_key = _login(server, username, password)
bots = _botstore_mybots(server, user_key, location_id=location_id, organization_id=organization_id)
if bots is not None:
for bot in bots:
status = Color.RED + 'Unknown status' + Color.END
if bot['status'] == 0:
status = Color.RED + 'NOT CONFIGURED' + Color.END
elif bot['status'] == 1:
status = Color.GREEN + 'ACTIVE' + Color.END
elif bot['status'] == 2:
status = Color.RED + 'INACTIVE' + Color.END
print('Bot Instance ' + str(bot['appInstanceId']) + ': ' + bot['bundle'] + '; Version ' + bot['version'] + '; ' + status)
else:
sys.stderr.write(Color.RED + 'You have not obtained or purchased any bots.' + Color.END)
print('\n' + the_bot() + 'Done!')
return 0
if pause_bot_instance_id:
if user_key is None:
user_key = _login(server, username, password)
try:
_botstore_mybots(server, user_key, pause_bot_instance_id, location_id=location_id, organization_id=organization_id)
except BotError as e:
bundle = pause_bot_instance_id
print('Trying bundle ' + bundle)
bundle = bundle.replace('/', '')
pause_bot_instance_id = _get_instance_id_from_bundle_id(server, user_key, bundle)
if pause_bot_instance_id is None:
sys.stderr.write(Color.RED + 'This bot instance is not in your personal account.\n\n' + Color.END)
return 1
print('Found bot instance ' + Color.BOLD + str(pause_bot_instance_id) + Color.END + ' matching the bundle ID you provided')
else:
result = _botstore_configure(server, user_key, pause_bot_instance_id, None, STATUS_BOT_INACTIVE, location_id)
if result:
print(the_bot() + 'Paused!')
return 0
print('Something went wrong during configuration.')
return 1
# --- Resume a paused bot instance (--play) ---
# Mirror of the pause branch: numeric instance ID first, bundle-ID fallback.
if play_bot_instance_id:
    if user_key is None:
        user_key = _login(server, username, password)
    try:
        current_app_configuration = _botstore_mybots(server, user_key, play_bot_instance_id, location_id=location_id, organization_id=organization_id)
    except Exception as e:
        bundle = play_bot_instance_id
        print('Trying bundle ' + play_bot_instance_id)
        bundle = bundle.replace('/', '')
        play_bot_instance_id = _get_instance_id_from_bundle_id(server, user_key, bundle, location_id=location_id)
        if play_bot_instance_id is None:
            sys.stderr.write(Color.RED + 'This bot instance is not in your personal account.\n\n' + Color.END)
            return 1
        print('Found bot instance ' + Color.BOLD + str(play_bot_instance_id) + Color.END + ' matching the bundle ID you provided')
    else:
        # NOTE(review): as in the pause branch, the bundle-resolution path never
        # reaches _botstore_configure because of the try/except/else shape —
        # possibly a decompilation artifact; confirm against the original source.
        result = _botstore_configure(server, user_key, play_bot_instance_id, None, STATUS_BOT_ACTIVE, location_id)
        if result:
            print(the_bot() + 'Resuming execution!')
            return 0
        print('Something went wrong during configuration.')
        return 1
# --- Delete a bot instance (--delete) ---
# Tries the value as an instance ID first; on failure falls back to treating
# it as a bundle ID and resolving it before retrying the delete.
if delete_bot_instance_id:
    if user_key is None:
        user_key = _login(server, username, password)
    if _botstore_deletebot(server, user_key, delete_bot_instance_id, location_id):
        print(the_bot() + 'Bot instance ' + str(delete_bot_instance_id) + ' deleted!')
    else:
        # Fall back: strip any trailing '/' and resolve the bundle to an instance ID.
        delete_bot_instance_id = delete_bot_instance_id.replace('/', '')
        delete_bot_instance_id = _get_instance_id_from_bundle_id(server, user_key, delete_bot_instance_id, location_id=location_id)
        # NOTE(review): if the lookup returns None this still prints
        # 'Found bot instance None ...' before the delete fails — confirm intent.
        print('Found bot instance ' + Color.BOLD + str(delete_bot_instance_id) + Color.END + ' matching the bundle ID you provided')
        if _botstore_deletebot(server, user_key, delete_bot_instance_id, location_id):
            print(the_bot() + 'Bot instance ' + str(delete_bot_instance_id) + ' deleted!')
        else:
            print(Color.RED + 'That bot instance is not in your account.' + Color.END)
            sys.stderr.write('\n\n')
    return 0
# --- Interactive Q&A loop for a bot instance (--questions) ---
# Resolves the argument to an instance ID (bundle-ID fallback), then loops:
# fetch open questions, render them grouped by section, let the user answer.
# NOTE: uses Python 2 APIs (raw_input, dict.iterkeys); not Python 3 compatible.
if args.questions_bot_instance_id:
    if user_key is None:
        user_key = _login(server, username, password)
    instance_id = None
    try:
        instance_id = int(args.questions_bot_instance_id)
    except:
        bundle = args.questions_bot_instance_id
        print('Trying bundle ' + bundle)
        bundle = bundle.replace('/', '')
        instance_id = _get_instance_id_from_bundle_id(server, user_key, bundle, location_id=location_id)
        if instance_id is None:
            sys.stderr.write(Color.RED + 'This bot instance is not in your personal account.\n\n' + Color.END)
            return 1
        print('Found bot instance ' + Color.BOLD + str(instance_id) + Color.END + ' matching the bundle ID you provided')
    all_questions = False
    while True:
        # answer_status (1,2,3,4) selects every non-deleted answer state.
        if all_questions:
            response = _get_questions(server, user_key, answer_status=(1, 2, 3, 4), location_id=location_id)
        else:
            response = _get_questions(server, user_key, instance_id=instance_id, answer_status=(1, 2, 3, 4), location_id=location_id)
        if 'questions' not in response:
            sys.stderr.write(Color.RED + '\n\nThis bot has asked no questions.' + Color.END)
        print('QUESTIONS: ' + str(json.dumps(response, indent=2, sort_keys=True)))
        questions = []
        if 'questions' in response:
            questions = response['questions']
        front_page_questions = []
        editable_questions = {}
        # questions_by_id maps the sequential display number to the question dict.
        questions_by_id = {}
        question_id = 0
        for q in questions:
            if q['front']:
                front_page_questions.append(q)
                questions_by_id[question_id] = q
                question_id += 1
            if q['editable']:
                if q['sectionId'] not in editable_questions:
                    editable_questions[q['sectionId']] = []
                editable_questions[q['sectionId']].append(q)
        # Render editable questions section by section, ordered by questionWeight.
        for section_id in sorted(editable_questions.iterkeys()):
            editable_questions[section_id].sort(key=lambda x: x['questionWeight'], reverse=False)
            if 'sectionTitle' in editable_questions[section_id][0]:
                print('\n' + Color.BOLD + editable_questions[section_id][0]['sectionTitle'] + Color.END)
            for q in editable_questions[section_id]:
                print('[' + Color.GREEN + str(question_id) + Color.END + '] : ' + q['question'])
                questions_by_id[question_id] = q
                question_id += 1
        print('\n\n' + Color.BOLD + 'Settings' + Color.END)
        if all_questions:
            print('[' + Color.PURPLE + 'B' + Color.END + '] : Show only the questions for the bot instance you selected.')
        else:
            print('[' + Color.PURPLE + 'A' + Color.END + '] : Show all questions, not filtered by a bot instance.')
        print('[' + Color.PURPLE + 'Enter' + Color.END + '] : Exit.')
        choice = raw_input('\nSelect a question ID to answer (ENTER to quit): ')
        if choice == '':
            print()
            exit(0)
        if choice.lower() == 'a':
            all_questions = True
            continue
        elif choice.lower() == 'b':
            all_questions = False
            continue
        try:
            choice = int(choice)
        except:
            print(Color.RED + 'Please type a number.' + Color.END)
            continue
        else:
            # NOTE(review): choice is not range-checked — an out-of-range number
            # raises KeyError on questions_by_id[choice]; confirm desired behavior.
            _print_question(questions_by_id[choice])
            answer = raw_input('\nYour answer (ENTER to skip): ')
            if answer == '':
                print()
                continue
            else:
                _answer_question(server, user_key, questions_by_id[choice], answer, location_id)
    return
# --- Commit (build + upload) a bot (--commit) ---
# Validates the reverse-DNS bundle name, merges .redirect-ed sources into a
# '.precommit_<bundle>' staging dir, parses marketing.json / runtime.json
# (with '#' comment lines stripped), uploads marketing + runtime metadata,
# assembles the bot archive (tar or zip, with pip-installed deps for the AWS
# Lambda runtime), uploads it, then polls the server for processing status.
if commit:
    if len(commit.split('.')) != 3:
        sys.stderr.write(Color.RED + "Your new bot name must conform to reverse domain-name notation, as in 'com.yourname.BotName'" + Color.END)
        sys.stderr.write('\n\n')
        return 1
    bundle = commit.replace('/', '')
    base_path = os.path.join(os.getcwd(), '.precommit_' + bundle)
    temporary_bot_directory = None
    # Resolve .redirect files and copy the effective sources into base_path.
    _merge_redirects(os.path.join(os.getcwd(), commit), base_path)
    if user_key is None:
        user_key = _login(server, username, password)
    marketing_file = os.path.join(base_path, 'marketing.json')
    version_file = os.path.join(base_path, 'runtime.json')
    if not os.path.exists(marketing_file):
        sys.stderr.write(marketing_file + ' does not exist')
        sys.stderr.write('\n\n')
        if os.path.isdir(base_path):
            shutil.rmtree(base_path, ignore_errors=True)
        return 1
    if not os.path.exists(version_file):
        sys.stderr.write(version_file + ' does not exist')
        sys.stderr.write('\n\n')
        if os.path.isdir(base_path):
            shutil.rmtree(base_path, ignore_errors=True)
        return 1
    # Both JSON files support '#'-prefixed comment lines, which are stripped here.
    marketing_text = ''
    with open(marketing_file) as (f):
        for line in f:
            line = line.strip()
            if not line.startswith('#'):
                marketing_text += line
    version_text = ''
    with open(version_file) as (f):
        for line in f:
            line = line.strip()
            if not line.startswith('#'):
                version_text += line
    try:
        marketing_data = json.loads(marketing_text)
    except:
        sys.stderr.write(Color.RED + "Your 'marketing.json' file isn't fully JSON-compliant.\n" + Color.END)
        sys.stderr.write(Color.RED + 'Make sure all quotations are closed, and that your commas are not too many or too few.\n' + Color.END)
        sys.stderr.write(Color.RED + 'How about taking it over to a JSON validator, like http://jsonlint.com or https://jsonformatter.curiousconcept.com\n\n' + Color.END)
        if os.path.isdir(base_path):
            shutil.rmtree(base_path, ignore_errors=True)
        return 1
    else:
        try:
            version_data = json.loads(version_text)
        except:
            sys.stderr.write(Color.RED + "Your 'runtime.json' file isn't fully JSON-compliant.\n" + Color.END)
            sys.stderr.write(Color.RED + 'Make sure all quotations are closed, and that your commas are not too many or too few.\n' + Color.END)
            sys.stderr.write(Color.RED + 'How about taking it over to a JSON validator, like http://jsonlint.com or https://jsonformatter.curiousconcept.com' + Color.END)
            if os.path.isdir(base_path):
                shutil.rmtree(base_path, ignore_errors=True)
            return 1
    # NOTE(review): bot_path is never assigned anything but None in this branch,
    # so the `if bot_path:` cleanup in the finally clause is dead code.
    bot_path = None
    try:
        _bot_filename = '_bot_' + bundle
        print('Uploading the marketing file...')
        _create_or_update_app(server, user_key, bundle, marketing_data)
        organizational_bot = False
        if 'organizational' in marketing_data['app']:
            organizational_bot = marketing_data['app']['organizational'] == 1
        if 'version' not in version_data:
            print("Your runtime.json file is missing a 'version' element. That's bizarre. Please fix it.")
            if os.path.isdir(base_path):
                shutil.rmtree(base_path, ignore_errors=True)
            return -1
        # runtime 1 == AWS Lambda; it is also the default when unspecified.
        if 'runtime' not in version_data['version']:
            version_data['version']['runtime'] = 1
        print('Uploading the runtime configuration...')
        _update_latest_version(server, user_key, bundle, version_data)
        print('Generating the bot...')
        aws_lambda = version_data['version']['runtime'] == 1
        temporary_bot_directory = '.' + os.sep + '.bot_' + bundle
        bot_subdirectory = ''
        if not aws_lambda:
            # Non-Lambda runtimes expect the sources under a 'content' subdirectory.
            bot_subdirectory = os.sep + 'content'
        if os.path.isdir(temporary_bot_directory):
            shutil.rmtree(temporary_bot_directory, ignore_errors=True)
        # Files never shipped inside the bot archive; users extend via .botignore.
        ignore_list = ['.botignore', '.DS_Store', 'icon.png', '.redirect']
        botignore_file = base_path + os.sep + '.botignore'
        if os.path.isfile(botignore_file):
            with open(botignore_file) as (f):
                for line in f:
                    if not line.startswith('#') and line.strip() != '':
                        ignore_list.append(line.strip())
        print('Ignoring files (add more in your .botignore file): \n' + str(ignore_list))
        shutil.copytree(base_path, temporary_bot_directory + bot_subdirectory, ignore=shutil.ignore_patterns(*ignore_list))
        if aws_lambda:
            # Bundle third-party packages into the archive for Lambda; remotely
            # installed packages are only reported, not vendored locally.
            pip_install = ['dill', 'requests', 'python-dateutil', 'tzlocal']
            pip_install = list(set(pip_install) | set(_extract_packages(temporary_bot_directory + bot_subdirectory, True)))
            pip_install_remotely = _extract_packages(temporary_bot_directory + bot_subdirectory, False)
            print('Locally installed packages: ' + str(pip_install))
            print('Remotely installed packages: ' + str(pip_install_remotely))
            command_line = [sys.executable, '-m', 'pip', 'install']
            command_line.extend(pip_install)
            command_line.extend(['-t', temporary_bot_directory])
            reqs = subprocess.check_output(command_line)
        if os.path.exists('botengine'):
            # Pre-compile the botengine script to bytecode if it is the plain
            # '#!/usr/bin/env python' script (not already compiled).
            with open('botengine', 'r') as (f):
                first_line = f.readline()
            if first_line.strip() == '#!/usr/bin/env python':
                if os.path.exists('botengine_bytecode'):
                    os.remove('botengine_bytecode')
                py_compile.compile('botengine', 'botengine_bytecode')
        if aws_lambda:
            # NOTE(review): nesting of the lambda.py copy under the bytecode
            # check is inferred from the decompiled layout — confirm upstream.
            if os.path.exists('.' + os.sep + 'botengine_bytecode'):
                shutil.copyfile('.' + os.sep + 'botengine_bytecode', temporary_bot_directory + os.sep + 'botengine.pyc')
                if os.path.exists('.' + os.sep + 'lambda.py'):
                    shutil.copyfile('.' + os.sep + 'lambda.py', temporary_bot_directory + os.sep + 'lambda.py')
            elif os.path.exists('.' + os.sep + 'botengine'):
                shutil.copyfile('.' + os.sep + 'botengine', temporary_bot_directory + os.sep + 'botengine')
        else:
            if os.path.exists('.' + os.sep + 'botengine_bytecode'):
                shutil.copyfile('.' + os.sep + 'botengine_bytecode', temporary_bot_directory + os.sep + 'botengine_bytecode')
        if TAR:
            shutil.make_archive(_bot_filename, 'tar', temporary_bot_directory)
            size = os.path.getsize(_bot_filename + '.tar')
        else:
            shutil.make_archive(_bot_filename, 'zip', temporary_bot_directory)
            size = os.path.getsize(_bot_filename + '.zip')
        print('Uploading the bot [' + str(size / 1000000) + 'MB]...')
        response = _upload_bot(server, user_key, bundle, _bot_filename, TAR)
        icon_path = os.path.join(base_path, 'icon.png')
        if os.path.exists(icon_path):
            print('Uploading the icon...')
            _upload_icon(server, user_key, bundle, icon_path)
        else:
            print('Missing the icon...')
        if TAR:
            # Tar uploads are processed asynchronously; poll once per second
            # until resultCode 0 (success) or an error result appears.
            if 'requestId' not in response:
                sys.stderr.write(Color.RED + 'This bot was not uploaded properly.\n' + Color.END)
                sys.stderr.write(Color.RED + 'The response from the server was : ' + json.dumps(response, indent=2, sort_keys=True) + '\n\n' + Color.END)
                return -1
            sys.stdout.write('Processing the bot at the server...')
            sys.stdout.flush()
            while True:
                status = _check_bot_processing(server, user_key, response['requestId'])
                if 'resultCode' in status['result']:
                    if status['result']['resultCode'] == 0:
                        break
                    elif 'resultCodeMessage' in status['result']:
                        sys.stderr.write(Color.RED + '\n\n' + status['result']['resultCodeMessage'] + '\n' + Color.END)
                        return -1
                    else:
                        sys.stderr.write(Color.RED + '\n\nThis bot was not processed properly at the server.\n' + Color.END)
                        sys.stderr.write(json.dumps(status, indent=2, sort_keys=True))
                        return -1
                sys.stdout.write('.')
                sys.stdout.flush()
                time.sleep(1)
            sys.stdout.write('\n')
            sys.stdout.flush()
    except BotError as e:
        sys.stderr.write('BotEngine Error: ' + e.msg)
        sys.stderr.write('\n\n')
        if os.path.isdir(base_path):
            shutil.rmtree(base_path, ignore_errors=True)
        return 2
    finally:
        # Cleanup of staging artifacts regardless of success/failure.
        # NOTE(review): only the '.tar' archive is removed here — a '.zip'
        # produced when TAR is False is left behind; confirm intent.
        if bot_path:
            os.remove(bot_path)
        if temporary_bot_directory is not None:
            if os.path.isdir(temporary_bot_directory):
                shutil.rmtree(temporary_bot_directory, ignore_errors=True)
        if base_path is not None:
            if os.path.isdir(base_path):
                shutil.rmtree(base_path, ignore_errors=True)
        if os.path.exists(_bot_filename + '.tar'):
            os.remove(_bot_filename + '.tar')
    if make_it_so is None:
        # Offer to purchase the freshly committed bot into the developer's own
        # account, unless it is already there or is an organizational bot.
        bots = _botstore_mybots(server, user_key, location_id=location_id, organization_id=organization_id)
        if bots is not None:
            for bot in bots:
                if bot['bundle'] == bundle:
                    print(the_bot() + 'Done!')
                    return 0
        purchase_bundle_id = bundle
        choice = 'n'
        if not organizational_bot:
            choice = raw_input('Purchase this bot into your personal account (y/n)? ')
        if choice.lower() != 'y':
            print(the_bot() + 'Done!\n')
            return 0
# --- Purchase a bot by bundle ID (--purchase) ---
if purchase_bundle_id:
    if user_key is None:
        user_key = _login(server, username, password)
    purchase_bundle_id = purchase_bundle_id.replace('/', '')
    bot_instance_id = _botstore_purchasebot(server, user_key, purchase_bundle_id, location_id=location_id, organization_id=organization_id)
    print(the_bot() + Color.BOLD + 'Purchased bot instance ID: ' + Color.GREEN + str(bot_instance_id) + Color.END + '\n')
    # NOTE(review): this assignment is dead code given the immediate return —
    # the original source likely fell through into the configure flow below
    # instead of returning here; confirm against the original source.
    configure_bot_instance_id = bot_instance_id
    return 0
# --- Interactive configuration wizard for a bot instance (--configure) ---
# Resolves the argument (instance ID or bundle ID), then walks the user
# through nickname, timezone, mode/file/pro-monitoring/device/communication
# permissions, and finally submits the new configuration with an ACTIVE status.
if configure_bot_instance_id:
    if user_key is None:
        user_key = _login(server, username, password)
    user_info = _get_user_info(server, user_key)
    bundle = ''
    try:
        full_configuration = _botstore_mybots(server, user_key, configure_bot_instance_id, location_id=location_id, organization_id=organization_id)
        current_app_configuration = full_configuration[0]
        bundle = current_app_configuration['bundle']
        new_app_configuration = get_editable_bot_configuration(current_app_configuration)
    except Exception:
        # Fallback: treat the argument as a bundle ID and resolve it.
        bundle = configure_bot_instance_id
        bundle = bundle.replace('/', '')
        configure_bot_instance_id = _get_instance_id_from_bundle_id(server, user_key, bundle, location_id=location_id)
        if configure_bot_instance_id is None:
            sys.stderr.write(Color.RED + 'This bot instance is not in your personal account.\n\n' + Color.END)
            return 1
        print('Found bot instance ' + Color.BOLD + str(configure_bot_instance_id) + Color.END + ' matching the bundle ID you provided')
        full_configuration = _botstore_mybots(server, user_key, configure_bot_instance_id, location_id=location_id, organization_id=organization_id)
        current_app_configuration = full_configuration[0]
        new_app_configuration = get_editable_bot_configuration(current_app_configuration)
    else:
        # NOTE(review): general_app_info is only assigned on this no-exception
        # path, yet it is read unconditionally below — the bundle-fallback path
        # would raise NameError. Likely a decompilation artifact; confirm.
        general_app_info = _botstore_botinfo(server, user_key, bundle)
    devices = _get_devices_from_location(server, user_key, location_id)
    is_organizational_app = False
    if 'organizational' in general_app_info:
        is_organizational_app = general_app_info['organizational'] == 1
    # --- Nickname ---
    print(Color.BOLD + '\n\nNICKNAME' + Color.END)
    try:
        nickname = current_app_configuration['nickname']
    except:
        nickname = None
    else:
        if not nickname:
            # Fall back to the bot's display name when no nickname is set.
            try:
                nickname = current_app_configuration['name']
            except:
                nickname = None
    print("The bot's current nickname in your account is '" + Color.BOLD + str(nickname) + Color.END + "'.")
    new_nickname = raw_input(Color.GREEN + 'Change the nickname, or press enter to keep the current nickname: ' + Color.END)
    if new_nickname:
        nickname = new_nickname
    new_app_configuration['app']['nickname'] = nickname
    # --- Timezone: default to the machine's local zone unless the user keeps the old one ---
    new_timezone = str(get_localzone())
    try:
        original_timezone = new_app_configuration['app']['timezone']
    except:
        original_timezone = None
    else:
        if original_timezone:
            print(Color.BOLD + '\n\nTIMEZONE' + Color.END)
            print("The bot's current timezone is '" + Color.BOLD + original_timezone + Color.END + "'.")
            change_timezone = raw_input(Color.GREEN + "Update it to '" + Color.BOLD + new_timezone + Color.END + "'? (y/n): " + Color.END)
            if change_timezone:
                if change_timezone.lower() != 'y':
                    new_timezone = original_timezone
    new_app_configuration['app']['timezone'] = new_timezone
    # --- Mode / file / pro-monitoring / challenge / rule permissions ---
    # Access categories: 1=modes (per location), 2=media files,
    # 3=professional monitoring, 5=challenges, 6=rules. Pressing Enter accepts.
    print(Color.BOLD + '\n\nMODE AND FILE PERMISSIONS' + Color.END)
    try:
        access_block = general_app_info['access']
    except:
        access_block = None
    new_access_block = []
    if access_block:
        for access in access_block:
            if access['category'] == 1:
                for location in user_info['locations']:
                    print("At your '" + Color.BOLD + location['name'] + Color.END + "' location:")
                    print('This bot wants access to your modes')
                    for r in access['reason']:
                        print('\t' + r + ': ' + access['reason'][r])
                    ok = raw_input(Color.GREEN + "\tEnter to accept, 'n' to opt-out: " + Color.END)
                    print('')
                    configured_access_block = {}
                    if ok and ok.lower() != 'y':
                        pass
                    else:
                        configured_access_block['category'] = access['category']
                        configured_access_block['locationId'] = location['id']
                        configured_access_block['trigger'] = access['trigger']
                        configured_access_block['read'] = access['read']
                        configured_access_block['control'] = access['control']
                        new_access_block.append(configured_access_block)
            elif access['category'] == 2:
                print('This bot wants to access your ' + Color.BOLD + 'Media Files' + Color.END + '.')
                for r in access['reason']:
                    print('\t' + r + ': ' + access['reason'][r])
                ok = raw_input(Color.GREEN + "\tEnter to accept, 'n' to opt-out: " + Color.END)
                print('')
                configured_access_block = {}
                if ok and ok.lower() != 'y':
                    pass
                else:
                    configured_access_block['category'] = access['category']
                    configured_access_block['trigger'] = access['trigger']
                    configured_access_block['read'] = access['read']
                    configured_access_block['control'] = access['control']
                    new_access_block.append(configured_access_block)
                continue
            # NOTE(review): the if/elif chain restarting here (and the bare
            # `continue` statements) look like decompiler artifacts of a single
            # elif chain — behavior is equivalent; confirm against the source.
            if access['category'] == 3:
                print('This bot wants to access your ' + Color.BOLD + 'Professional Monitoring Services' + Color.END + '.')
                for r in access['reason']:
                    print('\t' + r + ': ' + access['reason'][r])
                ok = raw_input(Color.GREEN + "\tEnter to accept, 'n' to opt-out: " + Color.END)
                print('')
                configured_access_block = {}
                if ok and ok.lower() != 'y':
                    pass
                else:
                    configured_access_block['category'] = access['category']
                    configured_access_block['trigger'] = access['trigger']
                    configured_access_block['read'] = access['read']
                    configured_access_block['control'] = access['control']
                    new_access_block.append(configured_access_block)
            elif access['category'] == 5:
                print('This bot wants to access your ' + Color.BOLD + 'Challenges' + Color.END + '.')
                for r in access['reason']:
                    print('\t' + r + ': ' + access['reason'][r])
                ok = raw_input(Color.GREEN + "\tEnter to accept, 'n' to opt-out: " + Color.END)
                print('')
                configured_access_block = {}
                if ok and ok.lower() != 'y':
                    pass
                else:
                    configured_access_block['category'] = access['category']
                    configured_access_block['trigger'] = access['trigger']
                    configured_access_block['read'] = access['read']
                    configured_access_block['control'] = access['control']
                    new_access_block.append(configured_access_block)
            elif access['category'] == 6:
                print('This bot wants to access your ' + Color.BOLD + 'Rules' + Color.END + '.')
                for r in access['reason']:
                    print('\t' + r + ': ' + access['reason'][r])
                ok = raw_input(Color.GREEN + "\tEnter to accept, 'n' to opt-out: " + Color.END)
                print('')
                configured_access_block = {}
                if ok and ok.lower() != 'y':
                    pass
                else:
                    configured_access_block['category'] = access['category']
                    configured_access_block['trigger'] = access['trigger']
                    configured_access_block['read'] = access['read']
                    configured_access_block['control'] = access['control']
                    new_access_block.append(configured_access_block)
                continue
    # --- Per-device permissions (category 4); skipped for organizational bots ---
    try:
        devices_block = general_app_info['deviceTypes']
    except:
        devices_block = None
    if devices_block and not is_organizational_app:
        print(Color.BOLD + '\n\nDEVICE PERMISSIONS' + Color.END)
        for device_block in devices_block:
            for focused_device in devices:
                if focused_device['type'] == device_block['id']:
                    print("This bot wants to access your '" + Color.BOLD + focused_device['desc'].encode('utf-8') + Color.END + "'.")
                    for r in device_block['reason']:
                        print('\t' + r + ': ' + device_block['reason'][r])
                    ok = raw_input(Color.GREEN + "\tEnter to accept, 'n' to opt-out: " + Color.END)
                    print('')
                    configured_access_block = {}
                    configured_access_block['category'] = 4
                    configured_access_block['deviceId'] = focused_device['id']
                    # Devices are opt-out: declined devices are recorded as excluded.
                    if ok and ok.lower() != 'y':
                        configured_access_block['excluded'] = True
                    else:
                        configured_access_block['excluded'] = False
                    new_access_block.append(configured_access_block)
    # --- Communication permissions: who the bot may message and how ---
    # Communication categories: 0=you, 1=friends, 2=family, 3=community group, 4=admins.
    try:
        communications_block = general_app_info['communications']
    except:
        communications_block = None
    new_communications_block = []
    if communications_block:
        print(Color.BOLD + '\n\nCOMMUNICATION PERMISSIONS' + Color.END)
        for comm in communications_block:
            destinations = []
            if comm['email']:
                destinations.append('email')
            if comm['msg']:
                destinations.append('in-bot messages')
            if comm['push']:
                destinations.append('push notifications')
            if comm['sms']:
                destinations.append('sms')
            # Build an English list phrase, e.g. 'email, sms, and push notifications'.
            phrase = ''
            i = 0
            for m in destinations:
                i = i + 1
                if len(destinations) > 1:
                    if i == len(destinations):
                        phrase = phrase + 'and ' + m
                    if i < len(destinations):
                        phrase = phrase + m + ', '
                else:
                    phrase = m
            if comm['category'] == 0:
                print(Color.GREEN + 'This bot wants to send' + Color.BOLD + ' you ' + Color.END + Color.GREEN + phrase + ". Enter to accept, 'n' to opt-out: " + Color.END)
                ok = raw_input('> ')
            elif comm['category'] == 1:
                print(Color.GREEN + 'This bot wants to send' + Color.BOLD + ' your friends ' + Color.END + Color.GREEN + phrase + ". Enter to accept, 'n' to opt-out: " + Color.END)
                ok = raw_input('> ')
            elif comm['category'] == 2:
                print(Color.GREEN + 'This bot wants to send' + Color.BOLD + ' your family ' + Color.END + Color.GREEN + phrase + ". Enter to accept, 'n' to opt-out: " + Color.END)
                ok = raw_input('> ')
            elif comm['category'] == 3:
                print(Color.GREEN + 'This bot wants to send' + Color.BOLD + ' your community group ' + Color.END + Color.GREEN + phrase + ". Enter to accept, 'n' to opt-out: " + Color.END)
                ok = raw_input('> ')
            elif comm['category'] == 4:
                print(Color.GREEN + 'This bot wants to send' + Color.BOLD + ' your admins ' + Color.END + Color.GREEN + phrase + ". Enter to accept, 'n' to opt-out: " + Color.END)
                ok = raw_input('> ')
            # Empty input (Enter) accepts; any non-'y' text opts out of this channel set.
            if ok:
                if ok.lower() != 'y':
                    continue
            configured_comms_block = {}
            configured_comms_block['category'] = comm['category']
            configured_comms_block['email'] = comm['email']
            configured_comms_block['push'] = comm['push']
            configured_comms_block['sms'] = comm['sms']
            configured_comms_block['msg'] = comm['msg']
            new_communications_block.append(configured_comms_block)
    # Submit the assembled configuration and activate the bot.
    new_app_configuration['app']['access'] = new_access_block
    new_app_configuration['app']['communications'] = new_communications_block
    new_app_configuration['app']['status'] = STATUS_BOT_ACTIVE
    status = STATUS_BOT_ACTIVE
    result = _botstore_configure(server, user_key, configure_bot_instance_id, new_app_configuration, status, location_id)
    if result:
        print(the_bot() + 'Configured!')
        return 0
    print('Something went wrong during configuration.')
    return 1
# --- Print the effective permissions of a bot instance (--permissions) ---
# Renders rwx-style flags (read / control / trigger) for locations (category 1),
# professional monitoring (category 3), and individual devices (category 4).
if permissions_bot_instance_id:
    if user_key is None:
        user_key = _login(server, username, password)
    bundle = ''
    try:
        full_configuration = _botstore_mybots(server, user_key, permissions_bot_instance_id, location_id=location_id, organization_id=organization_id)
        current_app_configuration = full_configuration[0]
        bundle = current_app_configuration['bundle']
        new_app_configuration = get_editable_bot_configuration(current_app_configuration)
    except Exception:
        # Fallback: treat the argument as a bundle ID and resolve it.
        bundle = permissions_bot_instance_id
        bundle = bundle.replace('/', '')
        permissions_bot_instance_id = _get_instance_id_from_bundle_id(server, user_key, bundle, location_id=location_id)
        if permissions_bot_instance_id is None:
            sys.stderr.write(Color.RED + 'This bot instance is not in your personal account.\n\n' + Color.END)
            return 1
        print('Found bot instance ' + Color.BOLD + str(permissions_bot_instance_id) + Color.END + ' matching the bundle ID you provided')
        full_configuration = _botstore_mybots(server, user_key, permissions_bot_instance_id, location_id=location_id, organization_id=organization_id)
        current_app_configuration = full_configuration[0]
        new_app_configuration = get_editable_bot_configuration(current_app_configuration)
    devices = _get_devices_from_location(server, user_key, location_id)
    print(Color.BOLD + '\nLOCATIONS' + Color.END)
    found = False
    for access in current_app_configuration['access']:
        if access['category'] == 1:
            found = True
            permissions = ''
            if access['read']:
                permissions += Color.GREEN + 'r' + Color.END
            else:
                permissions += Color.RED + '-' + Color.END
            if access['control']:
                permissions += Color.GREEN + 'w' + Color.END
            else:
                permissions += Color.RED + '-' + Color.END
            if access['trigger']:
                permissions += Color.GREEN + 'x' + Color.END
            else:
                permissions += Color.RED + '-' + Color.END
            print(permissions + ' Location ' + str(access['locationId']))
    if not found:
        print(Color.RED + '---' + Color.END + ' This bot cannot access any of your Locations or modes' + Color.END)
    print(Color.BOLD + '\nPROFESSIONAL MONITORING' + Color.END)
    found = False
    for access in current_app_configuration['access']:
        if access['category'] == 3:
            found = True
            permissions = ''
            if access['read']:
                permissions += Color.GREEN + 'r' + Color.END
            else:
                permissions += Color.RED + '-' + Color.END
            if access['control']:
                permissions += Color.GREEN + 'w' + Color.END
            else:
                permissions += Color.RED + '-' + Color.END
            if access['trigger']:
                permissions += Color.GREEN + 'x' + Color.END
            else:
                permissions += Color.RED + '-' + Color.END
            print(permissions + ' Professional Monitoring Services')
    if not found:
        print(Color.RED + '---' + Color.END + ' This bot cannot access professional monitoring services.')
    print(Color.BOLD + '\nDEVICES' + Color.END)
    found = False
    for access in current_app_configuration['access']:
        if access['category'] == 4:
            # Match the access record against the location's device list by ID.
            for device in devices:
                if 'deviceId' in access:
                    if device['id'] == access['deviceId']:
                        found = True
                        permissions = ''
                        if access['read']:
                            permissions += Color.GREEN + 'r' + Color.END
                        else:
                            permissions += Color.RED + '-' + Color.END
                        if access['control']:
                            permissions += Color.GREEN + 'w' + Color.END
                        else:
                            permissions += Color.RED + '-' + Color.END
                        if access['trigger']:
                            permissions += Color.GREEN + 'x' + Color.END
                        else:
                            permissions += Color.RED + '-' + Color.END
                        print((permissions + ' [' + access['deviceId'] + '] ' + device['desc']).encode('utf-8'))
    if not found:
        print(Color.RED + '---' + Color.END + ' This bot cannot access any of your devices' + Color.END)
    print('\n' + the_bot() + 'Done!')
    return 0
# --- Self-update the botengine tooling (--update) ---
if update:
    print("If asked, please provide your computer's password to install an update")
    # SECURITY NOTE(review): this pipes an unauthenticated remote script
    # straight into `sudo /bin/bash` — network content runs as root with no
    # checksum/signature verification.
    subprocess.call('curl -s https://raw.githubusercontent.com/peoplepower/botlab/master/installer.sh | sudo /bin/bash', shell=True)
    return 0
# --- Submit a bot version for publication review (--publish) ---
# Version status codes used below: 2=submitted for review, 3=under review,
# 4=published. With --make_it_so / --approve, an admin login pushes the
# version straight through review to published.
if publish:
    if make_it_so is None and approve is None:
        are_you_sure = raw_input('Are you sure you want to submit this bot for review to make it public? (y/n): ')
        if are_you_sure.lower() != 'y':
            print(the_bot() + 'Ok, aborting.')
            return 0
    if user_key is None:
        # NOTE(review): the elif condition mixes admin_username with password —
        # both arms call the same _login; confirm the intended condition upstream.
        if username is not None and password is not None:
            user_key = _login(server, username, password)
        elif admin_username is None and password is None:
            user_key = _login(server, username, password)
    bundle = publish.replace('/', '')
    if user_key is not None:
        _update_version_status(server, user_key, bundle, 2, ignore_errors=True)
    try:
        if make_it_so is not None or approve is not None:
            # Fast-path approval: reuse developer credentials as admin credentials
            # when dedicated admin credentials were not supplied.
            if admin_username is None:
                admin_username = username
            if admin_password is None:
                admin_password = password
            admin_key = _login(server, admin_username, admin_password, admin=True)
            print(the_bot() + 'Submitted for review.')
            _update_version_status(server, admin_key, bundle, 3)
            print(the_bot() + 'Under review.')
            _update_version_status(server, admin_key, bundle, 4)
            print(the_bot() + 'Published. Done!\n')
            return 0
    except BotError as e:
        sys.stderr.write('BotEngine Error: ' + e.msg)
        sys.stderr.write('\n\n')
        return 2
    print(the_bot() + 'Awaiting review! You can always --reject this version if you need to make updates.')
    return 0
# --- Developer-reject a submitted version (--reject) ---
# Version status 6 = developer rejected; returns the version to an editable state.
if reject:
    if user_key is None:
        user_key = _login(server, username, password)
    bundle = reject.replace('/', '')
    try:
        _update_version_status(server, user_key, bundle, 6)
    except BotError as e:
        sys.stderr.write('BotEngine Error: ' + e.msg)
        sys.stderr.write('\n\n')
        return 2
    print(the_bot() + 'Developer rejected!')
    return 0
# --- Allow an organization to purchase a bot (--add_organization) ---
# Requires --organization to identify the target organization.
if add_organization:
    if user_key is None:
        user_key = _login(server, username, password)
    if organization_id is None:
        sys.stderr.write('Supply the ID of the organization with --organization\n\n')
        return 2
    try:
        _allow_organization_to_purchase_bot(server, user_key, add_organization, organization_id, organization_development_mode)
    except BotError as e:
        sys.stderr.write('BotEngine Error: ' + e.msg)
        sys.stderr.write('\n\n')
        return 2
    print(the_bot() + 'Done!')
    return 0
if remove_organization:
if user_key is None:
user_key = _login(server, username, password)
if organization_id is None:
sys.stderr.write('Supply the ID of the organization with --organization\n\n')
return 2
try:
_prevent_organization_from_purchasing_bot(server, user_key, add_organization, organization_id)
except BotError as e:
sys.stderr.write('BotEngine Error: ' + e.msg)
sys.stderr.write('\n\n')
return 2
print(the_bot() + 'Done!')
return 0
# --- Download historical device data to CSV (--download_device / --download_type / --download_all) ---
# Prompts for a lookback window in days, then exports measurements for a single
# device, every device of a type (optionally organization-wide), or all devices.
if download_device_id or download_device_type or download_devices:
    if user_key is None:
        user_key = _login(server, username, password)
    daysAgo = raw_input('How many days ago to start collecting data: ')
    try:
        initialization_days = int(daysAgo)
    except:
        initialization_days = 1
    else:
        # NOTE(review): start_date is only bound on this no-exception path —
        # non-numeric input leaves it unbound and the downloads below would
        # raise NameError. Likely a decompilation artifact of straight-line
        # code; confirm against the original source.
        start_date = datetime.date.today() + dateutil.relativedelta.relativedelta(days=-initialization_days)
    if download_device_id:
        # Resolve the device's type and description for the CSV filename.
        device_name = 'NONAME'
        device_type = 'NOTYPE'
        all_devices = _get_devices_from_location(server, user_key, location_id)
        for device in all_devices:
            if device['id'] == download_device_id:
                device_name = str(device['desc'].encode('utf-8'))
                device_type = str(device['type'])
                break
        _downloaded_data_to_csv(server, user_key, start_date, download_device_id, device_type, device_name, location_id=location_id)
        print(the_bot() + 'Done!')
        return 0
    if download_device_type:
        if organization_id:
            # Organization-wide export: one CSV per matching device per user.
            devices = _get_devices_from_organization(server, user_key, organization_id, download_device_type)
            for device in devices['devices']:
                print('Downloading ' + str(device['id'].encode('utf-8')) + " '" + str(device['desc'].encode('utf-8')) + "' from user " + str(device['user']['id']) + ' ...')
                _downloaded_data_to_csv(server, user_key, start_date, device['id'].encode('utf-8'), download_device_type, str(device['desc'].encode('utf-8')), location_id=location_id, user_id=device['user']['id'])
        else:
            devices = _get_devices_from_location(server, user_key, location_id)
            for device in devices:
                if int(device['type']) == int(download_device_type):
                    print('Downloading ' + str(device['id'].encode('utf-8')) + " - '" + str(device['desc'].encode('utf-8')) + "' ...")
                    _downloaded_data_to_csv(server, user_key, start_date, device['id'].encode('utf-8'), location_id=location_id, device_type=download_device_type, device_name=str(device['desc'].encode('utf-8')))
        print(the_bot() + ' Done!')
        return 0
    if download_devices:
        print('Downloading all devices')
        device_name = 'NONAME'
        device_type = 'NOTYPE'
        all_devices = _get_devices_from_location(server, user_key, location_id)
        for device in all_devices:
            device_name = str(device['desc'].encode('utf-8'))
            device_type = str(device['type'])
            print('Downloading ' + device_name + ' ' + device_type)
            _downloaded_data_to_csv(server, user_key, start_date, device['id'].encode('utf-8'), device_type, device_name, location_id=location_id)
        print(the_bot() + 'Done!')
        return 0
if record:
if user_key is None:
user_key = _login(server, username, password)
daysAgo = raw_input('How many days ago to record: ')
try:
initialization_days = int(daysAgo)
except:
initialization_days = 1
destination = raw_input('What directory should we save this into: ')
if not os.path.exists(destination):
os.makedirs(destination)
if organization_id is not None:
org_users = _get_organization_users(server, user_key, organization_id)
if len(org_users) == 0:
print('This organization has no users.')
exit(1)
users = []
print(('Capturing information on {} users...').format(len(org_users)))
iteration = 0
for u in org_users:
iteration += 1
amount_done = float(iteration) / float(len(org_users))
sys.stdout.write(('\rProgress: [{0:50s}] {1:.1f}%').format('#' * int(amount_done * 50), amount_done * 100))
users += [_get_user_info(server, user_key, u['id'])]
else:
users = [
_get_user_info(server, user_key)]
for user_info in users:
location_id = user_info['locations'][0]['id']
user_id = user_info['user']['id']
destination_directory = destination + os.sep + str(user_id)
if not os.path.exists(destination_directory):
os.makedirs(destination_directory)
filenames = []
start_date = datetime.date.today() + dateutil.relativedelta.relativedelta(days=-initialization_days)
filenames.append(_download_modes_history_to_csv(server, user_key, location_id, start_date, destination_directory=destination_directory))
if organization_id is not None:
devices = _get_devices_from_organization(server, user_key, organization_id, user_id=user_id)
else:
devices = _get_devices_from_location(server, user_key, location_id)
for device in devices:
print('Capturing ' + str(device['id'].encode('utf-8')) + " - '" + str(device['desc'].encode('utf-8')) + "' ...")
filenames += _downloaded_data_to_csv(server, user_key, start_date, device['id'].encode('utf-8'), user_id=user_id, location_id=location_id, device_type=device['type'], device_name=str(device['desc'].encode('utf-8')), destination_directory=destination_directory)
open_files = {}
headers = {}
for filename in filenames:
headers[filename] = {}
open_files[filename] = open(filename)
headers[filename]['headers'] = open_files[filename].readline().replace('\n', '').split(',')
for header in headers[filename]['headers']:
if header != '':
headers[filename][header] = None
values = open_files[filename].readline().replace('\n', '').split(',')
for i in range(0, len(values)):
if len(values) != len(headers[filename]['headers']):
print('# values != # headers for filename ' + filename)
print('values: ' + str(values))
print('headers: ' + str(headers[filename]['headers']))
headers[filename][headers[filename]['headers'][i]] = values[i]
output_filename = destination_directory + os.sep + 'recording_user_' + str(user_info['user']['id']) + '-' + str(initialization_days) + '_days.json'
print('Writing ' + output_filename + '...')
with open(output_filename, 'w') as (out):
out.write('{\n')
out.write('"user_info":' + json.dumps(user_info) + ',\n')
out.write('"data":[\n')
first_entry = True
while True:
oldest_timestamp_ms = None
oldest_filename = None
for filename in filenames:
if 'timestamp_ms' in headers[filename]:
if headers[filename]['timestamp_ms'] is not None:
if oldest_timestamp_ms is None:
oldest_timestamp_ms = int(headers[filename]['timestamp_ms'])
oldest_filename = filename
elif int(headers[filename]['timestamp_ms']) < oldest_timestamp_ms:
oldest_timestamp_ms = int(headers[filename]['timestamp_ms'])
oldest_filename = filename
if oldest_filename is None:
break
output = headers[oldest_filename].copy()
del output['headers']
if '' in output:
del output['']
if first_entry:
first_entry = False
else:
out.write(',\n')
out.write(str(json.dumps(output)))
values = open_files[oldest_filename].readline().replace('\n', '').split(',')
headers[oldest_filename]['timestamp_ms'] = None
for i in range(0, len(values)):
headers[oldest_filename][headers[oldest_filename]['headers'][i]] = values[i]
out.write('\n]}\n')
for file in open_files:
open_files[file].close()
print(the_bot() + ' Done!')
return 0
if playback:
try:
os.remove('playback.txt')
except:
pass
recording = json.load(open(playback))
print('Loaded ' + str(len(recording['data'])) + ' records from ' + playback)
raw_access_content = {}
user_info = recording['user_info']
playback_user_info = user_info
location_id = user_info['locations'][0]['id']
location_timezone = user_info['locations'][0]['timezone']
location_name = user_info['locations'][0]['name']
location_zip = None
location_lat = None
location_long = None
if 'zip' in user_info['locations'][0]:
location_zip = user_info['locations'][0]['zip']
if 'latitude' in user_info['locations'][0]:
location_lat = user_info['locations'][0]['latitude']
if 'longitude' in user_info['locations'][0]:
location_long = user_info['locations'][0]['longitude']
base_path = os.path.join(os.getcwd(), '.' + botname)
_merge_redirects(os.path.join(os.getcwd(), botname), base_path)
sys.path.insert(0, base_path)
bot = importlib.import_module('bot')
runtime_text = ''
with open(os.path.join(base_path, 'runtime.json')) as (f):
for line in f:
line = line.strip()
if not line.startswith('#'):
runtime_text += line
runtime = json.loads(runtime_text)['version']
if runtime['trigger'] & 2 != 0:
raw_access_content['location'] = {'category': 1,
'control': True,
'location': {'event': 'HOME',
'latitude': location_lat,
'locationId': location_id,
'longitude': location_long,
'name': location_name,
'timezone': location_timezone,
'zip': location_zip},
'read': True,
'trigger': False}
device_type_triggers = []
device_id_params = {}
for device in runtime['deviceTypes']:
device_type_triggers.append(device['id'])
botengine = BotEngine({'apiKey': None, 'apiHosts': []})
botengine._download_binary_variable = playback_download_binary_variable
botengine.flush_binary_variables = playback_flush_binary_variables
botengine.flush_commands = playback_flush_commands
botengine.flush_questions = playback_flush_questions
botengine.flush_rules = playback_flush_rules
botengine.flush_tags = playback_flush_tags
botengine.delete_all_rules = playback_delete_all_rules
botengine.get_user_info = playback_get_user_info
botengine._execute_again_at_timestamp = playback_execute_again_at_timestamp
botengine.notify = playback_notify
botengine.set_mode = playback_set_mode
botengine.cancel_timers = playback_cancel_timers
if 'run' in dir(bot):
for d in recording['data']:
inputs = {}
inputs['access'] = []
trigger = int(d['trigger'])
timestamp = int(d['timestamp_ms'])
if 'location' in raw_access_content:
if 'prevEvent' in raw_access_content['location']:
del raw_access_content['location']['prevEvent']
for access_id in raw_access_content:
raw_access_content[access_id]['trigger'] = False
if trigger == 2:
if 'location' in raw_access_content:
raw_access_content['location']['location']['prevEvent'] = raw_access_content['location']['location']['event']
raw_access_content['location']['location']['event'] = d['event']
raw_access_content['location']['trigger'] = True
else:
continue
elif trigger == 8:
if int(d['device_type']) in device_type_triggers:
raw_access_content[d['device_id']] = {'category': 4, 'control': False,
'device': {'connected': True,
'description': d['description'],
'deviceId': d['device_id'],
'deviceType': int(d['device_type']),
'locationId': int(location_id),
'measureDate': timestamp,
'startDate': 0,
'updateDate': timestamp},
'read': True,
'trigger': True}
if d['device_id'] not in device_id_params:
device_id_params[d['device_id']] = {}
inputs['measures'] = []
for parameter in d:
measure = {'deviceId': d['device_id'],
'name': parameter}
if parameter in device_id_params[d['device_id']]:
measure['prevTime'] = device_id_params[d['device_id']][parameter]['time']
measure['prevValue'] = device_id_params[d['device_id']][parameter]['value']
measure['updated'] = d[parameter] != measure['prevValue']
else:
measure['updated'] = True
measure['time'] = timestamp
measure['value'] = d[parameter]
device_id_params[d['device_id']][parameter] = measure
inputs['measures'].append(measure)
else:
continue
for access_id in raw_access_content:
inputs['access'].append(raw_access_content[access_id])
inputs['time'] = timestamp
inputs['trigger'] = trigger
inputs['userId'] = user_info['user']['id']
while playback_timer_timestamp is not None and playback_timer_timestamp < timestamp:
timer_inputs = {}
timer_inputs['time'] = playback_timer_timestamp
timer_inputs['trigger'] = 64
timer_inputs['userId'] = user_info['user']['id']
timer_inputs['access'] = []
for access_id in raw_access_content:
timer_inputs['access'].append(raw_access_content[access_id])
playback_current_timestamp = playback_timer_timestamp
playback_timer_timestamp = None
_run(bot, {'inputs': [timer_inputs]}, _bot_logger, botengine_override=botengine)
playback_variables = botengine.variables
playback_current_timestamp = timestamp
_run(bot, {'inputs': [inputs]}, _bot_logger, botengine_override=botengine)
playback_variables = botengine.variables
print(the_bot() + ' Done!')
return 0
if generate:
botname = generate.replace('/', '')
base_path = os.path.join(os.getcwd(), '.' + botname)
_merge_redirects(os.path.join(os.getcwd(), botname), base_path)
print(('Bot generated in directory: {}').format(os.path.join(os.getcwd(), base_path)))
print(the_bot() + ' Done!')
return 0
if not botname and not instance:
sys.stderr.write('No bot selected to run, use --help\n')
if _servermode:
_bot_logger.error('No bot selected to run\n\n')
return 1
if botname or instance:
if botname:
botname = botname.replace('/', '')
if botname is not None and organization_id is not None and instance is None:
sys.stderr.write(Color.RED + 'Missing the bot instance ID.\n' + Color.END)
sys.stderr.write(Color.RED + 'To run a bot under an organization: -r <bot_bundle_id> -o <organization_id> -i <bot_instance_id>' + Color.END)
sys.stderr.write('\n\n')
return 1
if not botengine_key:
if user_key is None:
user_key = _login(server, username, password)
try:
if not instance:
instance = _get_instance_id_from_bundle_id(server, user_key, botname, challenge_id, location_id=location_id)
if instance is None:
sys.stderr.write(Color.RED + 'You must first purchase and configure a bot in your account before you can run it.' + Color.END)
sys.stderr.write('\n\n')
return 1
except BotError as e:
sys.stderr.write('BotEngine Error: ' + e.msg)
sys.stderr.write('\n\n')
return 2
base_path = os.path.join(os.getcwd(), '.' + botname)
_merge_redirects(os.path.join(os.getcwd(), botname), base_path)
if os.path.exists('botengine'):
with open('botengine', 'r') as (f):
first_line = f.readline()
if first_line.strip() == '#!/usr/bin/env python':
if os.path.exists('botengine_bytecode'):
os.remove('botengine_bytecode')
py_compile.compile('botengine', 'botengine_bytecode')
if os.path.exists('.' + os.sep + 'botengine_bytecode'):
shutil.copyfile('.' + os.sep + 'botengine_bytecode', base_path + os.sep + 'botengine.pyc')
if os.path.exists('.' + os.sep + 'lambda.py'):
shutil.copyfile('.' + os.sep + 'lambda.py', base_path + os.sep + 'lambda.py')
sys.path.insert(0, base_path)
bot = importlib.import_module('bot')
if 'run' in dir(bot):
if forever:
version_file = os.path.join(base_path, 'runtime.json')
if not os.path.exists(version_file):
sys.stderr.write(Color.RED + version_file + ' does not exist' + Color.END)
sys.stderr.write(Color.RED + "You must run the BotEngine a level below your bot's directory" + Color.END)
sys.stderr.write('\n\n')
return 1
if not device_server:
device_server = _get_ensemble_server_url(server)
if 'http' not in device_server:
device_server = 'https://' + device_server
device_server = device_server.replace('sbox2', 'sbox1').replace('sboxall', 'sbox1')
print('Device Server: ' + device_server)
print('Running forever, until you press CTRL+Z to quit\n')
_run_locally_forever(server, device_server, user_key, bot, instance)
else:
_run(bot, inputs, _bot_logger)
else:
sys.stderr.write("This bot does not contain a 'run' method\n\n")
_bot_logger.error("This bot does not contain a 'run' method")
return 1
return 0
except KeyboardInterrupt:
return 0
except SystemExit:
return 0
except BotError as e:
_bot_logger.error('BotEngine Error: ' + e.msg)
if _servermode:
_bot_logger.error('BotEngine Error: code=%s, msg=%s' % (e.code, e.msg))
return 2
except:
e = sys.exc_info()[0]
s = traceback.format_exc()
sys.stderr.write(s + '\n\n')
_bot_logger.error(s)
if _servermode:
_bot_logger.error(s)
return 3
return | 076cf570126b0da1fa7ac2db42ece4c34d81197e | 3,627,605 |
import os
from datetime import datetime
def last_modification_datetime(path):
    """Return the last-modification time of *path* as a ``datetime``.

    :param path: filesystem path (str or path-like) to inspect
    :return: naive local-time ``datetime`` of the last modification
    :raises OSError: if *path* does not exist or is inaccessible
    """
    mtime = os.path.getmtime(path)
    # Bug fix: the module does ``from datetime import datetime``, so the
    # class itself is in scope -- ``datetime.datetime`` raised AttributeError.
    return datetime.fromtimestamp(mtime)
def create_text_blocks(font: pygame.freetype.Font, padding: int) -> dict:
    """Build the text blocks used later for on-screen display.

    :param font: font used to render every text block
    :param padding: padding applied around each block
    :return: mapping of block key to its :py:class:`TextBlock`
    """
    specs = (
        ("title", "Dane atomu czerwonego", 0, 0),
        ("bounces", "Ilość odbić", 3, 0),
        ("average", "Średnia droga swobodna", 3, 2),
    )
    # Each block gets its own black Color instance, as before.
    return {
        key: TextBlock(text, row, col, pygame.Color(0, 0, 0), font, padding)
        for key, text, row, col in specs
    }
import json
import os
def root_data(path):
    """Serve a data resource referenced by the client.

    Security notes:
    1. Python source files are never served to clients.
    2. All other file types are resolved inside the project's data
       directory (taken from the ``ApiData`` environment variable).

    :param path: relative path of the requested resource
    :return: a Flask file response, or a JSON error document for Python files
    """
    denied = {
        'success': 0,
        'message': "不允许客户端直接读取任何*.py代码文件"
    }
    # Bug fix: the old check ``path.rfind('py') > len(path) - 4`` also
    # rejected harmless names merely ending in "py"; test the actual file
    # extension instead.  ``.pyc`` is blocked too, matching the intent of
    # never exposing Python code.
    if path.endswith(('.py', '.pyc')):
        return json.dumps(denied)
    data_path = os.environ.get("ApiData")  # data root served by this API
    return send_from_directory(data_path, path)
def testable_files(files):
    """
    Filter *files* down to those whose extension appears in
    TESTABLE_FILE_EXTENSIONS, excluding anything blacklisted by
    NON_TESTABLE_FILES (metrics.yaml, auto_conf.yaml).
    """
    def _is_testable(name):
        # Both module-level constants are tuples usable with str.endswith.
        return name.endswith(TESTABLE_FILE_EXTENSIONS) and not name.endswith(NON_TESTABLE_FILES)

    return [name for name in files if _is_testable(name)]
def longest_cont_matrix(pool):
    """Takes in a list of sequences (pool) and returns
    a matrix with longest stretch of identity"""
    # Guard: the routine only accepts a plain list of sequences.
    if type(pool) != list:
        raise ValueError('Pool is not a list')
    size = len(pool)
    identity = np.zeros((size, size))
    # The measure is symmetric, so compute each unordered pair once
    # (strict lower triangle) and mirror the value across the diagonal.
    for row in range(size):
        for col in range(row):
            score = longest_cont(pool[row], pool[col])
            identity[row, col] = score
            identity[col, row] = score
    return identity
def as_int(val):
    """Convert *val* to an ``int`` if possible.

    :param val: value to convert (typically a string)
    :return: the integer value, or ``None`` when conversion fails
        (empty/invalid string, ``None`` input, ...)
    """
    try:
        return int(val)
    except (ValueError, TypeError):
        # ValueError: empty or non-numeric string.
        # TypeError: None or other unconvertible objects -- the original
        # bare ValueError handler let ``as_int(None)`` raise.
        return None
def stream(xp, yp):
    """
    Calculates the stream function in physical space.
    Clockwise rotation. One full rotation corresponds to 1 (second).
    """
    # psi = pi * r^2 where r^2 is the squared distance from the origin
    radius_sq = xp ** 2 + yp ** 2
    return np.pi * radius_sq
def _get_switch_by_ip(switch_ip, session=None, **kwargs):
    """Get switch by switch ip.

    :param switch_ip: dotted-quad IP address of the switch to look up
    :param session: optional DB session forwarded to the query helper
    :param kwargs: extra filters passed through to ``utils.get_db_object``
    :return: the matching ``models.Switch`` row
    """
    # Bug fix: ``long`` only exists on Python 2; ``int`` is unbounded on
    # Python 3 and behaves identically here (py2 auto-promotes as needed).
    switch_ip_int = int(netaddr.IPAddress(switch_ip))
    return utils.get_db_object(
        session, models.Switch,
        ip_int=switch_ip_int, **kwargs
    )
def decodeFont(name):
    """
    Parses a font string into a tkinter <code>Font</code> object.
    This method accepts a font in either the <code>Font.decode</code>
    used by Java or in the form of a CSS-based style string.
    """
    # Try the CSS/JS flavour first, then fall back to the Java flavour.
    parsed = parseJSFont(name)
    if parsed is not None:
        return parsed
    return parseJavaFont(name)
def get_value(obj, key, default=None):
    """
    Return a value by *key* from several container shapes.

    Supports dicts (item lookup), iterables of key/value pairs such as a
    list of lists (linear scan), and arbitrary objects (attribute lookup).

    :param obj: dict, iterable of pairs, or object
    :param key: dict key / pair key / attribute name
    :param default: value returned when *key* is not found
    :return: the matching value or *default*
    """
    if isinstance(obj, dict):
        return obj.get(key, default)
    elif hasattr(obj, '__iter__'):
        for item in obj:
            # Bug fix: the original re-tested ``hasattr(obj, '__iter__')``
            # (always true on this branch) instead of checking the *item*.
            if hasattr(item, '__iter__') and len(item) > 1 and item[0] == key:
                return item[1]
        return default
    else:
        return getattr(obj, key, default)
def validate_edges(g, max_kmph=200):
    """
    Make sure all edges can possibly be traversed with reasonable speed
    """
    passable, impossible = [], []
    for src, dst, attrs in g.edges(data=True):
        duration = attrs.get("duration")
        distance_km = attrs.get("distance")
        # Missing data, or distances < 1 km, are not precise enough to judge.
        if duration is None or distance_km is None or distance_km < 1.0:
            passable.append((src, dst))
            continue
        # Clamp the duration to at least one minute, expressed in hours.
        seconds = max(abs(duration.total_seconds()), 60)
        duration_hrs = seconds / 60.0 / 60.0
        # The edge is impossible when a train would need to exceed max_kmph.
        if distance_km / duration_hrs > max_kmph:
            impossible.append((src, dst))
        else:
            passable.append((src, dst))
    return passable, impossible
from typing import Union
import pathlib
def uploadFile(localPath:pathlike, remotePath:Union[str, S3Path]) -> Outcome[str, Union[str, S3Path]]:
    """Upload a local file to S3, skipping the transfer when it is current.

    Assumes AWS credentials exist in the environment.

    Though the types involved are different, the signature for
    downloadFile, uploadFile, and uploadData follows the same pattern:
    source -> destination -> destination

    :param localPath: path of the local file to upload
    :param remotePath: destination, either a string or an S3Path
    :return: Success(remotePath) on upload or skip, Failure with a
        diagnostic message otherwise
    """
    s3path = S3Path(remotePath)
    try:
        obj = boto3.resource("s3").Object(s3path.bucket, s3path.key)
        if isCurrent(obj, pathlib.Path(localPath)):
            # Remote object already matches the local file; nothing to do.
            return Success(remotePath)
        else:
            obj.upload_file(str(localPath))
            return Success(remotePath)
    except Exception as err:
        # Bug fix: the message was a plain string, so the placeholders were
        # never interpolated; it must be an f-string.
        return Failure(track(f"Unable to upload {localPath} to {remotePath}. Reason: {err}"))
def calculate_bayes_probability(df: np.ndarray = 0, clusters_means: np.ndarray = 0, clusters_variances: np.ndarray = 0,
                                clusters_weights: np.ndarray = 0, k: int = 0):
    """ calculate the normalized Bayes probability of each point per all clusters
    # Arguments
        df: arrays of points
        clusters_means: position of a cluster's center
        clusters_variances: variance of each cluster
        clusters_weights: weight of each cluster
        k: amount of clusters
    # Output
        2-D matrix of normalized Bayes probabilities. Each row represents a point, and each column represents a cluster
    """
    # Generalization: size the matrix from the data instead of the
    # hard-coded 150 rows (which only worked for the 150-sample iris set).
    points_probability_matrix = np.zeros((len(df), k))
    for idx, point in enumerate(df):
        for cluster_idx, cluster_mean in enumerate(clusters_means):
            # unnormalized posterior: weight * likelihood under the cluster
            points_probability_matrix[idx, cluster_idx] = clusters_weights[cluster_idx] * calculate_normal_distribution(point, cluster_mean, clusters_variances[cluster_idx])
        # normalize the row (epsilon guards against division by zero)
        # NOTE(review): the weights are applied a second time here, on top of
        # the weighting above -- looks suspicious, but preserved as-is.
        points_probability_matrix[idx, :] = clusters_weights * points_probability_matrix[idx, :] / (np.sum(points_probability_matrix[idx, :]) + 10 ** -10)
    return points_probability_matrix
from typing import Tuple
import requests
def comanage_check_person_couid(person_id, couid) -> Tuple[int, bool]:
    """
    Check if a given person is a member of couid. Return tuple of API status code
    and True or False. Strings or integers accepted as parameters

    :param person_id: COmanage CO person id (str or int)
    :param couid: COU id to test membership against (str or int)
    :return: tuple of (status code, membership flag); 500 is used for
        transport errors and malformed responses
    """
    assert person_id is not None
    assert couid is not None
    # COID, CO_REGISTRY_URL, COAPI_USER and COAPI_KEY are module-level
    # configuration values defined elsewhere in this file.
    params = {'coid': str(COID), 'copersonid': str(person_id)}
    try:
        response = requests.get(url=CO_REGISTRY_URL + 'co_person_roles.json',
                                params=params, auth=HTTPBasicAuth(COAPI_USER, COAPI_KEY))
    except requests.exceptions.RequestException as e:
        # Network/transport failure: report it as a synthetic 500.
        log.debug(f"COmanage request exception {e} encountered in co_person_roles.json, "
                  f"returning status 500")
        return 500, False
    if response.status_code == 204:
        # we got nothing back, just say so
        return 200, False
    if response.status_code != 200:
        return response.status_code, False
    response_obj = response.json()
    if response_obj.get('CoPersonRoles', None) is None:
        # 200 without the expected payload -- treat as a server-side problem.
        log.debug(f"COmanage request returned no personal roles in co_person_roles.json")
        return 500, False
    # Membership holds if any of the person's roles carries the target COU id.
    for role in response_obj['CoPersonRoles']:
        if role.get('CouId', None) is not None and role['CouId'] == str(couid):
            return response.status_code, True
    return response.status_code, False
from operator import concat
def tensor_contract(*tensors, output_inds=None, get=None,
                    backend=None, **contract_opts):
    """Efficiently contract multiple tensors, combining their tags.
    Parameters
    ----------
    tensors : sequence of Tensor
        The tensors to contract.
    output_inds : sequence of str
        If given, the desired order of output indices, else defaults to the
        order they occur in the input indices.
    get : {None, 'expression', 'path-info', 'opt_einsum'}, optional
        What to return. If:
        * ``None`` (the default) - return the resulting scalar or Tensor.
        * ``'expression'`` - return the ``opt_einsum`` expression that
          performs the contraction and operates on the raw arrays.
        * ``'symbol-map'`` - return the dict mapping ``opt_einsum`` symbols
          to tensor indices.
        * ``'path-info'`` - return the full ``opt_einsum`` path object with
          detailed information such as flop cost. The symbol-map is also
          added to the ``quimb_symbol_map`` attribute.
    backend : {'numpy', 'cupy', 'tensorflow', 'theano', 'dask', ...}, optional
        Which backend to use to perform the contraction. Must be a valid
        ``opt_einsum`` backend with the relevant library installed.
    contract_opts
        Passed to ``opt_einsum.contract_expression`` or
        ``opt_einsum.contract_path``.
    Returns
    -------
    scalar or Tensor
    """
    check_opt('get', get, _VALID_CONTRACT_GET)
    if backend is None:
        # fall back to the module-level default contraction backend
        backend = _CONTRACT_BACKEND
    i_ix = tuple(t.inds for t in tensors)  # input indices per tensor
    total_ix = tuple(concat(i_ix))  # list of all input indices
    all_ix = tuple(unique(total_ix))
    if output_inds is None:
        # sort output indices by input order for efficiency and consistency
        o_ix = tuple(_gen_output_inds(total_ix))
    else:
        o_ix = output_inds
    # possibly map indices into the range needed by opt-einsum
    # (opt_einsum equations are built from single-character symbols)
    eq = _maybe_map_indices_to_alphabet(all_ix, i_ix, o_ix)
    if get == 'symbol-map':
        return {oe.get_symbol(i): ix for i, ix in enumerate(all_ix)}
    if get == 'path-info':
        ops = (t.shape for t in tensors)
        path_info = get_contraction(eq, *ops, path=True, **contract_opts)
        # attach symbol -> index mapping so the path is interpretable
        path_info.quimb_symbol_map = {
            oe.get_symbol(i): ix for i, ix in enumerate(all_ix)
        }
        return path_info
    if get == 'expression':
        # account for possible constant tensors
        cnst = contract_opts.get('constants', ())
        ops = (t.data if i in cnst else t.shape for i, t in enumerate(tensors))
        expression = get_contraction(eq, *ops, **contract_opts)
        return expression
    # perform the contraction
    shapes = (t.shape for t in tensors)
    expression = get_contraction(eq, *shapes, **contract_opts)
    o_array = expression(*(t.data for t in tensors), backend=backend)
    if not o_ix:
        # scalar result: unwrap the 0-d array into a plain Python number
        if isinstance(o_array, np.ndarray):
            o_array = realify_scalar(o_array.item(0))
        return o_array
    # unison of all tags
    o_tags = set_union(t.tags for t in tensors)
    return Tensor(data=o_array, inds=o_ix, tags=o_tags)
def registry(db, term=None):
    """List the organizations available in the registry.

    When *term* is given, only organizations whose name matches it are
    returned; if nothing matches, a 'NotFoundError' is raised. Without a
    term, every organization is listed.

    :param db: database manager
    :param term: term to match with organizations names
    :returns: a list of organizations sorted by their name
    :raises NotFoundError: raised when the given term is not found on
        any organization from the registry
    """
    with db.connect() as session:
        base_query = session.query(Organization)
        if term:
            pattern = '%' + term + '%'
            orgs = base_query.filter(Organization.name.like(pattern)).\
                order_by(Organization.name).all()
            if not orgs:
                raise NotFoundError(entity=term)
        else:
            orgs = base_query.order_by(Organization.name).all()
        # Detach objects from the session so they remain usable afterwards
        session.expunge_all()
    return orgs
def get_column_names(connection, schema_name, table_name):
    """
    Return the (column_name, data_type) rows for a given table.

    :param connection: open psycopg2 connection
    :param schema_name: schema containing the table
    :param table_name: table to describe
    :return: list of (column_name, data_type) tuples, or None on DB error
    """
    cur = connection.cursor()
    try:
        # Security fix: interpolating identifiers with ``%`` allowed SQL
        # injection; bind them as query parameters instead.
        cur.execute(
            "SELECT column_name,data_type FROM information_schema.columns "
            "WHERE table_schema = %s AND table_name = %s;",
            (schema_name, table_name))
        return cur.fetchall()
    except (Exception, psycopg2.DatabaseError) as error:
        print(error)
    finally:
        # Close the cursor on every path; the original leaked it on error.
        cur.close()
import pathlib
from typing import List
def _write_dataset_files(
root_path: pathlib.Path, namespace: str, datasets: List[str]
) -> str:
"""Write the repo content containing the datasets."""
repo_path = root_path / namespace
# Create all datasets
for ds_name in datasets:
ds_path = repo_path / ds_name / f'{ds_name}.py'
ds_path.parent.mkdir(parents=True) # Create the containing dir
ds_path.touch() # Create the file
# Additional noisy files should be ignored
(repo_path / '__init__.py').touch()
(repo_path / 'empty_dir').mkdir()
return str(repo_path) | 49baafff58a08802830208382180ce32d8aaf8c0 | 3,627,623 |
def get(params, query, offset, limit):
    """Run the paged BigQuery query described by *params* and *query*.

    Returns the result rows together with the total row count.
    """
    statement = SQL.format(
        table_id='%ss' % params['type'],
        where_clause=query.get_where_clause(),
        prefix=params['type'],
        offset=offset,
        limit=limit)
    result = big_query.Client().query(query=statement, offset=offset, limit=limit)
    return result.rows, result.total_count
def get_rose_username():
    """
    Get the Rose username from Subversion's config file.

    :return: the configured username, or ``None`` when the file, section
        or option is missing (best-effort lookup).
    """
    try:
        # Modernization: SafeConfigParser was removed in Python 3.12;
        # ConfigParser is the drop-in replacement.
        from configparser import ConfigParser
        config = ConfigParser()
        config.read(svn_servers)
        return config.get('metofficesharedrepos', 'username')
    except Exception:
        # Any problem reading the config means "no username configured".
        return None
def HttpRequest(service, operation, data, uri, extra_headers=None,
    url_params=None, escape_params=True, content_type='application/atom+xml'):
    """Performs an HTTP call to the server, supports GET, POST, PUT, and DELETE.
    This method is deprecated, use atom.http.HttpClient.request instead.
    Usage example, perform and HTTP GET on http://www.google.com/:
      import atom.service
      client = atom.service.AtomService()
      http_response = client.Get('http://www.google.com/')
    or you could set the client.server to 'www.google.com' and use the
    following:
      client.server = 'www.google.com'
      http_response = client.Get('/')
    Args:
      service: atom.AtomService object which contains some of the parameters
          needed to make the request. The following members are used to
          construct the HTTP call: server (str), additional_headers (dict),
          port (int), and ssl (bool).
      operation: str The HTTP operation to be performed. This is usually one of
          'GET', 'POST', 'PUT', or 'DELETE'
      data: ElementTree, filestream, list of parts, or other object which can be
          converted to a string.
          Should be set to None when performing a GET or PUT.
          If data is a file-like object which can be read, this method will read
          a chunk of 100K bytes at a time and send them.
          If the data is a list of parts to be sent, each part will be evaluated
          and sent.
      uri: The beginning of the URL to which the request should be sent.
          Examples: '/', '/base/feeds/snippets',
          '/m8/feeds/contacts/default/base'
      extra_headers: dict of strings. HTTP headers which should be sent
          in the request. These headers are in addition to those stored in
          service.additional_headers.
      url_params: dict of strings. Key value pairs to be added to the URL as
          URL parameters. For example {'foo':'bar', 'test':'param'} will
          become ?foo=bar&test=param.
      escape_params: bool default True. If true, the keys and values in
          url_params will be URL escaped when the form is constructed
          (Special characters converted to %XX form.)
      content_type: str The MIME type for the data being sent. Defaults to
          'application/atom+xml', this is only used if data is set.
    """
    # NOTE(review): this legacy helper uses Python 2 idioms (dict.has_key
    # below) and cannot run unmodified on Python 3; kept for compatibility.
    deprecation('call to deprecated function HttpRequest')
    # Build the full request URI (with encoded URL parameters) and open a
    # connection to the configured server.
    full_uri = BuildUri(uri, url_params, escape_params)
    (connection, full_uri) = PrepareConnection(service, full_uri)
    if extra_headers is None:
        extra_headers = {}
    # Turn on debug mode if the debug member is set.
    if service.debug:
        connection.debuglevel = 1
    connection.putrequest(operation, full_uri)
    # If the list of headers does not include a Content-Length, attempt to
    # calculate it based on the data object.
    if (data and not service.additional_headers.has_key('Content-Length') and
        not extra_headers.has_key('Content-Length')):
        content_length = CalculateDataLength(data)
        if content_length:
            extra_headers['Content-Length'] = str(content_length)
    if content_type:
        extra_headers['Content-Type'] = content_type
    # Send the HTTP headers: first the service-wide ones, then the
    # call-specific extra headers (which may override by name).
    if isinstance(service.additional_headers, dict):
        for header in service.additional_headers:
            connection.putheader(header, service.additional_headers[header])
    if isinstance(extra_headers, dict):
        for header in extra_headers:
            connection.putheader(header, extra_headers[header])
    connection.endheaders()
    # If there is data, send it in the request.  A list is treated as a
    # sequence of body parts, each sent in turn.
    if data:
        if isinstance(data, list):
            for data_part in data:
                __SendDataPart(data_part, connection)
        else:
            __SendDataPart(data, connection)
    # Return the HTTP Response from the server.
    return connection.getresponse()
def findLargestGap(depth_og, min_dist, barrier_h=0, min_gap=0):
    """
    Given depth image, find the largest gap that goes from the bottom of
    the image to the top. Use min_dist as threshold below which objects are
    shown to be too close. Return the position in the middle of the largest
    gap.

    :param depth_og: 2-D depth image (numpy array)
    :param min_dist: depth threshold; pixels deeper than this count as gap
    :param barrier_h: fraction of the image height (from the top) ignored
        when checking whether any gap pixel exists at all
    :param min_gap: minimum acceptable gap width in pixels
    :return: column position at the centre of the gap, or None if no
        usable gap exists
    """
    depth = depth_og > min_dist # true where gap exists
    try:
        # No gap pixels below the barrier line -> nothing to steer through.
        if np.sum(depth[int(barrier_h*depth.shape[0]):]) == 0:
            return None
    except:
        # NOTE(review): bare except silently treats any error (e.g. an empty
        # or malformed input) as "no gap" -- confirm this is intended.
        return None
    # Pad one zero column on each side so np.diff marks a transition at the
    # image borders as well.
    npad = ((0, 0), (1, 1))
    d_padded = np.pad(depth, pad_width=npad, mode='constant', constant_values=0)
    # Transitions 0->1 / 1->0 give (start, end) column pairs per gap run.
    indices = np.nonzero(np.diff(d_padded))
    row_indices = indices[0][0::2] # row indices
    data = __GapData(row_indices, indices[1][0::2], indices[1][1::2],
                     len(np.unique(row_indices)))
    # __addNextRow presumably narrows the widest column interval that
    # survives across all rows, storing the result in data.gap -- defined
    # elsewhere in this module; verify semantics there.
    __addNextRow(0, 0, np.inf, data)
    sf = data.gap
    if sf is None:
        return None
    if sf[1] - sf[0] < min_gap:
        return None
    # Midpoint of the [left, right] column interval of the chosen gap.
    return (sf[0]+sf[1])/2.
import time
def _CalcProjectAlert(project):
"""Return a string to be shown as red text explaning the project state."""
project_alert = None
if project.read_only_reason:
project_alert = 'READ-ONLY: %s.' % project.read_only_reason
if project.moved_to:
project_alert = 'This project has moved to: %s.' % project.moved_to
elif project.delete_time:
delay_seconds = project.delete_time - time.time()
delay_days = delay_seconds // framework_constants.SECS_PER_DAY
if delay_days <= 0:
project_alert = 'Scheduled for deletion today.'
else:
days_word = 'day' if delay_days == 1 else 'days'
project_alert = (
'Scheduled for deletion in %d %s.' % (delay_days, days_word))
elif project.state == project_pb2.ProjectState.ARCHIVED:
project_alert = 'Project is archived: read-only by members only.'
return project_alert | 5d2288753654a0275b6d4c4f77123691be941433 | 3,627,628 |
def normalized_8UC3_image(image):
    """
    :param image: two-dimensional image
    :return: normalized to [0, 255] three-dimensional image
    """
    assert len(image.shape) == 2, 'two-dimensional images are only supported'
    gray = normalized_8U_image(image)
    # Replicate the single channel three times along a new trailing axis.
    return np.stack((gray, gray, gray), axis=-1)
import argparse
import sys
def _parse_arguments() -> argparse.Namespace:
"""Plot argument parser.
"""
parser = argparse.ArgumentParser(description="Plot")
parser.add_argument(
"--inputs",
type=str,
required=True,
help=
"comma-separated list of input data filenames (e.g., --input input1,input2)\n"
+ "The data for multiple files is concatenated into a single graph.")
parser.add_argument("--output",
type=str,
required=True,
help="output plot filename (e.g., --output output)")
parser.add_argument("--plot_name",
type=str,
required=True,
help="plot name (e.g., --plot_name name)")
parser.add_argument("--print_available_benchmarks",
type=bool,
required=False,
help="print the existing list of benchmarks in the data")
parser.add_argument("--benchmarks_to_plot",
type=str,
required=False,
help="comma-separated names of benchmarks to plot",
default='all')
parser.add_argument("--sizes_to_plot",
type=str,
required=False,
help="semicolon-separated lost of problem sizes to plot "
"(e.g., --sizes_to_plot=\"m=32,n=48;m=90,n=32\")",
default='all')
parser.add_argument("--num_sizes_to_plot",
type=int,
required=False,
help="sample the given number of problem sizes to plot",
default=-1)
parser.add_argument("--metric_to_plot",
type=str,
required=True,
choices=["gflop_per_s_per_iter", "gbyte_per_s_per_iter"])
parser.add_argument("--group_by_strides_and_dilations",
type=bool,
required=False,
help="plot separate bars for strides and dilations")
###############################################################################
# Not used atm
###############################################################################
parser.add_argument("--peak_compute",
type=int,
nargs="?",
help="peak compute (e.g., --peak_compute 192)",
default=192)
parser.add_argument("--peak_bandwidth_hi",\
type=int,
nargs="?",
help="high peak bandwidth (e.g., --peak_bandwidth_hi 281)",
default=281)
parser.add_argument("--peak_bandwidth_lo",
type=int,
nargs="?",
help="low peak bandwidth (e.g., -peak_bandwidth_lo 281)",
default=281)
return parser.parse_args(sys.argv[1:]) | f69c69040d9003e32b27e3195218cad06c807e69 | 3,627,630 |
import sys
import click
def _read_plan_yaml(yaml_path: str) -> PlanSchema:
    """Load a PlanSchema from a YAML path, or from stdin when the path is "-".

    Validation failures are surfaced to the CLI as ClickException.
    """
    try:
        if yaml_path == "-":
            raw = sys.stdin.read()
        else:
            with open(yaml_path, "r", encoding="utf8") as handle:
                raw = handle.read()
        return PlanSchema.parse_raw(raw)
    except ValidationError as exc:
        raise click.ClickException("%s" % exc) from exc
def remove_sex(beta, array_type='450k'):
    """Remove non-autosomal (sex-chromosome) CpGs from a beta matrix via meffil.

    Parameters
    ----------
    beta
        R beta matrix (CpGs x samples) with CpG ids as row names.
    array_type
        450k/850k array?

    Returns
    -------
    The beta matrix restricted to autosomal CpG sites.
    """
    # Bug fix: the original R snippet filtered an out-of-scope `norm.beta`
    # and then returned the *unfiltered* `beta`; filter `beta` itself.
    beta = robjects.r("""function (beta,array.type){
        featureset<-array.type
        autosomal.sites <- meffil.get.autosomal.sites(featureset)
        autosomal.sites <- intersect(autosomal.sites, rownames(beta))
        beta <- beta[autosomal.sites,]
        return(beta)
        }""")(beta,array_type)
    return beta
from datetime import datetime
def create_netcdf(valid):
    """Create, initialise and return the CFS forecast netCDF file.

    Parameters
    ----------
    valid : datetime
        Model initialisation time; drives the filename, the title year and
        the length of the daily time axis.

    Returns
    -------
    netCDF4 Dataset reopened in append mode, ready for data writes.
    """
    ncfn = "/mesonet/data/iemre/cfs_%s.nc" % (valid.strftime("%Y%m%d%H"), )
    nc = ncopen(ncfn, 'w')
    nc.title = "IEM Regridded CFS Member 1 Forecast %s" % (valid.year,)
    nc.platform = "Grided Forecast"
    nc.description = "IEM Regridded CFS on 0.125 degree grid"
    nc.institution = "Iowa State University, Ames, IA, USA"
    nc.source = "Iowa Environmental Mesonet"
    nc.project_id = "IEM"
    nc.realization = 1
    nc.Conventions = 'CF-1.0'
    nc.contact = "Daryl Herzmann, akrherz@iastate.edu, 515-294-5978"
    # Fix: the module imports `from datetime import datetime`, so the class,
    # not the module, is in scope -- `datetime.datetime.now()` would raise.
    nc.history = ("%s Generated"
                  ) % (datetime.now().strftime("%d %B %Y"),)
    nc.comment = "No comment at this time"
    # Setup Dimensions
    nc.createDimension('lat', iemre.NY)
    nc.createDimension('lon', iemre.NX)
    # Time axis covers every day of the forecast's calendar year.
    days = iemre.daily_offset(valid.replace(month=12, day=31)) + 1
    nc.createDimension('time', int(days))

    # Setup Coordinate Variables
    # Fix: np.float was removed in NumPy 1.24; the builtin float is the
    # documented equivalent.
    lat = nc.createVariable('lat', float, ('lat',))
    lat.units = "degrees_north"
    lat.long_name = "Latitude"
    lat.standard_name = "latitude"
    lat.axis = "Y"
    lat[:] = iemre.YAXIS

    lon = nc.createVariable('lon', float, ('lon',))
    lon.units = "degrees_east"
    lon.long_name = "Longitude"
    lon.standard_name = "longitude"
    lon.axis = "X"
    lon[:] = iemre.XAXIS

    tm = nc.createVariable('time', float, ('time',))
    tm.units = "Days since %s-01-01 00:00:0.0" % (valid.year,)
    tm.long_name = "Time"
    tm.standard_name = "time"
    tm.axis = "T"
    tm.calendar = "gregorian"
    tm[:] = np.arange(0, int(days))

    # Data variables are stored as scaled uint16 (value = raw * 0.01).
    high = nc.createVariable('high_tmpk', np.uint16, ('time', 'lat', 'lon'),
                             fill_value=65535)
    high.units = "K"
    high.scale_factor = 0.01
    high.long_name = "2m Air Temperature Daily High"
    high.standard_name = "2m Air Temperature"
    high.coordinates = "lon lat"

    low = nc.createVariable('low_tmpk', np.uint16, ('time', 'lat', 'lon'),
                            fill_value=65535)
    low.units = "K"
    low.scale_factor = 0.01
    low.long_name = "2m Air Temperature Daily Low"
    low.standard_name = "2m Air Temperature"
    low.coordinates = "lon lat"

    p01d = nc.createVariable('p01d', np.uint16, ('time', 'lat', 'lon'),
                             fill_value=65535)
    p01d.units = 'mm'
    p01d.scale_factor = 0.01
    p01d.long_name = 'Precipitation'
    p01d.standard_name = 'Precipitation'
    p01d.coordinates = "lon lat"
    p01d.description = "Precipitation accumulation for the day"

    rsds = nc.createVariable('srad', np.uint16, ('time', 'lat', 'lon'),
                             fill_value=65535)
    rsds.units = "W m-2"
    rsds.scale_factor = 0.01
    rsds.long_name = 'surface_downwelling_shortwave_flux_in_air'
    rsds.standard_name = 'surface_downwelling_shortwave_flux_in_air'
    rsds.coordinates = "lon lat"
    rsds.description = "Global Shortwave Irradiance"

    # Close to flush the header, then reopen in append mode for the caller.
    nc.close()
    nc = ncopen(ncfn, 'a')
    return nc
def check_fortran_type(typestr, error=False):
    """Return <typestr> if a valid Fortran type, otherwise, None
    if <error> is True, raise an Exception if <typestr> is not valid.
    >>> check_fortran_type("real")
    'real'
    >>> check_fortran_type("integer")
    'integer'
    >>> check_fortran_type("InteGer")
    'InteGer'
    >>> check_fortran_type("character")
    'character'
    >>> check_fortran_type("double precision")
    'double precision'
    >>> check_fortran_type("double   precision")
    'double   precision'
    >>> check_fortran_type("doubleprecision")
    'doubleprecision'
    >>> check_fortran_type("complex")
    'complex'
    >>> check_fortran_type("char", error=True) #doctest: +IGNORE_EXCEPTION_DETAIL
    Traceback (most recent call last):
    CCPPError: 'char' is not a valid Fortran type
    >>> check_fortran_type("int")

    >>> check_fortran_type("char", error=False)

    >>> check_fortran_type("type")

    >>> check_fortran_type("type", error=True) #doctest: +IGNORE_EXCEPTION_DETAIL
    Traceback (most recent call last):
    CCPPError: 'char' is not a valid derived Fortran type
    >>> check_fortran_type("type(hi mom)", error=True) #doctest: +IGNORE_EXCEPTION_DETAIL
    Traceback (most recent call last):
    CCPPError: 'type(hi mom)' is not a valid derived Fortran type
    """
    kind_suffix = ""
    found = check_fortran_intrinsic(typestr, error=False)
    if found is None:
        found = registered_fortran_ddt_name(typestr)
        kind_suffix = " derived"
    # End if
    # DH* 20190913 - DDT registration is not checked at this time, so the
    # lookup result above is currently unused and <typestr> is returned
    # unconditionally.  Original error-raising code kept for reference:
    #if found is None:
    #    if error:
    #        raise CCPPError("'{}' is not a valid{} Fortran type".format(typestr, kind_suffix))
    #    else:
    #        typestr = None
    #    # End if
    ## End if
    # *DH 20190913
    return typestr
def pyro_service_process(auto_start=False, *args, **kwargs):
    """Create a separate process hosting a pyro service.

    When *auto_start* is truthy the process is started immediately and this
    call blocks until it terminates; otherwise the unstarted process is
    returned for the caller to manage.
    """
    # Set up nameserver process.
    logger.debug(f'Setting up Pyro service process.')
    worker = Process(target=pyro_service, args=args, kwargs=kwargs)
    if auto_start:  # noqa
        logger.info("Auto-starting pyro service")
        worker.start()
        logger.success("Pyro service started, will block until finished...(Ctrl-c/Cmd-c to exit)")
        worker.join()
    return worker
def topk_caculate(predict_p, true_labels, k=1, number_cat=None, epsilon=1e-19):
    """Compute cumulative top-k accuracy and per-class precision.

    Args:
        predict_p: predicted probability distribution, shape (N, number_cat)
        true_labels: ground-truth labels, shape (N,), assumed in 0..C-1
        k: top k
        number_cat: number of categories; inferred as max(label) + 1 if omitted
        epsilon: small constant guarding against division by zero
    Returns:
        topk acc [top1,top2,...,topk]
        cat acc [ [top1,...,topk] per class ]
    """
    total = len(predict_p)
    if not number_cat:
        # Fix: the original default, max(np.unique(...), np.max(...)),
        # compared an array against a scalar (ValueError for multi-class
        # labels) and was off by one.  Labels run 0..C-1, so C = max + 1.
        number_cat = int(np.max(true_labels)) + 1
    real = np.array(true_labels)
    arg_sort = predict_p.argsort(axis=-1)
    total_acc = []  # cumulative top-i accuracy over all samples
    cat_acc = []  # per-class precision at each rank i

    def acc_caculate(pred, real, prev_tp_sum, prev_cat_tp, real_cat_positive, number_cat, total):
        # Accumulate true positives from this rank on top of previous ranks.
        all_eval = (pred == real)
        cat_tp = [sum((real == i) & all_eval) for i in range(number_cat)]
        cat_tp = [prev_cat_tp[i] + cat_tp[i] for i in range(number_cat)]
        cat_precition = [(cat_tp[i] / (real_cat_positive[i] + epsilon)) for i in range(number_cat)]
        tp_sum = sum(all_eval) + prev_tp_sum
        topk = tp_sum / total
        return topk, cat_precition, cat_tp, tp_sum

    prev_tp_sum = 0
    prev_cat_tp = [0 for i in range(number_cat)]  # per-class TPs over ranks 1..i-1
    real_cat_positive = [sum(real == i) for i in range(number_cat)]  # positives per class
    for i in range(k):
        # i-th most probable prediction for every sample.
        pred = arg_sort[:, -1 * (i + 1)]
        topk, cat_precition, prev_cat_tp, prev_tp_sum = acc_caculate(
            pred, real, prev_tp_sum, prev_cat_tp, real_cat_positive,
            number_cat, total)
        total_acc.append(topk)
        cat_acc.append(cat_precition)
    return total_acc, cat_acc
def mon_operator(xs):
    """xs = [unary_op, unary] OR primary"""
    if isinstance(xs, list):
        unary_op, operand = xs[0], xs[1]
        return TokenOperator(unary_op, [operand])
    return xs
from typing import List
def chunked_phrase_strict(m) -> List[BasePhraseChunk]:
    """A chunked phrase that must start with dictation.

    This capture can be used to insert keywords that would otherwise be handled
    as symbols or commands. For example, we could put this in a .talon file:

        snake <chunked_phrase_strict>:
            user.insert_complex(chunked_phrase_strict, "snake")

    Now let's say we want to insert the word "dot":

        "snake this is dot a test" -> "this_is.a_test"
        "snake dot a test" -> "dot_a_test"
        "snake dot dot a test" -> "dot.a_test"
    """
    chunks = [m.dictation_chunk]
    if hasattr(m, "chunked_phrase"):
        chunks.extend(m.chunked_phrase)
    return chunks
def merge_nodes(merge_nodes, player_nodes, edge_index):
    """
    merge nodes by redirecting any connected edges, only the edges are changed

    NOTE(review): parameter shapes inferred from usage — confirm with callers.
        merge_nodes: list of node ids to collapse; a working copy is consumed
        player_nodes: array-like of node ids occupied by the same player
            (used via np.in1d membership tests)
        edge_index: (2, E) numpy array of directed edges, row 0 = source,
            row 1 = destination; edges appear in both directions

    Returns the rewritten edge_index.
    """
    m_nodes = merge_nodes.copy()
    while m_nodes:
        node = m_nodes.pop()
        #if node not in merged:
        # get edges going out from the node
        c1_out_mask = (edge_index[0] == node)
        if c1_out_mask.any():
            # get connected nodes filled by same player
            c1_nodes = edge_index[1, c1_out_mask]
            c1_nodes = c1_nodes[np.in1d(c1_nodes, player_nodes)]
            # get all edges in and out of these nodes
            out_edge_mask = np.in1d(edge_index[0], c1_nodes)
            in_edge_mask = np.in1d(edge_index[1], c1_nodes)
            # form new edges to 2-hop adjacent nodes that are not filled by player
            c2_nodes = np.unique(edge_index[1, out_edge_mask])
            c2_nodes = c2_nodes[c2_nodes != node]
            #c2_nodes = c2_nodes[~np.in1d(c2_nodes, nodes)]
            # new edges point from each 2-hop neighbour back to `node`;
            # new_edges[[1,0]] below adds the reverse direction so the edge
            # set stays symmetric
            new_edges = np.stack([c2_nodes, np.full_like(c2_nodes, node)])
            # print('Node: ', node)
            # print('Connected nodes: ', c1_nodes)
            # print('Remaining edges', edge_index[:, ~(out_edge_mask|in_edge_mask)].T)
            # print('New edges', new_edges.T)
            # remove all edges from merged nodes and add new edges
            edge_index = np.concatenate([
                edge_index[:, ~(out_edge_mask|in_edge_mask)],
                new_edges,
                new_edges[[1,0]]
            ], axis=1)
            # deduplicate: the concatenation above can create repeated columns
            edge_index = np.unique(edge_index, axis=1)
            # return the node to the queue if it is still connected
            # (merging can expose new same-player neighbours, so re-check)
            c1_out_mask = (edge_index[0] == node)
            if c1_out_mask.any():
                # get connected nodes filled by same player
                c1_nodes = edge_index[1, c1_out_mask]
                c1_nodes = c1_nodes[np.in1d(c1_nodes, player_nodes)]
                if not c1_nodes.size == 0:
                    m_nodes.append(node)
    return edge_index
def load_portfolio(portfolio_filepath):
    """Load a line-delimited JSON portfolio file into a DataFrame.

    INPUT:
        portfolio_filepath: string path to a JSON-lines file
    OUTPUT:
        pandas DataFrame with one row per JSON record
    """
    return pd.read_json(portfolio_filepath, orient='records', lines=True)
import json
def transform_group_roles_data(data, okta_org_id):
    """Transform raw Okta role data into role property dicts.

    :param data: JSON string returned by the Okta server (a list of roles)
    :param okta_org_id: okta organization id
    :return: list of dicts with ``label``, ``type`` and a synthetic ``id``
        of the form ``<org_id>-<type>``
    """
    roles = json.loads(data)
    return [
        {
            "label": role["label"],
            "type": role["type"],
            "id": "{}-{}".format(okta_org_id, role["type"]),
        }
        for role in roles
    ]
def price_lineplot(card_id, df):
    """Plot the price history for a card as a line chart (work in progress)."""
    fig, ax = plt.subplots()
    price_history = get_price_list_from_redis(card_id)
    sns.lineplot(x=price_history.index, y=price_history['price'], ax=ax)
    card_name = df.loc[df['id'] == card_id]['name'].values[0]
    ax.set_title(card_name)
    ax.set_ylim(ymin=0)
    return fig
def status_select_block(initial_option: str = None):
    """Build the incident status static-select input block.

    When *initial_option* is given it is pre-selected in the dropdown.
    """
    options = [
        option_from_template(text=status.value, value=status.value)
        for status in IncidentStatus
    ]
    select_element = {
        "type": "static_select",
        "placeholder": {"type": "plain_text", "text": "Select Status"},
        "options": options,
    }
    if initial_option:
        select_element["initial_option"] = option_from_template(
            text=initial_option, value=initial_option
        )
    return {
        "block_id": IncidentBlockId.status,
        "type": "input",
        "label": {"type": "plain_text", "text": "Status"},
        "element": select_element,
    }
def streaming_order_filter(
    include_overall_position: bool = None,
    customer_strategy_refs: list = None,
    partition_matched_by_strategy_ref: bool = None,
) -> dict:
    """Build a streaming order filter dict with camelCase keys.

    :param bool include_overall_position: Returns overall / net position
        (OrderRunnerChange.mb / OrderRunnerChange.ml)
    :param list customer_strategy_refs: Restricts to specified
        customerStrategyRefs; this will filter orders and
        StrategyMatchChanges accordingly (Note: overall position is not
        filtered)
    :param bool partition_matched_by_strategy_ref: Returns strategy positions
        (OrderRunnerChange.smc) - these are sent in delta format as per
        overall position.
    :return: dict with only the parameters that were supplied
    """
    # Snapshot the arguments before any other local is created.
    params = dict(locals())
    return {
        to_camel_case(name): value
        for name, value in params.items()
        if value is not None
    }
def fasterrcnn_resnet_fpn_x(*args, **kwargs):
    """Build a Faster R-CNN ResNet-FPN model wrapped with the extra RoI projection."""
    base_model = fasterrcnn_resnet_fpn(*args, **kwargs)
    return FasterRCNN_(base_model)
def circuit_measure_max_once():
    """Fixture circuit returning the Pauli-X expectation on wire 0 (measured once)."""
    observable = qml.PauliX(wires=0)
    return qml.expval(observable)
def regenerate_image_filename_using_dimensions(filename, height, width):
    """Returns the name of the image file with dimensions in it.

    Args:
        filename: str. The name of the image file to be renamed.
        height: int. Height of the image.
        width: int. Width of the image.

    Returns:
        str. The name of the image file with its dimensions in it.
    """
    dot_index = filename.rfind('.')
    stem = filename[:dot_index]
    extension = filename[dot_index + 1:]
    size_suffix = '_height_%s_width_%s' % (
        python_utils.UNICODE(height), python_utils.UNICODE(width))
    return '%s%s.%s' % (stem, size_suffix, extension)
from datetime import datetime
def calculate_time_matrix(name):
    """Calculate estimated arrival times for every stop of a Delivery Trip,
    rounded to the nearest 15 minutes.

    Each stop's arrival = previous arrival + Google Maps driving duration
    + a fixed 15-minute dwell, rounded to a 15-minute boundary.  Stops are
    saved and committed as they are computed.

    NOTE(review): the module-level import is ``from datetime import
    datetime``, yet this body calls ``datetime.timedelta`` — that only works
    if the real module imports ``datetime`` itself; confirm against the
    original file.
    """
    gmaps = frappe.db.get_value('Google Maps', None,
        ['client_key', 'enabled', 'home_address'], as_dict=1)
    if not gmaps.enabled:
        frappe.throw(_("Google Maps integration is not enabled"))
    try:
        gmaps_client = googlemaps.Client(key=gmaps.client_key)
    except Exception as e:
        frappe.throw(e.message)
    # Fixed 15-minute dwell time added at every stop.
    secs_15min = 900
    doc = frappe.get_doc('Delivery Trip', name)
    departure_time = doc.departure_time
    # Raw distance-matrix API responses, returned for inspection by callers.
    matrix_duration = []
    for i, stop in enumerate(doc.delivery_stops):
        if i == 0:
            # The first row is the starting pointing
            origin = gmaps.home_address
            destination = format_address(doc.delivery_stops[i].address)
            distance_calc = gmaps_client.distance_matrix(origin, destination)
            matrix_duration.append(distance_calc)
            try:
                distance_secs = distance_calc['rows'][0]['elements'][0]['duration']['value']
            except Exception as e:
                frappe.throw(_("Error '{0}' occured. Arguments {1}.").format(e.message, e.args))
            stop.estimated_arrival = round_timedelta(
                departure_time + datetime.timedelta(0, distance_secs + secs_15min),
                datetime.timedelta(minutes=15))
        else:
            # Calculation based on previous
            origin = format_address(doc.delivery_stops[i - 1].address)
            destination = format_address(doc.delivery_stops[i].address)
            distance_calc = gmaps_client.distance_matrix(origin, destination)
            matrix_duration.append(distance_calc)
            try:
                distance_secs = distance_calc['rows'][0]['elements'][0]['duration']['value']
            except Exception as e:
                frappe.throw(_("Error '{0}' occured. Arguments {1}.").format(e.message, e.args))
            stop.estimated_arrival = round_timedelta(
                doc.delivery_stops[i - 1].estimated_arrival +
                datetime.timedelta(0, distance_secs + secs_15min), datetime.timedelta(minutes=15))
        # Persist each stop as soon as its arrival time is known.
        stop.save()
        frappe.db.commit()
    return matrix_duration
import time
def revoke_token():
    """Revoke the access token supplied in the request headers.

    On any lookup/validation failure, sleep briefly (to slow brute-force
    token guessing) and abort with HTTP 401.  On success the token is
    revoked and its JSON representation is returned.
    """
    token_string = get_token_from_headers(request)
    try:
        token = OARepoAccessToken.get_by_token(token_string)
        assert token.is_valid()
    # Fix: narrowed the bare ``except:`` — it also swallowed SystemExit
    # and KeyboardInterrupt.
    except Exception:
        time.sleep(INVALID_TOKEN_SLEEP)
        # json_abort raises, so control never falls through to token.revoke()
        # on the failure path.
        json_abort(401, {"message": f"Invalid token. ({token_string})"})
    token.revoke()
    return jsonify({
        **token.to_json(filter_out=['token']),
        'token': token_string,
        'status': token.get_status(),
    })
def command_error_handler(e, cmd_descr, use_logger=False, warn_only=False,
                          exit_val=exitvals['startup']['num']):
    """
    Handle external-command-related exceptions with various options.
    If it returns, returns False.
    Parameters:
        cmd_descr: a string describing the command, used in messages
                   like 'starting rsync backup'
        see generic_error_handler() for the rest
    Dependencies:
        globals: exitvals['startup']
        functions: generic_error_handler()
    """
    description = 'problem running external {0} command'.format(cmd_descr)
    return generic_error_handler(
        e, description, render_command_exception, use_logger, warn_only,
        exit_val)
import os
import numpy
import tqdm
import pickle
def emb_computation_loop(split, set_loader, stat_file):
    """Compute speaker embeddings for *split* and cache them in *stat_file*.

    If *stat_file* already exists, the cached StatObject_SB is loaded
    instead of recomputing the embeddings.

    :param split: name of the data split (used only for log messages)
    :param set_loader: iterable of batches exposing ``.id`` and ``.sig``
    :param stat_file: path of the pickled StatObject_SB cache
    :return: StatObject_SB holding one embedding per utterance
    """
    # Extract embeddings (skip if already done)
    if not os.path.isfile(stat_file):
        embeddings = numpy.empty(
            shape=[0, params["emb_dim"]], dtype=numpy.float64
        )
        modelset = []
        segset = []
        with tqdm(set_loader, dynamic_ncols=True) as t:
            for batch in t:
                ids = batch.id
                wavs, lens = batch.sig
                # Model ids and segment ids are both the utterance ids here.
                # (extend() replaces the original quadratic `list + list`.)
                modelset.extend(ids)
                segset.extend(ids)
                # Enrollment and test embeddings
                embs = compute_embeddings(wavs, lens)
                xv = embs.squeeze().cpu().numpy()
                embeddings = numpy.concatenate((embeddings, xv), axis=0)

        modelset = numpy.array(modelset, dtype="|O")
        segset = numpy.array(segset, dtype="|O")

        # Initialize variables for start, stop and stat0
        s = numpy.array([None] * embeddings.shape[0])
        b = numpy.array([[1.0]] * embeddings.shape[0])

        # Stat object (used to collect embeddings)
        stat_obj = StatObject_SB(
            modelset=modelset,
            segset=segset,
            start=s,
            stop=s,
            stat0=b,
            stat1=embeddings,
        )
        logger.info(f"Saving stat obj for {split}")
        stat_obj.save_stat_object(stat_file)
    else:
        logger.info(f"Skipping embedding Extraction for {split}")
        logger.info(f"Loading previously saved stat_object for {split}")
        # SECURITY: pickle.load executes arbitrary code on load — only read
        # stat files produced by this pipeline, never untrusted input.
        with open(stat_file, "rb") as stat_fp:  # renamed: 'input' shadowed a builtin
            stat_obj = pickle.load(stat_fp)
    return stat_obj
def register(*ids):
    """Class decorator factory registering a termination function under the given env ids."""
    def decorator(cls):
        for env_id in ids:
            TERMINATIONS[env_id] = cls
            _raylab_registry.register(RAYLAB_TERMINATION, env_id, cls)
        return cls

    return decorator
import logging
import os
def process_sdk_options(parser, options, app_dir):
  """Handles values of options added by 'add_sdk_options'.

  Modifies global process state by configuring logging and path to GAE SDK.

  Args:
    parser: OptionParser instance to use to report errors.
    options: parsed options, as returned by parser.parse_args.
    app_dir: path to application directory to use by default.

  Returns:
    New instance of Application configured based on passed options.
  """
  # Global side effect: configures the root logger for the whole process.
  logging.basicConfig(level=logging.DEBUG if options.verbose else logging.ERROR)
  if not app_dir and not options.app_dir:
    parser.error('--app-dir option is required')
  # Explicit --app-dir overrides the caller-supplied default.
  app_dir = os.path.abspath(app_dir or options.app_dir)
  try:
    runtime = get_app_runtime(find_app_yamls(app_dir))
  except (Error, ValueError) as exc:
    # parser.error() exits the process, so no return value is needed here.
    parser.error(str(exc))
  sdk_path = options.sdk_path or find_gae_sdk(RUNTIME_TO_SDK[runtime], app_dir)
  if not sdk_path:
    parser.error('Failed to find the AppEngine SDK. Pass --sdk-path argument.')
  # Global side effect: mutates sys.path so the SDK modules are importable.
  setup_gae_sdk(sdk_path)
  try:
    return Application(app_dir, options.app_id, options.verbose)
  except (Error, ValueError) as e:
    # parser.error() exits here too; this function never returns None.
    parser.error(str(e))
def IsProjectAddressOnToLine(project_addr, to_addrs):
  """Return True if an email was explicitly sent directly to us."""
  return any(addr == project_addr for addr in to_addrs)
def FID3_factual(instance, rule_list, threshold=0.01):
    """Return the factual rule extracted for the Fuzzy ID3 tree in this package.

    Parameters
    ----------
    instance : dict, {feature: {set_1: pert_1, set_2: pert_2, ...}, ...}
        Fuzzy representation of the instance with all the features and
        pertenence degrees to each fuzzy set
    rule_list : list(Rule)
        List of candidate rules to form part of the factual
    threshold : float, optional
        Activation threshold with which a rule is considered to be fired by
        the instance, by default 0.01

    Returns
    -------
    Rule
        The fired rule with the highest matching degree for the instance.
    """
    candidates = _fired_rules(instance, rule_list, threshold)
    return max(candidates, key=lambda candidate: candidate.matching(instance))
def percent_decode(s, encoding='utf-8', decodable=None, errors='replace'):
    """
    [*** Experimental API ***] Reverses the percent-encoding of the given
    string.

    Similar to urllib.parse.unquote()

    By default, all percent-encoded sequences are decoded, but if a byte
    string is given via the 'decodable' argument, only the sequences
    corresponding to those octets will be decoded.

    Percent-encoded sequences are converted to bytes, then converted back to
    string (Unicode) according to the given encoding.

    For example, by default, 'abc%E2%80%A2' will be converted to 'abc\u2022',
    because byte sequence E2 80 A2 represents character U+2022 in UTF-8.

    This function is intended for use on the portions of a URI that are
    delimited by reserved characters (see percent_encode), or on a value from
    data of media type application/x-www-form-urlencoded.

    >>> from amara3.iri import percent_decode
    >>> u0 = 'http://host/abc%E2%80%A2/x/y/z'
    >>> u1 = percent_decode(u0)
    >>> hex(ord(u1[15]))
    '0x2022'
    """
    # Most of this comes from urllib.parse.unquote().
    # Note: strings are encoded as UTF-8. This is only an issue if it contains
    # unescaped non-ASCII characters, which URIs should not.
    # If given a string argument, does not decode
    # percent-encoded octets above %7F.

    # Fast path: nothing to decode.
    if '%' not in s:
        #s.split
        return s
    # Normalise None arguments to the documented defaults.
    if encoding is None:
        encoding = 'utf-8'
    if errors is None:
        errors = 'replace'
    # Split into alternating [literal, escaped, literal, escaped, ...] parts;
    # odd-indexed parts contain the percent-escaped runs to be decoded.
    bits = _ASCII_PAT.split(s)
    res = [bits[0]]
    append = res.append #Saving the func lookups in the tight loop below
    for i in range(1, len(bits), 2):
        append(_unquote_to_bytes(bits[i], decodable=decodable).decode(encoding, errors))
        append(bits[i + 1])
    return ''.join(res)
def get_card_key(word_type: WordType, russian: AccentedText,
                 english: AccentedText) -> tuple:
    """
    Build the (word_type, russian, english) key that identifies a card.

    Text is lower-cased so the key is case-insensitive.
    """
    russian_text = AccentedText(russian).text.lower()
    english_text = AccentedText(english).text.lower()
    return (word_type, russian_text, english_text)
def parse_text_content(html: str) -> list:
    """
    Parse the result paragraphs out of the rendered HTML page.

    The result count is later used to derive the page count at 20 items per
    page.
    """
    tree = etree.HTML(html)
    return tree.xpath('//*[@id="__next"]/div[1]/div/div[1]/section[1]/article/p')
def create_input(config=None, src_data=None, tgt_data=None, src_pos_dict=None):
    """Slice aligned source/target arrays into (window, target) training pairs.

    Every ``sequence_length`` rows of *src_data* form one input window whose
    target is the *tgt_data* row immediately after the window.  An assertion
    checks that the binary target agrees with the close-price movement.

    :return: list of (src_window, tgt_row) tuples
    """
    assert src_data.shape[0] == tgt_data.shape[0]
    total_rows = src_data.shape[0]
    window = config['sequence_length']
    close_idx = src_pos_dict['close_price']
    pairs = []
    for end in range(window, total_rows, window):
        window_rows = src_data[end - window:end]
        target = tgt_data[end]
        # Sanity check: target flag must match "close price went up".
        assert bool(target[0]) == (window_rows[-1, close_idx] < src_data[end, close_idx])
        pairs.append((window_rows, target))
    return pairs
import numpy
def std_histogrammed_function(t, y, **kwargs):
    """Compute standard deviation of data *y* in bins along *t*.

    Returns the standard deviation-regularised function *F* and the centers
    of the bins.

    All binning options (e.g. *bins*) are forwarded unchanged via **kwargs**
    to :func:`apply_histogrammed_function`.

    .. SeeAlso:: :func:`regularized_function` with *func* = :func:`numpy.std`
    """
    return apply_histogrammed_function(numpy.std, t, y, **kwargs)
def _crown_div(out_bound, lhs, rhs):
  """Backward propagation of LinearBounds through a division.

  Division is a linear operation only when the denominator is a constant,
  so a Bound-valued denominator is rejected.

  Args:
    out_bound: CrownBackwardBound, linear function of network outputs bounds
      with regards to the results of the division.
    lhs: Numerator of the division.
    rhs: Denominator of the division.
  Returns:
    new_in_args: List of CrownBackwardBounds or Nones
  """
  denominator_is_bound = isinstance(rhs, bound_propagation.Bound)
  if denominator_is_bound:
    raise ValueError('Bound propagation through the denominator unsupported.')
  return _crown_linear_op(lax.div_p, out_bound, lhs, rhs)
def cartesian_to_polar(x, y, vx, vy, THRESH=0.0001):
  """
  Converts 2d cartesian position and velocity coordinates to polar
  coordinates.

  Args:
    x, y, vx, vy : floats - position and velocity components in cartesian
    THRESH : float - minimum value of rho to return non-zero values
  Returns:
    rho, drho : floats - radius and radial velocity respectively
    phi : float - angle in radians
  """
  rho = sqrt(x * x + y * y)
  phi = np.arctan2(y, x)
  if rho >= THRESH:
    drho = (x * vx + y * vy) / rho
  else:
    # Degenerate case at the origin: radial velocity is undefined.
    print("WARNING: in cartesian_to_polar(): d_squared < THRESH")
    rho, phi, drho = 0, 0, 0
  return rho, phi, drho
import time
import requests
def processing(channel):
    """
    Fetch working proxies from xicidaili.com, verifying each pair against the
    Douban API, and cache the result in settings.CACHE.

    NOTE(review): the original docstring said "cached for 1 hour", but the
    refresh condition below uses 60 * 60 * 0.5 (30 minutes) — confirm intent.

    :param channel: nn-国内高匿/nt-国内普通/wn-国内https/wt-国内http
        (site channel: domestic high-anonymity / normal / https / http)
    :return: list of {'http': ..., 'https': ...} proxy dicts
    """
    # Refresh when the cache is empty or older than the refresh window.
    if len(settings.CACHE['proxies']['items']) == 0 or int(time.time()) > settings.CACHE['proxies']['update'] + 60 * 60 * 0.5:
        items_http = []
        items_https = []
        headers = {
            'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/63.0.3239.84 Safari/537.36'
        }
        r = requests.get("http://www.xicidaili.com/%s/1" % channel, headers=headers)
        r.encoding = 'utf-8'
        selector = etree.HTML(r.text)
        # Scrape the odd table rows; columns: ip, port, ..., type, uptimes.
        tr_list = selector.xpath('//tr[@class="odd"]')
        for tr in tr_list:
            td_list = tr.xpath('td')
            data = {
                'ip': td_list[1].text,
                'port': td_list[2].text,
                'type': str(td_list[5].text).lower(),
                'survival_time': td_list[8].text,
                'verification_time': td_list[9].text,
            }
            if data['type'] == 'http':
                items_http.append(data)
            if data['type'] == 'https':
                items_https.append(data)
        items = []
        print(len(items_http), len(items_https))
        # Pair up http/https candidates (limited by the shorter list) and
        # keep only pairs that successfully fetch a known Douban endpoint.
        for item in range(len(items_https) if len(items_http) > len(items_https) else len(items_http)):
            proxies = {
                "%s" % items_http[item]['type']: "%s://%s:%s" % (items_http[item]['type'], items_http[item]['ip'], items_http[item]['port']),
                "%s" % items_https[item]['type']: "%s://%s:%s" % (items_https[item]['type'], items_https[item]['ip'], items_https[item]['port']),
            }
            try:
                r2 = requests.get("http://api.douban.com/v2/movie/subject/27021220", headers=headers, proxies=proxies)
                r2.encoding = r2.apparent_encoding
                print(r2.text)
                # Heuristic: a valid movie payload contains these keys.
                if 'avatars' in r2.text and 'rating' in r2.text:
                    print('新增代理')
                    items.append(proxies)
            except Exception as e:
                print(e)
        settings.CACHE['proxies']['items'] = items
        settings.CACHE['proxies']['update'] = int(time.time())
        return items
    else:
        print('使用缓存的代理')
        return settings.CACHE['proxies']['items']
def _to_voxel_coordinates(streamline, lin_T, offset):
"""Applies a mapping from streamline coordinates to voxel_coordinates,
raises an error for negative voxel values."""
inds = np.dot(streamline, lin_T)
inds += offset
if inds.min().round(decimals=6) < 0:
raise IndexError('streamline has points that map to negative voxel'
' indices')
return inds.astype(int) | da8991e7265e994268df99dda038f406073c0cc5 | 3,627,664 |
def check_if_branch_exist(db, root_hash, key_prefix):
    """
    Given a key prefix, return whether this prefix is
    the prefix of an existing key in the trie.
    """
    validate_is_bytes(key_prefix)
    binary_prefix = encode_to_bin(key_prefix)
    return _check_if_branch_exist(db, root_hash, binary_prefix)
def add_trace(rule, mbox_response, request_type, request_detail, tracer):
    """Return a copy of the mbox response with the tracer's result attached.

    :param rule: decisioning artifact rule (unused here; kept for interface)
    :param mbox_response: mbox response object to copy
    :param request_type: ("mbox"|"view"|"pageLoad") request type (unused here)
    :param request_detail: request details (unused here)
    :param tracer: request tracer providing get_trace_result()
    :return: deep copy of *mbox_response* with ``trace`` set; the input
        object is left untouched
    """
    traced_response = deepcopy(mbox_response)
    traced_response.trace = tracer.get_trace_result()
    return traced_response
from typing import List
def _get_axis_labels(axes: List["CalibratedAxis"]) -> List[str]:
"""Get the axes labels from a List of 'CalibratedAxis'.
Extract the axis labels from a List of 'CalibratedAxis'.
:param axes: A List of 'CalibratedAxis'.
:return: A list of the axis labels.
"""
return [str((axes[idx].type().getLabel())) for idx in range(len(axes))] | ed22aeab55d874d6728399c0e4928805ec479c19 | 3,627,667 |
import json
async def show():
    """
    Dump the ``shits`` table as a JSON response (test endpoint).
    """
    cursor = current_app.db.cursor()
    cursor.execute('''SELECT * FROM shits''')
    rows = cursor.fetchall()
    return await make_response(json.dumps(rows))
def set_filter(info_dict, norm=False, norm_val=0.95):
    """
    Compute the transmittance of the filter with respect to wavelength

    Parameters
    ----------
    info_dict: dictionary
        configuration; uses 'path', 'filter_folder', 'filter_band',
        'detailed_trans' and 'wavelength_ang'

    norm: boolean
        enables to normalise the values to 'norm_val' (default: False)

    norm_val: float
        value used for normalising the data

    Returns
    ---------
    trans : array
        transmittance of the filter at a given wavelength (0-1), resampled
        onto info_dict["wavelength_ang"]
    """
    if info_dict["detailed_trans"] == 1:
        filter_path = (
            ("%s/transmissions/filters/" % info_dict["path"])
            + info_dict["filter_folder"]
            + "/"
            + info_dict["filter_band"]
            + ".txt"
        )
    else:
        filter_path = (
            ("%s/transmissions/throughput_curves/" % info_dict["path"])
            + info_dict["filter_folder"]
            + "/"
            + info_dict["filter_band"]
            + ".txt"
        )
    wvl = []
    trans = []
    # Fix: use a context manager so the file handle is always closed
    # (the original opened the file and never closed it).
    with open(filter_path, "r") as filter_file:
        for line in filter_file:
            # Skip comment lines and (near-)empty lines.
            if line[0] != "#" and len(line) > 3:
                bits = line.split()
                trans.append(float(bits[1]))
                wvl.append(float(bits[0]))
    if info_dict["detailed_trans"] == 1:
        wvl = np.array(wvl) * 10.0  # nm --> angstroms
        trans = np.array(trans, dtype=np.float64) * 1e-2  # percent --> 0-1
    else:
        wvl = np.array(wvl)  # should be given in Angstroms
        trans = np.array(trans, dtype=np.float64)  # should be between 0 and 1
    # Normalisation
    if norm:
        trans = trans / max(trans) * norm_val
    # Resample the transmission onto the simulation wavelength grid.
    trans = utils.resample(wvl, trans, info_dict["wavelength_ang"], 0.0, 1.0)
    return trans
from .auth import auth as auth_blueprint
from .main import main as main_blueprint
def create_app(config_name):
    """Application factory: build and configure the Flask app.

    :param config_name: key into ``config_options`` selecting the
        configuration class to load
    :return: the configured Flask application
    """
    app = Flask(__name__)
    app.config.from_object(config_options[config_name])
    # Initialise extensions against this app instance.
    bootstrap.init_app(app)
    # Fix: db.init_app(app) was called twice in the original; once suffices.
    db.init_app(app)
    login_manager.init_app(app)
    # configure uploads set
    configure_uploads(app, photos)
    mail.init_app(app)
    simple.init_app(app)
    # Register blueprints; url_prefix prepends a prefix to every route
    # registered on the blueprint.
    app.register_blueprint(auth_blueprint, url_prefix='/authenticate')
    app.register_blueprint(main_blueprint)
    return app
def convert_to_tensor_v1(value,
                         dtype=None,
                         name=None,
                         preferred_dtype=None,
                         dtype_hint=None):
  """Converts the given `value` to a `Tensor`.

  This function converts Python objects of various types to `Tensor`
  objects. It accepts `Tensor` objects, numpy arrays, Python lists,
  and Python scalars. For example:

  ```python
  import numpy as np

  def my_func(arg):
    arg = tf.convert_to_tensor(arg, dtype=tf.float32)
    return tf.matmul(arg, arg) + arg

  # The following calls are equivalent.
  value_1 = my_func(tf.constant([[1.0, 2.0], [3.0, 4.0]]))
  value_2 = my_func([[1.0, 2.0], [3.0, 4.0]])
  value_3 = my_func(np.array([[1.0, 2.0], [3.0, 4.0]], dtype=np.float32))
  ```

  This function can be useful when composing a new operation in Python
  (such as `my_func` in the example above). All standard Python op
  constructors apply this function to each of their Tensor-valued
  inputs, which allows those ops to accept numpy arrays, Python lists,
  and scalars in addition to `Tensor` objects.

  Note: This function diverges from default Numpy behavior for `float` and
    `string` types when `None` is present in a Python list or scalar. Rather
    than silently converting `None` values, an error will be thrown.

  Args:
    value: An object whose type has a registered `Tensor` conversion function.
    dtype: Optional element type for the returned tensor. If missing, the type
      is inferred from the type of `value`.
    name: Optional name to use if a new `Tensor` is created.
    preferred_dtype: Optional element type for the returned tensor, used when
      dtype is None. In some cases, a caller may not have a dtype in mind when
      converting to a tensor, so preferred_dtype can be used as a soft
      preference. If the conversion to `preferred_dtype` is not possible, this
      argument has no effect.
    dtype_hint: same meaning as preferred_dtype, and overrides it.

  Returns:
    A `Tensor` based on `value`.

  Raises:
    TypeError: If no conversion function is registered for `value` to `dtype`.
    RuntimeError: If a registered conversion function returns an invalid value.
    ValueError: If the `value` is a tensor not of given `dtype` in graph mode.
  """
  # dtype_hint is the new name for the deprecated preferred_dtype argument;
  # resolve whichever the caller supplied.
  resolved_hint = deprecation.deprecated_argument_lookup(
      "dtype_hint", dtype_hint, "preferred_dtype", preferred_dtype)
  return convert_to_tensor_v2(value, dtype, resolved_hint, name)
def simulate(rate, bdepth, policy, dset_mr, rsz_is=(1e5, 1e2)):
    """Simulate policy on a single camera.

    Derives queries-per-minute from the rate/buffer-depth pair, coerces the
    resize pair to ints, and delegates to the internal simulator.
    """
    queries_per_minute = ut.getqpm(rate, bdepth)
    # The two resize values may arrive as floats (e.g. 1e5); force ints.
    sizes = (int(rsz_is[0]), int(rsz_is[1]))
    return _simulate(queries_per_minute, policy, dset_mr, sizes)
def CreateUser():
    """Route that looks up (or creates) the profile for the signed-in user.

    Returns the string form of whatever ``check_if_user_profile`` yields
    for the current Google user id.
    """
    current_user = users.get_current_user()
    profile = check_if_user_profile(current_user.user_id())
    return str(profile)
from typing import Tuple
import os
import shutil
def process_file(file_url: str) -> Tuple[str, Tuple[str, ...]]:
    """Process file with download, cache and upgrade.

    Downloads ``file_url`` into a cache folder keyed by the URL's md5,
    converts it, writes the conversion output plus a report, and persists
    the report to GCS. Cached entries are reused on subsequent calls.

    :param file_url: URL of the source notebook/script to process.
    :return: tuple of (cache folder path, (original name, converted name));
        for ``.py`` sources the names are those of the generated notebooks.
    :raises ConvertionException: if conversion fails (the partial cache
        entry is removed first).
    """
    _, file_ext = os.path.splitext(file_url)
    folder_hash = md5(file_url.encode('utf-8')).hexdigest()
    path = f"/notebooks/{folder_hash}"
    original = f"original{file_ext}"
    converted = f"converted{file_ext}"
    # TODO: delete the folder completely if `force`
    if not os.path.exists(path):
        file_content = _download_file(file_url)
        os.mkdir(path)
        with open(f"{path}/{original}", "w") as original_file:
            original_file.write(file_content)
        try:
            output = _convert_file(f"{path}/{original}", f"{path}/{converted}")
        except ConvertionException:
            # A failed conversion must not leave a half-built cache entry.
            shutil.rmtree(path)
            raise
        with open(f"{path}/output", "w") as summary_output:
            summary_output.write('\n'.join(output))
        shutil.copy('report.txt', f"{path}/report")
        # persist `report.txt` to GCS
        storage = FileStorage()
        storage.save_file('report.txt', folder_hash)
    # Python sources need to be encoded into notebooks separately.
    # (A previously duplicated, unreachable `.py` branch was removed:
    # this branch always returns, so the old second check never ran.)
    if original.endswith('.py'):
        result_filenames = [
            _save_ipynb_from_py(path, py_file)
            for py_file in (original, converted)
        ]
        return path, tuple(result_filenames)
    return path, (original, converted)
def is_list(string):
    """
    Checks to see if a string contains a list in the form [A, B]
    :param string: string to evaluate
    :return: Boolean
    """
    # Empty / None input can never look like a list.
    if not string:
        return False
    # Must be bracketed and contain at least one comma-separated element.
    return string[0] == '[' and string[-1] == ']' and ',' in string
def load(input_file):
    """
    Load a pcap file and return its parsed header.
    """
    return pcap.read_pcap_header(input_file)
def personal_wiki_pages(request):
    """
    Render the list of pages in the current user's personal wiki.
    """
    username = request.user.username
    # Group membership differs between cloud (org-scoped) and normal mode.
    if request.cloud_mode and request.user.org is not None:
        org_id = request.user.org.org_id
        groups = seaserv.get_org_groups_by_user(org_id, username)
    else:
        groups = seaserv.get_personal_groups_by_user(username)
    if groups:
        groups.sort(key=lambda g: g.group_name.lower())
    try:
        repo = get_personal_wiki_repo(username)
        pages = get_wiki_pages(repo)
    except SearpcError:
        return render_error(request, _('Internal Server Error'))
    except WikiDoesNotExist:
        return render_error(request, _('Wiki does not exists.'))
    context = {
        "pages": pages,
        "repo_id": repo.id,
        "search_repo_id": repo.id,
        "search_wiki": True,
        "grps": groups,
    }
    return render(request, "wiki/personal_wiki_pages.html", context)
def parse_args():
    """Parse command-line arguments and return the populated namespace."""
    descr = 'Plot the Bifurcation Diagram of Logistic, Cubic, and Sine Maps'
    examples = '''
    %(prog)s -r 1:4
    %(prog)s -r 4:6.5 --map=cubic
    %(prog)s --map=sine -s 200 -n 200
    %(prog)s -r 3.:4. -s 500 -n 600
    %(prog)s -r 3.5:3.6 -y .3:.6 -s 800 -n 1000'''
    arg_parser = argparser(descr, examples)
    # (flags, keyword arguments) for every supported option.
    # Defaults: iterate n=100 steps and skip plotting the first s=200;
    # the Logistic map is selected unless --map says otherwise.
    options = [
        (("-r", "--rate"),
         dict(action="store", dest="r",
              help="range of the growth rate parameter (default: the entire range)")),
        (("-y", "--people"),
         dict(action="store", dest="y",
              help="normalized range of the population (default: the entire range)")),
        (("-s", "--skip"),
         dict(action="store", dest="s", type=int, default=200,
              help="skip plotting the first 's' iterations (default: %(default)s)")),
        (("-n", "--steps"),
         dict(action="store", dest="n", type=int, default=100,
              help="number of iterations (default: %(default)s)")),
        (("-m", "--map"),
         dict(action="store", dest="map_name", default="logistic",
              choices=["logistic", "cubic", "sine"],
              help="select the desired map (logistic, cubic, or sine)")),
    ]
    for flags, kwargs in options:
        arg_parser.add_argument(*flags, **kwargs)
    return arg_parser.parse_args()
import string
import random
def get_random_mac_address():
    """Generate and return a random 12-hex-digit MAC address string.

    The second digit is forced to one of 2/4/A/E so the address is a
    locally administered, unicast MAC.
    NOTE(review): no separators are inserted, despite the old "format of
    WINDOWS" wording — callers receive e.g. 'A2B1C3D4E5F6'.
    """
    hex_digits = "0123456789ABCDEF"
    # random.choices draws WITH replacement; the previous random.sample
    # could never repeat a digit among the last 10 positions, which
    # shrank and biased the generated address space.
    return (
        random.choice(hex_digits)
        + random.choice("24AE")
        + "".join(random.choices(hex_digits, k=10))
    )
def remove_fragments(mol):
    """Strip known fragments from a molecule.

    Parameters
    ----------
    mol: rdkit.Chem.Mol
        A molecule possibly containing fragments.

    Returns
    -------
    mol: rdkit.Chem.Mol
        The molecule with known fragments removed.

    Notes
    -----
    The set of removable fragments is the predefined REMOVE_FRAGMENTS list
    in rdkit/Chem/MolStandardize/fragments.py.
    """
    remover = rdMolStandardize.FragmentRemover()
    return remover.remove(mol)
def mooring_horizontal_volume_transport(od):
    """
    Compute horizontal volume flux through a mooring array section (in/outflow).
    If the array is closed, transport at the first mooring is not computed.
    Otherwise, transport at both the first and last mooring is not computed.
    Transport can be computed following two paths, so the dimension `path` is added.
    .. math::
        T(mooring, Z, time, path) = T_x + T_y = u \\Delta y \\Delta z + v \\Delta x \\Delta z
    Parameters
    ----------
    od: OceanDataset
        oceandataset used to compute
    Returns
    -------
    ds: xarray.Dataset
        | transport : Horizontal volume transport
        | Vtransport : Meridional volume transport
        | Utransport : Zonal volume transport
        | Y_transport : Y coordinate of horizontal volume transport
        | X_transport : X coordinate of horizontal volume transport
        | Y_Utransport : Y coordinate of zonal volume transport
        | X_Utransport : X coordinate of zonal volume transport
        | Y_Vtransport : Y coordinate of meridional volume transport
        | X_Vtransport : X coordinate of meridional volume transport
        | dir_Utransport: Direction of zonal volume transport
        | dir_Vtransport: Direction of meridional volume transport
    See Also
    --------
    subsample.mooring_array
    """
    # Check input
    if not isinstance(od, _ospy.OceanDataset):
        raise TypeError('`od` must be OceanDataset')
    if 'mooring' not in od._ds.dims:
        raise ValueError('oceadatasets must be subsampled using `subsample.mooring_array`')
    # Add missing variables
    varList = ['XC', 'YC', 'dyG', 'dxG', 'drF', 'U', 'V', 'HFacS', 'HFacW', 'XU', 'YU', 'XV', 'YV']
    od = _add_missing_variables(od, varList)
    # Message
    print('Computing horizontal volume transport.')
    # Extract variables
    mooring = od._ds['mooring']
    XC = od._ds['XC'].squeeze(('Y', 'X'))
    YC = od._ds['YC'].squeeze(('Y', 'X'))
    XU = od._ds['XU'].squeeze(('Y'))
    YU = od._ds['YU'].squeeze(('Y'))
    XV = od._ds['XV'].squeeze(('X'))
    YV = od._ds['YV'].squeeze(('X'))
    # Compute transport (velocity * cell face width * partial-cell fraction * layer thickness)
    U_tran = (od._ds['U'] * od._ds['dyG'] * od._ds['HFacW'] * od._ds['drF']).squeeze('Y')
    V_tran = (od._ds['V'] * od._ds['dxG'] * od._ds['HFacS'] * od._ds['drF']).squeeze('X')
    # Extract left and right values
    U1=U_tran.isel(Xp1=1); U0=U_tran.isel(Xp1=0)
    V1=V_tran.isel(Yp1=1); V0=V_tran.isel(Yp1=0)
    # Initialize direction masks: shape (mooring, path); 0 excludes a face,
    # +/-1 selects it with the sign giving the positive-transport convention.
    U0_dir = _np.zeros((len(XC),2)); U1_dir = _np.zeros((len(XC),2))
    V0_dir = _np.zeros((len(YC),2)); V1_dir = _np.zeros((len(YC),2))
    # Steps
    diffX = _np.diff(XC); diffY = _np.diff(YC)
    # Closed array?
    if XC[0]==XC[-1] and YC[0]==YC[-1]:
        # Add first at the end
        closed = True
        diffX = _np.append(diffX,diffX[0])
        diffY = _np.append(diffY,diffY[0])
    else:
        closed = False
    # Loop over moorings, propagating the sign convention around each corner.
    # The ASCII glyphs (|_, |‾, ‾|, _|) sketch the shape of the section turn.
    Usign = 1; Vsign = 1
    keepXf = False; keepYf = False
    for i in range(len(diffX)-1):
        if diffY[i]==0 and diffY[i+1]==0:   # Zonal
            V1_dir[i+1,:]=Vsign; V0_dir[i+1,:]=Vsign
        elif diffX[i]==0 and diffX[i+1]==0: # Meridional
            U1_dir[i+1,:]=Usign; U0_dir[i+1,:]=Usign
        # Corners
        elif (diffY[i]<0 and diffX[i+1]>0): # |_
            Vsign=Usign;  keepYf=keepXf
            U0_dir[i+1,:]=Usign; V0_dir[i+1,:]=Vsign
        elif (diffY[i+1]>0 and diffX[i]<0):
            Usign=Vsign;  keepXf=keepYf
            U0_dir[i+1,:]=Usign; V0_dir[i+1,:]=Vsign
        elif (diffY[i]>0 and diffX[i+1]>0): # |‾
            Vsign=-Usign; keepYf=not keepXf
            U0_dir[i+1,:]=Usign; V1_dir[i+1,:]=Vsign
        elif (diffY[i+1]<0 and diffX[i]<0):
            Usign=-Vsign; keepXf=not keepYf
            U0_dir[i+1,:]=Usign; V1_dir[i+1,:]=Vsign
        elif (diffX[i]>0 and diffY[i+1]<0): # ‾|
            Usign=Vsign;  keepXf=keepYf
            V1_dir[i+1,:]=Vsign; U1_dir[i+1,:]=Usign
        elif (diffX[i+1]<0 and diffY[i]>0):
            Vsign=Usign;  keepYf=keepXf
            V1_dir[i+1,:]=Vsign; U1_dir[i+1,:]=Usign
        elif (diffX[i]>0 and diffY[i+1]>0): # _|
            Usign=-Vsign; keepXf=not keepYf
            V0_dir[i+1,:]= Vsign; U1_dir[i+1,:]=Usign
        elif (diffX[i+1]<0 and diffY[i]<0):
            Vsign=-Usign; keepYf=not keepXf
            V0_dir[i+1,:]= Vsign; U1_dir[i+1,:]=Usign
        # Each of the two alternative paths keeps only one of the two faces.
        if keepXf: U1_dir[i+1,0]=0; U0_dir[i+1,1]=0
        else:      U0_dir[i+1,0]=0; U1_dir[i+1,1]=0
        if keepYf: V1_dir[i+1,0]=0; V0_dir[i+1,1]=0
        else:      V0_dir[i+1,0]=0; V1_dir[i+1,1]=0
    # Create direction DataArrays.
    # Add a switch to return this? Useful to debug and/or plot velocities.
    U1_dir = _xr.DataArray(U1_dir, coords={'mooring': mooring, 'path': [0, 1]}, dims=('mooring', 'path'))
    U0_dir = _xr.DataArray(U0_dir, coords={'mooring': mooring, 'path': [0, 1]}, dims=('mooring', 'path'))
    V1_dir = _xr.DataArray(V1_dir, coords={'mooring': mooring, 'path': [0, 1]}, dims=('mooring', 'path'))
    V0_dir = _xr.DataArray(V0_dir, coords={'mooring': mooring, 'path': [0, 1]}, dims=('mooring', 'path'))
    # Mask first mooring
    U1_dir = U1_dir.where(U1_dir['mooring']!=U1_dir['mooring'].isel(mooring=0))
    U0_dir = U0_dir.where(U0_dir['mooring']!=U0_dir['mooring'].isel(mooring=0))
    V1_dir = V1_dir.where(V1_dir['mooring']!=V1_dir['mooring'].isel(mooring=0))
    V0_dir = V0_dir.where(V0_dir['mooring']!=V0_dir['mooring'].isel(mooring=0))
    if not closed:
        # Mask last mooring (open section: the endpoint lacks a neighbour)
        U1_dir = U1_dir.where(U1_dir['mooring']!=U1_dir['mooring'].isel(mooring=-1))
        U0_dir = U0_dir.where(U0_dir['mooring']!=U0_dir['mooring'].isel(mooring=-1))
        V1_dir = V1_dir.where(V1_dir['mooring']!=V1_dir['mooring'].isel(mooring=-1))
        V0_dir = V0_dir.where(V0_dir['mooring']!=V0_dir['mooring'].isel(mooring=-1))
    # Compute transport (1.E-6 converts m^3/s to Sverdrup)
    transport = (U1*U1_dir+U0*U0_dir+V1*V1_dir+V0*V0_dir)*1.E-6
    transport.attrs['units'] = 'Sv'
    transport.attrs['long_name'] = 'Horizontal volume transport'
    Vtransport = (V1*V1_dir+V0*V0_dir)*1.E-6
    Vtransport.attrs['units'] = 'Sv'
    Vtransport.attrs['long_name'] = 'Meridional volume transport'
    Utransport = (U1*U1_dir+U0*U0_dir)*1.E-6
    Utransport.attrs['units'] = 'Sv'
    Utransport.attrs['long_name'] = 'Zonal volume transport'
    # Additional info
    Y_transport = YC
    Y_transport.attrs['long_name'] = 'Y coordinate of horizontal volume transport'
    X_transport = XC
    X_transport.attrs['long_name'] = 'X coordinate of horizontal volume transport'
    Y_Utransport = _xr.where(U1_dir!=0, YU.isel(Xp1=1), _np.nan)
    Y_Utransport = _xr.where(U0_dir!=0, YU.isel(Xp1=0), Y_Utransport)
    Y_Utransport.attrs['long_name'] = 'Y coordinate of zonal volume transport'
    X_Utransport = _xr.where(U1_dir!=0, XU.isel(Xp1=1), _np.nan)
    X_Utransport = _xr.where(U0_dir!=0, XU.isel(Xp1=0), X_Utransport)
    X_Utransport.attrs['long_name'] = 'X coordinate of zonal volume transport'
    Y_Vtransport = _xr.where(V1_dir!=0, YV.isel(Yp1=1), _np.nan)
    Y_Vtransport = _xr.where(V0_dir!=0, YV.isel(Yp1=0), Y_Vtransport)
    Y_Vtransport.attrs['long_name'] = 'Y coordinate of meridional volume transport'
    X_Vtransport = _xr.where(V1_dir!=0, XV.isel(Yp1=1), _np.nan)
    X_Vtransport = _xr.where(V0_dir!=0, XV.isel(Yp1=0), X_Vtransport)
    X_Vtransport.attrs['long_name'] = 'X coordinate of meridional volume transport'
    dir_Vtransport = _xr.where(V1_dir!=0, V1_dir, _np.nan)
    dir_Vtransport = _xr.where(V0_dir!=0, V0_dir, dir_Vtransport)
    dir_Vtransport.attrs['long_name'] = 'Direction of meridional volume transport'
    dir_Vtransport.attrs['units'] = '1: original, -1: flipped'
    dir_Utransport = _xr.where(U1_dir!=0, U1_dir, _np.nan)
    dir_Utransport = _xr.where(U0_dir!=0, U0_dir, dir_Utransport)
    dir_Utransport.attrs['long_name'] = 'Direction of zonal volume transport'
    dir_Utransport.attrs['units'] = '1: original, -1: flipped'
    # Create ds
    ds = _xr.Dataset({'transport' : transport,
                      'Vtransport' : Vtransport,
                      'Utransport' : Utransport,
                      'Y_transport' : Y_transport,
                      'X_transport' : X_transport,
                      'Y_Utransport' : Y_Utransport,
                      'X_Utransport' : X_Utransport,
                      'Y_Vtransport' : Y_Vtransport,
                      'X_Vtransport' : X_Vtransport,
                      'dir_Utransport': dir_Utransport,
                      'dir_Vtransport': dir_Vtransport,}, attrs=od.dataset.attrs)
    return _ospy.OceanDataset(ds).dataset
from typing import cast
def _cast_list(definition: dict, value: list) -> list:
"""
Convert a list botocore type into formatted values recursively casting its items.
:param definition:
Specification definition for the associated value to cast.
:param value:
A loaded value to be cast into its boto client response value.
:return:
The cast version of the specified value that matches the format of
the value as it would be returned in a boto client response.
"""
return [cast(definition.get("member"), v) for v in value] | 875b965c8e841c4963bc95a053aed615d7d55097 | 3,627,682 |
def demo_hello():
    """Example with simple swagger definition
    This is the most simple swagger definition example.
    ---
    tags:
      - demo
    # tags: [demo1, demo3]
    parameters:
      - name: username
        in: query
        type: string
        required: true
      - name: age
        in: query
        type: integer
    responses:
      200:
        description: The person you called.
        schema:
          $ref: '#/definitions/Person'
    """
    # NOTE: the docstring above is a swagger spec parsed at runtime
    # (flasgger-style) — do not reword it casually.
    # Collect request details purely for logging/diagnostics.
    info = {'Header[User-Agent]': request.headers.get('User-Agent'),
            'args': request.args,
            'query_string': request.query_string,
            'name': request.args.get('name')}
    log.info('Request Info:\n%s', right_just_dict(info))
    # Echo back the query parameters; `age` defaults to 23 when absent.
    return jsonify(dict(rc=0, data=dict(id=2, username=request.args.get('username'), age=request.args.get('age', 23)), msg='success'))
def get_signed_node(node, sign, reverse):
    """Given sign and direction, return a node
    Assign the correct sign to the source node:
    If search is downstream, source is the first node and the search must
    always start with + as node sign. The leaf node sign (i.e. the end of
    the path) in this case will then be determined by the requested sign.
    If reversed search, the source is the last node and can have
    + or - as node sign depending on the requested sign.
    """
    # No sign requested: the node is returned unsigned.
    if sign is None:
        return node
    # Upstream (reversed) search keeps the requested sign.
    if reverse:
        return node, sign
    # Downstream search always starts from a positive node.
    return node, INT_PLUS
def isChinese(word):
    """Return True if every character of ``word`` lies in the CJK
    Unified Ideographs range U+4E00..U+9FA5 (vacuously True for "")."""
    return all('\u4e00' <= ch <= '\u9fa5' for ch in word)
from typing import Optional
import ssl
from typing import Sequence
import asyncio
from typing import Mapping
from typing import Iterable
from typing import Type
from typing import Any
from typing import cast
import websockets
async def serve(
        ssl_context: Optional[ssl.SSLContext],
        keys: Optional[Sequence[ServerSecretPermanentKey]],
        paths: Optional['Paths'] = None,
        host: Optional[str] = None,
        port: int = 8765,
        loop: Optional[asyncio.AbstractEventLoop] = None,
        event_callbacks: Optional[Mapping[Event, Iterable[EventCallback]]] = None,
        server_class: Optional[Type[ST]] = None,
        ws_kwargs: Optional[Mapping[str, Any]] = None,
) -> ST:
    """
    Start serving SaltyRTC Signalling Clients.
    Arguments:
        - `ssl_context`: An `ssl.SSLContext` instance for WSS.
        - `keys`: A sorted sequence of :class:`libnacl.public.SecretKey`
          instances containing permanent private keys of the server.
          The first key will be designated as the primary key.
        - `paths`: A :class:`Paths` instance that maps path names to
          :class:`Path` instances. Can be used to share paths on
          multiple WebSockets. Defaults to an empty paths instance.
        - `host`: The hostname or IP address the server will listen on.
          Defaults to all interfaces.
        - `port`: The port the client should connect to. Defaults to
          `8765`.
        - `loop`: A :class:`asyncio.BaseEventLoop` instance or `None`
          if the default event loop should be used.
        - `event_callbacks`: An optional dict with keys being an
          :class:`Event` and the value being a list of callback
          coroutines. The callback will be called every time the event
          occurs.
        - `server_class`: An optional :class:`Server` class to create
          an instance from.
        - `ws_kwargs`: Additional keyword arguments passed to
          :func:`websockets.server.serve`. Note that the fields `ssl`,
          `host`, `port`, `loop`, `subprotocols`, `ping_interval` and
          `select_subprotocol` will be overridden.
          If the `compression` field is not explicitly set,
          compression will be disabled (since the data to be compressed
          is already encrypted, compression will have little to no
          positive effect).
    Raises :exc:`ServerKeyError` in case one or more keys have been repeated.
    """
    if loop is None:
        loop = asyncio.get_event_loop()
    # Create paths if not given
    if paths is None:
        paths = Paths()
    # Create server
    if server_class is None:
        server_class = cast('Type[ST]', Server)
    server = server_class(keys, paths, loop=loop)
    # Register event callbacks
    if event_callbacks is not None:
        for event, callbacks in event_callbacks.items():
            for callback in callbacks:
                server.register_event_callback(event, callback)
    # Prepare arguments for the WS server
    # (copy the mapping so the caller's dict is never mutated)
    if ws_kwargs is None:
        ws_kwargs = {}
    else:
        ws_kwargs = dict(ws_kwargs)
    ws_kwargs['ssl'] = ssl_context
    ws_kwargs['host'] = host
    ws_kwargs['port'] = port
    ws_kwargs.setdefault('compression', None)
    ws_kwargs['ping_interval'] = None  # Disable the keep-alive of the transport library
    ws_kwargs['subprotocols'] = server.subprotocols
    ws_kwargs['select_subprotocol'] = server.protocol_class.select_subprotocol
    # Start WS server
    # NOTE(review): `loop` is used only for the Server instance above and is
    # not forwarded to websockets.serve — confirm this is intentional.
    ws_server = await websockets.serve(server.handler, **ws_kwargs)
    # Set WS server instance
    server.server = ws_server
    # Return server
    return server
def ll_dist(lon1, lat1, lon2, lat2):
    """
    Return the great-circle (haversine) distance between two
    (longitude, latitude) points, scaled by the module's R_EARTH.
    """
    # Convert to radians; deltas are taken in degrees first, then converted.
    phi1 = radians(lat1)
    phi2 = radians(lat2)
    dphi = radians(lat2 - lat1)
    dlmb = radians(lon2 - lon1)
    # Haversine formula.
    h = sin(dphi / 2.0) ** 2 + cos(phi1) * cos(phi2) * sin(dlmb / 2.0) ** 2
    return R_EARTH * 2.0 * atan2(sqrt(h), sqrt(1 - h))
import json
def game(request, *, window_codename):
    """Display problems for a competition window.

    Picks one of four templates depending on the window/timer state
    (waiting, ended, inactive, expired, or active) and passes countdown
    data to the client via a JSON-encoded `js_context`.
    """
    # Process request
    superuser = request.user.is_superuser
    team = request.user.competitor.team
    try:
        window = queries.get_window(window_codename)
    except models.Window.DoesNotExist:
        raise Http404()
    # Initialize context
    context = windowed_context(window)
    context['prob_list'] = queries.problem_list(team=team, window=window)
    context['max_flag_size'] = MAX_FLAG_SIZE
    js_context = {}
    if not window.started() and not superuser:
        # Window has not opened yet: show a countdown to its start.
        template_name = 'ctflex/game/waiting.html'
        js_context[COUNTDOWN_ENDTIME_KEY] = window.start.isoformat()
    elif window.ended():
        # Window is over; tell the team whether they can still compete
        # in the currently ongoing window (if any).
        template_name = 'ctflex/game/ended.html'
        current_window = queries.get_window()
        context['current_window'] = current_window
        # Check whether the current window is (in)active and
        # so whether the team could still solve problems
        context['can_compete_in_current_window'] = (
            current_window.ongoing()
            and (
                not team.has_timer(window)
                or team.has_active_timer(window)
            )
        )
    elif not team.has_timer(window) and not superuser:
        # Team has not started their personal timer yet.
        template_name = 'ctflex/game/inactive.html'
        js_context[COUNTDOWN_ENDTIME_KEY] = window.end.isoformat()
        js_context[COUNTDOWN_MAX_MICROSECONDS_KEY] = (
            window.personal_timer_duration.total_seconds() * 1000
        )
    elif not team.has_active_timer(window) and not superuser:
        # Personal timer ran out before the window closed.
        template_name = 'ctflex/game/expired.html'
    else:
        template_name = 'ctflex/game/active.html'
        if superuser:
            messages.warning(request, "You are viewing this window as a superuser.")
        else:
            js_context[COUNTDOWN_ENDTIME_KEY] = team.timer(window).end.isoformat()
    context['js_context'] = json.dumps(js_context)
    return render(request, template_name, context)
def to_video_labels(label=None, frames=None, warn_unsupported=True):
    """Converts the given labels to ``eta.core.video.VideoLabels`` format.

    Args:
        label (None): video-level labels provided as a
            :class:`fiftyone.core.labels.Label` instance or dict mapping
            field names to :class:`fiftyone.core.labels.Label` instances
        frames (None): frame-level labels provided as a dict mapping frame
            numbers to dicts mapping field names to
            :class:`fiftyone.core.labels.Label` instances
        warn_unsupported (True): whether to issue warnings if unsupported
            label values are encountered

    Returns:
        a ``eta.core.video.VideoLabels``
    """
    out_labels = etav.VideoLabels()

    # Video-level labels; a bare Label is wrapped under a default field name
    if label is not None:
        wrapped = label if isinstance(label, dict) else {"labels": label}
        _add_video_labels(
            out_labels, wrapped, warn_unsupported=warn_unsupported
        )

    # Frame-level labels, one VideoFrameLabels per frame number
    if frames is not None:
        for frame_number, frame in frames.items():
            frame_labels = etav.VideoFrameLabels(frame_number)
            _add_frame_labels(
                frame_labels, frame, warn_unsupported=warn_unsupported
            )
            out_labels[frame_number] = frame_labels

    return out_labels
def api_create_user():
    """Creates a new user.
    Example Request:
    HTTP POST /api/v1/users/create
    {
        "username": "johndoe",
        "password": "Password1",
        "email": "johndoe@example.com",
        "first_name": "John"
    }
    Example Response:
    {
        "success": "User 'johndoe' created successfully."
    }
    """
    # `get_json()` returns None for a missing/non-JSON body; normalize to {}
    # so the client gets a clean validation error instead of a 500.
    data = request.get_json() or {}
    # Validate every mandatory field; the iteration order determines which
    # missing field is reported first (matches the previous behavior).
    for field in ("username", "password", "email", "first_name"):
        if data.get(field) is None:
            return json_error("{} is required.".format(field))
    user_db = UserDB()
    user = user_db.create_user(username=data["username"],
                               password=data["password"],
                               email=data["email"],
                               first_name=data["first_name"])
    return json_success("User '{}' created successfully.".format(user.username))
def plot_activity(preds, sort_map_list, label_list, window_size=2, active_length_cutoff=3, disable_inactive=True,
                  start=None, end=None, step=10, ax=None, fig=None, cbar=False, legend=True):
    """Visualize the progression of an outbreak through time. Apply activity cutoffs based on time and similar
    infections.
    Primary visualization method. Window size should match the window size of the model used to generate `preds`.
    Horizontal axis is time-like (index of outbreak slices); vertical axis captures the state of the samples, so
    following a single row shows how an individual's label changes through time.
    Args:
        preds (List[int]): Predictions output passed through to build a predictions matrix.
        sort_map_list: Map to reorder the infections.
        label_list (dict): Dict of labels
        window_size (int): Window size of the model used to generate the preds
        active_length_cutoff (int): How many years should an infection be considered active before changing the label?
        start (int): Offset to start where the data is viewed. Defaults to 0 when `end` is given.
        end (int): Offset to end where the data is viewed. Default `None` shows all data
        step (int): Step size for axis ticks.
        fig (matplotlib figure): Optional matplotlib figure.
        ax (matplotlib axis): Optional matplotlib axis.
        cbar (bool): Draw a color bar on the figure.
        disable_inactive (bool): Deprecated and unused. Will be removed in a future release.
    legend (bool): Draw a legend.
    Returns:
        axis: Axis with drawn plot
        figure: Figure containing axis with plot.
    """
    image = build_activity_matrix_overlay(preds=preds, sort_map_list=sort_map_list, label_list=label_list,
                                          window_size=window_size, active_length_cutoff=active_length_cutoff)
    if ax is None or fig is None:
        fig, ax = plt.subplots()
    cblabels = ['Reactivated outbreak', 'Inactive outbreak', 'NA', 'Active outbreak', 'Endemic']
    cblabels_ = ['Reactivated epidemic', 'Inactive epidemic', 'Active epidemic', 'Endemic']
    if end is None:
        # Full image; nearest-neighbour rendering keeps category boundaries crisp.
        predmap = ax.imshow(image, cmap=activity_colors, vmin=-3.5, vmax=1.5, interpolation='none')
    else:
        # Normalize `start` so passing only `end` no longer crashes
        # (previously `yend - start` raised TypeError for start=None).
        if start is None:
            start = 0
        xend = np.min([end, image.shape[0]])
        yend = np.min([end, image.shape[1]])
        # NOTE(review): unlike the full-image branch, no vmin/vmax is set here,
        # so colors may shift when zooming — confirm whether that is intended.
        predmap = ax.imshow(image[start:xend, start:yend], cmap=activity_colors, interpolation='none')
        ax.set_xticks(np.arange(0, yend - start, step=step))
        ax.set_xticklabels(labels=np.arange(start, yend, step=step))
        ax.set_yticks(np.arange(0, xend - start, step=step))
        ax.set_yticklabels(np.arange(start, xend, step=step))
    if cbar:
        divider = make_axes_locatable(ax)
        cax = divider.append_axes("right", size="5%", pad=0.1)
        cbar_obj = fig.colorbar(predmap, cax=cax, ticks=[-3, -2, -1, 0, 1])
        cbar_obj.ax.set_yticklabels(cblabels)
    if legend:
        # Build proxy artists so the legend can show the categorical colors.
        ph = [ax.plot([], label=cblabels_[i], marker='s', linestyle=(0, (1, 100)), mfc=colors_[i], mec=colors_[i],
                      c='w', markersize=50)[0] for i in range(4)]
        ph.reverse()
        cblabels_.reverse()
        ax.legend(ph, cblabels_, loc='lower left')
    ax.set_xlabel('Outbreak progression')
    ax.set_ylabel('Collected sequence')
    return ax, fig
import six
def make_events_query_from_filter(event_filter):
    """Return start and stop row for filtering and a query.
    Query is based on the selected parameter.
    :param event_filter: storage.EventFilter object.
    """
    q = {}
    ts_range = make_timestamp_range(event_filter.start_time,
                                    event_filter.end_time)
    if ts_range:
        q['timestamp'] = ts_range
    if event_filter.event_type:
        q['event_type'] = event_filter.event_type
    if event_filter.message_id:
        q['_id'] = event_filter.message_id
    if event_filter.traits_filter:
        # Placeholder (None) so the first trait filter below can detect
        # whether it is the first one and fill it in directly.
        q.setdefault('traits')
        for trait_filter in event_filter.traits_filter:
            # Comparison operator defaults to equality.
            op = trait_filter.pop('op', 'eq')
            dict_query = {}
            for k, v in six.iteritems(trait_filter):
                if v is not None:
                    # All parameters in EventFilter['traits'] are optional, so
                    # we need to check if they are in the query or no.
                    if k == 'key':
                        dict_query.setdefault('trait_name', v)
                    elif k in ['string', 'integer', 'datetime', 'float']:
                        dict_query.setdefault('trait_type',
                                              EVENT_TRAIT_TYPES[k])
                        # Non-eq operators become Mongo comparison operators
                        # via the OP_SIGN lookup table.
                        dict_query.setdefault('trait_value',
                                              v if op == 'eq'
                                              else {OP_SIGN[op]: v})
            dict_query = {'$elemMatch': dict_query}
            # First filter fills the placeholder; the second converts the
            # query to an $and list; further filters append to that list.
            if q['traits'] is None:
                q['traits'] = dict_query
            elif q.get('$and') is None:
                q.setdefault('$and', [{'traits': q.pop('traits')},
                                      {'traits': dict_query}])
            else:
                q['$and'].append({'traits': dict_query})
    return q
def jpeg_next_marker(fh):
    """Scan ahead to the start of the next valid-looking marker and return
    the marker id byte, or None if end-of-file is reached first.
    TODO use fh.read instead of read_exactly
    """
    try:
        # Find a 0xff byte; normally the stream is already positioned on one.
        marker = read_exactly(fh, 1)
        while ord3(marker) != 0xff:
            marker = read_exactly(fh, 1)
        # Any run of additional 0xff bytes is valid padding; skip until the
        # actual marker id appears.
        marker = read_exactly(fh, 1)
        while ord3(marker) == 0xff:
            marker = read_exactly(fh, 1)
    except EOFException:
        return None
    # `marker` now holds the marker id.
    logger.debug("jpeg_next_marker: at marker %02X (%d)", ord3(marker), ord3(marker))
    return marker
def data_context_topology_context_topologyuuid_nodenode_uuid_owned_node_edge_pointowned_node_edge_point_uuid_cep_list_connection_end_pointconnection_end_point_uuid_otsi_connection_end_point_spec_otsi_termination_selected_central_frequency_get(uuid, node_uuid, owned_node_edge_point_uuid, connection_end_point_uuid):  # noqa: E501
    """Stub GET handler for the OTSi selected-central-frequency endpoint.

    Intended to return tapi.photonic.media.CentralFrequency  # noqa: E501

    :param uuid: Id of topology
    :type uuid: str
    :param node_uuid: Id of node
    :type node_uuid: str
    :param owned_node_edge_point_uuid: Id of owned-node-edge-point
    :type owned_node_edge_point_uuid: str
    :param connection_end_point_uuid: Id of connection-end-point
    :type connection_end_point_uuid: str
    :rtype: TapiPhotonicMediaCentralFrequency
    """
    # Generated server stub: real controller logic is not implemented yet.
    return 'do some magic!'
from typing import Optional
from typing import List
def get_edge_trace(
    g: nx.Graph,
    edge_colours: Optional[List[str]] = None,
) -> List[go.Scatter]:
    """Build one plotly scatter trace per edge of a PPI graph, so each edge
    can be coloured individually by its evidence source.

    :param g: graph whose nodes carry a ``pos`` attribute
    :type g: nx.Graph
    :param edge_colours: colours for (string, biogrid, other) edge kinds;
        defaults to ["red", "blue", "yellow"]
    :type edge_colours: Optional[List[str]]
    :return: list of per-edge scatter traces
    :rtype: List[go.Scatter]
    """
    colours = ["red", "blue", "yellow"] if edge_colours is None else edge_colours
    traces = []
    for u, v, d in g.edges(data=True):
        # Endpoint positions
        x0, y0 = g.nodes[u]["pos"]
        x1, y1 = g.nodes[v]["pos"]
        # Colour by evidence source
        if d["kind"] == {"string"}:
            line_colour = colours[0]
        elif d["kind"] == {"biogrid"}:
            line_colour = colours[1]
        else:
            line_colour = colours[2]
        traces.append(
            go.Scatter(
                line=dict(width=2, color=line_colour),
                hoverinfo="text",
                x=(x0, x1),
                y=(y0, y1),
                mode="lines",
                text=[
                    " / ".join(list(edge_type)) for edge_type in g[u][v]["kind"]
                ],
            )
        )
    return traces
def matmul_A_BT(a, b):
    """
    Computes A * B.T, dealing automatically with sparsity and data modes.

    The multiplication is delegated to ``matmul_A_B`` after transposing
    ``b`` with the axis permutation appropriate for its rank.

    :param a: Tensor or SparseTensor with rank 2 or 3.
    :param b: Tensor or SparseTensor with rank 2 or 3.
    :return: Tensor or SparseTensor with rank = max(rank(a), rank(b)).
    :raise ValueError: if the detected mode is not one of single, mixed,
        inverted mixed or batch (i.e. ranks are not 2 or 3).
    """
    mode = modes.autodetect_mode(a, b)
    if mode == modes.SINGLE or mode == modes.iMIXED:
        # Single (rank(a)=2, rank(b)=2)
        # Inverted mixed (rank(a)=3, rank(b)=2)
        # b is rank 2: plain transpose.
        b_t = ops.transpose(b)
    elif mode == modes.MIXED or mode == modes.BATCH:
        # Mixed (rank(a)=2, rank(b)=3)
        # Batch (rank(a)=3, rank(b)=3)
        # b is rank 3: transpose the last two axes, keep the batch axis.
        b_t = ops.transpose(b, (0, 2, 1))
    else:
        raise ValueError('Expected ranks to be 2 or 3, got {} and {}'.format(K.ndim(a), K.ndim(b)))
    return matmul_A_B(a, b_t)
def entries_to_labels_scores(entries):
    """
    Convert entries to labels, scores and bar colours for a barplot.

    Each entry is a dict with at least ``'algo-title'`` and ``'score'``
    keys. The returned labels are just positional indices that discern
    entries; the displayed labels are set in set_label_legends.

    :param entries: iterable of entry dicts.
    :return: tuple ``(labels, scores, colors)`` of equal-length lists,
        where labels is ``[0, 1, ..., len(entries) - 1]``.
    """
    scores = []
    colors = []
    for entry in entries:
        # Special colours for the 'Human' and 'Random' baselines.
        if entry['algo-title'] == 'Human':
            color = 'red'
        elif entry['algo-title'] == 'Random':
            color = 'black'
        else:
            color = 'darkblue'
        scores.append(entry['score'])
        colors.append(color)
    # NOTE: the original also built a `nicknames` list from
    # entry['algo-nickname'] but never used it; dropped here.
    return list(range(len(scores))), scores, colors
import sys
def guessarraytype(arr, makeintfloats=False):
    """
    guess the underlying datatype (out of 'i8', 'f4', 'a20') of an iterable
    of strings. If the iterable contains strings that are guessed to be of
    different types, the most 'general' type will be returned, where we mean
    ('i8', 'f4', 'a20') are assumed to be in increasing order of generality.

    Parameters
    ----------
    arr : mandatory, array-like object of strings
        collection of strings
    makeintfloats: optional, bool, defaults to False
        If true, assumes that strings that can be integers are actually
        floats, so that strings like '3' are treated as '3.0'

    Returns
    -------
    One of 'i8', 'f4', 'a20'

    Examples
    --------
    >>> arr = ['3', '2', '4']
    >>> guessarraytype(arr)
    'i8'
    >>> arr = ['3', '2', '4']
    >>> guessarraytype(arr, makeintfloats=True)
    'f4'
    >>> arr = ['3', '2', '4', '7.0']
    >>> guessarraytype(arr, makeintfloats=False)
    'f4'
    >>> arr = ['3.4', '2.7', '4.0']
    >>> guessarraytype(arr)
    'f4'
    >>> arr = ['3.4', '2.7', '4.0', 's23']
    >>> guessarraytype(arr)
    'a20'
    """
    # Materialize the per-element guesses in a list first: on Python 3,
    # np.array(map(...)) would wrap the map iterator in a 0-d object array
    # and silently break the elementwise comparisons below.
    typearr = np.array([guesstype(x, makeintfloats=makeintfloats)[0]
                        for x in arr])
    # NOTE(review): an empty `arr` yields an empty array and the comparisons
    # below do not behave meaningfully — presumably callers always pass a
    # non-empty collection; confirm upstream.
    if any(typearr == 'a20'):
        return 'a20'
    elif any(typearr == 'f4'):
        return 'f4'
    elif all(typearr == 'i8'):
        return 'i8'
    else:
        # Unreachable sys.exit() after this raise was removed.
        raise ValueError('It seems that guesstype is not finding one of \
\'f4\', \'i8\' or \'a20\' as the types of all elements in arr')
def get_current_system_datetime(device):
    """ Returns current time of system

        Args:
            device ('obj'): device to use

        Returns:
            current time ('str') formatted as "<month> <day> <time>",
            or None if the parser output is empty or incomplete

        Raises:
            None
    """
    log.info("Getting current system time")
    try:
        out = device.parse("show clock")
    except SchemaEmptyParserError:
        return None
    if out and "time" in out and "month" in out and "day" in out:
        return "{} {} {}".format(out["month"], out["day"], out["time"])
    # Parser output lacked one of the expected keys; make the
    # previously-implicit None return explicit.
    return None
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.